Initial commit

This commit is contained in:
2026-03-03 00:39:30 +05:00
commit fc01f07d9b
29933 changed files with 5353098 additions and 0 deletions

View File

@@ -0,0 +1,75 @@
using System;
using System.Collections.Generic;
using UnityEngine;
namespace Unity.Jobs
{
/// <summary>
/// Used by automatically generated code. Do not use in projects.
/// Collects early-initialization delegates registered at startup and invokes
/// them once the job system is able to run them.
/// </summary>
public class EarlyInitHelpers
{
    /// <summary>
    /// Used by automatically generated code. Do not use in projects.
    /// Signature of an early-initialization callback.
    /// </summary>
    public delegate void EarlyInitFunction();

    // Callbacks queued via AddEarlyInitFunction; null when nothing is pending.
    private static List<EarlyInitFunction> s_PendingDelegates;

    static EarlyInitHelpers()
    {
        FlushEarlyInits();
    }

    /// <summary>
    /// Used by automatically generated code. Do not use in projects.
    /// Invokes every pending EarlyInit delegate and empties the pending list.
    /// </summary>
    public static void FlushEarlyInits()
    {
        // A delegate may register more work while running, so keep draining
        // until no new batch has appeared.
        while (s_PendingDelegates != null)
        {
            var batch = s_PendingDelegates;
            s_PendingDelegates = null;

            foreach (var initFunction in batch)
            {
                try
                {
                    initFunction();
                }
                catch (Exception ex)
                {
                    // One failing initializer must not stop the remaining ones.
                    Debug.LogException(ex);
                }
            }
        }
    }

    /// <summary>
    /// Used by automatically generated code. Do not use in projects.
    /// Queues an EarlyInit helper function on the invocation list.
    /// </summary>
    /// <param name="func">EarlyInitFunction added to the early call list</param>
    public static void AddEarlyInitFunction(EarlyInitFunction func)
    {
        if (s_PendingDelegates == null)
            s_PendingDelegates = new List<EarlyInitFunction>();

        s_PendingDelegates.Add(func);
    }

    /// <summary>
    /// Used by automatically generated code. Do not use in projects.
    /// This method is called when JobReflectionData cannot be created during EarlyInit.
    /// </summary>
    /// <param name="ex">Exception describing why reflection data creation failed</param>
    public static void JobReflectionDataCreationFailed(Exception ex)
    {
        Debug.LogError($"Failed to create job reflection data. Please refer to callstack of exception for information on which job could not produce its reflection data.");
        Debug.LogException(ex);
    }
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 05a99ad2800ad2f49b22ab35d999bfc7
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,341 @@
using System;
using Unity.Jobs.LowLevel.Unsafe;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using System.Diagnostics;
using Unity.Burst;
using Unity.Mathematics;
namespace Unity.Jobs
{
/// <summary>
/// **Obsolete.** Use <see cref="IJobFilterExtensions"/> instead.
/// </summary>
[Obsolete("'JobParallelIndexListExtensions' has been deprecated; Use 'IJobFilterExtensions' instead.", false)]
public static class JobParallelIndexListExtensions
{
    /// <summary>
    /// **Obsolete.** Forwards to the current ScheduleAppend overload; the batch count argument is ignored.
    /// </summary>
    /// <typeparam name="T">Job type</typeparam>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">List that passing indices are appended to.</param>
    /// <param name="arrayLength">Number of indices to test, starting from index 0.</param>
    /// <param name="innerloopBatchCount">Ignored; retained only for source compatibility.</param>
    /// <param name="dependsOn">Handle of a job this job must wait on.</param>
    /// <returns>Handle identifying the scheduled job.</returns>
    [Obsolete("The signature for 'ScheduleAppend' has changed. 'innerloopBatchCount' is no longer part of this API.", false)]
    public static unsafe JobHandle ScheduleAppend<T>(this T jobData, NativeList<int> indices, int arrayLength, int innerloopBatchCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobFilter
    {
        // 'innerloopBatchCount' is deliberately dropped when forwarding.
        return jobData.ScheduleAppend(indices, arrayLength, dependsOn);
    }

    /// <summary>
    /// **Obsolete.** Forwards to the current ScheduleFilter overload; the batch count argument is ignored.
    /// </summary>
    /// <typeparam name="T">Job type</typeparam>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">List of indices filtered in place.</param>
    /// <param name="innerloopBatchCount">Ignored; retained only for source compatibility.</param>
    /// <param name="dependsOn">Handle of a job this job must wait on.</param>
    /// <returns>Handle identifying the scheduled job.</returns>
    [Obsolete("The signature for 'ScheduleFilter' has changed. 'innerloopBatchCount' is no longer part of this API.")]
    public static unsafe JobHandle ScheduleFilter<T>(this T jobData, NativeList<int> indices, int innerloopBatchCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobFilter
    {
        // 'innerloopBatchCount' is deliberately dropped when forwarding.
        return jobData.ScheduleFilter(indices, dependsOn);
    }
}
/// <summary>
/// **Obsolete.** Use <see cref="IJobFilter"/> instead.
/// </summary>
[Obsolete("'IJobParallelForFilter' has been deprecated; use 'IJobFilter' instead. (UnityUpgradable) -> IJobFilter")]
public interface IJobParallelForFilter
{
    /// <summary>
    /// Filter predicate invoked once per candidate index (see <see cref="IJobFilter.Execute"/>,
    /// which replaces this interface with an identical signature).
    /// </summary>
    /// <param name="index">Index of the element being tested</param>
    /// <returns>True to keep the index in the results, false to filter it out</returns>
    bool Execute(int index);
}
/// <summary>
/// Filters a list of indices.
/// </summary>
/// <remarks>
/// IJobFilter allows for custom jobs to implement a bool Execute(int index) job function used to filter a list of indices.
/// For a provided list and index range, the list will be modified to append all indices for which Execute returns true or to exclude all indices for which Execute returns false
/// depending on if ScheduleAppend or Schedule is used, respectively, for enqueuing the job with the job system.
/// </remarks>
[JobProducerType(typeof(IJobFilterExtensions.JobFilterProducer<>))]
public interface IJobFilter
{
    /// <summary>
    /// Filter function. A list of indices is provided when scheduling this job type. The
    /// Execute function will be called once for each index, returning whether the job data at
    /// the passed-in index passes the filter.
    /// </summary>
    /// <param name="index">Index to use when reading job data for the purpose of filtering</param>
    /// <returns>True to keep the index in the result list, false to exclude it</returns>
    bool Execute(int index);
}
/// <summary>
/// Extension class for the IJobFilter job type providing custom overloads for scheduling and running.
/// </summary>
public static class IJobFilterExtensions
{
    // Job producer: wraps the user's job so the job system can invoke the
    // filtering logic in either "append" or "in-place filter" mode.
    internal struct JobFilterProducer<T> where T : struct, IJobFilter
    {
        // Data handed to the scheduled job: the user job plus the index list and mode flag.
        public struct JobWrapper
        {
            [NativeDisableParallelForRestriction]
            public NativeList<int> outputIndices;
            // Number of indices to test in append mode; -1 selects filter mode (see Execute).
            public int appendCount;
            public T JobData;
        }

        // Per-specialization cache of the job reflection data pointer, shared with Burst-compiled code.
        internal static readonly SharedStatic<IntPtr> jobReflectionData = SharedStatic<IntPtr>.GetOrCreate<JobFilterProducer<T>>();

        // Creates the reflection data once; reflection is not Burst-compatible, hence BurstDiscard.
        [BurstDiscard]
        internal static void Initialize()
        {
            if (jobReflectionData.Data == IntPtr.Zero)
                jobReflectionData.Data = JobsUtility.CreateJobReflectionData(typeof(JobWrapper), typeof(T), (ExecuteJobFunction)Execute);
        }

        public delegate void ExecuteJobFunction(ref JobWrapper jobWrapper, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex);

        /// <summary>
        /// Job Producer method invoked by the Job System when running an IJobFilter Job.
        /// Dispatches to filter or append execution based on JobWrapper.appendCount.
        /// </summary>
        /// <param name="jobWrapper">IJobFilter wrapper type</param>
        /// <param name="additionalPtr">unused</param>
        /// <param name="bufferRangePatchData">Data used to patch min/max buffer ranges for safety checks (collections checks builds only)</param>
        /// <param name="ranges">unused</param>
        /// <param name="jobIndex">unused</param>
        public static void Execute(ref JobWrapper jobWrapper, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex)
        {
            if (jobWrapper.appendCount == -1)
                ExecuteFilter(ref jobWrapper, bufferRangePatchData);
            else
                ExecuteAppend(ref jobWrapper, bufferRangePatchData);
        }

        // Append mode: tests indices [0, appendCount) and appends each passing index to outputIndices.
        public static unsafe void ExecuteAppend(ref JobWrapper jobWrapper, System.IntPtr bufferRangePatchData)
        {
            int oldLength = jobWrapper.outputIndices.Length;
            // Reserve worst-case capacity up front so the raw pointer taken below
            // cannot be invalidated by a reallocation mid-loop.
            jobWrapper.outputIndices.Capacity = math.max(jobWrapper.appendCount + oldLength, jobWrapper.outputIndices.Capacity);
            int* outputPtr = (int*)jobWrapper.outputIndices.GetUnsafePtr();
            int outputIndex = oldLength;

#if ENABLE_UNITY_COLLECTIONS_CHECKS
            JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobWrapper),
                0, jobWrapper.appendCount);
#endif
            for (int i = 0; i != jobWrapper.appendCount; i++)
            {
                if (jobWrapper.JobData.Execute(i))
                {
                    outputPtr[outputIndex] = i;
                    outputIndex++;
                }
            }
            // Trim back down to the number of entries actually written.
            jobWrapper.outputIndices.ResizeUninitialized(outputIndex);
        }

        // Filter mode: compacts outputIndices in place, keeping only the indices
        // for which the user's Execute returns true.
        public static unsafe void ExecuteFilter(ref JobWrapper jobWrapper, System.IntPtr bufferRangePatchData)
        {
            int* outputPtr = (int*)jobWrapper.outputIndices.GetUnsafePtr();
            int inputLength = jobWrapper.outputIndices.Length;
            int outputCount = 0;
            for (int i = 0; i != inputLength; i++)
            {
                int inputIndex = outputPtr[i];
#if ENABLE_UNITY_COLLECTIONS_CHECKS
                // Narrow safety-check access to the single index being tested.
                JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobWrapper), inputIndex, 1);
#endif
                if (jobWrapper.JobData.Execute(inputIndex))
                {
                    // Write position never passes read position, so in-place compaction is safe.
                    outputPtr[outputCount] = inputIndex;
                    outputCount++;
                }
            }
            jobWrapper.outputIndices.ResizeUninitialized(outputCount);
        }
    }

    /// <summary>
    /// Gathers and caches reflection data for the internal job system's managed bindings. Unity is responsible for calling this method - don't call it yourself.
    /// </summary>
    /// <typeparam name="T">Job type</typeparam>
    /// <remarks>
    /// When the Collections package is included in the project, Unity generates code to call EarlyJobInit at startup. This allows Burst compiled code to schedule jobs because the reflection part of initialization, which is not compatible with burst compiler constraints, has already happened in EarlyJobInit.
    ///
    /// __Note__: While the Jobs package code generator handles this automatically for all closed job types, you must register those with generic arguments (like IJobFilter&amp;lt;MyJobType&amp;lt;T&amp;gt;&amp;gt;) manually for each specialization with [[Unity.Jobs.RegisterGenericJobTypeAttribute]].
    /// </remarks>
    public static void EarlyJobInit<T>()
        where T : struct, IJobFilter
    {
        JobFilterProducer<T>.Initialize();
    }

    // Ensures reflection data exists (no-op under Burst) and validates it before use.
    static IntPtr GetReflectionData<T>()
        where T : struct, IJobFilter
    {
        JobFilterProducer<T>.Initialize();
        var reflectionData = JobFilterProducer<T>.jobReflectionData.Data;
        CollectionHelper.CheckReflectionDataCorrect<T>(reflectionData);
        return reflectionData;
    }

    /// <summary>
    /// Schedules a job that will execute the filter job for all integers in indices from index 0 until arrayLength. Each integer which passes the filter (i.e. true is returned from Execute()) will be appended to the indices list.
    /// </summary>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">List of indices to be filtered. Filtered results will be appended to this list.</param>
    /// <param name="arrayLength">Number of indices to filter starting from index 0.</param>
    /// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
    /// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
    /// <typeparam name="T">Job type</typeparam>
    public static unsafe JobHandle ScheduleAppend<T>(this T jobData, NativeList<int> indices, int arrayLength, JobHandle dependsOn = new JobHandle())
        where T : struct, IJobFilter
    {
        return jobData.ScheduleAppendByRef(indices, arrayLength, dependsOn);
    }

    /// <summary>
    /// Schedules a job that will execute the filter job for all integers in indices from index 0 until arrayLength. Each integer which passes the filter (i.e. true is returned from Execute()) will be used to repopulate the indices list.
    /// This has the effect of excluding all integer values that do not pass the filter.
    /// </summary>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">List of indices to be filtered. Filtered results will be stored in this list.</param>
    /// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
    /// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
    /// <typeparam name="T">Job type</typeparam>
    public static unsafe JobHandle ScheduleFilter<T>(this T jobData, NativeList<int> indices, JobHandle dependsOn = new JobHandle())
        where T : struct, IJobFilter
    {
        return jobData.ScheduleFilterByRef(indices, dependsOn);
    }

    /// <summary>
    /// Executes the appending filter job, on the main thread. See IJobFilterExtensions.ScheduleAppend for more information on how appending is performed.
    /// </summary>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">List of indices to be filtered and appended to.</param>
    /// <param name="arrayLength">Length of array the filter job will append to.</param>
    /// <typeparam name="T">Job type</typeparam>
    public static unsafe void RunAppend<T>(this T jobData, NativeList<int> indices, int arrayLength)
        where T : struct, IJobFilter
    {
        jobData.RunAppendByRef(indices, arrayLength);
    }

    /// <summary>
    /// Executes the filter job, on the main thread. See IJobFilterExtensions.Schedule for more information on how filtering is performed.
    /// </summary>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">List of indices to be filtered. Filtered results will be stored in this list.</param>
    /// <typeparam name="T">Job type</typeparam>
    public static unsafe void RunFilter<T>(this T jobData, NativeList<int> indices)
        where T : struct, IJobFilter
    {
        jobData.RunFilterByRef(indices);
    }

    /// <summary>
    /// Schedules a job that will execute the filter job for all integers in indices from index 0 until arrayLength. Each integer which passes the filter (i.e. true is returned from Execute()) will be appended to the indices list.
    /// </summary>
    /// <param name="jobData">The job and data to schedule. In this variant, the jobData is
    /// passed by reference, which may be necessary for unusually large job structs.</param>
    /// <param name="indices">List of indices to be filtered. Filtered results will be appended to this list.</param>
    /// <param name="arrayLength">Number of indices to filter starting from index 0.</param>
    /// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
    /// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
    /// <typeparam name="T">Job type</typeparam>
    public static unsafe JobHandle ScheduleAppendByRef<T>(ref this T jobData, NativeList<int> indices, int arrayLength, JobHandle dependsOn = new JobHandle())
        where T : struct, IJobFilter
    {
        JobFilterProducer<T>.JobWrapper jobWrapper = new JobFilterProducer<T>.JobWrapper()
        {
            JobData = jobData,
            outputIndices = indices,
            // Non-negative appendCount selects append mode in the producer.
            appendCount = arrayLength
        };
        var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobWrapper), GetReflectionData<T>(), dependsOn, ScheduleMode.Single);
        return JobsUtility.Schedule(ref scheduleParams);
    }

    /// <summary>
    /// Schedules a job that will execute the filter job for all integers in indices from index 0 until arrayLength. Each integer which passes the filter (i.e. true is returned from Execute()) will be used to repopulate the indices list.
    /// This has the effect of excluding all integer values that do not pass the filter.
    /// </summary>
    /// <param name="jobData">The job and data to schedule. In this variant, the jobData is
    /// passed by reference, which may be necessary for unusually large job structs.</param>
    /// <param name="indices">List of indices to be filtered. Filtered results will be stored in this list.</param>
    /// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
    /// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
    /// <typeparam name="T">Job type</typeparam>
    public static unsafe JobHandle ScheduleFilterByRef<T>(ref this T jobData, NativeList<int> indices, JobHandle dependsOn = new JobHandle())
        where T : struct, IJobFilter
    {
        JobFilterProducer<T>.JobWrapper jobWrapper = new JobFilterProducer<T>.JobWrapper()
        {
            JobData = jobData,
            outputIndices = indices,
            // -1 selects in-place filter mode in the producer.
            appendCount = -1
        };
        var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobWrapper), GetReflectionData<T>(), dependsOn, ScheduleMode.Single);
        return JobsUtility.Schedule(ref scheduleParams);
    }

    /// <summary>
    /// Executes the appending filter job, on the main thread. See IJobFilterExtensions.ScheduleAppend for more information on how appending is performed.
    /// </summary>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">List of indices to be filtered. Filtered results will be appended to this list.</param>
    /// <param name="arrayLength">Number of indices to filter starting from index 0.</param>
    /// <typeparam name="T">Job type</typeparam>
    public static unsafe void RunAppendByRef<T>(ref this T jobData, NativeList<int> indices, int arrayLength)
        where T : struct, IJobFilter
    {
        JobFilterProducer<T>.JobWrapper jobWrapper = new JobFilterProducer<T>.JobWrapper()
        {
            JobData = jobData,
            outputIndices = indices,
            appendCount = arrayLength
        };
        // ScheduleMode.Run executes synchronously on the calling thread.
        var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobWrapper), GetReflectionData<T>(), new JobHandle(), ScheduleMode.Run);
        JobsUtility.Schedule(ref scheduleParams);
    }

    /// <summary>
    /// Executes the filter job, on the main thread. See IJobFilterExtensions.Schedule for more information on how filtering is performed.
    /// </summary>
    /// <param name="jobData">The job and data to schedule. In this variant, the jobData is
    /// passed by reference, which may be necessary for unusually large job structs.</param>
    /// <param name="indices">List of indices to be filtered. Filtered results will be stored in this list.</param>
    /// <typeparam name="T">Job type</typeparam>
    public static unsafe void RunFilterByRef<T>(ref this T jobData, NativeList<int> indices)
        where T : struct, IJobFilter
    {
        JobFilterProducer<T>.JobWrapper jobWrapper = new JobFilterProducer<T>.JobWrapper()
        {
            JobData = jobData,
            outputIndices = indices,
            appendCount = -1
        };
        // ScheduleMode.Run executes synchronously on the calling thread.
        var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobWrapper), GetReflectionData<T>(), new JobHandle(), ScheduleMode.Run);
        JobsUtility.Schedule(ref scheduleParams);
    }
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: c3f9809ba4de01446ae82a0e4f3da278
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,252 @@
using System;
using Unity.Jobs.LowLevel.Unsafe;
using Unity.Collections.LowLevel.Unsafe;
using Unity.Collections;
using System.Diagnostics;
using Unity.Burst;
namespace Unity.Jobs
{
/// <summary>
/// Job type allowing for data to be operated on in parallel batches.
/// </summary>
/// <remarks>
/// When scheduling an IJobParallelForBatch job the number of elements to work on is specified along with a batch size. Jobs will then run in parallel
/// invoking Execute at a particular 'startIndex' of your working set and for a specified 'count' number of elements.
/// </remarks>
[JobProducerType(typeof(IJobParallelForBatchExtensions.JobParallelForBatchProducer<>))]
public interface IJobParallelForBatch
{
    /// <summary>
    /// Function operating on a "batch" of data contained within the job.
    /// </summary>
    /// <param name="startIndex">Starting index of job data to safely access.</param>
    /// <param name="count">Number of elements to operate on in the batch. May be smaller than the scheduled batch size for the final batch.</param>
    void Execute(int startIndex, int count);
}
/// <summary>
/// Extension class for the IJobParallelForBatch job type providing custom overloads for scheduling and running.
/// </summary>
public static class IJobParallelForBatchExtensions
{
// Job producer that bridges IJobParallelForBatch jobs into the native job system.
internal struct JobParallelForBatchProducer<T> where T : struct, IJobParallelForBatch
{
    // Per-specialization cache of the job reflection data pointer, shared with Burst-compiled code.
    internal static readonly SharedStatic<IntPtr> jobReflectionData = SharedStatic<IntPtr>.GetOrCreate<JobParallelForBatchProducer<T>>();

    // Creates the reflection data once; reflection is not Burst-compatible, hence BurstDiscard.
    [BurstDiscard]
    internal static void Initialize()
    {
        if (jobReflectionData.Data == IntPtr.Zero)
            jobReflectionData.Data = JobsUtility.CreateJobReflectionData(typeof(T), (ExecuteJobFunction)Execute);
    }

    internal delegate void ExecuteJobFunction(ref T jobData, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex);

    // Entry point invoked by the job system. Repeatedly claims index ranges via
    // work stealing and hands each [begin, end) range to the user's Execute.
    public unsafe static void Execute(ref T jobData, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex)
    {
        while (true)
        {
            // Returns false once no more ranges remain for this worker.
            if (!JobsUtility.GetWorkStealingRange(
                ref ranges,
                jobIndex, out int begin, out int end))
                return;

#if ENABLE_UNITY_COLLECTIONS_CHECKS
            // Narrow safety-check access to the claimed range while it is processed.
            JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobData), begin, end - begin);
#endif
            jobData.Execute(begin, end - begin);
        }
    }
}
/// <summary>
/// Gathers and caches reflection data for the internal job system's managed bindings. Unity is responsible for calling this method - don't call it yourself.
/// </summary>
/// <typeparam name="T">Job type</typeparam>
/// <remarks>
/// When the Jobs package is included in the project, Unity generates code to call EarlyJobInit at startup. This allows Burst compiled code to schedule jobs because the reflection part of initialization, which is not compatible with burst compiler constraints, has already happened in EarlyJobInit.
///
/// __Note__: While the Jobs package code generator handles this automatically for all closed job types, you must register those with generic arguments (like IJobParallelForBatch&amp;lt;MyJobType&amp;lt;T&amp;gt;&amp;gt;) manually for each specialization with [[Unity.Jobs.RegisterGenericJobTypeAttribute]].
/// </remarks>
public static void EarlyJobInit<T>()
    where T : struct, IJobParallelForBatch
    => JobParallelForBatchProducer<T>.Initialize();
// Ensures reflection data exists for T (no-op under Burst), validates it,
// and returns the cached pointer used when scheduling.
static IntPtr GetReflectionData<T>()
    where T : struct, IJobParallelForBatch
{
    JobParallelForBatchProducer<T>.Initialize();
    var reflectionDataPtr = JobParallelForBatchProducer<T>.jobReflectionData.Data;
    CollectionHelper.CheckReflectionDataCorrect<T>(reflectionDataPtr);
    return reflectionDataPtr;
}
/// <summary>
/// Schedules a job that will execute the parallel batch job for all `arrayLength` elements in batches of `indicesPerJobCount`.
/// The Execute() method for Job T will be provided the start index and number of elements to safely operate on.
/// In cases where `indicesPerJobCount` is not a multiple of `arrayLength`, the `count` provided to the Execute method of Job T will be smaller than the `indicesPerJobCount` specified here.
/// </summary>
/// <param name="jobData">The job and data to schedule.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <param name="indicesPerJobCount">Number of elements to consider in a single parallel batch.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
public static unsafe JobHandle Schedule<T>(this T jobData, int arrayLength, int indicesPerJobCount,
    JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
{
    // ScheduleMode.Single: all batches run on a single worker thread.
    var parameters = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        GetReflectionData<T>(),
        dependsOn,
        ScheduleMode.Single);
    return JobsUtility.ScheduleParallelFor(ref parameters, arrayLength, indicesPerJobCount);
}
/// <summary>
/// Schedules a job that will execute the parallel batch job for all `arrayLength` elements in batches of `indicesPerJobCount`.
/// The Execute() method for Job T will be provided the start index and number of elements to safely operate on.
/// In cases where `indicesPerJobCount` is not a multiple of `arrayLength`, the `count` provided to the Execute method of Job T will be smaller than the `indicesPerJobCount` specified here.
/// </summary>
/// <param name="jobData">The job and data to schedule. In this variant, the jobData is
/// passed by reference, which may be necessary for unusually large job structs.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <param name="indicesPerJobCount">Number of elements to consider in a single parallel batch.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
public static unsafe JobHandle ScheduleByRef<T>(this ref T jobData, int arrayLength, int indicesPerJobCount,
    JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
{
    // ScheduleMode.Single: all batches run on a single worker thread.
    var parameters = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        GetReflectionData<T>(),
        dependsOn,
        ScheduleMode.Single);
    return JobsUtility.ScheduleParallelFor(ref parameters, arrayLength, indicesPerJobCount);
}
/// <summary>
/// Schedules a job that will execute the parallel batch job for all `arrayLength` elements in batches of `indicesPerJobCount`.
/// The Execute() method for Job T will be provided the start index and number of elements to safely operate on.
/// In cases where `indicesPerJobCount` is not a multiple of `arrayLength`, the `count` provided to the Execute method of Job T will be smaller than the `indicesPerJobCount` specified here.
/// </summary>
/// <param name="jobData">The job and data to schedule.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <param name="indicesPerJobCount">Number of elements to consider in a single parallel batch.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
public static unsafe JobHandle ScheduleParallel<T>(this T jobData, int arrayLength, int indicesPerJobCount,
    JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
{
    // ScheduleMode.Parallel: batches are distributed across worker threads.
    var parameters = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        GetReflectionData<T>(),
        dependsOn,
        ScheduleMode.Parallel);
    return JobsUtility.ScheduleParallelFor(ref parameters, arrayLength, indicesPerJobCount);
}
/// <summary>
/// Schedules a job that will execute the parallel batch job for all `arrayLength` elements in batches of `indicesPerJobCount`.
/// The Execute() method for Job T will be provided the start index and number of elements to safely operate on.
/// In cases where `indicesPerJobCount` is not a multiple of `arrayLength`, the `count` provided to the Execute method of Job T will be smaller than the `indicesPerJobCount` specified here.
/// </summary>
/// <param name="jobData">The job and data to schedule. In this variant, the jobData is
/// passed by reference, which may be necessary for unusually large job structs.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <param name="indicesPerJobCount">Number of elements to consider in a single parallel batch.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
public static unsafe JobHandle ScheduleParallelByRef<T>(this ref T jobData, int arrayLength, int indicesPerJobCount,
    JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
{
    // ScheduleMode.Parallel: batches are distributed across worker threads.
    var parameters = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        GetReflectionData<T>(),
        dependsOn,
        ScheduleMode.Parallel);
    return JobsUtility.ScheduleParallelFor(ref parameters, arrayLength, indicesPerJobCount);
}
/// <summary>
/// Schedules a job that will execute the parallel batch job for all `arrayLength` elements in batches of `indicesPerJobCount`.
/// Convenience alias for <see cref="ScheduleParallel{T}"/>; the Execute() method for Job T will be provided the start index and number of elements to safely operate on.
/// In cases where `indicesPerJobCount` is not a multiple of `arrayLength`, the `count` provided to the Execute method of Job T will be smaller than the `indicesPerJobCount` specified here.
/// </summary>
/// <param name="jobData">The job and data to schedule.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <param name="indicesPerJobCount">Number of elements to consider in a single parallel batch.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
public static unsafe JobHandle ScheduleBatch<T>(this T jobData, int arrayLength, int indicesPerJobCount,
    JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
    => jobData.ScheduleParallel(arrayLength, indicesPerJobCount, dependsOn);
/// <summary>
/// Schedules a job that will execute the parallel batch job for all `arrayLength` elements in batches of `indicesPerJobCount`.
/// Convenience alias for <see cref="ScheduleParallelByRef{T}"/>; the Execute() method for Job T will be provided the start index and number of elements to safely operate on.
/// In cases where `indicesPerJobCount` is not a multiple of `arrayLength`, the `count` provided to the Execute method of Job T will be smaller than the `indicesPerJobCount` specified here.
/// </summary>
/// <param name="jobData">The job and data to schedule. In this variant, the jobData is
/// passed by reference, which may be necessary for unusually large job structs.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <param name="indicesPerJobCount">Number of elements to consider in a single parallel batch.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
public static unsafe JobHandle ScheduleBatchByRef<T>(this ref T jobData, int arrayLength, int indicesPerJobCount,
    JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
    => jobData.ScheduleParallelByRef(arrayLength, indicesPerJobCount, dependsOn);
/// <summary>
/// Executes the parallel batch job on the main thread. See IJobParallelForBatchExtensions.Schedule for more information on how batching is performed.
/// </summary>
/// <param name="jobData">The job and data to schedule.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <param name="indicesPerJobCount">Number of elements to consider in a single parallel batch. This argument is ignored when using .Run()</param>
/// <typeparam name="T">Job type</typeparam>
/// <remarks>
/// Unlike Schedule, since the job is running on the main thread no parallelization occurs and thus no `indicesPerJobCount` batch size is required to be specified.
/// </remarks>
public static unsafe void Run<T>(this T jobData, int arrayLength, int indicesPerJobCount) where T : struct, IJobParallelForBatch
{
    var parameters = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        GetReflectionData<T>(),
        new JobHandle(),
        ScheduleMode.Run);
    // The full range runs as one batch on the calling thread, so the
    // requested indicesPerJobCount is deliberately replaced by arrayLength.
    JobsUtility.ScheduleParallelFor(ref parameters, arrayLength, arrayLength);
}
/// <summary>
/// Executes the parallel batch job but on the main thread. See IJobParallelForBatchExtensions.Schedule for more information on how appending is performed.
/// </summary>
/// <param name="jobData">The job and data to schedule. In this variant, the jobData is
/// passed by reference, which may be necessary for unusually large job structs.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <param name="indicesPerJobCount">Number of elements to consider in a single parallel batch. This argument is ignored when using .RunByRef()</param>
/// <typeparam name="T">Job type</typeparam>
public static unsafe void RunByRef<T>(this ref T jobData, int arrayLength, int indicesPerJobCount) where T : struct, IJobParallelForBatch
{
    // Main-thread execution: schedule a single run covering the whole range,
    // so the entire [0, arrayLength) span is handed to Execute as one batch.
    var parameters = new JobsUtility.JobScheduleParameters(
        UnsafeUtility.AddressOf(ref jobData),
        GetReflectionData<T>(),
        new JobHandle(),
        ScheduleMode.Run);
    JobsUtility.ScheduleParallelFor(ref parameters, arrayLength, arrayLength);
}
/// <summary>
/// Executes the parallel batch job but on the main thread. See IJobParallelForBatchExtensions.ScheduleBatch for more information on how appending is performed.
/// </summary>
/// <param name="jobData">The job and data to schedule.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <typeparam name="T">Job type</typeparam>
/// <remarks>
/// Unlike ScheduleBatch, since the job is running on the main thread no parallelization occurs and thus no `indicesPerJobCount` batch size is required to be specified.
/// </remarks>
public static unsafe void RunBatch<T>(this T jobData, int arrayLength) where T : struct, IJobParallelForBatch
    // The whole range is executed as one batch on the main thread.
    => Run(jobData, arrayLength, arrayLength);
/// <summary>
/// Executes the parallel batch job but on the main thread. See IJobParallelForBatchExtensions.ScheduleBatch for more information on how appending is performed.
/// </summary>
/// <param name="jobData">The job and data to schedule. In this variant, the jobData is
/// passed by reference, which may be necessary for unusually large job structs.</param>
/// <param name="arrayLength">Total number of elements to consider when batching.</param>
/// <typeparam name="T">Job type</typeparam>
public static unsafe void RunBatchByRef<T>(this ref T jobData, int arrayLength) where T : struct, IJobParallelForBatch
    // The whole range is executed as one batch on the main thread.
    => RunByRef(ref jobData, arrayLength, arrayLength);
}
}

View File

@@ -0,0 +1,13 @@
fileFormatVersion: 2
guid: 73fbfe0d21bc34441843483c0c2406f0
timeCreated: 1504270347
licenseType: Pro
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,214 @@
using System;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using Unity.Jobs.LowLevel.Unsafe;
using System.Diagnostics;
using Unity.Burst;
namespace Unity.Jobs
{
/// <summary>
/// A parallel-for job type whose iteration count is resolved when the job starts executing,
/// rather than when it is scheduled — for example from the length of a NativeList that an
/// earlier job (a dependency) is still filling in.
/// </summary>
/// <remarks>
/// A replacement for IJobParallelFor when the number of work items is not known at Schedule time.
///
/// When scheduled, the job's Execute(int index) method will be invoked on multiple worker threads in
/// parallel to each other.
///
/// Execute(int index) will be executed once for each index from 0 to the provided length. Each iteration
/// must be independent from other iterations and the safety system enforces this rule for you. The indices
/// have no guaranteed order and are executed on multiple cores in parallel.
///
/// Unity automatically splits the work into chunks of no less than the provided batchSize, and schedules
/// an appropriate number of jobs based on the number of worker threads, the length of the array and the batch size.
///
/// Choose a batch size based on the amount of work performed in the job. A simple job,
/// for example adding a couple of float3 to each other, could have a batch size of 32 to 128. However,
/// if the work performed is very expensive then it's best to use a small batch size, such as a batch
/// size of 1. IJobParallelFor performs work stealing using atomic operations. Batch sizes can be
/// small but they aren't free.
///
/// The returned JobHandle can be used to ensure that the job has completed. Or it can be passed to other jobs as
/// a dependency, ensuring that the jobs are executed one after another on the worker threads.
/// </remarks>
[JobProducerType(typeof(IJobParallelForDeferExtensions.JobParallelForDeferProducer<>))]
public interface IJobParallelForDefer
{
/// <summary>
/// Implement this method to perform work against a specific iteration index.
/// </summary>
/// <param name="index">The index of the Parallel for loop at which to perform work.</param>
void Execute(int index);
}
/// <summary>
/// Extension class for the IJobParallelForDefer job type providing custom overloads for scheduling and running.
/// </summary>
public static class IJobParallelForDeferExtensions
{
// Job producer for a specific job specialization T: holds the cached
// reflection data and the trampoline the job system calls on worker threads.
internal struct JobParallelForDeferProducer<T> where T : struct, IJobParallelForDefer
{
// Reflection data for T, stored in a SharedStatic so it is reachable from
// both managed and Burst-compiled code.
internal static readonly SharedStatic<IntPtr> jobReflectionData = SharedStatic<IntPtr>.GetOrCreate<JobParallelForDeferProducer<T>>();
// Creates the reflection data once (idempotent). Marked [BurstDiscard]
// because the reflection part of initialization is not compatible with
// Burst compiler constraints; it must run in managed code first (see
// EarlyJobInit below).
[BurstDiscard]
internal static void Initialize()
{
if (jobReflectionData.Data == IntPtr.Zero)
jobReflectionData.Data = JobsUtility.CreateJobReflectionData(typeof(T), (ExecuteJobFunction)Execute);
}
// Signature the job system expects for the job entry point.
public delegate void ExecuteJobFunction(ref T jobData, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex);
// Worker-thread entry point: repeatedly steals an index range from the
// shared JobRanges and invokes the user's Execute for each index in it,
// until GetWorkStealingRange reports no work remains.
public unsafe static void Execute(ref T jobData, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex)
{
while (true)
{
if (!JobsUtility.GetWorkStealingRange(ref ranges, jobIndex, out int begin, out int end))
break;
#if ENABLE_UNITY_COLLECTIONS_CHECKS
// Narrow container safety checks to the stolen [begin, end) slice.
JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobData), begin, end - begin);
#endif
// Cache the end value to make it super obvious to the
// compiler that `end` will never change during the loops
// iteration.
var endThatCompilerCanSeeWillNeverChange = end;
for (var i = begin; i < endThatCompilerCanSeeWillNeverChange; ++i)
jobData.Execute(i);
}
}
}
/// <summary>
/// Gathers and caches reflection data for the internal job system's managed bindings. Unity is responsible for calling this method - don't call it yourself.
/// </summary>
/// <typeparam name="T">Job type</typeparam>
/// <remarks>
/// When the Jobs package is included in the project, Unity generates code to call EarlyJobInit at startup. This allows Burst compiled code to schedule jobs because the reflection part of initialization, which is not compatible with burst compiler constraints, has already happened in EarlyJobInit.
///
/// __Note__: While the Jobs package code generator handles this automatically for all closed job types, you must register those with generic arguments (like IJobParallelForDefer&lt;MyJobType&lt;T&gt;&gt;) manually for each specialization with [[Unity.Jobs.RegisterGenericJobTypeAttribute]].
/// </remarks>
public static void EarlyJobInit<T>()
where T : struct, IJobParallelForDefer
{
JobParallelForDeferProducer<T>.Initialize();
}
/// <summary>
/// Schedule the job for execution on worker threads.
/// list.Length is used as the iteration count.
/// Note that it is required to embed the list on the job struct as well.
/// </summary>
/// <param name="jobData">The job and data to schedule.</param>
/// <param name="list">list.Length is used as the iteration count.</param>
/// <param name="innerloopBatchCount">Granularity in which workstealing is performed. A value of 32, means the job queue will steal 32 iterations and then perform them in an efficient inner loop.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
/// <typeparam name="U">List element type</typeparam>
public static unsafe JobHandle Schedule<T, U>(this T jobData, NativeList<U> list, int innerloopBatchCount,
JobHandle dependsOn = new JobHandle())
where T : struct, IJobParallelForDefer
where U : unmanaged
{
void* atomicSafetyHandlePtr = null;
// Calculate the deferred atomic safety handle before constructing JobScheduleParameters so
// DOTS Runtime can validate the deferred list statically similar to the reflection based
// validation in Big Unity.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
var safety = NativeListUnsafeUtility.GetAtomicSafetyHandle(ref list);
atomicSafetyHandlePtr = UnsafeUtility.AddressOf(ref safety);
#endif
return ScheduleInternal(ref jobData, innerloopBatchCount,
NativeListUnsafeUtility.GetInternalListDataPtrUnchecked(ref list),
atomicSafetyHandlePtr, dependsOn);
}
/// <summary>
/// Schedule the job for execution on worker threads.
/// list.Length is used as the iteration count.
/// Note that it is required to embed the list on the job struct as well.
/// </summary>
/// <param name="jobData">The job and data to schedule. In this variant, the jobData is
/// passed by reference, which may be necessary for unusually large job structs.</param>
/// <param name="list">list.Length is used as the iteration count.</param>
/// <param name="innerloopBatchCount">Granularity in which workstealing is performed. A value of 32, means the job queue will steal 32 iterations and then perform them in an efficient inner loop.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
/// <typeparam name="U">List element type</typeparam>
public static unsafe JobHandle ScheduleByRef<T, U>(this ref T jobData, NativeList<U> list, int innerloopBatchCount,
JobHandle dependsOn = new JobHandle())
where T : struct, IJobParallelForDefer
where U : unmanaged
{
void* atomicSafetyHandlePtr = null;
// Calculate the deferred atomic safety handle before constructing JobScheduleParameters so
// DOTS Runtime can validate the deferred list statically similar to the reflection based
// validation in Big Unity.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
var safety = NativeListUnsafeUtility.GetAtomicSafetyHandle(ref list);
atomicSafetyHandlePtr = UnsafeUtility.AddressOf(ref safety);
#endif
return ScheduleInternal(ref jobData, innerloopBatchCount,
NativeListUnsafeUtility.GetInternalListDataPtrUnchecked(ref list),
atomicSafetyHandlePtr, dependsOn);
}
/// <summary>
/// Schedule the job for execution on worker threads.
/// forEachCount is a pointer to the number of iterations, when dependsOn has completed.
/// This API is unsafe, it is recommended to use the NativeList based Schedule method instead.
/// </summary>
/// <param name="jobData">The job and data to schedule.</param>
/// <param name="forEachCount">*forEachCount is used as the iteration count.</param>
/// <param name="innerloopBatchCount">Granularity in which workstealing is performed. A value of 32, means the job queue will steal 32 iterations and then perform them in an efficient inner loop.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
public static unsafe JobHandle Schedule<T>(this T jobData, int* forEachCount, int innerloopBatchCount,
JobHandle dependsOn = new JobHandle())
where T : struct, IJobParallelForDefer
{
// Step back one pointer so the job system receives a pointer to a
// list-header-shaped structure whose length field is *forEachCount.
// NOTE(review): assumes the length sits exactly sizeof(void*) into the
// header (matching ScheduleParallelForDeferArraySize's expected layout)
// — confirm against Unity.Collections' list data layout.
var forEachListPtr = (byte*)forEachCount - sizeof(void*);
return ScheduleInternal(ref jobData, innerloopBatchCount, forEachListPtr, null, dependsOn);
}
/// <summary>
/// Schedule the job for execution on worker threads.
/// forEachCount is a pointer to the number of iterations, when dependsOn has completed.
/// This API is unsafe, it is recommended to use the NativeList based Schedule method instead.
/// </summary>
/// <param name="jobData">The job and data to schedule. In this variant, the jobData is
/// passed by reference, which may be necessary for unusually large job structs.</param>
/// <param name="forEachCount">*forEachCount is used as the iteration count.</param>
/// <param name="innerloopBatchCount">Granularity in which workstealing is performed. A value of 32, means the job queue will steal 32 iterations and then perform them in an efficient inner loop.</param>
/// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
/// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
/// <typeparam name="T">Job type</typeparam>
public static unsafe JobHandle ScheduleByRef<T>(this ref T jobData, int* forEachCount, int innerloopBatchCount,
JobHandle dependsOn = new JobHandle())
where T : struct, IJobParallelForDefer
{
// See Schedule<T> above: offsets back to a list-header-shaped pointer.
// NOTE(review): layout assumption to be confirmed, same as Schedule<T>.
var forEachListPtr = (byte*)forEachCount - sizeof(void*);
return ScheduleInternal(ref jobData, innerloopBatchCount, forEachListPtr, null, dependsOn);
}
// Common scheduling path: ensures reflection data exists, validates it,
// then schedules a parallel-for whose array size is read (deferred) from
// forEachListPtr when the dependency completes.
private static unsafe JobHandle ScheduleInternal<T>(ref T jobData,
int innerloopBatchCount,
void* forEachListPtr,
void *atomicSafetyHandlePtr,
JobHandle dependsOn) where T : struct, IJobParallelForDefer
{
JobParallelForDeferProducer<T>.Initialize();
var reflectionData = JobParallelForDeferProducer<T>.jobReflectionData.Data;
// Throws a descriptive error if EarlyJobInit never ran for this T
// (e.g. an unregistered generic specialization).
CollectionHelper.CheckReflectionDataCorrect<T>(reflectionData);
var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobData), reflectionData, dependsOn, ScheduleMode.Parallel);
return JobsUtility.ScheduleParallelForDeferArraySize(ref scheduleParams, innerloopBatchCount, forEachListPtr, atomicSafetyHandlePtr);
}
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: ad8b28c0bd73eb94cbbaffc184e68af1
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,34 @@
using System;
using UnityEngine.Scripting.APIUpdating;
namespace Unity.Jobs
{
/// <summary>
/// When added as an assembly-level attribute, allows creating job reflection data for instances of generic jobs.
/// </summary>
/// <remarks>
/// Apply once per fully closed generic job specialization that should have
/// reflection data generated for it.
/// </remarks>
[MovedFrom(true, "Unity.Entities", "Unity.Entities")]
[AttributeUsage(AttributeTargets.Assembly, AllowMultiple = true)]
public class RegisterGenericJobTypeAttribute : Attribute
{
    /// <summary>
    /// Fully closed generic job type to register with the job system
    /// </summary>
    public Type ConcreteType;

    /// <summary>
    /// Registers a fully closed generic job type with the job system
    /// </summary>
    /// <param name="type">The fully closed generic job type to register.</param>
    public RegisterGenericJobTypeAttribute(Type type) => ConcreteType = type;
}
// Marker for classes emitted by tooling rather than written by hand.
// NOTE(review): purpose inferred from the name and internal visibility —
// confirm against the DOTS code generator that applies it.
[AttributeUsage(AttributeTargets.Class)]
internal class DOTSCompilerGeneratedAttribute : Attribute
{}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: dad573b7a5d91e84bab5aada3f2f0530
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant: