starting to collapse cross platform duplication
parent 24857007bc
commit 6908775450

@@ -545,8 +545,8 @@ internal void
 standard_build(char *cdir, u32 flags, u32 arch){
     fsm_generator(cdir);
     metagen(cdir);
-    do_buildsuper(cdir, fm_str(custom_files[Custom_Default]), arch);
-    //do_buildsuper(cdir, fm_str(custom_files[Custom_Experiments]), arch);
+    //do_buildsuper(cdir, fm_str(custom_files[Custom_Default]), arch);
+    do_buildsuper(cdir, fm_str(custom_files[Custom_Experiments]), arch);
     //do_buildsuper(cdir, fm_str(custom_files[Custom_Casey]), arch);
     //do_buildsuper(cdir, fm_str(custom_files[Custom_ChronalVim]), arch);
     build_main(cdir, true, flags, arch);

@@ -806,7 +806,6 @@ system_set_font(System_Functions *system, Partition *part, Render_Font *font, ch
     }
 }
-
 
 #endif
 
 // BOTTOM

@@ -27,11 +27,6 @@ global File_Data null_file_data = {0};
 #define Sys_File_Can_Be_Made_Sig(name) b32 name(u8 *filename)
 internal Sys_File_Can_Be_Made_Sig(system_file_can_be_made);
 
-#if 0
-#define Sys_Get_Binary_Path_Sig(name) i32 name(String *out)
-internal Sys_Get_Binary_Path_Sig(system_get_binary_path);
-#endif
-
 struct Shared_Vars{
     File_Track_System track;
     void *track_table;

@@ -40,7 +35,6 @@ struct Shared_Vars{
-
     Partition scratch;
 };
 
 global Shared_Vars shared_vars;
 
 #endif

@@ -0,0 +1,158 @@
+/*
+ * Mr. 4th Dimention - Allen Webster
+ *
+ * 18.07.2017
+ *
+ * Cross platform logic for work queues.
+ *
+ */
+
+// TOP
+
+internal void
+job_proc(System_Functions *system, void *lpParameter){
+    Thread_Context *thread = (Thread_Context*)lpParameter;
+    Work_Queue *queue = win32vars.queues + thread->group_id;
+    Thread_Group *group = win32vars.groups + thread->group_id;
+    
+    i32 thread_index = thread->id - 1;
+    
+    i32 cancel_lock = group->cancel_lock0 + thread_index;
+    i32 cancel_cv = group->cancel_cv0 + thread_index;
+    
+    Thread_Memory *thread_memory = win32vars.thread_memory + thread_index;
+    
+    if (thread_memory->size == 0){
+        i32 new_size = KB(64);
+        thread_memory->data = system_memory_allocate(new_size);
+        thread_memory->size = new_size;
+    }
+    
+    for (;;){
+        u32 read_index = queue->read_position;
+        u32 write_index = queue->write_position;
+        
+        if (read_index != write_index){
+            u32 next_read_index = (read_index + 1) % QUEUE_WRAP;
+            u32 safe_read_index = InterlockedCompareExchange(&queue->read_position, next_read_index, read_index);
+            
+            if (safe_read_index == read_index){
+                Full_Job_Data *full_job = queue->jobs + safe_read_index;
+                // NOTE(allen): This is interlocked so that it plays nice
+                // with the cancel job routine, which may try to cancel this job
+                // at the same time that we try to run it
+                
+                i32 safe_running_thread = InterlockedCompareExchange(&full_job->running_thread, thread->id, THREAD_NOT_ASSIGNED);
+                
+                if (safe_running_thread == THREAD_NOT_ASSIGNED){
+                    thread->job_id = full_job->id;
+                    thread->running = true;
+                    full_job->job.callback(system, thread, thread_memory, full_job->job.data);
+                    system_schedule_step();
+                    thread->running = false;
+                    
+                    system_acquire_lock(cancel_lock);
+                    if (thread->cancel){
+                        thread->cancel = 0;
+                        system_signal_cv(cancel_lock, cancel_cv);
+                    }
+                    system_release_lock(cancel_lock);
+                }
+            }
+        }
+        else{
+            system_wait_on(queue->semaphore);
+        }
+    }
+}
+
+internal void
+initialize_unbounded_queue(Unbounded_Work_Queue *source_queue){
+    i32 max = 512;
+    source_queue->jobs = (Full_Job_Data*)system_memory_allocate(max*sizeof(Full_Job_Data));
+    source_queue->count = 0;
+    source_queue->max = max;
+    source_queue->skip = 0;
+}
+
+inline i32
+get_work_queue_available_space(i32 write, i32 read){
+    // NOTE(allen): The only time that queue->write_position == queue->read_position
+    // is allowed is when the queue is empty. Thus if (write_position+1 == read_position)
+    // the available space is zero. So these computations both end up leaving one slot unused.
+    
+    // TODO(allen): The only way I can think to easily eliminate this is to have read and write wrap
+    // at twice the size of the underlying array but modulo their values into the array then if write
+    // has caught up with read it still will not be equal... but lots of modulos... ehh.
+    
+    i32 available_space = 0;
+    if (write >= read){
+        available_space = QUEUE_WRAP - (write - read) - 1;
+    }
+    else{
+        available_space = (read - write) - 1;
+    }
+    
+    return(available_space);
+}
+
+#define UNBOUNDED_SKIP_MAX 128
+
+internal i32
+flush_unbounded_queue_to_main(Unbounded_Work_Queue *source_queue, Work_Queue *queue, i32 thread_count){
+    // NOTE(allen): It is understood that read_position may be changed by other
+    // threads but it will only make more space in the queue if it is changed.
+    // Meanwhile write_position should not ever be changed by anything but the
+    // main thread in this system, so it will not be interlocked.
+    u32 read_position = queue->read_position;
+    u32 write_position = queue->write_position;
+    u32 available_space = get_work_queue_available_space(write_position, read_position);
+    u32 available_jobs = source_queue->count - source_queue->skip;
+    
+    u32 writable_count = Min(available_space, available_jobs);
+    
+    if (writable_count > 0){
+        u32 count1 = writable_count;
+        
+        if (count1+write_position > QUEUE_WRAP){
+            count1 = QUEUE_WRAP - write_position;
+        }
+        
+        u32 count2 = writable_count - count1;
+        
+        Full_Job_Data *job_src1 = source_queue->jobs + source_queue->skip;
+        Full_Job_Data *job_src2 = job_src1 + count1;
+        
+        Full_Job_Data *job_dst1 = queue->jobs + write_position;
+        Full_Job_Data *job_dst2 = queue->jobs;
+        
+        Assert((job_src1->id % QUEUE_WRAP) == write_position);
+        
+        memcpy(job_dst1, job_src1, sizeof(Full_Job_Data)*count1);
+        memcpy(job_dst2, job_src2, sizeof(Full_Job_Data)*count2);
+        queue->write_position = (write_position + writable_count) % QUEUE_WRAP;
+        
+        source_queue->skip += writable_count;
+        
+        if (source_queue->skip == source_queue->count){
+            source_queue->skip = source_queue->count = 0;
+        }
+        else if (source_queue->skip > UNBOUNDED_SKIP_MAX){
+            u32 left_over = source_queue->count - source_queue->skip;
+            memmove(source_queue->jobs, source_queue->jobs + source_queue->skip,
+                    sizeof(Full_Job_Data)*left_over);
+            source_queue->count = left_over;
+            source_queue->skip = 0;
+        }
+    }
+    
+    i32 semaphore_release_count = writable_count;
+    if (semaphore_release_count > thread_count){
+        semaphore_release_count = thread_count;
+    }
+    
+    return(semaphore_release_count);
+}
+
+// BOTTOM

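The one-slot-unused invariant in get_work_queue_available_space is easy to check in isolation. The sketch below restates the same arithmetic as standalone C (hypothetical: QUEUE_WRAP shrunk to 8 and plain int in place of the codebase's i32); it is an illustration, not part of the commit.

#include <assert.h>

#define QUEUE_WRAP 8

// Same computation as get_work_queue_available_space: one slot always
// stays unused so read == write can only mean "empty", never "full".
static int available_space(int write, int read){
    if (write >= read){
        return QUEUE_WRAP - (write - read) - 1;
    }
    return (read - write) - 1;
}

int main(void){
    assert(available_space(0, 0) == 7); // empty: only 7 of 8 slots usable
    assert(available_space(7, 0) == 0); // one more write would make write == read
    assert(available_space(3, 5) == 1); // wrapped case: read is ahead of write
    return 0;
}
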
@@ -503,158 +503,30 @@ system_signal_cv(i32 lock_id, i32 cv_id){
     pthread_cond_signal(linuxvars.conds + cv_id);
 }
 
-internal void*
-JobThreadProc(void* lpParameter){
-    Thread_Context *thread = (Thread_Context*)lpParameter;
-    Work_Queue *queue = linuxvars.queues + thread->group_id;
-    Thread_Group *group = linuxvars.groups + thread->group_id;
-    
-    i32 thread_index = thread->id - 1;
-    
-    i32 cancel_lock = group->cancel_lock0 + thread_index;
-    i32 cancel_cv = group->cancel_cv0 + thread_index;
-    
-    Thread_Memory *thread_memory = linuxvars.thread_memory + thread_index;
-    
-    if (thread_memory->size == 0){
-        i32 new_size = KB(64);
-        thread_memory->data = system_memory_allocate(new_size);
-        thread_memory->size = new_size;
-    }
-    
-    for (;;){
-        u32 read_index = queue->read_position;
-        u32 write_index = queue->write_position;
-        
-        if (read_index != write_index){
-            // NOTE(allen): Previously I was wrapping by the job wrap then
-            // wrapping by the queue wrap. That was super stupid what was that?
-            // Now it just wraps by the queue wrap.
-            u32 next_read_index = (read_index + 1) % QUEUE_WRAP;
-            u32 safe_read_index =
-                InterlockedCompareExchange(&queue->read_position,
-                                           next_read_index, read_index);
-            
-            if (safe_read_index == read_index){
-                Full_Job_Data *full_job = queue->jobs + safe_read_index;
-                // NOTE(allen): This is interlocked so that it plays nice
-                // with the cancel job routine, which may try to cancel this job
-                // at the same time that we try to run it
-                
-                i32 safe_running_thread =
-                    InterlockedCompareExchange(&full_job->running_thread,
-                                               thread->id, THREAD_NOT_ASSIGNED);
-                
-                if (safe_running_thread == THREAD_NOT_ASSIGNED){
-                    thread->job_id = full_job->id;
-                    thread->running = 1;
-                    
-                    full_job->job.callback(&linuxvars.system,
-                                           thread, thread_memory, full_job->job.data);
-                    LinuxScheduleStep();
-                    thread->running = 0;
-                    
-                    system_acquire_lock(cancel_lock);
-                    if (thread->cancel){
-                        thread->cancel = 0;
-                        system_signal_cv(cancel_lock, cancel_cv);
-                    }
-                    system_release_lock(cancel_lock);
-                }
-            }
-        }
-        else{
-            sem_wait(LinuxHandleToSem(queue->semaphore));
-        }
-    }
+// HACK(allen): Reduce this down to just one layer of call.
+internal void
+system_schedule_step(){
+    LinuxScheduleStep();
 }
 
 internal void
-initialize_unbounded_queue(Unbounded_Work_Queue *source_queue){
-    i32 max = 512;
-    source_queue->jobs = (Full_Job_Data*)system_memory_allocate(max*sizeof(Full_Job_Data));
-    source_queue->count = 0;
-    source_queue->max = max;
-    source_queue->skip = 0;
+system_wait_on(Plat_Handle handle){
+    sem_wait(LinuxHandleToSem(handle));
 }
 
-inline i32
-get_work_queue_available_space(i32 write, i32 read){
-    // NOTE(allen): The only time that queue->write_position == queue->read_position
-    // is allowed is when the queue is empty. Thus if
-    // queue->write_position+1 == queue->read_position the available space is zero.
-    // So these computations both end up leaving one slot unused. The only way I can
-    // think to easily eliminate this is to have read and write wrap at twice the size
-    // of the underlying array but modulo their values into the array then if write
-    // has caught up with read it still will not be equal... but lots of modulos... ehh.
-    
-    i32 available_space = 0;
-    if (write >= read){
-        available_space = QUEUE_WRAP - (write - read) - 1;
-    }
-    else{
-        available_space = (read - write) - 1;
-    }
-    
-    return(available_space);
-}
+#include "4ed_work_queues.cpp"
 
-#define UNBOUNDED_SKIP_MAX 128
+internal void*
+JobThreadProc(void* lpParameter){
+    System_Functions *system = &win32vars.system;
+    job_proc(system, lpParameter);
+    InvalidCodePath;
+    return(0);
+}
 
 internal void
 flush_to_direct_queue(Unbounded_Work_Queue *source_queue, Work_Queue *queue, i32 thread_count){
-    // NOTE(allen): It is understood that read_position may be changed by other
-    // threads but it will only make more space in the queue if it is changed.
-    // Meanwhile write_position should not ever be changed by anything but the
-    // main thread in this system, so it will not be interlocked.
-    u32 read_position = queue->read_position;
-    u32 write_position = queue->write_position;
-    u32 available_space = get_work_queue_available_space(write_position, read_position);
-    u32 available_jobs = source_queue->count - source_queue->skip;
-    
-    u32 writable_count = Min(available_space, available_jobs);
-    
-    if (writable_count > 0){
-        u32 count1 = writable_count;
-        
-        if (count1+write_position > QUEUE_WRAP){
-            count1 = QUEUE_WRAP - write_position;
-        }
-        
-        u32 count2 = writable_count - count1;
-        
-        Full_Job_Data *job_src1 = source_queue->jobs + source_queue->skip;
-        Full_Job_Data *job_src2 = job_src1 + count1;
-        
-        Full_Job_Data *job_dst1 = queue->jobs + write_position;
-        Full_Job_Data *job_dst2 = queue->jobs;
-        
-        Assert((job_src1->id % QUEUE_WRAP) == write_position);
-        
-        memcpy(job_dst1, job_src1, sizeof(Full_Job_Data)*count1);
-        memcpy(job_dst2, job_src2, sizeof(Full_Job_Data)*count2);
-        queue->write_position = (write_position + writable_count) % QUEUE_WRAP;
-        
-        source_queue->skip += writable_count;
-        
-        if (source_queue->skip == source_queue->count){
-            source_queue->skip = source_queue->count = 0;
-        }
-        else if (source_queue->skip > UNBOUNDED_SKIP_MAX){
-            u32 left_over = source_queue->count - source_queue->skip;
-            memmove(source_queue->jobs, source_queue->jobs + source_queue->skip,
-                    sizeof(Full_Job_Data)*left_over);
-            source_queue->count = left_over;
-            source_queue->skip = 0;
-        }
-    }
-    
-    i32 semaphore_release_count = writable_count;
-    if (semaphore_release_count > thread_count){
-        semaphore_release_count = thread_count;
-    }
-    
-    // NOTE(allen): platform dependent portion...
+    i32 semaphore_release_count = flush_to_direct_queue(source_queue, queue, thread_count);
     for (i32 i = 0; i < semaphore_release_count; ++i){
         sem_post(LinuxHandleToSem(queue->semaphore));
     }

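The shape of the collapse is the same on both platforms: the per-platform JobThreadProc shrinks to a thin wrapper whose only remaining duties are matching the thread API's required signature and handing off to the shared job_proc, while the platform specifics move behind system_schedule_step and system_wait_on. (True to the "starting to" in the commit message, the Linux side is still mid-transition here: the shared file reads win32vars directly, and the new Linux flush_to_direct_queue calls itself rather than flush_unbounded_queue_to_main.) A minimal sketch of the wrapper pattern on the pthread side, with hypothetical names:

#include <pthread.h>

// Shared, platform-independent worker loop (stands in for job_proc);
// like job_proc it is written to never return.
static void shared_job_loop(void *param){
    for (;;){
        // ... dequeue a job and run it, or block on the queue semaphore ...
        (void)param;
    }
}

// Thin platform wrapper (stands in for the Linux JobThreadProc): it only
// adapts to the void *(*)(void *) signature pthread_create requires.
static void *job_thread_proc(void *param){
    shared_job_loop(param);
    return 0; // unreachable; mirrors the InvalidCodePath + return(0) above
}
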
@@ -357,156 +357,29 @@ system_signal_cv(i32 crit_id, i32 cv_id){
     WakeConditionVariable(win32vars.condition_vars + cv_id);
 }
 
-internal DWORD CALL_CONVENTION
-JobThreadProc(LPVOID lpParameter){
-    Thread_Context *thread = (Thread_Context*)lpParameter;
-    Work_Queue *queue = win32vars.queues + thread->group_id;
-    Thread_Group *group = win32vars.groups + thread->group_id;
-    
-    i32 thread_index = thread->id - 1;
-    
-    i32 cancel_lock = group->cancel_lock0 + thread_index;
-    i32 cancel_cv = group->cancel_cv0 + thread_index;
-    
-    Thread_Memory *thread_memory = win32vars.thread_memory + thread_index;
-    
-    if (thread_memory->size == 0){
-        i32 new_size = KB(64);
-        thread_memory->data = system_memory_allocate(new_size);
-        thread_memory->size = new_size;
-    }
-    
-    for (;;){
-        u32 read_index = queue->read_position;
-        u32 write_index = queue->write_position;
-        
-        if (read_index != write_index){
-            // NOTE(allen): Previously I was wrapping by the job wrap then
-            // wrapping by the queue wrap. That was super stupid what was that?
-            // Now it just wraps by the queue wrap.
-            u32 next_read_index = (read_index + 1) % QUEUE_WRAP;
-            u32 safe_read_index = InterlockedCompareExchange(&queue->read_position, next_read_index, read_index);
-            
-            if (safe_read_index == read_index){
-                Full_Job_Data *full_job = queue->jobs + safe_read_index;
-                // NOTE(allen): This is interlocked so that it plays nice
-                // with the cancel job routine, which may try to cancel this job
-                // at the same time that we try to run it
-                
-                i32 safe_running_thread = InterlockedCompareExchange(&full_job->running_thread, thread->id, THREAD_NOT_ASSIGNED);
-                
-                if (safe_running_thread == THREAD_NOT_ASSIGNED){
-                    thread->job_id = full_job->id;
-                    thread->running = 1;
-                    
-                    full_job->job.callback(&win32vars.system, thread, thread_memory, full_job->job.data);
-                    PostMessage(win32vars.window_handle, WM_4coder_ANIMATE, 0, 0);
-                    //full_job->running_thread = 0;
-                    thread->running = 0;
-                    
-                    system_acquire_lock(cancel_lock);
-                    if (thread->cancel){
-                        thread->cancel = 0;
-                        system_signal_cv(cancel_lock, cancel_cv);
-                    }
-                    system_release_lock(cancel_lock);
-                }
-            }
-        }
-        else{
-            WaitForSingleObject(Win32Handle(queue->semaphore), INFINITE);
-        }
-    }
+internal void
+system_schedule_step(){
+    PostMessage(win32vars.window_handle, WM_4coder_ANIMATE, 0, 0);
 }
 
 internal void
-initialize_unbounded_queue(Unbounded_Work_Queue *source_queue){
-    i32 max = 512;
-    source_queue->jobs = (Full_Job_Data*)system_memory_allocate(max*sizeof(Full_Job_Data));
-    source_queue->count = 0;
-    source_queue->max = max;
-    source_queue->skip = 0;
+system_wait_on(Plat_Handle handle){
+    WaitForSingleObject(Win32Handle(handle), INFINITE);
 }
 
-inline i32
-get_work_queue_available_space(i32 write, i32 read){
-    // NOTE(allen): The only time that queue->write_position == queue->read_position
-    // is allowed is when the queue is empty. Thus if
-    // queue->write_position+1 == queue->read_position the available space is zero.
-    // So these computations both end up leaving one slot unused. The only way I can
-    // think to easily eliminate this is to have read and write wrap at twice the size
-    // of the underlying array but modulo their values into the array then if write
-    // has caught up with read it still will not be equal... but lots of modulos... ehh.
-    
-    i32 available_space = 0;
-    if (write >= read){
-        available_space = QUEUE_WRAP - (write - read) - 1;
-    }
-    else{
-        available_space = (read - write) - 1;
-    }
-    
-    return(available_space);
-}
+#include "4ed_work_queues.cpp"
 
-#define UNBOUNDED_SKIP_MAX 128
+internal DWORD CALL_CONVENTION
+JobThreadProc(LPVOID lpParameter){
+    System_Functions *system = &win32vars.system;
+    job_proc(system, lpParameter);
+    InvalidCodePath;
+    return(0);
+}
 
 internal void
 flush_to_direct_queue(Unbounded_Work_Queue *source_queue, Work_Queue *queue, i32 thread_count){
-    // NOTE(allen): It is understood that read_position may be changed by other
-    // threads but it will only make more space in the queue if it is changed.
-    // Meanwhile write_position should not ever be changed by anything but the
-    // main thread in this system, so it will not be interlocked.
-    u32 read_position = queue->read_position;
-    u32 write_position = queue->write_position;
-    u32 available_space = get_work_queue_available_space(write_position, read_position);
-    u32 available_jobs = source_queue->count - source_queue->skip;
-    
-    u32 writable_count = Min(available_space, available_jobs);
-    
-    if (writable_count > 0){
-        u32 count1 = writable_count;
-        
-        if (count1+write_position > QUEUE_WRAP){
-            count1 = QUEUE_WRAP - write_position;
-        }
-        
-        u32 count2 = writable_count - count1;
-        
-        Full_Job_Data *job_src1 = source_queue->jobs + source_queue->skip;
-        Full_Job_Data *job_src2 = job_src1 + count1;
-        
-        Full_Job_Data *job_dst1 = queue->jobs + write_position;
-        Full_Job_Data *job_dst2 = queue->jobs;
-        
-        Assert((job_src1->id % QUEUE_WRAP) == write_position);
-        
-        memcpy(job_dst1, job_src1, sizeof(Full_Job_Data)*count1);
-        memcpy(job_dst2, job_src2, sizeof(Full_Job_Data)*count2);
-        queue->write_position = (write_position + writable_count) % QUEUE_WRAP;
-        
-        source_queue->skip += writable_count;
-        
-        if (source_queue->skip == source_queue->count){
-            source_queue->skip = source_queue->count = 0;
-        }
-        else if (source_queue->skip > UNBOUNDED_SKIP_MAX){
-            u32 left_over = source_queue->count - source_queue->skip;
-            memmove(source_queue->jobs, source_queue->jobs + source_queue->skip,
-                    sizeof(Full_Job_Data)*left_over);
-            source_queue->count = left_over;
-            source_queue->skip = 0;
-        }
-    }
-    
-    i32 semaphore_release_count = writable_count;
-    if (semaphore_release_count > thread_count){
-        semaphore_release_count = thread_count;
-    }
-    
-    // NOTE(allen): platform dependent portion...
-    // TODO(allen): pull out the duplicated part once I see
-    // that this is pretty much the same on linux.
+    i32 semaphore_release_count = flush_unbounded_queue_to_main(source_queue, queue, thread_count);
     for (i32 i = 0; i < semaphore_release_count; ++i){
         ReleaseSemaphore(Win32Handle(queue->semaphore), 1, 0);
     }

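The worker loop claims work in two compare-and-swap steps: every contending thread proposes the transition, and exactly one sees the old value come back and wins. A portable restatement of the job-claim step with C++11 atomics (a sketch for illustration; the diff itself uses the InterlockedCompareExchange name on both platforms):

#include <atomic>
#include <cstdio>

enum { THREAD_NOT_ASSIGNED = 0 };

static std::atomic<int> running_thread(THREAD_NOT_ASSIGNED);

// Mirrors InterlockedCompareExchange(&full_job->running_thread,
// thread->id, THREAD_NOT_ASSIGNED): succeeds for exactly one caller,
// so a job can never be run and cancelled at the same time.
static bool try_claim_job(int my_id){
    int expected = THREAD_NOT_ASSIGNED;
    return running_thread.compare_exchange_strong(expected, my_id);
}

int main(){
    std::printf("thread 1 wins: %d\n", (int)try_claim_job(1)); // prints 1
    std::printf("thread 2 wins: %d\n", (int)try_claim_job(2)); // prints 0
    return 0;
}
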
@@ -335,7 +335,6 @@ get_change_event(File_Track_System *system, Partition *scratch, u8 *buffer, i32
     i32 req_size = dir_len + (len + 1)*2;
     *size = req_size;
     
-    // TODO(allen): This check isn't really right, it should rely on the result from GetFinalPathNameByHandle_utf8.
     if (req_size < max){
         u32 path_pos = GetFinalPathNameByHandle_utf8(listener.dir, buffer, max, FILE_NAME_NORMALIZED);
         buffer[path_pos++] = '\\';

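The removed TODO pointed at a real alternative: GetFinalPathNameByHandle reports its own space requirement, so the size check can come from the call rather than from a precomputed req_size. A sketch against the raw Win32 API (GetFinalPathNameByHandle_utf8 is the project's wrapper; the helper below is hypothetical):

#include <windows.h>

// On success GetFinalPathNameByHandleW returns the number of characters
// written (excluding the terminating null); if the buffer is too small it
// instead returns the required size, making the check self-contained.
static DWORD final_path_checked(HANDLE dir, WCHAR *buffer, DWORD max){
    DWORD len = GetFinalPathNameByHandleW(dir, buffer, max, FILE_NAME_NORMALIZED);
    if (len == 0){
        return 0;   // hard failure; GetLastError() has the reason
    }
    if (len >= max){
        return len; // buffer too small; retry with at least len characters
    }
    return len;     // success
}
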
@@ -189,7 +189,9 @@ get_timestamp_string_at_cursor(Application_Links *app, Buffer_Summary *buffer, i
 }
 
 struct Miblo_Timestamp{
-    int32_t hour, minute, second;
+    int32_t second;
+    int32_t minute;
+    int32_t hour;
 };
 static Miblo_Timestamp null_miblo_timestamp = {0};
 

@@ -203,38 +205,45 @@ static Miblo_Timestamp
 increment_timestamp(Miblo_Timestamp t, int32_t type, int32_t amt){
     Miblo_Timestamp r = t;
     switch (type){
-        case MIBLO_SECOND:
+        case MIBLO_SECOND: /* CASE second */
         r.second += amt;
         
+        // 1. Modulo r.second into [0,59]
+        // 2. What is thrown away by (1) store in amt, divide by 60, round down even when negative.
         amt = 0;
-        // TODO(allen): someday do the math, instead of being lazy.
-        while (r.second < 0){
-            --amt;
-            r.second += 60;
+        if (r.second < 0){
+            int32_t pos_second = -r.second;
+            amt = -((pos_second + 59)/60);
+            r.second = 60 - (pos_second % 60);
+        }
+        else if (r.second >= 60){
+            amt = r.second/60;
+            r.second = (r.second % 60);
         }
         
-        while (r.second >= 60){
-            ++amt;
-            r.second -= 60;
-        }
-        
-        case MIBLO_MINUTE:
+        case MIBLO_MINUTE: /* CASE minute */
         r.minute += amt;
         
+        // 1. Modulo r.minute into [0,59]
+        // 2. What is thrown away by (1) store in amt, divide by 60, round down even when negative.
         amt = 0;
-        // TODO(allen): someday do the math, instead of being lazy.
-        while (r.minute < 0){
-            --amt;
-            r.minute += 60;
+        if (r.minute < 0){
+            int32_t pos_minute = -r.minute;
+            amt = -((pos_minute + 59)/60);
+            r.minute = 60 - (pos_minute % 60);
+        }
+        else if (r.minute >= 60){
+            amt = r.minute/60;
+            r.minute = (r.minute % 60);
         }
         
-        while (r.minute >= 60){
-            ++amt;
-            r.minute -= 60;
-        }
-        
-        case MIBLO_HOUR:
+        case MIBLO_HOUR: /* CASE hour */
         r.hour += amt;
+        if (r.hour < 0){
+            r.second = 0;
+            r.minute = 0;
+            r.hour = 0;
+        }
     }
     
     return(r);

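The two numbered comment steps in the new MIBLO_SECOND and MIBLO_MINUTE blocks are floor division and Euclidean remainder by 60 written out by hand. A compact equivalent (hypothetical normalize60 helper, not in the commit) makes the intent checkable; note that for exact negative multiples of 60 the committed branch leaves the field at 60 rather than 0, an edge the floor-division form avoids:

#include <assert.h>

// Normalize one time field into [0,59] and return the carry for the next
// field up, rounding the quotient toward negative infinity.
static void normalize60(int *unit, int *carry){
    int q = *unit / 60;
    int r = *unit % 60;
    if (r < 0){ r += 60; q -= 1; } // fix C's round-toward-zero division
    *unit = r;
    *carry = q;
}

int main(void){
    int u, c;
    u = -1;  normalize60(&u, &c); assert(u == 59 && c == -1);
    u = 125; normalize60(&u, &c); assert(u == 5 && c == 2);
    u = -60; normalize60(&u, &c); assert(u == 0 && c == -1);
    return 0;
}
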
@@ -1857,7 +1857,6 @@ This call returns non-zero on success.) */{
     return(result);
 }
 
-// TODO(allen): Add hash-table extension to string sets.
 CPP_NAME(string_set_match)
 API_EXPORT FSTRING_LINK b32_4tech
 string_set_match_table(void *str_set, i32_4tech item_size, i32_4tech count, String str, i32_4tech *match_index)/*