So right now I can run multiple predictions on a single GPU, fully utilizing its memory as such:
mp.set_start_method('spawn', force = True)
if __name__ == '__main__':
files = []
model = init_model()
procs = []
for raw_data_file in glob.glob('data/*.txt'):
files.append(raw_data_file)
pool = mp.Pool(8)
for fi in files:
print (fi)
pool.apply_async(generator, args = (fi, model))
pool.close()
pool.join()
How can I use the rest of my GPUs?