#!/usr/bin/env python
import os, sys, multiprocessing, argparse, re, glob
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--output-dir', metavar='DIR', required=True,
                    help='Store testgen shards in DIR')
parser.add_argument('-j', '--jobs', type=int,
                    default=multiprocessing.cpu_count(),
                    help='Number of shards (and resulting make jobs)')
parser.usage = parser.format_usage().split(':', 1)[1].strip() + ' < testgen.c'
args = parser.parse_args()
shard_names = [os.path.join(args.output_dir, 'testgen.%d.c' % i)
               for i in range(max(args.jobs - 1, 1))]
# Refuse to run if the output directory holds shards from an earlier,
# differently sized run.
if set(glob.glob(os.path.join(args.output_dir, 'testgen.*.c'))) - \
        set(shard_names):
    parser.error('Output directory is dirty')
outarray = open(os.path.join(args.output_dir, 'testgen.c'), 'w')
outshards = [open(path, 'w') for path in shard_names]
testparts = [p + '\n\n' for p in sys.stdin.read().split('\n\n')]
# Emit common headers
while testparts:
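# --- Added sketch (not part of the snippet above) ---
# The sharding loop's body is truncated above. A minimal, runnable
# illustration of the likely idea: deal blank-line-separated test parts
# round-robin across the shard files. deal_round_robin is a hypothetical
# helper, not the original script's code.
import io
import itertools

def deal_round_robin(parts, outputs):
    # Each part goes to the next output file in turn.
    for part, out in zip(parts, itertools.cycle(outputs)):
        out.write(part)

_outs = [io.StringIO() for _ in range(3)]
deal_round_robin(['t0\n\n', 't1\n\n', 't2\n\n', 't3\n\n'], _outs)
assert _outs[0].getvalue() == 't0\n\nt3\n\n'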
    else:
        ef_map[line[0]].append(line[1])
for line in open(fmap2):
    line = line.rstrip('\n').split('@@@')
    if len(line) != 2:
        continue
    vocab_f.append(line[0])
    if fe_map.get(line[1]) is None:
        fe_map[line[1]] = [line[0]]
    else:
        fe_map[line[1]].append(line[0])
print("Loaded en_fr fr_en mappings.")
# en: ...
manager = Manager()
lock1 = Lock()
past_num = Value('i', 0, lock=True)
score = manager.list()  # stores hit@k
rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)
cpu_count = multiprocessing.cpu_count()
t0 = time.time()

def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num):
    while index.value < len(vocab):
        # Note: this read-then-increment is not atomic; strictly, workers
        # should take index.get_lock() around it.
        id = index.value
        index.value += 1
        word = vocab[id]
        if id % 100 == 0:
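# --- Added sketch (not part of the snippet above) ---
# How shared objects like the ones above are typically wired up: every
# worker process receives the same Value and manager.list proxies and
# mutates them under their locks. Names here are illustrative, not taken
# from the original script.
import multiprocessing
from multiprocessing import Manager, Value

def _worker(index, hits):
    # get_lock() guards the read-increment, which is not atomic on its own.
    while True:
        with index.get_lock():
            i = index.value
            index.value += 1
        if i >= 10:
            break
        hits.append(i)

if __name__ == '__main__':
    mgr = Manager()
    hits = mgr.list()
    index = Value('i', 0, lock=True)
    procs = [multiprocessing.Process(target=_worker, args=(index, hits))
             for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(sorted(hits))  # -> [0, 1, ..., 9], each claimed exactly once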
    A connection owned by a parent is unusable by a child if the parent
    (the owning process) closes the connection.
    """
    conn = Connection()
    conn.send_command('ping')
    assert conn.read_response() == b'PONG'

    def target(conn, ev):
        ev.wait()
        # The parent closed the connection; because the parent also created
        # it, the connection was fully shut down and the child cannot use it.
        with pytest.raises(ConnectionError):
            conn.send_command('ping')

    ev = multiprocessing.Event()
    proc = multiprocessing.Process(target=target, args=(conn, ev))
    proc.start()
    conn.disconnect()
    ev.set()
    proc.join(3)
    assert proc.exitcode == 0
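# --- Added sketch (not part of the test above) ---
# The safe pattern the test implies: have each child create its own
# connection rather than inherit the parent's socket. Assumes redis-py's
# redis.connection.Connection and a server on localhost:6379.
import multiprocessing
from redis.connection import Connection

def child_ping():
    conn = Connection()  # fresh socket owned by this process
    try:
        conn.send_command('ping')
        assert conn.read_response() == b'PONG'
    finally:
        conn.disconnect()

if __name__ == '__main__':
    proc = multiprocessing.Process(target=child_ping)
    proc.start()
    proc.join(3)
    assert proc.exitcode == 0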
def teardown(runtimes, request_handler, hostname):
    request_handler.set_credentials({"user": "user0", "password": "pass0"})
    for runtime in runtimes:
        request_handler.quit(runtime["RT"])
    time.sleep(0.2)
    for p in multiprocessing.active_children():
        p.terminate()
    # They would die on their own eventually (about 5 seconds in most
    # cases), but this makes sure without wasting time.
    for i in range(len(runtimes)):
        os.system("pkill -9 -f 'csruntime -n {} -p 500{}'".format(hostname, i))
    time.sleep(0.2)
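# --- Added sketch (not part of the snippet above) ---
# A gentler shutdown than an unconditional pkill -9: terminate(), join with
# a timeout, and escalate to kill() only for stragglers. Process.kill()
# exists on Python 3.7+.
import multiprocessing
import time

def _sleepy():
    time.sleep(60)

if __name__ == '__main__':
    procs = [multiprocessing.Process(target=_sleepy) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.terminate()        # ask nicely first (SIGTERM on POSIX)
        p.join(timeout=2)    # give it a moment to exit
        if p.is_alive():
            p.kill()         # SIGKILL as a last resort
            p.join()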
    # Monkey-patch TCPServer.server_bind so we can capture the bound
    # port out of Flask once we call `run`.
    original_socket_bind = socketserver.TCPServer.server_bind

    def socket_bind_wrapper(self):
        ret = original_socket_bind(self)
        # Get the port and save it into the port_value, so the parent process
        # can read it.
        (_, port) = self.socket.getsockname()
        port_value.value = port
        # Restore the original server_bind so later binds are not wrapped.
        socketserver.TCPServer.server_bind = original_socket_bind
        return ret

    socketserver.TCPServer.server_bind = socket_bind_wrapper
    app.run(port=port, use_reloader=False)

self._process = multiprocessing.Process(
    target=worker, args=(self.app, self._configured_port)
)
self._process.start()
# We must wait for the server to start listening, but give up
# after a specified maximum timeout.
timeout = self.app.config.get('LIVESERVER_TIMEOUT', 5)
start_time = time.time()
while True:
    elapsed_time = (time.time() - start_time)
    if elapsed_time > timeout:
        raise RuntimeError(
            "Failed to start the server after %d seconds. " % timeout
        )
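# --- Added sketch (not part of the snippet above) ---
# The truncated loop above normally keeps polling until the server accepts
# TCP connections. A minimal, self-contained readiness check; the helper
# name is hypothetical, not flask-testing's API.
import socket
import time

def wait_until_listening(host, port, timeout=5.0):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=0.5):
                return True          # something is accepting connections
        except OSError:
            time.sleep(0.05)         # not up yet; retry shortly
    raise RuntimeError("Failed to start the server after %d seconds." % timeout)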
def main(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('patterns', metavar='pattern', nargs='*',
                        help='test patterns.')
    parser.add_argument('-j', '--num-processes',
                        type=int, default=multiprocessing.cpu_count(),
                        help='number of processes to use.')
    parser.add_argument('-e', '--exe', help='path to tester')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='show more info')
    parser.add_argument('-g', '--generate', action='store_true',
                        help='generate test result markdown')
    options = parser.parse_args(args)
    pattern_re = common.MakePatternRE(options.patterns)
    passed = 0
    if not os.path.exists(TEST_RESULT_DIR):
        os.makedirs(TEST_RESULT_DIR)
    tests = [Test(*test) for test in json.load(open(TEST_JSON))]
    tests = [test for test in tests if pattern_re.match(test.rom)]
    start_time = time.time()
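# --- Added sketch (not part of the snippet above) ---
# One common way such a runner fans the filtered tests out over
# --num-processes workers. The run logic is not shown in the snippet, so
# run_one here is a stand-in stub.
import multiprocessing

def run_one(test):
    # Stand-in for launching the tester binary against one ROM.
    return (test, True)

def run_all(tests, num_processes):
    with multiprocessing.Pool(num_processes) as pool:
        results = pool.map(run_one, tests)
    return sum(1 for _, ok in results if ok)  # count of passing tests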
    @param num_threads the number of worker processes to use.

    @param test_work_items the iterable of test work item tuples
    to run.
    """
    # Initialize our global state.
    initialize_global_vars_multiprocessing(num_threads, test_work_items)

    # Create jobs.
    job_queue = multiprocessing.Queue(len(test_work_items))
    for test_work_item in test_work_items:
        job_queue.put(test_work_item)

    result_queue = multiprocessing.Queue(len(test_work_items))

    # Create queues for started child pids. Terminating
    # the multiprocess processes does not terminate the
    # child processes they spawn. We can remove this tracking
    # if/when we move to having the multiprocess process directly
    # perform the test logic. The Queue size needs to be able to
    # hold 2 * (num inferior dotest.py processes started) entries.
    inferior_pid_events = multiprocessing.Queue(4096)

    # Worker dictionary allows each worker to figure out its worker index.
    manager = multiprocessing.Manager()
    worker_index_map = manager.dict()

    # Create workers. We don't use multiprocessing.Pool due to
    # challenges with handling ^C keyboard interrupts.
    workers = []
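# --- Added sketch (not part of the snippet above) ---
# The shape of the worker-creation loop that typically follows: plain
# Process objects instead of multiprocessing.Pool, so the parent keeps full
# control over ^C handling. Function and variable names are illustrative.
import multiprocessing
import queue

def process_job_worker(job_queue, result_queue):
    # Drain jobs until the queue is empty, then exit.
    while True:
        try:
            job = job_queue.get(block=False)
        except queue.Empty:
            break
        result_queue.put(job)    # stand-in for actually running the job

def make_workers(job_queue, result_queue, num_threads):
    workers = []
    for _ in range(num_threads):
        w = multiprocessing.Process(
            target=process_job_worker, args=(job_queue, result_queue))
        w.start()
        workers.append(w)
    return workers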
    def __init__(self):
        self.queue = Queue()
        self.points_taken = Value('i', 0)

    def push(self, data):
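# --- Added sketch (not part of the snippet above) ---
# A self-contained version of the same idea: a queue paired with a shared
# counter so producers can tally how many points were pushed. The class
# name and the push body are illustrative; only __init__ appears above.
from multiprocessing import Queue, Value

class PointBuffer:
    def __init__(self):
        self.queue = Queue()
        self.points_taken = Value('i', 0)

    def push(self, data):
        self.queue.put(data)
        with self.points_taken.get_lock():   # guard the shared counter
            self.points_taken.value += 1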
    :type multinet_base_dir: str
    :type conf: str
    :type output_dir: str
    """
    test_type = '[sb_idle_scalability_multinet]'
    logging.info('{0} initializing test parameters'.format(test_type))

    # Global variables, read-write, shared between the monitor and main threads.
    cpid = 0
    global_sample_id = 0
    t_start = multiprocessing.Value('d', 0.0)

    # Multinet parameters
    multinet_hosts_per_switch = multiprocessing.Value('i', 0)
    multinet_topo_size = multiprocessing.Value('i', 0)
    multinet_worker_ip_list = conf['multinet_worker_ip_list']
    multinet_worker_port_list = conf['multinet_worker_port_list']

    # Controller parameters
    controller_logs_dir = ctrl_base_dir + conf['controller_logs_dir']
    controller_rebuild = conf['controller_rebuild']
    controller_cleanup = conf['controller_cleanup']
    if 'controller_cpu_shares' in conf:
        controller_cpu_shares = conf['controller_cpu_shares']
    else:
        controller_cpu_shares = 100
    multinet_switch_type = conf['multinet_switch_type']
    controller_handlers_set = conf_collections_util.controller_handlers(
def manager():
    tasks = mp.cpu_count() - 1
    que = mp.Queue()
    initque(que)
    lock = mp.Lock()
    plist = []
    for i in range(tasks):
        p = mp.Process(target=worker, args=(que, lock, i + 1))
        p.start()
        plist.append(p)
    for p in plist:
        p.join()
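# --- Added sketch (not part of the snippet above) ---
# A matching worker for the manager() above: pull items until the queue
# stays empty, serializing output with the shared lock. initque and worker
# are not shown in the snippet, so this body is illustrative.
import multiprocessing as mp
import queue

def worker(que, lock, wid):
    while True:
        try:
            item = que.get(timeout=1)   # give up once the queue stays empty
        except queue.Empty:
            break
        with lock:                      # keep printed lines from interleaving
            print('worker %d processed %r' % (wid, item))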