code stringlengths 281 23.7M |
|---|
def replaceInternalLinks(text):
    """Rewrite wiki-style internal links (``[[title|label]]trail``) in *text*.

    Walks the balanced ``[[...]]`` spans produced by ``findBalanced``, splits
    each span's interior on the last top-level ``|`` into title and label,
    and replaces the span with ``makeInternalLink(title, label)``.  A
    ``tailRE`` match immediately after the closing brackets is appended to
    the link (MediaWiki "trail" letters, e.g. ``[[bus]]es``).

    NOTE(review): relies on module-level ``findBalanced``, ``tailRE`` and
    ``makeInternalLink``; nested-link handling depends on their semantics.
    """
    cur = 0
    res = ''
    for (s, e) in findBalanced(text):
        # Optional trail text glued directly onto the closing brackets.
        m = tailRE.match(text, e)
        if m:
            trail = m.group(0)
            end = m.end()
        else:
            trail = ''
            end = e
        inner = text[(s + 2):(e - 2)]  # strip the surrounding [[ ]]
        pipe = inner.find('|')
        if (pipe < 0):
            # No pipe: the link target doubles as its label.
            title = inner
            label = title
        else:
            title = inner[:pipe].rstrip()
            curp = (pipe + 1)
            # Advance past nested balanced spans so the label split happens
            # on the last '|' that sits *outside* any nested brackets.
            for (s1, e1) in findBalanced(inner):
                last = inner.rfind('|', curp, s1)
                if (last >= 0):
                    pipe = last
                curp = e1
            label = inner[(pipe + 1):].strip()
        res += ((text[cur:s] + makeInternalLink(title, label)) + trail)
        cur = end
    # Append whatever text follows the final link.
    return (res + text[cur:])
def triplet_to_binary(triplet_data):
    """Flatten triplet examples into binary-labeled entity pairs.

    Each input mapping with keys ``anchor``/``positive``/``negative`` yields
    two rows: (anchor, positive) labeled 1 and (anchor, negative) labeled 0.

    :param triplet_data: iterable of triplet dicts.
    :return: list of ``{'entity_a', 'entity_b', 'label'}`` dicts, two per triplet.
    """
    ret = []
    for example in triplet_data:
        anchor = example['anchor']
        ret.append({'entity_a': anchor, 'entity_b': example['positive'], 'label': 1})
        ret.append({'entity_a': anchor, 'entity_b': example['negative'], 'label': 0})
    return ret
def RandomForest(filename, x_predict, model_name, RF_outputname, set_now, game_name, change_side):
    """Evaluate a pre-trained random-forest shot-type classifier.

    Loads labeled rally data from *filename*, predicts shot types for
    *x_predict* with the joblib model at *model_name*, writes a CSV with
    ``Real``/``Predict`` columns to *RF_outputname*, and prints accuracy,
    micro-precision and micro-recall.

    NOTE(review): ``set_now``, ``game_name`` and ``change_side`` are accepted
    for call-site compatibility but unused here — confirm with callers.
    Depends on module-level ``needed`` (column list) and
    ``ball_type_convertion`` — verify those are in scope.
    """
    data = pd.read_csv(filename)
    data = data[needed]
    data.dropna(inplace=True)
    # Drop rows whose shot type is the empty string.  (The original repeated
    # this identical filter four times; once is sufficient.)
    data = data[(data.type != '')]
    data.reset_index(drop=True, inplace=True)
    label = [0, 1, 2, 3, 4, 5, 6]
    type_to_num = {'cut': 0, 'drive': 1, 'lob': 2, 'long': 3, 'netplay': 4, 'rush': 5, 'smash': 6}
    # Ground-truth labels as class indices.
    real_num = [type_to_num[ball_type_convertion(ball)] for ball in data['type']]
    model = joblib.load(model_name)
    prediction = model.predict(x_predict)
    result = pd.DataFrame([])
    result['Real'] = real_num
    result['Predict'] = prediction
    result.to_csv(RF_outputname, index=None)
    # Vectorized count of exact matches (replaces the manual index loop).
    cnt = int((result['Real'] == result['Predict']).sum())
    print(('RF Total correct: ' + str(cnt)))
    print(('RF Total number: ' + str(len(prediction))))
    print(('RF Accuracy: ' + str(accuracy_score(real_num, prediction))))
    print(('RF Overall precision: ' + str(precision_score(real_num, prediction, labels=label, average='micro'))))
    print(('RF Overall recall: ' + str(recall_score(real_num, prediction, labels=label, average='micro'))))
def sorted_tests(min_line: int=1, max_line: int=1000, min_length: int=10, max_length: int=12):
    """Hypothesis strategy: sorted lists of ``Test`` objects with unique, even line numbers.

    Lines are drawn from ``[min_line, max_line]`` and doubled (so every line
    is even and at most ``2 * max_line``); list length is in
    ``[min_length, max_length]``; the result is sorted by ``line``.
    """
    even_line = integers(min_value=min_line, max_value=max_line).map(lambda line: line * 2)
    test_strategy = builds(Test, line=even_line)
    unsorted_lists = lists(
        test_strategy,
        min_size=min_length,
        max_size=max_length,
        unique_by=lambda test: test.line,
    )
    return unsorted_lists.map(lambda tests: sorted(tests, key=lambda test: test.line))
class AnonEnv():
    """CityFlow-backed traffic-signal control environment.

    Builds one ``Intersection`` wrapper per grid node from the roadnet file,
    advances the ``engine.Engine`` simulator, and logs per-intersection
    features, actions and vehicle statistics under ``path_to_log``.

    Review fixes: ``convert_dic_to_df``, ``_reduce_duplicates`` and
    ``_cal_distance`` took no ``self`` yet were invoked through instances
    (a guaranteed TypeError) — they are now static methods; the duplicated
    ``log_lane_vehicle_position`` definition was removed; repeated
    system-state / neighbor-update code was factored into helpers.
    """
    list_intersection_id = ['intersection_1_1']

    def __init__(self, path_to_log, path_to_work_directory, dic_traffic_env_conf):
        """Store configuration and truncate the per-intersection log files."""
        self.path_to_log = path_to_log
        self.path_to_work_directory = path_to_work_directory
        self.dic_traffic_env_conf = dic_traffic_env_conf
        self.simulator_type = self.dic_traffic_env_conf['SIMULATOR_TYPE']
        self.list_intersection = None
        self.list_inter_log = None
        self.list_lanes = None
        self.system_states = None
        self.feature_name_for_neighbor = self._reduce_duplicates(self.dic_traffic_env_conf['LIST_STATE_FEATURE'])
        if (self.dic_traffic_env_conf['MIN_ACTION_TIME'] <= self.dic_traffic_env_conf['YELLOW_TIME']):
            # Deliberately a warning only; execution continues as before.
            print('MIN_ACTION_TIME should include YELLOW_TIME')
        # Create (or truncate) one empty pickle log per intersection.
        for inter_ind in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
            path_to_log_file = os.path.join(self.path_to_log, 'inter_{0}.pkl'.format(inter_ind))
            with open(path_to_log_file, 'wb'):
                pass

    def _collect_system_states(self):
        """Snapshot simulator state; skip per-vehicle queries in FAST_COMPUTE mode."""
        if self.dic_traffic_env_conf['FAST_COMPUTE']:
            return {'get_lane_vehicles': self.eng.get_lane_vehicles(),
                    'get_lane_waiting_vehicle_count': self.eng.get_lane_waiting_vehicle_count(),
                    'get_vehicle_speed': None,
                    'get_vehicle_distance': None}
        return {'get_lane_vehicles': self.eng.get_lane_vehicles(),
                'get_lane_waiting_vehicle_count': self.eng.get_lane_waiting_vehicle_count(),
                'get_vehicle_speed': self.eng.get_vehicle_speed(),
                'get_vehicle_distance': self.eng.get_vehicle_distance()}

    def _update_neighbor_features(self):
        """Push each intersection's ENWS-neighbor features into its feature dict."""
        for inter in self.list_intersection:
            neighbor_inters = []
            for neighbor_inter_id in inter.neighbor_ENWS:
                if (neighbor_inter_id is not None):
                    neighbor_inters.append(self.list_intersection[self.id_to_index[neighbor_inter_id]])
                else:
                    neighbor_inters.append(None)
            inter.dic_feature = inter.update_neighbor_info(neighbor_inters, deepcopy(inter.dic_feature))

    def reset(self):
        """(Re)start the simulator and return the initial per-intersection state."""
        print('# self.eng.reset() to be implemented')
        # Write the CityFlow engine configuration into the work directory.
        cityflow_config = {'interval': self.dic_traffic_env_conf['INTERVAL'], 'seed': 0, 'laneChange': False, 'dir': (self.path_to_work_directory + '/'), 'roadnetFile': self.dic_traffic_env_conf['ROADNET_FILE'], 'flowFile': self.dic_traffic_env_conf['TRAFFIC_FILE'], 'rlTrafficLight': self.dic_traffic_env_conf['RLTRAFFICLIGHT'], 'saveReplay': self.dic_traffic_env_conf['SAVEREPLAY'], 'roadnetLogFile': 'frontend/web/roadnetLogFile.json', 'replayLogFile': 'frontend/web/replayLogFile.txt'}
        print('')
        print(cityflow_config)
        with open(os.path.join(self.path_to_work_directory, 'cityflow.config'), 'w') as json_file:
            json.dump(cityflow_config, json_file)
        self.eng = engine.Engine(os.path.join(self.path_to_work_directory, 'cityflow.config'), thread_num=1)
        if self.dic_traffic_env_conf['USE_LANE_ADJACENCY']:
            self.traffic_light_node_dict = self._adjacency_extraction_lane()
        else:
            self.traffic_light_node_dict = self._adjacency_extraction()
        # One Intersection wrapper per (row, col) grid node; ids are 1-based.
        self.list_intersection = [Intersection(((i + 1), (j + 1)), self.dic_traffic_env_conf, self.eng, self.traffic_light_node_dict['intersection_{0}_{1}'.format((i + 1), (j + 1))], self.path_to_log) for i in range(self.dic_traffic_env_conf['NUM_ROW']) for j in range(self.dic_traffic_env_conf['NUM_COL'])]
        self.list_inter_log = [[] for _ in range((self.dic_traffic_env_conf['NUM_ROW'] * self.dic_traffic_env_conf['NUM_COL']))]
        # Map intersection id -> position in list_intersection (row-major).
        self.id_to_index = {}
        count_inter = 0
        for i in range(self.dic_traffic_env_conf['NUM_ROW']):
            for j in range(self.dic_traffic_env_conf['NUM_COL']):
                self.id_to_index['intersection_{0}_{1}'.format((i + 1), (j + 1))] = count_inter
                count_inter += 1
        # Map entering-lane id -> dense index; first occurrence wins.
        self.lane_id_to_index = {}
        count_lane = 0
        for inter in self.list_intersection:
            for lane_id in inter.list_entering_lanes:
                if (lane_id not in self.lane_id_to_index):
                    self.lane_id_to_index[lane_id] = count_lane
                    count_lane += 1
        for inter in self.list_intersection:
            inter.build_adjacency_row_lane(self.lane_id_to_index)
        system_state_start_time = time.time()
        self.system_states = self._collect_system_states()
        print('Get system state time: ', (time.time() - system_state_start_time))
        update_start_time = time.time()
        for inter in self.list_intersection:
            inter.update_current_measurements_map(self.system_states)
        print('Update_current_measurements_map time: ', (time.time() - update_start_time))
        neighbor_start_time = time.time()
        if self.dic_traffic_env_conf['NEIGHBOR']:
            self._update_neighbor_features()
        print('Update_neighbor time: ', (time.time() - neighbor_start_time))
        (state, done) = self.get_state()
        return state

    def step(self, action):
        """Apply *action* for MIN_ACTION_TIME seconds; return (state, reward, done, avg_rewards).

        Under the 'switch' pattern only the first second carries the action
        (zeros afterwards); under 'set' the action is repeated.  The display
        list marks repeated seconds with -1.
        """
        step_start_time = time.time()
        list_action_in_sec = [action]
        list_action_in_sec_display = [action]
        for i in range((self.dic_traffic_env_conf['MIN_ACTION_TIME'] - 1)):
            if (self.dic_traffic_env_conf['ACTION_PATTERN'] == 'switch'):
                list_action_in_sec.append(np.zeros_like(action).tolist())
            elif (self.dic_traffic_env_conf['ACTION_PATTERN'] == 'set'):
                list_action_in_sec.append(np.copy(action).tolist())
            list_action_in_sec_display.append(np.full_like(action, fill_value=(- 1)).tolist())
        average_reward_action_list = ([0] * len(action))
        # NOTE(review): `reward` below is bound inside the loop; the return
        # value is the reward of the *last* second, as in the original.
        for i in range(self.dic_traffic_env_conf['MIN_ACTION_TIME']):
            action_in_sec = list_action_in_sec[i]
            action_in_sec_display = list_action_in_sec_display[i]
            instant_time = self.get_current_time()
            self.current_time = self.get_current_time()
            before_action_feature = self.get_feature()
            if self.dic_traffic_env_conf['DEBUG']:
                print('time: {0}'.format(instant_time))
            elif (i == 0):
                print('time: {0}'.format(instant_time))
            self._inner_step(action_in_sec)
            if self.dic_traffic_env_conf['DEBUG']:
                start_time = time.time()
            reward = self.get_reward()
            if self.dic_traffic_env_conf['DEBUG']:
                print('Reward time: {}'.format((time.time() - start_time)))
            # Running mean of per-intersection rewards over the i+1 seconds.
            for j in range(len(reward)):
                average_reward_action_list[j] = (((average_reward_action_list[j] * i) + reward[j]) / (i + 1))
            self.log(cur_time=instant_time, before_action_feature=before_action_feature, action=action_in_sec_display)
            (next_state, done) = self.get_state()
        print('Step time: ', (time.time() - step_start_time))
        return (next_state, reward, done, average_reward_action_list)

    def _inner_step(self, action):
        """Advance the simulator one second with per-intersection *action*."""
        for inter in self.list_intersection:
            inter.update_previous_measurements()
        for (inter_ind, inter) in enumerate(self.list_intersection):
            inter.set_signal(action=action[inter_ind], action_pattern=self.dic_traffic_env_conf['ACTION_PATTERN'], yellow_time=self.dic_traffic_env_conf['YELLOW_TIME'], all_red_time=self.dic_traffic_env_conf['ALL_RED_TIME'])
        # One second of simulated time = 1 / INTERVAL engine ticks.
        for _ in range(int((1 / self.dic_traffic_env_conf['INTERVAL']))):
            self.eng.next_step()
        if self.dic_traffic_env_conf['DEBUG']:
            start_time = time.time()
        self.system_states = self._collect_system_states()
        if self.dic_traffic_env_conf['DEBUG']:
            print('Get system state time: {}'.format((time.time() - start_time)))
        if self.dic_traffic_env_conf['DEBUG']:
            start_time = time.time()
        for inter in self.list_intersection:
            inter.update_current_measurements_map(self.system_states)
        if self.dic_traffic_env_conf['NEIGHBOR']:
            self._update_neighbor_features()
        if self.dic_traffic_env_conf['DEBUG']:
            print('Update measurements time: {}'.format((time.time() - start_time)))

    def load_roadnet(self, roadnetFile=None):
        """Load a roadnet JSON into the engine (defaults to roadnet_1_1.json)."""
        print('Start load roadnet')
        start_time = time.time()
        if (not roadnetFile):
            roadnetFile = 'roadnet_1_1.json'
        self.eng.load_roadnet(os.path.join(self.path_to_work_directory, roadnetFile))
        print('successfully load roadnet:{0}, time: {1}'.format(roadnetFile, (time.time() - start_time)))

    def load_flow(self, flowFile=None):
        """Load a flow JSON into the engine (defaults to flow_1_1.json)."""
        print('Start load flowFile')
        start_time = time.time()
        if (not flowFile):
            flowFile = 'flow_1_1.json'
        self.eng.load_flow(os.path.join(self.path_to_work_directory, flowFile))
        print('successfully load flowFile: {0}, time: {1}'.format(flowFile, (time.time() - start_time)))

    def _check_episode_done(self, list_state):
        """Episode termination test — always False (fixed-horizon episodes)."""
        return False

    @staticmethod
    def convert_dic_to_df(dic):
        """Convert {key: sequence} into a DataFrame with one row per key."""
        list_df = [pd.Series(dic[key], name=key) for key in dic]
        return pd.DataFrame(list_df)

    def get_feature(self):
        """Return the per-intersection feature dicts."""
        return [inter.get_feature() for inter in self.list_intersection]

    def get_state(self):
        """Return (per-intersection states, done flag)."""
        list_state = [inter.get_state(self.dic_traffic_env_conf['LIST_STATE_FEATURE']) for inter in self.list_intersection]
        done = self._check_episode_done(list_state)
        return (list_state, done)

    @staticmethod
    def _reduce_duplicates(feature_name_list):
        """Strip trailing '_<0-3>' suffixes and deduplicate feature names."""
        new_list = set()
        for feature_name in feature_name_list:
            if (feature_name[(- 1)] in ['0', '1', '2', '3']):
                new_list.add(feature_name[:(- 2)])
        return list(new_list)

    def get_reward(self):
        """Return the per-intersection rewards under DIC_REWARD_INFO."""
        return [inter.get_reward(self.dic_traffic_env_conf['DIC_REWARD_INFO']) for inter in self.list_intersection]

    def get_current_time(self):
        """Return the simulator's current time."""
        return self.eng.get_current_time()

    def log(self, cur_time, before_action_feature, action):
        """Append one (time, state, action) record per intersection."""
        for inter_ind in range(len(self.list_intersection)):
            self.list_inter_log[inter_ind].append({'time': cur_time, 'state': before_action_feature[inter_ind], 'action': action[inter_ind]})

    def batch_log(self, start, stop):
        """Dump vehicle CSVs and pickled action logs for intersections [start, stop)."""
        for inter_ind in range(start, stop):
            if ((int(inter_ind) % 100) == 0):
                print('Batch log for inter ', inter_ind)
            path_to_log_file = os.path.join(self.path_to_log, 'vehicle_inter_{0}.csv'.format(inter_ind))
            dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()
            df = pd.DataFrame.from_dict(dic_vehicle, orient='index')
            df.to_csv(path_to_log_file, na_rep='nan')
            path_to_log_file = os.path.join(self.path_to_log, 'inter_{0}.pkl'.format(inter_ind))
            with open(path_to_log_file, 'wb') as f:
                pickle.dump(self.list_inter_log[inter_ind], f)

    def bulk_log_multi_process(self, batch_size=100):
        """Write all logs using one subprocess per batch of intersections."""
        assert (len(self.list_intersection) == len(self.list_inter_log))
        if (batch_size > len(self.list_intersection)):
            batch_size_run = len(self.list_intersection)
        else:
            batch_size_run = batch_size
        process_list = []
        for batch in range(0, len(self.list_intersection), batch_size_run):
            start = batch
            # NOTE(review): stop uses `batch_size`, not `batch_size_run`,
            # exactly as in the original — confirm this is intended.
            stop = min((batch + batch_size), len(self.list_intersection))
            p = Process(target=self.batch_log, args=(start, stop))
            print('before')
            p.start()
            print('end')
            process_list.append(p)
        print('before join')
        for t in process_list:
            t.join()
        print('end join')

    def bulk_log(self):
        """Write all logs synchronously, then flush the engine replay log."""
        for inter_ind in range(len(self.list_intersection)):
            path_to_log_file = os.path.join(self.path_to_log, 'vehicle_inter_{0}.csv'.format(inter_ind))
            dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()
            df = self.convert_dic_to_df(dic_vehicle)
            df.to_csv(path_to_log_file, na_rep='nan')
        for inter_ind in range(len(self.list_inter_log)):
            path_to_log_file = os.path.join(self.path_to_log, 'inter_{0}.pkl'.format(inter_ind))
            with open(path_to_log_file, 'wb') as f:
                pickle.dump(self.list_inter_log[inter_ind], f)
        self.eng.print_log(os.path.join(self.path_to_log, self.dic_traffic_env_conf['ROADNET_FILE']), os.path.join(self.path_to_log, 'replay_1_1.txt'))

    def log_attention(self, attention_dict):
        """Pickle *attention_dict* to attention.pkl."""
        path_to_log_file = os.path.join(self.path_to_log, 'attention.pkl')
        with open(path_to_log_file, 'wb') as f:
            pickle.dump(attention_dict, f)

    def log_hidden_state(self, hidden_states):
        """Pickle *hidden_states* to hidden_states.pkl."""
        path_to_log_file = os.path.join(self.path_to_log, 'hidden_states.pkl')
        with open(path_to_log_file, 'wb') as f:
            pickle.dump(hidden_states, f)

    def log_lane_vehicle_position(self):
        """Append vehicle positions per entering lane to direction-named text files.

        (The original defined this method twice with identical bodies; the
        duplicate has been removed.)
        """
        def list_to_str(alist):
            new_str = ''
            for s in alist:
                new_str = ((new_str + str(s)) + ' ')
            return new_str
        # Fixed single-intersection roadnet: road id -> compass direction.
        dic_lane_map = {'road_0_1_0_0': 'w', 'road_2_1_2_0': 'e', 'road_1_0_1_0': 's', 'road_1_2_3_0': 'n'}
        for inter in self.list_intersection:
            for lane in inter.list_entering_lanes:
                with open(os.path.join(self.path_to_log, ('lane_vehicle_position_%s.txt' % dic_lane_map[lane])), 'a') as fh:
                    print(((((str(self.get_current_time()) + ', ') + lane) + ', ') + list_to_str(inter._get_lane_vehicle_position([lane])[0])), file=fh)

    def log_first_vehicle(self):
        """Append (time, pos, speed) traces for vehicles of four flow groups."""
        _veh_id = 'flow_0_'
        _veh_id_2 = 'flow_2_'
        _veh_id_3 = 'flow_4_'
        _veh_id_4 = 'flow_6_'
        for inter in self.list_intersection:
            for i in range(100):
                veh_id = (_veh_id + str(i))
                veh_id_2 = (_veh_id_2 + str(i))
                (pos, speed) = inter._get_vehicle_info(veh_id)
                (pos_2, speed_2) = inter._get_vehicle_info(veh_id_2)
                os.makedirs(os.path.join(self.path_to_log, 'first_vehicle_info_a'), exist_ok=True)
                os.makedirs(os.path.join(self.path_to_log, 'first_vehicle_info_b'), exist_ok=True)
                if (pos and speed):
                    with open(os.path.join(self.path_to_log, 'first_vehicle_info_a', ('first_vehicle_info_a_%d.txt' % i)), 'a') as fh:
                        print(('%f, %f, %f' % (self.get_current_time(), pos, speed)), file=fh)
                if (pos_2 and speed_2):
                    with open(os.path.join(self.path_to_log, 'first_vehicle_info_b', ('first_vehicle_info_b_%d.txt' % i)), 'a') as fh:
                        print(('%f, %f, %f' % (self.get_current_time(), pos_2, speed_2)), file=fh)
                veh_id_3 = (_veh_id_3 + str(i))
                veh_id_4 = (_veh_id_4 + str(i))
                (pos_3, speed_3) = inter._get_vehicle_info(veh_id_3)
                (pos_4, speed_4) = inter._get_vehicle_info(veh_id_4)
                os.makedirs(os.path.join(self.path_to_log, 'first_vehicle_info_c'), exist_ok=True)
                os.makedirs(os.path.join(self.path_to_log, 'first_vehicle_info_d'), exist_ok=True)
                # NOTE(review): the 'c'/'d' directories reuse the _a/_b file
                # name prefixes, exactly as in the original — confirm intended.
                if (pos_3 and speed_3):
                    with open(os.path.join(self.path_to_log, 'first_vehicle_info_c', ('first_vehicle_info_a_%d.txt' % i)), 'a') as fh:
                        print(('%f, %f, %f' % (self.get_current_time(), pos_3, speed_3)), file=fh)
                if (pos_4 and speed_4):
                    with open(os.path.join(self.path_to_log, 'first_vehicle_info_d', ('first_vehicle_info_b_%d.txt' % i)), 'a') as fh:
                        print(('%f, %f, %f' % (self.get_current_time(), pos_4, speed_4)), file=fh)

    def log_phase(self):
        """Append (time, current phase index) per intersection to log_phase.txt."""
        for inter in self.list_intersection:
            with open(os.path.join(self.path_to_log, 'log_phase.txt'), 'a') as fh:
                print(('%f, %f' % (self.get_current_time(), inter.current_phase_index)), file=fh)

    def _adjacency_extraction(self):
        """Build per-intersection adjacency/neighbor metadata from the roadnet JSON."""
        traffic_light_node_dict = {}
        file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf['ROADNET_FILE'])
        with open('{0}'.format(file)) as json_data:
            net = json.load(json_data)
            # Only real (non-virtual) intersections carry traffic lights.
            for inter in net['intersections']:
                if (not inter['virtual']):
                    traffic_light_node_dict[inter['id']] = {'location': {'x': float(inter['point']['x']), 'y': float(inter['point']['y'])}, 'total_inter_num': None, 'adjacency_row': None, 'inter_id_to_index': None, 'neighbor_ENWS': None, 'entering_lane_ENWS': None}
            top_k = self.dic_traffic_env_conf['TOP_K_ADJACENCY']
            total_inter_num = len(traffic_light_node_dict.keys())
            inter_id_to_index = {}
            # Road id -> endpoints, lane count and polyline "length".
            edge_id_dict = {}
            for road in net['roads']:
                if (road['id'] not in edge_id_dict.keys()):
                    edge_id_dict[road['id']] = {}
                edge_id_dict[road['id']]['from'] = road['startIntersection']
                edge_id_dict[road['id']]['to'] = road['endIntersection']
                edge_id_dict[road['id']]['num_of_lane'] = len(road['lanes'])
                edge_id_dict[road['id']]['length'] = np.sqrt(np.square(pd.DataFrame(road['points'])).sum(axis=1)).sum()
            index = 0
            for i in traffic_light_node_dict.keys():
                inter_id_to_index[i] = index
                index += 1
            for i in traffic_light_node_dict.keys():
                traffic_light_node_dict[i]['inter_id_to_index'] = inter_id_to_index
                traffic_light_node_dict[i]['neighbor_ENWS'] = []
                traffic_light_node_dict[i]['entering_lane_ENWS'] = {'lane_ids': [], 'lane_length': []}
                # The four outgoing roads of intersection i_j are road_i_j_0..3.
                for j in range(4):
                    road_id = ((i.replace('intersection', 'road') + '_') + str(j))
                    neighboring_node = edge_id_dict[road_id]['to']
                    if (neighboring_node not in traffic_light_node_dict.keys()):
                        traffic_light_node_dict[i]['neighbor_ENWS'].append(None)
                    else:
                        traffic_light_node_dict[i]['neighbor_ENWS'].append(neighboring_node)
                        # Record the lanes entering i from that neighbor.
                        for (key, value) in edge_id_dict.items():
                            if ((value['from'] == neighboring_node) and (value['to'] == i)):
                                neighboring_road = key
                                neighboring_lanes = []
                                for k in range(value['num_of_lane']):
                                    neighboring_lanes.append((neighboring_road + '_{0}'.format(k)))
                                traffic_light_node_dict[i]['entering_lane_ENWS']['lane_ids'].append(neighboring_lanes)
                                traffic_light_node_dict[i]['entering_lane_ENWS']['lane_length'].append(value['length'])
            for i in traffic_light_node_dict.keys():
                location_1 = traffic_light_node_dict[i]['location']
                if (not self.dic_traffic_env_conf['ADJACENCY_BY_CONNECTION_OR_GEO']):
                    # Geo mode: top_k nearest intersections by Euclidean distance.
                    # NOTE(review): `row` is an int array, so float distances
                    # are truncated — preserved from the original; confirm.
                    row = np.array(([0] * total_inter_num))
                    for j in traffic_light_node_dict.keys():
                        location_2 = traffic_light_node_dict[j]['location']
                        dist = AnonEnv._cal_distance(location_1, location_2)
                        row[inter_id_to_index[j]] = dist
                    if (len(row) == top_k):
                        adjacency_row_unsorted = np.argpartition(row, (- 1))[:top_k].tolist()
                    elif (len(row) > top_k):
                        adjacency_row_unsorted = np.argpartition(row, top_k)[:top_k].tolist()
                    else:
                        adjacency_row_unsorted = [k for k in range(total_inter_num)]
                    adjacency_row_unsorted.remove(inter_id_to_index[i])
                    traffic_light_node_dict[i]['adjacency_row'] = ([inter_id_to_index[i]] + adjacency_row_unsorted)
                else:
                    # Connection mode: self followed by ENWS neighbors (-1 = missing).
                    traffic_light_node_dict[i]['adjacency_row'] = [inter_id_to_index[i]]
                    for j in traffic_light_node_dict[i]['neighbor_ENWS']:
                        if (j is not None):
                            traffic_light_node_dict[i]['adjacency_row'].append(inter_id_to_index[j])
                        else:
                            traffic_light_node_dict[i]['adjacency_row'].append((- 1))
                traffic_light_node_dict[i]['total_inter_num'] = total_inter_num
        return traffic_light_node_dict

    def _adjacency_extraction_lane(self):
        """Like _adjacency_extraction, plus lane-level adjacency via RoadNet."""
        traffic_light_node_dict = {}
        file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf['ROADNET_FILE'])
        roadnet = RoadNet('{0}'.format(file))
        with open('{0}'.format(file)) as json_data:
            net = json.load(json_data)
            for inter in net['intersections']:
                if (not inter['virtual']):
                    traffic_light_node_dict[inter['id']] = {'location': {'x': float(inter['point']['x']), 'y': float(inter['point']['y'])}, 'total_inter_num': None, 'adjacency_row': None, 'inter_id_to_index': None, 'neighbor_ENWS': None, 'entering_lane_ENWS': None, 'total_lane_num': None, 'adjacency_matrix_lane': None, 'lane_id_to_index': None, 'lane_ids_in_intersction': []}
            top_k = self.dic_traffic_env_conf['TOP_K_ADJACENCY']
            top_k_lane = self.dic_traffic_env_conf['TOP_K_ADJACENCY_LANE']
            total_inter_num = len(traffic_light_node_dict.keys())
            edge_id_dict = {}
            for road in net['roads']:
                if (road['id'] not in edge_id_dict.keys()):
                    edge_id_dict[road['id']] = {}
                edge_id_dict[road['id']]['from'] = road['startIntersection']
                edge_id_dict[road['id']]['to'] = road['endIntersection']
                edge_id_dict[road['id']]['num_of_lane'] = len(road['lanes'])
                edge_id_dict[road['id']]['length'] = np.sqrt(np.square(pd.DataFrame(road['points'])).sum(axis=1)).sum()
            inter_id_to_index = {}
            index = 0
            for i in traffic_light_node_dict.keys():
                inter_id_to_index[i] = index
                index += 1
            for i in traffic_light_node_dict.keys():
                traffic_light_node_dict[i]['inter_id_to_index'] = inter_id_to_index
                traffic_light_node_dict[i]['neighbor_ENWS'] = []
                traffic_light_node_dict[i]['entering_lane_ENWS'] = {'lane_ids': [], 'lane_length': []}
                for j in range(4):
                    road_id = ((i.replace('intersection', 'road') + '_') + str(j))
                    neighboring_node = edge_id_dict[road_id]['to']
                    if (neighboring_node not in traffic_light_node_dict.keys()):
                        traffic_light_node_dict[i]['neighbor_ENWS'].append(None)
                    else:
                        traffic_light_node_dict[i]['neighbor_ENWS'].append(neighboring_node)
                        for (key, value) in edge_id_dict.items():
                            if ((value['from'] == neighboring_node) and (value['to'] == i)):
                                neighboring_road = key
                                neighboring_lanes = []
                                for k in range(value['num_of_lane']):
                                    neighboring_lanes.append((neighboring_road + '_{0}'.format(k)))
                                traffic_light_node_dict[i]['entering_lane_ENWS']['lane_ids'].append(neighboring_lanes)
                                traffic_light_node_dict[i]['entering_lane_ENWS']['lane_length'].append(value['length'])
            lane_id_dict = roadnet.net_lane_dict
            total_lane_num = len(lane_id_dict.keys())

            def _get_top_k_lane(lane_id_list, top_k_input):
                """First top_k_input lane ids, padded with None when short."""
                top_k_lane_indexes = []
                for i in range(top_k_input):
                    lane_id = (lane_id_list[i] if (i < len(lane_id_list)) else None)
                    top_k_lane_indexes.append(lane_id)
                return top_k_lane_indexes
            # Per lane: [top-k input lanes, top-k output lanes].
            adjacency_matrix_lane = {}
            for i in lane_id_dict.keys():
                adjacency_matrix_lane[i] = [_get_top_k_lane(lane_id_dict[i]['input_lanes'], top_k_lane), _get_top_k_lane(lane_id_dict[i]['output_lanes'], top_k_lane)]
            for i in traffic_light_node_dict.keys():
                location_1 = traffic_light_node_dict[i]['location']
                if (not self.dic_traffic_env_conf['ADJACENCY_BY_CONNECTION_OR_GEO']):
                    row = np.array(([0] * total_inter_num))
                    for j in traffic_light_node_dict.keys():
                        location_2 = traffic_light_node_dict[j]['location']
                        dist = AnonEnv._cal_distance(location_1, location_2)
                        row[inter_id_to_index[j]] = dist
                    if (len(row) == top_k):
                        adjacency_row_unsorted = np.argpartition(row, (- 1))[:top_k].tolist()
                    elif (len(row) > top_k):
                        adjacency_row_unsorted = np.argpartition(row, top_k)[:top_k].tolist()
                    else:
                        adjacency_row_unsorted = [k for k in range(total_inter_num)]
                    adjacency_row_unsorted.remove(inter_id_to_index[i])
                    traffic_light_node_dict[i]['adjacency_row'] = ([inter_id_to_index[i]] + adjacency_row_unsorted)
                else:
                    traffic_light_node_dict[i]['adjacency_row'] = [inter_id_to_index[i]]
                    for j in traffic_light_node_dict[i]['neighbor_ENWS']:
                        if (j is not None):
                            traffic_light_node_dict[i]['adjacency_row'].append(inter_id_to_index[j])
                        else:
                            traffic_light_node_dict[i]['adjacency_row'].append((- 1))
                traffic_light_node_dict[i]['total_inter_num'] = total_inter_num
                traffic_light_node_dict[i]['total_lane_num'] = total_lane_num
                traffic_light_node_dict[i]['adjacency_matrix_lane'] = adjacency_matrix_lane
        return traffic_light_node_dict

    @staticmethod
    def _cal_distance(loc_dict1, loc_dict2):
        """Euclidean distance between two {'x': ..., 'y': ...} locations."""
        a = np.array((loc_dict1['x'], loc_dict1['y']))
        b = np.array((loc_dict2['x'], loc_dict2['y']))
        return np.sqrt(np.sum(((a - b) ** 2)))

    def end_sumo(self):
        """No-op kept for interface parity with the SUMO-backed environment."""
        print('anon process end')
class EngineFromConfigTests(unittest.TestCase):
    """Tests for ``engine_from_config`` URL parsing and credential injection.

    The original file had two bare string expressions
    ``('baseplate.clients.sqlalchemy.create_engine')`` where ``@mock.patch``
    decorators used to be, leaving the two-parameter test methods uncallable
    by the unittest runner; the patching is restored here via context
    managers so the method signatures the runner sees take only ``self``.
    """

    def setUp(self):
        # Fake secret store providing the credential referenced by
        # 'database.credentials_secret' below.
        secrets = FakeSecretsStore({'secrets': {'secret/sql/account': {'type': 'credential', 'username': 'reddit', 'password': 'password'}}})
        self.secrets = secrets

    def test_url(self):
        """A plain URL config builds an engine with that URL."""
        engine = engine_from_config({'database.url': 'sqlite://'})
        self.assertEqual(engine.url, URL('sqlite'))

    def test_credentials(self):
        """Credentials from the secret store are merged into the engine URL."""
        from unittest import mock
        with mock.patch('baseplate.clients.sqlalchemy.create_engine') as create_engine_mock:
            engine_from_config({'database.url': 'postgresql://fizz::9000/db', 'database.credentials_secret': 'secret/sql/account', 'database.pool_recycle': '60', 'database.pool_size': '10', 'database.max_overflow': '5'}, self.secrets)
            create_engine_mock.assert_called_once_with(URL(drivername='postgresql', username='reddit', password='password', host='localhost', port='9000', database='db'), pool_recycle=60, pool_size=10, max_overflow=5)

    def test_credentials_no_secrets(self):
        """Requesting a credential secret without a secrets store raises TypeError."""
        from unittest import mock
        with mock.patch('baseplate.clients.sqlalchemy.create_engine') as create_engine_mock:
            with self.assertRaises(TypeError):
                engine_from_config({'database.url': 'postgresql://fizz::9000/db', 'database.credentials_secret': 'secret/sql/account'})
            self.assertEqual(create_engine_mock.call_count, 0)
def display_pedigree(ds: xr.Dataset, parent: Hashable=variables.parent, graph_attrs: Optional[Dict[(Hashable, str)]]=None, node_attrs: Optional[Dict[(Hashable, ArrayLike)]]=None, edge_attrs: Optional[Dict[(Hashable, ArrayLike)]]=None) -> Any:
    """Render a dataset's pedigree (parent matrix) as a graphviz ``Digraph``.

    One graph node is created per sample (labeled from ``ds.samples`` when
    present and no explicit label is given) and one directed edge per known
    parent (negative parent indices mean "unknown" and are skipped).

    :param ds: dataset holding the parent-index variable.
    :param parent: name of the parent variable (defaults to ``variables.parent``).
    :param graph_attrs: attributes applied to the whole graph.
    :param node_attrs: per-node attributes, broadcast to ``n_samples``.
    :param edge_attrs: per-edge attributes, broadcast to ``parent.shape``.
    :return: the constructed ``graphviz.Digraph``.
    :raises RuntimeError: if the graphviz python/system libraries are missing.
    """
    try:
        from graphviz import Digraph
    except ImportError as e:
        raise RuntimeError('Visualizing pedigrees requires the `graphviz` python library and the `graphviz` system library to be installed.') from e
    ds = define_variable_if_absent(ds, variables.parent, parent, parent_indices)
    variables.validate(ds, {parent: variables.parent_spec})
    parent = ds[parent].values
    (n_samples, n_parent_types) = parent.shape
    graph_attrs = (graph_attrs or {})
    node_attrs = (node_attrs or {})
    edge_attrs = (edge_attrs or {})
    # Default node labels to the sample coordinate when available.
    if (('label' not in node_attrs) and ('samples' in ds.coords)):
        node_attrs['label'] = ds.samples.values
    # Broadcast scalar attribute values across all nodes / edges.
    node_attrs = {k: np.broadcast_to(v, n_samples) for (k, v) in node_attrs.items()}
    edge_attrs = {k: np.broadcast_to(v, parent.shape) for (k, v) in edge_attrs.items()}
    graph = Digraph()
    graph.attr(**graph_attrs)
    for i in range(n_samples):
        d = {k: str(v[i]) for (k, v) in node_attrs.items()}
        graph.node(str(i), **d)
    for i in range(n_samples):
        for j in range(n_parent_types):
            p = parent[(i, j)]
            # Negative indices encode an unknown parent; draw no edge.
            if (p >= 0):
                d = {}
                for (k, v) in edge_attrs.items():
                    d[k] = str(v[(i, j)])
                graph.edge(str(p), str(i), **d)
    return graph
class TestEntropySchemeStaticGrid():
    """Checks that the 'entropy' quant scheme produces different encodings than 'tf'."""

    def test_model_with_entropy_scheme(self):
        """Two sims of the same model: one 'tf', one switched to 'entropy'.

        The entropy scheme is registered into MAP_QUANT_SCHEME_TO_PYMO at
        test time (a module-global mutation — intentional here), then every
        input/output/param quantizer of the second sim is flipped to it.
        The resulting conv1 output encoding max must differ from the tf one.
        """
        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.conv1 = torch.nn.Conv2d(3, 16, 3, padding='same')
                self.conv2 = torch.nn.Conv2d(16, 16, 3, padding='same')

            def forward(self, *inputs):
                x = self.conv1(inputs[0])
                x = self.conv2(x)
                return x
        model = Model()
        dummy_input = torch.rand(1, 3, 224, 224)

        def forward_pass(model, args):
            model.eval()
            model(dummy_input)
        # Baseline: plain 'tf' quant scheme.
        sim1 = QuantizationSimModel(model, dummy_input, quant_scheme='tf')
        sim1.compute_encodings(forward_pass, None)
        # Second sim starts as 'tf', then all quantizers switch to 'entropy'.
        sim2 = QuantizationSimModel(model, dummy_input, quant_scheme='tf')
        import aimet_common.libpymo as libpymo
        from aimet_common.defs import MAP_QUANT_SCHEME_TO_PYMO
        MAP_QUANT_SCHEME_TO_PYMO['entropy'] = libpymo.QuantizationMode.QUANTIZATION_ENTROPY
        for (_, quant_wrapper) in sim2.quant_wrappers():
            for quantizer in quant_wrapper.input_quantizers:
                quantizer.quant_scheme = 'entropy'
            for quantizer in quant_wrapper.output_quantizers:
                quantizer.quant_scheme = 'entropy'
            for param_quantizer in quant_wrapper.param_quantizers.values():
                param_quantizer.quant_scheme = 'entropy'
        sim2.compute_encodings(forward_pass, None)
        assert (sim1.model.conv1.output_quantizers[0].encoding.max != sim2.model.conv1.output_quantizers[0].encoding.max)
def char_padding(inputs, voca_size, embedding_dim, wordMaxLen, charMaxLen):
    """Pad character-id sequences to fixed word and sentence lengths.

    Each word is prefixed with a 0 and right-padded with 0s to *charMaxLen*;
    each sentence is right-padded with all-zero words to *wordMaxLen*.

    :param inputs: sentences, each a list of words, each a list of char ids.
    :param voca_size: unused here — kept for call-site compatibility.
    :param embedding_dim: unused here — kept for call-site compatibility.
    :param wordMaxLen: target number of words per sentence.
    :param charMaxLen: target number of char slots per word (incl. leading 0).
    :return: (padded sentences, per-sentence list of original word lengths).

    NOTE(review): words longer than charMaxLen - 1 are NOT truncated (the
    padding count goes negative and no padding is added) — confirm inputs
    are pre-clipped upstream.
    """
    sentences_embed = list()
    sentences_embed_len = list()
    for (senIdx, sentence) in enumerate(inputs):
        inputs_embed = list()
        inputs_embed_len = list()
        for (wordIdx, words) in enumerate(sentence):
            # Leading 0 marker, then the chars, then zero-padding to charMaxLen.
            words_padded = (([0] + words) + ([0] * (charMaxLen - (1 + len(words)))))
            inputs_embed.append(words_padded)
            inputs_embed_len.append(len(words))
        # Pad the sentence with all-zero words up to wordMaxLen.
        paddings = ([0] * charMaxLen)
        inputs_embed = (inputs_embed + ([paddings] * (wordMaxLen - len(inputs_embed))))
        sentences_embed.append(inputs_embed)
        sentences_embed_len.append(inputs_embed_len)
    return (sentences_embed, sentences_embed_len)
def test_model(ds1000: DS1000Dataset, model: str, mode: str, num_procs: int=16, output_dir: Union[(str, Path)]='codex_greedy_outputs'):
    """Execute generated code against DS-1000 problems and score the results.

    Reads generated samples from ``output_dir/model/lib/mode/q<id>/*.py``,
    runs each problem's tests (in a process pool except for Sklearn, which
    is executed serially), caches per-problem ScoreRecords as JSON, and
    prints an accuracy report.

    :return: dict mapping library name -> list of ScoreRecord.
    """
    check_cpu_count(num_procs)
    score = defaultdict(list)
    for lib in ds1000.libs:
        lib_results = []
        problem_code_pairs = []
        for problem_id in range(len(ds1000[lib])):
            generated_code_path = ((((Path(output_dir) / model) / lib) / mode) / ('q' + str(problem_id)))
            code_list = []
            for generated_code_sample_path in glob.glob(str((generated_code_path / '*.py'))):
                # Context manager fixes the original's leaked file handle.
                with open(generated_code_sample_path, 'r', encoding='UTF-8') as code_file:
                    code_list.append(code_file.read())
            problem_code_pairs.append((ds1000[lib][problem_id], code_list))
        # Sklearn tests are not multiprocessing-safe; run them serially.
        if ((num_procs > 1) and (lib != 'Sklearn')):
            with Pool(processes=num_procs) as pool:
                for test_results in tqdm(pool.imap(test_helper, problem_code_pairs), total=len(ds1000[lib]), desc=f'Executing test for {lib} questions'):
                    lib_results.append(test_results)
        else:
            for problem_code_pair in tqdm(problem_code_pairs):
                lib_results.append(test_helper(problem_code_pair))
        for problem_id in range(len(ds1000[lib])):
            score[lib].append(ScoreRecord(library=lib, problem_id=problem_id, perturbation_type=ds1000[lib][problem_id]['perturbation_type'], perturbation_origin_id=int(ds1000[lib][problem_id]['perturbation_origin_id']), test_results=lib_results[problem_id]))
    # Persist per-problem records so reruns can skip finished work.
    for lib in ds1000.libs:
        result_cache_path = ((Path(output_dir) / f'{model}_{mode}_result_cache') / lib)
        os.makedirs(result_cache_path, exist_ok=True)
        for record in score[lib]:
            record.write_to_json((result_cache_path / f'{record.problem_id}.json'))
    report_acc(score)
    return score
class TrainSet(torch.utils.data.Dataset):
    """Image-folder dataset yielding ``{'img': tensor}`` samples.

    Images are resized to *image_size*, converted to tensors and normalized
    to roughly [-1, 1] (mean/std 0.5 per channel); labels from the folder
    structure are discarded.
    """

    def __init__(self, data_root, image_size):
        super().__init__()
        self.transform = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        self.imgs = torchvision.datasets.ImageFolder(root=data_root, transform=self.transform)

    def __getitem__(self, idx):
        # ImageFolder returns (image, class_index); only the image is kept.
        sample = {'img': self.imgs[idx][0]}
        return sample

    def __len__(self):
        return len(self.imgs)
class StandardScheme(VersionSchemeInterface):
    """Version scheme supporting PEP 440-style bump operations.

    ``update`` accepts either a comma-separated list of bump keywords
    (``major``, ``minor``, ``micro``/``patch``/``fix``, ``a``/``b``/``rc``/...,
    ``post``, ``dev``, ``release``) applied in order, or a single explicit
    version string.
    """

    PLUGIN_NAME = 'standard'

    def update(self, desired_version: str, original_version: str, version_data: dict) -> str:
        """Return the new version string derived from *original_version*.

        Raises ValueError when an explicit version is combined with other
        operations, or (if ``validate-bump`` is enabled) when the explicit
        version is not higher than the original.
        """
        from packaging.version import Version
        original = Version(original_version)
        versions = desired_version.split(',')
        for version in versions:
            if (version == 'release'):
                # Keep only the release tuple, dropping pre/post/dev segments.
                reset_version_parts(original, release=original.release)
            elif (version == 'major'):
                reset_version_parts(original, release=update_release(original, [(original.major + 1)]))
            elif (version == 'minor'):
                reset_version_parts(original, release=update_release(original, [original.major, (original.minor + 1)]))
            elif (version in {'micro', 'patch', 'fix'}):
                reset_version_parts(original, release=update_release(original, [original.major, original.minor, (original.micro + 1)]))
            elif (version in {'a', 'b', 'c', 'rc', 'alpha', 'beta', 'pre', 'preview'}):
                # Normalize the phase spelling, then continue the current
                # pre-release counter when the phase matches, else start at 0.
                (phase, number) = parse_letter_version(version, 0)
                if original.pre:
                    (current_phase, current_number) = parse_letter_version(*original.pre)
                    if (phase == current_phase):
                        number = (current_number + 1)
                reset_version_parts(original, pre=(phase, number))
            elif (version in {'post', 'rev', 'r'}):
                number = (0 if (original.post is None) else (original.post + 1))
                reset_version_parts(original, post=parse_letter_version(version, number))
            elif (version == 'dev'):
                number = (0 if (original.dev is None) else (original.dev + 1))
                reset_version_parts(original, dev=(version, number))
            else:
                # Not a keyword: treat the token as an explicit version.
                if (len(versions) > 1):
                    message = 'Cannot specify multiple update operations with an explicit version'
                    raise ValueError(message)
                next_version = Version(version)
                if (self.config.get('validate-bump', True) and (next_version <= original)):
                    message = f'Version `{version}` is not higher than the original version `{original_version}`'
                    raise ValueError(message)
                return str(next_version)
        return str(original)
# NOTE(review): the decorator below lost its `@pytest.mark` prefix during
# extraction (a bare `.parametrize(...)` is a syntax error); restored.
@pytest.mark.parametrize('coords,expected', [((0, 1), (1, 0)), (([0], [1]), ([1], [0])), ((0, [1]), ([1], [0])), (([0], 1), ([1], [0])), (([0, 1], [2, 3]), ([2, 3], [0, 1])), ((0, [1, 2]), ([1, 2], [0, 0])), (([0, 1], 2), ([2, 2], [0, 1])), (([0], [1, 2]), ([1, 2], [0, 0])), (([0, 1], [2]), ([2, 2], [0, 1]))])
def test_ensure_arr_input(coords, expected):
    """xy() should accept scalar/list coordinate mixes and return list outputs."""
    transformer = transform.AffineTransformer(Affine.identity())
    assert (transformer.xy(*coords, offset='ul') == expected)
# NOTE(review): this class uses `field(...)` defaults and `__post_init__`,
# i.e. it is a dataclass whose `@dataclass` decorator was stripped during
# extraction; without it the fields would hold bare `Field` objects. Restored.
@dataclass
class DataTrainingArguments:
    """Arguments specifying which data the model is trained and evaluated on."""

    task_name: Optional[str] = field(default=None, metadata={'help': ('The name of the task to train on: ' + ', '.join(task_to_keys.keys()))})
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    max_seq_length: Optional[int] = field(default=196, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the training data.'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the validation data.'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the test data.'})

    def __post_init__(self):
        """Validate the mutually-exclusive data-source options after init."""
        if (self.task_name is not None):
            self.task_name = self.task_name.lower()
            if (self.task_name not in task_to_keys.keys()):
                raise ValueError(('Unknown task, you should pick one in ' + ','.join(task_to_keys.keys())))
        elif (self.dataset_name is not None):
            # A hub dataset name needs no further validation here.
            pass
        elif ((self.train_file is None) or (self.validation_file is None)):
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            # Local files: both must share a supported extension.
            train_extension = self.train_file.split('.')[(- 1)]
            assert (train_extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
            validation_extension = self.validation_file.split('.')[(- 1)]
            assert (validation_extension == train_extension), '`validation_file` should have the same extension (csv or json) as `train_file`.'
def check_relevancy(file, relevant_if_metadata_above, relevant_if_metadata_below, verbose=True, key='default', engine='guess'):
    """Raise IrrelevantFileWarning unless the file's metadata satisfies both bound maps.

    *relevant_if_metadata_above* maps metadata keys to required lower bounds,
    *relevant_if_metadata_below* to required upper bounds. ``engine='guess'``
    lets DataFileManager pick the reader from the file itself.
    """
    if engine == 'guess':
        engine = DataFileManager.guess_engine(file)
    metadata = DataFileManager(engine).read_metadata(file, key=key)
    for name, lower_bound in relevant_if_metadata_above.items():
        if float(metadata[name]) < lower_bound:
            raise IrrelevantFileWarning('Database file {0} irrelevant: {1}={2} [file metadata] < {3} [expected], not loaded'.format(file, name, metadata[name], lower_bound))
    for name, upper_bound in relevant_if_metadata_below.items():
        if float(metadata[name]) > upper_bound:
            raise IrrelevantFileWarning('Database file {0} irrelevant ({1}={2} [file metadata] > {3} [expected]), not loaded'.format(file, name, metadata[name], upper_bound))
# NOTE(review): the bare `_pipeline_test` / `_torch` / `_tf` lines in this
# class look like decorators whose `@is_pipeline_test` / `@require_torch` /
# `@require_tf` prefixes were stripped during extraction — confirm against
# the original transformers test module.
_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
    """Tests for the zero-shot-classification pipeline (PyTorch and TF)."""

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, feature_extractor):
        # Build a classifier with default candidate labels plus sample inputs.
        classifier = ZeroShotClassificationPipeline(model=model, tokenizer=tokenizer, candidate_labels=['polics', 'health'])
        return (classifier, ['Who are you voting for in 2020?', 'My stomach hurts.'])

    def run_pipeline_test(self, classifier, _):
        # Single label passed positionally, as a list, and as a keyword.
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        # Comma-separated string labels split into multiple candidates;
        # single-label scores sum to 1.
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)
        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        # Batched inputs return one result dict per sequence.
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(outputs, [{'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]} for i in range(1)])
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(outputs, [{'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]} for i in range(2)])
        # Invalid inputs raise rather than silently returning.
        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')
        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')
        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template')
        with self.assertRaises(AttributeError):
            classifier('Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=None)
        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        # Exercise how entailment_id is derived from the model's label2id map.
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, (- 1))
        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        # Restore the original mapping so other tests are unaffected.
        zero_shot_classifier.model.config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    _torch
    def test_truncation(self):
        # Very long inputs should be truncated, not crash.
        zero_shot_classifier = pipeline('zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt')
        zero_shot_classifier(('Who are you voting for in 2020?' * 100), candidate_labels=['politics', 'public health', 'science'])

    _torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt')
        outputs = zero_shot_classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(nested_simplify(outputs), {'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.333, 0.333, 0.333]})

    _tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf')
        outputs = zero_shot_classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(nested_simplify(outputs), {'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.333, 0.333, 0.333]})

    _torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(nested_simplify(outputs), {'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.976, 0.015, 0.009]})
        outputs = zero_shot_classifier('The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True)
        self.assertEqual(nested_simplify(outputs), {'sequence': 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.', 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.817, 0.713, 0.018, 0.018]})

    _tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(nested_simplify(outputs), {'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.976, 0.015, 0.009]})
        outputs = zero_shot_classifier('The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True)
        self.assertEqual(nested_simplify(outputs), {'sequence': 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.', 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.817, 0.713, 0.018, 0.018]})
def prepare_collect_config(option, opt):
    """Derive the result-file path for this run and store it on ``opt.collect_path``.

    The filename encodes dataset, method, evaluation mode and the decoding
    hyper-parameters; ``opt.collect_path`` is mutated in place to point at the
    final ``.pkl`` file inside the (created if missing) collect directory.
    """
    if not os.path.exists(opt.collect_path):
        os.makedirs(opt.collect_path)
    parts = [option['dataset'], option['method'], opt.evaluation_mode]
    if opt.not_only_best_candidate:
        parts.insert(0, 'nobc')
    if option['decoding_type'] == 'ARFormer':
        # Autoregressive decoding: only beam size and top-k matter.
        suffix = 'bs%d_topk%d.pkl' % (option['beam_size'], option['topk'])
    else:
        ct_tag = 'CT' if option['use_ct'] else ''
        parts.append(('%s' % ct_tag) + option['paradigm'])
        alpha_scaled = int(100 * option['beam_alpha'])
        if option['paradigm'] == 'mp':
            suffix = 'i%db%da%03d.pkl' % (option['iterations'], option['length_beam_size'], alpha_scaled)
        else:
            suffix = 'q%dqi%db%da%03d.pkl' % (option['q'], option['q_iterations'], option['length_beam_size'], alpha_scaled)
    opt.collect_path = os.path.join(opt.collect_path, '_'.join(parts + [suffix]))
def SetPyKeyVal(key_name, value_name, value):
    """Create/open *key_name* under the registry root and set *value_name* to *value* (REG_SZ).

    Relies on module-level ``get_root_hkey``, ``root_key_name`` and ``verbose``
    — presumably installer helpers defined elsewhere in this file; confirm.
    """
    root_hkey = get_root_hkey()
    root_key = winreg.OpenKey(root_hkey, root_key_name)
    try:
        my_key = winreg.CreateKey(root_key, key_name)
        try:
            winreg.SetValueEx(my_key, value_name, 0, winreg.REG_SZ, value)
        finally:
            # Close handles innermost-first even if SetValueEx raises.
            my_key.Close()
    finally:
        root_key.Close()
    if verbose:
        print(('-> %s\\%s[%s]=%r' % (root_key_name, key_name, value_name, value)))
class DirectionalLightHelper(Line):
    """Line-based helper visualizing a directional light's rays and,
    optionally, the extent of its shadow camera.

    NOTE(review): the ``@property``/setter decorators were lost during
    extraction — the bare ``_length.setter`` / ``_shadow_extent.setter``
    lines were NameErrors, and ``self.ray_length = ...`` in ``__init__``
    requires the setters. Restored here; interface unchanged.
    """

    def __init__(self, ray_length=1, color=None, show_shadow_extent=False):
        self._color = color
        super().__init__(Geometry(positions=np.zeros((8, 3), np.float32)), LineArrowMaterial(color='#fff', thickness=5))
        self._shadow_helper = Line(Geometry(positions=np.zeros((14, 3), np.float32)), LineSegmentMaterial())
        self.add(self._shadow_helper)
        self.ray_length = ray_length
        self.show_shadow_extent = show_shadow_extent
        self.world.on_update(self._update)

    @property
    def ray_length(self):
        """Length of the drawn light rays."""
        return self._ray_length

    @ray_length.setter
    def ray_length(self, value):
        self._ray_length = float(value)
        length = self._ray_length
        len5 = (length / 5)
        # Four rays pointing in -z, offset in +/-x and +/-y by length/5.
        positions = np.array([[len5, 0, 0], [len5, 0, (- length)], [(- len5), 0, 0], [(- len5), 0, (- length)], [0, len5, 0], [0, len5, (- length)], [0, (- len5), 0], [0, (- len5), (- length)]], np.float32)
        self.geometry.positions.data[:] = positions
        self.geometry.positions.update_range(0, 8)

    @property
    def show_shadow_extent(self):
        """Whether the shadow-camera extent rectangle is drawn."""
        return self._show_shadow_extent

    @show_shadow_extent.setter
    def show_shadow_extent(self, value):
        self._show_shadow_extent = bool(value)
        self._shadow_helper.visible = self._show_shadow_extent

    def _update(self, transform: AffineBase):
        # Keep color and shadow-extent geometry in sync with the parent Light.
        if (not isinstance(self.parent, Light)):
            return
        if (self._color is None):
            color = self.parent.color
            if (color != self.material.color):
                self.material.color = color
                self._shadow_helper.material.color = color
        half_w = (self.parent.shadow.camera.width / 2)
        half_h = (self.parent.shadow.camera.height / 2)
        # Rebuild the extent rectangle only when the camera size changed.
        cur_size = np.abs(self._shadow_helper.geometry.positions.data[0])
        ref_size = (half_w, half_h, 0)
        if (not np.isclose(cur_size, ref_size).all()):
            positions = np.array([[(- half_w), half_h, 0], [half_w, half_h, 0], [half_w, half_h, 0], [half_w, (- half_h), 0], [half_w, (- half_h), 0], [(- half_w), (- half_h), 0], [(- half_w), (- half_h), 0], [(- half_w), half_h, 0], [(- half_w), (- half_h), 0], [half_w, half_h, 0], [half_w, (- half_h), 0], [(- half_w), half_h, 0]], np.float32)
            self._shadow_helper.geometry.positions.data[:12] = positions
            self._shadow_helper.geometry.positions.update_range(0, 12)
        # Last segment endpoint tracks the light's distance to its target.
        lastval = (- self.parent._gfx_distance_to_target)
        if (not np.isclose(lastval, self._shadow_helper.geometry.positions.data[(13, 2)])):
            self._shadow_helper.geometry.positions.data[13] = (0, 0, lastval)
            self._shadow_helper.geometry.positions.update_range(13, 14)
class WireLabel():
    """A text label attached to one or more circuit wires.

    Instances accumulate geometry via :meth:`register` until all ``num_wires``
    wires have reported in, then :meth:`draw_label` emits the TikZ drawing
    commands. Leading/trailing ``<`` / ``>`` in the label text request a
    decorative brace on that side.

    NOTE(review): indentation was lost in extraction; the nesting below is
    reconstructed from the data flow (e.g. ``bg_command`` is only defined and
    used under ``if tikz_str:``) — confirm against the original.
    """

    def __init__(self, text, count):
        self.text = text
        self.num_wires = count
        # Per-wire geometry recorded by register(); parallel lists.
        self.positions_seen = []
        self.tops_seen = []
        self.wires_seen = []
        self.bottoms_seen = []
        self.colors_seen = []
        self.info_seen = []
        self.ready = 0
        if (count == 0):
            sys.exit(('Error: somehow no wires for label %s\n' % text))
        # A leading '<' or '>' requests a brace before the label text.
        if (self.text[0] in ['<', '>']):
            self.start_brace = self.text[0]
            self.text = self.text[1:]
            add_to_predocument('decorate')
        else:
            self.start_brace = ''
        # A trailing '<' or '>' requests a brace after the label text.
        if (self.text[(- 1)] in ['<', '>']):
            self.end_brace = self.text[(- 1)]
            self.text = self.text[:(- 1)]
            add_to_predocument('decorate')
        else:
            self.end_brace = ''

    def register(self, pos, loc, breadth, color, info):
        """Record one wire's position/extent; mark ready when all wires seen."""
        self.positions_seen.append(pos)
        self.tops_seen.append((loc + (0.5 * breadth)))
        self.wires_seen.append(loc)
        self.bottoms_seen.append((loc - (0.5 * breadth)))
        self.colors_seen.append(color)
        self.info_seen.append(info)
        if (len(self.positions_seen) == self.num_wires):
            self.ready = 1

    def is_ready(self):
        """Return truthy once every wire has registered."""
        return self.ready

    def draw_label(self):
        """Print the TikZ commands for this label (braces, background, text)."""
        global orientation, premath_str, wire_prefix, postmath_str, bgcolor
        if (not self.ready):
            sys.exit('Error: label %s is not ready\n')
        # All wires must agree on position and start/end info.
        for i in range(1, len(self.positions_seen)):
            if (self.positions_seen[i] != self.positions_seen[0]):
                sys.exit(('Error: inconsistent positions for label %s\n' % self.text))
            if (self.info_seen[i] != self.info_seen[0]):
                sys.exit(('Error: inconsistent start/end for label %s\n' % self.text))
        pos = self.positions_seen[0]
        (lr, ab, ns, ew, shift, angle, tikz_str, gate_length) = self.info_seen[0]
        # Label is centered on the covered breadth of all registered wires.
        max_loc = max(self.tops_seen)
        min_loc = min(self.bottoms_seen)
        loc = (0.5 * (min_loc + max_loc))
        delta = BRACE_AMPLITUDE
        adjust = (0.75 * WIRE_PAD)
        brace_max = (0.5 * (max_loc + max(self.wires_seen)))
        brace_min = (0.5 * (min_loc + min(self.wires_seen)))
        if self.start_brace:
            # Position the leading brace relative to the gate edge.
            if (lr == 'left'):
                if (gate_length == None):
                    brace_pos = ((pos - GATE_SIZE) - delta)
                else:
                    brace_pos = (pos - gate_length)
                    gate_length -= delta
            else:
                brace_pos = pos
                pos += delta
                if (gate_length != None):
                    gate_length -= delta
            # Blank out the brace area, then draw it.
            print(('\\filldraw[color=%s,fill=%s] (%f,%f) rectangle (%f,%f);' % (((bgcolor, bgcolor) + get_x_y(brace_pos, brace_min)) + get_x_y((brace_pos + delta), brace_max))))
            if (self.start_brace == '<'):
                draw_breadthwise_brace(brace_min, brace_max, (brace_pos + delta), (- delta))
            else:
                draw_breadthwise_brace(brace_min, brace_max, brace_pos, delta)
        if self.end_brace:
            # Position the trailing brace on the opposite side.
            if (lr == 'left'):
                brace_pos = pos
                pos -= delta
                if (gate_length != None):
                    gate_length -= delta
            elif (gate_length == None):
                brace_pos = ((pos + GATE_SIZE) + delta)
            else:
                brace_pos = (pos + gate_length)
                gate_length -= delta
            print(('\\filldraw[color=%s,fill=%s] (%f,%f) rectangle (%f,%f);' % (((bgcolor, bgcolor) + get_x_y(brace_pos, brace_min)) + get_x_y((brace_pos - delta), brace_max))))
            if (self.end_brace == '<'):
                draw_breadthwise_brace(brace_min, brace_max, brace_pos, (- delta))
            else:
                draw_breadthwise_brace(brace_min, brace_max, (brace_pos - delta), delta)
        if (gate_length == None):
            gate_length = 0
        if (self.num_wires > 1):
            shift = '0pt'
        command = ('\\draw[color=%s] ' % self.colors_seen[0])
        command += ('(%f,%f) ' % get_x_y(pos, loc))
        rotate_me = 0
        command += 'node['
        if tikz_str:
            # Emit an invisible phantom node to reserve background space.
            bg_command = ((command + tikz_str) + ',')
            if (orientation == 'horizontal'):
                breadth_word = 'height'
                length_word = 'width'
                bg_command += ('%s,' % lr)
            else:
                breadth_word = 'width'
                length_word = 'height'
                bg_command += ('%s,' % ab)
            bg_command += ('minimum %s=%fpt,' % (breadth_word, (max_loc - min_loc)))
            bg_command += ('minimum %s=%fpt,' % (length_word, gate_length))
            if (self.text == '...'):
                if (orientation == 'horizontal'):
                    math_text = '\\;\\vdots\\;'
                    bg_command += 'anchor = base,'
                else:
                    math_text = '\\cdots'
            else:
                math_text = self.text
            bg_command += 'inner sep=0pt] '
            bg_command += ('{\\phantom{%s$%s%s$%s}};' % (premath_str, wire_prefix, math_text, postmath_str))
            print(bg_command)
        # Now emit the visible label node itself.
        if (self.text == '...'):
            if (orientation == 'horizontal'):
                command += ('anchor=mid %s] {%s$%s\\vdots$%s}' % (ew, premath_str, wire_prefix, postmath_str))
            else:
                command += ('%s] {%s$%s\\cdots%s$}' % (ab, premath_str, wire_prefix, postmath_str))
        else:
            if (orientation == 'horizontal'):
                command += ('%s' % lr)
            elif (angle == 0):
                command += ('%s' % ab)
            elif (angle == 90):
                command += ('%s,anchor=%s,inner sep=0pt' % (ab, ew))
                rotate_me = 1
            else:
                command += ('%s,anchor=%s %s,xshift=%s,inner sep=0pt' % (ab, ns, ew, shift))
                rotate_me = 1
            if rotate_me:
                command += (',rotate=%i' % (0 - angle))
            command += ('] {%s$%s%s$%s}' % (premath_str, wire_prefix, self.text, postmath_str))
        command += ';'
        print(command)
def test_prefix():
    """Table-name prefixing: applied by default, skippable, and type-checked."""
    assert connection._table_name('') == (TABLE_PREFIX + b'_')
    assert connection._table_name('foo') == (TABLE_PREFIX + b'_foo')
    assert connection.table('foobar').name == (TABLE_PREFIX + b'_foobar')
    assert connection.table('foobar', use_prefix=False).name == b'foobar'
    # A connection without a prefix leaves names untouched.
    unprefixed = Connection(autoconnect=False)
    assert unprefixed._table_name('foo') == b'foo'
    # Non-string prefix/separator values are rejected.
    with assert_raises(TypeError):
        Connection(autoconnect=False, table_prefix=123)
    with assert_raises(TypeError):
        Connection(autoconnect=False, table_prefix_separator=2.1)
class Water_density_fitting(unittest.TestCase):
    """FNO-CCSD(T) regression tests for water with density fitting.

    NOTE(review): decorators were stripped during extraction — the bare
    ``('fail due to updates of pp_int?')`` strings preceding the test methods
    were almost certainly ``@unittest.skip(...)`` markers, and
    ``setUpClass``/``tearDownClass`` require ``@classmethod`` to receive
    ``cls``. Restored here.
    """

    @classmethod
    def setUpClass(cls):
        # Build the shared water molecule and converged density-fitted RHF.
        mol = gto.Mole()
        mol.verbose = 4
        mol.output = '/dev/null'
        mol.atom = '\n    O        0.00000        0.00000        0.11779\n    H        0.00000        0.75545       -0.47116\n    H        0.00000       -0.75545       -0.47116\n    '
        mol.pseudo = 'gth-hf-rev'
        mol.basis = 'cc-pvdz'
        mol.precision = 1e-10
        mol.build()
        mf = scf.RHF(mol).density_fit(auxbasis='cc-pvdz-jkfit').run()
        cls.mol = mol
        cls.mf = mf

    @classmethod
    def tearDownClass(cls):
        cls.mol.stdout.close()
        del cls.mol, cls.mf

    def kernel(self, CC, **kwargs):
        """Run a CC calculation plus (T) correction; return (e_corr, et)."""
        mcc = CC(self.mf, **kwargs)
        eris = mcc.ao2mo()
        mcc.kernel(eris=eris)
        et = CCSD_T(mcc, eris=eris)
        return (mcc.e_corr, et)

    @unittest.skip('fail due to updates of pp_int?')
    def test_fno_by_thresh(self):
        threshs = [0.01, 0.001, 0.0001]
        refs = [[(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)]]
        for (thresh, ref) in zip(threshs, refs):
            (eccsd, et) = self.kernel(cc.FNOCCSD, thresh=thresh)
            self.assertAlmostEqual(eccsd, ref[0], 6)
            self.assertAlmostEqual(et, ref[1], 6)
        # With a negligible threshold FNO-CCSD must reproduce full CCSD.
        (eccsd0, et0) = self.kernel(cc.CCSD)
        (eccsd, et) = self.kernel(cc.FNOCCSD, thresh=1e-100)
        self.assertAlmostEqual(eccsd, eccsd0, 6)
        self.assertAlmostEqual(et, et0, 6)

    @unittest.skip('fail due to updates of pp_int?')
    def test_fno_by_thresh_frozen(self):
        threshs = [0.01, 0.001, 0.0001]
        refs = [[(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)]]
        for (thresh, ref) in zip(threshs, refs):
            (eccsd, et) = self.kernel(cc.FNOCCSD, thresh=thresh, frozen=1)
            self.assertAlmostEqual(eccsd, ref[0], 6)
            self.assertAlmostEqual(et, ref[1], 6)
        (eccsd0, et0) = self.kernel(cc.CCSD, frozen=1)
        (eccsd, et) = self.kernel(cc.FNOCCSD, thresh=1e-100, frozen=1)
        self.assertAlmostEqual(eccsd, eccsd0, 6)
        self.assertAlmostEqual(et, et0, 6)
class RoundRobinArbiterEn(Component):
    """Round-robin arbiter with an enable input.

    Uses the classic doubled-request trick: requests are replicated over
    2*nreqs bits so the priority point can "wrap around" without special
    cases, then the two halves of the grant vector are OR-ed back together.

    NOTE(review): the nested ``comb_*`` functions look like PyMTL3 combinational
    blocks whose ``@update`` decorators were stripped during extraction —
    confirm against the original component.
    """

    def construct(s, nreqs):
        nreqsX2 = (nreqs * 2)
        Type = mk_bits(nreqs)
        s.en = InPort()
        s.reqs = InPort(Type)
        s.grants = OutPort(Type)
        # One-hot priority register; rotates one position after each grant.
        s.priority_en = Wire()
        s.priority_reg = m = RegEnRst(mk_bits(nreqs), reset_value=1)
        m.en //= s.priority_en
        m.in_[1:nreqs] //= s.grants[0:(nreqs - 1)]
        m.in_[0] //= s.grants[(nreqs - 1)]
        # Internal doubled-width vectors; kills[i] marks lower-priority slots.
        s.kills = Wire(((2 * nreqs) + 1))
        s.priority_int = Wire((2 * nreqs))
        s.reqs_int = Wire((2 * nreqs))
        s.grants_int = Wire((2 * nreqs))

        def comb_reqs_int():
            # Replicate requests into both halves of the doubled vector.
            s.reqs_int[0:nreqs] = s.reqs
            s.reqs_int[nreqs:nreqsX2] = s.reqs

        def comb_grants():
            # Fold the doubled grant vector back to nreqs bits.
            for i in range(nreqs):
                s.grants[i] = (s.grants_int[i] | s.grants_int[(nreqs + i)])

        def comb_priority_en():
            # Rotate priority only when a grant was issued and we're enabled.
            s.priority_en = ((s.grants != 0) & s.en)

        def comb_priority_int():
            s.priority_int[0:nreqs] = s.priority_reg.out
            s.priority_int[nreqs:nreqsX2] = 0

        def comb_kills():
            # Propagate a kill chain from the priority point through requests.
            s.kills[0] = 1
            for i in range(nreqsX2):
                if s.priority_int[i]:
                    s.kills[(i + 1)] = s.reqs_int[i]
                else:
                    s.kills[(i + 1)] = (s.kills[i] | ((~ s.kills[i]) & s.reqs_int[i]))

        def comb_grants_int():
            # Grant the first un-killed request at or after the priority point.
            for i in range(nreqsX2):
                if s.priority_int[i]:
                    s.grants_int[i] = s.reqs_int[i]
                else:
                    s.grants_int[i] = ((~ s.kills[i]) & s.reqs_int[i])

    def line_trace(s):
        """One-line simulation trace: requests | grants."""
        return f'{s.reqs} | {s.grants}'
def test_AddValueToZero_simple_both():
    """AddValueToZero(target='both') shifts matrix columns containing zeros and weights."""
    decision_matrix = skcriteria.mkdm(matrix=[[1, 0, 3], [0, 5, 6]], objectives=[min, max, min], weights=[1, 2, 0])
    scaler = AddValueToZero(value=0.5, target='both')
    transformed = scaler.transform(decision_matrix)
    expected = skcriteria.mkdm(matrix=[[1.5, 0.5, 3], [0.5, 5.5, 6]], objectives=[min, max, min], weights=[1.5, 2.5, 0.5])
    assert transformed.equals(expected)
class CmdCombatHelp(CmdHelp):
    """Help command that shows a combat cheat-sheet while the caller is in combat."""

    def func(self):
        # Outside combat, or when a specific topic was requested, defer to
        # the regular help command.
        if not is_in_combat(self.caller) or self.args:
            super().func()
            return
        self.caller.msg(
            'Available combat commands:|/'
            '|wAttack:|n Attack a target, attempting to deal damage.|/'
            '|wPass:|n Pass your turn without further action.|/'
            '|wDisengage:|n End your turn and attempt to end combat.|/'
        )
def read_tables(config, c=None):
    """Read the six tables this query needs and optionally register them with *c*.

    Returns (store_sales, date_dim, web_sales, store_returns, store, item)
    dataframes, in that order.
    """
    reader = build_reader(data_format=config['file_format'], basepath=config['data_dir'], split_row_groups=config['split_row_groups'], backend=config['backend'])
    # Read order matches the original function.
    frames = {
        'store_sales': reader.read('store_sales', relevant_cols=store_sales_cols),
        'date_dim': reader.read('date_dim', relevant_cols=date_cols),
        'web_sales': reader.read('web_sales', relevant_cols=websale_cols),
        'store_returns': reader.read('store_returns', relevant_cols=sr_cols),
        'store': reader.read('store', relevant_cols=store_cols),
        'item': reader.read('item', relevant_cols=item_cols),
    }
    if c:
        # Registration order also preserved from the original.
        for name in ('store_sales', 'date_dim', 'item', 'web_sales', 'store_returns', 'store'):
            c.create_table(name, frames[name], persist=False)
    return (frames['store_sales'], frames['date_dim'], frames['web_sales'], frames['store_returns'], frames['store'], frames['item'])
def test_1epoch_fuse(class_limit=None, n_snip=5, opt_flow_len=10, saved_model=None, saved_spatial_weights=None, saved_temporal_weights=None, image_shape=(224, 224), original_image_shape=(341, 256), batch_size=128, fuse_method='average'):
    """Run one validation pass through the two-stream fusion model."""
    print('class_limit = ', class_limit)
    # Build the dataset and its validation batch generator.
    dataset = DataSet(class_limit=class_limit, image_shape=image_shape, original_image_shape=original_image_shape, n_snip=n_snip, opt_flow_len=opt_flow_len, batch_size=batch_size)
    generator = dataset.validation_generator()
    n_steps = dataset.n_batch
    # Assemble the fused two-stream model from the saved weights.
    fused = ResearchModels(nb_classes=len(dataset.classes), n_snip=n_snip, opt_flow_len=opt_flow_len, image_shape=image_shape, saved_model=saved_model, saved_temporal_weights=saved_temporal_weights, saved_spatial_weights=saved_spatial_weights)
    fused.model.fit_generator(generator=generator, steps_per_epoch=n_steps, max_queue_size=1)
def submit(modelpath, savepath):
    """Run a trained UNet registration model over the validation set, print Dice
    scores at several resolutions, and write half-resolution displacement
    fields to `savepath` as float16 .npz files.

    Args:
        modelpath: path of the saved model state_dict to load.
        savepath: directory receiving 'disp_XXXX_XXXX.npz' files.
    """
    bs = 1  # batch size fixed at 1: the squeeze(0) calls below assume a single pair
    model = UNet(2, 3, opt.start_channel).cuda()
    torch.backends.cudnn.benchmark = True
    transform = SpatialTransform_1().cuda()
    model.load_state_dict(torch.load(modelpath))
    model.eval()
    transform.eval()
    Dices_35 = []  # mean half-resolution Dice per validation pair
    use_cuda = True
    device = torch.device(('cuda' if use_cuda else 'cpu'))
    test_set = ValidationDataset(opt.datapath)
    test_generator = Data.DataLoader(dataset=test_set, batch_size=bs, shuffle=False, num_workers=2)
    for (fix_name, mov_name, fix_img, mov_img, fix_lab, mov_lab) in test_generator:
        with torch.no_grad():
            # Volume dimensions of the moving label after dropping batch/channel dims.
            (XX, YY, ZZ) = mov_lab.squeeze(0).squeeze(0).data.numpy().shape
            # Identity coordinate grid, used to turn displacements into absolute sample positions.
            identity = np.meshgrid(np.arange(XX), np.arange(YY), np.arange(ZZ), indexing='ij')
            # Evaluate Dice only on labels present in BOTH volumes.
            mask_value1 = np.unique(mov_lab.data.cpu().numpy())
            mask_value2 = np.unique(fix_lab.data.cpu().numpy())
            mask_value = list((set(mask_value1) & set(mask_value2)))
            # Predict the dense flow and warp the moving labels with nearest-neighbour sampling.
            V_xy = model(mov_img.float().to(device), fix_img.float().to(device))
            (pytorch_Warped, pytorch_grid) = transform(mov_lab.float().to(device), V_xy.permute(0, 2, 3, 4, 1), mod='nearest')
            dice_bs = []
            for i in mask_value[1:]:  # skips the first common label — presumably background; TODO confirm
                dice_bs.append(dice(pytorch_Warped.squeeze(0).squeeze(0).data.cpu().numpy().copy(), fix_lab.squeeze(0).squeeze(0).data.numpy().copy(), k=i))
            print('Full res pytorch_grid : ', np.mean(dice_bs))
            # Convert the sampling grid to a scipy-style displacement and re-score
            # via map_coordinates as a cross-check.
            pytorch_grid = pytorch_grid.squeeze(0).permute(3, 0, 1, 2)
            scipy_disp = convert_pytorch_grid2scipy(pytorch_grid.data.cpu().numpy())
            moving_warped = map_coordinates(mov_lab.squeeze(0).squeeze(0).data.numpy(), (identity + scipy_disp), order=0)
            dice_bs = []
            for i in mask_value[1:]:
                dice_bs.append(dice(moving_warped.copy(), fix_lab.squeeze(0).squeeze(0).data.numpy().copy(), k=i))
            print('Full res scipy_disp : ', np.mean(dice_bs))
            # Persist a half-resolution float16 field, then reload and upsample it
            # to measure the Dice actually achievable from the submitted file.
            save_npz_name = os.path.join(savepath, 'disp_{:04d}_{:04d}.npz'.format(int(fix_name[0]), int(mov_name[0])))
            disp1 = np.array([zoom(scipy_disp[i], 0.5, order=2) for i in range(3)])
            np.savez(save_npz_name, np.array(disp1).astype(np.float16))
            disp_field = np.load(save_npz_name)['arr_0'].astype('float32')
            disp_field = np.array([zoom(disp_field[i], 2, order=2) for i in range(3)])
            moving_warped = map_coordinates(mov_lab.squeeze(0).squeeze(0).data.numpy(), (identity + disp_field), order=0)
            dice_bs = []
            for i in mask_value[1:]:
                dice_bs.append(dice(moving_warped.copy(), fix_lab.squeeze(0).squeeze(0).data.numpy().copy(), k=i))
            print('Half res scipy_disp : ', np.mean(dice_bs))
            Dices_35.append(np.mean(dice_bs))
    print(np.mean(Dices_35))
# NOTE(review): the decorator below is garbled — it lost its `@pytest.mark`
# prefix (like other decorators in this extract) and the parametrize list
# looks corrupted (the first uri_reference argument is mangled). Restore the
# original `@pytest.mark.parametrize('uri', [...])` form before running.
.parametrize('uri', [rfc3986.uri_reference(' rfc3986.uri_reference('/path/to/resource')])
def test_missing_host_component(uri):
    """A default Validator accepts the URI; one that requires a `host`
    component raises MissingComponentError for the same URI."""
    validators.Validator().validate(uri)
    validator = validators.Validator().require_presence_of('host')
    with pytest.raises(exceptions.MissingComponentError):
        validator.validate(uri)
def _detectFoamDir():
    """Detect the OpenFOAM installation directory from $WM_PROJECT_DIR.

    On Linux the variable is echoed from an interactive bash (so the user's
    rc files, which source the OpenFOAM environment, are executed); on
    Windows it is echoed inside WSL. Falls back to `_detectDefaultFoamDir()`
    when the variable is unset or the detected tree has no etc/bashrc.

    Returns:
        The detected installation path, or whatever `_detectDefaultFoamDir()`
        yields as a fallback.
    """
    foam_dir = None
    if (platform.system() == 'Linux'):
        # '-i' forces an interactive shell so ~/.bashrc runs before the echo.
        cmdline = ['bash', '-i', '-c', 'echo $WM_PROJECT_DIR']
        foam_dir = subprocess.check_output(cmdline, stderr=subprocess.PIPE)
    if (platform.system() == 'Windows'):
        foam_dir = _runCommandOnWSL('echo $WM_PROJECT_DIR')
    # check_output/WSL return bytes on Python 3; normalize to a stripped str.
    if (sys.version_info.major >= 3):
        foam_dir = foam_dir.decode('utf-8').rstrip()
    else:
        foam_dir = foam_dir.rstrip()
    if (len(foam_dir) > 1):
        if (platform.system() != 'Windows'):
            # Sanity check: a real OpenFOAM tree ships etc/bashrc.
            if (foam_dir and (not os.path.exists(os.path.join(foam_dir, 'etc', 'bashrc')))):
                print((foam_dir + '/etc/bashrc does not exist'))
                foam_dir = None
    else:
        print("environment var 'WM_PROJECT_DIR' is not defined\n,\n fallback to default {}".format(_DEFAULT_FOAM_DIR))
        foam_dir = None
    if (not foam_dir):
        foam_dir = _detectDefaultFoamDir()
    return foam_dir
class connecting(controlbase):
    """LCD page shown while the client is connecting to the server.

    Draws a 'connect to server' banner plus an animated row of dots that
    grows one dot per refresh and wraps when it fills the width.
    """
    def __init__(self, lcd):
        super(connecting, self).__init__(lcd)
        # Current length of the animated dot string.
        self.connecting_dots = 0
    def display(self, refresh):
        if refresh:
            # Full refresh: redraw the static banner.
            self.box(rectangle(0, 0, 1, 0.4), black)
            self.fittext(rectangle(0, 0, 1, 0.4), _('connect to server'), True)
            self.drawn_text = True
        # Animation strip: clear, then draw one more dot than last frame.
        self.box(rectangle(0, 0.4, 1, 0.52), black)
        dots = ''
        for i in range(self.connecting_dots):
            dots += '.'
        size = self.text((0, 0.4), dots, 12)
        self.connecting_dots += 1
        # Restart once the dots span the full width or after 20 frames.
        if ((size[0] >= 1) or (self.connecting_dots > 20)):
            self.connecting_dots = 0
        super(connecting, self).display(refresh)
    def process(self):
        if self.lcd.client.connection:
            # Connected: hand over to the main control page.
            return control(self.lcd)
        if self.testkeydown(MENU):
            return self.lcd.getmenu()
class SuperExpr(Expression):
    """AST expression node for a `super().<name>` member access.

    `call` holds the `super(...)` call expression itself; `info` starts as
    None and is presumably filled in later with the enclosing class during
    semantic analysis — TODO confirm against the analyzer.
    """
    __slots__ = ('name', 'info', 'call')
    __match_args__ = ('name', 'call', 'info')
    # Name of the attribute/method accessed on super().
    name: str
    # Enclosing class info; None until resolved.
    info: (TypeInfo | None)
    # The `super(...)` call expression.
    call: CallExpr
    def __init__(self, name: str, call: CallExpr) -> None:
        super().__init__()
        self.name = name
        self.call = call
        self.info = None
    def accept(self, visitor: ExpressionVisitor[T]) -> T:
        # Standard visitor-pattern dispatch.
        return visitor.visit_super_expr(self)
def test_download_periodic_stop_at_first_usable(tmp_path, mocker, time_freeze):
    """Periodic update should stop probing remote wheels once it reaches one
    that is old enough to be usable.

    Two remote versions exist: released 1 day ago (too fresh) and 30 days
    ago. Exactly two download / release-date probes happen, and the update
    log is read once and written once.
    """
    time_freeze(_UP_NOW)
    wheel = get_embed_wheel('pip', '3.9')
    app_data_outer = AppDataDiskFolder(str((tmp_path / 'app')))
    # Remote state: two candidate wheels with their release dates.
    pip_version_remote = [wheel_path(wheel, (0, 1, 1)), wheel_path(wheel, (0, 1, 0))]
    rel_date_remote = [(_UP_NOW - timedelta(days=1)), (_UP_NOW - timedelta(days=30))]
    download_wheel = mock_download(mocker, pip_version_remote)
    rel_date_gen = iter(rel_date_remote)
    # Each release-date lookup consumes the next date from the list above.
    release_date = mocker.patch('virtualenv.seed.wheels.periodic_update.release_date_for_wheel_path', side_effect=(lambda *a, **k: next(rel_date_gen)))
    # Existing (empty) update log dated two weeks back.
    last_update = (_UP_NOW - timedelta(days=14))
    u_log = UpdateLog(started=last_update, completed=last_update, versions=[], periodic=True)
    read_dict = mocker.patch('virtualenv.app_data.via_disk_folder.JSONStoreDisk.read', return_value=u_log.to_dict())
    write = mocker.patch('virtualenv.app_data.via_disk_folder.JSONStoreDisk.write')
    do_update('pip', '3.9', str(wheel.path), str(app_data_outer), [], True)
    assert (download_wheel.call_count == 2)
    assert (release_date.call_count == 2)
    assert (read_dict.call_count == 1)
    assert (write.call_count == 1)
def random_interaction_operator(n_orbitals, expand_spin=False, real=True, seed=None):
    """Generate a random InteractionOperator with interaction-tensor symmetries.

    The one-body term is a random Hermitian matrix. Each sampled two-body
    coefficient is copied onto the index permutations assigned below — four
    permutations in general, eight when `real` — so the tensor carries the
    usual Hermiticity/exchange symmetries.

    Args:
        n_orbitals: number of (spatial) orbitals.
        expand_spin: if True, expand to 2*n_orbitals spin-orbitals by copying
            coefficients onto the spin index patterns shown below.
        real: if True, coefficients are real and the extra four symmetric
            entries are also set.
        seed: optional numpy RNG seed for reproducibility.

    Returns:
        An InteractionOperator built from the sampled constant, one-body and
        two-body coefficients.
    """
    if (seed is not None):
        numpy.random.seed(seed)
    if real:
        dtype = float
    else:
        dtype = complex
    constant = numpy.random.randn()
    one_body_coefficients = random_hermitian_matrix(n_orbitals, real)
    two_body_coefficients = numpy.zeros((n_orbitals, n_orbitals, n_orbitals, n_orbitals), dtype)
    # NOTE: later (p, q, r, s) iterations overwrite symmetric entries written by
    # earlier ones; the fixed itertools.product order makes this deterministic.
    for (p, q, r, s) in itertools.product(range(n_orbitals), repeat=4):
        coeff = numpy.random.randn()
        # Imaginary parts only when at least 3 distinct indices are involved.
        if ((not real) and (len(set([p, q, r, s])) >= 3)):
            coeff += (1j * numpy.random.randn())
        two_body_coefficients[(p, q, r, s)] = coeff
        two_body_coefficients[(q, p, s, r)] = coeff
        two_body_coefficients[(s, r, q, p)] = coeff.conjugate()
        two_body_coefficients[(r, s, p, q)] = coeff.conjugate()
        if real:
            two_body_coefficients[(r, q, p, s)] = coeff
            two_body_coefficients[(p, s, r, q)] = coeff
            two_body_coefficients[(s, p, q, r)] = coeff
            two_body_coefficients[(q, r, s, p)] = coeff
    if expand_spin:
        n_spin_orbitals = (2 * n_orbitals)
        # One-body: identical for both spins (tensor product with identity).
        one_body_coefficients = numpy.kron(one_body_coefficients, numpy.eye(2))
        new_two_body_coefficients = numpy.zeros((n_spin_orbitals, n_spin_orbitals, n_spin_orbitals, n_spin_orbitals), dtype=complex)
        for (p, q, r, s) in itertools.product(range(n_orbitals), repeat=4):
            coefficient = two_body_coefficients[(p, q, r, s)]
            # Mixed-spin and same-spin index patterns all share the coefficient.
            new_two_body_coefficients[((2 * p), ((2 * q) + 1), ((2 * r) + 1), (2 * s))] = coefficient
            new_two_body_coefficients[(((2 * p) + 1), (2 * q), (2 * r), ((2 * s) + 1))] = coefficient
            new_two_body_coefficients[((2 * p), (2 * q), (2 * r), (2 * s))] = coefficient
            new_two_body_coefficients[(((2 * p) + 1), ((2 * q) + 1), ((2 * r) + 1), ((2 * s) + 1))] = coefficient
        two_body_coefficients = new_two_body_coefficients
    interaction_operator = InteractionOperator(constant, one_body_coefficients, two_body_coefficients)
    return interaction_operator
class QueueWrapper(NonBlocking):
    """Non-blocking client that drives an IterDataPipe over a request/response
    queue protocol.

    Each control operation sends a request through `self.protocol` and then
    polls for the matching response; between empty polls it invokes
    `NonBlocking.not_available_hook` so callers can yield instead of
    busy-waiting.
    """
    def __init__(self, protocol, response_wait_time=1e-05):
        if (not isinstance(protocol, communication.protocol.IterDataPipeQueueProtocolClient)):
            raise Exception('Got', protocol)
        self.protocol = protocol
        # Items consumed this epoch; reset on request_reset_epoch.
        self.counter = 0
        # Latched once a StopIterationResponse is received.
        self._stop_iteration = False
        # Timeout used when polling get_response_next.
        self._response_wait_time = response_wait_time
    def request_reset_epoch(self, seed_generator, iter_reset_fn):
        # A new epoch clears the stop flag and the consumed counter.
        self._stop_iteration = False
        self.counter = 0
        self.protocol.request_reset_epoch(seed_generator, iter_reset_fn)
    def _get_response(self, fn_name) -> None:
        # Poll self.protocol.<fn_name>() until it stops raising EmptyQueue.
        assert (hasattr(self.protocol, fn_name) and callable(getattr(self.protocol, fn_name)))
        get_response_fn = getattr(self.protocol, fn_name)
        while True:
            try:
                get_response_fn()
                break
            except communication.protocol.EmptyQueue:
                # No response yet: hand control back via the hook, if any.
                if (NonBlocking.not_available_hook is not None):
                    NonBlocking.not_available_hook()
    def get_reset_epoch_response(self) -> None:
        self._get_response('get_response_reset_epoch')
    def request_limit(self, num_batches: Optional[int], limit_fn: Optional[Callable[([DataPipe, Optional[int]], DataPipe)]]=None, worker_num_batches: Optional[int]=None) -> None:
        self.protocol.request_limit(num_batches, limit_fn, worker_num_batches)
    def get_limit_response(self) -> None:
        self._get_response('get_response_limit')
    def request_pause(self, pause_fn: Optional[Callable[([DataPipe], DataPipe)]]=None) -> None:
        self.protocol.request_pause(pause_fn)
    def get_pause_response(self) -> None:
        self._get_response('get_response_pause')
    def request_resume(self, resume_fn: Optional[Callable[([DataPipe], DataPipe)]]=None) -> None:
        self.protocol.request_resume(resume_fn)
    def get_resume_response(self) -> None:
        self._get_response('get_response_resume')
    def nonblocking_next(self):
        """Request/fetch the next item; raises NotAvailable when no response
        arrived within the wait time, StopIteration at end of data."""
        if self._stop_iteration:
            raise Exception('`next` or `nonblocking_next` called after receiving StopIteration')
        if self.protocol.can_take_request():
            self.protocol.request_next()
        try:
            response = self.protocol.get_response_next(block=True, timeout=self._response_wait_time)
        except communication.protocol.EmptyQueue:
            raise NotAvailable
        if isinstance(response, communication.messages.StopIterationResponse):
            self._stop_iteration = True
            raise StopIteration
        if isinstance(response, communication.messages.InvalidStateResponse):
            # Worker not ready to produce (e.g. mid-reset): treat as transient.
            raise NotAvailable
        return response.value
class CertificateErrorWrapper(usertypes.AbstractCertificateErrorWrapper):
    """Qt-version-agnostic wrapper around QWebEngineCertificateError.

    Qt 6 renamed several accessors (errorDescription -> description,
    error -> type, ignoreCertificateError -> acceptCertificate), so each
    method branches on `machinery.IS_QT5` to call the right one.
    """
    def __init__(self, error: QWebEngineCertificateError) -> None:
        super().__init__()
        self._error = error
        # Whether the caller decided to ignore this certificate error.
        self.ignore = False
    def __str__(self) -> str:
        if machinery.IS_QT5:
            return self._error.errorDescription()
        else:
            return self._error.description()
    def _type(self) -> Any:
        # Underlying error enum value, under its Qt5 or Qt6 accessor name.
        if machinery.IS_QT5:
            return self._error.error()
        else:
            return self._error.type()
    def reject_certificate(self) -> None:
        super().reject_certificate()
        self._error.rejectCertificate()
    def accept_certificate(self) -> None:
        super().accept_certificate()
        if machinery.IS_QT5:
            self._error.ignoreCertificateError()
        else:
            self._error.acceptCertificate()
    def __repr__(self) -> str:
        return utils.get_repr(self, error=debug.qenum_key(QWebEngineCertificateError, self._type()), string=str(self))
    def url(self) -> QUrl:
        return self._error.url()
    def is_overridable(self) -> bool:
        return self._error.isOverridable()
    def defer(self) -> None:
        # Deferring is intentionally unsupported here; see the raised message.
        raise usertypes.UndeferrableError('PyQt bug')
class BasicBlock(nn.Module):
    """ResNet basic residual block: 3x3 conv -> BN -> ReLU -> 3x3 conv -> BN,
    with an optional CBAM attention module applied before the residual add.

    `downsample`, when given, projects the input so it matches the conv
    branch's shape for the skip connection.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_cbam=False):
        super(BasicBlock, self).__init__()
        # First conv carries the (optional) stride; second conv is stride 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        # CBAM attention (reduction ratio 16), or disabled.
        self.cbam = CBAM(planes, 16) if use_cbam else None

    def forward(self, x):
        # Skip connection, projected when a downsample module is provided.
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.cbam is not None:
            out = self.cbam(out)
        out += shortcut
        return self.relu(out)
def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict):
    """Compute the error rate between reference and hypothesis transcriptions.

    For each utterance id present in `hyp_uid_to_tra`, the hypothesis is
    mapped to comparison units in one of three ways: via `g2p_dict` (word ->
    phone-list lookup) when given, else via the `g2p` callable when given,
    else split into plain words. The reference is always split into words.

    Args:
        ref_uid_to_tra: dict mapping utterance id -> reference transcript.
        hyp_uid_to_tra: dict mapping utterance id -> hypothesis transcript.
        g2p: optional callable returning a phone sequence for a sentence.
        g2p_dict: optional dict mapping a word to its phone list.

    Returns:
        float: total edit distance divided by total reference token count.
    """
    d_cnt = 0    # summed edit distance
    w_cnt = 0    # total reference tokens
    w_cnt_h = 0  # total hypothesis tokens (used for logging only)
    for uid in hyp_uid_to_tra:
        ref = ref_uid_to_tra[uid].split()
        if (g2p_dict is not None):
            hyp = []
            for word in hyp_uid_to_tra[uid].split():
                if (word in g2p_dict):
                    hyp = (hyp + g2p_dict[word])
                else:
                    # Best-effort: OOV words are only logged, not scored.
                    logger.warning(f'{word} not in g2p_dict')
        elif (g2p is not None):
            hyp = g2p(hyp_uid_to_tra[uid])
            # Drop apostrophes/spaces, then strip trailing stress digits from phones.
            hyp = [p for p in hyp if ((p != "'") and (p != ' '))]
            hyp = [(p[:(- 1)] if p[(- 1)].isnumeric() else p) for p in hyp]
        else:
            hyp = hyp_uid_to_tra[uid].split()
        logger.debug(f'''
HYP: {' '.join(hyp)}
REF: {' '.join(ref)}''')
        d_cnt += editdistance.eval(ref, hyp)
        w_cnt += len(ref)
        w_cnt_h += len(hyp)
    wer = (float(d_cnt) / w_cnt)
    logger.debug(f'wer = {(wer * 100):.2f}%; num. of ref words = {w_cnt}; num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}')
    return wer
# NOTE(review): the decorator below lost its `@pytest.mark` prefix
# (extraction artifact); it should read `@pytest.mark.parametrize(...)`.
.parametrize('dist_params, obs', [((np.array([0, 0, 0, 0], dtype=np.float64),), np.array([0, 0.5, 1, (- 1)], dtype=np.float64)), ((np.array(0, dtype=np.int64),), np.array(0, dtype=np.int64))])
def test_dirac_delta_logprob(dist_params, obs):
    """DiracDelta log-probability is 0 where obs == c and -inf elsewhere,
    verified against the scipy-style reference below."""
    (dist_params_at, obs_at, _) = create_pytensor_params(dist_params, obs, ())
    dist_params = dict(zip(dist_params_at, dist_params))
    x = dirac_delta(*dist_params_at)
    def scipy_logprob(obs, c):
        # Reference implementation: point mass at c.
        return (0.0 if (obs == c) else (- np.inf))
    scipy_logprob_tester(x, obs, dist_params, test_fn=scipy_logprob)
def main():
    """Train and evaluate the multi-task SAN reader.

    Loads embeddings and per-dataset batch generators, optionally resumes
    from a checkpoint (possibly the most recent one in `model_dir`), then
    for each epoch: samples an interleaved schedule of training batches
    across datasets, runs updates, evaluates every dev set with a
    dataset-specific metric (EM/F1, accuracy, or ROUGE/BLEU for MARCO),
    and saves a per-epoch checkpoint plus a running best checkpoint.
    """
    logger.info('Launching the SAN')
    start_test = False  # NOTE(review): never read afterwards — appears unused
    dev_name = 'dev'
    opt = vars(args)
    logger.info('Loading data')
    (embedding, opt) = load_meta(opt, os.path.join(args.multitask_data_path, args.meta))
    gold_data = load_gold(args.dev_datasets, args.data_dir, dev_name=dev_name)
    (best_em_score, best_f1_score) = (0.0, 0.0)
    # --- Build train/dev batch generators, one per dataset. ---
    all_train_batchgen = []
    all_dev_batchgen = []
    all_train_iters = []
    for dataset_name in args.train_datasets:
        path = os.path.join(args.multitask_data_path, (dataset_name + '_train.json'))
        this_extra_score = extra_score.get(dataset_name, None)
        all_train_batchgen.append(BatchGen(path, batch_size=args.batch_size, gpu=args.cuda, dataset_name=dataset_name, doc_maxlen=args.doc_maxlen, drop_less=args.drop_less, num_gpu=args.num_gpu, dropout_w=args.dropout_w, dw_type=args.dw_type, extra_score=this_extra_score, extra_score_cap=args.extra_score_cap))
    all_train_iters = [iter(item) for item in all_train_batchgen]
    for dataset_name in args.dev_datasets:
        path = os.path.join(args.multitask_data_path, (dataset_name + ('_%s.json' % dev_name)))
        all_dev_batchgen.append(BatchGen(path, batch_size=args.valid_batch_size, gpu=args.cuda, is_train=False, dataset_name=dataset_name, doc_maxlen=args.doc_maxlen, num_gpu=args.num_gpu))
        if ('marco' in dataset_name):
            # MS MARCO evaluation needs extra ranking scores, yes/no dicts
            # and its own gold file.
            rank_path = os.path.join(args.data_dir, dataset_name)
            dev_rank_path = os.path.join(rank_path, 'dev_rank_scores.json')
            dev_rank_scores = load_rank_score(dev_rank_path)
            dev_yn = json.load(open(os.path.join(rank_path, 'dev_yn_dict.json')))
            dev_gold_path = os.path.join(args.data_dir, dataset_name, 'dev_original.json')
            dev_gold_data_marco = load_jsonl(dev_gold_path)
    if args.resume_last_epoch:
        # Resume from the most recently modified full checkpoint in model_dir.
        latest_time = 0
        for o in os.listdir(model_dir):
            if (o.startswith('checkpoint_') and ('trim' not in o)):
                edit_time = os.path.getmtime(os.path.join(model_dir, o))
                if (edit_time > latest_time):
                    latest_time = edit_time
                    args.resume_dir = model_dir
                    args.resume = o
    if (args.resume_dir is not None):
        print('resuming model in ', os.path.join(args.resume_dir, args.resume))
        checkpoint = torch.load(os.path.join(args.resume_dir, args.resume))
        model_opt = (checkpoint['config'] if args.resume_options else opt)
        # Paths always come from the current run, even when resuming old options.
        model_opt['multitask_data_path'] = opt['multitask_data_path']
        model_opt['covec_path'] = opt['covec_path']
        model_opt['data_dir'] = opt['data_dir']
        if args.resume_options:
            logger.info('resume old options')
        else:
            logger.info('use new options.')
            model_opt['train_datasets'] = checkpoint['config']['train_datasets']
        state_dict = checkpoint['state_dict']
        model = DocReaderModel(model_opt, embedding, state_dict)
        if (not args.new_random_state):
            # Restore RNG states for exact training continuation.
            logger.info('use old random state.')
            random.setstate(checkpoint['random_state'])
            torch.random.set_rng_state(checkpoint['torch_state'])
            if args.cuda:
                torch.cuda.set_rng_state(checkpoint['torch_cuda_state'])
        if model.scheduler:
            if args.new_scheduler:
                model.scheduler = torch.optim.lr_scheduler.MultiStepLR(model.optimizer, milestones=[2, 5, 8], gamma=args.lr_gamma)
            elif ('scheduler_state' in checkpoint):
                model.scheduler.load_state_dict(checkpoint['scheduler_state'])
            else:
                print("warning: not loading scheduler state because didn't save.")
        start_epoch = (checkpoint['epoch'] + 1)
    else:
        model = DocReaderModel(opt, embedding)
        start_epoch = 0
    logger.info('using {} GPUs'.format(torch.cuda.device_count()))
    headline = ' Model Arch of SAN '
    logger.info('\n{}\n{}\n'.format(headline, model.network))
    model.setup_eval_embed(embedding)
    logger.info('Total number of params: {}'.format(model.total_param))
    if args.cuda:
        model.cuda()
    all_lens = [len(bg) for bg in all_train_batchgen]
    if (args.continue_epoches is not None):
        args.epoches = (start_epoch + args.continue_epoches)
    num_all_batches = (args.epoches * sum(all_lens))
    best_performance = {name: 0.0 for name in args.dev_datasets}
    best_performance['total'] = 0.0
    for epoch in range(start_epoch, args.epoches):
        logger.warning('At epoch {}'.format(epoch))
        all_call_indices = []
        for train_data in all_train_batchgen:
            train_data.reset()
        # --- Build the per-epoch schedule of (dataset index) draws. ---
        if (args.dataset_include_ratio >= 0):
            # Sub/over-sample auxiliary datasets relative to dataset 0.
            other_indices = []
            for i in range(1, len(all_train_batchgen)):
                other_indices += ([i] * len(all_train_batchgen[i]))
            if (args.dataset_include_ratio > 1):
                inverse_ratio = (1 / args.dataset_include_ratio)
                batch0_indices = ([0] * int((len(other_indices) * inverse_ratio)))
            else:
                batch0_indices = ([0] * len(all_train_batchgen[0]))
            other_picks = int((len(other_indices) * args.dataset_include_ratio))
            other_indices = random.sample(other_indices, other_picks)
            all_call_indices = (batch0_indices + other_indices)
        else:
            for i in range(len(all_train_batchgen)):
                all_call_indices += ([i] * len(all_train_batchgen[i]))
        random.shuffle(all_call_indices)
        # NOTE(review): caps every epoch at 10 batches — looks like a debugging
        # leftover; confirm before any real training run.
        all_call_indices = all_call_indices[:10]
        start = datetime.now()
        for i in range(len(all_call_indices)):
            (batch_list, name_map) = next(all_train_iters[all_call_indices[i]])
            dataset_name = args.train_datasets[all_call_indices[i]]
            model.update(batch_list, name_map, dataset_name)
            if (((model.updates % args.log_per_updates) == 0) or (i == 0)):
                logger.info('o(*^~^*) Task [{0:2}] #updates[{1:6}] train loss[{2:.5f}] remaining[{3}]'.format(all_call_indices[i], model.updates, model.train_loss.avg, str((((datetime.now() - start) / (i + 1)) * ((len(all_call_indices) - i) - 1))).split('.')[0]))
        # --- Per-epoch evaluation over every dev dataset. ---
        em_sum = 0
        f1_sum = 0
        model.eval()
        this_performance = {}
        for i in range(len(all_dev_batchgen)):
            dataset_name = args.dev_datasets[i]
            if (dataset_name in ['squad', 'newsqa']):
                (em, f1, results, scores) = check(model, all_dev_batchgen[i], gold_data[dataset_name])
                output_path = os.path.join(model_dir, 'dev_output_{}_{}.json'.format(dataset_name, epoch))
                output_scores_path = os.path.join(model_dir, 'dev_scores_{}_{}.pt'.format(dataset_name, epoch))
                # Retry saves a few times in case of transient filesystem errors.
                for repeat_times in range(10):
                    try:
                        with open(output_path, 'w') as f:
                            json.dump(results, f)
                        with open(output_scores_path, 'wb') as f:
                            pickle.dump(scores, f)
                        break
                    except Exception as e:
                        print('save predict failed. error:', e)
                em_sum += em
                f1_sum += f1
                this_performance[dataset_name] = (em + f1)
                logger.warning('Epoch {0} - Task {1:6} dev EM: {2:.3f} F1: {3:.3f}'.format(epoch, dataset_name, em, f1))
            elif (dataset_name == 'wdw'):
                (acc, results, scores) = check_wdw(model, all_dev_batchgen[i])
                output_path = os.path.join(model_dir, 'dev_output_{}_{}.json'.format(dataset_name, epoch))
                output_scores_path = os.path.join(model_dir, 'dev_scores_{}_{}.pt'.format(dataset_name, epoch))
                for repeat_times in range(10):
                    try:
                        with open(output_path, 'w') as f:
                            json.dump(results, f)
                        with open(output_scores_path, 'wb') as f:
                            pickle.dump(scores, f)
                        break
                    except Exception as e:
                        print('save predict failed. error:', e)
                # Accuracy is counted in both EM and F1 accumulators.
                em_sum += acc
                f1_sum += acc
                logger.warning('Epoch {0} - Task {1:6} dev ACC: {2:.3f}'.format(epoch, dataset_name, acc))
                this_performance[dataset_name] = acc
            elif ('marco' in dataset_name):
                # NOTE(review): output/output_yn/span_output are built but never
                # written in this body — verify whether that is intentional.
                output = os.path.join(model_dir, 'dev_pred_{}.json'.format(epoch))
                output_yn = os.path.join(model_dir, 'dev_pred_yn_{}.json'.format(epoch))
                span_output = os.path.join(model_dir, 'dev_pred_span_{}.json'.format(epoch))
                (dev_predictions, dev_best_scores, dev_ids_list) = eval_model_marco(model, all_dev_batchgen[i])
                (answer_list, rank_answer_list, yesno_answer_list) = generate_submit(dev_ids_list, dev_best_scores, dev_predictions, dev_rank_scores, dev_yn)
                dev_gold_path = os.path.join(args.data_dir, dataset_name, 'dev_original.json')
                metrics = compute_metrics_from_files(dev_gold_data_marco, rank_answer_list, MAX_BLEU_ORDER)
                rouge_score = metrics['rouge_l']
                blue_score = metrics['bleu_1']
                logger.warning('Epoch {0} - dev ROUGE-L: {1:.4f} BLEU-1: {2:.4f}'.format(epoch, rouge_score, blue_score))
                for metric in sorted(metrics):
                    logger.info(('%s: %s' % (metric, metrics[metric])))
                this_performance[dataset_name] = (rouge_score + blue_score)
        this_performance['total'] = sum([v for v in this_performance.values()])
        model.train()
        if (model.scheduler is not None):
            logger.info('scheduler_type {}'.format(opt['scheduler_type']))
            if (opt['scheduler_type'] == 'rop'):
                # NOTE(review): `f1` is only bound when a squad/newsqa dev set
                # was evaluated this epoch — verify this cannot raise NameError.
                model.scheduler.step(f1, epoch=epoch)
            else:
                model.scheduler.step()
        # --- Checkpointing (retried on transient failures). ---
        for try_id in range(10):
            try:
                model_file = os.path.join(model_dir, 'checkpoint_epoch_{}.pt'.format(epoch))
                model.save(model_file, epoch, best_em_score, best_f1_score)
                if ((em_sum + f1_sum) > (best_em_score + best_f1_score)):
                    # NOTE(review): model_file is already a full path; joining it
                    # with model_dir again may duplicate the directory — verify.
                    copyfile(os.path.join(model_dir, model_file), os.path.join(model_dir, 'best_checkpoint.pt'))
                    (best_em_score, best_f1_score) = (em_sum, f1_sum)
                    logger.info('Saved the new best model and prediction')
                break
            except Exception as e:
                print('save model failed: outer step. error=', e)
# NOTE(review): the four lines below are decorator residue — they lost their
# `@...` prefixes (e.g. `@raise_on_failure`, `@pytest.mark.parametrize`)
# during extraction. Restore the prefixes before running.
_on_failure
.parametrize('number_of_nodes', [1])
.parametrize('channels_per_node', [0])
.parametrize('enable_rest_api', [True])
def test_api_get_channel_list(api_server_test_instance: APIServer, token_addresses, reveal_timeout):
    """GET /channels returns [] initially; after a PUT creating one channel it
    returns that channel with the expected fields."""
    partner_address = '0x61C808D82A3AcdaDc13c777b59310bD9'
    # Initially the channel list is empty.
    request = grequests.get(api_url_for(api_server_test_instance, 'channelsresource'))
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    json_response = get_json_response(response)
    assert (json_response == [])
    # Create one channel via PUT.
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': str(settle_timeout), 'reveal_timeout': str(reveal_timeout)}
    request = grequests.put(api_url_for(api_server_test_instance, 'channelsresource'), json=channel_data_obj)
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # The list now contains the freshly created channel.
    request = grequests.get(api_url_for(api_server_test_instance, 'channelsresource'))
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    json_response = get_json_response(response)
    channel_info = json_response[0]
    assert (channel_info['partner_address'] == partner_address)
    assert (channel_info['token_address'] == to_checksum_address(token_address))
    assert (channel_info['total_deposit'] == '0')
    assert ('token_network_address' in channel_info)
class WeightedClassSplitter_(Splitter):
    """Splitter that assigns a weighted fraction of every class' samples to
    each split (e.g. train/test or support/query).

    For each split, the number of samples taken from a class is
    ``max(min_num_samples[split], int(weight * num_samples))``, optionally
    clamped by ``max_num_samples[split]``.
    """

    def __init__(self, shuffle=True, min_num_samples=1, max_num_samples=None, weights=None, train_weights=None, test_weights=None, support_weights=None, query_weights=None, force_equal_per_class=False, random_state_seed=0):
        """
        Args:
            shuffle: shuffle samples deterministically (seeded per task)
                before and within splitting.
            min_num_samples: int or dict — per-split lower bound on samples
                taken from each class.
            max_num_samples: int, dict or None — optional per-split upper bound.
            weights: OrderedDict mapping split name -> fraction; may instead be
                given via the train/support and test/query keyword pairs.
            force_equal_per_class: take the same number of samples from every
                class (the smallest class size).
            random_state_seed: seed component for the per-task shuffling RNG.
        """
        self.shuffle = shuffle
        self.force_equal_per_class = force_equal_per_class
        if (weights is None):
            weights = OrderedDict()
            # 'train'/'support' and 'test'/'query' are synonymous pairs.
            if (train_weights is not None):
                weights['train'] = train_weights
            elif (support_weights is not None):
                weights['support'] = support_weights
            if (test_weights is not None):
                weights['test'] = test_weights
            elif (query_weights is not None):
                weights['query'] = query_weights
        assert (len(weights) > 0)
        assert (sum(weights.values()) <= 1.0)
        # Normalize the min/max bounds to per-split OrderedDicts.
        if ((min_num_samples is None) or isinstance(min_num_samples, int)):
            if (min_num_samples is None):
                min_num_samples = 0
            self.min_num_samples = OrderedDict([(split, min_num_samples) for split in weights])
        elif isinstance(min_num_samples, dict):
            self.min_num_samples = OrderedDict(min_num_samples)
        else:
            raise NotImplementedError('Argument `min_num_samples` in `WeightedClassSplitter` must be of type `dict` or `int`. Got type `{0}`.'.format(type(min_num_samples)))
        if (max_num_samples is None):
            self.max_num_samples = None
        elif isinstance(max_num_samples, int):
            self.max_num_samples = OrderedDict([(split, max_num_samples) for split in weights])
        elif isinstance(max_num_samples, dict):
            self.max_num_samples = OrderedDict(max_num_samples)
        else:
            # Bug fix: this message previously reported type(min_num_samples).
            raise NotImplementedError('Argument `max_num_samples` in `WeightedClassSplitter` must be of type `dict` or `int`. Got type `{0}`.'.format(type(max_num_samples)))
        self._min_samples_per_class = sum(self.min_num_samples.values())
        super(WeightedClassSplitter_, self).__init__(weights, random_state_seed)

    def get_indices_task(self, task):
        """Return an OrderedDict split-name -> list of sample indices for a
        class-structured task."""
        all_class_indices = self._get_class_indices(task)
        indices = OrderedDict([(split, []) for split in self.splits])
        min_samples = min([len(class_indices) for class_indices in all_class_indices.values()])
        if (min_samples < self._min_samples_per_class):
            raise ValueError('The smallest number of samples in a class ({0}) is smaller than the minimum number of samples per class required by `WeightedClassSplitter` ({1}).'.format(min_samples, self._min_samples_per_class))
        for class_indices in all_class_indices.values():
            num_samples = (min_samples if self.force_equal_per_class else len(class_indices))
            if self.shuffle:
                # Deterministic per-task permutation: the same task always
                # produces the same split.
                seed = ((hash(task) + self.random_state_seed) % (2 ** 32))
                dataset_indices = np.random.RandomState(seed).permutation(num_samples)
            else:
                dataset_indices = np.arange(num_samples)
            ptr = 0
            for (split, weight) in self.splits.items():
                num_split = max(self.min_num_samples[split], int((weight * num_samples)))
                if (self.max_num_samples is not None):
                    num_split = min(self.max_num_samples[split], num_split)
                split_indices = dataset_indices[ptr:(ptr + num_split)]
                if self.shuffle:
                    self.np_random.shuffle(split_indices)
                indices[split].extend([class_indices[idx] for idx in split_indices])
                ptr += num_split
        return indices

    def get_indices_concattask(self, task):
        """Return an OrderedDict split-name -> list of sample indices for a
        ConcatDataset-style task (one dataset per class)."""
        indices = OrderedDict([(split, []) for split in self.splits])
        cum_size = 0  # offset of the current dataset within the concatenation
        min_samples = min([len(dataset) for dataset in task.datasets])
        if (min_samples < self._min_samples_per_class):
            raise ValueError('The smallest number of samples in a class ({0}) is smaller than the minimum number of samples per class required by `WeightedClassSplitter` ({1}).'.format(min_samples, self._min_samples_per_class))
        for dataset in task.datasets:
            num_samples = (min_samples if self.force_equal_per_class else len(dataset))
            if self.shuffle:
                seed = ((hash(task) + self.random_state_seed) % (2 ** 32))
                dataset_indices = np.random.RandomState(seed).permutation(num_samples)
            else:
                dataset_indices = np.arange(num_samples)
            ptr = 0
            for (split, weight) in self.splits.items():
                # Bug fix: `self.min_num_samples` is an OrderedDict; the previous
                # `max(self.min_num_samples, ...)` compared a dict against an int
                # and raised TypeError. Index by split, as in get_indices_task,
                # and apply the same optional max clamp for consistency.
                num_split = max(self.min_num_samples[split], int((weight * num_samples)))
                if (self.max_num_samples is not None):
                    num_split = min(self.max_num_samples[split], num_split)
                split_indices = dataset_indices[ptr:(ptr + num_split)]
                if self.shuffle:
                    self.np_random.shuffle(split_indices)
                indices[split].extend((split_indices + cum_size))
                # Bug fix: `ptr` was never advanced, so every split received the
                # same leading slice (overlapping splits).
                ptr += num_split
            cum_size += num_samples
        return indices
class TestPrometheusMetrics():
    """Tests that the thrift client proxy records Prometheus request metrics
    (totals, latency histogram, active-request gauge) for both successful and
    failing calls."""
    def setup(self):
        # Reset all collectors so counts from other tests do not leak in.
        REQUEST_LATENCY.clear()
        REQUESTS_TOTAL.clear()
        ACTIVE_REQUESTS.clear()
    # NOTE(review): the decorator below lost its `@pytest.mark` prefix
    # (extraction artifact); it should read `@pytest.mark.parametrize(...)`.
    .parametrize('exc,exc_type,status,status_code,expectation', [(None, '', '', '', does_not_raise()), (TApplicationException(TApplicationException.UNKNOWN_METHOD, 'unknown method'), 'TApplicationException', '', '', pytest.raises(TApplicationException)), (TProtocolException(message='Required field is unset!'), 'TProtocolException', '', '', pytest.raises(TProtocolException)), (TTransportException(message='Something is wrong with the transport'), 'TTransportException', '', '', pytest.raises(TTransportException, match='retry policy exhausted while attempting.*')), (Error(ErrorCode.NOT_FOUND, '404 not found'), 'Error', 'NOT_FOUND', '404', pytest.raises(Error)), (Error(ErrorCode.SERVICE_UNAVAILABLE, '503 unavailable'), 'Error', 'SERVICE_UNAVAILABLE', '503', pytest.raises(Error)), (TException(message='Some other generic thrift exception'), 'TException', '', '', pytest.raises(TException)), (Exception('Some very generic exception'), 'Exception', '', '', pytest.raises(Exception))])
    def test_build_thrift_proxy_method(self, exc, exc_type, status, status_code, expectation):
        def handle(*args, **kwargs):
            # Fake thrift handler: either succeed or raise the parametrized exception.
            if (exc is None):
                return 42
            else:
                raise exc
        proxy_method = _build_thrift_proxy_method('handle')
        pool = mock.MagicMock(timeout=None)
        prot = mock.MagicMock()
        pool.connection().__enter__.return_value = prot
        client_cls = mock.MagicMock()
        client_cls.handle = handle
        handler = mock.MagicMock(retry_policy=[None, None], pool=pool, namespace='test_namespace')
        handler.client_cls.return_value = client_cls
        thrift_success = str((exc is None)).lower()
        prom_labels = {'thrift_method': 'handle', 'thrift_client_name': 'test_namespace'}
        requests_total_prom_labels = {'thrift_exception_type': exc_type, 'thrift_baseplate_status': status, 'thrift_baseplate_status_code': status_code}
        with expectation:
            proxy_method(self=handler)
        # Transport errors are retried once (retry_policy has two slots).
        tries = (1 if (exc_type != 'TTransportException') else 2)
        assert (REGISTRY.get_sample_value('thrift_client_requests_total', {**prom_labels, **requests_total_prom_labels, 'thrift_success': thrift_success}) == tries)
        assert (REGISTRY.get_sample_value('thrift_client_latency_seconds_bucket', {**prom_labels, 'thrift_success': thrift_success, 'le': '+Inf'}) == tries)
        assert (REGISTRY.get_sample_value('thrift_client_active_requests', prom_labels) == 0)
    def test_build_thrift_proxy_method_fail_connection(self):
        """A connection failure must not leave the active-request gauge set."""
        def handle(*args, **kwargs):
            return 42
        proxy_method = _build_thrift_proxy_method('handle')
        pool = mock.MagicMock(timeout=None)
        pool.connection().__enter__.side_effect = Exception('failed to establish connection')
        client_cls = mock.MagicMock()
        client_cls.handle = handle
        handler = mock.MagicMock(retry_policy=[None, None], pool=pool, namespace='test_namespace')
        handler.client_cls.return_value = client_cls
        with pytest.raises(Exception):
            proxy_method(self=handler)
        prom_labels = {'thrift_method': 'handle', 'thrift_client_name': 'test_namespace'}
        # The gauge was never incremented for this label set.
        assert (REGISTRY.get_sample_value('thrift_client_active_requests', prom_labels) is None)
class SolverResult():
    """Container for the outcome of a dependency-resolution run.

    Holds the root project package, the resolved packages, and how many
    candidate solutions the solver attempted.
    """

    def __init__(self, root: ProjectPackage, packages: list[Package], attempted_solutions: int) -> None:
        # Stored on private attributes; exposed through read-only accessors.
        self._attempted_solutions = attempted_solutions
        self._packages = packages
        self._root = root

    def attempted_solutions(self) -> int:
        """Number of candidate solutions the solver tried."""
        return self._attempted_solutions

    def packages(self) -> list[Package]:
        """The resolved list of packages."""
        return self._packages
def test_custom_init_unknown_params():
    """Shape introspection of an attrs class whose custom __init__ takes
    parameters unknown to attrs: the input shape mirrors the constructor's
    (a, b, c) parameters while the output shape exposes only the actually
    stored attributes (a, other)."""
    assert (get_attrs_shape(CustomInitUnknownParams) == Shape(input=InputShape(constructor=CustomInitUnknownParams, kwargs=None, fields=(InputField(type=int, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=str, id='b', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=bytes, id='c', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY)), params=(Param(field_id='a', name='a', kind=ParamKind.POS_OR_KW), Param(field_id='b', name='b', kind=ParamKind.POS_OR_KW), Param(field_id='c', name='c', kind=ParamKind.POS_OR_KW)), overriden_types=frozenset({'a', 'b', 'c'})), output=OutputShape(fields=(OutputField(type=int, id='a', default=NoDefault(), accessor=create_attr_accessor('a', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=dict, id='other', default=NoDefault(), accessor=create_attr_accessor('other', is_required=True), metadata=MappingProxyType({}), original=ANY)), overriden_types=frozenset({'a', 'other'}))))
class HContainer(SplitContainer):
    """Horizontal splitter container: children are laid out left-to-right.

    Stretch policy: the container's horizontal stretch is the sum of its
    children's, the vertical stretch is their maximum, and child widths are
    resized proportionally to their horizontal stretch factors.
    """
    def __init__(self, area):
        SplitContainer.__init__(self, area, QtCore.Qt.Orientation.Horizontal)
    def type(self):
        return 'horizontal'
    def updateStretch(self):
        x = 0
        y = 0
        sizes = []  # per-child horizontal stretch, reused as width weights
        for i in range(self.count()):
            (wx, wy) = self.widget(i).stretch()
            x += wx
            y = max(y, wy)
            sizes.append(wx)
        self.setStretch(x, y)
        tot = float(sum(sizes))
        if (tot == 0):
            # All children report zero stretch: avoid division by zero.
            scale = 1.0
        else:
            scale = (self.width() / tot)
        self.setSizes([int((s * scale)) for s in sizes])
def update_winnowed_channels(original_mask: List[int], new_mask: List[int]):
    """Fold a winnowing pass back into the original binary mask, in place.

    new_mask carries one entry per surviving (1) channel of original_mask;
    every 0 in new_mask switches the corresponding surviving channel off.
    """
    assert (len(new_mask) == sum(original_mask))
    surviving_positions = get_one_positions_in_binary_mask(original_mask)
    for dropped in get_zero_positions_in_binary_mask(new_mask):
        original_mask[surviving_positions[dropped]] = 0
def create_fscommands(root):
    """Pick a VCS command backend by probing *root* for a metadata directory.

    Falls back to plain filesystem commands when no VCS marker is present or
    the matching backend cannot be constructed.
    """
    entries = set(os.listdir(root))
    backends = {'.hg': MercurialCommands, '.svn': SubversionCommands, '.git': GITCommands, '_svn': SubversionCommands, '_darcs': DarcsCommands}
    for marker, backend in backends.items():
        if marker in entries:
            try:
                return backend(root)
            except (ImportError, OSError):
                # Backend unavailable or repo unreadable: try the next marker.
                pass
    return FileSystemCommands()
_bpe('hf_byte_bpe', dataclass=HuggingFaceByteLevelBPEConfig)
class HuggingFaceByteLevelBPE(object):
    """Wrapper around HuggingFace's byte-level BPE tokenizer that maps text
    to and from space-separated token-id strings."""

    def __init__(self, cfg):
        try:
            from tokenizers import ByteLevelBPETokenizer
        except ImportError:
            raise ImportError('Please install huggingface/tokenizers with: pip install tokenizers')
        self.bpe = ByteLevelBPETokenizer(cfg.bpe_vocab, cfg.bpe_merges, add_prefix_space=cfg.bpe_add_prefix_space)

    def encode(self, x: str) -> str:
        """Tokenize *x* and return its token ids joined by single spaces."""
        token_ids = self.bpe.encode(x).ids
        return ' '.join((str(tid) for tid in token_ids))

    def decode(self, x: str) -> str:
        """Invert encode(); the special tokens pass through as literal strings."""
        tokens = []
        for tok in x.split():
            tokens.append(tok if tok in {'<unk>', '<mask>'} else int(tok))
        return self.bpe.decode(tokens)

    def is_beginning_of_word(self, x: str) -> bool:
        # Byte-level BPE marks a word start with a leading space after decode.
        return self.decode(x).startswith(' ')
class AstViewer(ida_graph.GraphViewer):
    """IDA graph viewer that renders a TritonAst as a node/edge graph."""

    def __init__(self, title: str, ast: TritonAst):
        ida_graph.GraphViewer.__init__(self, title)
        self._ast = ast

    def OnRefresh(self) -> bool:
        # Rebuild the whole graph from scratch on every refresh.
        self.Clear()
        self.draw()
        return True

    def OnGetText(self, ida_node_id: int) -> str:
        return self[ida_node_id]

    def Show(self) -> bool:
        return bool(ida_graph.GraphViewer.Show(self))

    def draw(self) -> None:
        """Breadth-first walk of the AST, adding one graph node per subterm."""
        root_id = self.AddNode(self._ast.symbol)
        queue = [(root_id, self._ast)]
        while queue:
            parent_id, ast_node = queue.pop(0)
            for child in ast_node.get_children():
                child_node_id = self.AddNode(child.symbol)
                self.AddEdge(parent_id, child_node_id)
                queue.append((child_node_id, child))
class QuestionSetQuestionSetValidator(InstanceValidator):
    """Validate parent/child relationships between question sets.

    Rejects configurations where a question set would become a child of
    itself or of one of its own descendants, both for the 'copy' view
    action and for regular updates.
    """

    def __call__(self, data, serializer=None):
        super().__call__(data, serializer)
        questionsets = data.get('questionsets')
        if (not questionsets):
            # No target parents: nothing to validate.
            return
        if ((not self.serializer) and (not self.instance)):
            # Neither a bound serializer nor an existing instance to check against.
            return
        if self.serializer:
            view = self.serializer.context.get('view')
            if (view and (view.action == 'copy')):
                obj = view.get_object()
                for questionset in questionsets:
                    if (obj in [questionset, *questionset.descendants]):
                        self.raise_validation_error({'questionset': [_('A question set may not be cloned to be a child of itself or one of its descendants.')]})
        # FIX: the duplicated `if not self.instance: return` guard was removed.
        if (not self.instance):
            # Creation without an instance: the self-descendant check cannot apply.
            return
        for questionset in questionsets:
            if (self.instance in [questionset, *questionset.descendants]):
                self.raise_validation_error({'questionsets': [_('A question set may not be a child of itself or one of its descendants.')]})
class TestRandomAccessIntReader(_TestRandomAccessReaders, unittest.TestCase, IntExampleMixin):
    """Random-access reader tests over the integer example fixture."""

    def checkRead(self, reader):
        # Known keys map to their values; an absent key raises KeyError.
        for key, expected in (('one', 1), ('three', 3), ('two', 2)):
            self.assertEqual(expected, reader[key])
        with self.assertRaises(KeyError):
            reader['four']
class SDR_LIKE_Loss(Unfolding_Loss):
    """SDR-style reconstruction loss computed on unfolded signal windows."""

    def __init__(self, window_length, hop_length, **kwargs):
        # Extra keyword arguments are accepted for interface compatibility but unused.
        super().__init__(window_length, hop_length)

    def criterion(self, target_signal_hat, target_signal):
        """Projection-based distortion vs. target-energy difference, averaged
        over the batch after summing the second-to-last axis."""
        eps = 1e-08
        # Scaled projection of the estimate onto the target (per last axis).
        inner = ((target_signal_hat * target_signal).sum((- 1), keepdims=True) + eps)
        energy = ((target_signal ** 2).sum(axis=(- 1), keepdims=True) + eps)
        s_target = ((inner / energy) * target_signal)
        distortion = (target_signal_hat - s_target)
        loss = (((distortion ** 2).sum((- 1)) + eps) - ((s_target ** 2).sum((- 1)) + eps))
        return loss.sum((- 2)).mean()
def main():
    """Train Net_binary with SGD and evaluate after every epoch.

    Relies on module-level globals: args, device, train_loader, test_loader.
    """
    net = Net_binary().to(device)
    sgd = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train(args, net, device, train_loader, sgd, epoch)
        print('')
        # Post-epoch diagnostics: clean train/test accuracy plus adversarial eval.
        eval_train(net, device, train_loader)
        eval_test(net, device, test_loader)
        eval_adv_test(net, device, test_loader)
        print('')
class InvertedDoublePendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Gym MuJoCo environment for the inverted double pendulum (frame_skip=5)."""

    def __init__(self):
        mujoco_env.MujocoEnv.__init__(self, 'inverted_double_pendulum.xml', 5)
        utils.EzPickle.__init__(self)

    def _step(self, action):
        """Advance one control step; reward = alive bonus minus distance and
        velocity penalties. Episode terminates when y drops to 1 or below."""
        self.do_simulation(action, self.frame_skip)
        ob = self._get_obs()
        # Position of site 0 (presumably the pendulum tip — confirm in the XML).
        (x, _, y) = self.model.data.site_xpos[0]
        dist_penalty = ((0.01 * (x ** 2)) + ((y - 2) ** 2))
        (v1, v2) = self.model.data.qvel[1:3]
        vel_penalty = ((0.001 * (v1 ** 2)) + (0.005 * (v2 ** 2)))
        alive_bonus = 10
        # Indexing [0] unwraps the array-valued expression to a scalar reward.
        r = ((alive_bonus - dist_penalty) - vel_penalty)[0]
        done = bool((y <= 1))
        return (ob, r, done, {})

    def _get_obs(self):
        # Cart position, sin/cos of joint angles, clipped velocities and
        # clipped constraint forces, flattened into one vector.
        return np.concatenate([self.model.data.qpos[:1], np.sin(self.model.data.qpos[1:]), np.cos(self.model.data.qpos[1:]), np.clip(self.model.data.qvel, (- 10), 10), np.clip(self.model.data.qfrc_constraint, (- 10), 10)]).ravel()

    def reset_model(self):
        # Uniform position noise and Gaussian velocity noise around the init state.
        self.set_state((self.init_qpos + self.np_random.uniform(low=(- 0.1), high=0.1, size=self.model.nq)), (self.init_qvel + (self.np_random.randn(self.model.nv) * 0.1)))
        return self._get_obs()

    def viewer_setup(self):
        """Camera: track body 0, zoom to half the model extent, look higher up."""
        v = self.viewer
        v.cam.trackbodyid = 0
        v.cam.distance = (v.model.stat.extent * 0.5)
        v.cam.lookat[2] += 3
def efa_to_devicemounts(num_devices: int) -> List[DeviceMount]:
    """Build the /dev/infiniband/uverbsN device mounts for an EFA-enabled
    container, one per device index; src and dst paths are identical."""
    def _uverbs_path(index: int) -> str:
        return '/dev/infiniband/uverbs' + str(index)
    return [DeviceMount(src_path=_uverbs_path(i), dst_path=_uverbs_path(i)) for i in range(num_devices)]
class InvalidRangeIndexChecker(BaseChecker):
    """Pylint checker flagging range() calls whose constant arguments describe
    an empty or otherwise invalid iteration range."""
    name = 'invalid_range_index'
    msgs = {'E9993': ('You should not use invalid range index on line %s', 'invalid-range-index', 'Used when you use invalid index range')}
    # NOTE(review): the next line looks like a stripped decorator/registration
    # call (e.g. @only_required_for_messages on visit_call) — confirm upstream.
    _required_for_messages('invalid-range-index')

    def visit_call(self, node: nodes.Call) -> None:
        """Check calls to the builtin range() whose arguments are all constants."""
        if isinstance(node.func, nodes.Name):
            name = node.func.name
            # Only the builtin: skip when 'range' is redefined in this frame or module.
            if ((not ((name in node.frame()) or (name in node.root()))) and (name == 'range')):
                args = node.args
                # Bail out when any argument mentions a variable: value not known statically.
                if any((any(arg.nodes_of_class(nodes.Name)) for arg in args)):
                    return
                inferred_params = [utils.safe_infer(arg) for arg in args]
                # NOTE(review): the comprehension variable below shadows the outer
                # 'node' parameter; harmless here but worth renaming.
                if (not all((isinstance(node, nodes.Const) for node in inferred_params))):
                    return
                eval_params = [const.value for const in inferred_params]
                # Wrong arity or non-int constants is itself an invalid range call.
                if ((len(args) == 0) or (len(args) > 3) or (not all([isinstance(c, int) for c in eval_params]))):
                    self.add_message('invalid-range-index', node=node, args=str(node.lineno))
                    return
                # Apply range()'s defaults: range(stop) / range(start, stop[, step]).
                start = (eval_params[0] if (len(args) > 1) else 0)
                stop = (eval_params[0] if (len(args) == 1) else eval_params[1])
                step = (eval_params[2] if (len(args) == 3) else 1)
                if (not is_valid_range(start, stop, step)):
                    self.add_message('invalid-range-index', node=node, args=str(node.lineno))
# NOTE(review): the two parenthesized lines and the bare '_context' below look
# like stripped click decorators (@click.command(...), @click.argument(...),
# @click.pass_context) — as written they are not valid Python; restore the
# decorator form.
(context_settings=dict(ignore_unknown_options=True), cls=cli_tools.DocumentedCommand, section=doc.UNSECTIONED)
('pip_args', nargs=(- 1), type=click.UNPROCESSED)
_context
def pip(ctx, pip_args):
    """Forward *pip_args* verbatim to `python -m pip` in the current
    interpreter and exit with pip's return code."""
    cli_args = ([sys.executable, '-m', 'pip'] + list(pip_args))
    ctx.exit(subprocess.run(cli_args, check=False).returncode)
def main():
    """Entry point: train and evaluate a Wavelet_LSTM on the gas-price series."""
    csv_path = './Data/GasPrice.csv'
    window = 12   # look-back window length (P)
    horizon = 1   # forecast step
    (X_train, Y_train, X_test, Y_test, data_df_combined_clean) = load_data(csv_path, P=window, step=horizon)
    print(X_train.shape)
    print(Y_train.shape)
    # Model runs in double precision to match the loaded data.
    net = Wavelet_LSTM(window, 32, 1).double()
    train(net, X_train, Y_train, epochs=20)
    test(net, X_test, Y_test, data_df_combined_clean)
def test_purview(s):
    """distribution.purview must recover the purview each repertoire was
    computed over, and map None to None."""
    pairs = zip(powerset(s.node_indices), powerset(s.node_indices))
    for (mechanism, purview) in pairs:
        repertoire = s.cause_repertoire(mechanism, purview)
        assert (distribution.purview(repertoire) == purview)
    assert (distribution.purview(None) is None)
class MaskedBasicblock(nn.Module):
    """ResNet basic block with a learned channel mask (MaskBlock) applied
    after each conv/bn stage.

    forward() threads accumulator lists through the block so the enclosing
    network can collect masks, lasso losses, pre-mask values and pooled
    features from every block in sequence.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, args=None):
        super(MaskedBasicblock, self).__init__()
        self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn_a = nn.BatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = nn.BatchNorm2d(planes)
        self.downsample = downsample
        # One mask generator per conv: mb1 sees the block input, mb2 the intermediate.
        self.mb1 = MaskBlock(inplanes, planes, args=args)
        self.mb2 = MaskBlock(planes, planes, args=args)

    def forward(self, x):
        """x packs (tensor, _mask_list, _lasso_loss, _mask_before_list,
        _avg_fea_list); the lists are appended to and returned unchanged in
        structure for the caller to keep accumulating."""
        (x, _mask_list, _lasso_loss, _mask_before_list, _avg_fea_list) = x
        residual = x
        (mask1, _lasso1, mask1_before) = self.mb1(x)
        _mask_list.append(mask1)
        _lasso_loss.append(_lasso1)
        _mask_before_list.append(mask1_before)
        basicblock = self.conv_a(x)
        basicblock = self.bn_a(basicblock)
        basicblock = F.relu(basicblock, inplace=True)
        # Pooled feature snapshot is taken BEFORE masking.
        _avg_fea_list.append(F.adaptive_avg_pool2d(basicblock, 1))
        # Broadcast the per-channel mask over the spatial dimensions.
        basicblock = (basicblock * mask1.unsqueeze((- 1)).unsqueeze((- 1)))
        (mask2, _lasso2, mask2_before) = self.mb2(basicblock)
        _mask_list.append(mask2)
        _lasso_loss.append(_lasso2)
        _mask_before_list.append(mask2_before)
        basicblock = self.conv_b(basicblock)
        basicblock = self.bn_b(basicblock)
        _avg_fea_list.append(F.adaptive_avg_pool2d(basicblock, 1))
        basicblock = (basicblock * mask2.unsqueeze((- 1)).unsqueeze((- 1)))
        if (self.downsample is not None):
            residual = self.downsample(x)
        return [F.relu((residual + basicblock), inplace=True), _mask_list, _lasso_loss, _mask_before_list, _avg_fea_list]
def _generate_reference(source: Path, destination: Path, ext: str):
    """Write one reference page per (module, alias) pair found under *source*.

    Returns a nav mapping ('Code Reference' -> page paths); nav entries are
    only collected when generating markdown output.
    """
    nav_items: Dict[(str, List[str])] = {'Code Reference': []}
    for (module_name, aliases) in _parse_package(source):
        for alias in aliases:
            page_path = destination / f'{module_name}.{ext}'
            _write_ref_content(page_path, module_name, alias.name)
            if (ext == 'md'):
                nav_items['Code Reference'].append(f'reference/{module_name}.md')
    return nav_items
def test_find_extra_reqs(tmp_path: Path) -> None:
    """find_extra_reqs must report requirements that are never imported
    (whether installed or not) and not report the one that is imported."""
    installed_not_imported_required_package = pytest
    installed_imported_required_package = pip
    fake_requirements_file = (tmp_path / 'requirements.txt')
    # Three requirements: one uninstalled, one imported below, one never imported.
    fake_requirements_file.write_text(textwrap.dedent(f''' not_installed_package_12345==1
    {installed_imported_required_package.__name__}
    {installed_not_imported_required_package.__name__}
    '''))
    source_dir = (tmp_path / 'source')
    source_dir.mkdir()
    source_file = (source_dir / 'source.py')
    # The source only imports pprint (stdlib) and the 'imported' package.
    source_file.write_text(textwrap.dedent(f''' import pprint
    import {installed_imported_required_package.__name__}
    '''))
    result = find_extra_reqs.find_extra_reqs(requirements_filename=fake_requirements_file, paths=[source_dir], ignore_files_function=common.ignorer(ignore_cfg=[]), ignore_modules_function=common.ignorer(ignore_cfg=[]), ignore_requirements_function=common.ignorer(ignore_cfg=[]), skip_incompatible=False)
    # Note the normalized (hyphenated) name for the uninstalled package.
    expected_result = ['not-installed-package-12345', installed_not_imported_required_package.__name__]
    assert (sorted(result) == sorted(expected_result))
class TestComposite(TestNameCheckVisitorBase):
    """Visitor tests for 'composite' targets: attribute (self.x) and subscript
    (x['a']) values whose narrowed types must be tracked through assignments
    and isinstance checks.

    NOTE(review): the bare '_passes()' lines look like stripped decorators
    (e.g. @assert_passes()) — confirm against the original test suite.
    """
    _passes()
    def test_assignment(self):
        """Assigning to self.x narrows it to the assigned KnownValue."""
        class Capybara(object):
            def __init__(self, x):
                self.x = x
            def eat(self):
                assert_is_value(self.x, MultiValuedValue([AnyValue(AnySource.unannotated), KnownValue(1)]))
                self.x = 1
                assert_is_value(self.x, KnownValue(1))
                self = Capybara(2)
                assert_is_value(self.x, MultiValuedValue([AnyValue(AnySource.unannotated), KnownValue(1)]))
    _passes()
    def test_conditional_attribute_assign(self):
        """Conditional reassignment still leaves self.x typed as int."""
        class Capybara(object):
            def __init__(self, x):
                self.x = int(x)
            def eat(self, cond, val):
                if cond:
                    self.x = int(val)
                x = self.x
                assert_is_value(x, TypedValue(int))
    _passes()
    def test_attribute_to_never(self):
        """Narrowing self.typ via isinstance(…, super) must be handled."""
        from typing import Union
        class TypedValue():
            typ: Union[(type, str)]
            def get_generic_args_for_type(self) -> object:
                if isinstance(self.typ, super):
                    return self.typ.__self_class__
                else:
                    assert False
    _passes()
    def test_constraint(self):
        """isinstance() narrows self.x with or without a prior assignment."""
        class Capybara(object):
            def __init__(self, x):
                self.x = x
            def eat(self, val):
                self.x = val
                if isinstance(self.x, int):
                    assert_is_value(self.x, TypedValue(int))
            def eat_no_assign(self):
                if isinstance(self.x, int):
                    assert_is_value(self.x, TypedValue(int))
    _passes()
    def test_subscript(self):
        """Subscript targets are narrowed by assignment and by checks."""
        from typing import Any, Dict
        def capybara(x: Dict[(str, Any)], y) -> None:
            assert_is_value(x['a'], AnyValue(AnySource.explicit))
            x['a'] = 1
            assert_is_value(x['a'], KnownValue(1))
            if isinstance(x['c'], int):
                assert_is_value(x['c'], TypedValue(int))
            if (x['b'] is None):
                assert_is_value(x['b'], KnownValue(None))
    _passes()
    def test_unhashable_subscript(self):
        """Unhashable subscripts (list keys) must not crash the visitor."""
        def capybara(df):
            df[['a', 'b']] = 42
            print(df[['a', 'b']])
class ModelConfigs(BaseModelConfigs):
    """Training configuration for the sentence-recognition model (04)."""

    def __init__(self):
        super().__init__()
        # Timestamped run directory, e.g. Models/04_sentence_recognition/202401011230
        self.model_path = os.path.join('Models/04_sentence_recognition', datetime.now().strftime('%Y%m%d%H%M'))
        # vocab and max_text_length start empty here (presumably filled from
        # the dataset later — confirm against the training pipeline).
        self.vocab = ''
        self.max_text_length = 0
        # Input image geometry.
        self.height = 96
        self.width = 1408
        # Training hyperparameters.
        self.batch_size = 32
        self.learning_rate = 0.0005
        self.train_epochs = 1000
        self.train_workers = 20
# NOTE(review): the '.supported(...)' line below looks like a stripped pytest
# marker decorator (e.g. @pytest.mark.supported) — as written it is not valid
# Python on its own; restore the decorator form.
.supported(only_if=(lambda backend: backend.x448_supported()), skip_message='Requires OpenSSL with X448 support')
def test_public_key_equality(backend):
    """X448 public keys: equal when loaded from the same private key bytes,
    unequal otherwise; ordering comparisons raise TypeError."""
    key_bytes = load_vectors_from_file(os.path.join('asymmetric', 'X448', 'x448-pkcs8.der'), (lambda derfile: derfile.read()), mode='rb')
    key1 = serialization.load_der_private_key(key_bytes, None).public_key()
    key2 = serialization.load_der_private_key(key_bytes, None).public_key()
    key3 = X448PrivateKey.generate().public_key()
    assert (key1 == key2)
    assert (key1 != key3)
    assert (key1 != object())
    with pytest.raises(TypeError):
        (key1 < key2)
def test_include_parser():
    """TableGenParser.parse_includes must yield the quoted path of every
    `include "..."` directive, in file order, skipping defs and comments."""
    text = '\n//======//\n// X86 Instruction Format Definitions.\n//\n\ninclude "X86InstrFormats.td"\n\ninclude "X86InstrExtension.td"\n\ninclude "llvm/Target/Target.td"\n\n//======//\n// Pattern fragments.\n//\n\n// X86 specific condition code. These correspond to CondCode in\n// X86InstrInfo.h. They must be kept in synch.\ndef X86_COND_A : PatLeaf<(i8 0)>; // alt. COND_NBE\ndef X86_COND_AE : PatLeaf<(i8 1)>; // alt. COND_NC\ndef X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C\n'
    parsed = TableGenParser.parse_includes(text.split('\n'))
    assert (list(parsed) == ['X86InstrFormats.td', 'X86InstrExtension.td', 'llvm/Target/Target.td'])
def SaveGameObjects(gameObjects, data, project):
    """Serialize *gameObjects* and their components into *data* as ObjectInfo
    entries, importing referenced assets and Behaviour scripts into *project*.

    Pass 1 writes the GameObject entries first so their UUIDs exist before
    pass 2 writes the components that reference them.
    """
    for gameObject in gameObjects:
        attrs = {'name': gameObject.name, 'tag': gameObject.tag.tag, 'enabled': gameObject.enabled, 'transform': ObjectInfo.SkipConv(project.GetUuid(gameObject.transform))}
        data.append(ObjectInfo('GameObject', project.GetUuid(gameObject), attrs))
    for gameObject in gameObjects:
        gameObjectID = project.GetUuid(gameObject)
        for component in gameObject.components:
            # NOTE(review): 'enabled' is taken from the gameObject, not the
            # component — confirm this is not meant to be component.enabled.
            attrs = {'gameObject': ObjectInfo.SkipConv(gameObjectID), 'enabled': gameObject.enabled}
            for k in component._saved.keys():
                v = getattr(component, k)
                if isinstance(v, SavesProjectID):
                    # Project-object references are stored by UUID; import
                    # assets the project has not seen yet.
                    if ((v not in project._ids) and isinstance(v, Asset)):
                        project.ImportAsset(v, gameObject)
                    v = ObjectInfo.SkipConv(project.GetUuid(v))
                elif hasattr(v, '_wrapper'):
                    if isinstance(getattr(v, '_wrapper'), SavableStruct):
                        # Savable structs are flattened into an indented
                        # "key: value" multi-line block.
                        wrapper = getattr(v, '_wrapper')
                        struct = {}
                        for key in wrapper.attrs:
                            if hasattr(v, key):
                                item = getattr(v, key)
                                if isinstance(item, SavesProjectID):
                                    if ((item not in project._ids) and isinstance(item, Asset)):
                                        project.ImportAsset(item, gameObject)
                                    struct[key] = project.GetUuid(item)
                                else:
                                    struct[key] = ObjectInfo.convString(item)
                        sep = '\n '
                        v = ObjectInfo.SkipConv((sep + sep.join((': '.join(x) for x in struct.items()))))
                # Skip attribute values that cannot be serialized.
                if ((v is not None) and (not isinstance(v, savable))):
                    continue
                attrs[k] = v
            if isinstance(component, Behaviour):
                # Behaviours additionally get their source written under
                # Scripts/ and imported into the project once per class.
                behaviour = component.__class__
                if (behaviour not in project._ids):
                    filename = (Path('Scripts') / (behaviour.__name__ + '.py'))
                    os.makedirs((project.path / 'Scripts'), exist_ok=True)
                    with open((project.path / filename), 'w+') as f:
                        f.write((GetImports(inspect.getsourcefile(behaviour)) + inspect.getsource(behaviour)))
                    uuid = project.GetUuid(behaviour)
                    file = File(filename, uuid)
                    project.ImportFile(file, write=False)
                attrs['_script'] = ObjectInfo.SkipConv(project._ids[behaviour])
                name = (behaviour.__name__ + '(Behaviour)')
            else:
                name = (component.__class__.__name__ + '(Component)')
            data.append(ObjectInfo(name, project.GetUuid(component), attrs))
# NOTE(review): the parenthesized line below looks like a stripped hook
# decorator (e.g. @pytest.hookimpl(hookwrapper=True, trylast=True)) — as
# written it is not valid Python; restore the decorator form.
(hookwrapper=True, trylast=True)
def pytest_runtest_call(item):
    """Hook wrapper: run the test under a timeout and flag failing flaky tests.

    On a verbose, non-xdist run where a test marked 'flaky' failed, writes a
    yellow 'FLAKY ' prefix to the terminal reporter with capture disabled.
    """
    with timeout_for_setup_and_call(item):
        outcome = (yield)
    # outcome._excinfo is a (type, value, tb) tuple when the call raised.
    did_fail = (isinstance(outcome._excinfo, tuple) and isinstance(outcome._excinfo[1], BaseException))
    is_xdist = ('PYTEST_XDIST_WORKER' in os.environ)
    is_flaky_test = (item.get_closest_marker('flaky') is not None)
    should_print = (did_fail and (item.config.option.verbose > 0) and is_flaky_test and (not is_xdist))
    if should_print:
        capmanager = item.config.pluginmanager.getplugin('capturemanager')
        # Lift output capturing so the marker actually reaches the terminal.
        with capmanager.global_and_fixture_disabled():
            item.config.pluginmanager.get_plugin('terminalreporter')._tw.write('FLAKY ', yellow=True)
class DockLockDetailsTab(BaseConnectionDetailsTab):
    """Connection-details tab listing randomized door-lock weaknesses."""

    def tab_title(self) -> str:
        return 'Door Locks'

    def should_appear_for(cls, configuration: BaseConfiguration, all_patches: dict[(int, GamePatches)], players: PlayersConfiguration) -> bool:
        # Only relevant when door-lock randomization is enabled.
        return configuration.dock_rando.is_enabled()

    def _fill_per_region_connections(self, per_region: dict[(str, dict[(str, (str | dict[(str, str)]))])], region_list: RegionList, patches: GamePatches):
        """Group every changed dock weakness under region -> area -> dock name."""
        for (source, weakness) in patches.all_dock_weaknesses():
            (source_region, source_area) = region_list.region_and_area_by_area_identifier(source.identifier.area_identifier)
            area_map = per_region[source_region.name].setdefault(source_area.name, {})
            area_map[source.name] = weakness.long_name
def build_request(endian):
    """Generate C reference data and emit ../test_requests_<endian>.py.

    Writes genrequest.c with one generator function per X request variant
    (and per reply, for ReplyRequests), compiles and runs it to collect
    reference binary blobs, then writes a unittest module comparing
    python-xlib's to_binary/parse_binary against those blobs.
    *endian* is 'be' or 'le'.
    """
    fc = open('genrequest.c', 'w')
    fc.write(C_HEADER)
    reqlist = list(request.major_codes.items())
    reqlist.sort(key=(lambda x: x[0]))
    genfuncs = []
    req_args = {}
    reply_args = {}
    # --- Phase 1: emit one C generator function per request/reply variant. ---
    for (code, req) in reqlist:
        name = req.__name__
        creqname = name
        # Look up the C struct definition: full, mini, or resource request.
        cdefs = request_defs.get(name)
        if (cdefs is None):
            cdefs = mini_request_defs.get(name)
            creqname = ''
        if (cdefs is None):
            cdefs = resource_request_defs.get(name)
            creqname = 'Resource'
        creqname = ('x%sReq' % creqname)
        if (cdefs is None):
            sys.stderr.write(('missing def for request: %s\n' % name))
        else:
            vardefs = request_var_defs.get(name, [()])
            if (type(vardefs) is not list):
                vardefs = [vardefs]
            i = 0
            # Each variable-part definition becomes a separately numbered variant.
            for v in vardefs:
                if (i > 0):
                    uname = (name + str(i))
                else:
                    uname = name
                try:
                    req_args[uname] = gen_func(fc, ('genrequest_' + uname), creqname, ('REQUEST ' + uname), req._request, cdefs, v)
                except:
                    sys.stderr.write(('Error in %s request\n' % uname))
                    raise
                genfuncs.append(('genrequest_' + uname))
                i = (i + 1)
        if issubclass(req, rq.ReplyRequest):
            cdefs = reply_defs.get(name)
            if (cdefs is None):
                sys.stderr.write(('missing def for reply: %s\n' % name))
            else:
                vardefs = reply_var_defs.get(name, ())
                if (type(vardefs) is not list):
                    vardefs = [vardefs]
                i = 0
                for v in vardefs:
                    if (i > 0):
                        uname = (name + str(i))
                    else:
                        uname = name
                    try:
                        reply_args[uname] = gen_func(fc, ('genreply_' + uname), ('x%sReply' % name), ('REPLY ' + uname), req._reply, cdefs, v)
                    except:
                        sys.stderr.write(('Error in %s reply\n' % uname))
                        raise
                    genfuncs.append(('genreply_' + uname))
                    i = (i + 1)
    # main() simply calls every generator in order.
    fc.write('\n\n int main(void)\n {\n ')
    for gf in genfuncs:
        fc.write((' %s();\n' % gf))
    fc.write('\n return 0;\n }\n ')
    fc.close()
    # --- Phase 2: compile and run; parse the 'REQUEST/REPLY name blob' lines. ---
    os.system('gcc -Wall -g genrequest.c -o genrequest')
    req_bins = {}
    reply_bins = {}
    pc = os.popen('./genrequest', 'r')
    for line in pc.readlines():
        parts = line.strip().split()
        if (parts[0] == 'REQUEST'):
            req_bins[parts[1]] = parts[2]
        elif (parts[0] == 'REPLY'):
            reply_bins[parts[1]] = parts[2]
    # --- Phase 3: emit the executable python unittest module (mode 0755). ---
    fpy = open(('../test_requests_%s.py' % endian), 'w')
    os.chmod(('../test_requests_%s.py' % endian), 493)
    if (endian == 'be'):
        e = 'BigEndian'
        v = 1
    else:
        e = 'LittleEndian'
        v = 0
    fpy.write((PY_HEADER % {'endname': e, 'endvalue': v}))
    for (code, req) in reqlist:
        name = req.__name__
        fpy.write(('\n\nclass Test%s(EndianTest):\n' % name))
        fpy.write(' def setUp(self):\n')
        i = 0
        reqs = (- 1)
        replies = (- 1)
        # Walk the numbered variants until neither a request nor a reply blob exists.
        while 1:
            if (i > 0):
                uname = (name + str(i))
            else:
                uname = name
            reqbin = req_bins.get(uname)
            replybin = reply_bins.get(uname)
            if ((reqbin is None) and (replybin is None)):
                break
            if reqbin:
                reqs = i
                fpy.write((' self.req_args_%d = %s\n' % (i, build_args(req_args[uname]))))
                fpy.write((' self.req_bin_%d = %s\n\n' % (i, build_bin(reqbin))))
            if replybin:
                replies = i
                fpy.write((' self.reply_args_%d = %s\n' % (i, build_args(reply_args[uname]))))
                fpy.write((' self.reply_bin_%d = %s\n\n' % (i, build_bin(replybin))))
            i = (i + 1)
        # One pack/unpack test-method pair per collected variant.
        for i in range(0, (reqs + 1)):
            fpy.write(('\n def testPackRequest%(n)d(self):\n bin = request.%(req)s._request.to_binary(*(), **self.req_args_%(n)d)\n self.assertBinaryEqual(bin, self.req_bin_%(n)d)\n\n def testUnpackRequest%(n)d(self):\n args, remain = request.%(req)s._request.parse_binary(self.req_bin_%(n)d, dummy_display, 1)\n self.assertBinaryEmpty(remain)\n self.assertEqual(args, self.req_args_%(n)d)\n' % {'req': req.__name__, 'n': i}))
        for i in range(0, (replies + 1)):
            fpy.write(('\n def testPackReply%(n)d(self):\n bin = request.%(req)s._reply.to_binary(*(), **self.reply_args_%(n)d)\n self.assertBinaryEqual(bin, self.reply_bin_%(n)d)\n\n def testUnpackReply%(n)d(self):\n args, remain = request.%(req)s._reply.parse_binary(self.reply_bin_%(n)d, dummy_display, 1)\n self.assertBinaryEmpty(remain)\n self.assertEqual(args, self.reply_args_%(n)d)\n' % {'req': req.__name__, 'n': i}))
    fpy.write('\n\nif __name__ == "__main__":\n unittest.main()\n')
def send_contract_view(ModelAdmin, request, pk):
    """Admin view: confirm, then send a sponsorship contract.

    GET (or an unconfirmed POST) renders the confirmation page; a confirmed
    POST executes the send use case and redirects to the change page.
    """
    contract = get_object_or_404(ModelAdmin.get_queryset(request), pk=pk)
    confirmed = ((request.method.upper() == 'POST') and (request.POST.get('confirm') == 'yes'))
    if confirmed:
        use_case = use_cases.SendContractUseCase.build()
        try:
            use_case.execute(contract, request=request)
            ModelAdmin.message_user(request, 'Contract was sent!', messages.SUCCESS)
        except InvalidStatusException:
            # Sending is only valid from certain statuses; surface a readable error.
            status = contract.get_status_display().title()
            ModelAdmin.message_user(request, f"Contract with status {status} can't be sent.", messages.ERROR)
        return redirect(reverse('admin:sponsors_contract_change', args=[contract.pk]))
    return render(request, 'sponsors/admin/send_contract.html', context={'contract': contract})
class Speech2Text2Processor(ProcessorMixin):
    """Bundles an AutoFeatureExtractor (audio) and a Speech2Text2Tokenizer
    (text) behind a single __call__/decode interface."""
    feature_extractor_class = 'AutoFeatureExtractor'
    tokenizer_class = 'Speech2Text2Tokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default to audio processing; as_target_processor() swaps in the tokenizer.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process audio and/or text.

        Returns the feature-extractor output (audio only), the tokenizer
        output (text only), or the former with the tokenized text attached
        under 'labels' (both given).
        """
        if self._in_target_context_manager:
            # Inside as_target_processor(): forward everything to the active processor.
            return self.current_processor(*args, **kwargs)
        if ('raw_speech' in kwargs):
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        if (len(args) > 0):
            # The first positional argument is treated as audio.
            audio = args[0]
            args = args[1:]
        if ((audio is None) and (text is None)):
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if (audio is not None):
            inputs = self.feature_extractor(audio, *args, **kwargs)
        if (text is not None):
            encodings = self.tokenizer(text, **kwargs)
        if (text is None):
            return inputs
        elif (audio is None):
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    def as_target_processor(self):
        # NOTE(review): this method yields but carries no decorator here; in
        # upstream transformers it is a @contextmanager — confirm the
        # decorator was not dropped from this copy.
        warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        (yield)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def compute_dense_reward(self, action, obs) -> float:
    """Dense reward: pull the end-effector to the handle, the handle to the
    goal, and lightly regularize the action magnitude.

    When the handle is within 0.05 of the goal a +1 bonus is added, but it is
    taken back while the gripper is still open (openness > 0) — apparently to
    reward holding a closed grip at the goal. *obs* is unused.
    """
    w_reach, w_goal, w_action = 1.0, 1.0, 0.01
    reach_dist = np.linalg.norm(self.robot.ee_position - self.obj1.position)
    goal_dist = np.linalg.norm(self.obj1.position - self.goal_position)
    action_norm = np.linalg.norm(action)
    reward = -(w_reach * reach_dist) - (w_goal * goal_dist) - (w_action * action_norm)
    if goal_dist < 0.05:
        reward += 1.0
        if self.robot.gripper_openness > 0:
            reward -= 1.0
    return reward
def test_send_chat_failed(settings, requests_mock):
    """A Plain API validation error must be surfaced as PlainError with the
    error message and the offending field details joined together."""
    # NOTE(review): the string literal below is unterminated — the endpoint URL
    # appears truncated in this copy of the source; this block cannot parse
    # as-is. Restore the original PLAIN_API URL.
    settings.PLAIN_API = '
    requests_mock.post(settings.PLAIN_API, json={'data': {'upsertCustomTimelineEntry': {'result': 'NOOP', 'timelineEntry': None, 'error': {'message': 'There was a validation error.', 'type': 'VALIDATION', 'code': 'input_validation', 'fields': [{'field': 'customerId', 'message': 'ID does not match expected format', 'type': 'VALIDATION'}]}}}})
    with pytest.raises(PlainError, match='There was a validation error. customerId: ID does not match expected format'):
        _send_chat('c_ABC25904A1DA4E0AF2', title='wtf', message='hello world')
def arg_options():
    """Return (long_options, short_options) for getopt-style CLI parsing.

    Note the tuple order: the long-option list comes first, then the
    short-option string.
    """
    short_opts = 'hi:o:amv:'
    long_opts = ['ifile=', 'ofile=', 'crepe=', 'crepe_step_size=', 'whisper=', 'whisper_align_model=', 'whisper_batch_size=', 'whisper_compute_type=', 'language=', 'plot=', 'midi=', 'hyphenation=', 'disable_separation=', 'disable_karaoke=', 'create_audio_chunks=', 'force_cpu=', 'force_whisper_cpu=', 'force_crepe_cpu=', 'format_version=']
    return (long_opts, short_opts)
def validate_keys(dict_, expected, funcname):
    """Ensure *dict_* contains exactly the keys in *expected*.

    Raises ValueError naming *funcname* when keys are missing (checked
    first) or unexpected; both messages list expected vs received keys.
    """
    expected_set = set(expected)
    received_set = set(dict_)
    detail = '{}:\nExpected Keys: {}\nReceived Keys: {}'.format(funcname, sorted(expected_set), sorted(received_set))
    if expected_set - received_set:
        raise ValueError('Missing keys in ' + detail)
    if received_set - expected_set:
        raise ValueError('Unexpected keys in ' + detail)
# NOTE(review): '_torch' below looks like a stripped decorator (@require_torch)
# — confirm against the original test file.
_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    def test_autoregressive_prediction(self):
        """Roll the pretrained hopper-expert model forward for two steps and
        check each predicted action against hard-coded expected values.

        The trajectory is built autoregressively: after every prediction the
        (fake) environment state, the action, and the decremented
        return-to-go are appended before the next forward pass.
        """
        NUM_STEPS = 2
        TARGET_RETURN = 10
        model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        # Fake environment reset: a random initial state.
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)
        expected_outputs = torch.tensor([[0.2384, (- 0.2955), 0.8741], [0.6765, (- 0.0793), (- 0.1298)]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            # Pad the next action/reward slots with zeros before predicting them.
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                (_, action_pred, _) = model(states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False)
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[(0, (- 1))], expected_outputs[step], atol=0.0001))
            # Fake environment step: random next state, constant reward of 1.0.
            (state, reward, _, _) = (torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32), 1.0, False, {})
            actions[(- 1)] = action_pred[(0, (- 1))]
            states = torch.cat([states, state], dim=1)
            pred_return = (returns_to_go[(0, (- 1))] - reward)
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat([timesteps, (torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1))], dim=1)
class SingleStageNetwork(nn.Module):
    """One hourglass-style stage: a Bottleneck downsample path followed by an
    upsample path, with optional skip and cross-conv generation."""

    def __init__(self, has_skip=False, gen_skip=False, gen_cross_conv=False, unit_channels=256, num_units=4, num_blocks=[2, 2, 2, 2], norm_cfg=dict(type='BN'), in_channels=64):
        # Deep-copy the mutable defaults so instances never share or alter them.
        norm_cfg = cp.deepcopy(norm_cfg)
        num_blocks = cp.deepcopy(num_blocks)
        super().__init__()
        assert (len(num_blocks) == num_units)
        self.has_skip = has_skip
        self.gen_skip = gen_skip
        self.gen_cross_conv = gen_cross_conv
        self.num_units = num_units
        self.unit_channels = unit_channels
        self.num_blocks = num_blocks
        self.norm_cfg = norm_cfg
        self.downsample = DownsampleModule(Bottleneck, num_blocks, num_units, has_skip, norm_cfg, in_channels)
        self.upsample = UpsampleModule(unit_channels, num_units, gen_skip, gen_cross_conv, norm_cfg, in_channels)

    def forward(self, x, skip1, skip2):
        """Return (out, skip1, skip2, cross_conv) for the next stage."""
        downsampled = self.downsample(x, skip1, skip2)
        (out, next_skip1, next_skip2, cross_conv) = self.upsample(downsampled)
        return (out, next_skip1, next_skip2, cross_conv)
def restart_and_wait_for_server(nursery: Nursery, port_generator: Iterator[Port], node: RunningNode, retry_timeout: int) -> Optional[RunningNode]:
    """Stop *node* with SIGINT, require a clean exit, then relaunch it with
    the same config and wait for it to come back up."""
    node.process.send_signal(signal.SIGINT)
    # Block until the old process is gone; anything but exit code 0 is fatal.
    if node.process.result.get() != 0:
        raise Exception(f'Node did not shut down cleanly {node!r}')
    return start_and_wait_for_server(nursery, port_generator, node.config, retry_timeout)
class Server(ServerModule):
    """Federated-learning server: coordinates client training rounds,
    aggregates decomposed (sigma/psi) weights, and tracks per-round
    transfer-cost statistics."""

    def __init__(self, args):
        super(Server, self).__init__(args, Client)
        # Client->server and server->client transfer-ratio histories, one entry per round.
        self.c2s_sum = []
        self.c2s_sig = []
        self.c2s_psi = []
        self.s2c_sum = []
        self.s2c_sig = []
        self.s2c_psi = []
        self.s2c_hlp = []
        # Restored full client models and the round-local rid->cid mapping.
        self.restored_clients = {}
        self.rid_to_cid = {}
        # Per-client fingerprint vectors and last-seen weights (helper matching).
        self.cid_to_vectors = {}
        self.cid_to_weights = {}
        self.curr_round = (- 1)
        # Fixed random probe image from a truncated normal over [0, 255]; it is
        # fed to every restored model to fingerprint it (see client_similarity).
        (mu, std, lower, upper) = (125, 125, 0, 255)
        self.rgauss = self.loader.scale(truncnorm(((lower - mu) / std), ((upper - mu) / std), loc=mu, scale=std).rvs((1, 32, 32, 3)))
def build_network(self):
self.global_model = self.net.build_resnet9(decomposed=True)
self.sig = self.net.get_sigma()
self.psi = self.net.get_psi()
self.trainables = [sig for sig in self.sig]
num_connected = int(round((self.args.num_clients * self.args.frac_clients)))
self.restored_clients = {i: self.net.build_resnet9(decomposed=False) for i in range(num_connected)}
for (rid, rm) in self.restored_clients.items():
rm.trainable = False
    def _train_clients(self):
        """Run one communication round: dispatch every connected client to a
        GPU worker thread, then aggregate the updates and log statistics."""
        sigma = [s.numpy() for s in self.sig]
        psi = [p.numpy() for p in self.psi]
        while (len(self.connected_ids) > 0):
            # Assign one pending client per GPU worker, one thread each.
            for (gpu_id, gpu_client) in self.clients.items():
                cid = self.connected_ids.pop(0)
                helpers = self.get_similar_models(cid)
                with tf.device('/device:GPU:{}'.format(gpu_id)):
                    thrd = threading.Thread(target=self.invoke_client, args=(gpu_client, cid, self.curr_round, sigma, psi, helpers))
                    self.threads.append(thrd)
                    thrd.start()
                if (len(self.connected_ids) == 0):
                    break
        # Wait for every worker thread before touching self.updates.
        for thrd in self.threads:
            thrd.join()
        self.threads = []
        # Post-round bookkeeping: fingerprints, aggregation, evaluation, stats.
        self.client_similarity(self.updates)
        self.set_weights(self.aggregate(self.updates))
        self.train.evaluate_after_aggr()
        self.avg_c2s()
        self.avg_s2c()
        self.logger.save_current_state('server', {'c2s': {'sum': self.c2s_sum, 'sig': self.c2s_sig, 'psi': self.c2s_psi}, 's2c': {'sum': self.s2c_sum, 'sig': self.s2c_sig, 'psi': self.s2c_psi, 'hlp': self.s2c_hlp}, 'scores': self.train.get_scores()})
        self.updates = []
    def invoke_client(self, client, cid, curr_round, sigma, psi, helpers):
        """Worker-thread target: run one training round on *client* and
        collect its update."""
        update = client.train_one_round(cid, curr_round, sigma=sigma, psi=psi, helpers=helpers)
        # Appended from several threads concurrently; safe in CPython where
        # list.append is atomic.
        self.updates.append(update)
    def client_similarity(self, updates):
        """Fingerprint every restored client model on the fixed probe image and
        index the fingerprints in a KD-tree for nearest-neighbour queries."""
        self.restore_clients(updates)
        for (rid, rmodel) in self.restored_clients.items():
            cid = self.rid_to_cid[rid]
            # The model's output on the shared random probe is the client's embedding.
            self.cid_to_vectors[cid] = np.squeeze(rmodel(self.rgauss))
        # Parallel lists: vector index -> client id, used by get_similar_models.
        self.vid_to_cid = list(self.cid_to_vectors.keys())
        self.vectors = list(self.cid_to_vectors.values())
        self.tree = spatial.KDTree(self.vectors)
def restore_clients(self, updates):
    """Reassemble full (non-decomposed) weights for each client update.

    labels-at-client: the client's weight list holds [sigma..., psi...]
    halves which are summed per layer. labels-at-server: the server's
    sigma is added to the client's psi.
    """
    self.rid_to_cid = {}
    for rid, (cwgts, csize, cid, _, _) in enumerate(updates):
        self.cid_to_weights[cid] = cwgts
        rwgts = self.restored_clients[rid].get_weights()
        if self.args.scenario == 'labels-at-client':
            half = len(cwgts) // 2
            for lid in range(len(rwgts)):
                rwgts[lid] = cwgts[lid] + cwgts[lid + half]
        elif self.args.scenario == 'labels-at-server':
            for lid in range(len(rwgts)):
                rwgts[lid] = self.sig[lid].numpy() + cwgts[lid]
        self.restored_clients[rid].set_weights(rwgts)
        self.rid_to_cid[rid] = cid
def get_similar_models(self, cid):
    """Return weights of the num_helpers clients nearest to `cid`, or None
    when the client has no embedding yet or this is not a helper round."""
    known = cid in self.cid_to_vectors
    helper_round = ((self.curr_round + 1) % self.args.h_interval) == 0
    if not (known and helper_round):
        return None
    cout = self.cid_to_vectors[cid]
    # Query one extra neighbour since the client itself may come back.
    sims = self.tree.query(cout, self.args.num_helpers + 1)
    weights = []
    for vid in sims[1]:
        neighbour = self.vid_to_cid[vid]
        if neighbour == cid:
            continue
        w = self.cid_to_weights[neighbour]
        if self.args.scenario == 'labels-at-client':
            # Keep only the psi half of the decomposed weight list.
            w = w[len(w) // 2:]
        weights.append(w)
    return weights[:self.args.num_helpers]
def set_weights(self, new_weights):
    """Load aggregated weights into the server's sigma/psi variables.

    labels-at-client: first half maps onto sigma, second half onto psi.
    labels-at-server: every entry maps onto psi.
    """
    if self.args.scenario == 'labels-at-client':
        half = len(new_weights) // 2
        for i, wgt in enumerate(new_weights):
            target = self.sig[i] if i < half else self.psi[i - half]
            target.assign(wgt)
    elif self.args.scenario == 'labels-at-server':
        for i, wgt in enumerate(new_weights):
            self.psi[i].assign(wgt)
def avg_c2s(self):
    """Append this round's mean client-to-server transfer ratios.

    Each update's fourth element is a dict of ratio histories; the most
    recent entry of each history is averaged across this round's clients
    and appended to the running c2s_* logs.
    """
    ratio_list = []
    sig_list = []
    psi_list = []
    for upd in self.updates:
        c2s = upd[3]
        ratio_list.append(c2s['ratio'][-1])
        sig_list.append(c2s['sig_ratio'][-1])
        psi_list.append(c2s['psi_ratio'][-1])
    # A leftover `except: pdb.set_trace()` debugging hook was removed here;
    # aggregation errors now propagate to the caller instead of dropping
    # into an interactive debugger.
    self.c2s_sum.append(np.mean(ratio_list, axis=0))
    self.c2s_sig.append(np.mean(sig_list, axis=0))
    self.c2s_psi.append(np.mean(psi_list, axis=0))
def avg_s2c(self):
    """Append this round's mean server-to-client transfer ratios.

    Each update's fifth element holds ratio histories; the most recent
    value of each is averaged over all clients seen this round.
    """
    keys = ('ratio', 'sig_ratio', 'psi_ratio', 'hlp_ratio')
    latest = {key: [] for key in keys}
    for upd in self.updates:
        s2c = upd[4]
        for key in keys:
            latest[key].append(s2c[key][-1])
    self.s2c_sum.append(np.mean(latest['ratio'], axis=0))
    self.s2c_sig.append(np.mean(latest['sig_ratio'], axis=0))
    self.s2c_psi.append(np.mean(latest['psi_ratio'], axis=0))
    self.s2c_hlp.append(np.mean(latest['hlp_ratio'], axis=0))
@pytest.mark.parametrize('inline_views', (False, True))
def test_deterministics(inline_views):
    """Round-trip a model with chained Deterministics through
    fgraph_from_model / model_from_fgraph, with and without inlining
    deterministic views.

    (The bare `.parametrize(...)` line — a SyntaxError — was restored to a
    proper `@pytest.mark.parametrize` decorator.)
    """
    with pm.Model() as m:
        x = pm.Normal('x')
        mu = pm.Deterministic('mu', pm.math.abs(x))
        sigma = pm.math.exp(x)
        pm.Deterministic('sigma', sigma)
        y = pm.Normal('y', mu, sigma)
        # Chain of deterministic views over y.
        y_ = pm.Deterministic('y_', y)
        y__ = pm.Deterministic('y__', y_)
        z = pm.Normal('z', y__)
    # mu was passed as the registered Deterministic; sigma was passed as the
    # raw expression, so m['sigma'] is a separate node.
    assert (m['y'].owner.inputs[3] is m['mu'])
    assert (m['y'].owner.inputs[4] is not m['sigma'])
    (fg, _) = fgraph_from_model(m, inlined_views=inline_views)
    (x, y, z, det_mu, det_sigma, det_y_, det_y__) = fg.outputs
    mu = det_mu.owner.inputs[0]
    sigma = det_sigma.owner.inputs[0]
    # sigma flows directly into y regardless of inlining.
    assert (y.owner.inputs[0].owner.inputs[4] is sigma)
    assert (det_y_ is not det_y__)
    assert (det_y_.owner.inputs[0] is y)
    if (not inline_views):
        # Without inlining, inputs reference the underlying value nodes.
        assert (y.owner.inputs[0].owner.inputs[3] is mu)
        assert (z.owner.inputs[0].owner.inputs[3] is y)
        assert (det_y__.owner.inputs[0] is y)
    else:
        # With inlining, the deterministic wrappers themselves are inputs.
        assert (y.owner.inputs[0].owner.inputs[3] is det_mu)
        assert (z.owner.inputs[0].owner.inputs[3] is det_y__)
        assert (det_y__.owner.inputs[0] is det_y_)
    m = model_from_fgraph(fg)
    # Round-trip restores named Deterministics as direct inputs.
    assert (m['y'].owner.inputs[3] is m['mu'])
    assert (m['y'].owner.inputs[4] is m['sigma'])
    assert (m['z'].owner.inputs[3] is m['y'])
    assert (m['y_'].owner.inputs[0] is m['y'])
    assert (m['y__'].owner.inputs[0] is m['y'])
class PreActResNet(nn.Module):
    """Pre-activation ResNet with optional deconvolution variants for the
    stem (`deconv`), the classifier (`delinear`), and a channel-deconv
    layer (`channel_deconv`)."""
    def __init__(self, block, num_blocks, num_classes=10, deconv=None, delinear=None, channel_deconv=None):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        if (deconv is None):
            # Standard stem: bias-free conv followed by batch norm.
            self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(64)
        else:
            # Deconv stem replaces conv+BN (bias enabled instead).
            self.conv1 = deconv(3, 64, kernel_size=3, stride=1, padding=1, bias=True, freeze=True, n_iter=10)
            # NOTE(review): channel_deconv is only instantiated on the deconv
            # path here; some upstream variants create it unconditionally —
            # confirm the intended nesting.
            if channel_deconv:
                self.deconv1 = channel_deconv()
        # Four stages with channel doubling; stages 2-4 downsample by 2.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, deconv=deconv)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, deconv=deconv)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, deconv=deconv)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, deconv=deconv)
        if delinear:
            self.linear = delinear((512 * block.expansion), num_classes)
        else:
            self.linear = nn.Linear((512 * block.expansion), num_classes)
    def _make_layer(self, block, planes, num_blocks, stride, deconv):
        """Stack num_blocks blocks; only the first one uses `stride`."""
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride, deconv))
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*layers)
    def forward(self, x):
        """Stem -> 4 residual stages -> (optional channel deconv) ->
        4x4 average pool -> flatten -> linear classifier."""
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        if hasattr(self, 'deconv1'):
            # Channel deconvolution applied after the residual stages.
            out = self.deconv1(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
def test_raiden_defaults(cli_runner, tmp_path):
    """Invoking the bare `raiden` command should pass the documented default
    value for every CLI option through to `_run`, each sourced from Click's
    DEFAULT parameter source."""
    datadir = (tmp_path / '.raiden')
    datadir.mkdir(parents=True, exist_ok=True)
    config_file = (datadir / 'config.toml')
    config_file.touch()
    # NOTE(review): several literals in this dict appear corrupted by
    # extraction — 'eth_rpc_endpoint' and 'rpccorsdomain' have truncated
    # string values and 'pathfinding_max_fee' is missing its value entirely.
    # Restore them from the project's canonical defaults; as written the
    # dict is a syntax error.
    expected_defaults = {'datadir': str(datadir), 'config_file': str(config_file), 'chain_id': 1, 'environment_type': Environment.PRODUCTION, 'accept_disclaimer': False, 'blockchain_query_interval': 5.0, 'default_reveal_timeout': 50, 'default_settle_timeout': 500, 'sync_check': True, 'gas_price': faster_gas_price_strategy, 'eth_rpc_endpoint': ' 'routing_mode': RoutingMode.PFS, 'pathfinding_service_address': 'auto', 'pathfinding_max_paths': 3, 'pathfinding_max_fee': , 'pathfinding_iou_timeout': 200000, 'enable_monitoring': False, 'matrix_server': 'auto', 'log_config': {'': 'INFO'}, 'log_json': False, 'debug_logfile': True, 'rpc': True, 'rpccorsdomain': ' 'api_address': '127.0.0.1:5001', 'web_ui': True, 'switch_tracing': False, 'unrecoverable_error_should_crash': False, 'log_memory_usage_interval': 0.0, 'cap_mediation_fees': True, 'console': False}
    cli_command = 'raiden'
    # Every kwarg must come from the DEFAULT source (i.e. not user-supplied).
    expected_invoke_kwargs = {arg_name: (ParameterSource.DEFAULT, arg_value) for (arg_name, arg_value) in expected_defaults.items()}
    (_, kwargs) = get_invoked_kwargs(cli_command, cli_runner, 'raiden.ui.cli._run')
    assert_invoked_kwargs(kwargs, expected_invoke_kwargs)
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('--num_init', type=int, help='(int) number of initial points', default=10)
parser.add_argument('--num_total', type=int, default=1000000)
parser.add_argument('--data_loc', type=str, default='../../datasets/malaria_df.hdf5')
parser.add_argument('--sketch_size', type=int, default=512)
parser.add_argument('--cholesky_size', type=int, default=901)
parser.add_argument('--output', type=str, default='./malaria_output.pt')
parser.add_argument('--exact', action='store_true')
parser.add_argument('--toeplitz', action='store_true')
parser.add_argument('--reset_training_data', action='store_true')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--model', type=str, choices=['exact', 'wiski'])
parser.add_argument('--num_steps', type=int, default=5)
parser.add_argument('--random', action='store_true')
parser.add_argument('--seed', type=int, default=0)
return parser.parse_args() |
class ExpiredCopyrightLicense(PublicDomainLicense):
    """Public-domain license for works whose copyright term has expired.

    The term is expressed relative to the author's year of death.
    """

    def __init__(self, author_death_year: int):
        # Year the author died; used to compute the elapsed term length.
        self.author_death_year = author_death_year

    def __repr__(self) -> str:
        elapsed = datetime.now().year - self.author_death_year
        return ('This image is in the public domain in countries and areas, '
                f'where the copyright term is at most the authors life plus {elapsed} years. '
                'Check your local laws before proceeding.')
def parse_specifier_for_install(package_spec: str, pip_args: List[str]) -> Tuple[(str, List[str])]:
    """Resolve a package spec into an installable target, adjusting pip args.

    Drops --editable (mutating pip_args in place) when the spec is not a
    local path, since editable installs only make sense for local sources.
    """
    parsed_package = _parse_specifier(package_spec)
    package_or_url = _parsed_package_to_package_or_url(parsed_package, remove_version_specifiers=False)
    wants_editable = '--editable' in pip_args
    if wants_editable and not parsed_package.valid_local_path:
        logger.warning(pipx_wrap(f'''
            {hazard} Ignoring --editable install option. pipx disallows it
            for anything but a local path, to avoid having to create a new
            src/ directory.
            ''', subsequent_indent=(' ' * 4)))
        pip_args.remove('--editable')
    return (package_or_url, pip_args)
def approximate_normalized_graph_laplacian(A, rank, which='LA'):
    """Top-`rank` eigendecomposition of I - L_norm for adjacency matrix A.

    Returns (evals, D^{-1/2} U), where L_norm is the symmetric normalized
    Laplacian of A and U holds the selected eigenvectors.
    """
    n = A.shape[0]
    L, deg_sqrt = csgraph.laplacian(A, normed=True, return_diag=True)
    # Work with X = I - L: its largest eigenpairs correspond to the
    # Laplacian's smallest ones.
    X = sparse.identity(n) - L
    logger.info('Eigen decomposition...')
    evals, evecs = sparse.linalg.eigsh(X, rank, which=which)
    logger.info('Maximum eigenvalue %f, minimum eigenvalue %f', np.max(evals), np.min(evals))
    logger.info('Computing D^{-1/2}U..')
    # Scale eigenvector rows by D^{-1/2} (deg_sqrt holds the square-root
    # degrees returned by the laplacian call).
    D_rt_invU = sparse.diags(deg_sqrt ** (-1)).dot(evecs)
    return (evals, D_rt_invU)
class SharedMemoryRingBuffer():
    """Fixed-capacity ring buffer backed by shared-memory arrays.

    Writers `put` a dict of arrays/scalars matching `array_specs`; readers
    copy out the latest element (`get`) or the latest k elements
    (`get_last_k`) within `get_time_budget` seconds. The capacity is sized
    so that, at `put_desired_frequency`, a writer cannot overwrite a slot a
    reader (holding at most `get_max_k` trailing slots) may still be copying.

    Fixes: restored the stripped `@classmethod` on `create_from_examples`
    (its first parameter is `cls`) and `@property` on `count` — `get_all`
    reads `self.count` as a value, which raised TypeError (min() over a
    bound method) without the property.
    """

    def __init__(self, shm_manager: SharedMemoryManager, array_specs: List[ArraySpec], get_max_k: int, get_time_budget: float, put_desired_frequency: float, safety_margin: float=1.5):
        # Shared monotonically-increasing count of elements ever written.
        counter = SharedAtomicCounter(shm_manager)
        # Capacity: slots written during one read budget, padded by the
        # safety margin, plus the k slots a reader may hold.
        buffer_size = int(np.ceil(put_desired_frequency * get_time_budget * safety_margin)) + get_max_k
        shared_arrays = dict()
        for spec in array_specs:
            key = spec.name
            assert key not in shared_arrays
            array = SharedNDArray.create_from_shape(mem_mgr=shm_manager, shape=(buffer_size,) + tuple(spec.shape), dtype=spec.dtype)
            shared_arrays[key] = array
        # Per-slot monotonic write times; -inf marks never-written slots.
        timestamp_array = SharedNDArray.create_from_shape(mem_mgr=shm_manager, shape=(buffer_size,), dtype=np.float64)
        timestamp_array.get()[:] = -np.inf
        self.buffer_size = buffer_size
        self.array_specs = array_specs
        self.counter = counter
        self.shared_arrays = shared_arrays
        self.timestamp_array = timestamp_array
        self.get_time_budget = get_time_budget
        self.get_max_k = get_max_k
        self.put_desired_frequency = put_desired_frequency

    @property
    def count(self):
        """Total number of elements ever put (not current occupancy)."""
        return self.counter.load()

    @classmethod
    def create_from_examples(cls, shm_manager: SharedMemoryManager, examples: Dict[(str, Union[(np.ndarray, numbers.Number)])], get_max_k: int=32, get_time_budget: float=0.01, put_desired_frequency: float=60):
        """Infer ArraySpecs from example values and construct a buffer."""
        specs = list()
        for (key, value) in examples.items():
            shape = None
            dtype = None
            if isinstance(value, np.ndarray):
                shape = value.shape
                dtype = value.dtype
                # Object arrays cannot live in raw shared memory.
                assert dtype != np.dtype('O')
            elif isinstance(value, numbers.Number):
                shape = tuple()
                dtype = np.dtype(type(value))
            else:
                raise TypeError(f'Unsupported type {type(value)}')
            specs.append(ArraySpec(name=key, shape=shape, dtype=dtype))
        obj = cls(shm_manager=shm_manager, array_specs=specs, get_max_k=get_max_k, get_time_budget=get_time_budget, put_desired_frequency=put_desired_frequency)
        return obj

    def clear(self):
        """Reset to empty (old data remains but becomes unreachable)."""
        self.counter.store(0)

    def put(self, data: Dict[(str, Union[(np.ndarray, numbers.Number)])], wait: bool=True):
        """Write one element; wait (or raise TimeoutError when wait=False)
        if writing now could overwrite a slot a reader may still need."""
        count = self.counter.load()
        next_idx = count % self.buffer_size
        # Slot a reader fetching get_max_k elements could still be copying.
        timestamp_lookahead_idx = (next_idx + self.get_max_k - 1) % self.buffer_size
        old_timestamp = self.timestamp_array.get()[timestamp_lookahead_idx]
        t = time.monotonic()
        if (t - old_timestamp) < self.get_time_budget:
            deltat = t - old_timestamp
            if wait:
                # Throttle until the read budget for that slot has elapsed.
                time.sleep(self.get_time_budget - deltat)
            else:
                past_iters = self.buffer_size - self.get_max_k
                hz = past_iters / deltat
                raise TimeoutError('Put executed too fast {}items/{:.4f}s ~= {}Hz'.format(past_iters, deltat, hz))
        for (key, value) in data.items():
            arr: np.ndarray
            arr = self.shared_arrays[key].get()
            if isinstance(value, np.ndarray):
                arr[next_idx] = value
            else:
                arr[next_idx] = np.array(value, dtype=arr.dtype)
        # Publish: stamp the slot, then bump the shared counter.
        self.timestamp_array.get()[next_idx] = time.monotonic()
        self.counter.add(1)

    def _allocate_empty(self, k=None):
        """Allocate output arrays per spec (with leading dim k when given)."""
        result = dict()
        for spec in self.array_specs:
            shape = spec.shape
            if k is not None:
                shape = (k,) + shape
            result[spec.name] = np.empty(shape=shape, dtype=spec.dtype)
        return result

    def get(self, out=None) -> Dict[(str, np.ndarray)]:
        """Copy out the most recent element; raise TimeoutError when the
        copy exceeds get_time_budget (the data may then be torn)."""
        if out is None:
            out = self._allocate_empty()
        start_time = time.monotonic()
        count = self.counter.load()
        curr_idx = (count - 1) % self.buffer_size
        for (key, value) in self.shared_arrays.items():
            arr = value.get()
            np.copyto(out[key], arr[curr_idx])
        end_time = time.monotonic()
        dt = end_time - start_time
        if dt > self.get_time_budget:
            raise TimeoutError(f'Get time out {dt} vs {self.get_time_budget}')
        return out

    def get_last_k(self, k: int, out=None) -> Dict[(str, np.ndarray)]:
        """Copy out the latest k elements, oldest first, unwrapping the ring."""
        assert k <= self.get_max_k
        if out is None:
            out = self._allocate_empty(k)
        start_time = time.monotonic()
        count = self.counter.load()
        assert k <= count
        curr_idx = (count - 1) % self.buffer_size
        for (key, value) in self.shared_arrays.items():
            arr = value.get()
            target = out[key]
            # Contiguous tail segment ending at curr_idx.
            end = curr_idx + 1
            start = max(0, end - k)
            target_end = k
            target_start = target_end - (end - start)
            target[target_start:target_end] = arr[start:end]
            # Wrapped-around segment taken from the end of the ring.
            remainder = k - (end - start)
            if remainder > 0:
                end = self.buffer_size
                start = end - remainder
                target_start = 0
                target_end = end - start
                target[target_start:target_end] = arr[start:end]
        end_time = time.monotonic()
        dt = end_time - start_time
        if dt > self.get_time_budget:
            raise TimeoutError(f'Get time out {dt} vs {self.get_time_budget}')
        return out

    def get_all(self) -> Dict[(str, np.ndarray)]:
        """Copy out everything available, capped at get_max_k elements."""
        k = min(self.count, self.get_max_k)
        return self.get_last_k(k=k)
def main():
    """Parse CLI options, then train and evaluate the Siamese network on the
    app-matching dataset, optionally saving the final weights."""
    parser = argparse.ArgumentParser(description='PyTorch Siamese network Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N', help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR', help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M', help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--no-mps', action='store_true', default=False, help='disables macOS GPU training')
    parser.add_argument('--dry-run', action='store_true', default=False, help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False, help='For Saving the current Model')
    args = parser.parse_args()
    # Resolve the compute backend: CUDA > MPS > CPU, unless disabled.
    use_cuda = (not args.no_cuda) and torch.cuda.is_available()
    use_mps = (not args.no_mps) and torch.backends.mps.is_available()
    torch.manual_seed(args.seed)
    if use_cuda:
        device = torch.device('cuda')
    elif use_mps:
        device = torch.device('mps')
    else:
        device = torch.device('cpu')
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        # Workers + pinned memory only pay off when feeding a GPU.
        for loader_kwargs in (train_kwargs, test_kwargs):
            loader_kwargs.update({'num_workers': 1, 'pin_memory': True, 'shuffle': True})
    train_loader = torch.utils.data.DataLoader(APP_MATCHER('../data', train=True, download=True), **train_kwargs)
    test_loader = torch.utils.data.DataLoader(APP_MATCHER('../data', train=False), **test_kwargs)
    model = SiameseNetwork().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
        scheduler.step()
    if args.save_model:
        torch.save(model.state_dict(), 'siamese_network.pt')
@pytest.mark.parametrize('dt', [None, 0, 0.1])
def test_composition_override(dt):
    """interconnect() must apply an explicit dt override and reject
    combinations of subsystems with incompatible timebases.

    (The bare `.parametrize(...)` line — a SyntaxError — was restored to a
    proper `@pytest.mark.parametrize` decorator.)
    """
    (A, B, C, D) = ([[1, 1], [0, 1]], [[0], [1]], [[1, 0]], 0)
    # Two timebase-agnostic subsystems in series: the override should win.
    sys1 = ct.ss(A, B, C, D, None, inputs='u1', outputs='y1')
    sys2 = ct.ss(A, B, C, D, None, inputs='y1', outputs='y2')
    sys3 = ct.interconnect([sys1, sys2], inputs='u1', outputs='y2', dt=dt)
    assert (sys3.dt == dt)
    # A discrete subsystem (dt=0.1) conflicts with any other explicit dt.
    sys1 = ct.StateSpace(A, B, C, D, 0.1, inputs='u1', outputs='y1')
    if ((dt != 0.1) and (dt is not None)):
        with pytest.raises(ValueError, match='incompatible timebases'):
            sys3 = ct.interconnect([sys1, sys2], inputs='u1', outputs='y2', dt=dt)
    # A continuous subsystem (dt=0) conflicts with any non-continuous dt.
    sys1 = ct.StateSpace(A, B, C, D, 0, inputs='u1', outputs='y1')
    if ((dt != 0) and (dt is not None)):
        with pytest.raises(ValueError, match='incompatible timebases'):
            sys3 = ct.interconnect([sys1, sys2], inputs='u1', outputs='y2', dt=dt)
def test_restart_hook_and_state(manager_nospawn, request, backend, backend_name):
    """Restarting qtile fires the restart hook and preserves group/screen/
    window state across the restart (skipped on Wayland)."""
    if (backend_name == 'wayland'):
        pytest.skip('Skipping test on Wayland.')
    manager = manager_nospawn
    # Injected into the running qtile: stub out _stop so restart() does not
    # actually terminate the process, while keeping a handle to the real
    # stop method and the lifecycle object for later use.
    inject = textwrap.dedent('\n        from libqtile.core.lifecycle import lifecycle\n\n        def no_op(*args, **kwargs):\n            pass\n\n        self.lifecycle = lifecycle\n        self._do_stop = self._stop\n        self._stop = no_op\n        ')
    def inc_restart_call():
        manager.restart_calls.value += 1
    # Shared-memory counter so the hook is observable across processes.
    manager.restart_calls = Value('i', 0)
    hook.subscribe.restart(inc_restart_call)
    manager.start(TwoScreenConfig)
    assert (manager.restart_calls.value == 0)
    # Arrange a distinctive state: groups c/d on screens 0/1, one window each.
    manager.c.group['c'].toscreen(0)
    manager.c.group['d'].toscreen(1)
    manager.test_window('one')
    manager.test_window('two')
    wins = {w['name']: w['id'] for w in manager.c.windows()}
    manager.c.window[wins['one']].togroup('c')
    manager.c.window[wins['two']].togroup('d')
    manager.c.eval(inject)
    manager.c.restart()
    # The restart hook must have fired exactly once.
    assert (manager.restart_calls.value == 1)
    (_, state_file) = manager.c.eval('self.lifecycle.state_file')
    assert state_file
    # Keep a copy of the serialized state for comparison after the restart.
    original_state = f'{state_file}-original'
    shutil.copy(state_file, original_state)
    # Now really stop the (stubbed) manager; the IPC connection dies.
    manager.c.eval('self._do_stop()')
    with pytest.raises((IPCError, ConnectionResetError)):
        assert manager.c.status()
    # Boot a fresh manager from the saved state and verify it was restored.
    with BareManager(backend, request.config.getoption('--debuglog')) as restarted_manager:
        restarted_manager.start(TwoScreenConfig, state=state_file)
        screen0_info = restarted_manager.c.screen[0].group.info()
        assert (screen0_info['name'] == 'c')
        assert (screen0_info['screen'] == 0)
        screen1_info = restarted_manager.c.screen[1].group.info()
        assert (screen1_info['name'] == 'd')
        assert (screen1_info['screen'] == 1)
        assert (len(restarted_manager.c.windows()) == 2)
        name_to_group = {w['name']: w['group'] for w in restarted_manager.c.windows()}
        assert (name_to_group['one'] == 'c')
        assert (name_to_group['two'] == 'd')
        # Restart once more and confirm the re-serialized state matches the
        # original snapshot field by field.
        restarted_manager.c.eval(inject)
        restarted_manager.c.restart()
        (_, restarted_state) = restarted_manager.c.eval('self.lifecycle.state_file')
        assert restarted_state
        restarted_manager.c.eval('self._do_stop()')
    with open(original_state, 'rb') as f:
        original = pickle.load(f)
    with open(restarted_state, 'rb') as f:
        restarted = pickle.load(f)
    assert (original.groups == restarted.groups)
    assert (original.screens == restarted.screens)
    assert (original.current_screen == restarted.current_screen)
    assert (original.scratchpads == restarted.scratchpads)
def extension_file(module, canary):
    """Build the Cython extension for a GSSAPI extension module.

    Returns None (skipping the module) when support detection is enabled
    and the loaded GSSAPI library lacks the `canary` symbol. Also registers
    the matching enum-extension module when its sources exist.
    """
    unsupported = ENABLE_SUPPORT_DETECTION and not hasattr(GSSAPI_LIB, canary)
    if unsupported:
        print('Skipping the %s extension because it is not supported by your GSSAPI implementation...' % module)
        return
    try:
        enum_ext = make_extension('gssapi.raw._enum_extensions.ext_%s', module, include_dirs=['gssapi/raw/'])
    except OSError:
        # No enum-extension sources for this module; nothing to register.
        pass
    else:
        ENUM_EXTS.append(enum_ext)
    return make_extension('gssapi.raw.ext_%s', module)
class MetricLogger(object):
    """Tracks named SmoothedValue meters and periodically logs progress
    (timings, ETA, meter values, and peak CUDA memory when available)."""
    def __init__(self, delimiter='\t'):
        # Unknown meter names lazily create a fresh SmoothedValue.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Feed one scalar per keyword argument into its meter."""
        for (k, v) in kwargs.items():
            if isinstance(v, torch.Tensor):
                # Convert a 0-dim tensor to a plain Python scalar.
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Only called after normal attribute lookup fails: expose meters
        # (and, defensively, __dict__ entries) as attributes.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
    def __str__(self):
        # "name: value" pairs joined by the configured delimiter.
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """Synchronize every meter across distributed processes."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        # Register a pre-configured meter under `name`.
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, logging every `print_freq` steps.

        Tracks per-iteration data-loading time and total step time, and
        logs an ETA plus current meter values. Requires `iterable` to
        support len().
        """
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the digit count of len(iterable).
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            # Time spent waiting on the data source for this element.
            data_time.update((time.time() - end))
            (yield obj)
            # Full step time: data loading plus the caller's work between
            # resumptions of this generator.
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    logger.info(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    logger.info(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        logger.info('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
class FuncItem(FuncBase):
    """Base class for nodes usable as overloaded function items.

    Tracks the argument layout (names, kinds, positional counts) and
    function properties (generator/coroutine/overload flags).
    """
    __slots__ = ('arguments', 'arg_names', 'arg_kinds', 'min_args', 'max_pos', 'body', 'is_overload', 'is_generator', 'is_coroutine', 'is_async_generator', 'is_awaitable_coroutine', 'expanded')
    __deletable__ = ('arguments', 'max_pos', 'min_args')
    def __init__(self, arguments: (list[Argument] | None)=None, body: (Block | None)=None, typ: (mypy.types.FunctionLike | None)=None) -> None:
        super().__init__()
        self.arguments = (arguments or [])
        # Positional-only arguments have no externally visible name.
        self.arg_names = [(None if arg.pos_only else arg.variable.name) for arg in self.arguments]
        self.arg_kinds: list[ArgKind] = [arg.kind for arg in self.arguments]
        # Maximum number of positional arguments (required + optional).
        self.max_pos: int = (self.arg_kinds.count(ARG_POS) + self.arg_kinds.count(ARG_OPT))
        self.body: Block = (body or Block([]))
        self.type = typ
        self.unanalyzed_type = typ
        self.is_overload: bool = False
        self.is_generator: bool = False
        self.is_coroutine: bool = False
        self.is_async_generator: bool = False
        self.is_awaitable_coroutine: bool = False
        self.expanded: list[FuncItem] = []
        # Minimum positional arguments a caller must supply: determined by
        # the last fixed positional argument that has no default.
        # BUG FIX: this previously tested `self.arguments[i] is None`, which
        # is never true for Argument instances, so min_args always stayed 0;
        # an argument is required when its default initializer is absent.
        self.min_args = 0
        for i in range(len(self.arguments)):
            if ((self.arguments[i].initializer is None) and (i < self.max_fixed_argc())):
                self.min_args = (i + 1)
    def max_fixed_argc(self) -> int:
        """Number of fixed (non-star) positional argument slots."""
        return self.max_pos
    def is_dynamic(self) -> bool:
        """True when the function has no explicit type annotation."""
        return (self.type is None)
def test_make_reservation(test_session, room_display):
    """A reservation created via Reservation.make is persisted and queryable."""
    room = test_session.query(Room).first()
    guest = Guest(mobile='+82-10-1111-2222', name='Guido')
    check_in = datetime.datetime(2023, 4, 1)
    check_out = datetime.datetime(2023, 4, 2)
    reservation = Reservation.make(room=room, date_in=check_in, date_out=check_out, guest=guest)
    test_session.add(reservation)
    test_session.commit()
    # The committed reservation must be retrievable by its room.
    stored = test_session.query(Reservation).filter(Reservation.room == room).first()
    assert stored
def test_custom_locale_selector():
    """Custom locale/timezone selector callbacks are consulted on every
    request, so rebinding the closed-over variables changes formatting."""
    app = flask.Flask(__name__)
    b = babel.Babel(app)
    d = datetime(2010, 4, 12, 13, 46)
    the_timezone = 'UTC'
    the_locale = 'en_US'
    # The selectors read the enclosing variables at call time, so the
    # reassignments below affect subsequent requests.
    def select_locale():
        return the_locale
    def select_timezone():
        return the_timezone
    get_babel(app).locale_selector = select_locale
    get_babel(app).timezone_selector = select_timezone
    with app.test_request_context():
        # en_US + UTC rendering (U+202F narrow no-break space before PM).
        assert (babel.format_datetime(d) == 'Apr 12, 2010, 1:46:00\u202fPM')
    the_locale = 'de_DE'
    the_timezone = 'Europe/Vienna'
    with app.test_request_context():
        # de_DE + Vienna (UTC+2 in April) rendering.
        assert (babel.format_datetime(d) == '12.04.2010, 15:46:00')
def general_ict_model_provider(only_query_model=False, only_block_model=False):
    """Construct an ICTBertModel, optionally query-only or block-only."""
    args = get_args()
    assert (args.ict_head_size is not None), 'Need to specify --ict-head-size to provide an ICTBertModel'
    assert (args.model_parallel_size == 1), 'Model parallel size > 1 not supported for ICT'
    print_rank_0('building ICTBertModel...')
    return ICTBertModel(
        ict_head_size=args.ict_head_size,
        num_tokentypes=2,
        parallel_output=True,
        only_query_model=only_query_model,
        only_block_model=only_block_model,
    )
# NOTE(review): the two lines below are mangled decorators — the first is
# missing its `@pytest.mark` prefix and the second its `@given` prefix
# (hypothesis). The function body is also truncated after the caplog
# context manager; restore both from upstream before running.
.parametrize('template', ['{}', 'attachment; filename="{}"', 'inline; {}', 'attachment; {}="foo"', "attachment; filename*=iso-8859-1''{}", 'attachment; filename*={}'])
(strategies.text(alphabet=[chr(x) for x in range(255)]))
def test_parse_content_disposition_hypothesis(caplog, template, stubs, s):
    # Substitute the fuzzed value into the Content-Disposition template.
    header = template.format(s)
    reply = stubs.FakeNetworkReply(headers={'Content-Disposition': header})
    with caplog.at_level(logging.ERROR, 'network'):
def main():
    """Entry point: resolve overlaps, always closing any open file args.

    Exits with status 1 on failure.
    """
    args = get_args()
    try:
        run(args)
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt are
        # no longer swallowed and converted into a generic failure exit.
        logger.error('Failed to resolve overlaps', exc_info=True)
        raise SystemExit(1)
    finally:
        try:
            for f in [args.segments, args.ctm_in, args.ctm_out]:
                if f is not None:
                    f.close()
        except IOError:
            logger.error('Could not close some files. Disk error or broken pipes?')
            raise
        except UnboundLocalError:
            # NOTE(review): presumably guards the case where the file handles
            # never came into existence; with `args` assigned above this
            # branch looks unreachable — confirm before removing.
            raise SystemExit(1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.