code
stringlengths
101
5.91M
class CWRUFFT(object):
    """Dataset builder for FFT-preprocessed CWRU bearing-fault data."""

    num_classes = 10
    inputchannel = 1

    def __init__(self, data_dir, normlizetype):
        self.data_dir = data_dir
        self.normlizetype = normlizetype

    def data_preprare(self, test=False):
        """Return the test dataset, or a stratified (train, val) pair."""
        file_list = get_files(self.data_dir, test)
        if test:
            return dataset(list_data=file_list, test=True, transform=None)
        data_pd = pd.DataFrame({'data': file_list[0], 'label': file_list[1]})
        # 80/20 stratified split with a fixed seed for reproducibility.
        train_pd, val_pd = train_test_split(
            data_pd, test_size=0.2, random_state=40, stratify=data_pd['label'])
        train_dataset = dataset(list_data=train_pd,
                                transform=data_transforms('train', self.normlizetype))
        val_dataset = dataset(list_data=val_pd,
                              transform=data_transforms('val', self.normlizetype))
        return (train_dataset, val_dataset)
def get_remote_list(dir_in):
    """Return the basenames of all entries under an HDFS directory."""
    args = 'hdfs dfs -ls ' + dir_in + " | awk '{print $8}'"
    s_output, _ = process(args)
    # Keep only the last path component of every listed entry.
    return [entry.split('/')[-1] for entry in s_output.split()]
def parse_sexpr(stream):
    """Parse one parenthesized s-expression body from *stream*.

    Assumes the opening '(' has already been consumed; returns a nested
    list of string atoms when the matching ')' is read.  Double-quoted
    strings become single atoms (quotes stripped).
    """
    items = []
    token = ''
    in_string = False
    while True:
        ch = stream.read(1)
        assert ch != '', 'unexpected end of file'
        if in_string:
            if ch == '"':
                in_string = False
            else:
                token += ch
        elif ch == '(':
            items.append(parse_sexpr(stream))
        elif ch == ')':
            if token:
                items.append(token)
            return items
        elif ch.isspace():
            # Whitespace terminates the current atom, if any.
            if token:
                items.append(token)
            token = ''
        elif ch == '"':
            in_string = True
        else:
            token += ch
class BlendLossBuilder(torch.nn.Module):
    """Masked LPIPS loss combining face and hair perceptual terms."""

    def __init__(self, opt):
        super(BlendLossBuilder, self).__init__()
        self.opt = opt
        # (weight, term-name) pairs; both terms weighted equally.
        self.parsed_loss = [[1.0, 'face'], [1.0, 'hair']]
        if opt.device == 'cuda':
            use_gpu = True
        else:
            use_gpu = False
        self.face_percept = masked_lpips.PerceptualLoss(
            model='net-lin', net='vgg', vgg_blocks=['1', '2', '3'], use_gpu=use_gpu)
        self.face_percept.eval()
        self.hair_percept = masked_lpips.PerceptualLoss(
            model='net-lin', net='vgg', vgg_blocks=['1', '2', '3'], use_gpu=use_gpu)
        self.hair_percept.eval()

    def _loss_face_percept(self, gen_im, ref_im, mask, **kwargs):
        return self.face_percept(gen_im, ref_im, mask=mask)

    def _loss_hair_percept(self, gen_im, ref_im, mask, **kwargs):
        return self.hair_percept(gen_im, ref_im, mask=mask)

    def forward(self, gen_im, im_1, im_3, mask_face, mask_hair):
        """Return (total_loss, {term: loss}) for the blended image."""
        total = 0
        term_fns = {'face': self._loss_face_percept,
                    'hair': self._loss_hair_percept}
        losses = {}
        for weight, term in self.parsed_loss:
            if term == 'face':
                call_kwargs = {'gen_im': gen_im, 'ref_im': im_1, 'mask': mask_face}
            elif term == 'hair':
                call_kwargs = {'gen_im': gen_im, 'ref_im': im_3, 'mask': mask_hair}
            value = term_fns[term](**call_kwargs)
            losses[term] = value
            total += weight * value
        return (total, losses)
class CustomTensorDataset(Dataset):
    """TensorDataset variant that applies an optional transform to X.

    Holds (X, y) tensors sharing the first (sample) dimension.  When a
    transform is supplied, its output is converted back to a CHW tensor.
    """

    def __init__(self, *tensors, transform=None):
        # All tensors must share the same number of samples.
        assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
        self.tensors = tensors
        self.transform = transform

    def __getitem__(self, index):
        # BUGFIX: removed the dead `from PIL import Image` that ran on
        # every item fetch but was never used.
        X, y = self.tensors
        X_i, y_i = X[index], y[index]
        if self.transform:
            # Transform may return a PIL-image/ndarray-like; convert back
            # to a tensor and move channels first (HWC -> CHW).
            X_i = self.transform(X_i)
            X_i = torch.from_numpy(np.array(X_i, copy=False))
            X_i = X_i.permute(2, 0, 1)
        return (X_i, y_i)

    def __len__(self):
        return self.tensors[0].size(0)
def resnet1001_cifar(**kwargs):
    """Build a 1001-layer bottleneck ResNet for CIFAR (3 stages x 111 blocks)."""
    return ResNet_Cifar(Bottleneck, [111, 111, 111], **kwargs)
class LazyDropout(nn.Module):
    """Dropout with a pre-sampled mask reused across forward calls.

    Call sample_mask() once per batch; forward() then rescales the input
    by the cached inverted-dropout mask while in training mode.
    """

    def __init__(self):
        super().__init__()
        self.mask = None  # populated by sample_mask()

    def sample_mask(self, x, dropout):
        # Inverted dropout: the keep-probability scaling is baked in.
        keep = 1 - dropout
        raw = x.data.new(x.shape).bernoulli_(keep)
        self.mask = Variable(raw, requires_grad=False) / keep

    def forward(self, x):
        if not self.training:
            return x
        bsz = x.size(0)
        return self.mask[:bsz].expand_as(x) * x
def double_conv(in_channels, out_channels):
    """Two conv(3x3) + BatchNorm + activation stages, in_channels -> out_channels."""
    def stage(cin):
        # One conv/BN/activation triple producing out_channels maps.
        return [conv(cin, out_channels, 3, padding=1),
                nn.BatchNorm2d(out_channels * factor),
                act()]
    return nn.Sequential(*(stage(in_channels) + stage(out_channels)))
class CallCache(object):
    """Records layer/function calls and their produced tensors.

    Keeps an ordered call queue of (type, caller, arguments) triples plus
    a mapping from AutoObject nodes to concrete tensors, so a lazily
    declared graph can later be replayed against a backend.
    """

    def __init__(self):
        self.callqueue_ = []    # ordered (type, caller, arguments) triples
        self.tensors_ = dict()  # AutoObject -> concrete tensor
        self.skip = False       # True once merged into a downstream cache

    def update_tensors(self, other):
        self.tensors_.update(other.tensors_)

    def update_calls(self, other):
        self.callqueue_ += other.callqueue_

    def append_call(self, caller, arguments, type):
        self.callqueue_.append((type, caller, arguments))

    def add_tensor(self, node, tensor):
        self.tensors_[node] = tensor

    def get_tensor(self, n):
        if isinstance(n, list):
            return [self.tensors_.get(one_n, None) for one_n in n]
        else:
            return self.tensors_.get(n, None)

    def calls(self):
        return self.callqueue_

    @staticmethod
    def create():
        # BUGFIX: declared without `self` but invoked unbound as
        # CallCache.create(); mark it @staticmethod to make that explicit
        # and safe for instance-level calls too.
        cache = CallCache()
        return cache

    @staticmethod
    def update(arguments, current, ctype=CALLTYPE.LAYER_CALL):
        """Create a cache for *current*, absorbing the caches of its inputs."""
        def _update_cache_from_input(cache, inp):
            # Recursively merge call graphs found in nested containers.
            if isinstance(inp, AutoObject):
                invalidInputError((inp._callgraph is not None),
                                  'inp._callgraph cannot be none')
                input_callgraph = inp._callgraph
                if not input_callgraph.skip:
                    cache.update_tensors(input_callgraph)
                    cache.update_calls(input_callgraph)
                    # Mark as consumed so it is merged at most once.
                    input_callgraph.skip = True
            elif isinstance(inp, (list, tuple)):
                for item in inp:
                    _update_cache_from_input(cache, item)
            elif isinstance(inp, dict):
                for _, item in inp.items():
                    _update_cache_from_input(cache, item)
            else:
                pass

        cur_cache = CallCache.create()
        if ctype == CALLTYPE.LAYER_CALL or ctype == CALLTYPE.FUNC_CALL:
            _update_cache_from_input(cur_cache, arguments)
            cur_cache.append_call(current, arguments, ctype)
        elif ctype == CALLTYPE.FUNC_SLICE:
            (source, slice_args) = arguments
            _update_cache_from_input(cur_cache, source)
            cur_cache.append_call(current, arguments, CALLTYPE.FUNC_SLICE)
        else:
            invalidInputError(False, ('Unexpected CallType: %s' % ctype))
        return cur_cache

    @staticmethod
    def execute(inputs, outputs, trial, backend):
        """Replay recorded calls against *backend*; return (in_tensors, out_tensors)."""
        def _replace_autoobj(n, cache):
            return cache.get_tensor(n) if isinstance(n, AutoObject) else n

        def _process_arguments(arguments, cache):
            # Substitute every AutoObject in nested containers by its tensor.
            if isinstance(arguments, list):
                return [_process_arguments(arg, cache) for arg in arguments]
            elif isinstance(arguments, tuple):
                return tuple(_process_arguments(arg, cache) for arg in arguments)
            elif isinstance(arguments, dict):
                new_arguments = arguments.copy()
                for name, arg in new_arguments.items():
                    new_arguments[name] = _process_arguments(arg, cache)
                return new_arguments
            else:
                return _replace_autoobj(arguments, cache)

        out_cache = outputs._callgraph
        # BUGFIX: `calls` is a method -- the original iterated the bound
        # method object itself (out_cache.calls), which raises TypeError.
        for (call_type, caller, arguments) in out_cache.calls():
            if call_type == CALLTYPE.LAYER_CALL:
                new_arguments = _process_arguments(arguments, out_cache)
                invalidInputError(isinstance(caller, AutoObject),
                                  'caller should be AutoObject')
                instance = backend.instantiate(trial, caller)
                out_tensor = instance(new_arguments)
            elif call_type == CALLTYPE.FUNC_SLICE:
                (source, slice_args) = arguments
                (slice_args, slice_kwargs) = slice_args
                source_tensor = out_cache.get_tensor(source)
                out_tensor = source_tensor.__getitem__(*slice_args, **slice_kwargs)
            elif call_type == CALLTYPE.FUNC_CALL:
                new_arguments = _process_arguments(arguments, out_cache)
                invalidInputError(isinstance(caller, AutoObject),
                                  'caller should be AutoObject')
                (caller.args, caller.kwargs) = new_arguments
                out_tensor = backend.instantiate(trial, caller)
            else:
                # BUGFIX: the original formatted the builtin `type` instead
                # of the offending call type.
                invalidInputError(False, ('Unexpected CallType: %s' % call_type))
            out_cache.add_tensor(caller, out_tensor)
        out_tensors = out_cache.get_tensor(outputs)
        if isinstance(inputs, list):
            in_tensors = [out_cache.get_tensor(inp) for inp in inputs]
        else:
            in_tensors = out_cache.get_tensor(inputs)
        return (in_tensors, out_tensors)

    def plot(self, save_path=None):
        """Dump the call queue and tensor map to stdout (debug aid)."""
        print('dumping call cachestart')
        print('dumpping call queue')
        for call in self.callqueue_:
            print(call)
        print('dumpping tensors')
        print(self.tensors_)
        print('dumping call cacheend')
class FieldEntrySelector(EntrySelector):
    """Selects entries via comma-separated "<field>[:<type>]=<value|min-max>" specs.

    Each sub-spec becomes a predicate; an entry matches only if all
    predicates accept it.  An optional ":<type>" names a builtin type
    (e.g. int) used to coerce values before comparison.
    """

    _SPEC_DELIM = ','
    _TYPE_DELIM = ':'
    _RANGE_DELIM = '-'
    _EQUAL = '='
    _ERROR_PREFIX = 'Invalid field selector specifier'

    class _FieldEntryValuePredicate(object):
        """True when entry[name] equals the (type-coerced) value."""

        def __init__(self, name: str, typespec: str, value: str):
            import builtins
            self.name = name
            # Resolve the coercion type by builtin name; default to str.
            self.type = getattr(builtins, typespec) if typespec is not None else str
            self.value = value

        def __call__(self, entry):
            return entry[self.name] == self.type(self.value)

    class _FieldEntryRangePredicate(object):
        """True when entry[name] lies in the inclusive [vmin, vmax] range."""

        def __init__(self, name: str, typespec: str, vmin: str, vmax: str):
            import builtins
            self.name = name
            self.type = getattr(builtins, typespec) if typespec is not None else str
            self.vmin = vmin
            self.vmax = vmax

        def __call__(self, entry):
            return ((entry[self.name] >= self.type(self.vmin)) and
                    (entry[self.name] <= self.type(self.vmax)))

    def __init__(self, spec: str):
        self._predicates = self._parse_specifier_into_predicates(spec)

    def __call__(self, entry: Dict[str, Any]):
        for predicate in self._predicates:
            if not predicate(entry):
                return False
        return True

    def _parse_specifier_into_predicates(self, spec: str):
        predicates = []
        for subspec in spec.split(self._SPEC_DELIM):
            eq_idx = subspec.find(self._EQUAL)
            if eq_idx > 0:
                field_name_with_type = subspec[:eq_idx]
                field_name, field_type = self._parse_field_name_type(field_name_with_type)
                field_value_or_range = subspec[eq_idx + 1:]
                if self._is_range_spec(field_value_or_range):
                    vmin, vmax = self._get_range_spec(field_value_or_range)
                    predicate = FieldEntrySelector._FieldEntryRangePredicate(
                        field_name, field_type, vmin, vmax)
                else:
                    predicate = FieldEntrySelector._FieldEntryValuePredicate(
                        field_name, field_type, field_value_or_range)
                predicates.append(predicate)
            elif eq_idx == 0:
                self._parse_error(f'"{subspec}", field name is empty!')
            else:
                self._parse_error(f'"{subspec}", should have format <field>=<value_or_range>!')
        return predicates

    def _parse_field_name_type(self, field_name_with_type: str) -> Tuple[str, Optional[str]]:
        type_delim_idx = field_name_with_type.find(self._TYPE_DELIM)
        if type_delim_idx > 0:
            field_name = field_name_with_type[:type_delim_idx]
            field_type = field_name_with_type[type_delim_idx + 1:]
        elif type_delim_idx == 0:
            self._parse_error(f'"{field_name_with_type}", field name is empty!')
        else:
            field_name = field_name_with_type
            field_type = None
        return (field_name, field_type)

    def _is_range_spec(self, field_value_or_range):
        delim_idx = field_value_or_range.find(self._RANGE_DELIM)
        return delim_idx > 0

    def _get_range_spec(self, field_value_or_range):
        if self._is_range_spec(field_value_or_range):
            delim_idx = field_value_or_range.find(self._RANGE_DELIM)
            vmin = field_value_or_range[:delim_idx]
            vmax = field_value_or_range[delim_idx + 1:]
            return (vmin, vmax)
        else:
            # BUGFIX: the original passed the literal text
            # '"field_value_or_range", ...' instead of interpolating the
            # offending value into the message.
            self._parse_error(f'"{field_value_or_range}", range of values expected!')

    def _parse_error(self, msg):
        raise ValueError(f'{self._ERROR_PREFIX}: {msg}')
def comp_oracle_combination(_filtered_doc_list, _num_edu, _absas_read_str,
                            abs_as_read_list, map_from_new_to_ori_idx,
                            beam_sz=4):
    """Placeholder for an oracle EDU-combination beam search; not implemented."""
    pass
class NN_tb3():
    """ROS node wiring a trained DQN policy to turtlebot3 velocity commands.

    Subscribes to odometry, goal, subgoal and laser scan topics, keeps a
    distance/heading estimate to the current subgoal, and periodically
    publishes the action chosen by a frozen FC_DQN network.
    """

    def __init__(self):
        self.distance = 0
        self.desired_action = 0
        self.psi = 0       # robot yaw (rad)
        self.deg_phi = 0   # heading error to subgoal (deg)
        self.global_goal = PoseStamped()
        self.goal = PoseStamped()
        self.sub_goal = Vector3()
        self.scan = LaserScan()
        # Cached policy network, loaded lazily on first use (see
        # cbComputeActionArena).
        self._net = None
        # subscribers
        self.sub_pose = rospy.Subscriber('/odom', Odometry, self.cbPose)
        self.sub_global_goal = rospy.Subscriber('/goal', PoseStamped, self.cbGlobalGoal)
        self.sub_subgoal = rospy.Subscriber('/plan_manager/subgoal', PoseStamped, self.cbSubGoal)
        # BUGFIX: the scan subscriber used to overwrite self.sub_subgoal.
        self.sub_scan = rospy.Subscriber('/scan', LaserScan, self.cbScan)
        # publishers
        self.pub_pose_marker = rospy.Publisher('/arena_pose', Marker, queue_size=1)
        self.pub_twist = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        # timers: fast policy evaluation, slower command publishing
        self.nn_timer = rospy.Timer(rospy.Duration(0.01), self.cbComputeActionArena)
        self.control_timer = rospy.Timer(rospy.Duration(0.1), self.cbControl)

    def cbScan(self, msg):
        self.scan = msg

    def cbGlobalGoal(self, msg):
        self.global_goal = msg
        self.goal.pose.position.x = msg.pose.position.x
        self.goal.pose.position.y = msg.pose.position.y
        self.goal.header = msg.header
        print('new goal: ' + str([self.goal.pose.position.x, self.goal.pose.position.y]))

    def cbSubGoal(self, msg):
        self.sub_goal.x = msg.pose.position.x
        self.sub_goal.y = msg.pose.position.y

    def cbPose(self, msg):
        """Update distance/heading to the subgoal and the robot yaw."""
        v_p = msg.pose.pose.position
        v_g = self.sub_goal
        v_pg = np.array([v_g.x - v_p.x, v_g.y - v_p.y])
        v_pose = np.array([math.cos(self.psi), math.sin(self.psi)])
        # Signed angle between heading vector and goal vector.
        angle = np.math.atan2(np.linalg.det([v_pose, v_pg]), np.dot(v_pose, v_pg))
        self.distance = np.linalg.norm(v_pg)
        self.deg_phi = math.degrees(angle)
        # Yaw extracted from the odometry quaternion.
        q = msg.pose.pose.orientation
        self.psi = np.arctan2(2.0 * (q.w * q.z + q.x * q.y),
                              1 - 2 * (q.y * q.y + q.z * q.z))
        self.pose = msg.pose

    def goalReached(self):
        # Within 0.3 m of the subgoal counts as reached.
        if self.distance > 0.3:
            return False
        else:
            return True

    def stop_moving(self):
        # An all-zero Twist stops the robot.
        twist = Twist()
        self.pub_twist.publish(twist)

    def update_action(self, action):
        self.desired_action = action

    def cbControl(self, event):
        self.performAction(self.desired_action)
        return

    def countNan(self, data):
        """Return the number of NaN entries in *data*.

        NOTE(review): the original source was garbled (duplicate
        print/return branches); this keeps the evident intent of
        count-print-return -- confirm against the upstream project.
        """
        n = 0
        for value in data:
            if np.isnan(value):
                n += 1
        print(n)
        return n

    def cbComputeActionArena(self, event):
        """Pick the next discrete action from the DQN given the latest scan."""
        if self.goalReached():
            self.stop_moving()
            return
        NUM_ACTIONS = 5
        num_observations = 362
        # Observation = [distance, heading error] + 360 laser ranges
        # (NaN readings clamped to the 3.5 m sensor maximum).
        angle = self.deg_phi
        distance = self.distance
        sample = np.asanyarray(self.scan.ranges)
        sample[np.isnan(sample)] = 3.5
        observation = [distance] + [angle] + sample.tolist()
        # BUGFIX/PERF: the original reloaded the network from disk on
        # every 10 ms timer tick; load it once and cache it.
        if self._net is None:
            model_name = 'dqn_agent_best_fc_l2.dat'
            net = fc.FC_DQN(num_observations, NUM_ACTIONS)
            net.train(False)
            net.load_state_dict(torch.load(model_name, map_location=torch.device('cpu')))
            self._net = net
        state_v = torch.FloatTensor([observation]).to('cpu')
        q_vals_v = self._net(state_v)
        _, act_v = torch.max(q_vals_v, dim=1)
        self.update_action(int(act_v.item()))

    def performAction(self, action):
        # action id -> [linear.x, angular.z]
        action_space = {0: [0.2, 0], 1: [0.15, 0.75], 2: [0.15, -0.75],
                        3: [0.0, 1.5], 4: [0.0, -1.5]}
        twist = Twist()
        twist.linear.x = action_space[action][0]
        twist.angular.z = action_space[action][1]
        print('action ' + str(action) + ': ' + str(action_space[action]))
        print('twist: ' + str([twist.linear.x, twist.angular.z]))
        self.pub_twist.publish(twist)

    def visualize_subgoal(self, subgoal, subgoal_options=None):
        # NOTE(review): publishes on self.pub_goal_path_marker, which is
        # never created in __init__ -- confirm before calling this method.
        markers = MarkerArray()
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = 'map'
        marker.ns = 'subgoal'
        marker.id = 0
        marker.type = marker.CYLINDER
        marker.action = marker.ADD
        marker.pose.position.x = subgoal[0]
        marker.pose.position.y = subgoal[1]
        marker.scale = Vector3(x=0.2, y=0.2, z=0)
        marker.color = ColorRGBA(r=0.0, g=0.0, b=0.0, a=1.0)
        marker.lifetime = rospy.Duration(2.0)
        self.pub_goal_path_marker.publish(marker)
        if subgoal_options is not None:
            # BUGFIX: xrange is Python 2 only.
            for i in range(len(subgoal_options)):
                marker = Marker()
                marker.header.stamp = rospy.Time.now()
                marker.header.frame_id = 'map'
                marker.ns = 'subgoal'
                marker.id = i + 1
                marker.type = marker.CYLINDER
                marker.action = marker.ADD
                marker.pose.position.x = subgoal_options[i][0]
                marker.pose.position.y = subgoal_options[i][1]
                marker.scale = Vector3(x=0.2, y=0.2, z=0.2)
                marker.color = ColorRGBA(r=0.0, g=0.0, b=255, a=1.0)
                marker.lifetime = rospy.Duration(1.0)
                self.pub_goal_path_marker.publish(marker)

    def visualize_pose(self, pos, orientation):
        # Robot footprint marker.
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = 'map'
        marker.ns = 'agent'
        marker.id = 0
        marker.type = marker.CUBE
        marker.action = marker.ADD
        marker.pose.position = pos
        marker.pose.orientation = orientation
        marker.scale = Vector3(x=0.7, y=0.42, z=1)
        marker.color = ColorRGBA(r=1.0, g=1.0, a=1.0)
        marker.lifetime = rospy.Duration(1.0)
        self.pub_pose_marker.publish(marker)
        # Pose-trail marker.
        # NOTE(review): self.num_poses is never initialized anywhere in
        # this class -- confirm before use.
        marker = Marker()
        marker.header.stamp = rospy.Time.now()
        marker.header.frame_id = 'map'
        marker.ns = 'agent'
        marker.id = self.num_poses
        marker.type = marker.CUBE
        marker.action = marker.ADD
        marker.pose.position = pos
        marker.pose.orientation = orientation
        marker.scale = Vector3(x=0.2, y=0.2, z=0.2)
        marker.color = ColorRGBA(r=1.0, a=1.0)
        marker.lifetime = rospy.Duration(10.0)
        self.pub_pose_marker.publish(marker)

    def on_shutdown(self):
        # NOTE(review): the '%s' placeholder has no argument, so rospy
        # prints it literally -- supply the node name if that was intended.
        rospy.loginfo('[%s] Shutting down Node.')
        self.stop_moving()
class createDmLab(object):
    """Thin wrapper around a DeepMind Lab environment that emits RGBD frames."""

    def __init__(self, level, config, seed, runfiles_path=None, level_cache=None):
        self._random_state = np.random.RandomState(seed=seed)
        if runfiles_path:
            deepmind_lab.set_runfiles_path(runfiles_path)
        # deepmind_lab expects all config values as strings.
        config = {k: str(v) for k, v in config.items()}
        self._observation_spec = ['RGBD']
        self._env = deepmind_lab.Lab(level=level,
                                     observations=self._observation_spec,
                                     config=config,
                                     level_cache=level_cache)

    def _reset(self):
        # New episode seeded from our own RNG for reproducibility.
        self._env.reset(seed=self._random_state.randint(0, (2 ** 31) - 1))

    def _observation(self):
        return self._env.observations()['RGBD']

    def initial(self):
        """Reset and return the first RGBD observation."""
        self._reset()
        return self._env.observations()['RGBD']

    def step(self, action):
        """Advance one step; auto-reset when the episode ends."""
        reward = self._env.step(action)
        done = np.array(not self._env.is_running())
        if done:
            self._reset()
        observation = np.array(self._observation(), dtype=np.uint8)
        reward = np.array(reward, dtype=np.float32)
        return (observation, reward, done)

    def close(self):
        self._env.close()
def output_current_round_deadline(selected_clients):
    """Pick the round deadline (seconds) maximizing completed-clients per second.

    A client completes by time t when its mean download + mean upload +
    mean per-epoch train time (last 5 epochs when more than 5 are
    recorded) fits within t.  Scans t upward until every client
    completes, returning the t with the best completion ratio.
    """
    total_clients = len(selected_clients)
    ratios = []
    best_ratio = -1
    best_t = -1
    for t in range(1, sys.maxsize):
        done = 0
        for client in selected_clients:
            comm = np.mean(client.download_times) + np.mean(client.upload_times)
            if len(client.per_epoch_train_times) > 5:
                # Enough history: use only the most recent 5 epochs.
                if comm + np.mean(client.per_epoch_train_times[-5:]) <= t:
                    done += 1
            elif comm + np.mean(client.per_epoch_train_times) <= t:
                done += 1
        ratios.append(done / t)
        if best_ratio < done / t:
            best_ratio = done / t
            best_t = t
        if done == total_clients:
            break
    return best_t
class Router():
    """Dispatches incoming requests to realtime or deferred handlers."""

    def __init__(self) -> None:
        # Map operation name -> routing definition.
        self.routes: Dict[str, RoutingDefinition] = {
            'workloads': RealtimeRoutingDefinition(get_workloads_list),
            'workloads/delete': RealtimeRoutingDefinition(delete_workload),
            'profiling': RealtimeRoutingDefinition(get_profiling_details),
            'model/graph': RealtimeRoutingDefinition(get_model_graph),
            'model/summary': RealtimeRoutingDefinition(get_model_summary),
            'model/graph/highlight_pattern': RealtimeRoutingDefinition(find_pattern_in_graph),
            'diagnosis/op_list': RealtimeRoutingDefinition(get_op_list),
            'diagnosis/op_details': RealtimeRoutingDefinition(get_op_details),
            'diagnosis/histogram': RealtimeRoutingDefinition(get_histogram),
            'profiling/result': RealtimeRoutingDefinition(get_profiling_details),
        }

    def handle(self, request: Request) -> Response:
        """Resolve the requested operation and return a serialized response."""
        routing_definition = self.routes.get(request.operation)
        if routing_definition is None:
            raise ServiceNotFoundException(f'Unable to find {request.operation}')
        data = self._process_routing_definition(routing_definition, request.data)
        if isinstance(data, WebResponse):
            return data
        serialized_data = JsonSerializer.serialize_item(data)
        return create_simple_response(serialized_data)

    def _process_routing_definition(self, routing_definition: RoutingDefinition,
                                    data: dict) -> Any:
        """Run realtime handlers inline; start deferred handlers on a thread."""
        if isinstance(routing_definition, RealtimeRoutingDefinition):
            return routing_definition.callback(data)
        if isinstance(routing_definition, DeferredRoutingDefinition):
            self._validate_deffered_routing_data(data)
            # Fire-and-forget worker; the caller polls using the request id.
            t = Thread(target=routing_definition.callback, args=(data,))
            t.daemon = True
            t.start()
            return {'exit_code': 102, 'message': 'processing'}
        raise ValueError(
            f'Unsupported RoutingDefinition type: {routing_definition.__class__.__name__}')

    @staticmethod
    def _validate_deffered_routing_data(data: dict) -> None:
        # BUGFIX: this was declared without `self`, so the bound call
        # self._validate_deffered_routing_data(data) raised TypeError
        # (two positional args into a one-arg function).
        request_id = str(data.get('request_id', ''))
        if not request_id:
            raise ClientErrorException('Missing request id.')
class ParallelMap(object):
    """Apply *worker* to samples from *source* with a thread/process pool.

    One producer thread feeds an input queue; *worker_num* consumers map
    samples onto an output queue drained by next().  EndSignal sentinels
    propagate termination and errors between the stages.
    """

    def __init__(self, source, worker, worker_num, bufsize=100,
                 use_process=False, memsize='3G'):
        self._worker_num = worker_num
        self._bufsize = bufsize
        self._use_process = use_process
        if self._use_process and sys.platform == 'win32':
            logger.debug('Use multi-thread reader instead of '
                         'multi-process reader on Windows.')
            self._use_process = False
        if self._use_process and type(memsize) is str:
            assert memsize[-1].lower() in ['g', 'm'], (
                "invalid param for memsize[%s], should be "
                "ended with 'G' or 'g' or 'M' or 'm'" % memsize)
            # Shared-memory budget in bytes ('3G' -> 3 * 1024**3).
            power = 3 if memsize[-1].lower() == 'g' else 2
            self._memsize = int(memsize[:-1]) * (1024 ** power)
        self._started = False
        self._source = source
        self._worker = worker
        self._exit = False
        self._setup()
        # NOTE(review): set but never read back in this class -- kept for
        # external compatibility (typo preserved from the original).
        self._souce_drained = False

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def _setup(self):
        """Create the queues, the producer thread and the consumer workers."""
        use_process = self._use_process
        bufsize = self._bufsize
        if use_process:
            from .shared_queue import SharedQueue as Queue
            from multiprocessing import Process as Worker
            from multiprocessing import Event
            memsize = self._memsize
            self._inq = Queue(bufsize, memsize=memsize)
            self._outq = Queue(bufsize, memsize=memsize)
        else:
            if six.PY3:
                from queue import Queue
            else:
                from Queue import Queue
            from threading import Thread as Worker
            from threading import Event
            self._inq = Queue(bufsize)
            self._outq = Queue(bufsize)
        consumer_num = self._worker_num
        # Short random suffix to disambiguate worker names across readers.
        id = str(uuid.uuid4())[-3:]
        self._producer = threading.Thread(
            target=self._produce,
            args=('producer-' + id, self._source, self._inq))
        self._producer.daemon = True
        self._consumers = []
        self._consumer_endsig = {}
        global worker_set
        for i in range(consumer_num):
            consumer_id = 'consumer-' + id + '-' + str(i)
            p = Worker(target=self._consume,
                       args=(consumer_id, self._inq, self._outq, self._worker))
            self._consumers.append(p)
            p.daemon = True
            setattr(p, 'id', consumer_id)
            if use_process:
                worker_set.add(p)
        self._epoch = -1
        self._feeding_ev = Event()
        self._produced = 0
        self._consumed = 0

    def _produce(self, id, source, inq):
        """Fetch samples from *source* into *inq* until drained or error."""
        endsig = EndSignal(id)
        while True:
            self._feeding_ev.wait()
            if self._exit:
                break
            try:
                s = source.next()
                inq.put(s)
                self._produced += 1
            except StopIteration:
                # Pause feeding until reset() re-arms the event.
                self._souce_drained = True
                self._feeding_ev.clear()
                self._feeding_ev.wait()
            except Exception as e:
                endsig.errno = -1
                endsig.errmsg = 'producer[{}] failed with error: {}'.format(id, str(e))
                inq.put(endsig)
                break

    def _consume(self, id, inq, outq, worker):
        """Map queued samples with *worker*; forward an EndSignal on error."""
        if self._use_process:
            # Exit cleanly on SIGTERM so shared queues are released.
            signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit())
        endsig = EndSignal(id)
        while True:
            sample = inq.get()
            if isinstance(sample, EndSignal):
                endsig.errno = sample.errno
                endsig.errmsg = 'consumer[{}] exits for reason[{}]'.format(id, sample.errmsg)
                outq.put(endsig)
                break
            try:
                result = worker(sample)
                outq.put(result)
            except Exception as e:
                endsig.errno = -2
                endsig.errmsg = 'consumer[{}] failed to map with error:[{}]'.format(id, str(e))
                outq.put(endsig)
                break

    def drained(self):
        assert self._epoch >= 0, 'first epoch has not started yet'
        return self._source.drained() and self._produced == self._consumed

    def stop(self):
        """Ask the producer and every consumer to exit."""
        self._exit = True
        self._feeding_ev.set()
        for _ in range(len(self._consumers)):
            self._inq.put(EndSignal(0, 'notify consumers to exit'))

    def _consumer_healthy(self):
        """Return True when no consumer has died without posting an EndSignal."""
        abnormal_num = 0
        for w in self._consumers:
            if not w.is_alive() and w.id not in self._consumer_endsig:
                abnormal_num += 1
                if self._use_process:
                    errmsg = 'consumer[{}] exit abnormally with exitcode[{}]'.format(
                        w.pid, w.exitcode)
                else:
                    errmsg = 'consumer[{}] exit abnormally'.format(w.ident)
                # BUGFIX: logger.warn is deprecated; use logger.warning.
                logger.warning(errmsg)
        if abnormal_num > 0:
            logger.warning('{} consumers have exited abnormally!!!'.format(abnormal_num))
        return abnormal_num == 0

    def next(self):
        """Return the next mapped sample; StopIteration when exhausted."""
        if self._epoch < 0:
            self.reset()
        if self.drained():
            raise StopIteration()
        while not self._exit:
            try:
                sample = self._outq.get(timeout=3)
            except Empty:
                if not self._consumer_healthy():
                    raise StopIteration()
                else:
                    continue
            if isinstance(sample, EndSignal):
                self._consumer_endsig[sample.id] = sample
                logger.warning('recv endsignal from outq with errmsg[{}]'.format(sample.errmsg))
                if len(self._consumer_endsig.keys()) < len(self._consumers):
                    # Recirculate the sentinel so remaining consumers exit too.
                    self._inq.put(sample)
                else:
                    self._exit = True
                    raise StopIteration('all consumers exited, no more samples')
            else:
                self._consumed += 1
                return sample
        raise StopIteration()

    def reset(self):
        """Start the first epoch, or rewind the source for another pass."""
        assert not self._exit, 'cannot reset for already stopped dataset'
        if self._epoch < 0:
            self._epoch = 0
            for w in self._consumers:
                w.start()
            self._producer.start()
        else:
            assert self._consumer_healthy(), (
                'cannot start another pass of data for'
                ' some consumers exited abnormally before!!!')
            if not self.drained():
                logger.warning('reset before epoch[{}] finishes'.format(self._epoch))
                self._produced = self._produced - self._consumed
            else:
                self._produced = 0
            self._epoch += 1
        assert len(self._consumer_endsig.keys()) == 0, (
            'some consumers already exited,' + ' cannot start another epoch')
        self._source.reset()
        self._souce_drained = False
        self._consumed = 0
        self._feeding_ev.set()
def _sanity_check(js): assert (len(js['evidential']) == len(js['questions']) == len(js['answers'])), js
def plot_quant_rules(qrules):
    """Overlay quantized rules, class-colored data points, and cut lines."""
    for rule in qrules:
        plot_qrule(rule, plt)
    # Scatter each sample with its class-specific marker and color.
    for i in range(len(x_points)):
        cls = data_class[i]
        plt.scatter(x_points[i], y_points[i],
                    marker=appearance[cls][1], color=appearance[cls][0], s=60)
    # Dashed grey guide lines at every cut position.
    for i, _ in enumerate(x):
        plt.axhline(y=y[i], color='grey', linestyle='dashed')
        plt.axvline(x=x[i], color='grey', linestyle='dashed')
    plt.xlabel('Estimated Budget (1000$)')
    plt.ylabel('A-List Celebrities')
class BatchNormalization(tf.keras.layers.BatchNormalization):
    """Keras BatchNormalization pinned to the project-wide BN_MOMENTUM default."""

    def __init__(self, momentum=BN_MOMENTUM, name=None, **kwargs):
        super(BatchNormalization, self).__init__(momentum=momentum, name=name, **kwargs)

    def call(self, inputs, training=None):
        # Pure delegation; the subclass exists only for the default momentum.
        return super().call(inputs=inputs, training=training)

    def get_config(self):
        return super(BatchNormalization, self).get_config()
def forward_torch_model_from_h5(model: torch.nn.Module, num_tests_per_file: int,
                                additional_transform_args: Dict, batch_size: int,
                                competition_file: str, device: str,
                                post_transform: Callable[[np.ndarray], Union[torch.Tensor, torch_geometric.data.Data]],
                                pre_transform: Callable[[np.ndarray], Union[torch.Tensor, torch_geometric.data.Data]]):
    """Run *model* over an h5 competition file in batches.

    Returns uint8 predictions of shape (num_tests_per_file, 6, 495, 436, 8)
    with values clipped to [0, 255].  num_tests_per_file must be an exact
    multiple of batch_size.
    """
    model = model.to(device)
    model.eval()
    assert num_tests_per_file % batch_size == 0, (
        f'num_tests_per_file={num_tests_per_file} must be a multiple of batch_size={batch_size}')
    num_batches = num_tests_per_file // batch_size
    prediction = np.zeros(shape=(num_tests_per_file, 6, 495, 436, 8), dtype=np.uint8)
    with torch.no_grad():
        for batch_idx in range(num_batches):
            batch_start = batch_idx * batch_size
            batch_end = batch_start + batch_size
            window = slice(batch_start, batch_end)
            test_data = load_h5_file(competition_file, sl=window, to_torch=False)
            additional_data = load_h5_file(
                competition_file.replace('test', 'test_additional'),
                sl=window, to_torch=False)
            if pre_transform is not None:
                test_data = pre_transform(test_data, **additional_transform_args)
            else:
                test_data = torch.from_numpy(test_data)
                test_data = test_data.to(dtype=torch.float)
            test_data = test_data.to(device)
            additional_data = torch.from_numpy(additional_data)
            additional_data = additional_data.to(device)
            batch_prediction = model(test_data, additional_data=additional_data)
            if post_transform is not None:
                batch_prediction = post_transform(batch_prediction, **additional_transform_args)
            else:
                batch_prediction = batch_prediction.cpu().detach().numpy()
            batch_prediction = np.clip(batch_prediction, 0, 255)
            prediction[batch_start:batch_end] = batch_prediction
    return prediction
def _convert_shape_to_list(node: Any, fix_dynamic_shape: int, tf_module: Any) -> list: try: _shape = list(tf_module.TensorShape(node.attr['shape'].shape)) if (tf_module.__version__ >= '2.0.0'): shape = [(item if (item is not None) else fix_dynamic_shape) for item in _shape] else: shape = [(item.value if (item.value is not None) else fix_dynamic_shape) for item in _shape] if (isinstance(shape, list) and (len(shape) > 1)): return shape[1:] return shape except ValueError: _shape = [fix_dynamic_shape, fix_dynamic_shape, 3] return _shape
class CombineDataset(Dataset):
    """Zip several datasets: item i is the tuple of every dataset's item i."""

    def __init__(self, *datasets):
        self.datasets = datasets

    def __getitem__(self, i):
        return tuple(ds[i] for ds in self.datasets)

    def __len__(self):
        # Bounded by the shortest member dataset.
        return min(len(ds) for ds in self.datasets)
def deprecated(message=''):
    """Decorator factory that emits a DeprecationWarning on every call.

    The wrapped function's behavior is unchanged; *message* is appended
    to the warning text.
    """
    import functools

    def deprecated_decorator(function):
        # BUGFIX: removed the original stray no-op `(function)` expression
        # and added functools.wraps so __name__/__doc__ are preserved.
        @functools.wraps(function)
        def wrapped(*args, **kwargs):
            warnings.simplefilter('always', DeprecationWarning)
            warnings.warn('{} will be deprecated in future release. {}'.format(
                function.__name__, message), category=DeprecationWarning)
            warnings.simplefilter('default', DeprecationWarning)
            return function(*args, **kwargs)
        return wrapped
    return deprecated_decorator
def grad_overflow(param_group):
    """Return True if any gradient in the groups contains inf, -inf or NaN."""
    for group in param_group:
        for param in group:
            if param.grad is None:
                continue
            total = float(param.grad.data.float().sum())
            # NaN is the only float unequal to itself.
            if total == float('inf') or total == float('-inf') or total != total:
                return True
    return False
# BUGFIX: the decorator had lost its '@pytest.mark' prefix (it read bare
# '.parametrize(...)', which is a syntax error); restore it.
@pytest.mark.parametrize('space', [
    Discrete(3),
    Tuple([Discrete(5), Discrete(10)]),
    Tuple([Discrete(5), Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)]),
    Tuple((Discrete(5), Discrete(2), Discrete(2))),
    MultiDiscrete([2, 2, 100]),
    MultiBinary(10),
    Dict({'position': Discrete(5),
          'velocity': Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)}),
])
def test_roundtripping(space):
    """Samples must survive a to_jsonable -> JSON -> from_jsonable round trip."""
    sample_1 = space.sample()
    sample_2 = space.sample()
    assert space.contains(sample_1)
    assert space.contains(sample_2)
    json_rep = space.to_jsonable([sample_1, sample_2])
    # Force an actual JSON round trip, not just the Python structure.
    json_roundtripped = json.loads(json.dumps(json_rep))
    samples_after_roundtrip = space.from_jsonable(json_roundtripped)
    (sample_1_prime, sample_2_prime) = samples_after_roundtrip
    s1 = space.to_jsonable([sample_1])
    s1p = space.to_jsonable([sample_1_prime])
    s2 = space.to_jsonable([sample_2])
    s2p = space.to_jsonable([sample_2_prime])
    assert s1 == s1p, 'Expected {} to equal {}'.format(s1, s1p)
    assert s2 == s2p, 'Expected {} to equal {}'.format(s2, s2p)
def test_CBPM_neg_correlated_features(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """CBPM with corr_sign='neg' keeps only sepal_width and averages it."""
    neg_cols = ['sepal_width']
    # Fitting on just the negative column vs. the full frame must agree.
    from_subset = CBPM(corr_sign='neg', agg_method=np.mean).fit_transform(
        X_iris[neg_cols], y_iris)
    from_full = CBPM(corr_sign='neg', agg_method=np.mean).fit_transform(X_iris, y_iris)
    manual = X_iris[neg_cols].values.mean(axis=1)
    assert_array_equal(from_subset, from_full)
    assert_array_equal(from_subset, manual)
class EdgeAblationType(Enum):
    """Kinds of graph-edge ablations applied in experiments."""

    TRANSITIVE_REDUCTION = 'transitive-reduction'
    TRANSITIVE_CLOSURE = 'transitive-closure'
    ADD_LINEAR_EDGES = 'add-linear-edges'
    ONLY_LINEAR_EDGES = 'only-linear-edges'
    NO_EDGES = 'no-edges'
def landmark_ohem(landmark_pred, landmark_target, label):
    """Mean squared landmark error over valid samples (label == -2).

    NOTE: despite the OHEM name, keep_num equals the count of valid
    samples, so every valid error is retained by top_k.
    """
    ones = tf.ones_like(label, dtype=tf.float32)
    zeros = tf.zeros_like(label, dtype=tf.float32)
    # 1.0 where the sample carries landmark supervision, else 0.0.
    valid_inds = tf.where(tf.equal(label, -2), ones, zeros)
    square_error = tf.reduce_sum(tf.square(landmark_pred - landmark_target), axis=1)
    num_valid = tf.reduce_sum(valid_inds)
    keep_num = tf.cast(num_valid, dtype=tf.int32)
    # Zero-out invalid samples before selecting the top-k errors.
    square_error = square_error * valid_inds
    _, k_index = tf.nn.top_k(square_error, k=keep_num)
    return tf.reduce_mean(tf.gather(square_error, k_index))
def get_coverage(args):
    """Instantiate the coverage metric named by args.coverage."""
    name = args.coverage
    if name == 'neuron_coverage':
        return MyNeuronCoverage(threshold=args.nc_threshold)
    if name == 'top_k_coverage':
        return TopKNeuronCoverage(k=10)
    if name == 'strong_coverage':
        return StrongNeuronActivationCoverage(k=2)
    raise NotImplementedError
def convert_state_dict(src_dict):
    """Rename torchvision-style numeric state-dict keys to res-block names.

    Layers 0/1 map to res1 conv1/bn1; layers 4-7 map to res2..res5 with
    per-branch names; all other keys are dropped.
    """
    dst_dict = {}
    res_id = 1
    # Index -> sub-layer name within a block's main branch (7-token keys).
    branch_names = ['conv1.', 'bn1.', ' ', 'conv2.', 'bn2.']
    # [main-branch tail, shortcut branch] names for 6-token keys.
    tail_names = [[' ', 'conv3.', 'bn3.'], ['shortcut.conv.', 'shortcut.bn.']]
    for key, value in src_dict.items():
        toks = key.split('.')
        layer = int(toks[0])
        if layer == 0:
            name = ('res%d.' % res_id) + 'conv1.' + toks[-1]
        elif layer == 1:
            name = ('res%d.' % res_id) + 'bn1.' + toks[-1]
        elif 4 <= layer <= 7:
            prefix = 'res%d.%d.' % (layer - 2, int(toks[1]))
            if len(toks) == 7:
                name = prefix + branch_names[int(toks[-2])] + toks[-1]
            elif len(toks) == 6:
                name = prefix + tail_names[int(toks[-3])][int(toks[-2])] + toks[-1]
            else:
                continue
        else:
            # BUGFIX: unmapped layer indices (2, 3, >7) used to fall
            # through and reuse the previous iteration's `name` (or raise
            # NameError on the first key); skip them explicitly.
            continue
        dst_dict[name] = value
    return dst_dict
class IOSpiking():
    """Registry/factory for per-(board, chip, LMT) embedded spiking snips.

    The four parallel class-level lists form a cache shared by all
    instances: ``snips[i]`` belongs to ``(boards[i], chips[i], lmts[i])``.
    """
    boards = []  # board ids, parallel to `snips`
    snips = []   # created snip processes
    chips = []   # chip ids, parallel to `snips`
    lmts = []    # LMT core ids, parallel to `snips`

    def __init__(self):
        # The concrete board is attached by external code after construction.
        self.board = None

    def snip(self, chip, lmt):
        """Return the cached spiking snip for (self.board, chip, lmt),
        creating and registering it on first request."""
        for i in range(len(IOSpiking.snips)):
            if ((IOSpiking.boards[i] == self.board.id) and (IOSpiking.chips[i] == chip) and (IOSpiking.lmts[i] == lmt)):
                return IOSpiking.snips[i]
        # Cache miss: create the embedded snip on the requested chip/LMT core.
        snip_process = self.board.createSnip(phase=Phase.EMBEDDED_SPIKING, name='spike_snip_{}_{}'.format(chip, lmt), includeDir=snip_dir, cFilePath=(snip_dir + '/iospiking.c'), funcName='run_spiking', guardName='do_run_spiking', lmtId=lmt, chipId=chip)
        IOSpiking.boards.append(self.board.id)
        IOSpiking.snips.append(snip_process)
        IOSpiking.chips.append(chip)
        IOSpiking.lmts.append(lmt)
        return snip_process
class LeakyRectify(object):
    """Leaky rectifier computed via the branch-free identity
    f(x) = 0.5*(1+a)*x + 0.5*(1-a)*|x|, which equals x for x >= 0 and a*x
    for x < 0. Zero leakiness falls back to the plain rectifier."""

    def __init__(self, leakiness=0.01):
        self.leakiness = leakiness

    def __call__(self, x):
        slope = self.leakiness
        if not slope:
            # Degenerate case: ordinary ReLU.
            return rectify(x)
        pos_weight = 0.5 * (1 + slope)
        neg_weight = 0.5 * (1 - slope)
        return pos_weight * x + neg_weight * abs(x)
def find_pareto_front(Y, return_index=False):
    """Return the Pareto-optimal (minimization) rows of ``Y``.

    A row is kept iff no other row is <= in every objective and strictly <
    in at least one. Rows are visited in ascending order of the first
    objective, so the front comes back sorted by that column.

    Parameters
    ----------
    Y : array-like, shape (n_points, n_objectives)
    return_index : bool
        If True, also return the list of row indices on the front.

    BUG FIX: the empty-input path previously returned a bare array even when
    ``return_index=True``; it now returns ``(front, [])`` consistently.
    """
    Y = np.asarray(Y)
    if len(Y) == 0:
        empty = np.array([])
        return (empty, []) if return_index else empty
    sorted_indices = np.argsort(Y.T[0])
    pareto_indices = []
    for idx in sorted_indices:
        # idx is dominated iff some row is <= everywhere and < somewhere.
        dominated = np.logical_and((Y <= Y[idx]).all(axis=1), (Y < Y[idx]).any(axis=1)).any()
        if not dominated:
            pareto_indices.append(idx)
    pareto_front = Y[pareto_indices].copy()
    if return_index:
        return (pareto_front, pareto_indices)
    return pareto_front
class AdaptiveInput(nn.Module):
    """Adaptive input embeddings: vocabulary bands below each cutoff get
    progressively narrower embeddings (dim shrunk by ``factor**band``), each
    projected up to a shared ``output_dim``.

    FIX: removed the dead ``self.padding_idx = None`` that was immediately
    overwritten with ``padding_idx`` again (no behavior change).
    """

    def __init__(self, vocab_size: int, padding_idx: int, initial_dim: int, factor: float, output_dim: int, cutoff: List[int]):
        super().__init__()
        if (vocab_size > cutoff[(- 1)]):
            # Make the last band cover the remaining tail of the vocabulary.
            cutoff = (cutoff + [vocab_size])
        else:
            assert (vocab_size == cutoff[(- 1)]), 'cannot specify cutoff larger than vocab size'
        self.cutoff = cutoff
        self.embedding_dim = output_dim
        self.padding_idx = padding_idx
        self.embeddings = nn.ModuleList()
        for i in range(len(self.cutoff)):
            prev = (self.cutoff[(i - 1)] if (i > 0) else 0)
            size = (self.cutoff[i] - prev)
            # Band i: embedding of width initial_dim / factor**i, then a
            # bias-free projection to the common output width.
            dim = int((initial_dim // (factor ** i)))
            # NOTE(review): padding_idx is passed to every band's embedding
            # and indexed in init_weights below — assumes it falls within the
            # first band's size; confirm.
            seq = nn.Sequential(nn.Embedding(size, dim, self.padding_idx), nn.Linear(dim, output_dim, bias=False))
            self.embeddings.append(seq)

        def init_weights(m):
            # Normal init for embeddings (padding row zeroed), Xavier for
            # the projection layers.
            if isinstance(m, nn.Embedding):
                nn.init.normal_(m.weight, mean=0, std=(m.weight.shape[1] ** (- 0.5)))
                nn.init.constant_(m.weight[padding_idx], 0)
            elif hasattr(m, 'weight'):
                nn.init.xavier_uniform_(m.weight)
        self.apply(init_weights)
        # Dtype/device anchor used to allocate the output in forward().
        self.register_buffer('_float_tensor', torch.FloatTensor(1))

    def weights_for_band(self, band: int):
        """Return (embedding weight, projection weight) for one band."""
        return (self.embeddings[band][0].weight, self.embeddings[band][1].weight)

    def forward(self, input: torch.Tensor):
        """Embed integer ids of any shape; returns shape input.shape + (output_dim,)."""
        result = self._float_tensor.new((input.shape + (self.embedding_dim,)))
        for i in range(len(self.cutoff)):
            # Tokens belonging to band i: cutoff[i-1] <= id < cutoff[i].
            mask = input.lt(self.cutoff[i])
            if (i > 0):
                mask.mul_(input.ge(self.cutoff[(i - 1)]))
                chunk_input = (input[mask] - self.cutoff[(i - 1)])
            else:
                chunk_input = input[mask]
            if mask.any():
                result[mask] = self.embeddings[i](chunk_input)
        return result
def gen_events():
    """Yield every expanded event sequence with one comment event spliced
    into each possible position (front, between each pair, and back)."""
    for template in gen_templates():
        for expanded in expand(template):
            events = list(expanded)
            for pos in range(len(events) + 1):
                # Slice-based splice instead of copy-then-insert.
                yield events[:pos] + [comment('comment')] + events[pos:]
def test_add_without_overwrite(data):
    """Adding a solution with a lower objective than the resident elite must
    be rejected and leave the archive unchanged.

    ``data`` is a pytest fixture bundling an archive pre-seeded with an
    elite plus that elite's solution/objective/measures (schema assumed
    from usage — confirm against the fixture definition).
    """
    arbitrary_sol = (data.solution + 1)
    low_objective = (data.objective - 1.0)
    add_info = data.archive_with_elite.add_single(arbitrary_sol, low_objective, data.measures)
    # Rejected: status says NOT_ADDED and `value` is the (negative) delta
    # relative to the current elite's objective.
    assert (add_info['status'] == AddStatus.NOT_ADDED)
    assert np.isclose(add_info['value'], (low_objective - data.objective))
    # The original elite is still in place.
    assert_archive_elite(data.archive_with_elite, data.solution, data.objective, data.measures, data.grid_indices)
def create_example(text):
    """Tokenize raw text into the minimal document dict expected by the
    coref pipeline: no gold clusters, empty speaker slot per token."""
    sentences = [word_tokenize(sent) for sent in sent_tokenize(text)]
    speakers = [[''] * len(sentence) for sentence in sentences]
    return {
        'doc_key': 'nw',
        'clusters': [],
        'sentences': sentences,
        'speakers': speakers,
    }
def parse_paths(inputs, postfix=None):
    """Expand a heterogeneous list of path specifiers into concrete paths.

    Each entry of ``inputs`` may be:
      * a ``.txt`` file — read as a newline-separated list of absolute paths;
      * any other existing file — taken as-is;
      * a directory immediately followed by a ``.txt`` entry — the list file
        (looked up as given, else relative to the directory) holds paths
        relative to that directory; the list entry is consumed too;
      * a directory alone — globbed for ``'*' + postfix``;
      * a string containing ``'*'`` — globbed directly.
    Entries matching none of these are silently skipped. Returns None when
    ``inputs`` is None.
    """
    postfix = ('' if (postfix is None) else postfix)
    if (inputs is None):
        return None
    input_paths = []
    i = 0
    while (i < len(inputs)):
        if os.path.isfile(inputs[i]):
            ext = os.path.splitext(inputs[i])[1]
            if (ext == '.txt'):
                # List file: each line is an absolute path.
                with open(inputs[i], 'r') as f:
                    file_abs_paths = f.read().splitlines()
                input_paths += file_abs_paths
            else:
                input_paths.append(inputs[i])
        elif os.path.isdir(inputs[i]):
            if (((i + 1) < len(inputs)) and (os.path.splitext(inputs[(i + 1)])[1] == '.txt')):
                # dir + list-file pair: lines are relative to the dir.
                file_list_path = (inputs[(i + 1)] if os.path.exists(inputs[(i + 1)]) else os.path.join(inputs[i], inputs[(i + 1)]))
                assert os.path.isfile(file_list_path), f'List file does not exist: "{inputs[(i + 1)]}"'
                with open(file_list_path, 'r') as f:
                    file_rel_paths = f.read().splitlines()
                input_paths += [os.path.join(inputs[i], p) for p in file_rel_paths]
                # Consume the list-file entry as well.
                i += 1
            else:
                input_paths += glob(os.path.join(inputs[i], ('*' + postfix)))
        elif any(((c in inputs[i]) for c in ['*'])):
            # Raw glob pattern.
            input_paths += glob(inputs[i])
        i += 1
    return input_paths
def GetSegment(fx, x, u, labels, segment_label):
    """Filter the parallel sequences (fx, x, u) down to the entries whose
    label equals ``segment_label``, preserving order.

    Returns a tuple of three lists.
    """
    keep = [i for i, lab in enumerate(labels) if lab == segment_label]
    seg_fx = [fx[i] for i in keep]
    seg_x = [x[i] for i in keep]
    seg_u = [u[i] for i in keep]
    return (seg_fx, seg_x, seg_u)
class Handler(SimpleHTTPRequestHandler):
    """Static-file handler with two extra endpoints:

    * ``/detindex`` — plain-text newline list of detection names
      (``dets/*.json`` with the '.json' suffix stripped);
    * ``/image<N>`` — serves ``IMAGE_PATH/IMAGE_FMT % N`` resolved under the
      translated request root.
    Everything else falls through to SimpleHTTPRequestHandler.
    """

    def do_GET(self):
        if (self.path == '/detindex'):
            # name[:-5] strips the 5-char '.json' suffix.
            self.send_str('\n'.join([p.name[:(- 5)] for p in Path('dets/').glob('*.json')]))
        elif self.path.startswith('/image'):
            # Split the translated path around 'image' to recover the id.
            path = self.translate_path(self.path).split('image')
            self.send_file(os.path.join(path[0], IMAGE_PATH, (IMAGE_FMT % int(path[1]))))
        else:
            super().do_GET()

    def send_str(self, string):
        """Send ``string`` as a complete 200 text/plain response."""
        self.send_response(HTTPStatus.OK)
        self.send_header('Content-type', 'text/plain')
        # NOTE(review): Content-Length counts characters, not encoded bytes —
        # undercounts for non-ASCII payloads; confirm inputs are ASCII.
        self.send_header('Content-Length', str(len(string)))
        self.send_header('Last-Modified', self.date_time_string())
        self.end_headers()
        self.wfile.write(string.encode())

    def send_file(self, path):
        """Stream the file at ``path`` with type/length/mtime headers, or 404."""
        try:
            f = open(path, 'rb')
        except OSError:
            self.send_error(HTTPStatus.NOT_FOUND, 'File not found')
            return
        try:
            self.send_response(HTTPStatus.OK)
            self.send_header('Content-type', self.guess_type(path))
            fs = os.fstat(f.fileno())
            # fs[6] is st_size.
            self.send_header('Content-Length', str(fs[6]))
            self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
            self.end_headers()
            self.copyfile(f, self.wfile)
        finally:
            f.close()

    def send_response(self, code, message=None):
        # Pass-through override — presumably a hook point; confirm intent.
        super().send_response(code, message)
def vgg11_bn(pretrained=False, dataset_history=[], dataset2num_classes={}, **kwargs):
    """Build VGG-11 with batch normalization (configuration 'A').

    NOTE(review): the mutable default arguments are shared across calls —
    kept for interface compatibility; confirm callers always pass them.
    """
    if pretrained:
        # Pretrained checkpoints bring their own weights, so skip init.
        kwargs['init_weights'] = False
    features = make_layers(cfg['A'], batch_norm=True)
    return VGG(features, dataset_history, dataset2num_classes, **kwargs)
class PixelActorCritic(nn.Module):
    """Actor-critic over pixel observations: a (possibly frozen, pretrained)
    image encoder plus a linear state encoder feed concatenated embeddings
    into separate SELU MLP heads for the policy mean and the value.

    NOTE(review): decorators appear to have been lost in this file —
    ``init_weights`` has no ``self`` (reads like a ``@staticmethod``) and the
    bare ``_grad()`` statements before ``act``/``act_inference`` look like
    mangled ``@torch.no_grad()`` decorators. As written,
    ``self.init_weights(...)`` would pass ``self`` as ``sequential``.
    Confirm against the original source before running.
    """

    def __init__(self, obs_shape, states_shape, actions_shape, initial_std, encoder_cfg, policy_cfg):
        super(PixelActorCritic, self).__init__()
        assert (encoder_cfg is not None)
        emb_dim = encoder_cfg['emb_dim']
        # Image branch: backbone producing an emb_dim-wide embedding.
        self.obs_enc = Encoder(model_name=encoder_cfg['name'], pretrain_dir=encoder_cfg['pretrain_dir'], freeze=encoder_cfg['freeze'], emb_dim=emb_dim)
        # State branch: one linear layer to the same width.
        self.state_enc = nn.Linear(states_shape[0], emb_dim)
        actor_hidden_dim = policy_cfg['pi_hid_sizes']
        critic_hidden_dim = policy_cfg['vf_hid_sizes']
        activation = nn.SELU()
        # Policy head over the concatenated (image, state) embedding.
        actor_layers = []
        actor_layers.append(nn.Linear((emb_dim * 2), actor_hidden_dim[0]))
        actor_layers.append(activation)
        for li in range(len(actor_hidden_dim)):
            if (li == (len(actor_hidden_dim) - 1)):
                # Output layer maps to the action dimension, no activation.
                actor_layers.append(nn.Linear(actor_hidden_dim[li], *actions_shape))
            else:
                actor_layers.append(nn.Linear(actor_hidden_dim[li], actor_hidden_dim[(li + 1)]))
                actor_layers.append(activation)
        self.actor = nn.Sequential(*actor_layers)
        # Value head: same topology, scalar output.
        critic_layers = []
        critic_layers.append(nn.Linear((emb_dim * 2), critic_hidden_dim[0]))
        critic_layers.append(activation)
        for li in range(len(critic_hidden_dim)):
            if (li == (len(critic_hidden_dim) - 1)):
                critic_layers.append(nn.Linear(critic_hidden_dim[li], 1))
            else:
                critic_layers.append(nn.Linear(critic_hidden_dim[li], critic_hidden_dim[(li + 1)]))
                critic_layers.append(activation)
        self.critic = nn.Sequential(*critic_layers)
        print(self.obs_enc)
        print(self.state_enc)
        print(self.actor)
        print(self.critic)
        # State-independent log-std of the Gaussian policy.
        self.log_std = nn.Parameter((np.log(initial_std) * torch.ones(*actions_shape)))
        # Orthogonal-init gains: sqrt(2) hidden, 0.01 policy out, 1.0 value out.
        actor_weights = ([np.sqrt(2)] * len(actor_hidden_dim))
        actor_weights.append(0.01)
        critic_weights = ([np.sqrt(2)] * len(critic_hidden_dim))
        critic_weights.append(1.0)
        self.init_weights(self.actor, actor_weights)
        self.init_weights(self.critic, critic_weights)

    def init_weights(sequential, scales):
        # Orthogonal-initialize every nn.Linear in `sequential` with its gain.
        # NOTE(review): missing `self`/@staticmethod — see class docstring.
        [torch.nn.init.orthogonal_(module.weight, gain=scales[idx]) for (idx, module) in enumerate((mod for mod in sequential if isinstance(mod, nn.Linear)))]

    # NOTE(review): likely a mangled @torch.no_grad() decorator.
    _grad()
    def act(self, observations, states):
        """Sample an action for rollout; returns detached tensors only."""
        (obs_emb, obs_feat) = self.obs_enc(observations)
        state_emb = self.state_enc(states)
        joint_emb = torch.cat([obs_emb, state_emb], dim=1)
        actions_mean = self.actor(joint_emb)
        # NOTE(review): diag(exp(log_std)^2) is a variance, yet it is passed
        # as scale_tril (which expects a Cholesky factor / std) — confirm.
        covariance = torch.diag((self.log_std.exp() * self.log_std.exp()))
        distribution = MultivariateNormal(actions_mean, scale_tril=covariance)
        actions = distribution.sample()
        actions_log_prob = distribution.log_prob(actions)
        value = self.critic(joint_emb)
        return (actions.detach(), actions_log_prob.detach(), value.detach(), actions_mean.detach(), self.log_std.repeat(actions_mean.shape[0], 1).detach(), obs_feat.detach())

    # NOTE(review): likely a mangled @torch.no_grad() decorator.
    _grad()
    def act_inference(self, observations, states):
        """Deterministic action (policy mean) for evaluation."""
        (obs_emb, _) = self.obs_enc(observations)
        state_emb = self.state_enc(states)
        joint_emb = torch.cat([obs_emb, state_emb], dim=1)
        actions_mean = self.actor(joint_emb)
        return actions_mean

    def forward(self, obs_features, states, actions):
        """Training pass from precomputed observation features; returns
        log-prob of `actions`, entropy, value, mean, and broadcast log-std."""
        obs_emb = self.obs_enc.forward_feat(obs_features)
        state_emb = self.state_enc(states)
        joint_emb = torch.cat([obs_emb, state_emb], dim=1)
        actions_mean = self.actor(joint_emb)
        covariance = torch.diag((self.log_std.exp() * self.log_std.exp()))
        distribution = MultivariateNormal(actions_mean, scale_tril=covariance)
        actions_log_prob = distribution.log_prob(actions)
        entropy = distribution.entropy()
        value = self.critic(joint_emb)
        return (actions_log_prob, entropy, value, actions_mean, self.log_std.repeat(actions_mean.shape[0], 1))
class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa-family models."""

    # NOTE(review): upstream transformers declares `inputs` as a @property;
    # the decorator may have been stripped here — confirm before relying on
    # attribute-style access.
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Map each ONNX input name to its dynamic axes.

        Multiple-choice models carry an extra `choice` axis between batch
        and sequence; otherwise inputs are (batch, sequence).
        """
        if (self.task == 'multiple-choice'):
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
def check_missing_backends():
    """Verify that all three frameworks needed for full repo-consistency
    checks are importable; raise on CI, warn locally otherwise."""
    backend_probes = (
        (is_torch_available, 'PyTorch'),
        (is_tf_available, 'TensorFlow'),
        (is_flax_available, 'Flax'),
    )
    missing_backends = [label for available, label in backend_probes if not available()]
    if not missing_backends:
        return
    missing = ', '.join(missing_backends)
    if os.getenv('TRANSFORMERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        # CI must run with every backend installed — hard failure.
        raise Exception(f'Full repo consistency checks require all backends to be installed (with `pip install -e .[dev]` in the Transformers repo, the following are missing: {missing}.')
    else:
        warnings.warn(f"Full repo consistency checks require all backends to be installed (with `pip install -e .[dev]` in the Transformers repo, the following are missing: {missing}. While it's probably fine as long as you didn't make any change in one of those backends modeling files, you should probably execute the command above to be on the safe side.")
def convert_gqa_to_vqa(gqa_dir, out_dir):
    """Convert a GQA dataset tree into the VQA-style layout used here.

    Pipeline: extract image features, build per-split imdb .npy files for
    every (split, balanced/all) combination, then scan train/val questions
    to emit a question-token vocabulary and an answer list (both prefixed
    with '<unk>') into ``out_dir``.
    """
    image_feat_path = os.path.join(gqa_dir, 'images')
    extract_image_features(image_feat_path, out_dir)
    questions_dir = os.path.join(gqa_dir, 'questions')
    # The sharded train questions are merged into one file once and cached.
    if os.path.isfile(os.path.join(questions_dir, 'train_all_questions.json')):
        print('Using previously generated train_all_questions.json file')
    else:
        merge_train(os.path.join(gqa_dir, 'questions', 'train_all_questions'))
    split_mapping = {'test': 'test_all_questions.json', 'val': 'val_all_questions.json', 'challenge': 'challenge_all_questions.json', 'train': 'train_all_questions.json'}
    for split in split_mapping:
        for balance_type in ['balanced', 'all']:
            filename = split_mapping[split]
            csplit = split
            if (balance_type == 'balanced'):
                # The balanced variant lives next to the 'all' file.
                filename = filename.replace('_all', '_balanced')
                csplit = (split + '_balanced')
            file_path = os.path.join(questions_dir, filename)
            imdb = get_imdb(file_path)
            save_path = os.path.join(out_dir, 'imdb_{}.npy'.format(csplit))
            np.save(save_path, imdb)
    # Vocabulary/answer statistics come from val+train only (not test).
    splits = ['val', 'train']
    split_type = ['balanced', 'all']
    global_answer = Counter()
    global_q = Counter()
    question_len = Counter()
    for s in splits:
        for st in split_type:
            questions_json = os.path.join(questions_dir, '{}_{}_questions.json'.format(s, st))
            questions = json.load(open(questions_json, 'r'))
            print('Processing split {}_{}'.format(s, st))
            answers = Counter()
            q_tokens = Counter()
            for (qs, q) in tqdm.tqdm(questions.items()):
                tokens = tokenize(q['question'])
                q_tokens.update(tokens)
                global_q.update(tokens)
                answers.update([q['answer'].lower()])
                global_answer.update([q['answer'].lower()])
                question_len.update([len(tokens)])
    print('N_unique answers :', len(global_answer))
    print('N unique q tokens:', len(global_q))
    # question_len keys are token counts; min/max over keys give the range.
    print('Min Q length', min([x for x in question_len]))
    print('Max Q length', max([x for x in question_len]))
    print('Q length distribution', question_len)
    q_vocabulary = [w[0] for w in global_q.items()]
    q_vocabulary.sort()
    q_vocabulary = (['<unk>'] + q_vocabulary)
    vocab_file = os.path.join(out_dir, 'vocabulary_gqa.txt')
    with open(vocab_file, 'w') as f:
        f.writelines([(w + '\n') for w in q_vocabulary])
    answer_list = [preprocess_answer(ans[0]) for ans in global_answer.items()]
    answer_list = [t.strip() for t in answer_list if (len(t.strip()) > 0)]
    answer_list.sort()
    if ('<unk>' not in answer_list):
        answer_list = (['<unk>'] + answer_list)
    answer_file = os.path.join(out_dir, 'answers_gqa.txt')
    with open(answer_file, 'w') as fp:
        fp.writelines([(w + '\n') for w in answer_list])
def intrinsic_dimension_said(module, intrinsic_dimension, output_dir, str_filter, projection, device='cpu'):
    """Attach a structure-aware (SAID) intrinsic-dimension reparameterization
    to ``module`` and return the same module instance.

    The hard-coded ``True`` selects the structure-aware variant of the hook.
    """
    IntrinsicDimensionLight.apply(
        module, intrinsic_dimension, output_dir, str_filter, True, projection, device
    )
    return module
class Linear(fa_constructor.Linear):
    """Feedback-alignment linear layer: delegates to the fa_constructor base
    with the layer type pinned to 'fa'."""

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        # FIX: copy before tagging so the caller's dict is not mutated in
        # place (the original wrote 'type' into the caller-owned mapping).
        layer_config = {} if layer_config is None else dict(layer_config)
        layer_config['type'] = 'fa'
        super(Linear, self).__init__(in_features, out_features, bias, layer_config)
def build_detection_test_loader(cfg, dataset_name, mapper=None):
    """Build a one-image-per-batch test DataLoader for ``dataset_name``.

    Category whitelists/maps are applied to the metadata first, dataset
    dicts are combined (optionally with precomputed proposals), mapped
    (default: DatasetMapper in inference mode), and served with an
    InferenceSampler so evaluation visits each sample exactly once in order.
    """
    _add_category_whitelists_to_metadata(cfg)
    _add_category_maps_to_metadata(cfg)
    dataset_dicts = combine_detection_dataset_dicts([dataset_name], keep_instance_predicate=_get_test_keep_instance_predicate(cfg), proposal_files=([cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]] if cfg.MODEL.LOAD_PROPOSALS else None))
    dataset = DatasetFromList(dataset_dicts)
    if (mapper is None):
        # False => test-time (no training augmentation) transforms.
        mapper = DatasetMapper(cfg, False)
    dataset = MapDataset(dataset, mapper)
    sampler = InferenceSampler(len(dataset))
    # Batch size fixed at 1 image for evaluation.
    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
    data_loader = torch.utils.data.DataLoader(dataset, num_workers=cfg.DATALOADER.NUM_WORKERS, batch_sampler=batch_sampler, collate_fn=trivial_batch_collator)
    return data_loader
class Normalizer(object):
    """Running mean/std normalizer mirrored into TF variables and kept in
    sync across MPI workers.

    Statistics are accumulated locally with record(), then merged across
    workers and folded into the running moments by update().

    FIX: ``calc_mean_sq`` previously ignored its ``mean`` argument and used
    ``self._mean``. Every existing call site passed ``self._mean`` (after
    assigning it), so observable behavior is unchanged, but the method now
    honors its parameter.
    """

    # Every CHECK_SYNC_COUNT accumulated samples, verify that all MPI
    # workers still agree on the statistics.
    CHECK_SYNC_COUNT = 50000

    def __init__(self, sess, scope, size, init_mean=None, init_std=None, eps=0.01, clip=np.inf):
        self._sess = sess
        self._scope = scope
        self._eps = eps      # lower bound on std to avoid division blow-ups
        self._clip = clip    # symmetric clip applied to normalized values
        self._mean = np.zeros(size)
        self._std = np.ones(size)
        self._count = 0
        if (init_mean is not None):
            if (not isinstance(init_mean, np.ndarray)):
                assert (size == 1)
                init_mean = np.array([init_mean])
            assert (init_mean.size == size), Logger.print('Normalizer init mean shape mismatch, expecting size {:d}, but got {:d}'.format(size, init_mean.size))
            self._mean = init_mean
        if (init_std is not None):
            if (not isinstance(init_std, np.ndarray)):
                assert (size == 1)
                init_std = np.array([init_std])
            assert (init_std.size == size), Logger.print('Normalizer init std shape mismatch, expecting size {:d}, but got {:d}'.format(size, init_std.size))
            self._std = init_std
        self._mean_sq = self.calc_mean_sq(self._mean, self._std)
        # Pending (not-yet-merged) statistics fed by record().
        self._new_count = 0
        self._new_sum = np.zeros_like(self._mean)
        self._new_sum_sq = np.zeros_like(self._mean_sq)
        with tf.compat.v1.variable_scope(self._scope):
            self._build_resource_tf()
        return

    def record(self, x):
        """Accumulate samples ``x`` (any leading shape; trailing dim == size)."""
        size = self.get_size()
        is_array = isinstance(x, np.ndarray)
        if (not is_array):
            assert (size == 1)
            x = np.array([[x]])
        assert (x.shape[(- 1)] == size), Logger.print('Normalizer shape mismatch, expecting size {:d}, but got {:d}'.format(size, x.shape[(- 1)]))
        x = np.reshape(x, [(- 1), size])
        self._new_count += x.shape[0]
        self._new_sum += np.sum(x, axis=0)
        self._new_sum_sq += np.sum(np.square(x), axis=0)
        return

    def update(self):
        """Merge pending stats across MPI workers into the running moments
        and push the result into the TF variables."""
        new_count = mpi_util.reduce_sum(self._new_count)
        new_sum = mpi_util.reduce_sum(self._new_sum)
        new_sum_sq = mpi_util.reduce_sum(self._new_sum_sq)
        if (new_count > 0):
            new_total = (self._count + new_count)
            # Periodic cross-worker consistency check.
            if ((self._count // self.CHECK_SYNC_COUNT) != (new_total // self.CHECK_SYNC_COUNT)):
                assert self._check_synced(), Logger.print('Normalizer parameters desynchronized')
            new_mean = (new_sum / new_count)
            new_mean_sq = (new_sum_sq / new_count)
            # Count-weighted average of old and new first/second moments.
            w_old = (float(self._count) / new_total)
            w_new = (float(new_count) / new_total)
            self._mean = ((w_old * self._mean) + (w_new * new_mean))
            self._mean_sq = ((w_old * self._mean_sq) + (w_new * new_mean_sq))
            self._count = new_total
            self._std = self.calc_std(self._mean, self._mean_sq)
            self._new_count = 0
            self._new_sum.fill(0)
            self._new_sum_sq.fill(0)
            self._update_resource_tf()
        return

    def get_size(self):
        """Dimensionality of the normalized vectors."""
        return self._mean.size

    def set_mean_std(self, mean, std):
        """Overwrite the running statistics and sync the TF mirror."""
        size = self.get_size()
        is_array = (isinstance(mean, np.ndarray) and isinstance(std, np.ndarray))
        if (not is_array):
            assert (size == 1)
            mean = np.array([mean])
            std = np.array([std])
        assert ((len(mean) == size) and (len(std) == size)), Logger.print('Normalizer shape mismatch, expecting size {:d}, but got {:d} and {:d}'.format(size, len(mean), len(std)))
        self._mean = mean
        self._std = std
        self._mean_sq = self.calc_mean_sq(self._mean, self._std)
        self._update_resource_tf()
        return

    def normalize(self, x):
        """(x - mean) / std, clipped to [-clip, clip]."""
        norm_x = ((x - self._mean) / self._std)
        norm_x = np.clip(norm_x, (- self._clip), self._clip)
        return norm_x

    def unnormalize(self, norm_x):
        """Inverse of normalize() (ignoring clipping)."""
        x = ((norm_x * self._std) + self._mean)
        return x

    def calc_std(self, mean, mean_sq):
        """std = sqrt(max(E[x^2] - E[x]^2, 0)), floored at eps."""
        var = (mean_sq - np.square(mean))
        var = np.maximum(var, 0)
        std = np.sqrt(var)
        std = np.maximum(std, self._eps)
        return std

    def calc_mean_sq(self, mean, std):
        """Recover E[x^2] from (mean, std). Was np.square(self._mean)."""
        return (np.square(std) + np.square(mean))

    def load(self):
        """Pull the statistics from the TF variables into this object."""
        (count, mean, std) = self._sess.run([self._count_tf, self._mean_tf, self._std_tf])
        self._count = count[0]
        self._mean = mean
        self._std = std
        self._mean_sq = self.calc_mean_sq(self._mean, self._std)
        return

    def normalize_tf(self, x):
        """Graph-mode counterpart of normalize()."""
        norm_x = ((x - self._mean_tf) / self._std_tf)
        norm_x = tf.clip_by_value(norm_x, (- self._clip), self._clip)
        return norm_x

    def unnormalize_tf(self, norm_x):
        """Graph-mode counterpart of unnormalize()."""
        x = ((norm_x * self._std_tf) + self._mean_tf)
        return x

    def need_update(self):
        """True when record() has accumulated samples not yet merged."""
        return (self._new_count > 0)

    def _build_resource_tf(self):
        # Mirrored state plus assignment targets. NOTE(review): the *_ph
        # tensors are created via get_variable rather than tf.placeholder —
        # presumably intentional for resource variables; confirm.
        self._count_tf = tf.compat.v1.get_variable(dtype=tf.int32, name='count', initializer=np.array([self._count], dtype=np.int32), trainable=False)
        self._mean_tf = tf.compat.v1.get_variable(dtype=tf.float32, name='mean', initializer=self._mean.astype(np.float32), trainable=False)
        self._std_tf = tf.compat.v1.get_variable(dtype=tf.float32, name='std', initializer=self._std.astype(np.float32), trainable=False)
        self._count_ph = tf.compat.v1.get_variable(dtype=tf.int32, name='count_ph', shape=[1])
        self._mean_ph = tf.compat.v1.get_variable(dtype=tf.float32, name='mean_ph', shape=self._mean.shape)
        self._std_ph = tf.compat.v1.get_variable(dtype=tf.float32, name='std_ph', shape=self._std.shape)
        self._update_op = tf.group(self._count_tf.assign(self._count_ph), self._mean_tf.assign(self._mean_ph), self._std_tf.assign(self._std_ph))
        return

    def _update_resource_tf(self):
        # Push the python-side stats into the TF mirror.
        feed = {self._count_ph: np.array([self._count], dtype=np.int32), self._mean_ph: self._mean, self._std_ph: self._std}
        self._sess.run(self._update_op, feed_dict=feed)
        return

    def _check_synced(self):
        """Broadcast the root worker's (mean, std) and compare locally."""
        synced = True
        if mpi_util.is_root_proc():
            vars = np.concatenate([self._mean, self._std])
            mpi_util.bcast(vars)
        else:
            vars_local = np.concatenate([self._mean, self._std])
            vars_root = np.empty_like(vars_local)
            mpi_util.bcast(vars_root)
            synced = (vars_local == vars_root).all()
        return synced
def stage_data(snowflake_client: SnowflakeClient, snowflake_schema: str, snowflake_table: str, data_file: str, data_folder: str):
    """Upload a local file into the Snowflake table stage via PUT
    (compressed, overwriting any previous copy) and return the query result."""
    sql_query = (
        f'PUT file://{data_folder}/{data_file} '
        f'{snowflake_schema.upper()}.%{snowflake_table.upper()} '
        'auto_compress=true overwrite=true'
    )
    return snowflake_client.execute_query(sql_query, is_debug=True)
class LogEntry():
    """Attribute/index-style reader over a parsed log dict or list.

    Nested plain dicts/lists are re-wrapped in LogEntry so access chains
    like ``entry.a.b`` keep working; scalars pass through untouched. The
    raw payload is always reachable as ``entry._``.
    """

    def __init__(self, entry: Union[(dict, list)]):
        self._ = entry

    def __getattr__(self, name):
        if name == '_':
            return self.__dict__['_']
        value = self.__dict__['_'][name]
        # Exact type check (not isinstance) matches the original contract:
        # only plain dict/list containers get re-wrapped.
        if type(value) in (dict, list):
            return LogEntry(value)
        return value

    def __getitem__(self, name):
        return self.__getattr__(name)

    def __len__(self):
        return len(self.__dict__['_'])
class conv_synapse(SynapseModel):
    """2-D convolution synapse: registers a single conv_2d(_complex)
    operation whose argument list is extended with complex_beta and/or bias
    terms depending on the post-neuron model and the connection flags."""

    def __init__(self, conn, **kwargs):
        super(conv_synapse, self).__init__(conn)
        is_complex = (conn.post.model_name == 'complex')
        has_bias = (('bias_flag' in conn.__dict__.keys()) and conn.bias_flag)
        # Common operand list shared by all four variants.
        operation = [
            (conn.post_var_name + '[post]'),
            'conv_2d_complex' if is_complex else 'conv_2d',
            self.input_name,
            'weight[link]',
            'stride[link]',
            'padding[link]',
            'dilation[link]',
            'groups[link]',
        ]
        # Order matters: complex_beta precedes bias, matching the op kernels.
        if is_complex:
            operation.append('complex_beta[post]')
        if has_bias:
            operation.append('bias[link]')
        self._syn_operations.append(operation)
class loadImgs(data.Dataset):
    """Image-only dataset for demo/predict runs: loads each listed path as
    RGB or grayscale, optionally resizes/random-crops in demo mode, and
    returns {'img_in': tensor}.

    FIX: 'predict' mode previously built the ``inputs`` dict but never
    returned it, so __getitem__ returned None; both reachable modes now
    share one transform-and-return path.
    """

    def __init__(self, args, imgin_list, mode='demo'):
        self.imgin_list = imgin_list
        self.args = args
        self.mode = mode
        self.img_loader = gray_loader if self.args.use_gray else rgb_loader
        self.data_list = {'img_in': []}
        # Fixed seed so random crops are reproducible across runs.
        random.seed(141)
        for num_img in range(len(self.imgin_list)):
            self.data_list['img_in'].append(self.imgin_list[num_img])

    def __getitem__(self, index):
        img_in_path = self.data_list['img_in'][index]
        if self.mode == 'demo':
            img_in = self.img_loader(img_in_path)
            if self.args.load_size != 'None':
                (w, h) = img_in.size
                # NOTE(review): resize is hard-coded to 512x512 and ignores
                # the configured load_size value — confirm this is intended.
                img_in = img_in.resize((512, 512))
            if self.args.crop_size != 'None':
                (w, h) = img_in.size
                # crop_size arrives as the string "[h, w]".
                crop_size = self.args.crop_size.strip('[]').split(', ')
                crop_size = [int(item) for item in crop_size]
                (th, tw) = (crop_size[0], crop_size[1])
                x1 = random.randint(0, (w - tw))
                y1 = random.randint(0, (h - th))
                img_in = img_in.crop((x1, y1, (x1 + tw), (y1 + th)))
        elif self.mode == 'val':
            raise NotImplementedError
        elif self.mode == 'predict':
            img_in = self.img_loader(img_in_path)
        else:
            print('Unrecognized mode! Please select among: (demo, val, predict)')
            raise NotImplementedError
        # Only 'demo' and 'predict' reach this point; both return a tensor.
        composed_transform = transforms.Compose([transforms.ToTensor()])
        img_in = composed_transform(img_in)
        return {'img_in': img_in}

    def __len__(self):
        return len(self.data_list['img_in'])
def get_category_info_from_anno(anno_file, with_background=True):
    """Read one category name per line from ``anno_file`` and build the
    (class-id -> category-id, category-id -> name) lookup tables, inserting
    or dropping a leading 'background' entry as requested."""
    with open(anno_file) as f:
        cats = [line.strip() for line in f.readlines()]
    if (cats[0] != 'background') and with_background:
        cats.insert(0, 'background')
    if (cats[0] == 'background') and (not with_background):
        cats = cats[1:]
    # Class ids map onto themselves; names follow list order.
    clsid2catid = {i: i for i in range(len(cats))}
    catid2name = dict(enumerate(cats))
    return (clsid2catid, catid2name)
class TFPegasusModel(metaclass=DummyObject):
    """Import-time placeholder for the TensorFlow Pegasus model.

    Auto-generated dummy: instantiation raises an informative error via
    requires_backends unless the 'tf' backend is installed.
    """
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def data_aug_for_multiple_answers(examples: Batch) -> Union[(Dict, Any)]:
    """Flatten a SQuAD-v2-style batch so each row has at most one answer.

    Answerable rows with N gold answers become N rows (one answer each);
    unanswerable rows get an explicitly empty answers dict; everything else
    is copied through unchanged.

    FIX: the parameter was named ``example`` while the body read
    ``examples``, so every call raised NameError; the parameter is now
    ``examples`` (positional callers, e.g. datasets.map, are unaffected).
    """
    result = {key: [] for key in examples.keys()}

    def update(i, answers=None):
        # Copy row i into result, optionally overriding its answers.
        for key in result.keys():
            if ((key == 'answers') and (answers is not None)):
                result[key].append(answers)
            else:
                result[key].append(examples[key][i])

    for (i, (answers, unanswerable)) in enumerate(zip(examples['answers'], examples['is_impossible'])):
        answerable = (not unanswerable)
        # texts and starts must be parallel, except the -1 "no answer" marker.
        assert ((len(answers['text']) == len(answers['answer_start'])) or (answers['answer_start'][0] == (- 1)))
        if (answerable and (len(answers['text']) > 1)):
            # One output row per gold answer.
            for n_ans in range(len(answers['text'])):
                ans = {'text': [answers['text'][n_ans]], 'answer_start': [answers['answer_start'][n_ans]]}
                update(i, ans)
        elif (not answerable):
            update(i, {'text': [], 'answer_start': []})
        else:
            update(i)
    return result
def load(path, model_class, suffix=''):
    """Rebuild a model from its saved '<path>.config' JSON and
    '<path>.state_dict<suffix>' weight file.

    The vocabularies were serialized as plain attribute dicts, so they are
    restored onto fresh Vocabulary instances before constructing the model.
    """
    with io.open(path + '.config', 'r', encoding='utf8') as f:
        config = json.load(f)
    for voca_key in ('word_voca', 'entity_voca'):
        voca = Vocabulary()
        voca.__dict__ = config[voca_key]
        config[voca_key] = voca
    model = model_class(config)
    model.load_state_dict(torch.load(path + '.state_dict' + suffix))
    return model
def write_to_csv(title, data, target_path, dir_name):
    """Write a header row plus data rows to ``<target_path>/<dir_name>.csv``
    (UTF-8, creating the directory as needed).

    Improvements: race-free directory creation via ``exist_ok=True``,
    ``os.path.join`` instead of manual separator concatenation, one
    ``writerows`` call, and removal of the redundant ``close()`` inside the
    ``with`` block (the context manager already closes the file).
    """
    os.makedirs(target_path, exist_ok=True)
    save_path = os.path.join(target_path, dir_name + '.csv')
    with open(save_path, 'w', encoding='utf-8', newline='') as out_csv:
        csv_writer = csv.writer(out_csv)
        csv_writer.writerow(title)
        csv_writer.writerows(data)
class BaseOptions():
    """Shared command-line options for the face-reconstruction experiments.

    Subclasses (train/test option classes) are expected to set
    ``self.isTrain`` and extend ``initialize``; ``parse`` is the public
    entry point that returns the finalized options namespace.
    """

    def __init__(self, cmd_line=None):
        # `cmd_line` lets callers parse a string instead of sys.argv.
        self.initialized = False
        self.cmd_line = None
        if (cmd_line is not None):
            self.cmd_line = cmd_line.split()

    def initialize(self, parser):
        """Register the base options on ``parser``."""
        parser.add_argument('--name', type=str, default='face_recon', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--vis_batch_nums', type=float, default=1, help='batch nums of images for visulization')
        parser.add_argument('--eval_batch_nums', type=float, default=float('inf'), help='batch nums of images for evaluation')
        parser.add_argument('--use_ddp', type=util.str2bool, nargs='?', const=True, default=True, help='whether use distributed data parallel')
        parser.add_argument('--ddp_port', type=str, default='12355', help='ddp port')
        parser.add_argument('--display_per_batch', type=util.str2bool, nargs='?', const=True, default=True, help='whether use batch to show losses')
        parser.add_argument('--add_image', type=util.str2bool, nargs='?', const=True, default=True, help='whether add image to tensorboard')
        parser.add_argument('--world_size', type=int, default=1, help='batch nums of images for evaluation')
        parser.add_argument('--model', type=str, default='facerecon', help='chooses which model to use.')
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
        self.initialized = True
        return parser

    def gather_options(self):
        """Parse base options, then let the selected model and dataset
        classes register and parse their own additional options."""
        if (not self.initialized):
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        if (self.cmd_line is None):
            (opt, _) = parser.parse_known_args()
        else:
            (opt, _) = parser.parse_known_args(self.cmd_line)
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids
        # Model-specific options; self.isTrain comes from the subclass.
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        if (self.cmd_line is None):
            (opt, _) = parser.parse_known_args()
        else:
            (opt, _) = parser.parse_known_args(self.cmd_line)
        # Dataset-specific options (dataset_mode is registered by the model).
        if opt.dataset_mode:
            dataset_name = opt.dataset_mode
            dataset_option_setter = data.get_option_setter(dataset_name)
            parser = dataset_option_setter(parser, self.isTrain)
        self.parser = parser
        if (self.cmd_line is None):
            return parser.parse_args()
        else:
            return parser.parse_args(self.cmd_line)

    def print_options(self, opt):
        """Print all options (marking non-defaults) and save them to
        [checkpoints_dir]/[name]/[phase]_opt.txt."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        try:
            with open(file_name, 'wt') as opt_file:
                opt_file.write(message)
                opt_file.write('\n')
        except PermissionError as error:
            # Best-effort: keep running even if the options file is read-only.
            print('permission error {}'.format(error))
            pass

    def parse(self):
        """Parse options, derive dependent fields (suffix, gpu list,
        world size, auto-resume epoch), print them, and cache on self.opt."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain
        if opt.suffix:
            suffix = (('_' + opt.suffix.format(**vars(opt))) if (opt.suffix != '') else '')
            opt.name = (opt.name + suffix)
        str_ids = opt.gpu_ids.split(',')
        gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if (id >= 0):
                gpu_ids.append(id)
        opt.world_size = len(gpu_ids)
        # DDP is pointless with a single process.
        if (opt.world_size == 1):
            opt.use_ddp = False
        if (opt.phase != 'test'):
            # Auto-resume when a checkpoint dir with .pth files exists.
            if (opt.pretrained_name is None):
                model_dir = os.path.join(opt.checkpoints_dir, opt.name)
            else:
                model_dir = os.path.join(opt.checkpoints_dir, opt.pretrained_name)
            if os.path.isdir(model_dir):
                model_pths = [i for i in os.listdir(model_dir) if i.endswith('pth')]
                if (os.path.isdir(model_dir) and (len(model_pths) != 0)):
                    opt.continue_train = True
            # NOTE(review): if continue_train is set externally while
            # model_dir does not exist, `model_pths` below is unbound —
            # presumably unreachable in practice; confirm.
            if opt.continue_train:
                if (opt.epoch == 'latest'):
                    # Resume from max epoch found in '<...>_<epoch>.pth' names.
                    epoch_counts = [int(i.split('.')[0].split('_')[(- 1)]) for i in model_pths if ('latest' not in i)]
                    if (len(epoch_counts) != 0):
                        opt.epoch_count = (max(epoch_counts) + 1)
                else:
                    opt.epoch_count = (int(opt.epoch) + 1)
        self.print_options(opt)
        self.opt = opt
        return self.opt
class ResnetV2101(Model):
    """Placeholder registry entry for ResNet-V2-101; not implemented yet."""

    def __init__(self):
        raise NotImplementedError('Resnet_V2_101 is not supported yet')

    def model_url(self) -> str:
        # Unreachable until __init__ stops raising; kept for the Model interface.
        pass

    def package_name(self) -> str:
        # Same: interface stub only.
        pass
class RLAv3_Bottleneck(nn.Module):
    """ResNet bottleneck block with a recurrent-layer-aggregation (RLA)
    hidden channel ``h`` threaded alongside the main path, optionally
    followed by SE or ECA channel attention.

    forward(x, h) returns (out, y, h, identity) where ``y`` is the
    post-attention, pre-residual feature handed to the RLA recurrence and
    ``h`` is the (possibly average-pooled) hidden state.
    """
    expansion = 4  # bottleneck output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, rla_channel=32, SE=False, ECA_size=None, groups=1, base_width=64, dilation=1, norm_layer=None, reduction=16):
        super(RLAv3_Bottleneck, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        width = (int((planes * (base_width / 64.0))) * groups)
        # conv1 consumes the input concatenated with the RLA hidden channel.
        self.conv1 = conv1x1((inplanes + rla_channel), width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, (planes * self.expansion))
        self.bn3 = norm_layer((planes * self.expansion))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # When the block downsamples spatially, pool h to keep shapes aligned.
        self.averagePooling = None
        if ((downsample is not None) and (stride != 1)):
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
        # Optional channel-attention modules (mutually independent flags).
        self.se = None
        if SE:
            self.se = SELayer((planes * self.expansion), reduction)
        self.eca = None
        if (ECA_size != None):
            self.eca = eca_layer((planes * self.expansion), int(ECA_size))

    def forward(self, x, h):
        identity = x
        # Main path sees [x ; h] concatenated along channels.
        x = torch.cat((x, h), dim=1)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.se != None):
            out = self.se(out)
        if (self.eca != None):
            out = self.eca(out)
        # y is captured before the residual add, for the RLA recurrence.
        y = out
        if (self.downsample is not None):
            identity = self.downsample(identity)
        if (self.averagePooling is not None):
            h = self.averagePooling(h)
        out += identity
        out = self.relu(out)
        return (out, y, h, identity)
class LARS(Regularizer):
    """Layer-wise Adaptive Rate Scaling (LARS) gradient pre-conditioner.

    Before each optimizer step, adds weight decay to every filtered
    parameter's gradient and rescales the gradient so that its norm is
    proportional to the parameter norm: ``scale = value * ||w|| / ||grad||``
    (optionally clamped to [min_scale, max_scale]).
    """

    def __init__(self, model, value=0.01, weight_decay=0, dim=None, p=2, min_scale=None, max_scale=None, filter={'parameter_name': is_not_bias, 'module': is_not_bn}, **kwargs):
        # NOTE: the mutable default `filter` dict is shared across instances;
        # it is only read here, but do not mutate it.
        super(LARS, self).__init__(model, value, filter=filter, **kwargs)
        self.weight_decay = weight_decay
        self.dim = dim  # optional axis for per-slice norms instead of a global norm
        self.p = p      # norm order
        self.min_scale = min_scale
        self.max_scale = max_scale

    def pre_step(self):
        """Rescale each filtered parameter's gradient in place (no autograd)."""
        with torch.no_grad():
            for (_, param) in self._named_parameters:
                # Decoupled weight decay: grad += weight_decay * param.
                # Fix: use the keyword `alpha` form instead of the deprecated
                # `add_(scalar, tensor)` positional overload (numerically identical).
                param.grad.add_(param, alpha=self.weight_decay)
                if (self.dim is not None):
                    norm = _norm(param, dim=self.dim, p=self.p)
                    grad_norm = _norm(param.grad, dim=self.dim, p=self.p)
                else:
                    norm = param.norm(p=self.p)
                    grad_norm = param.grad.norm(p=self.p)
                # NOTE(review): a zero grad_norm yields an inf/nan scale —
                # presumably upstream guarantees non-zero gradients here.
                scale = ((self.value * norm) / grad_norm)
                if ((self.min_scale is not None) or (self.max_scale is not None)):
                    scale.clamp_(min=self.min_scale, max=self.max_scale)
                param.grad.mul_(scale)
def create_dataset(dataFrame, columns, filename, save_path='../data/raw'):
    """Extract `columns` from `dataFrame`, clean it, and persist it as CSV.

    NaN-containing rows and duplicates are dropped, the CSV is written, then
    `remove_strange_mols` sanitizes the file in place and the result is
    re-read. Returns (cleaned DataFrame, row count before mol filtering).
    """
    target_path = os.path.join(save_path, filename)
    cleaned = dataFrame[columns].dropna().drop_duplicates()
    initial_size = cleaned.shape[0]
    cleaned.to_csv(target_path, index=False)
    # In-place molecule sanitization: source and destination are the same file.
    remove_strange_mols(target_path, target_path)
    return (pd.read_csv(target_path), initial_size)
class DDPG(object):
    """Deep Deterministic Policy Gradient agent (TF1 graph-construction style).

    Builds the actor/critic graphs, their slowly-updated target copies,
    optional observation/return normalization, optional parameter-space
    exploration noise, and all training / statistics ops.
    """

    def __init__(self, actor, critic, memory, observation_shape, action_shape, param_noise=None, action_noise=None, gamma=0.99, tau=0.001, normalize_returns=False, enable_popart=False, normalize_observations=True, batch_size=128, observation_range=((- 5.0), 5.0), action_range=((- 1.0), 1.0), return_range=((- np.inf), np.inf), adaptive_param_noise=True, adaptive_param_noise_policy_threshold=0.1, critic_l2_reg=0.0, actor_lr=0.0001, critic_lr=0.001, clip_norm=None, reward_scale=1.0):
        # Graph inputs (placeholders fed at step/train time).
        self.obs0 = tf.placeholder(tf.float32, shape=((None,) + observation_shape), name='obs0')
        self.obs1 = tf.placeholder(tf.float32, shape=((None,) + observation_shape), name='obs1')
        self.terminals1 = tf.placeholder(tf.float32, shape=(None, 1), name='terminals1')
        self.rewards = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
        self.actions = tf.placeholder(tf.float32, shape=((None,) + action_shape), name='actions')
        self.critic_target = tf.placeholder(tf.float32, shape=(None, 1), name='critic_target')
        self.param_noise_stddev = tf.placeholder(tf.float32, shape=(), name='param_noise_stddev')
        # Hyper-parameters and collaborators.
        self.gamma = gamma
        self.tau = tau
        self.memory = memory
        self.normalize_observations = normalize_observations
        self.normalize_returns = normalize_returns
        self.action_noise = action_noise
        self.param_noise = param_noise
        self.action_range = action_range
        self.return_range = return_range
        self.observation_range = observation_range
        self.critic = critic
        self.actor = actor
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.clip_norm = clip_norm
        self.enable_popart = enable_popart
        self.reward_scale = reward_scale
        self.batch_size = batch_size
        self.stats_sample = None
        self.critic_l2_reg = critic_l2_reg
        # Running mean/std of observations, shared by the obs0 and obs1 paths.
        if self.normalize_observations:
            with tf.variable_scope('obs_rms'):
                self.obs_rms = RunningMeanStd(shape=observation_shape)
        else:
            self.obs_rms = None
        normalized_obs0 = tf.clip_by_value(normalize(self.obs0, self.obs_rms), self.observation_range[0], self.observation_range[1])
        normalized_obs1 = tf.clip_by_value(normalize(self.obs1, self.obs_rms), self.observation_range[0], self.observation_range[1])
        # Running mean/std of returns (used to (de)normalize critic outputs).
        if self.normalize_returns:
            with tf.variable_scope('ret_rms'):
                self.ret_rms = RunningMeanStd()
        else:
            self.ret_rms = None
        # Target networks: copies of actor/critic moved slowly toward the mains.
        target_actor = copy(actor)
        target_actor.name = 'target_actor'
        self.target_actor = target_actor
        target_critic = copy(critic)
        target_critic.name = 'target_critic'
        self.target_critic = target_critic
        # Main network outputs. Critic values live in normalized return space;
        # the *_tf versions are clipped and denormalized for external use.
        self.actor_tf = actor(normalized_obs0)
        self.normalized_critic_tf = critic(normalized_obs0, self.actions)
        self.critic_tf = denormalize(tf.clip_by_value(self.normalized_critic_tf, self.return_range[0], self.return_range[1]), self.ret_rms)
        self.normalized_critic_with_actor_tf = critic(normalized_obs0, self.actor_tf, reuse=True)
        self.critic_with_actor_tf = denormalize(tf.clip_by_value(self.normalized_critic_with_actor_tf, self.return_range[0], self.return_range[1]), self.ret_rms)
        # One-step TD target computed from the target networks.
        Q_obs1 = denormalize(target_critic(normalized_obs1, target_actor(normalized_obs1)), self.ret_rms)
        self.target_Q = (self.rewards + (((1.0 - self.terminals1) * gamma) * Q_obs1))
        # Auxiliary graph pieces (optimizers, noise, stats, target updates).
        if (self.param_noise is not None):
            self.setup_param_noise(normalized_obs0)
        self.setup_actor_optimizer()
        self.setup_critic_optimizer()
        if (self.normalize_returns and self.enable_popart):
            self.setup_popart()
        self.setup_stats()
        self.setup_target_network_updates()
        self.initial_state = None

    def setup_target_network_updates(self):
        """Create hard-copy (init) and polyak-averaged (soft) target updates."""
        (actor_init_updates, actor_soft_updates) = get_target_updates(self.actor.vars, self.target_actor.vars, self.tau)
        (critic_init_updates, critic_soft_updates) = get_target_updates(self.critic.vars, self.target_critic.vars, self.tau)
        self.target_init_updates = [actor_init_updates, critic_init_updates]
        self.target_soft_updates = [actor_soft_updates, critic_soft_updates]

    def setup_param_noise(self, normalized_obs0):
        """Build the perturbed actor used for exploration plus a second
        perturbed copy used only to measure how far the noise moves the policy."""
        assert (self.param_noise is not None)
        param_noise_actor = copy(self.actor)
        param_noise_actor.name = 'param_noise_actor'
        self.perturbed_actor_tf = param_noise_actor(normalized_obs0)
        logger.info('setting up param noise')
        self.perturb_policy_ops = get_perturbed_actor_updates(self.actor, param_noise_actor, self.param_noise_stddev)
        adaptive_param_noise_actor = copy(self.actor)
        adaptive_param_noise_actor.name = 'adaptive_param_noise_actor'
        adaptive_actor_tf = adaptive_param_noise_actor(normalized_obs0)
        self.perturb_adaptive_policy_ops = get_perturbed_actor_updates(self.actor, adaptive_param_noise_actor, self.param_noise_stddev)
        # RMS distance between unperturbed and perturbed actions.
        self.adaptive_policy_distance = tf.sqrt(tf.reduce_mean(tf.square((self.actor_tf - adaptive_actor_tf))))

    def setup_actor_optimizer(self):
        """Actor ascends Q(s, pi(s)); gradients are flattened for MpiAdam."""
        logger.info('setting up actor optimizer')
        self.actor_loss = (- tf.reduce_mean(self.critic_with_actor_tf))
        actor_shapes = [var.get_shape().as_list() for var in self.actor.trainable_vars]
        actor_nb_params = sum([reduce((lambda x, y: (x * y)), shape) for shape in actor_shapes])
        logger.info(' actor shapes: {}'.format(actor_shapes))
        logger.info(' actor params: {}'.format(actor_nb_params))
        self.actor_grads = U.flatgrad(self.actor_loss, self.actor.trainable_vars, clip_norm=self.clip_norm)
        self.actor_optimizer = MpiAdam(var_list=self.actor.trainable_vars, beta1=0.9, beta2=0.999, epsilon=1e-08)

    def setup_critic_optimizer(self):
        """Critic regresses toward the (normalized, clipped) TD target, with
        optional L2 regularization of hidden-layer kernel weights."""
        logger.info('setting up critic optimizer')
        normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1])
        self.critic_loss = tf.reduce_mean(tf.square((self.normalized_critic_tf - normalized_critic_target_tf)))
        if (self.critic_l2_reg > 0.0):
            # Regularize kernels only, and skip the output layer.
            critic_reg_vars = [var for var in self.critic.trainable_vars if (('kernel' in var.name) and ('output' not in var.name))]
            for var in critic_reg_vars:
                logger.info(' regularizing: {}'.format(var.name))
            logger.info(' applying l2 regularization with {}'.format(self.critic_l2_reg))
            critic_reg = tc.layers.apply_regularization(tc.layers.l2_regularizer(self.critic_l2_reg), weights_list=critic_reg_vars)
            self.critic_loss += critic_reg
        critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
        critic_nb_params = sum([reduce((lambda x, y: (x * y)), shape) for shape in critic_shapes])
        logger.info(' critic shapes: {}'.format(critic_shapes))
        logger.info(' critic params: {}'.format(critic_nb_params))
        self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm)
        self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars, beta1=0.9, beta2=0.999, epsilon=1e-08)

    def setup_popart(self):
        """Pop-Art style output renormalization: rewrite the critic (and target
        critic) output layer so its denormalized predictions are preserved when
        ret_rms' mean/std change."""
        self.old_std = tf.placeholder(tf.float32, shape=[1], name='old_std')
        new_std = self.ret_rms.std
        self.old_mean = tf.placeholder(tf.float32, shape=[1], name='old_mean')
        new_mean = self.ret_rms.mean
        self.renormalize_Q_outputs_op = []
        for vs in [self.critic.output_vars, self.target_critic.output_vars]:
            # Expect exactly a (kernel, bias) pair with scalar output width.
            assert (len(vs) == 2)
            (M, b) = vs
            assert ('kernel' in M.name)
            assert ('bias' in b.name)
            assert (M.get_shape()[(- 1)] == 1)
            assert (b.get_shape()[(- 1)] == 1)
            self.renormalize_Q_outputs_op += [M.assign(((M * self.old_std) / new_std))]
            self.renormalize_Q_outputs_op += [b.assign(((((b * self.old_std) + self.old_mean) - new_mean) / new_std))]

    def setup_stats(self):
        """Collect the scalar diagnostic ops reported by get_stats()."""
        ops = []
        names = []
        if self.normalize_returns:
            ops += [self.ret_rms.mean, self.ret_rms.std]
            names += ['ret_rms_mean', 'ret_rms_std']
        if self.normalize_observations:
            ops += [tf.reduce_mean(self.obs_rms.mean), tf.reduce_mean(self.obs_rms.std)]
            names += ['obs_rms_mean', 'obs_rms_std']
        ops += [tf.reduce_mean(self.critic_tf)]
        names += ['reference_Q_mean']
        ops += [reduce_std(self.critic_tf)]
        names += ['reference_Q_std']
        ops += [tf.reduce_mean(self.critic_with_actor_tf)]
        names += ['reference_actor_Q_mean']
        ops += [reduce_std(self.critic_with_actor_tf)]
        names += ['reference_actor_Q_std']
        ops += [tf.reduce_mean(self.actor_tf)]
        names += ['reference_action_mean']
        ops += [reduce_std(self.actor_tf)]
        names += ['reference_action_std']
        if self.param_noise:
            ops += [tf.reduce_mean(self.perturbed_actor_tf)]
            names += ['reference_perturbed_action_mean']
            ops += [reduce_std(self.perturbed_actor_tf)]
            names += ['reference_perturbed_action_std']
        self.stats_ops = ops
        self.stats_names = names

    def step(self, obs, apply_noise=True, compute_Q=True):
        """Pick an action for one observation; optionally add exploration
        noise and return the critic's Q estimate alongside it."""
        if ((self.param_noise is not None) and apply_noise):
            actor_tf = self.perturbed_actor_tf
        else:
            actor_tf = self.actor_tf
        feed_dict = {self.obs0: U.adjust_shape(self.obs0, [obs])}
        if compute_Q:
            (action, q) = self.sess.run([actor_tf, self.critic_with_actor_tf], feed_dict=feed_dict)
        else:
            action = self.sess.run(actor_tf, feed_dict=feed_dict)
            q = None
        action = action.flatten()
        if ((self.action_noise is not None) and apply_noise):
            noise = self.action_noise()
            assert (noise.shape == action.shape)
            action += noise
        action = np.clip(action, self.action_range[0], self.action_range[1])
        # Trailing Nones keep the (action, q, state, extra) interface shape.
        return (action, q, None, None)

    def store_transition(self, obs0, action, reward, obs1, terminal1):
        """Append one transition to replay memory (reward is scaled first)."""
        reward *= self.reward_scale
        self.memory.append(obs0, action, reward, obs1, terminal1)
        if self.normalize_observations:
            self.obs_rms.update(np.array([obs0]))

    def train(self):
        """Sample a replay batch, compute TD targets (renormalizing the critic
        outputs first when Pop-Art is on) and apply one actor and one critic
        gradient update. Returns (critic_loss, actor_loss)."""
        batch = self.memory.sample(batch_size=self.batch_size)
        if (self.normalize_returns and self.enable_popart):
            (old_mean, old_std, target_Q) = self.sess.run([self.ret_rms.mean, self.ret_rms.std, self.target_Q], feed_dict={self.obs1: batch['obs1'], self.rewards: batch['rewards'], self.terminals1: batch['terminals1'].astype('float32')})
            self.ret_rms.update(target_Q.flatten())
            self.sess.run(self.renormalize_Q_outputs_op, feed_dict={self.old_std: np.array([old_std]), self.old_mean: np.array([old_mean])})
        else:
            target_Q = self.sess.run(self.target_Q, feed_dict={self.obs1: batch['obs1'], self.rewards: batch['rewards'], self.terminals1: batch['terminals1'].astype('float32')})
        ops = [self.actor_grads, self.actor_loss, self.critic_grads, self.critic_loss]
        (actor_grads, actor_loss, critic_grads, critic_loss) = self.sess.run(ops, feed_dict={self.obs0: batch['obs0'], self.actions: batch['actions'], self.critic_target: target_Q})
        self.actor_optimizer.update(actor_grads, stepsize=self.actor_lr)
        self.critic_optimizer.update(critic_grads, stepsize=self.critic_lr)
        return (critic_loss, actor_loss)

    def initialize(self, sess):
        """Initialize variables, sync the MPI optimizers across workers and
        hard-copy the main networks into the targets."""
        self.sess = sess
        self.sess.run(tf.global_variables_initializer())
        self.actor_optimizer.sync()
        self.critic_optimizer.sync()
        self.sess.run(self.target_init_updates)

    def update_target_net(self):
        """Polyak-average the target networks toward the main networks."""
        self.sess.run(self.target_soft_updates)

    def get_stats(self):
        """Evaluate the diagnostic ops on one fixed, cached batch so that
        successive calls are comparable."""
        if (self.stats_sample is None):
            self.stats_sample = self.memory.sample(batch_size=self.batch_size)
        values = self.sess.run(self.stats_ops, feed_dict={self.obs0: self.stats_sample['obs0'], self.actions: self.stats_sample['actions']})
        names = self.stats_names[:]
        assert (len(names) == len(values))
        stats = dict(zip(names, values))
        if (self.param_noise is not None):
            stats = {**stats, **self.param_noise.get_stats()}
        return stats

    def adapt_param_noise(self):
        """Perturb the adaptive actor copy, measure the MPI-averaged policy
        distance and let the noise schedule adapt its stddev."""
        if (self.param_noise is None):
            return 0.0
        batch = self.memory.sample(batch_size=self.batch_size)
        self.sess.run(self.perturb_adaptive_policy_ops, feed_dict={self.param_noise_stddev: self.param_noise.current_stddev})
        distance = self.sess.run(self.adaptive_policy_distance, feed_dict={self.obs0: batch['obs0'], self.param_noise_stddev: self.param_noise.current_stddev})
        mean_distance = (MPI.COMM_WORLD.allreduce(distance, op=MPI.SUM) / MPI.COMM_WORLD.Get_size())
        self.param_noise.adapt(mean_distance)
        return mean_distance

    def reset(self):
        """Re-draw exploration noise at episode boundaries."""
        if (self.action_noise is not None):
            self.action_noise.reset()
        if (self.param_noise is not None):
            self.sess.run(self.perturb_policy_ops, feed_dict={self.param_noise_stddev: self.param_noise.current_stddev})
def multi_gpu_launcher(commands):
    """Run shell commands round-robin across all visible CUDA GPUs.

    Polls each GPU slot once per second; whenever a slot's previous process
    has exited (or never started), launches the next queued command pinned to
    that GPU via CUDA_VISIBLE_DEVICES, then waits for all processes to finish.
    """
    print('WARNING: using experimental multi_gpu_launcher.')
    n_gpus = torch.cuda.device_count()
    # Bug fix: with zero GPUs the scheduling loop below never drains
    # `commands` and spins forever — fail fast instead.
    if n_gpus == 0:
        raise RuntimeError('multi_gpu_launcher requires at least one CUDA device.')
    procs_by_gpu = [None] * n_gpus
    while commands:
        for gpu_idx in range(n_gpus):
            proc = procs_by_gpu[gpu_idx]
            if proc is None or proc.poll() is not None:
                # Slot is free: launch the next command on this GPU.
                cmd = commands.pop(0)
                # NOTE: cmd is interpolated into a shell line — pass only
                # trusted command strings.
                new_proc = subprocess.Popen(f'CUDA_VISIBLE_DEVICES={gpu_idx} {cmd}', shell=True)
                procs_by_gpu[gpu_idx] = new_proc
                break
        time.sleep(1)
    # Drain: block until every still-running process exits.
    for p in procs_by_gpu:
        if p is not None:
            p.wait()
class BlipDiffusionControlNetPipeline(DiffusionPipeline):
    """Subject-driven text-to-image pipeline (BLIP-Diffusion) with ControlNet
    spatial conditioning.

    The Q-Former encodes a reference subject image into query embeddings that
    are spliced into the text prompt; the ControlNet injects residuals from a
    conditioning image into the UNet during denoising.
    """

    # Order used when offloading sub-models to CPU to save memory.
    model_cpu_offload_seq = 'qformer->text_encoder->unet->vae'

    def __init__(self, tokenizer: CLIPTokenizer, text_encoder: ContextCLIPTextModel, vae: AutoencoderKL, unet: UNet2DConditionModel, scheduler: PNDMScheduler, qformer: Blip2QFormerModel, controlnet: ControlNetModel, image_processor: BlipImageProcessor, ctx_begin_pos: int=2, mean: List[float]=None, std: List[float]=None):
        super().__init__()
        self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, controlnet=controlnet, image_processor=image_processor)
        # ctx_begin_pos: token index where the Q-Former context embeddings are
        # inserted into the text-encoder input; mean/std normalize the
        # reference image.
        self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std)

    def get_query_embeddings(self, input_image, src_subject):
        """Encode the reference image plus source-subject text with the Q-Former."""
        return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False)

    def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20):
        """Prefix each prompt with its target subject and repeat it
        int(prompt_strength * prompt_reps) times, comma-joined."""
        rv = []
        for (prompt, tgt_subject) in zip(prompts, tgt_subjects):
            prompt = f'a {tgt_subject} {prompt.strip()}'
            rv.append(', '.join(([prompt] * int((prompt_strength * prompt_reps)))))
        return rv

    def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None):
        """Draw (or move) the initial latents and scale them by the
        scheduler's initial noise sigma."""
        shape = (batch_size, num_channels, height, width)
        if (isinstance(generator, list) and (len(generator) != batch_size)):
            raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.')
        if (latents is None):
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device=device, dtype=dtype)
        latents = (latents * self.scheduler.init_noise_sigma)
        return latents

    def encode_prompt(self, query_embeds, prompt, device=None):
        """Tokenize the prompt — leaving room for the Q-Former query tokens —
        and run the context-aware text encoder."""
        device = (device or self._execution_device)
        max_len = self.text_encoder.text_model.config.max_position_embeddings
        max_len -= self.qformer.config.num_query_tokens
        tokenized_prompt = self.tokenizer(prompt, padding='max_length', truncation=True, max_length=max_len, return_tensors='pt').to(device)
        batch_size = query_embeds.shape[0]
        ctx_begin_pos = ([self.config.ctx_begin_pos] * batch_size)
        text_embeddings = self.text_encoder(input_ids=tokenized_prompt.input_ids, ctx_embeddings=query_embeds, ctx_begin_pos=ctx_begin_pos)[0]
        return text_embeddings

    def prepare_control_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False):
        """Resize/rescale the conditioning image, tile it to the batch and
        duplicate it when classifier-free guidance doubles the batch."""
        image = self.image_processor.preprocess(image, size={'width': width, 'height': height}, do_rescale=True, do_center_crop=False, do_normalize=False, return_tensors='pt')['pixel_values'].to(device)
        image_batch_size = image.shape[0]
        if (image_batch_size == 1):
            repeat_by = batch_size
        else:
            repeat_by = num_images_per_prompt
        image = image.repeat_interleave(repeat_by, dim=0)
        image = image.to(device=device, dtype=dtype)
        if do_classifier_free_guidance:
            image = torch.cat(([image] * 2))
        return image

    # NOTE(review): the two bare calls below look like garbled decorators —
    # most likely `@torch.no_grad()` and
    # `@replace_example_docstring(EXAMPLE_DOC_STRING)`. As written they
    # execute (and would fail) at class-creation time; confirm against the
    # original diffusers source.
    _grad()
    _example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt: List[str], reference_image: PIL.Image.Image, condtioning_image: PIL.Image.Image, source_subject_category: List[str], target_subject_category: List[str], latents: Optional[torch.FloatTensor]=None, guidance_scale: float=7.5, height: int=512, width: int=512, num_inference_steps: int=50, generator: Optional[Union[(torch.Generator, List[torch.Generator])]]=None, neg_prompt: Optional[str]='', prompt_strength: float=1.0, prompt_reps: int=20, output_type: Optional[str]='pil', return_dict: bool=True):
        """Generate images of the target subject following `prompt` and the
        spatial conditioning image; returns an ImagePipelineOutput, or a
        one-tuple of images when `return_dict` is False."""
        device = self._execution_device
        # Normalize the reference (subject) image for the Q-Former.
        reference_image = self.image_processor.preprocess(reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors='pt')['pixel_values']
        reference_image = reference_image.to(device)
        # Accept bare strings for the text arguments.
        if isinstance(prompt, str):
            prompt = [prompt]
        if isinstance(source_subject_category, str):
            source_subject_category = [source_subject_category]
        if isinstance(target_subject_category, str):
            target_subject_category = [target_subject_category]
        batch_size = len(prompt)
        prompt = self._build_prompt(prompts=prompt, tgt_subjects=target_subject_category, prompt_strength=prompt_strength, prompt_reps=prompt_reps)
        query_embeds = self.get_query_embeddings(reference_image, source_subject_category)
        text_embeddings = self.encode_prompt(query_embeds, prompt, device)
        do_classifier_free_guidance = (guidance_scale > 1.0)
        if do_classifier_free_guidance:
            # Unconditional (negative-prompt) embeddings for guidance.
            max_length = self.text_encoder.text_model.config.max_position_embeddings
            uncond_input = self.tokenizer(([neg_prompt] * batch_size), padding='max_length', max_length=max_length, return_tensors='pt')
            uncond_embeddings = self.text_encoder(input_ids=uncond_input.input_ids.to(device), ctx_embeddings=None)[0]
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # Latent spatial size = image size / UNet downscale factor.
        scale_down_factor = (2 ** (len(self.unet.config.block_out_channels) - 1))
        latents = self.prepare_latents(batch_size=batch_size, num_channels=self.unet.config.in_channels, height=(height // scale_down_factor), width=(width // scale_down_factor), generator=generator, latents=latents, dtype=self.unet.dtype, device=device)
        extra_set_kwargs = {}
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        cond_image = self.prepare_control_image(image=condtioning_image, width=width, height=height, batch_size=batch_size, num_images_per_prompt=1, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance)
        # Denoising loop: ControlNet residuals feed the UNet at every step.
        for (i, t) in enumerate(self.progress_bar(self.scheduler.timesteps)):
            do_classifier_free_guidance = (guidance_scale > 1.0)
            latent_model_input = (torch.cat(([latents] * 2)) if do_classifier_free_guidance else latents)
            (down_block_res_samples, mid_block_res_sample) = self.controlnet(latent_model_input, t, encoder_hidden_states=text_embeddings, controlnet_cond=cond_image, return_dict=False)
            noise_pred = self.unet(latent_model_input, timestep=t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample)['sample']
            if do_classifier_free_guidance:
                # Guidance: extrapolate away from the unconditional prediction.
                (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2)
                noise_pred = (noise_pred_uncond + (guidance_scale * (noise_pred_text - noise_pred_uncond)))
            latents = self.scheduler.step(noise_pred, t, latents)['prev_sample']
        # Decode the final latents and convert to the requested output type.
        image = self.vae.decode((latents / self.vae.config.scaling_factor), return_dict=False)[0]
        image = self.image_processor.postprocess(image, output_type=output_type)
        self.maybe_free_model_hooks()
        if (not return_dict):
            return (image,)
        return ImagePipelineOutput(images=image)
def block_optimizer(args, auxiliary_model, model_name, blocks_lr):
    """Build an Adam optimizer with a distinct learning rate per GAT layer.

    Pairs each entry of `model.gat_layers` with the corresponding entry of
    `blocks_lr` and stores the optimizer under
    `auxiliary_model[model_name]['optimizer']`.

    Generalizes the previous hard-coded five-entry list: any number of
    (layer, lr) pairs is supported (truncated to the shorter sequence),
    which is identical behavior for the original 5/5 case.
    """
    model = auxiliary_model[model_name]['model']
    param_groups = [
        {'params': layer.parameters(), 'lr': lr}
        for layer, lr in zip(model.gat_layers, blocks_lr)
    ]
    # args.lr is only the default for groups without an explicit 'lr'
    # (none here); weight decay applies to every group.
    auxiliary_model[model_name]['optimizer'] = torch.optim.Adam(
        param_groups, lr=args.lr, weight_decay=args.weight_decay)
_module()
class CrossEntropyLoss(nn.Module):
    """Cross-entropy loss supporting plain, sigmoid (binary) and mask variants.

    NOTE(review): the bare `_module()` call above this class looks like a
    garbled registry decorator (e.g. `@LOSSES.register_module()`); confirm
    against the original source.
    """

    def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean', class_weight=None, loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        # The sigmoid and mask variants are mutually exclusive.
        assert ((use_sigmoid is False) or (use_mask is False))
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = get_class_weight(class_weight)
        # Select the criterion once, at construction time.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the weighted loss; `reduction_override` takes precedence
        over the configured reduction."""
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        # Bug fix: class weights were previously applied only when cls_score
        # had exactly 2 classes (`cls_score.shape[1] == 2`), silently ignoring
        # them for multi-class inputs. Apply them whenever configured.
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(self.class_weight)
        else:
            class_weight = None
        loss_cls = (self.loss_weight * self.cls_criterion(cls_score, label, weight, class_weight=class_weight, reduction=reduction, avg_factor=avg_factor, **kwargs))
        return loss_cls
def read_image(filepath: str, mode: str='RGB') -> 'Image.Image':
    """Open the image at `filepath` and convert it to the given PIL mode.

    Returns a PIL image. (Fixes the previous `-> np.array` annotation, which
    was wrong: no array conversion happens here; the annotation is a string
    to avoid a def-time name lookup.)

    Raises:
        ValueError: if `filepath` is not an existing regular file.
    """
    if not os.path.isfile(filepath):
        raise ValueError(f'Invalid file "{filepath}".')
    return Image.open(filepath).convert(mode)
def to_dict_helper(obj):
    """Serialize a document object's fields into a plain dict.

    Skips the 'id' field and coerces each value according to its declared
    field type (StringField -> str, FloatField -> float, IntField -> int,
    BooleanField -> bool); anything else is passed through unchanged.
    """
    return_data = []
    for field_name in obj._fields:
        if field_name in ('id',):
            continue
        data = obj._data[field_name]
        field = obj._fields[field_name]
        if isinstance(field, StringField):
            return_data.append((field_name, str(data)))
        elif isinstance(field, FloatField):
            return_data.append((field_name, float(data)))
        elif isinstance(field, IntField):
            return_data.append((field_name, int(data)))
        elif isinstance(field, BooleanField):
            return_data.append((field_name, bool(data)))
        elif isinstance(field, DateTimeField):
            # Bug fix: the original called list.append with two positional
            # arguments (TypeError) and `datetime.datetime.strptime(data)`
            # without its required format string. Datetime field values are
            # passed through as-is.
            # TODO(review): if `data` can be a string here, parse it with an
            # explicit format instead.
            return_data.append((field_name, data))
        else:
            return_data.append((field_name, data))
    return dict(return_data)
class MetricType(Enum):
    """Enumeration of metric payload kinds (key/value vs. plain streams, and
    numeric / categorical / mixed value types).

    NOTE: the numeric values are part of the serialized contract — do not
    renumber or reorder members.
    """
    KeyValue = 0
    KeyValue_Numeric = 1
    KeyValue_Categorical = 2
    KeyValue_Mixed = 3
    Numeric = 4
    Categorical = 5
    Mixed = 6
    Unknown = 7
    # No data present for the metric.
    Empty = 8
def eval(args, model=None) -> SummarizationModule:
    """Run test-set evaluation for a summarization/translation module and
    write beam-search predictions to disk.

    NOTE(review): this function shadows the builtin `eval`; the name is kept
    for caller compatibility.
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    # Refuse to reuse a non-trivially-populated output dir when also training.
    if ((len(os.listdir(args.output_dir)) > 3) and args.do_train):
        raise ValueError('Output directory ({}) already exists and is not empty.'.format(args.output_dir))
    # Instantiate the model for the requested task / tuning mode when the
    # caller did not supply one.
    if (model is None):
        if ('summarization' in args.task_mode):
            if (args.tuning_mode == 'prefixtune'):
                model = PrefixSummarizationModule(args)
            elif (args.tuning_mode == 'finetune'):
                model: SummarizationModule = SummarizationModule(args)
            else:
                assert False, 'invalid tuning_mode'
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    with torch.no_grad():
        model.eval()
        print(dataset)
        model = model.cuda()
        print(model.device)
        data_loader = model.test_dataloader()
        # Drive the LightningModule test loop manually, batch by batch.
        out_lst = []
        for (batch_idx, batch) in enumerate(data_loader):
            batch = model.transfer_batch_to_device(batch, model.device)
            out = model.test_step(batch, batch_idx)
            out_lst.append(out)
            print(out['preds'])
        result = model.test_epoch_end(out_lst)
        for (k, v) in result.items():
            if (k != 'preds'):
                print(k, v)
    # Predictions are written next to the (prefix) model checkpoint directory.
    out_1 = (args.model_name_or_path if (args.tuning_mode == 'finetune') else args.prefixModel_name_or_path)
    out_path = os.path.join(out_1, 'test_beam_{}'.format(args.length_penalty))
    print('writing the test results to ', out_path)
    with open(out_path, 'w') as f:
        for preds in result['preds']:
            print(preds, file=f)
    # Echo the scalar metrics once more after writing.
    for (k, v) in result.items():
        if (k != 'preds'):
            print(k, v)
def calcELStaeckel(R, vR, vT, z, vz, pot, vc=1.0, ro=1.0):
    """Return (E, Lz): the energy and z-angular momentum of a phase-space point.

    E is the potential evaluated at (R, z) plus the kinetic energy of the
    three velocity components; Lz = R * vT. `vc` and `ro` are accepted for
    interface compatibility and are not used here.
    """
    # Accumulate in the same left-to-right order as the original expression
    # so floating-point results are bit-identical.
    energy = _evaluatePotentials(pot, R, z)
    energy = energy + (vR ** 2.0) / 2.0
    energy = energy + (vT ** 2.0) / 2.0
    energy = energy + (vz ** 2.0) / 2.0
    return (energy, R * vT)
class ResNet(Convnet):
    """Convolutional network with a residual middle stage.

    Layout: conv stack -> residual blocks -> conv stack -> flatten -> fully
    connected stack. `forward` can optionally return every intermediate
    activation of each stage.
    """

    def create_layers(self, shape, conv_before_args=None, res_args=None, conv_after_args=None, fc_args=None):
        """Build all stages; `shape` is (dim_x, dim_y, channels)."""
        (dim_x, dim_y, dim_in) = shape
        shape = (dim_x, dim_y, dim_in)
        (self.conv_before_layers, self.conv_before_shape) = self.create_conv_layers(shape, conv_before_args)
        (self.res_layers, self.res_shape) = self.create_res_layers(self.conv_before_shape, res_args)
        (self.conv_after_layers, self.conv_after_shape) = self.create_conv_layers(self.res_shape, conv_after_args)
        (dim_x, dim_y, dim_out) = self.conv_after_shape
        # Flatten the final conv feature map for the fully connected stack.
        dim_r = ((dim_x * dim_y) * dim_out)
        self.reshape = View((- 1), dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)

    def create_res_layers(self, shape, block_args=None):
        """Stack groups of ResBlocks; each entry of `block_args` is
        (conv_args, n_blocks). Returns (layers, output shape)."""
        res_layers = nn.Sequential()
        block_args = (block_args or [])
        for (i, (conv_args, n_blocks)) in enumerate(block_args):
            block = ResBlock(shape, conv_args=conv_args)
            res_layers.add_module('block_{}_0'.format(i), block)
            for j in range(1, n_blocks):
                shape = block.conv_shape
                block = ResBlock(shape, conv_args=conv_args)
                res_layers.add_module('block_{}_{}'.format(i, j), block)
            shape = block.conv_shape
        return (res_layers, shape)

    def _run_stage(self, layers, x, return_full_list):
        """Apply `layers` to `x`.

        Returns (outputs, x): with `return_full_list`, the list of per-layer
        outputs plus the final activation; otherwise the stage output twice.
        """
        if return_full_list:
            outputs = []
            for layer in layers:
                x = layer(x)
                outputs.append(x)
            return (outputs, x)
        x = layers(x)
        return (x, x)

    def forward(self, x: torch.Tensor, return_full_list=False):
        """Run all stages.

        Bug fixes vs. the original:
        - the first stage called the non-existent `self.conv_layers`
          (AttributeError); it now uses `self.conv_before_layers`;
        - with `return_full_list=True`, the *list* of activations was passed
          into the next stage instead of the last activation.
        """
        (conv_before_out, x) = self._run_stage(self.conv_before_layers, x, return_full_list)
        (res_out, x) = self._run_stage(self.res_layers, x, return_full_list)
        (conv_after_out, x) = self._run_stage(self.conv_after_layers, x, return_full_list)
        x = self.reshape(x)
        (fc_out, x) = self._run_stage(self.fc_layers, x, return_full_list)
        return (conv_before_out, res_out, conv_after_out, fc_out)
_ARCH_REGISTRY.register()
class SemanticSegmentor(nn.Module):
    """Semantic segmentation meta-architecture: backbone + pixel-wise head.

    NOTE(review): the bare `_ARCH_REGISTRY.register()` call above looks like
    a garbled `@..._REGISTRY.register()` class decorator — confirm upstream.
    """

    def __init__(self, *, backbone: Backbone, sem_seg_head: nn.Module, pixel_mean: Tuple[float], pixel_std: Tuple[float]):
        """
        Args:
            backbone: feature extractor.
            sem_seg_head: module predicting semantic masks from features.
            pixel_mean / pixel_std: per-channel input normalization constants.
        """
        super().__init__()
        self.backbone = backbone
        self.sem_seg_head = sem_seg_head
        # Buffers (not persisted in state_dict) so they follow .to(device).
        self.register_buffer('pixel_mean', torch.tensor(pixel_mean).view((- 1), 1, 1), False)
        self.register_buffer('pixel_std', torch.tensor(pixel_std).view((- 1), 1, 1), False)

    @classmethod
    def from_config(cls, cfg):
        # Bug fix: this took `cls` but was missing @classmethod, so it could
        # only be called on an instance (with the instance bound as `cls`).
        backbone = build_backbone(cfg)
        sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
        return {'backbone': backbone, 'sem_seg_head': sem_seg_head, 'pixel_mean': cfg.MODEL.PIXEL_MEAN, 'pixel_std': cfg.MODEL.PIXEL_STD}

    @property
    def device(self):
        # Bug fix: forward() reads `self.device` as an attribute, so this must
        # be a property; without the decorator it evaluated to a bound method.
        return self.pixel_mean.device

    def forward(self, batched_inputs):
        """Normalize and pad the batch, run backbone + head; in training mode
        return the loss dict, otherwise per-image upsampled predictions."""
        images = [x['image'].to(self.device) for x in batched_inputs]
        images = [((x - self.pixel_mean) / self.pixel_std) for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility, padding_constraints=self.backbone.padding_constraints)
        features = self.backbone(images.tensor)
        # Ground-truth masks are optional (absent at inference time).
        if ('sem_seg' in batched_inputs[0]):
            targets = [x['sem_seg'].to(self.device) for x in batched_inputs]
            targets = ImageList.from_tensors(targets, self.backbone.size_divisibility, self.sem_seg_head.ignore_value, self.backbone.padding_constraints).tensor
        else:
            targets = None
        (results, losses) = self.sem_seg_head(features, targets)
        if self.training:
            return losses
        # Resize each prediction back to the requested output resolution.
        processed_results = []
        for (result, input_per_image, image_size) in zip(results, batched_inputs, images.image_sizes):
            height = input_per_image.get('height', image_size[0])
            width = input_per_image.get('width', image_size[1])
            r = sem_seg_postprocess(result, image_size, height, width)
            processed_results.append({'sem_seg': r})
        return processed_results
class CvtForImageClassification(metaclass=DummyObject):
    """Import-safe placeholder that raises a helpful error unless the torch
    backend is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative message when torch is unavailable.
        requires_backends(self, ['torch'])
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Args:
        query, key, value: (..., seq_len, d_k) tensors.
        mask: optional tensor broadcastable to the score shape; positions
            where ``mask == 0`` are excluded from attention.
        dropout: optional dropout module applied to the attention weights.

    Returns:
        (context, weights): attended values and the attention distribution.
    """
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Bug fix: the fill value was -0.0, which leaves masked scores
        # unchanged under softmax. Use a large negative value so masked
        # positions receive (effectively) zero probability.
        scores = scores.masked_fill(mask == 0, -1e9)
    p_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    return (torch.matmul(p_attn, value), p_attn)
class Modfied_Loss(nn.modules.loss._Loss):
    """Combined loss: triplet loss + 2x cross-entropy, with per-batch console
    logging.

    (The "Modfied" typo in the class name is kept for caller compatibility.)
    """

    def __init__(self):
        super(Modfied_Loss, self).__init__()
        # Hoisted out of forward(): the criteria are configuration-only, so
        # building them once avoids per-step construction overhead.
        self.triplet_loss = TripletLoss(margin=1.2)
        self.cross_entropy_loss = nn.CrossEntropyLoss()

    def forward(self, outputs, labels):
        Triplet_Loss = self.triplet_loss(outputs, labels)
        CrossEntropy_Loss = self.cross_entropy_loss(outputs, labels)
        # Cross-entropy is weighted twice as heavily as the triplet term.
        loss_sum = (Triplet_Loss + (2 * CrossEntropy_Loss))
        # \r keeps the progress log on a single console line.
        print(('\rtotal loss:%.2f Triplet_Loss:%.2f CrossEntropy_Loss:%.2f' % (loss_sum.data.cpu().numpy(), Triplet_Loss.data.cpu().numpy(), CrossEntropy_Loss.data.cpu().numpy())))
        return loss_sum
class BDEncoder(object):
    """Thin wrapper around a BackwardDifferenceEncoder with fixed settings."""

    def __init__(self, cols=None):
        # Fixed configuration: verbose output, keep invariant columns, return
        # a DataFrame, and map unknown/missing categories to 'value'.
        self.enc = BackwardDifferenceEncoder(
            cols=cols,
            verbose=1,
            drop_invariant=False,
            return_df=True,
            handle_unknown='value',
            handle_missing='value',
        )

    def fit(self, X):
        """Fit the underlying encoder, silencing its warnings."""
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            self.enc.fit(X)

    def transform(self, X):
        """Encode X with the fitted encoder."""
        return self.enc.transform(X)
def get_args_parser():
    """Build the command-line argument parser for MAE pre-training."""
    parser = argparse.ArgumentParser('MAE pre-training', add_help=False)
    # --- Training schedule / batching ---
    parser.add_argument('--batch_size', default=64, type=int, help='Batch size per GPU (effective batch size = batch_size * accum_iter * # gpus')
    parser.add_argument('--epochs', default=400, type=int)
    parser.add_argument('--accum_iter', default=1, type=int, help='Accumulate gradient iterations')
    # --- Model ---
    parser.add_argument('--model', default='mae_vit_large_patch16', type=str, metavar='MODEL', help='Name of model to train')
    parser.add_argument('--input_size', default=224, type=int, help='images input size')
    parser.add_argument('--mask_ratio', default=0.75, type=float, help='Masking ratio (percentage of removed patches).')
    parser.add_argument('--norm_pix_loss', action='store_true', help='Use (per-patch) normalized pixels as targets for computing loss')
    parser.set_defaults(norm_pix_loss=False)
    # --- Optimizer / learning-rate schedule ---
    parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=0.001, metavar='LR', help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--min_lr', type=float, default=0.0, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N', help='epochs to warmup LR')
    # --- Dataset / paths / logging ---
    parser.add_argument('--dataset', default='ImageNet-LT', type=str, help='dataset name')
    parser.add_argument('--data_path', default='/diskC/xzz/ImageNet-LT', type=str, help='dataset path')
    parser.add_argument('--ckpt_dir', default='./ckpt', help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./exp', help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--prit', default=200, type=int, help='console info print frequency')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    # --- DataLoader ---
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)
    # --- Distributed training ---
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=(- 1), type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
def dict_product(dicts):
    """Lazily yield the cartesian product of a dict of option lists.

    Given ``{'a': [1, 2], 'b': [3]}`` yields ``{'a': 1, 'b': 3}`` then
    ``{'a': 2, 'b': 3}`` — one dict per combination of values.
    """
    value_combos = itertools.product(*dicts.values())
    return (dict(zip(dicts, combo)) for combo in value_combos)
def need_finetuning(ft_params, param_name):
    """Decide whether a parameter should be fine-tuned.

    ``ft_params`` is either the literal string ``'all'`` or a comma-separated
    list of substrings; returns True when any of them occurs in ``param_name``.
    """
    if ft_params == 'all':
        return True
    return any(token in param_name for token in ft_params.split(','))
def clean_csv(csvfile, basedir):
    """Auto-clean a CSV with datacleaner and write the result alongside it.

    The cleaned file is named ``clean_<csvfile>`` and written with no index
    column; a one-element list with that name is returned.
    NOTE: ``basedir`` is accepted for interface compatibility but unused here.
    """
    frame = pd.read_csv(csvfile)
    cleaned = datacleaner.autoclean(frame, drop_nans=False, copy=False, ignore_update_check=False)
    out_name = 'clean_' + csvfile
    cleaned.to_csv(out_name, index=False)
    return [out_name]
def map_midi_programs(feature, codec: Codec, granularity_type: str='full', feature_key: str='inputs') -> Mapping[(str, Any)]:
    """Remap MIDI-program tokens stored under ``feature[feature_key]``.

    Looks up the granularity in ``PROGRAM_GRANULARITIES`` and rewrites the
    tokens in place with its ``tokens_map_fn``; the mutated mapping is
    returned for convenience.
    """
    granularity = PROGRAM_GRANULARITIES[granularity_type]
    tokens = feature[feature_key]
    feature[feature_key] = granularity.tokens_map_fn(tokens, codec)
    return feature
def tweet_features_main(reaction_status_json, source_tweet_user_screen_name, source_text) -> List:
    """Extract numeric features from a reaction tweet's status JSON.

    Args:
        reaction_status_json: parsed Twitter status dict (needs
            ``retweet_count``, ``favorite_count``, ``full_text`` or ``text``,
            ``user.profile_use_background_image`` and ``entities.urls``).
        source_tweet_user_screen_name: screen name of the source tweet's
            author; its leading mention is stripped before word counting.
        source_text: text of the source tweet, for duplicate detection.

    Returns:
        ``[num_retweets, num_favorites, has_question, is_duplicate, has_img,
        has_urls, num_urls, has_native_media, context_len]``

    Raises:
        ValueError: if the status has neither ``full_text`` nor ``text``.
    """
    num_retweets = reaction_status_json['retweet_count']
    favorite_count = reaction_status_json['favorite_count']
    num_favorites = favorite_count if favorite_count is not None else 0
    # extended-mode payloads use 'full_text'; legacy ones use 'text'
    if 'full_text' in reaction_status_json:
        context_text = reaction_status_json['full_text']
    elif 'text' in reaction_status_json:
        context_text = reaction_status_json['text']
    else:
        raise ValueError('status JSON has neither "full_text" nor "text"')
    has_question = 1 if re.findall(r'\?', context_text) else 0
    is_duplicate = 1 if context_text.strip() == source_text.strip() else 0
    has_img = 1 if reaction_status_json['user']['profile_use_background_image'] else 0
    urls = reaction_status_json['entities']['urls']
    has_urls = 1 if urls else 0
    num_urls = len(urls) if has_urls else 0
    # native media (photos/videos) lives under 'extended_entities'
    has_native_media = 1 if 'extended_entities' in reaction_status_json else 0
    # BUGFIX: the original re.sub pattern literal was corrupted
    # ("'(?:{})\\s+| ''" — unbalanced quotes, missing .format target).
    # Reconstructed as stripping the source author's leading mention before
    # word counting (source_tweet_user_screen_name was otherwise unused);
    # re.escape guards against regex metacharacters in the screen name.
    mention_pattern = r'(?:{})\s+'.format(re.escape(source_tweet_user_screen_name))
    cleaned_text = re.sub(mention_pattern, '', context_text)
    context_len = len(re.findall(r"[\w']+", cleaned_text))
    tweet_features = [num_retweets, num_favorites, int(has_question), int(is_duplicate), int(has_img), int(has_urls), num_urls, int(has_native_media), context_len]
    return tweet_features
def main(_):
    """Convert SQuAD prediction examples into serialized eval features.

    Reads the predict file named by ``FLAGS.predict_file``, tokenizes with
    the vocab at ``FLAGS.vocab_file`` (lower-cased), and streams each
    converted feature both into an in-memory list and the ``FeatureWriter``
    targeting ``FLAGS.output_file``.
    """
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
    tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=True)
    eval_examples = read_squad_examples(input_file=FLAGS.predict_file, is_training=False)
    eval_writer = FeatureWriter(filename=FLAGS.output_file, is_training=False)
    eval_features = []

    def _collect(feature):
        # keep an in-memory copy while streaming to the TFRecord writer
        eval_features.append(feature)
        eval_writer.process_feature(feature)

    convert_examples_to_features(examples=eval_examples, tokenizer=tokenizer, max_seq_length=384, doc_stride=128, max_query_length=64, is_training=False, output_fn=_collect)
def test_auto_fp16():
    """Exercise the ``_fp16`` decorator: casting gated by ``fp16_enabled``,
    argument selection via ``apply_to``, and output recasting via ``out_fp32``.

    BUGFIX: the flattened original contained bare ``_fp16()`` statements
    directly before each decorated ``def`` — clearly decorator lines whose
    leading ``@`` was lost; restored them as ``@_fp16(...)``.
    """
    # decorating a method of a non-nn.Module must raise TypeError when called
    with pytest.raises(TypeError):

        class ExampleObject(object):

            @_fp16()
            def __call__(self, x):
                return x

        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)

    # default: every float arg is cast to half once fp16_enabled is set
    class ExampleModule(nn.Module):

        @_fp16()
        def forward(self, x, y):
            return (x, y)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)

    # apply_to restricts casting to the named positional arguments
    class ExampleModule(nn.Module):

        @_fp16(apply_to=('x',))
        def forward(self, x, y):
            return (x, y)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.float32)

    # keyword arguments named in apply_to are cast; unnamed ones are not
    class ExampleModule(nn.Module):

        @_fp16(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return (x, y, z)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
        assert (output_z.dtype == torch.float32)

    # out_fp32=True recasts the decorated method's outputs back to float32
    class ExampleModule(nn.Module):

        @_fp16(apply_to=('x', 'y'), out_fp32=True)
        def forward(self, x, y=None, z=None):
            return (x, y, z)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
        assert (output_z.dtype == torch.float32)
class GANLoss(nn.Module):
    """GAN objective that hides the real/fake label bookkeeping from callers.

    Supported modes: ``'lsgan'`` (MSE), ``'vanilla'`` (BCE-with-logits) and
    ``'wgangp'`` (signed mean of the critic output, no elementwise loss).
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        super(GANLoss, self).__init__()
        # registered as buffers so the labels move with the module's device
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ['wgangp']:
            self.loss = None  # WGAN-GP uses the raw critic mean instead
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)

    def get_target_tensor(self, prediction, target_is_real):
        """Return the real/fake label broadcast to ``prediction``'s shape."""
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction)

    def __call__(self, prediction, target_is_real):
        """Compute the GAN loss for ``prediction`` against the chosen target."""
        if self.gan_mode in ['lsgan', 'vanilla']:
            target = self.get_target_tensor(prediction, target_is_real)
            loss = self.loss(prediction, target)
        elif self.gan_mode == 'wgangp':
            # real samples are pushed up (negated mean), fakes pushed down
            loss = -prediction.mean() if target_is_real else prediction.mean()
        return loss
def run(p, _log):
    """End-to-end pipeline: seed RNGs, load data and model dynamically by
    name, train, then run a final batch_size=1 test evaluation.

    ``p`` is the parameter mapping (munchified on entry); ``_log`` is unused
    here — logging goes through the locally created ``log`` callable.
    """
    p = munchify(p)
    # reproducibility: seed every RNG the pipeline touches
    torch.manual_seed(p.seed)
    np.random.seed(p.seed)
    random.seed(p.seed)
    if p.write_logs:
        setup_log_folder(p.log_folder, p.force)
        save_current_script(p.log_folder)
    (log, logger) = setup_logger((p.log_folder if p.write_logs else None))
    log('{}'.format(p))
    ex.logger = logger
    log('load datasets ...')
    # dataset module resolved dynamically: <DATASETS>.<dataset_name>
    _module = importlib.import_module(((DATASETS + '.') + p.dataset_name))
    train_generator = _module.create_iterator(p=p, partition='train', batch_size=p.train_batch_size)
    eval_generator = _module.create_iterator(p=p, partition='valid', batch_size=p.eval_batch_size, random=False)
    test_generator = _module.create_iterator(p=p, partition='test', batch_size=1, random=False)
    vocab_size = len(train_generator.dataset.idx2word)
    log('dataset vocab size: {}'.format(vocab_size))
    log('Number of train batches: {}'.format(len(train_generator)))
    log('Number of test batches: {}'.format(len(eval_generator)))
    log('load model ...')
    # model module resolved the same way: <MODELS>.<model_name>
    _module = importlib.import_module(((MODELS + '.') + p.model_name))
    p.vocab_size = vocab_size
    model = _module.Model(p)
    if p.jit:
        log('compiling model with jit.script ...')
        model = jit.script(model)
    log('skipping model print ...')
    log('{} trainable parameters found. '.format(count_parameters(model)))
    optimizer = torch.optim.Adam(params=model.parameters(), lr=p.learning_rate, betas=(p.beta1, p.beta2))
    # PAD positions are excluded from the cross-entropy loss
    criterion = nn.CrossEntropyLoss(ignore_index=p.PAD)
    if (p.n_gpus > 1):
        if p.jit:
            raise Exception('JIT is currently not supported for distributed training!')
        log('{} GPUs detected. Using nn.DataParallel. \nBatch-size per GPU: {}'.format(p.n_gpus, (p.train_batch_size // p.n_gpus)))
        model = nn.DataParallel(model)
    log('load trainer ...')
    _module = importlib.import_module(((TRAINERS + '.') + p.trainer_name))
    trainer = _module.Trainer(model=model, params=p, train_generator=train_generator, eval_generator=eval_generator, optimizer=optimizer, criterion=criterion, log=log)
    trainer.train()
    # NOTE(review): this is the only call site passing two args to ``log`` —
    # confirm the logger accepts *args (also "best mode" looks like a typo
    # for "best model" in the message).
    log('\nloading best mode from: ', trainer.best_eval_state_path)
    trainer.load_state(trainer.best_eval_state_path)
    log('\nfinal batch_size=1 evaluation ...')
    trainer.evaluate(generator=test_generator, progress=True)
class alexnet_base(nn.Module):
    """Pretrained AlexNet trunk with the final classifier layer removed,
    so ``forward`` yields the penultimate fully-connected embedding."""

    def __init__(self):
        super(alexnet_base, self).__init__()
        self.base = models.alexnet(pretrained=True)
        # drop the last layer of the stock classifier head
        head_layers = list(self.base.classifier.children())[:-1]
        self.classifier = nn.Sequential(*head_layers)

    def forward(self, x):
        feats = self.base.features(x)
        # flatten C*H*W per sample before the fully-connected head
        feats = feats.view(-1, feats.size(1) * feats.size(2) * feats.size(3))
        return self.classifier(feats)
class TestResamplingDataset(unittest.TestCase):
    """Checks that ResamplingDataset draws items with the requested weights
    and honors the ``batch_by_size`` (size-ordered indices) setting."""

    def setUp(self):
        # four items of differing lengths with unnormalized sampling weights
        self.strings = ['ab', 'c', 'def', 'ghij']
        self.weights = [4.0, 2.0, 7.0, 1.5]
        self.size_ratio = 2
        self.dataset = ListDataset(self.strings, np.array([len(s) for s in self.strings]))

    def _test_common(self, resampling_dataset, iters):
        """Iterate ``iters`` epochs; return whether indices stayed ordered by
        size and the largest |empirical - expected| sampling frequency."""
        assert (len(self.dataset) == len(self.strings) == len(self.weights))
        # resampled dataset is size_ratio times the base dataset
        assert (len(resampling_dataset) == (self.size_ratio * len(self.strings)))
        results = {'ordered_by_size': True, 'max_distribution_diff': 0.0}
        totalfreqs = 0
        freqs = collections.defaultdict(int)
        for epoch_num in range(iters):
            # each epoch draws a fresh resampling
            resampling_dataset.set_epoch(epoch_num)
            indices = resampling_dataset.ordered_indices()
            assert (len(indices) == len(resampling_dataset))
            prev_size = (- 1)
            for i in indices:
                cur_size = resampling_dataset.size(i)
                # item access must be deterministic within an epoch
                assert (resampling_dataset[i] == resampling_dataset[i])
                assert (cur_size == len(resampling_dataset[i]))
                freqs[resampling_dataset[i]] += 1
                totalfreqs += 1
                if (prev_size > cur_size):
                    results['ordered_by_size'] = False
                prev_size = cur_size
        # every base item must have been sampled at least once
        assert (set(freqs.keys()) == set(self.strings))
        for (s, weight) in zip(self.strings, self.weights):
            freq = (freqs[s] / totalfreqs)
            expected_freq = (weight / sum(self.weights))
            results['max_distribution_diff'] = max(results['max_distribution_diff'], abs((expected_freq - freq)))
        return results

    def test_resampling_dataset_batch_by_size_false(self):
        """Without batch_by_size the order is shuffled, but the empirical
        sampling distribution still matches the weights."""
        resampling_dataset = ResamplingDataset(self.dataset, self.weights, size_ratio=self.size_ratio, batch_by_size=False, seed=0)
        results = self._test_common(resampling_dataset, iters=1000)
        assert (not results['ordered_by_size'])
        assert (results['max_distribution_diff'] < 0.02)

    def test_resampling_dataset_batch_by_size_true(self):
        """With batch_by_size the indices come back sorted by item size."""
        resampling_dataset = ResamplingDataset(self.dataset, self.weights, size_ratio=self.size_ratio, batch_by_size=True, seed=0)
        results = self._test_common(resampling_dataset, iters=1000)
        assert results['ordered_by_size']
        assert (results['max_distribution_diff'] < 0.02)
def create_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the connectivity-layer creation tool."""
    parser = argparse.ArgumentParser(description='This programs creates the connectivity layers.')
    parser.add_argument(
        '--raw_folder',
        type=str,
        required=True,
        default='./data/raw',
        help='Points to extracted data.',
    )
    parser.add_argument(
        '--output_folder',
        type=str,
        required=False,
        default='.',
        help='Where to put the files created',
    )
    return parser
def _from_file(filename):
    """Load a clustering from a TSV file.

    Each line is ``label<TAB>float<TAB>float...``; returns a list of
    ``(vector, label)`` pairs with the vector as a numpy array.
    """
    clustering = []
    with open(filename, 'r') as handle:
        for row in handle:
            fields = row.split('\t')
            label = int(fields[0])
            vector = np.array([float(v) for v in fields[1:]])
            clustering.append((vector, label))
    return clustering
class WindowedIterator(CheckpointableIterator):
    """Yields fixed-width sliding windows (as tuples) over a source iterator.

    Checkpointable: the state is the source iterator's state as of the start
    of the current FIFO refill, plus the index of the next window within that
    FIFO — together they let iteration resume exactly where it left off.
    """

    def __init__(self, source_iterator: CheckpointableIterator, width: int):
        # width: number of consecutive source items per emitted window
        if (not isinstance(source_iterator, CheckpointableIterator)):
            raise ValueError('source_iterator has to be a CheckpointableIterator')
        self._source_iterator = source_iterator
        self._width = width
        self.setstate(None)

    def getstate(self) -> Dict:
        """Snapshot: source state + next-window index inside the FIFO."""
        return {'source_state': self._source_state, 'item_index': self._item_index}

    def setstate(self, checkpoint: Optional[Dict]):
        """Restore from ``checkpoint``; ``None`` resets to the source's start."""
        self._source_state = (checkpoint['source_state'] if checkpoint else None)
        self._item_index = (checkpoint['item_index'] if checkpoint else 0)
        self._source_iterator.setstate(self._source_state)
        self._iterator = self._generate()

    def _fifo_slice(self, i):
        # the window of self._width items starting at FIFO position i
        return tuple(self._fifo[i:(i + self._width)])

    def _generate(self) -> Iterator:
        # capture the source state BEFORE reading, so a restored iterator
        # can re-read the same items into the FIFO
        self._source_state = self._source_iterator.getstate()
        self._fifo = list(islice(self._source_iterator, self._width))
        # a full FIFO means at least one more window exists
        while (len(self._fifo) == self._width):
            # where the source stands after the items consumed so far
            next_input_state = self._source_iterator.getstate()
            self._fifo.extend(islice(self._source_iterator, self._width))
            # last valid window start within the (up to 2*width) buffer
            last = min((self._width - 1), (len(self._fifo) - self._width))
            while (self._item_index <= last):
                window = self._fifo_slice(self._item_index)
                self._item_index += 1
                (yield window)
            # drop consumed items; the next round resumes from next_input_state
            self._fifo = self._fifo[(last + 1):]
            self._source_state = next_input_state
            self._item_index = 0

    def __next__(self):
        return next(self._iterator)
def build_optimizer(model, optim_cfg, logger, fixed=False):
    """Create an optimizer for ``model`` from ``optim_cfg``.

    Supported ``optim_cfg.OPTIMIZER`` values: ``'adam'``, ``'sgd'`` and
    ``'adam_onecycle'``; any other value raises ``NotImplementedError``.
    When ``fixed`` is True the vfe / backbone_3d / backbone_2d / dense_head
    sub-modules are frozen (``requires_grad=False``) so only the remaining
    parameters train; ``logger`` reports each freeze.

    BUGFIX: in the flattened original the ``else: raise NotImplementedError``
    followed ``if fixed:``, which would make every ``fixed=False`` call
    raise. It belongs to the OPTIMIZER dispatch chain (unknown optimizer
    name), where it has been restored.
    """
    if optim_cfg.OPTIMIZER == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
    elif optim_cfg.OPTIMIZER == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY, momentum=optim_cfg.MOMENTUM)
    elif optim_cfg.OPTIMIZER == 'adam_onecycle':

        def children(m: nn.Module):
            return list(m.children())

        def num_children(m: nn.Module) -> int:
            return len(children(m))

        # flatten the module tree into a list of leaf modules, grouped into a
        # single layer group for the fastai-style OptimWrapper
        flatten_model = lambda m: sum(map(flatten_model, m.children()), []) if num_children(m) else [m]
        get_layer_groups = lambda m: [nn.Sequential(*flatten_model(m))]
        optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
        optimizer = OptimWrapper.create(optimizer_func, 0.003, get_layer_groups(model), wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True)
    else:
        raise NotImplementedError
    if fixed:
        # freeze the detection trunk so only the remaining heads train
        for param in model.vfe.parameters():
            logger.info('==> set no grad for vfe.')
            param.requires_grad = False
        for param in model.backbone_3d.parameters():
            logger.info('==> set no grad for backbone_3d.')
            param.requires_grad = False
        for param in model.backbone_2d.parameters():
            logger.info('==> set no grad for backbone_2d.')
            param.requires_grad = False
        for param in model.dense_head.parameters():
            logger.info('==> set no grad for dense_head.')
            param.requires_grad = False
    return optimizer
class TD3(object):
    """Twin Delayed DDPG (TD3) agent: twin critics, delayed policy updates
    and target policy smoothing (Fujimoto et al., 2018)."""

    def __init__(self, state_dim, action_dim, max_action):
        # actor plus a frozen copy used as the slowly-tracking target network
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters())
        # the twin Q-networks live inside a single Critic module
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters())
        self.max_action = max_action

    def select_action(self, state):
        """Deterministic policy action for a single state, as a flat numpy array."""
        state = torch.FloatTensor(state.reshape(1, (- 1))).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):
        """Run ``iterations`` TD3 updates from replay-buffer samples.

        tau: Polyak averaging rate for the target networks;
        policy_noise / noise_clip: target-policy smoothing noise scale/clip;
        policy_freq: actor and targets updated every ``policy_freq`` critic
        updates (the "delayed" part of TD3).
        """
        for it in range(iterations):
            # sampled batch: (state, next_state, action, reward, done flag)
            (x, y, u, r, d) = replay_buffer.sample(batch_size)
            state = torch.FloatTensor(x).to(device)
            action = torch.FloatTensor(u).to(device)
            next_state = torch.FloatTensor(y).to(device)
            # NOTE: ``done`` actually holds the NOT-done mask (1 - d), used
            # to zero the bootstrap term at episode ends
            done = torch.FloatTensor((1 - d)).to(device)
            reward = torch.FloatTensor(r).to(device)
            # target policy smoothing: clipped Gaussian noise on the target action
            noise = torch.FloatTensor(u).data.normal_(0, policy_noise).to(device)
            noise = noise.clamp((- noise_clip), noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp((- self.max_action), self.max_action)
            # NOTE(review): this second clamp is redundant (same bounds as above)
            next_action = next_action.clamp((- self.max_action), self.max_action)
            # clipped double-Q: bootstrap from the smaller of the twin targets
            (target_Q1, target_Q2) = self.critic_target(next_state, next_action)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = (reward + ((done * discount) * target_Q).detach())
            (current_Q1, current_Q2) = self.critic(state, action)
            # both critics regress onto the same target
            critic_loss = (F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q))
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()
            # delayed policy and target-network updates
            if ((it % policy_freq) == 0):
                # maximize Q1 w.r.t. the actor => minimize its negation
                actor_loss = (- self.critic.Q1(state, self.actor(state)).mean())
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                self.actor_optimizer.step()
                # Polyak-average both target networks toward the live ones
                for (param, target_param) in zip(self.critic.parameters(), self.critic_target.parameters()):
                    target_param.data.copy_(((tau * param.data) + ((1 - tau) * target_param.data)))
                for (param, target_param) in zip(self.actor.parameters(), self.actor_target.parameters()):
                    target_param.data.copy_(((tau * param.data) + ((1 - tau) * target_param.data)))

    def save(self, filename, directory):
        """Write actor/critic weights to <directory>/<filename>_{actor,critic}.pth."""
        torch.save(self.actor.state_dict(), ('%s/%s_actor.pth' % (directory, filename)))
        torch.save(self.critic.state_dict(), ('%s/%s_critic.pth' % (directory, filename)))

    def load(self, filename, directory):
        """Load actor/critic weights previously written by ``save``."""
        self.actor.load_state_dict(torch.load(('%s/%s_actor.pth' % (directory, filename))))
        self.critic.load_state_dict(torch.load(('%s/%s_critic.pth' % (directory, filename))))
def sobel_cam(img):
    """Sobel gradient-magnitude map of an RGB image.

    Horizontal and vertical 3x3 Sobel responses are computed on the
    grayscale image, converted to absolute 8-bit values, and blended 50/50.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    dx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    dy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    abs_dx = cv2.convertScaleAbs(dx)
    abs_dy = cv2.convertScaleAbs(dy)
    return cv2.addWeighted(abs_dx, 0.5, abs_dy, 0.5, 0)
def segmented_scatter_(dest, indices, start_indices, values):
    """In-place scatter of ``values`` into ``dest``.

    Each destination slot is addressed by ``start_indices + indices`` (the
    per-segment base offset plus the within-segment index); ``dest`` is
    mutated and returned.
    """
    dest[start_indices + indices] = values
    return dest
def main(args):
    """Entry point: optional seeding, distributed setup, then worker launch.

    With ``multiprocessing_distributed`` one process is spawned per GPU and
    ``world_size`` is scaled to the total process count; otherwise
    ``main_worker`` runs directly in this process.
    """
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    # world size comes from the environment when using env:// rendezvous
    if args.dist_url == 'env://' and args.world_size == -1:
        args.world_size = int(os.environ['WORLD_SIZE'])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        main_worker(args.gpu, ngpus_per_node, args)
        return
    # one process per GPU; world_size becomes the total process count
    args.world_size = ngpus_per_node * args.world_size
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
def time_me(function):
    """Decorator: call ``function`` and return ``(result, elapsed_ms)``.

    The wrapped callable returns a 2-tuple of the original return value and
    the wall-clock duration of the call in milliseconds.

    FIX: apply ``functools.wraps`` so the wrapper keeps the wrappee's
    ``__name__`` / ``__doc__`` (the original wrapper hid them).
    """
    import functools  # local import: the file's import block is out of view

    @functools.wraps(function)
    def wrapped(*args, **kwargs):
        start = time.time()
        result = function(*args, **kwargs)
        elapsed_ms = (time.time() - start) * 1000
        return (result, elapsed_ms)

    return wrapped