code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def get_content_field(self, name):
    """Return the parsed contents of subtag ``name`` from the response content tag.

    Args:
        name: Tag name of the content subtag to extract.

    Returns:
        None when the tag is absent, a single dict when exactly one match
        exists, otherwise a list of dicts (one per matching tag).
    """
    matches = self._content.findall(name)
    if not matches:
        return None
    converted = [etree_to_dict(match)[name] for match in matches]
    return converted[0] if len(converted) == 1 else converted
Get the contents of a specific subtag from Clusterpoint Storage's response's content tag. Args: name -- A name string of the content's subtag to be returned. Returns: A dict representing the contents of the specified field or a list of dicts if there are multiple fields with that tag name. Returns None if no field found.
codesearchnet
def filter_parts(cls, part_info):
    """Filter ``part_info`` down to entries whose infos are instances of ``cls``.

    Args:
        cls: The Info subclass to keep.
        part_info (dict): {part_name: [Info] or None or Exception} as returned
            from Controller.run_hook().

    Returns:
        OrderedDict: {part_name: [info]} containing only infos of type ``cls``,
        omitting parts whose list ends up empty.
    """
    candidate_pairs = (
        (part_name, [info for info in infos if isinstance(info, cls)])
        for part_name, infos in part_info.items()
        if infos is not None and not isinstance(infos, Exception)
    )
    return OrderedDict(
        (part_name, infos) for part_name, infos in candidate_pairs if infos)
Filter the part_info dict looking for instances of our class Args: part_info (dict): {part_name: [Info] or None} as returned from Controller.run_hook() Returns: dict: {part_name: [info]} where info is a subclass of cls
juraj-google-style
def _MakeGroupFromRootSection(root_section, undefined_str):
    """Construct a dictionary {template name -> Template() instance}.

    Args:
        root_section: _Section instance -- root of the original parse tree.
        undefined_str: passed through to Template._FromSection (presumably
            the rendering for undefined variables -- TODO confirm).

    Returns:
        dict mapping section name to its compiled Template.
    """
    group = {}
    for statement in root_section.Statements():
        # Literal text chunks are plain strings; only (func, args) pairs can
        # define templates.
        if isinstance(statement, six.string_types):
            continue
        func, args = statement
        # Only _DoDef statements whose argument is a _Section define a named
        # template in this group.
        if func is _DoDef and isinstance(args, _Section):
            section = args
            # NOTE(review): the partially-built group dict is passed into
            # _FromSection, presumably so templates can reference each other
            # -- confirm against Template._FromSection.
            t = Template._FromSection(section, group, undefined_str)
            group[section.section_name] = t
    return group
Construct a dictionary { template name -> Template() instance } Args: root_section: _Section instance -- root of the original parse tree
juraj-google-style
def remove_pad(x, pad_remover, mode):
    """Remove padding by concatenating all dimensions into one.

    Args:
        x (tf.Tensor): input of shape [batch_size, length, depth].
        pad_remover (expert_utils.PadRemover): object that strips padding
            positions.
        mode (ModeKeys): infer, train or eval. If inference, the padding
            remover is not applied.

    Returns:
        tf.Tensor of shape [1, length_nonpad, depth] where
        length_nonpad <= batch_size * length.
    """
    # Merge batch and length dims: [batch, length, depth] -> [batch*length, depth].
    x = expert_utils.flatten_all_but_last(x)
    if (mode != ModeKeys.PREDICT):
        x = pad_remover.remove(x)
    # Restore a leading "batch" dimension of size 1.
    x = tf.expand_dims(x, axis=0)
    return x
Remove padding by concatenating all dimension into one. Args: x (tf.Tensor): input of shape [batch_size, length, depth] pad_remover (obj): a PadRemover object mode (ModeKeys): infer, train or eval. If inference, the padding remover is not applied Returns: tf.Tensor of shape [1,length_nonpad,depth] where length_nonpad <= batch_size*length
codesearchnet
def pad_trajectories(trajectories, boundary=20):
    """Pad trajectories to a bucket length that is a multiple of boundary.

    Args:
        trajectories: list[(observation, actions, rewards)], observation shaped
            (t+1,) + OBS, actions & rewards shaped (t,); list length is B.
        boundary (int): bucket length; sequences are padded to integer
            multiples of it.

    Returns:
        tuple: (padding lengths, reward_mask, padded_observations,
        padded_actions, padded_rewards); observations are (B, T+1) + OBS,
        the rest (B, T), with T = max(t) rounded up to a multiple of boundary.
        reward_mask is 1 for real reward steps and 0 for padding.
    """
    # Longest reward sequence across the batch; observations are one longer.
    t_max = max((r.shape[0] for (_, _, r) in trajectories))
    boundary = int(boundary)
    # Round t_max up to the nearest multiple of boundary.
    bucket_length = (boundary * int(np.ceil((float(t_max) / boundary))))
    padded_observations = []
    padded_actions = []
    padded_rewards = []
    padded_lengths = []
    reward_masks = []
    for (o, a, r) in trajectories:
        # Observations are (t+1, ...), so pad up to bucket_length + 1.
        num_to_pad = ((bucket_length + 1) - o.shape[0])
        padded_lengths.append(num_to_pad)
        if (num_to_pad == 0):
            # Already at bucket length: keep everything as-is, full mask.
            padded_observations.append(o)
            padded_actions.append(a)
            padded_rewards.append(r)
            reward_masks.append(onp.ones_like(r, dtype=np.int32))
            continue
        # Pad only the leading (time) axis of the observation.
        padding_config = [(0, num_to_pad, 0)]
        for _ in range((o.ndim - 1)):
            padding_config.append((0, 0, 0))
        padding_config = tuple(padding_config)
        padding_value = get_padding_value(o.dtype)
        action_padding_value = get_padding_value(a.dtype)
        reward_padding_value = get_padding_value(r.dtype)
        padded_obs = lax.pad(o, padding_value, padding_config)
        padded_observations.append(padded_obs)
        # Actions and rewards are assumed rank-1 here.
        assert ((a.ndim == 1) and (r.ndim == 1))
        padding_config = ((0, num_to_pad, 0),)
        padded_action = lax.pad(a, action_padding_value, padding_config)
        padded_actions.append(padded_action)
        padded_reward = lax.pad(r, reward_padding_value, padding_config)
        padded_rewards.append(padded_reward)
        # Mask: 1 for real reward steps, 0 where padding was added.
        reward_mask = onp.ones_like(r, dtype=np.int32)
        reward_masks.append(lax.pad(reward_mask, 0, padding_config))
    return (padded_lengths, np.stack(reward_masks), np.stack(padded_observations), np.stack(padded_actions), np.stack(padded_rewards))
Pad trajectories to a bucket length that is a multiple of boundary. Args: trajectories: list[(observation, actions, rewards)], where each observation is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the length of the list being B (batch size). boundary: int, bucket length, the actions and rewards are padded to integer multiples of boundary. Returns: tuple: (padding lengths, reward_mask, padded_observations, padded_actions, padded_rewards) where padded_observations is shaped (B, T+1) + OBS and padded_actions, padded_rewards & reward_mask are shaped (B, T). Where T is max(t) rounded up to an integer multiple of boundary. padded_length is how much padding we've added and reward_mask is 1s for actual rewards and 0s for the padding.
codesearchnet
def preprocess_input_examples_arg_string(input_examples_str):
    """Parse input into a dict mapping input keys to lists of tf.Example.

    Parses a string of the form
    'input_key1=[{feature_name: feature_list}];input_key2=[...];' into a
    dictionary mapping each input key to its list of serialized tf.Example.

    Args:
        input_examples_str: String specifying, per input key, a list of
            dictionaries of feature_names and feature_lists; inputs are
            separated by semicolons.

    Returns:
        A dictionary mapping input keys to lists of serialized tf.Example.

    Raises:
        ValueError: When a given tf.Example value is not a list.
    """
    parsed = preprocess_input_exprs_arg_string(input_examples_str)
    serialized = {}
    for key, examples in parsed.items():
        if not isinstance(examples, list):
            raise ValueError(
                'tf.Example input must be a list of dictionaries, but "%s" is %s'
                % (examples, type(examples)))
        serialized[key] = [_create_example_string(example) for example in examples]
    return serialized
Parses input into dict that maps input keys to lists of tf.Example. Parses input string in the format of 'input_key1=[{feature_name: feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary that maps each input_key to its list of serialized tf.Example. Args: input_examples_str: A string that specifies a list of dictionaries of feature_names and their feature_lists for each input. Each input is separated by semicolon. For each input key: 'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]' items in feature_list can be the type of float, int, long or str. Returns: A dictionary that maps input keys to lists of serialized tf.Example. Raises: ValueError: An error when the given tf.Example is not a list.
github-repos
def _run_graph(self, device, input_shape, axes, num_layers, mode, scale, train, num_iters):
    """Run the benchmark graph and report its execution time.

    Args:
        device: string, the device to run on.
        input_shape: shape of the input tensor.
        axes: axes that are to be normalized across.
        num_layers: number of batch normalization layers in the graph.
        mode: "op", "py" or "slow" depending on the implementation.
        scale: scale after normalization.
        train: if true, also run backprop.
        num_iters: number of steps to run.

    Returns:
        The duration of the run in seconds.
    """
    graph = ops.Graph()
    with graph.as_default():
        outputs = build_graph(device, input_shape, axes, num_layers, mode, scale, train)
    with session_lib.Session(graph=graph) as session:
        variables.global_variables_initializer().run()
        # Warm-up run so one-off setup cost is excluded from the timing.
        _ = session.run([out.op for out in outputs])
        start_time = time.time()
        for _ in range(num_iters):
            _ = session.run([out.op for out in outputs])
        duration = time.time() - start_time
        # BUGFIX: the original print statement was corrupted (it ran into the
        # name_template assignment, leaving a syntax error). Report the
        # per-iteration wall time explicitly instead.
        print('%s shape:%s axes:%s mode:%s layers:%d scale:%r train:%r - %f secs/iter'
              % (device, input_shape, axes, mode, num_layers, scale, train,
                 duration / num_iters))
    name_template = 'batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_layers_{num_layers}_scale_{scale}_train_{train}'
    self.report_benchmark(
        name=name_template.format(
            device=device, mode=mode, num_layers=num_layers, scale=scale,
            train=train, shape=str(input_shape).replace(' ', ''),
            axes=str(axes)).replace(' ', ''),
        iters=num_iters,
        wall_time=duration / num_iters)
    return duration
Run the graph and print its execution time. Args: device: string, the device to run on. input_shape: shape of the input tensor. axes: axes that are to be normalized across. num_layers: number of batch normalization layers in the graph. mode: "op", "py" or "slow" depending on the implementation. scale: scale after normalization. train: if true, also run backprop. num_iters: number of steps to run. Returns: The duration of the run in seconds.
github-repos
def _parse_trunk_native_vlan(self, config): match = re.search(r'switchport trunk native vlan (\d+)', config) return dict(trunk_native_vlan=match.group(1))
Scans the specified config and parse the trunk native vlan value Args: config (str): The interface configuration block to scan Returns: dict: A Python dict object with the value of switchport trunk native vlan value. The dict returned is intended to be merged into the resource dict
juraj-google-style
def get_yaml_parser_roundtrip():
    """Create a round-trip yaml parser object with this factory method.

    The round-trip ('rt') pure-Python parser preserves comments, key order,
    block/flow style and hand-crafted anchor names across load/dump cycles.

    Returns:
        ruamel.yaml.YAML object with round-trip loader.
    """
    yaml_writer = yamler.YAML(typ='rt', pure=True)
    # 2-space mappings, 4-space sequences with the dash offset by 2.
    yaml_writer.indent(mapping=2, sequence=4, offset=2)
    return yaml_writer
Create the yaml parser object with this factory method. The round-trip parser preserves: - comments - block style and key ordering are kept, so you can diff the round-tripped source - flow style sequences ('a: b, c, d') (based on request and test by Anthony Sottile) - anchor names that are hand-crafted (i.e. not of the form ``idNNN``) - merges in dictionaries are preserved Returns: ruamel.yaml.YAML object with round-trip loader
codesearchnet
def _post_process(self, feed_item, item):
    """Write the DCM creative asset name back into the Bulkdozer feed item.

    Args:
        feed_item: Feed item representing the creative asset from the
            Bulkdozer feed.
        item: The DCM creative asset being updated or created.
    """
    # NOTE(review): assumes 'item' always carries an 'assetIdentifier' dict
    # with a 'name' key -- a missing key would raise KeyError; confirm
    # against the DCM API response shape.
    if item['assetIdentifier']['name']:
        feed_item[FieldMap.CREATIVE_ASSET_NAME] = item['assetIdentifier']['name']
Maps ids and names of related entities so they can be updated in the Bulkdozer feed. When Bulkdozer is done processing an item, it writes back the updated names and ids of related objects, this method makes sure those are updated in the creative asset feed. Args: feed_item: Feed item representing the creative asset from the Bulkdozer feed. item: The DCM creative asset being updated or created.
github-repos
def _assign_method(self, resource_class, method_type):
    """Using reflection, assign a new API-calling method to this instance.

    Each call creates a fresh closure, so every (resource, method_type)
    pair gets its own function object -- the original achieved this by
    defining five identical closures and picking one; a single closure per
    call is equivalent and removes the duplication.

    Args:
        resource_class: A resource class.
        method_type: The HTTP method type ('GET', 'PUT', 'POST', 'PATCH'
            or 'DELETE').

    Raises:
        KeyError: If method_type is not a supported HTTP method (preserves
            the original's method_map lookup failure).
    """
    method_name = resource_class.get_method_name(resource_class, method_type)
    valid_status_codes = getattr(
        resource_class.Meta, 'valid_status_codes', DEFAULT_VALID_STATUS_CODES)

    # Preserve the original KeyError on unsupported method types.
    if method_type not in ('GET', 'PUT', 'POST', 'PATCH', 'DELETE'):
        raise KeyError(method_type)

    def api_call(self, method_type=method_type, method_name=method_name,
                 valid_status_codes=valid_status_codes, resource=resource_class,
                 data=None, uid=None, **kwargs):
        # Defaults bind the per-assignment configuration at definition time.
        return self.call_api(
            method_type, method_name, valid_status_codes, resource,
            data, uid=uid, **kwargs)

    setattr(self, method_name, types.MethodType(api_call, self))
Using reflection, assigns a new method to this class. Args: resource_class: A resource class method_type: The HTTP method type
codesearchnet
def item(self, key):
    """Retrieve an Item object for the specified key in this bucket.

    The item need not exist.

    Args:
        key: the key of the item within the bucket.

    Returns:
        An Item instance representing the specified key.
    """
    return _item.Item(self._name, key, context=self._context)
Retrieves an Item object for the specified key in this bucket. The item need not exist. Args: key: the key of the item within the bucket. Returns: An Item instance representing the specified key.
juraj-google-style
def update_handler(Model, name=None, **kwds):
    """Build an action handler that updates an instance of ``Model``.

    The returned coroutine reacts only to the CRUD 'update' action for this
    model (nautilus conventions), applies the payload fields to the matching
    record and, when ``notify`` is set, publishes the result (or the error)
    on the service's event broker.

    Args:
        Model (nautilus.BaseModel): The model to update when the action is
            received.
        name: Optional override for the action name (defaults to the model).

    Returns:
        function(type, payload): The action handler for this model.
    """
    async def action_handler(service, action_type, payload, props, notify=True, **kwds):
        # Ignore every action except this model's update action.
        if action_type == get_crud_action('update', name or Model):
            try:
                message_props = {}
                # Preserve the correlation id so replies can be matched to
                # their originating request.
                if 'correlation_id' in props:
                    message_props['correlation_id'] = props['correlation_id']
                pk_field = Model.primary_key()
                if not pk_field.name in payload:
                    raise ValueError("Must specify the pk of the model when updating")
                model = Model.select().where(pk_field == payload[pk_field.name]).get()
                # The pk locates the record; only the remaining payload keys
                # are written onto the model.
                payload.pop(pk_field.name, None)
                for key, value in payload.items():
                    setattr(model, key, value)
                model.save()
                if notify:
                    await service.event_broker.send(
                        payload=ModelSerializer().serialize(model),
                        action_type=change_action_status(action_type, success_status()),
                        **message_props
                    )
            except Exception as err:
                # On failure, publish the error string when notifying;
                # otherwise re-raise for the caller to handle.
                if notify:
                    await service.event_broker.send(
                        payload=str(err),
                        action_type=change_action_status(action_type, error_status()),
                        **message_props
                    )
                else:
                    raise err
    return action_handler
This factory returns an action handler that updates an existing instance of the specified model when an update action is received, assuming the action follows nautilus conventions. Args: Model (nautilus.BaseModel): The model to update when the action is received. Returns: function(type, payload): The action handler for this model
juraj-google-style
def _find_furious_yaml(start, checked):
    """Walk up the directory tree from ``start`` looking for furious.yaml.

    Traversal stops when a directory already in ``checked`` is reached
    (which also guarantees termination at the filesystem root, since the
    root is its own dirname) or when the file is found. ``checked`` exists
    both to make loop termination easy to reason about and so the same
    directories do not get rechecked.

    Args:
        start: the path to start looking in and work upward from.
        checked: the set of already-checked directories (mutated in place).

    Returns:
        The path of the furious.yaml file, or None if it is not found.
    """
    current = start
    while current not in checked:
        checked.add(current)
        found = next(
            (os.path.join(current, candidate)
             for candidate in FURIOUS_YAML_NAMES
             if os.path.exists(os.path.join(current, candidate))),
            None)
        if found is not None:
            return found
        current = os.path.dirname(current)
    return None
Traverse the directory tree identified by start until a directory already in checked is encountered or the path of furious.yaml is found. Checked is present both to make the loop termination easy to reason about and so the same directories do not get rechecked Args: start: the path to start looking in and work upward from checked: the set of already checked directories Returns: the path of the furious.yaml file or None if it is not found
juraj-google-style
def getexcfo(e):
    """Get an err tufo from an exception.

    Args:
        e (Exception): An Exception (or Exception subclass).

    Notes:
        This can be called outside of the context of an exception handler;
        in that case details such as file, line, function name and source
        may be missing.

    Returns:
        ((str, dict)): The exception class name and an info dict with keys
        msg, file, line, name, src (and syn:err for SynErr instances).
    """
    tb = sys.exc_info()[2]
    tbinfo = traceback.extract_tb(tb)
    path, line, name, src = '', '', '', None
    if tbinfo:
        # BUGFIX: the original unpacked into a misspelled 'sorc' variable,
        # so 'src' in the result was always None.
        path, line, name, src = tbinfo[-1]
    retd = {'msg': str(e), 'file': path, 'line': line, 'name': name, 'src': src}
    if isinstance(e, s_exc.SynErr):
        retd['syn:err'] = e.errinfo
    return (e.__class__.__name__, retd)
Get an err tufo from an exception. Args: e (Exception): An Exception (or Exception subclass). Notes: This can be called outside of the context of an exception handler, however details such as file, line, function name and source may be missing. Returns: ((str, dict)):
codesearchnet
def GetFileObject(self, data_stream_name=''):
    """Retrieves the file-like object.

    Args:
        data_stream_name (Optional[str]): data stream name, where an empty
            string represents the default data stream.

    Returns:
        TSKFileIO: file-like object, or None when the named data stream does
        not exist.
    """
    data_stream_names = [
        data_stream.name for data_stream in self._GetDataStreams()]
    if data_stream_name and data_stream_name not in data_stream_names:
        return None
    # Work on a copy so the entry's own path spec is never mutated.
    path_spec = copy.deepcopy(self.path_spec)
    if data_stream_name:
        # On HFS the decompressed data fork is exposed as 'DECOMP'; map it
        # back to the default data stream before resolving.
        if self._file_system.IsHFS() and data_stream_name == 'DECOMP':
            data_stream_name = ''
        setattr(path_spec, 'data_stream', data_stream_name)
    return resolver.Resolver.OpenFileObject(
        path_spec, resolver_context=self._resolver_context)
Retrieves the file-like object. Args: data_stream_name (Optional[str]): data stream name, where an empty string represents the default data stream. Returns: TSKFileIO: file-like object or None.
juraj-google-style
def convert_config_value(self, value, label):
    """Convert 'truthy' values to True and 'falsy' values to False.

    Args:
        value: Value to convert; strings are compared case-insensitively.
        label: Label of the config in which this item was found.

    Returns:
        bool: True for truthy values, False for falsy values.

    Raises:
        YapconfValueError: If the value is neither truthy nor falsy.
    """
    if isinstance(value, six.string_types):
        value = value.lower()
    if value in self.TRUTHY_VALUES:
        return True
    if value in self.FALSY_VALUES:
        return False
    raise YapconfValueError(
        'Cowardly refusing to interpret config value as a boolean. '
        'Name: {0}, Value: {1}'.format(self.name, value))
Converts all 'Truthy' values to True and 'Falsy' values to False. Args: value: Value to convert label: Label of the config which this item was found. Returns:
codesearchnet
def argpartition(x, kth, axis=-1):
    """Performs an indirect partition along the given axis.

    Returns an array of indices of the same shape as `x` that index data
    along the given axis in partitioned order.

    Args:
        x: Array to partition.
        kth: Element index to partition by; the k-th element ends up in its
            final sorted position, smaller elements before it, larger after.
            The order within the partitions is undefined.
        axis: Axis along which to partition. Defaults to -1 (the last axis).

    Returns:
        Array of indices that partition `x` along the specified `axis`.
    """
    # Build a symbolic op when any input is a symbolic tensor; otherwise
    # dispatch straight to the backend's numpy implementation.
    if any_symbolic_tensors((x,)):
        return Argpartition(kth, axis).symbolic_call(x)
    return backend.numpy.argpartition(x, kth, axis)
Performs an indirect partition along the given axis. It returns an array of indices of the same shape as `x` that index data along the given axis in partitioned order. Args: a: Array to sort. kth: Element index to partition by. The k-th element will be in its final sorted position and all smaller elements will be moved before it and all larger elements behind it. The order of all elements in the partitions is undefined. If provided with a sequence of k-th it will partition all of them into their sorted position at once. axis: Axis along which to sort. The default is -1 (the last axis). If `None`, the flattened array is used. Returns: Array of indices that partition `x` along the specified `axis`.
github-repos
def get_input_mask_at(self, node_index):
    """Retrieve the input mask tensor(s) of a layer at a given node.

    Args:
        node_index: Integer, index of the node from which to retrieve the
            attribute. E.g. `node_index=0` corresponds to the first time
            the layer was called.

    Returns:
        A mask tensor (or list of tensors if the layer has multiple inputs);
        entries are None where no mask is attached.
    """
    inputs = self.get_input_at(node_index)
    if not isinstance(inputs, list):
        return getattr(inputs, '_keras_mask', None)
    return [getattr(tensor, '_keras_mask', None) for tensor in inputs]
Retrieves the input mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple inputs).
github-repos
def get_data_xls(file_name, file_contents=None, on_demand=False):
    """Load old-format (.xls) Excel files; new-format files load as well.

    Args:
        file_name: The name of the local file, or the holder for the
            extension type when file_contents is supplied.
        file_contents: The file-like object holding the contents of
            file_name. If None, file_name is loaded directly.
        on_demand: Requests that a yielder be used in place of a full data
            copy.

    Returns:
        list of SheetYielder objects, one per sheet in the workbook.
    """
    def tuple_to_iso_date(tuple_date):
        # Format an xlrd date tuple as ISO-8601; the date part is omitted for
        # time-only cells and the time part for date-only cells.
        (y, m, d, hh, mm, ss) = tuple_date
        non_zero = lambda n: n != 0
        date = "%04d-%02d-%02d" % (y, m, d) if list(filter(non_zero, (y, m, d))) else ''
        time = "T%02d:%02d:%02d" % (hh, mm, ss) if list(filter(non_zero, (hh, mm, ss))) or not date else ''
        return date + time

    def format_excel_val(book, val_type, value, want_tuple_date):
        # val_type 2: number -- collapse whole-number floats to int.
        if val_type == 2:
            if value == int(value):
                value = int(value)
        # val_type 3: date -- decode using the workbook's date mode.
        elif val_type == 3:
            datetuple = xlrd.xldate_as_tuple(value, book.datemode)
            value = datetuple if want_tuple_date else tuple_to_iso_date(datetuple)
        # val_type 5: cell error -- translate the code to its text form.
        elif val_type == 5:
            value = xlrd.error_text_from_code[value]
        return value

    def xlrd_xsl_to_array(file_name, file_contents=None):
        book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand)
        formatter = lambda t_v: format_excel_val(book, t_v[0], t_v[1], False)
        row_builder = lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r))))
        data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)]
        if not on_demand:
            # Eagerly realise every sheet so xlrd's resources can be freed.
            for sheet in data:
                sheet.load()
            book.release_resources()
        return data

    return xlrd_xsl_to_array(file_name, file_contents)
Loads the old excel format files. New format files will automatically get loaded as well. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
juraj-google-style
def quickhull(sample):
    """Find the points of ``sample`` on its convex hull (quickhull algorithm).

    Args:
        sample: n x d matrix of data points as row vectors, where d should
            be two.

    Returns:
        A k x d matrix containing the convex hull data points, traversed in
        order with the first point repeated at the end; when n <= 2 the
        input is returned unchanged.
    """
    def join(left, right):
        # Concatenate two chains, dropping the shared first point of `right`.
        return np.concatenate((left, right[1:]))

    def dome(points, base):
        head, tail = base
        # Signed distances from the directed edge head->tail; positive means
        # the point lies to the left of the edge.
        normal = np.dot(((0, -1), (1, 0)), tail - head)
        dists = np.dot(points - head, normal)
        outside = points[dists > 0]
        if not len(outside):
            return base
        pivot = points[np.argmax(dists)]
        left_chain = dome(outside, np.concatenate(([head], [pivot])))
        right_chain = dome(outside, np.concatenate(([pivot], [tail])))
        return join(left_chain, right_chain)

    if len(sample) <= 2:
        return sample
    xs = sample[:, 0]
    base = np.take(sample, [np.argmin(xs), np.argmax(xs)], axis=0)
    return join(dome(sample, base), dome(sample, base[::-1]))
Find data points on the convex hull of a supplied data set Args: sample: data points as column vectors n x d n - number of samples d - data dimension (should be two) Returns: a k x d matrix containing the convex hull data points
juraj-google-style
def _queue_dag(self, name, *, data=None):
    """Add a new dag to the queue.

    If the stop-workflow flag is set, no new dag can be queued.

    Args:
        name (str): The name of the dag that should be queued.
        data (MultiTaskData): The data that should be passed on to the new
            dag.

    Raises:
        DagNameUnknown: If the specified dag name does not exist.

    Returns:
        str: The name of the queued dag, or None when the workflow is
        stopping.
    """
    if self._stop_workflow:
        return None
    if (name not in self._dags_blueprint):
        raise DagNameUnknown()
    # Deep-copy the blueprint so runtime state never leaks back into it.
    new_dag = copy.deepcopy(self._dags_blueprint[name])
    new_dag.workflow_name = self.name
    # Dispatch the dag on its own queue and track the async result by name.
    self._dags_running[new_dag.name] = self._celery_app.send_task(
        JobExecPath.Dag,
        args=(new_dag, self._workflow_id, data),
        queue=new_dag.queue,
        routing_key=new_dag.queue)
    return new_dag.name
Add a new dag to the queue. If the stop workflow flag is set, no new dag can be queued. Args: name (str): The name of the dag that should be queued. data (MultiTaskData): The data that should be passed on to the new dag. Raises: DagNameUnknown: If the specified dag name does not exist Returns: str: The name of the queued dag.
codesearchnet
def load_config(self, filepath=None):
    """Load GUI settings, instruments, scripts and probes from a config file.

    Falls back to the default configuration (creating default folders as
    needed) when the file is missing or invalid.

    Args:
        filepath: path to the saved config file, or None to open blank.
    """
    def load_settings(filepath):
        # Load instruments, scripts and probes from the config file,
        # returning empty dicts for anything that fails to load.
        instruments_loaded = {}
        probes_loaded = {}
        scripts_loaded = {}
        if filepath and os.path.isfile(filepath):
            in_data = load_b26_file(filepath)
            instruments = in_data['instruments'] if 'instruments' in in_data else {}
            scripts = in_data['scripts'] if 'scripts' in in_data else {}
            probes = in_data['probes'] if 'probes' in in_data else {}
            try:
                instruments_loaded, failed = Instrument.load_and_append(instruments)
                if len(failed) > 0:
                    print(('WARNING! Following instruments could not be loaded: ', failed))
                scripts_loaded, failed, instruments_loaded = Script.load_and_append(
                    script_dict=scripts, instruments=instruments_loaded,
                    log_function=self.log, data_path=self.gui_settings['data_folder'])
                if len(failed) > 0:
                    print(('WARNING! Following scripts could not be loaded: ', failed))
                # NOTE(review): the third result is bound to the misspelled
                # 'instruments_loadeds' and then unused -- the updated
                # instruments dict from Probe.load_and_append is discarded;
                # confirm whether this is intentional.
                probes_loaded, failed, instruments_loadeds = Probe.load_and_append(
                    probe_dict=probes, probes=probes_loaded, instruments=instruments_loaded)
                self.log('Successfully loaded from previous save.')
            except ImportError:
                self.log('Could not load instruments or scripts from file.')
                self.log('Opening with blank GUI.')
        return instruments_loaded, scripts_loaded, probes_loaded

    config = None
    try:
        config = load_b26_file(filepath)
        config_settings = config['gui_settings']
        # Warn when the file's recorded settings path disagrees with where
        # it was actually loaded from, then trust the actual path.
        if config_settings['gui_settings'] != filepath:
            print((
                'WARNING path to settings file ({:s}) in config file is different from path of settings file ({:s})'.format(
                    config_settings['gui_settings'], filepath)))
        config_settings['gui_settings'] = filepath
    except Exception as e:
        # Any failure to read/parse the file drops back to the defaults.
        if filepath:
            self.log('The filepath was invalid --- could not load settings. \nLoading blank GUI.')
        config_settings = self._DEFAULT_CONFIG

    # Validate every configured path, creating missing folders and falling
    # back to the default path when creation fails.
    for x in self._DEFAULT_CONFIG.keys():
        if x in config_settings:
            if not os.path.exists(config_settings[x]):
                try:
                    os.makedirs(config_settings[x])
                except Exception:
                    config_settings[x] = self._DEFAULT_CONFIG[x]
                    os.makedirs(config_settings[x])
                    # NOTE(review): .format() here has no placeholder in the
                    # string, so the path is never shown -- confirm intent.
                    print(('WARNING: failed validating or creating path: set to default path'.format(config_settings[x])))
        else:
            config_settings[x] = self._DEFAULT_CONFIG[x]
            os.makedirs(config_settings[x])
            print(('WARNING: path {:s} not specified set to default {:s}'.format(x, config_settings[x])))

    # Record where the settings came from if the location exists.
    if filepath is not None and os.path.exists(os.path.dirname(filepath)):
        config_settings['gui_settings'] = filepath
    self.gui_settings = config_settings
    if(config):
        self.gui_settings_hidden = config['gui_settings_hidden']
    else:
        self.gui_settings_hidden['script_source_folder'] = ''
    self.instruments, self.scripts, self.probes = load_settings(filepath)
    # Refresh the GUI trees to reflect the newly loaded state.
    self.refresh_tree(self.tree_gui_settings, self.gui_settings)
    self.refresh_tree(self.tree_scripts, self.scripts)
    self.refresh_tree(self.tree_settings, self.instruments)
    self._hide_parameters(filepath)
Loads the GUI configuration from the given file, falling back to default settings when the file is missing or invalid. Args: filepath: path to the saved configuration file.
juraj-google-style
def slice_arrays(arrays, indices, contiguous=True):
    """Slice batches out of provided arrays (workaround for eager tensors).

    Eager tensors follow symbolic-TF slicing semantics rather than numpy's,
    so `generic_utils.slice_arrays` cannot be used directly on them; tensor
    inputs are sliced via indexing/`concat` instead (at a performance cost).

    Args:
        arrays: Single array or list of arrays.
        indices: List of indices in the array that should be included in the
            output batch.
        contiguous: Boolean flag indicating whether the indices are
            contiguous.

    Returns:
        Slice of data (either single array or list of arrays).
    """
    was_single = not isinstance(arrays, list)
    if was_single:
        arrays = [arrays]
    if any(tensor_util.is_tf_type(x) for x in arrays):
        if contiguous:
            out = [x[indices[0]:indices[-1] + 1] for x in arrays]
        else:
            # Gather one-element slices and concatenate them along the batch axis.
            out = [
                array_ops.concat([x[i:i + 1] for i in indices], axis=0)
                for x in arrays
            ]
    else:
        out = generic_utils.slice_arrays(arrays, indices)
    return out[0] if was_single else out
Slices batches out of provided arrays (workaround for eager tensors). Unfortunately eager tensors don't have the same slicing behavior as Numpy arrays (they follow the same slicing behavior as symbolic TF tensors), hence we cannot use `generic_utils.slice_arrays` directly and we have to implement this workaround based on `concat`. This has a performance cost. Args: arrays: Single array or list of arrays. indices: List of indices in the array that should be included in the output batch. contiguous: Boolean flag indicating whether the indices are contiguous. Returns: Slice of data (either single array or list of arrays).
github-repos
def __init__(self,corpus_dir,datastore_type='file',db_name='corpus.db'):
    """Set up the corpus builder and its backing datastore.

    Args:
        corpus_dir (str): The directory to save the generated corpus.
        datastore_type (Optional[str]): Format to save generated corpus.
            Specify either 'file' or 'sqlite'.
        db_name (Optional[str]): Name of database if 'sqlite' is selected.
    """
    # Goose article extractor, configured with a browser user agent
    # (presumably to avoid bot blocking) and the soup parser.
    self.g = Goose({'browser_user_agent': 'Mozilla','parser_class':'soup'})
    self.corpus_dir = corpus_dir
    self.datastore_type = datastore_type
    self.db_name = db_name
    # Running counters of processing outcomes.
    self.stats = defaultdict(int)
    self._create_corpus_dir(self.corpus_dir)
    self.db = None
    if self.datastore_type == 'sqlite':
        self.db = self.corpus_dir + '/' + self.db_name
        self._set_up_db(self.db)
Read links and associated categories for specified articles in text file seperated by a space Args: corpus_dir (str): The directory to save the generated corpus datastore_type (Optional[str]): Format to save generated corpus. Specify either 'file' or 'sqlite'. db_name (Optional[str]): Name of database if 'sqlite' is selected.
juraj-google-style
def parse(self, args: List[str]) -> Optional[argparse.Namespace]:
    """Parses a list of string inputs.

    The parsed namespace contains these attributes:
        output_name: Optional[str], the output variable name.
        verbose: bool, whether to display more details of the magic execution.
        query: Optional[List[str]], the beam SQL query to execute.

    Args:
        args: The raw argument strings to parse.

    Returns:
        The parsed args, or None if parsing fails.
    """
    try:
        return self._parser.parse_args(args)
    except KeyboardInterrupt:
        raise
    except (SystemExit, Exception):
        # argparse reports errors by raising SystemExit; the original used a
        # bare `except:` which also swallowed e.g. GeneratorExit. Catch the
        # two cases that actually mean "parsing failed" and signal with None.
        return None
Parses a list of string inputs. The parsed namespace contains these attributes: output_name: Optional[str], the output variable name. verbose: bool, whether to display more details of the magic execution. query: Optional[List[str]], the beam SQL query to execute. Returns: The parsed args or None if fail to parse.
github-repos
def get_by(self, field, value):
    """Get the resource by passing a field and its value.

    Note: Uses get_all with a filter; the search is case-insensitive.

    Args:
        field: Field name to filter.
        value: Value to filter.

    Returns:
        dict
    """
    if not field:
        logger.exception(RESOURCE_CLIENT_INVALID_FIELD)
        raise ValueError(RESOURCE_CLIENT_INVALID_FIELD)
    query_filter = "\"{0}='{1}'\"".format(field, value)
    matches = self.get_all(filter=query_filter)
    if "." not in field:
        # For top-level fields, enforce an exact case-insensitive match,
        # since the server-side filter may return partial matches.
        matches = [item for item in matches
                   if str(item.get(field, "")).lower() == value.lower()]
    return matches
Get the resource by passing a field and its value. Note: This function uses get_all passing a filter. The search is case-insensitive. Args: field: Field name to filter. value: Value to filter. Returns: dict
juraj-google-style
def _parse_networks(self, config): networks = list() regexp = 'network (.+)/(\\d+) area (\\d+\\.\\d+\\.\\d+\\.\\d+)' matches = re.findall(regexp, config) for (network, netmask, area) in matches: networks.append(dict(network=network, netmask=netmask, area=area)) return dict(networks=networks)
Parses config file for the networks advertised by the OSPF process Args: config(str): Running configuration Returns: list: dict: keys: network (str) netmask (str) area (str)
codesearchnet
def _vmap_for_bhqkv(mask_function: Callable, bh_indices: bool=True) -> Callable: dimensions = [(None, None, None, 0), (None, None, 0, None)] if bh_indices: dimensions.extend([(None, 0, None, None), (0, None, None, None)]) for dims in dimensions: mask_function = torch.vmap(mask_function, in_dims=dims, out_dims=0) return mask_function
Used to vmap our mask_functions over the q_idx and kv_idx dimensions of the inputs. Optionally, vmap over the batch and head indices as well if `bh_indices=True`. Using vmap here allows us to keep the performance of vectorized ops, while having a single set of primitive functions between attention interfaces (i.e. between flex and sdpa/eager, FA2 being a bit different). Args: mask_function (`Callable`): The mask_function to vmap. bh_indices (`bool`, optional): Whether to vmap over the batch and head indices as well, or only q and kv indices. Returns: Callable: The vmapped function.
github-repos
def encode_function_call(self, function_name, args):
    """Return the encoded function call.

    Args:
        function_name (str): One of the existing functions described in the
            contract interface.
        args (List[object]): The function arguments that will be encoded and
            used in the contract execution in the vm.

    Raises:
        ValueError: If ``function_name`` is not part of the interface.

    Returns:
        bin: The 4-byte function selector followed by the ABI-encoded
        arguments, per the Ethereum Contract ABI.
    """
    if (function_name not in self.function_data):
        # BUGFIX: the original message misspelled 'Unknown'.
        raise ValueError('Unknown function {}'.format(function_name))
    description = self.function_data[function_name]
    # Selector: the function's 4-byte (zero-padded) prefix.
    function_selector = zpad(encode_int(description['prefix']), 4)
    arguments = encode_abi(description['encode_types'], args)
    return (function_selector + arguments)
Return the encoded function call. Args: function_name (str): One of the existing functions described in the contract interface. args (List[object]): The function arguments that wll be encoded and used in the contract execution in the vm. Return: bin: The encoded function name and arguments so that it can be used with the evm to execute a funcion call, the binary string follows the Ethereum Contract ABI.
codesearchnet
def __init__(self, features: List[np.ndarray], timestamps: np.ndarray, schema: Optional[Schema]=None) -> None:
    """Initialize the IndexData object by setting features and timestamps.

    Args:
        features: Per-feature arrays backing the index.
        timestamps: Timestamps aligned with the feature arrays.
        schema: Optional schema; when given, check_schema validates the
            stored data against it.

    Raises:
        ValueError: If features are not one-dimensional arrays, or if the
            number of elements in features and timestamps do not match
            (raised by check_schema -- only when a schema is supplied).
    """
    self.features = features
    self.timestamps = timestamps
    # Assign first: check_schema validates the attributes set above.
    if schema is not None:
        self.check_schema(schema)
Initializes the IndexData object by checking and setting the features and timestamps. Raises: ValueError: If features are not one-dimensional arrays. ValueError: If the number of elements in features and timestamps do not match.
github-repos
def insert_arguments_into_match_query(compilation_result, arguments):
    """Insert the arguments into the compiled MATCH query to form a complete query.

    Args:
        compilation_result: a CompilationResult object derived from the
            GraphQL compiler.
        arguments: dict, mapping argument name to its value, for every
            parameter the query expects.

    Returns:
        string, a MATCH query with inserted argument data.

    Raises:
        AssertionError: If the compilation result is not in the MATCH language.
    """
    if compilation_result.language != MATCH_LANGUAGE:
        raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))
    argument_types = compilation_result.input_metadata
    sanitized = {}
    for key, value in six.iteritems(arguments):
        # Each value is escaped/serialized according to its declared type.
        sanitized[key] = _safe_match_argument(argument_types[key], value)
    return compilation_result.query.format(**sanitized)
Insert the arguments into the compiled MATCH query to form a complete query. Args: compilation_result: a CompilationResult object derived from the GraphQL compiler arguments: dict, mapping argument name to its value, for every parameter the query expects. Returns: string, a MATCH query with inserted argument data
juraj-google-style
def get_updates_for(self, inputs):
    """Deprecated, do NOT use!

    Retrieves updates relevant to a specific set of inputs.

    Args:
        inputs: Input tensor or list/tuple of input tensors (ignored; kept
            for interface compatibility).

    Returns:
        List of update ops of the layer that depend on `inputs`.
    """
    deprecation_message = (
        '`layer.get_updates_for` is deprecated and will be removed in a '
        'future version. Please use `layer.updates` method instead.')
    warnings.warn(deprecation_message)
    return self.updates
Deprecated, do NOT use! Retrieves updates relevant to a specific set of inputs. Args: inputs: Input tensor or list/tuple of input tensors. Returns: List of update ops of the layer that depend on `inputs`.
github-repos
def load_method(path, method, class_name=None, instance_creator=None):
    """Return the named function or bound method from the module at ``path``.

    Args:
        path: The path to the module containing the method or function.
        method: The name of the function.
        class_name: The name of the class if the function is a method.
        instance_creator: Optional dotted name (relative to the module) of a
            zero-argument callable that returns the class instance.

    Returns:
        The bound method (when class_name is given) or the module-level
        function.
    """
    module = load_module(path)
    if not class_name:
        return getattr(module, method)
    class_type = getattr(module, class_name)
    if instance_creator:
        # BUGFIX: the original walked the dotted path into a throwaway
        # variable and then called getattr(module, instance_creator), which
        # fails for dotted names. Resolve each segment in turn instead.
        target = module
        for part in instance_creator.split('.'):
            target = getattr(target, part)
        instance = target()
    else:
        instance = class_type()
    return getattr(instance, method)
Returns an instance of the method specified. Args: path: The path to the module containing the method or function. method: The name of the function. class_name: The name of the class if the function is a method. instance_creator: The dotted name of the method that returns the class instance.
juraj-google-style
def compute_invariants(self, graph_file, input_format, invariants=Invariants.ALL, email=None, use_threads=False, callback=None):
    """Compute invariants from a GraphML file using the remote grute services.

    Args:
        graph_file (str): The filename of the graphml file.
        input_format (str): One of grute.GraphFormats.
        invariants (str[]): grute.Invariants to compute (default: all).
        email (str): The email to notify upon completion (default: self.email).
        use_threads (bool): Run the computation in a background Python thread.
        callback (function): One-argument function run upon completion when
            use_threads is True; ignored otherwise.

    Returns:
        HTTP response if use_threads is False, otherwise None.

    Raises:
        ValueError: If the graph file does not exist or arguments are invalid.
    """
    if (email is None):
        email = self.email
    if (input_format not in GraphFormats._any):
        raise ValueError('Invalid input format, {}.'.format(input_format))
    if (not (set(invariants) <= set(Invariants.ALL))):
        raise ValueError('Invariants must be a subset of Invariants.ALL.')
    # The callback is only exercised on the threaded path, so it is only
    # validated there: must be a callable of exactly one argument.
    if (use_threads and (callback is not None)):
        if (not hasattr(callback, '__call__')):
            raise ValueError('callback must be a function.')
        if (len(inspect.getargspec(callback).args) != 1):
            raise ValueError('callback must take exactly 1 argument.')
    url = 'graphupload/{}/{}/{}/'.format(email, input_format, '/'.join(invariants))
    # Arguments become URL path segments, so embedded spaces are rejected.
    if (' ' in url):
        raise ValueError('Arguments cannot have spaces in them.')
    if (not os.path.exists(graph_file)):
        raise ValueError('File {} does not exist.'.format(graph_file))
    if use_threads:
        # Fire-and-forget: the thread invokes the callback when done.
        upload_thread = threading.Thread(target=self._run_compute_invariants, args=[url, graph_file, callback])
        upload_thread.start()
    else:
        return self._run_compute_invariants(url, graph_file)
    return
Compute invariants from an existing GraphML file using the remote grute graph services. Arguments: graph_file (str): The filename of the graphml file input_format (str): One of grute.GraphFormats invariants (str[]: Invariants.ALL)*: An array of grute.Invariants to compute on the graph email (str: self.email)*: The email to notify upon completion use_threads (bool: False)*: Whether to use Python threads to run computation in the background when waiting for the server to return the invariants callback (function: None)*: The function to run upon completion of the call, if using threads. (Will not be called if use_threads is set to False.) Returns: HTTP Response if use_threads is False. Otherwise, None Raises: ValueError: If the graph file does not exist, or if there are issues with the passed arguments RemoteDataUploadError: If there is an issue packing the file RemoteError: If the server experiences difficulty computing invs
codesearchnet
def _GetTableNames(self, database):
    """Retrieves the table names in a database.

    Args:
        database (pyesedb.file): ESE database.

    Returns:
        list[str]: table names.
    """
    return [esedb_table.name for esedb_table in database.tables]
Retrieves the table names in a database. Args: database (pyesedb.file): ESE database. Returns: list[str]: table names.
juraj-google-style
def plot_axis(self, ax, legend, ladder=False, default_width=1, match_only=None, colour=None, colour_function=None, cmap=None, default=None, width_field=None, **kwargs): default_c = None patches = [] for iv in self.__list: origin = (0, iv.top.z) d = legend.get_decor(iv.primary, match_only=match_only) thick = (iv.base.z - iv.top.z) if ladder: if (width_field is not None): w = iv.data.get(width_field, 1) w = ((default_width * w) / self.max_field(width_field)) default_c = 'gray' elif (legend is not None): w = (d.width or default_width) try: w = ((default_width * w) / legend.max_width) except: w = default_width else: w = default_width this_patch_kwargs = kwargs.copy() lw = this_patch_kwargs.pop('lw', 0) ec = this_patch_kwargs.pop('ec', 'k') fc = (this_patch_kwargs.pop('fc', None) or default_c or d.colour) if (colour is None): rect = mpl.patches.Rectangle(origin, w, thick, fc=fc, lw=lw, hatch=d.hatch, ec=ec, **this_patch_kwargs) ax.add_patch(rect) else: rect = mpl.patches.Rectangle(origin, w, thick, lw=lw, ec=ec, **this_patch_kwargs) patches.append(rect) if (colour is not None): cmap = (cmap or 'viridis') p = mpl.collections.PatchCollection(patches, cmap=cmap, lw=lw) p.set_array(self.get_data(colour, colour_function, default=default)) ax.add_collection(p) cb = plt.colorbar(p) cb.outline.set_linewidth(0) return ax
Plotting, but only the Rectangles. You have to set up the figure. Returns a matplotlib axis object. Args: ax (axis): The matplotlib axis to plot into. legend (Legend): The Legend to use for colours, etc. ladder (bool): Whether to use widths or not. Default False. default_width (int): A width for the plot if not using widths. Default 1. match_only (list): A list of strings matching the attributes you want to compare when plotting. colour (str): Which data field to use for colours. cmap (cmap): Matplotlib colourmap. Default ``viridis``. default (float): The default (null) value. width_field (str): The field to use for the width of the patches. **kwargs are passed through to matplotlib's ``patches.Rectangle``. Returns: axis: The matplotlib.pyplot axis.
codesearchnet
def localize_file(path_or_buffer):
    """Ensure the target file is available in local storage.

    If the target is a URL or a file-like object, its contents are copied
    into a local file (named after the current process id when needed).

    Args:
        path_or_buffer: File path, file-like object, or URL of the target file.

    Returns:
        tuple: ``(filename, temporary_file_flag)`` where ``filename`` is a
        path in local storage and ``temporary_file_flag`` tells the caller
        whether the file is temporary and should be cleaned up.
    """
    path_or_buffer = _stringify_path(path_or_buffer)
    if _is_url(path_or_buffer):
        req = urlopen(path_or_buffer)
        filename = os.path.basename(req.geturl())
        # BUG FIX: the original used `is not ".pdf"`, an identity comparison
        # that is effectively always True, so every download was renamed.
        # Compare the extension's value with `!=` instead.
        if os.path.splitext(filename)[-1] != ".pdf":
            pid = os.getpid()
            filename = "{0}.pdf".format(pid)
        with open(filename, 'wb') as f:
            shutil.copyfileobj(req, f)
        return filename, True
    elif is_file_like(path_or_buffer):
        # Buffer contents are spooled to a pid-named local PDF file.
        pid = os.getpid()
        filename = "{0}.pdf".format(pid)
        with open(filename, 'wb') as f:
            shutil.copyfileobj(path_or_buffer, f)
        return filename, True
    else:
        # Already a local path; just expand '~'.
        return os.path.expanduser(path_or_buffer), False
Ensure the target file is available in local storage.

If the target file is remote (a URL) or a file-like object, this function
fetches its contents into local storage.

Args:
    path_or_buffer (str): File path, file-like object, or URL of the target file.

Returns:
    filename (str): File name in local storage.
    temporary_file_flag (bool): True when the returned file is temporary and
        should be removed by the caller.
juraj-google-style
def stop_gradient(input_layer): if input_layer.is_sequence(): result = [tf.stop_gradient(t) for t in input_layer.sequence] return input_layer.with_sequence(result) else: return tf.stop_gradient(input_layer)
Cuts off the gradient at this point. This works on both sequence and regular Pretty Tensors. Args: input_layer: The input. Returns: A new Pretty Tensor of the same type with stop_gradient applied.
codesearchnet
def clone(self, *args, **overrides):
    """Clones the Layout, overriding data and parameters.

    Args:
        *args: Positional arguments forwarded to the superclass clone
            (typically replacement data).
        **overrides: Keyword arguments forwarded to the superclass clone
            (e.g. ``shared_data`` or ``new_type``).

    Returns:
        Cloned Layout object.
    """
    clone = super(Layout, self).clone(*args, **overrides)
    # _max_cols is layout display state the generic clone does not carry
    # over, so copy it across explicitly.
    clone._max_cols = self._max_cols
    return clone
Clones the Layout, overriding data and parameters. Args: data: New data replacing the existing data shared_data (bool, optional): Whether to use existing data new_type (optional): Type to cast object to *args: Additional arguments to pass to constructor **overrides: New keyword arguments to pass to constructor Returns: Cloned Layout object
juraj-google-style
def get_peers(self, id=None, endpoint=None): return self._call_endpoint(GET_PEERS, id=id, endpoint=endpoint)
Get the current peers of a remote node Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
juraj-google-style
def _write_install_json(self, filename, install_json):
    """Write an install.json file, but only if the file already exists.

    Some projects bundle an App with multiple install.json files, typically
    prefixed with the App name (e.g., MyApp.install.json).

    Args:
        filename (str): The install.json file name.
        install_json (dict): The contents of the install.json file.
    """
    # NOTE(review): the guard deliberately rewrites only files that already
    # exist and records an error otherwise -- presumably to avoid creating
    # install.json files the project never had; confirm with callers. The
    # error text "Could not write file" is misleading for that case.
    if os.path.isfile(filename):
        with open(filename, 'w') as fh:
            json.dump(install_json, fh, indent=4, sort_keys=True)
    else:
        err = 'Could not write file: {}.'.format(filename)
        self.package_data['errors'].append(err)
Write install.json file. Some projects have bundles App with multiple install.json files. Typically these files are prefixed with the App name (e.g., MyApp.install.json). Args: filename (str): The install.json file name. install_json (dict): The contents of the install.json file.
juraj-google-style
def find_rootfs(conn, disk_root): rootfs = conn.inspect_os() if ((not rootfs) or (len(rootfs) > 1)): filesystems = conn.list_filesystems() if (disk_root in filesystems): rootfs = [disk_root] else: rootfs = [fs for fs in filesystems.keys() if (disk_root in fs)] if (not rootfs): raise GuestFSError('no root fs {0} could be found from list {1}'.format(disk_root, str(filesystems))) return sorted(rootfs)[0]
Find the image's device root filesystem, and return its path. 1. Use :func:`guestfs.GuestFS.inspect_os` method. If it returns more than one root filesystem or None, try: 2. Find an exact match of `disk_root` from :func:`guestfs.GuestFS.list_filesystems`, if none is found, try: 3. Return the device that has the substring `disk_root` contained in it, from the output of :func:`guestfs.GuestFS.list_filesystems`. Args: conn(guestfs.GuestFS): Open GuestFS handle. disk_root(str): Root device to search for. Note that by default, if guestfs can deduce the filesystem, it will not be used. Returns: str: root device path Raises: :exc:`GuestFSError` if no root filesystem was found
codesearchnet
def add_vectors(self, vectors):
    """Add one vector, or a sequence of vectors, to the Bloch sphere.

    Args:
        vectors (array_like): Either a single vector, or a list/array of
            vectors of unit length or smaller.
    """
    if len(vectors) == 0:
        # Nothing to add. The original indexed vectors[0] unconditionally
        # and raised IndexError on empty input.
        return
    if isinstance(vectors[0], (list, np.ndarray)):
        # A sequence of vectors: add each one.
        self.vectors.extend(vectors)
    else:
        # A single bare vector.
        self.vectors.append(vectors)
Add a list of vectors to Bloch sphere. Args: vectors (array_like): Array with vectors of unit length or smaller.
codesearchnet
def inspect_task(self, task): url = self._url('/tasks/{0}', task) return self._result(self._get(url), True)
Retrieve information about a task. Args: task (str): Task ID Returns: (dict): Information about the task. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def check_candidate_exists(self, basepath, candidates):
    """Check which candidate paths exist inside a directory.

    Args:
        basepath (str): Directory path in which to search for candidates.
        candidates (list): Candidate file paths relative to ``basepath``.

    Returns:
        list: Absolute paths of the candidates that exist.
    """
    absolute_paths = (os.path.join(basepath, candidate) for candidate in candidates)
    return [path for path in absolute_paths if os.path.exists(path)]
Check that at least one candidate exist into a directory. Args: basepath (str): Directory path where to search for candidate. candidates (list): List of candidate file paths. Returns: list: List of existing candidates.
juraj-google-style
def __partial_trace_vec(vec, trace_systems, dimensions, reverse=True):
    """Partial trace over subsystems of a multi-partite state vector.

    Args:
        vec (vector_like): Complex state vector.
        trace_systems (list(int)): Subsystems (indexed from 0) to trace over.
        dimensions (list(int)): Dimensions of the subsystems.
        reverse (bool): Ordering of systems. If True system-0 is the
            right-most system in the tensor product; if False it is the
            left-most.

    Returns:
        ndarray: Density matrix of the remaining subsystems.
    """
    if reverse:
        # Re-index so the axes of the reshaped tensor line up with the
        # requested subsystem ordering.
        dimensions = dimensions[::-1]
        trace_systems = len(dimensions) - 1 - np.array(trace_systems)
    rho = vec.reshape(dimensions)
    # Tracing a pure-state vector: contract the traced axes of the vector
    # with the conjugate of itself.
    rho = np.tensordot(rho, rho.conj(), axes=(trace_systems, trace_systems))
    # np.product was a deprecated alias removed in NumPy 2.0; use np.prod.
    d = int(np.sqrt(np.prod(rho.shape)))
    return rho.reshape(d, d)
Partial trace over subsystems of multi-partite vector. Args: vec (vector_like): complex vector N trace_systems (list(int)): a list of subsystems (starting from 0) to trace over. dimensions (list(int)): a list of the dimensions of the subsystems. If this is not set it will assume all subsystems are qubits. reverse (bool): ordering of systems in operator. If True system-0 is the right most system in tensor product. If False system-0 is the left most system in tensor product. Returns: ndarray: A density matrix with the appropriate subsystems traced over.
codesearchnet
def Pack(cls, obj, version): if isinstance(obj, ServiceQuery): return str(obj) return obj
Pack the given object using AdWords-specific logic. Args: obj: an object to be packed for SOAP using AdWords-specific logic, if applicable. version: the version of the current API, e.g. 'v201809' Returns: The given object packed with AdWords-specific logic for SOAP, if applicable. Otherwise, returns the given object unmodified.
codesearchnet
def get_normalized_variable_map(scope_or_module, collection=tf.GraphKeys.GLOBAL_VARIABLES, context=None, group_sliced_variables=True): scope_name = get_variable_scope_name(scope_or_module) if (context is None): context = scope_or_module prefix = get_variable_scope_name(context) prefix_length = ((len(prefix) + 1) if prefix else 0) if (not _is_scope_prefix(scope_name, prefix)): raise ValueError("Scope '{}' is not prefixed by '{}'.".format(scope_name, prefix)) variables = get_variables_in_scope(scope_name, collection) if (not group_sliced_variables): single_vars = variables grouped_vars = dict() else: (single_vars, grouped_vars) = _get_sliced_variables(variables) var_map = {var.op.name[prefix_length:]: var for var in single_vars} for (full_name, var_group) in grouped_vars.items(): name = full_name[prefix_length:] if (name in var_map): raise ValueError(('Mixing slices and non-slices with the same name: ' + str(name))) var_map[name] = var_group return var_map
Builds map of `tf.Variable`s in scope or module with normalized names. The names of the variables are normalized to remove the scope prefix. Args: scope_or_module: Scope or module to build map from. collection: Collection to restrict query to. By default this is `tf.Graphkeys.GLOBAL_VARIABLES`, which includes non-trainable variables such as moving averages. context: Scope or module, identical to or parent of `scope`. If given, this will be used as the stripped prefix. By default `None`, which means `context=scope`. group_sliced_variables: Boolean, if set to True, sliced variables are grouped together in the returned map; if set to False, each partition of a sliced variable is a separate (key, value) pair. Returns: Dictionary mapping normalized variable name to `tf.Variable`, or a list of `tf.Variables` if the variable is a sliced (partitioned) variable. Raises: ValueError: If `context` is given but is not a proper prefix of `scope`.
codesearchnet
def submit_batch_prediction(job_request, job_id=None): if (job_id is None): job_id = ('prediction_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')) job = {'job_id': job_id, 'prediction_input': job_request} context = datalab.Context.default() cloudml = discovery.build('ml', 'v1', credentials=context.credentials) request = cloudml.projects().jobs().create(body=job, parent=('projects/' + context.project_id)) request.headers['user-agent'] = 'GoogleCloudDataLab/1.0' request.execute() return Job(job_id)
Submit a batch prediction job. Args: job_request: the arguments of the training job in a dict. For example, { 'version_name': 'projects/my-project/models/my-model/versions/my-version', 'data_format': 'TEXT', 'input_paths': ['gs://my_bucket/my_file.csv'], 'output_path': 'gs://my_bucket/predict_output', 'region': 'us-central1', 'max_worker_count': 1, } job_id: id for the training job. If None, an id based on timestamp will be generated. Returns: A Job object representing the batch prediction job.
codesearchnet
def get_capacity_grav(self, min_voltage=None, max_voltage=None, use_overall_normalization=True): pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage) normalization_mass = (self.normalization_mass if (use_overall_normalization or (len(pairs_in_range) == 0)) else pairs_in_range[(- 1)].mass_discharge) return (sum([pair.mAh for pair in pairs_in_range]) / normalization_mass)
Get the gravimetric capacity of the electrode. Args: min_voltage (float): The minimum allowable voltage for a given step. max_voltage (float): The maximum allowable voltage allowable for a given step. use_overall_normalization (booL): If False, normalize by the discharged state of only the voltage pairs matching the voltage criteria. if True, use default normalization of the full electrode path. Returns: Gravimetric capacity in mAh/g across the insertion path (a subset of the path can be chosen by the optional arguments).
codesearchnet
def publish_metric(self, metric_name, metric_value, epoch_seconds=None):
    """Record a single hit on a given metric.

    Args:
        metric_name: The name of the metric to record with Carbon.
        metric_value: The value to record with Carbon.
        epoch_seconds: Optional explicit timestamp for the hit; defaults to
            the reactor's current time.

    Returns:
        None
    """
    when = self._reactor.seconds() if epoch_seconds is None else epoch_seconds
    # Carbon expects an integer epoch timestamp.
    self._client_factory.publish_metric(metric_name, metric_value, int(when))
Record a single hit on a given metric. Args: metric_name: The name of the metric to record with Carbon. metric_value: The value to record with Carbon. epoch_seconds: Optionally specify the time for the metric hit. Returns: None
codesearchnet
def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth, max_depth): side = random.randrange(2) left_depth = random.randrange((min_depth if side else 0), (max_depth + 1)) right_depth = random.randrange((min_depth if (not side) else 0), (max_depth + 1)) var_index = random.randrange(len(vlist)) var = vlist[var_index] consts = (vlist[:var_index] + vlist[(var_index + 1):]) left = random_expr_with_required_var(left_depth, (var if side else None), consts, ops) right = random_expr_with_required_var(right_depth, (var if (not side) else None), consts, ops) left_str = str(left) right_str = str(right) target = str(algebra_inverse_solve(left, right, var, solve_ops)) sample = ('%s:%s=%s' % (var, left_str, right_str)) return (sample, target)
Randomly generate an algebra inverse dataset sample. Given an input equation and variable, produce the expression equal to the variable. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. solve_ops: See `solve_ops` documentation in `algebra_inverse_solve`. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. Returns: sample: String representation of the input. Will be of the form 'solve_var:left_side=right_side'. target: String representation of the solution.
codesearchnet
def layer_preprocess(layer_input, hparams, layer_collection=None): assert "a" not in hparams.layer_preprocess_sequence, ( "No residual connections allowed in hparams.layer_preprocess_sequence") assert "z" not in hparams.layer_preprocess_sequence, ( "No residual connections allowed in hparams.layer_preprocess_sequence") return layer_prepostprocess( None, layer_input, sequence=hparams.layer_preprocess_sequence, dropout_rate=hparams.layer_prepostprocess_dropout, norm_type=hparams.norm_type, depth=None, epsilon=hparams.norm_epsilon, dropout_broadcast_dims=comma_separated_string_to_integer_list( getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")), default_name="layer_prepostprocess", layer_collection=layer_collection)
Apply layer preprocessing. See layer_prepostprocess() for details. A hyperparameters object is passed for convenience. The hyperparameters that may be used are: layer_preprocess_sequence layer_prepostprocess_dropout norm_type hidden_size norm_epsilon Args: layer_input: a Tensor hparams: a hyperparameters object. layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: a Tensor
juraj-google-style
def select_executor(elem, doc):
    """Determines the executor for the code in ``elem.text``.

    The element's attributes and classes select the executor in this
    priority order (highest to lowest):

    - custom command (``cmd=...``), used verbatim
    - ``runas=...``, a key into EXECUTORS
    - the first element class, which names the language and thus executor

    Args:
        elem: The AST element.
        doc: The document (unused; kept for the filter signature).

    Returns:
        The command to execute code.
    """
    default = EXECUTORS['default']
    attributes = elem.attributes
    if 'cmd' in attributes:
        return attributes['cmd']
    if 'runas' in attributes:
        return EXECUTORS[attributes['runas']]
    first_class = elem.classes[0]
    if first_class != 'exec':
        return EXECUTORS[first_class]
    return default
Determines the executor for the code in `elem.text`. The elem attributes and classes select the executor in this order (highest to lowest): - custom commands (cmd=...) - runas (runas=...) takes a key for the executors - first element class (.class) determines language and thus executor Args: elem The AST element. doc The document. Returns: The command to execute code.
codesearchnet
def build_single_handler_applications(paths, argvs=None):
    """Return a dictionary mapping routes to Bokeh applications built using
    single handlers, for the specified files or directories.

    Args:
        paths (seq[str]): Paths to files or directories for creating Bokeh
            applications.
        argvs (dict[str, list[str]], optional): Mapping of paths to command
            line arguments to pass to the handler for each path.

    Returns:
        dict[str, Application]

    Raises:
        RuntimeError: If more than one application would be mounted at '/'.
    """
    applications = {}
    # BUG FIX: the original wrote `{} or argvs`, which evaluates to `argvs`
    # even when it is None (since {} is falsy) and then crashes on
    # argvs.get(...). The intent was to default to an empty dict.
    argvs = argvs or {}
    for path in paths:
        application = build_single_handler_application(path, argvs.get(path, []))
        route = application.handlers[0].url_path()
        if not route:
            # Apps with no natural URL path are mounted at the root; only
            # one such app is allowed.
            if '/' in applications:
                raise RuntimeError("Don't know the URL path to use for %s" % path)
            route = '/'
        applications[route] = application
    return applications
Return a dictionary mapping routes to Bokeh applications built using single handlers, for specified files or directories. This function iterates over ``paths`` and ``argvs`` and calls :func:`~bokeh.command.util.build_single_handler_application` on each to generate the mapping. Args: path (seq[str]) : paths to files or directories for creating Bokeh applications. argvs (dict[str, list[str]], optional) : mapping of paths to command line arguments to pass to the handler for each path Returns: dict[str, Application] Raises: RuntimeError
codesearchnet
def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None): model = self._config.model or models.InterestRateModelType.HULL_WHITE_ONE_FACTOR name = name or self._name + '_price' with tf.name_scope(name): valuation_date = dateslib.convert_to_date_tensor(market.date) strike = self._swap.fixed_rate() expiry_time = dateslib.daycount_actual_365_fixed(start_date=valuation_date, end_date=self._expiry_date, dtype=self._dtype) if model == models.InterestRateModelType.HULL_WHITE_ONE_FACTOR: option_value = self._price_hull_white_1_factor(valuation_date, market, strike, expiry_time) else: raise ValueError('Unsupported model.') return option_value
Returns the present value of the swaption on the valuation date. Args: market: A instance of type `ProcessedMarketData` which contains the necessary information for pricing the swaption. name: Python str. The name to give to the ops created by this function. Default value: `None` which maps to 'price'. Returns: A Rank `Tensor` of shape `batch_shape` containing the modeled price of each Swaption contract based on the input market data. Raises: ValueError: If an unsupported model is supplied to the function.
github-repos
def _randomFloats(self, shape, low=0.0, high=1.0, dtype=dtypes.float32): val = np.random.random_sample(shape) diff = high - low val *= diff val += low return constant_op.constant(val, dtype=dtype)
Generate a tensor of random floating-point values. Values will be continuously distributed in the range [low, high). Note that we use numpy to generate random numbers and then feed the result through a constant op to avoid the re-rolling of TensorFlow random ops on each run in graph mode. Args: shape: The output shape. low: Lower bound of random numbers generated, inclusive. high: Upper bound of random numbers generated, exclusive. dtype: The output dtype. Returns: A random tensor
github-repos
def accumulate_from_superclasses(cls, propname):
    """Traverse the class hierarchy and accumulate the special sets of names
    ``MetaHasProps`` stores on classes.

    Args:
        propname (str): Name of the special attribute to collect. Typically
            meaningful values are ``__container_props__``,
            ``__properties__``, and ``__properties_with_refs__``.

    Returns:
        set: Union of the ``propname`` sets across ``cls`` and its
        HasProps bases.
    """
    cachename = "__cached_all" + propname
    # Memoize directly on the class. cls.__dict__ (not getattr) is checked
    # so a subclass does not mistakenly reuse a superclass's cache entry.
    if cachename not in cls.__dict__:
        s = set()
        for c in inspect.getmro(cls):
            if issubclass(c, HasProps) and hasattr(c, propname):
                base = getattr(c, propname)
                s.update(base)
        setattr(cls, cachename, s)
    return cls.__dict__[cachename]
Traverse the class hierarchy and accumulate the special sets of names ``MetaHasProps`` stores on classes: Args: name (str) : name of the special attribute to collect. Typically meaningful values are: ``__container_props__``, ``__properties__``, ``__properties_with_refs__``
juraj-google-style
def _PrintEventLabelsCounter( self, event_labels_counter, session_identifier=None): if not event_labels_counter: return title = 'Event tags generated per label' if session_identifier: title = '{0:s}: {1:s}'.format(title, session_identifier) table_view = views.ViewsFactory.GetTableView( self._views_format_type, column_names=['Label', 'Number of event tags'], title=title) for key, value in sorted(event_labels_counter.items()): if key == 'total': continue table_view.AddRow([key, value]) try: total = event_labels_counter['total'] except KeyError: total = 'N/A' table_view.AddRow(['Total', total]) table_view.Write(self._output_writer)
Prints the event labels counter. Args: event_labels_counter (collections.Counter): number of event tags per label. session_identifier (Optional[str]): session identifier.
juraj-google-style
def __Build(leaves):
    """Recursively build the Merkle tree from one layer of nodes.

    Args:
        leaves (list): Items are of type MerkleTreeNode.

    Returns:
        MerkleTreeNode: the root node.

    Raises:
        Exception: If ``leaves`` is empty.
    """
    if len(leaves) < 1:
        raise Exception('Leaves must have length')
    if len(leaves) == 1:
        # A single node is the root of its (sub)tree.
        return leaves[0]
    # Each pair of children gets one parent; ceil(len/2) parents in total.
    num_parents = int((len(leaves) + 1) / 2)
    parents = [MerkleTreeNode() for i in range(0, num_parents)]
    for i in range(0, num_parents):
        node = parents[i]
        node.LeftChild = leaves[i * 2]
        leaves[i * 2].Parent = node
        if (i * 2 + 1 == len(leaves)):
            # Odd number of leaves: the last node is paired with itself.
            node.RightChild = node.LeftChild
        else:
            node.RightChild = leaves[i * 2 + 1]
            leaves[i * 2 + 1].Parent = node
        # Parent hash = Hash256 of the concatenated child hashes.
        hasharray = bytearray(node.LeftChild.Hash.ToArray() + node.RightChild.Hash.ToArray())
        node.Hash = UInt256(data=Crypto.Hash256(hasharray))
    return MerkleTree.__Build(parents)
Build the merkle tree. Args: leaves (list): items are of type MerkleTreeNode. Returns: MerkleTreeNode: the root node.
juraj-google-style
def get_contour_pd_plot(self):
    """Plot a contour phase diagram, where phase triangles are colored
    according to degree of instability (energy above hull) by interpolation.

    Currently only works for 3-component phase diagrams.

    Returns:
        A matplotlib plot object.
    """
    from scipy import interpolate
    from matplotlib import cm
    pd = self._pd
    entries = pd.qhull_entries
    data = np.array(pd.qhull_data)
    plt = self._get_2d_plot()
    # BUG FIX: the original used the invalid subscript data[(:, 0:2)]
    # (a decompilation artifact); standard NumPy slicing is data[:, 0:2].
    data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
    for i, e in enumerate(entries):
        data[i, 2] = self._pd.get_e_above_hull(e)
    gridsize = 0.005
    xnew = np.arange(0, 1.0, gridsize)
    ynew = np.arange(0, 1, gridsize)
    # Interpolate e_above_hull over the triangular coordinates.
    f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
    znew = np.zeros((len(ynew), len(xnew)))
    for i, xval in enumerate(xnew):
        for j, yval in enumerate(ynew):
            znew[j, i] = f(xval, yval)
    plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
    plt.colorbar()
    return plt
Plot a contour phase diagram plot, where phase triangles are colored according to degree of instability by interpolation. Currently only works for 3-component phase diagrams. Returns: A matplotlib plot object.
codesearchnet
def get_exe_info(dir_, flag_protected=False): ret = [] ff = glob.glob(os.path.join(dir_, "*.py")) ff = [f for f in ff if flag_protected or not os.path.basename(f).startswith("_")] ff.sort() for f in ff: _, filename = os.path.split(f) flag_error = False flag_gui = None descr = "(no doc)" try: with open(f, "r") as h: flag_gui = "QApplication" in h.read() try: script_ = None script_ = import_module(f) except SystemExit: descr = "? (called sys.exit())" else: if script_.__doc__ is not None: descr = script_.__doc__.strip().split("\n")[0] except Exception as e: flag_error = True descr = "*{0!s}*: {1!s}".format(e.__class__.__name__, str(e)) if len(descr) == 0: descr = "(no doc)" ret.append(ExeInfo(filename, descr, flag_error, flag_gui)) sisi_gra = [si for si in ret if si.flag_gui] sisi_cmd = [si for si in ret if not si.flag_gui] sisi_gra = sorted(sisi_gra, key=lambda x: x.filename) sisi_cmd = sorted(sisi_cmd, key=lambda x: x.filename) ret = sisi_cmd+sisi_gra return ret
Returns a list of ExeInfo objects, which represent Python scripts within dir_ Args: dir_: string, path to directory flag_protected: whether or not to include files starting with a '_' Returns: list of ExeInfo objects The ExeInfo objects represent the ".py" files in directory dir_,
juraj-google-style
def __init__(self, value=None): super(ApplicationData, self).__init__(value, Tags.APPLICATION_DATA)
Construct an ApplicationData object. Args: value (str): A string representing data for a particular namespace. Optional, defaults to None.
juraj-google-style
def get_country_by_id(self, country_id) -> 'Country': VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError) if country_id not in self._countries_by_id.keys(): for country in self.countries: if country.country_id == country_id: return country raise ValueError(country_id) else: return self._countries_by_id[country_id]
Gets a country in this coalition by its ID Args: country_id: country Id Returns: Country
juraj-google-style
def bind_to_storage_buffer(self, binding=0, *, offset=0, size=(- 1)) -> None: self.mglo.bind_to_storage_buffer(binding, offset, size)
Bind the buffer to a shader storage buffer. Args: binding (int): The shader storage binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
codesearchnet
def swo_disable(self, port_mask):
    """Disables ITM & Stimulus ports.

    Args:
        self (JLink): the ``JLink`` instance
        port_mask (int): mask specifying which ports to disable

    Returns:
        ``None``

    Raises:
        JLinkException: on error
    """
    status = self._dll.JLINKARM_SWO_DisableTarget(port_mask)
    if status == 0:
        return None
    raise errors.JLinkException(status)
Disables ITM & Stimulus ports. Args: self (JLink): the ``JLink`` instance port_mask (int): mask specifying which ports to disable Returns: ``None`` Raises: JLinkException: on error
juraj-google-style
def download_and_prep_data() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: mnist_dataset = tf.keras.datasets.mnist (tr_x, tr_y), (te_x, te_y) = mnist_dataset.load_data() tr_x = tr_x / 255.0 te_x = te_x / 255.0 return (tr_x, tr_y, te_x, te_y)
Download dataset and scale to [0, 1]. Returns: tr_x: Training data. tr_y: Training labels. te_x: Testing data. te_y: Testing labels.
github-repos
def replace_dots_to_underscores_at_last(path):
    """Replace dots with underscores in the final component of *path*.

    A dot is treated as a special character in some backends, so only the
    last '/'-separated segment is rewritten.

    Args:
        path (str): A target path string.

    Returns:
        str: Path whose last component has '.' replaced by '_'.
    """
    if path == '':
        return path
    head, sep, tail = path.rpartition('/')
    return head + sep + tail.replace('.', '_')
Replace dots ('.') with underscores in the last path component, since a dot is
treated as a special character in some backends.

Args:
    path (str): A target path string

Returns:
    str
juraj-google-style
def refresh_role(self, role, file_hierarchy):
    """Checks and refreshes (if needed) all assistants with given role.

    Args:
        role: role of assistants to refresh
        file_hierarchy: hierarchy as returned by devassistant.yaml_assistant_loader.\
            YamlAssistantLoader.get_assistants_file_hierarchy
    """
    if role not in self.cache:
        self.cache[role] = {}
    was_change = self._refresh_hierarchy_recursive(self.cache[role], file_hierarchy)
    if was_change:
        # Use a context manager so the cache file is closed even if the
        # YAML dump raises; the original left the handle open on error.
        with open(self.cache_file, 'w') as cf:
            yaml.dump(self.cache, cf, Dumper=Dumper)
Checks and refreshes (if needed) all assistants with given role. Args: role: role of assistants to refresh file_hierarchy: hierarchy as returned by devassistant.yaml_assistant_loader.\ YamlAssistantLoader.get_assistants_file_hierarchy
juraj-google-style
def init_on_device(device: 'torch.device', include_buffers: bool=False): if include_buffers: with device: yield return old_register_parameter = nn.Module.register_parameter if include_buffers: old_register_buffer = nn.Module.register_buffer def register_empty_parameter(module, name, param): old_register_parameter(module, name, param) if param is not None: param_cls = type(module._parameters[name]) kwargs = module._parameters[name].__dict__ kwargs['requires_grad'] = param.requires_grad module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs) def register_empty_buffer(module, name, buffer, persistent=True): old_register_buffer(module, name, buffer, persistent=persistent) if buffer is not None: module._buffers[name] = module._buffers[name].to(device) if include_buffers: tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']} else: tensor_constructors_to_patch = {} def patch_tensor_constructor(fn): def wrapper(*args, **kwargs): kwargs['device'] = device return fn(*args, **kwargs) return wrapper try: nn.Module.register_parameter = register_empty_parameter if include_buffers: nn.Module.register_buffer = register_empty_buffer for torch_function_name in tensor_constructors_to_patch.keys(): setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name))) yield finally: nn.Module.register_parameter = old_register_parameter if include_buffers: nn.Module.register_buffer = old_register_buffer for torch_function_name, old_torch_function in tensor_constructors_to_patch.items(): setattr(torch, torch_function_name, old_torch_function)
A context manager under which models are initialized with all parameters on the specified device. Args: device (`torch.device`): Device to initialize all parameters on. include_buffers (`bool`, *optional*): Whether or not to also put all buffers on the meta device while initializing. Example: ```python import torch.nn as nn from accelerate import init_on_device with init_on_device(device=torch.device("cuda")): tst = nn.Linear(100, 100) # on `cuda` device ```
github-repos
def add_child(self, child): if (not isinstance(child, Node)): raise TypeError('child must be a Node') self.children.append(child) child.parent = self
Add child to ``Node`` object Args: ``child`` (``Node``): The child ``Node`` to be added
codesearchnet
def m_seg(p1, p2, rad, dist): v = vector(p1, p2) m = unit(rotate(v, rad), dist) return translate(p1, m), translate(p2, m)
move segment by distance Args: p1, p2: point(x, y) rad: relative direction angle(radian) dist: distance Return: translated segment(p1, p2)
juraj-google-style
def _prefix_from_prefix_string(self, prefixlen_str):
    """Turn a prefix length string into an integer.

    Args:
        prefixlen_str (str): A decimal string containing the prefix length.

    Returns:
        int: The prefix length.

    Raises:
        NetmaskValueError: If the input is malformed or out of range.
    """
    try:
        # Reject anything that is not pure decimal digits (no sign, no
        # whitespace) even though int() would accept some of those forms.
        if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
            raise ValueError
        prefixlen = int(prefixlen_str)
        if not (0 <= prefixlen <= self._max_prefixlen):
            raise ValueError
    except ValueError:
        raise NetmaskValueError('%s is not a valid prefix length' % prefixlen_str)
    return prefixlen
Turn a prefix length string into an integer. Args: prefixlen_str: A decimal string containing the prefix length. Returns: The prefix length as an integer. Raises: NetmaskValueError: If the input is malformed or out of range.
juraj-google-style
def add_permissions(self, grp_name, resource, permissions): self.project_service.set_auth(self._token_project) self.project_service.add_permissions(grp_name, resource, permissions)
Add additional permissions for the group associated with the resource. Args: grp_name (string): Name of group. resource (intern.resource.boss.Resource): Identifies which data model object to operate on. permissions (list): List of permissions to add to the given resource Raises: requests.HTTPError on failure.
juraj-google-style
def _GetImportTimestamps(self, pefile_object): import_timestamps = [] if not hasattr(pefile_object, 'DIRECTORY_ENTRY_IMPORT'): return import_timestamps for importdata in pefile_object.DIRECTORY_ENTRY_IMPORT: dll_name = getattr(importdata, 'dll', '') try: dll_name = dll_name.decode('ascii') except UnicodeDecodeError: dll_name = dll_name.decode('ascii', errors='replace') if not dll_name: dll_name = '<NO DLL NAME>' timestamp = getattr(importdata.struct, 'TimeDateStamp', 0) if timestamp: import_timestamps.append([dll_name, timestamp]) return import_timestamps
Retrieves timestamps from the import directory, if available. Args: pefile_object (pefile.PE): pefile object. Returns: list[int]: import timestamps.
juraj-google-style
def __call__(self, *args, **kwargs) -> Any:
Calls the functor. Args: *args: Any positional arguments. **kwargs: Any keyword arguments. Returns: Any value.
github-repos
def group_alleles_by_start_end_Xbp(arr, bp=28):
    """Group alleles by matching start and end sequences.

    Args:
        arr (numpy.array): 2D int matrix of alleles.
        bp (int): Length of the start/end regions to group by.

    Returns:
        defaultdict of lists: key is the concatenated start + end string,
        value is the list of row indices of alleles sharing those regions.
    """
    starts = arr[:, :bp]
    ends = arr[:, -bp:]
    groups = defaultdict(list)
    # arr.shape[0] is the number of alleles; the original also unpacked the
    # sequence length into an unused local.
    n_alleles = arr.shape[0]
    for i in range(n_alleles):
        key = ''.join(str(x) for x in starts[i]) + ''.join(str(x) for x in ends[i])
        groups[key].append(i)
    return groups
Group alleles by matching starts and ends

Args:
    arr (numpy.array): 2D int matrix of alleles
    bp (int): length of the start and end regions to group by

Returns:
    dict of lists: key is the concatenated start + end string; value is the
    list of indices of alleles with matching start and end regions
juraj-google-style
def _update_in_hdx(self, object_type, id_field_name, file_to_upload=None, **kwargs): self._check_load_existing_object(object_type, id_field_name) self._merge_hdx_update(object_type, id_field_name, file_to_upload, **kwargs)
Helper method to check if HDX object exists in HDX and if so, update it Args: object_type (str): Description of HDX object type (for messages) id_field_name (str): Name of field containing HDX object identifier file_to_upload (Optional[str]): File to upload to HDX **kwargs: See below operation (string): Operation to perform eg. patch. Defaults to update. Returns: None
juraj-google-style
def update_variant_rank(self, case_obj, variant_type='clinical', category='snv'):
    """Update the ``variant_rank`` field for all variants in a case.

    Variants are ordered by descending rank score and assigned a 1-based
    rank. Should be run whenever variants are added to or removed from a
    case.

    Args:
        case_obj(Case): the case whose variants should be re-ranked.
        variant_type(str): e.g. 'clinical'.
        category(str): e.g. 'snv'.

    Raises:
        BulkWriteError: if any bulk update batch fails.
    """
    variants = self.variant_collection.find(
        {'case_id': case_obj['_id'],
         'category': category,
         'variant_type': variant_type}
    ).sort('rank_score', pymongo.DESCENDING)

    LOG.info('Updating variant_rank for all variants')

    def _flush(operations):
        # Submit one batch of update operations, logging on failure.
        try:
            self.variant_collection.bulk_write(operations, ordered=False)
        except BulkWriteError as err:
            LOG.warning('Updating variant rank failed')
            raise err

    requests = []
    for index, var_obj in enumerate(variants):
        # Flush in batches to keep each bulk_write payload bounded.
        if len(requests) > 5000:
            _flush(requests)
            requests = []
        requests.append(pymongo.UpdateOne(
            {'_id': var_obj['_id']},
            {'$set': {'variant_rank': index + 1}}))

    # Guard against an empty batch: pymongo's bulk_write raises
    # InvalidOperation when given an empty operations list (e.g. for a
    # case without any variants of this type/category).
    if requests:
        _flush(requests)

    LOG.info('Updating variant_rank done')
Update the variant rank for all variants in a case. Assigns each variant a rank based on descending rank score. Whenever variants are added to or removed from a case, the variant rank needs to be updated. Args: case_obj(Case) variant_type(str)
codesearchnet
def remove_repeated_comments(node):
    """Remove comments that repeat themselves.

    Multiple statements might be annotated with the same comment. This way
    if one of the statements is deleted during optimization passes, the
    comment won't be lost. This pass removes sequences of identical
    comments, leaving only the first one.

    Args:
        node: An AST.

    Returns:
        An AST where comments are not repeated in sequence.
    """
    previous = {'text': None}
    for child in gast.walk(node):
        if not anno.hasanno(child, 'comment'):
            continue
        current = anno.getanno(child, 'comment')
        # Drop a comment identical to the immediately preceding one.
        if current['text'] == previous['text']:
            anno.delanno(child, 'comment')
        previous = current
    return node
Remove comments that repeat themselves. Multiple statements might be annotated with the same comment. This way if one of the statements is deleted during optimization passes, the comment won't be lost. This pass removes sequences of identical comments, leaving only the first one. Args: node: An AST Returns: An AST where comments are not repeated in sequence.
codesearchnet
def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`.

    Args:
      name: The name of the attr to fetch.

    Returns:
      The value of the attr, as a Python object.

    Raises:
      ValueError: If this op does not have an attr with the given `name`.
    """
    # Oneof fields of the AttrValue proto that map directly to Python values.
    fields = ('s', 'i', 'f', 'b', 'type', 'shape', 'tensor', 'func')
    try:
        with c_api_util.tf_buffer() as buf:
            # Serialize the AttrValue proto for `name` into `buf` via the C API.
            pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)
            data = pywrap_tf_session.TF_GetBuffer(buf)
    except errors.InvalidArgumentError as e:
        # Surface a missing/invalid attr as ValueError, per the contract above.
        raise ValueError(e.message)
    x = attr_value_pb2.AttrValue()
    x.ParseFromString(data)
    oneof_value = x.WhichOneof('value')
    if oneof_value is None:
        # Treat an unset attr value as an empty list.
        return []
    if oneof_value == 'list':
        # Return the first populated list field; 'type' entries are converted
        # from proto DataType enums to tf DTypes.
        for f in fields:
            if getattr(x.list, f):
                if f == 'type':
                    return [dtypes.as_dtype(t) for t in x.list.type]
                else:
                    return list(getattr(x.list, f))
        # All list fields empty: the attr is an empty list.
        return []
    if oneof_value == 'type':
        return dtypes.as_dtype(x.type)
    assert oneof_value in fields, 'Unsupported field type in ' + str(x)
    return getattr(x, oneof_value)
Returns the value of the attr of this op with the given `name`. Args: name: The name of the attr to fetch. Returns: The value of the attr, as a Python object. Raises: ValueError: If this op does not have an attr with the given `name`.
github-repos
def path_new_using_function(
    w: int,
    h: int,
    func: Callable[[int, int, int, int, Any], float],
    userData: Any = 0,
    dcost: float = 1.41,
) -> tcod.path.AStar:
    """Return a new AStar using the given callable function.

    Args:
        w (int): Clipping width.
        h (int): Clipping height.
        func (Callable[[int, int, int, int, Any], float]): Edge cost
            callback -- presumably invoked with the from/to coordinates and
            `userData`; confirm against tcod's edge-cost convention.
        userData (Any): Extra value passed through to `func`.
        dcost (float): A multiplier for the cost of diagonal movement.
            Can be set to 0 to disable diagonal movement.

    Returns:
        AStar: A new AStar instance.
    """
    # Wrap the Python callback in tcod's internal edge-cost adapter, clipped
    # to a w x h area, and hand it to the AStar constructor.
    return tcod.path.AStar(tcod.path._EdgeCostFunc((func, userData), (w, h)), dcost)
Return a new AStar using the given callable function. Args: w (int): Clipping width. h (int): Clipping height. func (Callable[[int, int, int, int, Any], float]): userData (Any): dcost (float): A multiplier for the cost of diagonal movement. Can be set to 0 to disable diagonal movement. Returns: AStar: A new AStar instance.
codesearchnet
def commandline_parser(parser=None, arguments=None): if parser is None: parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(' Command line to execute all tasks in a recipe once. ( Common Entry Point )\n\n This script dispatches all the tasks in a JSON recipe to handlers in sequence.\n For each task, it calls a subprocess to execute the JSON instructions, waits\n for the process to complete and dispatches the next task, until all tasks are\n complete or a critical failure ( exception ) is raised.\n\n If an exception is raised in any task, all following tasks are not executed by design.\n\n Example: python run.py [path to recipe file]\n Caution: This script does NOT check if the last job finished, potentially causing overruns.\n Notes:\n - To avoid running the entire script when debugging a single task, the command line\n can easily replace "all" with the name of any "task" in the json. For example\n python tool/recipe.py scripts/say_hello.json\n\n - Can be easily replaced with the following to run only the "hello" task:\n python task/hello/run.py scripts/say_hello.json\n\n - Or specified further to run only the second hello task:\n python task/hello/run.py scripts/say_hello.json -i 2\n\n ')) if arguments is None: parser.add_argument('json', help='Path to recipe json file to load.') elif '-j' in arguments: parser.add_argument('--json', '-j', help='Path to recipe json file to load.') if arguments is None or '-p' in arguments: parser.add_argument('--project', '-p', help='Cloud ID of Google Cloud Project.', default=None) if arguments is None or '-k' in arguments: parser.add_argument('--key', '-k', help='API Key of Google Cloud Project.', default=None) if arguments is None or '-u' in arguments: parser.add_argument('--user', '-u', help='Path to USER credentials json file.', default=None) if arguments is None or '-s' in arguments: parser.add_argument('--service', '-s', help='Path to SERVICE credentials json 
file.', default=None) if arguments is None or '-c' in arguments: parser.add_argument('--client', '-c', help='Path to CLIENT credentials json file.', default=None) if arguments is None or '-t' in arguments: parser.add_argument('--task', '-t', help='Task number of the task to run starting at 1.', default=None, type=int) if arguments is None or '-v' in arguments: parser.add_argument('--verbose', '-v', help='Print all the steps as they happen.', action='store_true') if arguments is None or '-f' in arguments: parser.add_argument('--force', '-force', help='Not used but included for compatiblity with another script.', action='store_true') if arguments is None or '-tp' in arguments: parser.add_argument('--trace_print', '-tp', help='Execution trace written to stdout.', action='store_true') if arguments is None or '-tf' in arguments: parser.add_argument('--trace_file', '-tf', help='Execution trace written to file.', action='store_true') if arguments is None or '-ni' in arguments: parser.add_argument('--no_input', '-ni', help='Raise exception if fields requiring input are in recipe.', action='store_true') return parser
Used in StarThinker scripts as entry point for command line calls. Defines standard parameters used by almost every entry point. Usage example: ``` import argparse from starthinker.util.configuration import commandline_parser if __name__ == "__main__": # custom parameters parser = argparse.ArgumentParser() parser.add_argument('custom', help='custom parameter to be added.') # initialize project parser = commandline_parser(parser=parser, arguments=['-c', '-u']) args = parser.parse_args() # access arguments print(args.client) ``` Args: * parser: (ArgumentParser) optional custom argument parser * arguments: (list) optional list of short flags to add when invoking; all flags are added if None Returns: ArgumentParser - parser with added parameters
github-repos
def view_quick_save_page(name=None):
    """Quick save a page.

    .. note:: this is a bottle view

    * this view must be called with the PUT method

    Writes the new page content to ``<name>.rst`` without committing or
    redirecting.

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)

    Returns:
        'OK' (bottle 200 response) on success; bottle 404 for a PUT
        without a usable name.
    """
    # Make sure browsers never cache the save response.
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')

    if request.method == 'PUT':
        # Fall back to the form-supplied filename when none is in the URL.
        if name is None and len(request.forms.filename) > 0:
            name = request.forms.filename
        if name is None:
            return abort(404)

        filename = '{0}.rst'.format(name)
        # Decoding validates the payload is UTF-8; the previous code then
        # wrote the re-encoded bytes into a text-mode file, which raises
        # TypeError on Python 3. Write the decoded text instead, and use a
        # context manager so the handle is closed even if the write fails.
        content = request.body.read().decode('utf-8')
        with open(filename, 'w', encoding='utf-8') as file_handle:
            file_handle.write(content)
        return 'OK'
Quick save a page. .. note:: this is a bottle view * this view must be called with the PUT method write the new page content to the file, and not not commit or redirect Keyword Arguments: :name: (str) -- name of the rest file (without the .rst extension) Returns: bottle response object (200 OK)
codesearchnet
def load_module_functions(module):
    """Collect the functions defined in a python module.

    Args:
        module: python module object.

    Returns:
        dict: functions mapping for the module, e.g.
            {"func1_name": func1, "func2_name": func2}
    """
    return {
        name: member
        for name, member in vars(module).items()
        if validator.is_function(member)
    }
load python module functions. Args: module: python module Returns: dict: functions mapping for specified python module { "func1_name": func1, "func2_name": func2 }
juraj-google-style
def _load_from_file_object(self, f):
    """Load the subtoken vocabulary from a file object.

    Args:
        f: File object to load vocabulary from.
    """
    def _unquote(token):
        # Vocab files may wrap each subtoken in matching single or double
        # quotes; strip exactly one such layer.
        if (token.startswith("'") and token.endswith("'")) or (
                token.startswith('"') and token.endswith('"')):
            return token[1:-1]
        return token

    subtoken_strings = [
        native_to_unicode(_unquote(line.strip())) for line in f
    ]
    self._init_subtokens_from_list(subtoken_strings)
    self._init_alphabet_from_tokens(subtoken_strings)
Load from a file object. Args: f: File object to load vocabulary from
juraj-google-style
def scheduled_sample_count(ground_truth_x, generated_x, batch_size, scheduled_sample_var):
    """Sample batch with specified mix of groundtruth and generated data points.

    Args:
        ground_truth_x: tensor of ground-truth data points.
        generated_x: tensor of generated data points.
        batch_size: batch size.
        scheduled_sample_var: number of ground-truth examples to include in
            the batch.

    Returns:
        New batch with `scheduled_sample_var` examples sampled from
        `ground_truth_x` and the rest from `generated_x`.
    """
    num_ground_truth = scheduled_sample_var
    # Randomly partition the batch positions into ground-truth vs generated.
    shuffled_idx = tf.random_shuffle(tf.range(batch_size))
    gt_idx = tf.gather(shuffled_idx, tf.range(num_ground_truth))
    gen_idx = tf.gather(shuffled_idx, tf.range(num_ground_truth, batch_size))
    gt_examples = tf.gather(ground_truth_x, gt_idx)
    gen_examples = tf.gather(generated_x, gen_idx)
    # Stitch the two subsets back into their original batch positions.
    output = tf.dynamic_stitch([gt_idx, gen_idx], [gt_examples, gen_examples])
    if isinstance(batch_size, int):
        # Restore the static batch dimension lost by dynamic_stitch.
        output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
    return output
Sample batch with specified mix of groundtruth and generated data points. Args: ground_truth_x: tensor of ground-truth data points. generated_x: tensor of generated data points. batch_size: batch size scheduled_sample_var: number of ground-truth examples to include in batch. Returns: New batch with num_ground_truth sampled from ground_truth_x and the rest from generated_x.
juraj-google-style
class ViltFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
    """Extra keyword arguments accepted by the ViLT fast image processor.

    Args:
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image. If `True`, will pad the images in the
            batch to the largest height and width in the batch. Padding will
            be applied to the bottom and right with zeros.
        size_divisor (`int`, *optional*, defaults to 32):
            The size to make the height and width divisible by.
        rescale_factor (`float`, *optional*, defaults to 1/255):
            The factor to rescale the image by.
    """

    do_pad: Optional[bool]
    size_divisor: Optional[int]
    rescale_factor: Optional[float]
Args: do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `True`, will pad the images in the batch to the largest height and width in the batch. Padding will be applied to the bottom and right with zeros. size_divisor (`int`, *optional*, defaults to 32): The size to make the height and width divisible by. rescale_factor (`float`, *optional*, defaults to 1/255): The factor to rescale the image by.
github-repos
def fetch_all_messages(self, conn, directory, readonly):
    """Fetch every message from an IMAP directory via `conn`.

    Args:
        conn: IMAP4_SSL connection.
        directory: the IMAP directory to look in.
        readonly: readonly mode, True or False.

    Returns:
        list: one (subject, body) tuple per message.
    """
    conn.select(directory, readonly)
    results = []
    _, data = conn.search(None, 'All')
    for num in data[0].split():
        _, response = conn.fetch(num, '(RFC822)')
        for part in response:
            # Non-tuple parts are IMAP protocol framing, not message data.
            if not isinstance(part, tuple):
                continue
            feed_parser = email.parser.BytesFeedParser()
            feed_parser.feed(part[1])
            msg = feed_parser.close()
            body = self.get_body(msg)
            subject = self.get_subject(msg)
            results.append((subject, body))
    return results
Fetches all messages at @conn from @directory. Params: conn IMAP4_SSL connection directory The IMAP directory to look for readonly readonly mode, true or false Returns: List of subject-body tuples
juraj-google-style
def _compute_intersection(boxes1, boxes2):
    """Computes intersection area between two sets of boxes.

    Args:
        boxes1: [N, 4] or [batch_size, N, 4] float Tensor boxes.
        boxes2: [M, 4] or [batch_size, M, 4] float Tensor boxes.

    Returns:
        a [N, M] or [batch_size, N, M] float Tensor of intersection areas.
    """
    ymin1, xmin1, ymax1, xmax1 = ops.split(boxes1[..., :4], 4, axis=-1)
    ymin2, xmin2, ymax2, xmax2 = ops.split(boxes2[..., :4], 4, axis=-1)
    # Transposing the second set's coordinates lets the min/max ops
    # broadcast into a pairwise [N, M] (or [batch, N, M]) grid.
    perm = [1, 0] if len(boxes2.shape) == 2 else [0, 2, 1]
    inter_ymax = ops.minimum(ymax1, ops.transpose(ymax2, perm))
    inter_ymin = ops.maximum(ymin1, ops.transpose(ymin2, perm))
    inter_xmax = ops.minimum(xmax1, ops.transpose(xmax2, perm))
    inter_xmin = ops.maximum(xmin1, ops.transpose(xmin2, perm))
    # Clamp negative extents (disjoint boxes) to zero before multiplying.
    zero = ops.cast(0, inter_ymax.dtype)
    heights = ops.maximum(zero, inter_ymax - inter_ymin)
    widths = ops.maximum(zero, inter_xmax - inter_xmin)
    return heights * widths
Computes intersection area between two sets of boxes. Args: boxes1: [N, 4] or [batch_size, N, 4] float Tensor boxes. boxes2: [M, 4] or [batch_size, M, 4] float Tensor boxes. Returns: a [N, M] or [batch_size, N, M] float Tensor.
github-repos
def _iter_errors_custom(instance, checks, options):
    """Perform additional validation not possible merely with JSON schemas.

    Args:
        instance: The STIX object to be validated.
        checks: A sequence of callables which do the checks. Each callable
            may be written to accept 1 arg, which is the object to check,
            or 2 args, which are the object and a ValidationOptions instance.
        options: ValidationOptions instance with settings affecting how
            validation should be done.

    Yields:
        The validation errors produced by the checks, for this object and
        recursively for any embedded STIX objects in list-valued fields.
    """
    for v_function in checks:
        try:
            result = v_function(instance)
        except TypeError:
            # The check function takes (instance, options).
            result = v_function(instance, options)
        if isinstance(result, Iterable):
            yield from result
        elif result is not None:
            yield result

    # Recurse into any embedded STIX objects held in list-valued fields.
    # isinstance (rather than an exact `type(...) is list` test) also
    # accepts list subclasses.
    for field in instance:
        if isinstance(instance[field], list):
            for obj in instance[field]:
                if _is_stix_obj(obj):
                    yield from _iter_errors_custom(obj, checks, options)
Perform additional validation not possible merely with JSON schemas. Args: instance: The STIX object to be validated. checks: A sequence of callables which do the checks. Each callable may be written to accept 1 arg, which is the object to check, or 2 args, which are the object and a ValidationOptions instance. options: ValidationOptions instance with settings affecting how validation should be done.
codesearchnet
def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
  """Runs Eval once.

  Restores the latest checkpoint, runs the accuracy ops over the whole
  evaluation set, prints precision@1 / recall@5 and writes them to the
  summary writer.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_1_op: Top 1 op.
    top_5_op: Top 5 op.
    summary_op: Summary op.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if (ckpt and ckpt.model_checkpoint_path):
      print('ckpt.model_checkpoint_path: {0}'.format(ckpt.model_checkpoint_path))
      saver.restore(sess, ckpt.model_checkpoint_path)
      # Checkpoint filenames end in '-<global_step>'; recover the step for
      # the summary below.
      global_step = ckpt.model_checkpoint_path.split('/')[(- 1)].split('-')[(- 1)]
      print(('Successfully loaded model from %s at step=%s.' % (ckpt.model_checkpoint_path, global_step)))
    else:
      # Nothing to evaluate without a checkpoint.
      print('No checkpoint file found')
      return
    # Start the queue runners that feed the input pipeline.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))
      num_iter = int(math.ceil((FLAGS.num_examples / FLAGS.batch_size)))
      # Running counts of correct top-1 / top-5 predictions.
      count_top_1 = 0.0
      count_top_5 = 0.0
      total_sample_count = (num_iter * FLAGS.batch_size)
      step = 0
      print(('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset)))
      start_time = time.time()
      while ((step < num_iter) and (not coord.should_stop())):
        (top_1, top_5) = sess.run([top_1_op, top_5_op])
        count_top_1 += np.sum(top_1)
        count_top_5 += np.sum(top_5)
        step += 1
        if ((step % 20) == 0):
          # Report throughput every 20 batches.
          duration = (time.time() - start_time)
          sec_per_batch = (duration / 20.0)
          examples_per_sec = (FLAGS.batch_size / sec_per_batch)
          print(('%s: [%d batches out of %d] (%.1f examples/sec; %.3fsec/batch)' % (datetime.now(), step, num_iter, examples_per_sec, sec_per_batch)))
          start_time = time.time()
      # Aggregate metrics over the whole evaluation run.
      precision_at_1 = (count_top_1 / total_sample_count)
      recall_at_5 = (count_top_5 / total_sample_count)
      print(('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' % (datetime.now(), precision_at_1, recall_at_5, total_sample_count)))
      # Attach the computed metrics to the graph summaries and write them.
      summary = tf.Summary()
      summary.ParseFromString(sess.run(summary_op))
      summary.value.add(tag='Precision @ 1', simple_value=precision_at_1)
      summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)
      summary_writer.add_summary(summary, global_step)
    except Exception as e:
      # Propagate the error into the coordinator so all threads stop.
      coord.request_stop(e)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
Runs Eval once. Args: saver: Saver. summary_writer: Summary writer. top_1_op: Top 1 op. top_5_op: Top 5 op. summary_op: Summary op.
codesearchnet
def next(self):
    """Gets next entry as a dictionary.

    Returns:
        dict: key/value pairs representing a row,
            {header1: value1, header2: value2, ...}

    Raises:
        StopIteration: when the underlying CSV is exhausted (the file is
            closed before the exception propagates).
    """
    try:
        # Use the iterator protocol directly: the old `.next()` call was
        # Python 2 only and raises AttributeError on Python 3. __next__ is
        # used (rather than the builtin `next`) so this body also works if
        # the module-level name `next` refers to this function.
        row = self._csv_reader.__next__()
        # Pair each cell with its header; an over-long row raises
        # IndexError, matching the previous indexing behavior.
        return {self._headers[i]: value for i, value in enumerate(row)}
    except Exception:
        # Close the underlying file on any failure, including normal
        # exhaustion via StopIteration, before propagating.
        self._file.close()
        raise
Gets next entry as a dictionary. Returns: object - Object key/value pair representing a row. {key1: value1, key2: value2, ...}
codesearchnet
def add_to_tensor(self, mat, name='add_to_tensor'):
    """Add matrix represented by this operator to `mat`.  Equiv to `I + mat`.

    Args:
      mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
      name: A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    # NOTE(review): this only broadcasts `mat` to the operator's batch shape
    # and adds nothing, which is consistent with a zeros operator; the
    # "I + mat" wording above looks inherited from an identity-operator
    # docstring -- confirm. `name` is currently unused here.
    return self._possibly_broadcast_batch_shape(mat)
Add matrix represented by this operator to `mat`. Equiv to `I + mat`. Args: mat: `Tensor` with same `dtype` and shape broadcastable to `self`. name: A name to give this `Op`. Returns: A `Tensor` with broadcast shape and same `dtype` as `self`.
github-repos
def __init__(self, plugin_callback, plugin_dir = 'workers'):
    """Initialize the Plugin Manager for Workbench.

    Loads all existing plugins, then starts a directory watcher so plugins
    added/modified/deleted at runtime are picked up.

    Args:
        plugin_callback: The callback for plugin. This is called when a
            plugin is added.
        plugin_dir: The dir where plugins reside.
    """
    self.plugin_callback = plugin_callback
    self.plugin_dir = plugin_dir
    self.load_all_plugins()
    # NOTE(review): self.plugin_path is not set in this __init__ --
    # presumably load_all_plugins() derives and stores it; confirm.
    self.watcher = dir_watcher.DirWatcher(self.plugin_path)
    self.watcher.register_callbacks(self.on_created, self.on_modified, self.on_deleted)
    self.watcher.start_monitoring()
Initialize the Plugin Manager for Workbench. Args: plugin_callback: The callback for plugin. This is called when plugin is added. plugin_dir: The dir where plugin resides.
juraj-google-style
def get_configuration(head, update, head_source=None):
    """Return the merge configuration matching the sources of both records.

    Params:
        head(dict): the HEAD record.
        update(dict): the UPDATE record.
        head_source(string): the source of the HEAD record; derived from
            the record itself when not given.

    Returns:
        MergerConfigurationOperations: the rules needed to merge HEAD
        and UPDATE.
    """
    head_src = head_source or get_head_source(head)
    update_src = get_acquisition_source(update)

    # Manual merges use dedicated rules, except for the arxiv/publisher
    # combination which keeps the source-based configuration.
    if not is_arxiv_and_publisher(head_src, update_src) and is_manual_merge(head, update):
        return ManualMergeOperations

    if head_src == 'arxiv':
        return ArxivOnArxivOperations if update_src == 'arxiv' else PublisherOnArxivOperations
    return ArxivOnPublisherOperations if update_src == 'arxiv' else PublisherOnPublisherOperations
This function returns the right configuration for the inspire_merge function according to the given sources. Both parameters must not be None. Params: head(dict): the HEAD record update(dict): the UPDATE record head_source(string): the source of the HEAD record Returns: MergerConfigurationOperations: an object containing the rules needed to merge HEAD and UPDATE
juraj-google-style
def to_string(self):
    """Turn a CRS dict into a PROJ.4 string.

    Mapping keys are tested against the known PROJ.4 parameter list.
    Values of ``True`` are omitted, leaving the key bare:
    {'no_defs': True} -> "+no_defs"; items whose value is ``False`` are
    dropped entirely.

    Returns:
        str. The PROJ.4 string representation.
    """
    def _keep(item):
        # Only known PROJ.4 parameters with a non-False value survive.
        key, value = item
        return '+' + key in PROJ4_PARAMS.keys() and value is not False

    parts = []
    for key, value in sorted(filter(_keep, self.items())):
        # Drop falsy pieces (except 0) and True so bare flags render as
        # "+flag" while normal parameters render as "+key=value".
        tokens = [str(t) for t in (key, value) if (t or t == 0) and t is not True]
        parts.append('+' + '='.join(tokens))
    return ' '.join(parts)
Turn a CRS dict into a PROJ.4 string. Mapping keys are tested against ``all_proj_keys`` list. Values of ``True`` are omitted, leaving the key bare: {'no_defs': True} -> "+no_defs" and items where the value is otherwise not a str, int, or float are omitted. Args: crs: A CRS dict as used in Location. Returns: str. The string representation.
juraj-google-style
def dump_table_as_insert_sql(engine: Engine,
                             table_name: str,
                             fileobj: TextIO,
                             wheredict: Dict[str, Any] = None,
                             include_ddl: bool = False,
                             multirow: bool = False) -> None:
    """Reads a table from the database, and writes SQL to replicate the
    table's data to the output ``fileobj``.

    Args:
        engine: SQLAlchemy :class:`Engine`
        table_name: name of the table
        fileobj: file-like object to write to
        wheredict: optional dictionary of ``{column_name: value}`` to use
            as ``WHERE`` filters
        include_ddl: if ``True``, include the DDL to create the table as
            well
        multirow: write multi-row ``INSERT`` statements
    """
    log.info("dump_data_as_insert_sql: table_name={}", table_name)
    # Header comment block identifying the table and any filters.
    writelines_nl(fileobj, [
        SEP1,
        sql_comment("Data for table: {}".format(table_name)),
        SEP2,
        sql_comment("Filters: {}".format(wheredict)),
    ])
    dialect = engine.dialect
    if not dialect.supports_multivalues_insert:
        multirow = False
    if multirow:
        # Known-broken path: parameter substitution for multi-row INSERTs
        # isn't working, so fall back to single-row statements.
        log.warning("dump_data_as_insert_sql: multirow parameter substitution "
                    "not working yet")
        multirow = False

    # Reflect the table schema from the live database.
    meta = MetaData(bind=engine)
    log.debug("... retrieving schema")
    table = Table(table_name, meta, autoload=True)
    if include_ddl:
        log.debug("... producing DDL")
        dump_ddl(table.metadata, dialect_name=engine.dialect.name,
                 fileobj=fileobj)
    log.debug("... fetching records")
    query = select(table.columns)
    if wheredict:
        # Each filter becomes an equality WHERE clause.
        for k, v in wheredict.items():
            col = table.columns.get(k)
            query = query.where(col == v)
    cursor = engine.execute(query)

    if multirow:
        # Accumulate all rows into a single multi-VALUES INSERT.
        row_dict_list = []
        for r in cursor:
            row_dict_list.append(dict(r))
        if row_dict_list:
            statement = table.insert().values(row_dict_list)
            insert_str = get_literal_query(statement, bind=engine)
            writeline_nl(fileobj, insert_str)
        else:
            writeline_nl(fileobj, sql_comment("No data!"))
    else:
        # One INSERT statement per row.
        found_one = False
        for r in cursor:
            found_one = True
            row_dict = dict(r)
            statement = table.insert(values=row_dict)
            insert_str = get_literal_query(statement, bind=engine)
            writeline_nl(fileobj, insert_str)
        if not found_one:
            writeline_nl(fileobj, sql_comment("No data!"))
    writeline_nl(fileobj, SEP2)
    log.debug("... done")
juraj-google-style
def recipe_trends_places_to_bigquery_via_query(config, auth_write, secret, key, places_dataset, places_query, places_legacy, destination_dataset, destination_table):
    """Move Twitter trends for places selected by a WOEID BigQuery query into BigQuery.

    Args:
        config: StarThinker configuration object passed through to the task.
        auth_write (authentication) - Credentials used for writing data.
        secret (string) - presumably the Twitter API secret; confirm.
        key (string) - presumably the Twitter API key; confirm.
        places_dataset (string) - dataset containing the WOEID places query.
        places_query (string) - query returning the WOEID places.
        places_legacy (boolean) - whether the places query uses legacy SQL.
        destination_dataset (string) - output BigQuery dataset.
        destination_table (string) - output BigQuery table.
    """
    # Single task payload: read WOEIDs via the BigQuery query, fetch trends
    # per place (one row per place via single_cell), write to BigQuery.
    twitter(config, {'auth': auth_write, 'secret': secret, 'key': key, 'trends': {'places': {'single_cell': True, 'bigquery': {'dataset': places_dataset, 'query': places_query, 'legacy': places_legacy}}}, 'out': {'bigquery': {'dataset': destination_dataset, 'table': destination_table}}})
Move using a WOEID query. Args: auth_write (authentication) - Credentials used for writing data. secret (string) - NA key (string) - NA places_dataset (string) - NA places_query (string) - NA places_legacy (boolean) - NA destination_dataset (string) - NA destination_table (string) - NA
github-repos