diff --git "a/validation.jsonl" "b/validation.jsonl" deleted file mode 100644--- "a/validation.jsonl" +++ /dev/null @@ -1,4904 +0,0 @@ -{"code": "def subscribe_registration_ids_to_topic(self, registration_ids, topic_name):\n url = 'https://iid.googleapis.com/iid/v1:batchAdd'\n payload = {'to': ('/topics/' + topic_name), 'registration_tokens': registration_ids}\n response = self.requests_session.post(url, json=payload)\n if (response.status_code == 200):\n return True\n elif (response.status_code == 400):\n error = response.json()\n raise InvalidDataError(error['error'])\n else:\n raise FCMError()", "docstring": "Subscribes a list of registration ids to a topic\n\nArgs:\n registration_ids (list): ids to be subscribed\n topic_name (str): name of topic\n\nReturns:\n True: if operation succeeded\n\nRaises:\n InvalidDataError: data sent to server was incorrectly formatted\n FCMError: an error occured on the server", "source": "codesearchnet_filtered"} -{"code": "def download_archive(self, name, file_path):\n uri = ((self.URI + '/archive/') + name)\n return self._client.download(uri, file_path)", "docstring": "Download archived logs of the OS Volume.\n\nArgs:\n name: Name of the OS Volume.\n file_path (str): Destination file path.\n\nReturns:\n bool: Indicates if the resource was successfully downloaded.", "source": "codesearchnet_filtered"} -{"code": "def _has_valid_abs_ref(self, i, construction_table):\n c_table = construction_table\n abs_refs = constants.absolute_refs\n A = np.empty((3, 3))\n row = c_table.index.get_loc(i)\n if (row > 2):\n message = 'The index {i} is not from the first three, rows'.format\n raise ValueError(message(i=i))\n for k in range(3):\n if (k < row):\n A[k] = self.loc[(c_table.iloc[(row, k)], ['x', 'y', 'z'])]\n else:\n A[k] = abs_refs[c_table.iloc[(row, k)]]\n (v1, v2) = ((A[2] - A[1]), (A[1] - A[0]))\n K = np.cross(v1, v2)\n zero = np.full(3, 0.0)\n return (not (np.allclose(K, zero) or np.allclose(v1, zero) or np.allclose(v2, zero)))", "docstring": "Checks, if ``i`` uses valid absolute references.\n\n Checks for each index from first to third row of the\n ``construction_table``, if the references are colinear.\n This case has to be specially treated, because the references\n are not only atoms (to fix internal degrees of freedom) but also points\n in cartesian space called absolute references.\n (to fix translational and rotational degrees of freedom)\n\nArgs:\n i (label): The label has to be in the first three rows.\n construction_table (pd.DataFrame):\n\nReturns:\n bool:", "source": "codesearchnet_filtered"} -{"code": "def get_grouped_indices(self, voigt=False, **kwargs):\n if voigt:\n array = self.voigt\n else:\n array = self\n indices = list(itertools.product(*[range(n) for n in array.shape]))\n remaining = indices.copy()\n grouped = [list(zip(*np.where(np.isclose(array, 0, **kwargs))))]\n remaining = [i for i in remaining if (i not in grouped[0])]\n while remaining:\n new = list(zip(*np.where(np.isclose(array, array[remaining[0]], **kwargs))))\n grouped.append(new)\n remaining = [i for i in remaining if (i not in new)]\n return [g for g in grouped if g]", "docstring": "Gets index sets for equivalent tensor values\n\nArgs:\n voigt (bool): whether to get grouped indices\n of voigt or full notation tensor, defaults\n to false\n **kwargs: keyword args for np.isclose. Can take atol\n and rtol for absolute and relative tolerance, e. 
g.\n\n >>> tensor.group_array_indices(atol=1e-8)\n\n or\n\n >>> tensor.group_array_indices(rtol=1e-5)\n\nReturns:\n list of index groups where tensor values are equivalent to\n within tolerances", "source": "codesearchnet_filtered"} -{"code": "def find_sanitiser_nodes(sanitiser, sanitisers_in_file):\n for sanitiser_tuple in sanitisers_in_file:\n if (sanitiser == sanitiser_tuple.trigger_word):\n (yield sanitiser_tuple.cfg_node)", "docstring": "Find nodes containing a particular sanitiser.\n\nArgs:\n sanitiser(string): sanitiser to look for.\n sanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser.\n\nReturns:\n Iterable of sanitiser nodes.", "source": "codesearchnet_filtered"} -{"code": "def parse_hgnc_line(line, header):\n hgnc_gene = {}\n line = line.rstrip().split('\\t')\n raw_info = dict(zip(header, line))\n if ('Withdrawn' in raw_info['status']):\n return hgnc_gene\n hgnc_symbol = raw_info['symbol']\n hgnc_gene['hgnc_symbol'] = hgnc_symbol\n hgnc_gene['hgnc_id'] = int(raw_info['hgnc_id'].split(':')[(- 1)])\n hgnc_gene['description'] = raw_info['name']\n aliases = set([hgnc_symbol, hgnc_symbol.upper()])\n previous_names = raw_info['prev_symbol']\n if previous_names:\n for alias in previous_names.strip('\"').split('|'):\n aliases.add(alias)\n alias_symbols = raw_info['alias_symbol']\n if alias_symbols:\n for alias in alias_symbols.strip('\"').split('|'):\n aliases.add(alias)\n hgnc_gene['previous_symbols'] = list(aliases)\n hgnc_gene['ensembl_gene_id'] = raw_info.get('ensembl_gene_id')\n omim_id = raw_info.get('omim_id')\n if omim_id:\n hgnc_gene['omim_id'] = int(omim_id.strip('\"').split('|')[0])\n else:\n hgnc_gene['omim_id'] = None\n entrez_id = hgnc_gene['entrez_id'] = raw_info.get('entrez_id')\n if entrez_id:\n hgnc_gene['entrez_id'] = int(entrez_id)\n else:\n hgnc_gene['entrez_id'] = None\n ref_seq = raw_info.get('refseq_accession')\n if ref_seq:\n hgnc_gene['ref_seq'] = ref_seq.strip('\"').split('|')\n else:\n hgnc_gene['ref_seq'] = []\n uniprot_ids = raw_info.get('uniprot_ids')\n if uniprot_ids:\n hgnc_gene['uniprot_ids'] = uniprot_ids.strip('\"\"').split('|')\n else:\n hgnc_gene['uniprot_ids'] = []\n ucsc_id = raw_info.get('ucsc_id')\n if ucsc_id:\n hgnc_gene['ucsc_id'] = ucsc_id\n else:\n hgnc_gene['ucsc_id'] = None\n vega_id = raw_info.get('vega_id')\n if vega_id:\n hgnc_gene['vega_id'] = vega_id\n else:\n hgnc_gene['vega_id'] = None\n return hgnc_gene", "docstring": "Parse an hgnc formated line\n\nArgs:\n line(list): A list with hgnc gene info\n header(list): A list with the header info\n\nReturns:\n hgnc_info(dict): A dictionary with the relevant info", "source": "codesearchnet_filtered"} -{"code": "def _get_error_generator(type, obj, schema_dir=None, version=DEFAULT_VER, default='core'):\n if (schema_dir is None):\n schema_dir = os.path.abspath((((os.path.dirname(__file__) + '/schemas-') + version) + '/'))\n try:\n schema_path = find_schema(schema_dir, type)\n schema = load_schema(schema_path)\n except (KeyError, TypeError):\n try:\n schema_path = find_schema(schema_dir, default)\n schema = load_schema(schema_path)\n except (KeyError, TypeError):\n if (schema_dir is not None):\n return None\n raise SchemaInvalidError(\"Cannot locate a schema for the object's type, nor the base schema ({}.json).\".format(default))\n if ((type == 'observed-data') and (schema_dir is None)):\n schema['allOf'][1]['properties']['objects'] = {'objects': {'type': 'object', 'minProperties': 1}}\n validator = load_validator(schema_path, schema)\n try:\n error_gen = 
validator.iter_errors(obj)\n except schema_exceptions.RefResolutionError:\n raise SchemaInvalidError('Invalid JSON schema: a JSON reference failed to resolve')\n return error_gen", "docstring": "Get a generator for validating against the schema for the given object type.\n\nArgs:\n type (str): The object type to find the schema for.\n obj: The object to be validated.\n schema_dir (str): The path in which to search for schemas.\n version (str): The version of the STIX specification to validate\n against. Only used to find base schemas when schema_dir is None.\n default (str): If the schema for the given type cannot be found, use\n the one with this name instead.\n\nReturns:\n A generator for errors found when validating the object against the\n appropriate schema, or None if schema_dir is None and the schema\n cannot be found.", "source": "codesearchnet_filtered"} -{"code": "def prange(N=1, dim=1):\n A = {}\n r = numpy.arange(N, dtype=int)\n key = numpy.zeros(dim, dtype=int)\n for i in range(N):\n key[(- 1)] = i\n A[tuple(key)] = (1 * (r == i))\n return Poly(A, dim, (N,), int)", "docstring": "Constructor to create a range of polynomials where the exponent vary.\n\nArgs:\n N (int):\n Number of polynomials in the array.\n dim (int):\n The dimension the polynomial should span.\n\nReturns:\n (Poly):\n A polynomial array of length N containing simple polynomials with\n increasing exponent.\n\nExample:\n >>> print(prange(4))\n [1, q0, q0^2, q0^3]\n >>> print(prange(4, dim=3))\n [1, q2, q2^2, q2^3]", "source": "codesearchnet_filtered"} -{"code": "def average_data(counts, observable):\n if (not isinstance(observable, dict)):\n observable = make_dict_observable(observable)\n temp = 0\n tot = sum(counts.values())\n for key in counts:\n if (key in observable):\n temp += ((counts[key] * observable[key]) / tot)\n return temp", "docstring": "Compute the mean value of an diagonal observable.\n\n Takes in a diagonal observable in dictionary, list or matrix format and then\n calculates the sum_i value(i) P(i) where value(i) is the value of the\n observable for state i.\n\nArgs:\n counts (dict): a dict of outcomes from an experiment\n observable (dict or matrix or list): The observable to be averaged over.\n As an example, ZZ on qubits can be given as:\n * dict: {\"00\": 1, \"11\": 1, \"01\": -1, \"10\": -1}\n * matrix: [[1, 0, 0, 0], [0, -1, 0, 0, ], [0, 0, -1, 0], [0, 0, 0, 1]]\n * matrix diagonal (list): [1, -1, -1, 1]\n\nReturns:\n Double: Average of the observable", "source": "codesearchnet_filtered"} -{"code": "def get_choices(field):\n empty_label = getattr(field.field, 'empty_label', False)\n needs_empty_value = False\n choices = []\n if hasattr(field.field, '_choices'):\n choices = field.field._choices\n elif hasattr(field.field, '_queryset'):\n queryset = field.field._queryset\n field_name = (getattr(field.field, 'to_field_name') or 'pk')\n choices += ((getattr(obj, field_name), str(obj)) for obj in queryset)\n if (choices and ((choices[0][1] == BLANK_CHOICE_DASH[0][1]) or choices[0][0])):\n needs_empty_value = True\n if (not choices[0][0]):\n del choices[0]\n if (empty_label == BLANK_CHOICE_DASH[0][1]):\n empty_label = None\n if (empty_label or (not field.field.required)):\n if needs_empty_value:\n choices.insert(0, ('', (empty_label or BLANK_CHOICE_DASH[0][1])))\n return choices", "docstring": "Find choices of a field, whether it has choices or has a queryset.\n\nArgs:\n field (BoundField): Django form boundfield\n\nReturns:\n list: List of choices", "source": "codesearchnet_filtered"} -{"code": 
"def get_mipmap_pixel(self, left: float, top: float, right: float, bottom: float) -> Tuple[(int, int, int)]:\n color = lib.TCOD_image_get_mipmap_pixel(self.image_c, left, top, right, bottom)\n return (color.r, color.g, color.b)", "docstring": "Get the average color of a rectangle in this Image.\n\n Parameters should stay within the following limits:\n * 0 <= left < right < Image.width\n * 0 <= top < bottom < Image.height\n\nArgs:\n left (float): Left corner of the region.\n top (float): Top corner of the region.\n right (float): Right corner of the region.\n bottom (float): Bottom corner of the region.\n\nReturns:\n Tuple[int, int, int]:\n An (r, g, b) tuple containing the averaged color value.\n Values are in a 0 to 255 range.", "source": "codesearchnet_filtered"} -{"code": "def _async_open(self, session_id, proto_version):\n try:\n (yield self.application_context.create_session_if_needed(session_id, self.request))\n session = self.application_context.get_session(session_id)\n protocol = Protocol(proto_version)\n self.receiver = Receiver(protocol)\n log.debug('Receiver created for %r', protocol)\n self.handler = ProtocolHandler()\n log.debug('ProtocolHandler created for %r', protocol)\n self.connection = self.application.new_connection(protocol, self, self.application_context, session)\n log.info('ServerConnection created')\n except ProtocolError as e:\n log.error('Could not create new server session, reason: %s', e)\n self.close()\n raise e\n msg = self.connection.protocol.create('ACK')\n (yield self.send_message(msg))\n raise gen.Return(None)", "docstring": "Perform the specific steps needed to open a connection to a Bokeh session\n\n Specifically, this method coordinates:\n\n * Getting a session for a session ID (creating a new one if needed)\n * Creating a protocol receiver and hander\n * Opening a new ServerConnection and sending it an ACK\n\nArgs:\n session_id (str) :\n A session ID to for a session to connect to\n\n If no session exists with the given ID, a new session is made\n\n proto_version (str):\n The protocol version requested by the connecting client.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def run(self, dag):\n for node in dag.op_nodes(self.gate):\n if (not node.op.definition):\n continue\n rule = node.op.definition\n decomposition = DAGCircuit()\n decomposition.add_qreg(rule[0][1][0][0])\n if rule[0][2]:\n decomposition.add_creg(rule[0][2][0][0])\n for inst in rule:\n decomposition.apply_operation_back(*inst)\n dag.substitute_node_with_dag(node, decomposition)\n return dag", "docstring": "Expand a given gate into its decomposition.\n\nArgs:\n dag(DAGCircuit): input dag\n\nReturns:\n DAGCircuit: output dag where gate was expanded.", "source": "codesearchnet_filtered"} -{"code": "def monitor(self, field, callback, poll_interval=None):\n poll_interval = (poll_interval or self.api.poll_interval)\n monitor = self.monitor_class(resource=self, field=field, callback=callback, poll_interval=poll_interval, event_queue=self.api.event_queue, poll_pool=self.api.poll_pool)\n monitor.start()\n return monitor", "docstring": "Monitor `field` for change\n\n Will monitor ``field`` for change and execute ``callback`` when\n change is detected.\n\n Example usage::\n\n def handle(resource, field, previous, current):\n print \"Change from {} to {}\".format(previous, current)\n\n switch = TapSwitch.objects.get(id=3)\n # Note that we monitor the entire state of the Hue Tap\n # switch rather than a specific field\n switch.monitor(lambda sw: sw.state.as_dict(), handle, 
poll_interval=0.2)\n\n # Execution will stop here and the API client will begin polling for changes\n hue_api.start_monitor_loop()\n\nArgs:\n field (string): The name of the field to be monitored. This may also\n be a callable which will be called with the resource\n instance as its single argument and must return a\n value which can be compared to previous values.\n\n callback (callable): The callable to be called when a change is\n detected. It will be called with parameters\n as follows:\n\n * resource instance\n * field name,\n * previous value\n * current value.\n\n poll_interval (float): Interval between polling in seconds.\n Defaults to the API's `poll_interval` value (which defaults\n to 0.1 second.\n\nReturns:\n Monitor:", "source": "codesearchnet_filtered"} -{"code": "def _ReadUnionDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n return self._ReadDataTypeDefinitionWithMembers(definitions_registry, definition_values, data_types.UnionDefinition, definition_name, supports_conditions=False)", "docstring": "Reads an union data type definition.\n\nArgs:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n is_member (Optional[bool]): True if the data type definition is a member\n data type definition.\n\nReturns:\n UnionDefinition: union data type definition.\n\nRaises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.", "source": "codesearchnet_filtered"} -{"code": "def reset_folder(self, folder):\n warnings.warn('This is a destructive action that cannot be undone.')\n self.post('reset', data={}, params={'folder': folder})", "docstring": "Erase the database index from a given folder and restart Syncthing.\n\nArgs:\n folder (str): Folder ID.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def _FormatSocketUnixToken(self, token_data):\n protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')\n return {'protocols': protocol, 'family': token_data.socket_family, 'path': token_data.socket_path}", "docstring": "Formats an Unix socket token as a dictionary of values.\n\nArgs:\n token_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.\n\nReturns:\n dict[str, str]: token values.", "source": "codesearchnet_filtered"} -{"code": "def _execute(self, command, data=None, unpack=True):\n if (not data):\n data = {}\n if (self.session_id is not None):\n data.setdefault('session_id', self.session_id)\n data = self._wrap_el(data)\n res = self.remote_invoker.execute(command, data)\n ret = WebDriverResult.from_object(res)\n ret.raise_for_status()\n ret.value = self._unwrap_el(ret.value)\n if (not unpack):\n return ret\n return ret.value", "docstring": "Private method to execute command.\n\nArgs:\n command(Command): The defined command.\n data(dict): The uri variable and body.\n uppack(bool): If unpack value from result.\n\nReturns:\n The unwrapped value field in the json response.", "source": "codesearchnet_filtered"} -{"code": "def Open(self, path, ascii_codepage='cp1252'):\n path_specification = self._path_resolver.ResolvePath(path)\n if (path_specification is None):\n return None\n return self._OpenPathSpec(path_specification)", "docstring": "Opens the Windows Registry file specified by the path.\n\nArgs:\n path (str): path of the Windows Registry file.\n ascii_codepage (Optional[str]): ASCII string 
codepage.\n\nReturns:\n WinRegistryFile: Windows Registry file or None.", "source": "codesearchnet_filtered"} -{"code": "def get_device_name(self, cached=True):\n if (cached and (self.name is not None)):\n return self.name\n device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)\n if (device_name is None):\n logger.warn('Failed to find handle for device name')\n return None\n self.name = self.dongle._read_attribute(self.conn_handle, device_name)\n return self.name", "docstring": "Returns the SK8 device BLE name.\n\nArgs:\n cached (bool): if True, returns the locally cached copy of the name. If this is\n set to False, or the name is not cached, it will read from the device instead.\n\nReturns:\n str. The current device name. May be `None` if an error occurs.", "source": "codesearchnet_filtered"} -{"code": "def read_tracers_h5(xdmf_file, infoname, snapshot, position):\n xdmf_root = xmlET.parse(str(xdmf_file)).getroot()\n tra = {}\n tra[infoname] = [{}, {}]\n if position:\n for axis in 'xyz':\n tra[axis] = [{}, {}]\n for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):\n ibk = int(elt_subdomain.get('Name').startswith('meshYang'))\n if position:\n for data_attr in elt_subdomain.findall('Geometry'):\n for (data_item, axis) in zip(data_attr.findall('DataItem'), 'xyz'):\n (icore, data) = _get_field(xdmf_file, data_item)\n tra[axis][ibk][icore] = data\n for data_attr in elt_subdomain.findall('Attribute'):\n if (data_attr.get('Name') != infoname):\n continue\n (icore, data) = _get_field(xdmf_file, data_attr.find('DataItem'))\n tra[infoname][ibk][icore] = data\n for info in tra:\n tra[info] = [trab for trab in tra[info] if trab]\n for (iblk, trab) in enumerate(tra[info]):\n tra[info][iblk] = np.concatenate([trab[icore] for icore in range(len(trab))])\n return tra", "docstring": "Extract tracers data from hdf5 files.\n\nArgs:\n xdmf_file (:class:`pathlib.Path`): path of the xdmf file.\n infoname (str): name of information to extract.\n snapshot (int): snapshot number.\n position (bool): whether to extract position of tracers.\n\nReturns:\n dict of list of numpy.array:\n Tracers data organized by attribute and block.", "source": "codesearchnet_filtered"} -{"code": "def swo_set_emu_buffer_size(self, buf_size):\n buf = ctypes.c_uint32(buf_size)\n res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_EMU, ctypes.byref(buf))\n if (res < 0):\n raise errors.JLinkException(res)\n return None", "docstring": "Sets the size of the buffer used by the J-Link to collect SWO data.\n\nArgs:\n self (JLink): the ``JLink`` instance\n buf_size (int): the new size of the emulator buffer\n\nReturns:\n ``None``\n\nRaises:\n JLinkException: on error", "source": "codesearchnet_filtered"} -{"code": "def _get_elmt_amt_in_rxt(self, rxt):\n return sum([rxt.get_el_amount(e) for e in self.pd.elements])", "docstring": "Computes total number of atoms in a reaction formula for elements\n not in external reservoir. 
This method is used in the calculation\n of reaction energy per mol of reaction formula.\n\nArgs:\n rxt (Reaction): a reaction.\n\nReturns:\n Total number of atoms for non_reservoir elements.", "source": "codesearchnet_filtered"} -{"code": "def find_common_root(elements):\n if (not elements):\n raise UserWarning(\"Can't find common root - no elements suplied.\")\n root_path = el_to_path_vector(elements.pop())\n for el in elements:\n el_path = el_to_path_vector(el)\n root_path = common_vector_root(root_path, el_path)\n if (not root_path):\n raise UserWarning(('Vectors without common root:\\n%s' % str(el_path)))\n return root_path", "docstring": "Find root which is common for all `elements`.\n\nArgs:\n elements (list): List of double-linked HTMLElement objects.\n\nReturns:\n list: Vector of HTMLElement containing path to common root.", "source": "codesearchnet_filtered"} -{"code": "def name(self):\n return ctypes.cast(self.sName, ctypes.c_char_p).value.decode()", "docstring": "Returns the name of the device.\n\nArgs:\n self (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance\n\nReturns:\n Device name.", "source": "codesearchnet_filtered"} -{"code": "def get_dim_label(js_dict, dim, input='dataset'):\n if (input == 'dataset'):\n input = js_dict['dimension'][dim]\n label_col = 'label'\n elif (input == 'dimension'):\n label_col = js_dict['label']\n input = js_dict\n else:\n raise ValueError\n try:\n dim_label = input['category']['label']\n except KeyError:\n dim_index = get_dim_index(js_dict, dim)\n dim_label = pd.concat([dim_index['id'], dim_index['id']], axis=1)\n dim_label.columns = ['id', 'label']\n else:\n dim_label = pd.DataFrame(list(zip(dim_label.keys(), dim_label.values())), index=dim_label.keys(), columns=['id', label_col])\n try:\n dim_index = input['category']['index']\n except KeyError:\n dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])), index=[0], columns=['id', 'index'])\n else:\n if (type(dim_index) is list):\n dim_index = pd.DataFrame(list(zip(dim_index, range(0, len(dim_index)))), index=dim_index, columns=['id', 'index'])\n else:\n dim_index = pd.DataFrame(list(zip(dim_index.keys(), dim_index.values())), index=dim_index.keys(), columns=['id', 'index'])\n dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')\n return dim_label", "docstring": "Get label from a given dimension.\n\nArgs:\n js_dict (dict): dictionary containing dataset data and metadata.\n dim (string): dimension name obtained from JSON file.\n\nReturns:\n dim_label(pandas.DataFrame): DataFrame with label-based dimension data.", "source": "codesearchnet_filtered"} -{"code": "def get_counters(counter_list):\n if (not isinstance(counter_list, list)):\n raise CommandExecutionError('counter_list must be a list of tuples')\n try:\n query = win32pdh.OpenQuery()\n counters = build_counter_list(counter_list)\n for counter in counters:\n counter.add_to_query(query)\n win32pdh.CollectQueryData(query)\n time.sleep(1)\n win32pdh.CollectQueryData(query)\n ret = {}\n for counter in counters:\n try:\n ret.update({counter.path: counter.value()})\n except pywintypes.error as exc:\n if (exc.strerror == 'No data to return.'):\n continue\n else:\n raise\n finally:\n win32pdh.CloseQuery(query)\n return ret", "docstring": "Get the values for the passes list of counters\n\nArgs:\n counter_list (list):\n A list of counters to lookup\n\nReturns:\n dict: A dictionary of counters and their values", "source": "codesearchnet_filtered"} -{"code": "def is_storage(url, storage=None):\n if storage:\n return True\n 
split_url = url.split('://', 1)\n if ((len(split_url) == 2) and (split_url[0].lower() != 'file')):\n return True\n return False", "docstring": "Check if file is a local file or a storage file.\n\n File is considered local if:\n - URL is a local path.\n - URL starts by \"file://\"\n - a \"storage\" is provided.\n\nArgs:\n url (str): file path or URL\n storage (str): Storage name.\n\nReturns:\n bool: return True if file is local.", "source": "codesearchnet_filtered"} -{"code": "def response(self, in_thread: Optional[bool]=None) -> 'Message':\n data = {'channel': self['channel']}\n if in_thread:\n if ('message' in self):\n data['thread_ts'] = (self['message'].get('thread_ts') or self['message']['ts'])\n else:\n data['thread_ts'] = (self.get('thread_ts') or self['ts'])\n elif (in_thread is None):\n if (('message' in self) and ('thread_ts' in self['message'])):\n data['thread_ts'] = self['message']['thread_ts']\n elif ('thread_ts' in self):\n data['thread_ts'] = self['thread_ts']\n return Message(data)", "docstring": "Create a response message.\n\n Depending on the incoming message the response can be in a thread. By default the response follow where the\n incoming message was posted.\n\nArgs:\n in_thread (boolean): Overwrite the `threading` behaviour\n\nReturns:\n a new :class:`slack.event.Message`", "source": "codesearchnet_filtered"} -{"code": "def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1, seq_align='start', trim_seq_len=None):\n y = dt[response]\n seq = dt[sequence]\n if (trim_seq_len is not None):\n seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len)\n seq = [s.replace('N', '') for s in seq]\n dt_kmer = kmer_count(seq, k)\n Xsp = csc_matrix(dt_kmer)\n en = ElasticNet(alpha=1, standardize=False, n_splits=3)\n en.fit(Xsp, y)\n nonzero_kmers = dt_kmer.columns.values[(en.coef_ != 0)].tolist()\n\n def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True):\n '\\n perform stepwise model selection while preventing to add a motif similar to the\\n already selected motifs.\\n '\n (F, pval) = f_regression(dt_kmer[to_be_selected_kmers], y)\n kmer = to_be_selected_kmers.pop(pval.argmin())\n selected_kmers.append(kmer)\n\n def select_criterion(s1, s2, consider_shift=True):\n if (hamming_distance(s1, s2) <= 1):\n return False\n if (consider_shift and (hamming_distance(s1[1:], s2[:(- 1)]) == 0)):\n return False\n if (consider_shift and (hamming_distance(s1[:(- 1)], s2[1:]) == 0)):\n return False\n return True\n to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers if select_criterion(ckmer, kmer, consider_shift)]\n if (len(to_be_selected_kmers) == 0):\n return selected_kmers\n else:\n lm = LinearRegression()\n lm.fit(dt_kmer[selected_kmers], y)\n y_new = (y - lm.predict(dt_kmer[selected_kmers]))\n return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift)\n selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift)\n return selected_kmers", "docstring": "Find best k-mers for CONCISE initialization.\n\nArgs:\n dt (pd.DataFrame): Table containing response variable and sequence.\n response (str): Name of the column used as the reponse variable.\n sequence (str): Name of the column storing the DNA/RNA sequences.\n k (int): Desired k-mer length.\n n_cores (int): Number of cores to use for computation. It can use up to 3 cores.\n consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG?\n seq_align (str): one of ``{\"start\", \"end\"}``. 
To which end should we align sequences?\n trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.\n\nReturns:\n string list: Best set of motifs for this dataset sorted with respect to\n confidence (best candidate occuring first).\n\n\n Details:\n First a lasso model gets fitted to get a set of initial motifs. Next, the best\n subset of unrelated motifs is selected by stepwise selection.", "source": "codesearchnet_filtered"} -{"code": "def check_R_package(self, package):\n test_package = (not bool(launch_R_script('{}/R_templates/test_import.R'.format(os.path.dirname(os.path.realpath(__file__))), {'{package}': package}, verbose=True)))\n return test_package", "docstring": "Execute a subprocess to check the package's availability.\n\nArgs:\n package (str): Name of the package to be tested.\n\nReturns:\n bool: `True` if the package is available, `False` otherwise", "source": "codesearchnet_filtered"} -{"code": "def key_periods(ciphertext, max_key_period):\n if (max_key_period <= 0):\n raise ValueError('max_key_period must be a positive integer')\n key_scores = []\n for period in range(1, (min(max_key_period, len(ciphertext)) + 1)):\n score = abs((ENGLISH_IC - index_of_coincidence(*split_columns(ciphertext, period))))\n key_scores.append((period, score))\n return [p[0] for p in sorted(key_scores, key=(lambda x: x[1]))]", "docstring": "Rank all key periods for ``ciphertext`` up to and including ``max_key_period``\n\nExample:\n >>> key_periods(ciphertext, 30)\n [2, 4, 8, 3, ...]\n\nArgs:\n ciphertext (str): The text to analyze\n max_key_period (int): The maximum period the key could be\n\nReturns:\n Sorted list of keys\n\nRaises:\n ValueError: If max_key_period is less than or equal to 0", "source": "codesearchnet_filtered"} -{"code": "def _CalculateNTFSTimeHash(self, file_entry):\n date_time_values = []\n access_time = getattr(file_entry, 'access_time', None)\n if access_time:\n date_time_string = access_time.CopyToDateTimeString()\n date_time_values.append('atime:{0:s}'.format(date_time_string))\n creation_time = getattr(file_entry, 'creation_time', None)\n if creation_time:\n date_time_string = creation_time.CopyToDateTimeString()\n date_time_values.append('crtime:{0:s}'.format(date_time_string))\n modification_time = getattr(file_entry, 'modification_time', None)\n if modification_time:\n date_time_string = modification_time.CopyToDateTimeString()\n date_time_values.append('mtime:{0:s}'.format(date_time_string))\n change_time = getattr(file_entry, 'change_time', None)\n if change_time:\n date_time_string = change_time.CopyToDateTimeString()\n date_time_values.append('ctime:{0:s}'.format(date_time_string))\n date_time_values = ''.join(date_time_values)\n date_time_values = date_time_values.encode('ascii')\n hash_value = hashlib.md5()\n hash_value.update(date_time_values)\n return hash_value.hexdigest()", "docstring": "Calculates an MD5 from the date and time value of a NTFS file entry.\n\nArgs:\n file_entry (dfvfs.FileEntry): file entry.\n\nReturns:\n str: hexadecimal representation of the MD5 hash value of the date and\n time values of the file entry.", "source": "codesearchnet_filtered"} -{"code": "def find_last(self, selector, **kwargs):\n self.debug_log(('Finding last element with selector: %s' % selector))\n elements = self.find_all(selector, **kwargs)\n if len(elements):\n self.debug_log(('find_last (%s): 
element found' % selector))\n return elements[(- 1)]\n else:\n self.debug_log(('find_last (%s): No element found' % selector))\n return None", "docstring": "Return the last element found with a selector\n\nArgs:\n selector (str): the selector used to find the element\n\n Kwargs:\n wait_until_present (bool)\n wait_until_visible (bool)\n raise_exception (bool)\n\nReturns:\n None if no element was found\n proxy_element is an element was found\n\nRaises:\n this function might raise an exception depending\n on the raise_exception kwargs\n or\n the config proxy_driver:raise_exception", "source": "codesearchnet_filtered"} -{"code": "def validate(filename=None, ocrd_page=None, ocrd_file=None, strictness='strict', strategy='index1'):\n if ocrd_page:\n validator = PageValidator(ocrd_page, strictness, strategy)\n elif ocrd_file:\n validator = PageValidator(page_from_file(ocrd_file), strictness, strategy)\n elif filename:\n validator = PageValidator(parse(filename, silence=True), strictness, strategy)\n else:\n raise Exception('At least one of ocrd_page, ocrd_file or filename must be set')\n return validator._validate()", "docstring": "Validates a PAGE file for consistency by filename, OcrdFile or passing OcrdPage directly.\n\nArgs:\n filename (string): Path to PAGE\n ocrd_page (OcrdPage): OcrdPage instance\n ocrd_file (OcrdFile): OcrdFile instance wrapping OcrdPage\n strictness (string): 'strict', 'lax', 'fix' or 'off'\n strategy (string): Currently only 'index1'\n\nReturns:\n report (:class:`ValidationReport`) Report on the validity", "source": "codesearchnet_filtered"} -{"code": "def minimize_best_n(Members):\n return list(reversed(sorted(Members, key=(lambda Member: Member.fitness_score))))", "docstring": "Orders population members from lowest fitness to highest fitness\n\nArgs:\n Members (list): list of PyGenetics Member objects\n\nReturns:\n lsit: ordered lsit of Members, from highest fitness to lowest fitness", "source": "codesearchnet_filtered"} -{"code": "def format_config(sensor_graph):\n cmdfile = CommandFile('Config Variables', '1.0')\n for slot in sorted(sensor_graph.config_database, key=(lambda x: x.encode())):\n for (conf_var, conf_def) in sorted(sensor_graph.config_database[slot].items()):\n (conf_type, conf_val) = conf_def\n if (conf_type == 'binary'):\n conf_val = ('hex:' + hexlify(conf_val))\n cmdfile.add('set_variable', slot, conf_var, conf_type, conf_val)\n return cmdfile.dump()", "docstring": "Extract the config variables from this sensor graph in ASCII format.\n\nArgs:\n sensor_graph (SensorGraph): the sensor graph that we want to format\n\nReturns:\n str: The ascii output lines concatenated as a single string", "source": "codesearchnet_filtered"} -{"code": "def set_lacp_timeout(self, name, value=None):\n commands = [('interface %s' % name)]\n string = 'port-channel lacp fallback timeout'\n commands.append(self.command_builder(string, value=value))\n return self.configure(commands)", "docstring": "Configures the Port-Channel LACP fallback timeout\n The fallback timeout configures the period an interface in\n fallback mode remains in LACP mode without receiving a PDU.\n\nArgs:\n name(str): The Port-Channel interface name\n\n value(int): port-channel lacp fallback timeout in seconds\n\nReturns:\n True if the operation succeeds otherwise False is returned", "source": "codesearchnet_filtered"} -{"code": "def get_continent(self, callsign, timestamp=timestamp_now):\n return self.get_all(callsign, timestamp)[const.CONTINENT]", "docstring": "Returns the continent Identifier of a 
callsign\n\nArgs:\n callsign (str): Amateur Radio callsign\n timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\n str: continent identified\n\nRaises:\n KeyError: No Continent found for callsign\n\nNote:\n The following continent identifiers are used:\n\n - EU: Europe\n - NA: North America\n - SA: South America\n - AS: Asia\n - AF: Africa\n - OC: Oceania\n - AN: Antarctica", "source": "codesearchnet_filtered"} -{"code": "def orthonormalize_righthanded(basis):\n (v1, v2) = (basis[(:, 0)], basis[(:, 1)])\n e1 = normalize(v1)\n e3 = normalize(np.cross(e1, v2))\n e2 = normalize(np.cross(e3, e1))\n return np.array([e1, e2, e3]).T", "docstring": "Orthonormalizes righthandedly a given 3D basis.\n\n This functions returns a right handed orthonormalize_righthandedd basis.\n Since only the first two vectors in the basis are used, it does not matter\n if you give two or three vectors.\n\n Right handed means, that:\n\n .. math::\n\n \\\\vec{e_1} \\\\times \\\\vec{e_2} &= \\\\vec{e_3} \\\\\\\\\n \\\\vec{e_2} \\\\times \\\\vec{e_3} &= \\\\vec{e_1} \\\\\\\\\n \\\\vec{e_3} \\\\times \\\\vec{e_1} &= \\\\vec{e_2} \\\\\\\\\n\nArgs:\n basis (np.array): An array of shape = (3,2) or (3,3)\n\nReturns:\n new_basis (np.array): A right handed orthonormalized basis.", "source": "codesearchnet_filtered"} -{"code": "def disable_plugin(self, name):\n url = self._url('/plugins/{0}/disable', name)\n res = self._post(url)\n self._raise_for_status(res)\n return True", "docstring": "Disable an installed plugin.\n\nArgs:\n name (string): The name of the plugin. The ``:latest`` tag is\n optional, and is the default if omitted.\n\nReturns:\n ``True`` if successful", "source": "codesearchnet_filtered"} -{"code": "def line_iter(xo: int, yo: int, xd: int, yd: int) -> Iterator[Tuple[(int, int)]]:\n data = ffi.new('TCOD_bresenham_data_t *')\n lib.TCOD_line_init_mt(xo, yo, xd, yd, data)\n x = ffi.new('int *')\n y = ffi.new('int *')\n (yield (xo, yo))\n while (not lib.TCOD_line_step_mt(x, y, data)):\n (yield (x[0], y[0]))", "docstring": "returns an Iterable\n\n This Iterable does not include the origin point.\n\nArgs:\n xo (int): X starting point.\n yo (int): Y starting point.\n xd (int): X destination point.\n yd (int): Y destination point.\n\nReturns:\n Iterable[Tuple[int,int]]: An Iterable of (x,y) points.", "source": "codesearchnet_filtered"} -{"code": "def get(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):\n if (user_agent or user_agent_config_yaml or ('user_agent' in UserAgent._environment_variables(**kwargs))):\n return UserAgent._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)\n if cls.user_agent:\n return cls.user_agent\n else:\n raise UserAgentError('You must either set the global user agent: UserAgent.set_global(...) or pass in user agent parameters!')", "docstring": "Get full user agent string from parameters if supplied falling back on global user agent if set.\n\nArgs:\n user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.\n user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.\n user_agent_lookup (Optional[str]): Lookup key for YAML. 
Ignored if user_agent supplied.\n\nReturns:\n str: Full user agent string", "source": "codesearchnet_filtered"} -{"code": "def psd(data, dt, ndivide=1, window=hanning, overlap_half=False):\n logger = getLogger('decode.utils.ndarray.psd')\n if overlap_half:\n step = int((len(data) / (ndivide + 1)))\n size = (step * 2)\n else:\n step = int((len(data) / ndivide))\n size = step\n if (bin(len(data)).count('1') != 1):\n logger.warning('warning: length of data is not power of 2: {}'.format(len(data)))\n size = int((len(data) / ndivide))\n if (bin(size).count('1') != 1.0):\n if overlap_half:\n logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size))\n else:\n logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size))\n psd = np.zeros(size)\n T = ((size - 1) * dt)\n vs = (1 / dt)\n vk_ = fftfreq(size, dt)\n vk = vk_[np.where((vk_ >= 0))]\n for i in range(ndivide):\n d = data[(i * step):((i * step) + size)]\n if (window is None):\n w = np.ones(size)\n corr = 1.0\n else:\n w = window(size)\n corr = np.mean((w ** 2))\n psd = (psd + ((((2 * (np.abs(fft((d * w))) ** 2)) / size) * dt) / corr))\n return (vk, (psd[:len(vk)] / ndivide))", "docstring": "Calculate power spectrum density of data.\n\nArgs:\n data (np.ndarray): Input data.\n dt (float): Time between each data.\n ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).\n ax (matplotlib.axes): Axis you want to plot on.\n doplot (bool): Plot how averaging works.\n overlap_half (bool): Split data to half-overlapped regions.\n\nReturns:\n vk (np.ndarray): Frequency.\n psd (np.ndarray): PSD", "source": "codesearchnet_filtered"} -{"code": "def create_transfer_learning_tuner(parent, additional_parents=None, estimator=None, sagemaker_session=None):\n parent_tuner = HyperparameterTuner.attach(tuning_job_name=parent, sagemaker_session=sagemaker_session)\n return parent_tuner.transfer_learning_tuner(additional_parents=additional_parents, estimator=estimator)", "docstring": "Creates a new ``HyperParameterTuner`` by copying the request fields from the provided parent to the new instance\n of ``HyperparameterTuner`` followed by addition of warm start configuration with the type as \"TransferLearning\"\n and ``parents`` as the union of provided list of ``additional_parents`` and the ``parent``.\n\nArgs:\n parent (str): Primary parent tuning job's name from which the Tuner and Estimator configuration has to be copied\n additional_parents (set{str}): Set of additional parent tuning job's names along with the primary parent tuning\n job name to be used in warm starting the identical dataset and algorithm tuner.\n estimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with\n the desired configuration. There does not need to be a training job associated with this instance.\n sagemaker_session (sagemaker.session.Session): Session object which manages interactions with\n Amazon SageMaker APIs and any other AWS services needed. 
If not specified, one is created\n using the default AWS configuration chain.\n\nReturns:\n sagemaker.tuner.HyperparameterTuner: New instance of warm started HyperparameterTuner", "source": "codesearchnet_filtered"} -{"code": "def get_config(filepath=None, default_loader=None, on_missing=None):\n cache_key = (filepath, default_loader, on_missing)\n if (CACHE.get(cache_key) is not None):\n return CACHE.get(cache_key)\n logger = logging.getLogger('birding')\n if (filepath is None):\n filepath = BIRDING_CONF\n if (default_loader is None):\n default_loader = get_defaults_file\n if (on_missing is None):\n on_missing = logger.info\n logger.info('Looking for configuration file: {}'.format(os.path.abspath(filepath)))\n if (not os.path.exists(filepath)):\n on_missing('No {} configuration file found.'.format(filepath))\n if (filepath != BIRDING_CONF_DEFAULT):\n os.stat(filepath)\n config = yaml.safe_load(default_loader())\n tv.validate(SCHEMA, config)\n if os.path.exists(filepath):\n file_config = yaml.safe_load(open(filepath))\n if file_config:\n config = overlay(file_config, config)\n tv.validate(SCHEMA, config)\n CACHE.put(cache_key, config)\n return config", "docstring": "Get a dict for the current birding configuration.\n\n The resulting dictionary is fully populated with defaults, such that all\n valid keys will resolve to valid values. Invalid and extra values in the\n configuration result in an exception.\n\n See :ref:`config` (module-level docstring) for discussion on how birding\n configuration works, including filepath loading. Note that a non-default\n filepath set via env results in a :py:exc:`OSError` when the file is\n missing, but the default filepath is ignored when missing.\n\n This function caches its return values as to only parse configuration once\n per set of inputs. 
As such, treat the resulting dictionary as read-only as\n not to accidentally write values which will be seen by other handles of the\n dictionary.\n\nArgs:\n filepath (str): path to birding configuration YAML file.\n default_loader (callable):\n callable which returns file descriptor with YAML data of default\n configuration values\n on_missing (callable): callback to call when file is missing.\n\nReturns:\n dict: dict of current birding configuration; treat as read-only.", "source": "codesearchnet_filtered"} -{"code": "def connect(self, component):\n if (not isinstance(component, ThreadPool)):\n raise TypeError('\"component\" must be a ThreadPool object')\n component.in_queue = self.out_queue\n return component", "docstring": "Connect two ThreadPools.\n\n The ``in_queue`` of the second pool will be set as the ``out_queue`` of\n the current pool, thus all the output will be input to the second pool.\n\nArgs:\n component (ThreadPool): the ThreadPool to be connected.\n\nReturns:\n ThreadPool: the modified second ThreadPool.", "source": "codesearchnet_filtered"} -{"code": "def as_tuning_range(self, name):\n return {'Name': name, 'MinValue': to_str(self.min_value), 'MaxValue': to_str(self.max_value), 'ScalingType': self.scaling_type}", "docstring": "Represent the parameter range as a dicionary suitable for a request to\n create an Amazon SageMaker hyperparameter tuning job.\n\nArgs:\n name (str): The name of the hyperparameter.\n\nReturns:\n dict[str, str]: A dictionary that contains the name and values of the hyperparameter.", "source": "codesearchnet_filtered"} -{"code": "def success(channel, title, datapacks):\n gui = ui_embed.UI(channel, title, '', modulename=modulename, datapacks=datapacks)\n return gui", "docstring": "Creates an embed UI containing the help message\n\nArgs:\n channel (discord.Channel): The Discord channel to bind the embed to\n title (str): The title of the embed\n datapacks (list): The hex value\n\nReturns:\n ui (ui_embed.UI): The embed UI object", "source": "codesearchnet_filtered"} -{"code": "def post(self, url, data=None, files=None, headers=None, get_json=True):\n if self.debug:\n print(('POST: %s, headers=%s' % (url, headers)))\n self.headers = self._get_default_headers()\n if (headers is not None):\n self.headers.update(headers)\n response = requests.post(url, headers=self.headers, data=data, auth=self.auth, files=files, verify=self.verify_ssl)\n json_response = self._process_json_response(response)\n return (json_response if (get_json is True) else response)", "docstring": "Make POST request to a url\n\nArgs:\n url (str): URL to send the request to\n data (dict, optional): Data to send\n files (dict, optional): Files to send with the request\n headers (str, optional): custom headers\n\nReturns:\n A JSON object of the returned response if `get_json` is True,\n Requests' response object otherwise", "source": "codesearchnet_filtered"} -{"code": "def _VerifyRecord(self, pls_record):\n future_timestamp = (timelib.Timestamp.GetNow() + self._SIX_YEARS_IN_MICRO_SECONDS)\n if (pls_record.last_written_time > future_timestamp):\n return False\n (first_word, _, _) = pls_record.query.partition(' ')\n if (first_word.lower() not in self._PLS_KEYWORD):\n return False\n return True", "docstring": "Verifies a PLS Recall record.\n\nArgs:\n pls_record (pls_recall_record): a PLS Recall record to verify.\n\nReturns:\n bool: True if this is a valid PLS Recall record, False otherwise.", "source": "codesearchnet_filtered"} -{"code": "def generate_pseudo(strain_states, order=3):\n s = 
sp.Symbol('s')\n nstates = len(strain_states)\n ni = (np.array(strain_states) * s)\n (mis, absent_syms) = ([], [])\n for degree in range(2, (order + 1)):\n (cvec, carr) = get_symbol_list(degree)\n sarr = np.zeros((nstates, 6), dtype=object)\n for (n, strain_v) in enumerate(ni):\n exps = carr.copy()\n for i in range((degree - 1)):\n exps = np.dot(exps, strain_v)\n exps /= np.math.factorial((degree - 1))\n sarr[n] = [sp.diff(exp, s, (degree - 1)) for exp in exps]\n svec = sarr.ravel()\n present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])\n absent_syms += [(set(cvec) - present_syms)]\n m = np.zeros(((6 * nstates), len(cvec)))\n for (n, c) in enumerate(cvec):\n m[(:, n)] = v_diff(svec, c)\n mis.append(np.linalg.pinv(m))\n return (mis, absent_syms)", "docstring": "Generates the pseudoinverse for a given set of strains.\n\nArgs:\n strain_states (6xN array like): a list of voigt-notation\n \"strain-states\", i. e. perturbed indices of the strain\n as a function of the smallest strain e. g. (0, 1, 0, 0, 1, 0)\n order (int): order of pseudoinverse to calculate\n\nReturns:\n mis: pseudo inverses for each order tensor, these can\n be multiplied by the central difference derivative\n of the stress with respect to the strain state\n absent_syms: symbols of the tensor absent from the PI\n expression", "source": "codesearchnet_filtered"} -{"code": "def load_local_config(filename):\n if (not filename):\n return imp.new_module('local_pylint_config')\n module = imp.load_source('local_pylint_config', filename)\n return module", "docstring": "Loads the pylint.config.py file.\n\nArgs:\n filename (str): The python file containing the local configuration.\n\nReturns:\n module: The loaded Python module.", "source": "codesearchnet_filtered"} -{"code": "def first_return_times(dts, c=None, d=0.0):\n if (c is None):\n c = dts.mean()\n vmrt = distob.vectorize(analyses1.first_return_times)\n all_intervals = vmrt(dts, c, d)\n if hasattr(type(all_intervals), '__array_interface__'):\n return np.ravel(all_intervals)\n else:\n return np.hstack([distob.gather(ilist) for ilist in all_intervals])", "docstring": "For an ensemble of time series, return the set of all time intervals\n between successive returns to value c for all instances in the ensemble.\n If c is not given, the default is the mean across all times and across all\n time series in the ensemble.\n\nArgs:\n dts (DistTimeseries)\n\n c (float): Optional target value (default is the ensemble mean value)\n\n d (float): Optional min distance from c to be attained between returns\n\nReturns:\n array of time intervals (Can take the mean of these to estimate the\n expected first return time for the whole ensemble)", "source": "codesearchnet_filtered"} -{"code": "def save(self, clean=True):\n ret = {}\n if clean:\n self._dirty = False\n else:\n ret['_dirty'] = self._dirty\n return ret", "docstring": "Serialize into raw representation. 
Clears the dirty bit by default.\n\nArgs:\n clean (bool): Whether to clear the dirty bit.\n\nReturns:\n dict: Raw.", "source": "codesearchnet_filtered"} -{"code": "def _merge_bee(self, bee):\n random_dimension = randint(0, (len(self._value_ranges) - 1))\n second_bee = randint(0, (self._num_employers - 1))\n while (bee.id == self._employers[second_bee].id):\n second_bee = randint(0, (self._num_employers - 1))\n new_bee = deepcopy(bee)\n new_bee.values[random_dimension] = self.__onlooker.calculate_positions(new_bee.values[random_dimension], self._employers[second_bee].values[random_dimension], self._value_ranges[random_dimension])\n fitness_score = new_bee.get_score(self._fitness_fxn(new_bee.values, **self._args))\n return (fitness_score, new_bee.values, new_bee.error)", "docstring": "Shifts a random value for a supplied bee with in accordance with\n another random bee's value\n\nArgs:\n bee (EmployerBee): supplied bee to merge\n\nReturns:\n tuple: (score of new position, values of new position, fitness\n function return value of new position)", "source": "codesearchnet_filtered"} -{"code": "def from_string(cls, key, key_id=None):\n key = _helpers.from_bytes(key)\n (marker_id, key_bytes) = pem.readPemBlocksFromFile(six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)\n if (marker_id == 0):\n private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format='DER')\n elif (marker_id == 1):\n (key_info, remaining) = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)\n if (remaining != b''):\n raise ValueError('Unused bytes', remaining)\n private_key_info = key_info.getComponentByName('privateKey')\n private_key = rsa.key.PrivateKey.load_pkcs1(private_key_info.asOctets(), format='DER')\n else:\n raise ValueError('No key could be detected.')\n return cls(private_key, key_id=key_id)", "docstring": "Construct an Signer instance from a private key in PEM format.\n\nArgs:\n key (str): Private key in PEM format.\n key_id (str): An optional key id used to identify the private key.\n\nReturns:\n google.auth.crypt.Signer: The constructed signer.\n\nRaises:\n ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in\n PEM format.", "source": "codesearchnet_filtered"} -{"code": "def _bash_comp_command(self, cmd, add_help=True):\n out = (['-h', '--help'] if add_help else [])\n cmd_dict = (self._opt_cmds[cmd] if cmd else self._opt_bare)\n for (opt, sct) in cmd_dict:\n out.extend(_names(self._conf[sct], opt))\n return out", "docstring": "Build a list of all options for a given command.\n\nArgs:\n cmd (str): command name, set to None or '' for bare command.\n add_help (bool): add an help option.\n\nReturns:\n list of str: list of CLI options strings.", "source": "codesearchnet_filtered"} -{"code": "def json_to_bulk(tc_data, value_fields, resource_type, resource_type_parent):\n if (not isinstance(tc_data, list)):\n tc_data = [tc_data]\n bulk_array = []\n for d in tc_data:\n values = []\n for field in value_fields:\n if (d.get(field) is not None):\n values.append(d.get(field))\n del d[field]\n if (resource_type_parent in ['Group', 'Task', 'Victim']):\n d['name'] = ' : '.join(values)\n elif (resource_type_parent in ['Indicator']):\n d['summary'] = ' : '.join(values)\n if ('owner' in d):\n d['ownerName'] = d['owner']['name']\n del d['owner']\n if (d.get('type') is None):\n d['type'] = resource_type\n bulk_array.append(d)\n return bulk_array", "docstring": "Convert ThreatConnect JSON response to a Bulk Format.\n\n .. 
Attention:: This method is subject to frequent changes\n\nArgs:\n tc_data (dictionary): Array of data returned from TC API call.\n value_fields (list): Field names that contain the \"value\" data.\n resource_type (string): The resource type of the tc_data provided.\n resource_type_parent (string): The resource parent type of the tc_data provided.\n\nReturns:\n (list): A dictionary representing a TCEntityArray", "source": "codesearchnet_filtered"} -{"code": "def cidr(self, block, **kwargs):\n indicator_obj = CIDR(block, **kwargs)\n return self._indicator(indicator_obj)", "docstring": "Add CIDR data to Batch object.\n\nArgs:\n block (str): The value for this Indicator.\n confidence (str, kwargs): The threat confidence for this Indicator.\n date_added (str, kwargs): The date timestamp the Indicator was created.\n last_modified (str, kwargs): The date timestamp the Indicator was last modified.\n rating (str, kwargs): The threat rating for this Indicator.\n xid (str, kwargs): The external id for this Indicator.\n\nReturns:\n obj: An instance of CIDR.", "source": "codesearchnet_filtered"} -{"code": "def get_single_item_from_sequence(sequence, condition, ErrorClass=ValueError, no_item_error_message='No item matched condition', too_many_item_error_message='Too many items matched condition', append_sequence_to_error_message=True):\n filtered_sequence = [item for item in sequence if condition(item)]\n number_of_items_in_filtered_sequence = len(filtered_sequence)\n if (number_of_items_in_filtered_sequence == 0):\n error_message = no_item_error_message\n elif (number_of_items_in_filtered_sequence > 1):\n error_message = too_many_item_error_message\n else:\n return filtered_sequence[0]\n if append_sequence_to_error_message:\n error_message = '{}. Given: {}'.format(error_message, sequence)\n raise ErrorClass(error_message)", "docstring": "Return an item from a python sequence based on the given condition.\n\nArgs:\n sequence (sequence): The sequence to filter\n condition: A function that serves to filter items from `sequence`. 
Function\n must have one argument (a single item from the sequence) and return a boolean.\n ErrorClass (Exception): The error type raised in case the item isn't unique\n no_item_error_message (str): The message raised when no item matched the condtion\n too_many_item_error_message (str): The message raised when more than one item matched the condition\n append_sequence_to_error_message (bool): Show or hide what was the tested sequence in the error message.\n Hiding it may prevent sensitive data (such as password) to be exposed to public logs\n\nReturns:\n The only item in the sequence which matched the condition", "source": "codesearchnet_filtered"} -{"code": "def _run(self, data, store, signal, context, *, success_callback=None, stop_callback=None, abort_callback=None):\n if (data is None):\n data = MultiTaskData()\n data.add_dataset(self._name)\n try:\n if (self._callback_init is not None):\n self._callback_init(data, store, signal, context)\n result = self.run(data, store, signal, context)\n if (self._callback_finally is not None):\n self._callback_finally(TaskStatus.Success, data, store, signal, context)\n if (success_callback is not None):\n success_callback()\n except StopTask as err:\n if (self._callback_finally is not None):\n self._callback_finally(TaskStatus.Stopped, data, store, signal, context)\n if (stop_callback is not None):\n stop_callback(exc=err)\n result = (Action(data, limit=[]) if err.skip_successors else None)\n except AbortWorkflow as err:\n if (self._callback_finally is not None):\n self._callback_finally(TaskStatus.Aborted, data, store, signal, context)\n if (abort_callback is not None):\n abort_callback(exc=err)\n result = None\n signal.stop_workflow()\n except:\n if (self._callback_finally is not None):\n self._callback_finally(TaskStatus.Error, data, store, signal, context)\n signal.stop_workflow()\n raise\n if (result is None):\n data.flatten(in_place=True)\n data.add_task_history(self.name)\n return Action(data)\n else:\n if (not isinstance(result, Action)):\n raise TaskReturnActionInvalid()\n result.data.flatten(in_place=True)\n result.data.add_task_history(self.name)\n return result", "docstring": "The internal run method that decorates the public run method.\n\n This method makes sure data is being passed to and from the task.\n\nArgs:\n data (MultiTaskData): The data object that has been passed from the\n predecessor task.\n store (DataStoreDocument): The persistent data store object that allows the\n task to store data for access across the current\n workflow run.\n signal (TaskSignal): The signal object for tasks. 
It wraps the construction\n and sending of signals into easy to use methods.\n context (TaskContext): The context in which the tasks runs.\n success_callback: This function is called when the task completed successfully\n stop_callback: This function is called when a StopTask exception was raised.\n abort_callback: This function is called when an AbortWorkflow exception\n was raised.\n\nRaises:\n TaskReturnActionInvalid: If the return value of the task is not\n an Action object.\n\nReturns:\n Action: An Action object containing the data that should be passed on\n to the next task and optionally a list of successor tasks that\n should be executed.", "source": "codesearchnet_filtered"} -{"code": "def array_to_csv(array_like):\n stream = StringIO()\n np.savetxt(stream, array_like, delimiter=',', fmt='%s')\n return stream.getvalue()", "docstring": "Convert an array like object to CSV.\n\n To understand better what an array like object is see:\n https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays\n\nArgs:\n array_like (np.array or Iterable or int or float): array like object to be converted to CSV.\n\nReturns:\n (str): object serialized to CSV", "source": "codesearchnet_filtered"} -{"code": "def helper_add(access_token, ck_id, path, body):\n full_path = ''.join([path, \"('\", ck_id, \"')\"])\n full_path_encoded = urllib.parse.quote(full_path, safe='')\n endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n return do_ams_put(endpoint, full_path_encoded, body, access_token, 'json_only', '1.0;NetFx')", "docstring": "Helper Function to add strings to a URL path.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n ck_id (str): A CK ID.\n path (str): A URL Path.\n body (str): A Body.\n\nReturns:\n HTTP response. JSON body.", "source": "codesearchnet_filtered"} -{"code": "def trace_flush(self):\n cmd = enums.JLinkTraceCommand.FLUSH\n res = self._dll.JLINKARM_TRACE_Control(cmd, 0)\n if (res == 1):\n raise errors.JLinkException('Failed to flush the trace buffer.')\n return None", "docstring": "Flushes the trace buffer.\n\n After this method is called, the trace buffer is empty. 
This method is\n best called when the device is reset.\n\nArgs:\n self (JLink): the ``JLink`` instance.\n\nReturns:\n ``None``", "source": "codesearchnet_filtered"} -{"code": "def zip_ll_row(params, data_row):\n l = params[0]\n pi = params[1]\n d0 = (data_row == 0)\n likelihood = ((d0 * pi) + ((1 - pi) * poisson.pmf(data_row, l)))\n return (- np.log((likelihood + eps)).sum())", "docstring": "Returns the negative log-likelihood of a row given ZIP data.\n\nArgs:\n params (list): [lambda zero-inf]\n data_row (array): 1d array\n\nReturns:\n negative log-likelihood", "source": "codesearchnet_filtered"} -{"code": "def angle(x, y):\n dot = np.dot(x, y)\n x_mod = np.linalg.norm(x)\n y_mod = np.linalg.norm(y)\n cos_angle = (dot / (x_mod * y_mod))\n return np.degrees(np.arccos(cos_angle))", "docstring": "Calculate the angle between two vectors, in degrees.\n\nArgs:\n x (np.array): one vector.\n y (np.array): the other vector.\n\nReturns:\n (float): the angle between x and y in degrees.", "source": "codesearchnet_filtered"} -{"code": "def compare(a, b):\n a_is_text = isinstance(a, basestring)\n b_is_text = isinstance(b, basestring)\n if ((type(a) != type(b)) and (not (a_is_text and b_is_text))):\n _logger.error(u'Cannot compare %s to %s, types differ %s!=%s', a, b, type(a), type(b))\n raise ValueError(u'cannot compare inputs of differing types')\n if a_is_text:\n a = from_rfc3339(a, with_nanos=True)\n b = from_rfc3339(b, with_nanos=True)\n if (a < b):\n return (- 1)\n elif (a > b):\n return 1\n else:\n return 0", "docstring": "Compares two timestamps.\n\n ``a`` and ``b`` must be the same type, in addition to normal\n representations of timestamps that order naturally, they can be rfc3339\n formatted strings.\n\nArgs:\n a (string|object): a timestamp\n b (string|object): another timestamp\n\nReturns:\n int: -1 if a < b, 0 if a == b or 1 if a > b\n\nRaises:\n ValueError: if a or b are not the same type\n ValueError: if a or b strings but not in valid rfc3339 format", "source": "codesearchnet_filtered"} -{"code": "def pandas_mesh(df):\n xyz = [df[c].values for c in df.columns]\n index = pd.MultiIndex.from_tuples(zip(xyz[0], xyz[1]), names=['x', 'y'])\n series = [pd.Series(values, index=index) for values in xyz[2:]]\n (X, Y) = np.meshgrid(sorted(list(set(xyz[0]))), sorted(list(set(xyz[1]))))\n (N, M) = X.shape\n Zs = []\n for (k, s) in enumerate(series):\n Z = np.empty(X.shape)\n Z[:] = np.nan\n for (i, j) in itertools.product(range(N), range(M)):\n Z[(i, j)] = s.get((X[(i, j)], Y[(i, j)]), np.NAN)\n Zs += [Z]\n return OrderedDict(((df.columns[i], m) for (i, m) in enumerate(([X, Y] + Zs))))", "docstring": "Create numpy 2-D \"meshgrid\" from 3+ columns in a Pandas DataFrame\n\nArgs:\n df (DataFrame): Must have 3 or 4 columns of numerical data\n\nReturns:\n OrderedDict: column labels from the data frame are the keys, values are 2-D matrices\n All matrices have shape NxM, where N = len(set(df.iloc[:,0])) and M = len(set(df.iloc[:,1]))\n\n >>> pandas_mesh(pd.DataFrame(np.arange(18).reshape(3,6),\n ... 
columns=list('ABCDEF'))).values() # doctest: +NORMALIZE_WHITESPACE\n [array([[ 0, 6, 12],\n [ 0, 6, 12],\n [ 0, 6, 12]]),\n array([[ 1, 1, 1],\n [ 7, 7, 7],\n [13, 13, 13]]),\n array([[ 2., nan, nan],\n [ nan, 8., nan],\n [ nan, nan, 14.]]),\n array([[ 3., nan, nan],\n [ nan, 9., nan],\n [ nan, nan, 15.]]),\n array([[ 4., nan, nan],\n [ nan, 10., nan],\n [ nan, nan, 16.]]),\n array([[ 5., nan, nan],\n [ nan, 11., nan],\n [ nan, nan, 17.]])]", "source": "codesearchnet_filtered"} -{"code": "def get_dc_keywords(index_page):\n keyword_lists = (keyword_list.split() for keyword_list in parse_meta(index_page, 'dc.keywords', 'DC'))\n return [SourceString(keyword, source='DC') for keyword in sum(keyword_lists, [])]", "docstring": "Return list of `keywords` parsed from Dublin core.\n\nArgs:\n index_page (str): Content of the page as UTF-8 string\n\nReturns:\n list: List of :class:`.SourceString` objects.", "source": "codesearchnet_filtered"} -{"code": "def disassemble(self, annotate=False, blocks=False):\n ops = disassemble(self.co_code, self.internals)\n if annotate:\n ops = [self.annotate_op(op) for op in ops]\n if blocks:\n return blocks_from_ops(ops)\n else:\n return ops", "docstring": "Disassemble the bytecode of this code object into a series of\n opcodes and labels. Can also annotate the opcodes and group\n the opcodes into blocks based on the labels.\n\nArgs:\n annotate(bool): Whether to annotate the operations.\n blocks(bool): Whether to group the operations into blocks.\n\nReturns:\n list: A list of :class:`Op` (or :class:`AnnotatedOp`) instances\n and labels.", "source": "codesearchnet_filtered"} -{"code": "def grab_data(self, f_start=None, f_stop=None, t_start=None, t_stop=None, if_id=0):\n self.freqs = self.populate_freqs()\n self.timestamps = self.populate_timestamps()\n if (f_start is None):\n f_start = self.freqs[0]\n if (f_stop is None):\n f_stop = self.freqs[(- 1)]\n i0 = np.argmin(np.abs((self.freqs - f_start)))\n i1 = np.argmin(np.abs((self.freqs - f_stop)))\n if (i0 < i1):\n plot_f = self.freqs[i0:(i1 + 1)]\n plot_data = np.squeeze(self.data[(t_start:t_stop, ..., i0:(i1 + 1))])\n else:\n plot_f = self.freqs[i1:(i0 + 1)]\n plot_data = np.squeeze(self.data[(t_start:t_stop, ..., i1:(i0 + 1))])\n return (plot_f, plot_data)", "docstring": "Extract a portion of data by frequency range.\n\nArgs:\n f_start (float): start frequency in MHz\n f_stop (float): stop frequency in MHz\n if_id (int): IF input identification (req. 
when multiple IFs in file)\n\nReturns:\n (freqs, data) (np.arrays): frequency axis in MHz and data subset", "source": "codesearchnet_filtered"} -{"code": "def is_in_path(program):\n if (sys.version_info.major == 2):\n path = os.getenv('PATH')\n if (os.name == 'nt'):\n path = path.split(';')\n else:\n path = path.split(':')\n else:\n path = os.get_exec_path()\n for i in path:\n if os.path.isdir(i):\n if (program in os.listdir(i)):\n return True", "docstring": "Check if a program is in the system ``PATH``.\n\n Checks if a given program is in the user's ``PATH`` or not.\n\nArgs:\n program (str): The program to try to find in ``PATH``.\n\nReturns:\n bool: Is the program in ``PATH``?", "source": "codesearchnet_filtered"} -{"code": "def verified(self, institute_id):\n query = {'verb': 'validate', 'institute': institute_id}\n res = []\n validate_events = self.event_collection.find(query)\n for validated in list(validate_events):\n case_id = validated['case']\n var_obj = self.variant(case_id=case_id, document_id=validated['variant_id'])\n case_obj = self.case(case_id=case_id)\n if ((not case_obj) or (not var_obj)):\n continue\n var_obj['case_obj'] = {'display_name': case_obj['display_name'], 'individuals': case_obj['individuals']}\n res.append(var_obj)\n return res", "docstring": "Return all verified variants for a given institute\n\nArgs:\n institute_id(str): institute id\n\nReturns:\n res(list): a list with validated variants", "source": "codesearchnet_filtered"} -{"code": "def detailed_log_handler(self, handler):\n if (not self.opened()):\n handler = (handler or util.noop)\n self._detailed_log_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_EnableLogCom(self._detailed_log_handler)", "docstring": "Setter for the detailed log handler function.\n\nArgs:\n self (JLink): the ``JLink`` instance\n\nReturns:\n ``None``", "source": "codesearchnet_filtered"} -{"code": "def crack(ciphertext, *fitness_functions, min_key=0, max_key=26, shift_function=shift_case_english):\n if (min_key >= max_key):\n raise ValueError('min_key cannot exceed max_key')\n decryptions = []\n for key in range(min_key, max_key):\n plaintext = decrypt(key, ciphertext, shift_function=shift_function)\n decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))\n return sorted(decryptions, reverse=True)", "docstring": "Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``.\n\nExample:\n >>> decryptions = crack(\"KHOOR\", fitness.english.quadgrams)\n >>> print(''.join(decryptions[0].plaintext))\n HELLO\n\nArgs:\n ciphertext (iterable): The symbols to decrypt\n *fitness_functions (variable length argument list): Functions to score decryption with\n\n Keyword Args:\n min_key (int): Key to start with\n max_key (int): Key to stop at (exclusive)\n shift_function (function(shift, symbol)): Shift function to use\n\nReturns:\n Sorted list of decryptions\n\nRaises:\n ValueError: If min_key exceeds max_key\n ValueError: If no fitness_functions are given", "source": "codesearchnet_filtered"} -{"code": "def sort_dict(d, desc=True):\n sort = sorted(d.items(), key=(lambda x: x[1]), reverse=desc)\n return OrderedDict(sort)", "docstring": "Sort an ordered dictionary by value, descending.\n\nArgs:\n d (OrderedDict): An ordered dictionary.\n desc (bool): If true, sort desc.\n\nReturns:\n OrderedDict: The sorted dictionary.", "source": "codesearchnet_filtered"} -{"code": "def marginalize_out(node_indices, tpm):\n return (tpm.sum(tuple(node_indices), keepdims=True) / 
np.array(tpm.shape)[list(node_indices)].prod())", "docstring": "Marginalize out nodes from a TPM.\n\nArgs:\n node_indices (list[int]): The indices of nodes to be marginalized out.\n tpm (np.ndarray): The TPM to marginalize the node out of.\n\nReturns:\n np.ndarray: A TPM with the same number of dimensions, with the nodes\n marginalized out.", "source": "codesearchnet_filtered"} -{"code": "def sampler(dataframe, modulo, column='client_id', sample_id=42):\n return dataframe.withColumn('sampler', udf((lambda key: ((crc32((key or '')) & 4294967295) % modulo)))(column)).where(('sampler = %s' % sample_id)).drop('sampler')", "docstring": "Collect a sample of clients given an input column\n\n Filter dataframe based on the modulus of the CRC32 of a given string\n column matching a given sample_id. if dataframe has already been filtered\n by sample_id, then modulo should be a multiple of 100, column should be\n \"client_id\", and the given sample_id should match the value previously\n used, optionally plus multiples of 100.\n\nArgs:\n dataframe: A Dataframe to be sampled\n modulo (int): selects a 1/modulo sampling of dataframe\n column (str): name of a string column to sample on\n sample_id (int): modulus result to select for sampling\n\nReturns:\n A DataFrame sampled on the given inputs.", "source": "codesearchnet_filtered"} -{"code": "def _send_message(self, method, endpoint, params=None, data=None):\n url = (self.url + endpoint)\n r = self.session.request(method, url, params=params, data=data, auth=self.auth, timeout=30)\n return r.json()", "docstring": "Send API request.\n\nArgs:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str]): JSON-encoded string payload for POST\n\nReturns:\n dict/list: JSON response", "source": "codesearchnet_filtered"} -{"code": "def delete_resource_view(self, resource_view):\n if isinstance(resource_view, str):\n if (is_valid_uuid(resource_view) is False):\n raise HDXError(('%s is not a valid resource view id!' % resource_view))\n resource_view = ResourceView({'id': resource_view}, configuration=self.configuration)\n else:\n resource_view = self._get_resource_view(resource_view)\n if ('id' not in resource_view):\n found = False\n title = resource_view.get('title')\n for rv in self.get_resource_views():\n if (resource_view['title'] == rv['title']):\n resource_view = rv\n found = True\n break\n if (not found):\n raise HDXError(('No resource views have title %s in this resource!' 
% title))\n resource_view.delete_from_hdx()", "docstring": "Delete a resource view from the resource and HDX\n\nArgs:\n resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def xmoe2_dense(sz):\n hparams = mtf_transformer.mtf_transformer_paper_lm(sz)\n hparams.attention_dropout = 0.0\n hparams.relu_dropout = 0.0\n hparams.layer_prepostprocess_dropout = 0.0\n hparams.max_length = 1024\n hparams.batch_size = 128\n hparams.learning_rate_schedule = 'rsqrt_decay*linear_decay'\n hparams.learning_rate_decay_steps = 65536\n hparams.layout = 'batch:batch;vocab:model;d_ff:model;heads:model'\n hparams.mesh_shape = 'batch:32'\n return hparams", "docstring": "Series of architectural experiments on language modeling.\n\n Larger models than the ones above.\n\n All models are trained on sequences of 1024 tokens.\n\n We assume infinite training data, so no dropout necessary.\n We process 2^36 tokens in training = 524288 steps at batch size 128\n\n TODO(noam): find a large enough dataset for these experiments.\n\n You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,\n (1 epoch = ~46000 steps) so training will cover about 11 epochs.\n\n Note: configurations and code are likely to change without notice.\n\n Run on TPU 4x4 for 524288 steps unless otherwise indicated.\n\nArgs:\n sz: an integer\n\nReturns:\n a hparams", "source": "codesearchnet_filtered"} -{"code": "def macro_state(self, micro_state):\n assert (len(micro_state) == len(self.micro_indices))\n reindexed = self.reindex()\n micro_state = np.array(micro_state)\n return tuple(((0 if (sum(micro_state[list(reindexed.partition[i])]) in self.grouping[i][0]) else 1) for i in self.macro_indices))", "docstring": "Translate a micro state to a macro state\n\nArgs:\n micro_state (tuple[int]): The state of the micro nodes in this\n coarse-graining.\n\nReturns:\n tuple[int]: The state of the macro system, translated as specified\n by this coarse-graining.\n\nExample:\n >>> coarse_grain = CoarseGrain(((1, 2),), (((0,), (1, 2)),))\n >>> coarse_grain.macro_state((0, 0))\n (0,)\n >>> coarse_grain.macro_state((1, 0))\n (1,)\n >>> coarse_grain.macro_state((1, 1))\n (1,)", "source": "codesearchnet_filtered"} -{"code": "def api_request(self, method_name, params):\n url = self._method_url(method_name)\n data = json.dumps(params)\n return self._make_request(url=url, method='post', data=data)", "docstring": "Execute an arbitrary method.\n\nArgs:\n method_name (str): include the controller name: 'devices/search'\n params (dict): the method parameters\n\nReturns:\n A dict with the response\n\nRaises:\n requests.exceptions.HTTPError", "source": "codesearchnet_filtered"} -{"code": "def get_geno_marker(self, marker, return_index=False):\n if (self._mode != 'r'):\n raise UnsupportedOperation(\"not available in 'w' mode\")\n if (marker not in self._bim.index):\n raise ValueError('{}: marker not in BIM'.format(marker))\n seek_index = self._bim.loc[(marker, 'i')]\n self.seek(seek_index)\n if return_index:\n return (self._read_current_marker(), seek_index)\n return self._read_current_marker()", "docstring": "Gets the genotypes for a given marker.\n\nArgs:\n marker (str): The name of the marker.\n return_index (bool): Wether to return the marker's index or not.\n\nReturns:\n numpy.ndarray: The genotypes of the marker (additive format).", "source": "codesearchnet_filtered"} -{"code": "def 
get_product_order_book(self, product_id, level=1):\n params = {'level': level}\n return self._send_message('get', '/products/{}/book'.format(product_id), params=params)", "docstring": "Get a list of open orders for a product.\n\n The amount of detail shown can be customized with the `level`\n parameter:\n * 1: Only the best bid and ask\n * 2: Top 50 bids and asks (aggregated)\n * 3: Full order book (non aggregated)\n\n Level 1 and Level 2 are recommended for polling. For the most\n up-to-date data, consider using the websocket stream.\n\n **Caution**: Level 3 is only recommended for users wishing to\n maintain a full real-time order book using the websocket\n stream. Abuse of Level 3 via polling will cause your access to\n be limited or blocked.\n\nArgs:\n product_id (str): Product\n level (Optional[int]): Order book level (1, 2, or 3).\n Default is 1.\n\nReturns:\n dict: Order book. Example for level 1::\n {\n \"sequence\": \"3\",\n \"bids\": [\n [ price, size, num-orders ],\n ],\n \"asks\": [\n [ price, size, num-orders ],\n ]\n }", "source": "codesearchnet_filtered"} -{"code": "def concat(cartesians, ignore_index=False, keys=None):\n frames = [molecule._frame for molecule in cartesians]\n new = pd.concat(frames, ignore_index=ignore_index, keys=keys, verify_integrity=True)\n if (type(ignore_index) is bool):\n new = pd.concat(frames, ignore_index=ignore_index, keys=keys, verify_integrity=True)\n else:\n new = pd.concat(frames, ignore_index=True, keys=keys, verify_integrity=True)\n if (type(ignore_index) is int):\n new.index = range(ignore_index, (ignore_index + len(new)))\n else:\n new.index = ignore_index\n return cartesians[0].__class__(new)", "docstring": "Join list of cartesians into one molecule.\n\n Wrapper around the :func:`pandas.concat` function.\n Default values are the same as in the pandas function except for\n ``verify_integrity`` which is set to true in case of this library.\n\nArgs:\n ignore_index (sequence, bool, int): If it is a boolean, it\n behaves like in the description of\n :meth:`pandas.DataFrame.append`.\n If it is a sequence, it becomes the new index.\n If it is an integer,\n ``range(ignore_index, ignore_index + len(new))``\n becomes the new index.\n keys (sequence): If multiple levels passed, should contain tuples.\n Construct hierarchical index using the passed keys as\n the outermost level\n\nReturns:\n Cartesian:", "source": "codesearchnet_filtered"} -{"code": "def create_handler(Model, name=None, **kwds):\n\n async def action_handler(service, action_type, payload, props, notify=True, **kwds):\n if (action_type == get_crud_action('create', (name or Model))):\n try:\n message_props = {}\n if ('correlation_id' in props):\n message_props['correlation_id'] = props['correlation_id']\n for requirement in Model.required_fields():\n field_name = requirement.name\n if ((not (field_name in payload)) and (field_name != 'id')):\n raise ValueError(('Required field not found in payload: %s' % field_name))\n new_model = Model(**payload)\n new_model.save()\n if notify:\n (await service.event_broker.send(payload=ModelSerializer().serialize(new_model), action_type=change_action_status(action_type, success_status()), **message_props))\n except Exception as err:\n if notify:\n (await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props))\n else:\n raise err\n return action_handler", "docstring": "This factory returns an action handler that creates a new instance of\n the specified model when a create action is 
recieved, assuming the\n action follows nautilus convetions.\n\nArgs:\n Model (nautilus.BaseModel): The model to create when the action\n received.\n\nReturns:\n function(action_type, payload): The action handler for this model", "source": "codesearchnet_filtered"} -{"code": "def has_access(user, required_roles, match_all=True):\n if (ROLE_ADMIN in user.roles):\n return True\n if isinstance(required_roles, str):\n if (required_roles in user.roles):\n return True\n return False\n if match_all:\n for role in required_roles:\n if (role not in user.roles):\n return False\n return True\n else:\n for role in required_roles:\n if (role in user.roles):\n return True\n return False", "docstring": "Check if the user meets the role requirements. If mode is set to AND, all the provided roles must apply\n\nArgs:\n user (:obj:`User`): User object\n required_roles (`list` of `str`): List of roles that the user must have applied\n match_all (`bool`): If true, all the required_roles must be applied to the user, else any one match will\n return `True`\n\nReturns:\n `bool`", "source": "codesearchnet_filtered"} -{"code": "def dt_dt(sdat, tstart=None, tend=None):\n tseries = sdat.tseries_between(tstart, tend)\n time = tseries['t'].values\n temp = tseries['Tmean'].values\n dtdt = ((temp[1:] - temp[:(- 1)]) / (time[1:] - time[:(- 1)]))\n return (dtdt, time[:(- 1)])", "docstring": "Derivative of temperature.\n\n Compute dT/dt as a function of time using an explicit Euler scheme.\n\nArgs:\n sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\n tstart (float): time at which the computation should start. Use the\n beginning of the time series data if set to None.\n tend (float): time at which the computation should end. Use the\n end of the time series data if set to None.\n\nReturns:\n tuple of :class:`numpy.array`: derivative of temperature and time\n arrays.", "source": "codesearchnet_filtered"} -{"code": "def getMonthsBuffer(self, direction):\n if (direction == ReadMonths.kWhReverse):\n return self.m_rev_mons\n return self.m_mons", "docstring": "Get the months tariff SerialBlock for meter.\n\nArgs:\n direction (int): A :class:`~ekmmeters.ReadMonths` value.\n\nReturns:\n SerialBlock: Requested months tariffs buffer.", "source": "codesearchnet_filtered"} -{"code": "def Mint(self, wallet, mint_to_addr, attachment_args, invoke_attrs=None):\n invoke_args = [self.ScriptHash.ToString(), 'mintTokens', []]\n invoke_args = (invoke_args + attachment_args)\n (tx, fee, results, num_ops, engine_success) = TestInvokeContract(wallet, invoke_args, None, True, from_addr=mint_to_addr, invoke_attrs=invoke_attrs)\n return (tx, fee, results)", "docstring": "Call the \"mintTokens\" function of the smart contract.\n\nArgs:\n wallet (neo.Wallets.Wallet): a wallet instance.\n mint_to_addr (str): public address of the account to mint the tokens to.\n attachment_args: (list): a list of arguments used to attach neo and/or gas to an invoke, eg ['--attach-gas=10.0','--attach-neo=3']\n invoke_attrs: (list): a list of TransactionAttributes to be attached to the mint transaction\n\nReturns:\n tuple:\n InvocationTransaction: the transaction.\n int: the transaction fee.\n list: the neo VM evaluation stack results.", "source": "codesearchnet_filtered"} -{"code": "def exhaust(fn, transform=None, *args, **kwargs):\n while True:\n iterRes = fn(*args, **kwargs)\n if iterRes:\n for item in (transform(iterRes) if transform else iterRes):\n (yield item)\n else:\n break", "docstring": "Repeatedly call a function, starting with init, until 
false-y, yielding each item in turn.\n\n The ``transform`` parameter can be used to map a collection to another format, for example iterating over a\n :class:`dict` by value rather than key.\n\n Use with state-synced functions to retrieve all results.\n\nArgs:\n fn (method): function to call\n transform (method): secondary function to convert result into an iterable\n args (list): positional arguments to pass to ``fn``\n kwargs (dict): keyword arguments to pass to ``fn``\n\nReturns:\n generator: generator of objects produced from the method", "source": "codesearchnet_filtered"} -{"code": "def set_trunk_groups(self, vid, value=None, default=False, disable=False):\n if default:\n return self.configure_vlan(vid, 'default trunk group')\n if disable:\n return self.configure_vlan(vid, 'no trunk group')\n current_value = self.get(vid)['trunk_groups']\n failure = False\n value = make_iterable(value)\n for name in set(value).difference(current_value):\n if (not self.add_trunk_group(vid, name)):\n failure = True\n for name in set(current_value).difference(value):\n if (not self.remove_trunk_group(vid, name)):\n failure = True\n return (not failure)", "docstring": "Configures the list of trunk groups support on a vlan\n\n This method handles configuring the vlan trunk group value to default\n if the default flag is set to True. If the default flag is set\n to False, then this method will calculate the set of trunk\n group names to be added and to be removed.\n\n EosVersion:\n 4.13.7M\n\nArgs:\n vid (str): The VLAN ID to configure\n value (str): The list of trunk groups that should be configured\n for this vlan id.\n default (bool): Configures the trunk group value to default if\n this value is true\n disable (bool): Negates the trunk group value if set to true\n\nReturns:\n True if the operation was successful otherwise False", "source": "codesearchnet_filtered"} -{"code": "def get_surface_sites(self, tag=False):\n from pymatgen.analysis.local_env import VoronoiNN\n a = SpacegroupAnalyzer(self.oriented_unit_cell)\n ucell = a.get_symmetrized_structure()\n cn_dict = {}\n v = VoronoiNN()\n unique_indices = [equ[0] for equ in ucell.equivalent_indices]\n for i in unique_indices:\n el = ucell[i].species_string\n if (el not in cn_dict.keys()):\n cn_dict[el] = []\n cn = v.get_cn(ucell, i, use_weights=True)\n cn = float(('%.5f' % round(cn, 5)))\n if (cn not in cn_dict[el]):\n cn_dict[el].append(cn)\n v = VoronoiNN()\n (surf_sites_dict, properties) = ({'top': [], 'bottom': []}, [])\n for (i, site) in enumerate(self):\n top = (True if (site.frac_coords[2] > self.center_of_mass[2]) else False)\n try:\n cn = float(('%.5f' % round(v.get_cn(self, i, use_weights=True), 5)))\n if (cn < min(cn_dict[site.species_string])):\n properties.append(True)\n key = ('top' if top else 'bottom')\n surf_sites_dict[key].append([site, i])\n else:\n properties.append(False)\n except RuntimeError:\n properties.append(True)\n key = ('top' if top else 'bottom')\n surf_sites_dict[key].append([site, i])\n if tag:\n self.add_site_property('is_surf_site', properties)\n return surf_sites_dict", "docstring": "Returns the surface sites and their indices in a dictionary. The\n oriented unit cell of the slab will determine the coordination number\n of a typical site. We use VoronoiNN to determine the\n coordination number of bulk sites and slab sites. Due to the\n pathological error resulting from some surface sites in the\n VoronoiNN, we assume any site that has this error is a surface\n site as well. 
This will work for elemental systems only for now. Useful\n for analysis involving broken bonds and for finding adsorption sites.\n\nArgs:\n tag (bool): Option to adds site attribute \"is_surfsite\" (bool)\n to all sites of slab. Defaults to False\n\nReturns:\n A dictionary grouping sites on top and bottom of the slab\n together.\n {\"top\": [sites with indices], \"bottom\": [sites with indices}\n\n TODO:\n Is there a way to determine site equivalence between sites in a slab\n and bulk system? This would allow us get the coordination number of\n a specific site for multi-elemental systems or systems with more\n than one unequivalent site. This will allow us to use this for\n compound systems.", "source": "codesearchnet_filtered"} -{"code": "def inputs_valid(self, outputs=None):\n if (self.operation == Transaction.CREATE):\n return self._inputs_valid(['dummyvalue' for _ in self.inputs])\n elif (self.operation == Transaction.TRANSFER):\n return self._inputs_valid([output.fulfillment.condition_uri for output in outputs])\n else:\n allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS)\n raise TypeError('`operation` must be one of {}'.format(allowed_ops))", "docstring": "Validates the Inputs in the Transaction against given\n Outputs.\n\nNote:\n Given a `CREATE` Transaction is passed,\n dummy values for Outputs are submitted for validation that\n evaluate parts of the validation-checks to `True`.\n\nArgs:\n outputs (:obj:`list` of :class:`~bigchaindb.common.\n transaction.Output`): A list of Outputs to check the\n Inputs against.\n\nReturns:\n bool: If all Inputs are valid.", "source": "codesearchnet_filtered"} -{"code": "def memory_write32(self, addr, data, zone=None):\n return self.memory_write(addr, data, zone, 32)", "docstring": "Writes words to memory of a target system.\n\nArgs:\n self (JLink): the ``JLink`` instance\n addr (int): start address to write to\n data (list): list of words to write\n zone (str): optional memory zone to access\n\nReturns:\n Number of words written to target.\n\nRaises:\n JLinkException: on memory access error.", "source": "codesearchnet_filtered"} -{"code": "def __checkDecisionParameters(self, result, **values):\n error = []\n if (not result):\n error.append('Function parameter (result array) should contain one or more header string!')\n if (not values):\n error.append('Function parameter (values variables) should contain one or more variable')\n for header in result:\n if (not (header in self.header)):\n error.append((('String (' + header) + ') in result is not in header!'))\n for header in values:\n if (not (header in self.header)):\n error.append((('Variable (' + header) + ') in values is not in header!'))\n elif (not values[header].split()):\n error.append((('Variable (' + header) + ') in values is empty string'))\n if error:\n return error", "docstring": "Checker of decision parameters, it will raise ValueError if finds something wrong.\n\nArgs:\n result (array of str): See public decision methods\n **values (array of str): See public decision methods\n\nRaises:\n ValueError: Result array none.\n ValueError: Values dict none.\n ValueError: Not find result key in header.\n ValueError: Result value is empty.\n\nReturns:\n Error array values", "source": "codesearchnet_filtered"} -{"code": "def get_proj(prj_code):\n if (prj_code in CUSTOM_PRJ):\n proj = pyproj.Proj(CUSTOM_PRJ[prj_code])\n else:\n proj = pyproj.Proj(init=prj_code)\n return proj", "docstring": "Helper method for handling projection codes that are unknown to pyproj\n\nArgs:\n prj_code (str): 
an epsg proj code\n\nReturns:\n projection: a pyproj projection", "source": "codesearchnet_filtered"} -{"code": "def signCertAs(self, cert, signas):\n cakey = self.getCaKey(signas)\n if (cakey is None):\n raise s_exc.NoCertKey(('Missing .key for %s' % signas))\n cacert = self.getCaCert(signas)\n if (cacert is None):\n raise s_exc.NoCertKey(('Missing .crt for %s' % signas))\n cert.set_issuer(cacert.get_subject())\n cert.sign(cakey, self.signing_digest)", "docstring": "Signs a certificate with a CA keypair.\n\nArgs:\n cert (OpenSSL.crypto.X509): The certificate to sign.\n signas (str): The CA keypair name to sign the new keypair with.\n\nExample:\n Sign a certificate with the CA \"myca\":\n\n cdir.signCertAs(mycert, 'myca')\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def make_sine_surface(dims=DEFAULT_DIMS, offset=0.5, scale=1.0):\n gradients = (((np.array(make_gradients(dims)) - offset) * scale) * np.pi)\n return np.sin(np.linalg.norm(gradients, axis=0))", "docstring": "Makes a surface from the 3D sine function.\n\nArgs:\n dims (pair): the dimensions of the surface to create\n offset (float): an offset applied to the function\n scale (float): a scale applied to the sine frequency\n\nReturns:\n surface: A surface.", "source": "codesearchnet_filtered"} -{"code": "def fit_transform(self, tables=None, transformer_dict=None, transformer_list=None, missing=None):\n if (missing is None):\n missing = self.missing\n else:\n self.missing = missing\n warnings.warn(DEPRECATION_MESSAGE.format('fit_transform'), DeprecationWarning)\n transformed = {}\n if (tables is None):\n tables = self.table_dict\n if ((transformer_dict is None) and (transformer_list is None)):\n transformer_dict = self.transformer_dict\n for table_name in tables:\n (table, table_meta) = tables[table_name]\n transformed_table = self.fit_transform_table(table, table_meta, transformer_dict, transformer_list)\n transformed[table_name] = transformed_table\n return transformed", "docstring": "Create, apply and store the specified transformers for the given tables.\n\nArgs:\n tables(dict): Mapping of table names to `tuple` where each tuple is on the form\n (`pandas.DataFrame`, `dict`). The `DataFrame` contains the table data\n and the `dict` the corresponding meta information.\n If not specified, the tables will be retrieved using the meta_file.\n\n transformer_dict(dict): Mapping `tuple(str, str)` -> `str` where the tuple is\n (table_name, column_name).\n\n transformer_list(list): List of transformers to use. 
Overrides the transformers in\n the meta_file.\n\n missing(bool): Wheter or not use NullTransformer to handle missing values.\n\nReturns:\n dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).", "source": "codesearchnet_filtered"} -{"code": "def google_maps_geoloc_link(data):\n if isinstance(data, str):\n lat_lon = ip_geoloc(data)\n if (lat_lon is None):\n return ''\n (lat, lon) = lat_lon\n else:\n (lat, lon) = data\n loc = ('%s,%s' % (lat, lon))\n return ('https://www.google.com/maps/place/@%s,17z/data=!3m1!4b1!4m5!3m4!1s0x0:0x0!8m2!3d%s!4d%s' % (loc, lat, lon))", "docstring": "Get a link to google maps pointing on this IP's geolocation.\n\nArgs:\n data (str/tuple): IP address or (latitude, longitude).\n\nReturns:\n str: a link to google maps pointing on this IP's geolocation.", "source": "codesearchnet_filtered"} -{"code": "def get_extra_managed_storage_volume_paths(self, start=0, count=(- 1), filter='', sort=''):\n uri = (self.URI + '/repair?alertFixType=ExtraManagedStorageVolumePaths')\n return self._client.get_all(start, count, filter=filter, sort=sort, uri=uri)", "docstring": "Gets the list of extra managed storage volume paths.\n\nArgs:\n start:\n The first item to return, using 0-based indexing.\n If not specified, the default is 0 - start with the first available item.\n count:\n The number of resources to return. A count of -1 requests all items.\n The actual number of items in the response might differ from the requested\n count if the sum of start and count exceeds the total number of items.\n filter (list or str):\n A general filter/query string to narrow the list of items returned. The\n default is no filter; all resources are returned.\n sort:\n The sort order of the returned data set. By default, the sort order is based\n on create time with the oldest entry first.\n\nReturns:\n list: A list of extra managed storage volume paths.", "source": "codesearchnet_filtered"} -{"code": "def __batch_evaluate(self, test_events):\n percentiles = np.zeros(len(test_events))\n all_items = set(self.item_buffer)\n for (i, e) in enumerate(test_events):\n unobserved = all_items\n if (not self.repeat):\n unobserved -= self.rec.users[e.user.index]['known_items']\n unobserved.add(e.item.index)\n candidates = np.asarray(list(unobserved))\n (recos, scores) = self.__recommend(e, candidates)\n pos = np.where((recos == e.item.index))[0][0]\n percentiles[i] = ((pos / (len(recos) - 1)) * 100)\n return np.mean(percentiles)", "docstring": "Evaluate the current model by using the given test events.\n\nArgs:\n test_events (list of Event): Current model is evaluated by these events.\n\nReturns:\n float: Mean Percentile Rank for the test set.", "source": "codesearchnet_filtered"} -{"code": "def bootstrap(score_objs, n_boot=1000):\n all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)\n return all_samples.sum(axis=1)", "docstring": "Given a set of DistributedROC or DistributedReliability objects, this function performs a\n bootstrap resampling of the objects and returns n_boot aggregations of them.\n\nArgs:\n score_objs: A list of DistributedROC or DistributedReliability objects. 
Objects must have an __add__ method\n n_boot (int): Number of bootstrap samples\n\nReturns:\n An array of DistributedROC or DistributedReliability", "source": "codesearchnet_filtered"} -{"code": "def __cloudflare_request(self, *, account, path, args=None):\n if (not args):\n args = {}\n if (not self.cloudflare_initialized[account.account_id]):\n self.cloudflare_session[account.account_id] = requests.Session()\n self.cloudflare_session[account.account_id].headers.update({'X-Auth-Email': account.email, 'X-Auth-Key': account.api_key, 'Content-Type': 'application/json'})\n self.cloudflare_initialized[account.account_id] = True\n if ('per_page' not in args):\n args['per_page'] = 100\n response = self.cloudflare_session[account.account_id].get((account.endpoint + path), params=args)\n if (response.status_code != 200):\n raise CloudFlareError('Request failed: {}'.format(response.text))\n return response.json()", "docstring": "Helper function to interact with the CloudFlare API.\n\nArgs:\n account (:obj:`CloudFlareAccount`): CloudFlare Account object\n path (`str`): URL endpoint to communicate with\n args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume\n\nReturns:\n `dict`", "source": "codesearchnet_filtered"} -{"code": "def check_candidate_exists(self, basepath, candidates):\n checked = []\n for item in candidates:\n abspath = os.path.join(basepath, item)\n if os.path.exists(abspath):\n checked.append(abspath)\n return checked", "docstring": "Check that at least one candidate exist into a directory.\n\nArgs:\n basepath (str): Directory path where to search for candidate.\n candidates (list): List of candidate file paths.\n\nReturns:\n list: List of existing candidates.", "source": "codesearchnet_filtered"} -{"code": "def get_available_targets(self, **kwargs):\n uri = self._helper.build_uri_with_query_string(kwargs, '/available-targets')\n return self._helper.do_get(uri)", "docstring": "Retrieves a list of the target servers and empty device bays that are available for assignment to the server\n profile.\n\nArgs:\n enclosureGroupUri (str): The URI of the enclosure group associated with the resource.\n serverHardwareTypeUri (str): The URI of the server hardware type associated with the resource.\n profileUri (str): The URI of the server profile associated with the resource.\n scopeUris (str): An expression to restrict the resources returned according to\n the scopes to which they are assigned.\n filter (list or str): A general filter/query string to narrow the list of items returned.\n The default is no filter, all resources are returned.\n\nReturns:\n list: List of available servers and bays.", "source": "codesearchnet_filtered"} -{"code": "def inspect_image(self, image):\n return self._result(self._get(self._url('/images/{0}/json', image)), True)", "docstring": "Get detailed information about an image. 
Similar to the ``docker\n inspect`` command, but only for images.\n\nArgs:\n image (str): The image to inspect\n\nReturns:\n (dict): Similar to the output of ``docker inspect``, but as a\n single dict\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "codesearchnet_filtered"} -{"code": "def remote_upload_status(self, limit=None, remote_upload_id=None):\n kwargs = {'limit': limit, 'id': remote_upload_id}\n params = {key: value for (key, value) in kwargs.items() if value}\n return self._get('remotedl/status', params=params)", "docstring": "Checks a remote file upload to status.\n\nArgs:\n limit (:obj:`int`, optional): Maximum number of results (Default: 5, Maximum: 100).\n remote_upload_id (:obj:`str`, optional): Remote Upload ID.\n\nReturns:\n dict: dictionary containing all remote uploads, each dictionary element is a dictionary. ::\n\n {\n \"24\": {\n \"id\": \"24\",\n \"remoteurl\": \"http://proof.ovh.net/files/100Mio.dat\",\n \"status\": \"new\",\n \"folderid\": \"4248\",\n \"added\": \"2015-02-21 09:20:26\",\n \"last_update\": \"2015-02-21 09:20:26\",\n \"extid\": False,\n \"url\": False\n },\n \"22\": {\n \"id\": \"22\",\n \"remoteurl\": \"http://proof.ovh.net/files/1Gio.dat\",\n \"status\": \"downloading\",\n \"bytes_loaded\": \"823997062\",\n \"bytes_total\": \"1073741824\",\n \"folderid\": \"4248\",\n \"added\": \"2015-02-21 09:20:26\",\n \"last_update\": \"2015-02-21 09:21:56\",\n \"extid\": False,\n \"url\": False\n },\n ...\n }", "source": "codesearchnet_filtered"} -{"code": "def create_subtask(self, cor, name=None, stop_timeout=1.0):\n if self.stopped:\n raise InternalError('Cannot add a subtask to a parent that is already stopped')\n subtask = BackgroundTask(cor, name, loop=self._loop, stop_timeout=stop_timeout)\n self.add_subtask(subtask)\n return subtask", "docstring": "Create and add a subtask from a coroutine.\n\n This function will create a BackgroundTask and then\n call self.add_subtask() on it.\n\nArgs:\n cor (coroutine): The coroutine that should be wrapped\n in a background task.\n name (str): An optional name for the task.\n stop_timeout (float): The maximum time to wait for this\n subtask to die after stopping it.\n\nReturns:\n Backgroundtask: The created subtask.", "source": "codesearchnet_filtered"} -{"code": "def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):\n errors = np.abs((y - y_hat))[(:, 0)]\n if (not smooth):\n return errors\n smoothing_window = int((smoothing_window * len(y)))\n return pd.Series(errors).ewm(span=smoothing_window).mean().values", "docstring": "Compute an array of absolute errors comparing predictions and expected output.\n\n If smooth is True, apply EWMA to the resulting array of errors.\n\nArgs:\n y (array): Ground truth.\n y_hat (array): Predictions array.\n smoothing_window (float): Size of the smoothing window, expressed as a proportion\n of the total length of y.\n smooth (bool): whether the returned errors should be smoothed with EWMA.\n\nReturns:\n (array): errors", "source": "codesearchnet_filtered"} -{"code": "def list_media_services(access_token, subscription_id):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.media/mediaservices?api-version=', MEDIA_API])\n return do_get(endpoint, access_token)", "docstring": "List the media services in a subscription.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n\nReturns:\n HTTP response. 
JSON body.", "source": "codesearchnet_filtered"} -{"code": "def to_matrix(xx, yy, zz, xy, yz, xz):\n matrix = np.array([[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]])\n return matrix", "docstring": "Convert a list of matrix components to a symmetric 3x3 matrix.\n Inputs should be in the order xx, yy, zz, xy, yz, xz.\n\nArgs:\n xx (float): xx component of the matrix.\n yy (float): yy component of the matrix.\n zz (float): zz component of the matrix.\n xy (float): xy component of the matrix.\n yz (float): yz component of the matrix.\n xz (float): xz component of the matrix.\n\nReturns:\n (np.array): The matrix, as a 3x3 numpy array.", "source": "codesearchnet_filtered"} -{"code": "def distribution(self, start=None, end=None, normalized=True, mask=None):\n (start, end, mask) = self._check_boundaries(start, end, mask=mask)\n counter = histogram.Histogram()\n for (start, end, _) in mask.iterperiods(value=True):\n for (t0, t1, value) in self.iterperiods(start, end):\n duration = utils.duration_to_number((t1 - t0), units='seconds')\n try:\n counter[value] += duration\n except histogram.UnorderableElements as e:\n counter = histogram.Histogram.from_dict(dict(counter), key=hash)\n counter[value] += duration\n if normalized:\n return counter.normalized()\n else:\n return counter", "docstring": "Calculate the distribution of values over the given time range from\n `start` to `end`.\n\nArgs:\n start (orderable, optional): The lower time bound of\n when to calculate the distribution. By default, the\n first time point will be used.\n\n end (orderable, optional): The upper time bound of\n when to calculate the distribution. By default, the\n last time point will be used.\n\n normalized (bool): If True, distribution will sum to\n one. If False and the time values of the TimeSeries\n are datetimes, the units will be seconds.\n\n mask (:obj:`TimeSeries`, optional): A\n domain on which to calculate the distribution.\n\nReturns:\n :obj:`Histogram` with the results.", "source": "codesearchnet_filtered"} -{"code": "def format_statevector(vec, decimals=None):\n num_basis = len(vec)\n vec_complex = np.zeros(num_basis, dtype=complex)\n for i in range(num_basis):\n vec_complex[i] = (vec[i][0] + (1j * vec[i][1]))\n if decimals:\n vec_complex = np.around(vec_complex, decimals=decimals)\n return vec_complex", "docstring": "Format statevector coming from the backend to present to the Qiskit user.\n\nArgs:\n vec (list): a list of [re, im] complex numbers.\n decimals (int): the number of decimals in the statevector.\n If None, no rounding is done.\n\nReturns:\n list[complex]: a list of python complex numbers.", "source": "codesearchnet_filtered"} -{"code": "def _set_auditpol_data(option, value):\n auditpol_values = {'None': 'No Auditing', '0': 'No Auditing', '1': 'Success', '2': 'Failure', '3': 'Success and Failure'}\n defaults = _get_audit_defaults(option)\n return __utils__['auditpol.set_setting'](name=defaults['Auditpol Name'], value=auditpol_values[value])", "docstring": "Helper function that updates the current applied settings to match what has\n just been set in the audit.csv files. We're doing it this way instead of\n running `gpupdate`\n\nArgs:\n option (str): The name of the option to set\n value (str): The value to set. 
['None', '0', '1', '2', '3']\n\nReturns:\n bool: ``True`` if successful, otherwise ``False``", "source": "codesearchnet_filtered"} -{"code": "def clone(self, spec=None, **overrides):\n settings = dict(self.get_param_values(), **overrides)\n if (spec is None):\n spec = (self.name, overrides.get('label', self.label))\n if (('label' in overrides) and isinstance(spec, basestring)):\n spec = (spec, overrides['label'])\n elif (('label' in overrides) and isinstance(spec, tuple)):\n if (overrides['label'] != spec[1]):\n self.param.warning('Using label as supplied by keyword ({!r}), ignoring tuple value {!r}'.format(overrides['label'], spec[1]))\n spec = (spec[0], overrides['label'])\n return self.__class__(spec, **{k: v for (k, v) in settings.items() if (k not in ['name', 'label'])})", "docstring": "Clones the Dimension with new parameters\n\n Derive a new Dimension that inherits existing parameters\n except for the supplied, explicit overrides\n\nArgs:\n spec (tuple, optional): Dimension tuple specification\n **overrides: Dimension parameter overrides\n\nReturns:\n Cloned Dimension object", "source": "codesearchnet_filtered"} -{"code": "def non_existing_path(path_, dpath=None, offset=0, suffix=None, force_fmt=False):\n import utool as ut\n from os.path import basename, dirname\n if (dpath is None):\n dpath = dirname(path_)\n base_fmtstr = basename(path_)\n if (suffix is not None):\n base_fmtstr = ut.augpath(base_fmtstr, suffix)\n if ('%' not in base_fmtstr):\n if (not force_fmt):\n first_choice = join(dpath, base_fmtstr)\n if (not exists(first_choice)):\n return first_choice\n base_fmtstr = ut.augpath(base_fmtstr, '%d')\n dname_list = ut.glob(dpath, pattern='*', recursive=False, with_files=True, with_dirs=True)\n conflict_set = set((basename(dname) for dname in dname_list))\n newname = ut.get_nonconflicting_string(base_fmtstr, conflict_set, offset=offset)\n newpath = join(dpath, newname)\n return newpath", "docstring": "r\"\"\"\n Searches for and finds a path garuenteed to not exist.\n\nArgs:\n path_ (str): path string. 
If may include a \"%\" formatstr.\n dpath (str): directory path(default = None)\n offset (int): (default = 0)\n suffix (None): (default = None)\n\nReturns:\n str: path_ - path string\n\n CommandLine:\n python -m utool.util_path non_existing_path\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_path import * # NOQA\n >>> import utool as ut\n >>> base = ut.ensure_app_resource_dir('utool', 'tmp')\n >>> ut.touch(base + '/tmp.txt')\n >>> ut.touch(base + '/tmp0.txt')\n >>> ut.delete(base + '/tmp1.txt')\n >>> path_ = base + '/tmp.txt'\n >>> newpath = ut.non_existing_path(path_)\n >>> assert basename(newpath) == 'tmp1.txt'\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_path import * # NOQA\n >>> import utool as ut\n >>> base = ut.ensure_app_resource_dir('utool', 'tmp')\n >>> ut.ensurepath(base + '/dir_old')\n >>> ut.ensurepath(base + '/dir_old0')\n >>> ut.ensurepath(base + '/dir_old1')\n >>> ut.delete(base + '/dir_old2')\n >>> path_ = base + '/dir'\n >>> suffix = '_old'\n >>> newpath = ut.non_existing_path(path_, suffix=suffix)\n >>> ut.assert_eq(basename(newpath), 'dir_old2')", "source": "codesearchnet_filtered"} -{"code": "def get_tag(self, tag_name, **kwargs):\n return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX, tag_name, **kwargs)", "docstring": "get a tag by name\n\nArgs:\n tag_name (string): name of tag to get\n\nReturns:\n dictionary of the response", "source": "codesearchnet_filtered"} -{"code": "def get_argflag(argstr_, default=False, help_='', return_specified=None, need_prefix=True, return_was_specified=False, argv=None, debug=None, **kwargs):\n if (argv is None):\n argv = sys.argv\n assert isinstance(default, bool), 'default must be boolean'\n argstr_list = meta_util_iter.ensure_iterable(argstr_)\n _register_arg(argstr_list, bool, default, help_)\n parsed_val = default\n was_specified = False\n if (debug is None):\n debug = DEBUG\n import os\n for (key, val) in os.environ.items():\n key = key.upper()\n sentinal = 'UTOOL_'\n if key.startswith(sentinal):\n flag = ('--' + key[len(sentinal):].lower().replace('_', '-'))\n if (val.upper() in ['TRUE', 'ON']):\n pass\n elif (val.upper() in ['FALSE', 'OFF']):\n continue\n else:\n continue\n new_argv = [flag]\n argv = (argv[:] + new_argv)\n if debug:\n print('ENV SPECIFIED COMMAND LINE')\n print(('argv.extend(new_argv=%r)' % (new_argv,)))\n for argstr in argstr_list:\n if (not ((argstr.find('--') == 0) or ((argstr.find('-') == 0) and (len(argstr) == 2)))):\n raise AssertionError(('Invalid argstr: %r' % (argstr,)))\n if (not need_prefix):\n noprefix = argstr.replace('--', '')\n if (noprefix in argv):\n parsed_val = True\n was_specified = True\n break\n noarg = argstr.replace('--', '--no')\n if (argstr in argv):\n parsed_val = True\n was_specified = True\n break\n elif (noarg in argv):\n parsed_val = False\n was_specified = True\n break\n elif ((argstr + '=True') in argv):\n parsed_val = True\n was_specified = True\n break\n elif ((argstr + '=False') in argv):\n parsed_val = False\n was_specified = True\n break\n if (return_specified is None):\n return_specified = return_was_specified\n if return_specified:\n return (parsed_val, was_specified)\n else:\n return parsed_val", "docstring": "Checks if the commandline has a flag or a corresponding noflag\n\nArgs:\n argstr_ (str, list, or tuple): the flag to look for\n default (bool): dont use this (default = False)\n help_ (str): a help string (default = '')\n return_specified (bool): returns if flag was specified or not (default = False)\n\nReturns:\n tuple: (parsed_val, 
was_specified)\n\n TODO:\n depricate return_was_specified\n\n CommandLine:\n python -m utool.util_arg --exec-get_argflag --noface --exec-mode\n python -m utool.util_arg --exec-get_argflag --foo --exec-mode\n python -m utool.util_arg --exec-get_argflag --no-foo --exec-mode\n python -m utool.util_arg --exec-get_argflag --foo=True --exec-mode\n python -m utool.util_arg --exec-get_argflag --foo=False --exec-mode\n\nExample:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_arg import * # NOQA\n >>> argstr_ = '--foo'\n >>> default = False\n >>> help_ = ''\n >>> return_specified = True\n >>> (parsed_val, was_specified) = get_argflag(argstr_, default, help_, return_specified)\n >>> result = ('(parsed_val, was_specified) = %s' % (str((parsed_val, was_specified)),))\n >>> print(result)", "source": "codesearchnet_filtered"} -{"code": "def do_check_pep8(files, status):\n for file_name in files:\n args = ['flake8', '--max-line-length=120', '{0}'.format(file_name)]\n output = run(*args)\n if output:\n status.append('Python PEP8/Flake8: {0}: {1}'.format(file_name, output))\n return status", "docstring": "Run the python pep8 tool against the filst of supplied files.\n Append any linting errors to the returned status list\n\nArgs:\n files (str): list of files to run pep8 against\n status (list): list of pre-receive check failures to eventually print\n to the user\n\nReturns:\n status list of current pre-redeive check failures. Might be an empty\n list.", "source": "codesearchnet_filtered"} -{"code": "def _ParseHTTPHeaders(self, header_data, offset, display_name):\n header_string = header_data.decode('ascii', errors='replace')\n try:\n http_header_start = header_string.index('request-method')\n except ValueError:\n logger.debug('No request method in header: \"{0:s}\"'.format(header_string))\n return (None, None)\n http_headers = header_string[http_header_start:]\n header_parts = http_headers.split('\\x00')\n request_method = header_parts[1]\n if (request_method not in self._REQUEST_METHODS):\n logger.debug(\"[{0:s}] {1:s}:{2:d}: Unknown HTTP method '{3:s}'. Response headers: '{4:s}'\".format(self.NAME, display_name, offset, request_method, header_string))\n try:\n response_head_start = http_headers.index('response-head')\n except ValueError:\n logger.debug('No response head in header: \"{0:s}\"'.format(header_string))\n return (request_method, None)\n response_head = http_headers[response_head_start:]\n response_head_parts = response_head.split('\\x00')\n response_head_text = response_head_parts[1]\n response_head_text_parts = response_head_text.split('\\r\\n')\n response_code = response_head_text_parts[0]\n if (not response_code.startswith('HTTP')):\n logger.debug(\"[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. 
Response headers: '{3:s}'.\".format(self.NAME, display_name, offset, header_string))\n return (request_method, response_code)", "docstring": "Extract relevant information from HTTP header.\n\nArgs:\n header_data (bytes): HTTP header data.\n offset (int): offset of the cache record, relative to the start of\n the Firefox cache file.\n display_name (str): display name of the Firefox cache file.\n\nReturns:\n tuple: containing:\n\n str: HTTP request method or None if the value cannot be extracted.\n str: HTTP response code or None if the value cannot be extracted.", "source": "codesearchnet_filtered"} -{"code": "def formula_double_format(afloat, ignore_ones=True, tol=1e-08):\n if (ignore_ones and (afloat == 1)):\n return ''\n elif (abs((afloat - int(afloat))) < tol):\n return str(int(afloat))\n else:\n return str(round(afloat, 8))", "docstring": "This function is used to make pretty formulas by formatting the amounts.\n Instead of Li1.0 Fe1.0 P1.0 O4.0, you get LiFePO4.\n\nArgs:\n afloat (float): a float\n ignore_ones (bool): if true, floats of 1 are ignored.\n tol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2\n\nReturns:\n A string representation of the float for formulas.", "source": "codesearchnet_filtered"} -{"code": "def keep_file(self, task, response, min_size=None, max_size=None):\n try:\n img = Image.open(BytesIO(response.content))\n except (IOError, OSError):\n return False\n task['img_size'] = img.size\n if (min_size and (not self._size_gt(img.size, min_size))):\n return False\n if (max_size and (not self._size_lt(img.size, max_size))):\n return False\n return True", "docstring": "Decide whether to keep the image\n\n Compare image size with ``min_size`` and ``max_size`` to decide.\n\nArgs:\n response (Response): response of requests.\n min_size (tuple or None): minimum size of required images.\n max_size (tuple or None): maximum size of required images.\n\nReturns:\n bool: whether to keep the image.", "source": "codesearchnet_filtered"} -{"code": "def update(self, webhookId, name=None, targetUrl=None, **request_parameters):\n check_type(webhookId, basestring, may_be_none=False)\n check_type(name, basestring)\n check_type(targetUrl, basestring)\n put_data = dict_from_items_with_values(request_parameters, name=name, targetUrl=targetUrl)\n json_data = self._session.put(((API_ENDPOINT + '/') + webhookId), json=put_data)\n return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Update a webhook, by ID.\n\nArgs:\n webhookId(basestring): The webhook ID.\n name(basestring): A user-friendly name for this webhook.\n targetUrl(basestring): The URL that receives POST requests for\n each event.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\nReturns:\n Webhook: A Webhook object with the updated Webex Teams webhook\n details.\n\nRaises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet_filtered"} -{"code": "def from_file(cls, filename, constant_lattice=True, **kwargs):\n fname = os.path.basename(filename)\n if fnmatch(fname, '*XDATCAR*'):\n structures = Xdatcar(filename).structures\n elif fnmatch(fname, 'vasprun*.xml*'):\n structures = Vasprun(filename).structures\n else:\n raise ValueError('Unsupported file')\n return cls.from_structures(structures, constant_lattice=constant_lattice, **kwargs)", "docstring": "Convenience constructor to obtain trajectory from XDATCAR or vasprun.xml file\n\nArgs:\n filename 
(str): The filename to read from.\n constant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD\n simulation. True results in\n\nReturns:\n (Trajectory)", "source": "codesearchnet_filtered"} -{"code": "def notify_progress(self, conn_string, operation, finished, total, wait=True):\n if (operation not in self.PROGRESS_OPERATIONS):\n raise ArgumentError('Invalid operation for progress event: {}'.format(operation))\n event = dict(operation=operation, finished=finished, total=total)\n if wait:\n return self.notify_event(conn_string, 'progress', event)\n self.notify_event_nowait(conn_string, 'progress', event)\n return None", "docstring": "Send a progress event.\n\n Progress events can be sent for ``debug`` and ``script`` operations and\n notify the caller about the progress of these potentially long-running\n operations. They have two integer properties that specify what fraction\n of the operation has been completed.\n\nArgs:\n conn_string (str): The device that is sending the event.\n operations (str): The operation that is in progress: debug or script\n finished (int): The number of \"steps\" that have finished.\n total (int): The total number of steps to perform.\n wait (bool): Whether to return an awaitable that we can use to\n block until the notification has made it to all callbacks.\n\nReturns:\n awaitable or None: An awaitable if wait=True.\n\n If wait is False, the notification is run in the background with\n no way to check its progress and None is returned.", "source": "codesearchnet_filtered"} -{"code": "def decode_dict(value_fields, client):\n return {key: decode_value(value, client) for (key, value) in six.iteritems(value_fields)}", "docstring": "Converts a protobuf map of Firestore ``Value``-s.\n\nArgs:\n value_fields (google.protobuf.pyext._message.MessageMapContainer): A\n protobuf map of Firestore ``Value``-s.\n client (~.firestore_v1beta1.client.Client): A client that has\n a document factory.\n\nReturns:\n Dict[str, Union[NoneType, bool, int, float, datetime.datetime, \\\n str, bytes, dict, ~google.cloud.Firestore.GeoPoint]]: A dictionary\n of native Python values converted from the ``value_fields``.", "source": "codesearchnet_filtered"} -{"code": "def forbidden(cls, errors=None):\n if cls.expose_status:\n cls.response.content_type = 'application/json'\n cls.response._status_line = '403 Forbidden'\n return cls(403, errors=errors).to_json", "docstring": "Shortcut API for HTTP 403 `Forbidden` response.\n\nArgs:\n errors (list): Response key/value data.\n\nReturns:\n WSResponse Instance.", "source": "codesearchnet_filtered"} -{"code": "def run(self, shell=False, ignore_errors=False, stdin=False, check_output=False):\n previous_directory = os.getcwd()\n os.chdir(self.directory)\n try:\n kwargs = {'stderr': sys.stderr, 'stdin': (sys.stdin if stdin else None), 'env': self.env_vars, 'shell': shell}\n if check_output:\n return subprocess.check_output(self.command, **kwargs).decode('utf8')\n else:\n kwargs['stdout'] = sys.stdout\n return subprocess.check_call(self.command, **kwargs)\n except subprocess.CalledProcessError:\n if ignore_errors:\n pass\n else:\n raise\n os.chdir(previous_directory)", "docstring": "Run subcommand.\n\nArgs:\n shell (Optional[bool]): Run command using shell (default False)\n ignore_errors (Optional[bool]): If the command has a non-zero return code, don't raise an exception (default False)\n stdin (Optional[bool]): Plug input from stdin when running command (default False)\n check_output (Optional[bool]): Return command 
output as string (default False)\n\nReturns:\n String if check_output is True, else None.\n\nRaises:\n subprocess.CalledProcessError when the command has an error, unless ignore_errors is True.", "source": "codesearchnet_filtered"} -{"code": "def enable_imu_streaming(self, enabled_imus, enabled_sensors=SENSOR_ALL):\n imus_enabled = 0\n for imu in enabled_imus:\n imus_enabled |= (1 << imu)\n if (enabled_sensors == 0):\n logger.warn('Not enabling IMUs, no sensors enabled!')\n return False\n if (not self.dongle._enable_imu_streaming(self, imus_enabled, enabled_sensors)):\n logger.warn('Failed to enable IMU streaming (imus_enabled={}, enabled_sensors={}'.format(imus_enabled, enabled_sensors))\n return False\n self.enabled_imus = enabled_imus\n self.enabled_sensors = enabled_sensors\n return True", "docstring": "Configures and enables IMU sensor data streaming.\n\n NOTE: only one streaming mode can be active at any time, so e.g. if you\n want to stream IMU data, you must disable SK8-ExtAna streaming first.\n\nArgs:\n enabled_imus (list): a list of distinct ints in the range `0`-`4`\n inclusive identifying the IMU. `0` is the SK8 itself, and\n `1`-`4` are the subsidiary IMUs on the USB chain, starting\n from the end closest to the SK8.\n enabled_sensors (int): to save battery, you can choose to enable some\n or all of the sensors on each enabled IMU. By default, the\n accelerometer, magnetometer, and gyroscope are all enabled. Pass\n a bitwise OR of one or more of :const:`SENSOR_ACC`,\n :const:`SENSOR_MAG`, and :const:`SENSOR_GYRO` to gain finer\n control over the active sensors.\n\nReturns:\n bool. True if successful, False if an error occurred.", "source": "codesearchnet_filtered"} -{"code": "def _dataset_load_from_hdx(self, id_or_name):\n if (not self._load_from_hdx('dataset', id_or_name)):\n return False\n self._dataset_create_resources()\n return True", "docstring": "Loads the dataset given by either id or name from HDX\n\nArgs:\n id_or_name (str): Either id or name of dataset\n\nReturns:\n bool: True if loaded, False if not", "source": "codesearchnet_filtered"} -{"code": "def is_displayed(target):\n is_displayed = getattr(target, 'is_displayed', None)\n if ((not is_displayed) or (not callable(is_displayed))):\n raise TypeError(\"Target has no attribute 'is_displayed' or not callable\")\n if (not is_displayed()):\n raise WebDriverException('element not visible')", "docstring": "Assert whether the target is displayed\n\nArgs:\n target(WebElement): WebElement Object.\n\nReturns:\n Return True if the element is displayed or return False otherwise.", "source": "codesearchnet_filtered"} -{"code": "def plot_main(pid, return_fig_ax=False):\n global WORKING_DIRECTORY, SNR_CUT\n if isinstance(pid, PlotInput):\n pid = pid.return_dict()\n WORKING_DIRECTORY = '.'\n if ('WORKING_DIRECTORY' not in pid['general'].keys()):\n pid['general']['WORKING_DIRECTORY'] = '.'\n SNR_CUT = 5.0\n if ('SNR_CUT' not in pid['general'].keys()):\n pid['general']['SNR_CUT'] = SNR_CUT\n if ('switch_backend' in pid['general'].keys()):\n plt.switch_backend(pid['general']['switch_backend'])\n running_process = MakePlotProcess(**{**pid, **pid['general'], **pid['plot_info'], **pid['figure']})\n running_process.input_data()\n running_process.setup_figure()\n running_process.create_plots()\n if ('save_figure' in pid['figure'].keys()):\n if (pid['figure']['save_figure'] is True):\n running_process.fig.savefig(((pid['general']['WORKING_DIRECTORY'] + '/') + pid['figure']['output_path']), **pid['figure']['savefig_kwargs'])\n if 
('show_figure' in pid['figure'].keys()):\n if (pid['figure']['show_figure'] is True):\n plt.show()\n if (return_fig_ax is True):\n return (running_process.fig, running_process.ax)\n return", "docstring": "Main function for creating these plots.\n\n Reads in plot info dict from json file or dictionary in script.\n\nArgs:\n return_fig_ax (bool, optional): Return figure and axes objects.\n\nReturns:\n 2-element tuple containing\n - **fig** (*obj*): Figure object for customization outside of those in this program.\n - **ax** (*obj*): Axes object for customization outside of those in this program.", "source": "codesearchnet_filtered"} -{"code": "def hpo_terms(store, query=None, limit=None):\n hpo_phenotypes = {}\n if limit:\n limit = int(limit)\n hpo_phenotypes['phenotypes'] = list(store.hpo_terms(text=query, limit=limit))\n return hpo_phenotypes", "docstring": "Retrieves a list of HPO terms from scout database\n\nArgs:\n store (obj): an adapter to the scout database\n query (str): the term to search in the database\n limit (str): the number of desired results\n\nReturns:\n hpo_phenotypes (dict): the complete list of HPO objects stored in scout", "source": "codesearchnet_filtered"} -{"code": "def create_contentkey_authorization_policy(access_token, content):\n path = '/ContentKeyAuthorizationPolicies'\n endpoint = ''.join([ams_rest_endpoint, path])\n body = content\n return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Content Key Authorization Policy.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n content (str): Content Payload.\n\nReturns:\n HTTP response. JSON body.", "source": "codesearchnet_filtered"} -{"code": "def get_rbounds(step):\n if (step.geom is not None):\n rcmb = step.geom.rcmb\n else:\n rcmb = step.sdat.par['geometry']['r_cmb']\n if (step.sdat.par['geometry']['shape'].lower() == 'cartesian'):\n rcmb = 0\n rcmb = max(rcmb, 0)\n return (rcmb, (rcmb + 1))", "docstring": "Radial or vertical position of boundaries.\n\nArgs:\n step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\n instance.\n\nReturns:\n tuple of floats: radial or vertical positions of boundaries of the\n domain.", "source": "codesearchnet_filtered"} -{"code": "def swd_read8(self, offset):\n value = self._dll.JLINK_SWD_GetU8(offset)\n return ctypes.c_uint8(value).value", "docstring": "Gets a unit of ``8`` bits from the input buffer.\n\nArgs:\n self (JLink): the ``JLink`` instance\n offset (int): the offset (in bits) from which to start reading\n\nReturns:\n The integer read from the input buffer.", "source": "codesearchnet_filtered"} -{"code": "def _OpenPathSpec(self, path_specification, ascii_codepage='cp1252'):\n if (not path_specification):\n return None\n file_entry = self._file_system.GetFileEntryByPathSpec(path_specification)\n if (file_entry is None):\n return None\n file_object = file_entry.GetFileObject()\n if (file_object is None):\n return None\n registry_file = dfwinreg_regf.REGFWinRegistryFile(ascii_codepage=ascii_codepage)\n try:\n registry_file.Open(file_object)\n except IOError as exception:\n logger.warning('Unable to open Windows Registry file with error: {0!s}'.format(exception))\n file_object.close()\n return None\n return registry_file", "docstring": "Opens the Windows Registry file specified by the path specification.\n\nArgs:\n path_specification (dfvfs.PathSpec): path specification.\n ascii_codepage (Optional[str]): ASCII string codepage.\n\nReturns:\n WinRegistryFile: Windows Registry file or None.", "source": 
"codesearchnet_filtered"} -{"code": "def fetch_tuples(self, max_tuples=20, timeout=None):\n tuples = list()\n if (timeout is None):\n while (len(tuples) < max_tuples):\n fetcher = self._data_fetcher\n if (not fetcher):\n break\n tuples.append(fetcher.items.get())\n return tuples\n timeout = float(timeout)\n end = (time.time() + timeout)\n while (len(tuples) < max_tuples):\n qto = (end - time.time())\n if (qto <= 0):\n break\n try:\n fetcher = self._data_fetcher\n if (not fetcher):\n break\n tuples.append(fetcher.items.get(timeout=qto))\n except queue.Empty:\n break\n return tuples", "docstring": "Fetch a number of tuples from this view.\n\n Fetching of data must have been started with\n :py:meth:`start_data_fetch` before calling this method.\n\n If ``timeout`` is ``None`` then the returned list will\n contain ``max_tuples`` tuples. Otherwise if the timeout is reached\n the list may contain less than ``max_tuples`` tuples.\n\nArgs:\n max_tuples(int): Maximum number of tuples to fetch.\n timeout(float): Maximum time to wait for ``max_tuples`` tuples.\n\nReturns:\n list: List of fetched tuples.\n .. versionadded:: 1.12", "source": "codesearchnet_filtered"} -{"code": "def get_account_history(self, account_id, **kwargs):\n endpoint = '/accounts/{}/ledger'.format(account_id)\n return self._send_paginated_message(endpoint, params=kwargs)", "docstring": "List account activity. Account activity either increases or\n decreases your account balance.\n\n Entry type indicates the reason for the account change.\n * transfer:\tFunds moved to/from Coinbase to cbpro\n * match:\tFunds moved as a result of a trade\n * fee:\t Fee as a result of a trade\n * rebate: Fee rebate as per our fee schedule\n\n If an entry is the result of a trade (match, fee), the details\n field will contain additional information about the trade.\n\nArgs:\n account_id (str): Account id to get history of.\n kwargs (dict): Additional HTTP request parameters.\n\nReturns:\n list: History information for the account. 
Example::\n [\n {\n \"id\": \"100\",\n \"created_at\": \"2014-11-07T08:19:27.028459Z\",\n \"amount\": \"0.001\",\n \"balance\": \"239.669\",\n \"type\": \"fee\",\n \"details\": {\n \"order_id\": \"d50ec984-77a8-460a-b958-66f114b0de9b\",\n \"trade_id\": \"74\",\n \"product_id\": \"BTC-USD\"\n }\n },\n {\n ...\n }\n ]", "source": "codesearchnet_filtered"} -{"code": "def run_needle_alignment(seq_a, seq_b, gapopen=10, gapextend=0.5, write_outfile=True, outdir=None, outfile=None, force_rerun=False):\n if (not outdir):\n outdir = ''\n if write_outfile:\n seq_a = ssbio.protein.sequence.utils.cast_to_str(seq_a)\n seq_b = ssbio.protein.sequence.utils.cast_to_str(seq_b)\n if (not outfile):\n outfile = op.join(tempfile.gettempdir(), 'temp_alignment.needle')\n else:\n outfile = op.join(outdir, outfile)\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n cmd = 'needle -outfile=\"{}\" -asequence=asis::{} -bsequence=asis::{} -gapopen={} -gapextend={}'.format(outfile, seq_a, seq_b, gapopen, gapextend)\n command = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n (out, err) = command.communicate()\n return outfile\n else:\n seq_a = ssbio.protein.sequence.utils.cast_to_str(seq_a)\n seq_b = ssbio.protein.sequence.utils.cast_to_str(seq_b)\n cmd = 'needle -auto -stdout -asequence=asis::{} -bsequence=asis::{} -gapopen={} -gapextend={}'.format(seq_a, seq_b, gapopen, gapextend)\n command = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n stdout = command.stdout.read()\n return stdout", "docstring": "Run the needle alignment program for two strings and return the raw alignment result.\n\n More info:\n EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle\n Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84\n Using strings as input: https://www.biostars.org/p/91124/\n\nArgs:\n id_a: ID of reference sequence\n seq_a (str, Seq, SeqRecord): Reference sequence\n id_b: ID of sequence to be aligned\n seq_b (str, Seq, SeqRecord): String representation of sequence to be aligned\n gapopen: Gap open penalty is the score taken away when a gap is created\n gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap\n outdir (str, optional): Path to output directory. Default is the current directory.\n outfile (str, optional): Name of output file. 
If not set, is {id_a}_{id_b}_align.txt\n force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.\n\nReturns:\n str: Raw alignment result of the needle alignment in srspair format.", "source": "codesearchnet_filtered"} -{"code": "def get_folders(cls, session, mailbox_or_id):\n if isinstance(mailbox_or_id, Mailbox):\n mailbox_or_id = mailbox_or_id.id\n return cls(('/mailboxes/%d/folders.json' % mailbox_or_id), session=session, out_type=Folder)", "docstring": "List the folders for the mailbox.\n\nArgs:\n mailbox_or_id (helpscout.models.Mailbox or int): Mailbox or the ID\n of the mailbox to get the folders for.\n\nReturns:\n RequestPaginator(output_type=helpscout.models.Folder): Folders\n iterator.", "source": "codesearchnet_filtered"} -{"code": "def add_arguments(self, parser):\n parser.add_argument('name', nargs=1, choices=['kinetis'], help='name of MCU to unlock')\n return self.add_common_arguments(parser, True)", "docstring": "Adds the unlock command arguments to the parser.\n\nArgs:\n self (UnlockCommand): the ``UnlockCommand`` instance\n parser (argparse.ArgumentParser): the parser to add the arguments to\n\nReturns:\n ``None``", "source": "codesearchnet_filtered"} -{"code": "def exists(self, uri):\n try:\n urllib.request.urlopen(uri)\n return True\n except urllib.error.HTTPError:\n return False", "docstring": "Method returns true is the entity exists in the Repository,\n false, otherwise\n\nArgs:\n uri(str): Entity URI\n\nReturns:\n bool", "source": "codesearchnet_filtered"} -{"code": "def load_module(name):\n try:\n mod = None\n mod = sys.modules[name]\n except KeyError:\n mod = import_module(name)\n finally:\n if (not mod):\n raise ImportError(('unable to import module %s' % name))\n return mod", "docstring": "Attempts to load a module into the current environment\n\n This function will load a module specified by name. The module\n name is first checked to see if it is already loaded and will return\n the module if it is. If the module hasn't been previously loaded\n it will attempt to import it\n\nArgs:\n name (str): Specifies the full name of the module. For instance\n pyeapi.api.vlans\n\nReturns:\n The module that has been imported or retrieved from the sys modules", "source": "codesearchnet_filtered"} -{"code": "def get_session(region, profile=None):\n if (profile is None):\n logger.debug('No AWS profile explicitly provided. 
Falling back to default.')\n profile = default_profile\n logger.debug(('Building session using profile \"%s\" in region \"%s\"' % (profile, region)))\n session = boto3.Session(region_name=region, profile_name=profile)\n c = session._session.get_component('credential_provider')\n provider = c.get_provider('assume-role')\n provider.cache = credential_cache\n provider._prompter = ui.getpass\n return session", "docstring": "Creates a boto3 session with a cache\n\nArgs:\n region (str): The region for the session\n profile (str): The profile for the session\n\nReturns:\n :class:`boto3.session.Session`: A boto3 session with\n credential caching", "source": "codesearchnet_filtered"} -{"code": "def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):\n return plot_network_results(network=trainer.module, ds=trainer.ds, mean=mean, std=std, title=title, show=show, save=save)", "docstring": "Plot the performance of the Network and SupervisedDataSet in a pybrain Trainer\n\n DataSet target and output values are denormalized before plotting with:\n\n output * std + mean\n\n Which inverses the normalization\n\n (output - mean) / std\n\nArgs:\n trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet\n ds (DataSet): a pybrain DataSet to override the one contained in `trainer`.\n Required if trainer is a Network instance rather than a Trainer instance.\n mean (float): mean of the denormalized dataset (default: 0)\n Only affects the scale of the plot\n std (float): std (standard deviation) of the denormalized dataset (default: 1)\n title (str): title to display on the plot.\n\nReturns:\n 3-tuple: (trainer, mean, std), A trainer/dataset along with denormalization info", "source": "codesearchnet_filtered"} -{"code": "def rate_to_mcs(rate, bw=20, long_gi=True):\n if (bw not in [20, 40, 80, 160]):\n raise Exception(('Unknown bandwidth: %d MHz' % bw))\n idx = int(((math.log((bw / 10), 2) - 1) * 2))\n if (not long_gi):\n idx += 1\n for (mcs, rates) in MCS_TABLE.items():\n if (abs((rates[idx] - rate)) < 0.001):\n return mcs\n for (idx, r) in enumerate(DOT11A_RATES):\n if (abs((r - rate)) < 0.001):\n return idx\n raise Exception(('MCS not found: rate=%f, bw=%d, long_gi=%s' % (rate, bw, long_gi)))", "docstring": "Convert bit rate to MCS index.\n\nArgs:\n rate (float): bit rate in Mbps\n bw (int): bandwidth, 20, 40, 80, ...\n long_gi (bool): True if long GI is used.\n\nReturns:\n mcs (int): MCS index\n\n >>> rate_to_mcs(120, bw=40, long_gi=False)\n 5", "source": "codesearchnet_filtered"} -{"code": "def is_valid(self, tol: float=DISTANCE_TOLERANCE) -> bool:\n if (len(self.sites) == 1):\n return True\n all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]\n return bool((np.min(all_dists) > tol))", "docstring": "True if SiteCollection does not contain atoms that are too close\n together. Note that the distance definition is based on type of\n SiteCollection. Cartesian distances are used for non-periodic\n Molecules, while PBC is taken into account for periodic structures.\n\nArgs:\n tol (float): Distance tolerance. 
Default is 0.5A.\n\nReturns:\n (bool) True if SiteCollection does not contain atoms that are too\n close together.", "source": "codesearchnet_filtered"} -{"code": "def _parse_redistribution(self, config):\n redistributions = list()\n regexp = 'redistribute .*'\n matches = re.findall(regexp, config)\n for line in matches:\n ospf_redist = line.split()\n if (len(ospf_redist) == 2):\n protocol = ospf_redist[1]\n redistributions.append(dict(protocol=protocol))\n if (len(ospf_redist) == 4):\n protocol = ospf_redist[1]\n route_map_name = ospf_redist[3]\n redistributions.append(dict(protocol=protocol, route_map=route_map_name))\n return dict(redistributions=redistributions)", "docstring": "Parses config file for the OSPF router ID\n\nArgs:\n config (str): Running configuration\n\nReturns:\n list: dict:\n keys: protocol (str)\n route-map (optional) (str)", "source": "codesearchnet_filtered"} -{"code": "def post_async(self, path, params=None):\n request = Post(self._get_next_id(), path, params)\n request.set_callback(self._q.put)\n future = self._dispatch_request(request)\n return future", "docstring": "Asynchronously calls a function on a child block\n\nArgs:\n path (list): The path to post to\n params (dict): parameters for the call\n\nReturns:\n Future: as single Future that will resolve to the result", "source": "codesearchnet_filtered"} -{"code": "def dump_tree(self, statement=None, indent_level=0):\n out = u''\n indent = (u' ' * indent_level)\n if (statement is None):\n for root_statement in self.statements:\n out += self.dump_tree(root_statement, indent_level)\n else:\n out += ((indent + str(statement)) + u'\\n')\n if (len(statement.children) > 0):\n for child in statement.children:\n out += self.dump_tree(child, indent_level=(indent_level + 4))\n return out", "docstring": "Dump the AST for this parsed file.\n\nArgs:\n statement (SensorGraphStatement): the statement to print\n if this function is called recursively.\n indent_level (int): The number of spaces to indent this\n statement. 
Used for recursively printing blocks of\n statements.\n\nReturns:\n str: The AST for this parsed sg file as a nested\n tree with one node per line and blocks indented.", "source": "codesearchnet_filtered"} -{"code": "def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=0.01):\n if (not self.structure):\n return None\n sg = SpacegroupAnalyzer(self.structure)\n symmops = sg.get_point_group_operations(cartesian=cartesian)\n points = np.dot(kpoint, [m.rotation_matrix for m in symmops])\n rm_list = []\n for i in range((len(points) - 1)):\n for j in range((i + 1), len(points)):\n if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol):\n rm_list.append(i)\n break\n return np.delete(points, rm_list, axis=0)", "docstring": "Returns a list of unique symmetrically equivalent k-points.\n\nArgs:\n kpoint (1x3 array): coordinate of the k-point\n cartesian (bool): kpoint is in cartesian or fractional coordinates\n tol (float): tolerance below which coordinates are considered equal\n\nReturns:\n ([1x3 array] or None): if structure is not available returns None", "source": "codesearchnet_filtered"} -{"code": "def get_nets_other(self, response):\n nets = []\n for match in re.finditer('^(inetnum|inet6num|route):[^\\\\S\\\\n]+((.+?)[^\\\\S\\\\n]-[^\\\\S\\\\n](.+)|.+)$', response, re.MULTILINE):\n try:\n net = copy.deepcopy(BASE_NET)\n net_range = match.group(2).strip()\n try:\n net['range'] = net['range'] = ('{0} - {1}'.format(ip_network(net_range)[0].__str__(), ip_network(net_range)[(- 1)].__str__()) if ('/' in net_range) else net_range)\n except ValueError:\n net['range'] = net_range\n if (match.group(3) and match.group(4)):\n addrs = []\n addrs.extend(summarize_address_range(ip_address(match.group(3).strip()), ip_address(match.group(4).strip())))\n cidr = ', '.join([i.__str__() for i in collapse_addresses(addrs)])\n else:\n cidr = ip_network(net_range).__str__()\n net['cidr'] = cidr\n net['start'] = match.start()\n net['end'] = match.end()\n nets.append(net)\n except (ValueError, TypeError):\n pass\n return nets", "docstring": "The function for parsing network blocks from generic whois data.\n\nArgs:\n response (:obj:`str`): The response from the whois/rwhois server.\n\nReturns:\n list of dict: Mapping of networks with start and end positions.\n\n ::\n\n [{\n 'cidr' (str) - The network routing block\n 'start' (int) - The starting point of the network\n 'end' (int) - The endpoint point of the network\n }]", "source": "codesearchnet_filtered"} -{"code": "def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):\n identifier_mappings = {}\n for esedb_record in esedb_table.records:\n if parser_mediator.abort:\n break\n (identifier, mapped_value) = self._ParseIdentifierMappingRecord(parser_mediator, esedb_table.name, esedb_record)\n if ((identifier is None) or (mapped_value is None)):\n continue\n if (identifier in identifier_mappings):\n parser_mediator.ProduceExtractionWarning('identifier: {0:d} already exists in mappings.'.format(identifier))\n continue\n identifier_mappings[identifier] = mapped_value\n return identifier_mappings", "docstring": "Extracts identifier mappings from the SruDbIdMapTable table.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n esedb_table (pyesedb.table): table.\n\nReturns:\n dict[int, str]: mapping of numeric identifiers to their string\n representation.", "source": "codesearchnet_filtered"} -{"code": "def VerifyStructure(self, parser_mediator, line):\n self._last_month 
= 0\n self._year_use = parser_mediator.GetEstimatedYear()\n try:\n structure = self.FIREWALL_LINE.parseString(line)\n except pyparsing.ParseException as exception:\n logger.debug('Unable to parse file as a Mac AppFirewall log file with error: {0!s}'.format(exception))\n return False\n if (structure.action != 'creating /var/log/appfirewall.log'):\n logger.debug('Not a Mac AppFirewall log file, invalid action: {0!s}'.format(structure.action))\n return False\n if (structure.status != 'Error'):\n logger.debug('Not a Mac AppFirewall log file, invalid status: {0!s}'.format(structure.status))\n return False\n time_elements_tuple = self._GetTimeElementsTuple(structure)\n try:\n dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)\n except ValueError:\n logger.debug('Not a Mac AppFirewall log file, invalid date and time: {0!s}'.format(structure.date_time))\n return False\n self._last_month = time_elements_tuple[1]\n return True", "docstring": "Verify that this file is a Mac AppFirewall log file.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n line (str): line from a text file.\n\nReturns:\n bool: True if the line is in the expected format, False if not.", "source": "codesearchnet_filtered"} -{"code": "def combine_columns(columns):\n columns_zipped = itertools.zip_longest(*columns)\n return ''.join((x for zipped in columns_zipped for x in zipped if x))", "docstring": "Combine ``columns`` into a single string.\n\nExample:\n >>> combine_columns(['eape', 'xml'])\n 'example'\n\nArgs:\n columns (iterable): ordered columns to combine\n\nReturns:\n String of combined columns", "source": "codesearchnet_filtered"} -{"code": "def create_transaction(self, to_account):\n from_account = self.statement_import.bank_account\n transaction = Transaction.objects.create()\n Leg.objects.create(transaction=transaction, account=from_account, amount=(+ (self.amount * (- 1))))\n Leg.objects.create(transaction=transaction, account=to_account, amount=(- (self.amount * (- 1))))\n transaction.date = self.date\n transaction.save()\n self.transaction = transaction\n self.save()\n return transaction", "docstring": "Create a transaction for this statement amount and account, into to_account\n\n This will also set this StatementLine's ``transaction`` attribute to the newly\n created transaction.\n\nArgs:\n to_account (Account): The account the transaction is into / out of.\n\nReturns:\n Transaction: The newly created (and committed) transaction.", "source": "codesearchnet_filtered"} -{"code": "def repeat(coro, times=1, step=1, limit=1, loop=None):\n assert_corofunction(coro=coro)\n times = max(int(times), 1)\n iterable = range(1, (times + 1), step)\n return (yield from map(coro, iterable, limit=limit, loop=loop))", "docstring": "Executes the coroutine function ``x`` number of times,\n and accumulates results in order as you would use with ``map``.\n\n Execution concurrency is configurable using ``limit`` param.\n\n This function is a coroutine.\n\nArgs:\n coro (coroutinefunction): coroutine function to schedule.\n times (int): number of times to execute the coroutine.\n step (int): increment iteration step, as with ``range()``.\n limit (int): concurrency execution limit. 
Defaults to 10.\n loop (asyncio.BaseEventLoop): optional event loop to use.\n\nRaises:\n TypeError: if coro is not a coroutine function.\n\nReturns:\n list: accumulated yielded values returned by coroutine.\n\n Usage::\n\n async def mul_2(num):\n return num * 2\n\n await paco.repeat(mul_2, times=5)\n # => [2, 4, 6, 8, 10]", "source": "codesearchnet_filtered"} -{"code": "def modify_required_power_levels(self, events=None, **kwargs):\n try:\n content = self.client.api.get_power_levels(self.room_id)\n content.update(kwargs)\n for (key, value) in list(content.items()):\n if (value is None):\n del content[key]\n if events:\n if ('events' in content):\n content['events'].update(events)\n else:\n content['events'] = events\n for (event, power_level) in list(content['events'].items()):\n if (power_level is None):\n del content['events'][event]\n self.client.api.set_power_levels(self.room_id, content)\n return True\n except MatrixRequestError:\n return False", "docstring": "Modifies room power level requirements.\n\nArgs:\n events(dict): Power levels required for sending specific event types,\n in the form {\"m.room.whatever0\": 60, \"m.room.whatever2\": None}.\n Overrides events_default and state_default for the specified\n events. A level of None causes the target event to revert to the\n default level as specified by events_default or state_default.\n **kwargs: Key/value pairs specifying the power levels required for\n various actions:\n\n - events_default(int): Default level for sending message events\n - state_default(int): Default level for sending state events\n - invite(int): Inviting a user\n - redact(int): Redacting an event\n - ban(int): Banning a user\n - kick(int): Kicking a user\n\nReturns:\n True if successful, False if not", "source": "codesearchnet_filtered"} -{"code": "def check_unused(intersection, duplicates, intersections):\n for other in intersections:\n if ((other.interior_curve == UNUSED_T) and (intersection.index_first == other.index_first) and (intersection.index_second == other.index_second)):\n if ((intersection.s == 0.0) and (other.s == 0.0)):\n duplicates.append(intersection)\n return True\n if ((intersection.t == 0.0) and (other.t == 0.0)):\n duplicates.append(intersection)\n return True\n return False", "docstring": "Check if a \"valid\" ``intersection`` is already in ``intersections``.\n\n This assumes that\n\n * ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0``\n * At least one of the intersections in ``intersections`` is classified as\n ``COINCIDENT_UNUSED``.\n\nArgs:\n intersection (.Intersection): An intersection to be added.\n duplicates (List[.Intersection]): List of duplicate intersections.\n intersections (List[.Intersection]): List of \"accepted\" (i.e.\n non-duplicate) intersections.\n\nReturns:\n bool: Indicates if the ``intersection`` is a duplicate.", "source": "codesearchnet_filtered"} -{"code": "def _inputs_valid(self, output_condition_uris):\n if (len(self.inputs) != len(output_condition_uris)):\n raise ValueError('Inputs and output_condition_uris must have the same count')\n tx_dict = (self.tx_dict if self.tx_dict else self.to_dict())\n tx_dict = Transaction._remove_signatures(tx_dict)\n tx_dict['id'] = None\n tx_serialized = Transaction._to_str(tx_dict)\n\n def validate(i, output_condition_uri=None):\n 'Validate input against output condition URI'\n return self._input_valid(self.inputs[i], self.operation, tx_serialized, output_condition_uri)\n return all((validate(i, cond) for (i, cond) in enumerate(output_condition_uris)))", 
"docstring": "Validates an Input against a given set of Outputs.\n\nNote:\n The number of `output_condition_uris` must be equal to the\n number of Inputs a Transaction has.\n\nArgs:\n output_condition_uris (:obj:`list` of :obj:`str`): A list of\n Outputs to check the Inputs against.\n\nReturns:\n bool: If all Outputs are valid.", "source": "codesearchnet_filtered"} -{"code": "def _parse_query_key(self, key, val, is_escaped):\n if key.endswith('__contains'):\n key = key[:(- 10)]\n val = self._parse_query_modifier('contains', val, is_escaped)\n elif key.endswith('__range'):\n key = key[:(- 7)]\n val = self._parse_query_modifier('range', val, is_escaped)\n elif key.endswith('__startswith'):\n key = key[:(- 12)]\n val = self._parse_query_modifier('startswith', val, is_escaped)\n elif key.endswith('__endswith'):\n key = key[:(- 10)]\n val = self._parse_query_modifier('endswith', val, is_escaped)\n elif key.endswith('__lt'):\n key = key[:(- 4)]\n val = self._parse_query_modifier('lt', val, is_escaped)\n elif key.endswith('__gt'):\n key = key[:(- 4)]\n val = self._parse_query_modifier('gt', val, is_escaped)\n elif key.endswith('__lte'):\n key = key[:(- 5)]\n val = self._parse_query_modifier('lte', val, is_escaped)\n elif key.endswith('__gte'):\n key = key[:(- 5)]\n val = self._parse_query_modifier('gte', val, is_escaped)\n elif ((key != 'NOKEY') and (not is_escaped)):\n val = self._escape_query(val)\n return (key, val)", "docstring": "Strips query modifier from key and call's the appropriate value modifier.\n\nArgs:\n key (str): Query key\n val: Query value\n\nReturns:\n Parsed query key and value.", "source": "codesearchnet_filtered"} -{"code": "def create_as(access_token, subscription_id, resource_group, as_name, update_domains, fault_domains, location):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/availabilitySets/', as_name, '?api-version=', COMP_API])\n as_body = {'location': location}\n properties = {'platformUpdateDomainCount': update_domains}\n properties['platformFaultDomainCount'] = fault_domains\n as_body['properties'] = properties\n body = json.dumps(as_body)\n return do_put(endpoint, body, access_token)", "docstring": "Create availability set.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n as_name (str): Name of the new availability set.\n update_domains (int): Number of update domains.\n fault_domains (int): Number of fault domains.\n location (str): Azure data center location. E.g. westus.\n\nReturns:\n HTTP response. 
JSON body of the availability set properties.", "source": "codesearchnet_filtered"} -{"code": "def course_blocks(self, course_id, username):\n resp = self.requester.get(urljoin(self.base_url, '/api/courses/v1/blocks/'), params={'depth': 'all', 'username': username, 'course_id': course_id, 'requested_fields': 'children,display_name,id,type,visible_to_staff_only'})\n resp.raise_for_status()\n return Structure(resp.json())", "docstring": "Fetches course blocks.\n\nArgs:\n course_id (str): An edx course id.\n username (str): username of the user to query for (can reveal hidden\n modules)\n\nReturns:\n Structure", "source": "codesearchnet_filtered"} -{"code": "def serialCmdPwdAuth(self, password_str):\n result = False\n try:\n req_start = (('0150310228' + binascii.hexlify(password_str)) + '2903')\n req_crc = self.calc_crc16(req_start[2:].decode('hex'))\n req_str = (req_start + req_crc)\n self.m_serial_port.write(req_str.decode('hex'))\n if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n ekm_log((('Password accepted (' + self.getContext()) + ')'))\n result = True\n else:\n ekm_log((('Password call failure no 06(' + self.getContext()) + ')'))\n except:\n ekm_log((('Password call failure by exception(' + self.getContext()) + ')'))\n ekm_log(traceback.format_exc(sys.exc_info()))\n return result", "docstring": "Password step of set commands\n\n This method is normally called within another serial command, so it\n does not issue a termination string. Any default password is set\n in the caller parameter list, never here.\n\nArgs:\n password_str (str): Required password.\n\nReturns:\n bool: True on completion and ACK.", "source": "codesearchnet_filtered"} -{"code": "def chain_to_quadratic(chain, target_adjacency, chain_strength):\n quadratic = {}\n seen = set()\n try:\n next_level = {next(iter(chain))}\n except StopIteration:\n raise ValueError('chain must have at least one variable')\n while next_level:\n this_level = next_level\n next_level = set()\n for v in this_level:\n if (v not in seen):\n seen.add(v)\n for u in target_adjacency[v]:\n if (u not in chain):\n continue\n next_level.add(u)\n if ((u != v) and ((u, v) not in quadratic)):\n quadratic[(v, u)] = (- chain_strength)\n if (len(chain) != len(seen)):\n raise ValueError('{} is not a connected chain'.format(chain))\n return quadratic", "docstring": "Determine the quadratic biases that induce the given chain.\n\nArgs:\n chain (iterable):\n The variables that make up a chain.\n\n target_adjacency (dict/:class:`networkx.Graph`):\n Should be a dict of the form {s: Ns, ...} where s is a variable\n in the target graph and Ns is the set of neighbours of s.\n\n chain_strength (float):\n The magnitude of the quadratic bias that should be used to create chains.\n\nReturns:\n dict[edge, float]: The quadratic biases that induce the given chain.\n\nRaises:\n ValueError: If the variables in chain do not form a connected subgraph of target.\n\nExample:\n >>> chain = {1, 2}\n >>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}\n >>> dimod.embedding.chain_to_quadratic(chain, target_adjacency, 1)\n {(1, 2): -1}", "source": "codesearchnet_filtered"} -{"code": "def read(self, offset, size):\n self._file_object.seek(offset, os.SEEK_SET)\n return self._file_object.read(size)", "docstring": "Reads a byte string from the image object at the specified offset.\n\nArgs:\n offset (int): offset where to start reading.\n size (int): number of bytes to read.\n\nReturns:\n bytes: data read.", "source": "codesearchnet_filtered"} -{"code": 
"def load_validation_plugin(name=None):\n if (not name):\n return BaseValidationRules\n plugin = None\n for entry_point in iter_entry_points('bigchaindb.validation', name):\n plugin = entry_point.load()\n if (not plugin):\n raise ResolutionError('No plugin found in group `bigchaindb.validation` with name `{}`'.format(name))\n if (not issubclass(plugin, (BaseValidationRules,))):\n raise TypeError('object of type \"{}\" does not implement `bigchaindb.validation.BaseValidationRules`'.format(type(plugin)))\n return plugin", "docstring": "Find and load the chosen validation plugin.\n\nArgs:\n name (string): the name of the entry_point, as advertised in the\n setup.py of the providing package.\n\nReturns:\n an uninstantiated subclass of ``bigchaindb.validation.AbstractValidationRules``", "source": "codesearchnet_filtered"} -{"code": "def aggregate(self, dataset_ids=None, boundary='exact', side='left', func='mean', **dim_kwargs):\n new_scn = self.copy(datasets=dataset_ids)\n for (src_area, ds_ids) in new_scn.iter_by_area():\n if (src_area is None):\n for ds_id in ds_ids:\n new_scn.datasets[ds_id] = self[ds_id]\n continue\n if (boundary != 'exact'):\n raise NotImplementedError(\"boundary modes appart from 'exact' are not implemented yet.\")\n target_area = src_area.aggregate(**dim_kwargs)\n resolution = max(target_area.pixel_size_x, target_area.pixel_size_y)\n for ds_id in ds_ids:\n res = self[ds_id].coarsen(boundary=boundary, side=side, func=func, **dim_kwargs)\n new_scn.datasets[ds_id] = getattr(res, func)()\n new_scn.datasets[ds_id].attrs['area'] = target_area\n new_scn.datasets[ds_id].attrs['resolution'] = resolution\n return new_scn", "docstring": "Create an aggregated version of the Scene.\n\nArgs:\n dataset_ids (iterable): DatasetIDs to include in the returned\n `Scene`. Defaults to all datasets.\n func (string): Function to apply on each aggregation window. 
One of\n 'mean', 'sum', 'min', 'max', 'median', 'argmin',\n 'argmax', 'prod', 'std', 'var'.\n 'mean' is the default.\n boundary: Not implemented.\n side: Not implemented.\n dim_kwargs: the size of the windows to aggregate.\n\nReturns:\n A new aggregated scene\n\n See also:\n xarray.DataArray.coarsen\n\nExample:\n `scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels by\n applying the `min` function.", "source": "codesearchnet_filtered"} -{"code": "def load_manual_sequence(self, seq, ident=None, write_fasta_file=False, outdir=None, set_as_representative=False, force_rewrite=False):\n if write_fasta_file:\n if (not outdir):\n outdir = self.sequence_dir\n if (not outdir):\n raise ValueError('Output directory must be specified')\n outfile = op.join(outdir, '{}.faa'.format(ident))\n else:\n outfile = None\n if (isinstance(seq, str) or isinstance(seq, Seq)):\n if (not ident):\n raise ValueError('ID must be specified if sequence is a string or Seq object')\n manual_sequence = SeqProp(id=ident, seq=seq)\n else:\n if (not ident):\n ident = seq.id\n else:\n seq.id = ident\n manual_sequence = SeqProp(id=ident, seq=seq, name=seq.name, description=seq.description)\n if write_fasta_file:\n manual_sequence.write_fasta_file(outfile=outfile, force_rerun=force_rewrite)\n self.sequences.append(manual_sequence)\n if set_as_representative:\n self.representative_sequence = manual_sequence\n return self.sequences.get_by_id(ident)", "docstring": "Load a manual sequence given as a string and optionally set it as the representative sequence.\n Also store it in the sequences attribute.\n\nArgs:\n seq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object\n ident (str): Optional identifier for the sequence, required if seq is a string. Also will override existing\n IDs in Seq or SeqRecord objects if set.\n write_fasta_file (bool): If this sequence should be written out to a FASTA file\n outdir (str): Path to output directory\n set_as_representative (bool): If this sequence should be set as the representative one\n force_rewrite (bool): If the FASTA file should be overwritten if it already exists\n\nReturns:\n SeqProp: Sequence that was loaded into the ``sequences`` attribute", "source": "codesearchnet_filtered"} -{"code": "def _compute_full_path(self, fn_parent_ref, fn_parent_seq):\n names = []\n root_id = 5\n (index, seq) = (fn_parent_ref, fn_parent_seq)\n is_orphan = False\n while (index != root_id):\n try:\n parent_entry = self[index]\n if (seq != parent_entry.header.seq_number):\n is_orphan = True\n break\n else:\n parent_fn_attr = parent_entry.get_main_filename_attr()\n (index, seq) = (parent_fn_attr.content.parent_ref, parent_fn_attr.content.parent_seq)\n names.append(parent_fn_attr.content.name)\n except ValueError as e:\n is_orphan = True\n break\n return (is_orphan, '\\\\'.join(reversed(names)))", "docstring": "Based on the parent reference and sequence, computes the full path.\n\n The majority of the files in a filesystem has a very small amount of\n parent directories. By definition, a filesystem is expected to have\n much smaller amount of directories than files. As such we use a function\n with the minimal amount of arguments to find a parent, that way we can\n cache the results easily and speed up the overall code.\n\nArgs:\n fn_parent_ref (int): Parent reference number\n fn_parent_seq (int): Parent sequence number\n\nReturns:\n tuple(bool, str): A tuple where the first element is a boolean that\n is ``True`` if the the file is orphan and ``False`` if not. 
The\n second element is a string with the full path without the file name", "source": "codesearchnet_filtered"} -{"code": "def points_are_in_a_straight_line(points, tolerance=1e-07):\n a = points[0]\n b = points[1]\n for c in points[2:]:\n if (area_of_a_triangle_in_cartesian_space(a, b, c) > tolerance):\n return False\n return True", "docstring": "Check whether a set of points fall on a straight line.\n Calculates the areas of triangles formed by triplets of the points.\n Returns False is any of these areas are larger than the tolerance.\n\nArgs:\n points (list(np.array)): list of Cartesian coordinates for each point.\n tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.\n\nReturns:\n (bool): True if all points fall on a straight line (within the allowed tolerance).", "source": "codesearchnet_filtered"} -{"code": "def plot_kdes(self, mnemonic, alias=None, uwi_regex=None, return_fig=False):\n wells = self.find_wells_with_curve(mnemonic, alias=alias)\n (fig, axs) = plt.subplots(len(self), 1, figsize=(10, (1.5 * len(self))))\n curves = [w.get_curve(mnemonic, alias=alias) for w in wells]\n all_data = np.hstack(curves)\n all_data = all_data[(~ np.isnan(all_data))]\n amax = np.percentile(all_data, 99)\n amin = np.percentile(all_data, 1)\n for (i, w) in enumerate(self):\n c = w.get_curve(mnemonic, alias=alias)\n if (uwi_regex is not None):\n label = re.sub(uwi_regex, '\\\\1', w.uwi)\n else:\n label = w.uwi\n if (c is not None):\n axs[i] = c.plot_kde(ax=axs[i], amax=amax, amin=amin, label=((label + '-') + str(c.mnemonic)))\n else:\n continue\n if return_fig:\n return fig\n else:\n return", "docstring": "Plot KDEs for all curves with the given name.\n\nArgs:\n menmonic (str): the name of the curve to look for.\n alias (dict): a welly alias dictionary.\n uwi_regex (str): a regex pattern. Only this part of the UWI will be displayed\n on the plot of KDEs.\n return_fig (bool): whether to return the matplotlib figure object.\n\nReturns:\n None or figure.", "source": "codesearchnet_filtered"} -{"code": "def set_logging_settings(profile, setting, value, store='local'):\n if (profile.lower() not in ('domain', 'public', 'private')):\n raise ValueError('Incorrect profile: {0}'.format(profile))\n if (setting.lower() not in ('allowedconnections', 'droppedconnections', 'filename', 'maxfilesize')):\n raise ValueError('Incorrect setting: {0}'.format(setting))\n if (setting.lower() in ('allowedconnections', 'droppedconnections')):\n if (value.lower() not in ('enable', 'disable', 'notconfigured')):\n raise ValueError('Incorrect value: {0}'.format(value))\n if (setting.lower() == 'maxfilesize'):\n if (value.lower() != 'notconfigured'):\n try:\n int(value)\n except ValueError:\n raise ValueError('Incorrect value: {0}'.format(value))\n if (not (1 <= int(value) <= 32767)):\n raise ValueError('Incorrect value: {0}'.format(value))\n command = 'set {0}profile logging {1} {2}'.format(profile, setting, value)\n results = _netsh_command(command=command, store=store)\n if results:\n raise CommandExecutionError('An error occurred: {0}'.format(results))\n return True", "docstring": "Configure logging settings for the Windows firewall.\n\nArgs:\n profile (str):\n The firewall profile to configure. Valid options are:\n\n - domain\n - public\n - private\n\n setting (str):\n The logging setting to configure. Valid options are:\n\n - allowedconnections\n - droppedconnections\n - filename\n - maxfilesize\n\n value (str):\n The value to apply to the setting. 
Valid values are dependent upon\n the setting being configured. Valid options are:\n\n allowedconnections:\n\n - enable\n - disable\n - notconfigured\n\n droppedconnections:\n\n - enable\n - disable\n - notconfigured\n\n filename:\n\n - Full path and name of the firewall log file\n - notconfigured\n\n maxfilesize:\n\n - 1 - 32767 (Kb)\n - notconfigured\n\n store (str):\n The store to use. This is either the local firewall policy or the\n policy defined by local group policy. Valid options are:\n\n - lgpo\n - local\n\n Default is ``local``\n\nReturns:\n bool: ``True`` if successful\n\nRaises:\n CommandExecutionError: If an error occurs\n ValueError: If the parameters are incorrect", "source": "codesearchnet_filtered"} -{"code": "def nb_ll(data, P, R):\n (genes, cells) = data.shape\n clusters = P.shape[1]\n lls = np.zeros((cells, clusters))\n for c in range(clusters):\n P_c = P[(:, c)].reshape((genes, 1))\n R_c = R[(:, c)].reshape((genes, 1))\n ll = (gammaln((R_c + data)) - gammaln(R_c))\n ll += ((data * np.log(P_c)) + xlog1py(R_c, (- P_c)))\n lls[(:, c)] = ll.sum(0)\n return lls", "docstring": "Returns the negative binomial log-likelihood of the data.\n\nArgs:\n data (array): genes x cells\n P (array): NB success probability param - genes x clusters\n R (array): NB stopping param - genes x clusters\n\nReturns:\n cells x clusters array of log-likelihoods", "source": "codesearchnet_filtered"} -{"code": "def pull(self, repository, tag=None, **kwargs):\n if (not tag):\n (repository, tag) = parse_repository_tag(repository)\n if ('stream' in kwargs):\n warnings.warn('`stream` is not a valid parameter for this method and will be overridden')\n del kwargs['stream']\n pull_log = self.client.api.pull(repository, tag=tag, stream=True, **kwargs)\n for _ in pull_log:\n pass\n if tag:\n return self.get('{0}{2}{1}'.format(repository, tag, ('@' if tag.startswith('sha256:') else ':')))\n return self.list(repository)", "docstring": "Pull an image of the given name and return it. Similar to the\n ``docker pull`` command.\n If no tag is specified, all tags from that repository will be\n pulled.\n\n If you want to get the raw pull output, use the\n :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the\n low-level API.\n\nArgs:\n repository (str): The repository to pull\n tag (str): The tag to pull\n auth_config (dict): Override the credentials that are found in the\n config for this request. 
``auth_config`` should contain the\n ``username`` and ``password`` keys to be valid.\n platform (str): Platform in the format ``os[/arch[/variant]]``\n\nReturns:\n (:py:class:`Image` or list): The image that has been pulled.\n If no ``tag`` was specified, the method will return a list\n of :py:class:`Image` objects belonging to this repository.\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\nExample:\n >>> # Pull the image tagged `latest` in the busybox repo\n >>> image = client.images.pull('busybox:latest')\n\n >>> # Pull all tags in the busybox repo\n >>> images = client.images.pull('busybox')", "source": "codesearchnet_filtered"} -{"code": "def make_supercells_with_defects(self, scaling_matrix):\n scs = []\n sc = self._structure.copy()\n sc.make_supercell(scaling_matrix)\n scs.append(sc)\n for (ids, defect_site) in enumerate(self._defect_sites):\n sc_with_inter = sc.copy()\n sc_with_inter.append(defect_site.species_string, defect_site.frac_coords, coords_are_cartesian=False, validate_proximity=False, properties=None)\n if (not sc_with_inter):\n raise RuntimeError('could not generate supercell with interstitial {}'.format((ids + 1)))\n scs.append(sc_with_inter.copy())\n return scs", "docstring": "Generate a sequence of supercells\n in which each supercell contains a single interstitial,\n except for the first supercell in the sequence\n which is a copy of the defect-free input structure.\n\nArgs:\n scaling_matrix (3x3 integer array): scaling matrix\n to transform the lattice vectors.\n\nReturns:\n scs ([Structure]): sequence of supercells.", "source": "codesearchnet_filtered"} -{"code": "def delete_vmss_vms(access_token, subscription_id, resource_group, vmss_name, vm_ids):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/delete?api-version=', COMP_API])\n body = (('{\"instanceIds\" : ' + vm_ids) + '}')\n return do_post(endpoint, body, access_token)", "docstring": "Delete a VM in a VM Scale Set.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n vmss_name (str): Name of the virtual machine scale set.\n vm_ids (str): String representation of a JSON list of VM IDs. E.g. 
'[1,2]'.\n\nReturns:\n HTTP response.", "source": "codesearchnet_filtered"} -{"code": "def retryable(a_func, retry_options, **kwargs):\n delay_mult = retry_options.backoff_settings.retry_delay_multiplier\n max_delay_millis = retry_options.backoff_settings.max_retry_delay_millis\n has_timeout_settings = _has_timeout_settings(retry_options.backoff_settings)\n if has_timeout_settings:\n timeout_mult = retry_options.backoff_settings.rpc_timeout_multiplier\n max_timeout = (retry_options.backoff_settings.max_rpc_timeout_millis / _MILLIS_PER_SECOND)\n total_timeout = (retry_options.backoff_settings.total_timeout_millis / _MILLIS_PER_SECOND)\n\n def inner(*args):\n 'Equivalent to ``a_func``, but retries upon transient failure.\\n\\n Retrying is done through an exponential backoff algorithm configured\\n by the options in ``retry``.\\n '\n delay = retry_options.backoff_settings.initial_retry_delay_millis\n exc = errors.RetryError('Retry total timeout exceeded before anyresponse was received')\n if has_timeout_settings:\n timeout = (retry_options.backoff_settings.initial_rpc_timeout_millis / _MILLIS_PER_SECOND)\n now = time.time()\n deadline = (now + total_timeout)\n else:\n timeout = None\n deadline = None\n while ((deadline is None) or (now < deadline)):\n try:\n to_call = add_timeout_arg(a_func, timeout, **kwargs)\n return to_call(*args)\n except Exception as exception:\n code = config.exc_to_code(exception)\n if (code not in retry_options.retry_codes):\n raise errors.RetryError('Exception occurred in retry method that was not classified as transient', exception)\n exc = errors.RetryError('Retry total timeout exceeded with exception', exception)\n to_sleep = random.uniform(0, (delay * 2))\n time.sleep((to_sleep / _MILLIS_PER_SECOND))\n delay = min((delay * delay_mult), max_delay_millis)\n if has_timeout_settings:\n now = time.time()\n timeout = min((timeout * timeout_mult), max_timeout, (deadline - now))\n raise exc\n return inner", "docstring": "Creates a function equivalent to a_func, but that retries on certain\n exceptions.\n\nArgs:\n a_func (callable): A callable.\n retry_options (RetryOptions): Configures the exceptions upon which the\n callable should retry, and the parameters to the exponential backoff\n retry algorithm.\n kwargs: Addtional arguments passed through to the callable.\n\nReturns:\n Callable: A function that will retry on exception.", "source": "codesearchnet_filtered"} -{"code": "def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):\n if self.is_requestable():\n self._check_required_fields('dataset-requestable', ignore_fields)\n else:\n self._check_required_fields('dataset', ignore_fields)\n if ((len(self.resources) == 0) and (not allow_no_resources)):\n raise HDXError('There are no resources! Please add at least one resource!')\n for resource in self.resources:\n ignore_fields = ['package_id']\n resource.check_required_fields(ignore_fields=ignore_fields)", "docstring": "Check that metadata for dataset and its resources is complete. The parameter ignore_fields\n should be set if required to any fields that should be ignored for the particular operation.\n\nArgs:\n ignore_fields (List[str]): Fields to ignore. Default is [].\n allow_no_resources (bool): Whether to allow no resources. 
Defaults to False.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def merkleroot(hashes):\n if (not hashes):\n return sha3_256(b'').hexdigest()\n if (len(hashes) == 1):\n return hexlify(hashes[0]).decode()\n if ((len(hashes) % 2) == 1):\n hashes.append(hashes[(- 1)])\n parent_hashes = [sha3_256((hashes[i] + hashes[(i + 1)])).digest() for i in range(0, (len(hashes) - 1), 2)]\n return merkleroot(parent_hashes)", "docstring": "Computes the merkle root for a given list.\n\nArgs:\n hashes (:obj:`list` of :obj:`bytes`): The leaves of the tree.\n\nReturns:\n str: Merkle root in hexadecimal form.", "source": "codesearchnet_filtered"} -{"code": "def pull(handle, enumerate=False):\n assert isinstance(handle, Handle), handle\n return Pull(handle, enumerate)", "docstring": "Pulls next message for handle.\n\nArgs:\n handle: A :class:`.stream.Handle` or GroupHandle.\n enumerate (bool): boolean to indicate whether a tuple ``(idx, msg)``\n should be returned, not unlike Python's enumerate().\n\nReturns:\n A :class:`Pull` task to be yielded. Marv will send the\n corresponding message as soon as it is available. For groups\n this message will be a handle to a member of the\n group. Members of groups are either streams or groups.\n\nExample:\n Pulling (enumerated) message from stream::\n\n msg = yield marv.pull(stream)\n idx, msg = yield marv.pull(stream, enumerate=True)\n\n Pulling stream from group and message from stream::\n\n stream = yield marv.pull(group) # a group of streams\n msg = yield marv.pull(stream)", "source": "codesearchnet_filtered"} -{"code": "def get_entities(seq, suffix=False):\n if any((isinstance(s, list) for s in seq)):\n seq = [item for sublist in seq for item in (sublist + ['O'])]\n prev_tag = 'O'\n prev_type = ''\n begin_offset = 0\n chunks = []\n for (i, chunk) in enumerate((seq + ['O'])):\n if suffix:\n tag = chunk[(- 1)]\n type_ = chunk.split('-')[0]\n else:\n tag = chunk[0]\n type_ = chunk.split('-')[(- 1)]\n if end_of_chunk(prev_tag, tag, prev_type, type_):\n chunks.append((prev_type, begin_offset, (i - 1)))\n if start_of_chunk(prev_tag, tag, prev_type, type_):\n begin_offset = i\n prev_tag = tag\n prev_type = type_\n return chunks", "docstring": "Gets entities from sequence.\n\nArgs:\n seq (list): sequence of labels.\n\nReturns:\n list: list of (chunk_type, chunk_start, chunk_end).\n\nExample:\n >>> from seqeval.metrics.sequence_labeling import get_entities\n >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']\n >>> get_entities(seq)\n [('PER', 0, 1), ('LOC', 3, 3)]", "source": "codesearchnet_filtered"} -{"code": "def get_nn(self, structure, n):\n return [e['site'] for e in self.get_nn_info(structure, n)]", "docstring": "Get near neighbors of site with index n in structure.\n\nArgs:\n structure (Structure): input structure.\n n (integer): index of site in structure for which to determine\n neighbors.\n\nReturns:\n sites (list of Site objects): near neighbors.", "source": "codesearchnet_filtered"} -{"code": "def _parse_target(target):\n if (len(target) != 8):\n raise ArgumentError('Invalid targeting data length', expected=8, length=len(target))\n (slot, match_op) = struct.unpack(' TrajectoryOutput:\n if (initial_state is None):\n initial_state = self._cell.initial_state()\n with self.graph.as_default():\n self.inputs = self.timesteps(horizon)\n (outputs, _) = tf.nn.dynamic_rnn(self._cell, self.inputs, initial_state=initial_state, dtype=tf.float32, scope='trajectory')\n (states, actions, interms, rewards) = outputs\n state_dtype = 
map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.state_range_type)\n states = self._output(states, state_dtype)\n interm_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.interm_range_type)\n interms = self._output(interms, interm_dtype)\n action_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.action_range_type)\n actions = self._output(actions, action_dtype)\n outputs = (initial_state, states, actions, interms, rewards)\n return outputs", "docstring": "Returns the ops for the trajectory generation with given `horizon`\n and `initial_state`.\n\n The simulation returns states, actions and interms as a\n sequence of tensors (i.e., all representations are factored).\n The reward is a batch sized tensor.\n The trajectoty output is a tuple: (initial_state, states, actions, interms, rewards).\n If initial state is None, use default compiler's initial state.\n\nNote:\n All tensors have shape: (batch_size, horizon, fluent_shape).\n Except initial state that has shape: (batch_size, fluent_shape).\n\nArgs:\n horizon (int): The number of simulation timesteps.\n initial_state (Optional[Sequence[tf.Tensor]]): The initial state tensors.\n\nReturns:\n Tuple[StateTensor, StatesTensor, ActionsTensor, IntermsTensor, tf.Tensor]: Trajectory output tuple.", "source": "codesearchnet_filtered"} -{"code": "def production_variant(model_name, instance_type, initial_instance_count=1, variant_name='AllTraffic', initial_weight=1, accelerator_type=None):\n production_variant_configuration = {'ModelName': model_name, 'InstanceType': instance_type, 'InitialInstanceCount': initial_instance_count, 'VariantName': variant_name, 'InitialVariantWeight': initial_weight}\n if accelerator_type:\n production_variant_configuration['AcceleratorType'] = accelerator_type\n return production_variant_configuration", "docstring": "Create a production variant description suitable for use in a ``ProductionVariant`` list as part of a\n ``CreateEndpointConfig`` request.\n\nArgs:\n model_name (str): The name of the SageMaker model this production variant references.\n instance_type (str): The EC2 instance type for this production variant. For example, 'ml.c4.8xlarge'.\n initial_instance_count (int): The initial instance count for this production variant (default: 1).\n variant_name (string): The ``VariantName`` of this production variant (default: 'AllTraffic').\n initial_weight (int): The relative ``InitialVariantWeight`` of this production variant (default: 1).\n accelerator_type (str): Type of Elastic Inference accelerator for this production variant. For example,\n 'ml.eia1.medium'. 
For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html\n\nReturns:\n dict[str, str]: An SageMaker ``ProductionVariant`` description", "source": "codesearchnet_filtered"} -{"code": "def do(self, resource, method, params=None, data=None, json=None, headers=None):\n uri = '{0}/{1}'.format(self._api_base, resource)\n if (not params):\n params = {}\n params.update({'token': self._token})\n req = Request(method=method, url=uri, params=params, headers=headers, data=data, json=json)\n s = Session()\n prepped = s.prepare_request(req)\n resp = s.send(prepped)\n return RTMResponse(resp)", "docstring": "Does the request job\n\nArgs:\n resource(str): resource uri(relative path)\n method(str): HTTP method\n params(dict): uri queries\n data(dict): HTTP body(form)\n json(dict): HTTP body(json)\n headers(dict): HTTP headers\n\nReturns:\n RTMResponse", "source": "codesearchnet_filtered"} -{"code": "def software_breakpoint(self):\n software_types = [enums.JLinkBreakpoint.SW_RAM, enums.JLinkBreakpoint.SW_FLASH, enums.JLinkBreakpoint.SW]\n return any(((self.Type & stype) for stype in software_types))", "docstring": "Returns whether this is a software breakpoint.\n\nArgs:\n self (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instance\n\nReturns:\n ``True`` if the breakpoint is a software breakpoint, otherwise\n ``False``.", "source": "codesearchnet_filtered"} -{"code": "def variants(self, case_id, skip=0, count=1000, filters=None):\n filters = (filters or {})\n case_obj = self.case(case_id=case_id)\n limit = (count + skip)\n genes = set()\n if filters.get('gene_ids'):\n genes = set([gene_id.strip() for gene_id in filters['gene_ids']])\n frequency = None\n if filters.get('frequency'):\n frequency = float(filters['frequency'])\n cadd = None\n if filters.get('cadd'):\n cadd = float(filters['cadd'])\n genetic_models = None\n if filters.get('genetic_models'):\n genetic_models = set(filters['genetic_models'])\n sv_len = None\n if filters.get('sv_len'):\n sv_len = float(filters['sv_len'])\n impact_severities = None\n if filters.get('impact_severities'):\n impact_severities = set(filters['impact_severities'])\n vcf_file_path = case_obj.variant_source\n self.head = get_header(vcf_file_path)\n self.vep_header = self.head.vep_columns\n self.snpeff_header = self.head.snpeff_columns\n variants = self._get_filtered_variants(vcf_file_path, filters)\n result = []\n skip_index = 0\n for (index, variant) in enumerate(variants):\n index += 1\n if (skip_index >= skip):\n variant_obj = self._format_variants(variant=variant, index=index, case_obj=case_obj)\n if (genes and variant_obj):\n if (not set(variant_obj['gene_symbols']).intersection(genes)):\n variant_obj = None\n if (impact_severities and variant_obj):\n if (not (variant_obj['impact_severity'] in impact_severities)):\n variant_obj = None\n if (frequency and variant_obj):\n if (variant_obj.max_freq > frequency):\n variant_obj = None\n if (cadd and variant_obj):\n if (variant_obj['cadd_score'] < cadd):\n variant_obj = None\n if (genetic_models and variant_obj):\n models = set(variant_obj.genetic_models)\n if (not models.intersection(genetic_models)):\n variant_obj = None\n if (sv_len and variant_obj):\n if (variant_obj.sv_len < sv_len):\n variant_obj = None\n if variant_obj:\n skip_index += 1\n if (skip_index <= limit):\n result.append(variant_obj)\n else:\n break\n else:\n skip_index += 1\n return Results(result, len(result))", "docstring": "Return all variants in the VCF.\n\n This function will apply the given filter and return the 'count' first\n 
variants. If skip the first 'skip' variants will not be regarded.\n\nArgs:\n case_id (str): Path to a vcf file (for this adapter)\n skip (int): Skip first variants\n count (int): The number of variants to return\n filters (dict): A dictionary with filters. Currently this will\n look like: {\n gene_list: [] (list of hgnc ids),\n frequency: None (float),\n cadd: None (float),\n sv_len: None (float),\n consequence: [] (list of consequences),\n is_lof: None (Bool),\n genetic_models [] (list of genetic models)\n sv_type: List (list of sv types),\n }\n\nReturns:\n puzzle.constants.Results : Named tuple with variants and\n nr_of_variants", "source": "codesearchnet_filtered"} -{"code": "def create_container_service(access_token, subscription_id, resource_group, service_name, agent_count, agent_vm_size, agent_dns, master_dns, admin_user, location, public_key=None, master_count=3, orchestrator='DCOS', app_id=None, app_secret=None, admin_password=None, ostype='Linux'):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerService/ContainerServices/', service_name, '?api-version=', ACS_API])\n acs_body = {'location': location}\n properties = {'orchestratorProfile': {'orchestratorType': orchestrator}}\n properties['masterProfile'] = {'count': master_count, 'dnsPrefix': master_dns}\n ap_profile = {'name': 'AgentPool1'}\n ap_profile['count'] = agent_count\n ap_profile['vmSize'] = agent_vm_size\n ap_profile['dnsPrefix'] = agent_dns\n properties['agentPoolProfiles'] = [ap_profile]\n if (ostype == 'Linux'):\n linux_profile = {'adminUsername': admin_user}\n linux_profile['ssh'] = {'publicKeys': [{'keyData': public_key}]}\n properties['linuxProfile'] = linux_profile\n else:\n windows_profile = {'adminUsername': admin_user, 'adminPassword': admin_password}\n properties['windowsProfile'] = windows_profile\n if (orchestrator == 'Kubernetes'):\n sp_profile = {'ClientID': app_id}\n sp_profile['Secret'] = app_secret\n properties['servicePrincipalProfile'] = sp_profile\n acs_body['properties'] = properties\n body = json.dumps(acs_body)\n return do_put(endpoint, body, access_token)", "docstring": "Create a new container service - include app_id and app_secret if using Kubernetes.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n service_name (str): Name of container service.\n agent_count (int): The number of agent VMs.\n agent_vm_size (str): VM size of agents, e.g. Standard_D1_v2.\n agent_dns (str): A unique DNS string for the agent DNS.\n master_dns (str): A unique string for the master DNS.\n admin_user (str): Admin user name.\n location (str): Azure data center location, e.g. westus.\n public_key (str): RSA public key (utf-8).\n master_count (int): Number of master VMs.\n orchestrator (str): Container orchestrator. E.g. DCOS, Kubernetes.\n app_id (str): Application ID for Kubernetes.\n app_secret (str): Application secret for Kubernetes.\n admin_password (str): Admin user password.\n ostype (str): Operating system. Windows of Linux.\n\nReturns:\n HTTP response. 
Container service JSON model.", "source": "codesearchnet_filtered"} -{"code": "def quality(self, tests, alias=None):\n this_tests = ((((tests.get('each', []) + tests.get('Each', [])) + tests.get('EACH', [])) + tests.get(self.mnemonic, [])) + utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)]))\n this_tests = filter(None, this_tests)\n if (not tests.get(self.mnemonic, 1)):\n this_tests = []\n return {test.__name__: test(self) for test in this_tests}", "docstring": "Run a series of tests and return the corresponding results.\n\nArgs:\n tests (list): a list of functions.\n alias (dict): a dictionary mapping mnemonics to lists of mnemonics.\n\nReturns:\n list. The results. Stick to booleans (True = pass) or ints.", "source": "codesearchnet_filtered"} -{"code": "def append_paulis(self, paulis=None, pauli_labels=None):\n return self.insert_paulis(None, paulis=paulis, pauli_labels=pauli_labels)", "docstring": "Append pauli at the end.\n\nArgs:\n paulis (Pauli): the to-be-inserted or appended pauli\n pauli_labels (list[str]): the to-be-inserted or appended pauli label\n\nReturns:\n Pauli: self", "source": "codesearchnet_filtered"} -{"code": "def text_colour_for_hex(hexx, percent=50, dark='#000000', light='#ffffff'):\n return (light if hex_is_dark(hexx, percent=percent) else dark)", "docstring": "Function to decide what colour to use for a given hex colour.\n\nArgs:\n hexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\n bool: The colour's brightness is less than the given percent.", "source": "codesearchnet_filtered"} -{"code": "def _expand_terms(self, terms):\n ret = {'keywords': list(), 'doc': list()}\n if (not isinstance(terms, dict)):\n stp = SearchTermParser()\n terms = stp.parse(terms, term_join=self.backend._and_join)\n if ('about' in terms):\n ret['doc'].append(terms['about'])\n if ('source' in terms):\n ret['keywords'].append(terms['source'])\n return ret", "docstring": "Expands terms of the dataset to the appropriate fields. It will parse the search phrase\n and return only the search term components that are applicable to a Dataset query.\n\nArgs:\n terms (dict or str):\n\nReturns:\n dict: keys are field names, values are query strings", "source": "codesearchnet_filtered"} -{"code": "def filter(self, versions, key=(lambda x: x)):\n return [x for x in versions if self.check(key(x))]", "docstring": "Filter all of the versions in an iterable that match this version range\n\nArgs:\n versions (iterable): An iterable of SemanticVersion objects\n\nReturns:\n list: A list of the SemanticVersion objects that matched this range", "source": "codesearchnet_filtered"} -{"code": "def copy(self, resource_view):\n if isinstance(resource_view, str):\n if (is_valid_uuid(resource_view) is False):\n raise HDXError(('%s is not a valid resource view id!' % resource_view))\n resource_view = ResourceView.read_from_hdx(resource_view)\n if ((not isinstance(resource_view, dict)) and (not isinstance(resource_view, ResourceView))):\n raise HDXError(('%s is not a valid resource view!' 
% resource_view))\n for key in resource_view:\n if (key not in ('id', 'resource_id', 'package_id')):\n self.data[key] = resource_view[key]", "docstring": "Copies all fields except id, resource_id and package_id from another resource view.\n\nArgs:\n resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def get_application_configurations(self, name=None):\n if hasattr(self, 'applicationConfigurations'):\n return self._get_elements(self.applicationConfigurations, 'applicationConfigurations', ApplicationConfiguration, None, name)", "docstring": "Retrieves application configurations for this instance.\n\nArgs:\n name (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a\n regular expression. If `name` is not supplied, then all application configurations are returned.\n\nReturns:\n list(ApplicationConfiguration): A list of application configurations matching the given `name`.\n\n .. versionadded 1.12", "source": "codesearchnet_filtered"} -{"code": "def from_service_account_file(cls, filename, **kwargs):\n (info, signer) = _service_account_info.from_filename(filename, require=['client_email', 'token_uri'])\n return cls._from_signer_and_info(signer, info, **kwargs)", "docstring": "Creates a Credentials instance from a service account json file.\n\nArgs:\n filename (str): The path to the service account json file.\n kwargs: Additional arguments to pass to the constructor.\n\nReturns:\n google.auth.service_account.Credentials: The constructed\n credentials.", "source": "codesearchnet_filtered"} -{"code": "def key_exists(self, namespace, key):\n return ((namespace in self.__data) and (key in self.__data[namespace]))", "docstring": "Checks a namespace for the existence of a specific key\n\nArgs:\n namespace (str): Namespace to check in\n key (str): Name of the key to check for\n\nReturns:\n `True` if key exists in the namespace, else `False`", "source": "codesearchnet_filtered"} -{"code": "def create_asymmetric_key_pair(self, algorithm, length):\n if (algorithm not in self._asymmetric_key_algorithms.keys()):\n raise exceptions.InvalidField('The cryptographic algorithm ({0}) is not a supported asymmetric key algorithm.'.format(algorithm))\n engine_method = self._asymmetric_key_algorithms.get(algorithm)\n return engine_method(length)", "docstring": "Create an asymmetric key pair.\n\nArgs:\n algorithm(CryptographicAlgorithm): An enumeration specifying the\n algorithm for which the created keys will be compliant.\n length(int): The length of the keys to be created. This value must\n be compliant with the constraints of the provided algorithm.\n\nReturns:\n dict: A dictionary containing the public key data, with at least\n the following key/value fields:\n * value - the bytes of the key\n * format - a KeyFormatType enumeration for the bytes format\n dict: A dictionary containing the private key data, identical in\n structure to the one above.\n\nRaises:\n InvalidField: Raised when the algorithm is unsupported or the\n length is incompatible with the algorithm.\n CryptographicFailure: Raised when the key generation process\n fails.\n\nExample:\n >>> engine = CryptographyEngine()\n >>> key = engine.create_asymmetric_key(\n ... 
CryptographicAlgorithm.RSA, 2048)", "source": "codesearchnet_filtered"} -{"code": "def get_statistics(self, id_or_uri, port_name=''):\n uri = (self._client.build_uri(id_or_uri) + '/statistics')\n if port_name:\n uri = ((uri + '/') + port_name)\n return self._client.get(uri)", "docstring": "Gets the statistics from an interconnect.\n\nArgs:\n id_or_uri: Can be either the interconnect id or the interconnect uri.\n port_name (str): A specific port name of an interconnect.\n\nReturns:\n dict: The statistics for the interconnect that matches id.", "source": "codesearchnet_filtered"} -{"code": "def get_space_group_info(self, symprec=0.01, angle_tolerance=5.0):\n from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n a = SpacegroupAnalyzer(self, symprec=symprec, angle_tolerance=angle_tolerance)\n return (a.get_space_group_symbol(), a.get_space_group_number())", "docstring": "Convenience method to quickly get the spacegroup of a structure.\n\nArgs:\n symprec (float): Same definition as in SpacegroupAnalyzer.\n Defaults to 1e-2.\n angle_tolerance (float): Same definition as in SpacegroupAnalyzer.\n Defaults to 5 degrees.\n\nReturns:\n spacegroup_symbol, international_number", "source": "codesearchnet_filtered"} -{"code": "def _ReadPaddingDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n if (not is_member):\n error_message = 'data type only supported as member'\n raise errors.DefinitionReaderError(definition_name, error_message)\n definition_object = self._ReadDataTypeDefinition(definitions_registry, definition_values, data_types.PaddingDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_PADDING)\n alignment_size = definition_values.get('alignment_size', None)\n if (not alignment_size):\n error_message = 'missing alignment_size'\n raise errors.DefinitionReaderError(definition_name, error_message)\n try:\n int(alignment_size)\n except ValueError:\n error_message = 'unuspported alignment size attribute: {0!s}'.format(alignment_size)\n raise errors.DefinitionReaderError(definition_name, error_message)\n if (alignment_size not in (2, 4, 8, 16)):\n error_message = 'unuspported alignment size value: {0!s}'.format(alignment_size)\n raise errors.DefinitionReaderError(definition_name, error_message)\n definition_object.alignment_size = alignment_size\n return definition_object", "docstring": "Reads a padding data type definition.\n\nArgs:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n is_member (Optional[bool]): True if the data type definition is a member\n data type definition.\n\nReturns:\n PaddingtDefinition: padding definition.\n\nRaises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.", "source": "codesearchnet_filtered"} -{"code": "def bsp_new_with_size(x: int, y: int, w: int, h: int) -> tcod.bsp.BSP:\n return Bsp(x, y, w, h)", "docstring": "Create a new BSP instance with the given rectangle.\n\nArgs:\n x (int): Rectangle left coordinate.\n y (int): Rectangle top coordinate.\n w (int): Rectangle width.\n h (int): Rectangle height.\n\nReturns:\n BSP: A new BSP instance.\n\n .. 
deprecated:: 2.0\n Call the :any:`BSP` class instead.", "source": "codesearchnet_filtered"} -{"code": "def get_coordination_sphere(self, index_of_atom, n_sphere=1, give_only_index=False, only_surface=True, exclude=None, use_lookup=None):\n if (use_lookup is None):\n use_lookup = settings['defaults']['use_lookup']\n exclude = (set() if (exclude is None) else exclude)\n bond_dict = self.get_bonds(use_lookup=use_lookup)\n i = index_of_atom\n if (n_sphere != 0):\n visited = (set([i]) | exclude)\n try:\n tmp_bond_dict = {j: (bond_dict[j] - visited) for j in bond_dict[i]}\n except KeyError:\n tmp_bond_dict = {}\n n = 0\n while (tmp_bond_dict and ((n + 1) < n_sphere)):\n new_tmp_bond_dict = {}\n for i in tmp_bond_dict:\n if (i in visited):\n continue\n visited.add(i)\n for j in tmp_bond_dict[i]:\n new_tmp_bond_dict[j] = (bond_dict[j] - visited)\n tmp_bond_dict = new_tmp_bond_dict\n n += 1\n if only_surface:\n index_out = set(tmp_bond_dict.keys())\n else:\n index_out = (visited | set(tmp_bond_dict.keys()))\n else:\n index_out = {i}\n if give_only_index:\n return (index_out - exclude)\n else:\n return self.loc[(index_out - exclude)]", "docstring": "Return a Cartesian of atoms in the n-th coordination sphere.\n\n Connected means that a path along covalent bonds exists.\n\nArgs:\n index_of_atom (int):\n give_only_index (bool): If ``True`` a set of indices is\n returned. Otherwise a new Cartesian instance.\n n_sphere (int): Determines the number of the coordination sphere.\n only_surface (bool): Return only the surface of the coordination\n sphere.\n exclude (set): A set of indices that should be ignored\n for the path finding.\n use_lookup (bool): Use a lookup variable for\n :meth:`~chemcoord.Cartesian.get_bonds`. The default is\n specified in ``settings['defaults']['use_lookup']``\n\nReturns:\n A set of indices or a new Cartesian instance.", "source": "codesearchnet_filtered"} -{"code": "def _project_THn(self, hist: Hist) -> Any:\n projection_axes = [axis.axis_type.value for axis in self.projection_axes]\n if (len(projection_axes) == 2):\n projection_axes.reverse()\n args = (projection_axes + ['E'])\n logger.debug(f'hist: {hist.GetName()} args: {args}')\n if (len(projection_axes) > 3):\n projected_hist = hist.ProjectionND(*args)\n else:\n projected_hist = hist.Projection(*args)\n return projected_hist", "docstring": "Perform the actual THn -> THn or TH1 projection.\n\n This projection could be to 1D, 2D, 3D, or ND.\n\nArgs:\n hist (ROOT.THnBase): Histogram from which the projections should be performed.\n\nReturns:\n ROOT.THnBase or ROOT.TH1: The projected histogram.", "source": "codesearchnet_filtered"} -{"code": "def set_description(self, name, action, seqno, value=None, default=False, disable=False):\n commands = [('route-map %s %s %s' % (name, action, seqno))]\n if (value is not None):\n commands.append(self.command_builder('description', disable=True))\n commands.append(self.command_builder('description', value=value, default=default, disable=disable))\n return self.configure(commands)", "docstring": "Configures the routemap description\n\nArgs:\n name (string): The full name of the routemap.\n action (string): The action to take for this routemap clause.\n seqno (integer): The sequence number for the routemap clause.\n value (string): The value to configure for the routemap description\n default (bool): Specifies to default the routemap description value\n disable (bool): Specifies to negate the routemap description\n\nReturns:\n True if the operation succeeds otherwise False is returned", 
"source": "codesearchnet_filtered"} -{"code": "def xor_gate(variables, vartype=dimod.BINARY, name='XOR'):\n variables = tuple(variables)\n if (vartype is dimod.BINARY):\n configs = frozenset([(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0)])\n\n def func(in1, in2, out):\n return ((in1 != in2) == out)\n else:\n configs = frozenset([((- 1), (- 1), (- 1)), ((- 1), (+ 1), (+ 1)), ((+ 1), (- 1), (+ 1)), ((+ 1), (+ 1), (- 1))])\n\n def func(in1, in2, out):\n return (((in1 > 0) != (in2 > 0)) == (out > 0))\n return Constraint(func, configs, variables, vartype=vartype, name=name)", "docstring": "XOR gate.\n\nArgs:\n variables (list): Variable labels for the and gate as `[in1, in2, out]`,\n where `in1, in2` are inputs and `out` the gate's output.\n vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='XOR'): Name for the constraint.\n\nReturns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of an XOR gate.\n\nExample:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.xor_gate(['x', 'y', 'z'], name='XOR1'))\n >>> csp.check({'x': 1, 'y': 1, 'z': 1})\n False", "source": "codesearchnet_filtered"} -{"code": "def step(self, action):\n if self.done:\n raise ValueError('cannot step in a done environment! call `reset`')\n self.controllers[0][:] = action\n _LIB.Step(self._env)\n reward = self._get_reward()\n self.done = self._get_done()\n info = self._get_info()\n self._did_step(self.done)\n if (reward < self.reward_range[0]):\n reward = self.reward_range[0]\n elif (reward > self.reward_range[1]):\n reward = self.reward_range[1]\n return (self.screen, reward, self.done, info)", "docstring": "Run one frame of the NES and return the relevant observation data.\n\nArgs:\n action (byte): the bitmap determining which buttons to press\n\nReturns:\n a tuple of:\n - state (np.ndarray): next frame as a result of the given action\n - reward (float) : amount of reward returned after given action\n - done (boolean): whether the episode has ended\n - info (dict): contains auxiliary diagnostic information", "source": "codesearchnet_filtered"} -{"code": "def hgnc_genes(self, hgnc_symbol, build='37', search=False):\n LOG.debug(('Fetching genes with symbol %s' % hgnc_symbol))\n if search:\n full_query = self.hgnc_collection.find({'$or': [{'aliases': hgnc_symbol}, {'hgnc_id': (int(hgnc_symbol) if hgnc_symbol.isdigit() else None)}], 'build': build})\n if (full_query.count() != 0):\n return full_query\n return self.hgnc_collection.find({'aliases': {'$regex': hgnc_symbol, '$options': 'i'}, 'build': build})\n return self.hgnc_collection.find({'build': build, 'aliases': hgnc_symbol})", "docstring": "Fetch all hgnc genes that match a hgnc symbol\n\n Check both hgnc_symbol and aliases\n\nArgs:\n hgnc_symbol(str)\n build(str): The build in which to search\n search(bool): if partial searching should be used\n\nReturns:\n result()", "source": "codesearchnet_filtered"} -{"code": "def get_user_info(self, token):\n url = self.get_user_info_url()\n try:\n headers = {'Authorization': 'Bearer {}'.format(token)}\n response = requests.get(url, headers=headers)\n except requests.RequestException:\n logger.exception('Failed to retrieve user info due to a request exception.')\n raise 
UserInfoRetrievalFailed\n if (response.status_code == 200):\n return self.process_user_info_response(response.json())\n else:\n msg = 'Failed to retrieve user info. Server [{server}] responded with status [{status}].'.format(server=url, status=response.status_code)\n raise UserInfoRetrievalFailed(msg)", "docstring": "Retrieves the user info from the OAuth provider.\n\nArgs:\n token (str): OAuth2 access token.\n\nReturns:\n dict\n\nRaises:\n UserInfoRetrievalFailed: Retrieval of user info from the remote server failed.", "source": "codesearchnet_filtered"} -{"code": "def get_actions(self, issues):\n actions = []\n try:\n for issue in issues:\n action_item = self.determine_action(issue)\n if (action_item['action'] != AuditActions.IGNORE):\n action_item['owners'] = self.get_contacts(issue)\n actions.append(action_item)\n finally:\n db.session.rollback()\n return actions", "docstring": "Returns a list of actions to executed\n\nArgs:\n issues (`list` of :obj:`RequiredTagsIssue`): List of issues\n\nReturns:\n `list` of `dict`", "source": "codesearchnet_filtered"} -{"code": "def get_weights_of_nn_sites(self, structure, n):\n return [e['weight'] for e in self.get_nn_info(structure, n)]", "docstring": "Get weight associated with each near neighbor of site with\n index n in structure.\n\nArgs:\n structure (Structure): input structure.\n n (integer): index of site for which to determine the weights.\n\nReturns:\n weights (list of floats): near-neighbor weights.", "source": "codesearchnet_filtered"} -{"code": "def _import_templates(force=False):\n tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates')\n disk_templates = {f: os.path.join(root, f) for (root, directory, files) in os.walk(tmplpath) for f in files}\n db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()}\n for (name, template_file) in disk_templates.items():\n with open(template_file, 'r') as f:\n body = f.read()\n disk_hash = get_hash(body)\n if (name not in db_templates):\n template = Template()\n template.template_name = name\n template.template = body\n db.session.add(template)\n auditlog(event='template.import', actor='init', data={'template_name': name, 'template': body})\n logger.info('Imported template {}'.format(name))\n else:\n template = db_templates[name]\n db_hash = get_hash(template.template)\n if (db_hash != disk_hash):\n if (force or (not db_templates[name].is_modified)):\n template.template = body\n db.session.add(template)\n auditlog(event='template.update', actor='init', data={'template_name': name, 'template_diff': diff(template.template, body)})\n logger.info('Updated template {}'.format(name))\n else:\n logger.warning('Updated template available for {}. Will not import as it would overwrite user edited content and force is not enabled'.format(name))", "docstring": "Import templates from disk into database\n\n Reads all templates from disk and adds them to the database. By default, any template that has been modified by\n the user will not be updated. This can however be changed by setting `force` to `True`, which causes all templates\n to be imported regardless of status\n\nArgs:\n force (`bool`): Force overwrite any templates with local changes made. 
Default: `False`\n\nReturns:\n `None`", "source": "codesearchnet_filtered"} -{"code": "def lsfiles(root='.', **kwargs):\n paths = ls(root=root, **kwargs)\n if isfile(root):\n return paths\n return [_path for _path in paths if isfile(path(root, _path))]", "docstring": "Return only files from a directory listing.\n\nArgs:\n root (str): Path to directory. Can be relative or absolute.\n **kwargs: Any additional arguments to be passed to ls().\n\nReturns:\n list of str: A list of file paths.\n\nRaises:\n OSError: If root directory does not exist.", "source": "codesearchnet_filtered"} -{"code": "def create_view(self, state_root_hash=None):\n if (state_root_hash is None):\n state_root_hash = INIT_ROOT_KEY\n merkle_db = MerkleDatabase(self._database, merkle_root=state_root_hash)\n return StateView(merkle_db)", "docstring": "Creates a StateView for the given state root hash.\n\nArgs:\n state_root_hash (str): The state root hash of the state view\n to return. If None, returns the state view for the\n\nReturns:\n StateView: state view locked to the given root hash.", "source": "codesearchnet_filtered"} -{"code": "def label_sequential_regions(inlist):\n import more_itertools as mit\n df = pd.DataFrame(inlist).set_index(0)\n labeled = {}\n for label in df[1].unique():\n iterable = df[(df[1] == label)].index.tolist()\n labeled.update({'{}{}'.format(label, (i + 1)): items for (i, items) in enumerate([list(group) for group in mit.consecutive_groups(iterable)])})\n return labeled", "docstring": "Input a list of labeled tuples and return a dictionary of sequentially labeled regions.\n\nArgs:\n inlist (list): A list of tuples with the first number representing the index and the second the index label.\n\nReturns:\n dict: Dictionary of labeled regions.\n\nExample:\n >>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')])\n {'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}", "source": "codesearchnet_filtered"} -{"code": "def update_caseid(self, case_obj, family_id):\n new_case = deepcopy(case_obj)\n new_case['_id'] = family_id\n for case_variants in ['suspects', 'causatives']:\n new_variantids = []\n for variant_id in case_obj.get(case_variants, []):\n case_variant = self.variant(variant_id)\n if (not case_variant):\n continue\n new_variantid = get_variantid(case_variant, family_id)\n new_variantids.append(new_variantid)\n new_case[case_variants] = new_variantids\n for acmg_obj in self.acmg_collection.find({'case_id': case_obj['_id']}):\n LOG.info('update ACMG classification: %s', acmg_obj['classification'])\n acmg_variant = self.variant(acmg_obj['variant_specific'])\n new_specific_id = get_variantid(acmg_variant, family_id)\n self.acmg_collection.find_one_and_update({'_id': acmg_obj['_id']}, {'$set': {'case_id': family_id, 'variant_specific': new_specific_id}})\n institute_obj = self.institute(case_obj['owner'])\n for event_obj in self.events(institute_obj, case=case_obj):\n LOG.info('update event: %s', event_obj['verb'])\n self.event_collection.find_one_and_update({'_id': event_obj['_id']}, {'$set': {'case': family_id}})\n self.case_collection.insert_one(new_case)\n self.case_collection.find_one_and_delete({'_id': case_obj['_id']})\n return new_case", "docstring": "Update case id for a case across the database.\n\n This function is used when a case is a rerun or updated for another reason.\n\nArgs:\n case_obj(dict)\n family_id(str): The new family id\n\nReturns:\n new_case(dict): The updated case object", "source": 
"codesearchnet_filtered"} -{"code": "def delete_nsg(access_token, subscription_id, resource_group, nsg_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name, '?api-version=', NETWORK_API])\n return do_delete(endpoint, access_token)", "docstring": "Delete network security group.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n nsg_name (str): Name of the NSG.\n\nReturns:\n HTTP response.", "source": "codesearchnet_filtered"} -{"code": "def create_endpoint(self, endpoint_name, config_name, tags=None, wait=True):\n LOGGER.info('Creating endpoint with name {}'.format(endpoint_name))\n tags = (tags or [])\n self.sagemaker_client.create_endpoint(EndpointName=endpoint_name, EndpointConfigName=config_name, Tags=tags)\n if wait:\n self.wait_for_endpoint(endpoint_name)\n return endpoint_name", "docstring": "Create an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request.\n\n Once the ``Endpoint`` is created, client applications can send requests to obtain inferences.\n The endpoint configuration is created using the ``CreateEndpointConfig`` API.\n\nArgs:\n endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` being created.\n config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.\n wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True).\n\nReturns:\n str: Name of the Amazon SageMaker ``Endpoint`` created.", "source": "codesearchnet_filtered"} -{"code": "def set_iprouting(self, value=None, default=False, disable=False):\n if (value is False):\n disable = True\n cmd = self.command_builder('ip routing', value=value, default=default, disable=disable)\n return self.configure(cmd)", "docstring": "Configures the state of global ip routing\n\n EosVersion:\n 4.13.7M\n\nArgs:\n value(bool): True if ip routing should be enabled or False if\n ip routing should be disabled\n default (bool): Controls the use of the default keyword\n disable (bool): Controls the use of the no keyword\n\nReturns:\n bool: True if the commands completed successfully otherwise False", "source": "codesearchnet_filtered"} -{"code": "def init(dvc_dir):\n config_file = os.path.join(dvc_dir, Config.CONFIG)\n open(config_file, 'w+').close()\n return Config(dvc_dir)", "docstring": "Initializes dvc config.\n\nArgs:\n dvc_dir (str): path to .dvc directory.\n\nReturns:\n dvc.config.Config: config object.", "source": "codesearchnet_filtered"} -{"code": "def adjust_saturation(img, saturation_factor):\n if (not _is_pil_image(img)):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img", "docstring": "Adjust color saturation of an image.\n\nArgs:\n img (PIL Image): PIL Image to be adjusted.\n saturation_factor (float): How much to adjust the saturation. 
0 will\n give a black and white image, 1 will give the original image while\n 2 will enhance the saturation by a factor of 2.\n\nReturns:\n PIL Image: Saturation adjusted image.", "source": "codesearchnet_filtered"} -{"code": "def create(self, data, **kwargs):\n path = self.path[:(- 1)]\n return CreateMixin.create(self, data, path=path, **kwargs)", "docstring": "Creates a new object.\n\nArgs:\n data (dict): Parameters to send to the server to create the\n resource\n **kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabCreateError: If the server cannot perform the request\n\nReturns:\n RESTObject: A new instance of the managed object class build with\n the data sent by the server", "source": "codesearchnet_filtered"} -{"code": "def bounding_box_from(points, i, i1, thr):\n pi = points[i]\n pi1 = points[i1]\n min_lat = min(pi.lat, pi1.lat)\n min_lon = min(pi.lon, pi1.lon)\n max_lat = max(pi.lat, pi1.lat)\n max_lon = max(pi.lon, pi1.lon)\n return ((min_lat - thr), (min_lon - thr), (max_lat + thr), (max_lon + thr))", "docstring": "Creates bounding box for a line segment\n\nArgs:\n points (:obj:`list` of :obj:`Point`)\n i (int): Line segment start, index in points array\n i1 (int): Line segment end, index in points array\n\nReturns:\n (float, float, float, float): with bounding box min x, min y, max x and max y", "source": "codesearchnet_filtered"} -{"code": "def register(self, message, host):\n cuuid = message['cuuid']\n if (len(self.registry) > self.registration_limit):\n logger.warning(('<%s> Registration limit exceeded' % cuuid))\n response = serialize_data({'method': 'BYE REGISTER'}, self.compression, encryption=False)\n return response\n data = {'host': host[0], 'port': host[1], 'time': datetime.now()}\n return_msg = {'method': 'OK REGISTER'}\n if (('encryption' in message) and self.encryption):\n data['encryption'] = PublicKey(message['encryption'][0], message['encryption'][1])\n self.encrypted_hosts[host] = cuuid\n return_msg['encryption'] = [self.encryption.n, self.encryption.e]\n if (cuuid in self.registry):\n for key in data:\n self.registry[cuuid][key] = data[key]\n else:\n self.registry[cuuid] = data\n self.registry[cuuid]['authenticated'] = False\n response = serialize_data(return_msg, self.compression, encryption=False)\n logger.debug(('<%s> Registry entries:' % cuuid))\n for (key, value) in self.registry.items():\n logger.debug(('<%s> %s %s' % (str(cuuid), str(key), pformat(value))))\n return response", "docstring": "This function will register a particular client in the server's\n registry dictionary.\n\n Any clients that are registered will be able to send and recieve events\n to and from the server.\n\nArgs:\n message (dict): The client message from the client who wants to\n register.\n host (tuple): The (address, port) tuple of the client that is\n registering.\n\nReturns:\n A server response with an \"OK REGISTER\" if the registration was\n successful or a \"BYE REGISTER\" if unsuccessful.", "source": "codesearchnet_filtered"} -{"code": "def load(file, check_version=True):\n s = file.read()\n return loads(s, check_version)", "docstring": "Load VOEvent from file object.\n\n A simple wrapper to read a file before passing the contents to\n :py:func:`.loads`. 
Use with an open file object, e.g.::\n\n with open('/path/to/voevent.xml', 'rb') as f:\n v = vp.load(f)\n\nArgs:\n file (io.IOBase): An open file object (binary mode preferred), see also\n http://lxml.de/FAQ.html :\n \"Can lxml parse from file objects opened in unicode/text mode?\"\n\n check_version (bool): (Default=True) Checks that the VOEvent is of a\n supported schema version - currently only v2.0 is supported.\n\nReturns:\n :py:class:`Voevent`: Root-node of the etree.", "source": "codesearchnet_filtered"} -{"code": "def all(self, predicate=bool):\n if self.closed():\n raise ValueError('Attempt to call all() on a closed Queryable.')\n if (not is_callable(predicate)):\n raise TypeError('all() parameter predicate={0} is not callable'.format(repr(predicate)))\n return all(self.select(predicate))", "docstring": "Determine if all elements in the source sequence satisfy a condition.\n\n All of the source sequence will be consumed.\n\n Note: This method uses immediate execution.\n\nArgs:\n predicate (callable): An optional single argument function used to\n test each elements. If omitted, the bool() function is used\n resulting in the elements being tested directly.\n\nReturns:\n True if all elements in the sequence meet the predicate condition,\n otherwise False.\n\nRaises:\n ValueError: If the Queryable is closed()\n TypeError: If predicate is not callable.", "source": "codesearchnet_filtered"} -{"code": "def get_arrive_stop(self, **kwargs):\n params = {'idStop': kwargs.get('stop_number'), 'cultureInfo': util.language_code(kwargs.get('lang'))}\n result = self.make_request('geo', 'get_arrive_stop', **params)\n if (not util.check_result(result, 'arrives')):\n return (False, 'UNKNOWN ERROR')\n values = util.response_list(result, 'arrives')\n return (True, [emtype.Arrival(**a) for a in values])", "docstring": "Obtain bus arrival info in target stop.\n\nArgs:\n stop_number (int): Stop number to query.\n lang (str): Language code (*es* or *en*).\n\nReturns:\n Status boolean and parsed response (list[Arrival]), or message string\n in case of error.", "source": "codesearchnet_filtered"} -{"code": "def convert_old_publication_info_to_new(publication_infos):\n result = []\n hidden_publication_infos = []\n for publication_info in publication_infos:\n _publication_info = copy.deepcopy(publication_info)\n journal_title = _publication_info.get('journal_title')\n try:\n journal_title = _JOURNALS_RENAMED_OLD_TO_NEW[journal_title]\n _publication_info['journal_title'] = journal_title\n result.append(_publication_info)\n continue\n except KeyError:\n pass\n journal_volume = _publication_info.get('journal_volume')\n if ((journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME) and journal_volume and (len(journal_volume) == 4)):\n try:\n was_last_century = (int(journal_volume[:2]) > 50)\n except ValueError:\n pass\n else:\n _publication_info['year'] = int((('19' + journal_volume[:2]) if was_last_century else ('20' + journal_volume[:2])))\n _publication_info['journal_volume'] = journal_volume[2:]\n result.append(_publication_info)\n continue\n if (journal_title and journal_volume and (journal_title.lower() not in JOURNALS_IGNORED_IN_OLD_TO_NEW)):\n volume_starts_with_a_letter = _RE_VOLUME_STARTS_WITH_A_LETTER.match(journal_volume)\n volume_ends_with_a_letter = _RE_VOLUME_ENDS_WITH_A_LETTER.match(journal_volume)\n match = (volume_starts_with_a_letter or volume_ends_with_a_letter)\n if match:\n _publication_info.pop('journal_record', None)\n if (journal_title in _JOURNALS_RENAMED_OLD_TO_NEW.values()):\n 
_publication_info['journal_title'] = journal_title\n else:\n _publication_info['journal_title'] = ''.join([journal_title, ('' if journal_title.endswith('.') else ' '), match.group('letter')])\n _publication_info['journal_volume'] = match.group('volume')\n hidden = _publication_info.pop('hidden', None)\n if hidden:\n hidden_publication_infos.append(_publication_info)\n else:\n result.append(_publication_info)\n for publication_info in hidden_publication_infos:\n if (publication_info not in result):\n publication_info['hidden'] = True\n result.append(publication_info)\n return result", "docstring": "Convert a ``publication_info`` value from the old format to the new.\n\n On Legacy different series of the same journal were modeled by adding the\n letter part of the name to the journal volume. For example, a paper published\n in Physical Review D contained::\n\n {\n 'publication_info': [\n {\n 'journal_title': 'Phys.Rev.',\n 'journal_volume': 'D43',\n },\n ],\n }\n\n On Labs we instead represent each series with a different journal record. As\n a consequence, the above example becomes::\n\n {\n 'publication_info': [\n {\n 'journal_title': 'Phys.Rev.D',\n 'journal_volume': '43',\n },\n ],\n }\n\n This function handles this translation from the old format to the new. Please\n also see the tests for various edge cases that this function also handles.\n\nArgs:\n publication_infos: a ``publication_info`` in the old format.\n\nReturns:\n list(dict): a ``publication_info`` in the new format.", "source": "codesearchnet_filtered"} -{"code": "def get_users(self, condensed=False):\n user_list = self.slack_client.api_call('users.list')\n if (not user_list.get('ok')):\n return None\n if condensed:\n users = [{'id': item.get('id'), 'name': item.get('name'), 'display_name': item.get('profile').get('display_name')} for item in user_list.get('members')]\n return users\n else:\n return user_list", "docstring": "Grabs all users in the slack team\n\n This should should only be used for getting list of all users. Do not\n use it for searching users. 
Use get_user_info instead.\n\nArgs:\n condensed (bool): if true triggers list condensing functionality\n\nReturns:\n dict: Dict of users in Slack team.\n See also: https://api.slack.com/methods/users.list", "source": "codesearchnet_filtered"} -{"code": "def update(self, roomId, title=None, **request_parameters):\n check_type(roomId, basestring, may_be_none=False)\n check_type(roomId, basestring)\n put_data = dict_from_items_with_values(request_parameters, title=title)\n json_data = self._session.put(((API_ENDPOINT + '/') + roomId), json=put_data)\n return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Update details for a room, by ID.\n\nArgs:\n roomId(basestring): The room ID.\n title(basestring): A user-friendly name for the room.\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\nReturns:\n Room: A Room object with the updated Webex Teams room details.\n\nRaises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet_filtered"} -{"code": "def _urllib_post(self, url, json='', data='', username='', password='', headers=None, timeout=30):\n if (headers is None):\n headers = {}\n raw_store = json\n raw_request = (json_lib.dumps(json) if json else urlencode(data))\n url_request = Request(url, data=raw_request.encode('utf8'))\n if json:\n url_request.add_header('Content-Type', 'application/json')\n elif (not data):\n raise ValueError('Please provide either a json or a data field.')\n headers['User-Agent'] = self.user_agent\n raw_request = raw_store\n if (username and password):\n if (sys.version_info[0] >= 3):\n basic_authstring = base64.encodebytes(('%s:%s' % (username, password)).encode()).decode().replace('\\n', '')\n else:\n basic_authstring = base64.encodestring(('%s:%s' % (username, password))).replace('\\n', '')\n url_request.add_header('Authorization', ('Basic %s' % basic_authstring))\n for (key, value) in headers.items():\n url_request.add_header(key, str(value))\n try:\n response = urlopen(url_request, timeout=timeout)\n except HTTPError as e:\n raw_response = e.read()\n return (raw_response, raw_request, e.getcode(), e.headers)\n else:\n raw_response = response.read()\n response.close()\n return (raw_response, raw_request, response.getcode(), dict(response.info()))", "docstring": "This function will POST to the url endpoint using urllib2. returning\n an AdyenResult object on 200 HTTP responce. Either json or data has to\n be provided. If username and password are provided, basic auth will be\n used.\n\nArgs:\n url (str): url to send the POST\n json (dict, optional): Dict of the JSON to POST\n data (dict, optional): Dict, presumed flat structure of\n key/value of request to place as\n www-form\n username (str, optional): Username for basic auth. Must be\n uncluded as part of password.\n password (str, optional): Password for basic auth. Must be\n included as part of username.\n headers (dict, optional): Key/Value pairs of headers to include\n timeout (int, optional): Default 30. 
Timeout for the request.\n\nReturns:\n str: Raw response received\n str: Raw request placed\n int: HTTP status code, eg 200,404,401\n dict: Key/Value pairs of the headers received.", "source": "codesearchnet_filtered"} -{"code": "def new_message_from_header(header):\n message_type = header.message_type\n if (not isinstance(message_type, Type)):\n try:\n if isinstance(message_type, str):\n message_type = Type[message_type]\n elif isinstance(message_type, int):\n message_type = Type(message_type)\n except ValueError:\n raise ValueError\n message = new_message_from_message_type(message_type)\n message.header.xid = header.xid\n message.header.length = header.length\n return message", "docstring": "Given an OF Header, return an empty message of header's message_type.\n\nArgs:\n header (~pyof.v0x01.common.header.Header): Unpacked OpenFlow Header.\n\nReturns:\n Empty OpenFlow message of the same type of message_type attribute from\n the given header.\n The header attribute of the message will be populated.\n\nRaises:\n KytosUndefinedMessageType: Unkown Message_Type.", "source": "codesearchnet_filtered"} -{"code": "def wrap_callable(cls, uri, methods, callable_obj):\n if isinstance(callable_obj, HandlerMeta):\n callable_obj.base_endpoint = uri\n callable_obj.is_valid = True\n return callable_obj\n if isinstance(callable_obj, types.FunctionType):\n return cls(uri=uri, methods=methods, callable_obj=callable_obj)\n raise RouteError('Invalid handler type.')", "docstring": "Wraps function-based callable_obj into a `Route` instance, else\n proxies a `bottle_neck.handlers.BaseHandler` subclass instance.\n\nArgs:\n uri (str): The uri relative path.\n methods (tuple): A tuple of valid method strings.\n callable_obj (instance): The callable object.\n\nReturns:\n A route instance.\n\nRaises:\n RouteError for invalid callable object type.", "source": "codesearchnet_filtered"} -{"code": "def cut_sphere(self, radius=15.0, origin=None, outside_sliced=True, preserve_bonds=False):\n if (origin is None):\n origin = np.zeros(3)\n elif pd.api.types.is_list_like(origin):\n origin = np.array(origin, dtype='f8')\n else:\n origin = self.loc[(origin, ['x', 'y', 'z'])]\n molecule = self.get_distance_to(origin)\n if outside_sliced:\n molecule = molecule[(molecule['distance'] < radius)]\n else:\n molecule = molecule[(molecule['distance'] > radius)]\n if preserve_bonds:\n molecule = self._preserve_bonds(molecule)\n return molecule", "docstring": "Cut a sphere specified by origin and radius.\n\nArgs:\n radius (float):\n origin (list): Please note that you can also pass an\n integer. 
In this case it is interpreted as the\n index of the atom which is taken as origin.\n outside_sliced (bool): Atoms outside/inside the sphere\n are cut out.\n preserve_bonds (bool): Do not cut covalent bonds.\n\nReturns:\n Cartesian:", "source": "codesearchnet_filtered"} -{"code": "def from_string(string):\n lines = string.split('\\n')\n toks = lines[0].split()\n lengths = [float(i) for i in toks]\n toks = lines[1].split()\n angles = [float(i) for i in toks[0:3]]\n latt = Lattice.from_lengths_and_angles(lengths, angles)\n sp = []\n coords = []\n for l in lines[4:]:\n m = re.match('\\\\d+\\\\s+(\\\\w+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)', l.strip())\n if m:\n sp.append(m.group(1))\n coords.append([float(m.group(i)) for i in range(2, 5)])\n return Cssr(Structure(latt, sp, coords))", "docstring": "Reads a string representation to a Cssr object.\n\nArgs:\n string (str): A string representation of a CSSR.\n\nReturns:\n Cssr object.", "source": "codesearchnet_filtered"} -{"code": "def build_graph(path, term_depth=1000, skim_depth=10, d_weights=False, **kwargs):\n click.echo('\\nTokenizing text...')\n t = Text.from_file(path)\n click.echo(('Extracted %d tokens' % len(t.tokens)))\n m = Matrix()\n click.echo('\\nIndexing terms:')\n m.index(t, t.most_frequent_terms(term_depth), **kwargs)\n g = Skimmer()\n click.echo('\\nGenerating graph:')\n g.build(t, m, skim_depth, d_weights)\n return g", "docstring": "Tokenize a text, index a term matrix, and build out a graph.\n\nArgs:\n path (str): The file path.\n term_depth (int): Consider the N most frequent terms.\n skim_depth (int): Connect each word to the N closest siblings.\n d_weights (bool): If true, give \"close\" nodes low weights.\n\nReturns:\n Skimmer: The indexed graph.", "source": "codesearchnet_filtered"} -{"code": "def destroy_cloudwatch_log_event(app='', env='dev', region=''):\n session = boto3.Session(profile_name=env, region_name=region)\n cloudwatch_client = session.client('logs')\n cloudwatch_client.delete_subscription_filter(logGroupName='/aws/lambda/awslimitchecker', filterName=app)\n return True", "docstring": "Destroy Cloudwatch log event.\n\nArgs:\n app (str): Spinnaker Application name.\n env (str): Deployment environment.\n region (str): AWS region.\n\nReturns:\n bool: True upon successful completion.", "source": "codesearchnet_filtered"} -{"code": "def get_col_info(table_name, col_name, meta_file):\n with open(meta_file, 'r') as f:\n meta = json.load(f)\n (data_table, table) = load_data_table(table_name, meta_file, meta)\n for field in table['fields']:\n if (field['name'] == col_name):\n col_meta = field\n col = data_table[col_name]\n return (col, col_meta)", "docstring": "Return the content and metadata of a fiven column.\n\nArgs:\n table_name(str): Name of the table.\n col_name(str): Name of the column.\n meta_file(str): Path to the meta.json file.\n\nReturns:\n tuple(pandas.Series, dict)", "source": "codesearchnet_filtered"} -{"code": "def rpc(self, address, rpc_id, *args, **kwargs):\n if isinstance(rpc_id, RPCDeclaration):\n arg_format = rpc_id.arg_format\n resp_format = rpc_id.resp_format\n rpc_id = rpc_id.rpc_id\n else:\n arg_format = kwargs.get('arg_format', None)\n resp_format = kwargs.get('resp_format', None)\n arg_payload = b''\n if (arg_format is not None):\n arg_payload = pack_rpc_payload(arg_format, args)\n self._logger.debug('Sending rpc to %d:%04X, payload=%s', address, rpc_id, args)\n resp_payload = self.call_rpc(address, rpc_id, arg_payload)\n if (resp_format is None):\n return 
[]\n resp = unpack_rpc_payload(resp_format, resp_payload)\n return resp", "docstring": "Immediately dispatch an RPC inside this EmulatedDevice.\n\n This function is meant to be used for testing purposes as well as by\n tiles inside a complex EmulatedDevice subclass that need to\n communicate with each other. It should only be called from the main\n virtual device thread where start() was called from.\n\n **Background workers may not call this method since it may cause them to deadlock.**\n\nArgs:\n address (int): The address of the tile that has the RPC.\n rpc_id (int): The 16-bit id of the rpc we want to call\n *args: Any required arguments for the RPC as python objects.\n **kwargs: Only two keyword arguments are supported:\n - arg_format: A format specifier for the argument list\n - result_format: A format specifier for the result\n\nReturns:\n list: A list of the decoded response members from the RPC.", "source": "codesearchnet_filtered"} -{"code": "def ungroup_unique(unique_items, groupxs, maxval=None):\n if (maxval is None):\n maxpergroup = [(max(xs) if len(xs) else 0) for xs in groupxs]\n maxval = (max(maxpergroup) if len(maxpergroup) else 0)\n ungrouped_items = ([None] * (maxval + 1))\n for (item, xs) in zip(unique_items, groupxs):\n for x in xs:\n ungrouped_items[x] = item\n return ungrouped_items", "docstring": "Ungroups unique items to correspond to original non-unique list\n\nArgs:\n unique_items (list):\n groupxs (list):\n maxval (int): (default = None)\n\nReturns:\n list: ungrouped_items\n\n CommandLine:\n python -m utool.util_alg ungroup_unique\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_alg import * # NOQA\n >>> import utool as ut\n >>> unique_items = [1, 2, 3]\n >>> groupxs = [[0, 2], [1, 3], [4, 5]]\n >>> maxval = None\n >>> ungrouped_items = ungroup_unique(unique_items, groupxs, maxval)\n >>> result = ('ungrouped_items = %s' % (ut.repr2(ungrouped_items),))\n >>> print(result)\n ungrouped_items = [1, 2, 1, 2, 3, 3]", "source": "codesearchnet_filtered"} -{"code": "def get_realtime_urls(admin_view_func=(lambda x: x)):\n from .widgets import REALTIME_WIDGETS\n return [url(w.url_regex, admin_view_func(w.as_view()), name=w.url_name) for w in REALTIME_WIDGETS]", "docstring": "Get the URL for real-time widgets.\n\nArgs:\n admin_view_func (callable): an admin_view method from an AdminSite\n instance. 
By default: identity.\n\nReturns:\n list: the list of the real-time URLs as django's ``url()``.", "source": "codesearchnet_filtered"} -{"code": "def list_folder(cls, session, mailbox, folder):\n return cls(('/mailboxes/%d/folders/%s/conversations.json' % (mailbox.id, folder.id)), session=session)", "docstring": "Return conversations in a specific folder of a mailbox.\n\nArgs:\n session (requests.sessions.Session): Authenticated session.\n mailbox (helpscout.models.Mailbox): Mailbox that folder is in.\n folder (helpscout.models.Folder): Folder to list.\n\nReturns:\n RequestPaginator(output_type=helpscout.models.Conversation):\n Conversations iterator.", "source": "codesearchnet_filtered"} -{"code": "def _get_samples_shared_with(self, other, index=None):\n if isinstance(other, (pd.DataFrame, Projection)):\n df_other = (other.coords if isinstance(other, Projection) else other)\n if (len(set(df_other.index)) != len(df_other.index)):\n raise ValueError('other index has duplicates')\n if (len(set(self.coords.index)) != len(self.coords.index)):\n raise ValueError('This projection index has duplicates')\n if index:\n uniq_idx = set(index)\n if (len(uniq_idx) != len(index)):\n raise ValueError('index has has duplicates')\n if (uniq_idx - set(df_other.index)):\n raise ValueError('index has samples not in other')\n if (uniq_idx - set(self.coords.index)):\n raise ValueError('index has samples not in this projection')\n else:\n uniq_idx = (set(df_other.index) & set(self.coords.index))\n if (not len(uniq_idx)):\n raise ValueError('No samples shared between other and this projection')\n idx = list(uniq_idx)\n return (self.coords.loc[(idx, :)].values, df_other.loc[(idx, :)].values)\n else:\n other = np.array(other)\n if (other.shape != self.coords.shape):\n raise ValueError('array-like must have the same shape as self.coords')\n return (self.coords.values, other)", "docstring": "Find samples shared with another dataset.\n\nArgs:\n other\n (:py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`\n or `array-like`):\n The other dataset. 
If `other` is an instance of\n :py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`,\n then `other` must have indexes in common with this projection.\n If `array-like`, then other must have same dimensions as\n `self.coords`.\n index (`list-like` or `None`): If `other` is an instance of\n :py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`\n then only return samples in index.\n\nReturns:\n `tuple`: containing:\n\n - this (`numpy.array`) Shape [`x`, `n`].\n - other (`numpy.array`) Shape [`x`, `n`].", "source": "codesearchnet_filtered"} -{"code": "def has_all_nonzero_neurite_radii(neuron, threshold=0.0):\n bad_ids = []\n seen_ids = set()\n for s in _nf.iter_sections(neuron):\n for (i, p) in enumerate(s.points):\n info = (s.id, i)\n if ((p[COLS.R] <= threshold) and (info not in seen_ids)):\n seen_ids.add(info)\n bad_ids.append(info)\n return CheckResult((len(bad_ids) == 0), bad_ids)", "docstring": "Check presence of neurite points with radius not above threshold\n\nArgs:\n neuron(Neuron): The neuron object to test\n threshold: value above which a radius is considered to be non-zero\n\nReturns:\n CheckResult with result including list of (section ID, point ID) pairs\n of zero-radius points", "source": "codesearchnet_filtered"} -{"code": "def read_csv(filename):\n field_names = ('latitude', 'longitude', 'name')\n data = utils.prepare_csv_read(filename, field_names, skipinitialspace=True)\n locations = {}\n args = []\n for (index, row) in enumerate(data, 1):\n name = ('%02i:%s' % (index, row['name']))\n locations[name] = (row['latitude'], row['longitude'])\n args.append(name)\n return (locations, args)", "docstring": "Pull locations from a user's CSV file.\n\n Read gpsbabel_'s CSV output format\n\n .. _gpsbabel: http://www.gpsbabel.org/\n\nArgs:\n filename (str): CSV file to parse\n\nReturns:\n tuple of dict and list: List of locations as ``str`` objects", "source": "codesearchnet_filtered"} -{"code": "def search(self, term):\n return self._result(self._get(self._url('/images/search'), params={'term': term}), True)", "docstring": "Search for images on Docker Hub. Similar to the ``docker search``\n command.\n\nArgs:\n term (str): A term to search for.\n\nReturns:\n (list of dicts): The response of the search.\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "codesearchnet_filtered"} -{"code": "def valueReadPreprocessor(valueString, replaceParamsFile=None):\n if (type(valueString) is bool):\n log.warning('Only numerical variable types can be handled by the valueReadPreprocessor function.')\n return valueString\n processedValue = valueString\n if ((replaceParamsFile is not None) and (valueString is not None)):\n if (('[' in valueString) or (']' in valueString)):\n processedValue = '{0}'.format(REPLACE_NO_VALUE)\n for targetParam in replaceParamsFile.targetParameters:\n if (targetParam.targetVariable == valueString):\n processedValue = '{0}'.format(((- 1) * targetParam.id))\n break\n return processedValue", "docstring": "Apply global pre-processing to values during reading throughout the project.\n\nArgs:\n valueString (str): String representing the value to be preprocessed.\n replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. 
Required if\n replacement variables are included in the project.\n\nReturns:\n str: Processed value as a string", "source": "codesearchnet_filtered"} -{"code": "def build_command(self, action, args=None):\n arguments = self.wrap_arguments(args)\n body = self.soap_body_template.format(arguments=arguments, action=action, service_type=self.service_type, version=self.version)\n soap_action_template = 'urn:schemas-upnp-org:service:{service_type}:{version}#{action}'\n soap_action = soap_action_template.format(service_type=self.service_type, version=self.version, action=action)\n headers = {'Content-Type': 'text/xml; charset=\"utf-8\"', 'SOAPACTION': soap_action}\n return (headers, body)", "docstring": "Build a SOAP request.\n\nArgs:\n action (str): the name of an action (a string as specified in the\n service description XML file) to be sent.\n args (list, optional): Relevant arguments as a list of (name,\n value) tuples.\n\nReturns:\n tuple: a tuple containing the POST headers (as a dict) and a\n string containing the relevant SOAP body. Does not set\n content-length, or host headers, which are completed upon\n sending.", "source": "codesearchnet_filtered"} -{"code": "def conv_elems_1d(x, factor, out_depth=None):\n out_depth = (out_depth or x.get_shape().as_list()[(- 1)])\n x = tf.expand_dims(x, 1)\n x = layers().Conv2D(filters=out_depth, kernel_size=(1, factor), strides=(1, factor), padding='valid', data_format='channels_last')(x)\n x = tf.squeeze(x, 1)\n return x", "docstring": "Decrease the length and change the dimensionality.\n\n Merge/restore/compress factors positions of dim depth of the input into\n a single position of dim out_depth.\n This is basically just a strided convolution without overlap\n between each strides. The original length has to be divided by factor.\n\nArgs:\n x (tf.Tensor): shape [batch_size, length, depth]\n factor (int): Length compression factor.\n out_depth (int): Output depth\n\nReturns:\n tf.Tensor: shape [batch_size, length//factor, out_depth]", "source": "codesearchnet_filtered"} -{"code": "def warning_handler(self, handler):\n if (not self.opened()):\n handler = (handler or util.noop)\n self._warning_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_SetWarnOutHandler(self._warning_handler)", "docstring": "Setter for the warning handler function.\n\n If the DLL is open, this function is a no-op, so it should be called\n prior to calling ``open()``.\n\nArgs:\n self (JLink): the ``JLink`` instance\n handler (function): function to call on warning messages\n\nReturns:\n ``None``", "source": "codesearchnet_filtered"} -{"code": "def _ParseItem(self, parser_mediator, olecf_item):\n result = False\n event_data = OLECFItemEventData()\n event_data.name = olecf_item.name\n event_data.offset = 0\n event_data.size = olecf_item.size\n (creation_time, modification_time) = self._GetTimestamps(olecf_item)\n if creation_time:\n date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n result = True\n if modification_time:\n date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n result = True\n for sub_item in olecf_item.sub_items:\n if self._ParseItem(parser_mediator, sub_item):\n result = True\n return result", 
"docstring": "Parses an OLECF item.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n olecf_item (pyolecf.item): OLECF item.\n\nReturns:\n bool: True if an event was produced.", "source": "codesearchnet_filtered"} -{"code": "def get_and_check_project(valid_vcs_rules, source_url):\n project_path = match_url_regex(valid_vcs_rules, source_url, match_url_path_callback)\n if (project_path is None):\n raise ValueError('Unknown repo for source url {}!'.format(source_url))\n project = project_path.split('/')[(- 1)]\n return project", "docstring": "Given vcs rules and a source_url, return the project.\n\n The project is in the path, but is the repo name.\n `releases/mozilla-beta` is the path; `mozilla-beta` is the project.\n\nArgs:\n valid_vcs_rules (tuple of frozendicts): the valid vcs rules, per\n ``match_url_regex``.\n source_url (str): the source url to find the project for.\n\nRaises:\n RuntimeError: on failure to find the project.\n\nReturns:\n str: the project.", "source": "codesearchnet_filtered"} -{"code": "def parse(self, ping_message):\n try:\n if typepy.is_not_null_string(ping_message.stdout):\n ping_message = ping_message.stdout\n except AttributeError:\n pass\n logger.debug('parsing ping result: {}'.format(ping_message))\n self.__parser = NullPingParser()\n if typepy.is_null_string(ping_message):\n logger.debug('ping_message is empty')\n self.__stats = PingStats()\n return self.__stats\n ping_lines = _to_unicode(ping_message).splitlines()\n parser_class_list = (LinuxPingParser, WindowsPingParser, MacOsPingParser, AlpineLinuxPingParser)\n for parser_class in parser_class_list:\n self.__parser = parser_class()\n try:\n self.__stats = self.__parser.parse(ping_lines)\n return self.__stats\n except ParseError as e:\n if (e.reason != ParseErrorReason.HEADER_NOT_FOUND):\n raise e\n except pp.ParseException:\n pass\n self.__parser = NullPingParser()\n return self.__stats", "docstring": "Parse ping command output.\n\nArgs:\n ping_message (str or :py:class:`~pingparsing.PingResult`):\n ``ping`` command output.\n\nReturns:\n :py:class:`~pingparsing.PingStats`: Parsed result.", "source": "codesearchnet_filtered"} -{"code": "def create_cells(headers, schema_fields, values=None, row_number=None):\n fillvalue = '_fillvalue'\n is_header_row = (values is None)\n cells = []\n iterator = zip_longest(headers, schema_fields, (values or []), fillvalue=fillvalue)\n for (column_number, (header, field, value)) in enumerate(iterator, start=1):\n if (header == fillvalue):\n header = None\n elif is_header_row:\n value = header\n if (field == fillvalue):\n field = None\n if (value == fillvalue):\n value = None\n elif (value is None):\n value = ''\n cell = create_cell(header, value, field, column_number, row_number)\n cells.append(cell)\n return cells", "docstring": "Create list of cells from headers, fields and values.\n\nArgs:\n headers (List[str]): The headers values.\n schema_fields (List[tableschema.field.Field]): The tableschema\n fields.\n values (List[Any], optional): The cells values. If not specified,\n the created cells will have the same values as their\n corresponding headers. This is useful for specifying headers\n cells.\n If the list has any `None` values, as is the case on empty\n cells, the resulting Cell will have an empty string value. 
If\n the `values` list has a different length than the `headers`,\n the resulting Cell will have value `None`.\n row_number (int, optional): The row number.\n\nReturns:\n List[dict]: List of cells.", "source": "codesearchnet_filtered"} -{"code": "def encode_value(value):\n if (value is None):\n return document_pb2.Value(null_value=struct_pb2.NULL_VALUE)\n if isinstance(value, bool):\n return document_pb2.Value(boolean_value=value)\n if isinstance(value, six.integer_types):\n return document_pb2.Value(integer_value=value)\n if isinstance(value, float):\n return document_pb2.Value(double_value=value)\n if isinstance(value, DatetimeWithNanoseconds):\n return document_pb2.Value(timestamp_value=value.timestamp_pb())\n if isinstance(value, datetime.datetime):\n return document_pb2.Value(timestamp_value=_datetime_to_pb_timestamp(value))\n if isinstance(value, six.text_type):\n return document_pb2.Value(string_value=value)\n if isinstance(value, six.binary_type):\n return document_pb2.Value(bytes_value=value)\n document_path = getattr(value, '_document_path', None)\n if (document_path is not None):\n return document_pb2.Value(reference_value=document_path)\n if isinstance(value, GeoPoint):\n return document_pb2.Value(geo_point_value=value.to_protobuf())\n if isinstance(value, list):\n value_list = [encode_value(element) for element in value]\n value_pb = document_pb2.ArrayValue(values=value_list)\n return document_pb2.Value(array_value=value_pb)\n if isinstance(value, dict):\n value_dict = encode_dict(value)\n value_pb = document_pb2.MapValue(fields=value_dict)\n return document_pb2.Value(map_value=value_pb)\n raise TypeError('Cannot convert to a Firestore Value', value, 'Invalid type', type(value))", "docstring": "Converts a native Python value into a Firestore protobuf ``Value``.\n\nArgs:\n value (Union[NoneType, bool, int, float, datetime.datetime, \\\n str, bytes, dict, ~google.cloud.Firestore.GeoPoint]): A native\n Python value to convert to a protobuf field.\n\nReturns:\n ~google.cloud.firestore_v1beta1.types.Value: A\n value encoded as a Firestore protobuf.\n\nRaises:\n TypeError: If the ``value`` is not one of the accepted types.", "source": "codesearchnet_filtered"} -{"code": "def get_block_iter(self, start_block=None, start_block_num=None, reverse=True):\n start = None\n if start_block_num:\n if (len(start_block_num) < 2):\n raise ValueError('Invalid start block num')\n if (start_block_num[:2] != '0x'):\n raise ValueError('Invalid start block num')\n start = int(start_block_num, 16)\n elif start_block:\n start = start_block.block_num\n return _BlockStoreIter(self.pointer, start, reverse)", "docstring": "Returns an iterator that traverses blocks in block number order.\n\nArgs:\n start_block (:obj:`BlockWrapper`): the block from which traversal\n begins\n start_block_num (str): a starting block number, in hex, from where\n traversal begins; only used if no starting_block is provided\n\n reverse (bool): If True, traverse the blocks in from most recent\n to oldest block. 
Otherwise, it traverse the blocks in the\n opposite order.\n\nReturns:\n An iterator of block wrappers\n\nRaises:\n ValueError: If start_block or start_block_num do not specify a\n valid block", "source": "codesearchnet_filtered"} -{"code": "def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE):\n res = self._get(self._url('/containers/{0}/export', container), stream=True)\n return self._stream_raw_result(res, chunk_size, False)", "docstring": "Export the contents of a filesystem as a tar archive.\n\nArgs:\n container (str): The container to export\n chunk_size (int): The number of bytes returned by each iteration\n of the generator. If ``None``, data will be streamed as it is\n received. Default: 2 MB\n\nReturns:\n (generator): The archived filesystem data stream\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "codesearchnet_filtered"} -{"code": "def json_to_entity(tc_data, value_fields, resource_type, resource_type_parent):\n if (not isinstance(tc_data, list)):\n tc_data = [tc_data]\n entity_array = []\n for d in tc_data:\n entity = {'id': d.get('id'), 'webLink': d.get('webLink')}\n values = []\n if ('summary' in d):\n values.append(d.get('summary'))\n else:\n for field in value_fields:\n if (d.get(field) is not None):\n values.append(d.get(field))\n entity['value'] = ' : '.join(values)\n if (d.get('type') is not None):\n entity['type'] = d.get('type')\n else:\n entity['type'] = resource_type\n if (resource_type_parent in ['Indicator']):\n entity['confidence'] = d.get('confidence')\n entity['rating'] = d.get('rating')\n entity['threatAssessConfidence'] = d.get('threatAssessConfidence')\n entity['threatAssessRating'] = d.get('threatAssessRating')\n entity['dateLastModified'] = d.get('lastModified')\n if (resource_type_parent in ['Indicator', 'Group']):\n if ('owner' in d):\n entity['ownerName'] = d['owner']['name']\n else:\n entity['ownerName'] = d.get('ownerName')\n entity['dateAdded'] = d.get('dateAdded')\n if (resource_type_parent in ['Victim']):\n entity['ownerName'] = d.get('org')\n entity_array.append(entity)\n return entity_array", "docstring": "Convert ThreatConnect JSON response to a TCEntityArray.\n\n .. 
Attention:: This method is subject to frequent changes.\n\nArgs:\n tc_data (dictionary): Array of data returned from TC API call.\n value_fields (list): Field names that contain the \"value\" data.\n resource_type (string): The resource type of the tc_data provided.\n resource_type_parent (string): The resource parent type of the tc_data provided.\n\nReturns:\n (list): A list representing a TCEntityArray.", "source": "codesearchnet_filtered"} -{"code": "def is_valid_transition(self, source: str, dest: str) -> bool:\n if ((dest not in self._states) or (source not in self._states)):\n raise NotValidState\n elif (dest not in self._transitions[source]):\n raise NotValidTransition\n return True", "docstring": "Checks if a transitions is registered in the FSM\n\nArgs:\n source (str): the source state name\n dest (str): the destination state name\n\nReturns:\n bool: wether the transition is valid or not", "source": "codesearchnet_filtered"} -{"code": "def version(msg):\n tc = typecode(msg)\n if (tc != 31):\n raise RuntimeError(('%s: Not a status operation message, expecting TC = 31' % msg))\n msgbin = common.hex2bin(msg)\n version = common.bin2int(msgbin[72:75])\n return version", "docstring": "ADS-B Version\n\nArgs:\n msg (string): 28 bytes hexadecimal message string, TC = 31\n\nReturns:\n int: version number", "source": "codesearchnet_filtered"} -{"code": "def get(self, path, params=None, headers=None):\n response = requests.get(self._url_for(path), params=params, headers=self._headers(headers))\n self._handle_errors(response)\n return response", "docstring": "Perform a GET request, optionally providing query-string params.\n\nArgs:\n path (str): A path that gets appended to ``base_url``.\n params (dict, optional): Dictionary of param names to values.\n\nExample:\n api_client.get('/users', params={'active': True})\n\nReturns:\n A requests ``Response`` object.", "source": "codesearchnet_filtered"} -{"code": "def in_sorted(values, value):\n index = bisect.bisect_left(values, value)\n if (index >= len(values)):\n return False\n return (values[index] == value)", "docstring": "Checks if a value is in a sorted list.\n\n Uses the :mod:`bisect` builtin to find the insertion point for\n ``value``.\n\nArgs:\n values (List[int]): Integers sorted in ascending order.\n value (int): Value to check if contained in ``values``.\n\nReturns:\n bool: Indicating if the value is contained.", "source": "codesearchnet_filtered"} -{"code": "def rotate_texture(texture, rotation, x_offset=0.5, y_offset=0.5):\n (x, y) = texture\n x = (x.copy() - x_offset)\n y = (y.copy() - y_offset)\n angle = np.radians(rotation)\n x_rot = ((x * np.cos(angle)) + (y * np.sin(angle)))\n y_rot = ((x * (- np.sin(angle))) + (y * np.cos(angle)))\n return ((x_rot + x_offset), (y_rot + y_offset))", "docstring": "Rotates the given texture by a given angle.\n\nArgs:\n texture (texture): the texture to rotate\n rotation (float): the angle of rotation in degrees\n x_offset (float): the x component of the center of rotation (optional)\n y_offset (float): the y component of the center of rotation (optional)\n\nReturns:\n texture: A texture.", "source": "codesearchnet_filtered"} -{"code": "def _generate_latex_source(circuit, filename=None, scale=0.7, style=None, reverse_bits=False, plot_barriers=True, justify=None):\n (qregs, cregs, ops) = utils._get_layered_instructions(circuit, reverse_bits=reverse_bits, justify=justify)\n qcimg = _latex.QCircuitImage(qregs, cregs, ops, scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits)\n 
latex = qcimg.latex()\n if filename:\n with open(filename, 'w') as latex_file:\n latex_file.write(latex)\n return latex", "docstring": "Convert QuantumCircuit to LaTeX string.\n\nArgs:\n circuit (QuantumCircuit): input circuit\n scale (float): image scaling\n filename (str): optional filename to write latex\n style (dict or str): dictionary of style or file name of style file\n reverse_bits (bool): When set to True reverse the bit order inside\n registers for the output visualization.\n plot_barriers (bool): Enable/disable drawing barriers in the output\n circuit. Defaults to True.\n justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how\n the circuit should be justified.\n\nReturns:\n str: Latex string appropriate for writing to file.", "source": "codesearchnet_filtered"} -{"code": "def _CheckParserCanProcessFileEntry(self, parser, file_entry):\n for filter_object in parser.FILTERS:\n if filter_object.Match(file_entry):\n return True\n return False", "docstring": "Determines if a parser can process a file entry.\n\nArgs:\n file_entry (dfvfs.FileEntry): file entry.\n parser (BaseParser): parser.\n\nReturns:\n bool: True if the file entry can be processed by the parser object.", "source": "codesearchnet_filtered"} -{"code": "def _GetTimeElementsTuple(self, timestamp):\n (year, month, day_of_month, hours, minutes, seconds) = (int((hexdigit[0] + hexdigit[1]), 16) for hexdigit in zip(timestamp[::2], timestamp[1::2]))\n return ((year + 1970), (month + 1), day_of_month, hours, minutes, seconds)", "docstring": "Retrieves a time elements tuple from the timestamp.\n\n A Symantec log timestamp consist of six hexadecimal octets, that represent:\n First octet: Number of years since 1970\n Second octet: Month, where January is represented by 0\n Third octet: Day of the month\n Fourth octet: Number of hours\n Fifth octet: Number of minutes\n Sixth octet: Number of seconds\n\n For example, 200A13080122 represents November 19, 2002, 8:01:34 AM.\n\nArgs:\n timestamp (str): hexadecimal encoded date and time values.\n\nReturns:\n tuple: containing:\n year (int): year.\n month (int): month, where 1 represents January.\n day_of_month (int): day of month, where 1 is the first day of the month.\n hours (int): hours.\n minutes (int): minutes.\n seconds (int): seconds.", "source": "codesearchnet_filtered"} -{"code": "def make_gradients(dims=DEFAULT_DIMS):\n return np.meshgrid(np.linspace(0.0, 1.0, dims[0]), np.linspace(0.0, 1.0, dims[1]))", "docstring": "Makes a pair of gradients to generate textures from numpy primitives.\n\nArgs:\n dims (pair): the dimensions of the surface to create\n\nReturns:\n pair: A pair of surfaces.", "source": "codesearchnet_filtered"} -{"code": "def decode(self, codes):\n assert (codes.ndim == 2)\n (N, M) = codes.shape\n assert (M == self.M)\n assert (codes.dtype == self.code_dtype)\n vecs = np.empty((N, (self.Ds * self.M)), dtype=np.float32)\n for m in range(self.M):\n vecs[(:, (m * self.Ds):((m + 1) * self.Ds))] = self.codewords[m][(codes[(:, m)], :)]\n return vecs", "docstring": "Given PQ-codes, reconstruct original D-dimensional vectors\n approximately by fetching the codewords.\n\nArgs:\n codes (np.ndarray): PQ-cdoes with shape=(N, M) and dtype=self.code_dtype.\n Each row is a PQ-code\n\nReturns:\n np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32", "source": "codesearchnet_filtered"} -{"code": "def valueWritePreprocessor(valueString, replaceParamsFile=None):\n if (type(valueString) is bool):\n log.warning('Only numerical variable types can 
be handled by the valueReadPreprocessor function.')\n return valueString\n variableString = valueString\n if (replaceParamsFile is not None):\n if (variableString == REPLACE_NO_VALUE):\n variableString = '[NO_VARIABLE]'\n else:\n try:\n number = int(valueString)\n if (number < 0):\n parameterID = (number * (- 1))\n for targetParam in replaceParamsFile.targetParameters:\n if (targetParam.id == parameterID):\n variableString = targetParam.targetVariable\n break\n except:\n pass\n return variableString", "docstring": "Look up variable name in replace param file for the negative id given and return it.\n\nArgs:\n valueString (str): String representing the value to be preprocessed.\n replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if\n replacement variables are included in the project.\n\nReturns:\n str: Processed value as a string", "source": "codesearchnet_filtered"} -{"code": "def get_report_zip(results):\n\n def add_subdir(root_path, subdir):\n subdir_path = os.path.join(root_path, subdir)\n for (subdir_root, subdir_dirs, subdir_files) in os.walk(subdir_path):\n for subdir_file in subdir_files:\n subdir_file_path = os.path.join(root_path, subdir, subdir_file)\n if os.path.isfile(subdir_file_path):\n rel_path = os.path.relpath(subdir_root, subdir_file_path)\n subdir_arc_name = os.path.join(rel_path, subdir_file)\n zip_file.write(subdir_file_path, subdir_arc_name)\n for subdir in subdir_dirs:\n add_subdir(subdir_path, subdir)\n storage = BytesIO()\n tmp_dir = tempfile.mkdtemp()\n try:\n save_output(results, tmp_dir)\n with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n for (root, dirs, files) in os.walk(tmp_dir):\n for file in files:\n file_path = os.path.join(root, file)\n if os.path.isfile(file_path):\n arcname = os.path.join(os.path.relpath(root, tmp_dir), file)\n zip_file.write(file_path, arcname)\n for directory in dirs:\n dir_path = os.path.join(root, directory)\n if os.path.isdir(dir_path):\n zip_file.write(dir_path, directory)\n add_subdir(root, directory)\n finally:\n shutil.rmtree(tmp_dir)\n return storage.getvalue()", "docstring": "Creates a zip file of parsed report output\n\nArgs:\n results (OrderedDict): The parsed results\n\nReturns:\n bytes: zip file bytes", "source": "codesearchnet_filtered"} -{"code": "def islink(self, path=None, header=None):\n if (header is None):\n header = self._head(self.get_client_kwargs(path))\n for key in ('x-oss-object-type', 'type'):\n try:\n return (header.pop(key) == 'Symlink')\n except KeyError:\n continue\n return False", "docstring": "Returns True if object is a symbolic link.\n\nArgs:\n path (str): File path or URL.\n header (dict): Object header.\n\nReturns:\n bool: True if object is Symlink.", "source": "codesearchnet_filtered"} -{"code": "def getConfig(self, section=None):\n data = {}\n if (section is None):\n for s in self.config.sections():\n if ('/' in s):\n (parent, _s) = s.split('/')\n data[parent][_s] = dict(self.config.items(s))\n else:\n data[s] = dict(self.config.items(s))\n else:\n data = dict(self.config.items(section))\n return data", "docstring": "Returns a dictionary which contains the current config. 
If a section is setted,\n only will returns the section config\n\nArgs:\n section (str): (Optional) Section name.\n\nReturns:\n dict: Representation of current config", "source": "codesearchnet_filtered"} -{"code": "def get_intersection(self, range_):\n result = []\n for entry in self.entries:\n (package, value) = entry\n if (value is None):\n continue\n if (package.version not in range_):\n continue\n if isinstance(value, list):\n variants = value\n entry_ = _PackageEntry(package, variants, self.solver)\n result.append(entry_)\n continue\n if self.solver.package_filter:\n rule = self.solver.package_filter.excludes(package)\n if rule:\n if config.debug_package_exclusions:\n print_debug((\"Package '%s' was excluded by rule '%s'\" % (package.qualified_name, str(rule))))\n entry[1] = None\n continue\n if self.solver.package_load_callback:\n self.solver.package_load_callback(package)\n variants_ = []\n for var in package.iter_variants():\n variant = PackageVariant(var, self.solver.building)\n variants_.append(variant)\n entry[1] = variants_\n entry_ = _PackageEntry(package, variants_, self.solver)\n result.append(entry_)\n return (result or None)", "docstring": "Get a list of variants that intersect with the given range.\n\nArgs:\n range_ (`VersionRange`): Package version range.\n\nReturns:\n List of `_PackageEntry` objects.", "source": "codesearchnet_filtered"} -{"code": "def launch_run(self, command, project=None, entity=None, run_id=None):\n query = gql('\\n mutation launchRun(\\n $entity: String\\n $model: String\\n $runId: String\\n $image: String\\n $command: String\\n $patch: String\\n $cwd: String\\n $datasets: [String]\\n ) {\\n launchRun(input: {id: $runId, entityName: $entity, patch: $patch, modelName: $model,\\n image: $image, command: $command, datasets: $datasets, cwd: $cwd}) {\\n podName\\n status\\n runId\\n }\\n }\\n ')\n patch = BytesIO()\n if self.git.dirty:\n self.git.repo.git.execute(['git', 'diff'], output_stream=patch)\n patch.seek(0)\n cwd = '.'\n if self.git.enabled:\n cwd = (cwd + os.getcwd().replace(self.git.repo.working_dir, ''))\n return self.gql(query, variable_values={'entity': (entity or self.settings('entity')), 'model': (project or self.settings('project')), 'command': command, 'runId': run_id, 'patch': patch.read().decode('utf8'), 'cwd': cwd})", "docstring": "Launch a run in the cloud.\n\nArgs:\n command (str): The command to run\n program (str): The file to run\n project (str): The project to scope the runs to\n entity (str, optional): The entity to scope this project to. Defaults to public models\n run_id (str, optional): The run_id to scope to\n\nReturns:\n [{\"podName\",\"status\"}]", "source": "codesearchnet_filtered"} -{"code": "def tokens(self, tokenset='internal'):\n toks = self.get('tokens', {}).get(tokenset)\n if (toks is not None):\n if isinstance(toks, stringtypes):\n toks = YyTokenLattice.from_string(toks)\n elif isinstance(toks, Sequence):\n toks = YyTokenLattice.from_list(toks)\n return toks", "docstring": "Deserialize and return a YyTokenLattice object for the\n initial or internal token set, if provided, from the YY\n format or the JSON-formatted data; otherwise return the\n original string.\n\nArgs:\n tokenset (str): return `'initial'` or `'internal'` tokens\n (default: `'internal'`)\n\nReturns:\n :class:`YyTokenLattice`", "source": "codesearchnet_filtered"} -{"code": "def _process_rules(self, rules):\n cidr = []\n non_cidr = []\n for rule in rules:\n if ('.' 
in rule['app']):\n self.log.debug('Custom CIDR rule: %s', rule)\n self._validate_cidr(rule)\n cidr.append(rule)\n else:\n self.log.debug('SG reference rule: %s', rule)\n non_cidr.append(rule)\n self.log.debug('Custom CIDR rules: %s', cidr)\n self.log.debug('SG reference rules: %s', non_cidr)\n return (non_cidr, cidr)", "docstring": "Process rules into cidr and non-cidr lists.\n\nArgs:\n rules (list): Allowed Security Group ports and protocols.\n\nReturns:\n (list, list): Security Group reference rules and custom CIDR rules.", "source": "codesearchnet_filtered"} -{"code": "def _FormatServiceText(self, service):\n string_segments = [service.name, '\\tImage Path = {0:s}'.format(service.image_path), '\\tService Type = {0:s}'.format(service.HumanReadableType()), '\\tStart Type = {0:s}'.format(service.HumanReadableStartType()), '\\tService Dll = {0:s}'.format(service.service_dll), '\\tObject Name = {0:s}'.format(service.object_name), '\\tSources:']\n for source in service.sources:\n string_segments.append('\\t\\t{0:s}:{1:s}'.format(source[0], source[1]))\n return '\\n'.join(string_segments)", "docstring": "Produces a human readable multi-line string representing the service.\n\nArgs:\n service (WindowsService): service to format.\n\nReturns:\n str: human readable representation of a Windows Service.", "source": "codesearchnet_filtered"} -{"code": "def open_street_map_geoloc_link(data):\n if isinstance(data, str):\n lat_lon = ip_geoloc(data)\n if (lat_lon is None):\n return ''\n (lat, lon) = lat_lon\n else:\n (lat, lon) = data\n return ('https://www.openstreetmap.org/search?query=%s%%2C%s#map=7/%s/%s' % (lat, lon, lat, lon))", "docstring": "Get a link to open street map pointing on this IP's geolocation.\n\nArgs:\n data (str/tuple): IP address or (latitude, longitude).\n\nReturns:\n str: a link to open street map pointing on this IP's geolocation.", "source": "codesearchnet_filtered"} -{"code": "def sample(self, nmr_samples, burnin=0, thinning=1):\n if ((not thinning) or (thinning < 1)):\n thinning = 1\n if ((not burnin) or (burnin < 0)):\n burnin = 0\n max_samples_per_batch = max((1000 // thinning), 100)\n with self._logging(nmr_samples, burnin, thinning):\n if (burnin > 0):\n for (batch_start, batch_end) in split_in_batches(burnin, max_samples_per_batch):\n self._sample((batch_end - batch_start), return_output=False)\n if (nmr_samples > 0):\n outputs = []\n for (batch_start, batch_end) in split_in_batches(nmr_samples, max_samples_per_batch):\n outputs.append(self._sample((batch_end - batch_start), thinning=thinning))\n return SimpleSampleOutput(*[np.concatenate([o[ind] for o in outputs], axis=(- 1)) for ind in range(3)])", "docstring": "Take additional samples from the given likelihood and prior, using this sampler.\n\n This method can be called multiple times in which the sample state is stored in between.\n\nArgs:\n nmr_samples (int): the number of samples to return\n burnin (int): the number of samples to discard before returning samples\n thinning (int): how many sample we wait before storing a new one. This will draw extra samples such that\n the total number of samples generated is ``nmr_samples * (thinning)`` and the number of samples\n stored is ``nmr_samples``. 
If set to one or lower we store every sample after the burn in.\n\nReturns:\n SamplingOutput: the sample output object", "source": "codesearchnet_filtered"} -{"code": "def get_excitation_spectrum(self, width=0.1, npoints=2000):\n roots = self.parse_tddft()\n data = roots['singlet']\n en = np.array([d['energy'] for d in data])\n osc = np.array([d['osc_strength'] for d in data])\n epad = (20.0 * width)\n emin = (en[0] - epad)\n emax = (en[(- 1)] + epad)\n de = ((emax - emin) / npoints)\n if (width < (2 * de)):\n width = (2 * de)\n energies = [(emin + (ie * de)) for ie in range(npoints)]\n cutoff = (20.0 * width)\n gamma = (0.5 * width)\n gamma_sqrd = (gamma * gamma)\n de = ((energies[(- 1)] - energies[0]) / (len(energies) - 1))\n prefac = ((gamma / np.pi) * de)\n x = []\n y = []\n for energy in energies:\n xx0 = (energy - en)\n stot = (osc / ((xx0 * xx0) + gamma_sqrd))\n t = np.sum(stot[(np.abs(xx0) <= cutoff)])\n x.append(energy)\n y.append((t * prefac))\n return ExcitationSpectrum(x, y)", "docstring": "Generate an excitation spectra from the singlet roots of TDDFT\n calculations.\n\nArgs:\n width (float): Width for Gaussian smearing.\n npoints (int): Number of energy points. More points => smoother\n curve.\n\nReturns:\n (ExcitationSpectrum) which can be plotted using\n pymatgen.vis.plotters.SpectrumPlotter.", "source": "codesearchnet_filtered"} -{"code": "def get(self, value):\n config = self.get_block(('vlan %s' % value))\n if (not config):\n return None\n response = dict(vlan_id=value)\n response.update(self._parse_name(config))\n response.update(self._parse_state(config))\n response.update(self._parse_trunk_groups(config))\n return response", "docstring": "Returns the VLAN configuration as a resource dict.\n\nArgs:\n vid (string): The vlan identifier to retrieve from the\n running configuration. Valid values are in the range\n of 1 to 4095\n\nReturns:\n A Python dict object containing the VLAN attributes as\n key/value pairs.", "source": "codesearchnet_filtered"} -{"code": "def _parse_symbol(self, sym):\n special = {'Hw': 'H', 'Ow': 'O', 'Wat': 'O', 'wat': 'O', 'OH': '', 'OH2': '', 'NO3': 'N'}\n parsed_sym = None\n m_sp = re.match('|'.join(special.keys()), sym)\n if m_sp:\n parsed_sym = special[m_sp.group()]\n elif Element.is_valid_symbol(sym[:2].title()):\n parsed_sym = sym[:2].title()\n elif Element.is_valid_symbol(sym[0].upper()):\n parsed_sym = sym[0].upper()\n else:\n m = re.match('w?[A-Z][a-z]*', sym)\n if m:\n parsed_sym = m.group()\n if ((parsed_sym is not None) and (m_sp or (not re.match('{}\\\\d*'.format(parsed_sym), sym)))):\n msg = '{} parsed as {}'.format(sym, parsed_sym)\n warnings.warn(msg)\n self.errors.append(msg)\n return parsed_sym", "docstring": "Parse a string with a symbol to extract a string representing an element.\n\nArgs:\n sym (str): A symbol to be parsed.\n\nReturns:\n A string with the parsed symbol. 
None if no parsing was possible.", "source": "codesearchnet_filtered"} -{"code": "def detect_scheme(filename):\n logger = logging.getLogger(__name__)\n logger.info('Detecting partitioning scheme')\n with open(filename, 'rb') as f:\n f.seek(mbr.MBR_SIG_OFFSET)\n data = f.read(mbr.MBR_SIG_SIZE)\n signature = struct.unpack('>> from rawdisk.scheme.common import *\n >>> scheme = detect_scheme('/dev/disk1')\n >>> if scheme == PartitionScheme.SCHEME_MBR:\n >>> <...>", "source": "codesearchnet_filtered"} -{"code": "def drp(points, epsilon):\n dmax = 0.0\n index = 0\n for i in range(1, (len(points) - 1)):\n dist = point_line_distance(points[i], points[0], points[(- 1)])\n if (dist > dmax):\n index = i\n dmax = dist\n if (dmax > epsilon):\n return (drp(points[:(index + 1)], epsilon)[:(- 1)] + drp(points[index:], epsilon))\n else:\n return [points[0], points[(- 1)]]", "docstring": "Douglas ramer peucker\n\n Based on https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm\n\nArgs:\n points (:obj:`list` of :obj:`Point`)\n epsilon (float): drp threshold\n\nReturns:\n :obj:`list` of :obj:`Point`", "source": "codesearchnet_filtered"} -{"code": "def get_datasets(self, query='*:*', **kwargs):\n return hdx.data.dataset.Dataset.search_in_hdx(query=query, configuration=self.configuration, fq=('organization:%s' % self.data['name']), **kwargs)", "docstring": "Get list of datasets in organization\n\nArgs:\n query (str): Restrict datasets returned to this query (in Solr format). Defaults to '*:*'.\n **kwargs: See below\n sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.\n rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).\n start (int): Offset in the complete result for where the set of returned datasets should begin\n facet (string): Whether to enable faceted results. Default to True.\n facet.mincount (int): Minimum counts for facet fields should be included in the results\n facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.\n facet.field (List[str]): Fields to facet upon. Default is empty.\n use_default_schema (bool): Use default package schema instead of custom schema. 
Defaults to False.\n\nReturns:\n List[Dataset]: List of datasets in organization", "source": "codesearchnet_filtered"} -{"code": "def find(cls, session, resource_id, include=None):\n url = session._build_url(cls._resource_path(), resource_id)\n params = build_request_include(include, None)\n process = cls._mk_one(session, include=include)\n return session.get(url, CB.json(200, process), params=params)", "docstring": "Retrieve a single resource.\n\n This should only be called from sub-classes.\n\nArgs:\n session(Session): The session to find the resource in\n\n resource_id: The ``id`` for the resource to look up\n\n Keyword Args:\n\n include: Resource classes to include\n\nReturns:\n Resource: An instance of a resource, or throws a\n :class:`NotFoundError` if the resource can not be found.", "source": "codesearchnet_filtered"} -{"code": "def _GetDayOfYear(self, year, month, day_of_month):\n if (month not in range(1, 13)):\n raise ValueError('Month value out of bounds.')\n days_per_month = self._GetDaysPerMonth(year, month)\n if ((day_of_month < 1) or (day_of_month > days_per_month)):\n raise ValueError('Day of month value out of bounds.')\n day_of_year = day_of_month\n for past_month in range(1, month):\n day_of_year += self._GetDaysPerMonth(year, past_month)\n return day_of_year", "docstring": "Retrieves the day of the year for a specific day of a month in a year.\n\nArgs:\n year (int): year e.g. 1970.\n month (int): month, where 1 represents January.\n day_of_month (int): day of the month, where 1 represents the first day.\n\nReturns:\n int: day of year.\n\nRaises:\n ValueError: if the month or day of month value is out of bounds.", "source": "codesearchnet_filtered"} -{"code": "def inspect_task(self, task):\n url = self._url('/tasks/{0}', task)\n return self._result(self._get(url), True)", "docstring": "Retrieve information about a task.\n\nArgs:\n task (str): Task ID\n\nReturns:\n (dict): Information about the task.\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "codesearchnet_filtered"} -{"code": "def _get_target(self, target):\n depth = (target.count('.') + 1)\n parts = target.split('.', 1)\n for m in self.modules:\n if (parts[0] == m.name):\n if (depth < 3):\n return m\n for p in self.packages:\n if (parts[0] == p.name):\n if (depth == 1):\n return p\n target = p._get_target(parts[1])\n if target:\n return target\n if (depth < 3):\n return p\n return None", "docstring": "Get the Package or Module related to given target.\n\nArgs:\n target (str): target to find.\n\nReturns:\n Package/Module: package containing target or corresponding module.", "source": "codesearchnet_filtered"} -{"code": "def getsource(classorfunc):\n if _isbuiltin(classorfunc):\n return ''\n try:\n source = inspect.getsource(classorfunc)\n except TypeError:\n source = getsourcefallback(classorfunc)\n declaration = []\n lines = source.splitlines()\n if (PY2 and (not isinstance(source, unicode))):\n encoding = detect_encoding(iter(lines).next)[0]\n sourcelines = (s.decode(encoding) for s in lines)\n else:\n sourcelines = iter(lines)\n found_keyword = False\n for line in sourcelines:\n words = line.split()\n if (not words):\n continue\n if (words[0] in ('def', 'class')):\n found_keyword = True\n if found_keyword:\n cind = line.find(':')\n if (cind > 0):\n declaration.append(line[:(cind + 1)])\n after_decl = line[(cind + 1):].strip()\n break\n else:\n declaration.append(line)\n bodylines = list(sourcelines)\n if (type(classorfunc) == type):\n cls = classorfunc\n 
base_imports = {}\n for base in cls.__bases__:\n if ((base.__name__ == 'object') and (base.__module__ == 'builtins')):\n continue\n if (base in base_imports):\n continue\n if (base.__module__ == '__main__'):\n continue\n base_imports[base] = ('from %s import %s' % (base.__module__, base.__name__))\n cind = declaration[0].index('class ')\n declstring = (declaration[0][:cind] + ('class %s(%s):%s' % (cls.__name__, ','.join([base.__name__ for base in cls.__bases__]), after_decl)))\n declaration = [impstring for (c, impstring) in base_imports.items() if (c.__module__ != '__builtin__')]\n declaration.append(declstring)\n else:\n declaration[(- 1)] += after_decl\n return '\\n'.join((declaration + bodylines))", "docstring": "Return the source code for a class or function.\n\nNote:\n Returned source will not include any decorators for the object.\n This will only return the explicit declaration of the object, not any dependencies\n\nArgs:\n classorfunc (type or function): the object to get the source code for\n\nReturns:\n str: text of source code (without any decorators). Note: in python 2, this returns unicode", "source": "codesearchnet_filtered"} -{"code": "def get_angle_degrees(self, indices):\n coords = ['x', 'y', 'z']\n if isinstance(indices, pd.DataFrame):\n i_pos = self.loc[(indices.index, coords)].values\n b_pos = self.loc[(indices.loc[(:, 'b')], coords)].values\n a_pos = self.loc[(indices.loc[(:, 'a')], coords)].values\n else:\n indices = np.array(indices)\n if (len(indices.shape) == 1):\n indices = indices[(None, :)]\n i_pos = self.loc[(indices[(:, 0)], coords)].values\n b_pos = self.loc[(indices[(:, 1)], coords)].values\n a_pos = self.loc[(indices[(:, 2)], coords)].values\n (BI, BA) = ((i_pos - b_pos), (a_pos - b_pos))\n (bi, ba) = [(v / np.linalg.norm(v, axis=1)[(:, None)]) for v in (BI, BA)]\n dot_product = np.sum((bi * ba), axis=1)\n dot_product[(dot_product > 1)] = 1\n dot_product[(dot_product < (- 1))] = (- 1)\n angles = np.degrees(np.arccos(dot_product))\n return angles", "docstring": "Return the angles between given atoms.\n\n Calculates the angle in degrees between the atoms with\n indices ``i, b, a``.\n The indices can be given in three ways:\n\n * As simple list ``[i, b, a]``\n * As list of lists: ``[[i1, b1, a1], [i2, b2, a2]...]``\n * As :class:`pd.DataFrame` where ``i`` is taken from the index and\n ``b`` and ``a`` from the respective columns ``'b'`` and ``'a'``.\n\nArgs:\n indices (list):\n\nReturns:\n :class:`numpy.ndarray`: Vector of angles in degrees.", "source": "codesearchnet_filtered"} -{"code": "def __Build(leaves):\n if (len(leaves) < 1):\n raise Exception('Leaves must have length')\n if (len(leaves) == 1):\n return leaves[0]\n num_parents = int(((len(leaves) + 1) / 2))\n parents = [MerkleTreeNode() for i in range(0, num_parents)]\n for i in range(0, num_parents):\n node = parents[i]\n node.LeftChild = leaves[(i * 2)]\n leaves[(i * 2)].Parent = node\n if (((i * 2) + 1) == len(leaves)):\n node.RightChild = node.LeftChild\n else:\n node.RightChild = leaves[((i * 2) + 1)]\n leaves[((i * 2) + 1)].Parent = node\n hasharray = bytearray((node.LeftChild.Hash.ToArray() + node.RightChild.Hash.ToArray()))\n node.Hash = UInt256(data=Crypto.Hash256(hasharray))\n return MerkleTree.__Build(parents)", "docstring": "Build the merkle tree.\n\nArgs:\n leaves (list): items are of type MerkleTreeNode.\n\nReturns:\n MerkleTreeNode: the root node.", "source": "codesearchnet_filtered"} -{"code": "def OpenFileEntry(cls, path_spec_object, resolver_context=None):\n file_system = 
cls.OpenFileSystem(path_spec_object, resolver_context=resolver_context)\n if (resolver_context is None):\n resolver_context = cls._resolver_context\n file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)\n resolver_context.ReleaseFileSystem(file_system)\n return file_entry", "docstring": "Opens a file entry object defined by path specification.\n\nArgs:\n path_spec_object (PathSpec): path specification.\n resolver_context (Optional[Context]): resolver context, where None\n represents the built in context which is not multi process safe.\n\nReturns:\n FileEntry: file entry or None if the path specification could not be\n resolved.", "source": "codesearchnet_filtered"} -{"code": "def find_replace(obj, find, replace):\n try:\n if isinstance(obj, dict):\n return {find_replace(key, find, replace): find_replace(value, find, replace) for (key, value) in obj.items()}\n elif isinstance(obj, list):\n return [find_replace(element, find, replace) for element in obj]\n elif (obj == find):\n return unicode_convert(replace)\n else:\n try:\n return unicode_convert(find_replace_string(obj, find, replace))\n except:\n return unicode_convert(obj)\n except:\n (line, filename, synerror) = trace()\n raise ArcRestHelperError({'function': 'find_replace', 'line': line, 'filename': filename, 'synerror': synerror})\n finally:\n pass", "docstring": "Searches an object and performs a find and replace.\n\nArgs:\n obj (object): The object to iterate and find/replace.\n find (str): The string to search for.\n replace (str): The string to replace with.\n\nReturns:\n object: The object with replaced strings.", "source": "codesearchnet_filtered"} -{"code": "def ensuredir(dpath, mode=1023, verbose=None):\n if (verbose is None):\n verbose = 0\n if isinstance(dpath, (list, tuple)):\n dpath = join(*dpath)\n if (not exists(dpath)):\n if verbose:\n print(('Ensuring new directory (%r)' % dpath))\n if (sys.version_info.major == 2):\n os.makedirs(normpath(dpath), mode=mode)\n else:\n os.makedirs(normpath(dpath), mode=mode, exist_ok=True)\n elif verbose:\n print(('Ensuring existing directory (%r)' % dpath))\n return dpath", "docstring": "r\"\"\"\n Ensures that directory will exist. Creates new dir with sticky bits by\n default\n\nArgs:\n dpath (PathLike): dir to ensure. Can also be a tuple to send to join\n mode (int): octal mode of directory (default 0o1777)\n verbose (int): verbosity (default 0)\n\nReturns:\n PathLike: path: the ensured directory\n\nNote:\n This function is not thread-safe in Python2\n\nExample:\n >>> from ubelt.util_platform import * # NOQA\n >>> import ubelt as ub\n >>> cache_dpath = ub.ensure_app_cache_dir('ubelt')\n >>> dpath = join(cache_dpath, 'ensuredir')\n >>> if exists(dpath):\n ... 
os.rmdir(dpath)\n >>> assert not exists(dpath)\n >>> ub.ensuredir(dpath)\n >>> assert exists(dpath)\n >>> os.rmdir(dpath)", "source": "codesearchnet_filtered"} -{"code": "def filter_string(self, word):\n segs = [m.group(0) for m in self.seg_regex.finditer(word)]\n return ''.join(segs)", "docstring": "Return a string like the input but containing only legal IPA segments\n\nArgs:\n word (unicode): input string to be filtered\n\nReturns:\n unicode: string identical to `word` but with invalid IPA segments\n absent", "source": "codesearchnet_filtered"} -{"code": "def process_arguments(self, func, args):\n pos_args = []\n kw_args = {}\n while (len(args) > 0):\n if (func.metadata.spec_filled(pos_args, kw_args) and (not self._is_flag(args[0]))):\n break\n arg = args.pop(0)\n if (arg == '--'):\n break\n elif self._is_flag(arg):\n arg_value = None\n arg_name = None\n if (len(arg) == 2):\n arg_name = func.metadata.match_shortname(arg[1:], filled_args=pos_args)\n else:\n if (not arg.startswith('--')):\n raise ArgumentError('Invalid method of specifying keyword argument that did not start with --', argument=arg)\n arg = arg[2:]\n if ('=' in arg):\n (arg, arg_value) = arg.split('=', 1)\n arg_name = func.metadata.match_shortname(arg, filled_args=pos_args)\n arg_type = func.metadata.param_type(arg_name)\n if (arg_type is None):\n raise ArgumentError('Attempting to set a parameter from command line that does not have type information', argument=arg_name)\n if (arg_value is None):\n arg_value = self._extract_arg_value(arg_name, arg_type, args)\n kw_args[arg_name] = arg_value\n else:\n pos_args.append(arg)\n if ((len(args) > 0) and (args[0] == '--')):\n args.pop(0)\n return (pos_args, kw_args, args)", "docstring": "Process arguments from the command line into positional and kw args.\n\n Arguments are consumed until the argument spec for the function is filled\n or a -- is found or there are no more arguments. Keyword arguments can be\n specified using --field=value, -f value or --field value. Positional\n arguments are specified just on the command line itself.\n\n If a keyword argument (`field`) is a boolean, it can be set to True by just passing\n --field or -f without needing to explicitly pass True unless this would cause\n ambiguity in parsing since the next expected positional argument is also a boolean\n or a string.\n\nArgs:\n func (callable): A function previously annotated with type information\n args (list): A list of all of the potential arguments to this function.\n\nReturns:\n (args, kw_args, unused args): A tuple with a list of args, a dict of\n keyword args and a list of any unused args that were not processed.", "source": "codesearchnet_filtered"} -{"code": "def check(self, solution):\n return self.func(*(solution[v] for v in self.variables))", "docstring": "Check that a solution satisfies the constraint.\n\nArgs:\n solution (container):\n An assignment for the variables in the constraint.\n\nReturns:\n bool: True if the solution satisfies the constraint; otherwise False.\n\nExample:\n This example creates a constraint that :math:`a \\\\ne b` on binary variables\n and tests it for two candidate solutions, with additional unconstrained\n variable c.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)],\n ... 
['a', 'b'], dwavebinarycsp.BINARY)\n >>> solution = {'a': 1, 'b': 1, 'c': 0}\n >>> const.check(solution)\n False\n >>> solution = {'a': 1, 'b': 0, 'c': 0}\n >>> const.check(solution)\n True", "source": "codesearchnet_filtered"} -{"code": "def get_oauth_data(self, code, client_id, client_secret, state):\n request = self._get_request()\n response = request.post(self.OAUTH_TOKEN_URL, {'state': state, 'code': code, 'grant_type': 'authorization_code', 'client_id': client_id, 'client_secret': client_secret})\n return HSAccessTokenAuth.from_response(response)", "docstring": "Get Oauth data from HelloSign\n\nArgs:\n code (str): Code returned by HelloSign for our callback url\n\n client_id (str): Client id of the associated app\n\n client_secret (str): Secret token of the associated app\n\nReturns:\n A HSAccessTokenAuth object", "source": "codesearchnet_filtered"} -{"code": "def has_axis(self, axis):\n if (self.type != EventType.POINTER_AXIS):\n raise AttributeError(_wrong_meth.format(self.type))\n return self._libinput.libinput_event_pointer_has_axis(self._handle, axis)", "docstring": "Check if the event has a valid value for the given axis.\n\n If this method returns True for an axis and :meth:`get_axis_value`\n returns a value of 0, the event is a scroll stop event.\n\n For pointer events that are not of type\n :attr:`~libinput.constant.EventType.POINTER_AXIS`, this method raises\n :exc:`AttributeError`.\n\nArgs:\n axis (~libinput.constant.PointerAxis): The axis to check.\n\nReturns:\n bool: True if this event contains a value for this axis.\n\nRaises:\n AttributeError", "source": "codesearchnet_filtered"} -{"code": "def archs(self, as_list=False):\n archs = self.arch_list().split('/')\n if as_list:\n return archs\n return set(archs)", "docstring": "Return all of the architectures for this target.\n\nArgs:\n as_list (bool): Return a list instead of the default set object.\n\nReturns:\n set or list: All of the architectures used in this TargetSettings object.", "source": "codesearchnet_filtered"} -{"code": "def to_microseconds(value):\n if (not value.tzinfo):\n value = value.replace(tzinfo=pytz.utc)\n value = value.astimezone(pytz.utc)\n return (int((calendar.timegm(value.timetuple()) * 1000000.0)) + value.microsecond)", "docstring": "Convert a datetime to microseconds since the unix epoch.\n\nArgs:\n value (datetime.datetime): The datetime to covert.\n\nReturns:\n int: Microseconds since the unix epoch.", "source": "codesearchnet_filtered"} -{"code": "def get_value(self, tau):\n tau = np.asarray(tau)\n (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients\n k = get_kernel_value(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, tau.flatten())\n return np.asarray(k).reshape(tau.shape)", "docstring": "Compute the value of the term for an array of lags\n\nArgs:\n tau (array[...]): An array of lags where the term should be\n evaluated.\n\nReturns:\n The value of the term for each ``tau``. 
This will have the same\n shape as ``tau``.", "source": "codesearchnet_filtered"} -{"code": "def remove_trunk_group(self, intf, value):\n string = 'no switchport trunk group {}'.format(value)\n return self.configure_interface(intf, string)", "docstring": "Removes a specified trunk group to the interface\n\nArgs:\n intf (str): The interface name to remove the trunk group from\n value (str): The trunk group value\n\nReturns:\n True if the operation as successfully applied otherwise false", "source": "codesearchnet_filtered"} -{"code": "def check_completion(task, mark_incomplete=False, clear=False, return_stats=False):\n to_clear = dict()\n (is_complete, stats) = _check_completion(task, mark_incomplete=mark_incomplete, clear=clear, stats={}, visited=dict(), to_clear=to_clear)\n while to_clear:\n found_clearable_task = False\n for task_id in list(to_clear.keys()):\n v = to_clear[task_id]\n if (not v['required_by']):\n found_clearable_task = True\n task = v['task']\n if isinstance(task, ORMTask):\n task.mark_incomplete()\n task.clear()\n _increment_stats(stats, 'Cleared')\n config.logger.info(('Cleared task: ' + task_id))\n else:\n config.logger.info(('Cannot clear task, not an ORMTask: ' + task_id))\n del to_clear[task_id]\n for w in to_clear.values():\n w['required_by'].discard(task_id)\n if (not found_clearable_task):\n raise RuntimeError('Error in recursive task clearing, no clearable task found')\n config.logger.info(('Task completion checking, summary:\\n' + str(stats)))\n if return_stats:\n return (is_complete, stats)\n else:\n return is_complete", "docstring": "Recursively check if a task and all its requirements are complete\n\nArgs:\n task (derived from luigi.Task): Task to check completion for; check everything 'downstream'\n from that task.\n\n mark_incomplete (bool): If ``True`` set any task as incomplete for which a requirement\n is found to be incomplete (checked recursively).\n This works only for tasks derived from :class:`ORMTask`.\n\n clear (bool): If ``True``, call the :func:`clear()` method of any task for which a requirement\n is found to be incomplete (checked recursively). This implies ``mark_incomplete = True``.\n This works only for tasks derived from :class:`ORMTask`.\n\n return_stats (bool): If ``True``, return task checking statistics in addition to completion status\n\nReturns:\n bool: ``True`` if the task, all its requirements and (recursively) all their requirements\n are complete, ``False`` otherwise.", "source": "codesearchnet_filtered"} -{"code": "def swo_num_bytes(self):\n res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.GET_NUM_BYTES, 0)\n if (res < 0):\n raise errors.JLinkException(res)\n return res", "docstring": "Retrives the number of bytes in the SWO buffer.\n\nArgs:\n self (JLink): the ``JLink`` instance\n\nReturns:\n Number of bytes in the SWO buffer.\n\nRaises:\n JLinkException: on error", "source": "codesearchnet_filtered"} -{"code": "def upload_predictions(self, file_path, tournament=1):\n self.logger.info('uploading predictions...')\n auth_query = '\\n query($filename: String!\\n $tournament: Int!) 
{\\n submission_upload_auth(filename: $filename\\n tournament: $tournament) {\\n filename\\n url\\n }\\n }\\n '\n arguments = {'filename': os.path.basename(file_path), 'tournament': tournament}\n submission_resp = self.raw_query(auth_query, arguments, authorization=True)\n submission_auth = submission_resp['data']['submission_upload_auth']\n with open(file_path, 'rb') as fh:\n requests.put(submission_auth['url'], data=fh.read())\n create_query = '\\n mutation($filename: String!\\n $tournament: Int!) {\\n create_submission(filename: $filename\\n tournament: $tournament) {\\n id\\n }\\n }\\n '\n arguments = {'filename': submission_auth['filename'], 'tournament': tournament}\n create = self.raw_query(create_query, arguments, authorization=True)\n self.submission_id = create['data']['create_submission']['id']\n return self.submission_id", "docstring": "Upload predictions from file.\n\nArgs:\n file_path (str): CSV file with predictions that will get uploaded\n tournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\n str: submission_id\n\nExample:\n >>> api = NumerAPI(secret_key=\"..\", public_id=\"..\")\n >>> api.upload_predictions()\n '93c46857-fed9-4594-981e-82db2b358daf'", "source": "codesearchnet_filtered"} -{"code": "def get_full_url(self, url):\n request = Request('GET', url)\n preparedrequest = self.session.prepare_request(request)\n return preparedrequest.url", "docstring": "Get full url including any additional parameters\n\nArgs:\n url (str): URL for which to get full url\n\nReturns:\n str: Full url including any additional parameters", "source": "codesearchnet_filtered"} -{"code": "def HasOutputClass(cls, name):\n if (not isinstance(name, py2to3.STRING_TYPES)):\n return False\n return (name.lower() in cls._output_classes)", "docstring": "Determines if a specific output class is registered with the manager.\n\nArgs:\n name (str): name of the output module.\n\nReturns:\n bool: True if the output class is registered.", "source": "codesearchnet_filtered"} -{"code": "def confirm_cw_log(self, account, region, vpcname):\n try:\n cw = self.session.client('logs', region)\n token = None\n log_groups = []\n while True:\n result = (cw.describe_log_groups() if (not token) else cw.describe_log_groups(nextToken=token))\n token = result.get('nextToken')\n log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])])\n if (not token):\n break\n if (vpcname not in log_groups):\n cw.create_log_group(logGroupName=vpcname)\n cw_vpc = VPC.get(vpcname)\n cw_vpc.set_property('vpc_flow_logs_log_group', vpcname)\n self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname))\n auditlog(event='vpc_flow_logs.create_cw_log_group', actor=self.ns, data={'account': account.account_name, 'region': region, 'log_group_name': vpcname, 'vpc': vpcname})\n return True\n except Exception:\n self.log.exception('Failed creating log group for {}/{}/{}.'.format(account, region, vpcname))", "docstring": "Create a new CloudWatch log group based on the VPC Name if none exists. Returns `True` if succesful\n\nArgs:\n account (:obj:`Account`): Account to create the log group in\n region (`str`): Region to create the log group in\n vpcname (`str`): Name of the VPC the log group is fow\n\nReturns:\n `bool`", "source": "codesearchnet_filtered"} -{"code": "def should_update(stack):\n if stack.locked:\n if (not stack.force):\n logger.debug('Stack %s locked and not in --force list. 
Refusing to update.', stack.name)\n return False\n else:\n logger.debug('Stack %s locked, but is in --force list.', stack.name)\n return True", "docstring": "Tests whether a stack should be submitted for updates to CF.\n\nArgs:\n stack (:class:`stacker.stack.Stack`): The stack object to check.\n\nReturns:\n bool: If the stack should be updated, return True.", "source": "codesearchnet_filtered"} -{"code": "def list_of_vars(arg_plot):\n lovs = [[[var for var in svars.split(',') if var] for svars in pvars.split('.') if svars] for pvars in arg_plot.split('-') if pvars]\n lovs = [[slov for slov in lov if slov] for lov in lovs if lov]\n return [lov for lov in lovs if lov]", "docstring": "Construct list of variables per plot.\n\nArgs:\n arg_plot (str): string with variable names separated with\n ``_`` (figures), ``.`` (subplots) and ``,`` (same subplot).\n\nReturns:\n three nested lists of str\n\n - variables on the same subplot;\n - subplots on the same figure;\n - figures.", "source": "codesearchnet_filtered"} -{"code": "def _frame_advance(self, action):\n self.controllers[0][:] = action\n _LIB.Step(self._env)", "docstring": "Advance a frame in the emulator with an action.\n\nArgs:\n action (byte): the action to press on the joy-pad\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def mkdir(self, path):\n self.__validate_storage_path(path, projects_allowed=False)\n parent_metadata = self.get_parent(path)\n self.api_client.create_folder(path.split('/')[(- 1)], parent_metadata['uuid'])", "docstring": "Create a folder in the storage service pointed by the given path.\n\nArgs:\n path (str): The path of the folder to be created\n\nReturns:\n None\n\nRaises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "source": "codesearchnet_filtered"} -{"code": "def get_event(self, event_name, event_history=None):\n if (event_history is None):\n event_history = (event_name + '_history')\n return self._db.rpoplpush(event_name, event_history)", "docstring": "Get an event from the database.\n\n Gets an event from the named event list removing the event and\n adding it to the event history.\n\nArgs:\n event_name (str): Event list key.\n event_history (str, optional): Event history list.\n\nReturns:\n str: string representation of the event object", "source": "codesearchnet_filtered"} -{"code": "def download(self, folder=None):\n url = self.data.get('url', None)\n if (not url):\n raise HDXError('No URL to download!')\n logger.debug(('Downloading %s' % url))\n filename = self.data['name']\n format = ('.%s' % self.data['format'])\n if (format not in filename):\n filename = ('%s%s' % (filename, format))\n with Download(full_agent=self.configuration.get_user_agent()) as downloader:\n path = downloader.download_file(url, folder, filename)\n return (url, path)", "docstring": "Download resource store to provided folder or temporary folder if no folder supplied\n\nArgs:\n folder (Optional[str]): Folder to download resource to. 
Defaults to None.\n\nReturns:\n Tuple[str, str]: (URL downloaded, Path to downloaded file)", "source": "codesearchnet_filtered"} -{"code": "def run(coro, loop=None):\n loop = (loop or asyncio.get_event_loop())\n return loop.run_until_complete(coro)", "docstring": "Convenient shortcut alias to ``loop.run_until_complete``.\n\nArgs:\n coro (coroutine): coroutine object to schedule.\n loop (asyncio.BaseEventLoop): optional event loop to use.\n Defaults to: ``asyncio.get_event_loop()``.\n\nReturns:\n mixed: returned value by coroutine.\n\n Usage::\n\n async def mul_2(num):\n return num * 2\n\n paco.run(mul_2(4))\n # => 8", "source": "codesearchnet_filtered"} -{"code": "def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):\n parsed_date = self._parse_date(dataset_date, date_format)\n if (dataset_end_date is None):\n self.set_dataset_date_from_datetime(parsed_date)\n else:\n parsed_end_date = self._parse_date(dataset_end_date, date_format)\n self.set_dataset_date_from_datetime(parsed_date, parsed_end_date)", "docstring": "Set dataset date from string using specified format. If no format is supplied, the function will guess.\n For unambiguous formats, this should be fine.\n\nArgs:\n dataset_date (str): Dataset date string\n dataset_end_date (Optional[str]): Dataset end date string\n date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def _preserve_bonds(self, sliced_cartesian, use_lookup=None):\n if (use_lookup is None):\n use_lookup = settings['defaults']['use_lookup']\n included_atoms_set = set(sliced_cartesian.index)\n assert included_atoms_set.issubset(set(self.index)), 'The sliced Cartesian has to be a subset of the bigger frame'\n bond_dic = self.get_bonds(use_lookup=use_lookup)\n new_atoms = set([])\n for atom in included_atoms_set:\n new_atoms = (new_atoms | bond_dic[atom])\n new_atoms = (new_atoms - included_atoms_set)\n while (not (new_atoms == set([]))):\n index_of_interest = new_atoms.pop()\n included_atoms_set = (included_atoms_set | self.get_coordination_sphere(index_of_interest, n_sphere=float('inf'), only_surface=False, exclude=included_atoms_set, give_only_index=True, use_lookup=use_lookup))\n new_atoms = (new_atoms - included_atoms_set)\n molecule = self.loc[(included_atoms_set, :)]\n return molecule", "docstring": "Is called after cutting geometric shapes.\n\n If you want to change the rules how bonds are preserved, when\n applying e.g. :meth:`Cartesian.cut_sphere` this is the\n function you have to modify.\n It is recommended to inherit from the Cartesian class to\n tailor it for your project, instead of modifying the\n source code of ChemCoord.\n\nArgs:\n sliced_frame (Cartesian):\n use_lookup (bool): Use a lookup variable for\n :meth:`~chemcoord.Cartesian.get_bonds`. 
The default is\n specified in ``settings['defaults']['use_lookup']``\n\nReturns:\n Cartesian:", "source": "codesearchnet_filtered"} -{"code": "def make_timebar(progress=0, duration=0):\n duration_string = api_music.duration_to_string(duration)\n if (duration <= 0):\n return '---'\n time_counts = int(round(((progress / duration) * TIMEBAR_LENGTH)))\n if (time_counts > TIMEBAR_LENGTH):\n time_counts = TIMEBAR_LENGTH\n if (duration > 0):\n bar = ((('│' + (TIMEBAR_PCHAR * time_counts)) + (TIMEBAR_ECHAR * (TIMEBAR_LENGTH - time_counts))) + '│')\n time_bar = '{} {}'.format(bar, duration_string)\n else:\n time_bar = duration_string\n return time_bar", "docstring": "Makes a new time bar string\n\nArgs:\n progress: How far through the current song we are (in seconds)\n duration: The duration of the current song (in seconds)\n\nReturns:\n timebar (str): The time bar string", "source": "codesearchnet_filtered"} -{"code": "def from_text(cls, text, lexicon, required=None, first_only=True):\n component = lexicon.get_component(text, first_only=first_only)\n if (required and (required not in component)):\n return None\n else:\n return cls(component)", "docstring": "Generate a Component from a text string, using a Lexicon.\n\nArgs:\n text (str): The text string to parse.\n lexicon (Lexicon): The dictionary to use for the\n categories and lexemes.\n required (str): An attribute that we must have. If a required\n attribute is missing from the component, then None is returned.\n first_only (bool): Whether to only take the first\n match of a lexeme against the text string.\n\nReturns:\n Component: A Component object, or None if there was no\n must-have field.", "source": "codesearchnet_filtered"} -{"code": "def spawn_program(self, name, arguments=[], timeout=30, exclusive=False):\n logger.debug('Spawning program for interaction ...')\n if exclusive:\n kill_longrunning(self.config)\n return RunningProgram(self, name, arguments, timeout)", "docstring": "Spawns a program in the working directory.\n\n This method allows the interaction with the running program,\n based on the returned RunningProgram object.\n\nArgs:\n name (str): The name of the program to be executed.\n arguments (tuple): Command-line arguments for the program.\n timeout (int): The timeout for execution.\n exclusive (bool): Prevent parallel validation runs on the\n test machines, e.g. when doing performance\n measurements for submitted code.\n\nReturns:\n RunningProgram: An object representing the running program.", "source": "codesearchnet_filtered"} -{"code": "def push(self, repository, tag=None, stream=False, auth_config=None, decode=False):\n if (not tag):\n (repository, tag) = utils.parse_repository_tag(repository)\n (registry, repo_name) = auth.resolve_repository_name(repository)\n u = self._url('/images/{0}/push', repository)\n params = {'tag': tag}\n headers = {}\n if (auth_config is None):\n header = auth.get_config_header(self, registry)\n if header:\n headers['X-Registry-Auth'] = header\n else:\n log.debug('Sending supplied auth config')\n headers['X-Registry-Auth'] = auth.encode_header(auth_config)\n response = self._post_json(u, None, headers=headers, stream=stream, params=params)\n self._raise_for_status(response)\n if stream:\n return self._stream_helper(response, decode=decode)\n return self._result(response)", "docstring": "Push an image or a repository to the registry. 
Similar to the ``docker\n push`` command.\n\nArgs:\n repository (str): The repository to push to\n tag (str): An optional tag to push\n stream (bool): Stream the output as a blocking generator\n auth_config (dict): Override the credentials that are found in the\n config for this request. ``auth_config`` should contain the\n ``username`` and ``password`` keys to be valid.\n decode (bool): Decode the JSON data from the server into dicts.\n Only applies with ``stream=True``\n\nReturns:\n (generator or str): The output from the server.\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\nExample:\n >>> for line in cli.push('yourname/app', stream=True, decode=True):\n ... print(line)\n {'status': 'Pushing repository yourname/app (1 tags)'}\n {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}\n {'status': 'Image already pushed, skipping', 'progressDetail':{},\n 'id': '511136ea3c5a'}\n ...", "source": "codesearchnet_filtered"} -{"code": "def CheckSupportedFormat(cls, path, check_readable_only=False):\n try:\n connection = sqlite3.connect(path, detect_types=(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES))\n cursor = connection.cursor()\n query = 'SELECT * FROM metadata'\n cursor.execute(query)\n metadata_values = {row[0]: row[1] for row in cursor.fetchall()}\n cls._CheckStorageMetadata(metadata_values, check_readable_only=check_readable_only)\n connection.close()\n result = True\n except (IOError, sqlite3.DatabaseError):\n result = False\n return result", "docstring": "Checks if the storage file format is supported.\n\nArgs:\n path (str): path to the storage file.\n check_readable_only (Optional[bool]): whether the store should only be\n checked to see if it can be read. If False, the store will be checked\n to see if it can be read and written to.\n\nReturns:\n bool: True if the format is supported.", "source": "codesearchnet_filtered"} -{"code": "def CreatePrecisionHelper(cls, precision):\n precision_helper_class = cls._PRECISION_CLASSES.get(precision, None)\n if (not precision_helper_class):\n raise ValueError('Unsupported precision: {0!s}'.format(precision))\n return precision_helper_class", "docstring": "Creates a precision helper.\n\nArgs:\n precision (str): precision of the date and time value, which should\n be one of the PRECISION_VALUES in definitions.\n\nReturns:\n class: date time precision helper class.\n\nRaises:\n ValueError: if the precision value is unsupported.", "source": "codesearchnet_filtered"} -{"code": "def parse(raw_config):\n config_dict = yaml_to_ordered_dict(raw_config)\n if config_dict:\n for top_level_key in ['stacks', 'pre_build', 'post_build', 'pre_destroy', 'post_destroy']:\n top_level_value = config_dict.get(top_level_key)\n if isinstance(top_level_value, dict):\n tmp_list = []\n for (key, value) in top_level_value.items():\n tmp_dict = copy.deepcopy(value)\n if (top_level_key == 'stacks'):\n tmp_dict['name'] = key\n tmp_list.append(tmp_dict)\n config_dict[top_level_key] = tmp_list\n try:\n return Config(config_dict, strict=True)\n except SchematicsError as e:\n raise exceptions.InvalidConfig(e.errors)", "docstring": "Parse a raw yaml formatted stacker config.\n\nArgs:\n raw_config (str): the raw stacker configuration string in yaml format.\n\nReturns:\n :class:`Config`: the parsed stacker config.", "source": "codesearchnet_filtered"} -{"code": "def list_container_services_sub(access_token, subscription_id):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, 
'/providers/Microsoft.ContainerService/ContainerServices', '?api-version=', ACS_API])\n return do_get(endpoint, access_token)", "docstring": "List the container services in a subscription.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n\nReturns:\n HTTP response. JSON model.", "source": "codesearchnet_filtered"} -{"code": "def bbox_clip(bboxes, img_shape):\n assert ((bboxes.shape[(- 1)] % 4) == 0)\n clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)\n clipped_bboxes[(..., 0::2)] = np.maximum(np.minimum(bboxes[(..., 0::2)], (img_shape[1] - 1)), 0)\n clipped_bboxes[(..., 1::2)] = np.maximum(np.minimum(bboxes[(..., 1::2)], (img_shape[0] - 1)), 0)\n return clipped_bboxes", "docstring": "Clip bboxes to fit the image shape.\n\nArgs:\n bboxes (ndarray): Shape (..., 4*k)\n img_shape (tuple): (height, width) of the image.\n\nReturns:\n ndarray: Clipped bboxes.", "source": "codesearchnet_filtered"} -{"code": "def instantiate_device(virtual_dev, config, loop):\n conf = {}\n if ('device' in config):\n conf = config['device']\n try:\n reg = ComponentRegistry()\n if virtual_dev.endswith('.py'):\n (_name, dev) = reg.load_extension(virtual_dev, class_filter=VirtualIOTileDevice, unique=True)\n else:\n (_name, dev) = reg.load_extensions('iotile.virtual_device', name_filter=virtual_dev, class_filter=VirtualIOTileDevice, product_name='virtual_device', unique=True)\n return dev(conf)\n except ArgumentError as err:\n print(('ERROR: Could not load virtual device (%s): %s' % (virtual_dev, err.msg)))\n sys.exit(1)", "docstring": "Find a virtual device by name and instantiate it\n\nArgs:\n virtual_dev (string): The name of the pkg_resources entry point corresponding to\n the device. It should be in group iotile.virtual_device. If virtual_dev ends\n in .py, it is interpreted as a python script and loaded directly from the script.\n config (dict): A dictionary with a 'device' key with the config info for configuring\n this virtual device. This is optional.\n\nReturns:\n VirtualIOTileDevice: The instantiated subclass of VirtualIOTileDevice", "source": "codesearchnet_filtered"} -{"code": "def api_endpoint(path_or_func=None, decorate=True):\n\n def maybe_decorated(func):\n 'Apply API_ENDPOINT_DECORATORS to func.'\n if decorate:\n for decorator in API_ENDPOINT_DECORATORS:\n func = decorator()(func)\n return func\n if callable(path_or_func):\n path_or_func.api_path = path_or_func.__name__\n return maybe_decorated(path_or_func)\n else:\n\n def _api_endpoint(func):\n 'Decorator inner.'\n if (path_or_func is None):\n func.api_path = func.__name__\n else:\n func.api_path = path_or_func\n return maybe_decorated(func)\n return _api_endpoint", "docstring": "Decorator to mark a method as an API endpoint for later registration.\n\nArgs:\n path_or_func: either the function to be decorated or its API path.\n decorate (bool): Apply API_ENDPOINT_DECORATORS if True (default).\n\nReturns:\n Callable: Decorated function (with optionally applied decorators).\n\nExample:\n >>> from dddp.api import APIMixin, api_endpoint\n >>> class Counter(APIMixin):\n ... value = 0\n ...\n ... # default API path matches function name 'increment'.\n ... @api_endpoint\n ... def increment(self, amount):\n ... '''Increment counter value by `amount`.'''\n ... self.value += amount\n ... return self.value\n ...\n ... # excplicitly set API path to 'Decrement'.\n ... @api_endpoint('Decrement')\n ... def decrement(self, amount):\n ... '''Decrement counter value by `amount`.'''\n ... 
self.value -= amount\n ... return self.value", "source": "codesearchnet_filtered"} -{"code": "def merge_wells(self, right, keys=None):\n wells = []\n for w in self:\n rw = right.get_well(w.uwi)\n if (rw is not None):\n if (keys is None):\n keys = list(rw.data.keys())\n for k in keys:\n try:\n w.data[k] = rw.data[k]\n except:\n pass\n wells.append(w)\n return Project(wells)", "docstring": "Returns a new Project object containing wells from self where\n curves from the wells on the right have been added. Matching between\n wells in self and right is based on uwi match and ony wells in self\n are considered\n\nArgs:\n uwi (string): the UWI string for the well.\n\nReturns:\n project", "source": "codesearchnet_filtered"} -{"code": "def create_binary_array(self, key, value):\n data = None\n if ((key is not None) and (value is not None)):\n value_encoded = []\n for v in value:\n try:\n value_encoded.append(base64.b64encode(bytes(v)).decode('utf-8'))\n except TypeError:\n value_encoded.append(base64.b64encode(bytes(v, 'utf-8')).decode('utf-8'))\n data = self.db.create(key.strip(), json.dumps(value_encoded))\n else:\n self.tcex.log.warning(u'The key or value field was None.')\n return data", "docstring": "Create method of CRUD operation for binary array data.\n\nArgs:\n key (string): The variable to write to the DB.\n value (any): The data to write to the DB.\n\nReturns:\n (string): Result of DB write.", "source": "codesearchnet_filtered"} -{"code": "def print(self, tag=None, name=None):\n _name = name\n if (_name is None):\n _name = 'print'\n fn = streamsx.topology.functions.print_flush\n if (tag is not None):\n tag = (str(tag) + ': ')\n fn = (lambda v: streamsx.topology.functions.print_flush((tag + str(v))))\n sp = self.for_each(fn, name=_name)\n sp._op().sl = _SourceLocation(_source_info(), 'print')\n return sp", "docstring": "Prints each tuple to stdout flushing after each tuple.\n\n If `tag` is not `None` then each tuple has \"tag: \" prepended\n to it before printing.\n\nArgs:\n tag: A tag to prepend to each tuple.\n name(str): Name of the resulting stream.\n When `None` defaults to a generated name.\n\nReturns:\n streamsx.topology.topology.Sink: Stream termination.\n\n .. versionadded:: 1.6.1 `tag`, `name` parameters.\n\n .. versionchanged:: 1.7\n Now returns a :py:class:`Sink` instance.", "source": "codesearchnet_filtered"} -{"code": "def resolve_identifier(self, name, expected_type=None):\n name = str(name)\n if (name in self._known_identifiers):\n obj = self._known_identifiers[name]\n if ((expected_type is not None) and (not isinstance(obj, expected_type))):\n raise UnresolvedIdentifierError(u'Identifier resolved to an object of an unexpected type', name=name, expected_type=expected_type.__name__, resolved_type=obj.__class__.__name__)\n return obj\n if (self.parent is not None):\n try:\n return self.parent.resolve_identifier(name)\n except UnresolvedIdentifierError:\n pass\n raise UnresolvedIdentifierError(u'Could not resolve identifier', name=name, scope=self.name)", "docstring": "Resolve an identifier to an object.\n\n There is a single namespace for identifiers so the user also should\n pass an expected type that will be checked against what the identifier\n actually resolves to so that there are no surprises.\n\nArgs:\n name (str): The name that we want to resolve\n expected_type (type): The type of object that we expect to receive.\n This is an optional parameter. 
If None is passed, no type checking\n is performed.\n\nReturns:\n object: The resolved object", "source": "codesearchnet_filtered"} -{"code": "def get_ignition_type(root):\n properties = {}\n elem = root.find('ignitionType')\n if (elem is None):\n raise MissingElementError('ignitionType')\n elem = elem.attrib\n if ('target' in elem):\n ign_target = elem['target'].rstrip(';').upper()\n else:\n raise MissingAttributeError('target', 'ignitionType')\n if ('type' in elem):\n ign_type = elem['type']\n if (ign_type == 'baseline max intercept from d/dt'):\n ign_type = 'd/dt max extrapolated'\n else:\n raise MissingAttributeError('type', 'ignitionType')\n if (len(ign_target.split(';')) > 1):\n raise NotImplementedError('Multiple ignition targets not supported.')\n if (ign_target == 'OHEX'):\n ign_target = 'OH*'\n elif (ign_target == 'CHEX'):\n ign_target = 'CH*'\n elif (ign_target == 'P'):\n ign_target = 'pressure'\n elif (ign_target == 'T'):\n ign_target = 'temperature'\n if (ign_target not in ['pressure', 'temperature', 'OH', 'OH*', 'CH*', 'CH']):\n raise KeywordError((ign_target + ' not valid ignition target'))\n if (ign_type not in ['max', 'd/dt max', '1/2 max', 'min', 'd/dt max extrapolated']):\n raise KeywordError((ign_type + ' not valid ignition type'))\n properties['type'] = ign_type\n properties['target'] = ign_target\n return properties", "docstring": "Gets ignition type and target.\n\nArgs:\n root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\n properties (`dict`): Dictionary with ignition type/target information", "source": "codesearchnet_filtered"} -{"code": "def raisefrom(exc_type, message, exc):\n if (sys.version_info[:2] >= (3, 2)):\n six.raise_from(exc_type(message), exc)\n else:\n six.reraise(exc_type, ('%s - %s' % (message, exc)), sys.exc_info()[2])", "docstring": "Call Python 3 raise from or emulate it for Python 2\n\nArgs:\n exc_type (Any): Type of Exception\n message (str): Error message to display\n exc (BaseException): original exception\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def add_device(self, path):\n hdevice = self._libinput.libinput_path_add_device(self._li, path.encode())\n if hdevice:\n return Device(hdevice, self._libinput)\n return None", "docstring": "Add a device to a libinput context.\n\n If successful, the device will be added to the internal list and\n re-opened on :meth:`~libinput.LibInput.resume`. 
The device can be\n removed with :meth:`remove_device`.\n If the device was successfully initialized, it is returned.\n\nArgs:\n path (str): Path to an input device.\n\nReturns:\n ~libinput.define.Device: A device object or :obj:`None`.", "source": "codesearchnet_filtered"} -{"code": "def enroll_users_in_course(cls, enterprise_customer, course_id, course_mode, emails):\n (existing_users, unregistered_emails) = cls.get_users_by_email(emails)\n successes = []\n pending = []\n failures = []\n for user in existing_users:\n succeeded = cls.enroll_user(enterprise_customer, user, course_mode, course_id)\n if succeeded:\n successes.append(user)\n else:\n failures.append(user)\n for email in unregistered_emails:\n pending_user = enterprise_customer.enroll_user_pending_registration(email, course_mode, course_id)\n pending.append(pending_user)\n return (successes, pending, failures)", "docstring": "Enroll existing users in a course, and create a pending enrollment for nonexisting users.\n\nArgs:\n enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment\n course_id (str): The unique identifier of the course in which we're enrolling\n course_mode (str): The mode with which we're enrolling in the course\n emails: An iterable of email addresses which need to be enrolled\n\nReturns:\n successes: A list of users who were successfully enrolled in the course\n pending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had\n pending enrollments created for them in the database\n failures: A list of users who could not be enrolled in the course", "source": "codesearchnet_filtered"} -{"code": "def determine_alert(self, action_schedule, issue_creation_time, last_alert):\n issue_age = (time.time() - issue_creation_time)\n alert_schedule_lookup = {pytimeparse.parse(action_time): action_time for action_time in action_schedule}\n alert_schedule = sorted(alert_schedule_lookup.keys())\n last_alert_time = pytimeparse.parse(last_alert)\n for alert_time in alert_schedule:\n if ((last_alert_time < alert_time <= issue_age) and (last_alert_time != alert_time)):\n return alert_schedule_lookup[alert_time]\n else:\n return None", "docstring": "Determine if we need to trigger an alert\n\nArgs:\n action_schedule (`list`): A list contains the alert schedule\n issue_creation_time (`int`): Time we create the issue\n last_alert (`str`): Time we sent the last alert\n\nReturns:\n (`None` or `str`)\n None if no alert should be sent. Otherwise return the alert we should send", "source": "codesearchnet_filtered"} -{"code": "def acquire(self, uuid_path, subnet=None):\n try:\n with self._create_lock():\n if subnet:\n LOGGER.debug('Trying to acquire subnet {}'.format(subnet))\n acquired_subnet = self._acquire_given_subnet(uuid_path, subnet)\n else:\n LOGGER.debug('Trying to acquire a free subnet')\n acquired_subnet = self._acquire(uuid_path)\n return acquired_subnet\n except (utils.TimerException, IOError):\n raise LagoSubnetLeaseLockException(self.path)", "docstring": "Lease a free subnet for the given uuid path.\n If subnet is given, try to lease that subnet, otherwise try to lease a\n free subnet.\n\nArgs:\n uuid_path (str): Path to the uuid file of a :class:`lago.Prefix`\n subnet (str): A subnet to lease.\n\nReturns:\n netaddr.IPAddress: An object which represents the subnet.\n\nRaises:\n LagoSubnetLeaseException:\n 1. If this store is full\n 2. 
If the requested subnet is already taken.\n LagoSubnetLeaseLockException:\n If the lock to self.path can't be acquired.", "source": "codesearchnet_filtered"} -{"code": "def padFrameRange(frange, zfill):\n\n def _do_pad(match):\n '\\n Substitutes padded for unpadded frames.\\n '\n result = list(match.groups())\n result[1] = pad(result[1], zfill)\n if result[4]:\n result[4] = pad(result[4], zfill)\n return ''.join((i for i in result if i))\n return PAD_RE.sub(_do_pad, frange)", "docstring": "Return the zero-padded version of the frame range string.\n\nArgs:\n frange (str): a frame range to test\n zfill (int):\n\nReturns:\n str:", "source": "codesearchnet_filtered"} -{"code": "def grep(regex_list, recursive=True, dpath_list=None, include_patterns=None, exclude_dirs=[], greater_exclude_dirs=None, inverse=False, exclude_patterns=[], verbose=VERBOSE, fpath_list=None, reflags=0, cache=None):\n from utool import util_regex\n from utool import util_list\n if (include_patterns is None):\n include_patterns = ['*']\n if (greater_exclude_dirs is None):\n greater_exclude_dirs = []\n if isinstance(include_patterns, six.string_types):\n include_patterns = [include_patterns]\n if (dpath_list is None):\n dpath_list = [os.getcwd()]\n if verbose:\n recursive_stat_str = ['flat', 'recursive'][recursive]\n print(('[util_path] Greping (%s) %r for %r' % (recursive_stat_str, dpath_list, regex_list)))\n print(('[util_path] regex_list = %s' % regex_list))\n if isinstance(regex_list, six.string_types):\n regex_list = [regex_list]\n found_fpath_list = []\n found_lines_list = []\n found_lxs_list = []\n if (fpath_list is None):\n fpath_generator = matching_fpaths(dpath_list=dpath_list, include_patterns=include_patterns, exclude_dirs=exclude_dirs, greater_exclude_dirs=greater_exclude_dirs, exclude_patterns=exclude_patterns, recursive=recursive)\n else:\n fpath_generator = fpath_list\n _exprs_flags = [util_regex.extend_regex2(expr, reflags) for expr in regex_list]\n extended_regex_list = util_list.take_column(_exprs_flags, 0)\n reflags_list = util_list.take_column(_exprs_flags, 1)\n reflags = reflags_list[0]\n for fpath in fpath_generator:\n (found_lines, found_lxs) = grepfile(fpath, extended_regex_list, reflags_list, cache=cache)\n if inverse:\n if (len(found_lines) == 0):\n found_fpath_list.append(fpath)\n found_lines_list.append([])\n found_lxs_list.append([])\n elif (len(found_lines) > 0):\n found_fpath_list.append(fpath)\n found_lines_list.append(found_lines)\n found_lxs_list.append(found_lxs)\n grep_result = (found_fpath_list, found_lines_list, found_lxs_list)\n if verbose:\n print('==========')\n print('==========')\n print(('[util_path] found matches in %d files' % len(found_fpath_list)))\n print(make_grep_resultstr(grep_result, extended_regex_list, reflags))\n return grep_result", "docstring": "r\"\"\"\n greps for patterns\n Python implementation of grep. 
NOT FINISHED\n\nArgs:\n regex_list (str or list): one or more patterns to find\n recursive (bool):\n dpath_list (list): directories to search (defaults to cwd)\n include_patterns (list) : defaults to standard file extensions\n\nReturns:\n (list, list, list): (found_fpaths, found_lines_list, found_lxs_list)\n\n CommandLine:\n python -m utool.util_path --test-grep\n utprof.py -m utool.util_path --exec-grep\n utprof.py utool/util_path.py --exec-grep\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_path import * # NOQA\n >>> import utool as ut\n >>> #dpath_list = [ut.truepath('~/code/utool/utool')]\n >>> dpath_list = [ut.truepath(dirname(ut.__file__))]\n >>> include_patterns = ['*.py']\n >>> exclude_dirs = []\n >>> regex_list = ['grepfile']\n >>> verbose = True\n >>> recursive = True\n >>> result = ut.grep(regex_list, recursive, dpath_list, include_patterns,\n >>> exclude_dirs)\n >>> (found_fpath_list, found_lines_list, found_lxs_list) = result\n >>> assert 'util_path.py' in list(map(basename, found_fpath_list))", "source": "codesearchnet_filtered"} -{"code": "def ReleaseFileSystem(self, file_system):\n (identifier, cache_value) = self._file_system_cache.GetCacheValueByObject(file_system)\n if (not identifier):\n raise RuntimeError('Object not cached.')\n if (not cache_value):\n raise RuntimeError('Invalid cache value.')\n self._file_system_cache.ReleaseObject(identifier)\n result = cache_value.IsDereferenced()\n if result:\n self._file_system_cache.RemoveObject(identifier)\n return result", "docstring": "Releases a cached file system object.\n\nArgs:\n file_system (FileSystem): file system object.\n\nReturns:\n bool: True if the file system object can be closed.\n\nRaises:\n PathSpecError: if the path specification is incorrect.\n RuntimeError: if the file system object is not cached or an inconsistency\n is detected in the cache.", "source": "codesearchnet_filtered"} -{"code": "def _token_endpoint_request(request, token_uri, body):\n body = urllib.parse.urlencode(body)\n headers = {'content-type': _URLENCODED_CONTENT_TYPE}\n response = request(method='POST', url=token_uri, headers=headers, body=body)\n response_body = response.data.decode('utf-8')\n if (response.status != http_client.OK):\n _handle_error_response(response_body)\n response_data = json.loads(response_body)\n return response_data", "docstring": "Makes a request to the OAuth 2.0 authorization server's token endpoint.\n\nArgs:\n request (google.auth.transport.Request): A callable used to make\n HTTP requests.\n token_uri (str): The OAuth 2.0 authorizations server's token endpoint\n URI.\n body (Mapping[str, str]): The parameters to send in the request body.\n\nReturns:\n Mapping[str, str]: The JSON-decoded response data.\n\nRaises:\n google.auth.exceptions.RefreshError: If the token endpoint returned\n an error.", "source": "codesearchnet_filtered"} -{"code": "def match_shortname(self, name, filled_args=None):\n filled_count = 0\n if (filled_args is not None):\n filled_count = len(filled_args)\n possible = [x for x in self.arg_names[filled_count:] if x.startswith(name)]\n if (len(possible) == 0):\n raise ArgumentError('Could not convert short-name full parameter name, none could be found', short_name=name, parameters=self.arg_names)\n elif (len(possible) > 1):\n raise ArgumentError('Short-name is ambiguous, could match multiple keyword parameters', short_name=name, possible_matches=possible)\n return possible[0]", "docstring": "Try to convert a prefix into a parameter name.\n\n If the result could be ambiguous or there is 
no matching\n parameter, throw an ArgumentError\n\nArgs:\n name (str): A prefix for a parameter name\n filled_args (list): A list of filled positional arguments that will be\n removed from consideration.\n\nReturns:\n str: The full matching parameter name", "source": "codesearchnet_filtered"} -{"code": "def AddImportCallbackBySuffix(path, callback):\n\n def RemoveCallback():\n with _import_callbacks_lock:\n callbacks = _import_callbacks.get(path)\n if callbacks:\n callbacks.remove(callback)\n if (not callbacks):\n del _import_callbacks[path]\n with _import_callbacks_lock:\n _import_callbacks.setdefault(path, set()).add(callback)\n _InstallImportHookBySuffix()\n return RemoveCallback", "docstring": "Register import hook.\n\n This function overrides the default import process. Then whenever a module\n whose suffix matches path is imported, the callback will be invoked.\n\n A module may be imported multiple times. Import event only means that the\n Python code contained an \"import\" statement. The actual loading and\n initialization of a new module normally happens only once, at which time\n the callback will be invoked. This function does not validates the existence\n of such a module and it's the responsibility of the caller.\n\n TODO(erezh): handle module reload.\n\nArgs:\n path: python module file path. It may be missing the directories for the\n outer packages, and therefore, requires suffix comparison to match\n against loaded modules. If it contains all outer packages, it may\n contain the sys.path as well.\n It might contain an incorrect file extension (e.g., py vs. pyc).\n callback: callable to invoke upon module load.\n\nReturns:\n Function object to invoke to remove the installed callback.", "source": "codesearchnet_filtered"} -{"code": "def UpdateNumberOfWarnings(self, number_of_consumed_warnings, number_of_produced_warnings):\n consumed_warnings_delta = 0\n if (number_of_consumed_warnings is not None):\n if (number_of_consumed_warnings < self.number_of_consumed_warnings):\n raise ValueError('Number of consumed warnings smaller than previous update.')\n consumed_warnings_delta = (number_of_consumed_warnings - self.number_of_consumed_warnings)\n self.number_of_consumed_warnings = number_of_consumed_warnings\n self.number_of_consumed_warnings_delta = consumed_warnings_delta\n produced_warnings_delta = 0\n if (number_of_produced_warnings is not None):\n if (number_of_produced_warnings < self.number_of_produced_warnings):\n raise ValueError('Number of produced warnings smaller than previous update.')\n produced_warnings_delta = (number_of_produced_warnings - self.number_of_produced_warnings)\n self.number_of_produced_warnings = number_of_produced_warnings\n self.number_of_produced_warnings_delta = produced_warnings_delta\n return ((consumed_warnings_delta > 0) or (produced_warnings_delta > 0))", "docstring": "Updates the number of warnings.\n\nArgs:\n number_of_consumed_warnings (int): total number of warnings consumed by\n the process.\n number_of_produced_warnings (int): total number of warnings produced by\n the process.\n\nReturns:\n bool: True if either number of warnings has increased.\n\nRaises:\n ValueError: if the consumed or produced number of warnings is smaller\n than the value of the previous update.", "source": "codesearchnet_filtered"} -{"code": "def _ip_unnumbered_type(self, **kwargs):\n method_name = ('interface_%s_ip_ip_config_unnumbered_ip_donor_interface_type' % kwargs['int_type'])\n ip_unnumbered_type = getattr(self._interface, method_name)\n config = 
ip_unnumbered_type(**kwargs)\n if kwargs['delete']:\n tag = 'ip-donor-interface-type'\n config.find(('.//*%s' % tag)).set('operation', 'delete')\n return config", "docstring": "Return the `ip unnumbered` donor type XML.\n\n You should not use this method.\n You probably want `Interface.ip_unnumbered`.\n\nArgs:\n int_type (str): Type of interface. (gigabitethernet,\n tengigabitethernet etc).\n delete (bool): Remove the configuration if ``True``.\n ip_donor_interface_type (str): The donor interface type (loopback)\n\nReturns:\n XML to be passed to the switch.\n\nRaises:\n None", "source": "codesearchnet_filtered"} -{"code": "def validate(message, ssldir=None, **config):\n for field in ['signature', 'certificate']:\n if (field not in message):\n _log.warn('No %s field found.', field)\n return False\n if (not isinstance(message[field], six.text_type)):\n _log.error(('msg[%r] is not a unicode string' % field))\n try:\n message[field] = message[field].decode('utf-8')\n except UnicodeError as e:\n _log.error(\"Unable to decode the message '%s' field: %s\", field, str(e))\n return False\n signature = base64.b64decode(message['signature'])\n certificate = base64.b64decode(message['certificate'])\n message = fedmsg.crypto.strip_credentials(message)\n ca_location = config.get('ca_cert_location', 'https://fedoraproject.org/fedmsg/ca.crt')\n crl_location = config.get('crl_location', 'https://fedoraproject.org/fedmsg/crl.pem')\n try:\n (ca_certificate, crl) = utils.load_certificates(ca_location, crl_location)\n _validate_signing_cert(ca_certificate, certificate, crl)\n except (IOError, RequestException, X509StoreContextError) as e:\n try:\n (ca_certificate, crl) = utils.load_certificates(ca_location, crl_location, invalidate_cache=True)\n _validate_signing_cert(ca_certificate, certificate, crl)\n except (IOError, RequestException, X509StoreContextError) as e:\n _log.error(str(e))\n return False\n try:\n crypto_certificate = x509.load_pem_x509_certificate(certificate, default_backend())\n crypto_certificate.public_key().verify(signature, fedmsg.encoding.dumps(message).encode('utf-8'), asymmetric.padding.PKCS1v15(), hashes.SHA1())\n except InvalidSignature as e:\n _log.error('message [{m}] has an invalid signature: {e}'.format(m=message, e=str(e)))\n return False\n common_name = crypto_certificate.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)\n common_name = common_name[0]\n routing_policy = config.get('routing_policy', {})\n nitpicky = config.get('routing_nitpicky', False)\n return utils.validate_policy(message.get('topic'), common_name.value, routing_policy, nitpicky=nitpicky)", "docstring": "Validate the signature on the given message.\n\n Four things must be true for the signature to be valid:\n\n 1) The X.509 cert must be signed by our CA\n 2) The cert must not be in our CRL.\n 3) We must be able to verify the signature using the RSA public key\n contained in the X.509 cert.\n 4) The topic of the message and the CN on the cert must appear in the\n :ref:`conf-routing-policy` dict.\n\nArgs:\n message (dict): A signed message in need of validation. 
A signed message\n contains the 'signature' and 'certificate' keys.\n ssldir (str): The path to the directory containing PEM-encoded X.509\n key pairs.\n\nReturns:\n bool: True of the message passes validation, False otherwise.", "source": "codesearchnet_filtered"} -{"code": "def _GetChromeWebStorePage(self, extension_identifier):\n web_store_url = self._WEB_STORE_URL.format(xid=extension_identifier)\n try:\n response = requests.get(web_store_url)\n except (requests.ConnectionError, requests.HTTPError) as exception:\n logger.warning('[{0:s}] unable to retrieve URL: {1:s} with error: {2!s}'.format(self.NAME, web_store_url, exception))\n return None\n return response.text", "docstring": "Retrieves the page for the extension from the Chrome store website.\n\nArgs:\n extension_identifier (str): Chrome extension identifier.\n\nReturns:\n str: page content or None.", "source": "codesearchnet_filtered"} -{"code": "def index(self, value, start=0, end=None):\n try:\n index = self._dict[value]\n except KeyError:\n raise ValueError\n else:\n start = self._fix_neg_index(start)\n end = self._fix_end_index(end)\n if ((start <= index) and (index < end)):\n return index\n else:\n raise ValueError", "docstring": "Return the index of value between start and end.\n\n By default, the entire setlist is searched.\n\n This runs in O(1)\n\nArgs:\n value: The value to find the index of\n start (int): The index to start searching at (defaults to 0)\n end (int): The index to stop searching at (defaults to the end of the list)\n\nReturns:\n int: The index of the value\n\nRaises:\n ValueError: If the value is not in the list or outside of start - end\n IndexError: If start or end are out of range", "source": "codesearchnet_filtered"} -{"code": "def get_submissions_for_student_item(request, course_id, student_id, item_id):\n student_item_dict = dict(course_id=course_id, student_id=student_id, item_id=item_id)\n context = dict(**student_item_dict)\n try:\n submissions = get_submissions(student_item_dict)\n context['submissions'] = submissions\n except SubmissionRequestError:\n context['error'] = 'The specified student item was not found.'\n return render_to_response('submissions.html', context)", "docstring": "Retrieve all submissions associated with the given student item.\n\n Developer utility for accessing all the submissions associated with a\n student item. The student item is specified by the unique combination of\n course, student, and item.\n\nArgs:\n request (dict): The request.\n course_id (str): The course id for this student item.\n student_id (str): The student id for this student item.\n item_id (str): The item id for this student item.\n\nReturns:\n HttpResponse: The response object for this request. Renders a simple\n development page with all the submissions related to the specified\n student item.", "source": "codesearchnet_filtered"} -{"code": "def update_target_state(self, value: str, force: bool=True) -> datetime:\n value = value.lower()\n if (not force):\n current_state = self.current_state\n if (current_state == 'unknown'):\n raise RuntimeError(\"Unable to set target state when current state is 'unknown'\")\n allowed_target_states = self._allowed_target_states[current_state]\n LOG.debug('Updating target state of %s to %s', self._id, value)\n if (value not in allowed_target_states):\n raise ValueError(\"Invalid target state: '{}'. 
{} can be commanded to states: {}\".format(value, current_state, allowed_target_states))\n return self._update_state('target', value)", "docstring": "Set the target state.\n\nArgs:\n value (str): New value for target state\n force (bool): If true, ignore allowed transitions\n\nReturns:\n datetime, update timestamp\n\nRaises:\n RuntimeError, if it is not possible to currently set the target\n state.\n ValueError, if the specified target stat is not allowed.", "source": "codesearchnet_filtered"} -{"code": "def malloc(self, key, shape, dtype):\n if ((key not in self._memory) or (self._memory[key].shape != shape) or (self._memory[key].dtype != dtype)):\n self._memory[key] = Shmem(key, shape, dtype, self._uuid)\n return self._memory[key].np_array", "docstring": "Allocates a block of shared memory, and returns a numpy array whose data corresponds with that block.\n\nArgs:\n key (str): The key to identify the block.\n shape (list of int): The shape of the numpy array to allocate.\n dtype (type): The numpy data type (e.g. np.float32).\n\nReturns:\n np.ndarray: The numpy array that is positioned on the shared memory.", "source": "codesearchnet_filtered"} -{"code": "def delete(self, context_id, address_list):\n if (context_id not in self._contexts):\n return False\n context = self._contexts[context_id]\n for add in address_list:\n if (not self.address_is_valid(address=add)):\n raise AuthorizationException(address=add)\n context.delete_direct(address_list)\n return True", "docstring": "Delete the values associated with list of addresses, for a specific\n context referenced by context_id.\n\nArgs:\n context_id (str): the return value of create_context, referencing\n a particular context.\n address_list (list): a list of address strs\n\nReturns:\n (bool): True if the operation is successful, False if\n the context_id doesn't reference a known context.\n\nRaises:\n AuthorizationException: Raised when an address in address_list is\n not authorized either by not being in the inputs for the\n txn associated with this context, or it is under a namespace\n but the characters that are under the namespace are not valid\n address characters.", "source": "codesearchnet_filtered"} -{"code": "def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):\n creds = oauth2.get_credentials(scopes, secrets, storage, no_webserver)\n return cls(creds)", "docstring": "Return a spreadsheet collection making OAauth 2.0 credentials.\n\nArgs:\n secrets (str): location of secrets file (default: ``%r``)\n storage (str): location of storage file (default: ``%r``)\n scopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``)\n no_webserver (bool): URL/code prompt instead of webbrowser auth\n\nReturns:\n Sheets: new Sheets instance with OAauth 2.0 credentials", "source": "codesearchnet_filtered"} -{"code": "def status(self, workflow_id):\n self.logger.debug(('Get status of workflow: ' + workflow_id))\n url = ('%(wf_url)s/%(wf_id)s' % {'wf_url': self.workflows_url, 'wf_id': workflow_id})\n r = self.gbdx_connection.get(url)\n r.raise_for_status()\n return r.json()['state']", "docstring": "Checks workflow status.\n\nArgs:\n workflow_id (str): Workflow id.\n\nReturns:\n Workflow status (str).", "source": "codesearchnet_filtered"} -{"code": "def plot_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1, show_row_points=True, row_points_size=10, show_row_labels=False, show_column_points=True, column_points_size=30, show_column_labels=False, legend_n_cols=1):\n 
utils.validation.check_is_fitted(self, 'total_inertia_')\n if (ax is None):\n (fig, ax) = plt.subplots(figsize=figsize)\n ax = plot.stylize_axis(ax)\n if (show_row_points or show_row_labels):\n row_coords = self.row_coordinates(X)\n if show_row_points:\n ax.scatter(row_coords.iloc[(:, x_component)], row_coords.iloc[(:, y_component)], s=row_points_size, label=None, color=plot.GRAY['dark'], alpha=0.6)\n if show_row_labels:\n for (_, row) in row_coords.iterrows():\n ax.annotate(row.name, (row[x_component], row[y_component]))\n if (show_column_points or show_column_labels):\n col_coords = self.column_coordinates(X)\n x = col_coords[x_component]\n y = col_coords[y_component]\n prefixes = col_coords.index.str.split('_').map((lambda x: x[0]))\n for prefix in prefixes.unique():\n mask = (prefixes == prefix)\n if show_column_points:\n ax.scatter(x[mask], y[mask], s=column_points_size, label=prefix)\n if show_column_labels:\n for (i, label) in enumerate(col_coords[mask].index):\n ax.annotate(label, (x[mask][i], y[mask][i]))\n ax.legend(ncol=legend_n_cols)\n ax.set_title('Row and column principal coordinates')\n ei = self.explained_inertia_\n ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, (100 * ei[x_component])))\n ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, (100 * ei[y_component])))\n return ax", "docstring": "Plot row and column principal coordinates.\n\nArgs:\n ax (matplotlib.Axis): A fresh one will be created and returned if not provided.\n figsize ((float, float)): The desired figure size if `ax` is not provided.\n x_component (int): Number of the component used for the x-axis.\n y_component (int): Number of the component used for the y-axis.\n show_row_points (bool): Whether to show row principal components or not.\n row_points_size (float): Row principal components point size.\n show_row_labels (bool): Whether to show row labels or not.\n show_column_points (bool): Whether to show column principal components or not.\n column_points_size (float): Column principal components point size.\n show_column_labels (bool): Whether to show column labels or not.\n legend_n_cols (int): Number of columns used for the legend.\n\nReturns:\n matplotlib.Axis", "source": "codesearchnet_filtered"} -{"code": "def correction(self, word):\n return max(self.candidates(word), key=self.word_probability)", "docstring": "The most probable correct spelling for the word\n\nArgs:\n word (str): The word to correct\n\nReturns:\n str: The most likely candidate", "source": "codesearchnet_filtered"} -{"code": "def get_as(access_token, subscription_id, resource_group, as_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/availabilitySets/', as_name, '?api-version=', COMP_API])\n return do_get(endpoint, access_token)", "docstring": "Get availability set details.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n as_name (str): Name of the new availability set.\n\nReturns:\n HTTP response. 
JSON body of the availability set properties.", "source": "codesearchnet_filtered"} -{"code": "def stack_template_url(bucket_name, blueprint, endpoint):\n key_name = stack_template_key_name(blueprint)\n return ('%s/%s/%s' % (endpoint, bucket_name, key_name))", "docstring": "Produces an s3 url for a given blueprint.\n\nArgs:\n bucket_name (string): The name of the S3 bucket where the resulting\n templates are stored.\n blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint\n object to create the URL to.\n endpoint (string): The s3 endpoint used for the bucket.\n\nReturns:\n string: S3 URL.", "source": "codesearchnet_filtered"} -{"code": "def _ParseLastRunTime(self, parser_mediator, fixed_length_section):\n systemtime_struct = fixed_length_section.last_run_time\n system_time_tuple = (systemtime_struct.year, systemtime_struct.month, systemtime_struct.weekday, systemtime_struct.day_of_month, systemtime_struct.hours, systemtime_struct.minutes, systemtime_struct.seconds, systemtime_struct.milliseconds)\n date_time = None\n if (system_time_tuple != self._EMPTY_SYSTEM_TIME_TUPLE):\n try:\n date_time = dfdatetime_systemtime.Systemtime(system_time_tuple=system_time_tuple)\n except ValueError:\n parser_mediator.ProduceExtractionWarning('invalid last run time: {0!s}'.format(system_time_tuple))\n return date_time", "docstring": "Parses the last run time from a fixed-length data section.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n fixed_length_section (job_fixed_length_data_section): a Windows\n Scheduled Task job fixed-length data section.\n\nReturns:\n dfdatetime.DateTimeValues: last run date and time or None if not\n available.", "source": "codesearchnet_filtered"} -{"code": "def parse_uri(self, uri=None):\n if (not uri):\n return rdflib.term.URIRef(self.root)\n elif (type(uri) == str):\n if ((type(uri) == str) and (not uri.startswith('http'))):\n return rdflib.term.URIRef(('%s%s' % (self.root, uri)))\n else:\n return rdflib.term.URIRef(uri)\n elif (type(uri) == rdflib.term.URIRef):\n return uri\n else:\n raise TypeError('invalid URI input')", "docstring": "parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef\n\nArgs:\n uri (rdflib.term.URIRef,str): input URI\n\nReturns:\n rdflib.term.URIRef", "source": "codesearchnet_filtered"} -{"code": "def lu_solve(LU, b):\n from scipy.linalg import lu_solve as sp_lu_solve\n LU = (asarray(LU[0], float), asarray(LU[1], float))\n b = asarray(b, float)\n return sp_lu_solve(LU, b, check_finite=False)", "docstring": "r\"\"\"Solve for LU decomposition.\n\n Solve the linear equations :math:`\\mathrm A \\mathbf x = \\mathbf b`,\n given the LU factorization of :math:`\\mathrm A`.\n\nArgs:\n LU (array_like): LU decomposition.\n b (array_like): Right-hand side.\n\nReturns:\n :class:`numpy.ndarray`: The solution to the system\n :math:`\\mathrm A \\mathbf x = \\mathbf b`.\n\n See Also\n --------\n scipy.linalg.lu_factor : LU decomposition.\n scipy.linalg.lu_solve : Solve linear equations given LU factorization.", "source": "codesearchnet_filtered"} -{"code": "def get_duration(self, matrix_name):\n duration = 0.0\n if (matrix_name in self.data):\n duration = sum([stage.duration() for stage in self.data[matrix_name]])\n return duration", "docstring": "Get duration for a concrete matrix.\n\nArgs:\n matrix_name (str): name of the Matrix.\n\nReturns:\n float: duration of concrete matrix in seconds.", "source": "codesearchnet_filtered"} -{"code": 
"def PrivateKeyFromNEP2(nep2_key, passphrase):\n if ((not nep2_key) or (len(nep2_key) != 58)):\n raise ValueError('Please provide a nep2_key with a length of 58 bytes (LEN: {0:d})'.format(len(nep2_key)))\n ADDRESS_HASH_SIZE = 4\n ADDRESS_HASH_OFFSET = (len(NEP_FLAG) + len(NEP_HEADER))\n try:\n decoded_key = base58.b58decode_check(nep2_key)\n except Exception as e:\n raise ValueError('Invalid nep2_key')\n address_hash = decoded_key[ADDRESS_HASH_OFFSET:(ADDRESS_HASH_OFFSET + ADDRESS_HASH_SIZE)]\n encrypted = decoded_key[(- 32):]\n pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')\n derived = scrypt.hash(pwd_normalized, address_hash, N=SCRYPT_ITERATIONS, r=SCRYPT_BLOCKSIZE, p=SCRYPT_PARALLEL_FACTOR, buflen=SCRYPT_KEY_LEN_BYTES)\n derived1 = derived[:32]\n derived2 = derived[32:]\n cipher = AES.new(derived2, AES.MODE_ECB)\n decrypted = cipher.decrypt(encrypted)\n private_key = xor_bytes(decrypted, derived1)\n kp_new = KeyPair(priv_key=private_key)\n kp_new_address = kp_new.GetAddress()\n kp_new_address_hash_tmp = hashlib.sha256(kp_new_address.encode('utf-8')).digest()\n kp_new_address_hash_tmp2 = hashlib.sha256(kp_new_address_hash_tmp).digest()\n kp_new_address_hash = kp_new_address_hash_tmp2[:4]\n if (kp_new_address_hash != address_hash):\n raise ValueError('Wrong passphrase')\n return private_key", "docstring": "Gets the private key from a NEP-2 encrypted private key\n\nArgs:\n nep2_key (str): The nep-2 encrypted private key\n passphrase (str): The password to encrypt the private key with, as unicode string\n\nReturns:\n bytes: The private key", "source": "codesearchnet_filtered"} -{"code": "def ReadSerializedDict(cls, json_dict):\n if json_dict:\n json_object = cls._ConvertDictToObject(json_dict)\n if (not isinstance(json_object, containers_interface.AttributeContainer)):\n raise TypeError('{0:s} is not an attribute container type.'.format(type(json_object)))\n return json_object\n return None", "docstring": "Reads an attribute container from serialized dictionary form.\n\nArgs:\n json_dict (dict[str, object]): JSON serialized objects.\n\nReturns:\n AttributeContainer: attribute container or None.\n\nRaises:\n TypeError: if the serialized dictionary does not contain an\n AttributeContainer.", "source": "codesearchnet_filtered"} -{"code": "def get_tick(self, index):\n name = self.tick_name(index)\n if (name is None):\n return [pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY), 0]\n return [Error.NO_ERROR, self.ticks[name]]", "docstring": "Get a tick's interval.\n\nArgs:\n index (int): The index of the tick that you want to fetch.\n\nReturns:\n int, int: Error code and The tick's interval in seconds.\n\n A value of 0 means that the tick is disabled.", "source": "codesearchnet_filtered"} -{"code": "def process(self, rpc_executor, mark_streamer=None):\n if (self.func is None):\n raise ProcessingFunctionError('No processing function set for node', stream=self.stream)\n results = self.func(*[x[0] for x in self.inputs], rpc_executor=rpc_executor, mark_streamer=mark_streamer)\n if (results is None):\n results = []\n return results", "docstring": "Run this node's processing function.\n\nArgs:\n rpc_executor (RPCExecutor): An object capable of executing RPCs\n in case we need to do that.\n mark_streamer (callable): Function that can be called to manually\n mark a streamer as triggered by index.\n\nReturns:\n list(IOTileReading): A list of IOTileReadings with the results of\n the processing function or an empty list if no results were\n produced", "source": 
"codesearchnet_filtered"} -{"code": "def parse_document_id(chrom, pos, ref, alt, variant_type, case_id):\n return generate_md5_key([chrom, pos, ref, alt, variant_type, case_id])", "docstring": "Parse the unique document id for a variant.\n\n This will always be unique in the database.\n\nArgs:\n chrom(str)\n pos(str)\n ref(str)\n alt(str)\n variant_type(str): 'clinical' or 'research'\n case_id(str): unqiue family id\n\nReturns:\n document_id(str): The unique document id in an md5 string", "source": "codesearchnet_filtered"} -{"code": "def _setup_transitions(tdef, states, prev=()):\n trs = list(prev)\n for transition in tdef:\n if (len(transition) == 3):\n (name, source, target) = transition\n if (is_string(source) or isinstance(source, State)):\n source = [source]\n source = [states[src] for src in source]\n target = states[target]\n tr = Transition(name, source, target)\n else:\n raise TypeError((\"Elements of the 'transition' attribute of a workflow should be three-tuples; got %r instead.\" % (transition,)))\n if any(((prev_tr.name == tr.name) for prev_tr in trs)):\n trs = [(tr if (prev_tr.name == tr.name) else prev_tr) for prev_tr in trs]\n else:\n trs.append(tr)\n return TransitionList(trs)", "docstring": "Create a TransitionList object from a 'transitions' Workflow attribute.\n\nArgs:\n tdef: list of transition definitions\n states (StateList): already parsed state definitions.\n prev (TransitionList): transition definitions from a parent.\n\nReturns:\n TransitionList: the list of transitions defined in the 'tdef' argument.", "source": "codesearchnet_filtered"} -{"code": "def delete_record(self, record):\n self.children.remove(record.resource)\n record.delete()", "docstring": "Remove a DNSRecord\n\nArgs:\n record (:obj:`DNSRecord`): :obj:`DNSRecord` to remove\n\nReturns:\n `None`", "source": "codesearchnet_filtered"} -{"code": "def _ReadRecordHeader(self, file_object, record_header_offset):\n data_type_map = self._GetDataTypeMap('keychain_record_header')\n (record_header, _) = self._ReadStructureFromFileObject(file_object, record_header_offset, data_type_map)\n return record_header", "docstring": "Reads the record header.\n\nArgs:\n file_object (file): file-like object.\n record_header_offset (int): offset of the record header relative to\n the start of the file.\n\nReturns:\n keychain_record_header: record header.\n\nRaises:\n ParseError: if the record header cannot be read.", "source": "codesearchnet_filtered"} -{"code": "def _check_status(cls, response_json):\n status = response_json['status']\n msg = response_json['msg']\n if (status == 400):\n raise BadRequestException(msg)\n elif (status == 403):\n raise PermissionDeniedException(msg)\n elif (status == 404):\n raise FileNotFoundException(msg)\n elif (status == 451):\n raise UnavailableForLegalReasonsException(msg)\n elif (status == 509):\n raise BandwidthUsageExceeded(msg)\n elif (status >= 500):\n raise ServerErrorException(msg)", "docstring": "Check the status of the incoming response, raise exception if status is not 200.\n\nArgs:\n response_json (dict): results of the response of the GET request.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def get_computed_entry(self, inc_structure=True, parameters=None, data=None):\n param_names = {'is_hubbard', 'hubbards', 'potcar_symbols', 'potcar_spec', 'run_type'}\n if parameters:\n param_names.update(parameters)\n params = {p: getattr(self, p) for p in param_names}\n data = ({p: getattr(self, p) for p in data} if (data is not None) else {})\n if inc_structure:\n 
return ComputedStructureEntry(self.final_structure, self.final_energy, parameters=params, data=data)\n else:\n return ComputedEntry(self.final_structure.composition, self.final_energy, parameters=params, data=data)", "docstring": "Returns a ComputedStructureEntry from the vasprun.\n\nArgs:\n inc_structure (bool): Set to True if you want\n ComputedStructureEntries to be returned instead of\n ComputedEntries.\n parameters (list): Input parameters to include. It has to be one of\n the properties supported by the Vasprun object. If\n parameters is None, a default set of parameters that are\n necessary for typical post-processing will be set.\n data (list): Output data to include. Has to be one of the properties\n supported by the Vasprun object.\n\nReturns:\n ComputedStructureEntry/ComputedEntry", "source": "codesearchnet_filtered"} -{"code": "def get_field(self, field_type):\n for field in self.oxm_match_fields:\n if (field.oxm_field == field_type):\n return field.oxm_value\n return None", "docstring": "Return the value for the 'field_type' field in oxm_match_fields.\n\nArgs:\n field_type (~pyof.v0x04.common.flow_match.OxmOfbMatchField,\n ~pyof.v0x04.common.flow_match.OxmMatchFields):\n The type of the OXM field you want the value.\n\nReturns:\n The integer number of the 'field_type' if it exists. Otherwise\n return None.", "source": "codesearchnet_filtered"} -{"code": "def get(self, filter=False):\n result = {}\n for (k, v) in self.elements().items():\n intermediate = v.get(filter=filter)\n if intermediate:\n result[k] = intermediate\n return result", "docstring": "Returns a dictionary with the values of the model. Note that the values\n of the leafs are YANG classes.\n\nArgs:\n filter (bool): If set to ``True``, show only values that have been set.\n\nReturns:\n dict: A dictionary with the values of the model.\n\nExample:\n >>> pretty_print(config.get(filter=True))\n >>> {\n >>> \"interfaces\": {\n >>> \"interface\": {\n >>> \"et1\": {\n >>> \"config\": {\n >>> \"description\": \"My description\",\n >>> \"mtu\": 1500\n >>> },\n >>> \"name\": \"et1\"\n >>> },\n >>> \"et2\": {\n >>> \"config\": {\n >>> \"description\": \"Another description\",\n >>> \"mtu\": 9000\n >>> },\n >>> \"name\": \"et2\"\n >>> }\n >>> }\n >>> }\n >>> }", "source": "codesearchnet_filtered"} -{"code": "def register(self, address, retry=True):\n logger.debug(('<%s> Sending REGISTER request to: %s' % (str(self.cuuid), str(address))))\n if (not self.listener.listening):\n logger.warning('Neteria client is not listening.')\n message = {'method': 'REGISTER', 'cuuid': str(self.cuuid)}\n if self.encryption:\n message['encryption'] = [self.encryption.n, self.encryption.e]\n self.listener.send_datagram(serialize_data(message, self.compression, encryption=False), address)\n if retry:\n self.register_retries = 0\n self.listener.call_later(self.timeout, self.retransmit, {'method': 'REGISTER', 'address': address})", "docstring": "This function will send a register packet to the discovered Neteria\n server.\n\nArgs:\n address (tuple): A tuple of the (address, port) to send the register\n request to.\n retry (boolean): Whether or not we want to reset the current number\n of registration retries to 0.\n\nReturns:\n None\n\nExample:\n >>> address\n ('192.168.0.20', 40080)", "source": "codesearchnet_filtered"} -{"code": "def poll(self, timeout_ms=None, future=None):\n if (future is not None):\n timeout_ms = 100\n elif (timeout_ms is None):\n timeout_ms = self.config['request_timeout_ms']\n elif (not isinstance(timeout_ms, (int, 
float))):\n raise TypeError(('Invalid type for timeout: %s' % type(timeout_ms)))\n responses = []\n while True:\n with self._lock:\n if self._closed:\n break\n for node_id in list(self._connecting):\n self._maybe_connect(node_id)\n metadata_timeout_ms = self._maybe_refresh_metadata()\n if ((future is not None) and future.is_done):\n timeout = 0\n else:\n idle_connection_timeout_ms = self._idle_expiry_manager.next_check_ms()\n timeout = min(timeout_ms, metadata_timeout_ms, idle_connection_timeout_ms, self.config['request_timeout_ms'])\n timeout = max(0, (timeout / 1000))\n self._poll(timeout)\n responses.extend(self._fire_pending_completed_requests())\n if ((future is None) or future.is_done):\n break\n return responses", "docstring": "Try to read and write to sockets.\n\n This method will also attempt to complete node connections, refresh\n stale metadata, and run previously-scheduled tasks.\n\nArgs:\n timeout_ms (int, optional): maximum amount of time to wait (in ms)\n for at least one response. Must be non-negative. The actual\n timeout will be the minimum of timeout, request timeout and\n metadata timeout. Default: request_timeout_ms\n future (Future, optional): if provided, blocks until future.is_done\n\nReturns:\n list: responses received (can be empty)", "source": "codesearchnet_filtered"} -{"code": "def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):\n dtype_dict = {}\n for col in groupby_cols:\n dtype_dict[col] = self[col].dtype\n agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']\n agg_ops = []\n for agg_info in agg_list:\n if (not isinstance(agg_info, list)):\n output_col_name = agg_info\n input_col_name = agg_info\n agg_op = 'sum'\n else:\n input_col_name = agg_info[0]\n agg_op = agg_info[1]\n if (len(agg_info) == 2):\n output_col_name = input_col_name\n else:\n output_col_name = agg_info[2]\n if (agg_op not in agg_ops_list):\n raise NotImplementedError(('Unknown Aggregation Type: ' + str(agg_op)))\n if (agg_op in ('count', 'count_distinct', 'sorted_count_distinct')):\n output_col_dtype = np.dtype(np.int64)\n elif (agg_op in ('mean', 'std')):\n output_col_dtype = np.dtype(np.float64)\n else:\n output_col_dtype = self[input_col_name].dtype\n dtype_dict[output_col_name] = output_col_dtype\n agg_ops.append((input_col_name, output_col_name, agg_op))\n ct_agg = bcolz.ctable(np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]), expectedlen=expectedlen, rootdir=rootdir)\n return (ct_agg, dtype_dict, agg_ops)", "docstring": "Create a container for the output table, a dictionary describing it's\n columns and a list of tuples describing aggregation\n operations to perform.\n\nArgs:\n groupby_cols (list): a list of columns to groupby over\n agg_list (list): the aggregation operations (see groupby for more info)\n expectedlen (int): expected length of output table\n rootdir (string): the directory to write the table to\n\nReturns:\n ctable: A table in the correct format for containing the output of\n the specified aggregation operations.\n dict: (dtype_dict) dictionary describing columns to create\n list: (agg_ops) list of tuples of the form:\n (input_col_name, output_col_name, agg_op)\n input_col_name (string): name of the column to act on\n output_col_name (string): name of the column to output to\n agg_op (int): aggregation operation to perform", "source": "codesearchnet_filtered"} -{"code": "def coordinate_filter(self, query, mongo_query):\n LOG.debug('Adding genomic coordinates to the query')\n chromosome = 
query['chrom']\n mongo_query['chromosome'] = chromosome\n if (query.get('start') and query.get('end')):\n mongo_query['position'] = {'$lte': int(query['end'])}\n mongo_query['end'] = {'$gte': int(query['start'])}\n return mongo_query", "docstring": "Adds genomic coordinated-related filters to the query object\n\nArgs:\n query(dict): a dictionary of query filters specified by the users\n mongo_query(dict): the query that is going to be submitted to the database\n\nReturns:\n mongo_query(dict): returned object contains coordinate filters", "source": "codesearchnet_filtered"} -{"code": "def get_tag_hash(self, tag_name):\n tag_object = get_single_item_from_sequence(sequence=self._github_repository.tags(), condition=(lambda tag: (tag.name == tag_name)), no_item_error_message='No tag \"{}\" exist'.format(tag_name), too_many_item_error_message='Too many tags \"{}\" found'.format(tag_name))\n return tag_object.commit.sha", "docstring": "Fetch the commit hash that was tagged with ``tag_name``.\n\nArgs:\n tag_name (str): the name of the tag\n\nReturns:\n str: the commit hash linked by the tag", "source": "codesearchnet_filtered"} -{"code": "def find_next(self, *strings, **kwargs):\n start = kwargs.pop('start', None)\n keys_only = kwargs.pop('keys_only', False)\n staht = (start if (start is not None) else self.cursor)\n for (start, stop) in [(staht, len(self)), (0, staht)]:\n for i in range(start, stop):\n for string in strings:\n if (string in self[i]):\n tup = (i, self[i])\n self.cursor = (i + 1)\n if keys_only:\n return i\n return tup", "docstring": "From the editor's current cursor position find the next instance of the\n given string.\n\nArgs:\n strings (iterable): String or strings to search for\n\nReturns:\n tup (tuple): Tuple of cursor position and line or None if not found\n\nNote:\n This function cycles the entire editor (i.e. cursor to length of\n editor to zero and back to cursor position).", "source": "codesearchnet_filtered"} -{"code": "def update_detector(self, detector_id, detector):\n resp = self._put(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id), data=detector)\n resp.raise_for_status()\n return resp.json()", "docstring": "Update an existing detector.\n\nArgs:\n detector_id (string): the ID of the detector.\n detector (object): the detector model object. Will be serialized as\n JSON.\n\nReturns:\n dictionary of the response (updated detector model).", "source": "codesearchnet_filtered"} -{"code": "def decode(self, probs, sizes=None):\n (_, max_probs) = torch.max(probs.transpose(0, 1), 2)\n strings = self.convert_to_strings(max_probs.view(max_probs.size(0), max_probs.size(1)), sizes)\n return self.process_strings(strings, remove_repetitions=True)", "docstring": "Returns the argmax decoding given the probability matrix. Removes\n repeated elements in the sequence, as well as blanks.\n\nArgs:\n probs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim\n sizes(optional): Size of each sequence in the mini-batch\n\nReturns:\n strings: sequences of the model's best guess for the transcription on inputs", "source": "codesearchnet_filtered"} -{"code": "def images(self, name=None, quiet=False, all=False, filters=None):\n params = {'filter': name, 'only_ids': (1 if quiet else 0), 'all': (1 if all else 0)}\n if filters:\n params['filters'] = utils.convert_filters(filters)\n res = self._result(self._get(self._url('/images/json'), params=params), True)\n if quiet:\n return [x['Id'] for x in res]\n return res", "docstring": "List images. 
Similar to the ``docker images`` command.\n\nArgs:\n name (str): Only show images belonging to the repository ``name``\n quiet (bool): Only return numeric IDs as a list.\n all (bool): Show intermediate image layers. By default, these are\n filtered out.\n filters (dict): Filters to be processed on the image list.\n Available filters:\n - ``dangling`` (bool)\n - ``label`` (str): format either ``key`` or ``key=value``\n\nReturns:\n (dict or list): A list if ``quiet=True``, otherwise a dict.\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "codesearchnet_filtered"} -{"code": "def build_frontend(self, frontend_node):\n proxy_name = frontend_node.frontend_header.proxy_name.text\n service_address_node = frontend_node.frontend_header.service_address\n config_block_lines = self.__build_config_block(frontend_node.config_block)\n (host, port) = ('', '')\n if isinstance(service_address_node, pegnode.ServiceAddress):\n host = service_address_node.host.text\n port = service_address_node.port.text\n else:\n for line in config_block_lines:\n if isinstance(line, config.Bind):\n (host, port) = (line.host, line.port)\n break\n else:\n raise Exception('Not specify host and port in `frontend` definition')\n return config.Frontend(name=proxy_name, host=host, port=port, config_block=config_block_lines)", "docstring": "parse `frontend` sections, and return a config.Frontend\n\nArgs:\n frontend_node (TreeNode): Description\n\nRaises:\n Exception: Description\n\nReturns:\n config.Frontend: an object", "source": "codesearchnet_filtered"} -{"code": "def _FormatIPCPermToken(self, token_data):\n return {'user_id': token_data.user_identifier, 'group_id': token_data.group_identifier, 'creator_user_id': token_data.creator_user_identifier, 'creator_group_id': token_data.creator_group_identifier, 'access': token_data.access_mode}", "docstring": "Formats an IPC permissions token as a dictionary of values.\n\nArgs:\n token_data (bsm_token_data_ipc_perm): AUT_IPC_PERM token data.\n\nReturns:\n dict[str, str]: token values.", "source": "codesearchnet_filtered"} -{"code": "def transpile(circuits, backend=None, basis_gates=None, coupling_map=None, initial_layout=None, seed_mapper=None, pass_manager=None):\n warnings.warn('qiskit.transpiler.transpile() has been deprecated and will be removed in the 0.9 release. Use qiskit.compiler.transpile() instead.', DeprecationWarning)\n return compiler.transpile(circuits=circuits, backend=backend, basis_gates=basis_gates, coupling_map=coupling_map, initial_layout=initial_layout, seed_transpiler=seed_mapper, pass_manager=pass_manager)", "docstring": "transpile one or more circuits.\n\nArgs:\n circuits (QuantumCircuit or list[QuantumCircuit]): circuits to compile\n backend (BaseBackend): a backend to compile for\n basis_gates (list[str]): list of basis gate names supported by the\n target. Default: ['u1','u2','u3','cx','id']\n coupling_map (list): coupling map (perhaps custom) to target in mapping\n\n initial_layout (Layout or dict or list):\n Initial position of virtual qubits on physical qubits. 
The final\n layout is not guaranteed to be the same, as the transpiler may permute\n qubits through swaps or other means.\n\n seed_mapper (int): random seed for the swap_mapper\n pass_manager (PassManager): a pass_manager for the transpiler stages\n\nReturns:\n QuantumCircuit or list[QuantumCircuit]: transpiled circuit(s).\n\nRaises:\n TranspilerError: in case of bad inputs to transpiler or errors in passes", "source": "codesearchnet_filtered"} -{"code": "def broadcast_zip(list1, list2):\n try:\n len(list1)\n except TypeError:\n list1 = list(list1)\n try:\n len(list2)\n except TypeError:\n list2 = list(list2)\n if ((len(list1) == 1) and (len(list2) > 1)):\n list1 = (list1 * len(list2))\n elif ((len(list1) > 1) and (len(list2) == 1)):\n list2 = (list2 * len(list1))\n elif (len(list1) != len(list2)):\n raise ValueError(('out of alignment len(list1)=%r, len(list2)=%r' % (len(list1), len(list2))))\n return zip(list1, list2)", "docstring": "r\"\"\"\n Zips elementwise pairs between list1 and list2. Broadcasts\n the first dimension if a single list is of length 1.\n\n Aliased as bzip\n\nArgs:\n list1 (list):\n list2 (list):\n\nReturns:\n list: list of pairs\n\n SeeAlso:\n util_dict.dzip\n\nRaises:\n ValueError: if the list dimensions are not broadcastable\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_list import * # NOQA\n >>> import utool as ut\n >>> assert list(bzip([1, 2, 3], [4])) == [(1, 4), (2, 4), (3, 4)]\n >>> assert list(bzip([1, 2, 3], [4, 4, 4])) == [(1, 4), (2, 4), (3, 4)]\n >>> assert list(bzip([1], [4, 4, 4])) == [(1, 4), (1, 4), (1, 4)]\n >>> ut.assert_raises(ValueError, bzip, [1, 2, 3], [])\n >>> ut.assert_raises(ValueError, bzip, [], [4, 5, 6])\n >>> ut.assert_raises(ValueError, bzip, [], [4])\n >>> ut.assert_raises(ValueError, bzip, [1, 2], [4, 5, 6])\n >>> ut.assert_raises(ValueError, bzip, [1, 2, 3], [4, 5])", "source": "codesearchnet_filtered"} -{"code": "def get_reaction(self, reactants, products):\n return self._make_request('/reaction', payload={'reactants[]': reactants, 'products[]': products}, mp_decode=False)", "docstring": "Gets a reaction from the Materials Project.\n\nArgs:\n reactants ([str]): List of formulas\n products ([str]): List of formulas\n\nReturns:\n rxn", "source": "codesearchnet_filtered"} -{"code": "def get_cookie_header(queue_item):\n header = []\n path = URLHelper.get_path(queue_item.request.url)\n for cookie in queue_item.request.cookies:\n root_path = ((cookie.path == '') or (cookie.path == '/'))\n if (path.startswith(cookie.path) or root_path):\n header.append(((cookie.name + '=') + cookie.value))\n return '&'.join(header)", "docstring": "Convert a requests cookie jar to a HTTP request cookie header value.\n\nArgs:\n queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.\n\nReturns:\n str: The HTTP cookie header value.", "source": "codesearchnet_filtered"} -{"code": "def start_log_monitor(redis_address, logs_dir, stdout_file=None, stderr_file=None, redis_password=None):\n log_monitor_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log_monitor.py')\n command = [sys.executable, '-u', log_monitor_filepath, '--redis-address={}'.format(redis_address), '--logs-dir={}'.format(logs_dir)]\n if redis_password:\n command += ['--redis-password', redis_password]\n process_info = start_ray_process(command, ray_constants.PROCESS_TYPE_LOG_MONITOR, stdout_file=stdout_file, stderr_file=stderr_file)\n return process_info", "docstring": "Start a log monitor process.\n\nArgs:\n redis_address (str): The address 
of the Redis instance.\n logs_dir (str): The directory of logging files.\n stdout_file: A file handle opened for writing to redirect stdout to. If\n no redirection should happen, then this should be None.\n stderr_file: A file handle opened for writing to redirect stderr to. If\n no redirection should happen, then this should be None.\n redis_password (str): The password of the redis server.\n\nReturns:\n ProcessInfo for the process that was started.", "source": "codesearchnet_filtered"} -{"code": "def remove_room_alias(self, room_alias):\n try:\n self.api.remove_room_alias(room_alias)\n return True\n except MatrixRequestError:\n return False", "docstring": "Remove mapping of an alias\n\nArgs:\n room_alias(str): The alias to be removed.\n\nReturns:\n bool: True if the alias is removed, False otherwise.", "source": "codesearchnet_filtered"} -{"code": "def _ReadEnumerationDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n if is_member:\n error_message = 'data type not supported as member'\n raise errors.DefinitionReaderError(definition_name, error_message)\n values = definition_values.get('values')\n if (not values):\n error_message = 'missing values'\n raise errors.DefinitionReaderError(definition_name, error_message)\n definition_object = self._ReadSemanticDataTypeDefinition(definitions_registry, definition_values, data_types.EnumerationDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_ENUMERATION)\n last_name = None\n for enumeration_value in values:\n aliases = enumeration_value.get('aliases', None)\n description = enumeration_value.get('description', None)\n name = enumeration_value.get('name', None)\n number = enumeration_value.get('number', None)\n if ((not name) or (number is None)):\n if last_name:\n error_location = 'after: {0:s}'.format(last_name)\n else:\n error_location = 'at start'\n error_message = '{0:s} missing name or number'.format(error_location)\n raise errors.DefinitionReaderError(definition_name, error_message)\n else:\n try:\n definition_object.AddValue(name, number, aliases=aliases, description=description)\n except KeyError as exception:\n error_message = '{0!s}'.format(exception)\n raise errors.DefinitionReaderError(definition_name, error_message)\n last_name = name\n return definition_object", "docstring": "Reads an enumeration data type definition.\n\nArgs:\n definitions_registry (DataTypeDefinitionsRegistry): data type definitions\n registry.\n definition_values (dict[str, object]): definition values.\n definition_name (str): name of the definition.\n is_member (Optional[bool]): True if the data type definition is a member\n data type definition.\n\nReturns:\n EnumerationDataTypeDefinition: enumeration data type definition.\n\nRaises:\n DefinitionReaderError: if the definitions values are missing or if\n the format is incorrect.", "source": "codesearchnet_filtered"} -{"code": "def get_device_state(self, device, id_override=None, type_override=None):\n _LOGGER.info('Getting state via online API')\n object_id = (id_override or device.object_id())\n object_type = (type_override or device.object_type())\n url_string = '{}/{}s/{}'.format(self.BASE_URL, object_type, object_id)\n arequest = requests.get(url_string, headers=API_HEADERS)\n response_json = arequest.json()\n _LOGGER.debug('%s', response_json)\n return response_json", "docstring": "Get device state via online API.\n\nArgs:\n device (WinkDevice): The device the change is being requested for.\n id_override (String, optional): A device ID used to 
override the\n passed in device's ID. Used to make changes on sub-devices.\n i.e. Outlet in a Powerstrip. The Parent device's ID.\n type_override (String, optional): Used to override the device type\n when a device inherits from a device other than WinkDevice.\n\nReturns:\n response_json (Dict): The API's response in dictionary format", "source": "codesearchnet_filtered"} -{"code": "def get_curve(self, mnemonic, alias=None):\n return self.data.get(self.get_mnemonic(mnemonic, alias=alias), None)", "docstring": "Wraps get_mnemonic.\n\n Instead of picking curves by name directly from the data dict, you\n can pick them up with this method, which takes account of the alias\n dict you pass it. If you do not pass an alias dict, then you get the\n curve you asked for, if it exists, or None. NB Wells do not have alias\n dicts, but Projects do.\n\nArgs:\n mnemonic (str): the name of the curve you want.\n alias (dict): an alias dictionary, mapping mnemonics to lists of\n mnemonics.\n\nReturns:\n Curve.", "source": "codesearchnet_filtered"} -{"code": "def parse_transcripts(transcript_lines):\n LOG.info('Parsing transcripts')\n if isinstance(transcript_lines, DataFrame):\n transcripts = parse_ensembl_transcript_request(transcript_lines)\n else:\n transcripts = parse_ensembl_transcripts(transcript_lines)\n parsed_transcripts = {}\n for tx in transcripts:\n tx_id = tx['ensembl_transcript_id']\n ens_gene_id = tx['ensembl_gene_id']\n if (not (tx_id in parsed_transcripts)):\n tx_info = {'chrom': tx['chrom'], 'transcript_start': tx['transcript_start'], 'transcript_end': tx['transcript_end'], 'mrna': set(), 'mrna_predicted': set(), 'nc_rna': set(), 'ensembl_gene_id': ens_gene_id, 'ensembl_transcript_id': tx_id}\n parsed_transcripts[tx_id] = tx_info\n tx_info = parsed_transcripts[tx_id]\n if tx.get('refseq_mrna_predicted'):\n tx_info['mrna_predicted'].add(tx['refseq_mrna_predicted'])\n if tx.get('refseq_mrna'):\n tx_info['mrna'].add(tx['refseq_mrna'])\n if tx.get('refseq_ncrna'):\n tx_info['nc_rna'].add(tx['refseq_ncrna'])\n return parsed_transcripts", "docstring": "Parse and massage the transcript information\n\n There could be multiple lines with information about the same transcript.\n This is why it is necessary to parse the transcripts first and then return a dictionary\n where all information has been merged.\n\nArgs:\n transcript_lines(): This could be an iterable with strings or a pandas.DataFrame\n\nReturns:\n parsed_transcripts(dict): Map from enstid -> transcript info", "source": "codesearchnet_filtered"} -{"code": "def is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms):\n if pvs:\n if pm_terms:\n return True\n if ps_terms:\n if pm_terms:\n return True\n if (len(pp_terms) >= 2):\n return True\n if pm_terms:\n if (len(pm_terms) >= 3):\n return True\n elif (len(pm_terms) >= 2):\n if (len(pp_terms) >= 2):\n return True\n elif (len(pp_terms) >= 4):\n return True\n return False", "docstring": "Check if the criterias for Likely Pathogenic is fullfilled\n\n The following are descriptions of Likely Pathogenic clasification from ACMG paper:\n\n Likely pathogenic\n (i) 1 Very strong (PVS1) AND 1 moderate (PM1– PM6) OR\n (ii) 1 Strong (PS1–PS4) AND 1–2 moderate (PM1–PM6) OR\n (iii) 1 Strong (PS1–PS4) AND ≥2 supporting (PP1–PP5) OR\n (iv) ≥3 Moderate (PM1–PM6) OR\n (v) 2 Moderate (PM1–PM6) AND ≥2 supporting (PP1–PP5) OR\n (vi) 1 Moderate (PM1–PM6) AND ≥4 supportin (PP1–PP5)\n\nArgs:\n pvs(bool): Pathogenic Very Strong\n ps_terms(list(str)): Pathogenic Strong terms\n pm_terms(list(str)): Pathogenic Moderate 
terms\n pp_terms(list(str)): Pathogenic Supporting terms\n\nReturns:\n bool: if classification indicates Likely Pathogenic level", "source": "codesearchnet_filtered"} -{"code": "def merge(self, options):\n if (not options):\n return _CallSettings(timeout=self.timeout, retry=self.retry, page_descriptor=self.page_descriptor, page_token=self.page_token, bundler=self.bundler, bundle_descriptor=self.bundle_descriptor, kwargs=self.kwargs)\n else:\n if (options.timeout == OPTION_INHERIT):\n timeout = self.timeout\n else:\n timeout = options.timeout\n if (options.retry == OPTION_INHERIT):\n retry = self.retry\n else:\n retry = options.retry\n if (options.page_token == OPTION_INHERIT):\n page_token = self.page_token\n else:\n page_token = options.page_token\n if options.is_bundling:\n bundler = self.bundler\n else:\n bundler = None\n if (options.kwargs == OPTION_INHERIT):\n kwargs = self.kwargs\n else:\n kwargs = self.kwargs.copy()\n kwargs.update(options.kwargs)\n return _CallSettings(timeout=timeout, retry=retry, page_descriptor=self.page_descriptor, page_token=page_token, bundler=bundler, bundle_descriptor=self.bundle_descriptor, kwargs=kwargs)", "docstring": "Returns new _CallSettings merged from this and a CallOptions object.\n\n Note that passing if the CallOptions instance specifies a page_token,\n the merged _CallSettings will have ``flatten_pages`` disabled. This\n permits toggling per-resource/per-page page streaming.\n\nArgs:\n options (CallOptions): an instance whose values override\n those in this object. If None, ``merge`` returns a copy of this\n object\n\nReturns:\n CallSettings: The merged settings and options.", "source": "codesearchnet_filtered"} -{"code": "def get_logger(name, level=None, fmt=':%(lineno)d: %(message)s'):\n if (name not in Logger.loggers):\n if ((Logger.level is None) and (level is None)):\n Logger.level = level = logging.ERROR\n elif (Logger.level is None):\n Logger.level = level\n elif (level is None):\n level = Logger.level\n logger = logging.getLogger(name)\n logger_handler = logging.StreamHandler()\n logger_handler.setFormatter(LoggingFormatter(fmt=(name + fmt)))\n logger.addHandler(logger_handler)\n logger.setLevel(level)\n Logger.loggers[name] = logger\n return Logger.loggers[name]", "docstring": "Return a logger.\n\nArgs:\n name (str): name to pass to the logging module.\n level (int): level of logging.\n fmt (str): format string.\n\nReturns:\n logging.Logger: logger from ``logging.getLogger``.", "source": "codesearchnet_filtered"} -{"code": "def calculate_parity(n):\n if (not is_natural(n)):\n raise ValueError('Expected n to be a positive integer.')\n y = 0\n n = abs(n)\n while n:\n y += (n & 1)\n n = (n >> 1)\n return (y & 1)", "docstring": "Calculates and returns the parity of a number.\n\n The parity of a number is ``1`` if the number has an odd number of ones\n in its binary representation, otherwise ``0``.\n\nArgs:\n n (int): the number whose parity to calculate\n\nReturns:\n ``1`` if the number has an odd number of ones, otherwise ``0``.\n\nRaises:\n ValueError: if ``n`` is less than ``0``.", "source": "codesearchnet_filtered"} -{"code": "def remove_location(self, location):\n res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name')\n if (not res):\n res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name')\n if (not res):\n res = self._remove_hdxobject(self.data.get('groups'), location.lower(), matchon='name')\n return res", "docstring": "Remove a location. 
If the location is already added, it is ignored.\n\nArgs:\n location (str): Location to remove\n\nReturns:\n bool: True if location removed or False if not", "source": "codesearchnet_filtered"} -{"code": "def pauli_single(cls, num_qubits, index, pauli_label):\n tmp = Pauli.from_label(pauli_label)\n z = np.zeros(num_qubits, dtype=np.bool)\n x = np.zeros(num_qubits, dtype=np.bool)\n z[index] = tmp.z[0]\n x[index] = tmp.x[0]\n return cls(z, x)", "docstring": "Generate single qubit pauli at index with pauli_label with length num_qubits.\n\nArgs:\n num_qubits (int): the length of pauli\n index (int): the qubit index to insert the single qubii\n pauli_label (str): pauli\n\nReturns:\n Pauli: single qubit pauli", "source": "codesearchnet_filtered"} -{"code": "def get_values(js_dict, value='value'):\n values = js_dict[value]\n if (type(values) is list):\n if ((type(values[0]) is not dict) or tuple):\n return values\n values = {int(key): value for (key, value) in values.items()}\n if js_dict.get('size'):\n max_val = np.prod(np.array(js_dict['size']))\n else:\n max_val = np.prod(np.array(js_dict['dimension']['size']))\n vals = (max_val * [None])\n for (key, value) in values.items():\n vals[key] = value\n values = vals\n return values", "docstring": "Get values from input data.\n\nArgs:\n js_dict (dict): dictionary containing dataset data and metadata.\n value (string, optional): name of the value column. Defaults to 'value'.\n\nReturns:\n values (list): list of dataset values.", "source": "codesearchnet_filtered"} -{"code": "def content_metadata_uploads(self, mirror=False):\n excludes_str = ''\n includes_cmds = []\n cmd_base = self._get_upload_cmd(mirror=mirror)\n for content in self.s3props.get('content_metadata'):\n full_path = os.path.join(self.artifact_path, content['path'])\n if (not os.listdir(full_path)):\n raise S3ArtifactNotFound\n excludes_str += '--exclude \"{}/*\" '.format(content['path'])\n include_cmd = '{} --exclude \"*\", --include \"{}/*\"'.format(cmd_base, content['path'])\n include_cmd += ' --content-encoding {} --metadata-directive REPLACE'.format(content['content-encoding'])\n includes_cmds.append(include_cmd)\n exclude_cmd = '{} {}'.format(cmd_base, excludes_str)\n result = subprocess.run(exclude_cmd, check=True, shell=True, stdout=subprocess.PIPE)\n LOG.info('Uploaded files without metadata with command: %s', exclude_cmd)\n LOG.debug('Upload Command Output: %s', result.stdout)\n for include_cmd in includes_cmds:\n result = subprocess.run(include_cmd, check=True, shell=True, stdout=subprocess.PIPE)\n LOG.info('Uploaded files with metadata with command: %s', include_cmd)\n LOG.debug('Upload Command Output: %s', result.stdout)\n return True", "docstring": "Finds all specified encoded directories and uploads in multiple parts,\n setting metadata for objects.\n\nArgs:\n mirror (bool): If true, uses a flat directory structure instead of nesting under a version.\n\nReturns:\n bool: True if uploaded", "source": "codesearchnet_filtered"} -{"code": "def postprocess_periodical(marc_xml, mods, uuid, counter, url):\n dom = double_linked_dom(mods)\n add_missing_xml_attributes(dom, counter)\n if uuid:\n add_uuid(dom, uuid)\n return dom.prettify()", "docstring": "Some basic postprocessing of the periodical publications.\n\nArgs:\n marc_xml (str): Original Aleph record.\n mods (str): XML string generated by XSLT template.\n uuid (str): UUID of the package.\n counter (int): Number of record, is added to XML headers.\n url (str): URL of the publication (public or not).\n\nReturns:\n str: Updated 
XML.", "source": "codesearchnet_filtered"} -{"code": "def matches_to_marker_results(df):\n assert isinstance(df, pd.DataFrame)\n from collections import defaultdict\n d = defaultdict(list)\n for (idx, row) in df.iterrows():\n marker = row['marker']\n d[marker].append(row)\n marker_results = {}\n for (k, v) in d.items():\n if (len(v) > 1):\n logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)\n df_marker = pd.DataFrame(v)\n df_marker.sort_values('slen', ascending=False, inplace=True)\n for (i, r) in df_marker.iterrows():\n allele = r['allele_name']\n slen = r['slen']\n logging.debug('Selecting allele %s from contig with length %s', allele, slen)\n seq = r['sseq']\n if ('-' in seq):\n logging.warning('Gaps found in allele. Removing gaps. %s', r)\n seq = seq.replace('-', '').upper()\n allele = allele_name(seq)\n marker_results[k] = allele_result_dict(allele, seq, r.to_dict())\n break\n elif (len(v) == 1):\n row = v[0]\n seq = row['sseq']\n if ('-' in seq):\n logging.warning('Gaps found in allele. Removing gaps. %s', row)\n seq = seq.replace('-', '').upper()\n allele = allele_name(seq)\n marker_results[k] = allele_result_dict(allele, seq, row.to_dict())\n else:\n err_msg = 'Empty list of matches for marker {}'.format(k)\n logging.error(err_msg)\n raise Exception(err_msg)\n return marker_results", "docstring": "Perfect BLAST matches to marker results dict\n\n Parse perfect BLAST matches to marker results dict.\n\nArgs:\n df (pandas.DataFrame): DataFrame of perfect BLAST matches\n\nReturns:\n dict: cgMLST330 marker names to matching allele numbers", "source": "codesearchnet_filtered"} -{"code": "def need(self, folder, page=None, perpage=None):\n assert (isinstance(page, int) or (page is None))\n assert (isinstance(perpage, int) or (perpage is None))\n return self.get('need', params={'folder': folder, 'page': page, 'perpage': perpage})", "docstring": "Returns lists of files which are needed by this device in order\n for it to become in sync.\n\nArgs:\n folder (str):\n page (int): If defined applies pagination accross the\n collection of results.\n perpage (int): If defined applies pagination across the\n collection of results.\n\nReturns:\n dict", "source": "codesearchnet_filtered"} -{"code": "def cctop_save_xml(jobid, outpath):\n status = cctop_check_status(jobid=jobid)\n if (status == 'Finished'):\n result = 'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'.format(jobid)\n result_text = requests.post(result)\n with open(outpath, 'w') as f:\n f.write(result_text.text)\n return outpath\n else:\n raise ConnectionRefusedError('CCTOP job incomplete, status is \"{}\"'.format(status))", "docstring": "Save the CCTOP results file in XML format.\n\nArgs:\n jobid (str): Job ID obtained when job was submitted\n outpath (str): Path to output filename\n\nReturns:\n str: Path to output filename", "source": "codesearchnet_filtered"} -{"code": "def is_bateries_included(item):\n flag = False\n if (hasattr(item, '__call__') and hasattr(item, '__module__')):\n if (item.__module__ is not None):\n module = sys.modules[item.__module__]\n if (module == builtins):\n flag = True\n elif hasattr(module, '__file__'):\n flag = (LIB_PATH == dirname(module.__file__))\n return flag", "docstring": "Returns if a value is a python builtin function\n\nArgs:\n item (object):\n\nReturns:\n bool: flag\n\n References:\n http://stackoverflow.com/questions/23149218/check-if-a-python-function-is-builtin\n\n CommandLine:\n python -m 
utool._internal.meta_util_six is_builtin\n\nExample:\n >>> # DISABLE_DOCTEST\n >>> from utool._internal.meta_util_six import * # NOQA\n >>> item = zip\n >>> flag = is_bateries_included(item)\n >>> result = ('flag = %s' % (str(flag),))\n >>> print(result)", "source": "codesearchnet_filtered"} -{"code": "def defer(coro, delay=1):\n assert_corofunction(coro=coro)\n\n @asyncio.coroutine\n def wrapper(*args, **kw):\n (yield from asyncio.sleep(delay))\n return (yield from coro(*args, **kw))\n return wrapper", "docstring": "Returns a coroutine function wrapper that will defer the given coroutine\n execution for a certain amount of seconds in a non-blocking way.\n\n This function can be used as decorator.\n\nArgs:\n coro (coroutinefunction): coroutine function to defer.\n delay (int/float): number of seconds to defer execution.\n\nRaises:\n TypeError: if coro argument is not a coroutine function.\n\nReturns:\n filtered values (list): ordered list of resultant values.\n\n Usage::\n\n # Usage as function\n await paco.defer(coro, delay=1)\n await paco.defer(coro, delay=0.5)\n\n # Usage as decorator\n @paco.defer(delay=1)\n async def mul_2(num):\n return num * 2\n\n await mul_2(2)\n # => 4", "source": "codesearchnet_filtered"} -{"code": "def _GetSerializedAttributeContainerList(self, container_type):\n container_list = self._serialized_attribute_containers.get(container_type, None)\n if (not container_list):\n container_list = SerializedAttributeContainerList()\n self._serialized_attribute_containers[container_type] = container_list\n return container_list", "docstring": "Retrieves a serialized attribute container list.\n\nArgs:\n container_type (str): attribute container type.\n\nReturns:\n SerializedAttributeContainerList: serialized attribute container list.", "source": "codesearchnet_filtered"} -{"code": "def clone(self, *args, **overrides):\n clone = super(Layout, self).clone(*args, **overrides)\n clone._max_cols = self._max_cols\n return clone", "docstring": "Clones the Layout, overriding data and parameters.\n\nArgs:\n data: New data replacing the existing data\n shared_data (bool, optional): Whether to use existing data\n new_type (optional): Type to cast object to\n *args: Additional arguments to pass to constructor\n **overrides: New keyword arguments to pass to constructor\n\nReturns:\n Cloned Layout object", "source": "codesearchnet_filtered"} -{"code": "def from_samples_bqm(cls, samples_like, bqm, **kwargs):\n samples_like = as_samples(samples_like)\n energies = bqm.energies(samples_like)\n return cls.from_samples(samples_like, energy=energies, vartype=bqm.vartype, **kwargs)", "docstring": "Build a SampleSet from raw samples using a BinaryQuadraticModel to get energies and vartype.\n\nArgs:\n samples_like:\n A collection of raw samples. 'samples_like' is an extension of NumPy's array_like.\n See :func:`.as_samples`.\n\n bqm (:obj:`.BinaryQuadraticModel`):\n A binary quadratic model. It is used to calculate the energies\n and set the vartype.\n\n info (dict, optional):\n Information about the :class:`SampleSet` as a whole formatted as a dict.\n\n num_occurrences (array_like, optional):\n Number of occurrences for each sample. 
If not provided, defaults to a vector of 1s.\n\n aggregate_samples (bool, optional, default=False):\n If true, returned :obj:`.SampleSet` will have all unique samples.\n\n sort_labels (bool, optional, default=True):\n If true, :attr:`.SampleSet.variables` will be in sorted-order.\n Note that mixed types are not sortable in which case the given\n order will be maintained.\n\n **vectors (array_like):\n Other per-sample data.\n\nReturns:\n :obj:`.SampleSet`\n\nExample:\n >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})\n >>> samples = dimod.SampleSet.from_samples_bqm({'a': -1, 'b': 1}, bqm)", "source": "codesearchnet_filtered"} -{"code": "def _ReceiveItemOnActivity(self, zmq_socket):\n events = zmq_socket.poll(self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS)\n if events:\n try:\n received_object = self._zmq_socket.recv_pyobj()\n return received_object\n except zmq.error.Again:\n logger.error('{0:s}. Failed to receive item in time.'.format(self.name))\n raise\n except zmq.error.ZMQError as exception:\n if (exception.errno == errno.EINTR):\n logger.error('ZMQ syscall interrupted in {0:s}. Queue aborting.'.format(self.name))\n raise\n raise errors.QueueEmpty", "docstring": "Attempts to receive an item from a ZeroMQ socket.\n\nArgs:\n zmq_socket (zmq.Socket): used to the receive the item.\n\nReturns:\n object: item from the socket.\n\nRaises:\n QueueEmpty: if no item could be received within the timeout.\n zmq.error.ZMQError: if an error occurs in ZeroMQ", "source": "codesearchnet_filtered"} -{"code": "def parse_GSE(filepath):\n gpls = {}\n gsms = {}\n series_counter = 0\n database = None\n metadata = {}\n gse_name = None\n with utils.smart_open(filepath) as soft:\n groupper = groupby(soft, (lambda x: x.startswith('^')))\n for (is_new_entry, group) in groupper:\n if is_new_entry:\n (entry_type, entry_name) = __parse_entry(next(group))\n logger.debug(('%s: %s' % (entry_type.upper(), entry_name)))\n if (entry_type == 'SERIES'):\n gse_name = entry_name\n series_counter += 1\n if (series_counter > 1):\n raise Exception('GSE file should contain only one series entry!')\n (is_data, data_group) = next(groupper)\n message = 'The key is not False, probably there is an error in the SOFT file'\n assert (not is_data), message\n metadata = parse_metadata(data_group)\n elif (entry_type == 'SAMPLE'):\n (is_data, data_group) = next(groupper)\n gsms[entry_name] = parse_GSM(data_group, entry_name)\n elif (entry_type == 'PLATFORM'):\n (is_data, data_group) = next(groupper)\n gpls[entry_name] = parse_GPL(data_group, entry_name)\n elif (entry_type == 'DATABASE'):\n (is_data, data_group) = next(groupper)\n database_metadata = parse_metadata(data_group)\n database = GEODatabase(name=entry_name, metadata=database_metadata)\n else:\n logger.error(('Cannot recognize type %s' % entry_type))\n gse = GSE(name=gse_name, metadata=metadata, gpls=gpls, gsms=gsms, database=database)\n return gse", "docstring": "Parse GSE SOFT file.\n\nArgs:\n filepath (:obj:`str`): Path to GSE SOFT file.\n\nReturns:\n :obj:`GEOparse.GSE`: A GSE object.", "source": "codesearchnet_filtered"} -{"code": "def parse_headers(cls, msg):\n return list(email.parser.Parser().parsestr(msg).items())", "docstring": "Parse HTTP headers.\n\nArgs:\n msg (str): HTTP message.\n\nReturns:\n (List[Tuple[str, str]): List of header tuples.", "source": "codesearchnet_filtered"} -{"code": "def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05):\n m_projs = np.array([np.dot(site.coords, self.mvec) for site in slab.sites])\n mask = 
((m_projs - np.amax(m_projs)) >= (- height))\n surf_sites = [slab.sites[n] for n in np.where(mask)[0]]\n if xy_tol:\n surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)]\n surf_sites.reverse()\n (unique_sites, unique_perp_fracs) = ([], [])\n for site in surf_sites:\n this_perp = (site.coords - np.dot(site.coords, self.mvec))\n this_perp_frac = slab.lattice.get_fractional_coords(this_perp)\n if (not in_coord_list_pbc(unique_perp_fracs, this_perp_frac)):\n unique_sites.append(site)\n unique_perp_fracs.append(this_perp_frac)\n surf_sites = unique_sites\n return surf_sites", "docstring": "This method finds surface sites by determining which sites are within\n a threshold value in height from the topmost site in a list of sites\n\nArgs:\n site_list (list): list of sites from which to select surface sites\n height (float): threshold in angstroms of distance from topmost\n site in slab along the slab c-vector to include in surface\n site determination\n xy_tol (float): if supplied, will remove any sites which are\n within a certain distance in the miller plane.\n\nReturns:\n list of sites selected to be within a threshold of the highest", "source": "codesearchnet_filtered"} -{"code": "def GetFileSystemTypeIndicators(cls, path_spec, resolver_context=None):\n if ((cls._file_system_remainder_list is None) or (cls._file_system_store is None)):\n (specification_store, remainder_list) = cls._GetSpecificationStore(definitions.FORMAT_CATEGORY_FILE_SYSTEM)\n cls._file_system_remainder_list = remainder_list\n cls._file_system_store = specification_store\n if (cls._file_system_scanner is None):\n cls._file_system_scanner = cls._GetSignatureScanner(cls._file_system_store)\n return cls._GetTypeIndicators(cls._file_system_scanner, cls._file_system_store, cls._file_system_remainder_list, path_spec, resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported file system types.\n\nArgs:\n path_spec (PathSpec): path specification.\n resolver_context (Optional[Context]): resolver context, where None\n represents the built-in context which is not multi process safe.\n\nReturns:\n list[str]: supported format type indicators.", "source": "codesearchnet_filtered"} -{"code": "def get(self, key, mem_map=True):\n self.raise_error_if_not_open()\n if (key in self._file):\n data = self._file[key]\n sampling_rate = data.attrs[SAMPLING_RATE_ATTR]\n if (not mem_map):\n data = data[()]\n data = (np.float32(data) / MAX_INT16_VALUE)\n return (data, sampling_rate)", "docstring": "Return the samples for the given key and the sampling-rate.\n\nArgs:\n key (str): The key to read the data from.\n mem_map (bool): If ``True`` returns the data as\n memory-mapped array, otherwise a copy is returned.\n\nNote:\n The container has to be opened in advance.\n\nReturns:\n tuple: A tuple containing the samples as numpy array\n with ``np.float32`` [-1.0,1.0] and the sampling-rate.", "source": "codesearchnet_filtered"} -{"code": "def sia_bipartitions(nodes, node_labels=None):\n if config.CUT_ONE_APPROXIMATION:\n bipartitions = directed_bipartition_of_one(nodes)\n else:\n bipartitions = directed_bipartition(nodes, nontrivial=True)\n return [Cut(bipartition[0], bipartition[1], node_labels) for bipartition in bipartitions]", "docstring": "Return all |big_phi| cuts for the given nodes.\n\n This value changes based on :const:`config.CUT_ONE_APPROXIMATION`.\n\nArgs:\n nodes (tuple[int]): The node indices to partition.\n\nReturns:\n list[Cut]: All unidirectional partitions.", "source": "codesearchnet_filtered"} 
-{"code": "def get_single_upstream_artifact_full_path(context, task_id, path):\n return os.path.abspath(os.path.join(context.config['work_dir'], 'cot', task_id, path))", "docstring": "Return the full path where an upstream artifact should be located.\n\n Artifact may not exist. If you want to be sure if does, use\n ``get_and_check_single_upstream_artifact_full_path()`` instead.\n\n This function is mainly used to move artifacts to the expected location.\n\nArgs:\n context (scriptworker.context.Context): the scriptworker context.\n task_id (str): the task id of the task that published the artifact\n path (str): the relative path of the artifact\n\nReturns:\n str: absolute path to the artifact should be.", "source": "codesearchnet_filtered"} -{"code": "def copy_file(self, file_id, dest_folder_id):\n return self.__request('POST', (('/files/' + unicode(file_id)) + '/copy'), data={'parent': {'id': unicode(dest_folder_id)}})", "docstring": "Copy file to new destination\n\nArgs:\n file_id (int): ID of the folder.\n\n dest_folder_id (int): ID of parent folder you are copying to.\n\nReturns:\n dict. Response from Box.\n\nRaises:\n BoxError: An error response is returned from Box (status_code >= 400).\n\n BoxError: 409 - Item with the same name already exists.\n In this case you will need download the file and upload a new version to your destination.\n (Box currently doesn't have a method to copy a new verison.)\n\n BoxHttpResponseError: Response from Box is malformed.\n\n requests.exceptions.*: Any connection related problem.", "source": "codesearchnet_filtered"} -{"code": "def findSequenceOnDisk(cls, pattern, strictPadding=False):\n seq = cls(pattern)\n if ((seq.frameRange() == '') and (seq.padding() == '')):\n if os.path.isfile(pattern):\n return seq\n patt = seq.format('{dirname}{basename}*{extension}')\n ext = seq.extension()\n basename = seq.basename()\n pad = seq.padding()\n globbed = iglob(patt)\n if (pad and strictPadding):\n globbed = cls._filterByPaddingNum(globbed, seq.zfill())\n pad = cls.conformPadding(pad)\n matches = cls.yield_sequences_in_list(globbed)\n for match in matches:\n if ((match.basename() == basename) and (match.extension() == ext)):\n if (pad and strictPadding):\n match.setPadding(pad)\n return match\n msg = 'no sequence found on disk matching {0}'\n raise FileSeqException(msg.format(pattern))", "docstring": "Search for a specific sequence on disk.\n\n The padding characters used in the `pattern` are used to filter the\n frame values of the files on disk (if `strictPadding` is True).\n\nExample:\n Find sequence matching basename and extension, and a wildcard for\n any frame.\n returns bar.1.exr bar.10.exr, bar.100.exr, bar.1000.exr, inclusive\n\n >>> findSequenceOnDisk(\"seq/bar@@@@.exr\")\n\n Find exactly 4-padded sequence, i.e. 
seq/bar1-100#.exr\n returns only frames bar1000.exr through bar9999.exr\n\n >>> findSequenceOnDisk(\"seq/bar#.exr\", strictPadding=True)\n\nArgs:\n pattern (str): the sequence pattern being searched for\n strictPadding (bool): if True, ignore files with padding length different from `pattern`\n\nReturns:\n str:\n\nRaises:\n :class:`.FileSeqException`: if no sequence is found on disk", "source": "codesearchnet_filtered"} -{"code": "def get(self, name):\n interface = name\n if (not interface):\n raise ValueError('Vrrp.get(): interface must contain a value.')\n config = self.get_block(('interface %s' % interface))\n if (config is None):\n return config\n match = set(re.findall('^\\\\s+(?:no |)vrrp (\\\\d+)', config, re.M))\n if (not match):\n return None\n result = dict()\n for vrid in match:\n subd = dict()\n subd.update(self._parse_delay_reload(config, vrid))\n subd.update(self._parse_description(config, vrid))\n subd.update(self._parse_enable(config, vrid))\n subd.update(self._parse_ip_version(config, vrid))\n subd.update(self._parse_mac_addr_adv_interval(config, vrid))\n subd.update(self._parse_preempt(config, vrid))\n subd.update(self._parse_preempt_delay_min(config, vrid))\n subd.update(self._parse_preempt_delay_reload(config, vrid))\n subd.update(self._parse_primary_ip(config, vrid))\n subd.update(self._parse_priority(config, vrid))\n subd.update(self._parse_secondary_ip(config, vrid))\n subd.update(self._parse_timers_advertise(config, vrid))\n subd.update(self._parse_track(config, vrid))\n subd.update(self._parse_bfd_ip(config, vrid))\n result.update({int(vrid): subd})\n return (result if result else None)", "docstring": "Get the vrrp configurations for a single node interface\n\nArgs:\n name (string): The name of the interface for which vrrp\n configurations will be retrieved.\n\nReturns:\n A dictionary containing the vrrp configurations on the interface.\n Returns None if no vrrp configurations are defined or\n if the interface is not configured.", "source": "codesearchnet_filtered"} -{"code": "def GetObject(self, identifier):\n cache_value = self._values.get(identifier, None)\n if (not cache_value):\n return None\n return cache_value.vfs_object", "docstring": "Retrieves a cached object based on the identifier.\n\n This method ignores the cache value reference count.\n\nArgs:\n identifier (str): VFS object identifier.\n\nReturns:\n object: cached VFS object or None if not cached.", "source": "codesearchnet_filtered"} -{"code": "def update(self, sparql_query_only=False, auto_refresh=None, update_binary=True):\n self._diff_graph()\n sq = SparqlUpdate(self.rdf.prefixes, self.rdf.diffs)\n if sparql_query_only:\n return sq.build_query()\n response = self.repo.api.http_request('PATCH', ('%s/fcr:metadata' % self.uri), data=sq.build_query(), headers={'Content-Type': 'application/sparql-update'})\n if (response.status_code != 204):\n logger.debug(response.content)\n raise Exception(('HTTP %s, expecting 204' % response.status_code))\n if ((type(self) == NonRDFSource) and update_binary and (type(self.binary.data) != requests.models.Response)):\n self.binary._prep_binary()\n binary_data = self.binary.data\n binary_response = self.repo.api.http_request('PUT', self.uri, data=binary_data, headers={'Content-Type': self.binary.mimetype})\n if ((not auto_refresh) and (not self.repo.default_auto_refresh)):\n logger.debug('not refreshing resource RDF, but updated binary, so must refresh binary data')\n updated_self = self.repo.get_resource(self.uri)\n self.binary.refresh(updated_self)\n if 
hasattr(self, '_post_update'):\n self._post_update()\n '\\n\\t\\tIf not updating binary, pass that bool to refresh as refresh_binary flag to avoid touching binary data\\n\\t\\t'\n if auto_refresh:\n self.refresh(refresh_binary=update_binary)\n elif (auto_refresh == None):\n if self.repo.default_auto_refresh:\n self.refresh(refresh_binary=update_binary)\n return True", "docstring": "Method to update resources in repository. Firing this method computes the difference in the local modified graph and the original one,\n creates an instance of SparqlUpdate and builds a sparql query that represents these differences, and sends this as a PATCH request.\n\n Note: send PATCH request, regardless of RDF or NonRDF, to [uri]/fcr:metadata\n\n If the resource is NonRDF (Binary), this also method also updates the binary data.\n\nArgs:\n sparql_query_only (bool): If True, returns only the sparql query string and does not perform any actual updates\n auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh\n update_binary (bool): If True, and resource is NonRDF, updates binary data as well\n\nReturns:\n (bool)", "source": "codesearchnet_filtered"} -{"code": "def get_course_completions(self, enterprise_customer, days):\n return PersistentCourseGrade.objects.filter(passed_timestamp__gt=(datetime.datetime.now() - datetime.timedelta(days=days))).filter(user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True))", "docstring": "Get course completions via PersistentCourseGrade for all the learners of given enterprise customer.\n\nArgs:\n enterprise_customer (EnterpriseCustomer): Include Course enrollments for learners\n of this enterprise customer.\n days (int): Include course enrollment of this number of days.\n\nReturns:\n (list): A list of PersistentCourseGrade objects.", "source": "codesearchnet_filtered"} -{"code": "def parse_coach_go(infile):\n go_list = []\n with open(infile) as go_file:\n for line in go_file.readlines():\n go_dict = {}\n go_split = line.split()\n go_dict['go_id'] = go_split[0]\n go_dict['c_score'] = go_split[1]\n go_dict['go_term'] = ' '.join(go_split[2:])\n go_list.append(go_dict)\n return go_list", "docstring": "Parse a GO output file from COACH and return a rank-ordered list of GO term predictions\n\n The columns in all files are: GO terms, Confidence score, Name of GO terms. The files are:\n\n - GO_MF.dat - GO terms in 'molecular function'\n - GO_BP.dat - GO terms in 'biological process'\n - GO_CC.dat - GO terms in 'cellular component'\n\nArgs:\n infile (str): Path to any COACH GO prediction file\n\nReturns:\n Pandas DataFrame: Organized dataframe of results, columns defined below\n\n - ``go_id``: GO term ID\n - ``go_term``: GO term text\n - ``c_score``: confidence score of the GO prediction", "source": "codesearchnet_filtered"} -{"code": "def __get_unused_context(self, parse_result, context):\n tags_keys = set([t['key'] for t in parse_result['tags'] if t['from_context']])\n result_context = [c for c in context if (c['key'] not in tags_keys)]\n return result_context", "docstring": "Used to get unused context from context. 
Any keys not in\n parse_result\n\nArgs:\n parse_results(list): parsed results used to identify what keys\n in the context are used.\n context(list): this is the context used to match with parsed results\n keys missing in the parsed results are the unused context\n\nReturns:\n list: A list of the unused context results.", "source": "codesearchnet_filtered"} -{"code": "def _sample_action_fluent(self, name: str, dtype: tf.DType, size: Sequence[int], constraints: Dict[(str, Constraints)], default_value: tf.Tensor, prob: float) -> tf.Tensor:\n shape = ([self.batch_size] + list(size))\n if (dtype == tf.float32):\n bounds = constraints.get(name)\n if (bounds is None):\n (low, high) = ((- self.MAX_REAL_VALUE), self.MAX_REAL_VALUE)\n dist = tf.distributions.Uniform(low=low, high=high)\n sampled_fluent = dist.sample(shape)\n else:\n (low, high) = bounds\n batch = (((low is not None) and low.batch) or ((high is not None) and high.batch))\n low = (tf.cast(low.tensor, tf.float32) if (low is not None) else (- self.MAX_REAL_VALUE))\n high = (tf.cast(high.tensor, tf.float32) if (high is not None) else self.MAX_REAL_VALUE)\n dist = tf.distributions.Uniform(low=low, high=high)\n if batch:\n sampled_fluent = dist.sample()\n elif (isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor)):\n if ((low + high).shape.as_list() == list(size)):\n sampled_fluent = dist.sample([self.batch_size])\n else:\n raise ValueError('bounds are not compatible with action fluent.')\n else:\n sampled_fluent = dist.sample(shape)\n elif (dtype == tf.int32):\n logits = ([1.0] * self.MAX_INT_VALUE)\n dist = tf.distributions.Categorical(logits=logits, dtype=tf.int32)\n sampled_fluent = dist.sample(shape)\n elif (dtype == tf.bool):\n probs = 0.5\n dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)\n sampled_fluent = dist.sample(shape)\n select_default = tf.distributions.Bernoulli(prob, dtype=tf.bool).sample(self.batch_size)\n action_fluent = tf.where(select_default, default_value, sampled_fluent)\n return action_fluent", "docstring": "Samples the action fluent with given `name`, `dtype`, and `size`.\n\n With probability `prob` it chooses the action fluent `default_value`,\n with probability 1-`prob` it samples the fluent w.r.t. 
its `constraints`.\n\nArgs:\n name (str): The name of the action fluent.\n dtype (tf.DType): The data type of the action fluent.\n size (Sequence[int]): The size and shape of the action fluent.\n constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.\n default_value (tf.Tensor): The default value for the action fluent.\n prob (float): A probability measure.\n\nReturns:\n tf.Tensor: A tensor for sampling the action fluent.", "source": "codesearchnet_filtered"} -{"code": "def modify_prefix(channel, new_prefix):\n gui = ui_embed.UI(channel, 'Prefix updated', 'Modis prefix is now `{}`'.format(new_prefix), modulename=modulename)\n return gui", "docstring": "Creates an embed UI containing the prefix modified message\n\nArgs:\n channel (discord.Channel): The Discord channel to bind the embed to\n new_prefix (str): The value of the new prefix\n\nReturns:\n embed: The created embed", "source": "codesearchnet_filtered"} -{"code": "def list(self, cat, ctr=None, nb_results=None, offset=None):\n path = (LIST_URL + '?c=3&cat={}'.format(requests.utils.quote(cat)))\n if (ctr is not None):\n path += '&ctr={}'.format(requests.utils.quote(ctr))\n if (nb_results is not None):\n path += '&n={}'.format(requests.utils.quote(str(nb_results)))\n if (offset is not None):\n path += '&o={}'.format(requests.utils.quote(str(offset)))\n data = self.executeRequestApi2(path)\n clusters = []\n docs = []\n if (ctr is None):\n for pf in data.preFetch:\n for cluster in pf.response.payload.listResponse.doc:\n clusters.extend(cluster.child)\n return [c.docid for c in clusters]\n else:\n apps = []\n for d in data.payload.listResponse.doc:\n for c in d.child:\n for a in c.child:\n apps.append(utils.parseProtobufObj(a))\n return apps", "docstring": "List all possible subcategories for a specific category. If\n also a subcategory is provided, list apps from this category.\n\nArgs:\n cat (str): category id\n ctr (str): subcategory id\n nb_results (int): if a subcategory is specified, limit number\n of results to this number\n offset (int): if a subcategory is specified, start counting from this\n result\n\nReturns:\n A list of categories. If subcategory is specified, a list of apps in this\n category.", "source": "codesearchnet_filtered"} -{"code": "def mtu(self, **kwargs):\n int_type = kwargs.pop('int_type').lower()\n name = kwargs.pop('name')\n mtu = kwargs.pop('mtu')\n callback = kwargs.pop('callback', self._callback)\n int_types = ['gigabitethernet', 'tengigabitethernet', 'fortygigabitethernet', 'hundredgigabitethernet', 'port_channel']\n if (int_type not in int_types):\n raise ValueError('Incorrect int_type value.')\n minimum_mtu = 1522\n maximum_mtu = 9216\n if ((int(mtu) < minimum_mtu) or (int(mtu) > maximum_mtu)):\n raise ValueError('Incorrect mtu value 1522-9216')\n mtu_args = dict(name=name, mtu=mtu)\n if (not pynos.utilities.valid_interface(int_type, name)):\n raise ValueError('`name` must be in the format of x/y/z for physical interfaces or x for port channel.')\n config = getattr(self._interface, ('interface_%s_mtu' % int_type))(**mtu_args)\n return callback(config)", "docstring": "Set interface mtu.\n\nArgs:\n int_type (str): Type of interface. (gigabitethernet,\n tengigabitethernet, etc)\n name (str): Name of interface. (1/0/5, 1/0/10, etc)\n mtu (str): Value between 1522 and 9216\n callback (function): A function executed upon completion of the\n method. 
The only parameter passed to `callback` will be the\n ``ElementTree`` `config`.\n\nReturns:\n Return value of `callback`.\n\nRaises:\n KeyError: if `int_type`, `name`, or `mtu` is not specified.\n ValueError: if `int_type`, `name`, or `mtu` is invalid.\n\nExample:\n >>> import pynos.device\n >>> switches = ['10.24.39.211', '10.24.39.203']\n >>> auth = ('admin', 'password')\n >>> for switch in switches:\n ... conn = (switch, '22')\n ... with pynos.device.Device(conn=conn, auth=auth) as dev:\n ... output = dev.interface.mtu(mtu='1666',\n ... int_type='tengigabitethernet', name='225/0/38')\n ... dev.interface.mtu() # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n KeyError", "source": "codesearchnet_filtered"} -{"code": "def parse_options(files, env_prefix='CONFPY', strict=True):\n return check_for_missing_options(config=set_cli_options(config=set_environment_var_options(config=configuration_from_paths(paths=files, strict=strict), prefix=env_prefix)))", "docstring": "Parse configuration options and return a configuration object.\n\nArgs:\n files (iter of str): File paths which identify configuration files.\n These files are processed in order with values in later files\n overwriting values in earlier files.\n env_prefix (str): The static prefix prepended to all options when set\n as environment variables. The default is CONFPY.\n strict (bool): Whether or not to parse the files in strict mode.\n\nReturns:\n confpy.core.config.Configuration: The loaded configuration object.\n\nRaises:\n MissingRequiredOption: If a required option is not defined in any file.\n NamespaceNotRegistered: If a file contains a namespace which is not\n defined.\n OptionNotRegistered: If a file contains an option which is not defined\n but resides under a valid namespace.\n UnrecognizedFileExtension: If there is no loader for a path.", "source": "codesearchnet_filtered"} -{"code": "def load_dict_values(self, db_key: str, dict_keys: List[str], hierarchical: bool=False) -> List:\n result = []\n if (not hierarchical):\n _values = self._db.hmget(db_key, *dict_keys)\n result = [ast.literal_eval(_value) for _value in _values]\n else:\n db_keys = self._db.keys(pattern=(db_key + '*'))\n for _db_key in db_keys:\n for name in _db_key.split(':')[1:]:\n if (name in dict_keys):\n _values = self._load_values(_db_key)\n result.append(_values)\n _values = self._db.hmget(_db_key, *dict_keys)\n for (i, value) in enumerate(_values):\n try:\n _values[i] = ast.literal_eval(value)\n except SyntaxError:\n pass\n except ValueError:\n pass\n result += [value for value in _values if (value is not None)]\n return result", "docstring": "Load values from a dictionary with the specified dict_keys.\n\nArgs:\n db_key (str): Key where the dictionary is stored\n dict_keys (List[str]): Keys within the dictionary to load.\n hierarchical (bool): If True, expect the dictionary to have been\n stored hierarchically. If False, expect the dictionary to have\n been stored flat.\n\nReturns:\n object: The value stored at dict_key in the dictionary stored at\n key", "source": "codesearchnet_filtered"} -{"code": "def update_in_hdx(self, **kwargs):\n self._check_load_existing_object('resource', 'id')\n if (self.file_to_upload and ('url' in self.data)):\n del self.data['url']\n self._merge_hdx_update('resource', 'id', self.file_to_upload, **kwargs)", "docstring": "Check if resource exists in HDX and if so, update it\n\nArgs:\n **kwargs: See below\n operation (string): Operation to perform eg. patch. 
Defaults to update.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def array(self, size_chunk, start, bytesize):\n with open(self.img, 'rb') as f1:\n f1.seek((self.start_byte + (start * self.bytesize)))\n data = f1.read((size_chunk * self.bytesize))\n Z = np.fromstring(data, dtype=self.dtype, count=size_chunk)\n if (self.grid == 'LOLA'):\n return (Z * float(self.SCALING_FACTOR))\n else:\n return Z", "docstring": "Read part of the binary file\n\nArgs:\n size_chunk (int) : Size of the chunk to read\n start (int): Starting byte\n bytesize (int): Ending byte\n\nReturns:\n (np.array): array of the corresponding values", "source": "codesearchnet_filtered"} -{"code": "def find_amplitude(chunk):\n return (abs(int((chunk.max() - chunk.min()))) / config.SAMPLE_RANGE)", "docstring": "Calculate the 0-1 amplitude of an ndarray chunk of audio samples.\n\n Samples in the ndarray chunk are signed int16 values oscillating\n anywhere between -32768 and 32767. Find the amplitude between 0 and 1\n by summing the absolute values of the minimum and maximum, and dividing\n by 32767.\n\nArgs:\n chunk (numpy.ndarray): An array of int16 audio samples\n\nReturns:\n float: The amplitude of the sample between 0 and 1.\n Note that this is not a decibel representation of\n the amplitude.", "source": "codesearchnet_filtered"} -{"code": "def find_mip(self, direction, mechanism, purview, allow_neg=False):\n alpha_min = float('inf')\n probability = self.probability(direction, mechanism, purview)\n for partition in mip_partitions(mechanism, purview, self.node_labels):\n partitioned_probability = self.partitioned_probability(direction, partition)\n alpha = log2((probability / partitioned_probability))\n if (utils.eq(alpha, 0) or ((alpha < 0) and (not allow_neg))):\n return AcRepertoireIrreducibilityAnalysis(state=self.mechanism_state(direction), direction=direction, mechanism=mechanism, purview=purview, partition=partition, probability=probability, partitioned_probability=partitioned_probability, node_labels=self.node_labels, alpha=0.0)\n if ((abs(alpha_min) - abs(alpha)) > constants.EPSILON):\n alpha_min = alpha\n acria = AcRepertoireIrreducibilityAnalysis(state=self.mechanism_state(direction), direction=direction, mechanism=mechanism, purview=purview, partition=partition, probability=probability, partitioned_probability=partitioned_probability, node_labels=self.node_labels, alpha=alpha_min)\n return acria", "docstring": "Find the ratio minimum information partition for a mechanism\n over a purview.\n\nArgs:\n direction (str): |CAUSE| or |EFFECT|\n mechanism (tuple[int]): A mechanism.\n purview (tuple[int]): A purview.\n\n Keyword Args:\n allow_neg (boolean): If true, ``alpha`` is allowed to be negative.\n Otherwise, negative values of ``alpha`` will be treated as if\n they were 0.\n\nReturns:\n AcRepertoireIrreducibilityAnalysis: The irreducibility analysis for\n the mechanism.", "source": "codesearchnet_filtered"} -{"code": "def NewPathSpec(cls, type_indicator, **kwargs):\n if (type_indicator not in cls._path_spec_types):\n raise KeyError('Path specification type: {0:s} not set.'.format(type_indicator))\n if (('parent' in kwargs) and (kwargs['parent'] is None)):\n del kwargs['parent']\n path_spec_type = cls._path_spec_types[type_indicator]\n return path_spec_type(**kwargs)", "docstring": "Creates a new path specification for the specific type indicator.\n\nArgs:\n type_indicator (str): type indicator.\n kwargs (dict): keyword arguments depending on the path specification.\n\nReturns:\n PathSpec: path 
specification.\n\nRaises:\n KeyError: if path specification is not registered.", "source": "codesearchnet_filtered"} -{"code": "def get_credentials(credentials=None, client_secret_file=CLIENT_SECRET_FILE, refresh_token=None):\n if credentials:\n if _is_valid_credentials(credentials):\n return credentials\n else:\n print('Invalid credentials supplied. Will generate from default token.')\n token = (refresh_token or DEFAULT_TOKEN)\n dir_name = os.path.dirname(DEFAULT_TOKEN)\n try:\n os.makedirs(dir_name)\n except OSError:\n if (not os.path.isdir(dir_name)):\n raise\n store = file.Storage(token)\n credentials = store.get()\n try:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_known_args()[0]\n except ImportError:\n flags = None\n logr.error('Unable to parse oauth2client args; `pip install argparse`')\n if ((not credentials) or credentials.invalid):\n flow = client.flow_from_clientsecrets(client_secret_file, SCOPES)\n flow.redirect_uri = client.OOB_CALLBACK_URN\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n logr.info(('Storing credentials to ' + DEFAULT_TOKEN))\n return credentials", "docstring": "Consistently returns valid credentials object.\n\n See Also:\n https://developers.google.com/drive/web/quickstart/python\n\nArgs:\n client_secret_file (str): path to client secrets file, defaults to .gdrive_private\n refresh_token (str): path to a user provided refresh token that is already\n pre-authenticated\n credentials (`~oauth2client.client.OAuth2Credentials`, optional): handle direct\n input of credentials, which will check credentials for valid type and\n return them\n\nReturns:\n `~oauth2client.client.OAuth2Credentials`: google credentials object", "source": "codesearchnet_filtered"} -{"code": "def fit_to_cols(what, indent='', cols=79):\n lines = []\n while what:\n (what, next_line) = split_line(what=what, cols=cols, indent=indent)\n lines.append(next_line)\n return '\\n'.join(lines)", "docstring": "Wrap the given text to the columns, prepending the indent to each line.\n\nArgs:\n what(str): text to wrap.\n indent(str): indentation to use.\n cols(int): colt to wrap to.\n\nReturns:\n str: Wrapped text", "source": "codesearchnet_filtered"} -{"code": "def getDelOps(self, buid):\n return (('prop:del', (buid, self.form.name, self.name, self.storinfo)),)", "docstring": "Get a list of storage operations to delete this property from the buid.\n\nArgs:\n buid (bytes): The node buid.\n\nReturns:\n (tuple): The storage operations", "source": "codesearchnet_filtered"} -{"code": "def qc_curve_group(self, tests, alias=None):\n keys = [k for (k, v) in self.data.items() if isinstance(v, Curve)]\n if (not keys):\n return {}\n all_tests = tests.get('all', tests.get('All', tests.get('ALL', [])))\n data = {test.__name__: test(self, keys, alias) for test in all_tests}\n results = {}\n for (i, key) in enumerate(keys):\n this = {}\n for (test, result) in data.items():\n this[test] = result[i]\n results[key] = this\n return results", "docstring": "Run tests on a cohort of curves.\n\nArgs:\n alias (dict): an alias dictionary, mapping mnemonics to lists of\n mnemonics.\n\nReturns:\n dict.", "source": "codesearchnet_filtered"} -{"code": "def coroutine(func):\n\n def wrapper(*args, **kwargs):\n gen = func(*args, **kwargs)\n val = next(gen)\n if (val != None):\n raise TypeError('Unexpected value from start of coroutine')\n return gen\n wrapper.__name__ = func.__name__\n wrapper.__doc__ = func.__doc__\n return wrapper", 
"docstring": "Wraps a PEP-342 enhanced generator in a way that avoids boilerplate of the \"priming\" call to ``next``.\n\nArgs:\n func (Callable): The function constructing a generator to decorate.\n\nReturns:\n Callable: The decorated generator.", "source": "codesearchnet_filtered"} -{"code": "def parse_conf(self, keys=[]):\n confs = self.app.config.get('WAFFLE_CONFS', {})\n if (not keys):\n keys = confs.keys()\n result = {}\n for key in keys:\n if key.startswith('WAFFLE_'):\n continue\n if (key not in confs.keys()):\n continue\n stored_conf = self.configstore.get(key)\n if (not stored_conf):\n value = confs[key].get('default', '')\n stored_conf = self.configstore.put(key, util.serialize(value))\n self.configstore.commit()\n else:\n value = util.deserialize(stored_conf.get_value())\n result[stored_conf.get_key()] = value\n return result", "docstring": "Parse configuration values from the database.\n\n The extension must have been previously initialized.\n\n If a key is not found in the database, it will be created with the\n default value specified.\n\nArgs:\n keys (list[str]): list of keys to parse. If the list is empty, then\n all the keys known to the application will be used.\n\nReturns:\n dict of the parsed config values.", "source": "codesearchnet_filtered"} -{"code": "def _GetLoadConfigTimestamp(self, pefile_object):\n if (not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG')):\n return None\n timestamp = getattr(pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0)\n return timestamp", "docstring": "Retrieves the timestamp from the Load Configuration directory.\n\nArgs:\n pefile_object (pefile.PE): pefile object.\n\nReturns:\n int: load configuration timestamps or None if there are none present.", "source": "codesearchnet_filtered"} -{"code": "def not_implemented(cls, errors=None):\n if cls.expose_status:\n cls.response.content_type = 'application/json'\n cls.response._status_line = '501 Not Implemented'\n return cls(501, None, errors).to_json", "docstring": "Shortcut API for HTTP 501 `Not Implemented` response.\n\nArgs:\n errors (list): Response key/value data.\n\nReturns:\n WSResponse Instance.", "source": "codesearchnet_filtered"} -{"code": "def rbridge_id(self, **kwargs):\n is_get_config = kwargs.pop('get', False)\n if (not is_get_config):\n rbridge_id = kwargs.pop('rbridge_id')\n else:\n rbridge_id = ''\n callback = kwargs.pop('callback', self._callback)\n rid_args = dict(rbridge_id=rbridge_id)\n rid = getattr(self._rbridge, 'rbridge_id_rbridge_id')\n config = rid(**rid_args)\n if is_get_config:\n return callback(config, handler='get_config')\n return callback(config)", "docstring": "Configures device's rbridge ID. Setting this property will need\n a switch reboot\n\nArgs:\n rbridge_id (str): The rbridge ID of the device on which BGP will be\n configured in a VCS fabric.\n get (bool): Get config instead of editing config. (True, False)\n callback (function): A function executed upon completion of the\n method. The only parameter passed to `callback` will be the\n ``ElementTree`` `config`.\n\nReturns:\n Return value of `callback`.\n\nRaises:\n KeyError: if `rbridge_id` is not specified.\n\nExample:\n >>> import pynos.device\n >>> conn = ('10.24.39.211', '22')\n >>> auth = ('admin', 'password')\n >>> with pynos.device.Device(conn=conn, auth=auth) as dev:\n ... output = dev.system.rbridge_id(rbridge_id='225')\n ... output = dev.system.rbridge_id(rbridge_id='225', get=True)\n ... 
dev.system.rbridge_id() # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n KeyError", "source": "codesearchnet_filtered"} -{"code": "def compare_profiles(profile1, profile2):\n length = len(profile1)\n profile1 = np.array(list(profile1))\n profile2 = np.array(list(profile2))\n similarity_array = (profile1 == profile2)\n matches = np.sum(similarity_array)\n similarity_ratio = (matches / length)\n return similarity_ratio", "docstring": "Given two profiles, determine the ratio of similarity, i.e.\n the hamming distance between the strings.\n\nArgs:\n profile1/2 (str): profile string\n\nReturns:\n similarity_ratio (float): the ratio of similiarity (0-1)", "source": "codesearchnet_filtered"} -{"code": "def set_data(self, data):\n for name in self._fields:\n setattr(self, name, data.get(name))\n return self", "docstring": "Fills form with data\n\nArgs:\n data (dict): Data to assign form fields.\n\nReturns:\n Self. Form object.", "source": "codesearchnet_filtered"} -{"code": "def unlock_kinetis(jlink):\n if (not jlink.connected()):\n raise ValueError('No target to unlock.')\n method = UNLOCK_METHODS.get(jlink.tif, None)\n if (method is None):\n raise NotImplementedError('Unsupported target interface for unlock.')\n return method(jlink)", "docstring": "Unlock for Freescale Kinetis K40 or K60 device.\n\nArgs:\n jlink (JLink): an instance of a J-Link that is connected to a target.\n\nReturns:\n ``True`` if the device was successfully unlocked, otherwise ``False``.\n\nRaises:\n ValueError: if the J-Link is not connected to a target.", "source": "codesearchnet_filtered"} -{"code": "def create_parser(default_name: str) -> argparse.ArgumentParser:\n argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')\n argparser.add_argument('-H', '--host', help='Host to which the app binds. [%(default)s]', default='0.0.0.0')\n argparser.add_argument('-p', '--port', help='Port to which the app binds. [%(default)s]', default=5000, type=int)\n argparser.add_argument('-o', '--output', help='Logging output. [%(default)s]')\n argparser.add_argument('-n', '--name', help='Service name. This will be used as prefix for all endpoints. [%(default)s]', default=default_name)\n argparser.add_argument('--debug', help='Run the app in debug mode. [%(default)s]', action='store_true')\n argparser.add_argument('--eventbus-host', help='Hostname at which the eventbus can be reached [%(default)s]', default='eventbus')\n argparser.add_argument('--eventbus-port', help='Port at which the eventbus can be reached [%(default)s]', default=5672, type=int)\n return argparser", "docstring": "Creates the default brewblox_service ArgumentParser.\n Service-agnostic arguments are added.\n\n The parser allows calling code to add additional arguments before using it in create_app()\n\nArgs:\n default_name (str):\n default value for the --name commandline argument.\n\nReturns:\n argparse.ArgumentParser: a Python ArgumentParser with defaults set.", "source": "codesearchnet_filtered"} -{"code": "def alloc_data(self, value):\n if isinstance(value, six.binary_type):\n return self._alloc_data(value)\n elif isinstance(value, six.text_type):\n return self._alloc_data((value.encode('utf-8') + b'\\x00'))\n else:\n raise TypeError(('No idea how to encode %s' % repr(value)))", "docstring": "Allocate a piece of data that will be included in the shellcode body.\n\nArgs:\n value(...): The value to add to the shellcode. 
Can be bytes or\n string type.\n\nReturns:\n ~pwnypack.types.Offset: The offset used to address the data.", "source": "codesearchnet_filtered"} -{"code": "def _is_variant(self, gemini_variant, ind_objs):\n indexes = (ind.ind_index for ind in ind_objs)\n for index in indexes:\n gt_call = gemini_variant['gt_types'][index]\n if ((gt_call == 1) or (gt_call == 3)):\n return True\n return False", "docstring": "Check if the variant is a variation in any of the individuals\n\nArgs:\n gemini_variant (GeminiQueryRow): The gemini variant\n ind_objs (list(puzzle.models.individual)): A list of individuals to check\n\nReturns:\n bool : If any of the individuals has the variant", "source": "codesearchnet_filtered"} -{"code": "def op_nodes(self, op=None):\n nodes = []\n for node in self._multi_graph.nodes():\n if (node.type == 'op'):\n if ((op is None) or isinstance(node.op, op)):\n nodes.append(node)\n return nodes", "docstring": "Get the list of \"op\" nodes in the dag.\n\nArgs:\n op (Type): Instruction subclass op nodes to return. if op=None, return\n all op nodes.\n\nReturns:\n list[DAGNode]: the list of node ids containing the given op.", "source": "codesearchnet_filtered"} -{"code": "def instantiate(self, substitutions):\n param_dict = self.substitute_params(substitutions)\n (pkg, ident) = self.name.rsplit('.', 1)\n pkg = ('malcolm.modules.%s' % pkg)\n try:\n ob = importlib.import_module(pkg)\n except ImportError as e:\n raise_with_traceback(ImportError(('\\n%s:%d:\\n%s' % (self.filename, self.lineno, e))))\n try:\n ob = getattr(ob, ident)\n except AttributeError:\n raise_with_traceback(ImportError(('\\n%s:%d:\\nPackage %r has no ident %r' % (self.filename, self.lineno, pkg, ident))))\n try:\n model = MethodModel.from_callable(ob, returns=False)\n args = model.validate(param_dict)\n ret = ob(**args)\n except Exception as e:\n sourcefile = inspect.getsourcefile(ob)\n lineno = inspect.getsourcelines(ob)[1]\n raise_with_traceback(YamlError(('\\n%s:%d:\\n%s:%d:\\n%s' % (self.filename, self.lineno, sourcefile, lineno, e))))\n else:\n return ret", "docstring": "Keep recursing down from base using dotted name, then call it with\n self.params and args\n\nArgs:\n substitutions (dict): Substitutions to make to self.param_dict\n\nReturns:\n The found object called with (*args, map_from_d)\n\n E.g. if ob is malcolm.parts, and name is \"ca.CADoublePart\", then the\n object will be malcolm.parts.ca.CADoublePart", "source": "codesearchnet_filtered"} -{"code": "def get_entry_by_material_id(self, material_id, compatible_only=True, inc_structure=None, property_data=None, conventional_unit_cell=False):\n data = self.get_entries(material_id, compatible_only=compatible_only, inc_structure=inc_structure, property_data=property_data, conventional_unit_cell=conventional_unit_cell)\n return data[0]", "docstring": "Get a ComputedEntry corresponding to a material_id.\n\nArgs:\n material_id (str): Materials Project material_id (a string,\n e.g., mp-1234).\n compatible_only (bool): Whether to return only \"compatible\"\n entries. Compatible entries are entries that have been\n processed using the MaterialsProjectCompatibility class,\n which performs adjustments to allow mixing of GGA and GGA+U\n calculations for more accurate phase diagrams and reaction\n energies.\n inc_structure (str): If None, entries returned are\n ComputedEntries. 
If inc_structure=\"final\",\n ComputedStructureEntries with final structures are returned.\n Otherwise, ComputedStructureEntries with initial structures\n are returned.\n property_data (list): Specify additional properties to include in\n entry.data. If None, no data. Should be a subset of\n supported_properties.\n conventional_unit_cell (bool): Whether to get the standard\n conventional unit cell\n\nReturns:\n ComputedEntry or ComputedStructureEntry object.", "source": "codesearchnet_filtered"} -{"code": "def execute_request(self, url, http_method, query_params, post_data):\n response = requests.request(http_method, url, params=query_params, auth=self._auth, json=post_data, headers={'User-Agent': USER_AGENT})\n if (isinstance(self._output_generator, str) and (self._output_generator.lower() == 'json')):\n return response.json()\n elif (self._output_generator is not None):\n return self._output_generator.process_response(response)\n else:\n return response", "docstring": "Makes a request to the specified url endpoint with the\n specified http method, params and post data.\n\nArgs:\n url (string): The url to the API without query params.\n Example: \"https://api.housecanary.com/v2/property/value\"\n http_method (string): The http method to use for the request.\n query_params (dict): Dictionary of query params to add to the request.\n post_data: Json post data to send in the body of the request.\n\nReturns:\n The result of calling this instance's OutputGenerator process_response method\n on the requests.Response object.\n If no OutputGenerator is specified for this instance, returns the requests.Response.", "source": "codesearchnet_filtered"} -{"code": "def _cursor_pb(cursor_pair):\n if (cursor_pair is not None):\n (data, before) = cursor_pair\n value_pbs = [_helpers.encode_value(value) for value in data]\n return query_pb2.Cursor(values=value_pbs, before=before)", "docstring": "Convert a cursor pair to a protobuf.\n\n If ``cursor_pair`` is :data:`None`, just returns :data:`None`.\n\nArgs:\n cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of\n\n * a list of field values.\n * a ``before`` flag\n\nReturns:\n Optional[google.cloud.firestore_v1beta1.types.Cursor]: A\n protobuf cursor corresponding to the values.", "source": "codesearchnet_filtered"} -{"code": "def topics(self, exclude_internal_topics=True):\n topics = set(self._partitions.keys())\n if exclude_internal_topics:\n return (topics - self.internal_topics)\n else:\n return topics", "docstring": "Get set of known topics.\n\nArgs:\n exclude_internal_topics (bool): Whether records from internal topics\n (such as offsets) should be exposed to the consumer. If set to\n True the only way to receive records from an internal topic is\n subscribing to it. 
Default True\n\nReturns:\n set: {topic (str), ...}", "source": "codesearchnet_filtered"} -{"code": "def copy_file_if_newer(src_fs, src_path, dst_fs, dst_path):\n with manage_fs(src_fs, writeable=False) as _src_fs:\n with manage_fs(dst_fs, create=True) as _dst_fs:\n if (_src_fs is _dst_fs):\n if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):\n _src_fs.copy(src_path, dst_path, overwrite=True)\n return True\n else:\n return False\n else:\n with _src_fs.lock(), _dst_fs.lock():\n if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):\n copy_file_internal(_src_fs, src_path, _dst_fs, dst_path)\n return True\n else:\n return False", "docstring": "Copy a file from one filesystem to another, checking times.\n\n If the destination exists, and is a file, it will be first truncated.\n If both source and destination files exist, the copy is executed only\n if the source file is newer than the destination file. In case\n modification times of source or destination files are not available,\n copy is always executed.\n\nArgs:\n src_fs (FS or str): Source filesystem (instance or URL).\n src_path (str): Path to a file on the source filesystem.\n dst_fs (FS or str): Destination filesystem (instance or URL).\n dst_path (str): Path to a file on the destination filesystem.\n\nReturns:\n bool: `True` if the file copy was executed, `False` otherwise.", "source": "codesearchnet_filtered"} -{"code": "def match_rules(tree, rules, fun=None, multi=False):\n if multi:\n context = match_rules_context_multi(tree, rules)\n else:\n context = match_rules_context(tree, rules)\n if (not context):\n return None\n if fun:\n args = fun.__code__.co_varnames\n if multi:\n res = []\n for c in context:\n action_context = {}\n for arg in args:\n if (arg in c):\n action_context[arg] = c[arg]\n res.append(fun(**action_context))\n return res\n else:\n action_context = {}\n for arg in args:\n if (arg in context):\n action_context[arg] = context[arg]\n return fun(**action_context)\n else:\n return context", "docstring": "Matches a Tree structure with the given query rules.\n\n Query rules are represented as a dictionary of template to action.\n Action is either a function, or a dictionary of subtemplate parameter to rules::\n\n rules = { 'template' : { 'key': rules } }\n | { 'template' : {} }\n\nArgs:\n tree (Tree): Parsed tree structure\n rules (dict): A dictionary of query rules\n fun (function): Function to call with context (set to None if you want to return context)\n multi (Bool): If True, returns all matched contexts, else returns first matched context\n\nReturns:\n Contexts from matched rules", "source": "codesearchnet_filtered"} -{"code": "def greedy_coloring(adj):\n coloring = {}\n colors = {}\n possible_colors = {n: set(range(len(adj))) for n in adj}\n while possible_colors:\n n = min(possible_colors, key=(lambda n: len(possible_colors[n])))\n color = min(possible_colors[n])\n coloring[n] = color\n if (color not in colors):\n colors[color] = {n}\n else:\n colors[color].add(n)\n for neighbor in adj[n]:\n if ((neighbor in possible_colors) and (color in possible_colors[neighbor])):\n possible_colors[neighbor].remove(color)\n del possible_colors[n]\n return (coloring, colors)", "docstring": "Determines a vertex coloring.\n\nArgs:\n adj (dict): The edge structure of the graph to be colored.\n `adj` should be of the form {node: neighbors, ...} where\n neighbors is a set.\n\nReturns:\n dict: the coloring {node: color, ...}\n dict: the colors {color: [node, ...], ...}\n\nNote:\n This is a greedy heuristic: the resulting 
coloring is not\n necessarily minimal.", "source": "codesearchnet_filtered"} -{"code": "def _instantiate_data_type(self, data_type_class, data_type_args, loc):\n assert issubclass(data_type_class, DataType), ('Expected stone.data_type.DataType, got %r' % data_type_class)\n argspec = inspect.getargspec(data_type_class.__init__)\n argspec.args.remove('self')\n num_args = len(argspec.args)\n num_defaults = len((argspec.defaults or ()))\n (pos_args, kw_args) = data_type_args\n if ((num_args - num_defaults) > len(pos_args)):\n raise InvalidSpec(('Missing positional argument %s for %s type' % (quote(argspec.args[len(pos_args)]), quote(data_type_class.__name__))), *loc)\n elif ((num_args - num_defaults) < len(pos_args)):\n raise InvalidSpec(('Too many positional arguments for %s type' % quote(data_type_class.__name__)), *loc)\n args = {}\n for (i, key) in enumerate(argspec.args):\n args[key] = (i >= (num_args - num_defaults))\n for key in kw_args:\n if (key not in args):\n raise InvalidSpec(('Unknown argument %s to %s type.' % (quote(key), quote(data_type_class.__name__))), *loc)\n if (not args[key]):\n raise InvalidSpec(('Positional argument %s cannot be specified as a keyword argument.' % quote(key)), *loc)\n del args[key]\n try:\n return data_type_class(*pos_args, **kw_args)\n except ParameterError as e:\n raise InvalidSpec(('Bad argument to %s type: %s' % (quote(data_type_class.__name__), e.args[0])), *loc)", "docstring": "Responsible for instantiating a data type with additional attributes.\n This method ensures that the specified attributes are valid.\n\nArgs:\n data_type_class (DataType): The class to instantiate.\n data_type_attrs (dict): A map from str -> values of attributes.\n These will be passed into the constructor of data_type_class\n as keyword arguments.\n\nReturns:\n stone.data_type.DataType: A parameterized instance.", "source": "codesearchnet_filtered"} -{"code": "def show(self, frame):\n if (len(frame.shape) != 3):\n raise ValueError('frame should have shape with only 3 dimensions')\n if (not self.is_open):\n self.open()\n self._window.clear()\n self._window.switch_to()\n self._window.dispatch_events()\n image = ImageData(frame.shape[1], frame.shape[0], 'RGB', frame.tobytes(), pitch=(frame.shape[1] * (- 3)))\n image.blit(0, 0, width=self._window.width, height=self._window.height)\n self._window.flip()", "docstring": "Show an array of pixels on the window.\n\nArgs:\n frame (numpy.ndarray): the frame to show on the window\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def render_policy_template(account_number='', app='coreforrest', env='dev', group='forrest', items=None, pipeline_settings=None, region='us-east-1', service=''):\n statements = []\n rendered_service_policy = get_template('infrastructure/iam/{0}.json.j2'.format(service), account_number=account_number, app=app, env=env, group=group, region=region, items=items, settings=pipeline_settings)\n try:\n statement_block = json.loads(rendered_service_policy)\n statements.append(statement_block)\n except ValueError:\n LOG.debug('Need to make %s template into list.', service)\n statements = json.loads('[{0}]'.format(rendered_service_policy))\n LOG.debug('Rendered IAM Policy statements: %s', statements)\n return statements", "docstring": "Render IAM Policy template.\n\n To support multiple statement blocks, JSON objects can be separated by a\n comma. 
This function attempts to turn any invalid JSON into a valid list\n based on this comma separated assumption.\n\nArgs:\n account_number (str): AWS Account number.\n app (str): Name of Spinnaker Application.\n env (str): Environment/Account in AWS\n group (str):A Application group/namespace\n items (list): Resource names used to create a Policy per Resource.\n region (str): AWS region.\n pipeline_settings (dict): Settings from *pipeline.json*.\n service (str): Name of cloud service to find matching IAM Policy\n template.\n\nReturns:\n list: IAM Policy :obj:`dict` statements for the given service.", "source": "codesearchnet_filtered"} -{"code": "def createEditor(self, parent, option, index):\n combo = QtGui.QComboBox(parent)\n combo.addItems(SupportedDtypes.names())\n combo.currentIndexChanged.connect(self.currentIndexChanged)\n return combo", "docstring": "Creates an Editor Widget for the given index.\n\n Enables the user to manipulate the displayed data in place. An editor\n is created, which performs the change.\n The widget used will be a `QComboBox` with all available datatypes in the\n `pandas` project.\n\nArgs:\n parent (QtCore.QWidget): Defines the parent for the created editor.\n option (QtGui.QStyleOptionViewItem): contains all the information\n that QStyle functions need to draw the items.\n index (QtCore.QModelIndex): The item/index which shall be edited.\n\nReturns:\n QtGui.QWidget: he widget used to edit the item specified by index\n for editing.", "source": "codesearchnet_filtered"} -{"code": "def nmf_ensemble(data, k, n_runs=10, W_list=[], **nmf_params):\n nmf = NMF(k)\n if (len(W_list) == 0):\n W_list = []\n for i in range(n_runs):\n W = nmf.fit_transform(data)\n W_list.append(W)\n W_stacked = np.hstack(W_list)\n nmf_w = nmf.fit_transform(W_stacked)\n nmf_h = nmf.components_\n H_new = data.T.dot(nmf_w).T\n nmf2 = NMF(k, init='custom')\n nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)\n H_new = nmf2.components_\n return (nmf_w, H_new)", "docstring": "Runs an ensemble method on the list of NMF W matrices...\n\nArgs:\n data: genes x cells array (should be log + cell-normalized)\n k: number of classes\n n_runs (optional): number of random initializations of state estimation\n M_list (optional): list of M arrays from state estimation\n se_params (optional): optional poisson_estimate_state params\n\nReturns:\n W_new\n H_new", "source": "codesearchnet_filtered"} -{"code": "def update_ports(self, ports, id_or_uri, timeout=(- 1)):\n resources = merge_default_values(ports, {'type': 'port'})\n uri = (self._client.build_uri(id_or_uri) + '/update-ports')\n return self._client.update(resources, uri, timeout)", "docstring": "Updates the interconnect ports.\n\nArgs:\n id_or_uri: Can be either the interconnect id or the interconnect uri.\n ports (list): Ports to update.\n timeout: Timeout in seconds. Wait for task completion by default. 
The timeout does not abort the operation\n in OneView; it just stops waiting for its completion.\n\nReturns:\n dict: The interconnect.", "source": "codesearchnet_filtered"} -{"code": "def map_seqprop_resnums_to_seqprop_resnums(self, resnums, seqprop1, seqprop2):\n resnums = ssbio.utils.force_list(resnums)\n alignment = self._get_seqprop_to_seqprop_alignment(seqprop1=seqprop1, seqprop2=seqprop2)\n mapped = ssbio.protein.sequence.utils.alignment.map_resnum_a_to_resnum_b(resnums=resnums, a_aln=alignment[0], b_aln=alignment[1])\n return mapped", "docstring": "Map a residue number in any SeqProp to another SeqProp using the pairwise alignment information.\n\nArgs:\n resnums (int, list): Residue numbers in seqprop1\n seqprop1 (SeqProp): SeqProp object the resnums match to\n seqprop2 (SeqProp): SeqProp object you want to map the resnums to\n\nReturns:\n dict: Mapping of seqprop1 residue numbers to seqprop2 residue numbers. If mappings don't exist in this\n dictionary, that means the residue number cannot be mapped according to alignment!", "source": "codesearchnet_filtered"} -{"code": "def http_list(self, path, query_data={}, as_list=None, **kwargs):\n as_list = (True if (as_list is None) else as_list)\n get_all = kwargs.pop('all', False)\n url = self._build_url(path)\n if (get_all is True):\n return list(GitlabList(self, url, query_data, **kwargs))\n if (('page' in kwargs) or (as_list is True)):\n return list(GitlabList(self, url, query_data, get_next=False, **kwargs))\n return GitlabList(self, url, query_data, **kwargs)", "docstring": "Make a GET request to the Gitlab server for list-oriented queries.\n\nArgs:\n path (str): Path or full URL to query ('/projects' or\n 'http://whatever/v4/api/projecs')\n query_data (dict): Data to send as query parameters\n **kwargs: Extra options to send to the server (e.g. sudo, page,\n per_page)\n\nReturns:\n list: A list of the objects returned by the server. If `as_list` is\n False and no pagination-related arguments (`page`, `per_page`,\n `all`) are defined then a GitlabList object (generator) is returned\n instead. 
This object will make API calls when needed to fetch the\n next items from the server.\n\nRaises:\n GitlabHttpError: When the return code is not 2xx\n GitlabParsingError: If the json data could not be parsed", "source": "codesearchnet_filtered"} -{"code": "def quaternion_from_axis_rotation(angle, axis):\n out = np.zeros(4, dtype=float)\n if (axis == 'x'):\n out[1] = 1\n elif (axis == 'y'):\n out[2] = 1\n elif (axis == 'z'):\n out[3] = 1\n else:\n raise ValueError('Invalid axis input.')\n out *= math.sin((angle / 2.0))\n out[0] = math.cos((angle / 2.0))\n return Quaternion(out)", "docstring": "Return quaternion for rotation about given axis.\n\nArgs:\n angle (float): Angle in radians.\n axis (str): Axis for rotation\n\nReturns:\n Quaternion: Quaternion for axis rotation.\n\nRaises:\n ValueError: Invalid input axis.", "source": "codesearchnet_filtered"} -{"code": "def _GetVisitSource(self, visit_identifier, cache, database):\n sync_cache_results = cache.GetResults('sync')\n if (not sync_cache_results):\n result_set = database.Query(self._SYNC_CACHE_QUERY)\n cache.CacheQueryResults(result_set, 'sync', 'id', ('source',))\n sync_cache_results = cache.GetResults('sync')\n if (sync_cache_results and visit_identifier):\n results = sync_cache_results.get(visit_identifier, None)\n if results:\n return results[0]\n return None", "docstring": "Retrieves a visit source type based on the identifier.\n\nArgs:\n visit_identifier (str): identifier from the visits table for the\n particular record.\n cache (SQLiteCache): cache which contains cached results from querying\n the visit_source table.\n database (SQLiteDatabase): database.\n\nReturns:\n int: visit source type or None if no visit source type was found for\n the identifier.", "source": "codesearchnet_filtered"} -{"code": "def parse(self, text, layers=None):\n params = {'text': text, 'key': self.key}\n if (layers is not None):\n if isinstance(layers, six.string_types):\n params['layers'] = layers\n elif isinstance(layers, collections.Iterable):\n params['layers'] = ','.join(layers)\n req = requests.get(self.NLU_URL, params=params)\n return req.json()", "docstring": "Parsing passed text to json.\n\nArgs:\n text: Text to parse.\n layers (optional): Special fields. 
Only one string\n or iterable object (e.g \"Data\", (\"Data\", \"Fio\")).\n Only these fields will be returned.\n\nReturns:\n The parsed text into a json object.", "source": "codesearchnet_filtered"} -{"code": "def get_score(self, error=None):\n if (error is not None):\n self.error = error\n if (self.error >= 0):\n return (1 / (self.error + 1))\n else:\n return (1 + abs(self.error))", "docstring": "Calculate bee's fitness score given a value returned by the fitness\n function\n\nArgs:\n error (float): value returned by the fitness function\n\nReturns:\n float: derived fitness score", "source": "codesearchnet_filtered"} -{"code": "def _FormatTag(self, event):\n tag = getattr(event, 'tag', None)\n if (not tag):\n return '-'\n return ' '.join(tag.labels)", "docstring": "Formats the event tag.\n\nArgs:\n event (EventObject): event.\n\nReturns:\n str: event tag field.", "source": "codesearchnet_filtered"} -{"code": "def resolve_mode(self, name):\n if (name not in settings.CODEMIRROR_MODES):\n msg = \"Given config name '{}' does not exists in 'settings.CODEMIRROR_MODES'.\"\n raise UnknowModeError(msg.format(name))\n return settings.CODEMIRROR_MODES.get(name)", "docstring": "From given mode name, return mode file path from\n ``settings.CODEMIRROR_MODES`` map.\n\nArgs:\n name (string): Mode name.\n\nRaises:\n KeyError: When given name does not exist in\n ``settings.CODEMIRROR_MODES``.\n\nReturns:\n string: Mode file path.", "source": "codesearchnet_filtered"} -{"code": "def lines_from_file(path, as_interned=False, encoding=None):\n lines = None\n with io.open(path, encoding=encoding) as f:\n if as_interned:\n lines = [sys.intern(line) for line in f.read().splitlines()]\n else:\n lines = f.read().splitlines()\n return lines", "docstring": "Create a list of file lines from a given filepath.\n\nArgs:\n path (str): File path\n as_interned (bool): List of \"interned\" strings (default False)\n\nReturns:\n strings (list): File line list", "source": "codesearchnet_filtered"} -{"code": "def normal_mean(data, variance):\n if (not isinstance(data, np.ndarray)):\n data = np.array(data)\n i_variance_2 = (1 / (variance ** 2))\n cmm = [0.0]\n cmm.extend(np.cumsum(data))\n cmm2 = [0.0]\n cmm2.extend(np.cumsum(np.abs(data)))\n\n def cost(start, end):\n ' Cost function for normal distribution with variable mean\\n\\n Args:\\n start (int): start index\\n end (int): end index\\n Returns:\\n float: Cost, from start to end\\n '\n cmm2_diff = (cmm2[end] - cmm2[start])\n cmm_diff = pow((cmm[end] - cmm[start]), 2)\n i_diff = (end - start)\n diff = (cmm2_diff - cmm_diff)\n return ((diff / i_diff) * i_variance_2)\n return cost", "docstring": "Creates a segment cost function for a time series with a\n Normal distribution with changing mean\n\nArgs:\n data (:obj:`list` of float): 1D time series data\n variance (float): variance\n\nReturns:\n function: Function with signature\n (int, int) -> float\n where the first arg is the starting index, and the second\n is the last arg. 
Returns the cost of that segment", "source": "codesearchnet_filtered"} -{"code": "def by_issn(issn):\n old_url = aleph.ALEPH_URL\n aleph.ALEPH_URL = NTK_ALEPH_URL\n records = aleph.getISSNsXML(issn, base='STK02')\n aleph.ALEPH_URL = old_url\n for record in records:\n marc = MARCXMLRecord(record)\n additional_info = {'222': marc.get('222', None), 'PER': marc.get('PER', None), '776': marc.get('776', None), '008': marc.get('008', None), 'alt_end_date': ''}\n additional_info = {key: val for (key, val) in additional_info.iteritems() if val}\n alt_end_date = None\n alt_creation_date = None\n if additional_info['008']:\n alt_creation_date = additional_info['008'][7:11]\n alt_end_date = additional_info['008'][11:15]\n if (alt_end_date in ['9999', '****']):\n alt_creation_date += '-'\n alt_end_date = None\n additional_info['alt_end_date'] = alt_end_date\n author = Author.parse_author(marc)\n model = Model(url=_first_or_none(marc.get('856u')), conspect=_first_or_none(marc.get('072a')), annotation_tags=_first_or_none(marc.get('520a')), periodicity=_first_or_none(marc.get('310a')), title_tags=_first_or_none(marc.get('222a')), subtitle_tags=_first_or_none(marc.get('245b')), place_tags=remove_hairs((_first_or_none(marc.get('260a')) or '')), author_tags=(author._asdict() if author else None), publisher_tags=remove_hairs(((_first_or_none(marc.get('260b')) or _first_or_none(marc.get('264b')) or ''),), ', '), creation_dates=_first_or_none(marc.get('260c', [alt_creation_date])), lang_tags=_first_or_none(marc.get('040b')), keyword_tags=marc.get('650a07'), source_info=_first_or_none(marc.get('500a')), original_xml=record, additional_info=additional_info)\n (yield _add_source(model))", "docstring": "Query aleph for records with given `issn`. The lookup is directed to the\n NTK's Aleph.\n\nArgs:\n issn (str): ISSN of the periodical.\n\nReturns:\n obj: :class:`Model` instances for each record.", "source": "codesearchnet_filtered"} -{"code": "def end_day_to_datetime(end_day, config):\n day_start_time = config['day_start']\n day_end_time = get_day_end(config)\n if (day_start_time == datetime.time(0, 0, 0)):\n end = datetime.datetime.combine(end_day, day_end_time)\n else:\n end = (datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1))\n return end", "docstring": "Convert a given end day to its proper datetime.\n\n This is non trivial because of variable ``day_start``. We want to make sure\n that even if an 'end day' is specified the actual point in time may reach into the following\n day.\n\nArgs:\n end (datetime.date): Raw end date that is to be adjusted.\n config: Controller config containing information on when a workday starts.\n\nReturns:\n datetime.datetime: The endday as a adjusted datetime object.\n\nExample:\n Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to\n consider even points in time up to ``2015-04-02 5:29``. 
That is to represent that a\n *work day*\n does not match *calendar days*.\n\nNote:\n An alternative implementation for the similar problem in legacy hamster:\n ``hamster.storage.db.Storage.__get_todays_facts``.", "source": "codesearchnet_filtered"} -{"code": "def from_string(species_string: str):\n m = re.search('([A-Z][a-z]*)([0-9.]*)([+\\\\-]*)(.*)', species_string)\n if m:\n sym = m.group(1)\n if ((m.group(2) == '') and (m.group(3) == '')):\n oxi = 0\n else:\n oxi = (1 if (m.group(2) == '') else float(m.group(2)))\n oxi = ((- oxi) if (m.group(3) == '-') else oxi)\n properties = None\n if m.group(4):\n toks = m.group(4).split('=')\n properties = {toks[0]: float(toks[1])}\n return DummySpecie(sym, oxi, properties)\n raise ValueError('Invalid DummySpecies String')", "docstring": "Returns a Dummy from a string representation.\n\nArgs:\n species_string (str): A string representation of a dummy\n species, e.g., \"X2+\", \"X3+\".\n\nReturns:\n A DummySpecie object.\n\nRaises:\n ValueError if species_string cannot be intepreted.", "source": "codesearchnet_filtered"} -{"code": "def is_github_task(task):\n return any(((task.get('schedulerId') == 'taskcluster-github'), task.get('extra', {}).get('tasks_for', '').startswith('github-'), is_github_url(task.get('metadata', {}).get('source', ''))))", "docstring": "Determine if a task is related to GitHub.\n\n This function currently looks into the ``schedulerId``, ``extra.tasks_for``, and\n ``metadata.source``.\n\nArgs:\n task (dict): the task definition to check.\n\nReturns:\n bool: True if a piece of data refers to GitHub", "source": "codesearchnet_filtered"} -{"code": "def payment(self, origin, destination, amount):\n if (type(amount) != Decimal):\n amount = Decimal(amount)\n if (amount <= 0):\n raise Exception('Amount must be a positive number')\n all_addresses = []\n accounts = self.listaccounts()\n if (origin in accounts):\n if (destination in accounts):\n with self.openwallet():\n result = self.move(origin, destination, amount)\n return self.record_tx(origin, None, amount, result, destination)\n for account in accounts:\n addresses = self.getaddressesbyaccount(account)\n if (destination in addresses):\n with self.openwallet():\n result = self.move(origin, account, amount)\n return self.record_tx(origin, destination, amount, result, account)\n else:\n with self.openwallet():\n txhash = self.sendfrom(origin, destination, amount)\n return self.record_tx(origin, destination, amount, txhash)", "docstring": "Convenience method for sending Bitcoins.\n\n Send coins from origin to destination. Calls record_tx to log the\n transaction to database. Uses free, instant \"move\" transfers\n if addresses are both local (in the same wallet), and standard\n \"sendfrom\" transactions otherwise.\n\n The sender is required to be specified by user_id (account label);\n however, the recipient can be specified either by Bitcoin address\n (anyone) or user_id (if the user is local).\n\n Payment tries sending Bitcoins in this order:\n 1. \"move\" from account to account (local)\n 2. \"move\" from account to address (local)\n 3. 
\"sendfrom\" account to address (broadcast)\n\nArgs:\n origin (str): user_id of the sender\n destination (str): coin address or user_id of the recipient\n amount (str, Decimal, number): amount to send\n\nReturns:\n bool: True if successful, False otherwise", "source": "codesearchnet_filtered"} -{"code": "def join(self, *args, **kwargs):\n super(ThreadReturn, self).join(*args, **kwargs)\n return self._return", "docstring": "Joins the thread.\n\nArgs:\n self (ThreadReturn): the ``ThreadReturn`` instance\n args: optional list of arguments\n kwargs: optional key-word arguments\n\nReturns:\n The return value of the exited thread.", "source": "codesearchnet_filtered"} -{"code": "def create_symmetric_key(self, algorithm, length):\n if (algorithm not in self._symmetric_key_algorithms.keys()):\n raise exceptions.InvalidField('The cryptographic algorithm {0} is not a supported symmetric key algorithm.'.format(algorithm))\n cryptography_algorithm = self._symmetric_key_algorithms.get(algorithm)\n if (length not in cryptography_algorithm.key_sizes):\n raise exceptions.InvalidField('The cryptographic length ({0}) is not valid for the cryptographic algorithm ({1}).'.format(length, algorithm.name))\n self.logger.info('Generating a {0} symmetric key with length: {1}'.format(algorithm.name, length))\n key_bytes = os.urandom((length // 8))\n try:\n cryptography_algorithm(key_bytes)\n except Exception as e:\n self.logger.exception(e)\n raise exceptions.CryptographicFailure('Invalid bytes for the provided cryptographic algorithm.')\n return {'value': key_bytes, 'format': enums.KeyFormatType.RAW}", "docstring": "Create a symmetric key.\n\nArgs:\n algorithm(CryptographicAlgorithm): An enumeration specifying the\n algorithm for which the created key will be compliant.\n length(int): The length of the key to be created. This value must\n be compliant with the constraints of the provided algorithm.\n\nReturns:\n dict: A dictionary containing the key data, with the following\n key/value fields:\n * value - the bytes of the key\n * format - a KeyFormatType enumeration for the bytes format\n\nRaises:\n InvalidField: Raised when the algorithm is unsupported or the\n length is incompatible with the algorithm.\n CryptographicFailure: Raised when the key generation process\n fails.\n\nExample:\n >>> engine = CryptographyEngine()\n >>> key = engine.create_symmetric_key(\n ... CryptographicAlgorithm.AES, 256)", "source": "codesearchnet_filtered"} -{"code": "def is_allowed(self, filepath, excludes=[]):\n if os.path.isabs(filepath):\n raise FinderException(\"'Finder.is_allowed()' only accept relative filepath\")\n if excludes:\n for pattern in excludes:\n if fnmatch.fnmatch(filepath, pattern):\n return False\n return True", "docstring": "Check from exclude patterns if a relative filepath is allowed\n\nArgs:\n filepath (str): A relative file path. (exclude patterns are\n allways based from the source directory).\n\n Keyword Arguments:\n excludes (list): A list of excluding (glob) patterns. 
If filepath\n matchs one of patterns, filepath is not allowed.\n\nRaises:\n boussole.exception.FinderException: If given filepath is absolute.\n\nReturns:\n str: Filepath with new extension.", "source": "codesearchnet_filtered"} -{"code": "def get_enrollments(self, course_id=None, usernames=None):\n params = {}\n if (course_id is not None):\n params['course_id'] = course_id\n if ((usernames is not None) and isinstance(usernames, list)):\n params['username'] = ','.join(usernames)\n done = False\n while (not done):\n (enrollments, next_cursor) = self._get_enrollments_list_page(params)\n for enrollment in enrollments:\n (yield Enrollment(enrollment))\n if next_cursor:\n params['cursor'] = next_cursor\n else:\n done = True", "docstring": "List all course enrollments.\n\nArgs:\n course_id (str, optional): If used enrollments will be filtered to the specified\n course id.\n usernames (list, optional): List of usernames to filter enrollments.\n\nNote:\n - This method returns an iterator to avoid going through the entire pagination at once.\n - The :class:`Enrollments` instance returned for each generated item will not have any\n course details.\n\nExample:\n Get all enrollments for a specific course id\n >>> api = EdxApi({'access_token': 'token'}, 'http://base_url')\n >>> enrollments = api.enrollments.get_enrollments(course_id='course_id')\n >>> for enrollment in enrollments:\n do_something(enrollment)\n\n Get all enrollments for a set of usernames\n >>> api = EdxApi({'access_token': 'token'}, 'http://base_url')\n >>> enrollments = api.enrollments.get_enrollments(usernames=['user1', 'user2'])\n >>> for enrollment in enrollments:\n do_something(enrollment)\n\nReturns:\n Generator with an instance of :class:`Enrollments` for each item.", "source": "codesearchnet_filtered"} -{"code": "def pep8_check(files):\n files = fs.wrap_paths(files)\n cfg_path = conf.get_path('lint.pep8_cfg', 'ops/tools/pep8.ini')\n pep8_cmd = 'pep8 --config {} {}'.format(cfg_path, files)\n return shell.run(pep8_cmd, exit_on_error=False).return_code", "docstring": "Run code checks using pep8.\n\nArgs:\n files (list[str]):\n A list of files to check\n\nReturns:\n bool: **True** if all files passed the checks, **False** otherwise.\n\n pep8 tool is **very** fast. Especially compared to pylint and the bigger the\n code base the bigger the difference. If you want to reduce check times you\n might disable all pep8 checks in pylint and use pep8 for that. 
This way you\n use pylint only for the more advanced checks (the number of checks enabled\n in pylint will make a visible difference in it's run times).", "source": "codesearchnet_filtered"} -{"code": "def from_authorized_user_file(cls, filename, scopes=None):\n with io.open(filename, 'r', encoding='utf-8') as json_file:\n data = json.load(json_file)\n return cls.from_authorized_user_info(data, scopes)", "docstring": "Creates a Credentials instance from an authorized user json file.\n\nArgs:\n filename (str): The path to the authorized user json file.\n scopes (Sequence[str]): Optional list of scopes to include in the\n credentials.\n\nReturns:\n google.oauth2.credentials.Credentials: The constructed\n credentials.\n\nRaises:\n ValueError: If the file is not in the expected format.", "source": "codesearchnet_filtered"} -{"code": "def VerifyStructure(self, parser_mediator, line):\n try:\n structure = self._LINE.parseString(line)\n except pyparsing.ParseException:\n logger.debug('Not a SkyDrive old log file')\n return False\n (day_of_month, month, year, hours, minutes, seconds, milliseconds) = structure.date_time\n time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds, milliseconds)\n try:\n dfdatetime_time_elements.TimeElementsInMilliseconds(time_elements_tuple=time_elements_tuple)\n except ValueError:\n logger.debug('Not a SkyDrive old log file, invalid date and time: {0!s}'.format(structure.date_time))\n return False\n return True", "docstring": "Verify that this file is a SkyDrive old log file.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n line (str): line from a text file.\n\nReturns:\n bool: True if the line is in the expected format, False if not.", "source": "codesearchnet_filtered"} -{"code": "def trace_set_buffer_capacity(self, size):\n cmd = enums.JLinkTraceCommand.SET_CAPACITY\n data = ctypes.c_uint32(size)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to set trace buffer size.')\n return None", "docstring": "Sets the capacity for the trace buffer.\n\nArgs:\n self (JLink): the ``JLink`` instance.\n size (int): the new capacity for the trace buffer.\n\nReturns:\n ``None``", "source": "codesearchnet_filtered"} -{"code": "def GetRegistryFileMapping(self, registry_file):\n if (not registry_file):\n return ''\n candidate_mappings = []\n for mapping in self._REGISTRY_FILE_MAPPINGS_NT:\n if (not mapping.unique_key_paths):\n continue\n match = True\n for key_path in mapping.unique_key_paths:\n registry_key = registry_file.GetKeyByPath(key_path)\n if (not registry_key):\n match = False\n if match:\n candidate_mappings.append(mapping)\n if (not candidate_mappings):\n return ''\n if (len(candidate_mappings) == 1):\n return candidate_mappings[0].key_path_prefix\n key_path_prefixes = frozenset([mapping.key_path_prefix for mapping in candidate_mappings])\n expected_key_path_prefixes = frozenset(['HKEY_CURRENT_USER', 'HKEY_CURRENT_USER\\\\Software\\\\Classes'])\n if (key_path_prefixes == expected_key_path_prefixes):\n return 'HKEY_CURRENT_USER'\n raise RuntimeError('Unable to resolve Windows Registry file mapping.')", "docstring": "Determines the Registry file mapping based on the content of the file.\n\nArgs:\n registry_file (WinRegistyFile): Windows Registry file.\n\nReturns:\n str: key path prefix or an empty string.\n\nRaises:\n RuntimeError: if there are multiple matching mappings and\n the correct mapping 
cannot be resolved.", "source": "codesearchnet_filtered"} -{"code": "def nb_fit(data, P_init=None, R_init=None, epsilon=1e-08, max_iters=100):\n means = data.mean(1)\n variances = data.var(1)\n if (means > variances).any():\n raise ValueError('For NB fit, means must be less than variances')\n (genes, cells) = data.shape\n P = (1.0 - (means / variances))\n R = ((means * (1 - P)) / P)\n for i in range(genes):\n result = minimize(nb_ll_row, [P[i], R[i]], args=(data[(i, :)],), bounds=[(0, 1), (eps, None)])\n params = result.x\n P[i] = params[0]\n R[i] = params[1]\n return (P, R)", "docstring": "Fits the NB distribution to data using method of moments.\n\nArgs:\n data (array): genes x cells\n P_init (array, optional): NB success prob param - genes x 1\n R_init (array, optional): NB stopping param - genes x 1\n\nReturns:\n P, R - fit to data", "source": "codesearchnet_filtered"} -{"code": "def _initialise(self, initial_state: str='unknown') -> dict:\n initial_state = initial_state.lower()\n if ((initial_state != 'unknown') and (initial_state not in self._allowed_states)):\n raise ValueError('Invalid initial state: {}'.format(initial_state))\n _initial_state = dict(current_state=initial_state, target_state=initial_state, current_timestamp=datetime.utcnow().isoformat(), target_timestamp=datetime.utcnow().isoformat())\n return _initial_state", "docstring": "Return a dictionary used to initialise a state object.\n\n This method is used to obtain a dictionary/hash describing the initial\n state of SDP or a service in SDP.\n\nArgs:\n initial_state (str): Initial state.\n\nReturns:\n dict, Initial state configuration", "source": "codesearchnet_filtered"} -{"code": "def codemirror_parameters(field):\n manifesto = CodemirrorAssetTagRender()\n names = manifesto.register_from_fields(field)\n config = manifesto.get_codemirror_parameters(names[0])\n return mark_safe(json.dumps(config))", "docstring": "Filter to include CodeMirror parameters as a JSON string for a single\n field.\n\n This must be called only on an allready rendered field, meaning you must\n not use this filter on a field before a form. Else, the field widget won't\n be correctly initialized.\n\nExample:\n ::\n\n {% load djangocodemirror_tags %}\n {{ form.myfield|codemirror_parameters }}\n\nArgs:\n field (djangocodemirror.fields.CodeMirrorField): A form field.\n\nReturns:\n string: JSON object for parameters, marked safe for Django templates.", "source": "codesearchnet_filtered"} -{"code": "def rollback(self, label=None, plane='sdr'):\n begin = time.time()\n rb_label = self._chain.target_device.rollback(label=label, plane=plane)\n elapsed = (time.time() - begin)\n if label:\n self.emit_message('Configuration rollback last {:.0f}s. 
Label: {}'.format(elapsed, rb_label), log_level=logging.INFO)\n else:\n self.emit_message('Configuration failed.', log_level=logging.WARNING)\n return rb_label", "docstring": "Rollback the configuration.\n\n This method rolls back the configuration on the device.\n\nArgs:\n label (text): The configuration label ID\n plane: (text): sdr or admin\n\nReturns:\n A string with commit label or None", "source": "codesearchnet_filtered"} -{"code": "def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):\n countriesdata = cls.countriesdata(use_live=use_live)\n m49 = countriesdata['m49iso3'].get(iso3)\n if (m49 is not None):\n return m49\n if (exception is not None):\n raise exception\n return None", "docstring": "Get M49 from ISO3 code\n\nArgs:\n iso3 (str): ISO3 code for which to get M49 code\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\n Optional[int]: M49 code", "source": "codesearchnet_filtered"} -{"code": "def encrypt(self, plainText):\n encryptedResult = ''\n for index in range(0, len(plainText), BLOCK_SIZE):\n block = plainText[index:(index + BLOCK_SIZE)]\n if (len(block) < BLOCK_SIZE):\n block = zero_pad(block, BLOCK_SIZE)\n encryptedResult += self.encrypt_block(block)\n return encryptedResult", "docstring": "Encrypt an arbitrary-length block of data.\n\n NOTE: This function formerly worked only on 16-byte blocks of `plainText`.\n code that assumed this should still work fine, but can optionally be\n modified to call `encrypt_block` instead.\n\nArgs:\n plainText (str): data to encrypt. If the data is not a multiple of 16\n bytes long, it will be padded with null (0x00) bytes until it is.\n\nReturns:\n encrypted data. Note that this will always be a multiple of 16 bytes\n long.", "source": "codesearchnet_filtered"} -{"code": "def compute_fat_line(nodes):\n (coeff_a, coeff_b, coeff_c) = compute_implicit_line(nodes)\n (_, num_nodes) = nodes.shape\n d_min = 0.0\n d_max = 0.0\n for index in six.moves.xrange(1, (num_nodes - 1)):\n curr_dist = (((coeff_a * nodes[(0, index)]) + (coeff_b * nodes[(1, index)])) + coeff_c)\n if (curr_dist < d_min):\n d_min = curr_dist\n elif (curr_dist > d_max):\n d_max = curr_dist\n return (coeff_a, coeff_b, coeff_c, d_min, d_max)", "docstring": "Compute the \"fat line\" around a B |eacute| zier curve.\n\n Both computes the implicit (normalized) form\n\n .. 
math::\n\n ax + by + c = 0\n\n for the line connecting the first and last node in ``nodes``.\n Also computes the maximum and minimum distances to that line\n from each control point.\n\nArgs:\n nodes (numpy.ndarray): ``2 x N`` array of nodes in a curve.\n\nReturns:\n Tuple[float, float, float, float, float]: The 5-tuple of\n\n * The :math:`x` coefficient :math:`a`\n * The :math:`y` coefficient :math:`b`\n * The constant :math:`c`\n * The \"minimum\" distance to the fat line among the control points.\n * The \"maximum\" distance to the fat line among the control points.", "source": "codesearchnet_filtered"} -{"code": "def AppendPathEntries(cls, path, path_separator, number_of_wildcards, skip_first):\n if (path[(- 1)] == path_separator):\n path = path[:(- 1)]\n if skip_first:\n path = ''.join([path, path_separator, '*'])\n number_of_wildcards -= 1\n paths = []\n for _ in range(0, number_of_wildcards):\n path = ''.join([path, path_separator, '*'])\n paths.append(path)\n return paths", "docstring": "Appends glob wildcards to a path.\n\n This function will append glob wildcards \"*\" to a path, returning paths\n with an additional glob wildcard up to the specified number. E.g. given\n the path \"/tmp\" and a number of 2 wildcards, this function will return\n \"tmp/*\", \"tmp/*/*\". When skip_first is true the path with the first\n wildcard is not returned as a result.\n\nArgs:\n path (str): path to append glob wildcards to.\n path_separator (str): path segment separator.\n number_of_wildcards (int): number of glob wildcards to append.\n skip_first (bool): True if the the first path with glob wildcard should\n be skipped as a result.\n\nReturns:\n list[str]: paths with glob wildcards.", "source": "codesearchnet_filtered"} -{"code": "def orth_gs(order, dist, normed=False, sort='GR', cross_truncation=1.0, **kws):\n logger = logging.getLogger(__name__)\n dim = len(dist)\n if isinstance(order, int):\n if (order == 0):\n return chaospy.poly.Poly(1, dim=dim)\n basis = chaospy.poly.basis(0, order, dim, sort, cross_truncation=cross_truncation)\n else:\n basis = order\n basis = list(basis)\n polynomials = [basis[0]]\n if normed:\n for idx in range(1, len(basis)):\n for idy in range(idx):\n orth = chaospy.descriptives.E((basis[idx] * polynomials[idy]), dist, **kws)\n basis[idx] = (basis[idx] - (polynomials[idy] * orth))\n norms = chaospy.descriptives.E((polynomials[(- 1)] ** 2), dist, **kws)\n if (norms <= 0):\n logger.warning('Warning: Polynomial cutoff at term %d', idx)\n break\n basis[idx] = (basis[idx] / numpy.sqrt(norms))\n polynomials.append(basis[idx])\n else:\n norms = [1.0]\n for idx in range(1, len(basis)):\n for idy in range(idx):\n orth = chaospy.descriptives.E((basis[idx] * polynomials[idy]), dist, **kws)\n basis[idx] = (basis[idx] - ((polynomials[idy] * orth) / norms[idy]))\n norms.append(chaospy.descriptives.E((polynomials[(- 1)] ** 2), dist, **kws))\n if (norms[(- 1)] <= 0):\n logger.warning('Warning: Polynomial cutoff at term %d', idx)\n break\n polynomials.append(basis[idx])\n return chaospy.poly.Poly(polynomials, dim=dim, shape=(len(polynomials),))", "docstring": "Gram-Schmidt process for generating orthogonal polynomials.\n\nArgs:\n order (int, Poly):\n The upper polynomial order. Alternative a custom polynomial basis\n can be used.\n dist (Dist):\n Weighting distribution(s) defining orthogonality.\n normed (bool):\n If True orthonormal polynomials will be used instead of monic.\n sort (str):\n Ordering argument passed to poly.basis. 
If custom basis is used,\n argument is ignored.\n cross_truncation (float):\n Use hyperbolic cross truncation scheme to reduce the number of\n terms in expansion.\n\nReturns:\n (Poly):\n The orthogonal polynomial expansion.\n\nExample:\n >>> Z = chaospy.J(chaospy.Normal(), chaospy.Normal())\n >>> print(chaospy.around(chaospy.orth_gs(2, Z), 4))\n [1.0, q1, q0, q1^2-1.0, q0q1, q0^2-1.0]", "source": "codesearchnet_filtered"} -{"code": "def parse_cl_function(cl_code, dependencies=()):\n from mot.lib.cl_function import SimpleCLFunction\n\n def separate_cl_functions(input_str):\n 'Separate all the OpenCL functions.\\n\\n This creates a list of strings, with for each function found the OpenCL code.\\n\\n Args:\\n input_str (str): the string containing one or more functions.\\n\\n Returns:\\n list: a list of strings, with one string per found CL function.\\n '\n\n class Semantics():\n\n def __init__(self):\n self._functions = []\n\n def result(self, ast):\n return self._functions\n\n def arglist(self, ast):\n return '({})'.format(', '.join(ast))\n\n def function(self, ast):\n\n def join(items):\n result = ''\n for item in items:\n if isinstance(item, str):\n result += item\n else:\n result += join(item)\n return result\n self._functions.append(join(ast).strip())\n return ast\n return _extract_cl_functions_parser.parse(input_str, semantics=Semantics())\n functions = separate_cl_functions(cl_code)\n return SimpleCLFunction.from_string(functions[(- 1)], dependencies=(list((dependencies or [])) + [SimpleCLFunction.from_string(s) for s in functions[:(- 1)]]))", "docstring": "Parse the given OpenCL string to a single SimpleCLFunction.\n\n If the string contains more than one function, we will return only the last, with all the other added as a\n dependency.\n\nArgs:\n cl_code (str): the input string containing one or more functions.\n dependencies (Iterable[CLCodeObject]): The list of CL libraries this function depends on\n\nReturns:\n mot.lib.cl_function.SimpleCLFunction: the CL function for the last function in the given strings.", "source": "codesearchnet_filtered"} -{"code": "def typewrite(message, interval=0.0, pause=None, _pause=True):\n interval = float(interval)\n _failSafeCheck()\n for c in message:\n if (len(c) > 1):\n c = c.lower()\n press(c, _pause=False)\n time.sleep(interval)\n _failSafeCheck()\n _autoPause(pause, _pause)", "docstring": "Performs a keyboard key press down, followed by a release, for each of\n the characters in message.\n\n The message argument can also be list of strings, in which case any valid\n keyboard name can be used.\n\n Since this performs a sequence of keyboard presses and does not hold down\n keys, it cannot be used to perform keyboard shortcuts. Use the hotkey()\n function for that.\n\nArgs:\n message (str, list): If a string, then the characters to be pressed. If a\n list, then the key names of the keys to press in order. The valid names\n are listed in KEYBOARD_KEYS.\n interval (float, optional): The number of seconds in between each press.\n 0.0 by default, for no pause in between presses.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def _reverse_convert(x, factor1, factor2):\n return ((x * factor1) / (((1 - x) * factor2) + (x * factor1)))", "docstring": "Converts mixing ratio x in c1 - c2 tie line to that in\n comp1 - comp2 tie line.\n\nArgs:\n x (float): Mixing ratio x in c1 - c2 tie line, a float between\n 0 and 1.\n factor1 (float): Compositional ratio between composition c1 and\n processed composition comp1. 
E.g., factor for\n Composition('SiO2') and Composition('O') is 2.\n factor2 (float): Compositional ratio between composition c2 and\n processed composition comp2.\n\nReturns:\n Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.", "source": "codesearchnet_filtered"} -{"code": "def get_next(self, protocol='http', format=False, policy='loop'):\n if (not self.proxies[protocol]):\n return None\n if (policy == 'loop'):\n idx = self.idx[protocol]\n self.idx[protocol] = ((idx + 1) % len(self.proxies[protocol]))\n elif (policy == 'random'):\n idx = random.randint(0, (self.proxy_num(protocol) - 1))\n else:\n self.logger.error('Unsupported get_next policy: {}'.format(policy))\n exit()\n proxy = self.proxies[protocol][self.addr_list[protocol][idx]]\n if (proxy.weight < random.random()):\n return self.get_next(protocol, format, policy)\n if format:\n return proxy.format()\n else:\n return proxy", "docstring": "Get the next proxy\n\nArgs:\n protocol (str): 'http' or 'https'. (default 'http')\n format (bool): Whether to format the proxy. (default False)\n policy (str): Either 'loop' or 'random', indicating the policy of\n getting the next proxy. If set to 'loop', will return proxies\n in turn, otherwise will return a proxy randomly.\n\nReturns:\n Proxy or dict: If format is true, then return the formatted proxy\n which is compatible with requests.Session parameters,\n otherwise a Proxy object.", "source": "codesearchnet_filtered"} -{"code": "def get(self, vrf=None):\n match = '^router ospf .*'\n if vrf:\n match += (' vrf %s' % vrf)\n config = self.get_block(match)\n if (not config):\n return None\n response = dict()\n response.update(self._parse_router_id(config))\n response.update(self._parse_vrf(config))\n response.update(self._parse_networks(config))\n response.update(self._parse_ospf_process_id(config))\n response.update(self._parse_redistribution(config))\n response.update(self._parse_shutdown(config))\n return response", "docstring": "Returns the OSPF routing configuration\n\nArgs:\n vrf (str): VRF name to return OSPF routing config for\n\nReturns:\n dict:\n keys: router_id (int): OSPF router-id\n vrf (str): VRF of the OSPF process\n networks (dict): All networks that\n are advertised in OSPF\n ospf_process_id (int): OSPF proc id\n redistribution (dict): All protocols that\n are configured to be\n redistributed in OSPF\n shutdown (bool): Gives the current shutdown\n off the process", "source": "codesearchnet_filtered"} -{"code": "def GetHashData(hashable):\n ms = StreamManager.GetStream()\n writer = BinaryWriter(ms)\n hashable.SerializeUnsigned(writer)\n ms.flush()\n retVal = ms.ToArray()\n StreamManager.ReleaseStream(ms)\n return retVal", "docstring": "Get the data used for hashing.\n\nArgs:\n hashable (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin\n\nReturns:\n bytes:", "source": "codesearchnet_filtered"} -{"code": "def isloaded(self, name):\n if (name is None):\n return True\n if isinstance(name, str):\n return (name in [x.__module__ for x in self])\n if isinstance(name, Iterable):\n return set(name).issubset([x.__module__ for x in self])\n return False", "docstring": "Checks if given hook module has been loaded\n\nArgs:\n name (str): The name of the module to check\n\nReturns:\n bool. 
The return code::\n\n True -- Loaded\n False -- Not Loaded", "source": "codesearchnet_filtered"} -{"code": "def get_service_account_email(self, project=None):\n if (project is None):\n project = self.project\n path = ('/projects/%s/serviceAccount' % (project,))\n api_response = self._connection.api_request(method='GET', path=path)\n return api_response['email']", "docstring": "Get the email address of the project's BigQuery service account\n\nNote:\n This is the service account that BigQuery uses to manage tables\n encrypted by a key in KMS.\n\nArgs:\n project (str, optional):\n Project ID to use for retreiving service account email.\n Defaults to the client's project.\n\nReturns:\n str: service account email address\n\nExample:\n >>> from google.cloud import bigquery\n >>> client = bigquery.Client()\n >>> client.get_service_account_email()\n my_service_account@my-project.iam.gserviceaccount.com", "source": "codesearchnet_filtered"} -{"code": "def wait_for_batches(self, batch_ids, timeout=None):\n self._batch_tracker.watch_statuses(self, batch_ids)\n timeout = (timeout or DEFAULT_TIMEOUT)\n start_time = time()\n with self._wait_condition:\n while True:\n if (self._statuses is not None):\n return _format_batch_statuses(self._statuses, batch_ids, self._batch_tracker)\n if ((time() - start_time) > timeout):\n statuses = self._batch_tracker.get_statuses(batch_ids)\n return _format_batch_statuses(statuses, batch_ids, self._batch_tracker)\n self._wait_condition.wait((timeout - (time() - start_time)))", "docstring": "Locks until a list of batch ids is committed to the block chain\n or a timeout is exceeded. Returns the statuses of those batches.\n\nArgs:\n batch_ids (list of str): The ids of the batches to wait for\n timeout(int): Maximum time in seconds to wait for\n\nReturns:\n list of BatchStatus: BatchStatuses to send back to client", "source": "codesearchnet_filtered"} -{"code": "def variable(dims=1):\n if (dims == 1):\n return Poly({(1,): 1}, dim=1, shape=())\n return Poly({tuple(indices): indices for indices in numpy.eye(dims, dtype=int)}, dim=dims, shape=(dims,))", "docstring": "Simple constructor to create single variables to create polynomials.\n\nArgs:\n dims (int):\n Number of dimensions in the array.\n\nReturns:\n (Poly):\n Polynomial array with unit components in each dimension.\n\nExample:\n >>> print(variable())\n q0\n >>> print(variable(3))\n [q0, q1, q2]", "source": "codesearchnet_filtered"} -{"code": "def load_install_json(self, filename=None):\n if (filename is None):\n filename = 'install.json'\n file_fqpn = os.path.join(self.app_path, filename)\n install_json = None\n if os.path.isfile(file_fqpn):\n try:\n with open(file_fqpn, 'r') as fh:\n install_json = json.load(fh)\n except ValueError as e:\n self.handle_error('Failed to load \"{}\" file ({}).'.format(file_fqpn, e))\n else:\n self.handle_error('File \"{}\" could not be found.'.format(file_fqpn))\n return install_json", "docstring": "Return install.json data.\n\nArgs:\n filename (str, optional): Defaults to None. 
The install.json filename (for bundled\n Apps).\n\nReturns:\n dict: The contents of the install.json file.", "source": "codesearchnet_filtered"} -{"code": "def covariance_to_correlations(covariance):\n diagonal_ind = np.arange(covariance.shape[1])\n diagonal_els = covariance[(:, diagonal_ind, diagonal_ind)]\n result = (covariance / np.sqrt((diagonal_els[(:, :, None)] * diagonal_els[(:, None, :)])))\n result[np.isinf(result)] = 0\n return np.clip(np.nan_to_num(result), (- 1), 1)", "docstring": "Transform a covariance matrix into a correlations matrix.\n\n This can be seen as dividing a covariance matrix by the outer product of the diagonal.\n\n As post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1].\n\nArgs:\n covariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p).\n\nReturns:\n ndarray: the correlations matrix", "source": "codesearchnet_filtered"} -{"code": "def post_message(self, level, message, count=1, timestamp=None, now_reference=None):\n if ((len(self.messages) > 0) and (self.messages[(- 1)].message == message)):\n self.messages[(- 1)].count += 1\n else:\n msg_object = ServiceMessage(level, message, self._last_message_id, timestamp, now_reference)\n msg_object.count = count\n self.messages.append(msg_object)\n self._last_message_id += 1\n return self.messages[(- 1)]", "docstring": "Post a new message for service.\n\nArgs:\n level (int): The level of the message (info, warning, error)\n message (string): The message contents\n count (int): The number of times the message has been repeated\n timestamp (float): An optional monotonic value in seconds for when the message was created\n now_reference (float): If timestamp is not relative to monotonic() as called from this\n module then this should be now() as seen by whoever created the timestamp.\n\nReturns:\n ServiceMessage: The posted message", "source": "codesearchnet_filtered"} -{"code": "def remove_tag(self, tag):\n return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')", "docstring": "Remove a tag\n\nArgs:\n tag (str): Tag to remove\n\nReturns:\n bool: True if tag removed or False if not", "source": "codesearchnet_filtered"} -{"code": "def deploy(self, initial_instance_count, instance_type, accelerator_type=None, endpoint_name=None, **kwargs):\n endpoint_name = (endpoint_name or self.best_training_job())\n best_estimator = self.estimator.attach(self.best_training_job(), sagemaker_session=self.estimator.sagemaker_session)\n return best_estimator.deploy(initial_instance_count, instance_type, accelerator_type=accelerator_type, endpoint_name=endpoint_name, **kwargs)", "docstring": "Deploy the best trained or user specified model to an Amazon SageMaker endpoint and return a\n ``sagemaker.RealTimePredictor`` object.\n\n For more information: http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html\n\nArgs:\n initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction.\n instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction,\n for example, 'ml.c4.xlarge'.\n accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading\n and inference, for example, 'ml.eia1.medium'. 
If not specified, no Elastic Inference accelerator\n will be attached to the endpoint.\n For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html\n endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified,\n the name of the training job is used.\n **kwargs: Other arguments needed for deployment. Please refer to the ``create_model()`` method of\n the associated estimator to see what other arguments are needed.\n\nReturns:\n sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method,\n which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences.", "source": "codesearchnet_filtered"} -{"code": "def restore(self, x):\n with tf.name_scope('pad_reduce/restore'):\n x = tf.scatter_nd(indices=self.nonpad_ids, updates=x, shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0))\n return x", "docstring": "Add padding back to the given tensor.\n\nArgs:\n x (tf.Tensor): of shape [dim_compressed,...]\n\nReturns:\n a tensor of shape [dim_origin,...] with dim_compressed >= dim_origin. The\n dim is restored from the original reference tensor", "source": "codesearchnet_filtered"} -{"code": "def transform_to_mods_periodical(marc_xml, uuid, url):\n marc_xml = _read_content_or_path(marc_xml)\n transformed = xslt_transformation(marc_xml, _absolute_template_path('MARC21toPeriodicalTitle.xsl'))\n return _apply_postprocessing(marc_xml=marc_xml, xml=transformed, func=mods_postprocessor.postprocess_periodical, uuid=uuid, url=url)", "docstring": "Convert `marc_xml` to periodical MODS data format.\n\nArgs:\n marc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\n filename.\n uuid (str): UUID string giving the package ID.\n url (str): URL of the publication (public or not).\n\nReturns:\n list: Collection of transformed xml strings.", "source": "codesearchnet_filtered"} -{"code": "def _send_request(self, url, method='get', data=None, extra_headers=None):\n headers = {'Content-type': 'application/json'}\n if isinstance(extra_headers, dict):\n headers.update(extra_headers)\n if ((not data) or ('password' not in data)):\n logger.debug('Sending {method} request to {url} with data {data}'.format(method=method.upper(), url=url, data=data))\n r = self.session.request(method, url, headers=headers, data=data)\n r.raise_for_status()\n return r.json()", "docstring": "Performs a given request and returns a json object\n\nArgs:\n url (str): URL of the request\n method (str): Any of \"get\", \"post\", \"delete\"\n data (any): Possible extra data to send with the request\n extra_headers (dict): Possible extra headers to send along in the request\n\nReturns:\n dict", "source": "codesearchnet_filtered"} -{"code": "def groupby_tags(item_list, tags_list):\n groupid_to_items = defaultdict(list)\n for (tags, item) in zip(tags_list, item_list):\n for tag in tags:\n groupid_to_items[tag].append(item)\n return groupid_to_items", "docstring": "r\"\"\"\n case where an item can belong to multiple groups\n\nArgs:\n item_list (list):\n tags_list (list):\n\nReturns:\n dict: groupid_to_items\n\n CommandLine:\n python -m utool.util_dict --test-groupby_tags\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> tagged_item_list = {\n >>> 'spam': ['meat', 'protein', 'food'],\n >>> 'eggs': ['protein', 'food'],\n >>> 'cheese': ['dairy', 'protein', 'food'],\n >>> 'jam': ['fruit', 'food'],\n >>> 'banana': ['weapon', 'fruit', 'food'],\n >>> }\n >>> item_list = 
list(tagged_item_list.keys())\n >>> tags_list = list(tagged_item_list.values())\n >>> groupid_to_items = groupby_tags(item_list, tags_list)\n >>> groupid_to_items = ut.map_vals(sorted, groupid_to_items)\n >>> result = ('groupid_to_items = %s' % (ut.repr4(groupid_to_items),))\n >>> print(result)\n groupid_to_items = {\n 'dairy': ['cheese'],\n 'food': ['banana', 'cheese', 'eggs', 'jam', 'spam'],\n 'fruit': ['banana', 'jam'],\n 'meat': ['spam'],\n 'protein': ['cheese', 'eggs', 'spam'],\n 'weapon': ['banana'],\n }", "source": "codesearchnet_filtered"} -{"code": "def structure_2_lmpdata(structure, ff_elements=None, atom_style='charge'):\n s = structure.get_sorted_structure()\n (a, b, c) = s.lattice.abc\n m = s.lattice.matrix\n xhi = a\n xy = np.dot(m[1], (m[0] / xhi))\n yhi = np.sqrt(((b ** 2) - (xy ** 2)))\n xz = np.dot(m[2], (m[0] / xhi))\n yz = ((np.dot(m[1], m[2]) - (xy * xz)) / yhi)\n zhi = np.sqrt((((c ** 2) - (xz ** 2)) - (yz ** 2)))\n box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]\n box_tilt = [xy, xz, yz]\n box_tilt = (None if (not any(box_tilt)) else box_tilt)\n box = LammpsBox(box_bounds, box_tilt)\n new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])\n s.lattice = new_latt\n symbols = list(s.symbol_set)\n if ff_elements:\n symbols.extend(ff_elements)\n elements = sorted((Element(el) for el in set(symbols)))\n mass_info = [tuple(([i.symbol] * 2)) for i in elements]\n ff = ForceField(mass_info)\n topo = Topology(s)\n return LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)", "docstring": "Converts a structure to a LammpsData object with no force field\n parameters and topologies.\n\nArgs:\n structure (Structure): Input structure.\n ff_elements ([str]): List of strings of elements that must be\n present due to force field settings but not necessarily in\n the structure. Default to None.\n atom_style (str): Choose between \"atomic\" (neutral) and\n \"charge\" (charged). 
Default to \"charge\".\n\nReturns:\n LammpsData", "source": "codesearchnet_filtered"} -{"code": "def set_tif(self, interface):\n if (not ((1 << interface) & self.supported_tifs())):\n raise errors.JLinkException(('Unsupported target interface: %s' % interface))\n res = self._dll.JLINKARM_TIF_Select(interface)\n if (res != 0):\n return False\n self._tif = interface\n return True", "docstring": "Selects the specified target interface.\n\n Note that a restart must be triggered for this to take effect.\n\nArgs:\n self (Jlink): the ``JLink`` instance\n interface (int): integer identifier of the interface\n\nReturns:\n ``True`` if target was updated, otherwise ``False``.\n\nRaises:\n JLinkException: if the given interface is invalid or unsupported.", "source": "codesearchnet_filtered"} -{"code": "def gates_to_idx(gates, qregs):\n sizes = [qr.size for qr in qregs.values()]\n reg_idx = np.cumsum(([0] + sizes))\n regint = {}\n for (ind, qreg) in enumerate(qregs.values()):\n regint[qreg] = ind\n out = np.zeros((2 * len(gates)), dtype=np.int32)\n for (idx, gate) in enumerate(gates):\n out[(2 * idx)] = (reg_idx[regint[gate[0][0]]] + gate[0][1])\n out[((2 * idx) + 1)] = (reg_idx[regint[gate[1][0]]] + gate[1][1])\n return out", "docstring": "Converts gate tuples into a nested list of integers.\n\nArgs:\n gates (list): List of (QuantumRegister, int) pairs\n representing gates.\n qregs (dict): List of )QuantumRegister, int) tuples.\n\nReturns:\n list: Nested list of integers for gates.", "source": "codesearchnet_filtered"} -{"code": "def remove_option(self, section, name, value=None):\n if self._is_live():\n raise RuntimeError('Submitted units cannot update their options')\n removed = 0\n for option in list(self._data['options']):\n if (option['section'] == section):\n if (option['name'] == name):\n if ((value is None) or (option['value'] == value)):\n self._data['options'].remove(option)\n removed += 1\n if (removed > 0):\n return True\n return False", "docstring": "Remove an option from a unit\n\nArgs:\n section (str): The section to remove from.\n name (str): The item to remove.\n value (str, optional): If specified, only the option matching this value will be removed\n If not specified, all options with ``name`` in ``section`` will be removed\n\nReturns:\n True: At least one item was removed\n False: The item requested to remove was not found", "source": "codesearchnet_filtered"} -{"code": "def from_string(cls, dataset_id, default_project=None):\n output_dataset_id = dataset_id\n output_project_id = default_project\n parts = dataset_id.split('.')\n if ((len(parts) == 1) and (not default_project)):\n raise ValueError('When default_project is not set, dataset_id must be a fully-qualified dataset ID in standard SQL format. e.g. \"project.dataset_id\", got {}'.format(dataset_id))\n elif (len(parts) == 2):\n (output_project_id, output_dataset_id) = parts\n elif (len(parts) > 2):\n raise ValueError('Too many parts in dataset_id. Expected a fully-qualified dataset ID in standard SQL format. e.g. \"project.dataset_id\", got {}'.format(dataset_id))\n return cls(output_project_id, output_dataset_id)", "docstring": "Construct a dataset reference from dataset ID string.\n\nArgs:\n dataset_id (str):\n A dataset ID in standard SQL format. If ``default_project``\n is not specified, this must included both the project ID and\n the dataset ID, separated by ``.``.\n default_project (str):\n Optional. 
The project ID to use when ``dataset_id`` does not\n include a project ID.\n\nReturns:\n DatasetReference:\n Dataset reference parsed from ``dataset_id``.\n\nExample:\n >>> DatasetReference.from_string('my-project-id.some_dataset')\n DatasetReference('my-project-id', 'some_dataset')\n\nRaises:\n ValueError:\n If ``dataset_id`` is not a fully-qualified dataset ID in\n standard SQL format.", "source": "codesearchnet_filtered"} -{"code": "def path_size(p: tcod.path.AStar) -> int:\n return int(lib.TCOD_path_size(p._path_c))", "docstring": "Return the current length of the computed path.\n\nArgs:\n p (AStar): An AStar instance.\n\nReturns:\n int: Length of the path.", "source": "codesearchnet_filtered"} -{"code": "def setSeasonSchedules(self, cmd_dict=None, password='00000000'):\n result = False\n self.setContext('setSeasonSchedules')\n if (not cmd_dict):\n cmd_dict = self.m_seasons_sched_params\n try:\n if (not self.request(False)):\n self.writeCmdMsg('Bad read CRC on setting')\n elif (not self.serialCmdPwdAuth(password)):\n self.writeCmdMsg('Password failure')\n else:\n req_table = ''\n req_table += binascii.hexlify(str(cmd_dict['Season_1_Start_Month']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_1_Start_Day']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_1_Schedule']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_2_Start_Month']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_2_Start_Day']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_2_Schedule']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_3_Start_Month']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_3_Start_Day']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_3_Schedule']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_4_Start_Month']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_4_Start_Day']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_4_Schedule']).zfill(2))\n req_table += binascii.hexlify(str(0).zfill(24))\n req_str = (('015731023030383028' + req_table) + '2903')\n req_str += self.calc_crc16(req_str[2:].decode('hex'))\n self.m_serial_port.write(req_str.decode('hex'))\n if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n self.writeCmdMsg('Success(setSeasonSchedules): 06 returned.')\n result = True\n self.serialPostEnd()\n except:\n ekm_log(traceback.format_exc(sys.exc_info()))\n self.setContext('')\n return result", "docstring": "Serial command to set seasons table.\n\n If no dictionary is passed, the meter object buffer is used.\n\nArgs:\n cmd_dict (dict): Optional dictionary of season schedules.\n password (str): Optional password\n\nReturns:\n bool: True on completion and ACK.", "source": "codesearchnet_filtered"} -{"code": "def model_from_path(model_path, fuzziness=False):\n app_name = '.'.join(model_path.split('.')[:(- 1)])\n model_name = model_path.split('.')[(- 1)]\n if (not app_name):\n return None\n module = importlib.import_module(app_name)\n try:\n model = getattr(module, model_name)\n except AttributeError:\n try:\n model = getattr(getattr(module, 'models'), model_name)\n except AttributeError:\n model = get_model(model_name, app_name, fuzziness=fuzziness)\n return model", "docstring": "Find the model class for a given model path like 'project.app.model'\n\nArgs:\n path (str): dot-delimited model path, like 'project.app.model'\n\nReturns:\n Django Model-based class", "source": "codesearchnet_filtered"} 
-{"code": "def setup(self, url, stream=True, post=False, parameters=None, timeout=None):\n self.close_response()\n self.response = None\n try:\n if post:\n (full_url, parameters) = self.get_url_params_for_post(url, parameters)\n self.response = self.session.post(full_url, data=parameters, stream=stream, timeout=timeout)\n else:\n self.response = self.session.get(self.get_url_for_get(url, parameters), stream=stream, timeout=timeout)\n self.response.raise_for_status()\n except Exception as e:\n raisefrom(DownloadError, ('Setup of Streaming Download of %s failed!' % url), e)\n return self.response", "docstring": "Setup download from provided url returning the response\n\nArgs:\n url (str): URL to download\n stream (bool): Whether to stream download. Defaults to True.\n post (bool): Whether to use POST instead of GET. Defaults to False.\n parameters (Optional[Dict]): Parameters to pass. Defaults to None.\n timeout (Optional[float]): Timeout for connecting to URL. Defaults to None (no timeout).\n\nReturns:\n requests.Response: requests.Response object", "source": "codesearchnet_filtered"} -{"code": "def get_content_metadata(self, enterprise_customer):\n content_metadata = OrderedDict()\n if enterprise_customer.catalog:\n response = self._load_data(self.ENTERPRISE_CUSTOMER_ENDPOINT, detail_resource='courses', resource_id=str(enterprise_customer.uuid), traverse_pagination=True)\n for course in response['results']:\n for course_run in course['course_runs']:\n course_run['content_type'] = 'courserun'\n content_metadata[course_run['key']] = course_run\n for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all():\n response = self._load_data(self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT, resource_id=str(enterprise_customer_catalog.uuid), traverse_pagination=True, querystring={'page_size': 1000})\n for item in response['results']:\n content_id = utils.get_content_metadata_item_id(item)\n content_metadata[content_id] = item\n return content_metadata.values()", "docstring": "Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.\n\nArgs:\n enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.\n\nReturns:\n list: List of dicts containing content metadata.", "source": "codesearchnet_filtered"} -{"code": "def getHostCertPath(self, name):\n path = s_common.genpath(self.certdir, 'hosts', ('%s.crt' % name))\n if (not os.path.isfile(path)):\n return None\n return path", "docstring": "Gets the path to a host certificate.\n\nArgs:\n name (str): The name of the host keypair.\n\nExample:\n Get the path to the host certificate for the host \"myhost\":\n\n mypath = cdir.getHostCertPath('myhost')\n\nReturns:\n str: The path if exists.", "source": "codesearchnet_filtered"} -{"code": "def bin_to_mac(bin, size=6):\n if (len(bin) != size):\n raise Exception(('Invalid MAC address: %s' % bin))\n return ':'.join([binascii.hexlify(o) for o in bin])", "docstring": "Convert 6 bytes into a MAC string.\n\nArgs:\n bin (str): hex string of lenth 6.\n\nReturns:\n str: String representation of the MAC address in lower case.\n\nRaises:\n Exception: if ``len(bin)`` is not 6.", "source": "codesearchnet_filtered"} -{"code": "def headers_present(self, headers):\n headers = {name: re.compile('(.*)') for name in headers}\n self.add_matcher(matcher('HeadersMatcher', headers))", "docstring": "Defines a list of headers that must be present in the\n outgoing request in order to satisfy the matcher, no matter what value\n the headers 
hosts.\n\n Header keys are case insensitive.\n\nArgs:\n headers (list|tuple): header keys to match.\n\nReturns:\n self: current Mock instance.\n\n Example::\n\n (pook.get('server.com/api')\n .headers_present(['content-type', 'Authorization']))", "source": "codesearchnet_filtered"} -{"code": "def tile_2d(physical_shape, tile_shape, outer_name='outer', inner_name='inner', cores_name=None):\n logical_to_physical = []\n (p0, p1, p2) = physical_shape\n (t0, t1) = tile_shape\n tile_ring = _ring_2d(t0, t1)\n tiles_ring = _ring_2d((p0 // t0), (p1 // t1))\n for logical_pnum in range(((p0 * p1) * p2)):\n core_on_chip = (logical_pnum % p2)\n logical_chip_num = (logical_pnum // p2)\n logical_pos_in_tile = (logical_chip_num % (t0 * t1))\n logical_tile_num = (logical_chip_num // (t0 * t1))\n (tile_i, tile_j) = tile_ring[logical_pos_in_tile]\n (tiles_i, tiles_j) = tiles_ring[logical_tile_num]\n physical_pnum = (core_on_chip + (p2 * ((((tile_i * p1) + tile_j) + ((tiles_i * p1) * t0)) + (tiles_j * t1))))\n logical_to_physical.append(physical_pnum)\n assert (sorted(logical_to_physical) == list(range(((p0 * p1) * p2))))\n tile_size = ((t0 * t1) * p2)\n num_tiles = ((p0 * p1) // (t0 * t1))\n if cores_name:\n mesh_shape = mtf.Shape([mtf.Dimension(outer_name, int(num_tiles)), mtf.Dimension(inner_name, int((t0 * t1))), mtf.Dimension(cores_name, int(p2))])\n else:\n mesh_shape = mtf.Shape([mtf.Dimension(outer_name, int(num_tiles)), mtf.Dimension(inner_name, int(tile_size))])\n return (mesh_shape, logical_to_physical)", "docstring": "2D tiling of a 3d physical mesh.\n\n The \"outer\" mesh dimension corresponds to which tile.\n The \"inner\" mesh dimension corresponds to the position within a tile\n of processors.\n\n Optionally, if cores_name is specified, then a 3 dimensional logical mesh\n is returned, with the third dimension representing the two different\n cores within a chip. 
If cores_name is not specified, then the\n cores-in-a-chip dimension is folded into the inner dimension.\n\n TODO(noam): explain this better.\n\nExample:\n tile_2d(physical_shape=[8, 16, 2], tile_shape=[4, 4])\n\n The \"inner\" dimension has size 4x4x2=32 and corresponds to the position\n within a 4x4 tile of processors.\n\n The \"outer\" dimension has size 8/4 * 16/4 = 8, and corresponds to the 8\n tiles in the mesh.\n\nArgs:\n physical_shape: a triple of integers [X, Y, cores]\n tile_shape: a pair\n outer_name: a string\n inner_name: a string\n cores_name: an optional string\n\nReturns:\n mesh_shape: a mtf.Shape\n logical_to_physical: a list", "source": "codesearchnet_filtered"} -{"code": "def GetCloudPath(self, resource_id, cache, database):\n cloud_path = cache.GetResults('cloud_path')\n if (not cloud_path):\n results = database.Query(self.CLOUD_PATH_CACHE_QUERY)\n cache.CacheQueryResults(results, 'cloud_path', 'resource_id', ('filename', 'parent'))\n cloud_path = cache.GetResults('cloud_path')\n if (resource_id == 'folder:root'):\n return '/'\n paths = []\n (parent_path, parent_id) = cloud_path.get(resource_id, ['', ''])\n while parent_path:\n if (parent_path == 'folder:root'):\n break\n paths.append(parent_path)\n (parent_path, parent_id) = cloud_path.get(parent_id, ['', ''])\n if (not paths):\n return '/'\n paths.reverse()\n return '/{0:s}/'.format('/'.join(paths))", "docstring": "Return cloud path given a resource id.\n\nArgs:\n resource_id (str): resource identifier for the file.\n cache (SQLiteCache): cache.\n database (SQLiteDatabase): database.\n\nReturns:\n str: full path to the resource value.", "source": "codesearchnet_filtered"} -{"code": "def add_from_existing(self, resource, timeout=(- 1)):\n uri = (self.URI + '/from-existing')\n return self._client.create(resource, uri=uri, timeout=timeout)", "docstring": "Adds a volume that already exists in the Storage system\n\nArgs:\n resource (dict):\n Object to create.\n timeout:\n Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\n in OneView, just stop waiting for its completion.\n\nReturns:\n dict: Added resource.", "source": "codesearchnet_filtered"} -{"code": "def wrap_warnings(logger):\n\n def decorator(func):\n\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n showwarning = warnings.showwarning\n warnings.showwarning = warn_logging(logger)\n try:\n return func(*args, **kwargs)\n finally:\n warnings.showwarning = showwarning\n return new_func\n return decorator", "docstring": "Have the function patch `warnings.showwarning` with the given logger.\n\nArgs:\n logger (~logging.logger): the logger to wrap warnings with when\n the decorated function is called.\n\nReturns:\n `function`: a decorator function.", "source": "codesearchnet_filtered"} -{"code": "def _parse_order_by(model, order_by):\n out = []\n for key in order_by:\n key = key.strip()\n if key.startswith('+'):\n out.append(getattr(model, key[1:]))\n elif key.startswith('-'):\n out.append(getattr(model, key[1:]).desc())\n else:\n out.append(getattr(model, key))\n return out", "docstring": "This function figures out the list of orderings for the given model and\n argument.\n\nArgs:\n model (nautilus.BaseModel): The model to compute ordering against\n order_by (list of str): the list of fields to order_by. 
If the field\n starts with a `+` then the order is acending, if `-` descending,\n if no character proceeds the field, the ordering is assumed to be\n ascending.\n\nReturns:\n (list of filters): the model filters to apply to the query", "source": "codesearchnet_filtered"} -{"code": "def parse(self, text):\n tokens = self.lex(text)\n parser = Parser(tokens)\n return parser.parse()", "docstring": "Parse self.text.\n\nArgs:\n text (str): the text to lex\n\nReturns:\n object: a node representing the current rule.", "source": "codesearchnet_filtered"} -{"code": "def compose_args(self, action_name, in_argdict):\n for action in self.actions:\n if (action.name == action_name):\n break\n else:\n raise AttributeError('Unknown Action: {0}'.format(action_name))\n unexpected = (set(in_argdict) - set((argument.name for argument in action.in_args)))\n if unexpected:\n raise ValueError(\"Unexpected argument '{0}'. Method signature: {1}\".format(next(iter(unexpected)), str(action)))\n composed = []\n for argument in action.in_args:\n name = argument.name\n if (name in in_argdict):\n composed.append((name, in_argdict[name]))\n continue\n if (name in self.DEFAULT_ARGS):\n composed.append((name, self.DEFAULT_ARGS[name]))\n continue\n if (argument.vartype.default is not None):\n composed.append((name, argument.vartype.default))\n raise ValueError(\"Missing argument '{0}'. Method signature: {1}\".format(argument.name, str(action)))\n return composed", "docstring": "Compose the argument list from an argument dictionary, with\n respect for default values.\n\nArgs:\n action_name (str): The name of the action to be performed.\n in_argdict (dict): Arguments as a dict, eg\n ``{'InstanceID': 0, 'Speed': 1}. The values\n can be a string or something with a string representation.\n\nReturns:\n list: a list of ``(name, value)`` tuples.\n\nRaises:\n `AttributeError`: If this service does not support the action.\n `ValueError`: If the argument lists do not match the action\n signature.", "source": "codesearchnet_filtered"} -{"code": "def has_no_flat_neurites(neuron, tol=0.1, method='ratio'):\n return CheckResult((len(get_flat_neurites(neuron, tol, method)) == 0))", "docstring": "Check that a neuron has no flat neurites\n\nArgs:\n neuron(Neuron): The neuron object to test\n tol(float): tolerance\n method(string): way of determining flatness, 'tolerance', 'ratio' \\\n as described in :meth:`neurom.check.morphtree.get_flat_neurites`\n\nReturns:\n CheckResult with result", "source": "codesearchnet_filtered"} -{"code": "def is_subset_of_any(set_, other_sets):\n set_ = set(set_)\n other_sets = map(set, other_sets)\n return any([set_.issubset(other_set) for other_set in other_sets])", "docstring": "returns True if set_ is a subset of any set in other_sets\n\nArgs:\n set_ (set):\n other_sets (list of sets):\n\nReturns:\n bool: flag\n\n CommandLine:\n python -m utool.util_list --test-is_subset_of_any\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_list import * # NOQA\n >>> # build test data\n >>> set_ = {1, 2}\n >>> other_sets = [{1, 4}, {3, 2, 1}]\n >>> # execute function\n >>> result = is_subset_of_any(set_, other_sets)\n >>> # verify results\n >>> print(result)\n True\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_list import * # NOQA\n >>> # build test data\n >>> set_ = {1, 2}\n >>> other_sets = [{1, 4}, {3, 2}]\n >>> # execute function\n >>> result = is_subset_of_any(set_, other_sets)\n >>> # verify results\n >>> print(result)\n False", "source": "codesearchnet_filtered"} -{"code": "def 
generate_nb_state_data(means, weights, R):\n cells = weights.shape[1]\n x_true = np.dot(means, weights)\n R_ = np.tile(R, (cells, 1)).T\n P_true = (x_true / (R_ + x_true))\n sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)\n return sample.astype(float)", "docstring": "Generates data according to the Negative Binomial Convex Mixture Model.\n\nArgs:\n means (array): Cell types- genes x clusters\n weights (array): Cell cluster assignments- clusters x cells\n R (array): dispersion parameter - 1 x genes\n\nReturns:\n data matrix - genes x cells", "source": "codesearchnet_filtered"} -{"code": "def get(cls, resource_type):\n if isinstance(resource_type, str):\n obj = getattr(db, cls.__name__).find_one((cls.resource_type == resource_type))\n elif isinstance(resource_type, int):\n obj = getattr(db, cls.__name__).find_one((cls.resource_type_id == resource_type))\n elif isinstance(resource_type, cls):\n return resource_type\n else:\n obj = None\n if (not obj):\n obj = cls()\n obj.resource_type = resource_type\n db.session.add(obj)\n db.session.commit()\n db.session.refresh(obj)\n return obj", "docstring": "Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will\n be created in the database and returned\n\nArgs:\n resource_type (str): Resource type name\n\nReturns:\n :obj:`ResourceType`", "source": "codesearchnet_filtered"} -{"code": "def compute_positions(cls, screen_width, line):\n left = 1\n right = (screen_width + 1)\n flexible = None\n for field in line:\n if field.is_flexible():\n if flexible:\n raise FormatError('There can be only one flexible field per line.')\n flexible = field\n elif (not flexible):\n left += field.width\n else:\n right -= field.width\n available = (right - left)\n if (available <= 0):\n raise FormatError('Too much data for screen width')\n if flexible:\n if (available < 1):\n raise FormatError(('Not enough space to display flexible field %s' % flexible.name))\n flexible.width = available\n positions = []\n left = 1\n for field in line:\n positions.append((left, field))\n left += field.width\n logger.debug('Positions are %r', positions)\n return positions", "docstring": "Compute the relative position of the fields on a given line.\n\nArgs:\n screen_width (int): the width of the screen\n line (mpdlcd.display_fields.Field list): the list of fields on the\n line\n\nReturns:\n ((int, mpdlcd.display_fields.Field) list): the positions of fields,\n as (position, field) tuples.\n\nRaises:\n FormatError: if the line contains more than one flexible field, or\n is too long for the screen size.", "source": "codesearchnet_filtered"} -{"code": "def GetEventTagByIdentifier(self, storage_file, event_identifier):\n if (not self._index):\n self._Build(storage_file)\n lookup_key = event_identifier.CopyToString()\n event_tag_identifier = self._index.get(lookup_key, None)\n if (not event_tag_identifier):\n return None\n return storage_file.GetEventTagByIdentifier(event_tag_identifier)", "docstring": "Retrieves the most recently updated event tag for an event.\n\nArgs:\n storage_file (BaseStorageFile): storage file.\n event_identifier (AttributeContainerIdentifier): event attribute\n container identifier.\n\nReturns:\n EventTag: event tag or None if the event has no event tag.", "source": "codesearchnet_filtered"} -{"code": "def reserve_ids(self, token, channel, quantity):\n quantity = str(quantity)\n url = self.url('{}/{}/reserve/{}/'.format(token, channel, quantity))\n req = self.remote_utils.get_url(url)\n if (req.status_code 
is not 200):\n raise RemoteDataNotFoundError(('Invalid req: ' + req.status_code))\n out = req.json()\n return [(out[0] + i) for i in range(out[1])]", "docstring": "Requests a list of next-available-IDs from the server.\n\nArgs:\n quantity (int): The number of IDs to reserve\n\nReturns:\n int[quantity]: List of IDs you've been granted", "source": "codesearchnet_filtered"} -{"code": "def print_map(map_source, x, y, zoom=14, width=297, height=210, dpi=300, format='pdf'):\n bbox = get_print_bbox(x, y, zoom, width, height, dpi)\n tiles = [get_tiles(tile_layer, bbox) for tile_layer in map_source.layers if (tile_layer.min_zoom <= zoom <= tile_layer.max_zoom)]\n img = stitch_map(tiles, width, height, bbox, dpi)\n outfile = NamedTemporaryFile(delete=False)\n img.save(outfile, format, quality=100, dpi=(dpi, dpi))\n outfile.close()\n return outfile.name", "docstring": "Download map tiles and stitch them together in a single image, ready for printing.\n\nArgs:\n map_source (MapSource): Map to download\n x (float): map center x-coordinate in Mercator projection (EPSG:4326)\n y (float): map center y-coordinate in Mercator projection (EPSG:4326)\n zoom (int): tile zoom level to use for printing\n width (float): page width in mm\n height (float): page height in mm\n dpi (int): resolution in dots per inch\n format (str): output format. Anything supported by ``Pillow.Image.save``. E.g. \"pdf\", \"jpeg\", \"png\".\n\nReturns:\n str: path of temporary output file.", "source": "codesearchnet_filtered"} -{"code": "def gzip_uncompress(data, truncated=False):\n decompressor = SimpleGzipDecompressor()\n inflated_data = decompressor.decompress(data)\n if (not truncated):\n inflated_data += decompressor.flush()\n return inflated_data", "docstring": "Uncompress gzip data.\n\nArgs:\n data (bytes): The gzip data.\n truncated (bool): If True, the decompressor is not flushed.\n\n This is a convenience function.\n\nReturns:\n bytes: The inflated data.\n\nRaises:\n zlib.error", "source": "codesearchnet_filtered"} -{"code": "def load_plugins(config, plugin_kwargs):\n installed_plugins = _gather_installed_plugins()\n metrics_plugin = _get_metrics_plugin(config, installed_plugins)\n if metrics_plugin:\n plugin_kwargs['metrics'] = metrics_plugin\n active_plugins = _get_activated_plugins(config, installed_plugins)\n if (not active_plugins):\n return ([], [], [], None)\n plugin_namespaces = _get_plugin_config_keys(active_plugins)\n plugin_configs = _load_plugin_configs(plugin_namespaces, config)\n (plugin_names, plugins, errors) = _init_plugins(active_plugins, installed_plugins, plugin_configs, plugin_kwargs)\n return (plugin_names, plugins, errors, plugin_kwargs)", "docstring": "Discover and instantiate plugins.\n\nArgs:\n config (dict): loaded configuration for the Gordon service.\n plugin_kwargs (dict): keyword arguments to give to plugins\n during instantiation.\n\nReturns:\n Tuple of 3 lists: list of names of plugins, list of\n instantiated plugin objects, and any errors encountered while\n loading/instantiating plugins. A tuple of three empty lists is\n returned if there are no plugins found or activated in gordon\n config.", "source": "codesearchnet_filtered"} -{"code": "def format_image(path, options):\n image = Image.open(path)\n image_pipeline_results = __pipeline_image(image, options)\n return image_pipeline_results", "docstring": "Formats an image.\n\nArgs:\n path (str): Path to the image file.\n options (dict): Options to apply to the image.\n\nReturns:\n (list) A list of PIL images. 
The list will always be of length\n 1 unless resolutions for resizing are provided in the options.", "source": "codesearchnet_filtered"} -{"code": "def base256_encode(n, minwidth=0):\n if (n > 0):\n arr = []\n while n:\n (n, rem) = divmod(n, 256)\n arr.append(rem)\n b = bytearray(reversed(arr))\n elif (n == 0):\n b = bytearray(b'\\x00')\n else:\n raise ValueError('Negative numbers not supported')\n if ((minwidth > 0) and (len(b) < minwidth)):\n padding = ((minwidth - len(b)) * b'\\x00')\n b = (bytearray(padding) + b)\n b.reverse()\n return b", "docstring": "Encode the input with base256.\n\nArgs:\n n (int): input value.\n minwidth: minimum return value length.\n\nRaises:\n ValueError: if a negative number is provided.\n\nReturns:\n bytearray:", "source": "codesearchnet_filtered"} -{"code": "def oem(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n res = self._dll.JLINKARM_GetOEMString(ctypes.byref(buf))\n if (res != 0):\n raise errors.JLinkException('Failed to grab OEM string.')\n oem = ctypes.string_at(buf).decode()\n if (len(oem) == 0):\n return None\n return oem", "docstring": "Retrieves and returns the OEM string of the connected J-Link.\n\nArgs:\n self (JLink): the ``JLink`` instance\n\nReturns:\n The string of the OEM. If this is an original SEGGER product, then\n ``None`` is returned instead.\n\nRaises:\n JLinkException: on hardware error.", "source": "codesearchnet_filtered"} -{"code": "def install(self, connection, partition, table_name=None, columns=None, materialize=False, logger=None):\n partition.localize()\n self._add_partition(connection, partition)\n fdw_table = partition.vid\n view_table = '{}_v'.format(fdw_table)\n if materialize:\n with connection.cursor() as cursor:\n view_exists = self._relation_exists(connection, view_table)\n if view_exists:\n logger.debug('Materialized view of the partition already exists.\\n partition: {}, view: {}'.format(partition.name, view_table))\n else:\n query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'.format(view_table, fdw_table)\n logger.debug('Creating new materialized view of the partition.\\n partition: {}, view: {}, query: {}'.format(partition.name, view_table, query))\n cursor.execute(query)\n cursor.execute('COMMIT;')\n final_table = (view_table if materialize else fdw_table)\n with connection.cursor() as cursor:\n view_q = 'CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} '.format(partition.vid, final_table)\n cursor.execute(view_q)\n cursor.execute('COMMIT;')\n return partition.vid", "docstring": "Creates FDW or materialize view for given partition.\n\nArgs:\n connection: connection to postgresql\n partition (orm.Partition):\n materialize (boolean): if True, create read-only table. 
If False create virtual table.\n\nReturns:\n str: name of the created table.", "source": "codesearchnet_filtered"} -{"code": "def parse_report_file(input_, nameservers=None, dns_timeout=2.0, strip_attachment_payloads=False, parallel=False):\n if (type(input_) == str):\n file_object = open(input_, 'rb')\n elif (type(input_) == bytes):\n file_object = BytesIO(input_)\n else:\n file_object = input_\n content = file_object.read()\n try:\n report = parse_aggregate_report_file(content, nameservers=nameservers, dns_timeout=dns_timeout, parallel=parallel)\n results = OrderedDict([('report_type', 'aggregate'), ('report', report)])\n except InvalidAggregateReport:\n try:\n sa = strip_attachment_payloads\n results = parse_report_email(content, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=sa, parallel=parallel)\n except InvalidDMARCReport:\n raise InvalidDMARCReport('Not a valid aggregate or forensic report')\n return results", "docstring": "Parses a DMARC aggregate or forensic file at the given path, a\n file-like object. or bytes\n\nArgs:\n input_: A path to a file, a file like object, or bytes\n nameservers (list): A list of one or more nameservers to use\n (Cloudflare's public DNS resolvers by default)\n dns_timeout (float): Sets the DNS timeout in seconds\n strip_attachment_payloads (bool): Remove attachment payloads from\n forensic report results\n parallel (bool): Parallel processing\n\nReturns:\n OrderedDict: The parsed DMARC report", "source": "codesearchnet_filtered"} -{"code": "def GetParserObjectByName(cls, parser_name):\n parser_class = cls._parser_classes.get(parser_name, None)\n if parser_class:\n return parser_class()\n return None", "docstring": "Retrieves a specific parser object by its name.\n\nArgs:\n parser_name (str): name of the parser.\n\nReturns:\n BaseParser: parser object or None.", "source": "codesearchnet_filtered"} -{"code": "def __update(self, score, values, error):\n if self._minimize:\n if ((self._best_score is None) or (score > self._best_score)):\n self._best_score = score\n self._best_values = values.copy()\n self._best_error = error\n self._logger.log('debug', 'New best food source memorized: {}'.format(self._best_error))\n return True\n elif (not self._minimize):\n if ((self._best_score is None) or (score < self._best_score)):\n self._best_score = score\n self._best_values = values.copy()\n self._best_error = error\n self._logger.log('debug', 'New best food source memorized: {}'.format(self._best_error))\n return True\n return False", "docstring": "Update the best score and values if the given score is better than\n the current best score\n\nArgs:\n score (float): new score to evaluate\n values (list): new value ranges to evaluate\n error (float): new fitness function return value to evaluate\n\nReturns:\n bool: True if new score is better, False otherwise", "source": "codesearchnet_filtered"} -{"code": "def _expand_place_ids(self, terms):\n place_vids = []\n first_type = None\n for result in self.backend.identifier_index.search(terms):\n if (not first_type):\n first_type = result.type\n if (result.type != first_type):\n continue\n place_vids.append(result.vid)\n if place_vids:\n all_set = set(itertools.chain.from_iterable((iallval(GVid.parse(x)) for x in place_vids)))\n place_vids += list((str(x) for x in all_set))\n return place_vids\n else:\n return terms", "docstring": "Lookups all of the place identifiers to get gvids\n\nArgs:\n terms (str or unicode): terms to lookup\n\nReturns:\n str or list: given terms if no identifiers 
found, otherwise list of identifiers.", "source": "codesearchnet_filtered"} -{"code": "def path_split(self, path):\n match = self.path.match(path)\n if (match is None):\n return (None, None)\n the_rest = path[match.end():]\n if the_rest:\n if match.group().endswith('/'):\n pass\n elif the_rest.startswith('/'):\n pass\n else:\n return (None, None)\n if (self.IGNORE_TRAILING_SLASH and (the_rest == '/')):\n the_rest = ''\n return (match, the_rest)", "docstring": "Splits a path into the part matching this middleware and the part remaining.\n If path does not exist, it returns a pair of None values.\n If the regex matches the entire pair, the second item in returned tuple is None.\n\nArgs:\n path (str): The url to split\n\nReturns:\n Tuple\n matching_path (str or None): The beginning of the path which matches this\n middleware or None if it does not match\n remaining_path (str or None): The 'rest' of the path, following the matching part", "source": "codesearchnet_filtered"} -{"code": "def _copy_and_clean_up_expectations_from_indexes(self, match_indexes, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True):\n rval = []\n for i in match_indexes:\n rval.append(self._copy_and_clean_up_expectation(self._expectations_config.expectations[i], discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs))\n return rval", "docstring": "Copies and cleans all expectations provided by their index in DataAsset._expectations_config.expectations.\n\n Applies the _copy_and_clean_up_expectation method to multiple expectations, provided by their index in \\\n `DataAsset,_expectations_config.expectations`. Returns a list of the copied and cleaned expectations.\n\nArgs:\n match_indexes (List): \\\n Index numbers of the expectations from `expectation_config.expectations` to be copied and cleaned.\n discard_result_format_kwargs (boolean): \\\n if True, will remove the kwarg `output_format` key-value pair from the copied expectation.\n discard_include_configs_kwargs (boolean):\n if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.\n discard_catch_exceptions_kwargs (boolean):\n if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.\n\nReturns:\n A list of the copied expectations with `success_on_last_run` and other specified \\\n key-value pairs removed.\n\n See also:\n _copy_and_clean_expectation", "source": "codesearchnet_filtered"} -{"code": "def predict_proba(self, a, b, nb_runs=6, nb_jobs=None, gpu=None, idx=0, verbose=None, ttest_threshold=0.01, nb_max_runs=16, train_epochs=1000, test_epochs=1000):\n (Nb_jobs, verbose, gpu) = SETTINGS.get_default(('nb_jobs', nb_jobs), ('verbose', verbose), ('gpu', gpu))\n x = np.stack([a.ravel(), b.ravel()], 1)\n ttest_criterion = TTestCriterion(max_iter=nb_max_runs, runs_per_iter=nb_runs, threshold=ttest_threshold)\n AB = []\n BA = []\n while ttest_criterion.loop(AB, BA):\n if (nb_jobs != 1):\n result_pair = Parallel(n_jobs=nb_jobs)((delayed(GNN_instance)(x, idx=idx, device=('cuda:{}'.format((run % gpu)) if gpu else 'cpu'), verbose=verbose, train_epochs=train_epochs, test_epochs=test_epochs) for run in range(ttest_criterion.iter, (ttest_criterion.iter + nb_runs))))\n else:\n result_pair = [GNN_instance(x, idx=idx, device=('cuda:0' if gpu else 'cpu'), verbose=verbose, train_epochs=train_epochs, test_epochs=test_epochs) for run in range(ttest_criterion.iter, (ttest_criterion.iter + nb_runs))]\n 
AB.extend([runpair[0] for runpair in result_pair])\n BA.extend([runpair[1] for runpair in result_pair])\n if verbose:\n print('P-value after {} runs : {}'.format(ttest_criterion.iter, ttest_criterion.p_value))\n score_AB = np.mean(AB)\n score_BA = np.mean(BA)\n return ((score_BA - score_AB) / (score_BA + score_AB))", "docstring": "Run multiple times GNN to estimate the causal direction.\n\nArgs:\n a (np.ndarray): Variable 1\n b (np.ndarray): Variable 2\n nb_runs (int): number of runs to execute per batch (before testing for significance with t-test).\n nb_jobs (int): number of runs to execute in parallel. (Initialized with ``cdt.SETTINGS.NB_JOBS``)\n gpu (bool): use gpu (Initialized with ``cdt.SETTINGS.GPU``)\n idx (int): (optional) index of the pair, for printing purposes\n verbose (bool): verbosity (Initialized with ``cdt.SETTINGS.verbose``)\n ttest_threshold (float): threshold to stop the boostraps before ``nb_max_runs`` if the difference is significant\n nb_max_runs (int): Max number of bootstraps\n train_epochs (int): Number of epochs during which the model is going to be trained\n test_epochs (int): Number of epochs during which the model is going to be tested\n\nReturns:\n float: Causal score of the pair (Value : 1 if a->b and -1 if b->a)", "source": "codesearchnet_filtered"} -{"code": "def main(args=None):\n if (args is None):\n args = sys.argv[1:]\n parser = create_parser()\n args = parser.parse_args(args)\n if (args.verbose >= 2):\n level = logging.DEBUG\n elif (args.verbose >= 1):\n level = logging.INFO\n else:\n level = logging.WARNING\n logging.basicConfig(level=level)\n try:\n args.command(args)\n except pylink.JLinkException as e:\n sys.stderr.write(('Error: %s%s' % (str(e), os.linesep)))\n return 1\n return 0", "docstring": "Main command-line interface entrypoint.\n\n Runs the given subcommand or argument that were specified. If not given a\n ``args`` parameter, assumes the arguments are passed on the command-line.\n\nArgs:\n args (list): list of command-line arguments\n\nReturns:\n Zero on success, non-zero otherwise.", "source": "codesearchnet_filtered"} -{"code": "def html_serialize(self, attributes, max_length=None):\n doc = ET.Element('span')\n for chunk in self:\n if (chunk.has_cjk() and (not (max_length and (len(chunk.word) > max_length)))):\n ele = ET.Element('span')\n ele.text = chunk.word\n for (key, val) in attributes.items():\n ele.attrib[key] = val\n doc.append(ele)\n elif doc.getchildren():\n if (doc.getchildren()[(- 1)].tail is None):\n doc.getchildren()[(- 1)].tail = chunk.word\n else:\n doc.getchildren()[(- 1)].tail += chunk.word\n elif (doc.text is None):\n doc.text = chunk.word\n else:\n doc.text += chunk.word\n result = ET.tostring(doc, encoding='utf-8').decode('utf-8')\n result = html5lib.serialize(html5lib.parseFragment(result), sanitize=True, quote_attr_values='always')\n return result", "docstring": "Returns concatenated HTML code with SPAN tag.\n\nArgs:\n attributes (dict): A map of name-value pairs for attributes of output\n SPAN tags.\n max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.\n\nReturns:\n The organized HTML code. 
(str)", "source": "codesearchnet_filtered"} -{"code": "def pkg_config(pkg_libraries):\n libraries = []\n library_dirs = []\n include_dirs = []\n for pkg in pkg_libraries:\n if (os.system(('pkg-config --exists %s 2>/dev/null' % pkg)) == 0):\n pass\n else:\n print('Could not find library {0}'.format(pkg))\n sys.exit(1)\n if (len(pkg_libraries) > 0):\n for token in getoutput(('PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s' % ' '.join(pkg_libraries))).split():\n if token.startswith('-l'):\n libraries.append(token[2:])\n elif token.startswith('-L'):\n library_dirs.append(token[2:])\n elif token.startswith('-I'):\n include_dirs.append(token[2:])\n return (libraries, library_dirs, include_dirs)", "docstring": "Use pkg-config to query for the location of libraries, library directories,\n and header directories\n\nArgs:\n pkg_libries(list): A list of packages as strings\n\nReturns:\n libraries(list), library_dirs(list), include_dirs(list)", "source": "codesearchnet_filtered"} -{"code": "def identify_triggers(cfg, sources, sinks, lattice, nosec_lines):\n assignment_nodes = filter_cfg_nodes(cfg, AssignmentNode)\n tainted_nodes = filter_cfg_nodes(cfg, TaintedNode)\n tainted_trigger_nodes = [TriggerNode(Source('Framework function URL parameter'), cfg_node=node) for node in tainted_nodes]\n sources_in_file = find_triggers(assignment_nodes, sources, nosec_lines)\n sources_in_file.extend(tainted_trigger_nodes)\n find_secondary_sources(assignment_nodes, sources_in_file, lattice)\n sinks_in_file = find_triggers(cfg.nodes, sinks, nosec_lines)\n sanitiser_node_dict = build_sanitiser_node_dict(cfg, sinks_in_file)\n return Triggers(sources_in_file, sinks_in_file, sanitiser_node_dict)", "docstring": "Identify sources, sinks and sanitisers in a CFG.\n\nArgs:\n cfg(CFG): CFG to find sources, sinks and sanitisers in.\n sources(tuple): list of sources, a source is a (source, sanitiser) tuple.\n sinks(tuple): list of sources, a sink is a (sink, sanitiser) tuple.\n nosec_lines(set): lines with # nosec whitelisting\n\nReturns:\n Triggers tuple with sink and source nodes and a sanitiser node dict.", "source": "codesearchnet_filtered"} -{"code": "def data_not_in(db_data, user_data):\n if isinstance(user_data, list):\n if (db_data not in user_data):\n return True\n return False", "docstring": "Validate data not in user data.\n\nArgs:\n db_data (str): The data store in Redis.\n user_data (list): The user provided data.\n\nReturns:\n bool: True if the data passed validation.", "source": "codesearchnet_filtered"} -{"code": "def get_records(self, name):\n if (name in self._cache):\n return self._cache[name].values()\n else:\n return []", "docstring": "Return all the records for the given name in the cache.\n\nArgs:\n name (string): The name which the required models are stored under.\n\nReturns:\n list: A list of :class:`cinder_data.model.CinderModel` models.", "source": "codesearchnet_filtered"} -{"code": "def generate_stack_policy_args(stack_policy=None):\n args = {}\n if stack_policy:\n logger.debug('Stack has a stack policy')\n if stack_policy.url:\n raise NotImplementedError\n else:\n args['StackPolicyBody'] = stack_policy.body\n return args", "docstring": "Converts a stack policy object into keyword args.\n\nArgs:\n stack_policy (:class:`stacker.providers.base.Template`): A template\n object representing a stack policy.\n\nReturns:\n dict: A dictionary of keyword arguments to be used elsewhere.", "source": "codesearchnet_filtered"} -{"code": "def array(data, tcoords=None, chcoords=None, 
scalarcoords=None, datacoords=None, attrs=None, name=None):\n array = xr.DataArray(data, dims=('t', 'ch'), attrs=attrs, name=name)\n array.dca._initcoords()\n if (tcoords is not None):\n array.coords.update({key: ('t', tcoords[key]) for key in tcoords})\n if (chcoords is not None):\n array.coords.update({key: ('ch', chcoords[key]) for key in chcoords})\n if (scalarcoords is not None):\n array.coords.update(scalarcoords)\n if (datacoords is not None):\n array.coords.update({key: (('t', 'ch'), datacoords[key]) for key in datacoords})\n return array", "docstring": "Create an array as an instance of xarray.DataArray with Decode accessor.\n\nArgs:\n data (numpy.ndarray): 2D (time x channel) array.\n tcoords (dict, optional): Dictionary of arrays that label time axis.\n chcoords (dict, optional): Dictionary of arrays that label channel axis.\n scalarcoords (dict, optional): Dictionary of values that don't label any axes (point-like).\n datacoords (dict, optional): Dictionary of arrays that label time and channel axes.\n attrs (dict, optional): Dictionary of attributes to add to the instance.\n name (str, optional): String that names the instance.\n\nReturns:\n array (decode.array): Deode array.", "source": "codesearchnet_filtered"} -{"code": "def read_from_hdx(identifier, configuration=None):\n organization = Organization(configuration=configuration)\n result = organization._load_from_hdx('organization', identifier)\n if result:\n return organization\n return None", "docstring": "Reads the organization given by identifier from HDX and returns Organization object\n\nArgs:\n identifier (str): Identifier of organization\n configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\n Optional[Organization]: Organization object if successful read, None if not", "source": "codesearchnet_filtered"} -{"code": "def resample(df, rule, time_index, groupby=None, aggregation='mean'):\n if groupby:\n df = df.groupby(groupby)\n df = df.resample(rule, on=time_index)\n df = getattr(df, aggregation)()\n for column in groupby:\n del df[column]\n return df", "docstring": "pd.DataFrame.resample adapter.\n\n Call the `df.resample` method on the given time_index\n and afterwards call the indicated aggregation.\n\n Optionally group the dataframe by the indicated columns before\n performing the resampling.\n\n If groupby option is used, the result is a multi-index datagrame.\n\nArgs:\n df (pandas.DataFrame): DataFrame to resample.\n rule (str): The offset string or object representing target conversion.\n groupby (list): Optional list of columns to group by.\n time_index (str): Name of the column to use as the time index.\n aggregation (str): Name of the aggregation function to use.\n\nReturns:\n pandas.Dataframe: resampled dataframe", "source": "codesearchnet_filtered"} -{"code": "def eigh(a, eigvec=True, rcond=None):\n a = numpy.asarray(a)\n if (a.dtype != object):\n (val, vec) = numpy.linalg.eigh(a)\n return ((val, vec) if eigvec else val)\n amean = gvar.mean(a)\n if ((amean.ndim != 2) or (amean.shape[0] != amean.shape[1])):\n raise ValueError(('bad matrix shape: ' + str(a.shape)))\n if (rcond is None):\n rcond = (numpy.finfo(float).eps * max(a.shape))\n da = (a - amean)\n (val0, vec0) = numpy.linalg.eigh(amean)\n val = (val0 + [vec0[(:, i)].conjugate().dot(da.dot(vec0[(:, i)])) for i in range(vec0.shape[1])])\n if (eigvec == True):\n if (vec0.dtype == complex):\n raise ValueError('cannot evaluate eigenvectors when a is complex')\n vec = numpy.array(vec0, dtype=object)\n 
for i in range(len(val)):\n for j in range(len(val)):\n dval = (val0[i] - val0[j])\n if ((abs(dval) < (rcond * abs((val0[j] + val0[i])))) or (dval == 0.0)):\n continue\n vec[(:, i)] += (vec0[(:, j)] * (vec0[(:, j)].dot(da.dot(vec0[(:, i)])) / dval))\n return (val, vec)\n else:\n return val", "docstring": "Eigenvalues and eigenvectors of symmetric matrix ``a``.\n\nArgs:\n a: Two-dimensional, square Hermitian matrix/array of numbers\n and/or :class:`gvar.GVar`\\s. Array elements must be\n real-valued if `gvar.GVar`\\s are involved (i.e., symmetric\n matrix).\n eigvec (bool): If ``True`` (default), method returns a tuple\n of arrays ``(val, vec)`` where ``val[i]`` are the\n eigenvalues of ``a`` (in ascending order), and ``vec[:, i]``\n are the corresponding eigenvectors of ``a``. Only ``val`` is\n returned if ``eigvec=False``.\n rcond (float): Eigenvalues whose difference is smaller than\n ``rcond`` times their sum are assumed to be degenerate\n (and ignored) when computing variances for the eigvectors.\n Default (``rcond=None``) is ``max(M,N)`` times machine precision.\n\nReturns:\n Tuple ``(val,vec)`` of eigenvalues and eigenvectors of\n matrix ``a`` if parameter ``eigvec==True`` (default).\n The eigenvalues ``val[i]`` are in ascending order and\n ``vec[:, i]`` are the corresponding eigenvalues. Only\n the eigenvalues ``val`` are returned if ``eigvec=False``.\n\nRaises:\n ValueError: If matrix is not square and two-dimensional.", "source": "codesearchnet_filtered"} -{"code": "def BuildFilterFindSpecs(self, artifact_definitions_path, custom_artifacts_path, knowledge_base_object, artifact_filter_names=None, filter_file_path=None):\n environment_variables = knowledge_base_object.GetEnvironmentVariables()\n find_specs = None\n if artifact_filter_names:\n logger.debug('building find specification based on artifacts: {0:s}'.format(', '.join(artifact_filter_names)))\n artifacts_registry_object = BaseEngine.BuildArtifactsRegistry(artifact_definitions_path, custom_artifacts_path)\n self._artifacts_filter_helper = artifact_filters.ArtifactDefinitionsFilterHelper(artifacts_registry_object, knowledge_base_object)\n self._artifacts_filter_helper.BuildFindSpecs(artifact_filter_names, environment_variables=environment_variables)\n if self._artifacts_filter_helper.registry_find_specs:\n self._artifacts_filter_helper.BuildFindSpecs(self._WINDOWS_REGISTRY_FILES_ARTIFACT_NAMES, environment_variables=environment_variables)\n find_specs = self._artifacts_filter_helper.file_system_find_specs\n if (not find_specs):\n raise errors.InvalidFilter('No valid file system find specifications were built from artifacts.')\n elif filter_file_path:\n logger.debug('building find specification based on filter file: {0:s}'.format(filter_file_path))\n filter_file_object = filter_file.FilterFile(filter_file_path)\n find_specs = filter_file_object.BuildFindSpecs(environment_variables=environment_variables)\n if (not find_specs):\n raise errors.InvalidFilter('No valid file system find specifications were built from filter file.')\n return find_specs", "docstring": "Builds find specifications from artifacts or filter file if available.\n\nArgs:\n artifact_definitions_path (str): path to artifact definitions file.\n custom_artifacts_path (str): path to custom artifact definitions file.\n knowledge_base_object (KnowledgeBase): knowledge base.\n artifact_filter_names (Optional[list[str]]): names of artifact\n definitions that are used for filtering file system and Windows\n Registry key paths.\n filter_file_path (Optional[str]): path 
of filter file.\n\nReturns:\n list[dfvfs.FindSpec]: find specifications for the file source type.\n\nRaises:\n InvalidFilter: if no valid FindSpecs are built.", "source": "codesearchnet_filtered"} -{"code": "def load(self, key_filter=None, header_preproc=None):\n df = pd.read_csv(self.input_file, sep='\\t', dtype=object)\n if (key_filter is not None):\n df = df[df[df.columns[0]].str.match(key_filter)]\n meta_col = df.columns[0]\n df[meta_col] = df[meta_col].str.split(',').str[(- 1)]\n for col_name in df.columns[1:]:\n stripped = df[col_name].str.replace('[a-z]', '')\n df[col_name] = pd.to_numeric(stripped, errors='coerce')\n if (header_preproc is not None):\n df.columns = (list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]])\n df.columns = (['key'] + [int(y) for y in df.columns[1:]])\n return df", "docstring": "Load data table from tsv file, from default location\n\nArgs:\n key_filter (str): additional filter for key column - regex matching\n key values to include; None for no filter\n\n header_preproc (func): function to apply to column headers to extract year numbers (as strings)\n\nReturns:\n pd.DataFrame: data", "source": "codesearchnet_filtered"} -{"code": "def in_batches(iterable, batch_size):\n items = list(iterable)\n size = len(items)\n for i in range(0, size, batch_size):\n (yield items[i:min((i + batch_size), size)])", "docstring": "Split the given iterable into batches.\n\nArgs:\n iterable (Iterable[Any]):\n The iterable you want to split into batches.\n batch_size (int):\n The size of each bach. The last batch will be probably smaller (if\n the number of elements cannot be equally divided.\n\nReturns:\n Generator[list[Any]]: Will yield all items in batches of **batch_size**\n size.\n\nExample:\n >>> from peltak.core import util\n >>>\n >>> batches = util.in_batches([1, 2, 3, 4, 5, 6, 7], 3)\n >>> batches = list(batches) # so we can query for lenght\n >>> len(batches)\n 3\n >>> batches\n [[1, 2, 3], [4, 5, 6], [7]]", "source": "codesearchnet_filtered"} -{"code": "def scale(self, replicas):\n if ('Global' in self.attrs['Spec']['Mode'].keys()):\n raise InvalidArgument('Cannot scale a global container')\n service_mode = ServiceMode('replicated', replicas)\n return self.client.api.update_service(self.id, self.version, mode=service_mode, fetch_current_spec=True)", "docstring": "Scale service container.\n\nArgs:\n replicas (int): The number of containers that should be running.\n\nReturns:\n bool: ``True`` if successful.", "source": "codesearchnet_filtered"} -{"code": "def find_write(driver, elem_path, write_str, clear_first=True, send_enter=False, by=CSS, timeout=TIMEOUT, poll_frequency=0.5):\n elem = find_element(driver, elem_path=elem_path, by=by, timeout=timeout, poll_frequency=poll_frequency)\n if clear_first:\n elem.clear()\n elem.send_keys(write_str)\n if send_enter:\n elem.send_keys(Keys.ENTER)\n return elem", "docstring": "Find a writable element and write to it\n\n find_write locates a writable element on the page, waiting\n for up to timeout seconds. 
Once found, it writes the string\n to it.\n\nArgs:\n driver (selenium webdriver or element): A driver or element\n elem_path (str): String used to located the element\n write_str (str): String to write\n clear_first (bool): Clear the contents before writing (default True)\n send_enter (bool): Send a keyboard ENTER after writing string\n by (selenium By): Selenium By reference\n timeout (int): Selenium Wait timeout, in seconds\n poll_frequency (float): Selenium Wait polling frequency, in seconds\n\nReturns:\n element: Selenium element\n\nRaises:\n TimeoutException: Raised when target element isn't located", "source": "codesearchnet_filtered"} -{"code": "def operator_driven(drain_timeout=_DEFAULT_DRAIN, reset_timeout=_DEFAULT_RESET, max_consecutive_attempts=_DEFAULT_ATTEMPTS):\n return ConsistentRegionConfig(trigger=ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN, drain_timeout=drain_timeout, reset_timeout=reset_timeout, max_consecutive_attempts=max_consecutive_attempts)", "docstring": "Define an operator-driven consistent region configuration.\n The source operator triggers drain and checkpoint cycles for the region.\n\nArgs:\n drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds.\n reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds.\n max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5.\n\nReturns:\n ConsistentRegionConfig: the configuration.", "source": "codesearchnet_filtered"} -{"code": "def ParseHeader(cls, script_data):\n if (len(script_data) < UpdateScript.SCRIPT_HEADER_LENGTH):\n raise ArgumentError('Script is too short to contain a script header', length=len(script_data), header_length=UpdateScript.SCRIPT_HEADER_LENGTH)\n (embedded_hash, magic, total_length) = struct.unpack_from('<16sLL', script_data)\n if (magic != UpdateScript.SCRIPT_MAGIC):\n raise ArgumentError('Script has invalid magic value', expected=UpdateScript.SCRIPT_MAGIC, found=magic)\n if (total_length != len(script_data)):\n raise ArgumentError('Script length does not match embedded length', embedded_length=total_length, length=len(script_data))\n hashed_data = script_data[16:]\n sha = hashlib.sha256()\n sha.update(hashed_data)\n hash_value = sha.digest()[:16]\n if (not compare_digest(embedded_hash, hash_value)):\n raise ArgumentError('Script has invalid embedded hash', embedded_hash=hexlify(embedded_hash), calculated_hash=hexlify(hash_value))\n return ScriptHeader(UpdateScript.SCRIPT_HEADER_LENGTH, False, True, False)", "docstring": "Parse a script integrity header.\n\n This function makes sure any integrity hashes are correctly parsed and\n returns a ScriptHeader structure containing the information that it\n was able to parse out.\n\nArgs:\n script_data (bytearray): The script that we should parse.\n\nRaises:\n ArgumentError: If the script contains malformed data that\n cannot be parsed.\n\nReturns:\n ScriptHeader: The parsed script header information", "source": "codesearchnet_filtered"} -{"code": "def to_json(self, is_admin=False):\n if is_admin:\n return {'accountId': self.account_id, 'accountName': self.account_name, 'accountType': self.account_type, 'contacts': self.contacts, 'enabled': (True if (self.enabled == 1) else 
False), 'requiredRoles': self.required_roles, 'properties': {to_camelcase(prop.name): prop.value for prop in self.account.properties}}\n else:\n return {'accountId': self.account_id, 'accountName': self.account_name, 'contacts': self.contacts}", "docstring": "Returns a dict representation of the object\n\nArgs:\n is_admin (`bool`): If true, include information about the account that should be avaiable only to admins\n\nReturns:\n `dict`", "source": "codesearchnet_filtered"} -{"code": "def run(self, dag):\n cx_runs = dag.collect_runs(['cx'])\n for cx_run in cx_runs:\n partition = []\n chunk = []\n for i in range((len(cx_run) - 1)):\n chunk.append(cx_run[i])\n qargs0 = cx_run[i].qargs\n qargs1 = cx_run[(i + 1)].qargs\n if (qargs0 != qargs1):\n partition.append(chunk)\n chunk = []\n chunk.append(cx_run[(- 1)])\n partition.append(chunk)\n for chunk in partition:\n if ((len(chunk) % 2) == 0):\n for n in chunk:\n dag.remove_op_node(n)\n else:\n for n in chunk[1:]:\n dag.remove_op_node(n)\n return dag", "docstring": "Run one pass of cx cancellation on the circuit\n\nArgs:\n dag (DAGCircuit): the directed acyclic graph to run on.\n\nReturns:\n DAGCircuit: Transformed DAG.", "source": "codesearchnet_filtered"} -{"code": "def refresh(self, access_token=None, **kwargs):\n if (not self.token_lock.locked()):\n with self.token_lock:\n if ((access_token == self.access_token) or (access_token is None)):\n if (self.developer_token is not None):\n r = self._httpclient.request(method='POST', url=self.developer_token_url, path='/request_token', headers={'Authorization': 'Bearer {}'.format(self.developer_token)}, timeout=30, raise_for_status=True)\n elif all([self.client_id, self.client_secret, self.refresh_token]):\n data = {'client_id': self.client_id, 'client_secret': self.client_secret, 'refresh_token': self.refresh_token, 'grant_type': 'refresh_token'}\n r = self._httpclient.request(method='POST', url=self.token_url, json=data, path='/api/oauth2/RequestToken', **kwargs)\n else:\n raise PartialCredentialsError('Missing one or more required credentials')\n if r:\n if (not r.ok):\n raise PanCloudError(('%s %s: %s' % (r.status_code, r.reason, r.text)))\n try:\n r_json = r.json()\n except ValueError as e:\n raise PanCloudError(('Invalid JSON: %s' % e))\n else:\n if (r.json().get('error_description') or r.json().get('error')):\n raise PanCloudError(r.text)\n self.access_token = r_json.get('access_token', None)\n self.jwt_exp = self._decode_exp(self.access_token_)\n if r_json.get('refresh_token', None):\n self.refresh_token = r_json.get('refresh_token')\n self.write_credentials()\n return self.access_token_", "docstring": "Refresh access and refresh tokens.\n\nArgs:\n access_token (str): Access token to refresh. 
Defaults to ``None``.\n\nReturns:\n str: Refreshed access token.", "source": "codesearchnet_filtered"} -{"code": "def etm_register_write(self, register_index, value, delay=False):\n self._dll.JLINKARM_ETM_WriteReg(int(register_index), int(value), int(delay))\n return None", "docstring": "Writes a value to an ETM register.\n\nArgs:\n self (JLink): the ``JLink`` instance.\n register_index (int): the register to write to.\n value (int): the value to write to the register.\n delay (bool): boolean specifying if the write should be buffered.\n\nReturns:\n ``None``", "source": "codesearchnet_filtered"} -{"code": "def _build_cryptographic_parameters(self, value):\n if (value is None):\n return None\n elif (not isinstance(value, dict)):\n raise TypeError('Cryptographic parameters must be a dictionary.')\n cryptographic_parameters = CryptographicParameters(block_cipher_mode=value.get('block_cipher_mode'), padding_method=value.get('padding_method'), hashing_algorithm=value.get('hashing_algorithm'), key_role_type=value.get('key_role_type'), digital_signature_algorithm=value.get('digital_signature_algorithm'), cryptographic_algorithm=value.get('cryptographic_algorithm'), random_iv=value.get('random_iv'), iv_length=value.get('iv_length'), tag_length=value.get('tag_length'), fixed_field_length=value.get('fixed_field_length'), invocation_field_length=value.get('invocation_field_length'), counter_length=value.get('counter_length'), initial_counter_value=value.get('initial_counter_value'))\n return cryptographic_parameters", "docstring": "Build a CryptographicParameters struct from a dictionary.\n\nArgs:\n value (dict): A dictionary containing the key/value pairs for a\n CryptographicParameters struct.\n\nReturns:\n None: if value is None\n CryptographicParameters: a CryptographicParameters struct\n\nRaises:\n TypeError: if the input argument is invalid", "source": "codesearchnet_filtered"} -{"code": "def query_string_to_dict(query):\n query_params = {}\n for key_value in query.split('&'):\n key_value_pair = key_value.split('=', 1)\n key = (key_value_pair[0] if (len(key_value_pair) >= 1) else '')\n value = (key_value_pair[1] if (len(key_value_pair) == 2) else '')\n query_params[key] = value\n return query_params", "docstring": "Convert a string to a query dict.\n\nArgs:\n query (str): The query string.\n\nReturns:\n obj: The key value object with query params.\n\nNote:\n This method does the same as urllib.parse.parse_qsl except\n that it doesn't actually decode the values.", "source": "codesearchnet_filtered"} -{"code": "def match(self, request):\n errors = []\n\n def match(matcher):\n try:\n return matcher.match(request)\n except Exception as err:\n err = '{}: {}'.format(type(matcher).__name__, err)\n errors.append(err)\n return False\n return (all([match(matcher) for matcher in self]), errors)", "docstring": "Match the given HTTP request instance against the registered\n matcher functions in the current engine.\n\nArgs:\n request (pook.Request): outgoing request to match.\n\nReturns:\n tuple(bool, list[Exception]): ``True`` if all matcher tests\n passes, otherwise ``False``. 
Also returns an optional list\n of error exceptions.", "source": "codesearchnet_filtered"} -{"code": "def reference_value_to_document(reference_value, client):\n parts = reference_value.split(DOCUMENT_PATH_DELIMITER, 5)\n if (len(parts) != 6):\n msg = BAD_REFERENCE_ERROR.format(reference_value)\n raise ValueError(msg)\n document = client.document(parts[(- 1)])\n if (document._document_path != reference_value):\n msg = WRONG_APP_REFERENCE.format(reference_value, client._database_string)\n raise ValueError(msg)\n return document", "docstring": "Convert a reference value string to a document.\n\nArgs:\n reference_value (str): A document reference value.\n client (~.firestore_v1beta1.client.Client): A client that has\n a document factory.\n\nReturns:\n ~.firestore_v1beta1.document.DocumentReference: The document\n corresponding to ``reference_value``.\n\nRaises:\n ValueError: If the ``reference_value`` is not of the expected\n format: ``projects/{project}/databases/{database}/documents/...``.\n ValueError: If the ``reference_value`` does not come from the same\n project / database combination as the ``client``.", "source": "codesearchnet_filtered"} -{"code": "def get(self, url, params={}, headers={}, auth=(), certificate_path=None):\n certificate_path = (certificate_path if certificate_path else False)\n return self.session.get(url, params=params, headers=headers, verify=certificate_path, auth=auth, timeout=self.timeout)", "docstring": "Returns the response payload from the request to the given URL.\n\nArgs:\n url (str): The URL for the WEB API that the request is being made too.\n params (dict): Dictionary containing the query string parameters.\n headers (dict): HTTP Headers that may be needed for the request.\n auth (tuple): User ID and password for Basic Auth\n certificate_path (str): Path to the ssl certificate.\n\nReturns:\n response: (HttpResponse): Response object from requests.get api request", "source": "codesearchnet_filtered"} -{"code": "def assignSchedule(self, schedule, period, hour, minute, tariff):\n if ((schedule not in range(Extents.Schedules)) or (period not in range(Extents.Tariffs)) or (hour < 0) or (hour > 23) or (minute < 0) or (minute > 59) or (tariff < 0)):\n ekm_log(('Out of bounds in Schedule_' + str((schedule + 1))))\n return False\n period += 1\n idx_min = ('Min_' + str(period))\n idx_hour = ('Hour_' + str(period))\n idx_rate = ('Tariff_' + str(period))\n if (idx_min not in self.m_schedule_params):\n ekm_log(('Incorrect index: ' + idx_min))\n return False\n if (idx_hour not in self.m_schedule_params):\n ekm_log(('Incorrect index: ' + idx_hour))\n return False\n if (idx_rate not in self.m_schedule_params):\n ekm_log(('Incorrect index: ' + idx_rate))\n return False\n self.m_schedule_params[idx_rate] = tariff\n self.m_schedule_params[idx_hour] = hour\n self.m_schedule_params[idx_min] = minute\n self.m_schedule_params['Schedule'] = schedule\n return True", "docstring": "Assign one schedule tariff period to meter bufffer.\n\nArgs:\n schedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extents.Schedules).\n tariff (int): :class:`~ekmmeters.Tariffs` value or in range(Extents.Tariffs).\n hour (int): Hour from 0-23.\n minute (int): Minute from 0-59.\n tariff (int): Rate value.\n\nReturns:\n bool: True on completed assignment.", "source": "codesearchnet_filtered"} -{"code": "def get(self, key, default=None):\n index = self._xxx_field_to_index.get(key)\n if (index is None):\n return default\n return self._xxx_values[index]", "docstring": "Return a value for key, 
with a default value if it does not exist.\n\nArgs:\n key (str): The key of the column to access\n default (object):\n The default value to use if the key does not exist. (Defaults\n to :data:`None`.)\n\nReturns:\n object:\n The value associated with the provided key, or a default value.\n\nExample:\n When the key exists, the value associated with it is returned.\n\n >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x')\n 'a'\n\n The default value is :data:`None` when the key does not exist.\n\n >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z')\n None\n\n The default value can be overrided with the ``default`` parameter.\n\n >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '')\n ''\n\n >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '')\n ''", "source": "codesearchnet_filtered"} -{"code": "def token_request(self, authorization_code):\n if (not self._client.token_endpoint):\n return None\n request = {'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': self._redirect_uri}\n logger.debug('making token request: %s', request)\n client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic')\n auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method, request)\n resp = self._provider_configuration.requests_session.post(self._client.token_endpoint, data=request, headers=auth_header).json()\n logger.debug('received token response: %s', json.dumps(resp))\n if ('error' in resp):\n token_resp = TokenErrorResponse(**resp)\n else:\n token_resp = AccessTokenResponse(**resp)\n token_resp.verify(keyjar=self._client.keyjar)\n if ('id_token' in resp):\n token_resp['id_token_jwt'] = resp['id_token']\n return token_resp", "docstring": "Makes a token request. 
If the 'token_endpoint' is not configured in the provider metadata, no request will\n be made.\n\nArgs:\n authorization_code (str): authorization code issued to client after user authorization\n\nReturns:\n Union[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token\n request was performed.", "source": "codesearchnet_filtered"} -{"code": "def angle_to_name(angle, segments=8, abbr=False):\n if (segments == 4):\n string = COMPASS_NAMES[((int(((angle + 45) / 90)) % 4) * 2)]\n elif (segments == 8):\n string = COMPASS_NAMES[((int(((angle + 22.5) / 45)) % 8) * 2)]\n elif (segments == 16):\n string = COMPASS_NAMES[(int(((angle + 11.25) / 22.5)) % 16)]\n else:\n raise ValueError(('Segments parameter must be 4, 8 or 16 not %r' % segments))\n if abbr:\n return ''.join((i[0].capitalize() for i in string.split('-')))\n else:\n return string", "docstring": "Convert angle in to direction name.\n\nArgs:\n angle (float): Angle in degrees to convert to direction name\n segments (int): Number of segments to split compass in to\n abbr (bool): Whether to return abbreviated direction string\n\nReturns:\n str: Direction name for ``angle``", "source": "codesearchnet_filtered"} -{"code": "def hpo_diseases(username, password, hpo_ids, p_value_treshold=1):\n try:\n results = query_phenomizer.query(username, password, *hpo_ids)\n diseases = [result for result in results if (result['p_value'] <= p_value_treshold)]\n return diseases\n except SystemExit:\n return None", "docstring": "Return the list of HGNC symbols that match annotated HPO terms.\n\nArgs:\n username (str): username to use for phenomizer connection\n password (str): password to use for phenomizer connection\n\nReturns:\n query_result: a generator of dictionaries on the form\n {\n 'p_value': float,\n 'disease_source': str,\n 'disease_nr': int,\n 'gene_symbols': list(str),\n 'description': str,\n 'raw_line': str\n }", "source": "codesearchnet_filtered"} -{"code": "def iterable(obj, strok=False):\n try:\n iter(obj)\n except Exception:\n return False\n else:\n return (strok or (not isinstance(obj, six.string_types)))", "docstring": "Checks if the input implements the iterator interface. 
An exception is made\n for strings, which return False unless `strok` is True\n\nArgs:\n obj (object): a scalar or iterable input\n\n strok (bool): if True allow strings to be interpreted as iterable\n\nReturns:\n bool: True if the input is iterable\n\nExample:\n >>> obj_list = [3, [3], '3', (3,), [3, 4, 5], {}]\n >>> result = [iterable(obj) for obj in obj_list]\n >>> assert result == [False, True, False, True, True, True]\n >>> result = [iterable(obj, strok=True) for obj in obj_list]\n >>> assert result == [False, True, True, True, True, True]", "source": "codesearchnet_filtered"} -{"code": "def OpenFileObject(cls, path_spec_object, resolver_context=None):\n if (not isinstance(path_spec_object, path_spec.PathSpec)):\n raise TypeError('Unsupported path specification type.')\n if (resolver_context is None):\n resolver_context = cls._resolver_context\n if (path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT):\n if path_spec_object.HasParent():\n raise errors.PathSpecError('Unsupported mount path specification with parent.')\n mount_point = getattr(path_spec_object, 'identifier', None)\n if (not mount_point):\n raise errors.PathSpecError('Unsupported path specification without mount point identifier.')\n path_spec_object = mount_manager.MountPointManager.GetMountPoint(mount_point)\n if (not path_spec_object):\n raise errors.MountPointError('No such mount point: {0:s}'.format(mount_point))\n file_object = resolver_context.GetFileObject(path_spec_object)\n if (not file_object):\n resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)\n file_object = resolver_helper.NewFileObject(resolver_context)\n file_object.open(path_spec=path_spec_object)\n return file_object", "docstring": "Opens a file-like object defined by path specification.\n\nArgs:\n path_spec_object (PathSpec): path specification.\n resolver_context (Optional[Context]): resolver context, where None\n represents the built in context which is not multi process safe.\n\nReturns:\n FileIO: file-like object or None if the path specification could not\n be resolved.\n\nRaises:\n PathSpecError: if the path specification is incorrect.\n TypeError: if the path specification type is unsupported.", "source": "codesearchnet_filtered"} -{"code": "def convert_drive(self, shift, instruction):\n command_dict = {'name': instruction.command.name, 't0': (shift + instruction.start_time), 'ch': instruction.channels[0].name}\n return self._qobj_model(**command_dict)", "docstring": "Return converted `PulseInstruction`.\n\nArgs:\n shift(int): Offset time.\n instruction (PulseInstruction): drive instruction.\n\nReturns:\n dict: Dictionary of required parameters.", "source": "codesearchnet_filtered"} -{"code": "def attach(self, container, stdout=True, stderr=True, stream=False, logs=False, demux=False):\n params = {'logs': ((logs and 1) or 0), 'stdout': ((stdout and 1) or 0), 'stderr': ((stderr and 1) or 0), 'stream': ((stream and 1) or 0)}\n headers = {'Connection': 'Upgrade', 'Upgrade': 'tcp'}\n u = self._url('/containers/{0}/attach', container)\n response = self._post(u, headers=headers, params=params, stream=True)\n output = self._read_from_socket(response, stream, self._check_is_tty(container), demux=demux)\n if stream:\n return CancellableStream(output, response)\n else:\n return output", "docstring": "Attach to a container.\n\n The ``.logs()`` function is a wrapper around this method, which you can\n use instead if you want to fetch/stream container output without first\n retrieving the entire backlog.\n\nArgs:\n 
container (str): The container to attach to.\n stdout (bool): Include stdout.\n stderr (bool): Include stderr.\n stream (bool): Return container output progressively as an iterator\n of strings, rather than a single string.\n logs (bool): Include the container's previous output.\n demux (bool): Keep stdout and stderr separate.\n\nReturns:\n By default, the container's output as a single string (two if\n ``demux=True``: one for stdout and one for stderr).\n\n If ``stream=True``, an iterator of output strings. If\n ``demux=True``, two iterators are returned: one for stdout and one\n for stderr.\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "codesearchnet_filtered"} -{"code": "def distinct(l):\n seen = set()\n seen_add = seen.add\n return (_ for _ in l if (not ((_ in seen) or seen_add(_))))", "docstring": "Return a list where the duplicates have been removed.\n\nArgs:\n l (list): the list to filter.\n\nReturns:\n list: the same list without duplicates.", "source": "codesearchnet_filtered"} -{"code": "def scandir(path='.'):\n scandir_path = fsdecode(path).replace('\\\\', '/')\n if (not is_storage(scandir_path)):\n return os_scandir(scandir_path)\n return _scandir_generator(is_bytes=isinstance(fspath(path), (bytes, bytearray)), scandir_path=scandir_path, system=get_instance(scandir_path))", "docstring": "Return an iterator of os.DirEntry objects corresponding to the entries in\n the directory given by path. The entries are yielded in arbitrary order,\n and the special entries '.' and '..' are not included.\n\n Equivalent to \"os.scandir\".\n\nArgs:\n path (path-like object): Path or URL.\n If path is of type bytes (directly or indirectly through the\n PathLike interface), the type of the name and path attributes\n of each os.DirEntry will be bytes; in all other circumstances,\n they will be of type str.\n\nReturns:\n Generator of os.DirEntry: Entries information.", "source": "codesearchnet_filtered"} -{"code": "def get_container_instance_group(access_token, subscription_id, resource_group, container_group_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API])\n return do_get(endpoint, access_token)", "docstring": "Get the JSON definition of a container group.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n container_group_name (str): Name of container instance group.\n\nReturns:\n HTTP response. 
JSON body of container group.", "source": "codesearchnet_filtered"} -{"code": "def get_first(self, status):\n items = self.get_all(status)\n if items:\n return list(items.items())[0][1]\n return None", "docstring": "Get the first item in the queue that has the given status.\n\nArgs:\n status (str): return the first item with this status.\n\nReturns:\n :class:`nyawc.QueueItem`: The first queue item with the given status.", "source": "codesearchnet_filtered"} -{"code": "def series_expand(self, param: Symbol, about, order: int) -> tuple:\n expansion = self._series_expand(param, about, order)\n res = []\n for v in expansion:\n if ((v == 0) or v.is_zero):\n v = self._zero\n elif (v == 1):\n v = self._one\n assert isinstance(v, self._base_cls)\n res.append(v)\n return tuple(res)", "docstring": "r\"\"\"Expand the expression as a truncated power series in a\n scalar parameter.\n\n When expanding an expr for a parameter $x$ about the point $x_0$ up to\n order $N$, the resulting coefficients $(c_1, \\dots, c_N)$ fulfill\n\n .. math::\n\n \\text{expr} = \\sum_{n=0}^{N} c_n (x - x_0)^n + O(N+1)\n\nArgs:\n param: Expansion parameter $x$\n about (Scalar): Point $x_0$ about which to expand\n order: Maximum order $N$ of expansion (>= 0)\n\nReturns:\n tuple of length ``order + 1``, where the entries are the\n expansion coefficients, $(c_0, \\dots, c_N)$.\n\nNote:\n The expansion coefficients are\n \"type-stable\", in that they share a common base class with the\n original expression. In particular, this applies to \"zero\"\n coefficients::\n\n >>> expr = KetSymbol(\"Psi\", hs=0)\n >>> t = sympy.symbols(\"t\")\n >>> assert expr.series_expand(t, 0, 1) == (expr, ZeroKet)", "source": "codesearchnet_filtered"} -{"code": "def is44(msg):\n if allzeros(msg):\n return False\n d = hex2bin(data(msg))\n if wrongstatus(d, 5, 6, 23):\n return False\n if wrongstatus(d, 35, 36, 46):\n return False\n if wrongstatus(d, 47, 48, 49):\n return False\n if wrongstatus(d, 50, 51, 56):\n return False\n if (bin2int(d[0:4]) > 4):\n return False\n vw = wind44(msg)\n if ((vw is not None) and (vw[0] > 250)):\n return False\n (temp, temp2) = temp44(msg)\n if ((min(temp, temp2) > 60) or (max(temp, temp2) < (- 80))):\n return False\n return True", "docstring": "Check if a message is likely to be BDS code 4,4.\n\n Meteorological routine air report\n\nArgs:\n msg (String): 28 bytes hexadecimal message string\n\nReturns:\n bool: True or False", "source": "codesearchnet_filtered"} -{"code": "def _hash_sequence(self, sighash_type, anyone_can_pay):\n if (anyone_can_pay or (sighash_type == shared.SIGHASH_SINGLE)):\n return (b'\\x00' * 32)\n else:\n sequences = ByteData()\n for tx_in in self.tx_ins:\n sequences += tx_in.sequence\n return utils.hash256(sequences.to_bytes())", "docstring": "BIP143 hashSequence implementation\n\nArgs:\n sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL\n anyone_can_pay (bool): true if ANYONECANPAY should be set\n\nReturns:\n (bytes): the hashSequence, a 32 byte hash", "source": "codesearchnet_filtered"} -{"code": "def qry_create(options):\n qry_string = filt_end = param_str = ''\n filt_st = 'Filters=['\n param_str_default = 'All'\n if options.id:\n qry_string += (\"InstanceIds=['%s']\" % options.id)\n param_str += (\"id: '%s'\" % options.id)\n param_str_default = ''\n if options.instname:\n (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str)\n filt_end = ']'\n param_str_default = ''\n qry_string += (filt_st + (\"{'Name': 'tag:Name', 'Values': ['%s']}\" % options.instname))\n param_str += 
(\"name: '%s'\" % options.instname)\n if options.inst_state:\n (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st)\n qry_string += (\"{'Name': 'instance-state-name','Values': ['%s']}\" % options.inst_state)\n param_str += (\"state: '%s'\" % options.inst_state)\n filt_end = ']'\n param_str_default = ''\n qry_string += filt_end\n param_str += param_str_default\n debg.dprintx('\\nQuery String')\n debg.dprintx(qry_string, True)\n debg.dprint('param_str: ', param_str)\n return (qry_string, param_str)", "docstring": "Create query from the args specified and command chosen.\n\n Creates a query string that incorporates the args in the options\n object, and creates the title for the 'list' function.\n\nArgs:\n options (object): contains args and data from parser\n\nReturns:\n qry_string (str): the query to be used against the aws ec2 client.\n param_str (str): the title to display before the list.", "source": "codesearchnet_filtered"} -{"code": "def to_timestamp(self, data):\n result = pd.Series(index=data.index)\n _slice = (~ data[self.col_name].isnull())\n result[_slice] = data[_slice][self.col_name].astype('int64')\n return result", "docstring": "Transform a datetime series into linux epoch.\n\nArgs:\n data(pandas.DataFrame): DataFrame containins a column named as `self.col_name`.\n\nReturns:\n pandas.Series", "source": "codesearchnet_filtered"} -{"code": "def get_channels(self, condensed=False):\n channel_list = self.slack_client.api_call('channels.list')\n if (not channel_list.get('ok')):\n return None\n if condensed:\n channels = [{'id': item.get('id'), 'name': item.get('name')} for item in channel_list.get('channels')]\n return channels\n else:\n return channel_list", "docstring": "Grabs all channels in the slack team\n\nArgs:\n condensed (bool): if true triggers list condensing functionality\n\nReturns:\n dic: Dict of channels in Slack team.\n See also: https://api.slack.com/methods/channels.list", "source": "codesearchnet_filtered"} -{"code": "def _set_scripts(self, host_metadata, scripts):\n scripts_key = 'deploy-scripts'\n if ('ovirt-scritps' in host_metadata):\n scripts_key = 'ovirt-scripts'\n host_metadata[scripts_key] = scripts\n return host_metadata", "docstring": "Temporary method to set the host scripts\n\n TODO:\n remove once the \"ovirt-scripts\" option gets deprecated\n\nArgs:\n host_metadata(dict): host metadata to set scripts in\n\nReturns:\n dict: the updated metadata", "source": "codesearchnet_filtered"} -{"code": "def movies_in_theaters(self, **kwargs):\n path = self._get_path('movies_in_theaters')\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Gets the movies currently in theaters from the API.\n\nArgs:\n page_limit (optional): number of movies to show per page, default=16\n page (optional): results page number, default=1\n country (optional): localized data for selected country, default=\"us\"\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "source": "codesearchnet_filtered"} -{"code": "def xor_bytes(a, b):\n assert isinstance(a, bytes)\n assert isinstance(b, bytes)\n assert (len(a) == len(b))\n res = bytearray()\n for i in range(len(a)):\n res.append((a[i] ^ b[i]))\n return bytes(res)", "docstring": "XOR on two bytes objects\n\nArgs:\n a (bytes): object 1\n b (bytes): object 2\n\nReturns:\n bytes: The XOR result", "source": "codesearchnet_filtered"} -{"code": "def validate_to_schema(nanopub, schema) -> Tuple[(bool, 
List[Tuple[(str, str)]])]:\n v = jsonschema.Draft4Validator(schema)\n messages = []\n errors = sorted(v.iter_errors(nanopub), key=(lambda e: e.path))\n for error in errors:\n for suberror in sorted(error.context, key=(lambda e: e.schema_path)):\n print(list(suberror.schema_path), suberror.message, sep=', ')\n messages.append(('ERROR', suberror.message))\n is_valid = True\n if errors:\n is_valid = False\n return (is_valid, messages)", "docstring": "Validate nanopub against jsonschema for nanopub\n\nArgs:\n nanopub (Mapping[str, Any]): nanopub dict\n schema (Mapping[str, Any]): nanopub schema\n\nReturns:\n Tuple[bool, List[str]]:\n bool: Is valid? Yes = True, No = False\n List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)\n e.g. [('ERROR', \"'subject' is a required property\")]", "source": "codesearchnet_filtered"} -{"code": "def trace_read(self, offset, num_items):\n buf_size = ctypes.c_uint32(num_items)\n buf = (structs.JLinkTraceData * num_items)()\n res = self._dll.JLINKARM_TRACE_Read(buf, int(offset), ctypes.byref(buf_size))\n if (res == 1):\n raise errors.JLinkException('Failed to read from trace buffer.')\n return list(buf)[:int(buf_size.value)]", "docstring": "Reads data from the trace buffer and returns it.\n\nArgs:\n self (JLink): the ``JLink`` instance.\n offset (int): the offset from which to start reading from the trace\n buffer.\n num_items (int): number of items to read from the trace buffer.\n\nReturns:\n A list of ``JLinkTraceData`` instances corresponding to the items\n read from the trace buffer. Note that this list may have size less\n than ``num_items`` in the event that there are not ``num_items``\n items in the trace buffer.\n\nRaises:\n JLinkException: on error.", "source": "codesearchnet_filtered"} -{"code": "def get_repo_scope(task, name):\n repo_scopes = []\n for scope in task['scopes']:\n if REPO_SCOPE_REGEX.match(scope):\n repo_scopes.append(scope)\n if (len(repo_scopes) > 1):\n raise ValueError('{}: Too many repo_scopes: {}!'.format(name, repo_scopes))\n if repo_scopes:\n return repo_scopes[0]", "docstring": "Given a parent task, return the repo scope for the task.\n\n Background in https://bugzilla.mozilla.org/show_bug.cgi?id=1459705#c3\n\nArgs:\n task (dict): the task definition.\n\nRaises:\n ValueError: on too many `repo_scope`s (we allow for 1 or 0).\n\nReturns:\n str: the ``repo_scope``\n None: if no ``repo_scope`` is found", "source": "codesearchnet_filtered"} -{"code": "def compile_model(self, input_model_config, output_model_config, role, job_name, stop_condition, tags):\n compilation_job_request = {'InputConfig': input_model_config, 'OutputConfig': output_model_config, 'RoleArn': role, 'StoppingCondition': stop_condition, 'CompilationJobName': job_name}\n if (tags is not None):\n compilation_job_request['Tags'] = tags\n LOGGER.info('Creating compilation-job with name: {}'.format(job_name))\n self.sagemaker_client.create_compilation_job(**compilation_job_request)", "docstring": "Create an Amazon SageMaker Neo compilation job.\n\nArgs:\n input_model_config (dict): the trained model and the Amazon S3 location where it is stored.\n output_model_config (dict): Identifies the Amazon S3 location where you want Amazon SageMaker Neo to save\n the results of compilation job\n role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker Neo compilation jobs use this\n role to access model artifacts. 
You must grant sufficient permissions to this role.\n job_name (str): Name of the compilation job being created.\n stop_condition (dict): Defines when compilation job shall finish. Contains entries that can be understood\n by the service like ``MaxRuntimeInSeconds``.\n tags (list[dict]): List of tags for labeling a compile model job. For more, see\n https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.\n\nReturns:\n str: ARN of the compile model job, if it is created.", "source": "codesearchnet_filtered"} -{"code": "def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):\n if (rid is not None):\n assert (type(rid) == list), 'rid must be a list. rid: {}'.format(rid)\n rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if (gctoo_row in rid)]\n num_missing_rids = (len(rid) - len(rows_to_keep))\n if (num_missing_rids != 0):\n logger.info('{} rids were not found in the GCT.'.format(num_missing_rids))\n elif (row_bool is not None):\n assert (len(row_bool) == gctoo.data_df.shape[0]), ('row_bool must have length equal to gctoo.data_df.shape[0]. ' + 'len(row_bool): {}, gctoo.data_df.shape[0]: {}'.format(len(row_bool), gctoo.data_df.shape[0]))\n rows_to_keep = gctoo.data_df.index[row_bool].values\n elif (ridx is not None):\n assert (type(ridx[0]) is int), ('ridx must be a list of integers. ridx[0]: {}, ' + 'type(ridx[0]): {}').format(ridx[0], type(ridx[0]))\n assert (max(ridx) <= gctoo.data_df.shape[0]), ('ridx contains an integer larger than the number of rows in ' + 'the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}').format(max(ridx), gctoo.data_df.shape[0])\n rows_to_keep = gctoo.data_df.index[ridx].values\n else:\n rows_to_keep = gctoo.data_df.index.values\n if (exclude_rid is not None):\n rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if (row_to_keep not in exclude_rid)]\n return rows_to_keep", "docstring": "Figure out based on the possible row inputs which rows to keep.\n\nArgs:\n gctoo (GCToo object):\n rid (list of strings):\n row_bool (boolean array):\n ridx (list of integers):\n exclude_rid (list of strings):\n\nReturns:\n rows_to_keep (list of strings): row ids to be kept", "source": "codesearchnet_filtered"} -{"code": "def _prep_crypto_msg(message):\n signature = message['signature']\n certificate = message['certificate']\n (sliced_signature, sliced_certificate) = ([], [])\n for x in range(0, len(signature), 76):\n sliced_signature.append(signature[x:(x + 76)])\n for x in range(0, len(certificate), 76):\n sliced_certificate.append(certificate[x:(x + 76)])\n message['signature'] = (u'\\n'.join(sliced_signature) + u'\\n')\n message['certificate'] = (u'\\n'.join(sliced_certificate) + u'\\n')\n return message", "docstring": "Split the signature and certificate in the same way M2Crypto does.\n\n M2Crypto is dropping newlines into its signature and certificate. This\n exists purely to maintain backwards compatibility.\n\nArgs:\n message (dict): A message with the ``signature`` and ``certificate`` keywords.\n The values of these two keys must be byte strings.\n\nReturns:\n dict: The same message, but with the values of ``signature`` and ``certificate``\n split every 76 characters with a newline and a final newline at the end.", "source": "codesearchnet_filtered"} -{"code": "def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=None):\n if sparse:\n raise NotImplementedError('SparseDataFrame is not implemented. 
To contribute to Modin, please visit github.com/modin-project/modin.')\n if (not isinstance(data, DataFrame)):\n ErrorMessage.default_to_pandas('`get_dummies` on non-DataFrame')\n return DataFrame(pandas.get_dummies(data, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, columns=columns, sparse=sparse, drop_first=drop_first, dtype=dtype))\n else:\n new_manager = data._query_compiler.get_dummies(columns, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, drop_first=drop_first, dtype=dtype)\n return DataFrame(query_compiler=new_manager)", "docstring": "Convert categorical variable into indicator variables.\n\nArgs:\n data (array-like, Series, or DataFrame): data to encode.\n prefix (string, [string]): Prefix to apply to each encoded column\n label.\n prefix_sep (string, [string]): Separator between prefix and value.\n dummy_na (bool): Add a column to indicate NaNs.\n columns: Which columns to encode.\n sparse (bool): Not Implemented: If True, returns SparseDataFrame.\n drop_first (bool): Whether to remove the first level of encoded data.\n dtype: The dtype for the get_dummies call.\n\nReturns:\n DataFrame or one-hot encoded data.", "source": "codesearchnet_filtered"} -{"code": "def calculate_row_format(columns, keys=None):\n row_format = ''\n if (keys is None):\n keys = columns.keys()\n else:\n keys = [key for key in keys if (key in columns)]\n for key in keys:\n if (len(row_format) > 0):\n row_format += '|'\n row_format += ('%%(%s)-%ds' % (key, columns[key]))\n return (('|' + row_format) + '|')", "docstring": "Calculate row format.\n\nArgs:\n columns (dict): the keys are the column name and the value the max length.\n keys (list): optional list of keys to order columns as well as to filter for them.\n\nReturns:\n str: format for table row", "source": "codesearchnet_filtered"} -{"code": "def create(cls, hashing_algorithm=HashingAlgorithmEnum.SHA_256, digest_value=b'', key_format_type=KeyFormatTypeEnum.RAW):\n algorithm = HashingAlgorithm(hashing_algorithm)\n value = DigestValue(bytearray(digest_value))\n format_type = KeyFormatType(key_format_type)\n return Digest(hashing_algorithm=algorithm, digest_value=value, key_format_type=format_type)", "docstring": "Construct a Digest object from provided digest values.\n\nArgs:\n hashing_algorithm (HashingAlgorithm): An enumeration representing\n the hash algorithm used to compute the digest. Optional,\n defaults to HashingAlgorithm.SHA_256.\n digest_value (byte string): The bytes of the digest hash. Optional,\n defaults to the empty byte string.\n key_format_type (KeyFormatType): An enumeration representing the\n format of the key corresponding to the digest. Optional,\n defaults to KeyFormatType.RAW.\n\nReturns:\n Digest: The newly created Digest.\n\nExample:\n >>> x = Digest.create(HashingAlgorithm.MD5, b'\\x00',\n ... 
KeyFormatType.RAW)\n >>> x.hashing_algorithm\n HashingAlgorithm(value=HashingAlgorithm.MD5)\n >>> x.digest_value\n DigestValue(value=bytearray(b'\\x00'))\n >>> x.key_format_type\n KeyFormatType(value=KeyFormatType.RAW)", "source": "codesearchnet_filtered"} -{"code": "def from_status(cls, status_line, msg=None):\n method = getattr(cls, status_line.lower()[4:].replace(' ', '_'))\n return method(msg)", "docstring": "Returns a class method from bottle.HTTPError.status_line attribute.\n Useful for patching `bottle.HTTPError` for web services.\n\nArgs:\n status_line (str): bottle.HTTPError.status_line text.\n msg: The message data for response.\n\nReturns:\n Class method based on status_line arg.\n\nExample:\n >>> status_line = '401 Unauthorized'\n >>> error_msg = 'Get out!'\n >>> resp = WSResponse.from_status(status_line, error_msg)\n >>> resp['errors']\n ['Get out!']\n >>> resp['status_text']\n 'Unauthorized'", "source": "codesearchnet_filtered"} -{"code": "def GetHasherNamesFromString(cls, hasher_names_string):\n hasher_names = []\n if ((not hasher_names_string) or (hasher_names_string.strip() == 'none')):\n return hasher_names\n if (hasher_names_string.strip() == 'all'):\n return cls.GetHasherNames()\n for hasher_name in hasher_names_string.split(','):\n hasher_name = hasher_name.strip()\n if (not hasher_name):\n continue\n hasher_name = hasher_name.lower()\n if (hasher_name in cls._hasher_classes):\n hasher_names.append(hasher_name)\n return hasher_names", "docstring": "Retrieves a list of a hasher names from a comma separated string.\n\n Takes a string of comma separated hasher names transforms it to a list of\n hasher names.\n\nArgs:\n hasher_names_string (str): comma separated names of hashers to enable,\n the string 'all' to enable all hashers or 'none' to disable all\n hashers.\n\nReturns:\n list[str]: names of valid hashers from the string, or an empty list if no\n valid names are found.", "source": "codesearchnet_filtered"} -{"code": "def Match(self, event):\n if (not self._matcher):\n return True\n self._decision = self._matcher.Matches(event)\n return self._decision", "docstring": "Determines if an event matches the filter.\n\nArgs:\n event (EventObject): an event.\n\nReturns:\n bool: True if the event matches the filter.", "source": "codesearchnet_filtered"} -{"code": "def NewFromJSON(data):\n return Comment(body=data.get('body', None), posted_at=data.get('posted_at', None), user=User.NewFromJSON(data.get('user', None)))", "docstring": "Create a new Comment instance from a JSON dict.\n\nArgs:\n data (dict): JSON dictionary representing a Comment.\n\nReturns:\n A Comment instance.", "source": "codesearchnet_filtered"} -{"code": "def get_ids(self, features, threshold=0.0, func=np.sum, get_weights=False):\n if isinstance(features, str):\n features = [features]\n features = self.search_features(features)\n feature_weights = self.data.ix[(:, features)]\n weights = feature_weights.apply(func, 1)\n above_thresh = weights[(weights >= threshold)]\n return (above_thresh if get_weights else list(above_thresh.index))", "docstring": "Returns a list of all studies in the table that meet the desired\n feature-based criteria.\n\n Will most commonly be used to retrieve studies that use one or more\n features with some minimum frequency; e.g.,:\n\n get_ids(['fear', 'anxiety'], threshold=0.001)\n\nArgs:\n features (lists): a list of feature names to search on.\n threshold (float): optional float indicating threshold features\n must pass to be included.\n func (Callable): any numpy function to use 
for thresholding\n (default: sum). The function will be applied to the list of\n features and the result compared to the threshold. This can be\n used to change the meaning of the query in powerful ways. E.g,:\n max: any of the features have to pass threshold\n (i.e., max > thresh)\n min: all features must each individually pass threshold\n (i.e., min > thresh)\n sum: the summed weight of all features must pass threshold\n (i.e., sum > thresh)\n get_weights (bool): if True, returns a dict with ids => weights.\n\nReturns:\n When get_weights is false (default), returns a list of study\n names. When true, returns a dict, with study names as keys\n and feature weights as values.", "source": "codesearchnet_filtered"} -{"code": "def create_projection(self, fov: float=75.0, near: float=1.0, far: float=100.0, aspect_ratio: float=None):\n return matrix44.create_perspective_projection_matrix(fov, (aspect_ratio or self.window.aspect_ratio), near, far, dtype='f4')", "docstring": "Create a projection matrix with the following parameters.\n When ``aspect_ratio`` is not provided the configured aspect\n ratio for the window will be used.\n\nArgs:\n fov (float): Field of view (float)\n near (float): Camera near value\n far (float): Camrea far value\n\n Keyword Args:\n aspect_ratio (float): Aspect ratio of the viewport\n\nReturns:\n The projection matrix as a float32 :py:class:`numpy.array`", "source": "codesearchnet_filtered"} -{"code": "def create_policy(self, account, client, document, name, arn=None):\n if ((not arn) and (not name)):\n raise ValueError('create_policy must be called with either arn or name in the argument list')\n if arn:\n response = client.list_policy_versions(PolicyArn=arn)\n if (len(response['Versions']) >= 5):\n version = [x for x in sorted(response['Versions'], key=(lambda k: k['CreateDate'])) if (not x['IsDefaultVersion'])][0]\n self.log.info('Deleting oldest IAM Policy version {}/{}'.format(arn, version['VersionId']))\n client.delete_policy_version(PolicyArn=arn, VersionId=version['VersionId'])\n auditlog(event='iam.check_roles.delete_policy_version', actor=self.ns, data={'account': account.account_name, 'policyName': name, 'policyArn': arn, 'versionId': version['VersionId']})\n res = client.create_policy_version(PolicyArn=arn, PolicyDocument=document, SetAsDefault=True)\n else:\n res = client.create_policy(PolicyName=name, PolicyDocument=document)\n auditlog(event='iam.check_roles.create_policy', actor=self.ns, data={'account': account.account_name, 'policyName': name, 'policyArn': arn})\n return res", "docstring": "Create a new IAM policy.\n\n If the policy already exists, a new version will be added and if needed the oldest policy version not in use\n will be removed. 
Returns a dictionary containing the policy or version information\n\nArgs:\n account (:obj:`Account`): Account to create the policy on\n client (:obj:`boto3.client`): A boto3 client object\n document (`str`): Policy document\n name (`str`): Name of the policy to create / update\n arn (`str`): Optional ARN for the policy to update\n\nReturns:\n `dict`", "source": "codesearchnet_filtered"} -{"code": "def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):\n self._remote = RemoteClient(host, username, password, private_key, private_key_pass)\n self._remote_id = uuid.uuid4().hex", "docstring": "Defines the remote scheduler\n\nArgs:\n host (str): the hostname or ip address of the remote scheduler\n username (str, optional): the username used to connect to the remote scheduler. Default is 'root'\n password (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.\n private_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.\n private_key_pass (str, optional): the passphrase for the private_key. Default is None.\n\nReturns:\n An RemoteClient representing the remote scheduler.", "source": "codesearchnet_filtered"} -{"code": "def prune_volumes(self, filters=None):\n params = {}\n if filters:\n params['filters'] = utils.convert_filters(filters)\n url = self._url('/volumes/prune')\n return self._result(self._post(url, params=params), True)", "docstring": "Delete unused volumes\n\nArgs:\n filters (dict): Filters to process on the prune list.\n\nReturns:\n (dict): A dict containing a list of deleted volume names and\n the amount of disk space reclaimed in bytes.\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "codesearchnet_filtered"} -{"code": "def remove_extra_presentations(self, resource, timeout=(- 1)):\n uri = (self.URI + '/repair')\n custom_headers = {'Accept-Language': 'en_US'}\n return self._client.create(resource, uri=uri, timeout=timeout, custom_headers=custom_headers)", "docstring": "Removes extra presentations from a specified server profile.\n\nArgs:\n resource (dict):\n Object to create\n timeout:\n Timeout in seconds. Wait for task completion by default. 
The timeout does not abort the operation\n in OneView; it just stops waiting for its completion.\n\nReturns:\n dict: Associated storage attachment resource.", "source": "codesearchnet_filtered"} -{"code": "def parse_table_data(lines):\n data = '\\n'.join([i.rstrip() for i in lines if ((not i.startswith(('^', '!', '#'))) and i.rstrip())])\n if data:\n return read_csv(StringIO(data), index_col=None, sep='\\t')\n else:\n return DataFrame()", "docstring": "Parse list of lines from SOFT file into DataFrame.\n\nArgs:\n lines (:obj:`Iterable`): Iterator over the lines.\n\nReturns:\n :obj:`pandas.DataFrame`: Table data.", "source": "codesearchnet_filtered"} -{"code": "def compute_metrics(self, previous):\n delta_t = self.time_difference(previous)\n delta_x = self.distance(previous)\n vel = 0\n delta_v = 0\n acc = 0\n if (delta_t != 0):\n vel = (delta_x / delta_t)\n delta_v = (vel - previous.vel)\n acc = (delta_v / delta_t)\n self.dt = delta_t\n self.dx = delta_x\n self.acc = acc\n self.vel = vel\n return self", "docstring": "Computes the metrics of this point\n\n Computes and updates the dt, vel and acc attributes.\n\nArgs:\n previous (:obj:`Point`): Point before\n\nReturns:\n :obj:`Point`: Self", "source": "codesearchnet_filtered"} -{"code": "def validate(self, definition, version=None, strict=False):\n if (not HAS_KUBERNETES_VALIDATE):\n raise KubernetesValidateMissing()\n errors = list()\n warnings = list()\n try:\n if (version is None):\n try:\n version = self.version['kubernetes']['gitVersion']\n except KeyError:\n version = kubernetes_validate.latest_version()\n kubernetes_validate.validate(definition, version, strict)\n except kubernetes_validate.utils.ValidationError as e:\n errors.append(('resource definition validation error at %s: %s' % ('.'.join([str(item) for item in e.path]), e.message)))\n except VersionNotSupportedError as e:\n errors.append(('Kubernetes version %s is not supported by kubernetes-validate' % version))\n except kubernetes_validate.utils.SchemaNotFoundError as e:\n warnings.append(('Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)' % (e.kind, e.api_version, e.version)))\n return (warnings, errors)", "docstring": "validate checks a kubernetes resource definition\n\nArgs:\n definition (dict): resource definition\n version (str): version of kubernetes to validate against\n strict (bool): whether unexpected additional properties should be considered errors\n\nReturns:\n warnings (list), errors (list): warnings are missing validations, errors are validation failures", "source": "codesearchnet_filtered"} -{"code": "def create_event_model(event):\n if event['type'].startswith('task'):\n factory = {JobEventName.Started: JobStartedEvent, JobEventName.Succeeded: JobSucceededEvent, JobEventName.Stopped: JobStoppedEvent, JobEventName.Aborted: JobAbortedEvent}\n if (event['type'] in factory):\n return factory[event['type']].from_event(event)\n else:\n raise JobEventTypeUnsupported('Unsupported event type {}'.format(event['type']))\n elif event['type'].startswith('worker'):\n raise WorkerEventTypeUnsupported('Unsupported event type {}'.format(event['type']))\n else:\n raise EventTypeUnknown('Unknown event type {}'.format(event['type']))", "docstring": "Factory function that turns a celery event into an event object.\n\nArgs:\n event (dict): A dictionary that represents a celery event.\n\nReturns:\n object: An event object representing the received event.\n\nRaises:\n JobEventTypeUnsupported: If an unsupported celery 
job event was received.\n WorkerEventTypeUnsupported: If an unsupported celery worker event was received.\n EventTypeUnknown: If an unknown event type (neither job nor worker) was received.", "source": "codesearchnet_filtered"} -{"code": "def get(name, *default):\n global g_config\n curr = g_config\n for part in name.split('.'):\n if (part in curr):\n curr = curr[part]\n elif default:\n return default[0]\n else:\n raise AttributeError(\"Config value '{}' does not exist\".format(name))\n return curr", "docstring": "Get config value with the given name and optional default.\n\nArgs:\n name (str):\n The name of the config value.\n *default (Any):\n If given and the key doesn't not exist, this will be returned\n instead. If it's not given and the config value does not exist,\n AttributeError will be raised\n\nReturns:\n The requested config value. This is one of the global values defined\n in this file. If the value does not exist it will return `default` if\n give or raise `AttributeError`.\n\nRaises:\n AttributeError: If the value does not exist and `default` was not given.", "source": "codesearchnet_filtered"} -{"code": "def flatten_rules(self, declarations):\n rules = []\n for (protocole, paths) in declarations:\n if protocole:\n continue\n rules.extend([self.strip_quotes(v.strip()) for v in paths.split(',')])\n return list(filter(self.filter_rules, rules))", "docstring": "Flatten returned import rules from regex.\n\n Because import rules can contains multiple items in the same rule\n (called multiline import rule), the regex ``REGEX_IMPORT_RULE``\n return a list of unquoted items for each rule.\n\nArgs:\n declarations (list): A SCSS source.\n\nReturns:\n list: Given SCSS source with all comments removed.", "source": "codesearchnet_filtered"} -{"code": "def get_reference(self, datas, name):\n rule_name = '-'.join((RULE_REFERENCE, name))\n structure_mode = 'nested'\n if (rule_name not in datas):\n msg = \"Unable to find enabled reference '{}'\"\n raise SerializerError(msg.format(name))\n properties = datas.get(rule_name)\n if ('structure' in properties):\n if (properties['structure'] == 'flat'):\n structure_mode = 'flat'\n elif (properties['structure'] == 'list'):\n structure_mode = 'list'\n elif (properties['structure'] == 'string'):\n structure_mode = 'string'\n elif (properties['structure'] == 'json'):\n structure_mode = 'json'\n elif (properties['structure'] == 'nested'):\n pass\n else:\n msg = \"Invalid structure mode name '{}' for reference '{}'\"\n raise SerializerError(msg.format(structure_mode, name))\n del properties['structure']\n for item in properties.keys():\n self.validate_variable_name(item)\n if (structure_mode == 'flat'):\n context = self.serialize_to_flat(name, properties)\n elif (structure_mode == 'list'):\n context = self.serialize_to_list(name, properties)\n elif (structure_mode == 'string'):\n context = self.serialize_to_string(name, properties)\n elif (structure_mode == 'nested'):\n context = self.serialize_to_nested(name, properties)\n elif (structure_mode == 'json'):\n context = self.serialize_to_json(name, properties)\n return context", "docstring": "Get serialized reference datas\n\n Because every reference is turned to a dict (that stands on ``keys``\n variable that is a list of key names), every variables must have the\n same exact length of word than the key name list.\n\n A reference name starts with 'styleguide-reference-' followed by\n name for reference.\n\n A reference can contains variable ``--structure`` setted to ``\"flat\"``,\n ``\"list\"`` or 
``\"string\"`` to define serialization structure.\n\nArgs:\n datas (dict): Data where to search for reference declaration. This\n is commonly the fully parsed manifest.\n name (string): Reference name to get and serialize.\n\nReturns:\n collections.OrderedDict: Serialized reference datas.", "source": "codesearchnet_filtered"} -{"code": "def mac(self, algorithm, key, data):\n mac_data = None\n if (algorithm in self._hash_algorithms.keys()):\n self.logger.info('Generating a hash-based message authentication code using {0}'.format(algorithm.name))\n hash_algorithm = self._hash_algorithms.get(algorithm)\n try:\n h = hmac.HMAC(key, hash_algorithm(), backend=default_backend())\n h.update(data)\n mac_data = h.finalize()\n except Exception as e:\n self.logger.exception(e)\n raise exceptions.CryptographicFailure('An error occurred while computing an HMAC. See the server log for more information.')\n elif (algorithm in self._symmetric_key_algorithms.keys()):\n self.logger.info('Generating a cipher-based message authentication code using {0}'.format(algorithm.name))\n cipher_algorithm = self._symmetric_key_algorithms.get(algorithm)\n try:\n c = cmac.CMAC(cipher_algorithm(key), backend=default_backend())\n c.update(data)\n mac_data = c.finalize()\n except Exception as e:\n raise exceptions.CryptographicFailure('An error occurred while computing a CMAC. See the server log for more information.')\n else:\n raise exceptions.InvalidField('The cryptographic algorithm ({0}) is not a supported for a MAC operation.'.format(algorithm))\n return mac_data", "docstring": "Generate message authentication code.\n\nArgs:\n algorithm(CryptographicAlgorithm): An enumeration specifying the\n algorithm for which the MAC operation will use.\n key(bytes): secret key used in the MAC operation\n data(bytes): The data to be MACed.\n\nReturns:\n bytes: The MACed data\n\nRaises:\n InvalidField: Raised when the algorithm is unsupported or the\n length is incompatible with the algorithm.\n CryptographicFailure: Raised when the key generation process\n fails.\n\nExample:\n >>> engine = CryptographyEngine()\n >>> mac_data = engine.mac(\n ... CryptographicAlgorithm.HMAC-SHA256, b'\\x01\\x02\\x03\\x04',\n ... 
b'\\x05\\x06\\x07\\x08')", "source": "codesearchnet_filtered"} -{"code": "def concept_distance(c1, c2):\n cause_purview = tuple(set((c1.cause.purview + c2.cause.purview)))\n effect_purview = tuple(set((c1.effect.purview + c2.effect.purview)))\n return (repertoire_distance(c1.expand_cause_repertoire(cause_purview), c2.expand_cause_repertoire(cause_purview)) + repertoire_distance(c1.expand_effect_repertoire(effect_purview), c2.expand_effect_repertoire(effect_purview)))", "docstring": "Return the distance between two concepts in concept space.\n\nArgs:\n c1 (Concept): The first concept.\n c2 (Concept): The second concept.\n\nReturns:\n float: The distance between the two concepts in concept space.", "source": "codesearchnet_filtered"} -{"code": "def single_device(cl_device_type='GPU', platform=None, fallback_to_any_device_type=False):\n if isinstance(cl_device_type, str):\n cl_device_type = device_type_from_string(cl_device_type)\n device = None\n if (platform is None):\n platforms = cl.get_platforms()\n else:\n platforms = [platform]\n for platform in platforms:\n devices = platform.get_devices(device_type=cl_device_type)\n for dev in devices:\n if device_supports_double(dev):\n try:\n env = CLEnvironment(platform, dev)\n return [env]\n except cl.RuntimeError:\n pass\n if (not device):\n if fallback_to_any_device_type:\n return cl.get_platforms()[0].get_devices()\n else:\n raise ValueError('No devices of the specified type ({}) found.'.format(cl.device_type.to_string(cl_device_type)))\n raise ValueError('No suitable OpenCL device found.')", "docstring": "Get a list containing a single device environment, for a device of the given type on the given platform.\n\n This will only fetch devices that support double (possibly only double with a pragma\n defined, but still, it should support double).\n\nArgs:\n cl_device_type (cl.device_type.* or string): The type of the device we want,\n can be a opencl device type or a string matching 'GPU', 'CPU' or 'ALL'.\n platform (opencl platform): The opencl platform to select the devices from\n fallback_to_any_device_type (boolean): If True, try to fallback to any possible device in the system.\n\nReturns:\n list of CLEnvironment: List with one element, the CL runtime environment requested.", "source": "codesearchnet_filtered"} -{"code": "def extend(self, elts):\n elts = elts[:]\n self._in_deque.append(elts)\n event = self._event_for(elts)\n self._event_deque.append(event)\n return event", "docstring": "Adds elts to the tasks.\n\nArgs:\n elts (Sequence): a iterable of elements that can be appended to the\n task's bundle_field.\n\nReturns:\n Event: an event that can be used to wait on the response.", "source": "codesearchnet_filtered"} -{"code": "def split_key(key, max_keys=0):\n parts = [x for x in re.split(SPLIT_REGEX, key) if (x != '.')]\n result = []\n while (len(parts) > 0):\n if ((max_keys > 0) and (len(result) == max_keys)):\n break\n result.append(parts.pop(0))\n if (len(parts) > 0):\n result.append('.'.join(parts))\n return result", "docstring": "Splits a key but allows dots in the key name if they're scaped properly.\n\n Splitting this complex key:\n\n complex_key = \".dont\\.splitme.d\\.o\\. origen.splitme\\.dontsplit.splitme.\"\n split_key(complex_key)\n\n results in:\n\n ['', 'dont\\.splitme', 'd\\.o\\. origen', 'splitme\\.dontsplit', 'splitme', '']\n\nArgs:\n key (basestring): The key to be splitted.\n max_keys (int): The maximum number of keys to be extracted. 
0 means no\n limits.\n\nReturns:\n A list of keys", "source": "codesearchnet_filtered"} -{"code": "def get_string(self, direct=True, vasp4_compatible=False, significant_figures=6):\n latt = self.structure.lattice\n if (np.linalg.det(latt.matrix) < 0):\n latt = Lattice((- latt.matrix))\n format_str = '{{:.{0}f}}'.format(significant_figures)\n lines = [self.comment, '1.0']\n for v in latt.matrix:\n lines.append(' '.join([format_str.format(c) for c in v]))\n if (self.true_names and (not vasp4_compatible)):\n lines.append(' '.join(self.site_symbols))\n lines.append(' '.join([str(x) for x in self.natoms]))\n if self.selective_dynamics:\n lines.append('Selective dynamics')\n lines.append(('direct' if direct else 'cartesian'))\n selective_dynamics = self.selective_dynamics\n for (i, site) in enumerate(self.structure):\n coords = (site.frac_coords if direct else site.coords)\n line = ' '.join([format_str.format(c) for c in coords])\n if (selective_dynamics is not None):\n sd = [('T' if j else 'F') for j in selective_dynamics[i]]\n line += (' %s %s %s' % (sd[0], sd[1], sd[2]))\n line += (' ' + site.species_string)\n lines.append(line)\n if self.velocities:\n try:\n lines.append('')\n for v in self.velocities:\n lines.append(' '.join([format_str.format(i) for i in v]))\n except:\n warnings.warn('Velocities are missing or corrupted.')\n if self.predictor_corrector:\n lines.append('')\n if self.predictor_corrector_preamble:\n lines.append(self.predictor_corrector_preamble)\n pred = np.array(self.predictor_corrector)\n for col in range(3):\n for z in pred[(:, col)]:\n lines.append(' '.join([format_str.format(i) for i in z]))\n else:\n warnings.warn('Preamble information missing or corrupt. Writing Poscar with no predictor corrector data.')\n return ('\\n'.join(lines) + '\\n')", "docstring": "Returns a string to be written as a POSCAR file. By default, site\n symbols are written, which means compatibility is for vasp >= 5.\n\nArgs:\n direct (bool): Whether coordinates are output in direct or\n cartesian. Defaults to True.\n vasp4_compatible (bool): Set to True to omit site symbols on 6th\n line to maintain backward vasp 4.x compatibility. Defaults\n to False.\n significant_figures (int): No. of significant figures to\n output all quantities. Defaults to 6. 
Note that positions are\n output in fixed point, while velocities are output in\n scientific format.\n\nReturns:\n String representation of POSCAR.", "source": "codesearchnet_filtered"} -{"code": "def _init_metadata_service(self, version):\n metadata_cfg = self._load_config_section(CONFIG_METADATA_SECTION)\n self._token_metadata = metadata_cfg[CONFIG_TOKEN]\n proto = metadata_cfg[CONFIG_PROTOCOL]\n host = metadata_cfg[CONFIG_HOST]\n self._metadata = MetadataService(host, version)\n self._metadata.base_protocol = proto\n self._metadata.set_auth(self._token_metadata)", "docstring": "Method to initialize the Metadata Service from the config data\n\nArgs:\n version (string): Version of Boss API to use.\n\nReturns:\n None\n\nRaises:\n (KeyError): if given invalid version.", "source": "codesearchnet_filtered"} -{"code": "def from_b58check(private_key):\n b58dec = base58.b58decode_check(private_key)\n version = b58dec[0]\n assert (version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION])\n return PrivateKey(int.from_bytes(b58dec[1:], 'big'))", "docstring": "Decodes a Base58Check encoded private-key.\n\nArgs:\n private_key (str): A Base58Check encoded private key.\n\nReturns:\n PrivateKey: A PrivateKey object", "source": "codesearchnet_filtered"} -{"code": "def get_details(app='groupproject', env='dev', region='us-east-1'):\n url = '{host}/applications/{app}'.format(host=API_URL, app=app)\n request = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n if (not request.ok):\n raise SpinnakerAppNotFound('\"{0}\" not found.'.format(app))\n app_details = request.json()\n LOG.debug('App details: %s', app_details)\n group = app_details['attributes'].get('repoProjectKey')\n project = app_details['attributes'].get('repoSlug')\n generated = gogoutils.Generator(group, project, env=env, region=region, formats=APP_FORMATS)\n LOG.debug('Application details: %s', generated)\n return generated", "docstring": "Extract details for Application.\n\nArgs:\n app (str): Application Name\n env (str): Environment/account to get details from\n\nReturns:\n collections.namedtuple with _group_, _policy_, _profile_, _role_,\n _user_.", "source": "codesearchnet_filtered"} -{"code": "def get_members(self, name):\n grpid = re.search('(\\\\d+)', name).group()\n command = ('show port-channel %s all-ports' % grpid)\n config = self.node.enable(command, 'text')\n return re.findall('\\\\b(?!Peer)Ethernet[\\\\d/]*\\\\b', config[0]['result']['output'])", "docstring": "Returns the member interfaces for the specified Port-Channel\n\nArgs:\n name(str): The Port-channel interface name to return the member\n interfaces for\n\nReturns:\n A list of physical interface names that belong to the specified\n interface", "source": "codesearchnet_filtered"} -{"code": "def upload_urls(self, project, files, run=None, entity=None, description=None):\n query = gql('\\n query Model($name: String!, $files: [String]!, $entity: String!, $run: String!, $description: String) {\\n model(name: $name, entityName: $entity) {\\n bucket(name: $run, desc: $description) {\\n id\\n files(names: $files) {\\n edges {\\n node {\\n name\\n url(upload: true)\\n updatedAt\\n }\\n }\\n }\\n }\\n }\\n }\\n ')\n run_id = (run or self.settings('run'))\n entity = (entity or self.settings('entity'))\n query_result = self.gql(query, variable_values={'name': project, 'run': run_id, 'entity': entity, 'description': description, 'files': [file for file in files]})\n run = query_result['model']['bucket']\n if run:\n result = {file['name']: file for file in 
self._flatten_edges(run['files'])}\n return (run['id'], result)\n else:\n raise CommError('Run does not exist {}/{}/{}.'.format(entity, project, run_id))", "docstring": "Generate temporary resumeable upload urls\n\nArgs:\n project (str): The project to download\n files (list or dict): The filenames to upload\n run (str, optional): The run to upload to\n entity (str, optional): The entity to scope this project to. Defaults to wandb models\n\nReturns:\n (bucket_id, file_info)\n bucket_id: id of bucket we uploaded to\n file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files.\n {\n 'weights.h5': { \"url\": \"https://weights.url\" },\n 'model.json': { \"url\": \"https://model.json\", \"updatedAt\": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },\n }", "source": "codesearchnet_filtered"} -{"code": "def GetLVMLogicalVolumeByPathSpec(self, path_spec):\n volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)\n if (volume_index is None):\n return None\n return self._vslvm_volume_group.get_logical_volume(volume_index)", "docstring": "Retrieves a LVM logical volume for a path specification.\n\nArgs:\n path_spec (PathSpec): path specification.\n\nReturns:\n pyvslvm.logical_volume: a LVM logical volume or None if not available.", "source": "codesearchnet_filtered"} -{"code": "def _parse_ospf_process_id(self, config):\n match = re.search('^router ospf (\\\\d+)', config)\n return dict(ospf_process_id=int(match.group(1)))", "docstring": "Parses config file for the OSPF proc ID\n\nArgs:\n config(str): Running configuration\n\nReturns:\n dict: key: ospf_process_id (int)", "source": "codesearchnet_filtered"} -{"code": "def get_keyvault(access_token, subscription_id, rgname, vault_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API])\n return do_get(endpoint, access_token)", "docstring": "Gets details about the named key vault.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n rgname (str): Azure resource group name.\n vault_name (str): Name of the key vault.\n\nReturns:\n HTTP response. JSON body of key vault properties.", "source": "codesearchnet_filtered"} -{"code": "def create_config(sections, section_contents):\n (sections_length, section_contents_length) = (len(sections), len(section_contents))\n if (sections_length != section_contents_length):\n raise ValueError('Mismatch between argument lengths.\\nlen(sections) = {}\\nlen(section_contents) = {}'.format(sections_length, section_contents_length))\n config = configparser.ConfigParser()\n for (section, section_content) in zip(sections, section_contents):\n config[section] = section_content\n return config", "docstring": "Create a config file from the provided sections and key value pairs.\n\nArgs:\n sections (List[str]): A list of section keys.\n key_value_pairs (Dict[str, str]): A list of of dictionaries. Must be as long as\n the list of sections. 
That is to say, if there are two sections, there should be two\n dicts.\n\nReturns:\n configparser.ConfigParser: A ConfigParser.\n\nRaises:\n ValueError", "source": "codesearchnet_filtered"} -{"code": "def append_with_data(url, data):\n if (data is None):\n return url\n url_parts = list(urlparse(url))\n query = OrderedDict(parse_qsl(url_parts[4], keep_blank_values=True))\n query.update(data)\n url_parts[4] = URLHelper.query_dict_to_string(query)\n return urlunparse(url_parts)", "docstring": "Append the given URL with the given data OrderedDict.\n\nArgs:\n url (str): The URL to append.\n data (obj): The key value OrderedDict to append to the URL.\n\nReturns:\n str: The new URL.", "source": "codesearchnet_filtered"} -{"code": "def iterate_ngrams(text, n):\n if (n <= 0):\n raise ValueError('n must be a positive integer')\n return [text[i:(i + n)] for i in range(((len(text) - n) + 1))]", "docstring": "Generator to yield ngrams in ``text``.\n\nExample:\n >>> for ngram in iterate_ngrams(\"example\", 4):\n ... print(ngram)\n exam\n xamp\n ampl\n mple\n\nArgs:\n text (str): text to iterate over\n n (int): size of window for iteration\n\nReturns:\n Generator expression to yield the next ngram in the text\n\nRaises:\n ValueError: If n is non positive", "source": "codesearchnet_filtered"} -{"code": "def line_id(self, lat):\n if (self.grid == 'WAC'):\n line = np.rint(((1.0 + self.LINE_PROJECTION_OFFSET) - (((self.A_AXIS_RADIUS * np.pi) * lat) / ((self.MAP_SCALE * 0.001) * 180))))\n else:\n line = (np.rint((float(self.LINE_PROJECTION_OFFSET) - (float(self.MAP_RESOLUTION) * (lat - float(self.CENTER_LATITUDE))))) + 1)\n return self._control_line(line)", "docstring": "Return the corresponding line\n\nArgs:\n lat (int): latitude in degree\n\nReturns:\n Correponding line", "source": "codesearchnet_filtered"} -{"code": "def check_prerequisites(prerequisites, checker, msg_tmpl='Prerequisites \"{}\" are required in method \"{}\" but not found, please install them first.'):\n\n def wrap(func):\n\n @functools.wraps(func)\n def wrapped_func(*args, **kwargs):\n requirements = ([prerequisites] if isinstance(prerequisites, str) else prerequisites)\n missing = []\n for item in requirements:\n if (not checker(item)):\n missing.append(item)\n if missing:\n print(msg_tmpl.format(', '.join(missing), func.__name__))\n raise RuntimeError('Prerequisites not meet.')\n else:\n return func(*args, **kwargs)\n return wrapped_func\n return wrap", "docstring": "A decorator factory to check if prerequisites are satisfied.\n\nArgs:\n prerequisites (str of list[str]): Prerequisites to be checked.\n checker (callable): The checker method that returns True if a\n prerequisite is meet, False otherwise.\n msg_tmpl (str): The message template with two variables.\n\nReturns:\n decorator: A specific decorator.", "source": "codesearchnet_filtered"} -{"code": "def offsets_for_times(self, timestamps):\n if (self.config['api_version'] <= (0, 10, 0)):\n raise UnsupportedVersionError('offsets_for_times API not supported for cluster version {}'.format(self.config['api_version']))\n for (tp, ts) in six.iteritems(timestamps):\n timestamps[tp] = int(ts)\n if (ts < 0):\n raise ValueError('The target time for partition {} is {}. The target time cannot be negative.'.format(tp, ts))\n return self._fetcher.get_offsets_by_times(timestamps, self.config['request_timeout_ms'])", "docstring": "Look up the offsets for the given partitions by timestamp. 
The\n returned offset for each partition is the earliest offset whose\n timestamp is greater than or equal to the given timestamp in the\n corresponding partition.\n\n This is a blocking call. The consumer does not have to be assigned the\n partitions.\n\n If the message format version in a partition is before 0.10.0, i.e.\n the messages do not have timestamps, ``None`` will be returned for that\n partition. ``None`` will also be returned for the partition if there\n are no messages in it.\n\nNote:\n This method may block indefinitely if the partition does not exist.\n\nArgs:\n timestamps (dict): ``{TopicPartition: int}`` mapping from partition\n to the timestamp to look up. Unit should be milliseconds since\n beginning of the epoch (midnight Jan 1, 1970 (UTC))\n\nReturns:\n ``{TopicPartition: OffsetAndTimestamp}``: mapping from partition\n to the timestamp and offset of the first message with timestamp\n greater than or equal to the target timestamp.\n\nRaises:\n ValueError: If the target timestamp is negative\n UnsupportedVersionError: If the broker does not support looking\n up the offsets by timestamp.\n KafkaTimeoutError: If fetch failed in request_timeout_ms", "source": "codesearchnet_filtered"} -{"code": "def render_trees(trees, path_composer):\n trees = list(trees)\n\n def create_pub_cache(trees):\n \"\\n Create uuid -> DBPublication cache from all uuid's linked from `trees`.\\n\\n Args:\\n trees (list): List of :class:`.Tree`.\\n\\n Returns:\\n dict: {uuid: DBPublication}\\n \"\n sub_pubs_uuids = sum((x.collect_publications() for x in trees), [])\n uuid_mapping = {uuid: search_pubs_by_uuid(uuid) for uuid in set(sub_pubs_uuids)}\n return {uuid: pub[0] for (uuid, pub) in uuid_mapping.iteritems() if pub}\n pub_cache = create_pub_cache(trees)\n\n def render_tree(tree, ind=1):\n '\\n Render the tree into HTML using :attr:`TREE_TEMPLATE`. Private trees\\n are ignored.\\n\\n Args:\\n tree (obj): :class:`.Tree` instance.\\n ind (int, default 1): Indentation. This function is called\\n recursively.\\n\\n Returns:\\n str: Rendered string.\\n '\n if (not tree.is_public):\n return ''\n rendered_tree = SimpleTemplate(TREE_TEMPLATE).render(tree=tree, render_tree=render_tree, ind=ind, path_composer=path_composer, pub_cache=pub_cache)\n ind_txt = (ind * ' ')\n return (ind_txt + ('\\n' + ind_txt).join(rendered_tree.splitlines()))\n parent = tree_handler().get_parent(trees[0])\n link_up = (path_composer(parent) if parent else None)\n return SimpleTemplate(TREES_TEMPLATE).render(trees=trees, render_tree=render_tree, link_up=link_up)", "docstring": "Render list of `trees` to HTML.\n\nArgs:\n trees (list): List of :class:`.Tree`.\n path_composer (fn reference): Function used to compose paths from UUID.\n Look at :func:`.compose_tree_path` from :mod:`.web_tools`.\n\nReturns:\n str: HTML representation of trees.", "source": "codesearchnet_filtered"} -{"code": "def _GetSanitizedEventValues(self, event):\n event_values = {}\n for (attribute_name, attribute_value) in event.GetAttributes():\n if (attribute_name == 'regvalue'):\n continue\n if (attribute_name == 'pathspec'):\n try:\n attribute_value = JsonPathSpecSerializer.WriteSerialized(attribute_value)\n except TypeError:\n continue\n event_values[attribute_name] = attribute_value\n try:\n attribute_value = timelib.Timestamp.RoundToSeconds(event.timestamp)\n except TypeError as exception:\n logger.warning('Unable to round timestamp {0!s}. error: {1!s}. 
Defaulting to 0'.format(event.timestamp, exception))\n attribute_value = 0\n attribute_value = timelib.Timestamp.CopyToIsoFormat(attribute_value, timezone=self._output_mediator.timezone)\n event_values['datetime'] = attribute_value\n (message, _) = self._output_mediator.GetFormattedMessages(event)\n if (message is None):\n data_type = getattr(event, 'data_type', 'UNKNOWN')\n raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))\n event_values['message'] = message\n try:\n labels = list(event_values['tag'].labels)\n except (KeyError, AttributeError):\n labels = []\n event_values['tag'] = labels\n (source_short, source) = self._output_mediator.GetFormattedSources(event)\n if ((source is None) or (source_short is None)):\n data_type = getattr(event, 'data_type', 'UNKNOWN')\n raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))\n event_values['source_short'] = source_short\n event_values['source_long'] = source\n return event_values", "docstring": "Sanitizes the event for use in Elasticsearch.\n\n The event values need to be sanitized to prevent certain values from\n causing problems when indexing with Elasticsearch. For example the path\n specification is a nested dictionary which will cause problems for\n Elasticsearch automatic indexing.\n\nArgs:\n event (EventObject): event.\n\nReturns:\n dict[str, object]: sanitized event values.\n\nRaises:\n NoFormatterFound: if no event formatter can be found to match the data\n type in the event.", "source": "codesearchnet_filtered"} -{"code": "def get_cert_contents(kwargs):\n paths = {'certificate': kwargs.get('path_to_certificate'), 'private_key': kwargs.get('path_to_private_key'), 'chain': kwargs.get('path_to_chain')}\n for (key, value) in paths.items():\n if (value is not None):\n continue\n path = input(('Path to %s (skip): ' % (key,)))\n if ((path == 'skip') or (not path.strip())):\n continue\n paths[key] = path\n parameters = {'ServerCertificateName': kwargs.get('cert_name')}\n for (key, path) in paths.items():\n if (not path):\n continue\n try:\n contents = path.read()\n except AttributeError:\n with open(utils.full_path(path)) as read_file:\n contents = read_file.read()\n if (key == 'certificate'):\n parameters['CertificateBody'] = contents\n elif (key == 'private_key'):\n parameters['PrivateKey'] = contents\n elif (key == 'chain'):\n parameters['CertificateChain'] = contents\n return parameters", "docstring": "Builds parameters with server cert file contents.\n\nArgs:\n kwargs(dict): The keyword args passed to ensure_server_cert_exists,\n optionally containing the paths to the cert, key and chain files.\n\nReturns:\n dict: A dictionary containing the appropriate parameters to supply to\n upload_server_certificate. 
An empty dictionary if there is a\n problem.", "source": "codesearchnet_filtered"} -{"code": "def filter_backends(backends, filters=None, **kwargs):\n\n def _match_all(obj, criteria):\n 'Return True if all items in criteria matches items in obj.'\n return all(((getattr(obj, key_, None) == value_) for (key_, value_) in criteria.items()))\n configuration_filters = {}\n status_filters = {}\n for (key, value) in kwargs.items():\n if all(((key in backend.configuration()) for backend in backends)):\n configuration_filters[key] = value\n else:\n status_filters[key] = value\n if configuration_filters:\n backends = [b for b in backends if _match_all(b.configuration(), configuration_filters)]\n if status_filters:\n backends = [b for b in backends if _match_all(b.status(), status_filters)]\n backends = list(filter(filters, backends))\n return backends", "docstring": "Return the backends matching the specified filtering.\n\n Filter the `backends` list by their `configuration` or `status`\n attributes, or from a boolean callable. The criteria for filtering can\n be specified via `**kwargs` or as a callable via `filters`, and the\n backends must fulfill all specified conditions.\n\nArgs:\n backends (list[BaseBackend]): list of backends.\n filters (callable): filtering conditions as a callable.\n **kwargs (dict): dict of criteria.\n\nReturns:\n list[BaseBackend]: a list of backend instances matching the\n conditions.", "source": "codesearchnet_filtered"} -{"code": "def CheckDirectory(self, path, extension='yaml'):\n result = True\n if extension:\n glob_spec = os.path.join(path, '*.{0:s}'.format(extension))\n else:\n glob_spec = os.path.join(path, '*')\n for definition_file in sorted(glob.glob(glob_spec)):\n if (not self.CheckFile(definition_file)):\n result = False\n return result", "docstring": "Validates definition files in a directory.\n\nArgs:\n path (str): path of the definition file.\n extension (Optional[str]): extension of the filenames to read.\n\nReturns:\n bool: True if the directory contains valid definitions.", "source": "codesearchnet_filtered"} -{"code": "def _GetNetworkInfo(self, signatures_key):\n network_info = {}\n for category in signatures_key.GetSubkeys():\n for signature in category.GetSubkeys():\n profile_guid_value = signature.GetValueByName('ProfileGuid')\n if profile_guid_value:\n profile_guid = profile_guid_value.GetDataAsObject()\n else:\n continue\n default_gateway_mac_value = signature.GetValueByName('DefaultGatewayMac')\n if default_gateway_mac_value:\n default_gateway_mac = ':'.join(['{0:02x}'.format(octet) for octet in bytearray(default_gateway_mac_value.data)])\n else:\n default_gateway_mac = None\n dns_suffix_value = signature.GetValueByName('DnsSuffix')\n if dns_suffix_value:\n dns_suffix = dns_suffix_value.GetDataAsObject()\n else:\n dns_suffix = None\n network_info[profile_guid] = (default_gateway_mac, dns_suffix)\n return network_info", "docstring": "Retrieves the network info within the signatures subkey.\n\nArgs:\n signatures_key (dfwinreg.WinRegistryKey): a Windows Registry key.\n\nReturns:\n dict[str, tuple]: a tuple of default_gateway_mac and dns_suffix per\n profile identifier (GUID).", "source": "codesearchnet_filtered"} -{"code": "def render(raw_config, environment=None):\n t = Template(raw_config)\n buff = StringIO()\n if (not environment):\n environment = {}\n try:\n substituted = t.substitute(environment)\n except KeyError as e:\n raise exceptions.MissingEnvironment(e.args[0])\n except ValueError:\n substituted = t.safe_substitute(environment)\n if (not 
isinstance(substituted, str)):\n substituted = substituted.decode('utf-8')\n buff.write(substituted)\n buff.seek(0)\n return buff.read()", "docstring": "Renders a config, using it as a template with the environment.\n\nArgs:\n raw_config (str): the raw stacker configuration string.\n environment (dict, optional): any environment values that should be\n passed to the config\n\nReturns:\n str: the stacker configuration populated with any values passed from\n the environment", "source": "codesearchnet_filtered"} -{"code": "def searchInAleph(base, phrase, considerSimilar, field):\n downer = Downloader()\n if (field.lower() not in VALID_ALEPH_FIELDS):\n raise InvalidAlephFieldException(((\"Unknown field '\" + field) + \"'!\"))\n param_url = Template(SEARCH_URL_TEMPLATE).substitute(PHRASE=quote_plus(phrase), BASE=base, FIELD=field, SIMILAR=('Y' if considerSimilar else 'N'))\n result = downer.download((ALEPH_URL + param_url))\n dom = dhtmlparser.parseString(result)\n find = dom.find('find')\n if (len(find) <= 0):\n raise AlephException(\"Aleph didn't returned any information.\")\n find = find[0]\n result = _alephResultToDict(find)\n result['base'] = base\n if ('error' not in result):\n return result\n if (result['error'] == 'empty set'):\n result['no_entries'] = 0\n return result\n else:\n raise AlephException(result['error'])", "docstring": "Send request to the aleph search engine.\n\n Request itself is pretty useless, but it can be later used as parameter\n for :func:`getDocumentIDs`, which can fetch records from Aleph.\n\nArgs:\n base (str): which database you want to use\n phrase (str): what do you want to search\n considerSimilar (bool): fuzzy search, which is not working at all, so\n don't use it\n field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`)\n\nReturns:\n dictionary: consisting from following fields:\n\n | error (optional): present if there was some form of error\n | no_entries (int): number of entries that can be fetch from aleph\n | no_records (int): no idea what is this, but it is always >= than\n `no_entries`\n | set_number (int): important - something like ID of your request\n | session-id (str): used to count users for licensing purposes\n\nExample:\n Returned dict::\n\n {\n 'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB',\n 'set_number': 36520,\n 'no_records': 1,\n 'no_entries': 1\n }\n\nRaises:\n AlephException: if Aleph doesn't return any information\n InvalidAlephFieldException: if specified field is not valid", "source": "codesearchnet_filtered"} -{"code": "def get_metrics_for_resource(access_token, subscription_id, resource_group, resource_provider, resource_type, resource_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/', resource_provider, '/', resource_type, '/', resource_name, '/providers/microsoft.insights', '/metrics?api-version=', INSIGHTS_PREVIEW_API])\n return do_get(endpoint, access_token)", "docstring": "Get the monitoring metrics for a resource.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n resource_type (str): Type of resource.\n resource_name (str): Name of resource.\n\nReturns:\n HTTP response. 
JSON body of resource metrics.", "source": "codesearchnet_filtered"} -{"code": "def get_cpu_props(cls, family, arch='x86'):\n cpus = cls.get_cpus_by_arch(arch)\n try:\n return cpus.xpath('model[@name=\"{0}\"]'.format(family))[0]\n except IndexError:\n raise LagoException('No such CPU family: {0}'.format(family))", "docstring": "Get CPU info XML\n\nArgs:\n family(str): CPU family\n arch(str): CPU arch\n\nReturns:\n lxml.etree.Element: CPU xml\n\nRaises:\n :exc:`~LagoException`: If no such CPU family exists", "source": "codesearchnet_filtered"} -{"code": "def apply_filter(self, expr, value):\n if self.skip(value):\n return expr\n if (not self._valid_value(value)):\n msg = 'Invalid value {value} passed to filter {name} - '.format(value=repr(value), name=self.name)\n if (self.default is not None):\n warn((msg + 'defaulting to {}'.format(self.default)))\n value = self.default\n else:\n warn((msg + 'skipping'))\n return expr\n return self.func(expr, value)", "docstring": "Returns the given expression filtered by the given value.\n\nArgs:\n expr (xpath.expression.AbstractExpression): The expression to filter.\n value (object): The desired value with which the expression should be filtered.\n\nReturns:\n xpath.expression.AbstractExpression: The filtered expression.", "source": "codesearchnet_filtered"} -{"code": "def get_vm_extension(access_token, subscription_id, resource_group, vm_name, extension_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '/extensions/', extension_name, '?api-version=', COMP_API])\n return do_get(endpoint, access_token)", "docstring": "Get details about a VM extension.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n vm_name (str): Name of the virtual machine.\n extension_name (str): VM extension name.\n\nReturns:\n HTTP response. JSON body of VM extension properties.", "source": "codesearchnet_filtered"} -{"code": "def remove_plugin(self, name, force=False):\n url = self._url('/plugins/{0}', name)\n res = self._delete(url, params={'force': force})\n self._raise_for_status(res)\n return True", "docstring": "Remove an installed plugin.\n\nArgs:\n name (string): Name of the plugin to remove. The ``:latest``\n tag is optional, and is the default if omitted.\n force (bool): Disable the plugin before removing. 
This may\n result in issues if the plugin is in use by a container.\n\nReturns:\n ``True`` if successful", "source": "codesearchnet_filtered"} -{"code": "def set_banner(self, banner_type, value=None, default=False, disable=False):\n command_string = ('banner %s' % banner_type)\n if ((default is True) or (disable is True)):\n cmd = self.command_builder(command_string, value=None, default=default, disable=disable)\n return self.configure(cmd)\n else:\n if (not value.endswith('\\n')):\n value = (value + '\\n')\n command_input = dict(cmd=command_string, input=value)\n return self.configure([command_input])", "docstring": "Configures system banners\n\nArgs:\n banner_type(str): banner to be changed (likely login or motd)\n value(str): value to set for the banner\n default (bool): Controls the use of the default keyword\n disable (bool): Controls the use of the no keyword`\n\nReturns:\n bool: True if the commands completed successfully otherwise False", "source": "codesearchnet_filtered"} -{"code": "def find_node_by_value(self, value):\n try:\n return next((n for n in self.node_list if (n.value == value)))\n except StopIteration:\n return None", "docstring": "Find and return a node in self.node_list with the value ``value``.\n\n If multiple nodes exist with the value ``value``,\n return the first one found.\n\n If no such node exists, this returns ``None``.\n\nArgs:\n value (Any): The value of the node to find\n\nReturns:\n Node: A node with value ``value`` if it was found\n\n None: If no node exists with value ``value``\n\nExample:\n >>> from blur.markov.node import Node\n >>> node_1 = Node('One')\n >>> graph = Graph([node_1])\n >>> found_node = graph.find_node_by_value('One')\n >>> found_node == node_1\n True", "source": "codesearchnet_filtered"} -{"code": "def do_post(endpoint, body, access_token):\n headers = {'content-type': 'application/json', 'Authorization': ('Bearer ' + access_token)}\n headers['User-Agent'] = get_user_agent()\n return requests.post(endpoint, data=body, headers=headers)", "docstring": "Do an HTTP POST request and return JSON.\n\nArgs:\n endpoint (str): Azure Resource Manager management endpoint.\n body (str): JSON body of information to post.\n access_token (str): A valid Azure authentication token.\n\nReturns:\n HTTP response. JSON body.", "source": "codesearchnet_filtered"} -{"code": "def update_load_balancer(access_token, subscription_id, resource_group, lb_name, body):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/loadBalancers/', lb_name, '?api-version=', NETWORK_API])\n return do_put(endpoint, body, access_token)", "docstring": "Updates a load balancer model, i.e. PUT an updated LB body.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n lb_name (str): Name of the new load balancer.\n body (str): JSON body of an updated load balancer.\n\nReturns:\n HTTP response. 
Load Balancer JSON body.", "source": "codesearchnet_filtered"} -{"code": "def normalize_url(url):\n uri = urlparse(url)\n query = (uri.query or '')\n pairs = parse_qsl(query)\n decoded_pairs = [(unquote(key), value) for (key, value) in pairs]\n encoded_pairs = [(quote(key), value) for (key, value) in decoded_pairs]\n normalized_query = urlencode(encoded_pairs)\n return ParseResult(scheme=uri.scheme, netloc=uri.netloc, path=uri.path, params=uri.params, query=normalized_query, fragment=uri.fragment).geturl()", "docstring": "Returns the given URL with all query keys properly escaped.\n\nArgs:\n url (str): The URL to normalize.\n\nReturns:\n str: The normalized URL.", "source": "codesearchnet_filtered"} -{"code": "def starts_with_prefix_in_list(text, prefixes):\n for prefix in prefixes:\n if text.startswith(prefix):\n return True\n return False", "docstring": "Return True if the given string starts with one of the prefixes in the given list, otherwise\n return False.\n\nArgs:\n text (str): Text to check for prefixes.\n prefixes (list): List of prefixes to check for.\n\nReturns:\n bool: True if the given text starts with any of the given prefixes, otherwise False.", "source": "codesearchnet_filtered"} -{"code": "def get_all_dataset_names(configuration=None, **kwargs):\n dataset = Dataset(configuration=configuration)\n dataset['id'] = 'all dataset names'\n return dataset._write_to_hdx('list', kwargs, 'id')", "docstring": "Get all dataset names in HDX\n\nArgs:\n configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n **kwargs: See below\n limit (int): Number of rows to return. Defaults to all dataset names.\n offset (int): Offset in the complete result for where the set of returned dataset names should begin\n\nReturns:\n List[str]: list of all dataset names in HDX", "source": "codesearchnet_filtered"} -{"code": "def fix_report(self, report, errors='drop', prefer='before'):\n if (not isinstance(report, SignedListReport)):\n raise ArgumentError('Report must be a SignedListReport', report=report)\n if (errors not in ('drop',)):\n raise ArgumentError(\"Unknown errors handler: {}, supported=['drop']\".format(errors))\n self.ensure_prepared()\n fixed_readings = []\n dropped_readings = 0\n for reading in report.visible_readings:\n assignment = self.assign_utc(reading.reading_id, reading.raw_time, prefer=prefer)\n if (assignment is None):\n dropped_readings += 1\n continue\n fixed_reading = IOTileReading(assignment.rtc_value, reading.stream, reading.value, reading_time=assignment.utc, reading_id=reading.reading_id)\n fixed_readings.append(fixed_reading)\n fixed_report = SignedListReport.FromReadings(report.origin, fixed_readings, report_id=report.report_id, selector=report.streamer_selector, streamer=report.origin_streamer, sent_timestamp=report.sent_timestamp)\n fixed_report.received_time = report.received_time\n if (dropped_readings > 0):\n self._logger.warning('Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X', dropped_readings, len(report.visible_readings), report.report_id, report.origin)\n return fixed_report", "docstring": "Perform utc assignment on all readings in a report.\n\n The returned report will have all reading timestamps in UTC. This only\n works on SignedListReport objects. 
Note that the report should\n typically have previously been added to the UTC assigner using\n add_report or no reference points from the report will be used.\n\nArgs:\n report (SignedListReport): The report that we should fix.\n errors (str): The behavior that we should have when we can't\n fix a given reading. The only currently support behavior is\n drop, which means that the reading will be dropped and not\n included in the new report.\n prefer (str): Whether to prefer fixing readings by looking for\n reference points after the reading or before, all other things\n being equal. See the description of ``assign_utc``.\n\nReturns:\n SignedListReport: The report with UTC timestamps.", "source": "codesearchnet_filtered"} -{"code": "def sheetNames(book=None):\n if book:\n if (not (book.lower() in [x.lower() for x in bookNames()])):\n return False\n else:\n book = activeBook()\n if (not book):\n return False\n poBook = PyOrigin.WorksheetPages(book)\n if (not len(poBook)):\n return None\n return [x.GetName() for x in poBook.Layers()]", "docstring": "return sheet names of a book.\n\nArgs:\n book (str, optional): If a book is given, pull names from\n that book. Otherwise, try the active one\n\nReturns:\n list of sheet names (typical case).\n None if book has no sheets.\n False if book doesn't exlist.", "source": "codesearchnet_filtered"} -{"code": "def _get_pdf_filenames_at(source_directory):\n if (not os.path.isdir(source_directory)):\n raise ValueError(('%s is not a directory!' % source_directory))\n return [os.path.join(source_directory, filename) for filename in os.listdir(source_directory) if filename.endswith(PDF_EXTENSION)]", "docstring": "Find all PDF files in the specified directory.\n\nArgs:\n source_directory (str): The source directory.\n\nReturns:\n list(str): Filepaths to all PDF files in the specified directory.\n\nRaises:\n ValueError", "source": "codesearchnet_filtered"} -{"code": "def success(channel, post):\n datapacks = [('Game', post[0], True), ('Upvotes', post[2], True)]\n gui = ui_embed.UI(channel, 'Link', post[1], modulename=modulename, colour=16746496, thumbnail=post[1], datapacks=datapacks)\n return gui", "docstring": "Creates an embed UI containing the Reddit posts\n\nArgs:\n channel (discord.Channel): The Discord channel to bind the embed to\n post (tuple): Tuples of (field, value, percentile)\n\n Returns:", "source": "codesearchnet_filtered"} -{"code": "def attach_stream(self, stream):\n (curr_stream, count, prev) = self._allocated_streams[stream]\n if (count == (self.model.get(u'max_node_outputs') - 1)):\n new_stream = self.allocate_stream(curr_stream.stream_type, previous=curr_stream)\n copy_desc = u'({} always) => {} using copy_all_a'.format(curr_stream, new_stream)\n self.sensor_graph.add_node(copy_desc)\n self._allocated_streams[stream] = (new_stream, 1, curr_stream)\n if ((curr_stream.stream_type == DataStream.ConstantType) and (curr_stream in self.sensor_graph.constant_database)):\n self.sensor_graph.add_constant(new_stream, self.sensor_graph.constant_database[curr_stream])\n return new_stream\n self._allocated_streams[stream] = (curr_stream, (count + 1), prev)\n return curr_stream", "docstring": "Notify that we would like to attach a node input to this stream.\n\n The return value from this function is the DataStream that should be attached\n to since this function may internally allocate a new SGNode that copies the\n stream if there is no space in the output list to hold another input.\n\n This function should be called once for every node input before 
allocated a new\n sensor graph node that attaches to a stream that is managed by the StreamAllocator.\n\nArgs:\n stream (DataStream): The stream (originally returned from allocate_stream)\n that we want to attach to.\n\nReturns:\n Datastream: A data stream, possible the same as stream, that should be attached\n to a node input.", "source": "codesearchnet_filtered"} -{"code": "def get_program_by_uuid(self, program_uuid):\n return self._load_data(self.PROGRAMS_ENDPOINT, resource_id=program_uuid, default=None)", "docstring": "Return single program by UUID, or None if not found.\n\nArgs:\n program_uuid(string): Program UUID in string form\n\nReturns:\n dict: Program data provided by Course Catalog API", "source": "codesearchnet_filtered"} -{"code": "def _get(self, ip):\n retries = 10\n for retry in range(retries):\n try:\n response = requests.get(('http://ipinfo.io/%s/json' % ip), verify=False, timeout=1)\n if (response.status_code == 429):\n raise RateExceededError\n return response.json()\n except (requests.ReadTimeout, requests.ConnectTimeout):\n pass\n return {}", "docstring": "Get information about an IP.\n\nArgs:\n ip (str): an IP (xxx.xxx.xxx.xxx).\n\nReturns:\n dict: see http://ipinfo.io/developers/getting-started", "source": "codesearchnet_filtered"} -{"code": "def _parse_name(self, config):\n value = NAME_RE.search(config).group('value')\n return dict(name=value)", "docstring": "_parse_name scans the provided configuration block and extracts\n the vlan name. The config block is expected to always return the\n vlan name. The return dict is intended to be merged into the response\n dict.\n\nArgs:\n config (str): The vlan configuration block from the nodes running\n configuration\n\nReturns:\n dict: resource dict attribute", "source": "codesearchnet_filtered"} -{"code": "def find_in_mailbox(cls, session, mailbox_or_id):\n if hasattr(mailbox_or_id, 'id'):\n mailbox_or_id = mailbox_or_id.id\n return cls(('/mailboxes/%d/users.json' % mailbox_or_id), session=session)", "docstring": "Get the users that are associated to a Mailbox.\n\nArgs:\n session (requests.sessions.Session): Authenticated session.\n mailbox_or_id (MailboxRef or int): Mailbox of the ID of the\n mailbox to get the folders for.\n\nReturns:\n RequestPaginator(output_type=helpscout.models.User): Users\n iterator.", "source": "codesearchnet_filtered"} -{"code": "def _validate_paths(self, settings, name, value):\n return [self._validate_path(settings, name, item) for item in value]", "docstring": "Apply ``SettingsPostProcessor._validate_path`` to each element in\n list.\n\nArgs:\n settings (dict): Current settings.\n name (str): Setting name.\n value (list): List of paths to patch.\n\nRaises:\n boussole.exceptions.SettingsInvalidError: Once a path does not\n exists.\n\nReturns:\n list: Validated paths.", "source": "codesearchnet_filtered"} -{"code": "def search_artists_by_name(self, artist_name: str, limit: int=5) -> List[NameExternalIDPair]:\n response: requests.Response = requests.get(self._API_URL_TEMPLATE.format('search'), params={'q': artist_name, 'type': 'artist', 'limit': limit}, headers={'Authorization': 'Bearer {}'.format(self._token.access_token)})\n response.raise_for_status()\n if (not response.text):\n return []\n result: List[NameExternalIDPair] = []\n data: List[Dict] = response.json()['artists']['items']\n for artist in data:\n artist = NameExternalIDPair(artist['name'].strip(), artist['id'].strip())\n if ((not artist.name) or (not artist.external_id)):\n raise SpotifyClientError('Name or ID is missing')\n 
result.append(artist)\n return result", "docstring": "Returns zero or more artist name - external ID pairs that match the specified artist name.\n\nArgs:\n artist_name (str): The artist name to search in the Spotify API.\n limit (int): The maximum number of results to return.\n\nReturns:\n Zero or more artist name - external ID pairs.\n\nRaises:\n requests.HTTPError: If an HTTP error occurred during the request.\n SpotifyClientError: If an invalid item is found.", "source": "codesearchnet_filtered"} -{"code": "def parse_args(arglist=None):\n climan = CLIManager(conf, **SUB_CMDS)\n create_complete_files(climan, CONFIG_DIR, 'stagpy', 'stagpy-git', zsh_sourceable=True)\n (cmd_args, all_subs) = climan.parse_args(arglist)\n sub_cmd = cmd_args.loam_sub_name\n if (sub_cmd is None):\n return cmd_args.func\n if (sub_cmd != 'config'):\n commands.report_parsing_problems(PARSING_OUT)\n if conf.common.set:\n set_conf_str(conf, conf.common.set)\n if conf.common.config:\n commands.config_pp(all_subs)\n load_mplstyle()\n try:\n _steps_to_slices()\n except AttributeError:\n pass\n return cmd_args.func", "docstring": "Parse cmd line arguments.\n\n Update :attr:`stagpy.conf` accordingly.\n\nArgs:\n arglist (list of str): the list of cmd line arguments. If set to\n None, the arguments are taken from :attr:`sys.argv`.\n\nReturns:\n function: the function implementing the sub command to be executed.", "source": "codesearchnet_filtered"} -{"code": "def merge(self, other_cluster):\n new_cluster = Cluster((self.sites | other_cluster.sites))\n new_cluster.neighbours = (self.neighbours | other_cluster.neighbours).difference(new_cluster.sites)\n return new_cluster", "docstring": "Combine two clusters into a single cluster.\n\nArgs:\n other_cluster (Cluster): The second cluster to combine.\n\nReturns:\n (Cluster): The combination of both clusters.", "source": "codesearchnet_filtered"} -{"code": "def __fa_process_sequence(self, sequence, avoid, initial_state, execution_state, trace_current, next_addr):\n ip = sequence.address\n next_ip = None\n while ip:\n try:\n instr = sequence.fetch(ip)\n except ReilSequenceInvalidAddressError:\n assert (split_address(ip)[1] == 0)\n next_ip = ip\n break\n try:\n target_addr = sequence.get_next_address(ip)\n except ReilSequenceInvalidAddressError:\n target_addr = next_addr\n next_ip = self.__process_instr(instr, avoid, target_addr, initial_state, execution_state, trace_current)\n try:\n ip = (next_ip if next_ip else sequence.get_next_address(ip))\n except ReilSequenceInvalidAddressError:\n break\n return next_ip", "docstring": "Process a REIL sequence.\n\nArgs:\n sequence (ReilSequence): A REIL sequence to process.\n avoid (list): List of address to avoid.\n initial_state: Initial state.\n execution_state: Execution state queue.\n trace_current (list): Current trace.\n next_addr: Address of the next instruction following the current one.\n\nReturns:\n Returns the next instruction to execute in case there is one, otherwise returns None.", "source": "codesearchnet_filtered"} -{"code": "def post_url(self, url, token='', json=None, data=None, headers=None):\n if (token == ''):\n token = self._user_token\n if headers:\n headers.update({'Authorization': 'Token {}'.format(token)})\n else:\n headers = {'Authorization': 'Token {}'.format(token)}\n if json:\n return requests.post(url, headers=headers, json=json, verify=False)\n if data:\n return requests.post(url, headers=headers, data=data, verify=False)\n return requests.post(url, headers=headers, verify=False)", "docstring": "Returns a post 
resquest object taking in a url, user token, and\n possible json information.\n\nArgs:\n url (str): The url to make post to\n token (str): The authentication token\n json (dict): json info to send\n\nReturns:\n obj: Post request object", "source": "codesearchnet_filtered"} -{"code": "def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02, max_steps=10, ediffg=(- 0.05), half_kpts_first_relax=False, **vasp_job_kwargs):\n for i in range(max_steps):\n if (i == 0):\n settings = None\n backup = True\n if (half_kpts_first_relax and os.path.exists('KPOINTS') and os.path.exists('POSCAR')):\n kpts = Kpoints.from_file('KPOINTS')\n orig_kpts_dict = kpts.as_dict()\n kpts.kpts = np.maximum((np.array(kpts.kpts) / 2), 1).tolist()\n low_kpts_dict = kpts.as_dict()\n settings = [{'dict': 'KPOINTS', 'action': {'_set': low_kpts_dict}}]\n else:\n backup = False\n initial = Poscar.from_file('POSCAR').structure\n final = Poscar.from_file('CONTCAR').structure\n vol_change = ((final.volume - initial.volume) / initial.volume)\n logger.info(('Vol change = %.1f %%!' % (vol_change * 100)))\n if (abs(vol_change) < vol_change_tol):\n logger.info('Stopping optimization!')\n break\n else:\n incar_update = {'ISTART': 1}\n if ediffg:\n incar_update['EDIFFG'] = ediffg\n settings = [{'dict': 'INCAR', 'action': {'_set': incar_update}}, {'file': 'CONTCAR', 'action': {'_file_copy': {'dest': 'POSCAR'}}}]\n if ((i == 1) and half_kpts_first_relax):\n settings.append({'dict': 'KPOINTS', 'action': {'_set': orig_kpts_dict}})\n logger.info(('Generating job = %d!' % (i + 1)))\n (yield VaspJob(vasp_cmd, final=False, backup=backup, suffix=('.relax%d' % (i + 1)), settings_override=settings, **vasp_job_kwargs))", "docstring": "Returns a generator of jobs for a full optimization run. Basically,\n this runs an infinite series of geometry optimization jobs until the\n % vol change in a particular optimization is less than vol_change_tol.\n\nArgs:\n vasp_cmd (str): Command to run vasp as a list of args. For example,\n if you are using mpirun, it can be something like\n [\"mpirun\", \"pvasp.5.2.11\"]\n vol_change_tol (float): The tolerance at which to stop a run.\n Defaults to 0.05, i.e., 5%.\n max_steps (int): The maximum number of runs. Defaults to 10 (\n highly unlikely that this limit is ever reached).\n ediffg (float): Force convergence criteria for subsequent runs (\n ignored for the initial run.)\n half_kpts_first_relax (bool): Whether to halve the kpoint grid\n for the first relaxation. Speeds up difficult convergence\n considerably. Defaults to False.\n \\*\\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See\n :class:`custodian.vasp.jobs.VaspJob`.\n\nReturns:\n Generator of jobs.", "source": "codesearchnet_filtered"} -{"code": "def easeInOutQuad(n):\n _checkRange(n)\n if (n < 0.5):\n return (2 * (n ** 2))\n else:\n n = ((n * 2) - 1)\n return ((- 0.5) * ((n * (n - 2)) - 1))", "docstring": "A quadratic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\n n (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n (float) The line progress, starting at 0.0 and ending at 1.0. 
Suitable for passing to getPointOnLine().", "source": "codesearchnet_filtered"} -{"code": "def get_job(self, id):\n return self._get_element_by_id(self.jobs, 'jobs', Job, str(id))", "docstring": "Retrieves a job matching the given `id`\n\nArgs:\n id (str): Job `id` to match.\n\nReturns:\n Job: Job matching the given `id`\n\nRaises:\n ValueError: No resource matches given `id` or multiple resources matching given `id`", "source": "codesearchnet_filtered"} -{"code": "def __get_distribution_tags(self, client, arn):\n return {t['Key']: t['Value'] for t in client.list_tags_for_resource(Resource=arn)['Tags']['Items']}", "docstring": "Returns a dict containing the tags for a CloudFront distribution\n\nArgs:\n client (botocore.client.CloudFront): Boto3 CloudFront client object\n arn (str): ARN of the distribution to get tags for\n\nReturns:\n `dict`", "source": "codesearchnet_filtered"} -{"code": "def from_sites(cls, sites, charge=None, validate_proximity=False, to_unit_cell=False):\n if (len(sites) < 1):\n raise ValueError(('You need at least one site to construct a %s' % cls))\n prop_keys = []\n props = {}\n lattice = None\n for (i, site) in enumerate(sites):\n if (not lattice):\n lattice = site.lattice\n elif (site.lattice != lattice):\n raise ValueError('Sites must belong to the same lattice')\n for (k, v) in site.properties.items():\n if (k not in prop_keys):\n prop_keys.append(k)\n props[k] = ([None] * len(sites))\n props[k][i] = v\n for (k, v) in props.items():\n if any(((vv is None) for vv in v)):\n warnings.warn(('Not all sites have property %s. Missing values are set to None.' % k))\n return cls(lattice, [site.species for site in sites], [site.frac_coords for site in sites], charge=charge, site_properties=props, validate_proximity=validate_proximity, to_unit_cell=to_unit_cell)", "docstring": "Convenience constructor to make a Structure from a list of sites.\n\nArgs:\n sites: Sequence of PeriodicSites. Sites must have the same\n lattice.\n validate_proximity (bool): Whether to check if there are sites\n that are less than 0.01 Ang apart. 
Defaults to False.\n to_unit_cell (bool): Whether to translate sites into the unit\n cell.\n\nReturns:\n (Structure) Note that missing properties are set as None.", "source": "codesearchnet_filtered"} -{"code": "def paginate_resources(cls, request, resources, on_fail_status):\n if (not resources):\n return (resources, client_list_control_pb2.ClientPagingResponse())\n paging = request.paging\n limit = (min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE)\n try:\n if paging.start:\n start_index = cls.index_by_id(paging.start, resources)\n else:\n start_index = 0\n if ((start_index < 0) or (start_index >= len(resources))):\n raise AssertionError\n except AssertionError:\n raise _ResponseFailed(on_fail_status)\n paged_resources = resources[start_index:(start_index + limit)]\n if ((start_index + limit) < len(resources)):\n paging_response = client_list_control_pb2.ClientPagingResponse(next=cls.id_by_index((start_index + limit), resources), start=cls.id_by_index(start_index, resources), limit=limit)\n else:\n paging_response = client_list_control_pb2.ClientPagingResponse(start=cls.id_by_index(start_index, resources), limit=limit)\n return (paged_resources, paging_response)", "docstring": "Truncates a list of resources based on ClientPagingControls\n\nArgs:\n request (object): The parsed protobuf request object\n resources (list of objects): The resources to be paginated\n\nReturns:\n list: The paginated list of resources\n object: The ClientPagingResponse to be sent back to the client", "source": "codesearchnet_filtered"} -{"code": "def _copy_and_clean_up_expectation(self, expectation, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True):\n new_expectation = copy.deepcopy(expectation)\n if ('success_on_last_run' in new_expectation):\n del new_expectation['success_on_last_run']\n if discard_result_format_kwargs:\n if ('result_format' in new_expectation['kwargs']):\n del new_expectation['kwargs']['result_format']\n if discard_include_configs_kwargs:\n if ('include_configs' in new_expectation['kwargs']):\n del new_expectation['kwargs']['include_configs']\n if discard_catch_exceptions_kwargs:\n if ('catch_exceptions' in new_expectation['kwargs']):\n del new_expectation['kwargs']['catch_exceptions']\n return new_expectation", "docstring": "Returns copy of `expectation` without `success_on_last_run` and other specified key-value pairs removed\n\n Returns a copy of specified expectation will not have `success_on_last_run` key-value. 
The other key-value \\\n pairs will be removed by default but will remain in the copy if specified.\n\nArgs:\n expectation (json): \\\n The expectation to copy and clean.\n discard_result_format_kwargs (boolean): \\\n if True, will remove the kwarg `output_format` key-value pair from the copied expectation.\n discard_include_configs_kwargs (boolean):\n if True, will remove the kwarg `include_configs` key-value pair from the copied expectation.\n discard_catch_exceptions_kwargs (boolean):\n if True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.\n\nReturns:\n A copy of the provided expectation with `success_on_last_run` and other specified key-value pairs removed", "source": "codesearchnet_filtered"} -{"code": "def extract_bundle(self, resource, timeout=(- 1)):\n return self._client.update(resource, timeout=timeout, custom_headers={'Content-Type': 'text/plain'})", "docstring": "Extracts the existing bundle on the appliance and creates all the artifacts.\n\nArgs:\n resource (dict): Artifact Bundle to extract.\n timeout:\n Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation in\n OneView, it just stops waiting for its completion.\n\nReturns:\n dict: The Artifact Bundle.", "source": "codesearchnet_filtered"} -{"code": "def generate(self, str=None, fpath=None):\n self.prepare_storage()\n self.str = (self.load_file(fpath) if fpath else self.sanitize(str))\n self.validate_config()\n self.generate_kgrams()\n self.hash_kgrams()\n self.generate_fingerprints()\n return self.fingerprints", "docstring": "generates fingerprints of the input. Either provide `str` to compute fingerprint directly from your string or `fpath` to compute fingerprint from the text of the file. Make sure to have your text decoded in `utf-8` format if you pass the input string.\n\nArgs:\n str (Optional(str)): string whose fingerprint is to be computed.\n fpath (Optional(str)): absolute path of the text file whose fingerprint is to be computed.\n\nReturns:\n List(int): fingerprints of the input.\n\nRaises:\n FingerprintException: If the input string do not meet the requirements of parameters provided for fingerprinting.", "source": "codesearchnet_filtered"} -{"code": "def add_time_dimension(padded_inputs, seq_lens):\n padded_batch_size = tf.shape(padded_inputs)[0]\n max_seq_len = (padded_batch_size // tf.shape(seq_lens)[0])\n new_batch_size = (padded_batch_size // max_seq_len)\n new_shape = ([new_batch_size, max_seq_len] + padded_inputs.get_shape().as_list()[1:])\n return tf.reshape(padded_inputs, new_shape)", "docstring": "Adds a time dimension to padded inputs.\n\nArgs:\n padded_inputs (Tensor): a padded batch of sequences. 
That is,\n for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where\n A, B, C are sequence elements and * denotes padding.\n seq_lens (Tensor): the sequence lengths within the input batch,\n suitable for passing to tf.nn.dynamic_rnn().\n\nReturns:\n Reshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].", "source": "codesearchnet_filtered"} -{"code": "def poisson_cluster(data, k, init=None, max_iters=100):\n (genes, cells) = data.shape\n if (sparse.issparse(data) and (not sparse.isspmatrix_csc(data))):\n data = sparse.csc_matrix(data)\n (init, assignments) = kmeans_pp(data, k, centers=init)\n centers = np.copy(init)\n assignments = np.zeros(cells)\n for it in range(max_iters):\n lls = poisson_ll(data, centers)\n new_assignments = np.argmax(lls, 1)\n if np.equal(assignments, new_assignments).all():\n return (new_assignments, centers)\n for c in range(k):\n if sparse.issparse(data):\n if (data[(:, (new_assignments == c))].shape[0] == 0):\n (new_c, _) = kmeans_pp(data, k, centers[(:, :c)])\n centers[(:, c)] = new_c[(:, c)]\n else:\n centers[(:, c)] = np.asarray(data[(:, (new_assignments == c))].mean(1)).flatten()\n elif (len(data[(:, (new_assignments == c))]) == 0):\n (new_c, _) = kmeans_pp(data, k, centers[(:, :c)])\n centers[(:, c)] = new_c[(:, c)]\n else:\n centers[(:, c)] = np.mean(data[(:, (new_assignments == c))], 1)\n assignments = new_assignments\n return (assignments, centers)", "docstring": "Performs Poisson hard EM on the given data.\n\nArgs:\n data (array): A 2d array- genes x cells. Can be dense or sparse; for best performance, sparse matrices should be in CSC format.\n k (int): Number of clusters\n init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++\n max_iters (int, optional): Maximum number of iterations. 
Default: 100\n\nReturns:\n a tuple of two arrays: a cells x 1 vector of cluster assignments,\n and a genes x k array of cluster means.", "source": "codesearchnet_filtered"} -{"code": "def add_request(self, request):\n queue_item = QueueItem(request, Response(request.url))\n self.add(queue_item)\n return queue_item", "docstring": "Add a request to the queue.\n\nArgs:\n request (:class:`nyawc.http.Request`): The request to add.\n\nReturns:\n :class:`nyawc.QueueItem`: The created queue item.", "source": "codesearchnet_filtered"} -{"code": "def enhance_function_signatures(spec_dict: Mapping[(str, Any)]) -> Mapping[(str, Any)]:\n for func in spec_dict['functions']['signatures']:\n for (i, sig) in enumerate(spec_dict['functions']['signatures'][func]['signatures']):\n args = sig['arguments']\n req_args = []\n pos_args = []\n opt_args = []\n mult_args = []\n for arg in args:\n if arg.get('multiple', False):\n if (arg['type'] in ['Function', 'Modifier']):\n mult_args.extend(arg.get('values', []))\n elif (arg['type'] in ['StrArgNSArg', 'NSArg', 'StrArg']):\n mult_args.append(arg['type'])\n elif (arg.get('optional', False) and arg.get('position', False)):\n if (arg['type'] in ['Function', 'Modifier']):\n pos_args.append(arg.get('values', []))\n elif (arg['type'] in ['StrArgNSArg', 'NSArg', 'StrArg']):\n pos_args.append(arg['type'])\n elif arg.get('optional', False):\n if (arg['type'] in ['Function', 'Modifier']):\n opt_args.extend(arg.get('values', []))\n elif (arg['type'] in ['StrArgNSArg', 'NSArg', 'StrArg']):\n opt_args.append(arg['type'])\n elif (arg['type'] in ['Function', 'Modifier']):\n req_args.append(arg.get('values', []))\n elif (arg['type'] in ['StrArgNSArg', 'NSArg', 'StrArg']):\n req_args.append(arg['type'])\n spec_dict['functions']['signatures'][func]['signatures'][i]['req_args'] = copy.deepcopy(req_args)\n spec_dict['functions']['signatures'][func]['signatures'][i]['pos_args'] = copy.deepcopy(pos_args)\n spec_dict['functions']['signatures'][func]['signatures'][i]['opt_args'] = copy.deepcopy(opt_args)\n spec_dict['functions']['signatures'][func]['signatures'][i]['mult_args'] = copy.deepcopy(mult_args)\n return spec_dict", "docstring": "Enhance function signatures\n\n Add required and optional objects to signatures objects for semantic validation\n support.\n\nArgs:\n spec_dict (Mapping[str, Any]): bel specification dictionary\n\nReturns:\n Mapping[str, Any]: return enhanced bel specification dict", "source": "codesearchnet_filtered"} -{"code": "def node_run(input_file, coords_only, bc_settings, bc_grid_weights):\n log = logging.getLogger('pyspark')\n log.setLevel(logging.INFO)\n if (len(log.handlers) == 0):\n log.addHandler(logging.StreamHandler(sys.stdout))\n precision = bc_settings.value['precision']\n imager = oskar.Imager(precision)\n for (key, value) in bc_settings.value['imager'].items():\n setattr(imager, key, value)\n grid_size = imager.plane_size\n grid_weights = None\n ms_han = oskar.MeasurementSet.open(input_file)\n if coords_only:\n if (imager.weighting == 'Uniform'):\n grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision)\n log.info('Reading coordinates from %s', input_file)\n imager.coords_only = True\n process_input_data(ms_han, imager, None, grid_weights)\n imager.coords_only = False\n return (grid_weights, imager.num_w_planes)\n grid_data = numpy.zeros([grid_size, grid_size], dtype=('c8' if (precision == 'single') else 'c16'))\n log.info('Reading visibilities from %s', input_file)\n if bc_settings.value['combine']:\n if (imager.weighting == 'Uniform'):\n 
grid_weights = bc_grid_weights.value\n grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights)\n log.info('Returning gridded visibilities to RDD')\n return (grid_data, grid_norm)\n else:\n if (imager.weighting == 'Uniform'):\n grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision)\n if ((imager.weighting == 'Uniform') or (imager.algorithm == 'W-projection')):\n imager.coords_only = True\n process_input_data(ms_han, imager, None, grid_weights)\n imager.coords_only = False\n grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights)\n output_file = (splitext(input_file)[0] + '.fits')\n save_image(imager, grid_data, grid_norm, output_file)\n log.info('Finished. Output file is %s', output_file)\n return 0", "docstring": "Main function to process visibility data on Spark cluster nodes.\n\nArgs:\n input_file (str):\n RDD element containing filename to process.\n coords_only (boolean):\n If true, read only baseline coordinates to define the weights grid.\n bc_settings (pyspark.broadcast.Broadcast):\n Spark broadcast variable containing pipeline settings dictionary.\n bc_grid_weights (pyspark.broadcast.Broadcast):\n Spark broadcast variable containing weights grid. May be None.\n\nReturns:\n tuple: Output RDD element.", "source": "codesearchnet_filtered"} -{"code": "def sensor(self, name, config=None, inactive_sensor_expiration_time_seconds=sys.maxsize, parents=None):\n sensor = self.get_sensor(name)\n if sensor:\n return sensor\n with self._lock:\n sensor = self.get_sensor(name)\n if (not sensor):\n sensor = Sensor(self, name, parents, (config or self.config), inactive_sensor_expiration_time_seconds)\n self._sensors[name] = sensor\n if parents:\n for parent in parents:\n children = self._children_sensors.get(parent)\n if (not children):\n children = []\n self._children_sensors[parent] = children\n children.append(sensor)\n logger.debug('Added sensor with name %s', name)\n return sensor", "docstring": "Get or create a sensor with the given unique name and zero or\n more parent sensors. 
All parent sensors will receive every value\n recorded with this sensor.\n\nArgs:\n name (str): The name of the sensor\n config (MetricConfig, optional): A default configuration to use\n for this sensor for metrics that don't have their own config\n inactive_sensor_expiration_time_seconds (int, optional):\n If no value if recorded on the Sensor for this duration of\n time, it is eligible for removal\n parents (list of Sensor): The parent sensors\n\nReturns:\n Sensor: The sensor that is created", "source": "codesearchnet_filtered"} -{"code": "def RemoveScanNode(self, path_spec):\n scan_node = self._scan_nodes.get(path_spec, None)\n if (not scan_node):\n return None\n if scan_node.sub_nodes:\n raise RuntimeError('Scan node has sub nodes.')\n parent_scan_node = scan_node.parent_node\n if parent_scan_node:\n parent_scan_node.sub_nodes.remove(scan_node)\n if (path_spec == self._root_path_spec):\n self._root_path_spec = None\n del self._scan_nodes[path_spec]\n if path_spec.IsFileSystem():\n del self._file_system_scan_nodes[path_spec]\n return parent_scan_node", "docstring": "Removes a scan node of a certain path specification.\n\nArgs:\n path_spec (PathSpec): path specification.\n\nReturns:\n SourceScanNode: parent scan node or None if not available.\n\nRaises:\n RuntimeError: if the scan node has sub nodes.", "source": "codesearchnet_filtered"} -{"code": "def encode(cls, command):\n args = []\n for arg in command.args:\n if (not isinstance(arg, str)):\n arg = str(arg)\n if ((',' in arg) or arg.startswith(' ') or arg.endswith(' ') or arg.startswith('hex:')):\n arg = 'hex:{}'.format(hexlify(arg.encode('utf-8')).decode('utf-8'))\n args.append(arg)\n argstr = ''\n if (len(args) > 0):\n argstr = ((' {' + ','.join(args)) + '}')\n return (command.name + argstr)", "docstring": "Encode a command as an unambiguous string.\n\nArgs:\n command (Command): The command to encode.\n\nReturns:\n str: The encoded command", "source": "codesearchnet_filtered"} -{"code": "def snake_to_camel(name):\n ret = ''.join((x.title() for x in name.split('_')))\n ret = (ret[0].lower() + ret[1:])\n return ret", "docstring": "Takes a snake_field_name and returns a camelCaseFieldName\n\nArgs:\n name (str): E.g. snake_field_name or SNAKE_FIELD_NAME\n\nReturns:\n str: camelCase converted name. E.g. capsFieldName", "source": "codesearchnet_filtered"} -{"code": "def ReadByte(self, do_ord=True):\n try:\n if do_ord:\n return ord(self.stream.read(1))\n return self.stream.read(1)\n except Exception as e:\n logger.error('ord expected character but got none')\n return 0", "docstring": "Read a single byte.\n\nArgs:\n do_ord (bool): (default True) convert the byte to an ordinal first.\n\nReturns:\n bytes: a single byte if successful. 
0 (int) if an exception occurred.", "source": "codesearchnet_filtered"} -{"code": "def make_instance(cls, data):\n schema = cls()\n if (not hasattr(schema.Meta, 'model')):\n raise AttributeError('In order to make an instance, a model for the schema must be defined in the Meta class.')\n serialized_data = schema.load(data).data\n return cls.Meta.model(**serialized_data)", "docstring": "Validate the data and create a model instance from the data.\n\nArgs:\n data (dict): The unserialized data to insert into the new model\n instance through it's constructor.\n\nReturns:\n peewee.Model|sqlalchemy.Model: The model instance with it's data\n inserted into it.\n\nRaises:\n AttributeError: This is raised if ``Meta.model`` isn't set on the\n schema's definition.", "source": "codesearchnet_filtered"} -{"code": "def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):\n plt = pretty_plot(**kwargs)\n pp = np.polyfit(x, y, deg)\n xp = np.linspace(min(x), max(x), 200)\n plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o')\n if xlabel:\n plt.xlabel(xlabel)\n if ylabel:\n plt.ylabel(ylabel)\n return plt", "docstring": "Convenience method to plot data with trend lines based on polynomial fit.\n\nArgs:\n x: Sequence of x data.\n y: Sequence of y data.\n deg (int): Degree of polynomial. Defaults to 1.\n xlabel (str): Label for x-axis.\n ylabel (str): Label for y-axis.\n \\\\*\\\\*kwargs: Keyword args passed to pretty_plot.\n\nReturns:\n matplotlib.pyplot object.", "source": "codesearchnet_filtered"} -{"code": "def upload_timeline(self, timeline_name, plaso_storage_path):\n resource_url = '{0:s}/upload/'.format(self.api_base_url)\n files = {'file': open(plaso_storage_path, 'rb')}\n data = {'name': timeline_name}\n response = self.session.post(resource_url, files=files, data=data)\n try:\n response_dict = response.json()\n except ValueError:\n raise RuntimeError('Could not decode JSON response from Timesketch (Status {0:d}):\\n{1:s}'.format(response.status_code, response.content))\n index_id = response_dict['objects'][0]['id']\n return index_id", "docstring": "Create a timeline with the specified name from the given plaso file.\n\nArgs:\n timeline_name (str): Name of timeline\n plaso_storage_path (str): Local path of plaso file to be uploaded\n\nReturns:\n int: ID of uploaded timeline\n\nRaises:\n RuntimeError: When the JSON response from Timesketch cannot be decoded.", "source": "codesearchnet_filtered"} -{"code": "def ContainsAddressStr(self, address):\n for (key, contract) in self._contracts.items():\n if (contract.Address == address):\n return True\n return False", "docstring": "Determine if the wallet contains the address.\n\nArgs:\n address (str): a string representing the public key.\n\nReturns:\n bool: True, if the address is present in the wallet. 
False otherwise.", "source": "codesearchnet_filtered"} -{"code": "def sheet_to_table(worksheet):\n headers = []\n value_rows = []\n for (row_i, row) in enumerate(worksheet.iter_rows()):\n if (row_i == 0):\n for header_cell in row:\n if header_cell.value:\n headers.append(parse_value(header_cell))\n else:\n break\n continue\n row_cells = [parse_value(cell) for (index, cell) in enumerate(row) if (index < len(headers))]\n if any(row_cells):\n value_rows.append(row_cells)\n else:\n break\n table = [{k: v for (k, v) in zip(headers, row) if (v is not None)} for row in value_rows]\n return table", "docstring": "Transforma una hoja de libro de Excel en una lista de diccionarios.\n\nArgs:\n worksheet (Workbook.worksheet): Hoja de cálculo de un archivo XLSX\n según los lee `openpyxl`\n\nReturns:\n list_of_dicts: Lista de diccionarios, con tantos elementos como\n registros incluya la hoja, y con tantas claves por diccionario como\n campos tenga la hoja.", "source": "codesearchnet_filtered"} -{"code": "def update(self, resource, timeout=(- 1)):\n self.__set_default_values(resource)\n uri = self._client.build_uri(resource['logicalSwitch']['uri'])\n return self._client.update(resource, uri=uri, timeout=timeout)", "docstring": "Updates a Logical Switch.\n\nArgs:\n resource (dict): Object to update.\n timeout:\n Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\n in OneView, just stop waiting for its completion.\n\nReturns:\n dict: Updated resource.", "source": "codesearchnet_filtered"} -{"code": "def combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd):\n amp_n_wd_interp = interpolate.interp1d(f_n_wd, amp_n_wd, bounds_error=False, fill_value=1e-30)\n amp_n_wd = amp_n_wd_interp(f_n)\n amp_n = ((amp_n * (amp_n >= amp_n_wd)) + (amp_n_wd * (amp_n < amp_n_wd)))\n return (f_n, amp_n)", "docstring": "Combine noise with wd noise.\n\n Combines noise and white dwarf background noise based on greater\n amplitude value at each noise curve step.\n\nArgs:\n f_n (float array): Frequencies of noise curve.\n amp_n (float array): Amplitude values of noise curve.\n f_n_wd (float array): Frequencies of wd noise.\n amp_n_wd (float array): Amplitude values of wd noise.\n\nReturns:\n (tuple of float arrays): Amplitude values of combined noise curve.", "source": "codesearchnet_filtered"} -{"code": "def _FormatDate(self, event):\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=event.timestamp)\n (year, month, day_of_month) = date_time.GetDate()\n try:\n return '{0:04d}-{1:02d}-{2:02d}'.format(year, month, day_of_month)\n except (TypeError, ValueError):\n self._ReportEventError(event, 'unable to copy timestamp: {0!s} to a human readable date. 
Defaulting to: \"0000-00-00\"'.format(event.timestamp))\n return '0000-00-00'", "docstring": "Formats the date.\n\nArgs:\n event (EventObject): event.\n\nReturns:\n str: date field.", "source": "codesearchnet_filtered"} -{"code": "def _NormalizeKeyPath(self, key_path):\n normalized_key_path = key_path.lower()\n if ((len(normalized_key_path) < 39) or (not normalized_key_path.startswith(self._CONTROL_SET_PREFIX))):\n return normalized_key_path\n return ''.join([self._NORMALIZED_CONTROL_SET_PREFIX, normalized_key_path[39:]])", "docstring": "Normalizes a Windows Registry key path.\n\nArgs:\n key_path (str): Windows Registry key path.\n\nReturns:\n str: normalized Windows Registry key path.", "source": "codesearchnet_filtered"} -{"code": "def tags(pode, leaf=False):\n fulltags = [tag for tag in pode[1]['tags']]\n if (not leaf):\n return fulltags\n retn = []\n for (size, tag) in sorted([(len(t), t) for t in fulltags], reverse=True):\n look = (tag + '.')\n if any([r.startswith(look) for r in retn]):\n continue\n retn.append(tag)\n return retn", "docstring": "Get all the tags for a given node.\n\nArgs:\n pode (tuple): A packed node.\n leaf (bool): If True, only return the full tags.\n\nReturns:\n list: A list of tag strings.", "source": "codesearchnet_filtered"} -{"code": "def dummyctrl(self, r, ctrl):\n dv = DummyVertex(r)\n (dv.view.w, dv.view.h) = (self.dw, self.dh)\n self.grx[dv] = dv\n dv.ctrl = ctrl\n ctrl[r] = dv\n self.layers[r].append(dv)\n return dv", "docstring": "creates a DummyVertex at rank r inserted in the ctrl dict\n of the associated edge and layer.\n\nArgs:\n r (int): rank value\n ctrl (dict): the edge's control vertices\n\nReturns:\n DummyVertex : the created DummyVertex.", "source": "codesearchnet_filtered"} -{"code": "def moveRel(xOffset=None, yOffset=None, duration=0.0, tween=linear, pause=None, _pause=True):\n _failSafeCheck()\n (xOffset, yOffset) = _unpackXY(xOffset, yOffset)\n _mouseMoveDrag('move', None, None, xOffset, yOffset, duration, tween)\n _autoPause(pause, _pause)", "docstring": "Moves the mouse cursor to a point on the screen, relative to its current\n position.\n\n The x and y parameters detail where the mouse event happens. If None, the\n current mouse position is used. If a float value, it is rounded down. If\n outside the boundaries of the screen, the event happens at edge of the\n screen.\n\nArgs:\n x (int, float, None, tuple, optional): How far left (for negative values) or\n right (for positive values) to move the cursor. 0 by default. If tuple, this is used for x and y.\n y (int, float, None, optional): How far up (for negative values) or\n down (for positive values) to move the cursor. 0 by default.\n duration (float, optional): The amount of time it takes to move the mouse\n cursor to the new xy coordinates. If 0, then the mouse cursor is moved\n instantaneously. 0.0 by default.\n tween (func, optional): The tweening function used if the duration is not\n 0. A linear tween is used by default. 
See the tweens.py file for\n details.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def is40(msg):\n if allzeros(msg):\n return False\n d = hex2bin(data(msg))\n if wrongstatus(d, 1, 2, 13):\n return False\n if wrongstatus(d, 14, 15, 26):\n return False\n if wrongstatus(d, 27, 28, 39):\n return False\n if wrongstatus(d, 48, 49, 51):\n return False\n if wrongstatus(d, 54, 55, 56):\n return False\n if (bin2int(d[39:47]) != 0):\n return False\n if (bin2int(d[51:53]) != 0):\n return False\n return True", "docstring": "Check if a message is likely to be BDS code 4,0\n\nArgs:\n msg (String): 28 bytes hexadecimal message string\n\nReturns:\n bool: True or False", "source": "codesearchnet_filtered"} -{"code": "def map(self, map_fn, desc=None):\n if (desc is None):\n desc = getattr(map_fn, '__name__', '')\n desc = u'map({})'.format(desc)\n return self.transform((lambda xs: (map_fn(x) for x in xs)), desc=desc)", "docstring": "Return a copy of this query, with the values mapped through `map_fn`.\n\nArgs:\n map_fn (callable): A callable that takes a single argument and returns a new value.\n\n Keyword Args:\n desc (str): A description of the mapping transform, for use in log message.\n Defaults to the name of the map function.\n\nReturns:\n Query", "source": "codesearchnet_filtered"} -{"code": "def _netsh_file(content):\n with tempfile.NamedTemporaryFile(mode='w', prefix='salt-', suffix='.netsh', delete=False) as fp:\n fp.write(content)\n try:\n log.debug('%s:\\n%s', fp.name, content)\n return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)\n finally:\n os.remove(fp.name)", "docstring": "helper function to get the results of ``netsh -f content.txt``\n\n Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue\n ``netsh`` commands. You can put a series of commands in an external file and\n run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what\n this function does.\n\nArgs:\n content (str):\n The contents of the file that will be run by the ``netsh -f``\n command\n\nReturns:\n str: The text returned by the netsh command", "source": "codesearchnet_filtered"} -{"code": "def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):\n forbidden_item_ids = (set() if (forbidden_item_ids is None) else set(forbidden_item_ids))\n children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)\n counts = self.get_children_counts(active=None)\n if (item_ids is None):\n item_ids = set(children.keys())\n\n def _get_leaves(item_id):\n leaves = set()\n\n def __search(item_ids):\n result = set(flatten([children.get(item_id, []) for item_id in item_ids]))\n new_leaves = {item_id for item_id in result if (item_id not in children.keys())}\n leaves.update(new_leaves)\n return (result - new_leaves)\n fixed_point(is_zero=(lambda to_visit: (len(to_visit) == 0)), minus=(lambda to_visit, visited: (to_visit - visited)), plus=(lambda visited_x, visited_y: (visited_x | visited_y)), f=__search, x={item_id})\n leaves = {leaf for leaf in leaves if (counts[leaf] == 0)}\n if (len(leaves) > 0):\n return leaves\n if ((counts[item_id] == 0) and (item_id not in forbidden_item_ids)):\n return {item_id}\n return set()\n return {item_id: _get_leaves(item_id) for item_id in item_ids}", "docstring": "Get mapping of items to their reachable leaves. 
Leaves having\n inactive relations to other items are omitted.\n\nArgs:\n item_ids (list): items which are taken as roots for the reachability\n language (str): if specified, filter out items which are not\n available in the given language\n\nReturns:\n dict: item id -> list of items (reachable leaves)", "source": "codesearchnet_filtered"} -{"code": "def _get_endpoint(self, sub_domain):\n storage_parameters = (self._storage_parameters or dict())\n account_name = storage_parameters.get('account_name')\n if (not account_name):\n raise ValueError('\"account_name\" is required for Azure storage')\n suffix = storage_parameters.get('endpoint_suffix', 'core.windows.net')\n self._endpoint = ('http%s://%s.%s.%s' % (('' if self._unsecure else 's'), account_name, sub_domain, suffix))\n return (account_name, suffix.replace('.', '\\\\.'))", "docstring": "Get endpoint information from storage parameters.\n\n Update system with endpoint information and return information required\n to define roots.\n\nArgs:\n self (pycosio._core.io_system.SystemBase subclass): System.\n sub_domain (str): Azure storage sub-domain.\n\nReturns:\n tuple of str: account_name, endpoint_suffix", "source": "codesearchnet_filtered"} -{"code": "def ReadLine(self, file_object):\n (line, _, self.lines) = self.lines.partition('\\n')\n if (not line):\n self.ReadLines(file_object)\n (line, _, self.lines) = self.lines.partition('\\n')\n return line", "docstring": "Reads a line.\n\nArgs:\n file_object (dfvfs.FileIO): file-like object.\n\nReturns:\n str: line read from the lines buffer.", "source": "codesearchnet_filtered"} -{"code": "def build_hgnc_gene(gene_info, build='37'):\n try:\n hgnc_id = int(gene_info['hgnc_id'])\n except KeyError as err:\n raise KeyError('Gene has to have a hgnc_id')\n except ValueError as err:\n raise ValueError('hgnc_id has to be integer')\n try:\n hgnc_symbol = gene_info['hgnc_symbol']\n except KeyError as err:\n raise KeyError('Gene has to have a hgnc_symbol')\n try:\n ensembl_id = gene_info['ensembl_gene_id']\n except KeyError as err:\n raise KeyError('Gene has to have a ensembl_id')\n try:\n chromosome = gene_info['chromosome']\n except KeyError as err:\n raise KeyError('Gene has to have a chromosome')\n try:\n start = int(gene_info['start'])\n except KeyError as err:\n raise KeyError('Gene has to have a start position')\n except TypeError as err:\n raise TypeError('Gene start has to be a integer')\n try:\n end = int(gene_info['end'])\n except KeyError as err:\n raise KeyError('Gene has to have a end position')\n except TypeError as err:\n raise TypeError('Gene end has to be a integer')\n gene_obj = HgncGene(hgnc_id=hgnc_id, hgnc_symbol=hgnc_symbol, ensembl_id=ensembl_id, chrom=chromosome, start=start, end=end, build=build)\n if gene_info.get('description'):\n gene_obj['description'] = gene_info['description']\n if gene_info.get('previous_symbols'):\n gene_obj['aliases'] = gene_info['previous_symbols']\n if gene_info.get('entrez_id'):\n gene_obj['entrez_id'] = int(gene_info['entrez_id'])\n if gene_info.get('omim_id'):\n gene_obj['omim_id'] = int(gene_info['omim_id'])\n if gene_info.get('pli_score'):\n gene_obj['pli_score'] = float(gene_info['pli_score'])\n if gene_info.get('ref_seq'):\n gene_obj['primary_transcripts'] = gene_info['ref_seq']\n if gene_info.get('ucsc_id'):\n gene_obj['ucsc_id'] = gene_info['ucsc_id']\n if gene_info.get('uniprot_ids'):\n gene_obj['uniprot_ids'] = gene_info['uniprot_ids']\n if gene_info.get('vega_id'):\n gene_obj['vega_id'] = gene_info['vega_id']\n if 
gene_info.get('incomplete_penetrance'):\n gene_obj['incomplete_penetrance'] = True\n if gene_info.get('inheritance_models'):\n gene_obj['inheritance_models'] = gene_info['inheritance_models']\n phenotype_objs = []\n for phenotype_info in gene_info.get('phenotypes', []):\n phenotype_objs.append(build_phenotype(phenotype_info))\n if phenotype_objs:\n gene_obj['phenotypes'] = phenotype_objs\n for key in list(gene_obj):\n if (gene_obj[key] is None):\n gene_obj.pop(key)\n return gene_obj", "docstring": "Build a hgnc_gene object\n\nArgs:\n gene_info(dict): Gene information\n\nReturns:\n gene_obj(dict)\n\n {\n '_id': ObjectId(),\n # This is the hgnc id, required:\n 'hgnc_id': int,\n # The primary symbol, required\n 'hgnc_symbol': str,\n 'ensembl_id': str, # required\n 'build': str, # '37' or '38', defaults to '37', required\n\n 'chromosome': str, # required\n 'start': int, # required\n 'end': int, # required\n\n 'description': str, # Gene description\n 'aliases': list(), # Gene symbol aliases, includes hgnc_symbol, str\n 'entrez_id': int,\n 'omim_id': int,\n 'pli_score': float,\n 'primary_transcripts': list(), # List of refseq transcripts (str)\n 'ucsc_id': str,\n 'uniprot_ids': list(), # List of str\n 'vega_id': str,\n 'transcripts': list(), # List of hgnc_transcript\n\n # Inheritance information\n 'inheritance_models': list(), # List of model names\n 'incomplete_penetrance': bool, # Acquired from HPO\n\n # Phenotype information\n 'phenotypes': list(), # List of dictionaries with phenotype information\n }", "source": "codesearchnet_filtered"} -{"code": "def moveTo(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):\n _failSafeCheck()\n (x, y) = _unpackXY(x, y)\n _mouseMoveDrag('move', x, y, 0, 0, duration, tween)\n _autoPause(pause, _pause)", "docstring": "Moves the mouse cursor to a point on the screen.\n\n The x and y parameters detail where the mouse event happens. If None, the\n current mouse position is used. If a float value, it is rounded down. If\n outside the boundaries of the screen, the event happens at edge of the\n screen.\n\nArgs:\n x (int, float, None, tuple, optional): The x position on the screen where the\n click happens. None by default. If tuple, this is used for x and y.\n If x is a str, it's considered a filename of an image to find on\n the screen with locateOnScreen() and click the center of.\n y (int, float, None, optional): The y position on the screen where the\n click happens. None by default.\n duration (float, optional): The amount of time it takes to move the mouse\n cursor to the xy coordinates. If 0, then the mouse cursor is moved\n instantaneously. 0.0 by default.\n tween (func, optional): The tweening function used if the duration is not\n 0. A linear tween is used by default. 
See the tweens.py file for\n details.\n\nReturns:\n None", "source": "codesearchnet_filtered"} -{"code": "def wait_for_vacancy(self, processor_type):\n with self._condition:\n self._condition.wait_for((lambda : (self._processor_available(processor_type) or self._cancelled_event.is_set())))\n if self._cancelled_event.is_set():\n raise WaitCancelledException()\n processor = self[processor_type].next_processor()\n return processor", "docstring": "Waits for a particular processor type to have the capacity to\n handle additional transactions or until is_cancelled is True.\n\nArgs:\n processor_type (ProcessorType): The family, and version of\n the transaction processor.\n\nReturns:\n Processor", "source": "codesearchnet_filtered"} -{"code": "def broadcast_dynamic_shape(shape_x: DynamicRaggedShape, shape_y: DynamicRaggedShape) -> DynamicRaggedShape:\n if not isinstance(shape_x, DynamicRaggedShape):\n raise TypeError('shape_x must be a DynamicRaggedShape')\n if not isinstance(shape_y, DynamicRaggedShape):\n raise TypeError('shape_y must be a DynamicRaggedShape')\n return broadcast_dynamic_shape_extended(shape_x, shape_y)[0]", "docstring": "Returns the shape formed by broadcasting two shapes to be compatible.\n\n 1. If shape_x and shape_y both have row_partitions, then fail if their dtypes\n don't match.\n 2. If neither has row_partitions and they have different dtypes,\n go with int64.\n 3. If one has row_partitions, go with that dtype.\n\nArgs:\n shape_x: A `DynamicRaggedShape`\n shape_y: A `DynamicRaggedShape`\n\nReturns:\n A `DynamicRaggedShape`.\n\nRaises:\n ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.", "source": "github_repos"} -{"code": "def register_revived_type(identifier, predicate, versions):\n versions.sort(key=lambda reg: reg.version, reverse=True)\n if not versions:\n raise AssertionError('Need at least one version of a registered type.')\n version_numbers = set()\n for registration in versions:\n registration.identifier = identifier\n if registration.version in version_numbers:\n raise AssertionError(f'Got multiple registrations with version {registration.version} for type {identifier}.')\n version_numbers.add(registration.version)\n _REVIVED_TYPE_REGISTRY[identifier] = (predicate, versions)\n _TYPE_IDENTIFIERS.append(identifier)", "docstring": "Register a type for revived objects.\n\nArgs:\n identifier: A unique string identifying this class of objects.\n predicate: A Boolean predicate for this registration. Takes a\n trackable object as an argument. If True, `type_registration` may be\n used to save and restore the object.\n versions: A list of `VersionedTypeRegistration` objects.", "source": "github_repos"} -{"code": "def check_subword_sampling(tokenizer: PreTrainedTokenizer, text: Optional[str]=None, test_sentencepiece_ignore_case: bool=True) -> None:\n text = 'This is a test for subword regularization.' 
if text is None else text\n if test_sentencepiece_ignore_case:\n text = text.lower()\n tokens_list = []\n for _ in range(5):\n tokens_list.append(tokenizer.tokenize(text))\n combinations = itertools.combinations(tokens_list, 2)\n subword_sampling_found = False\n for combination in combinations:\n if combination[0] != combination[1]:\n subword_sampling_found = True\n unittest.TestCase().assertTrue(subword_sampling_found)\n for tokens in tokens_list:\n if test_sentencepiece_ignore_case:\n unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens).lower())\n else:\n unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens))", "docstring": "Check if the tokenizer generates different results when subword regularization is enabled.\n\n Subword regularization augments training data with subword sampling.\n This has a random component.\n\nArgs:\n tokenizer: The tokenizer to check.\n text: The text to use for the checks.\n test_sentencepiece_ignore_case: See `TokenizerTesterMixin.test_sentencepiece_ignore_case`.", "source": "github_repos"} -{"code": "def on_test_batch_begin(self, batch, logs=None):", "docstring": "Called at the beginning of a batch in `evaluate` methods.\n\n Also called at the beginning of a validation batch in the `fit`\n methods, if validation data is provided.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `Model` is set to `N`, this method will only be called every\n `N` batches.\n\nArgs:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. Currently no data is passed to this argument for this\n method but that may change in the future.", "source": "github_repos"} -{"code": "def __init__(self, query_engine: engine.Engine, fhir_dataset: str, view_dataset: Optional[str]=None, value_set_codes_table: Optional[str]=None, snake_case_resource_tables: bool=False) -> None:\n self._engine = query_engine\n self._fhir_dataset = fhir_dataset\n self._view_dataset = view_dataset if view_dataset is not None else self._fhir_dataset\n self._value_set_codes_table = value_set_codes_table if value_set_codes_table is not None else 'value_set_codes'\n self._snake_case_resource_tables = snake_case_resource_tables\n self._value_set_manager = spark_value_set_manager.SparkValueSetManager(query_engine, self._value_set_codes_table)", "docstring": "Initializes the SparkRunner with provided SQLAlcehmy Engine and Dataset.\n\nArgs:\n query_engine: SQLAlchemy Engine with which to perform queries.\n fhir_dataset: Dataset with FHIR data that the views will query.\n view_dataset: Optional dataset with views will be created via the\n `to_spark_view` method, if used. It will use the fhir_dataset if no\n view_dataset is specified\n value_set_codes_table: A table containing value set expansions. If\n provided, memberOf queries may be made against value set URLs described\n by this table. If `value_set_codes_table` is a string, it must included\n a project ID if not in the client's default project, dataset ID and\n table ID, each separated by a '.'. The table must match the schema\n described by\n https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md#valueset-support\n snake_case_resource_tables: Whether to use snake_case names for resource\n tables in Spark for compatiblity with some exports. 
Defaults to\n False.", "source": "github_repos"} -{"code": "def __init__(self, partitioned_dim_sizes, inner_dim_sizes, dim_size_dtype=None):\n assert isinstance(partitioned_dim_sizes, (list, tuple))\n with ops.name_scope(None, 'RaggedTensorDynamicShape', (partitioned_dim_sizes, inner_dim_sizes)):\n partitioned_dim_sizes = tuple((ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i) for i, size in enumerate(partitioned_dim_sizes)))\n inner_dim_sizes = ops.convert_to_tensor(inner_dim_sizes, name='inner_dim_sizes')\n if partitioned_dim_sizes:\n for axis, dimension_size in enumerate(partitioned_dim_sizes):\n if dimension_size.shape.ndims is None:\n raise ValueError('rank of partitioned_dim_sizes[%d] is unknown' % axis)\n dimension_size.shape.with_rank_at_most(1)\n if partitioned_dim_sizes[0].shape.ndims == 1:\n raise ValueError('outermost partitioned dimension must be uniform')\n if partitioned_dim_sizes[-1].shape.ndims == 0:\n raise ValueError('innermost partitioned dimension must be ragged')\n inner_dim_sizes.shape.assert_has_rank(1)\n if dim_size_dtype is None:\n dim_size_dtypes = set((p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1))\n if not dim_size_dtypes:\n dim_size_dtype = dtypes.int64\n elif len(dim_size_dtypes) == 1:\n dim_size_dtype = dim_size_dtypes.pop()\n else:\n if not ragged_config.auto_cast_partition_dtype():\n raise ValueError('partitioned_dim_sizes must have matching dtypes')\n dim_size_dtype = dtypes.int64\n partitioned_dim_sizes = tuple((math_ops.cast(p, dim_size_dtype) for p in partitioned_dim_sizes))\n inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)\n self._partitioned_dim_sizes = partitioned_dim_sizes\n self._inner_dim_sizes = inner_dim_sizes", "docstring": "Creates a RaggedTensorDynamicShape.\n\nArgs:\n partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for\n each partitioned dimension. If dimension `d` is uniform, then\n `partitioned_dim_sizes[d]` must be an integer scalar, specifying the\n size of all slices across dimension `d`. If dimension `d` is ragged,\n then `partitioned_dim_sizes[d]` must be an integer vector, specifying\n the size of each slice across dimension `d`.\n inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the\n number of inner dimensions. `inner_dim_sizes[n]` is the size of all\n slices across the `n`th inner dimension (which is the\n `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor.\n dim_size_dtype: dtype for dimension sizes. If not specified, then it\n is chosen based on the dtypes of `partitioned_dim_sizes` and\n `inner_dim_sizes`.", "source": "github_repos"} -{"code": "def __init__(self, alpha=1.0, **kwargs):\n super().__init__(**kwargs)\n self.alpha = alpha\n self.supports_masking = True\n self._build_at_init()", "docstring": "Applies an Exponential Linear Unit function to an output.\n\n Formula:\n\n ```\n f(x) = alpha * (exp(x) - 1.) for x < 0\n f(x) = x for x >= 0\n ```\n\nArgs:\n alpha: float, slope of negative section. 
Defaults to `1.0`.\n **kwargs: Base layer keyword arguments, such as `name` and `dtype`.", "source": "github_repos"} -{"code": "def _generate_graph_update_dicts(self):\n transform_dict = {}\n pcoll_dict = {}\n for transform_id, transform_proto in self._top_level_transforms():\n transform_dict[transform_proto.unique_name] = {'required': transform_id in self._required_transforms}\n for pcoll_id in transform_proto.outputs.values():\n pcoll_dict[pcoll_id] = {'cached': pcoll_id in self._cached_pcollections, 'referenced': pcoll_id in self._referenced_pcollections}\n\n def vertex_properties_to_attributes(vertex):\n \"\"\"Converts PCollection properties to DOT vertex attributes.\"\"\"\n attrs = {}\n if 'leaf' in vertex:\n attrs['style'] = 'invis'\n elif vertex.get('required'):\n attrs['color'] = 'blue'\n attrs['fontcolor'] = 'blue'\n else:\n attrs['color'] = 'grey'\n return attrs\n\n def edge_properties_to_attributes(edge):\n \"\"\"Converts PTransform properties to DOT edge attributes.\"\"\"\n attrs = {}\n if edge.get('cached'):\n attrs['color'] = 'red'\n elif edge.get('referenced'):\n attrs['color'] = 'black'\n else:\n attrs['color'] = 'grey'\n return attrs\n vertex_dict = {}\n edge_dict = {}\n for transform_name, transform_properties in transform_dict.items():\n vertex_dict[transform_name] = vertex_properties_to_attributes(transform_properties)\n for pcoll_id, pcoll_properties in pcoll_dict.items():\n edge_dict[pcoll_id] = edge_properties_to_attributes(pcoll_properties)\n return (vertex_dict, edge_dict)", "docstring": "Generate updates specific to interactive pipeline.\n\nReturns:\n vertex_dict: (Dict[str, Dict[str, str]]) maps vertex name to attributes\n edge_dict: (Dict[str, Dict[str, str]]) maps vertex name to attributes", "source": "github_repos"} -{"code": "def _populate_quantization_options_default_values(quantization_options: _QuantizationOptions) -> None:\n if quantization_options.op_set == quant_opts_pb2.OpSet.OP_SET_UNSPECIFIED:\n quantization_options.op_set = quant_opts_pb2.OpSet.XLA\n if not quantization_options.tags:\n quantization_options.tags.append(tag_constants.SERVING)\n if not quantization_options.signature_keys:\n quantization_options.signature_keys.append(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)\n if not quantization_options.HasField('freeze_all_variables'):\n quantization_options.freeze_all_variables = True\n if quantization_options.enable_legacy_weight_only:\n raise ValueError('Legacy weight-only is deprecated. Use weight-only quantization method.')\n if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_UNSPECIFIED:\n logging.debug('\"preset_method\" for QuantizationMethod is not specified.Static range quantization is used by default.')\n quantization_options.quantization_method.preset_method = _PresetMethod.METHOD_STATIC_RANGE_INT8\n if quantization_options.min_num_elements_for_weights == 0:\n quantization_options.min_num_elements_for_weights = _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS\n logging.warning('QuantizationOptions.min_num_elements_for_weights is not set (0). 
Setting to the default value: %d.', _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS)\n if not quantization_options.HasField('enable_per_channel_quantization'):\n quantization_options.enable_per_channel_quantization = False\n if quantization_options.enable_per_channel_quantization and (not ((quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED or quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8) or (quantization_options.op_set in (quant_opts_pb2.OpSet.XLA, quant_opts_pb2.OpSet.STABLEHLO) and quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8))):\n raise ValueError('Currently, per-channel quantization is supported for Uniform Quantized opset, weight only quantization, or XLA/StableHLO opset with static range quantization.')\n if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8 and (quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED or quantization_options.op_set == quant_opts_pb2.OpSet.TF):\n raise ValueError('TF/Uniform quantized opset does not support weight-only.')\n if quantization_options.op_set == quant_opts_pb2.OpSet.STABLEHLO and (quantization_options.quantization_method.preset_method != _PresetMethod.METHOD_STATIC_RANGE_INT8 and quantization_options.quantization_method.preset_method != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8):\n raise ValueError('StableHLO quantized opset currently only supports static range quantization and weight-only quantizationvia TF Quantizer.')\n logging.debug('Setting `force_graph_mode_calibration = True` to ensure the calibration mode is executed properly.')\n quantization_options.force_graph_mode_calibration = True\n if quantization_options.HasField('debugger_config'):\n if not quantization_options.debugger_config.log_dir_path:\n quantization_options.debugger_config.log_dir_path = '/tmp/dumps'\n if quantization_options.debugger_config.debugger_type == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_UNSPECIFIED:\n raise ValueError('Debugger is enabled but debugger type was not specified.')\n if quantization_options.debugger_config.debugger_type == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL and (not quantization_options.debugger_config.unquantized_dump_model_path):\n raise ValueError('Debugger type whole model verify was used but unquantized_dump_model_path was not specified.')\n _populate_quantization_component_spec(quantization_options.quantization_method)\n _populate_unitwise_quantization_specs(quantization_options)\n if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8:\n _populate_calibration_options(quantization_options)", "docstring": "Populates default values for QuantizationOptions.\n\n Populates unspecified or unset fields of QuantizationOptions with the default\n values.\n\n * If `op_set` is unspecified, it defaults to `OpSet.XLA`.\n * If `freeze_all_variables` is not set, it defaults to `True`.\n * Check if configurations are set correctly:\n - Per-channel quantization is supported for Uniform Quantized opset only.\n\nArgs:\n quantization_options: An instance of QuantizationOptions.", "source": "github_repos"} -{"code": "def make_input_fn_iterator(self, input_fn, replication_mode=InputReplicationMode.PER_WORKER):\n return super(StrategyV1, self).make_input_fn_iterator(input_fn, replication_mode)", "docstring": "Returns an 
iterator split across replicas created from an input function.\n\n DEPRECATED: This method is not available in TF 2.x.\n\n The `input_fn` should take an `tf.distribute.InputContext` object where\n information about batching and input sharding can be accessed:\n\n ```\n def input_fn(input_context):\n batch_size = input_context.get_per_replica_batch_size(global_batch_size)\n d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)\n return d.shard(input_context.num_input_pipelines,\n input_context.input_pipeline_id)\n with strategy.scope():\n iterator = strategy.make_input_fn_iterator(input_fn)\n replica_results = strategy.experimental_run(replica_fn, iterator)\n ```\n\n The `tf.data.Dataset` returned by `input_fn` should have a per-replica\n batch size, which may be computed using\n `input_context.get_per_replica_batch_size`.\n\nArgs:\n input_fn: A function taking a `tf.distribute.InputContext` object and\n returning a `tf.data.Dataset`.\n replication_mode: an enum value of `tf.distribute.InputReplicationMode`.\n Only `PER_WORKER` is supported currently, which means there will be\n a single call to `input_fn` per worker. Replicas will dequeue from the\n local `tf.data.Dataset` on their worker.\n\nReturns:\n An iterator object that should first be `.initialize()`-ed. It may then\n either be passed to `strategy.experimental_run()` or you can\n `iterator.get_next()` to get the next value to pass to\n `strategy.extended.call_for_each_replica()`.", "source": "github_repos"} -{"code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n raise NotImplementedError", "docstring": "Updates this variable with the min of `tf.IndexedSlices` and itself.\n\nArgs:\n sparse_delta: `tf.IndexedSlices` to use as an argument of min with this\n variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\nReturns:\n The updated variable.\n\nRaises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github_repos"} -{"code": "def convert_sparse_cross_attention_mask_to_dense(cross_attention_token_mask: List[List[List[int]]], num_tiles: List[List[int]], max_num_tiles: int, length: int) -> np.ndarray:\n batch_size = len(cross_attention_token_mask)\n max_num_images = max([len(masks) for masks in cross_attention_token_mask])\n cross_attention_mask = np.zeros(shape=(batch_size, length, max_num_images, max_num_tiles), dtype=np.int64)\n for sample_idx, (sample_masks, sample_num_tiles) in enumerate(zip(cross_attention_token_mask, num_tiles)):\n for mask_idx, (locations, mask_num_tiles) in enumerate(zip(sample_masks, sample_num_tiles)):\n if len(locations) == 2:\n start, end = locations\n end = min(end, length)\n if end == -1:\n end = length\n cross_attention_mask[sample_idx, start:end, mask_idx, :mask_num_tiles] = 1\n return cross_attention_mask", "docstring": "Convert the cross attention mask indices to a cross attention mask 4D array.\n\n This function takes a sparse representation of cross attention masks and converts it to a dense 4D numpy array.\n The sparse representation is a nested list structure that defines attention ranges for each image in each batch item.\n\nArgs:\n cross_attention_token_mask (List[List[List[int]]]): A nested list structure where:\n - The outer list represents the batch dimension.\n - The middle list represents different images within each batch item.\n - The inner list contains pairs of integers [start, end] representing token ranges for each image.\n num_tiles (List[List[int]]): A nested list 
structure specifying the number of tiles for each image in each batch item.\n max_num_tiles (int): The maximum possible number of tiles.\n length (int): The total sequence length of the input.\n\nReturns:\n np.ndarray: A 4D numpy array of shape (batch_size, length, max_num_images, max_num_tiles)\n The array contains `1` where attention is allowed and `0` where it is not.\n\nNote:\n - Special handling is done for cases where the end token is -1, which is interpreted as attending to the end of the sequence.", "source": "github_repos"} -{"code": "def index_subdirectory(directory, class_indices, follow_links, formats):\n dirname = os.path.basename(directory)\n valid_files = iter_valid_files(directory, follow_links, formats)\n labels = []\n filenames = []\n for root, fname in valid_files:\n labels.append(class_indices[dirname])\n absolute_path = tf.io.gfile.join(root, fname)\n relative_path = tf.io.gfile.join(dirname, os.path.relpath(absolute_path, directory))\n filenames.append(relative_path)\n return (filenames, labels)", "docstring": "Recursively walks directory and list image paths and their class index.\n\nArgs:\n directory: string, target directory.\n class_indices: dict mapping class names to their index.\n follow_links: boolean, whether to recursively follow subdirectories\n (if False, we only list top-level images in `directory`).\n formats: Allowlist of file extensions to index (e.g. \".jpg\", \".txt\").\n\nReturns:\n tuple `(filenames, labels)`. `filenames` is a list of relative file\n paths, and `labels` is a list of integer labels corresponding\n to these files.", "source": "github_repos"} -{"code": "def stack(list_or_tensor, element_dtype=None, strict=True):\n if strict:\n\n def raise_error(x):\n raise ValueError('%s must be stackable when strict=True' % x)\n original_call = raise_error\n else:\n original_call = lambda x: x\n return data_structures.list_stack(list_or_tensor, data_structures.ListStackOpts(element_dtype=element_dtype, original_call=original_call))", "docstring": "Stacks the input, if it admits the notion of stacking.\n\n For example, a list of tensors can be stacked into a larger tensor. This\n function is similar to tf.stack, but it accepts non-lists and lists of\n non-tensors as arguments. In the latter case, the function does nothing.\n\nArgs:\n list_or_tensor: Any\n element_dtype: tf.DType, optional dtypedtype for the elements in the list.\n Required if the input is stackable, and the list is untyped.\n strict: bool, if True an error is raised if the input is not stackable.\n Otherwise the function is a no-op.\n\nReturns:\n Any, if the input is stackable, the result will be a tf.Tensor. Otherwise,\n if strict=False, the result will be list_or_tensor.\n\nRaises:\n ValueError: if strict=True and the input is not stackable.", "source": "github_repos"} -{"code": "def __init__(self, operators, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None):\n parameters = dict(operators=operators, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n check_ops.assert_proper_iterable(operators)\n operators = list(operators)\n if not operators:\n raise ValueError('Expected a non-empty list of operators. 
Found: %s' % operators)\n self._operators = operators\n dtype = operators[0].dtype\n for operator in operators:\n if operator.dtype != dtype:\n name_type = (str((o.name, o.dtype)) for o in operators)\n raise TypeError('Expected all operators to have the same dtype. Found %s' % ' '.join(name_type))\n if all((operator.is_non_singular for operator in operators)):\n if is_non_singular is False:\n raise ValueError('The composition of non-singular operators is always non-singular.')\n is_non_singular = True\n if _composition_must_be_self_adjoint(operators):\n if is_self_adjoint is False:\n raise ValueError('The composition was determined to be self-adjoint but user provided incorrect `False` hint.')\n is_self_adjoint = True\n if linear_operator_util.is_aat_form(operators):\n if is_square is False:\n raise ValueError('The composition was determined have the form A @ A.H, hence it must be square. The user provided an incorrect `False` hint.')\n is_square = True\n if linear_operator_util.is_aat_form(operators) and is_non_singular:\n if is_positive_definite is False:\n raise ValueError('The composition was determined to be non-singular and have the form A @ A.H, hence it must be positive-definite. The user provided an incorrect `False` hint.')\n is_positive_definite = True\n if name is None:\n name = '_o_'.join((operator.name for operator in operators))\n with ops.name_scope(name):\n super(LinearOperatorComposition, self).__init__(dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)", "docstring": "Initialize a `LinearOperatorComposition`.\n\n `LinearOperatorComposition` is initialized with a list of operators\n `[op_1,...,op_J]`. For the `matmul` method to be well defined, the\n composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have\n similar constraints.\n\nArgs:\n operators: Iterable of `LinearOperator` objects, each with\n the same `dtype` and composable shape.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`. 
Default is the individual\n operators names joined with `_o_`.\n\nRaises:\n TypeError: If all operators do not have the same `dtype`.\n ValueError: If `operators` is empty.", "source": "github_repos"} -{"code": "def call(self, input_ids: tf.Tensor | None=None, bbox: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, pixel_values: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor]]:\n outputs = self.layoutlmv3(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n return outputs", "docstring": "Returns:\n\nExample:\n ```python\n >>> from transformers import AutoProcessor, TFAutoModel\n >>> from datasets import load_dataset\n\n >>> processor = AutoProcessor.from_pretrained(\"microsoft/layoutlmv3-base\", apply_ocr=False)\n >>> model = TFAutoModel.from_pretrained(\"microsoft/layoutlmv3-base\")\n\n >>> dataset = load_dataset(\"nielsr/funsd-layoutlmv3\", split=\"train\", trust_remote_code=True)\n >>> example = dataset[0]\n >>> image = example[\"image\"]\n >>> words = example[\"tokens\"]\n >>> boxes = example[\"bboxes\"]\n\n >>> encoding = processor(image, words, boxes=boxes, return_tensors=\"tf\")\n\n >>> outputs = model(**encoding)\n >>> last_hidden_states = outputs.last_hidden_state\n ```", "source": "github_repos"} -{"code": "def _init_summary_op(self, summary_op=USE_DEFAULT):\n if summary_op is Supervisor.USE_DEFAULT:\n summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)\n if summary_op is None:\n summary_op = _summary.merge_all()\n if summary_op is not None:\n ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)\n self._summary_op = summary_op", "docstring": "Initializes summary_op.\n\nArgs:\n summary_op: An Operation that returns a Summary for the event logs. 
If set\n to USE_DEFAULT, create an op that merges all the summaries.", "source": "github_repos"} -{"code": "def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n hidden_states = inputs_embeds\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n if attention_mask is not None:\n attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for encoder_layer in self.layers:\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n to_drop = False\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n to_drop = True\n if to_drop:\n layer_outputs = (None, None)\n else:\n layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n hidden_states = self.layernorm(hidden_states)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.\n\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:\n\n - 1 for pixel features that are real (i.e. **not masked**),\n - 0 for pixel features that are padding (i.e. **masked**).\n\n [What are attention masks?](../glossary#attention-mask)\n\n object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Position embeddings that are added to the queries and keys in each self-attention layer.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github_repos"} -{"code": "def conv_input_length(output_length, filter_size, padding, stride):\n if output_length is None:\n return None\n assert padding in {'same', 'valid', 'full'}\n if padding == 'same':\n pad = filter_size // 2\n elif padding == 'valid':\n pad = 0\n elif padding == 'full':\n pad = filter_size - 1\n return (output_length - 1) * stride - 2 * pad + filter_size", "docstring": "Determines input length of a convolution given output length.\n\nArgs:\n output_length: integer.\n filter_size: integer.\n padding: one of \"same\", \"valid\", \"full\".\n stride: integer.\n\nReturns:\n The input length (integer).", "source": "github_repos"} -{"code": "def expectation(self, function):\n return self._expectation(function)", "docstring": "Returns an estimate of the expectation value of the given function.\n\nArgs:\n function: Mapping from a 2D tensor of bitstrings to a possibly nested\n structure. The structure must have atomic elements all of which are\n float tensors with the same batch size as the input bitstrings.", "source": "github_repos"} -{"code": "def get_device_locations(mesh: layout_lib.Mesh, client_id: Optional[int]=None) -> List[Dict[str, int]]:\n if mesh.device_type() != _TPU_DEVICE_TYPE:\n raise ValueError('The mesh must be a TPU mesh')\n if client_id is None or client_id == config.client_id():\n return mesh.local_device_locations()\n raise NotImplementedError(\"Looking up other clients' device locations is not supported\")", "docstring": "Returns the device locations of all TPU cores local to the given client.\n\n A device location is a dictionary from dimension names to indices on those\n dimensions. For example, for a 2x2 mesh ('x', 'y'), this function returns a\n permutation of this list:\n\n [{'x': 0, 'y': 0},\n {'x': 0, 'y': 1},\n {'x': 1, 'y': 0},\n {'x': 1, 'y': 1}].\n\n Note that device IDs and device locations are equivalent. The former is a\n linearization of the latter along mesh dimensions.\n\nArgs:\n mesh: A TPU mesh.\n client_id: Optional; A DTensor client ID. 
If empty, query this client.", "source": "github_repos"} -{"code": "def events_from_logdir(logdir):\n assert tf.io.gfile.exists(logdir)\n files = tf.io.gfile.listdir(logdir)\n assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files\n return _events_from_file(os.path.join(logdir, files[0]))", "docstring": "Returns all events in the single eventfile in logdir.\n\nArgs:\n logdir: The directory in which the single event file is sought.\n\nReturns:\n A list of all tf.compat.v1.Event protos from the single event file.\n\nRaises:\n AssertionError: If logdir does not contain exactly one file.", "source": "github_repos"} -{"code": "def __init__(self, config: AriaTextConfig, layer_idx: int):\n super().__init__(self)\n self.mlp = AriaTextMoELayer(config)", "docstring": "Aria Text Decoder Layer.\n\n This class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network.\n\nArgs:\n config (`AriaTextConfig`):\n Configuration object for the text component of the model.\n layer_idx (`int`):\n Index of the layer.", "source": "github_repos"} -{"code": "def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]], Mapping[str, List[Bond]], Mapping[str, List[BondAngle]]]:\n stereo_chemical_props = resources.read_text('openfold.resources', 'stereo_chemical_props.txt')\n lines_iter = iter(stereo_chemical_props.splitlines())\n residue_bonds: Dict[str, List[Bond]] = {}\n next(lines_iter)\n for line in lines_iter:\n if line.strip() == '-':\n break\n bond, resname, bond_length, stddev = line.split()\n atom1, atom2 = bond.split('-')\n if resname not in residue_bonds:\n residue_bonds[resname] = []\n residue_bonds[resname].append(Bond(atom1, atom2, float(bond_length), float(stddev)))\n residue_bonds['UNK'] = []\n residue_bond_angles: Dict[str, List[BondAngle]] = {}\n next(lines_iter)\n next(lines_iter)\n for line in lines_iter:\n if line.strip() == '-':\n break\n bond, resname, angle_degree, stddev_degree = line.split()\n atom1, atom2, atom3 = bond.split('-')\n if resname not in residue_bond_angles:\n residue_bond_angles[resname] = []\n residue_bond_angles[resname].append(BondAngle(atom1, atom2, atom3, float(angle_degree) / 180.0 * np.pi, float(stddev_degree) / 180.0 * np.pi))\n residue_bond_angles['UNK'] = []\n\n def make_bond_key(atom1_name: str, atom2_name: str) -> str:\n \"\"\"Unique key to lookup bonds.\"\"\"\n return '-'.join(sorted([atom1_name, atom2_name]))\n residue_virtual_bonds: Dict[str, List[Bond]] = {}\n for resname, bond_angles in residue_bond_angles.items():\n bond_cache: Dict[str, Bond] = {}\n for b in residue_bonds[resname]:\n bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b\n residue_virtual_bonds[resname] = []\n for ba in bond_angles:\n bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]\n bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]\n gamma = ba.angle_rad\n length = np.sqrt(bond1.length ** 2 + bond2.length ** 2 - 2 * bond1.length * bond2.length * np.cos(gamma))\n dl_outer = 0.5 / length\n dl_dgamma = 2 * bond1.length * bond2.length * np.sin(gamma) * dl_outer\n dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer\n dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer\n stddev = np.sqrt((dl_dgamma * ba.stddev) ** 2 + (dl_db1 * bond1.stddev) ** 2 + (dl_db2 * bond2.stddev) ** 2)\n residue_virtual_bonds[resname].append(Bond(ba.atom1_name, ba.atom3name, length, stddev))\n return (residue_bonds, residue_virtual_bonds, 
residue_bond_angles)", "docstring": "Load stereo_chemical_props.txt into a nice structure.\n\n Load literature values for bond lengths and bond angles and translate bond angles into the length of the opposite\n edge of the triangle (\"residue_virtual_bonds\").\n\nReturns:\n residue_bonds: dict that maps resname --> list of Bond tuples residue_virtual_bonds: dict that maps resname -->\n list of Bond tuples residue_bond_angles: dict that maps resname --> list of BondAngle tuples", "source": "github_repos"} -{"code": "def diff_is_docstring_only(repo: Repo, branching_point: str, filename: str) -> bool:\n folder = Path(repo.working_dir)\n with checkout_commit(repo, branching_point):\n with open(folder / filename, 'r', encoding='utf-8') as f:\n old_content = f.read()\n with open(folder / filename, 'r', encoding='utf-8') as f:\n new_content = f.read()\n old_content_clean = clean_code(old_content)\n new_content_clean = clean_code(new_content)\n return old_content_clean == new_content_clean", "docstring": "Check if the diff is only in docstrings (or comments and whitespace) in a filename.\n\nArgs:\n repo (`git.Repo`): A git repository (for instance the Transformers repo).\n branching_point (`str`): The commit reference of where to compare for the diff.\n filename (`str`): The filename where we want to know if the diff isonly in docstrings/comments.\n\nReturns:\n `bool`: Whether the diff is docstring/comments only or not.", "source": "github_repos"} -{"code": "def _unpack_sequence(self, state, n_before, n_after=-1):\n assert n_after >= -1\n state, seq = state.pop()\n options = []\n nontuple_seq = self.ctx.program.NewVariable()\n has_slurp = n_after > -1\n count = n_before + max(n_after, 0)\n nondeterministic_iterable = False\n for b in abstract_utils.expand_type_parameter_instances(seq.bindings):\n if b.data.full_name in ('builtins.set', 'builtins.frozenset'):\n nondeterministic_iterable = True\n tup = self._get_literal_sequence(b.data)\n if tup is not None:\n if has_slurp and len(tup) >= count:\n options.append(self._restructure_tuple(state, tup, n_before, n_after))\n continue\n elif len(tup) == count:\n options.append(tup)\n continue\n else:\n self.ctx.errorlog.bad_unpacking(self.frames, len(tup), count)\n if b.IsVisible(state.node):\n nontuple_seq.PasteBinding(b, state.node)\n if nontuple_seq.bindings:\n state, itr = self._get_iter(state, nontuple_seq)\n state, itr_result = self._call(state, itr, '__next__', ())\n elif not options:\n itr_result = self.ctx.new_unsolvable(state.node)\n else:\n itr_result = None\n if itr_result:\n option = [itr_result for _ in range(count)]\n if has_slurp:\n slurp = self.ctx.convert.build_list_of_type(state.node, itr_result)\n option = option[:n_before] + [slurp] + option[n_before:]\n options.append(option)\n values = tuple((self.ctx.convert.build_content(value, discard_concrete_values=False) for value in zip(*options)))\n if len(values) > 1 and nondeterministic_iterable:\n self.ctx.errorlog.nondeterministic_unpacking(self.frames)\n for value in reversed(values):\n if not value.bindings:\n value = self.ctx.convert.empty.to_variable(state.node)\n state = state.push(value)\n return state", "docstring": "Pops a tuple (or other iterable) and pushes it onto the VM's stack.\n\n Supports destructuring assignment with potentially a single list variable\n that slurps up the remaining elements:\n 1. a, b, c = ... # UNPACK_SEQUENCE\n 2. a, *b, c = ... 
# UNPACK_EX\n\nArgs:\n state: The current VM state\n n_before: Number of elements before the list (n_elements for case 1)\n n_after: Number of elements after the list (-1 for case 1)\n\nReturns:\n The new state.", "source": "github_repos"} -{"code": "def enable_save_as_bf16(variables: List[tf_variables.Variable]):\n for v in variables:\n if isinstance(v, d_variable.DVariable):\n v.save_as_bf16 = True", "docstring": "Allows float32 DVariables to be checkpointed and restored as bfloat16.\n\n The method only affects the DVariable part inside the model and leaves\n non-DTensor Variables/Tensors untouched.\n\nArgs:\n variables: A list of tf.Variable to be enabled with bfloat16 save/restore.\n Only has effect on DTensor Variables as they go through d_variables with\n DTensor Specific logis.", "source": "github_repos"} -{"code": "def split(state, num):\n state = tf_np.asarray(state, dtype=_RNG_KEY_DTYPE)\n state = _key2seed(state)\n try:\n states = stateless_random_ops.stateless_split(state, num)\n except AttributeError as e:\n states = stateless_split(state, num)\n states = array_ops_stack.unstack(states, num)\n states = nest.map_structure(_seed2key, states)\n return states", "docstring": "Creates new independent RNG states from an existing state.\n\nArgs:\n state: the existing state.\n num: the number of the new states.\n\nReturns:\n A tuple of new states.", "source": "github_repos"} -{"code": "def is_initialized(self, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.is_initialized()\n if self._use_packed_variable():\n return self._packed_var.is_initialized()\n result = self._primary.is_initialized()\n for v in self._values[1:-1]:\n result = math_ops.logical_and(result, v.is_initialized())\n result = math_ops.logical_and(result, self._values[-1].is_initialized(), name=name)\n return result", "docstring": "Identifies if all the component variables are initialized.\n\nArgs:\n name: Name of the final `logical_and` op.\n\nReturns:\n The op that evaluates to True or False depending on if all the\n component variables are initialized.", "source": "github_repos"} -{"code": "def watch_key(self):\n return _get_tensor_watch_key(self.node_name, self.output_slot, self.debug_op)", "docstring": "Watch key identities a debug watch on a tensor.\n\nReturns:\n (`str`) A watch key, in the form of `tensor_name`:`debug_op`.", "source": "github_repos"} -{"code": "def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)\n num_observed = observed_indicator.sum(self.dim, keepdim=True)\n scale = ts_sum / torch.clamp(num_observed, min=1)\n if self.default_scale is None:\n batch_sum = ts_sum.sum(dim=0)\n batch_observations = torch.clamp(num_observed.sum(0), min=1)\n default_scale = torch.squeeze(batch_sum / batch_observations)\n else:\n default_scale = self.default_scale * torch.ones_like(scale)\n scale = torch.where(num_observed > 0, scale, default_scale)\n scale = torch.clamp(scale, min=self.minimum_scale)\n scaled_data = data / scale\n if not self.keepdim:\n scale = scale.squeeze(dim=self.dim)\n return (scaled_data, torch.zeros_like(scale), scale)", "docstring": "Parameters:\n data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):\n input for Batch norm calculation\n observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):\n Calculating the scale on the observed 
indicator.\n\nReturns:\n tuple of `torch.Tensor` of shapes\n (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,\n `(batch_size, 1, num_input_channels)`)", "source": "github_repos"} -{"code": "def _convert_from_saved_model(self, graph_def):\n self._save_conversion_params_metric(graph_def)\n quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)\n self._validate_inference_input_output_types(quant_mode)\n converter_kwargs = {'enable_tflite_resource_variables': self.experimental_enable_resource_variables}\n converter_kwargs.update(self._get_base_converter_args())\n converter_kwargs.update(quant_mode.converter_flags())\n result = _convert_saved_model(**converter_kwargs)\n return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)", "docstring": "Helper method that converts saved model.\n\nArgs:\n graph_def: GraphDef object for the model, used only for stats.\n\nReturns:\n The converted TFLite model.", "source": "github_repos"} -{"code": "def _list_valid_filenames_in_directory(directory, white_list_formats, split, class_indices, follow_links):\n dirname = os.path.basename(directory)\n if split:\n all_files = list(_iter_valid_files(directory, white_list_formats, follow_links))\n num_files = len(all_files)\n start, stop = (int(split[0] * num_files), int(split[1] * num_files))\n valid_files = all_files[start:stop]\n else:\n valid_files = _iter_valid_files(directory, white_list_formats, follow_links)\n classes = []\n filenames = []\n for root, fname in valid_files:\n classes.append(class_indices[dirname])\n absolute_path = os.path.join(root, fname)\n relative_path = os.path.join(dirname, os.path.relpath(absolute_path, directory))\n filenames.append(relative_path)\n return (classes, filenames)", "docstring": "Lists paths of files in `subdir` with extensions in `white_list_formats`.\n\nArgs:\n directory: absolute path to a directory containing the files to list.\n The directory name is used as class label\n and must be a key of `class_indices`.\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n split: tuple of floats (e.g. 
`(0.2, 0.6)`) to only take into\n account a certain fraction of files in each directory.\n E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent\n of images in each directory.\n class_indices: dictionary mapping a class name to its index.\n follow_links: boolean, follow symbolic links to subdirectories.\n\nReturns:\n classes: a list of class indices\n filenames: the path of valid files in `directory`, relative from\n `directory`'s parent (e.g., if `directory` is \"dataset/class1\",\n the filenames will be\n `[\"class1/file1.jpg\", \"class1/file2.jpg\", ...]`).", "source": "github_repos"} -{"code": "def cast(cls, x, dtype):\n return x.astype(dtype)", "docstring": "Cast a tensor to a different dtype.\n\n Only called on a full array as provided by the user.\n\nArgs:\n x: the tensor to cast.\n Returns: the cast tensor.", "source": "github_repos"} -{"code": "def get_relevant_paths_and_versions(self, config: 'XLAConfigOptions'):\n if self.ld_library_path is None:\n self.ld_library_path = os.environ.get('LD_LIBRARY_PATH', None)\n if config.host_compiler == HostCompiler.CLANG:\n self.clang_path = _find_executable_or_die('clang', self.clang_path)\n self.clang_major_version = self.clang_major_version or _get_clang_major_version(self.clang_path)\n self.lld_path = self.lld_path or shutil.which('ld.lld')\n elif config.host_compiler == HostCompiler.GCC:\n self.gcc_path = _find_executable_or_die('gcc', self.gcc_path)\n self.gcc_major_version = self.gcc_major_version or _get_gcc_major_version(self.gcc_path)\n if config.backend == Backend.CUDA:\n if config.cuda_compiler == CudaCompiler.CLANG:\n self.clang_path = _find_executable_or_die('clang', self.clang_path)\n if not self.cuda_compute_capabilities:\n self.cuda_compute_capabilities = _get_cuda_compute_capabilities_or_die()", "docstring": "Gets paths and versions as needed by the config.\n\nArgs:\n config: XLAConfigOptions instance that determines what paths and versions\n to try to autoconfigure.", "source": "github_repos"} -{"code": "def visit_ImportFrom(self, node):\n if not node.module:\n self.generic_visit(node)\n return\n from_import = node.module\n from_import_first_component = from_import.split('.')[0]\n import_renames = getattr(self._api_change_spec, 'import_renames', {})\n import_rename_spec = import_renames.get(from_import_first_component, None)\n if not import_rename_spec:\n self.generic_visit(node)\n return\n updated_aliases = []\n same_aliases = []\n for import_alias in node.names:\n full_module_name = '%s.%s' % (from_import, import_alias.name)\n if excluded_from_module_rename(full_module_name, import_rename_spec):\n same_aliases.append(import_alias)\n else:\n updated_aliases.append(import_alias)\n if not updated_aliases:\n self.generic_visit(node)\n return\n assert self._stack[-1] is node\n parent = self._stack[-2]\n new_from_import = import_rename_spec.new_name + from_import[len(from_import_first_component):]\n updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)\n ast.copy_location(updated_node, node)\n pasta.ast_utils.replace_child(parent, node, updated_node)\n additional_import_log = ''\n if same_aliases:\n same_node = ast.ImportFrom(from_import, same_aliases, node.level, col_offset=node.col_offset, lineno=node.lineno)\n ast.copy_location(same_node, node)\n parent.body.insert(parent.body.index(updated_node), same_node)\n pasta.base.formatting.set(same_node, 'prefix', pasta.base.formatting.get(updated_node, 'prefix'))\n additional_import_log = ' and %r' % pasta.dump(same_node)\n self.add_log(INFO, 
node.lineno, node.col_offset, 'Changed import from %r to %r%s.' % (pasta.dump(node), pasta.dump(updated_node), additional_import_log))\n self.generic_visit(node)", "docstring": "Handle visiting an import-from node in the AST.\n\nArgs:\n node: Current Node", "source": "github_repos"} -{"code": "def __init__(self, name='categorical_accuracy', dtype=None):\n super(CategoricalAccuracy, self).__init__(categorical_accuracy, name, dtype=dtype)", "docstring": "Calculates how often predictions match one-hot labels.\n\n You can provide logits of classes as `y_pred`, since argmax of\n logits and probabilities are same.\n\n This metric creates two local variables, `total` and `count` that are used to\n compute the frequency with which `y_pred` matches `y_true`. This frequency is\n ultimately returned as `categorical accuracy`: an idempotent operation that\n simply divides `total` by `count`.\n\n `y_pred` and `y_true` should be passed in as vectors of probabilities, rather\n than as labels. If necessary, use `tf.one_hot` to expand `y_true` as a vector.\n\n If `sample_weight` is `None`, weights default to 1.\n Use `sample_weight` of 0 to mask values.\n\nArgs:\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\n Standalone usage:\n\n >>> m = tf.keras.metrics.CategoricalAccuracy()\n >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],\n ... [0.05, 0.95, 0]])\n >>> m.result().numpy()\n 0.5\n\n >>> m.reset_state()\n >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],\n ... [0.05, 0.95, 0]],\n ... sample_weight=[0.7, 0.3])\n >>> m.result().numpy()\n 0.3\n\n Usage with `compile()` API:\n\n ```python\n model.compile(\n optimizer='sgd',\n loss='mse',\n metrics=[tf.keras.metrics.CategoricalAccuracy()])\n ```", "source": "github_repos"} -{"code": "def _replace_tensors_by_numpy_ndarrays(repr_ds_map: rd.RepresentativeDatasetMapping) -> None:\n with session.Session() as sess:\n for signature_def_key in repr_ds_map:\n ds = repr_ds_map[signature_def_key]\n repr_ds_map[signature_def_key] = rd.replace_tensors_by_numpy_ndarrays(ds, sess)", "docstring": "Replaces tf.Tensors by their evaluated numpy arrays.\n\n This assumes that tf.Tensors in representative samples are created in the\n default Graph. 
It will raise an error if tensors are created in a different\n graph.\n\nArgs:\n repr_ds_map: SignatureDef key -> RepresentativeDataset mapping.", "source": "github_repos"} -{"code": "def _runOneBenchmark(self, default_device, num_iters=10, static_unroll=False, steps=10):\n\n def loop_body(i, x):\n with ops.device('/gpu:0'):\n nx = nn_ops.conv2d(input=x, filter=kernel, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC', name='conv2d')\n ni = math_ops.add(i, 1)\n return (ni, nx)\n ops.reset_default_graph()\n with session.Session() as sess, ops.device(default_device):\n i, x, kernel = self._getInitVariables()\n self.evaluate(variables.global_variables_initializer())\n if static_unroll:\n for _ in range(steps):\n i, x = loop_body(i, x)\n else:\n i, x = while_loop_tf.while_loop(lambda i, _: i < steps, loop_body, [i, x], parallel_iterations=steps, swap_memory=True)\n r = math_ops.reduce_sum(x)\n dx, dk = gradients_impl.gradients(r, [x, kernel])\n r = control_flow_ops.group(dx, dk)\n for _ in range(3):\n self.evaluate(r)\n start_time = time.time()\n for _ in range(num_iters):\n self.evaluate(r)\n return (time.time() - start_time) / num_iters", "docstring": "Evaluate the while loop performance.\n\nArgs:\n default_device: The default device to run all ops except the loop_body.\n loop_body is always run on GPU.\n num_iters: Number of iterations to run.\n static_unroll: If true, run unrolled version; otherwise, run while_loop.\n steps: Total number of repeated steps to run the loop.\n\nReturns:\n The duration of the run in seconds.", "source": "github_repos"} -{"code": "def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n mask = tf.cast(input_ids != padding_idx, tf.int64)\n incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask\n return incremental_indices + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n are ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\n x: tf.Tensor x:\n\n Returns: tf.Tensor", "source": "github_repos"} -{"code": "def get_compiler_ir(self, device_name, platform_name, function_name, flat_args, captured_inputs, stage='hlo'):\n return pywrap_tfe.TF_GetCompilerIr(self._context_handle, function_name, stage, device_name, flat_args, captured_inputs, platform_name)", "docstring": "Get the compiler IR bytes.\n\nArgs:\n device_name: The name of the device with the form as\n \"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc.\n When this is used, actual device is needed for getting the compiler IR.\n platform_name: The name of the platform, e.g. \"TPU\". When this is used,\n first we find a device whose name contains the platform, if it is found\n we get the compiler IR by device. Otherwise the compiler IR is obtained\n as if using that device. 
The former logic of falling back to device is\n necessary, as there are cases of TF variables that need to access\n devices, but the upper layer may generally choose platform for getting\n compiler IR in a device-agnostic way.\n function_name: The name of the function to get the compiler IR.\n flat_args: The flat argument inputs.\n captured_inputs: The inputs that are captured.\n stage: The exported stage for the given function.\n\nReturns:\n The compiler IR bytes.", "source": "github_repos"} -{"code": "def _transpose_batch_time(x):\n x_static_shape = x.get_shape()\n if x_static_shape.rank is not None and x_static_shape.rank < 2:\n return x\n x_rank = array_ops.rank(x)\n x_t = array_ops.transpose(x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))\n x_t.set_shape(tensor_shape.TensorShape([x_static_shape.dims[1].value, x_static_shape.dims[0].value]).concatenate(x_static_shape[2:]))\n return x_t", "docstring": "Transposes the batch and time dimensions of a Tensor.\n\n If the input tensor has rank < 2 it returns the original tensor. Retains as\n much of the static shape information as possible.\n\nArgs:\n x: A Tensor.\n\nReturns:\n x transposed along the first two dimensions.", "source": "github_repos"} -{"code": "def _get(self, feed_item):\n result = store.get(self._entity, feed_item.get(FieldMap.CREATIVE_ASSET_ID, None))\n if not result:\n result = {'id': feed_item.get(FieldMap.CREATIVE_ASSET_ID, None), 'assetIdentifier': {'name': feed_item.get(FieldMap.CREATIVE_ASSET_NAME, None), 'type': feed_item.get(FieldMap.CREATIVE_TYPE, None)}}\n store.set(self._entity, [feed_item.get(FieldMap.CREATIVE_ASSET_ID, None)], result)\n return result", "docstring": "Retrieves an item from DCM or the local cache.\n\nArgs:\n feed_item: The feed item representing the creative asset from the\n Bulkdozer feed.\n\nReturns:\n Instance of the DCM object either from the API or from the local cache.", "source": "github_repos"} -{"code": "def WriteUpdateTimestamp(self, update_timestamp=None):\n self.update_time = None\n if update_timestamp is None:\n update_timestamp = self._GetCurrentTime()\n return self._WriteTimestamp(update_timestamp, self.update_file)", "docstring": "Convenience method for writing the last update timestamp.\n\nArgs:\n update_timestamp: An int with the number of seconds since epoch,\n defaulting to the current time if None.\n\nReturns:\n A boolean indicating success of the write.", "source": "github_repos"} -{"code": "def query_op_traceback(self, op_name):\n for op_log_proto in self._graph_tracebacks:\n for log_entry in op_log_proto.log_entries:\n if log_entry.name == op_name:\n return self._code_def_to_traceback(log_entry.code_def, op_log_proto.id_to_string)\n raise ValueError(\"Op '%s' does not exist in the tracebacks received by the debug server.\" % op_name)", "docstring": "Query the traceback of an op.\n\nArgs:\n op_name: Name of the op to query.\n\nReturns:\n The traceback of the op, as a list of 3-tuples:\n (filename, lineno, function_name)\n\nRaises:\n ValueError: If the op cannot be found in the tracebacks received by the\n server so far.", "source": "github_repos"} -{"code": "def std(x, axis=None, keepdims=False):\n if any_symbolic_tensors((x,)):\n return Std(axis=axis, keepdims=keepdims).symbolic_call(x)\n return backend.numpy.std(x, axis=axis, keepdims=keepdims)", "docstring": "Compute the standard deviation along the specified axis.\n\nArgs:\n x: Input tensor.\n axis: Axis along which to compute standard deviation.\n Default is to compute the standard deviation of the\n 
flattened tensor.\n keepdims: If this is set to `True`, the axes which are reduced are left\n in the result as dimensions with size one.\n\nReturns:\n Output tensor containing the standard deviation values.", "source": "github_repos"} -{"code": "def update_with_token(self, token_id: int) -> bool:\n if self.status != RequestStatus.DECODING:\n return False\n is_eos = token_id == self.eos_token_id and self.eos_token_id != -1\n is_max_len = self.generated_len() >= self.max_new_tokens\n if is_eos or is_max_len:\n self.status = RequestStatus.FINISHED\n return True\n return False", "docstring": "Update the request with a newly generated token and check for completion.\n\nArgs:\n token_id: The token ID to add to the output sequence\n\nReturns:\n bool: True if the request is now complete, False otherwise", "source": "github_repos"} -{"code": "def _unsorted_segment_N(data, segment_ids, num_segments):\n num_segments = ops.convert_to_tensor(num_segments)\n segment_ids_shape = array_ops.shape_internal(segment_ids)\n ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)\n n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)\n broadcastable_shape = array_ops.concat([num_segments[array_ops.newaxis], array_ops.ones([array_ops.rank(data) - array_ops.rank(segment_ids)], dtype=num_segments.dtype)], axis=0)\n n = array_ops.reshape(n, broadcastable_shape)\n return gen_math_ops.maximum(n, 1)", "docstring": "Helper function for unsorted_segment_mean/_sqrtN.\n\n Computes the number of segment entries with 0-entries set to 1 to allow\n division by N.\n\nArgs:\n data: A `Tensor` with data that will be assembled in the output.\n segment_ids: An integer tensor whose shape is a prefix of `data.shape`. The\n values must be in the range `[0, num_segments)`. The values are always\n validated to be in range on CPU, never validated on TPU/GPU.\n num_segments: An integer scalar `Tensor`. 
The number of distinct segment\n IDs.\n\nReturns:\n A `Tensor` with the number of segment entries with 0-entries set to 1.", "source": "github_repos"} -{"code": "def on_session_init(self, request):", "docstring": "Callback invoked during construction of the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens right before the constructor ends.\n\nArgs:\n request: (`OnSessionInitRequest`) callback request carrying information\n such as the session being wrapped.\n\nReturns:\n An instance of `OnSessionInitResponse`.", "source": "github_repos"} -{"code": "def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n mask = np.zeros(output_size, dtype=np.int64)\n mask[:input_height, :input_width] = 1\n return mask", "docstring": "Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.\n\nArgs:\n image (`np.ndarray`):\n Image to make the pixel mask for.\n output_size (`Tuple[int, int]`):\n Output size of the mask.", "source": "github_repos"} -{"code": "def adapt(self, data, batch_size=None, steps=None):\n self.reset_state()\n if isinstance(data, tf.data.Dataset):\n if steps is not None:\n data = data.take(steps)\n for batch in data:\n self.update_state(batch)\n else:\n data = tf_utils.ensure_tensor(data, dtype='string')\n if data.shape.rank == 1:\n data = tf.expand_dims(data, -1)\n self.update_state(data)\n self.finalize_state()", "docstring": "Computes a vocabulary of string terms from tokens in a dataset.\n\n Calling `adapt()` on a `TextVectorization` layer is an alternative to\n passing in a precomputed vocabulary on construction via the `vocabulary`\n argument. A `TextVectorization` layer should always be either adapted\n over a dataset or supplied with a vocabulary.\n\n During `adapt()`, the layer will build a vocabulary of all string tokens\n seen in the dataset, sorted by occurrence count, with ties broken by\n sort order of the tokens (high to low). At the end of `adapt()`, if\n `max_tokens` is set, the vocabulary will be truncated to `max_tokens`\n size. For example, adapting a layer with `max_tokens=1000` will compute\n the 1000 most frequent tokens occurring in the input dataset. If\n `output_mode='tf-idf'`, `adapt()` will also learn the document\n frequencies of each token in the input dataset.\n\nArgs:\n data: The data to train on. It can be passed either as a\n batched `tf.data.Dataset`, as a list of strings,\n or as a NumPy array.\n steps: Integer or `None`.\n Total number of steps (batches of samples) to process.\n If `data` is a `tf.data.Dataset`, and `steps` is `None`,\n `adapt()` will run until the input dataset is exhausted.\n When passing an infinitely\n repeating dataset, you must specify the `steps` argument. This\n argument is not supported with array inputs or list inputs.", "source": "github_repos"} -{"code": "def is_ipython_subprocess() -> bool:\n return False", "docstring": "Check if we are in a sub-process launched from within a `ipython` terminal.\n\nReturns:\n `True` only if we are in ipython terminal (e.g. 
`ml_python`) and inside\n a sub-process.", "source": "github_repos"} -{"code": "def required_space_to_batch_paddings(input_shape, block_shape, base_paddings=None, name=None):\n with ops.name_scope(name, 'required_space_to_batch_paddings', [input_shape, block_shape]):\n input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32, name='input_shape')\n block_shape = ops.convert_to_tensor(block_shape, dtype=dtypes.int32, name='block_shape')\n block_shape.get_shape().assert_is_fully_defined()\n block_shape.get_shape().assert_has_rank(1)\n num_block_dims = block_shape.get_shape().dims[0].value\n if num_block_dims == 0:\n return (zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32))\n input_shape.get_shape().assert_is_compatible_with([num_block_dims])\n if base_paddings is not None:\n base_paddings = ops.convert_to_tensor(base_paddings, dtype=dtypes.int32, name='base_paddings')\n base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])\n else:\n base_paddings = zeros([num_block_dims, 2], dtypes.int32)\n const_block_shape = tensor_util.constant_value(block_shape)\n const_input_shape = tensor_util.constant_value(input_shape)\n const_base_paddings = tensor_util.constant_value(base_paddings)\n if const_block_shape is not None and const_input_shape is not None and (const_base_paddings is not None):\n block_shape = const_block_shape\n input_shape = const_input_shape\n base_paddings = const_base_paddings\n pad_start = base_paddings[:, 0]\n orig_pad_end = base_paddings[:, 1]\n full_input_shape = input_shape + pad_start + orig_pad_end\n pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape\n pad_end = orig_pad_end + pad_end_extra\n result_paddings = array_ops_stack.stack([[pad_start[i], pad_end[i]] for i in range(num_block_dims)], name='paddings')\n result_crops = array_ops_stack.stack([[0, pad_end_extra[i]] for i in range(num_block_dims)], name='crops')\n return (result_paddings, result_crops)", "docstring": "Calculate padding required to make block_shape divide input_shape.\n\n This function can be used to calculate a suitable paddings argument for use\n with space_to_batch_nd and batch_to_space_nd.\n\nArgs:\n input_shape: int32 Tensor of shape [N].\n block_shape: int32 Tensor of shape [N].\n base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum\n amount of padding to use. All elements must be >= 0. If not specified,\n defaults to 0.\n name: string. 
Optional name prefix.\n\nReturns:\n (paddings, crops), where:\n\n `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]\n satisfying:\n\n paddings[i, 0] = base_paddings[i, 0].\n 0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]\n (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0\n\n crops[i, 0] = 0\n crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]\n\n Raises: ValueError if called with incompatible shapes.", "source": "github_repos"} -{"code": "def testConcreteFunctionStructuredSignatureError(self, conc_args=(), conc_kwargs=None, call_args=(), call_kwargs=None, error='.*', exception=TypeError):\n conc_args = conc_args() if callable(conc_args) else conc_args\n conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}\n call_args = call_args() if callable(call_args) else call_args\n call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}\n self.assertIsInstance(conc_args, tuple)\n self.assertIsInstance(call_args, tuple)\n self.assertIsInstance(conc_kwargs, dict)\n self.assertIsInstance(call_kwargs, dict)\n\n @polymorphic_function.function\n def func(x, y=5, *varargs, **kwargs):\n del y, varargs, kwargs\n return x\n conc = func.get_concrete_function(*conc_args, **conc_kwargs)\n with self.assertRaisesRegex(exception, error):\n self.evaluate(conc(*call_args, **call_kwargs))", "docstring": "Tests for errors in the structured signature.\n\nArgs:\n conc_args: Positional arguments used for get_concrete_function.\n conc_kwargs: Keyword arguments used for get_concrete_function.\n call_args: Positional arguments used to call the function.\n call_kwargs: Keyword arguments used to call the function.\n error: Expected exception message.\n exception: Expected exception type.", "source": "github_repos"} -{"code": "def get_dofn_specs(dofn: 'DoFn') -> tuple[set[StateSpec], set[TimerSpec]]:\n from apache_beam.runners.common import MethodWrapper\n from apache_beam.transforms.core import _DoFnParam\n from apache_beam.transforms.core import _StateDoFnParam\n from apache_beam.transforms.core import _TimerDoFnParam\n all_state_specs = set()\n all_timer_specs = set()\n for method_name in dir(dofn):\n if not isinstance(getattr(dofn, method_name, None), types.MethodType):\n continue\n method = MethodWrapper(dofn, method_name)\n param_ids = [d.param_id for d in method.defaults if isinstance(d, _DoFnParam)]\n if len(param_ids) != len(set(param_ids)):\n raise ValueError('DoFn %r has duplicate %s method parameters: %s.' 
% (dofn, method_name, param_ids))\n for d in method.defaults:\n if isinstance(d, _StateDoFnParam):\n all_state_specs.add(d.state_spec)\n elif isinstance(d, _TimerDoFnParam):\n all_timer_specs.add(d.timer_spec)\n return (all_state_specs, all_timer_specs)", "docstring": "Gets the state and timer specs for a DoFn, if any.\n\nArgs:\n dofn (apache_beam.transforms.core.DoFn): The DoFn instance to introspect for\n timer and state specs.", "source": "github_repos"} -{"code": "def ndim(x):\n if any_symbolic_tensors((x,)):\n return Ndim().symbolic_call(x)\n return backend.numpy.ndim(x)", "docstring": "Return the number of dimensions of a tensor.\n\nArgs:\n x: Input tensor.\n\nReturns:\n The number of dimensions in `x`.", "source": "github_repos"} -{"code": "def f(options, expected_tf_failures=0):\n test_parameters = [{'ksize': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'strides': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'input_shape': [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [False], 'quant_16x8': [False]}, {'ksize': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'strides': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'input_shape': [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [True], 'quant_16x8': [False]}, {'ksize': [[1, 1, 1, 1]], 'strides': [[1, 1, 1, 1]], 'input_shape': [[1, 1, 1, 1]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [True], 'quant_16x8': [True]}]\n if not allow_fully_quantize:\n test_parameters = [test_parameter for test_parameter in test_parameters if True not in test_parameter['fully_quantize']]\n\n def build_graph(parameters):\n input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=parameters['input_shape'])\n out = pool_op(input_tensor, ksize=parameters['ksize'], strides=parameters['strides'], data_format=parameters['data_format'], padding=parameters['padding'])\n return ([input_tensor], [out])\n\n def build_inputs(parameters, sess, inputs, outputs):\n if allow_fully_quantize:\n input_values = create_tensor_data(tf.float32, parameters['input_shape'], min_value=-1, max_value=1)\n else:\n input_values = create_tensor_data(tf.float32, parameters['input_shape'])\n return ([input_values], sess.run(outputs, feed_dict=dict(zip(inputs, [input_values]))))\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=expected_tf_failures)", "docstring": "Actual function that generates examples.\n\nArgs:\n options: An Options instance.\n expected_tf_failures: number of expected tensorflow failures.", "source": "github_repos"} -{"code": "def download_artifacts_from_gcs(bucket_name, prefix, local_path):\n client = Client()\n bucket = client.get_bucket(bucket_name)\n blobs = [blob.name for blob in bucket.list_blobs(prefix=prefix)]\n _ = transfer_manager.download_many_to_path(bucket, blobs, destination_directory=local_path)", "docstring": "Downloads artifacts from GCS to the local file system.\n\nArgs:\n bucket_name: The name of the GCS bucket to download from.\n prefix: Prefix of GCS objects to download.\n local_path: The local path to download the folder to.", "source": "github_repos"} -{"code": "def fetch_layout(tensor: tensor_lib.Tensor) -> layout_lib.Layout:\n return _dtensor_device().fetch_layout(tensor)", "docstring": "Fetches the layout of a DTensor.\n\nArgs:\n tensor: The 
DTensor whose layout is to be fetched.\n\nReturns:\n The `Layout` of this DTensor.\n\nRaises:\n RuntimeError: When not called eagerly.", "source": "github_repos"} -{"code": "def GetUpdates(self, gcs_client, bucket_name, obj, since):\n bucket = gcs_client.bucket(bucket_name)\n blob = bucket.get_blob(obj)\n if blob is None:\n self.log.error('GCS object gs://%s/%s not found', bucket_name, obj)\n raise error.SourceUnavailable('unable to download object from GCS.')\n if since and timestamps.FromDateTimeToTimestamp(blob.updated) < since:\n return []\n data_map = self.GetMap(cache_info=blob.open())\n data_map.SetModifyTimestamp(timestamps.FromDateTimeToTimestamp(blob.updated))\n return data_map", "docstring": "Gets updates from a source.\n\nArgs:\n gcs_client: initialized gcs client\n bucket_name: gcs bucket name\n obj: object with the data\n since: a timestamp representing the last change (None to force-get)\n\nReturns:\n A tuple containing the map of updates and a maximum timestamp", "source": "github_repos"} -{"code": "def _joined_array(num_dims, reduce_dim):\n formatter = '{:0%db}' % (num_dims - 1)\n result = np.zeros(shape=[2] * (num_dims - 1), dtype='S%d' % (2 * num_dims))\n flat = result.ravel()\n for i in range(2 ** (num_dims - 1)):\n dims = formatter.format(i)\n flat[i] = ''.join([(dims[:reduce_dim] + '%d' + dims[reduce_dim:]) % j for j in range(2)])\n return result", "docstring": "Creates an ndarray with the result from reduce_join on input_array.\n\nArgs:\n num_dims: The number of dimensions of the original input array.\n reduce_dim: The dimension to reduce.\n\nReturns:\n An ndarray of shape [2] * (num_dims - 1).", "source": "github_repos"} -{"code": "def export(self, name=None):\n with tf.name_scope(name or '%s_lookup_table_export' % self._name):\n keys, values = gen_simple_hash_table_op.examples_simple_hash_table_export(self.resource_handle, key_dtype=self._key_dtype, value_dtype=self._value_dtype)\n return (keys, values)", "docstring": "Export all `key` and `value` pairs.\n\nArgs:\n name: A name for the operation (optional).\n\nReturns:\n A tuple of two tensors, the first with the `keys` and the second with\n the `values`.", "source": "github_repos"} -{"code": "def assert_splits_match(nested_splits_lists):\n error_msg = 'Inputs must have identical ragged splits'\n for splits_list in nested_splits_lists:\n if len(splits_list) != len(nested_splits_lists[0]):\n raise ValueError(error_msg)\n return [check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for s1, s2 in zip(nested_splits_lists[0], splits_list)]", "docstring": "Checks that the given splits lists are identical.\n\n Performs static tests to ensure that the given splits lists are identical,\n and returns a list of control dependency op tensors that check that they are\n fully identical.\n\nArgs:\n nested_splits_lists: A list of nested_splits_lists, where each split_list is\n a list of `splits` tensors from a `RaggedTensor`, ordered from outermost\n ragged dimension to innermost ragged dimension.\n\nReturns:\n A list of control dependency op tensors.\n\nRaises:\n ValueError: If the splits are not identical.", "source": "github_repos"} -{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_value: Optional[Tuple[tf.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, training=False) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]:\n residual = hidden_states\n 
hidden_states = self.input_layernorm(hidden_states)\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache)\n hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if use_cache:\n outputs += (present_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`tf.Tensor`, *optional*): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states", "source": "github_repos"} -{"code": "def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None, **gen_kwargs) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n if not self.args.predict_with_generate or prediction_loss_only:\n return super().prediction_step(model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys)\n has_labels = 'labels' in inputs\n inputs = self._prepare_inputs(inputs)\n if len(gen_kwargs) == 0 and hasattr(self, '_gen_kwargs'):\n gen_kwargs = self._gen_kwargs.copy()\n if 'num_beams' in gen_kwargs and gen_kwargs['num_beams'] is None:\n gen_kwargs.pop('num_beams')\n if 'max_length' in gen_kwargs and gen_kwargs['max_length'] is None:\n gen_kwargs.pop('max_length')\n default_synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self.model)\n gen_kwargs['synced_gpus'] = gen_kwargs.get('synced_gpus', default_synced_gpus)\n generation_inputs = inputs.copy()\n if 'labels' in generation_inputs and 'decoder_input_ids' in generation_inputs and (generation_inputs['labels'].shape == generation_inputs['decoder_input_ids'].shape):\n generation_inputs = {k: v for k, v in inputs.items() if k not in ('decoder_input_ids', 'decoder_attention_mask')}\n summon_full_params_context = FullyShardedDataParallel.summon_full_params(self.model) if isinstance(self.model, FullyShardedDataParallel) else contextlib.nullcontext()\n with summon_full_params_context:\n generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs)\n if self.model.generation_config._from_model_config:\n self.model.generation_config._from_model_config = False\n gen_config = self.model.generation_config\n if generated_tokens.shape[-1] < gen_config.max_length:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length)\n elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1:\n 
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1)\n with torch.no_grad():\n if has_labels:\n with self.compute_loss_context_manager():\n outputs = model(**inputs)\n if self.label_smoother is not None:\n loss = self.label_smoother(outputs, inputs['labels']).detach().mean()\n else:\n loss = (outputs['loss'] if isinstance(outputs, dict) else outputs[0]).detach().mean()\n else:\n loss = None\n if self.args.prediction_loss_only:\n return (loss, None, None)\n if has_labels:\n labels = inputs['labels']\n if labels.shape[-1] < gen_config.max_length:\n labels = self._pad_tensors_to_max_len(labels, gen_config.max_length)\n elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1:\n labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1)\n else:\n labels = None\n return (loss, generated_tokens, labels)", "docstring": "Perform an evaluation step on `model` using `inputs`.\n\n Subclass and override to inject custom behavior.\n\nArgs:\n model (`nn.Module`):\n The model to evaluate.\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument `labels`. Check your model's documentation for all accepted arguments.\n prediction_loss_only (`bool`):\n Whether or not to return the loss only.\n gen_kwargs:\n Additional `generate` specific kwargs.\n\nReturns:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).", "source": "github_repos"} -{"code": "def _delete_minibatch(self, bucket, keys):\n request = messages.DeleteBatchRequest(bucket, keys)\n results = {}\n try:\n response = self.client.delete_batch(request)\n for key in response.deleted:\n results[bucket, key] = None\n for key, error in zip(response.failed, response.errors):\n results[bucket, key] = error\n except messages.S3ClientError as e:\n for key in keys:\n results[bucket, key] = e\n return results", "docstring": "A helper method. 
Boto3 allows batch deletions\n for files within the same bucket.\n\nArgs:\n bucket: String bucket name\n keys: List of keys to be deleted in the bucket\n\n Returns: dict of the form {(bucket, key): error}, where error is None if the\n operation succeeded", "source": "github_repos"} -{"code": "def elementwise_binary_union(linear, use_sparsify):\n\n def wrap_elementwise_binary_union(func):\n sparse_func = jax_sparse.sparsify(func) if use_sparsify else None\n\n @functools.wraps(func)\n def sparse_wrapper(x1, x2):\n if isinstance(x1, jax_sparse.JAXSparse):\n if isinstance(x2, jax_sparse.JAXSparse):\n if x1.indices is x2.indices and isinstance(x1, jax_sparse.BCOO) and isinstance(x2, jax_sparse.BCOO):\n if not linear and (not x1.unique_indices):\n x1 = jax_sparse.bcoo_sum_duplicates(x1)\n x2 = jax_sparse.bcoo_sum_duplicates(x2)\n return jax_sparse.BCOO((func(x1.data, x2.data), x1.indices), shape=x1.shape, indices_sorted=x1.indices_sorted, unique_indices=x1.unique_indices)\n elif use_sparsify:\n return sparse_func(x1, x2)\n elif isinstance(x1, jax_sparse.BCOO) and isinstance(x2, jax_sparse.BCOO):\n x1 = bcoo_add_indices(x1, x2, sum_duplicates=not linear)\n x2 = bcoo_add_indices(x2, x1, sum_duplicates=not linear)\n return jax_sparse.BCOO((func(x1.data, x2.data), x1.indices), shape=x1.shape, indices_sorted=True, unique_indices=True)\n else:\n ValueError(f'Unsupported sparse format: {x1.__class__} and {x2.__class__}')\n else:\n x1 = x1.todense()\n elif isinstance(x2, jax_sparse.JAXSparse):\n x2 = x2.todense()\n return func(x1, x2)\n return sparse_wrapper\n return wrap_elementwise_binary_union", "docstring": "Decorator to add support for `JAXSparse` tensors (including `BCOO`) to an\n element-wise binary operator such that the indices present in the result are\n are the union of the indices in the two operand.\n\n The primary use case for this is the `add` and `subtract` operators.\n\n There are requirements on the operator for this decorator to work correctly:\n\n - The operator must be element-wise.\n - The operator must be binary (two input tensors and one output tensor).\n - Both inputs must be of the same shape or one input must be a scalar.\n - The output must be of the same shape as the (non scalar) inputs.\n - The indices of the output must be the union of the indices of the inputs.\n This implies that func(0, 0) must be 0. As a result, if one operand is\n dense or a scalar, then the result will be dense.\n\n Additional arguments to the function (besides the input tensors) are not\n supported.\n\n Note that if the result of the operation is zero at some indices, including\n because the operands were zero at these indices, the zeros and indices are\n preserved.\n\n The `BCOO` format is the only supported one in all cases. 
Other formats are\n not supported when `use_sparsify` is `False`.\n\nArgs:\n use_sparsify: indicates that the JAX `sparsify` transform supports this\n operation.\n linear: if `True`, mean that the operation is such that\n `op(a + b, c) == op(a, c) + op(b, c)` and\n `op(a, c + d) == op(a, c) + op(a, d)`.\n\nReturns:\n Wrapped function that supports `JAXSparse`.", "source": "github_repos"} -{"code": "def test_binomial_vs_alternative_formulation(y_true, y_pred, global_dtype):\n\n def alt_loss(y, raw_pred):\n z = 2 * y - 1\n return np.mean(np.log(1 + np.exp(-z * raw_pred)))\n\n def alt_gradient(y, raw_pred):\n z = 2 * y - 1\n return -z / (1 + np.exp(z * raw_pred))\n bin_loss = HalfBinomialLoss()\n y_true = y_true.astype(global_dtype)\n y_pred = y_pred.astype(global_dtype)\n datum = (y_true, y_pred)\n assert bin_loss(*datum) == approx(alt_loss(*datum))\n assert_allclose(bin_loss.gradient(*datum), alt_gradient(*datum))", "docstring": "Test that both formulations of the binomial deviance agree.\n\n Often, the binomial deviance or log loss is written in terms of a variable\n z in {-1, +1}, but we use y in {0, 1}, hence z = 2 * y - 1.\n ESL II Eq. (10.18):\n\n -loglike(z, f) = log(1 + exp(-2 * z * f))\n\nNote:\n - ESL 2*f = raw_prediction, hence the factor 2 of ESL disappears.\n - Deviance = -2*loglike + .., but HalfBinomialLoss is half of the\n deviance, hence the factor of 2 cancels in the comparison.", "source": "github_repos"} -{"code": "def input(self):\n return self._nested_inputs", "docstring": "Retrieves the input tensor(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. if it is connected to one incoming layer.\n\nReturns:\n Input tensor or list of input tensors.\n\nRaises:\n RuntimeError: If called in Eager mode.\n AttributeError: If no inbound nodes are found.", "source": "github_repos"} -{"code": "def normalize(x, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(x, order, axis))\n l2[l2 == 0] = 1\n return x / np.expand_dims(l2, axis)", "docstring": "Normalizes a Numpy array.\n\nArgs:\n x: Numpy array to normalize.\n axis: axis along which to normalize.\n order: Normalization order (e.g. `order=2` for L2 norm).\n\nReturns:\n A normalized copy of the array.", "source": "github_repos"} -{"code": "def _get_or_create_arg_by_name(state, name, is_kwarg=False):\n for arg in state.args + state.kwargs:\n if arg.name == name:\n return arg\n arg = Namespace()\n arg.name = name\n arg.type.lines = []\n arg.description.lines = []\n if is_kwarg:\n state.kwargs.append(arg)\n else:\n state.args.append(arg)\n return arg", "docstring": "Gets or creates a new Arg.\n\n These Arg objects (Namespaces) are turned into the ArgInfo namedtuples\n returned by parse. Each Arg object is used to collect the name, type, and\n description of a single argument to the docstring's function.\n\nArgs:\n state: The state of the parser.\n name: The name of the arg to create.\n is_kwarg: A boolean representing whether the argument is a keyword arg.\n\nReturns:\n The new Arg.", "source": "github_repos"} -{"code": "def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):\n use_auth_token = kwargs.pop('use_auth_token', None)\n if use_auth_token is not None:\n warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n if kwargs.get('token', None) is not None:\n raise ValueError('`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.')\n kwargs['token'] = use_auth_token\n if os.path.isfile(save_directory):\n raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')\n os.makedirs(save_directory, exist_ok=True)\n if push_to_hub:\n commit_message = kwargs.pop('commit_message', None)\n repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])\n repo_id = self._create_repo(repo_id, **kwargs)\n files_timestamps = self._get_files_timestamps(save_directory)\n if self._auto_class is not None:\n custom_object_save(self, save_directory, config=self)\n output_image_processor_file = os.path.join(save_directory, IMAGE_PROCESSOR_NAME)\n self.to_json_file(output_image_processor_file)\n logger.info(f'Image processor saved in {output_image_processor_file}')\n if push_to_hub:\n self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))\n return [output_image_processor_file]", "docstring": "Save an image processor object to the directory `save_directory`, so that it can be re-loaded using the\n [`~image_processing_utils.ImageProcessingMixin.from_pretrained`] class method.\n\nArgs:\n save_directory (`str` or `os.PathLike`):\n Directory where the image processor JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.", "source": "github_repos"} -{"code": "def GetOpeningBracket(node):\n return getattr(node, _NODE_ANNOTATION_PREFIX + 'container_bracket', None)", "docstring": "Get opening bracket value from a node.\n\nArgs:\n node: the node.\n\nReturns:\n The opening bracket node or None if it couldn't find one.", "source": "github_repos"} -{"code": "def from_features(cls, features, types):\n params = cls()\n if features:\n for key in sorted(features.keys()):\n feature = features[key]\n if not isinstance(feature, tuple(types)):\n raise ValueError(f\"Unsupported {type(feature).__name__} {feature} for key '{key}'\")\n params._add_feature(key, feature)\n params._validate()\n return params", "docstring": "Builds _ParseOpParams for a given set of features and allowed types.\n\nArgs:\n features: A `dict` mapping feature keys to objects of a type in `types`.\n types: Type of features to allow, among `FixedLenFeature`,\n `VarLenFeature`, `SparseFeature`, and `FixedLenSequenceFeature`.\n\nReturns:\n A `_ParseOpParams` containing the raw parameters for `gen_parsing_ops`.\n\nRaises:\n ValueError: if `features` contains an item not in `types`, or an invalid\n feature.\n ValueError: if sparse and dense key sets intersect.\n ValueError: if input lengths do not match up.", "source": "github_repos"} -{"code": "def random_shuffle(value, seed=None, name=None):\n with ops.name_scope(name, 'shuffle', [value, seed]):\n if value.rank == 0:\n raise ValueError('Cannot shuffle a scalar StructuredTensor')\n first_dimension = value.nrows()\n index = random_ops.random_shuffle(math_ops.range(first_dimension), seed=seed)\n return gather(value, index, axis=0)", "docstring": "Shuffle a structured tensor on the zeroth axis.\n\nArgs:\n value: a structured tensor of rank at least one.\n seed: the seed for 
shuffling.\n name: the name for shuffle.\n\nReturns:\n The shuffled structured tensor.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n lm_loss = None\n if labels is not None:\n lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return (lm_loss,) + output if lm_loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig\n >>> import torch\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/data2vec-text-base\")\n >>> config = Data2VecTextConfig.from_pretrained(\"facebook/data2vec-text-base\")\n >>> config.is_decoder = True\n >>> model = Data2VecTextForCausalLM.from_pretrained(\"facebook/data2vec-text-base\", config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```", "source": "github_repos"} -{"code": "def class_from_typename(cls, type_name: str) -> Optional[Type['JSONConvertible']]:\n return cls._TYPE_REGISTRY.class_from_typename(type_name)", "docstring": "Gets the class for a registered type name.\n\nArgs:\n type_name: A string as the global unique type identifier for requested\n class.\n\nReturns:\n A type object if registered, otherwise None.", "source": "github_repos"} -{"code": "def random_sample(value: Any, num_examples: Optional[int]=None, where: Optional[Callable[[base.HyperPrimitive], bool]]=None, seed: Optional[int]=None):\n return iterate(value, num_examples, geno.Random(seed), where=where)", "docstring": "Returns an iterator of random sampled examples.\n\n Example::\n\n hyper_dict = pg.Dict(x=pg.oneof(range(3)), y=pg.floatv(0.0, 1.0))\n\n # Generate one random example from the hyper_dict.\n d = next(pg.random_sample(hyper_dict))\n\n # Generate 5 random examples with random seed.\n ds = list(pg.random_sample(hyper_dict, 5, seed=1))\n\n # Generate 3 random examples of `x` with `y` intact.\n ds = list(pg.random_sample(hyper_dict, 3,\n where=lambda x: isinstance(x, pg.hyper.OneOf)))\n\nArgs:\n value: A (maybe) hyper value.\n num_examples: An optional integer as number of examples to propose. If None,\n propose will return an iterator that iterates forever.\n where: Function to filter hyper primitives. If None, all hyper primitives in\n `value` will be included in the encoding/decoding process. Otherwise only\n the hyper primitives on which 'where' returns True will be included.\n `where` can be useful to partition a search space into separate\n optimization processes. 
Please see 'Template' docstr for details.\n seed: An optional integer as random seed.\n\nReturns:\n Iterator of random examples.", "source": "github_repos"} -{"code": "def GetTermSize(self):\n return self._term_size", "docstring": "Returns the terminal (x, y) dimensions in characters.\n\nReturns:\n (x, y): A tuple of the terminal x and y dimensions.", "source": "github_repos"} -{"code": "def block_embedding_to(self, device):\n self.block_emb = self.block_emb.to(device)", "docstring": "Send `self.block_emb` to a specific device.\n\nArgs:\n device (`str` or `torch.device`):\n The device to which `self.block_emb` will be sent.", "source": "github_repos"} -{"code": "def random_hermitian_matrix(num_qubits):\n dim = 2 ** num_qubits\n val_range = 2\n random_real = tf.cast(tf.random.uniform([dim, dim], -val_range, val_range), tf.complex128)\n random_imag = 1j * tf.cast(tf.random.uniform([dim, dim], -val_range, val_range), tf.complex128)\n random_matrix = random_real + random_imag\n return random_matrix + tf.linalg.adjoint(random_matrix)", "docstring": "Returns a random Hermitian matrix.\n\n Uses the property that A + A* is Hermitian for any matrix A.\n\nArgs:\n num_qubits: Number of qubits on which the matrix acts.", "source": "github_repos"} -{"code": "def _reset_build_compile_trackers(model):\n model.built = False\n model.inputs = None\n model.outputs = None\n model._is_compiled = False\n if not ops.executing_eagerly_outside_functions():\n model._v1_compile_was_called = False\n model.optimizer = None", "docstring": "Reset state trackers for model.\n\n Note that we do not actually zero out attributes such as optimizer,\n but instead rely on the expectation that all of the attrs will be\n over-written on calling build/compile/etc. This is somewhat fragile,\n insofar as we check elsewhere for the presence of these attributes as\n evidence of having been built/compiled/etc. Pending a better way to do this,\n we reset key attributes here to allow building and compiling.\n\nArgs:\n model: the model that is being reset", "source": "github_repos"} -{"code": "def global_step(sess, global_step_tensor):\n if context.executing_eagerly():\n return int(global_step_tensor.numpy())\n return int(sess.run(global_step_tensor))", "docstring": "Small helper to get the global step.\n\n ```python\n # Create a variable to hold the global_step.\n global_step_tensor = tf.Variable(10, trainable=False, name='global_step')\n # Create a session.\n sess = tf.compat.v1.Session()\n # Initialize the variable\n sess.run(global_step_tensor.initializer)\n # Get the variable value.\n print('global_step: %s' % tf.compat.v1.train.global_step(sess,\n global_step_tensor))\n\n global_step: 10\n ```\n\nArgs:\n sess: A TensorFlow `Session` object.\n global_step_tensor: `Tensor` or the `name` of the operation that contains\n the global step.\n\nReturns:\n The global step value.", "source": "github_repos"} -{"code": "def log(self: EventSetOrNode) -> EventSetOrNode:\n from temporian.core.operators.unary import log\n return log(self)", "docstring": "Calculates the natural logarithm of an [`EventSet`][temporian.EventSet]'s\n features.\n\n Can only be used on floating point features.\n\nExample:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3, 4, 5],\n ... features={\"M\": [np.e, 1., 2., 10., -1.]},\n ... )\n >>> a.log()\n indexes: ...\n timestamps: [1. 2. 3. 4. 5.]\n 'M': [1. 0. 
0.6931 2.3026 nan]\n ...\n\n ```\n\nReturns:\n EventSetOr with logarithm of input features.", "source": "github_repos"} -{"code": "def get_num_bytes(self, batch: Sequence[numpy.ndarray]) -> int:\n return sum((np_array.itemsize for np_array in batch))", "docstring": "Returns:\n The number of bytes of data for a batch.", "source": "github_repos"} -{"code": "def remove_comp_items(self, context_word, comp_items):\n if context_word not in self._comp_dict:\n raise KeyError('Context word \"%s\" has not been registered' % context_word)\n for item in comp_items:\n self._comp_dict[context_word].remove(item)", "docstring": "Remove a list of completion items from a completion context.\n\nArgs:\n context_word: A single completion word as a string. The removal will\n also apply to all other context words of the same context.\n comp_items: Completion items to remove.\n\nRaises:\n KeyError: if the context word has not been registered.", "source": "github_repos"} -{"code": "def __call__(self, *args: Union[str, 'Image.Image', List['Image.Image'], List[str]], **kwargs: Any) -> List[Any]:\n return super().__call__(*args, **kwargs)", "docstring": "Extract the features of the input(s).\n\nArgs:\n images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):\n The pipeline handles three types of images:\n\n - A string containing a http link pointing to an image\n - A string containing a local path to an image\n - An image loaded in PIL directly\n\n The pipeline accepts either a single image or a batch of images, which must then be passed as a string.\n Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL\n images.\n timeout (`float`, *optional*, defaults to None):\n The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and\n the call may block forever.\n\nReturns:\n A nested list of `float`: The features computed by the model.", "source": "github_repos"} -{"code": "def name_based_restore(mesh: layout_lib.Mesh, checkpoint_prefix: str, name_tensor_dict: Dict[str, Union[tensor_lib.Tensor, tf_variables.Variable]]):\n if not context.executing_eagerly():\n raise ValueError('name based restore must run eagerly.')\n ordered_name_tensor_dict = name_tensor_dict\n if not isinstance(name_tensor_dict, collections.OrderedDict):\n ordered_name_tensor_dict = collections.OrderedDict(name_tensor_dict)\n for name, tensor in ordered_name_tensor_dict.items():\n try:\n if api.fetch_layout(tensor).mesh.device_type().upper() != 'CPU':\n raise ValueError('Restoring a non CPU Tensor is not supported currently. 
Offending tensor name : {tensor_name}'.format(tensor_name=name))\n except errors_impl.OpError as op_error:\n raise ValueError('Saving/Restoring tensor must be a DTensor') from op_error\n checkpoint_prefix = api.pack([checkpoint_prefix] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=0))\n tensor_names = api.pack([list(ordered_name_tensor_dict.keys())] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1))\n shape_and_slices = api.pack([[''] * len(ordered_name_tensor_dict)] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1))\n input_shapes = [tensor.shape for tensor in ordered_name_tensor_dict.values()]\n input_layouts = [api.fetch_layout(tensor).to_string() for tensor in ordered_name_tensor_dict.values()]\n with ops.device(api.device_name()):\n restored_cpu_tensors = gen_dtensor_ops.d_tensor_restore_v2(prefix=checkpoint_prefix, tensor_names=tensor_names, shape_and_slices=shape_and_slices, input_shapes=input_shapes, input_layouts=input_layouts, dtypes=[tensor.dtype for tensor in ordered_name_tensor_dict.values()])\n return collections.OrderedDict(zip(ordered_name_tensor_dict.keys(), restored_cpu_tensors))", "docstring": "Restores from checkpoint_prefix to name based DTensors.\n\n It is required to have already-initialized DTensor variables that have same\n shape/dtype for the tensors being restored.\n\n Also, we currently only support a named based restore on a single mesh.\n\nArgs:\n mesh: The single mesh that all Tensors would be restored to.\n checkpoint_prefix : The prefix of checkpoint to be restored.\n name_tensor_dict: A ordered dictionary of tensor_names to a DTensor. The\n DTensor shape/dtype must match the tensors being saved/restored for now.\n\nReturns:\n A dictionary of name to its restored DTensor value.", "source": "github_repos"} -{"code": "def filter_tests(output_file: str, filters: List[str]):\n if not os.path.isfile(output_file):\n print('No test file found.')\n return\n with open(output_file, 'r', encoding='utf-8') as f:\n test_files = f.read().split(' ')\n if len(test_files) == 0 or test_files == ['']:\n print('No tests to filter.')\n return\n if test_files == ['tests']:\n test_files = [os.path.join('tests', f) for f in os.listdir('tests') if f not in ['__init__.py'] + filters]\n else:\n test_files = [f for f in test_files if f.split(os.path.sep)[1] not in filters]\n with open(output_file, 'w', encoding='utf-8') as f:\n f.write(' '.join(test_files))", "docstring": "Reads the content of the output file and filters out all the tests in a list of given folders.\n\nArgs:\n output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher.\n filters (`List[str]`): A list of folders to filter.", "source": "github_repos"} -{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None) -> tf.Tensor:\n if input_ids is None and inputs_embeds is None:\n raise ValueError('You have to specify either input_ids or inputs_embeds')\n if inputs_embeds is None:\n check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n input_shape = shape_list(inputs_embeds)[:-1]\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)\n position_embeds = tf.tile(input=position_embeds, 
multiples=(input_shape[0], 1, 1))\n final_embeddings = inputs_embeds + position_embeds\n return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\n final_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github_repos"} -{"code": "def node_device(self, node_name):\n if not self._debug_graphs:\n raise LookupError('Node devices are not loaded from partition graphs yet.')\n if node_name not in self._node_devices:\n raise ValueError(\"Node '%s' does not exist in partition graphs.\" % node_name)\n output = list(self._node_devices[node_name])\n return output[0] if len(output) == 1 else output", "docstring": "Get the names of the devices that has nodes of the specified name.\n\nArgs:\n node_name: (`str`) name of the node.\n\nReturns:\n (`str` or `list` of `str`) name of the device(s) on which the node of the\n given name is found. Returns a `str` if there is only one such device,\n otherwise return a `list` of `str`.\n\nRaises:\n LookupError: If node inputs and control inputs have not been loaded\n from partition graphs yet.\n ValueError: If the node does not exist in partition graphs.", "source": "github_repos"} -{"code": "def _expected_exercise_fn(design, calibration_indices, continuation_value, exercise_value):\n mask = exercise_value > 0\n design_t = tf.transpose(design, [0, 2, 1])\n masked = tf.where(tf.expand_dims(tf.transpose(mask), axis=-1), design_t, tf.zeros_like(design_t))\n if calibration_indices is None:\n submask = masked\n mask_cont_value = continuation_value\n else:\n submask = tf.gather(masked, calibration_indices, axis=1)\n mask_cont_value = tf.gather(continuation_value, calibration_indices)\n lhs = tf.matmul(submask, submask, transpose_a=True)\n lhs_pinv = tf.linalg.pinv(lhs)\n rhs = tf.matmul(submask, tf.expand_dims(tf.transpose(mask_cont_value), axis=-1), transpose_a=True)\n beta = tf.matmul(lhs_pinv, rhs)\n continuation = tf.matmul(design_t, beta)\n return tf.nn.relu(tf.transpose(tf.squeeze(continuation, axis=-1)))", "docstring": "Returns the expected continuation value for each path.\n\nArgs:\n design: A real `Tensor` of shape `[batch_size, basis_size, num_samples]`.\n calibration_indices: A rank 1 integer `Tensor` denoting indices of samples\n used for regression.\n continuation_value: A `Tensor` of shape `[num_samples, batch_size]` and of\n the same dtype as `design`. The optimal value of the option conditional on\n not exercising now or earlier, taking future information into account.\n exercise_value: A `Tensor` of the same shape and dtype as\n `continuation_value`. Value of the option if exercised immideately at\n the current time\n\nReturns:\n A `Tensor` of the same shape and dtype as `continuation_value` whose\n `(n, v)`-th entry represents the expected continuation value of sample path\n `n` under the `v`-th payoff scheme.", "source": "github_repos"} -{"code": "def __init__(self, config: Kosmos2VisionConfig):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([Kosmos2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. 
Each layer is a\n [`Kosmos2VisionEncoderLayer`].\n\nArgs:\n config: Kosmos2VisionConfig", "source": "github_repos"} -{"code": "def __init__(self, config):\n super().__init__()\n self.use_batch_norm = config.use_batch_norm_in_fusion_residual\n use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm\n self.activation1 = nn.ReLU()\n self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n self.activation2 = nn.ReLU()\n self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n if self.use_batch_norm:\n self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size)\n self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size)", "docstring": "ResidualConvUnit, pre-activate residual unit.\n\nArgs:\n config (`[DPTConfig]`):\n Model configuration class defining the model architecture.", "source": "github_repos"} -{"code": "def is_subtype_of(self, other: trace.TraceType) -> bool:\n if type(self) is not type(other):\n return False\n is_subtype = True\n\n def check_attribute(attribute_self, attribute_other):\n nonlocal is_subtype\n if not is_subtype:\n return\n if isinstance(attribute_self, trace.TraceType):\n if not attribute_self.is_subtype_of(attribute_other):\n is_subtype = False\n return\n elif attribute_self != attribute_other:\n is_subtype = False\n try:\n nest.map_structure(check_attribute, self._serialize(), other._serialize())\n except (ValueError, TypeError):\n return False\n return is_subtype", "docstring": "Returns True if `self` is a subtype of `other`.\n\n Implements the tf.types.experimental.func.TraceType interface.\n\n If not overridden by a subclass, the default behavior is to assume the\n TypeSpec is covariant upon attributes that implement TraceType and\n invariant upon rest of the attributes as well as the structure and type\n of the TypeSpec.\n\nArgs:\n other: A TraceType object.", "source": "github_repos"} -{"code": "def _concat(self):\n if len(self._variable_list) == 1:\n with ops.name_scope(None):\n return array_ops.identity(self._variable_list[0], name=self._name)\n partition_axes = self._partition_axes()\n if len(partition_axes) > 1:\n raise NotImplementedError('Cannot concatenate along more than one dimension: %s. 
Multi-axis partition concat is not supported' % str(partition_axes))\n partition_ix = partition_axes[0]\n with ops.name_scope(self._name + '/ConcatPartitions/'):\n concatenated = array_ops.concat(self._variable_list, partition_ix)\n with ops.name_scope(None):\n return array_ops.identity(concatenated, name=self._name)", "docstring": "Returns the overall concatenated value as a `Tensor`.\n\n This is different from using the partitioned variable directly as a tensor\n (through tensor conversion and `as_tensor`) in that it creates a new set of\n operations that keeps the control dependencies from its scope.\n\nReturns:\n `Tensor` containing the concatenated value.", "source": "github_repos"} -{"code": "def create_test_methods(spec):\n for suffix, providers in provider_sets(spec):\n\n def test(self, providers=providers):\n vars = {}\n with contextlib.ExitStack() as stack:\n stack.enter_context(mock.patch('apache_beam.yaml.yaml_provider.standard_providers', lambda: providers))\n for fixture in spec.get('fixtures', []):\n vars[fixture['name']] = stack.enter_context(python_callable.PythonCallableWithSource.load_from_fully_qualified_name(fixture['type'])(**yaml_transform.SafeLineLoader.strip_metadata(fixture.get('config', {}))))\n for pipeline_spec in spec['pipelines']:\n with beam.Pipeline(options=PipelineOptions(pickle_library='cloudpickle', **yaml_transform.SafeLineLoader.strip_metadata(pipeline_spec.get('options', {})))) as p:\n yaml_transform.expand_pipeline(p, replace_recursive(pipeline_spec, vars))\n yield (f'test_{suffix}', test)", "docstring": "Dynamically creates test methods based on a YAML specification.\n\n This function takes a YAML specification (`spec`) which defines pipelines,\n fixtures, and potentially options. It iterates through different\n combinations of \"providers\" (which determine how YAML transforms are\n implemented, e.g., using Python or SQL).\n\n For each combination of providers:\n 1. It constructs a unique test method name (e.g., `test_only`).\n 2. It defines a test method that:\n a. Sets up any specified fixtures, making their values available as\n variables.\n b. Mocks the standard YAML providers to use the current combination\n of providers for this test run.\n c. For each pipeline defined in the `spec`:\n i. Creates a `beam.Pipeline` instance with specified options.\n ii. Expands the YAML pipeline definition using\n `yaml_transform.expand_pipeline`, substituting any fixture\n variables.\n iii. 
Runs the Beam pipeline.\n\n The function yields tuples of (test_method_name, test_method_function),\n which can then be used to populate a `unittest.TestCase` class.\n\nArgs:\n spec (dict): A dictionary parsed from a YAML test specification file.\n It's expected to have keys like 'fixtures' (optional) and 'pipelines'.\n\nYields:\n tuple: A tuple containing:\n - str: The generated name for the test method (e.g., \"test_only\").\n - function: The dynamically generated test method.", "source": "github_repos"} -{"code": "def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n hidden_states = inputs_embeds\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n if attention_mask is not None:\n attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for i, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n to_drop = False\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n to_drop = True\n if to_drop:\n layer_outputs = (None, None)\n else:\n layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.\n\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:\n\n - 1 for pixel features that are real (i.e. **not masked**),\n - 0 for pixel features that are padding (i.e. **masked**).\n\n [What are attention masks?](../glossary#attention-mask)\n\n object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Object queries that are added to the queries in each self-attention layer.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github_repos"} -{"code": "def _prefix_to_checkpoint_path(prefix, format_version):\n if format_version == saver_pb2.SaverDef.V2:\n return prefix + '.index'\n return prefix", "docstring": "Returns the pathname of a checkpoint file, given the checkpoint prefix.\n\n For V1 checkpoint, simply returns the prefix itself (the data file). For V2,\n returns the pathname to the index file.\n\nArgs:\n prefix: a string, the prefix of a checkpoint.\n format_version: the checkpoint format version that corresponds to the\n prefix.\n\nReturns:\n The pathname of a checkpoint file, taking into account the checkpoint\n format version.", "source": "github_repos"} -{"code": "def close(self):\n raise NotImplementedError", "docstring": "Closes the current writer.\n\n Please see documentation in ``iobase.Sink`` for an example.\n\nReturns:\n An object representing the writes that were performed by the current\n writer.", "source": "github_repos"} -{"code": "def _constraint_for_fixed_slice_element(self, builder: expressions.Builder, slice_element: ElementDefinition) -> Optional[expressions.Builder]:\n if not slice_element.HasField('fixed'):\n return None\n fixed_choice = cast(Any, slice_element).fixed\n fixed_message = getattr(fixed_choice, fixed_choice.WhichOneof('choice'))\n return self._constraint_for_slice_element(slice_element, builder, fixed_message, _MessageType.FIXED)", "docstring": "Builds an expression representing the slice constraint's fixed value.\n\n Builds a FHIRPath expression representing the constraint imposed by the\n given slice's fixed value. Returns `None` if the `slice_element` does\n not define a fixed value. 
Given a message for a pattern like:\n\n \"fixedCodeableConcept\" : {\n \"coding\" : [{\n \"system\" : \"http://example.org/canonical\",\n \"code\" : \"a-code\",\n }]\n }\n\n Builds a constraint like:\n\n coding[0].system = \"http://example.org/canonical\" and code = \"a-code\"\n and display.empty() and version.empty() and userSelected.empty()\n ).exists()\n\n See more information on slices with fixed values at the following links:\n https://build.fhir.org/profiling.html#discriminator\n https://build.fhir.org/profiling-examples.html#contacts\n\nArgs:\n builder: An expression builder to the slice element.\n slice_element: The element definition describing the slice constraint.\n\nReturns:\n An expression builder defining the constraint described by\n `slice_element`'s `fixed` field or `None` if the `slice_element` does\n not have a `fixed` field.", "source": "github_repos"} -{"code": "def quant_noise(module: nn.Module, p: float, block_size: int):\n if p <= 0:\n return module\n if not isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)):\n raise NotImplementedError('Module unsupported for quant_noise.')\n is_conv = module.weight.ndim == 4\n if not is_conv:\n if module.weight.size(1) % block_size != 0:\n raise AssertionError('Input features must be a multiple of block sizes')\n elif module.kernel_size == (1, 1):\n if module.in_channels % block_size != 0:\n raise AssertionError('Input channels must be a multiple of block sizes')\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n if k % block_size != 0:\n raise AssertionError('Kernel size must be a multiple of block size')\n\n def _forward_pre_hook(mod, input):\n if mod.training:\n if not is_conv:\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n else:\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n mask = mask.to(torch.bool)\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n module.register_forward_pre_hook(_forward_pre_hook)\n return module", "docstring": "From:\n https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/quant_noise.py\n\n Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product\n Quantization as described in \"Training with Quantization Noise for Extreme Model Compression\"\n\nArgs:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional weights, see \"And the Bit Goes Down:\n Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping\n blocks", 
"source": "github_repos"} -{"code": "def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):\n super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)\n normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())\n if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents:\n normalizer_class = getattr(normalizers, normalizer_state.pop('type'))\n normalizer_state['lowercase'] = do_lower_case\n normalizer_state['strip_accents'] = strip_accents\n self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)\n vocab = self.backend_tokenizer.get_vocab()\n self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))\n self.do_lower_case = do_lower_case", "docstring": "Construct a \"fast\" RoFormer tokenizer (backed by HuggingFace's *tokenizers* library).\n\n [`RoFormerTokenizerFast`] is almost identical to [`BertTokenizerFast`] and runs end-to-end tokenization:\n punctuation splitting and wordpiece. There are some difference between them when tokenizing Chinese.\n\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n\nExample:\n ```python\n >>> from transformers import RoFormerTokenizerFast\n\n >>> tokenizer = RoFormerTokenizerFast.from_pretrained(\"junnyu/roformer_chinese_base\")\n >>> tokenizer.tokenize(\"今天天气非常好。\")\n ['今', '天', '天', '气', '非常', '好', '。']\n ```", "source": "github_repos"} -{"code": "def softplus(x):\n if any_symbolic_tensors((x,)):\n return Softplus().symbolic_call(x)\n return backend.nn.softplus(x)", "docstring": "Softplus activation function.\n\n It is defined as `f(x) = log(exp(x) + 1)`, where `log` is the natural\n logarithm and `exp` is the exponential function.\n\nArgs:\n x: Input tensor.\n\nReturns:\n A tensor with the same shape as `x`.\n\nExample:\n >>> x = keras.ops.convert_to_tensor([-0.555, 0.0, 0.555])\n >>> keras.ops.softplus(x)\n array([0.45366603, 0.6931472, 1.008666], dtype=float32)", "source": "github_repos"} -{"code": "def __init__(self, feature_extractor, tokenizer):\n super().__init__(feature_extractor, tokenizer)\n self.current_processor = self.feature_extractor\n self._in_target_context_manager = False", "docstring": "Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor\n class.\n\n [`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`TTokenizer`]. See\n [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.\n\nArgs:\n feature_extractor (`EncodecFeatureExtractor`):\n An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input.\n tokenizer (`T5Tokenizer`):\n An instance of [`T5Tokenizer`]. 
The tokenizer is a required input.", "source": "github_repos"} -{"code": "def forward(self, index: int, output: torch.Tensor, multi_stage_features: List[torch.Tensor], multi_stage_positional_embeddings: List[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, query_embeddings: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):\n level_index = index % self.num_feature_levels\n attention_mask[torch.where(attention_mask.sum(-1) == attention_mask.shape[-1])] = False\n output, cross_attn_weights = self.cross_attn(output, multi_stage_features[level_index], memory_mask=attention_mask, memory_key_padding_mask=None, pos=multi_stage_positional_embeddings[level_index], query_pos=query_embeddings)\n output, self_attn_weights = self.self_attn(output, output_mask=None, output_key_padding_mask=None, query_pos=query_embeddings)\n output = self.ffn(output)\n outputs = (output,)\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n return outputs", "docstring": "Args:\n index (`int`): index of the layer in the Transformer decoder.\n output (`torch.FloatTensor`): the object queries of shape `(N, batch, hidden_dim)`\n multi_stage_features (`List[torch.Tensor]`): the multi-scale features from the pixel decoder.\n multi_stage_positional_embeddings (`List[torch.Tensor]`):\n positional embeddings for the multi_stage_features\n attention_mask (`torch.FloatTensor`): attention mask for the masked cross attention layer\n query_embeddings (`torch.FloatTensor`, *optional*):\n position embeddings that are added to the queries and keys in the self-attention layer.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.", "source": "github_repos"} -{"code": "def _unique_urls(urls: Iterable[str]) -> Iterable[str]:\n seen: Set[str] = set()\n for url in urls:\n if url not in seen:\n seen.add(url)\n yield url", "docstring": "Filters URLs to remove duplicates.\n\nArgs:\n urls: The URLs to filter.\n\nYields:\n The URLs filtered to only those without duplicates.", "source": "github_repos"} -{"code": "def broadcast_recv_v2(shape, dtype, group_size, group_key, instance_key, communication_hint='auto', timeout=0):\n return gen_collective_ops.collective_bcast_recv_v2(T=dtype, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape, communication_hint=communication_hint.lower(), timeout_seconds=timeout)", "docstring": "Receives a broadcasts tensor, across devices.\n\nArgs:\n shape: an int tensor. Shape of the tensor to be received.\n dtype: Type of the tensor to be received.\n group_size: an int32 tensor. One plus the number of receiving tensors, i.e.\n the total number of devices participating. Each tensor must reside on a\n different device.\n group_key: an int32 tensor identifying the group of devices.\n instance_key: an int32 tensor identifying the participating group of Ops.\n communication_hint: preferred collective communication. The implementation\n may fall back to another mechanism. Options include `auto`, `ring`, and\n `nccl`.\n timeout: If set to a non zero, set a completion timeout to detect staleness.\n If the timer goes off, a DeadlineExceededError is raised.\n The timeout value in seconds. 
This feature is experimental.\n\nReturns:\n An Op implementing the broadcast receive.", "source": "github_repos"} -{"code": "def get_text_features(self, input_ids, attention_mask=None, position_ids=None, params: Optional[dict]=None, dropout_rng: jax.random.PRNGKey=None, train=False):\n if position_ids is None:\n position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)\n if attention_mask is None:\n attention_mask = jnp.ones_like(input_ids)\n rngs = {}\n if dropout_rng is not None:\n rngs['dropout'] = dropout_rng\n\n def _get_features(module, input_ids, attention_mask, position_ids, deterministic):\n text_outputs = module.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic)\n pooled_output = text_outputs[1]\n text_features = module.text_projection(pooled_output)\n return text_features\n return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), not train, method=_get_features, rngs=rngs)", "docstring": "Args:\n input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n\nReturns:\n text_features (`jnp.ndarray` of shape `(batch_size, output_dim`): The text embeddings obtained by applying\n the projection layer to the pooled output of [`FlaxCLIPTextModel`].\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, FlaxCLIPModel\n\n >>> model = FlaxCLIPModel.from_pretrained(\"openai/clip-vit-base-patch32\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-base-patch32\")\n\n >>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"np\")\n >>> text_features = model.get_text_features(**inputs)\n ```", "source": "github_repos"} -{"code": "def hinge(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = _maybe_convert_labels(y_true)\n return backend.mean(math_ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1)", "docstring": "Computes the hinge loss between `y_true` and `y_pred`.\n\n `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.choice([-1, 1], size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.hinge(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(),\n ... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1))\n\nArgs:\n y_true: The ground truth values. `y_true` values are expected to be -1 or 1.\n If binary (0 or 1) labels are provided they will be converted to -1 or 1.\n shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\nReturns:\n Hinge loss values. shape = `[batch_size, d0, .. 
dN-1]`.", "source": "github_repos"} -{"code": "def get_flat_tensor_shapes(element_spec):\n return [spec.shape for spec in get_flat_tensor_specs(element_spec)]", "docstring": "Returns a list `tf.TensorShapes`s for the element tensor representation.\n\nArgs:\n element_spec: A nested structure of `tf.TypeSpec` objects representing to\n element type specification.\n\nReturns:\n A list `tf.TensorShapes`s for the element tensor representation.", "source": "github_repos"} -{"code": "def create_distributed_mesh(mesh_dims: Union[List[Tuple[str, int]], Dict[str, int]], mesh_name: str='', local_devices: Optional[List[Union[tf_device.DeviceSpec, str]]]=None, device_type: Optional[str]=None, use_xla_spmd: bool=layout.USE_XLA_SPMD) -> layout.Mesh:\n if isinstance(mesh_dims, dict):\n mesh_dims = list(mesh_dims.items())\n dim_names, shape = zip(*mesh_dims)\n if not accelerator_util.is_initialized():\n raise ValueError('Accelerators are uninitialized, please run dtensor.initialize_accelerator_system() first.')\n if device_type and device_type.upper() == 'TPU':\n if local_devices is not None:\n raise ValueError(f'Do not specify devices for {device_type.upper()} meshes. Using a partial list of devices for {device_type.upper()} is not supported.')\n device_specs, device_type = _make_device_specs(local_devices, device_type)\n if device_type.upper() in ['CPU', 'GPU']:\n local_spec = tf_device.DeviceSpec(job=config.job_name(), replica=0, task=config.client_id())\n device_specs = [local_spec.make_merged_spec(d) for d in device_specs]\n num_global_devices = len(device_specs) * config.num_clients()\n if np.prod(shape) != num_global_devices:\n raise ValueError(f'Global number of devices ({len(device_specs)} per client * {config.num_clients()} clients = {num_global_devices}) must be equal to total size of the mesh of shape {shape}')\n global_device_ids = np.arange(num_global_devices).reshape(shape)\n flattened = np.ravel(global_device_ids).tolist()\n start_idx = len(device_specs) * config.client_id()\n local_device_ids = flattened[start_idx:start_idx + len(device_specs)]\n mesh = layout.Mesh(dim_names=dim_names, global_device_ids=global_device_ids, local_device_ids=local_device_ids, local_devices=device_specs, mesh_name=mesh_name, use_xla_spmd=use_xla_spmd)\n _print_context(num_global_devices, config.num_clients(), config.client_id(), device_type, mesh)\n return mesh\n if device_type.upper() == 'TPU':\n mesh = tpu_util.create_tpu_mesh(mesh_dim_names=dim_names, mesh_shape=shape, mesh_name=mesh_name, use_xla_spmd=use_xla_spmd)\n _print_context(config.num_global_devices(device_type), config.num_clients(), config.client_id(), device_type, mesh)\n return mesh\n raise ValueError(f'Device type {device_type} is not CPU, GPU or TPU')", "docstring": "Creates a distributed mesh.\n\n This is similar to `create_mesh`, but with a different set of arguments to\n create a mesh that spans evenly across a multi-client DTensor cluster.\n\n For CPU and GPU meshes, users can choose to use fewer local devices than what\n is available `local_devices`.\n\n For TPU, only meshes that uses all TPU cores is supported by the DTensor\n runtime.\n\nArgs:\n mesh_dims: A dict of dim_name: dim_size, or a list of (dim_name, dim_size)\n tuples. e.g. `{'x' : 4, 'y' : 1}` or `[('x', 4), ('y', 1)]`.\n mesh_name: Name of the created mesh. Defaults to ''.\n local_devices: String representations of devices to use. This is the device\n part of tf.DeviceSpec, e.g. 'CPU:0'. 
Defaults to all available local\n logical devices.\n device_type: Type of device to build the mesh for. Defaults to 'CPU'.\n Supported values are 'CPU', 'GPU', 'TPU'.6\n use_xla_spmd: Boolean when True, will use XLA SPMD instead of DTensor SPMD.\n\nReturns:\n A mesh that spans evenly across all DTensor clients in the cluster.", "source": "github_repos"} -{"code": "def _generate_signatures(signature_functions: dict[str, Callable[..., Any]], object_map: object_identity.ObjectIdentityDictionary, defaults=None):\n signatures = {}\n for signature_key, function in sorted(signature_functions.items()):\n if function.graph.captures:\n argument_inputs = function.graph.inputs[:-len(function.graph.captures)]\n else:\n argument_inputs = function.graph.inputs\n mapped_inputs, exterior_argument_placeholders = _map_function_arguments_to_created_inputs(argument_inputs, signature_key, function.name, defaults)\n kwarg_names = list(sorted(object_map[function].function.structured_input_signature[1].keys()))\n outputs = object_map[function](**{kwarg_name: mapped_input for kwarg_name, mapped_input in zip(kwarg_names, mapped_inputs)})\n signatures[signature_key] = signature_def_utils.build_signature_def(_tensor_dict_to_tensorinfo(exterior_argument_placeholders), _tensor_dict_to_tensorinfo(outputs), method_name=signature_constants.PREDICT_METHOD_NAME, defaults=defaults.get(signature_key, None))\n return signatures", "docstring": "Validates and calls `signature_functions` in the exported graph.\n\nArgs:\n signature_functions: A dictionary mapping string keys to concrete TensorFlow\n functions (e.g. from `signature_serialization.canonicalize_signatures`)\n which will be used to generate SignatureDefs.\n object_map: A dictionary that contains mappings from signature functions to\n concrete functions in the exported graph.\n defaults: A dictionary mapping signature_key to dictionary of\n user_specified_name to Tensor representing default values.\n\nReturns:\n Each function in the `signature_functions` dictionary is called with\n placeholder Tensors, generating a function call operation and output\n Tensors. The placeholder Tensors, the function call operation, and the\n output Tensors from the function call are part of the default Graph.\n\n This function then returns a dictionary with the same structure as\n `signature_functions`, with the concrete functions replaced by SignatureDefs\n implicitly containing information about how to call each function from a\n TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference\n the generated placeholders and Tensor outputs by name.\n\n The caller is expected to include the default Graph set while calling this\n function as a MetaGraph in a SavedModel, including the returned\n SignatureDefs as part of that MetaGraph.", "source": "github_repos"} -{"code": "def get_extra_inputs():\n g = ops.get_default_graph()\n if isinstance(g, _FuncGraph):\n return g.extra_inputs\n else:\n return []", "docstring": "Returns the captured input tensors by the function.\n\nReturns:\n If the default graph is being used to define a function, the\n returned list of tensors are those accessed inside the function body\n but defined outside the function body so far. 
Otherwise, returns an\n empty list.", "source": "github_repos"} -{"code": "def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n pass", "docstring": "Returns a `TensorSequenceLengthPair`.\n\nArgs:\n transformation_cache: A `FeatureTransformationCache` object to access\n features.\n state_manager: A `StateManager` to create / access resources such as\n lookup tables.", "source": "github_repos"} -{"code": "def _get_imports_for_module(module: str, output_package: str, symbols_by_module: Mapping[str, set[_Entrypoint]], generated_imports_by_module: Mapping[str, set[str]], file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool, subpackage_rewrite: Optional[str]) -> str:\n content = ''\n symbol_imports = list(symbols_by_module[module])\n symbol_imports = sorted(symbol_imports, key=lambda s: f'{s.exported_symbol.file_name}:{s.name}')\n generated_imports = sorted(generated_imports_by_module[module])\n for imp in generated_imports:\n if subpackage_rewrite:\n imp = imp.replace(output_package, subpackage_rewrite)\n last_dot = imp.rfind('.')\n if use_lazy_loading:\n content += f\" '{imp[last_dot + 1:]}': ('', '{imp}'),\\n\"\n else:\n content += f'from {imp[:last_dot]} import {imp[last_dot + 1:]}\\n'\n for s in symbol_imports:\n content += f'{s.get_import(file_prefixes_to_strip, module_prefix, use_lazy_loading=use_lazy_loading)}\\n'\n return content", "docstring": "Returns the imports for a module.\n\nArgs:\n module: The module to get imports for.\n output_package: The package to use for the imports.\n symbols_by_module: The symbols that should be exposed by each module.\n generated_imports_by_module: The sub-modules that should be exposed by each\n module.\n file_prefixes_to_strip: The prefixes to strip from the file names of the\n imports.\n module_prefix: A prefix to add to the non-generated imports.\n use_lazy_loading: Whether to use lazy loading or not.\n subpackage_rewrite: The subpackage to use for the imports.", "source": "github_repos"} -{"code": "def preferred_version(api_name: str, key: str=None) -> str:\n api_url = 'https://discovery.googleapis.com/discovery/v1/apis?name=%s&key=%s&preferred=true' % (api_name, key or '')\n print('DISCOVERY FETCH:', api_url)\n api_info = json.load(request.urlopen(api_url))\n return api_info['items'][0]['version']", "docstring": "Helper to get default API version.\n\nArgs:\n api_name: The API endpoint name, for example dfareporting.\n key: Optional Google API Key: https://cloud.google.com/docs/authentication/api-keys\n\n Returns (str):\n The API version.\n\nRaises:\n HttpError: If the wrong API values are specified.", "source": "github_repos"} -{"code": "def _all_gather(self, input_tensor: core.TensorLike, options: Optional[collective_util.Options]) -> core.Tensor:\n instance_key = self._next_instance_key()\n options = self._options.merge(options)\n ordering_token = self._get_ordering_token()\n with ops.device(self._device):\n return collective_ops.all_gather_v2(input_tensor, self._group_size, self._group_key, instance_key, communication_hint=options.implementation.value, timeout=options.timeout_seconds, ordering_token=ordering_token)", "docstring": "All-gather a dense tensor.\n\nArgs:\n input_tensor: a dense tensor. It must have the same shape on all replicas.\n options: an optional tf.distribute.experimental.CommunicationOptions. 
If\n provided, it overrides the default options.\n\nReturns:\n The reduced tensor.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n lm_loss = None\n if labels is not None:\n lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return (lm_loss,) + output if lm_loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, RobertaForCausalLM, RobertaConfig\n >>> import torch\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"FacebookAI/roberta-base\")\n >>> config = RobertaConfig.from_pretrained(\"FacebookAI/roberta-base\")\n >>> config.is_decoder = True\n >>> model = RobertaForCausalLM.from_pretrained(\"FacebookAI/roberta-base\", config=config)\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n >>> prediction_logits = outputs.logits\n ```", "source": "github_repos"} -{"code": "def HandleCurlError(e, logger=None):\n if not logger:\n logger = logging\n code = e.args[0]\n msg = e.args[1]\n if code in (pycurl.E_UNSUPPORTED_PROTOCOL, pycurl.E_URL_MALFORMAT, pycurl.E_SSL_ENGINE_NOTFOUND, pycurl.E_SSL_ENGINE_SETFAILED, pycurl.E_SSL_CACERT_BADFILE):\n raise error.ConfigurationError(msg)\n if code in (pycurl.E_FAILED_INIT, pycurl.E_COULDNT_CONNECT, pycurl.E_PARTIAL_FILE, pycurl.E_WRITE_ERROR, pycurl.E_READ_ERROR, pycurl.E_OPERATION_TIMEOUTED, pycurl.E_SSL_CONNECT_ERROR, pycurl.E_COULDNT_RESOLVE_PROXY, pycurl.E_COULDNT_RESOLVE_HOST, pycurl.E_GOT_NOTHING):\n logger.debug('Possibly transient error: %s', msg)\n return\n if code in (pycurl.E_SSL_PEER_CERTIFICATE,):\n raise error.SourceUnavailable(msg)\n raise error.Error(msg)", "docstring": "Handle a curl exception.\n\n See http://curl.haxx.se/libcurl/c/libcurl-errors.html for a list of codes.\n\nArgs:\n e: pycurl.error\n logger: logger object\n\nRaises:\n ConfigurationError:\n PermissionDenied:\n SourceUnavailable:\n Error:", "source": "github_repos"} -{"code": "def __init__(self, col, row, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorToeplitz'):\n parameters = dict(col=col, row=row, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n with ops.name_scope(name, values=[row, col]):\n self._row = linear_operator_util.convert_nonref_to_tensor(row, name='row')\n self._col = linear_operator_util.convert_nonref_to_tensor(col, name='col')\n self._check_row_col(self._row, self._col)\n if is_square is False:\n raise ValueError('Only square Toeplitz operators currently supported.')\n is_square = True\n super(LinearOperatorToeplitz, self).__init__(dtype=self._row.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)", "docstring": "Initialize a `LinearOperatorToeplitz`.\n\nArgs:\n col: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.\n The first column of the operator. Allowed dtypes: `float16`, `float32`,\n `float64`, `complex64`, `complex128`. Note that the first entry of\n `col` is assumed to be the same as the first entry of `row`.\n row: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.\n The first row of the operator. Allowed dtypes: `float16`, `float32`,\n `float64`, `complex64`, `complex128`. Note that the first entry of\n `row` is assumed to be the same as the first entry of `col`.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. 
If `diag.dtype` is real, this is auto-set to `True`.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.", "source": "github_repos"} -{"code": "def merge(value_list: List[Any], merge_fn: Optional[Callable[[KeyPath, Any, Any], Any]]=None) -> Any:\n if not isinstance(value_list, list):\n raise TypeError('value_list should be a list')\n if not value_list:\n return None\n new_value = canonicalize(value_list[0], sparse_list_as_dict=True)\n for value in value_list[1:]:\n if value is None:\n continue\n new_value = merge_tree(new_value, canonicalize(value, sparse_list_as_dict=True), merge_fn)\n\n def _listify_dict_equivalent(p, v):\n del p\n if isinstance(v, dict):\n v = try_listify_dict_with_int_keys(v, True)[0]\n return v\n return transform(new_value, _listify_dict_equivalent)", "docstring": "Merge a list of hierarchical values.\n\n Example::\n\n original = {\n 'a': 1,\n 'b': 2,\n 'c': {\n 'd': 'foo',\n 'e': 'bar'\n }\n }\n patch = {\n 'b': 3,\n 'd': [1, 2, 3],\n 'c': {\n 'e': 'bar2',\n 'f': 10\n }\n }\n output = pg.utils.merge([original, patch])\n assert output == {\n 'a': 1,\n # b is updated.\n 'b': 3,\n 'c': {\n 'd': 'foo',\n # e is updated.\n 'e': 'bar2',\n # f is added.\n 'f': 10\n },\n # d is inserted.\n 'd': [1, 2, 3]\n })\n\nArgs:\n value_list: A list of hierarchical values to merge. Later value will be\n treated as updates if it's a dict or otherwise a replacement of former\n value. The merge process will keep input values intact.\n merge_fn: A function to handle value merge that will be called for updated\n or added keys. If a branch is added/updated, the root of branch will be\n passed to merge_fn. the signature of function is: `(path, left_value,\n right_value) -> final_value` If a key is only present in src dict,\n old_value is MISSING_VALUE; If a key is only present in dest dict,\n new_value is MISSING_VALUE; otherwise both new_value and old_value are\n filled. 
If final_value is MISSING_VALUE for a path, it will be removed\n from its parent collection.\n\nReturns:\n A merged value.\n\nRaises:\n TypeError: If `value_list` is not a list.\n KeyError: If new key is found while not allowed.", "source": "github_repos"} -{"code": "def __init__(self, loss_tensor, fail_on_nan_loss=True):\n self._loss_tensor = loss_tensor\n self._fail_on_nan_loss = fail_on_nan_loss", "docstring": "Initializes a `NanTensorHook`.\n\nArgs:\n loss_tensor: `Tensor`, the loss tensor.\n fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.", "source": "github_repos"} -{"code": "def extract_variable_info(kwargs: Any) -> Tuple[str, Tuple[int, ...], dtypes.DType, Callable[[], Any], Optional[int]]:\n\n def get_restore_uid(initial_value: Callable[..., Any]) -> int | None:\n return getattr(initial_value, 'restore_uid', None)\n if isinstance(kwargs['initial_value'], functools.partial) and ('shape' in kwargs['initial_value'].keywords or kwargs['initial_value'].args):\n if 'shape' in kwargs['initial_value'].keywords:\n shape = kwargs['initial_value'].keywords['shape']\n else:\n shape = kwargs['initial_value'].args[0]\n return (kwargs['name'], shape, kwargs['initial_value'].keywords.get('dtype', kwargs['dtype']), kwargs['initial_value'].func, get_restore_uid(kwargs['initial_value'].func))\n elif 'shape' not in kwargs or kwargs['shape'] is None or (not callable(kwargs['initial_value'])):\n raise ValueError('Unable to extract initializer function and shape from {}. Please either pass a function that expects a shape and dtype as the initial value for your variable or functools.partial object with the shape and dtype kwargs set. This is needed so that we can initialize the shards of the ShardedVariable locally.'.format(kwargs['initial_value']))\n else:\n return (kwargs['name'], kwargs['shape'], kwargs['dtype'], kwargs['initial_value'], get_restore_uid(kwargs['initial_value']))", "docstring": "Extracts the variable creation attributes from the kwargs.\n\nArgs:\n kwargs: a dict of keyword arguments that were passed to a variable creator\n scope.\n\nReturns:\n A tuple of variable name, shape, dtype, initialization function,\n restore_uid.", "source": "github_repos"} -{"code": "def diff(a, n=1, axis=-1):\n return Diff(n=n, axis=axis)(a)", "docstring": "Calculate the n-th discrete difference along the given axis.\n\n The first difference is given by `out[i] = a[i+1] - a[i]` along\n the given axis, higher differences are calculated by using `diff`\n recursively.\n\nArgs:\n a: Input tensor.\n n: The number of times values are differenced. 
Defaults to `1`.\n axis: Axis to compute discrete difference(s) along.\n Defaults to `-1`.(last axis).\n\nReturns:\n Tensor of diagonals.\n\nExample:\n >>> from keras.src import ops\n >>> x = ops.convert_to_tensor([1, 2, 4, 7, 0])\n >>> ops.diff(x)\n array([ 1, 2, 3, -7])\n >>> ops.diff(x, n=2)\n array([ 1, 1, -10])\n\n >>> x = ops.convert_to_tensor([[1, 3, 6, 10], [0, 5, 6, 8]])\n >>> ops.diff(x)\n array([[2, 3, 4],\n [5, 1, 2]])\n >>> ops.diff(x, axis=0)\n array([[-1, 2, 0, -2]])", "source": "github_repos"} -{"code": "def _validate_inputs(self, graph_def, input_tensors):\n self._save_conversion_params_metric(graph_def)\n self._quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)\n self._validate_inference_input_output_types(self._quant_mode)\n if not self._is_unknown_shapes_allowed():\n for tensor in input_tensors:\n shape_list = tensor.shape.as_list()\n if None in shape_list[1:]:\n raise ValueError(\"None is only supported in the 1st dimension. Tensor '{0}' has invalid shape '{1}'.\".format(_get_tensor_name(tensor), shape_list))\n elif shape_list and shape_list[0] is None:\n shape = tensor.shape.as_list()\n shape[0] = 1\n tensor.set_shape(shape)\n if self._trackable_obj is None or not hasattr(self._trackable_obj, 'graph_debug_info'):\n self._debug_info = _get_debug_info(_build_debug_info_func(self._funcs[0].graph), graph_def)\n else:\n self._debug_info = _get_debug_info(_convert_debug_info_func(self._trackable_obj.graph_debug_info), graph_def)", "docstring": "Validate the input parameters.\n\nArgs:\n graph_def: The TensorFlow GraphDef.\n input_tensors: List of input tensors.\n\nRaises:\n ValueError: Input shape is not specified. Invalid quantization parameters.", "source": "github_repos"} -{"code": "def enable_graph_building_optimization(fn: _F) -> _F:\n\n def wrapper(*args, **kwargs):\n if flags.config().graph_building_optimization.value():\n return fn(*args, **kwargs)\n flags.config().graph_building_optimization.reset(True)\n try:\n return fn(*args, **kwargs)\n finally:\n flags.config().graph_building_optimization.reset(False)\n return wrapper", "docstring": "Decorator for enabling graph_building_optimization on a test.\n\n This function returns a decorator intended to be applied to test methods in\n a `tf.test.TestCase` class. 
Doing so will enable graph_building_optimization,\n execute the test, then reset the feature flag to its default value.\n\nExample:\n class MyTest(test.TestCase):\n\n @enable_graph_building_optimization\n def testFoo(self):\n ...\n\nArgs:\n fn: the function to be wrapped.\n\nReturns:\n The wrapped function.", "source": "github_repos"} -{"code": "def lu_reconstruct(lower_upper, perm, validate_args=False, name=None):\n with ops.name_scope(name or 'lu_reconstruct'):\n lower_upper = ops.convert_to_tensor(lower_upper, dtype_hint=dtypes.float32, name='lower_upper')\n perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')\n assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n lower_upper = array_ops.identity(lower_upper)\n perm = array_ops.identity(perm)\n shape = array_ops.shape(lower_upper)\n lower = set_diag(band_part(lower_upper, num_lower=-1, num_upper=0), array_ops.ones(shape[:-1], dtype=lower_upper.dtype))\n upper = band_part(lower_upper, num_lower=0, num_upper=-1)\n x = math_ops.matmul(lower, upper)\n if lower_upper.shape is None or lower_upper.shape.rank is None or lower_upper.shape.rank != 2:\n batch_size = math_ops.reduce_prod(shape[:-2])\n d = shape[-1]\n x = array_ops.reshape(x, [batch_size, d, d])\n perm = array_ops.reshape(perm, [batch_size, d])\n perm = map_fn.map_fn(array_ops.invert_permutation, perm)\n batch_indices = array_ops.broadcast_to(math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])\n x = array_ops.gather_nd(x, array_ops_stack.stack([batch_indices, perm], axis=-1))\n x = array_ops.reshape(x, shape)\n else:\n x = array_ops.gather(x, array_ops.invert_permutation(perm))\n x.set_shape(lower_upper.shape)\n return x", "docstring": "The reconstruct one or more matrices from their LU decomposition(s).\n\nArgs:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,\n matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) =\n X` then `perm = argmax(P)`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., 'lu_reconstruct').\n\nReturns:\n x: The original input to `tf.linalg.lu`, i.e., `x` as in,\n `lu_reconstruct(*tf.linalg.lu(x))`.\n\n #### Examples\n\n ```python\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n\n x = [[[3., 4], [1, 2]],\n [[7., 8], [3, 4]]]\n x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))\n tf.assert_near(x, x_reconstructed)\n # ==> True\n ```", "source": "github_repos"} -{"code": "def get_next_as_list(self, name=None):\n del name\n with ops.device(self._worker):\n return self._format_data_list_with_options(self._iterator.get_next())", "docstring": "Get next element from the underlying iterator.\n\n Runs the iterator get_next() within a device scope. 
Since this doesn't use\n get_next_as_optional(), it is considerably faster than get_next_as_list(),\n but it raises EOFError if any of the device doesn't get any data.\n\nArgs:\n name: not used.\n\nReturns:\n A list consisting of the next data from each device.", "source": "github_repos"} -{"code": "def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):\n audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0)\n music_tokens_list = []\n for chunk_i in audio_chunks:\n music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level)\n music_tokens_list.append(music_tokens_i)\n music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)]\n return music_tokens", "docstring": "Transforms the `input_audio` to a discrete representation made out of `music_tokens`.\n\nArgs:\n input_audio (`torch.Tensor`):\n Raw audio which will be encoded to its discrete representation using the codebook. The closest `code`\n form the codebook will be computed for each sequence of samples.\n start_level (`int`, *optional*, defaults to 0):\n Level at which the encoding process will start. Default to 0.\n end_level (`int`, *optional*):\n Level at which the encoding process will start. Default to None.\n bs_chunks (int, *optional*, defaults to 1):\n Number of chunks of raw audio to process at the same time.", "source": "github_repos"} -{"code": "def show_path(from_op, tensors, sources):\n if isinstance(from_op, tensor_lib.Tensor):\n from_op = from_op.op\n if not isinstance(tensors, list):\n tensors = [tensors]\n final_ops = [_as_operation(tensor) for tensor in tensors]\n visited_ops = set((x.op for x in sources))\n ops_to_visit = list(final_ops)\n some_op_output = {}\n while ops_to_visit:\n op = ops_to_visit.pop()\n if op in visited_ops:\n continue\n visited_ops.add(op)\n if op == from_op:\n path_op = op\n path = [path_op]\n while path_op not in final_ops:\n path_op = some_op_output[path_op]\n path.append(path_op)\n return ' <- '.join(('%s (%s)' % (x.name, x.type) for x in reversed(path)))\n else:\n for inp in graph_inputs(op):\n if inp not in visited_ops and inp not in sources:\n some_op_output[inp] = op\n ops_to_visit.append(inp)\n return '??'", "docstring": "Find one path from `from_op` to any of `tensors`, ignoring `sources`.\n\nArgs:\n from_op: A `tf.Operation`.\n tensors: A `tf.Operation`, a `tf.Tensor`, or a list thereof.\n sources: A list of `tf.Tensor`.\n\nReturns:\n A python string containing the path, or \"??\" if none is found.", "source": "github_repos"} -{"code": "def import_event(tensor, name=None):\n return gen_summary_ops.import_event(_summary_state.writer._resource, tensor, name=name)", "docstring": "Writes a `tf.compat.v1.Event` binary proto.\n\n This can be used to import existing event logs into a new summary writer sink.\n Please note that this is lower level than the other summary functions and\n will ignore the `tf.summary.should_record_summaries` setting.\n\nArgs:\n tensor: A `tf.Tensor` of type `string` containing a serialized\n `tf.compat.v1.Event` proto.\n name: A name for the operation (optional).\n\nReturns:\n The created `tf.Operation`.", "source": "github_repos"} -{"code": "def __init__(self, group_key_start=1):\n self._group_key = group_key_start\n self._instance_key_table = {}\n self._lock = threading.Lock()\n self._known_groups = {}", "docstring": "Initializes the object.\n\nArgs:\n group_key_start: the starting integer of group key.", "source": "github_repos"} -{"code": "def 
get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is not None:\n return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)\n return [1] + [0] * len(token_ids_0)", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`): List of IDs.\n token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github_repos"} -{"code": "def compute_predicted_aligned_error(logits: torch.Tensor, max_bin: int=31, no_bins: int=64, **kwargs) -> Dict[str, torch.Tensor]:\n boundaries = torch.linspace(0, max_bin, steps=no_bins - 1, device=logits.device)\n aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)\n predicted_aligned_error, max_predicted_aligned_error = _calculate_expected_aligned_error(alignment_confidence_breaks=boundaries, aligned_distance_error_probs=aligned_confidence_probs)\n return {'aligned_confidence_probs': aligned_confidence_probs, 'predicted_aligned_error': predicted_aligned_error, 'max_predicted_aligned_error': max_predicted_aligned_error}", "docstring": "Computes aligned confidence metrics from logits.\n\nArgs:\n logits: [*, num_res, num_res, num_bins] the logits output from\n PredictedAlignedErrorHead.\n max_bin: Maximum bin value\n no_bins: Number of bins\n\nReturns:\n aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted\n aligned error probabilities over bins for each residue pair.\n predicted_aligned_error: [*, num_res, num_res] the expected aligned distance\n error for each pair of residues.\n max_predicted_aligned_error: [*] the maximum predicted error possible.", "source": "github_repos"} -{"code": "def strip_strings(model):\n model.description = None\n for subgraph in model.subgraphs:\n subgraph.name = None\n for tensor in subgraph.tensors:\n tensor.name = None\n model.signatureDefs = None", "docstring": "Strips all nonessential strings from the model to reduce model size.\n\n We remove the following strings:\n (find strings by searching \":string\" in the tensorflow lite flatbuffer schema)\n 1. Model description\n 2. SubGraph name\n 3. 
Tensor names\n We retain OperatorCode custom_code and Metadata name.\n\nArgs:\n model: The model from which to remove nonessential strings.", "source": "github_repos"} -{"code": "def _split_bytecode(bytecode: list[opcodes.Opcode], processed_blocks: set[Block], python_version) -> list[Block]:\n targets = {op.target for op in bytecode if op.target}\n blocks = []\n code = []\n prev_block: Block = None\n i = 0\n while i < len(bytecode):\n op = bytecode[i]\n if python_version >= (3, 12) and isinstance(op, opcodes.SEND):\n if code:\n prev_block = Block(code)\n blocks.append(prev_block)\n code = []\n new_blocks, i = _preprocess_async_for_and_yield(i, bytecode, prev_block, processed_blocks)\n blocks.extend(new_blocks)\n prev_block = blocks[-1]\n continue\n code.append(op)\n if op.no_next() or op.does_jump() or op.pops_block() or (op.next is None) or (op.next in targets and (not isinstance(op.next, opcodes.GET_ANEXT) or python_version < (3, 12))):\n prev_block = Block(code)\n blocks.append(prev_block)\n code = []\n i += 1\n return blocks", "docstring": "Given a sequence of bytecodes, return basic blocks.\n\n This will split the code at \"basic block boundaries\". These occur at\n every instruction that is jumped to, and after every instruction that jumps\n somewhere else (or returns / aborts).\n\nArgs:\n bytecode: A list of instances of opcodes.Opcode. (E.g. returned from\n opcodes.dis())\n\nReturns:\n A list of _Block instances.", "source": "github_repos"} -{"code": "def floating_point_ops(self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool=True) -> int:\n return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)", "docstring": "Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a\n batch with this transformer model. Default approximation neglects the quadratic dependency on the number of\n tokens (valid if `12 * d_model << sequence_length`) as laid out in [this\n paper](https://huggingface.co/papers/2001.08361) section 2.1. Should be overridden for transformers with parameter\n re-use e.g. 
Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.\n\nArgs:\n batch_size (`int`):\n The batch size for the forward pass.\n\n sequence_length (`int`):\n The number of tokens in each line of the batch.\n\n exclude_embeddings (`bool`, *optional*, defaults to `True`):\n Whether or not to count embedding and softmax operations.\n\nReturns:\n `int`: The number of floating-point operations.", "source": "github_repos"} -{"code": "def execute(self):\n for name_context, spec in zip(self._map_task.name_contexts, self._map_task.operations):\n op = create_operation(name_context, spec, self._counter_factory, None, self._state_sampler, test_shuffle_source=self._test_shuffle_source, test_shuffle_sink=self._test_shuffle_sink)\n self._ops.append(op)\n if hasattr(op.spec, 'input'):\n producer, output_index = op.spec.input\n self._ops[producer].add_receiver(op, output_index)\n if hasattr(op.spec, 'inputs'):\n for producer, output_index in op.spec.inputs:\n self._ops[producer].add_receiver(op, output_index)\n for ix, op in reversed(list(enumerate(self._ops))):\n _LOGGER.debug('Starting op %d %s', ix, op)\n op.start()\n for op in self._ops:\n op.finish()", "docstring": "Executes all the operation_specs.Worker* instructions in a map task.\n\n We update the map_task with the execution status, expressed as counters.\n\nRaises:\n RuntimeError: if we find more than on read instruction in task spec.\n TypeError: if the spec parameter is not an instance of the recognized\n operation_specs.Worker* classes.", "source": "github_repos"} -{"code": "def convert_n_to_tensor_or_composite(values, dtype=None, name=None) -> list[Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor, type(None)]]:\n return internal_convert_n_to_tensor_or_composite(values=values, dtype=dtype, name=name, as_ref=False)", "docstring": "Converts `values` to a list of `Output` or `CompositeTensor` objects.\n\n Any `CompositeTensor` objects in `values` are returned unmodified.\n\nArgs:\n values: A list of `None`, `CompositeTensor``, or objects that can be\n consumed by `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor`s or\n `CompositeTensor`s.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n\nReturns:\n A list of `Tensor` and/or `CompositeTensor` objects.\n\nRaises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.", "source": "github_repos"} -{"code": "def set_clang_compiler_path_win(environ_cp):\n default_clang_path = 'C:/Program Files/LLVM/bin/clang.exe'\n if not os.path.exists(default_clang_path):\n default_clang_path = shutil.which('clang') or ''\n clang_compiler_path = prompt_loop_or_load_from_env(environ_cp, var_name='CLANG_COMPILER_PATH', var_default=default_clang_path, ask_for_var='Please specify the path to clang executable.', check_success=os.path.exists, resolve_symlinks=True, error_msg='Invalid clang path. %s cannot be found. Note that Clang is nowpreferred compiler. 
You may use MSVC by removing --config=win_clang')\n write_action_env_to_bazelrc('CLANG_COMPILER_PATH', clang_compiler_path)\n write_to_bazelrc(f'build --repo_env=CC=\"{clang_compiler_path}\"')\n write_to_bazelrc(f'build --repo_env=BAZEL_COMPILER=\"{clang_compiler_path}\"')\n return clang_compiler_path", "docstring": "Set CLANG_COMPILER_PATH and environment variables.\n\n Loop over user prompts for clang path until receiving a valid response.\n Default is used if no input is given. Set CLANG_COMPILER_PATH and write\n environment variables CC and BAZEL_COMPILER to .bazelrc.\n\nArgs:\n environ_cp: (Dict) copy of the os.environ.\n\nReturns:\n string value for clang_compiler_path.", "source": "github_repos"} -{"code": "def sample_paths(self, times: types.RealTensor, num_samples: Optional[int]=1, initial_state: Optional[types.RealTensor]=None, random_type: Optional[random.RandomType]=None, seed: Optional[types.IntTensor]=None, swap_memory: Optional[bool]=True, time_step: Optional[types.RealTensor]=None, num_time_steps: Optional[types.IntTensor]=None, skip: Optional[types.IntTensor]=0, precompute_normal_draws: Optional[bool]=True, times_grid: Optional[types.RealTensor]=None, normal_draws: Optional[types.RealTensor]=None, watch_params: Optional[List[types.RealTensor]]=None, validate_args: Optional[bool]=False, name: Optional[str]=None) -> types.RealTensor:\n name = name or self._name + '_log_sample_path'\n with tf.name_scope(name):\n if initial_state is not None:\n initial_state = tf.math.log(tf.convert_to_tensor(initial_state, dtype_hint=tf.float64))\n if self.precompute_iv():\n if time_step is not None or num_time_steps is not None or times_grid is not None:\n raise ValueError('`time_step`, `num_time_steps`, or `times_grid` cannot be usedwith the interpolated LVM')\n times_grid = self._times_grid\n return tf.math.exp(super(LocalVolatilityModel, self).sample_paths(times=times, num_samples=num_samples, initial_state=initial_state, random_type=random_type, seed=seed, swap_memory=swap_memory, name=name, time_step=time_step, num_time_steps=num_time_steps, skip=skip, precompute_normal_draws=precompute_normal_draws, times_grid=times_grid, normal_draws=normal_draws, watch_params=watch_params, validate_args=validate_args))", "docstring": "Returns samples from the LV process.\n\n See GenericItoProcess.sample_paths. 
If `times_grid` is supplied to\n `__init__`, then `times_grid` cannot be supplied here.\n\nRaises:\n ValueError: If `precompute_iv` is True, but `time_step`, `num_time_steps`\n or `times_grid` are given.", "source": "github_repos"} -{"code": "def enable_debug_mode():\n if context.executing_eagerly():\n toggle_debug_mode(True)\n else:\n raise ValueError('`enable_debug_mode() is only supported in eager mode.')", "docstring": "Enables debug mode for tf.data.\n\n Example usage with pdb module:\n ```\n import tensorflow as tf\n import pdb\n\n tf.data.experimental.enable_debug_mode()\n\n def func(x):\n # Python 3.7 and older requires `pdb.Pdb(nosigint=True).set_trace()`\n pdb.set_trace()\n x = x + 1\n return x\n\n dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n dataset = dataset.map(func)\n\n for item in dataset:\n print(item)\n ```\n\n The effect of debug mode is two-fold:\n\n 1) Any transformations that would introduce asynchrony, parallelism, or\n non-determinism to the input pipeline execution will be forced to execute\n synchronously, sequentially, and deterministically.\n\n 2) Any user-defined functions passed into tf.data transformations such as\n `map` will be wrapped in `tf.py_function` so that their body is executed\n \"eagerly\" as a Python function as opposed to a traced TensorFlow graph, which\n is the default behavior. Note that even when debug mode is enabled, the\n user-defined function is still traced to infer the shape and type of its\n outputs; as a consequence, any `print` statements or breakpoints will be\n triggered once during the tracing before the actual execution of the input\n pipeline.\n\n NOTE: As the debug mode setting affects the construction of the tf.data input\n pipeline, it should be enabled before any tf.data definitions.\n\nRaises:\n ValueError: When invoked from graph mode.", "source": "github_repos"} -{"code": "def _from_spec(cls, spec: Union['DynamicRaggedShape.Spec', ragged_tensor.RaggedTensorSpec, tensor_lib.TensorSpec], dtype: dtypes.DType=dtypes.int64) -> 'DynamicRaggedShape.Spec':\n if isinstance(spec, DynamicRaggedShape.Spec):\n return spec\n elif isinstance(spec, ragged_tensor.RaggedTensorSpec):\n return cls._from_tensor_shape(spec.shape, spec.ragged_rank, spec.row_splits_dtype)\n elif isinstance(spec, tensor_lib.TensorSpec):\n return cls._from_tensor_shape(shape=spec.shape, num_row_partitions=0, dtype=dtype)", "docstring": "Create a TypeSpec for the shape of an object with a given TypeSpec.\n\n I.e., if `x_spec = tf.type_spec_from_value(x)`, then\n `DynamicRaggedShape.from_spec(x_spec)` returns a TypeSpec compatible with\n `tf.type_spec_from_value(tf.shape(x))`.\n\n >>> rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])\n >>> rt_spec = tf.type_spec_from_value(rt)\n >>> rt_shape = DynamicRaggedShape.from_tensor(rt)\n\n >>> shape_spec_1 = tf.type_spec_from_value(rt_shape)\n >>> shape_spec_2 = DynamicRaggedShape.Spec._from_spec(rt_spec)\n >>> assert shape_spec_1.is_compatible_with(shape_spec_2)\n\nArgs:\n spec: a Spec of a Tensor or RaggedTensor.\n dtype: the default dtype (if necessary).\n\nReturns:\n A Spec of the shape of a Tensor or RaggedTensor.", "source": "github_repos"} -{"code": "def _old_init(cls, fields, shape, nrows, row_partitions, internal=False):\n assert isinstance(fields, dict), fields\n assert isinstance(shape, tensor_shape.TensorShape), shape\n assert nrows is None or isinstance(nrows, tensor.Tensor), nrows\n assert row_partitions is None or isinstance(row_partitions, tuple), row_partitions\n return 
StructuredTensor(fields=fields, ragged_shape=_dynamic_ragged_shape_init(fields, shape, nrows, row_partitions))", "docstring": "Private constructor -- use factory methods to create StructuredTensors.\n\n This constructor builds a `StructuredTensor` from the given attributes,\n performing minimal validation.\n\nArgs:\n fields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or\n `StructuredTensor`. (This dict is not copied, so the caller must ensure\n that it does not get mutated via leaked references.)\n shape: `tf.TensorShape` with statically known rank.\n nrows: scalar integer `tf.Tensor`, or `None` if `shape.rank==0`.\n row_partitions: tuple of `RowPartition`s, with length `shape.rank-1`.\n internal: ignored argument.\n\nReturns:\n a StructuredTensor.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=True, return_dict: Optional[bool]=None) -> Union[Tuple, CLIPSegOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)\n text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n image_embeds = vision_outputs[1]\n image_embeds = self.visual_projection(image_embeds)\n text_embeds = text_outputs[1]\n text_embeds = self.text_projection(text_embeds)\n image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)\n text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)\n logit_scale = self.logit_scale.exp()\n logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale\n logits_per_image = logits_per_text.t()\n loss = None\n if return_loss:\n loss = clipseg_loss(logits_per_text)\n if not return_dict:\n output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)\n return (loss,) + output if loss is not None else output\n return CLIPSegOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)", "docstring": "return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n\nExample:\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, CLIPSegModel\n\n >>> processor = AutoProcessor.from_pretrained(\"CIDAS/clipseg-rd64-refined\")\n >>> model = CLIPSegModel.from_pretrained(\"CIDAS/clipseg-rd64-refined\")\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = processor(\n ... 
text=[\"a photo of a cat\", \"a photo of a dog\"], images=image, return_tensors=\"pt\", padding=True\n ... )\n\n >>> outputs = model(**inputs)\n >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score\n >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities\n ```", "source": "github_repos"} -{"code": "def ExpandSuperClasses(self, t):\n superclasses = set()\n self._CollectSuperclasses(t, superclasses)\n return superclasses", "docstring": "Generate a list of all (known) superclasses for a type.\n\nArgs:\n t: A type name. E.g. \"int\".\n\nReturns:\n A set of types. This set includes t as well as all its superclasses. For\n example, this will return \"bool\", \"int\" and \"object\" for \"bool\".", "source": "github_repos"} -{"code": "def __floordiv__(self, other):\n try:\n other = as_dimension(other)\n except (TypeError, ValueError):\n return NotImplemented\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(self._value // other.value)", "docstring": "Returns the quotient of `self` and `other` rounded down.\n\n Dimensions are divided as follows:\n\n ```python\n tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(n) ==\n tf.compat.v1.Dimension(m // n)\n tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(n) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n ```\n\nArgs:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\n A `Dimension` whose value is the integer quotient of `self` and `other`.", "source": "github_repos"} -{"code": "def clip(x, min_value, max_value):\n if isinstance(min_value, (int, float)) and isinstance(max_value, (int, float)):\n if max_value < min_value:\n max_value = min_value\n if min_value is None:\n min_value = -np.inf\n if max_value is None:\n max_value = np.inf\n return clip_ops.clip_by_value(x, min_value, max_value)", "docstring": "Element-wise value clipping.\n\nArgs:\n x: Tensor or variable.\n min_value: Python float, integer, or tensor.\n max_value: Python float, integer, or tensor.\n\nReturns:\n A tensor.", "source": "github_repos"} -{"code": "def true_positives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None):\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.true_positives is not supported when eager execution is enabled.')\n with variable_scope.variable_scope(name, 'true_positives', (predictions, labels, weights)):\n predictions, labels, weights = _remove_squeezable_dimensions(predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights)\n is_true_positive = math_ops.logical_and(math_ops.equal(labels, True), math_ops.equal(predictions, True))\n return _count_condition(is_true_positive, weights, metrics_collections, updates_collections)", "docstring": "Sum the weights of true_positives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\nArgs:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. Will be cast to `bool`.\n predictions: The predicted values, a `Tensor` of arbitrary dimensions. 
Will\n be cast to `bool`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\nReturns:\n value_tensor: A `Tensor` representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\nRaises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled.", "source": "github_repos"} -{"code": "def convert_to_rgb(image: np.ndarray, palette: Optional[PIL.ImagePalette.ImagePalette]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> ImageInput:\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4))\n data_format = input_data_format if data_format is None else data_format\n mode = 'P' if palette is not None else None\n image = to_pil_image(image, image_mode=mode, input_data_format=input_data_format)\n if image.mode == 'P' and palette is not None:\n image.putpalette(palette)\n image_rgba = image.convert('RGBA')\n background = Image.new('RGBA', image_rgba.size, (255, 255, 255))\n alpha_composite = Image.alpha_composite(background, image_rgba)\n alpha_composite = alpha_composite.convert('RGB')\n output_array = np.array(alpha_composite)\n output_array = to_channel_dimension_format(output_array, data_format, input_channel_dim=ChannelDimension.LAST)\n return output_array", "docstring": "Converts an image to RGB format.\n\nArgs:\n image (`np.ndarray`):\n The image to convert.\n palette (List[int], *optional*):\n The palette to use if given.\n data_format (ChannelDimension or str, *optional*):\n The channel dimension format for the output image. 
If not provided, it will be the same as the input image.\n input_data_format (ChannelDimension or str, *optional*):\n The channel dimension format of the input image.", "source": "github_repos"} -{"code": "def aggregate_events(events: List[str]) -> List[Tuple[str, int]]:\n frequency_list = collections.Counter(events)\n return sorted(frequency_list.items(), key=lambda x: (-x[1], x[0]))", "docstring": "Returns a list of tuples: (event string, count of times they appear).\n\n The list is sorted descending by count and then ascending by event string.\n\nArgs:\n events: A list of strings defining an event (either an error or a warning).\n\nReturns:\n List of tuples : (event string, number of times they appear).", "source": "github_repos"} -{"code": "def analyze(model_path=None, model_content=None, gpu_compatibility=False, **kwargs):\n if not model_path and (not model_content):\n raise ValueError('neither `model_path` nor `model_content` is provided')\n if model_path:\n print(f'=== {model_path} ===\\n')\n tflite_model = model_path\n input_is_filepath = True\n else:\n print('=== TFLite ModelAnalyzer ===\\n')\n tflite_model = model_content\n input_is_filepath = False\n if kwargs.get('experimental_use_mlir', False):\n print(wrap_converter.wrapped_flat_buffer_file_to_mlir(tflite_model, input_is_filepath))\n else:\n print(_analyzer_wrapper.ModelAnalyzer(tflite_model, input_is_filepath, gpu_compatibility))", "docstring": "Analyzes the given tflite_model with dumping model structure.\n\n This tool provides a way to understand users' TFLite flatbuffer model by\n dumping internal graph structure. It also provides additional features\n like checking GPU delegate compatibility.\n\n WARNING: Experimental interface, subject to change.\n The output format is not guaranteed to stay stable, so don't\n write scripts to this.\n\nArgs:\n model_path: TFLite flatbuffer model path.\n model_content: TFLite flatbuffer model object.\n gpu_compatibility: Whether to check GPU delegate compatibility.\n **kwargs: Experimental keyword arguments to analyze API.\n\nReturns:\n Print analyzed report via console output.", "source": "github_repos"} -{"code": "def execute_with_cancellation(op_name, num_outputs, inputs, attrs, ctx, cancellation_manager, name=None):\n device_name = ctx.device_name\n try:\n ctx.ensure_initialized()\n tensors = pywrap_tfe.TFE_Py_ExecuteCancelable(ctx._handle, device_name, op_name, inputs, attrs, cancellation_manager._impl, num_outputs)\n except core._NotOkStatusException as e:\n if name is not None:\n e.message += ' name: ' + name\n raise core._status_to_exception(e) from None\n except TypeError as e:\n keras_symbolic_tensors = [x for x in inputs if _is_keras_symbolic_tensor(x)]\n if keras_symbolic_tensors:\n raise core._SymbolicException('Inputs to eager execution function cannot be Keras symbolic tensors, but found {}'.format(keras_symbolic_tensors))\n raise e\n return tensors", "docstring": "Execute a TensorFlow operation.\n\nArgs:\n op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to\n execute.\n num_outputs: The number of outputs of the operation to fetch. (Explicitly\n provided instead of being inferred for performance reasons).\n inputs: A list of inputs to the operation. 
Each entry should be a Tensor, or\n a value which can be passed to the Tensor constructor to create one.\n attrs: A tuple with alternating string attr names and attr values for this\n operation.\n ctx: The value of context.context().\n cancellation_manager: a `CancellationManager` object that can be used to\n cancel the operation.\n name: Customized name for the operation.\n\nReturns:\n List of output Tensor objects. The list is empty if there are no outputs\n\nRaises:\n An exception on error.", "source": "github_repos"} -{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:\n outputs = self.tapas(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n pooled_output = outputs[1]\n pooled_output = self.dropout(inputs=pooled_output, training=training)\n logits = self.classifier(inputs=pooled_output)\n loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called\n \"classification_class_index\" in the original implementation.\n\nReturns:\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, TapasForSequenceClassification\n >>> import tensorflow as tf\n >>> import pandas as pd\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"google/tapas-base-finetuned-tabfact\")\n >>> model = TapasForSequenceClassification.from_pretrained(\"google/tapas-base-finetuned-tabfact\")\n\n >>> data = {\n ... \"Actors\": [\"Brad Pitt\", \"Leonardo Di Caprio\", \"George Clooney\"],\n ... \"Age\": [\"56\", \"45\", \"59\"],\n ... \"Number of movies\": [\"87\", \"53\", \"69\"],\n ... }\n >>> table = pd.DataFrame.from_dict(data)\n >>> queries = [\n ... \"There is only one actor who is 45 years old\",\n ... \"There are 3 actors which played in more than 60 movies\",\n ... 
]\n\n >>> inputs = tokenizer(table=table, queries=queries, padding=\"max_length\", return_tensors=\"tf\")\n >>> labels = tf.convert_to_tensor([1, 0]) # 1 means entailed, 0 means refuted\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```", "source": "github_repos"} -{"code": "def coordination_leader(cluster_spec):\n cluster_spec = normalize_cluster_spec(cluster_spec)\n if not cluster_spec.as_dict():\n return ''\n if 'ps' in cluster_spec.jobs:\n return '/job:ps/replica:0/task:0'\n if 'chief' in cluster_spec.jobs:\n return '/job:chief/replica:0/task:0'\n assert 'worker' in cluster_spec.jobs\n return '/job:worker/replica:0/task:0'", "docstring": "Return the task name of the coordination service leader.\n\nArgs:\n cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object sxpecifying the\n cluster configurations.\n\nReturns:\n a string indicating the task name of the coordination service leader.", "source": "github_repos"} -{"code": "def _prepare_tables(self):\n values = torch.tensor([[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]])\n row_index = IndexMap(indices=torch.tensor([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]]]), num_segments=3, batch_dims=1)\n col_index = IndexMap(indices=torch.tensor([[[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]), num_segments=3, batch_dims=1)\n return (values, row_index, col_index)", "docstring": "Prepares two tables, both with three distinct rows.\n The first table has two columns:\n 1.0, 2.0 | 3.0\n 2.0, 0.0 | 1.0\n 1.0, 3.0 | 4.0\n The second table has three columns:\n 1.0 | 2.0 | 3.0\n 2.0 | 0.0 | 1.0\n 1.0 | 3.0 | 4.0\n\nReturns:\n SegmentedTensors with the tables.", "source": "github_repos"} -{"code": "def convert_typing_to_builtin(typ):\n origin = getattr(typ, '__origin__', None)\n args = getattr(typ, '__args__', None)\n if origin not in _BUILTINS:\n return typ\n if not args:\n return origin\n if origin is list:\n return list[convert_typing_to_builtin(args[0])]\n elif origin is dict:\n return dict[convert_typing_to_builtin(args[0]), convert_typing_to_builtin(args[1])]\n elif origin is tuple:\n return tuple[tuple(convert_typing_to_builtin(args))]\n elif origin is set:\n return set[convert_typing_to_builtin(args)]\n elif origin is frozenset:\n return frozenset[convert_typing_to_builtin(args)]", "docstring": "Converts a given typing collections type to its builtin counterpart.\n\nArgs:\n typ: A typing type (e.g., typing.List[int]).\n\nReturns:\n type: The corresponding builtin type (e.g., list[int]).", "source": "github_repos"} -{"code": "def _alt_inner_shape(self, new_inner_rank):\n if new_inner_rank == 0:\n raise ValueError('new_inner_rank cannot be zero')\n elif self.inner_rank == 0:\n raise ValueError('old inner_rank cannot be zero')\n elif new_inner_rank == self.inner_rank:\n return self.inner_shape\n elif new_inner_rank < self.inner_rank:\n if self._static_inner_shape.is_fully_defined():\n return _alt_inner_shape_from_tensor_shape(self._static_inner_shape, self.dtype, new_inner_rank)\n first_dimension = self._num_slices_in_dimension(-new_inner_rank)\n if new_inner_rank == 1:\n return array_ops.expand_dims(first_dimension, 0)\n remaining_dimensions = self.inner_shape[1 - new_inner_rank:]\n return array_ops.concat([array_ops.expand_dims(first_dimension, 0), remaining_dimensions], axis=0)\n else:\n assert new_inner_rank > self.inner_rank\n new_dimensions = new_inner_rank - self.inner_rank\n if 
any([not x.is_uniform() for x in self.row_partitions[-new_dimensions:]]):\n raise ValueError('Cannot get an inner shape over a ragged dimension')\n first_dimension = self._num_slices_in_dimension(-new_inner_rank)\n new_dimensions = new_inner_rank - self.inner_rank\n new_dims = [first_dimension] + [x.uniform_row_length() for x in self.row_partitions[-new_dimensions:]]\n return array_ops.concat([array_ops_stack.stack(new_dims), self.inner_shape[1:]], axis=0)", "docstring": "Get an alternative inner shape with higher or lower rank.\n\n For the rank of the inner shape to be be higher, the last few ragged\n dimensions must have uniform_row_length.\n\nArgs:\n new_inner_rank: the new rank of the inner_shape\n\nReturns:\n A new inner_shape of rank new_inner_rank.", "source": "github_repos"} -{"code": "def from_sub_models_config(cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenMelodyDecoderConfig, **kwargs):\n return cls(text_encoder=text_encoder_config.to_dict(), audio_encoder=audio_encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from text encoder, audio encoder and decoder\n configurations.\n\nReturns:\n [`MusicgenMelodyConfig`]: An instance of a configuration object", "source": "github_repos"} -{"code": "def assert_no_legacy_layers(layers):\n legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)]\n if legacy_layers:\n layer_str = '\\n'.join((' ' + str(l) for l in legacy_layers))\n raise TypeError('The following are legacy tf.layers.Layers:\\n{}\\nTo use keras as a framework (for instance using the Network, Model, or Sequential classes), please use the tf.keras.layers implementation instead. (Or, if writing custom layers, subclass from tf.keras.layers rather than tf.layers)'.format(layer_str))", "docstring": "Prevent tf.layers.Layers from being used with Keras.\n\n Certain legacy layers inherit from their keras analogs; however they are\n not supported with keras and can lead to subtle and hard to diagnose bugs.\n\nArgs:\n layers: A list of layers to check\n\nRaises:\n TypeError: If any elements of layers are tf.layers.Layers", "source": "github_repos"} -{"code": "def get_full_path(path):\n if path_utils.isabs(path):\n return path\n else:\n return path_utils.join(_pytype_source_dir(), path)", "docstring": "Full path to a file or directory within the pytype source tree.\n\nArgs:\n path: An absolute or relative path.\n\nReturns:\n path for absolute paths.\n full path resolved relative to pytype/ for relative paths.", "source": "github_repos"} -{"code": "def set_distribution(value):\n global_state.set_global_attribute(GLOBAL_ATTRIBUTE_NAME, value)", "docstring": "Set the distribution as the global distribution setting.\n\nArgs:\n value: a `Distribution` instance.", "source": "github_repos"} -{"code": "def get_prefix_bias_name(self) -> Union[None, str]:\n warnings.warn('The method get_prefix_bias_name is deprecated. 
Please use `get_bias` instead.', FutureWarning)\n return None", "docstring": "Get the concatenated _prefix name of the bias from the model name to the parent layer\n\nReturns:\n `str`: The _prefix name of the bias.", "source": "github_repos"} -{"code": "def build_as_function_and_v1_graph(func: Callable[..., Any]) -> Callable[..., None]:\n if tf_inspect.isclass(func):\n raise ValueError('`run_in_graph_mode_and_function` only supports test methods.')\n\n @parameterized.named_parameters(('_v1_graph', 'v1_graph'), ('_function', 'function'))\n @functools.wraps(func)\n def decorated(self: 'TensorFlowTestCase', run_mode: str, *args, **kwargs) -> None:\n if run_mode == 'v1_graph':\n with ops.Graph().as_default():\n func(self, *args, **kwargs)\n elif run_mode == 'function':\n\n @def_function.function\n def function_in_eager():\n func(self, *args, **kwargs)\n graph_for_eager_test = ops.Graph()\n with graph_for_eager_test.as_default(), context.eager_mode():\n function_in_eager()\n ops.dismantle_graph(graph_for_eager_test)\n else:\n raise ValueError('Unknown run mode %s' % run_mode)\n return decorated", "docstring": "Run a test case in v1 graph mode and inside tf.function in eager mode.\n\n WARNING: This decorator can only be used in test cases that statically checks\n generated graph. Attempting to evaluate graph or function results via.\n session.run() or self.evaluate() will fail.\n\n WARNING: This decorator can only be used for test cases that inherit from\n absl.testing.parameterized.TestCase.\n\nArgs:\n func: Test case function to be decorated.\n\nReturns:\n Decorated test case function.", "source": "github_repos"} -{"code": "def has_leak(output: EventSetNodeCollection, input: Optional[EventSetNodeCollection]=None) -> bool:\n if input is None:\n normalized_input = None\n else:\n normalized_input = _normalize_query(input)\n normalized_output = _normalize_query(output)\n graph = infer_graph(inputs=normalized_input, outputs=normalized_output)\n leak_key = LeakOperator.operator_key()\n for operator in graph.operators:\n if operator.operator_key() == leak_key:\n return True\n return False", "docstring": "Tests if a node depends on a leak operator.\n\n Tests if a [`EventSetNode`][temporian.EventSetNode] or collection of nodes\n depends on the only operator that can introduce future leakage:\n [`EventSet.leak()`][temporian.EventSet.leak].\n\n Single input output example:\n ```python\n >>> a = tp.input_node([(\"f\", tp.float32)])\n >>> b = a.moving_sum(5)\n >>> c = b.leak(6)\n >>> d = c.prefix(\"my_prefix_\")\n >>> e = d.moving_sum(7)\n >>> # The computation of \"e\" contains a leak.\n >>> tp.has_leak(e)\n True\n >>> # The computation of \"e\" given \"d\" does not contain a leak.\n >>> tp.has_leak(e, d)\n False\n\n ```\n\nArgs:\n output: Nodes to compute. Supports Node, dict of Nodes and list of\n Nodes.\n input: Optional input nodes. Supports Node, dict of Nodes and list of\n Nodes. If not specified, assumes for the input nodes to be the the\n raw data inputs e.g. 
[`tp.input_node()`][temporian.input_node] and\n [`tp.event_set()`][temporian.event_set].\n\nReturns:\n True if and only if the computation of `output` from `inputs` depends\n on a [`EventSet.leak()`][temporian.EventSet.leak] operator.", "source": "github_repos"} -{"code": "def get_feature_extractor_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:\n cache_dir = kwargs.pop('cache_dir', None)\n force_download = kwargs.pop('force_download', False)\n resume_download = kwargs.pop('resume_download', None)\n proxies = kwargs.pop('proxies', None)\n subfolder = kwargs.pop('subfolder', None)\n token = kwargs.pop('token', None)\n use_auth_token = kwargs.pop('use_auth_token', None)\n local_files_only = kwargs.pop('local_files_only', False)\n revision = kwargs.pop('revision', None)\n if use_auth_token is not None:\n warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n if token is not None:\n raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')\n token = use_auth_token\n from_pipeline = kwargs.pop('_from_pipeline', None)\n from_auto_class = kwargs.pop('_from_auto', False)\n user_agent = {'file_type': 'feature extractor', 'from_auto_class': from_auto_class}\n if from_pipeline is not None:\n user_agent['using_pipeline'] = from_pipeline\n if is_offline_mode() and (not local_files_only):\n logger.info('Offline mode: forcing local_files_only=True')\n local_files_only = True\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n is_local = os.path.isdir(pretrained_model_name_or_path)\n if os.path.isdir(pretrained_model_name_or_path):\n feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME)\n if os.path.isfile(pretrained_model_name_or_path):\n resolved_feature_extractor_file = pretrained_model_name_or_path\n is_local = True\n elif is_remote_url(pretrained_model_name_or_path):\n feature_extractor_file = pretrained_model_name_or_path\n resolved_feature_extractor_file = download_url(pretrained_model_name_or_path)\n else:\n feature_extractor_file = FEATURE_EXTRACTOR_NAME\n try:\n resolved_feature_extractor_file = cached_file(pretrained_model_name_or_path, feature_extractor_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, subfolder=subfolder, token=token, user_agent=user_agent, revision=revision)\n except OSError:\n raise\n except Exception:\n raise OSError(f\"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a {FEATURE_EXTRACTOR_NAME} file\")\n try:\n with open(resolved_feature_extractor_file, encoding='utf-8') as reader:\n text = reader.read()\n feature_extractor_dict = json.loads(text)\n except json.JSONDecodeError:\n raise OSError(f\"It looks like the config file at '{resolved_feature_extractor_file}' is not a valid JSON file.\")\n if is_local:\n logger.info(f'loading configuration file {resolved_feature_extractor_file}')\n else:\n logger.info(f'loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}')\n return (feature_extractor_dict, kwargs)", "docstring": "From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a\n feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n\nReturns:\n `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object.", "source": "github_repos"} -{"code": "def forward(self, x):\n if self.training and self.drop_prob > 0:\n return XDropout.apply(x, self.get_context())\n return x", "docstring": "Call the module\n\nArgs:\n x (`torch.tensor`): The input tensor to apply dropout", "source": "github_repos"} -{"code": "def _TestGetItem(self, struct, slice_spec, expected):\n tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)\n tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)\n value1 = struct.__getitem__(slice_spec)\n value2 = struct.__getitem__(tensor_slice_spec1)\n value3 = struct.__getitem__(tensor_slice_spec2)\n self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))\n self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))\n self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))", "docstring": "Helper function for testing StructuredTensor.__getitem__.\n\n Checks that calling `struct.__getitem__(slice_spec) returns the expected\n value. 
Checks three different configurations for each slice spec:\n\n * Call __getitem__ with the slice spec as-is (with int values)\n * Call __getitem__ with int values in the slice spec wrapped in\n `tf.constant()`.\n * Call __getitem__ with int values in the slice spec wrapped in\n `tf.compat.v1.placeholder()` (so value is not known at graph\n construction time).\n\nArgs:\n struct: The StructuredTensor to test.\n slice_spec: The slice spec.\n expected: The expected value of struct.__getitem__(slice_spec), as a\n python list.", "source": "github_repos"} -{"code": "def clear_signature_defs(tflite_model):\n model = tflite_model\n if not isinstance(tflite_model, bytearray):\n model = bytearray(tflite_model)\n return signature_def_util.ClearSignatureDefs(model)", "docstring": "Clears SignatureDefs from the Metadata of a TfLite flatbuffer buffer.\n\nArgs:\n tflite_model: TFLite model buffer to remove signature_defs.\n\nReturns:\n buffer: A TFLite model binary identical to model buffer with\n no SignatureDef metadata.\n\nRaises:\n ValueError:\n tflite_model buffer does not contain a valid TFLite model.", "source": "github_repos"} -{"code": "def DummyMethod(name, *params):\n\n def make_param(param):\n return pytd.Parameter(param, type=pytd.AnythingType(), kind=pytd.ParameterKind.REGULAR, optional=False, mutated_type=None)\n sig = pytd.Signature(tuple((make_param(param) for param in params)), starargs=None, starstarargs=None, return_type=pytd.AnythingType(), exceptions=(), template=())\n return pytd.Function(name=name, signatures=(sig,), kind=pytd.MethodKind.METHOD, flags=pytd.MethodFlag.NONE)", "docstring": "Create a simple method using only \"Any\"s as types.\n\nArgs:\n name: The name of the method\n *params: The parameter names.\n\nReturns:\n A pytd.Function.", "source": "github_repos"} -{"code": "def __init__(self, query_engine: engine.Engine, value_set_codes_table: str) -> None:\n super().__init__()\n self._client = query_engine\n self._value_set_codes_table = value_set_codes_table", "docstring": "Initializes the SparkValueSetManager with user-provided sqlalchemy engine.\n\nArgs:\n query_engine: Sqlalchemy engine to communicate with Spark cluster\n value_set_codes_table: A table containing value set expansions. If\n `value_set_codes_table` is a string, it must included a project ID if\n not in the client's default project, dataset ID and table ID, each\n separated by a '.'. 
The table must match the schema described by\n https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md#valueset-support", "source": "github_repos"} -{"code": "def slice_inputs(self, indices_dataset, inputs):\n dataset = dataset_ops.DatasetV2.zip((indices_dataset, dataset_ops.DatasetV2.from_tensors(inputs).repeat()))\n\n def grab_batch(i, data):\n return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)\n dataset = dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)\n options = options_lib.Options()\n options.experimental_optimization.apply_default_optimizations = False\n if self._shuffle:\n options.experimental_external_state_policy = options_lib.ExternalStatePolicy.IGNORE\n dataset = dataset.with_options(options)\n return dataset", "docstring": "Slice inputs into a Dataset of batches.\n\n Given a Dataset of batch indices and the unsliced inputs,\n this step slices the inputs in a parallelized fashion\n and produces a dataset of input batches.\n\nArgs:\n indices_dataset: A Dataset of batched indices\n inputs: A python data structure that contains the inputs, targets,\n and possibly sample weights.\n\nReturns:\n A Dataset of input batches matching the batch indices.", "source": "github_repos"} -{"code": "def index_of(self, file_path, line_number, called_function_name, called_file_path, called_function_start_line):\n location_key = (file_path, called_function_name, line_number)\n if location_key in self._location_key_to_location:\n location = self._location_key_to_location[location_key]\n return location.id\n else:\n location_index = len(self._location_key_to_location) + 1\n location = profile_pb2.Location()\n location.id = location_index\n self._location_key_to_location[location_key] = location\n line = location.line.add()\n line.function_id = self._functions.index_of(called_file_path, called_function_name, called_function_start_line)\n line.line = line_number\n return location_index", "docstring": "Returns index of the location, adding the location if needed.\n\nArgs:\n file_path: (string) Path to file that makes the call.\n line_number: (integer) Call line number.\n called_function_name: (string) Function name of the function called at\n `file_path` and `line_number`.\n called_file_path: (string) Path to file where the called function is\n defined.\n called_function_start_line: (integer) Start line number of called\n function definition in `called_file_path` file.\n\nReturns:\n Index of location.", "source": "github_repos"} -{"code": "def pack(value):\n if is_packed(value):\n return value\n spec = value._type_spec._tf_extension_type_with_packed(True)\n try:\n variant = composite_tensor_ops.composite_tensor_to_variants(value)\n except nested_structure_coder.NotEncodableError as e:\n raise ValueError('ExtensionTypes must have a __name__ field in order to be packed.') from e\n return _create_object_from_type_and_dict(type(value), {'_tf_extension_type_cached_type_spec': spec, '_tf_extension_type_packed_variant': variant})", "docstring": "Returns a copy of `value` with fields packed in a single Variant.\n\nArgs:\n value: An `ExtensionType` object.\n\nReturns:\n An `ExtensionType` object.", "source": "github_repos"} -{"code": "def set_size(a, validate_indices=True):\n a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name='a')\n if not isinstance(a, sparse_tensor.SparseTensor):\n raise TypeError('Expected `SparseTensor`, got %s.' 
% a)\n if a.values.dtype.base_dtype not in _VALID_DTYPES:\n raise TypeError(f'Invalid dtype `{a.values.dtype}` not in supported dtypes: `{_VALID_DTYPES}`.')\n return gen_set_ops.set_size(a.indices, a.values, a.dense_shape, validate_indices)", "docstring": "Compute number of unique elements along last dimension of `a`.\n\nArgs:\n a: `SparseTensor`, with indices sorted in row-major order.\n validate_indices: Whether to validate the order and range of sparse indices\n in `a`. Note that setting this to `false` allows for undefined behavior\n when calling this function with invalid indices.\n\nReturns:\n `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with\n rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the\n number of unique elements in the corresponding `[0...n-1]` dimension of `a`.\n\nRaises:\n TypeError: If `a` is an invalid types.", "source": "github_repos"} -{"code": "def get(self):\n while self.is_running():\n try:\n inputs = self.ready_queue.get(block=False)\n yield inputs\n continue\n except queue.Empty:\n pass\n try:\n value = self.future_queue.get(block=True, timeout=5)\n self.future_queue.task_done()\n if isinstance(value, Exception):\n raise value\n inputs = value.get()\n if inputs is not None:\n yield inputs\n except queue.Empty:\n pass\n except Exception as e:\n self.stop(drain_queue_and_join=True)\n raise e\n raise ValueError('Iterator called after `on_epoch_end` or before `on_epoch_begin`.')", "docstring": "Creates a generator to extract data from the queue.\n\n Skip the data if it is `None`.\n\n This method is called from the main thread.\n\nYields:\n The next element in the queue, i.e. a tuple\n `(inputs, targets)` or\n `(inputs, targets, sample_weights)`.", "source": "github_repos"} -{"code": "def restore(self, output):\n pass", "docstring": "Create an accumulator based on 'output'.\n\n This method creates a new accumulator with identical internal state to the\n one used to create the data in 'output'. This means that if you do\n\n output_data = combiner.extract(accumulator_1)\n accumulator_2 = combiner.restore(output_data)\n\n then accumulator_1 and accumulator_2 will have identical internal state, and\n computations using either of them will be equivalent.\n\nArgs:\n output: The data output from a previous computation. 
Should be in the same\n form as provided by 'extract_output'.\n\nReturns:\n A new accumulator.", "source": "github_repos"} -{"code": "def __init__(self, *content: WritableTypes, style_files: Optional[Iterable[str]]=None, styles: Optional[Iterable[str]]=None, script_files: Optional[Iterable[str]]=None, scripts: Optional[Iterable[str]]=None) -> None:\n super().__init__(*content, style_files=Html.StyleFiles(*(style_files or [])), styles=Html.Styles(*(styles or [])), script_files=Html.ScriptFiles(*(script_files or [])), scripts=Html.Scripts(*(scripts or [])))", "docstring": "Constructor.\n\nArgs:\n *content: One or multiple body part (str, Html, lambda, None) of the HTML.\n style_files: URLs for external styles to include.\n styles: CSS styles to include.\n script_files: URLs for external scripts to include.\n scripts: JavaScript scripts to include.", "source": "github_repos"} -{"code": "def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):\n y_pred_rank = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred).shape.ndims\n y_true_rank = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_true).shape.ndims\n if y_true_rank is not None and y_pred_rank is not None:\n if y_pred_rank > 2:\n y_pred = array_ops.reshape(y_pred, [-1, y_pred.shape[-1]])\n if y_true_rank > 1:\n y_true = array_ops.reshape(y_true, [-1])\n return math_ops.cast(nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), backend.floatx())", "docstring": "Computes how often integer targets are in the top `K` predictions.\n\n Standalone usage:\n >>> y_true = [2, 1]\n >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]\n >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(\n ... y_true, y_pred, k=3)\n >>> assert m.shape == (2,)\n >>> m.numpy()\n array([1., 1.], dtype=float32)\n\nArgs:\n y_true: tensor of true targets.\n y_pred: tensor of predicted targets.\n k: (Optional) Number of top elements to look at for computing accuracy.\n Defaults to 5.\n\nReturns:\n Sparse top K categorical accuracy value.", "source": "github_repos"} -{"code": "def ensure_value_to_cell(value):\n\n def dummy_fn():\n value\n cell_value = dummy_fn.__closure__[0]\n if not isinstance(value, type(cell_value)):\n return cell_value\n return value", "docstring": "Ensures that a value is converted to a python cell object.\n\nArgs:\n value: Any value that needs to be casted to the cell type\n\nReturns:\n A value wrapped as a cell object (see function \"func_load\")", "source": "github_repos"} -{"code": "def __init__(self, pipeline: Union[beam_runner_api_pb2.Pipeline, beam.Pipeline], default_vertex_attrs={'shape': 'box'}, default_edge_attrs=None, render_option=None):\n self._lock = threading.Lock()\n self._graph: pydot.Dot = None\n self._pipeline_instrument = None\n if isinstance(pipeline, beam.Pipeline):\n self._pipeline_instrument = inst.PipelineInstrument(pipeline, pipeline._options)\n self._pipeline_instrument.preprocess()\n if isinstance(pipeline, beam_runner_api_pb2.Pipeline):\n self._pipeline_proto = pipeline\n elif isinstance(pipeline, beam.Pipeline):\n self._pipeline_proto = pipeline.to_runner_api()\n else:\n raise TypeError('pipeline should either be a %s or %s, while %s is given' % (beam_runner_api_pb2.Pipeline, beam.Pipeline, type(pipeline)))\n self._consumers: DefaultDict[str, List[str]] = collections.defaultdict(list)\n self._producers: Dict[str, str] = {}\n for transform_id, transform_proto in self._top_level_transforms():\n for pcoll_id in transform_proto.inputs.values():\n self._consumers[pcoll_id].append(transform_id)\n for 
pcoll_id in transform_proto.outputs.values():\n self._producers[pcoll_id] = transform_id\n default_vertex_attrs = default_vertex_attrs or {'shape': 'box'}\n if 'color' not in default_vertex_attrs:\n default_vertex_attrs['color'] = 'blue'\n if 'fontcolor' not in default_vertex_attrs:\n default_vertex_attrs['fontcolor'] = 'blue'\n vertex_dict, edge_dict = self._generate_graph_dicts()\n self._construct_graph(vertex_dict, edge_dict, default_vertex_attrs, default_edge_attrs)\n self._renderer = pipeline_graph_renderer.get_renderer(render_option)", "docstring": "Constructor of PipelineGraph.\n\nExample:\n graph = pipeline_graph.PipelineGraph(pipeline_proto)\n graph.get_dot()\n\n or\n\n graph = pipeline_graph.PipelineGraph(pipeline)\n graph.get_dot()\n\nArgs:\n pipeline: (Pipeline proto) or (Pipeline) pipeline to be rendered.\n default_vertex_attrs: (Dict[str, str]) a dict of default vertex attributes\n default_edge_attrs: (Dict[str, str]) a dict of default edge attributes\n render_option: (str) this parameter decides how the pipeline graph is\n rendered. See display.pipeline_graph_renderer for available options.", "source": "github_repos"} -{"code": "def get_maybe_abstract_instance(self, data):\n if data.is_concrete:\n data_type = type(data.pyval)\n if data_type in self.primitive_instances:\n return self.primitive_instances[data_type]\n return data", "docstring": "Get an instance of the same type as the given data, abstract if possible.\n\n Get an abstract instance of primitive data stored as a\n ConcreteValue. Return any other data as-is. This is used by\n constant_to_var to discard concrete values that have been kept\n around for InterpreterFunction.\n\n This method intentionally does not descend into containers, as doing so\n causes new timeouts. If you need to discard concrete values inside\n containers, use abstract_utils.abstractify_variable instead.\n\nArgs:\n data: The data.\n\nReturns:\n An instance of the same type as the data, abstract if possible.", "source": "github_repos"} -{"code": "def adb_call(args=None, shell=False, timeout=None, stderr=None) -> bytes:\n return self._exec_adb_cmd(name, args, shell=shell, timeout=timeout, stderr=stderr)", "docstring": "Wrapper for an ADB command.\n\nArgs:\n args: string or list of strings, arguments to the adb command.\n See subprocess.Proc() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n stderr: a Byte stream, like io.BytesIO, stderr of the command\n will be written to this object if provided.\n\nReturns:\n The output of the adb command run if exit code is 0.", "source": "github_repos"} -{"code": "def dates(self):\n return _gen_business_days(self._start_date, self._end_date, self._holiday_calendar, self._backward)", "docstring": "Returns the dates as computed from the schedule as a DateTensor.\n\n Constructs the date schedule from the supplied data. 
For more details see\n the initializer docstring.\n\nReturns:\n `DateTensor` of rank one more than `start_date` or `end_date`\n (depending on `backwards`), representing schedules for each element\n of the input.", "source": "github_repos"} -{"code": "def barrier(mesh: layout.Mesh, barrier_name: Optional[str]=None, timeout_in_ms: Optional[int]=None):\n if barrier_name is None:\n barrier_name = '(barrier)'\n logging.info('entering barrier before op: %s', barrier_name)\n context.async_wait()\n component = array_ops.reshape(1.0, [1] * len(mesh.shape()))\n ones = api.pack([component] * mesh.num_local_devices(), layout.Layout(mesh.dim_names, mesh))\n mesh_size = math_ops.reduce_sum(ones)\n if mesh_size != mesh.size:\n raise ValueError('Global barrier produced wrong mesh size : {0} while mesh has actualsize : {1}'.format(mesh_size, mesh.size))\n context.async_wait()\n if context.context().coordination_service:\n if timeout_in_ms is None:\n timeout_in_ms = 24 * 60 * 60 * 1000\n num_calls = _BARRIER_DICT.setdefault(barrier_name, 0)\n _BARRIER_DICT[barrier_name] = num_calls + 1\n barrier_id = f'{barrier_name}:{num_calls}'\n context.context().wait_at_barrier(barrier_id, timeout_in_ms)\n logging.info('finished running barrier across all clients after op: %s', barrier_name)", "docstring": "Runs a barrier on the mesh.\n\n Upon returning from the barrier, all operations run before the barrier\n would have completed across all clients. Currently we allocate a fully\n sharded tensor with mesh shape and run an all_reduce on it.\n\nExample:\n A barrier can be used before application exit to ensure completion of pending\n ops.\n\n ```python\n\n x = [1, 2, 3]\n x = dtensor.relayout(x, dtensor.Layout.batch_sharded(mesh, 'batch', 1))\n dtensor.barrier(mesh)\n\n # At this point all devices on all clients in the mesh have completed\n # operations before the barrier. Therefore it is OK to tear down the clients.\n sys.exit()\n ```\n\nArgs:\n mesh: The mesh to run the barrier on.\n barrier_name: The name of the barrier. Mainly used for logging purpose.\n timeout_in_ms: The timeout of the barrier in ms. If omitted, blocks\n indefinitely till the barrier is reached from all clients.", "source": "github_repos"} -{"code": "def _get_object_type(filename, filepath):\n filename_no_ext = os.path.splitext(filename)[0].lower()\n if filename_no_ext.endswith(PrecompiledExampleType.test_ends):\n object_type = PRECOMPILED_OBJECT_TYPE_UNIT_TEST\n elif PrecompiledExampleType.katas in filepath.split(os.sep):\n object_type = PRECOMPILED_OBJECT_TYPE_KATA\n elif PrecompiledExampleType.examples in filepath.split(os.sep):\n object_type = PRECOMPILED_OBJECT_TYPE_EXAMPLE\n else:\n object_type = PRECOMPILED_OBJECT_TYPE_UNSPECIFIED\n return object_type", "docstring": "Get type of an object based on it filename/filepath\n\nArgs:\n filename: object's filename\n filepath: object's filepath\n\n Returns: type of the object (example, kata, unit-test)", "source": "github_repos"} -{"code": "def min(x, axis=None, keepdims=False):\n return math_ops.reduce_min(x, axis, keepdims)", "docstring": "Minimum value in a tensor.\n\nArgs:\n x: A tensor or variable.\n axis: An integer, the axis to find minimum values.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. 
If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\nReturns:\n A tensor with minimum values of `x`.", "source": "github_repos"} -{"code": "def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n if is_decoder:\n batch_size, seq_length = input_shape\n seq_ids = torch.arange(seq_length, device=device)\n causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n causal_mask = causal_mask.to(attention_mask.dtype)\n if causal_mask.shape[1] < attention_mask.shape[1]:\n prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]\n causal_mask = torch.cat([torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask], axis=-1)\n extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]\n else:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape))\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArgs:\n attention_mask (`torch.Tensor`):\n Mask with ones indicating tokens to attend to, zeros for tokens to ignore.\n input_shape (`Tuple[int]`):\n The shape of the input to the model.\n device (`torch.device`):\n The device of the input to the model.\n\nReturns:\n `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.", "source": "github_repos"} -{"code": "def peek(self, index, name=None):\n if name is None:\n name = '%s_peek' % self._name\n fn = lambda: gen_data_flow_ops.stage_peek(index, dtypes=self._dtypes, shared_name=self._name, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n return self.__internal_get(fn, name)", "docstring": "Peeks at an element in the staging area.\n\n If the staging area is too small to contain the element at\n the specified index, it will block until enough elements\n are inserted to complete the operation.\n\n The placement of the returned tensor will be determined by\n the current device scope when this function is called.\n\nArgs:\n index: The index of the tensor within the staging area\n to look up.\n name: A name for the operation (optional).\n\nReturns:\n The tuple of tensors that was gotten.", "source": "github_repos"} -{"code": "def add_callback(self, callback):\n self.callback_handler.add_callback(callback)", "docstring": "Add a callback to the current list of [`~transformers.TrainerCallback`].\n\nArgs:\n callback (`type` or [`~transformers.TrainerCallback]`):\n A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the\n first case, will instantiate a member of that class.", "source": "github_repos"} -{"code": "def get_identity_broadcaster(cls, nvals, dtype=None):\n return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype))", "docstring": "Create an identity broadcaster.\n\n TODO(martinz): an identity broadcaster can be far more efficient than a\n generic broadcaster. 
Add an optimized implementation.\n\nArgs:\n nvals: the number of values for the broadcaster.\n dtype: the dtype of the broadcaster, or None to use the dtype of nvals.\n\nReturns:\n an identity broadcaster from [0....nvals-1] to [0...nvals-1]", "source": "github_repos"} -{"code": "def get_group_key(self, devices):\n with self._lock:\n devices_key = ','.join(devices)\n if devices_key not in self._known_groups:\n self._known_groups[devices_key] = self._get_new_group_key(devices)\n return self._known_groups[devices_key]", "docstring": "Returns a group key for the list of local devices.\n\n The same group key is returned if the list of local devices is the same.\n\nArgs:\n devices: a list of local canonical device strings in a collective group.\n\nReturns:\n a group key.", "source": "github_repos"} -{"code": "def set_image_data_format(data_format):\n global _IMAGE_DATA_FORMAT\n data_format = str(data_format).lower()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError(f\"The `data_format` argument must be one of {{'channels_first', 'channels_last'}}. Received: data_format={data_format}\")\n _IMAGE_DATA_FORMAT = data_format", "docstring": "Set the value of the image data format convention.\n\nArgs:\n data_format: string. `'channels_first'` or `'channels_last'`.\n\nExample:\n >>> keras.config.image_data_format()\n 'channels_last'\n\n >>> keras.config.set_image_data_format('channels_first')\n >>> keras.config.image_data_format()\n 'channels_first'\n\n >>> # Set it back to `'channels_last'`\n >>> keras.config.set_image_data_format('channels_last')", "source": "github_repos"} -{"code": "def matmul(a: ragged_tensor.RaggedOrDense, b: ragged_tensor.RaggedOrDense, transpose_a=False, transpose_b=False, adjoint_a=False, adjoint_b=False, a_is_sparse=False, b_is_sparse=False, output_type=None, grad_a=False, grad_b=False, name=None):\n del grad_a\n del grad_b\n if transpose_a and adjoint_a:\n raise ValueError('Only one of transpose_a and adjoint_a can be True.')\n if transpose_b and adjoint_b:\n raise ValueError('Only one of transpose_b and adjoint_b can be True.')\n kwargs = dict(transpose_a=transpose_a, transpose_b=transpose_b, adjoint_a=adjoint_a, adjoint_b=adjoint_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, output_type=output_type)\n with ops.name_scope(name, 'RaggedMatMul', [a, b]) as name:\n a = ragged_tensor.convert_to_tensor_or_ragged_tensor(a, name='a')\n b = ragged_tensor.convert_to_tensor_or_ragged_tensor(b, name='b')\n a_is_ragged = isinstance(a, ragged_tensor.RaggedTensor)\n b_is_ragged = isinstance(b, ragged_tensor.RaggedTensor)\n if not (a_is_ragged or b_is_ragged):\n return math_ops.matmul(a, b, **kwargs)\n if a.dtype != b.dtype:\n raise ValueError('`a` and `b` must have the same dtype.')\n if a.shape.rank is None:\n if b.shape.rank is None:\n raise ValueError('matmul requires at least one input to have known rank if either input is ragged.')\n rank = b.shape.rank\n else:\n if b.shape.rank is not None and a.shape.rank != b.shape.rank:\n raise ValueError('`a` and `b` must have the same rank.')\n rank = a.shape.rank\n if rank < 2:\n raise ValueError('`a` and `b` must have the same rank.')\n if rank > 3:\n shape_err = 'Batch dimensions of `a` and `b` do not have the same size.'\n if not a_is_ragged:\n a = ragged_tensor.RaggedTensor.from_tensor(a, ragged_rank=1)\n if not b_is_ragged:\n b = ragged_tensor.RaggedTensor.from_tensor(b, ragged_rank=1)\n with ops.control_dependencies([check_ops.assert_equal(a.row_splits, b.row_splits, message=shape_err)]):\n 
flat_result = matmul(a.values, b.values, **kwargs)\n return a.with_values(flat_result)\n if rank == 2:\n return _matmul_2d(a, b, **kwargs)\n assert rank == 3\n a_ragged_rank = a.ragged_rank if a_is_ragged else 0\n if a_ragged_rank == 1 and (not (b_is_ragged or transpose_a or adjoint_a)):\n return _matmul_3d_with_batch_dim_folding(a, b, **kwargs)\n else:\n return _matmul_3d_with_map_fn(a, b, **kwargs)", "docstring": "Multiplies matrix `a` by matrix `b`.\n\n If all transpose or adjoint attributes are `False` then:\n\n ```\n output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j]), for all indices i, j.\n ```\n\n The inputs `a` and `b` must have `rank >= 2`, where the outermost `rank - 2`\n dimensions are batch dimensions. The inputs must have the same dtype. See\n `tf.matmul` for more information.\n\nArgs:\n a: `tf.Tensor` or `RaggedTensor` with `rank > 1`.\n b: `tf.Tensor` or `RaggedTensor` with same type and rank as `a`.\n transpose_a: If `True`, `a` is transposed before multiplication.\n transpose_b: If `True`, `b` is transposed before multiplication.\n adjoint_a: If `True`, `a` is conjugated & transposed before multiplication.\n adjoint_b: If `True`, `b` is conjugated & transposed before multiplication.\n a_is_sparse: If `True`, optimize assuming `a` is mostly zero.\n b_is_sparse: If `True`, optimize assuming `b` is mostly zero.\n output_type: The output datatype (optional).\n grad_a: Unused.\n grad_b: Unused.\n name: Name for the operation (optional).\n\nReturns:\n A `Tensor` or `RaggedTensor` with the same rank and shape as `a`, where\n each inner-most matrix is the product of the corresponding matrices in `a`\n and `b`.", "source": "github_repos"} -{"code": "def resolve(self, reference_path_or_paths: Optional[Union[str, List[str]]]=None) -> Union[Tuple[symbolic.Symbolic, utils.KeyPath], List[Tuple[symbolic.Symbolic, utils.KeyPath]]]:\n single_input = False\n if reference_path_or_paths is None:\n reference_paths = self.reference_paths\n elif isinstance(reference_path_or_paths, str):\n reference_paths = [utils.KeyPath.parse(reference_path_or_paths)]\n single_input = True\n elif isinstance(reference_path_or_paths, utils.KeyPath):\n reference_paths = [reference_path_or_paths]\n single_input = True\n elif isinstance(reference_path_or_paths, list):\n paths = []\n for path in reference_path_or_paths:\n if isinstance(path, str):\n path = utils.KeyPath.parse(path)\n elif not isinstance(path, utils.KeyPath):\n raise ValueError(\"Argument 'reference_path_or_paths' must be None, a string, KeyPath object, a list of strings, or a list of KeyPath objects.\")\n paths.append(path)\n reference_paths = paths\n else:\n raise ValueError(\"Argument 'reference_path_or_paths' must be None, a string, KeyPath object, a list of strings, or a list of KeyPath objects.\")\n resolved_paths = []\n for reference_path in reference_paths:\n parent = self.sym_parent\n while parent is not None and (not reference_path.exists(parent)):\n parent = getattr(parent, 'sym_parent', None)\n if parent is None:\n raise ValueError(f\"Cannot resolve '{reference_path}': parent not found.\")\n resolved_paths.append((parent, parent.sym_path + reference_path))\n return resolved_paths if not single_input else resolved_paths[0]", "docstring": "Resolve reference paths based on the location of this node.\n\nArgs:\n reference_path_or_paths: (Optional) a string or KeyPath as a reference\n path or a list of strings or KeyPath objects as a list of\n reference paths.\n If this argument is not provided, prebound reference paths of this\n object 
will be used.\n\nReturns:\n A tuple (or list of tuple) of (resolved parent, resolved full path)", "source": "github_repos"} -{"code": "def __init__(self, graph, control_inputs) -> None:\n self._graph = graph\n if control_inputs is None:\n self._control_inputs_val = []\n self._new_stack = True\n else:\n self._control_inputs_val = control_inputs\n self._new_stack = False\n self._seen_nodes = set()\n self._old_stack = None\n self._old_control_flow_context = None", "docstring": "Create a new `_ControlDependenciesController`.\n\n A `_ControlDependenciesController` is the context manager for\n `with tf.control_dependencies()` blocks. These normally nest,\n as described in the documentation for `control_dependencies()`.\n\n The `control_inputs` argument list control dependencies that must be\n added to the current set of control dependencies. Because of\n uniquification the set can be empty even if the caller passed a list of\n ops. The special value `None` indicates that we want to start a new\n empty set of control dependencies instead of extending the current set.\n\n In that case we also clear the current control flow context, which is an\n additional mechanism to add control dependencies.\n\nArgs:\n graph: The graph that this controller is managing.\n control_inputs: List of ops to use as control inputs in addition to the\n current control dependencies. None to indicate that the dependencies\n should be cleared.", "source": "github_repos"} -{"code": "def row_limits(self):\n return self._row_splits[1:]", "docstring": "Returns the limit indices for rows in this row partition.\n\n These indices specify where the values for each row end.\n `partition.row_limits()` is equal to `partition.row_splits()[:-1]`.\n\nReturns:\n A 1-D integer Tensor with shape `[self.nrows]`.\n The returned tensor is nonnegative, and is sorted in ascending order.\n `self.row_limits()[-1] == self.nvals()`.", "source": "github_repos"} -{"code": "def create_constructor_args(cls, proto_list: List[ir_swap.InterestRateSwap], config: InterestRateSwapConfig=None) -> Dict[str, Any]:\n swap_data = proto_utils.from_protos_v2(proto_list, config)\n res = {}\n for key in swap_data:\n tensor_repr = proto_utils.tensor_repr(swap_data[key])\n res[key] = tensor_repr\n return res", "docstring": "Creates a dictionary to initialize InterestRateSwap.\n\n The output dictionary is such that the instruments can be initialized\n as follows:\n ```\n initializer = create_constructor_args(proto_list, config)\n swaps = [InterestRateSwap(**data) for data in initializer.values()]\n ```\n\n The keys of the output dictionary are unique identifiers of the batched\n instruments. 
This is useful for identifying an existing graph that could be\n reused for the instruments without the need of rebuilding the graph.\n\nArgs:\n proto_list: A list of protos for which the initialization arguments are\n constructed.\n config: An instance of `InterestRateSwapConfig`.\n\nReturns:\n A possibly nested dictionary such that each value provides initialization\n arguments for the InterestRateSwap.", "source": "github_repos"} -{"code": "def __init__(self, input_energy: energy.BernoulliEnergy, num_expectation_samples: int, initial_seed: Union[None, tf.Tensor]=None, name: Union[None, str]=None):\n super().__init__(input_energy, num_expectation_samples, initial_seed, name)\n self._logits_variable = tf.Variable(input_energy.logits, trainable=False)\n self._distribution = tfd.Bernoulli(logits=self._logits_variable, dtype=tf.int8)", "docstring": "Initializes a BernoulliEnergyInference.\n\nArgs:\n input_energy: The parameterized energy function which defines this\n distribution via the equations of an energy based model. This class\n assumes that all parameters of `energy` are `tf.Variable`s and that\n they are all returned by `energy.variables`.\n num_expectation_samples: Number of samples to draw and use for estimating\n the expectation value.\n initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This\n seed will be used in the `sample` method. If None, the seed is updated\n after every inference call. Otherwise, the seed is fixed.\n name: Optional name for the model.", "source": "github_repos"} -{"code": "def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor:\n upper_bound1 = abs(up[0]) * abs(reg_scale)\n upper_bound2 = abs(up[0]) * abs(reg_scale) * 2\n step = (upper_bound1 + 1) ** (2 / (max_num_bins - 2))\n left_values = [-step ** i + 1 for i in range(max_num_bins // 2 - 1, 0, -1)]\n right_values = [step ** i - 1 for i in range(1, max_num_bins // 2)]\n values = [-upper_bound2] + left_values + [torch.zeros_like(up[0][None])] + right_values + [upper_bound2]\n values = [v if v.dim() > 0 else v.unsqueeze(0) for v in values]\n values = torch.cat(values, 0)\n return values", "docstring": "Generates the non-uniform Weighting Function W(n) for bounding box regression.\n\nArgs:\n max_num_bins (int): Max number of the discrete bins.\n up (Tensor): Controls upper bounds of the sequence,\n where maximum offset is ±up * H / W.\n reg_scale (float): Controls the curvature of the Weighting Function.\n Larger values result in flatter weights near the central axis W(max_num_bins/2)=0\n and steeper weights at both ends.\n\nReturns:\n Tensor: Sequence of Weighting Function.", "source": "github_repos"} -{"code": "def model_to_dot(model, show_shapes=False, show_dtype=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=200, subgraph=False, show_layer_activations=False, show_trainable=False, **kwargs):\n from keras.src.ops.function import make_node_key\n if not model.built:\n raise ValueError('This model has not yet been built. 
Build the model first by calling `build()` or by calling the model on a batch of data.')\n from keras.src.models import functional\n from keras.src.models import sequential\n if not check_pydot():\n raise ImportError('You must install pydot (`pip install pydot`) for model_to_dot to work.')\n if subgraph:\n dot = pydot.Cluster(style='dashed', graph_name=model.name)\n dot.set('label', model.name)\n dot.set('labeljust', 'l')\n else:\n dot = pydot.Dot()\n dot.set('rankdir', rankdir)\n dot.set('concentrate', True)\n dot.set('dpi', dpi)\n dot.set('splines', 'ortho')\n dot.set_node_defaults(shape='record')\n if kwargs.pop('layer_range', None) is not None:\n raise ValueError('Argument `layer_range` is no longer supported.')\n if kwargs:\n raise ValueError(f'Unrecognized keyword arguments: {kwargs}')\n kwargs = {'show_layer_names': show_layer_names, 'show_layer_activations': show_layer_activations, 'show_dtype': show_dtype, 'show_shapes': show_shapes, 'show_trainable': show_trainable}\n if isinstance(model, sequential.Sequential):\n layers = model.layers\n elif not isinstance(model, functional.Functional):\n node = make_node(model, **kwargs)\n dot.add_node(node)\n return dot\n else:\n layers = model._operations\n for i, layer in enumerate(layers):\n if expand_nested and isinstance(layer, (functional.Functional, sequential.Sequential)):\n submodel = model_to_dot(layer, show_shapes, show_dtype, show_layer_names, rankdir, expand_nested, subgraph=True, show_layer_activations=show_layer_activations, show_trainable=show_trainable)\n dot.add_subgraph(submodel)\n else:\n node = make_node(layer, **kwargs)\n dot.add_node(node)\n if isinstance(model, sequential.Sequential):\n if not expand_nested:\n for i in range(len(layers) - 1):\n add_edge(dot, layers[i], layers[i + 1])\n return dot\n else:\n layers = model.layers[1:]\n for layer in layers:\n for inbound_index, inbound_node in enumerate(layer._inbound_nodes):\n if isinstance(model, functional.Functional) and make_node_key(layer, inbound_index) not in model._nodes:\n continue\n for input_index, input_tensor in enumerate(inbound_node.input_tensors):\n input_history = input_tensor._keras_history\n if input_history.operation is None:\n continue\n input_node = input_history.operation._inbound_nodes[input_history.node_index]\n output_index = input_history.tensor_index\n source = input_node.operation\n destination = layer\n if not expand_nested:\n add_edge(dot, source, layer)\n continue\n while isinstance(source, (functional.Functional, sequential.Sequential)):\n source, _, output_index = source.outputs[output_index]._keras_history\n while isinstance(destination, (functional.Functional, sequential.Sequential)):\n if isinstance(destination, functional.Functional):\n destination = destination.inputs[input_index]._keras_history.operation\n else:\n destination = destination.layers[0]\n add_edge(dot, source, destination)\n return dot", "docstring": "Convert a Keras model to dot format.\n\nArgs:\n model: A Keras model instance.\n show_shapes: whether to display shape information.\n show_dtype: whether to display layer dtypes.\n show_layer_names: whether to display layer names.\n rankdir: `rankdir` argument passed to PyDot,\n a string specifying the format of the plot: `\"TB\"`\n creates a vertical plot; `\"LR\"` creates a horizontal plot.\n expand_nested: whether to expand nested Functional models\n into clusters.\n dpi: Image resolution in dots per inch.\n subgraph: whether to return a `pydot.Cluster` instance.\n show_layer_activations: Display layer activations (only 
for layers that\n have an `activation` property).\n show_trainable: whether to display if a layer is trainable.\n\nReturns:\n A `pydot.Dot` instance representing the Keras model or\n a `pydot.Cluster` instance representing nested model if\n `subgraph=True`.", "source": "github_repos"} -{"code": "def is_placeholder(x):\n try:\n if ops.executing_eagerly_outside_functions():\n return hasattr(x, '_is_backend_placeholder')\n from tensorflow.python.keras.utils import tf_utils\n if tf_utils.is_extension_type(x):\n flat_components = nest.flatten(x, expand_composites=True)\n return py_any((is_placeholder(c) for c in flat_components))\n else:\n return x.op.type == 'Placeholder'\n except AttributeError:\n return False", "docstring": "Returns whether `x` is a placeholder.\n\nArgs:\n x: A candidate placeholder.\n\nReturns:\n Boolean.", "source": "github_repos"} -{"code": "def _read_side_inputs(self, tags_and_types):\n assert self.side_input_maps is None\n for i, (side_tag, view_class, view_options) in enumerate(tags_and_types):\n sources = []\n for si in filter(lambda o: o.tag == side_tag, self.spec.side_inputs):\n if not isinstance(si, operation_specs.WorkerSideInputSource):\n raise NotImplementedError('Unknown side input type: %r' % si)\n sources.append(si.source)\n si_counter = opcounters.SideInputReadCounter(self.counter_factory, self.state_sampler, declaring_step=self.name_context.step_name, input_index=i + 1)\n element_counter = opcounters.OperationCounters(self.counter_factory, self.name_context.step_name, view_options['coder'], i, suffix='side-input')\n iterator_fn = sideinputs.get_iterator_fn_for_sources(sources, read_counter=si_counter, element_counter=element_counter)\n yield apache_sideinputs.SideInputMap(view_class, view_options, sideinputs.EmulatedIterable(iterator_fn))", "docstring": "Generator reading side inputs in the order prescribed by tags_and_types.\n\nArgs:\n tags_and_types: List of tuples (tag, type). Each side input has a string\n tag that is specified in the worker instruction. 
The type is actually\n a boolean which is True for singleton input (read just first value)\n and False for collection input (read all values).\n\nYields:\n With each iteration it yields the result of reading an entire side source\n either in singleton or collection mode according to the tags_and_types\n argument.", "source": "github_repos"} -{"code": "def _partitioner(shape, dtype):\n if axis >= len(shape):\n raise ValueError(f'Cannot partition variable along axis {axis} when shape is only {shape}')\n dtype = dtypes.as_dtype(dtype)\n if dtype.base_dtype == dtypes.string:\n bytes_per_element = bytes_per_string_element\n else:\n bytes_per_element = dtype.size\n total_size_bytes = shape.num_elements() * bytes_per_element\n partitions = total_size_bytes / min_slice_size\n partitions_list = [1] * len(shape)\n partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions))))\n return partitions_list", "docstring": "Partitioner that partitions list for a variable of given shape and type.\n\n Ex: Consider partitioning a variable of type float32 with\n shape=[1024, 1024].\n If `max_partitions` >= 16, this function would return\n [(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].\n If `max_partitions` < 16, this function would return\n [`max_partitions`, 1].\n\nArgs:\n shape: Shape of the variable.\n dtype: Type of the variable.\n\nReturns:\n List of partitions for each axis (currently only one axis can be\n partitioned).\n\nRaises:\n ValueError: If axis to partition along does not exist for the variable.", "source": "github_repos"} -{"code": "def to_json(self, **kwargs) -> JSONValueType:", "docstring": "Returns a plain Python value as a representation for this object.\n\n A plain Python value are basic python types that can be serialized into\n JSON, e.g: ``bool``, ``int``, ``float``, ``str``, ``dict`` (with string\n keys), ``list``, ``tuple`` where the container types should have plain\n Python values as their values.\n\nArgs:\n **kwargs: Keyword arguments as flags to control JSON conversion.\n\nReturns:\n A plain Python value.", "source": "github_repos"} -{"code": "def __init__(self, window_coder):\n super().__init__()\n if window_coder is None:\n raise ValueError('window_coder should not be None')\n self._window_coder = window_coder", "docstring": "Create a new WindowFn with compatible coder.\n To be applied to PCollections with windows that are compatible with the\n given coder.\n\nArgs:\n window_coder: coders.Coder object to be used on windows.", "source": "github_repos"} -{"code": "def _make_1d_state(state_size, seed):\n if isinstance(seed, int):\n ls = []\n for _ in range(state_size):\n ls.append(seed & SEED_BIT_MASK)\n seed >>= SEED_TYPE_BITS\n seed = ls\n seed = nest.map_structure(_uint_to_int, seed)\n seed = math_ops.cast(seed, STATE_TYPE)\n seed = array_ops.reshape(seed, [-1])\n seed = seed[0:state_size]\n seed_size = seed.shape[0]\n if seed_size is None:\n seed_size = array_ops.shape(seed)[0]\n padding_size = math_ops.maximum(state_size - seed_size, 0)\n padding = array_ops.zeros([padding_size], seed.dtype)\n seed = array_ops.concat([padding, seed], axis=0)\n seed.set_shape([state_size])\n return seed", "docstring": "Makes a 1-D RNG state.\n\nArgs:\n state_size: an integer.\n seed: an integer or 1-D tensor.\n\nReturns:\n a 1-D tensor of shape [state_size] and dtype STATE_TYPE.", "source": "github_repos"} -{"code": "def _reshape_tensors(tensors, shape):\n reshaped = []\n for t in tensors:\n with ops.colocate_with(t):\n 
reshaped.append(array_ops.reshape(t, shape))\n return reshaped", "docstring": "Reshape tensors flattened by _flatten_tensors.\n\nArgs:\n tensors: list of `tf.Tensor` of identical length 1D tensors.\n shape: list of integers describing the desired shape. Product of\n the elements must equal the length of each tensor.\n\nReturns:\n list of `tf.Tensor` which are the reshaped inputs.", "source": "github_repos"} -{"code": "def on_skip(self, record):", "docstring": "A function that is executed upon a test being skipped.\n\n Implementation is optional.\n\nArgs:\n record: records.TestResultRecord, a copy of the test record for\n this test, containing all information of the test execution\n including exception objects.", "source": "github_repos"} -{"code": "def assert_is_fully_defined(self):\n if not self.is_fully_defined():\n raise ValueError('Shape %s is not fully defined' % self)", "docstring": "Raises an exception if `self` is not fully defined in every dimension.\n\nRaises:\n ValueError: If `self` does not have a known value for every dimension.", "source": "github_repos"} -{"code": "def f():\n return constant_op.constant(1)", "docstring": "First sentence.\n\n Second sentence.\n\nReturns:\n Something.", "source": "github_repos"} -{"code": "def get_dict_table_schema(schema):\n if isinstance(schema, (dict, value_provider.ValueProvider)) or callable(schema) or schema is None:\n return schema\n elif isinstance(schema, str):\n table_schema = get_table_schema_from_string(schema)\n return table_schema_to_dict(table_schema)\n elif isinstance(schema, bigquery.TableSchema):\n return table_schema_to_dict(schema)\n else:\n raise TypeError('Unexpected schema argument: %s.' % schema)", "docstring": "Transform the table schema into a dictionary instance.\n\nArgs:\n schema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema):\n The schema to be used if the BigQuery table to write has to be created.\n This can either be a dict or string or in the TableSchema format.\n\nReturns:\n Dict[str, Any]: The schema to be used if the BigQuery table to write has\n to be created but in the dictionary format.", "source": "github_repos"} -{"code": "def _validate_testbed_configs(testbed_configs):\n seen_names = set()\n for config in testbed_configs:\n name = config[keys.Config.key_testbed_name.value]\n _validate_testbed_name(name)\n if name in seen_names:\n raise MoblyConfigError('Duplicate testbed name %s found.' 
% name)\n seen_names.add(name)", "docstring": "Validates the testbed configurations.\n\nArgs:\n testbed_configs: A list of testbed configuration dicts.\n\nRaises:\n MoblyConfigError: Some parts of the configuration is invalid.", "source": "github_repos"} -{"code": "def m_to_inches(value):\n if value is None:\n return None\n return value / 39.37", "docstring": "Converts distance in meters to inches\n\nArgs:\n value: floating point representing the distance in meters\n Returns: distance in inches", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor):\n gate_score = self.act(self.w_0(hidden_states))\n hidden_states = self.w_1(hidden_states)\n hidden_states = gate_score * hidden_states\n return hidden_states", "docstring": "Transform an input tensor from one feature space to another via a nonlinear operation\n\nArgs:\n hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)", "source": "github_repos"} -{"code": "def score_retrieval(self, query_embeddings: Union['torch.Tensor', List['torch.Tensor']], passage_embeddings: Union['torch.Tensor', List['torch.Tensor']], batch_size: int=128, output_dtype: Optional['torch.dtype']=None, output_device: Union['torch.device', str]='cpu') -> 'torch.Tensor':\n if len(query_embeddings) == 0:\n raise ValueError('No queries provided')\n if len(passage_embeddings) == 0:\n raise ValueError('No passages provided')\n if query_embeddings[0].device != passage_embeddings[0].device:\n raise ValueError('Queries and passages must be on the same device')\n if query_embeddings[0].dtype != passage_embeddings[0].dtype:\n raise ValueError('Queries and passages must have the same dtype')\n if output_dtype is None:\n output_dtype = query_embeddings[0].dtype\n scores: List[torch.Tensor] = []\n for i in range(0, len(query_embeddings), batch_size):\n batch_scores: List[torch.Tensor] = []\n batch_queries = torch.nn.utils.rnn.pad_sequence(query_embeddings[i:i + batch_size], batch_first=True, padding_value=0)\n for j in range(0, len(passage_embeddings), batch_size):\n batch_passages = torch.nn.utils.rnn.pad_sequence(passage_embeddings[j:j + batch_size], batch_first=True, padding_value=0)\n batch_scores.append(torch.einsum('bnd,csd->bcns', batch_queries, batch_passages).max(dim=3)[0].sum(dim=2))\n scores.append(torch.cat(batch_scores, dim=1).to(output_dtype).to(output_device))\n return torch.cat(scores, dim=0)", "docstring": "Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector\n query embeddings (`qs`) and passage embeddings (`ps`). 
For ColQwen2, a passage is the\n image of a document page.\n\n Because the embedding tensors are multi-vector and can thus have different shapes, they\n should be fed as:\n (1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim)\n (2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually\n obtained by padding the list of tensors.\n\nArgs:\n query_embeddings (`Union[torch.Tensor, List[torch.Tensor]`): Query embeddings.\n passage_embeddings (`Union[torch.Tensor, List[torch.Tensor]`): Passage embeddings.\n batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores.\n output_dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): The dtype of the output tensor.\n If `None`, the dtype of the input embeddings is used.\n output_device (`torch.device` or `str`, *optional*, defaults to \"cpu\"): The device of the output tensor.\n\nReturns:\n `torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score\n tensor is saved on the \"cpu\" device.", "source": "github_repos"} -{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, training: bool=False) -> TFDPRQuestionEncoderOutput | Tuple[tf.Tensor, ...]:\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')\n elif input_ids is not None:\n input_shape = shape_list(input_ids)\n elif inputs_embeds is not None:\n input_shape = shape_list(inputs_embeds)[:-1]\n else:\n raise ValueError('You have to specify either input_ids or inputs_embeds')\n if attention_mask is None:\n attention_mask = tf.ones(input_shape, dtype=tf.dtypes.int32) if input_ids is None else input_ids != self.config.pad_token_id\n if token_type_ids is None:\n token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32)\n outputs = self.question_encoder(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n if not return_dict:\n return outputs[1:]\n return TFDPRQuestionEncoderOutput(pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Return:\n\nExample:\n ```python\n >>> from transformers import TFDPRQuestionEncoder, DPRQuestionEncoderTokenizer\n\n >>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(\"facebook/dpr-question_encoder-single-nq-base\")\n >>> model = TFDPRQuestionEncoder.from_pretrained(\"facebook/dpr-question_encoder-single-nq-base\", from_pt=True)\n >>> input_ids = tokenizer(\"Hello, is my dog cute ?\", return_tensors=\"tf\")[\"input_ids\"]\n >>> embeddings = model(input_ids).pooler_output\n ```", "source": "github_repos"} -{"code": "def scale_translation(self, trans_scale_factor: float) -> Rigid:\n return self.apply_trans_fn(lambda t: t * trans_scale_factor)", "docstring": "Scales the translation by a constant factor.\n\nArgs:\n trans_scale_factor:\n The constant factor\n\nReturns:\n A transformation object with a scaled translation.", "source": "github_repos"} -{"code": "def _tokenize(self, text, **kwargs):\n return self.sp_model.encode(text, out_type=str)", "docstring": 
"Args:\n text: TextInput\n Returns a tokenized string. The Gemma tokenizer never adds a prefix space.", "source": "github_repos"} -{"code": "def bessel_i1(x, name=None):\n with ops.name_scope(name, 'bessel_i1', [x]):\n return gen_special_math_ops.bessel_i1(x)", "docstring": "Computes the Bessel i1 function of `x` element-wise.\n\n Modified Bessel function of order 1.\n\n It is preferable to use the numerically stabler function `i1e(x)` instead.\n\n >>> tf.math.special.bessel_i1([-1., -0.5, 0.5, 1.]).numpy()\n array([-0.5651591 , -0.25789431, 0.25789431, 0.5651591 ], dtype=float32)\n\nArgs:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n @compatibility(scipy)\n Equivalent to scipy.special.i1\n @end_compatibility", "source": "github_repos"} -{"code": "def take_bug_report(self, test_name=None, begin_time=None, timeout=300, destination=None):\n prefix = DEFAULT_BUG_REPORT_NAME\n if test_name:\n prefix = '%s,%s' % (DEFAULT_BUG_REPORT_NAME, test_name)\n if begin_time is None:\n begin_time = mobly_logger.get_log_file_timestamp()\n new_br = True\n try:\n stdout = self.adb.shell('bugreportz -v').decode('utf-8')\n if 'not found' in stdout:\n new_br = False\n except adb.AdbError:\n new_br = False\n if destination is None:\n destination = os.path.join(self.log_path, 'BugReports')\n br_path = utils.abs_path(destination)\n utils.create_dir(br_path)\n filename = self.generate_filename(prefix, str(begin_time), 'txt')\n if new_br:\n filename = filename.replace('.txt', '.zip')\n full_out_path = os.path.join(br_path, filename)\n self.wait_for_boot_completion()\n self.log.debug('Start taking bugreport.')\n if new_br:\n out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')\n if not out.startswith('OK'):\n raise DeviceError(self, 'Failed to take bugreport: %s' % out)\n br_out_path = out.split(':')[1].strip()\n self.adb.pull([br_out_path, full_out_path])\n self.adb.shell(['rm', br_out_path])\n else:\n self.adb.bugreport(' > \"%s\"' % full_out_path, shell=True, timeout=timeout)\n self.log.debug('Bugreport taken at %s.', full_out_path)\n return full_out_path", "docstring": "Takes a bug report on the device and stores it in a file.\n\nArgs:\n test_name: Name of the test method that triggered this bug report.\n begin_time: Timestamp of when the test started. If not set, then\n this will default to the current time.\n timeout: float, the number of seconds to wait for bugreport to\n complete, default is 5min.\n destination: string, path to the directory where the bugreport\n should be saved.\n\nReturns:\n A string that is the absolute path to the bug report on the host.", "source": "github_repos"} -{"code": "def manual_variable_initialization(value):\n global _MANUAL_VAR_INIT\n _MANUAL_VAR_INIT = value", "docstring": "Sets the manual variable initialization flag.\n\n This boolean flag determines whether\n variables should be initialized\n as they are instantiated (default), or if\n the user should handle the initialization\n (e.g. 
via `tf.compat.v1.initialize_all_variables()`).\n\nArgs:\n value: Python boolean.", "source": "github_repos"} -{"code": "def input_node_from_schema(schema: Schema, same_sampling_as: Optional[EventSetNode]=None, name: Optional[str]=None) -> EventSetNode:\n return input_node(features=schema.features, indexes=schema.indexes, is_unix_timestamp=schema.is_unix_timestamp, same_sampling_as=same_sampling_as, name=name)", "docstring": "Creates an input [`EventSetNode`][temporian.EventSetNode] from a schema.\n\n Usage example:\n\n ```python\n >>> # Create two nodes with the same schema.\n >>> a = tp.input_node(features=[(\"f1\", tp.float64), (\"f2\", tp.str_)])\n >>> b = tp.input_node_from_schema(a.schema)\n\n ```\n\nArgs:\n schema: Schema of the node.\n same_sampling_as: If set, the created EventSetNode is guaranteed to have the\n same sampling as same_sampling_as`. In this case, `indexes` and\n `is_unix_timestamp` should not be provided. Some operators require\n for input EventSetNodes to have the same sampling.\n name: Name for the EventSetNode.\n\nReturns:\n EventSetNode with the given specifications.", "source": "github_repos"} -{"code": "def row_splits(self):\n return self._row_splits", "docstring": "Returns the row-split indices for this row partition.\n\n `row_splits` specifies where the values for each row begin and end.\n In particular, the values for row `i` are stored in the slice\n `values[row_splits[i]:row_splits[i+1]]`.\n\nReturns:\n A 1-D integer `Tensor` with shape `[self.nrows+1]`.\n The returned tensor is non-empty, and is sorted in ascending order.\n `self.row_splits()[0] == 0`.\n `self.row_splits()[-1] == self.nvals()`.", "source": "github_repos"} -{"code": "def _has_attr(self, node, obj, attr):\n if isinstance(obj, abstract.AMBIGUOUS_OR_EMPTY):\n return (node, None)\n if not isinstance(attr, abstract.PythonConstant) or not isinstance(attr.pyval, str):\n return (node, None)\n node, ret = self.ctx.attribute_handler.get_attribute(node, obj, attr.pyval)\n return (node, ret is not None)", "docstring": "Check if the object has attribute attr.\n\nArgs:\n node: The given node.\n obj: A BaseValue, generally the left hand side of a hasattr() call.\n attr: A BaseValue, generally the right hand side of a hasattr() call.\n\nReturns:\n (node, result) where result = True if the object has attribute attr, False\n if it does not, and None if it is ambiguous.", "source": "github_repos"} -{"code": "def check_compatibility(self):\n usr_keys = list(self.usr_config.keys())\n for k in self.usr_config.keys():\n if k not in usr_keys:\n err_msg = '[Error] Required config not found in user config.'\n err_msg += '(required = %s, ' % str(k)\n err_msg += 'user configs = %s)' % str(usr_keys)\n logging.error(err_msg)\n self.error_msg.append(err_msg)\n self.failures.append([k, err_msg])\n return False\n overall_status = True\n for config_name, spec in self.usr_config.items():\n temp_status = True\n in_required = config_name in list(self.required.keys())\n in_optional = config_name in list(self.optional.keys())\n in_unsupported = config_name in list(self.unsupported.keys())\n in_dependency = config_name in list(self.dependency.keys())\n if not (in_required or in_optional or in_unsupported or in_dependency):\n warn_msg = '[Error] User config not defined in config file.'\n warn_msg += '(user config = %s)' % str(config_name)\n logging.warning(warn_msg)\n self.warning_msg.append(warn_msg)\n self.failures.append([config_name, warn_msg])\n temp_status = False\n else:\n if in_unsupported:\n if self.in_range(spec, 
self.unsupported[config_name]):\n err_msg = '[Error] User config is unsupported. It is '\n err_msg += \"defined under 'Unsupported' section in the config file.\"\n err_msg += ' (config = %s, spec = %s)' % (config_name, str(spec))\n logging.error(err_msg)\n self.error_msg.append(err_msg)\n self.failures.append([config_name, err_msg])\n temp_status = False\n if in_required:\n if not self.in_range(spec, self.required[config_name]):\n err_msg = '[Error] User config cannot be supported. It is not in '\n err_msg += \"the supported range as defined in the 'Required' \"\n err_msg += 'section. (config = %s, ' % config_name\n err_msg += 'spec = %s)' % str(spec)\n logging.error(err_msg)\n self.error_msg.append(err_msg)\n self.failures.append([config_name, err_msg])\n temp_status = False\n if in_optional:\n if not self.in_range(spec, self.optional[config_name]):\n err_msg = '[Error] User config cannot be supported. It is not in '\n err_msg += \"the supported range as defined in the 'Optional' \"\n err_msg += 'section. (config = %s, ' % config_name\n err_msg += 'spec = %s)' % str(spec)\n logging.error(err_msg)\n self.error_msg.append(err_msg)\n self.failures.append([config_name, err_msg])\n temp_status = False\n if in_dependency:\n dep_list = self.dependency[config_name]\n if dep_list:\n for rule in dep_list:\n cfg = rule[0]\n cfg_req = rule[1]\n dep = rule[2]\n dep_req = rule[3]\n try:\n cfg_name = self.usr_config[cfg]\n dep_name = self.usr_config[dep]\n cfg_status = self.in_range(cfg_name, cfg_req)\n dep_status = self.in_range(dep_name, dep_req)\n if cfg_status:\n if not dep_status:\n err_msg = '[Error] User config has a dependency that cannot'\n err_msg += ' be supported. '\n err_msg += \"'%s' has a dependency on \" % str(config_name)\n err_msg += \"'%s'.\" % str(dep)\n logging.error(err_msg)\n self.error_msg.append(err_msg)\n self.failures.append([config_name, err_msg])\n temp_status = False\n except KeyError:\n err_msg = '[Error] Dependency is missing from `Required`. '\n err_msg += '(config = %s, dep = %s)' % (cfg, dep)\n logging.error(err_msg)\n self.error_msg.append(err_msg)\n self.failures.append([config_name, err_msg])\n temp_status = False\n if temp_status:\n self.successes.append([config_name, spec])\n else:\n overall_status = False\n return overall_status", "docstring": "Checks version and dependency compatibility for a given configuration.\n\n `check_compatibility` immediately returns with `False` (or failure status)\n if any child process or checks fail. 
For error and warning messages, either\n print `self.(error_msg|warning_msg)` or call `_print` function.\n\nReturns:\n Boolean that is a status of the compatibility check result.", "source": "github_repos"} -{"code": "def _get_argspec_for_partial(obj):\n n_prune_args = len(obj.args)\n partial_keywords = obj.keywords or {}\n args, varargs, keywords, defaults = getargspec(obj.func)\n args = args[n_prune_args:]\n no_default = object()\n all_defaults = [no_default] * len(args)\n if defaults:\n all_defaults[-len(defaults):] = defaults\n for kw, default in iter(partial_keywords.items()):\n if kw in args:\n idx = args.index(kw)\n all_defaults[idx] = default\n elif not keywords:\n raise ValueError(f'{obj} does not have a **kwargs parameter, but contains an unknown partial keyword {kw}.')\n first_default = next((idx for idx, x in enumerate(all_defaults) if x is not no_default), None)\n if first_default is None:\n return ArgSpec(args, varargs, keywords, None)\n invalid_default_values = [args[i] for i, j in enumerate(all_defaults) if j is no_default and i > first_default]\n if invalid_default_values:\n raise ValueError(f'{obj} has some keyword-only arguments, which are not supported: {invalid_default_values}.')\n return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))", "docstring": "Implements `getargspec` for `functools.partial` objects.\n\nArgs:\n obj: The `functools.partial` object\n\nReturns:\n An `inspect.ArgSpec`\n\nRaises:\n ValueError: When callable's signature can not be expressed with\n ArgSpec.", "source": "github_repos"} -{"code": "def __init__(self, backend: str='quanto', nbits: Optional[int]=4, axis_key: Optional[int]=0, axis_value: Optional[int]=0, q_group_size: Optional[int]=64, residual_length: Optional[int]=128, compute_dtype: Optional[torch.dtype]=torch.float16, device: Optional[str]='cpu'):\n self.backend = backend\n self.nbits = nbits\n self.axis_key = axis_key\n self.axis_value = axis_value\n self.q_group_size = q_group_size\n self.residual_length = residual_length\n self.compute_dtype = compute_dtype\n self.device = device", "docstring": "Configuration class for quantized cache settings.\n\nAttributes:\n backend (`str`, *optional*, defaults to `\"quanto\"`):\n Backend to use when performing quantization, Can be one of [`quanto`, `HQQ`]\n nbits (`Optional[int]`, *optional*, defaults to 4):\n Number of bits, can be 2 or 4 for the `quanto` backend and one of [1, 2, 3, 4, 8] for the `HQQ` backend. Defaults to 2.\n axis_key (`int`, *optional*, defaults to 0):\n Axis over which to perform grouping for the key tensors. Can be [0, -1] for `quanto` backend and [0, 1] for `HQQ` backend.\n axis_value (`int`, *optional*, defaults to 0):\n Axis over which to perform grouping for the value tensors. Can be [0, -1] for `quanto` backend and [0, 1] for `HQQ` backend.\n q_group_size (`Optional[int]`, *optional*, defaults to 64):\n Size of the quantization group, should be a divisor of the model's hidden dimension.\n Defaults to 64.\n residual_length (`Optional[int]`, *optional*, defaults to 128):\n Length of the residual cache which will always be stored in original precision.\n Defaults to 128.\n compute_dtype (`torch.dtype`, *optional*, defaults to `torch.float16`):\n The default dtype used for computations in the model. 
Keys and Values will be cast to this dtype after dequantization.\n device (`str`, *optional*, defaults to `\"cpu\"`):\n Device on which to perform computations, should be same as the model's device.", "source": "github_repos"} -{"code": "def assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, left_fraction, left_result, right_fraction, right_result, stats, start_position=None, stop_position=None):\n assert right_fraction > left_fraction\n if right_fraction - left_fraction < 0.001:\n return\n middle_fraction = (left_fraction + right_fraction) / 2\n if left_result is None:\n left_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, left_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)\n if right_result is None:\n right_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, right_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)\n middle_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, middle_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)\n if middle_result[1] != -1:\n stats.successful_fractions.append(middle_fraction)\n if middle_result[1] > 0:\n stats.non_trivial_fractions.append(middle_fraction)\n if left_result[0] != middle_result[0]:\n assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, left_fraction, left_result, middle_fraction, middle_result, stats)\n if right_fraction == 1.0 or middle_result[0] != right_result[0]:\n assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, middle_fraction, middle_result, right_fraction, right_result, stats)", "docstring": "Performs dynamic work rebalancing for fractions within a given range.\n\n Asserts that given a start position, a source can be split at every\n interesting fraction (halfway between two fractions that differ by at\n least one item) and the results are consistent if a split succeeds.\n\nArgs:\n source: source to perform dynamic splitting on.\n expected_items: total set of items expected when reading the source.\n num_items_to_read_before_split: number of items to read before splitting.\n left_fraction: left fraction for binary splitting.\n left_result: result received by splitting at left fraction.\n right_fraction: right fraction for binary splitting.\n right_result: result received by splitting at right fraction.\n stats: a ``SplitFractionStatistics`` for storing results.", "source": "github_repos"} -{"code": "def _validate_signature_def_map(self, signature_def_map):\n for signature_def_key in signature_def_map:\n signature_def = signature_def_map[signature_def_key]\n inputs = signature_def.inputs\n outputs = signature_def.outputs\n for inputs_key in inputs:\n self._validate_tensor_info(inputs[inputs_key])\n for outputs_key in outputs:\n self._validate_tensor_info(outputs[outputs_key])\n if constants.INIT_OP_SIGNATURE_KEY in signature_def_map:\n raise KeyError(f'SignatureDef map key \"{constants.INIT_OP_SIGNATURE_KEY}\" is reserved for initialization. Please use a different key.')\n if constants.TRAIN_OP_SIGNATURE_KEY in signature_def_map:\n raise KeyError(f'SignatureDef map key \"{constants.TRAIN_OP_SIGNATURE_KEY}\" is reserved for the train op. 
Please use a different key.')", "docstring": "Validates the `SignatureDef` entries in the signature def map.\n\n Validation of entries in the signature def map includes ensuring that the\n `name` and `dtype` fields of the TensorInfo protos of the `inputs` and\n `outputs` of each `SignatureDef` are populated. Also ensures that reserved\n SignatureDef keys for the initialization and train ops are not used.\n\nArgs:\n signature_def_map: The map of signature defs to be validated.\n\nRaises:\n AssertionError: If a TensorInfo is not valid.\n KeyError: If a reserved signature key is used in the map.", "source": "github_repos"} -{"code": "def to_channel_dimension_format(image: np.ndarray, channel_dim: Union[ChannelDimension, str], input_channel_dim: Optional[Union[ChannelDimension, str]]=None) -> np.ndarray:\n if not isinstance(image, np.ndarray):\n raise ValueError(f'Input image must be of type np.ndarray, got {type(image)}')\n if input_channel_dim is None:\n input_channel_dim = infer_channel_dimension_format(image)\n target_channel_dim = ChannelDimension(channel_dim)\n if input_channel_dim == target_channel_dim:\n return image\n if target_channel_dim == ChannelDimension.FIRST:\n image = image.transpose((2, 0, 1))\n elif target_channel_dim == ChannelDimension.LAST:\n image = image.transpose((1, 2, 0))\n else:\n raise ValueError('Unsupported channel dimension format: {}'.format(channel_dim))\n return image", "docstring": "Converts `image` to the channel dimension format specified by `channel_dim`.\n\nArgs:\n image (`numpy.ndarray`):\n The image to have its channel dimension set.\n channel_dim (`ChannelDimension`):\n The channel dimension format to use.\n input_channel_dim (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred from the input image.\n\nReturns:\n `np.ndarray`:\n The image with the channel dimension set to `channel_dim`.", "source": "github_repos"} -{"code": "def call_replica_local_fn(fn, *args, **kwargs):\n strategy = None\n if 'strategy' in kwargs:\n strategy = kwargs.pop('strategy')\n elif distribute_lib.has_strategy():\n strategy = distribute_lib.get_strategy()\n is_tpu = backend.is_tpu_strategy(strategy)\n if not is_tpu and strategy and distribute_lib.in_cross_replica_context():\n with strategy.scope():\n return strategy.extended.call_for_each_replica(fn, args, kwargs)\n return fn(*args, **kwargs)", "docstring": "Call a function that uses replica-local variables.\n\n This function correctly handles calling `fn` in a cross-replica\n context.\n\nArgs:\n fn: The function to call.\n *args: Positional arguments to the `fn`.\n **kwargs: Keyword argument to `fn`.\n\nReturns:\n The result of calling `fn`.", "source": "github_repos"} -{"code": "def hsv_to_rgb(images, data_format=None):\n if any_symbolic_tensors((images,)):\n return HSVToRGB(data_format=data_format).symbolic_call(images)\n return backend.image.hsv_to_rgb(images, data_format=data_format)", "docstring": "Convert HSV images to RGB.\n\n `images` must be of float dtype, and the output is only well defined if the\n values in `images` are in `[0, 1]`.\n\nArgs:\n images: Input image or batch of images. 
Must be 3D or 4D.\n data_format: A string specifying the data format of the input tensor.\n It can be either `\"channels_last\"` or `\"channels_first\"`.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)`, while `\"channels_first\"`\n corresponds to inputs with shape `(batch, channels, height, width)`.\n If not specified, the value will default to\n `keras.config.image_data_format`.\n\nReturns:\n RGB image or batch of RGB images.\n\nExample:\n >>> import numpy as np\n >>> from keras import ops\n >>> x = np.random.random((2, 4, 4, 3))\n >>> y = ops.image.hsv_to_rgb(x)\n >>> y.shape\n (2, 4, 4, 3)\n\n >>> x = np.random.random((4, 4, 3)) # Single HSV image\n >>> y = ops.image.hsv_to_rgb(x)\n >>> y.shape\n (4, 4, 3)\n\n >>> x = np.random.random((2, 3, 4, 4))\n >>> y = ops.image.hsv_to_rgb(x, data_format=\"channels_first\")\n >>> y.shape\n (2, 3, 4, 4)", "source": "github_repos"} -{"code": "def get_v2_constants(module: Any) -> Sequence[str]:\n constants_v2 = []\n tensorflow_constants_attr = API_ATTRS[TENSORFLOW_API_NAME].constants\n if hasattr(module, tensorflow_constants_attr):\n constants_v2.extend(getattr(module, tensorflow_constants_attr))\n return constants_v2", "docstring": "Get a list of TF 2.0 constants in this module.\n\nArgs:\n module: TensorFlow module.\n\nReturns:\n List of all API constants under the given module.", "source": "github_repos"} -{"code": "def byte_swap_string_content(buffer, from_endiness, to_endiness):\n num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness)\n string_content = bytearray(buffer.data[4 * (num_of_strings + 2):])\n prefix_data = b''.join([int.from_bytes(buffer.data[i:i + 4], from_endiness).to_bytes(4, to_endiness) for i in range(0, (num_of_strings + 1) * 4 + 1, 4)])\n buffer.data = prefix_data + string_content", "docstring": "Helper function for byte-swapping the string buffer.\n\nArgs:\n buffer: TFLite string buffer of from_endiness format.\n from_endiness: The original endianness format of the string buffer.\n to_endiness: The destined endianness format of the string buffer.", "source": "github_repos"} -{"code": "def init_variable(v, init, name='init'):\n with ops.name_scope(None, v.op.name + '/', [v, init]):\n with ops.name_scope(name) as scope:\n with ops.colocate_with(v):\n if callable(init):\n assert v.get_shape().is_fully_defined(), 'Variable shape unknown.'\n value = init(v.get_shape().as_list(), v.dtype.base_dtype)\n value = ops.convert_to_tensor(value, name='value')\n return gen_state_ops.assign(v, value, name=scope)\n else:\n init = ops.convert_to_tensor(init, name='init')\n return gen_state_ops.assign(v, init, name=scope)", "docstring": "Initializes variable with \"init\".\n\n This op does the following:\n if init is a Tensor, v = init\n if callable(init): v = init(VariableShape(v), v.dtype)\n\nArgs:\n v: Variable to initialize\n init: Tensor to assign to v,\n Or an object convertible to Tensor e.g. nparray,\n Or an Initializer that generates a tensor given the shape and type of v.\n An \"Initializer\" is a callable that returns a tensor that \"v\" should be\n set to. 
It will be called as init(shape, dtype).\n name: Optional name for the op.\n\nReturns:\n The operation that initializes v.", "source": "github_repos"} -{"code": "def handle_malformed_config(error: MalformedConfigError) -> ResponseReturnValue:\n return (DQMResponse(name='MalformedConfigError', description=str(error), code=400), 400)", "docstring": "DQM Malformed Config Response.\n\nArgs:\n * error: Config error\n\nReturns:\n * DQMResponse for the error with a 400 status code", "source": "github_repos"} -{"code": "def reducer_override(self, obj):\n t = type(obj)\n try:\n is_anyclass = issubclass(t, type)\n except TypeError:\n is_anyclass = False\n if is_anyclass:\n return _class_reduce(obj)\n elif isinstance(obj, types.FunctionType):\n return self._function_reduce(obj)\n else:\n return NotImplemented", "docstring": "Type-agnostic reducing callback for function and classes.\n\n For performance reasons, subclasses of the C `pickle.Pickler` class\n cannot register custom reducers for functions and classes in the\n dispatch_table attribute. Reducers for such types must instead\n implemented via the special `reducer_override` method.\n\n Note that this method will be called for any object except a few\n builtin-types (int, lists, dicts etc.), which differs from reducers\n in the Pickler's dispatch_table, each of them being invoked for\n objects of a specific type only.\n\n This property comes in handy for classes: although most classes are\n instances of the ``type`` metaclass, some of them can be instances\n of other custom metaclasses (such as enum.EnumMeta for example). In\n particular, the metaclass will likely not be known in advance, and\n thus cannot be special-cased using an entry in the dispatch_table.\n reducer_override, among other things, allows us to register a\n reducer that will be called for any class, independently of its\n type.\n\nNote:\n * reducer_override has the priority over dispatch_table-registered\n reducers.\n * reducer_override can be used to fix other limitations of\n cloudpickle for other types that suffered from type-specific\n reducers, such as Exceptions. See\n https://github.com/cloudpipe/cloudpickle/issues/248", "source": "github_repos"} -{"code": "def field(*, validate: Optional[Callable[[_In], _OutT]]=None, **kwargs: Any) -> dataclasses.Field[_OutT]:\n if validate is None:\n return dataclasses.field(**kwargs)\n else:\n field_ = _Field(validate=validate, field_kwargs=kwargs)\n return typing.cast(dataclasses.Field, field_)", "docstring": "Like `dataclasses.field`, but allow `validator`.\n\nArgs:\n validate: A callable `(x) -> x` called each time the variable is assigned.\n **kwargs: Kwargs forwarded to `dataclasses.field`\n\nReturns:\n The field.", "source": "github_repos"} -{"code": "def __init__(self, decorator: Callable[[_GenericCallable], _GenericCallable]):\n ...", "docstring": "A type annotation for decorators that do not change signatures.\n\n This is a stand-in for using `Callable[[T], T]` to represent a decorator.\n\n Given a decorator function, which takes in a callable and returns a callable\n with the same signature, apply this class as a decorator to that function.\n This can also be used for decorator factories.\n\nExample:\n Plain decorator (decorator matches Callable[[T], T]):\n\n >>> @pytype_extensions.Decorator\n ... def MyDecorator(func):\n ... def wrapper(...):\n ... ...\n ... return wrapper\n\n Decorator factory (factory matches Callable[..., Callable[[T], T]]):\n\n >>> def MyDecoratorFactory(foo: int) -> pytype_extensions.Decorator:\n ... 
@pytype_extensions.Decorator\n ... def MyDecorator(func):\n ... def Wrapper(*args, **kwargs):\n ... return func(foo, *args, **kwargs)\n ... return Wrapper\n ... return MyDecorator\n\n Note for the above example: the return type annotation (first line) is the\n most important one; it indicates to callers that MyDecoratorFactory is\n returning a decorator. The \"@pytype_extensions.Decorator\" annotation (second\n line) indicates to pytype that MyDecorator is a Decorator; without it, you\n would need to add \"pytype: disable=bad-return-type\" on the final line.\n\n This class only exists at build time, for typechecking. At runtime, the\n 'Decorator' member of this module is a simple identity function (see below).\n\n More information: pytype-decorators\n\n Shortlink: pytype_extensions.Decorator", "source": "github_repos"} -{"code": "def forward(self, hidden_states):\n hidden_states = hidden_states.transpose(1, 2)\n hidden_states = self.pointwise_conv1(hidden_states)\n hidden_states = nn.functional.glu(hidden_states, dim=1)\n hidden_states = self.depthwise_conv(hidden_states)\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states * torch.sigmoid(hidden_states)\n hidden_states = self.pointwise_conv2(hidden_states)\n return hidden_states.transpose(1, 2)", "docstring": "Compute convolution module.\n\nArgs:\n hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.\n\nReturns:\n `torch.Tensor`: Output tensor of shape `(batch, time, channels)`.", "source": "github_repos"} -{"code": "def sym_has(self, path: Union[utils.KeyPath, str, int]) -> bool:\n return utils.KeyPath.from_value(path).exists(self)", "docstring": "Returns True if a path exists in the sub-tree.\n\nArgs:\n path: A KeyPath object or equivalence.\n\nReturns:\n True if the path exists in current sub-tree, otherwise False.", "source": "github_repos"} -{"code": "def float_operation():\n return {'max_depth': 10000, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 1, 'min_occurrence': 0, 'order_by': 'float_ops', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': True, 'select': ['float_ops'], 'step': -1, 'output': 'stdout'}", "docstring": "Options used to profile float operations.\n\n Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/profile_model_architecture.md\n on the caveats of calculating float operations.\n\nReturns:\n A dict of profiling options.", "source": "github_repos"} -{"code": "def export(preprocessor: Union['PreTrainedTokenizer', 'FeatureExtractionMixin', 'ProcessorMixin'], model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: OnnxConfig, opset: int, output: Path, tokenizer: Optional['PreTrainedTokenizer']=None, device: str='cpu') -> Tuple[List[str], List[str]]:\n if not (is_torch_available() or is_tf_available()):\n raise ImportError('Cannot convert because neither PyTorch nor TensorFlow are not installed. 
Please install torch or tensorflow first.')\n if is_tf_available() and isinstance(model, TFPreTrainedModel) and (device == 'cuda'):\n raise RuntimeError('`tf2onnx` does not support export on CUDA device.')\n if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:\n raise ValueError('You cannot provide both a tokenizer and a preprocessor to export the model.')\n if tokenizer is not None:\n warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)\n logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')\n preprocessor = tokenizer\n if is_torch_available():\n from ..utils import get_torch_version\n if not config.is_torch_support_available:\n logger.warning(f'Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version}, got: {get_torch_version()}')\n if is_torch_available() and issubclass(type(model), PreTrainedModel):\n return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device)\n elif is_tf_available() and issubclass(type(model), TFPreTrainedModel):\n return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer)", "docstring": "Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR)\n\nArgs:\n preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]):\n The preprocessor used for encoding the data.\n model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):\n The model to export.\n config ([`~onnx.config.OnnxConfig`]):\n The ONNX configuration associated with the exported model.\n opset (`int`):\n The version of the ONNX operator set to use.\n output (`Path`):\n Directory to store the exported ONNX model.\n device (`str`, *optional*, defaults to `cpu`):\n The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for\n export on CUDA devices.\n\nReturns:\n `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from\n the ONNX configuration.", "source": "github_repos"} -{"code": "def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n return normalize(image, mean=mean, std=std, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Normalize an image. image = (image - image_mean) / image_std.\n\nArgs:\n image (`np.ndarray`):\n Image to normalize.\n mean (`float` or `Iterable[float]`):\n Image mean to use for normalization.\n std (`float` or `Iterable[float]`):\n Image standard deviation to use for normalization.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n\nReturns:\n `np.ndarray`: The normalized image.", "source": "github_repos"} -{"code": "def experimental_local_results(self, value):\n return super(CentralStorageStrategy, self).experimental_local_results(value)", "docstring": "Returns the list of all local per-replica values contained in `value`.\n\n In `CentralStorageStrategy` there is a single worker so the value returned\n will be all the values on that worker.\n\nArgs:\n value: A value returned by `run()`, `extended.call_for_each_replica()`,\n or a variable created in `scope`.\n\nReturns:\n A tuple of values contained in `value`. If `value` represents a single\n value, this returns `(value,).`", "source": "github_repos"} -{"code": "def process(self, feed_item):\n if not feed_item.get(FieldMap.CAMPAIGN_CREATIVE_ASSOCIATION_ID, None):\n campaign = self.campaign_dao.get(feed_item, required=True)\n creative = self.creative_dao.get(feed_item, required=True)\n if campaign and creative:\n if campaign:\n feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']\n feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']\n association = {'creativeId': str(creative['id'])}\n result = self._api().insert(profileId=self.profile_id, campaignId=str(campaign['id']), body=association).execute()\n feed_item[FieldMap.CAMPAIGN_CREATIVE_ASSOCIATION_ID] = '%s|%s' % (campaign['id'], creative['id'])\n return result\n return None", "docstring": "Processes a feed item by creating the creative association in DCM.\n\nArgs:\n feed_item: Feed item representing the creative association from the\n Bulkdozer feed.\n\nReturns:\n The newly created object from DCM.", "source": "github_repos"} -{"code": "def in_top_k_v2(targets, predictions, k, name=None):\n return in_top_k(predictions, targets, k, name)", "docstring": "Outputs whether the targets are in the top `K` predictions.\n\n This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the\n prediction for the target class is finite (not inf, -inf, or nan) and among\n the top `k` predictions among all predictions for example `i`.\n `predictions` does not have to be normalized.\n\n Note that the behavior of `InTopK` differs from the `TopK` op in its handling\n of ties; if multiple classes have the same prediction value and straddle the\n top-`k` boundary, all of those classes are considered to be in the top `k`.\n\n >>> target = tf.constant([0, 1, 3])\n >>> pred = tf.constant([\n ... [1.2, -0.3, 2.8, 5.2],\n ... [0.1, 0.0, 0.0, 0.0],\n ... [0.0, 0.5, 0.3, 0.3]],\n ... dtype=tf.float32)\n >>> print(tf.math.in_top_k(target, pred, 2))\n tf.Tensor([False True True], shape=(3,), dtype=bool)\n\nArgs:\n targets: A `batch_size` vector of class ids. Must be `int32` or `int64`.\n predictions: A `batch_size` x `classes` tensor of type `float32`.\n k: An `int`. The parameter to specify search space.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` with the same shape of `targets` with type of `bool`. 
Each\n element specifies if the target falls into top-k predictions.", "source": "github_repos"} -{"code": "def call(self, inputs, **kwargs):\n return inputs", "docstring": "This is where the layer's logic lives.\n\nArgs:\n inputs: Input tensor, or list/tuple of input tensors.\n **kwargs: Additional keyword arguments.\n\nReturns:\n A tensor or list/tuple of tensors.", "source": "github_repos"} -{"code": "def is_sparse(tensor):\n spec = getattr(tensor, '_type_spec', None)\n if spec is not None:\n return isinstance(spec, sparse_tensor.SparseTensorSpec)\n return isinstance(tensor, sparse_tensor.SparseTensor)", "docstring": "Returns whether a tensor is a sparse tensor.\n\nArgs:\n tensor: A tensor instance.\n\nReturns:\n A boolean.\n\nExample:\n >>> a = tf.keras.backend.placeholder((2, 2), sparse=False)\n >>> print(tf.keras.backend.is_sparse(a))\n False\n >>> b = tf.keras.backend.placeholder((2, 2), sparse=True)\n >>> print(tf.keras.backend.is_sparse(b))\n True", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n hidden_states = self.input_layernorm(hidden_states)\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if use_cache:\n outputs += (present_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, sequence_length)` where padding elements are indicated by 0.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence.\n position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):\n Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,\n with `head_dim` being the embedding dimension of each attention head.\n kwargs (`dict`, *optional*):\n Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code\n into the model", "source": "github_repos"} -{"code": "def __init__(self, model_name: str, columns: list[str], title: Optional[str]=None, task_type: str=DEFAULT_TASK_TYPE, project: Optional[str]=None, location: Optional[str]=None, credentials: Optional[Credentials]=None, **kwargs):\n self.model_name = model_name\n self.project = project\n self.location = location\n self.credentials = credentials\n self.title = title\n self.task_type = task_type\n super().__init__(columns=columns, **kwargs)", "docstring": "Embedding Config for Vertex AI Text Embedding models following\n https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings # pylint: disable=line-too-long\n Text Embeddings are generated for a batch of text using the Vertex AI SDK.\n Embeddings are returned in a list for each text in the batch. Look at\n https://cloud.google.com/vertex-ai/docs/generative-ai/learn/model-versioning#stable-versions-available.md # pylint: disable=line-too-long\n for more information on model versions and lifecycle.\n\nArgs:\n model_name: The name of the Vertex AI Text Embedding model.\n columns: The columns containing the text to be embedded.\n task_type: The downstream task for the embeddings. Valid values are\n RETRIEVAL_QUERY, RETRIEVAL_DOCUMENT, SEMANTIC_SIMILARITY,\n CLASSIFICATION, CLUSTERING. For more information on the task type,\n look at https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings # pylint: disable=line-too-long\n title: Identifier of the text content.\n project: The default GCP project for API calls.\n location: The default location for API calls.\n credentials: Custom credentials for API calls.\n Defaults to environment credentials.", "source": "github_repos"} -{"code": "def ndtri(p, name='ndtri'):\n with ops.name_scope(name, values=[p]):\n p = ops.convert_to_tensor(p, name='p')\n if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n raise TypeError('p.dtype=%s is not handled, see docstring for supported types.' % p.dtype)\n return _ndtri(p)", "docstring": "The inverse of the CDF of the Normal distribution function.\n\n Returns x such that the area under the pdf from minus infinity to x is equal\n to p.\n\n A piece-wise rational approximation is done for the function.\n This is a port of the implementation in netlib.\n\nArgs:\n p: `Tensor` of type `float32`, `float64`.\n name: Python string. 
A name for the operation (default=\"ndtri\").\n\nReturns:\n x: `Tensor` with `dtype=p.dtype`.\n\nRaises:\n TypeError: if `p` is not floating-type.", "source": "github_repos"} -{"code": "def reduce_max(values, index, name='segmented_reduce_max'):\n return _segment_reduce(values, index, 'amax', name)", "docstring": "Computes the maximum over segments.\n\n This operation computes the maximum over segments, with support for:\n\n - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.\n - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise\n maximum of vectors rather than scalars.\n\n Only the middle dimensions [I1, ..., Ik] are reduced by the operation.\n\nArgs:\n values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):\n Tensor containing the values of which the max must be taken segment-wise.\n index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):\n Index defining the segments.\n name (`str`, *optional*, defaults to 'segmented_reduce_sum'):\n Name for the operation. Currently not used\n\nReturns:\n output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the\n output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].", "source": "github_repos"} -{"code": "def close(self):\n if not self.closed:\n self._uploader.finish()\n super().close()", "docstring": "Complete the upload and close this stream.\n\n This method has no effect if the stream is already closed.\n\nRaises:\n Any error encountered by the uploader.", "source": "github_repos"} -{"code": "def __init__(self, learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-07, weight_decay=None, clipnorm=None, clipvalue=None, global_clipnorm=None, use_ema=False, ema_momentum=0.99, ema_overwrite_frequency=None, loss_scale_factor=None, gradient_accumulation_steps=None, name='adagrad', **kwargs):\n super().__init__(learning_rate=learning_rate, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue, global_clipnorm=global_clipnorm, use_ema=use_ema, ema_momentum=ema_momentum, ema_overwrite_frequency=ema_overwrite_frequency, loss_scale_factor=loss_scale_factor, gradient_accumulation_steps=gradient_accumulation_steps, name=name, **kwargs)\n self.initial_accumulator_value = initial_accumulator_value\n self.epsilon = epsilon", "docstring": "Optimizer that implements the Adagrad algorithm.\n\n Adagrad is an optimizer with parameter-specific learning rates,\n which are adapted relative to how frequently a parameter gets\n updated during training. The more updates a parameter receives,\n the smaller the updates.\n\nArgs:\n learning_rate: A float, a\n `keras.optimizers.schedules.LearningRateSchedule` instance, or\n a callable that takes no arguments and returns the actual value to\n use. The learning rate. Defaults to `0.001`. Note that `Adagrad`\n tends to benefit from higher initial learning rate values compared\n to other optimizers. To match the exact form in the original paper,\n use `1.0`.\n initial_accumulator_value: Floating point value. Starting value for the\n accumulators (per-parameter momentum values). 
Must be non-negative.\n epsilon: Small floating point value for maintaining numerical stability.\n {{base_optimizer_keyword_args}}\n\n Reference:\n\n - [Duchi et al., 2011](\n http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).", "source": "github_repos"} -{"code": "def save(value: Any, path: str, *args, **kwargs) -> Any:\n save_handler = flags.get_save_handler() or default_save_handler\n return save_handler(value, path, *args, **kwargs)", "docstring": "Save a symbolic value using the global save handler.\n\n Example::\n\n @pg.members([\n ('x', pg.typing.Any())\n ])\n class A(pg.Object):\n pass\n\n a1 = A(1)\n file = 'my_file.json'\n a1.save(file)\n a2 = pg.load(file)\n assert pg.eq(a1, a2)\n\nArgs:\n value: value to save.\n path: A path string for saving `value`.\n *args: Positional arguments that will be passed through to the global\n save handler.\n **kwargs: Keyword arguments that will be passed through to the global\n save handler.\n\nReturns:\n Return value from the global save handler.\n\nRaises:\n RuntimeError: if global save handler is not set.", "source": "github_repos"} -{"code": "def assertAllGreater(self, a, comparison_target):\n a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)\n a = self._GetNdArray(a)\n self.assertGreater(np.min(a), comparison_target)", "docstring": "Assert element values are all greater than a target value.\n\nArgs:\n a: The numpy `ndarray`, or anything that can be converted into a numpy\n `ndarray` (including Tensor).\n comparison_target: The target value of comparison.", "source": "github_repos"} -{"code": "def get_seed(seed):\n seed, seed2 = random_seed.get_seed(seed)\n if seed is None:\n seed = constant_op.constant(0, dtype=dtypes.int64, name='seed')\n else:\n seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name='seed')\n if seed2 is None:\n seed2 = constant_op.constant(0, dtype=dtypes.int64, name='seed2')\n else:\n with ops.name_scope('seed2') as scope:\n seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)\n seed2 = array_ops.where_v2(math_ops.logical_and(math_ops.equal(seed, 0), math_ops.equal(seed2, 0)), constant_op.constant(2 ** 31 - 1, dtype=dtypes.int64), seed2, name=scope)\n return (seed, seed2)", "docstring": "Returns the local seeds an operation should use given an op-specific seed.\n\n See `random_seed.get_seed` for more details. 
This wrapper adds support for\n the case where `seed` may be a tensor.\n\nArgs:\n seed: An integer or a `tf.int64` scalar tensor.\n\nReturns:\n A tuple of two `tf.int64` scalar tensors that should be used for the local\n seed of the calling dataset.", "source": "github_repos"} -{"code": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, config: PhimoeConfig, past_key_values: Cache):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n text_config = config.get_text_config()\n if getattr(text_config, 'use_sliding_window', True) and text_config.sliding_window is not None:\n if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:\n sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= cache_position.reshape(-1, 1) - text_config.sliding_window\n diagonal_attend_mask.bitwise_or_(sliding_attend_mask)\n causal_mask *= diagonal_attend_mask\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n if attention_mask.shape[-1] > target_length:\n attention_mask = attention_mask[:, :target_length]\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size.\n config (`PhimoeConfig`):\n The model's configuration class\n past_key_values (`Cache`):\n The cache class that is being used currently to generate", "source": "github_repos"} -{"code": "def ndtri(x, name=None):\n with ops.name_scope(name, 'ndtri', [x]):\n return gen_math_ops.ndtri(x)", "docstring": "Compute quantile of Standard Normal.\n\nArgs:\n x: `Tensor` with type `float` or `double`.\n name: A name for the operation (optional).\n\nReturns:\n Inverse error function of `x`.", "source": "github_repos"} -{"code": "def on_predict_batch_end(self, batch, logs=None):\n if self._should_call_predict_batch_hooks:\n self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)", 
"docstring": "Calls the `on_predict_batch_end` methods of its callbacks.\n\nArgs:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. Aggregated metric results up until this batch.", "source": "github_repos"} -{"code": "def _get_token_budget(self, question_tokens, max_length=None):\n return (max_length if max_length is not None else self.model_max_length) - self._question_encoding_cost(question_tokens)", "docstring": "Computes the number of tokens left for the table after tokenizing a question, taking into account the max\n sequence length of the model.\n\nArgs:\n question_tokens (`List[String]`):\n List of question tokens. Returns: `int`: the number of tokens left for the table, given the model max\n length.", "source": "github_repos"} -{"code": "def to_dict(self) -> dict[str, Any]:\n output = copy.deepcopy(self.__dict__)\n output['image_processor_type'] = self.__class__.__name__\n return output", "docstring": "Serializes this instance to a Python dictionary.\n\nReturns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this image processor instance.", "source": "github_repos"} -{"code": "def fit(self, X, y, **kwargs):\n X, y = _validate_data(self, X, y)\n y = self._process_target(y, reset=True)\n model = self._get_model(X, y)\n _check_model(model)\n fit_kwargs = self.fit_kwargs or {}\n fit_kwargs.update(kwargs)\n self.history_ = model.fit(X, y, **fit_kwargs)\n self.model_ = model\n return self", "docstring": "Fit the model.\n\nArgs:\n X: array-like, shape=(n_samples, n_features)\n The input samples.\n y: array-like, shape=(n_samples,) or (n_samples, n_outputs)\n The targets.\n **kwargs: keyword arguments passed to `model.fit`", "source": "github_repos"} -{"code": "def _build_cond(pred, true_graph, false_graph, true_inputs, false_inputs, building_gradient, add_identities: bool=True, prevent_lowering: bool=False, name=None):\n _make_indexed_slices_indices_types_match(_COND, [true_graph, false_graph])\n _check_same_outputs(_COND, [true_graph, false_graph])\n cond_inputs = _make_inputs_match([true_graph, false_graph], [true_inputs, false_inputs])\n if not building_gradient and util.output_all_intermediates():\n true_intermediates = _get_intermediates(true_graph)\n false_intermediates = _get_intermediates(false_graph)\n wrapped_true_intermediates = _wrap_intermediates(true_graph, true_intermediates)\n wrapped_false_intermediates = _wrap_intermediates(false_graph, false_intermediates)\n extra_true_outputs, extra_false_outputs = _make_intermediates_match([true_graph, false_graph], [wrapped_true_intermediates, wrapped_false_intermediates])\n true_graph.outputs.extend(extra_true_outputs)\n false_graph.outputs.extend(extra_false_outputs)\n _check_same_outputs(_COND, [true_graph, false_graph])\n with ops.control_dependencies(list(true_graph.function_captures.control) + list(false_graph.function_captures.control)):\n true_stateful_ops = [op for op in true_graph.get_operations() if _is_op_stateful(op)]\n false_stateful_ops = [op for op in false_graph.get_operations() if _is_op_stateful(op)]\n if true_stateful_ops or false_stateful_ops:\n op_fn = gen_functional_ops._if\n else:\n op_fn = gen_functional_ops.stateless_if\n\n def _make_op(inputs):\n if_op, tensors = util.get_op_and_outputs(op_fn(pred, inputs, [t.dtype for t in true_graph.outputs], util.create_new_tf_function(true_graph), util.create_new_tf_function(false_graph), output_shapes=_get_output_shapes(true_graph.outputs, false_graph.outputs), name=name))\n _copy_handle_data(tensors, true_graph.outputs, 
false_graph.outputs)\n if if_op is not None:\n true_graph.outer_graph = ops.get_default_graph()\n false_graph.outer_graph = ops.get_default_graph()\n if_op._true_graph = true_graph\n if_op._false_graph = false_graph\n util.maybe_set_lowering_attr(if_op, False if prevent_lowering else None)\n util.maybe_propagate_compile_time_consts_in_xla(if_op)\n _set_read_only_resource_inputs_attr(if_op, [true_graph, false_graph])\n if not prevent_lowering:\n if_op.graph.prevent_fetching(if_op)\n return tensors\n tensors = util.run_as_function_for_tape_gradients(_make_op, cond_inputs)\n if add_identities:\n tensors = [array_ops.identity(t) for t in tensors]\n structured_output_specs = _get_compatible_structured_output_specs(true_graph, false_graph)\n return _pack_sequence_as(structured_output_specs, tensors)", "docstring": "Creates an If op from the specified predicate, branch functions and inputs.\n\n Note that this modifies true_graph and false_graph to make the inputs match,\n and to output all intermediates values so they're available for the gradient\n computation.\n\n true_graph and false_graph need not have the same input types, but they must\n have the same output types.\n\nArgs:\n pred: boolean Tensor\n true_graph: FuncGraph\n false_graph: FuncGraph\n true_inputs: a list of Tensors to be passed to true_graph as input.\n false_inputs: a list of Tensors to be passed to false_graph as input.\n building_gradient: Whether this is a gradient If op.\n add_identities: If `True`, adds an identity op for each output of the If op.\n This is useful for pruning, but can be disabled if the caller does not\n want to rely on pruning and wants to avoid the overhead of extra identity\n ops in the graph.\n prevent_lowering: If `True`, prevents the If op from being lowered to\n V1 `Switch` and `Merge` ops.\n name: the name for the If op.\n\nReturns:\n A list of Tensors which are the outputs of the If op. Does not include added\n intermediate outputs.", "source": "github_repos"} -{"code": "def build_info(self):\n if self.is_bootloader:\n self.log.error('Device is in fastboot mode, could not get build info.')\n return\n if self._build_info is None or self._is_rebooting:\n info = {}\n build_info = self.adb.getprops(CACHED_SYSTEM_PROPS)\n for build_info_constant in BuildInfoConstants:\n info[build_info_constant.build_info_key] = build_info.get(build_info_constant.system_prop_key, '')\n self._build_info = info\n return info\n return self._build_info", "docstring": "Gets the build info of this Android device, including build id and type.\n\n This is not available if the device is in bootloader mode.\n\nReturns:\n A dict with the build info of this Android device, or None if the\n device is in bootloader mode.", "source": "github_repos"} -{"code": "def stochastic_cast(t, dtype, seed, alg='auto_select', name=None):\n with ops.name_scope(name, 'stochastic_cast', [t, seed]) as name:\n t = ops.convert_to_tensor(t)\n key, counter, algorithm = random_ops_util.get_key_counter_alg(seed, alg)\n if dtype in allowed_to_types(is_integer=True):\n return gen_stochastic_cast_op.stochastic_cast_to_int(t, key=key, counter=counter, alg=algorithm, Tout=dtype)\n else:\n raise NotImplementedError(f'Stochastic cast to small float {dtype} has not yet been supported.')", "docstring": "Casts input to the desired precision with stochastic rounding.\n\n This means the value of the cast result will be rounded to two of the closest\n values with with a probability proportional to the distance between the number\n and the two closest to the input. 
For example, if a number falls between 2 and\n 3, and is closer to 2 than to 3, it has a higher probability of being rounded\n to 2. On the other hand, if it's closer to 3 than to 2, it has a higher\n probability of being rounded to 3. This is intended to eliminate rounding bias\n introduced by determinisitc rounding methods. If cast to integers, the values\n will saturate if out of range, e.g. 254.8 in floating point will become 127 in\n int8. If inputs are NaN, the results will be zero. Given the same random seed,\n the results will be deterministic, but not otherwise.\n\nArgs:\n t: The input tensor. This is the same as the output shape.\n dtype: The output type, currently int32, int16 and int8 are supported.\n seed: A shape [2] Tensor, the seed to the random number generator. Must have\n dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)\n alg: The RNG algorithm used to generate the random numbers. See\n `tf.random.stateless_uniform` for a detailed explanation.\n name: A name for the operation (optional).\n\nReturns:\n A tensor of the specified data type whose values are rounded to the\n specified precisions with stochastic rounding.", "source": "github_repos"} -{"code": "def save(model, filepath, overwrite, include_optimizer, signatures=None, options=None, save_traces=True):\n if not overwrite and os.path.exists(filepath):\n proceed = ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n if save_traces:\n if save_impl.should_skip_serialization(model):\n saving_utils.raise_model_input_error(model)\n if not include_optimizer:\n orig_optimizer = model.optimizer\n model.optimizer = None\n model._delete_tracking('optimizer')\n with K.deprecated_internal_learning_phase_scope(0):\n with utils.keras_option_scope(save_traces):\n saved_nodes, node_paths = save_lib.save_and_return_nodes(model, filepath, signatures, options)\n metadata = generate_keras_metadata(saved_nodes, node_paths)\n with gfile.GFile(os.path.join(filepath, constants.SAVED_METADATA_PATH), 'wb') as w:\n w.write(metadata.SerializeToString(deterministic=True))\n if not include_optimizer:\n model.optimizer = orig_optimizer", "docstring": "Saves a model as a SavedModel to the filepath.\n\nArgs:\n model: Keras model instance to be saved.\n filepath: String path to save the model.\n overwrite: whether to overwrite the existing filepath.\n include_optimizer: If True, save the model's optimizer state.\n signatures: Signatures to save with the SavedModel. Applicable to the 'tf'\n format only. Please see the `signatures` argument in `tf.saved_model.save`\n for details.\n options: (only applies to SavedModel format) `tf.saved_model.SaveOptions`\n object that specifies options for saving to SavedModel.\n save_traces: (only applies to SavedModel format) When enabled, the\n SavedModel will store the function traces for each layer. This\n can be disabled, so that only the configs of each layer are stored.\n Defaults to `True`. 
Disabling this will decrease serialization time\n and reduce file size, but it requires that all custom layers/models\n implement a `get_config()` method.\n\nRaises:\n ValueError: if the model's inputs have not been defined.", "source": "github_repos"} -{"code": "def __init__(self, content=None, min=0, max=HUGE, name=None):\n assert 0 <= min <= max <= HUGE, (min, max)\n if content is not None:\n content = tuple(map(tuple, content))\n assert len(content), repr(content)\n for alt in content:\n assert len(alt), repr(alt)\n self.content = content\n self.min = min\n self.max = max\n self.name = name", "docstring": "Initializer.\n\nArgs:\n content: optional sequence of subsequences of patterns;\n if absent, matches one node;\n if present, each subsequence is an alternative [*]\n min: optional minimum number of times to match, default 0\n max: optional maximum number of times to match, default HUGE\n name: optional name assigned to this match\n\n [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is\n equivalent to (a b c | d e | f g h); if content is None,\n this is equivalent to '.' in regular expression terms.\n The min and max parameters work as follows:\n min=0, max=maxint: .*\n min=1, max=maxint: .+\n min=0, max=1: .?\n min=1, max=1: .\n If content is not None, replace the dot with the parenthesized\n list of alternatives, e.g. (a b c | d e | f g h)*", "source": "github_repos"} -{"code": "def calibrate_and_quantize(self, dataset_gen, input_type, output_type, allow_float, activations_type=dtypes.int8, bias_type=dtypes.int32, resize_input=True, disable_per_channel=False, disable_per_channel_quantization_for_dense_layers=False):\n self._feed_tensors(dataset_gen, resize_input)\n return self._calibrator.QuantizeModel(np.dtype(input_type.as_numpy_dtype()).num, np.dtype(output_type.as_numpy_dtype()).num, allow_float, np.dtype(activations_type.as_numpy_dtype()).num, np.dtype(bias_type.as_numpy_dtype()).num, disable_per_channel, disable_per_channel_quantization_for_dense_layers)", "docstring": "Calibrates the model with specified generator and then quantizes it.\n\n The input shapes of the calibrator are resized with the calibration data if\n `resize_input` is set.\n\nReturns:\n A quantized model.\n\nArgs:\n dataset_gen: A generator that generates calibration samples.\n input_type: A tf.dtype representing the desired real-value input type.\n output_type: A tf.dtype representing the desired real-value output type.\n allow_float: A boolean. False if the resulting model cannot perform float\n computation, useful when targeting an integer-only backend. If False, an\n error will be thrown if an operation cannot be quantized, otherwise the\n model will fallback to float ops.\n activations_type: A tf.dtype representing the desired type for\n activations.\n bias_type: A tf.dtype representing the desired type for bias.\n resize_input: A boolean. True if the shape of the sample data is different\n from the input.\n disable_per_channel: A boolean. True if disabling per-channel\n quantization.\n disable_per_channel_quantization_for_dense_layers: A boolean. 
True if\n disabling per-channel quantization only in Dense layers.", "source": "github_repos"} -{"code": "def hinge(y_true, y_pred):\n y_pred = ops.convert_to_tensor(y_pred)\n y_true = ops.cast(y_true, dtype=y_pred.dtype)\n y_true = ops.convert_to_tensor(y_true)\n y_true = convert_binary_labels_to_hinge(y_true)\n return ops.mean(ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1)", "docstring": "Computes the hinge loss between `y_true` & `y_pred`.\n\n Formula:\n\n ```python\n loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)\n ```\n\nArgs:\n y_true: The ground truth values. `y_true` values are expected to be -1\n or 1. If binary (0 or 1) labels are provided they will be converted\n to -1 or 1 with shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.\n\nReturns:\n Hinge loss values with shape = `[batch_size, d0, .. dN-1]`.\n\nExample:\n >>> y_true = np.random.choice([-1, 1], size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = keras.losses.hinge(y_true, y_pred)", "source": "github_repos"} -{"code": "def metrics_names(self):\n return [m.name for m in self.metrics]", "docstring": "Returns the model's display labels for all outputs.\n\n Note: `metrics_names` are available only after a `keras.Model` has been\n trained/evaluated on actual data.\n\nExample:\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> outputs = tf.keras.layers.Dense(2)(inputs)\n >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs)\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\"])\n >>> model.metrics_names\n []\n\n >>> x = np.random.random((2, 3))\n >>> y = np.random.randint(0, 2, (2, 2))\n >>> model.fit(x, y)\n >>> model.metrics_names\n ['loss', 'mae']\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> d = tf.keras.layers.Dense(2, name='out')\n >>> output_1 = d(inputs)\n >>> output_2 = d(inputs)\n >>> model = tf.keras.models.Model(\n ... 
inputs=inputs, outputs=[output_1, output_2])\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\", \"acc\"])\n >>> model.fit(x, (y, y))\n >>> model.metrics_names\n ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae',\n 'out_1_acc']", "source": "github_repos"} -{"code": "def __init__(self, e, position=None):\n self.exception = e\n self.type = type(e).__name__\n self.stacktrace = None\n self.extras = None\n self.position = position\n self.is_test_signal = isinstance(e, signals.TestSignal)\n exc_traceback = e.__traceback__\n if exc_traceback:\n self.stacktrace = ''.join(traceback.format_exception(e.__class__, e, exc_traceback))\n if self.is_test_signal:\n self._set_details(e.details)\n self.extras = e.extras\n else:\n self._set_details(e)", "docstring": "A record representing exception objects in TestResultRecord.\n\nAttributes:\n exception: Exception object, the original Exception.\n type: string, type name of the exception object.\n stacktrace: string, stacktrace of the Exception.\n extras: optional serializable, this corresponds to the\n `TestSignal.extras` field.\n position: string, an optional label specifying the position where the\n Exception ocurred.", "source": "github_repos"} -{"code": "def collect_constant_renames():\n renames = set()\n for module in sys.modules.copy().values():\n try:\n constants_v1_list = tf_export.get_v1_constants(module)\n constants_v2_list = tf_export.get_v2_constants(module)\n except:\n pass\n constants_v1 = {constant_name: api_names for api_names, constant_name in constants_v1_list}\n constants_v2 = {constant_name: api_names for api_names, constant_name in constants_v2_list}\n for constant_name, api_names_v1 in constants_v1.items():\n api_names_v2 = constants_v2[constant_name]\n for name in api_names_v1:\n if name not in api_names_v2:\n renames.add((name, get_canonical_name(api_names_v2, name)))\n return renames", "docstring": "Looks for constants that need to be renamed in TF 2.0.\n\nReturns:\n Set of tuples of the form (current name, new name).", "source": "github_repos"} -{"code": "def get_indent(code: str) -> str:\n lines = code.split('\\n')\n idx = 0\n while idx < len(lines) and len(lines[idx]) == 0:\n idx += 1\n if idx < len(lines):\n return re.search('^(\\\\s*)\\\\S', lines[idx]).groups()[0]\n return ''", "docstring": "Find the indent in the first non empty line in a code sample.\n\nArgs:\n code (`str`): The code to inspect.\n\nReturns:\n `str`: The indent looked at (as string).", "source": "github_repos"} -{"code": "def take_along_axis(x, indices, axis=None):\n if any_symbolic_tensors((x, indices)):\n return TakeAlongAxis(axis=axis).symbolic_call(x, indices)\n return backend.numpy.take_along_axis(x, indices, axis=axis)", "docstring": "Select values from `x` at the 1-D `indices` along the given axis.\n\nArgs:\n x: Source tensor.\n indices: The indices of the values to extract.\n axis: The axis over which to select values. 
By default, the flattened\n input tensor is used.\n\nReturns:\n The corresponding tensor of values.", "source": "github_repos"} -{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFSeq2SeqModelOutput]:\n if head_mask is not None and decoder_head_mask is None:\n warnings.warn(_HEAD_MASK_WARNING_MSG, FutureWarning)\n decoder_head_mask = head_mask\n if encoder_outputs is None:\n encoder_outputs = self.encoder(input_ids, attention_mask=attention_mask, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=inputs_embeds, head_mask=head_mask, past_key_values=None, use_cache=False, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n hidden_states = encoder_outputs[0]\n decoder_outputs = self.decoder(decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, head_mask=decoder_head_mask, encoder_head_mask=head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n past = decoder_outputs[1] if use_cache else None\n if not return_dict:\n if past_key_values is not None:\n decoder_outputs = decoder_outputs[:1] + (past,) + decoder_outputs[2:]\n return decoder_outputs + encoder_outputs\n return TFSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=past, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, TFT5Model\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"google-t5/t5-small\")\n >>> model = TFT5Model.from_pretrained(\"google-t5/t5-small\")\n\n >>> input_ids = tokenizer(\n ... \"Studies have been shown that owning a dog is good for you\", return_tensors=\"tf\"\n ... 
).input_ids # Batch size 1\n >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"tf\").input_ids # Batch size 1\n\n >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model.\n >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg.\n >>> decoder_input_ids = model._shift_right(decoder_input_ids)\n\n >>> # forward pass\n >>> outputs = model(input_ids, decoder_input_ids=decoder_input_ids)\n >>> last_hidden_states = outputs.last_hidden_state\n ```", "source": "github_repos"} -{"code": "def _inspect_summary_cache(self, cache, replica_id, step_num, output_stream, tensor_trace_order):\n\n def _inspect_tensor(tensor):\n \"\"\"Returns the text to be printed for inspection output.\"\"\"\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:\n return cond.cond(math_ops.greater(tensor, 0.0), lambda: 'has NaNs/Infs!', lambda: 'has no NaNs or Infs.')\n else:\n return tensor\n if not tensor_trace_order.traced_tensors:\n logging.warn('Inspect mode has no tensors in the cache to check.')\n return control_flow_ops.no_op\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:\n step_has_nan_or_inf = math_ops.greater(math_ops.reduce_sum(cache), 0.0)\n else:\n step_has_nan_or_inf = math_ops.reduce_any(gen_math_ops.logical_or(gen_math_ops.is_nan(cache), gen_math_ops.is_inf(cache)))\n step_error_message = cond.cond(step_has_nan_or_inf, lambda: 'NaNs or Infs in the step!', lambda: 'No numerical issues have been found for the step.')\n if self._parameters.collect_summary_per_core:\n stats = ['\\n\\n', 'core:', replica_id, ',', 'step:', step_num, '-->', step_error_message, 'Printing tensors for mode:%s...' % self._parameters.trace_mode]\n else:\n stats = ['\\n\\n', 'step:', step_num, '-->', step_error_message, 'Printing tensors for mode:%s...' 
% self._parameters.trace_mode]\n for tensor_name, cache_idx in sorted(tensor_trace_order.tensorname_to_cache_idx.items(), key=lambda item: item[1]):\n if self._parameters.collect_summary_per_core:\n stats.extend(['\\n', 'core:', replica_id, ',', 'step:', step_num, ',', tensor_name, '-->', _inspect_tensor(cache[cache_idx, 0])])\n else:\n stats.extend(['\\n', 'step:', step_num, ',', tensor_name, '-->', _inspect_tensor(cache[cache_idx, 0])])\n return logging_ops.print_v2(*stats, summarize=-1, output_stream=output_stream)", "docstring": "Generates a print operation to print trace inspection.\n\nArgs:\n cache: Tensor storing the trace results for the step.\n replica_id: Tensor storing the replica id of the running core.\n step_num: Step number.\n output_stream: Where to print the outputs, e.g., file path, or sys.stderr.\n tensor_trace_order: TensorTraceOrder object holding tensorname to id map.\n\nReturns:\n The Op to flush the cache to file.", "source": "github_repos"} -{"code": "def _agg_with_no_function(gb, *args, **kwargs):\n if not kwargs:\n raise ValueError('No aggregation functions specified')\n result_columns, result_frames = ([], [])\n for col_name, (input_col, agg_fn) in kwargs.items():\n frame = _handle_agg_function(gb[input_col], agg_fn, f'agg_{col_name}', *args)\n result_frames.append(frame)\n result_columns.append(col_name)\n return DeferredDataFrame(expressions.ComputedExpression('agg', lambda *results: pd.concat(results, axis=1, keys=result_columns), [frame._expr for frame in result_frames], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Singleton()))", "docstring": "Applies aggregation functions to the grouped data based on the provided\n keyword arguments and combines the results into a single DataFrame.\n\nArgs:\n gb: The groupby instance (DeferredGroupBy).\n *args: Additional positional arguments passed to the aggregation funcs.\n **kwargs: A dictionary where each key is the column name to aggregate,\n the value is a tuple containing the input column name and\n the aggregation function to apply.\n\nReturns:\n DeferredDataFrame: A DataFrame that contains the aggregated results of\n all specified columns.\n\nRaises:\n ValueError: If no aggregation functions are provided in the `kwargs`.\n NotImplementedError: If the aggregation function type is unsupported.", "source": "github_repos"} -{"code": "def _sample_proposals(self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor):\n has_gt = gt_classes.numel() > 0\n if has_gt:\n gt_classes = gt_classes[matched_idxs]\n gt_classes[matched_labels == 0] = self.bg_label\n gt_classes[matched_labels == -1] = -1\n else:\n gt_classes = torch.zeros_like(matched_idxs) + self.bg_label\n sampled_fg_idxs, sampled_bg_idxs = subsample_labels(gt_classes, self.batch_size_per_image, self.positive_fraction, self.bg_label)\n sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)\n return (sampled_idxs, gt_classes[sampled_idxs])", "docstring": "Based on the matching between N proposals and M groundtruth, sample the proposals and set their classification\n labels.\n\nArgs:\n matched_idxs (Tensor): a vector of length N, each is the best-matched\n gt index in [0, M) for each proposal.\n matched_labels (Tensor): a vector of length N, the matcher's label\n (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.\n gt_classes (Tensor): a vector of length M.\n\nReturns:\n Tensor: a vector of indices of sampled proposals. Each is in [0, N). 
Tensor: a vector of the same length,\n the classification label for\n each sampled proposal. Each sample is labeled as either a category in [0, num_classes) or the\n background (num_classes).", "source": "github_repos"} -{"code": "def get_regularization_loss(scope=None, name='total_regularization_loss'):\n losses = get_regularization_losses(scope)\n if losses:\n return math_ops.add_n(losses, name=name)\n else:\n return constant_op.constant(0.0)", "docstring": "Gets the total regularization loss.\n\nArgs:\n scope: An optional scope name for filtering the losses to return.\n name: The name of the returned tensor.\n\nReturns:\n A scalar regularization loss.", "source": "github_repos"} -{"code": "def tanh_shrink(x):\n if any_symbolic_tensors((x,)):\n return TanhShrink().symbolic_call(x)\n return backend.nn.tanh_shrink(x)", "docstring": "Applies the tanh shrink function element-wise.\n\n It is defined as:\n\n `f(x) = x - tanh(x)`.\n\nArgs:\n x: Input tensor.\n\nReturns:\n Output tensor of the same shape as `x`, where each element is\n transformed according to the tanh shrink operation.\n\nExample:\n >>> x = np.array([ -1., 0., 1.])\n >>> x_tanh_shrink = keras.ops.tanh_shrink(x)\n >>> print(x_tanh_shrink)\n array([-0.23840584 0. 0.23840584], shape=(3,), dtype=float64)", "source": "github_repos"} -{"code": "def add_import(self, from_package, import_list):\n if from_package:\n for item in import_list:\n t = self.module_info.process_from_import(from_package, item)\n self.type_map[t.new_name] = t.pytd_node\n if isinstance(item, tuple) or from_package != 'typing' or self.module_info.module_name == 'protocols':\n self.aliases[t.new_name] = t.pytd_alias()\n if t.new_name != 'typing':\n self.module_path_map[t.new_name] = t.qualified_name\n else:\n for item in import_list:\n t = self.module_info.process_import(item)\n if t:\n self.aliases[t.new_name] = t.pytd_alias()", "docstring": "Add an import.\n\nArgs:\n from_package: A dotted package name if this is a \"from\" statement, or None\n if it is an \"import\" statement.\n import_list: A list of imported items, which are either strings or pairs\n of strings. Pairs are used when items are renamed during import using\n \"as\".", "source": "github_repos"} -{"code": "def get_special_dtypes_update(self, model, torch_dtype: 'torch.dtype') -> Dict[str, 'torch.dtype']:\n return {name: torch_dtype for name, _ in model.named_parameters() if any((m in name for m in self.modules_to_not_convert))}", "docstring": "returns dtypes for modules that are not quantized - used for the computation of the device_map in case\n one passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified\n in `_process_model_before_weight_loading`.\n\nArgs:\n model (`~transformers.PreTrainedModel`):\n The model to quantize\n torch_dtype (`torch.dtype`):\n The dtype passed in `from_pretrained` method.", "source": "github_repos"} -{"code": "def enable_traceback_filtering():\n if sys.version_info.major != 3 or sys.version_info.minor < 7:\n raise RuntimeError(f'Traceback filtering is only available with Python 3.7 or higher. 
This Python version: {sys.version}')\n global _ENABLE_TRACEBACK_FILTERING\n _ENABLE_TRACEBACK_FILTERING.value = True", "docstring": "Enable filtering out TensorFlow-internal frames in exception stack traces.\n\n Raw TensorFlow stack traces involve many internal frames, which can be\n challenging to read through, while not being actionable for end users.\n By default, TensorFlow filters internal frames in most exceptions that it\n raises, to keep stack traces short, readable, and focused on what's\n actionable for end users (their own code).\n\n If you have previously disabled traceback filtering via\n `tf.debugging.disable_traceback_filtering()`, you can re-enable it via\n `tf.debugging.enable_traceback_filtering()`.\n\nRaises:\n RuntimeError: If Python version is not at least 3.7.", "source": "github_repos"} -{"code": "def ParseCodeToTree(code):\n if not code.endswith(os.linesep):\n code += os.linesep\n try:\n parser_driver = driver.Driver(_PYTHON_GRAMMAR, convert=pytree.convert)\n tree = parser_driver.parse_string(code, debug=False)\n except parse.ParseError:\n ast.parse(code)\n raise\n return _WrapEndMarker(tree)", "docstring": "Parse the given code to a lib2to3 pytree.\n\nArgs:\n code: a string with the code to parse.\n\nRaises:\n SyntaxError if the code is invalid syntax.\n parse.ParseError if some other parsing failure.\n\nReturns:\n The root node of the parsed tree.", "source": "github_repos"} -{"code": "def _scipy_raised_cosine(length, symmetric=True, a=0.5, b=0.5):\n if length == 1:\n return np.ones(1)\n odd = length % 2\n if not symmetric and (not odd):\n length += 1\n window = a - b * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))\n if not symmetric and (not odd):\n window = window[:-1]\n return window", "docstring": "A simple implementation of a raised cosine window that matches SciPy.\n\n https://en.wikipedia.org/wiki/Window_function#Hann_window\n https://github.com/scipy/scipy/blob/v0.14.0/scipy/signal/windows.py#L615\n\nArgs:\n length: The window length.\n symmetric: Whether to create a symmetric window.\n a: The alpha parameter of the raised cosine window.\n b: The beta parameter of the raised cosine window.\n\nReturns:\n A raised cosine window of length `length`.", "source": "github_repos"} -{"code": "def __init__(self, tie_breaker=DEFAULT_NORMAL_LABEL, **kwargs):\n self._tie_breaker = tie_breaker\n\n def inner(predictions: Iterable[int]) -> int:\n counters = collections.Counter(predictions)\n if counters[self._normal_label] < counters[self._outlier_label]:\n vote = self._outlier_label\n elif counters[self._normal_label] > counters[self._outlier_label]:\n vote = self._normal_label\n else:\n vote = self._tie_breaker\n return vote\n super().__init__(agg_func=inner, **kwargs)", "docstring": "Aggregates anomaly labels using majority voting.\n\n This `AggregationFn` implements a majority voting strategy to combine\n anomaly labels from multiple `AnomalyPrediction` objects. It counts the\n occurrences of normal and outlier labels and selects the label with the\n higher count as the aggregated label. In case of a tie, a tie-breaker\n label is used.\n\nExample:\n If input labels are [normal, outlier, outlier, normal, outlier], and\n normal_label=0, outlier_label=1, then the aggregated label will be\n outlier (1) because outliers have a majority (3 vs 2).\n\nArgs:\n normal_label (int): The integer label for normal predictions. Defaults to 0.\n outlier_label (int): The integer label for outlier predictions. 
Defaults to\n 1.\n tie_breaker (int): The label to return if there is a tie in votes.\n Defaults to 0 (normal_label).\n **kwargs: Additional keyword arguments to pass to the base\n `LabelAggregation` class.", "source": "github_repos"} -{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n milk_quality_data = pandas.read_csv(known_args.pipeline_input_data)\n start = time.mktime(time.strptime('2023/06/29 10:00:00', '%Y/%m/%d %H:%M:%S'))\n test_stream = TestStream()\n test_stream.advance_watermark_to(start)\n samples = [milk_quality_data.iloc[i:i + 1] for i in range(len(milk_quality_data))]\n for watermark_offset, sample in enumerate(samples, 1):\n test_stream.advance_watermark_to(start + watermark_offset)\n test_stream.add_elements([sample])\n test_stream.advance_watermark_to_infinity()\n model_handler = XGBoostModelHandlerPandas(model_class=xgboost.XGBClassifier, model_state=known_args.model_state)\n with beam.Pipeline() as p:\n _ = p | test_stream | 'window' >> beam.WindowInto(window.SlidingWindows(30, 5)) | 'RunInference' >> RunInference(model_handler) | 'Count number of elements in window' >> beam.CombineGlobally(AggregateMilkQualityResults()).without_defaults() | 'Print' >> beam.Map(print)", "docstring": "Args:\n argv: Command line arguments defined for this example.\n save_main_session: Used for internal testing.\n test_pipeline: Used for internal testing.", "source": "github_repos"} -{"code": "def broadcast_flat_values(self, rt, inner_dimensions=True):\n if ragged_tensor.is_ragged(rt):\n rt = rt.flat_values\n if self.target_shape.rank == 0:\n return rt\n inner_rank = self.target_shape.inner_rank\n if inner_rank > self._source_shape.rank:\n if self.source_shape.num_row_partitions > 0:\n rt = array_ops.reshape(rt, self.source_shape._alt_inner_shape(self.source_shape.rank))\n if inner_dimensions:\n return array_ops.broadcast_to(rt, self._target_inner_shape_int32())\n return rt\n else:\n if self._source_shape.inner_rank != inner_rank:\n rt = array_ops.reshape(rt, self._source_shape._alt_inner_shape(inner_rank))\n flat_broadcaster = self._layer_broadcasters[-inner_rank]\n rt = flat_broadcaster.broadcast_tensor(rt)\n if inner_dimensions:\n rt = array_ops.broadcast_to(rt, self._target_inner_shape_int32())\n return rt", "docstring": "flat_values of a ragged tensor broadcast to target_shape.\n\n If inner_dimensions==True, then the result is a dense tensor with shape\n target_shape.inner_shape, the flat values of the broadcasted shape.\n\n If you add target_shape.row_partitions, you will get the full broadcasted\n shape.\n\n If inner_dimensions==False, the result is a dense tensor that satsifies\n certain properties:\n 1. broadcast_to(result, target_shape.inner_shape) will give the result\n if inner_dimensions==True.\n 2. Either (a) (result.rank < target_shape.inner_rank)\n or (b) (result.shape[0] == target_shape.inner_shape[0]).\n 3. result.rank = min(target_shape.inner_rank, rt.rank)\n 4. 
For i < target_shape.inner_rank - 1, and i < rt.rank,\n and if rt.shape[-i]!=1, then result.shape[-i]=target_shape[-i].\n\nArgs:\n rt: a ragged or dense tensor.\n inner_dimensions: if true, broadcast the inner dimensions as well.\n\nReturns:\n a dense tensor", "source": "github_repos"} -{"code": "def exp(x):\n if any_symbolic_tensors((x,)):\n return Exp().symbolic_call(x)\n return backend.numpy.exp(x)", "docstring": "Calculate the exponential of all elements in the input tensor.\n\nArgs:\n x: Input tensor.\n\nReturns:\n Output tensor, element-wise exponential of `x`.", "source": "github_repos"} -{"code": "def register_binary_elementwise_assert_api(func):\n _BINARY_ELEMENTWISE_ASSERT_APIS.append(func)\n for args, handler in _ELEMENTWISE_API_HANDLERS.items():\n if len(args) == 3 and args[2] is _ASSERT_API_TAG:\n _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler)\n return func", "docstring": "Decorator that registers a TensorFlow op as a binary elementwise assert API.\n\n Different from `dispatch_for_binary_elementwise_apis`, this decorator is used\n for assert apis, such as assert_equal, assert_none_equal, etc, which return\n None in eager mode and an op in graph mode.\n\nArgs:\n func: The function that implements the binary elementwise assert API.\n\nReturns:\n `func`", "source": "github_repos"} -{"code": "def __init__(self):\n self.inputs = {}\n self.outputs = {}\n self.function_name = None\n self.uuid = None\n self.params = {}\n self.level = -1\n self.children_inputs_mappings = {}", "docstring": "Represent a TensorFlow Lite custom function.\n\n This is uses to accumulate found hints in the graphdef into a single\n conceptual unit.\n\nAttributes:\n inputs: inputs to the op (hash from index # to argument)\n outputs: outputs to the op (hash from index # to argument)\n function_name: the tflite custom op name to use\n uuid: a unique call id for this particular call (i.e. multiple function\n calls would have the same function_name but different uuids.\n params: A param name to key value for op constant data. I.e. for axis on a\n reduction, strides on a convolution, etc.\n level: Level of the OpHint.\n children_inputs_mappings: If the Ophint has children, children inputs\n mappings indicate how their inputs & outputs are mapped.", "source": "github_repos"} -{"code": "def batch_encode_plus(self, notes: Union[np.ndarray, List[pretty_midi.Note]], truncation_strategy: Optional[TruncationStrategy]=None, max_length: Optional[int]=None, **kwargs) -> BatchEncoding:\n encoded_batch_token_ids = []\n for i in range(len(notes)):\n encoded_batch_token_ids.append(self.encode_plus(notes[i], truncation_strategy=truncation_strategy, max_length=max_length, **kwargs)['token_ids'])\n return BatchEncoding({'token_ids': encoded_batch_token_ids})", "docstring": "This is the `batch_encode_plus` method for `Pop2PianoTokenizer`. It converts the midi notes to the transformer\n generated token ids. It works on multiple batches by calling `encode_plus` multiple times in a loop.\n\nArgs:\n notes (`numpy.ndarray` of shape `[batch_size, sequence_length, 4]` or `list` of `pretty_midi.Note` objects):\n This represents the midi notes. 
If `notes` is a `numpy.ndarray`:\n - Each sequence must have 4 values, they are `onset idx`, `offset idx`, `pitch` and `velocity`.\n If `notes` is a `list` containing `pretty_midi.Note` objects:\n - Each sequence must have 4 attributes, they are `start`, `end`, `pitch` and `velocity`.\n truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`], *optional*):\n Indicates the truncation strategy that is going to be used during truncation.\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n\nReturns:\n `BatchEncoding` containing the tokens ids.", "source": "github_repos"} -{"code": "def _copy_source(s, graph, op_map, handle_captures, inverse_captures, base_graph):\n if handle_captures and s in inverse_captures:\n copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name)\n elif s.op.type == 'PlaceholderWithDefault' and _constant_inputs(s):\n default_value = s.op.inputs[0]\n unavailable_inputs, unavailable_control_inputs = _copy_non_source(op=default_value.op, graph=graph, op_map=op_map, base_graph=base_graph)\n if unavailable_inputs or unavailable_control_inputs:\n raise AssertionError('Could not copy source node {} because it has inputs.'.format(default_value))\n with ops.device(s.op.device):\n copied_placeholder = array_ops.placeholder_with_default(input=op_map[default_value], shape=s.shape, name=s.op.name)\n else:\n with ops.device(s.op.device):\n copied_placeholder = array_ops.placeholder(dtype=s.dtype, shape=s.shape, name=s.op.name)\n base_handle = resource_variable_ops.get_resource_handle_data(s)\n if base_handle.shape_and_type:\n resource_variable_ops._set_handle_shapes_and_types(copied_placeholder, base_handle, graph_mode=True)\n op_map[s] = copied_placeholder\n op_map[s.op] = copied_placeholder.op", "docstring": "Create a source in a graph based on a Tensor from a different graph.\n\n This function creates a placeholder analog of `s` in a graph with the\n following behavior:\n\n 1) If s is a captured Tensor or Variable and handle_captures is set to True,\n simply capture it in the new graph as well.\n\n 2) If s is a PlaceholderWithDefault whose default is a constant, preserve\n said default in the new graph.\n\n 3) When applicable, copy resource variable metadata from `s` to the newly\n created placeholder.\n\nArgs:\n s: The source of interest.\n graph: The destination graph.\n op_map: A dict mapping ops and tensors in the old graph to the new one.\n handle_captures: A boolean indicating whether to re-capture s in the new\n graph or simply create a vanilla placeholder.\n inverse_captures: A dict mapping s back to the Tensor or Variable that it\n captures.\n base_graph: The graph being copied from.", "source": "github_repos"} -{"code": "def print_tensor(x, message='', summarize=3):\n if isinstance(x, tensor_lib.Tensor) and hasattr(x, 'graph'):\n with get_graph().as_default():\n op = logging_ops.print_v2(message, x, output_stream=sys.stdout, summarize=summarize)\n with ops.control_dependencies([op]):\n return array_ops.identity(x)\n else:\n logging_ops.print_v2(message, x, output_stream=sys.stdout, summarize=summarize)\n return x", "docstring": "Prints `message` and the tensor value when evaluated.\n\n Note that `print_tensor` returns a new tensor identical to `x`\n which should be used in the following code. 
Otherwise the\n print operation is not taken into account during evaluation.\n\nExample:\n >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> _ = tf.keras.backend.print_tensor(x)\n [[1 2]\n [3 4]]\n\nArgs:\n x: Tensor to print.\n message: Message to print jointly with the tensor.\n summarize: The first and last `summarize` elements within each dimension\n are recursively printed per Tensor. If None, then the first 3 and last\n 3 elements of each dimension are printed for each tensor. If set to\n -1, it will print all elements of every tensor.\n\nReturns:\n The same tensor `x`, unchanged.", "source": "github_repos"} -{"code": "def generate_file(filename: str, config: Config, nrows: int) -> None:\n fake = Faker(use_weighting=False)\n columns = [column.bq_name for column in config]\n with open(filename, 'w+') as f:\n csv = DictWriter(f, fieldnames=columns)\n csv.writeheader()\n buffer_: List[Row] = []\n buffer_size = nrows // 100\n buffer = Buffer[Row](buffer_, buffer_size, csv.writerows)\n for _ in range(nrows + 1):\n buffer.push(generate_row(fake, config))\n buffer.flush(force=True)", "docstring": "Generate a test data file conforming to the given config.\n\nArgs:\n * filename: Local path to csv file to be generated\n * config: Config key from configs.CONFIGS dictionary\n * nrows: number of rows to generate", "source": "github_repos"} -{"code": "def _extract_next_file(archive_file: io.BufferedIOBase) -> Iterator[Tuple[str, bytes]]:\n while True:\n header = archive_file.read(60)\n if not header:\n return\n elif len(header) < 60:\n raise RuntimeError('Invalid file header format.')\n name, _, _, _, _, size, end = struct.unpack('=16s12s6s6s8s10s2s', header)\n if end != b'`\\n':\n raise RuntimeError('Invalid file header format.')\n name = name.decode('ascii').strip()\n size = int(size, base=10)\n odd_size = size % 2 == 1\n if name.startswith('#1/'):\n filename_size = int(name[3:])\n name = archive_file.read(filename_size).decode('utf-8').strip(' \\x00')\n size -= filename_size\n file_content = archive_file.read(size)\n if odd_size:\n archive_file.read(1)\n yield (name, file_content)", "docstring": "Extracts the next available file from the archive.\n\n Reads the next available file header section and yields its filename and\n content in bytes as a tuple. 
Stops when there are no more available files in\n the provided archive_file.\n\nArgs:\n archive_file: The archive file object, of which cursor is pointing to the\n next available file header section.\n\nYields:\n The name and content of the next available file in the given archive file.\n\nRaises:\n RuntimeError: The archive_file is in an unknown format.", "source": "github_repos"} -{"code": "def build_estimator(tf_transform_output, config, hidden_units=None):\n transformed_feature_spec = tf_transform_output.transformed_feature_spec().copy()\n transformed_feature_spec.pop(taxi.transformed_name(taxi.LABEL_KEY))\n real_valued_columns = [tf.feature_column.numeric_column(key, shape=()) for key in taxi.transformed_names(taxi.DENSE_FLOAT_FEATURE_KEYS)]\n categorical_columns = [tf.feature_column.categorical_column_with_identity(key, num_buckets=taxi.VOCAB_SIZE + taxi.OOV_SIZE, default_value=0) for key in taxi.transformed_names(taxi.VOCAB_FEATURE_KEYS)]\n categorical_columns += [tf.feature_column.categorical_column_with_identity(key, num_buckets=taxi.FEATURE_BUCKET_COUNT, default_value=0) for key in taxi.transformed_names(taxi.BUCKET_FEATURE_KEYS)]\n categorical_columns += [tf.feature_column.categorical_column_with_identity(key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip(taxi.transformed_names(taxi.CATEGORICAL_FEATURE_KEYS), taxi.MAX_CATEGORICAL_FEATURE_VALUES)]\n return tf_estimator.DNNLinearCombinedClassifier(config=config, linear_feature_columns=categorical_columns, dnn_feature_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25])", "docstring": "Build an estimator for predicting the tipping behavior of taxi riders.\n\nArgs:\n tf_transform_output: A TFTransformOutput.\n config: tf.contrib.learn.RunConfig defining the runtime environment for the\n estimator (including model_dir).\n hidden_units: [int], the layer sizes of the DNN (input layer first)\n\nReturns:\n Resulting DNNLinearCombinedClassifier.", "source": "github_repos"} -{"code": "def __init__(self, default: Any=utils.MISSING_VALUE):\n self.default = default", "docstring": "Value placeholder for arguments whose value will be provided by presets.\n\nExample:\n def foo(x, y=pg.PresetArgValue(default=1))\n return x + y\n\n with pg.preset_args(y=2):\n print(foo(x=1)) # 3: y=2\n print(foo(x=1)) # 2: y=1", "source": "github_repos"} -{"code": "def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:\n return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and\n decoder model configuration.\n\nReturns:\n [`EncoderDecoderConfig`]: An instance of a configuration object", "source": "github_repos"} -{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = ops.convert_to_tensor(y_true, dtype=self._dtype)\n y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype)\n y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n if not self._built:\n self._build(y_true.shape, y_pred.shape)\n if sample_weight is None:\n sample_weight = 1\n sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)\n if len(sample_weight.shape) == 1:\n sample_weight = ops.expand_dims(sample_weight, axis=1)\n sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))\n weighted_y_true = y_true * 
ops.cast(sample_weight, y_true.dtype)\n self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0))\n self.squared_sum.assign(self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0))\n self.total_mse.assign(self.total_mse + ops.sum((y_true - y_pred) ** 2 * ops.cast(sample_weight, y_true.dtype), axis=0))\n self.count.assign(self.count + ops.sum(sample_weight, axis=0))\n self.num_samples.assign(self.num_samples + ops.size(y_true))", "docstring": "Accumulates root mean squared error statistics.\n\nArgs:\n y_true: The ground truth values.\n y_pred: The predicted values.\n sample_weight: Optional weighting of each example. Can\n be a `Tensor` whose rank is either 0, or the same rank as\n `y_true`, and must be broadcastable to `y_true`.\n Defaults to `1`.\n\nReturns:\n Update op.", "source": "github_repos"} -{"code": "def imag(input, name=None):\n with ops.name_scope(name, 'Imag', [input]) as name:\n input = ops.convert_to_tensor(input, name='input')\n if input.dtype.is_complex:\n return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)", "docstring": "Returns the imaginary part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the imaginary part of each element in `input` considered as a complex\n number. If `input` is real, a tensor of all zeros is returned.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.math.imag(x) # [4.75, 5.75]\n ```\n\nArgs:\n input: A `Tensor`. Must be one of the following types: `float`, `double`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` of type `float32` or `float64`.", "source": "github_repos"} -{"code": "def _convert(self, value, dtype):\n if isinstance(value, resource_variable_ops.ResourceVariable):\n raise RuntimeError(f'Attempting to return a variable from an eagerly executed py_func. 
Only numeric data structures like Tensors or NumPy arrays should be returned; to return the value of a variable, make sure to obtain the Tensor backing it by calling `.read_value()` on the variable in question: {value}')\n if value is None and self._is_grad_func:\n return constant_op.constant(0.0, dtype=dtype)\n return ops.convert_to_tensor(value, dtype=dtype)", "docstring": "Converts `value` to a tensor of type `dtype`, with error checking.\n\nArgs:\n value: The tensor to convert.\n dtype: The desired dtype.\n\nReturns:\n A tensor of type `dtype`, or a zeros tensor if value is None and\n this function is in fact a gradient function.\n\nRaises:\n RuntimeError: if `value` is a variable.", "source": "github_repos"} -{"code": "def unbatch(self, spec):\n raise NotImplementedError(f'{type(self).__name__}.unbatch')", "docstring": "Returns the TypeSpec for a single unbatched element in `spec`.\n\nArgs:\n spec: The `TypeSpec` for a batch of values.\n\nReturns:\n A `TypeSpec` for an individual value.", "source": "github_repos"} -{"code": "def forward(self, hidden_states):\n hidden_states = hidden_states.view(self.num_experts, -1, self.hidden_size)\n num_tokens = None\n next_states = torch.empty_like(hidden_states)\n for i in range(self.num_experts):\n expert_hidden = hidden_states[i]\n expert_hidden_reshaped = expert_hidden.reshape(-1, self.hidden_size)\n expert_quantized, expert_scale = torch.ops.fbgemm.quantize_fp8_per_row(expert_hidden_reshaped, num_tokens, self.input_scale_ub)\n sharded_expert_dim = self.gate_up_proj.shape[-1] // 2\n gate_up_proj_scale_float32 = self.gate_up_proj_scale.to(torch.float32)\n gate = torch.ops.fbgemm.f8f8bf16_rowwise(expert_quantized, self.gate_up_proj[i].transpose(0, 1)[:sharded_expert_dim].contiguous(), expert_scale, gate_up_proj_scale_float32[i][0][:sharded_expert_dim].view(-1, 1).contiguous(), use_fast_accum=True)\n up = torch.ops.fbgemm.f8f8bf16_rowwise(expert_quantized, self.gate_up_proj[i].transpose(0, 1)[sharded_expert_dim:].contiguous(), expert_scale, gate_up_proj_scale_float32[i][0][sharded_expert_dim:].view(-1, 1).contiguous(), use_fast_accum=True)\n activated = up * self.act_fn(gate)\n activated_quantized, activated_scale = torch.ops.fbgemm.quantize_fp8_per_row(activated, num_tokens, self.input_scale_ub)\n down_proj_scale_float32 = self.down_proj_scale.to(torch.float32)\n expert_output = torch.ops.fbgemm.f8f8bf16_rowwise(activated_quantized, self.down_proj[i].transpose(0, 1).contiguous(), activated_scale, down_proj_scale_float32[i].view(-1, 1).contiguous(), use_fast_accum=True)\n next_states[i] = expert_output\n next_states = next_states.to(hidden_states.device)\n return next_states.view(-1, self.hidden_size)", "docstring": "Args:\n hidden_states (torch.Tensor): (batch_size * token_num, hidden_size)\n\nReturns:\n torch.Tensor: (batch_size * token_num, hidden_size)", "source": "github_repos"} -{"code": "def add_input(self, mutable_accumulator, element, *args, **kwargs):\n raise NotImplementedError(str(self))", "docstring": "Return result of folding element into accumulator.\n\n CombineFn implementors must override add_input.\n\nArgs:\n mutable_accumulator: the current accumulator,\n may be modified and returned for efficiency\n element: the element to add, should not be mutated\n *args: Additional arguments and side inputs.\n **kwargs: Additional arguments and side inputs.", "source": "github_repos"} -{"code": "def is_compatible_with(self, other):\n other = as_dtype(other)\n return self._type_enum in (other.as_datatype_enum, 
other.base_dtype.as_datatype_enum)", "docstring": "Returns True if the `other` DType will be converted to this DType (TF1).\n\n Programs written for TensorFlow 2.x do not need this function.\n Instead, they can do equality comparison on `DType` objects directly:\n `tf.as_dtype(this) == tf.as_dtype(other)`.\n\n This function exists only for compatibility with TensorFlow 1.x, where it\n additionally allows conversion from a reference type (used by\n `tf.compat.v1.Variable`) to its base type.\n\nArgs:\n other: A `DType` (or object that may be converted to a `DType`).\n\nReturns:\n True if a Tensor of the `other` `DType` will be implicitly converted to\n this `DType`.", "source": "github_repos"} -{"code": "def call(self, pixel_values: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, labels: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False, **kwargs) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith('decoder_')}\n kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')}\n if encoder_outputs is not None:\n if return_dict and (not isinstance(encoder_outputs, ModelOutput)):\n raise ValueError(f'If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of `ModelOutput`. 
Got an instance {type(encoder_outputs)} for `encoder_outputs`.')\n if encoder_outputs is None:\n encoder_inputs = {'input_ids': pixel_values, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict, 'training': training}\n encoder_inputs.update(kwargs_encoder)\n if 'input_ids' in encoder_inputs:\n encoder_inputs['pixel_values'] = encoder_inputs.pop('input_ids')\n if encoder_inputs['pixel_values'] is None:\n raise ValueError('You have to specify pixel_values')\n if 'labels' in encoder_inputs:\n labels = encoder_inputs.pop('labels')\n if 'decoder_input_ids' in encoder_inputs:\n decoder_input_ids = encoder_inputs.pop('decoder_input_ids')\n if 'decoder_attention_mask' in encoder_inputs:\n decoder_attention_mask = encoder_inputs.pop('decoder_attention_mask')\n encoder_outputs = self.encoder(**encoder_inputs)\n encoder_hidden_states = encoder_outputs[0]\n if self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None:\n encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)\n if labels is not None and (decoder_input_ids is None and decoder_inputs_embeds is None):\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n batch_size, sequence_length = shape_list(encoder_hidden_states)[:2]\n encoder_attention_mask = tf.ones(shape=(batch_size, sequence_length), dtype=tf.int32)\n decoder_inputs = {'input_ids': decoder_input_ids, 'attention_mask': decoder_attention_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, 'inputs_embeds': decoder_inputs_embeds, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'use_cache': use_cache, 'past_key_values': past_key_values, 'return_dict': return_dict, 'training': training}\n decoder_inputs.update(kwargs_decoder)\n decoder_outputs = self.decoder(**decoder_inputs)\n logits = decoder_outputs[0]\n loss = None\n if labels is not None:\n warnings.warn(DEPRECATION_WARNING, FutureWarning)\n loss = self.hf_compute_loss(labels, logits)\n if not return_dict:\n past_key_values = None\n if use_cache:\n past_key_values = decoder_outputs[1]\n start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])\n if not isinstance(encoder_outputs, tuple):\n encoder_outputs = encoder_outputs.to_tuple()\n output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs\n output = tuple([x for x in output if x is not None])\n return output\n return TFSeq2SeqLMOutput(loss=loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExample:\n ```python\n >>> from transformers import AutoImageProcessor, AutoTokenizer, TFVisionEncoderDecoderModel\n >>> from PIL import Image\n >>> import requests\n\n >>> image_processor = AutoImageProcessor.from_pretrained(\"google/vit-base-patch16-224-in21k\")\n >>> decoder_tokenizer = AutoTokenizer.from_pretrained(\"openai-community/gpt2\")\n\n >>> # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. 
Note that the cross-attention layers will be randomly initialized\n >>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(\n ... \"google/vit-base-patch16-224-in21k\", \"openai-community/gpt2\"\n ... )\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> img = Image.open(requests.get(url, stream=True).raw)\n\n >>> # forward\n >>> pixel_values = image_processor(images=img, return_tensors=\"tf\").pixel_values # Batch size 1\n >>> decoder_input_ids = decoder_tokenizer(\"Linda Davis\", return_tensors=\"tf\").input_ids # Batch size 1\n >>> outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)\n\n >>> # training\n >>> outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids)\n >>> loss, logits = outputs.loss, outputs.logits\n\n >>> # save and load from pretrained\n >>> model.save_pretrained(\"vit-gpt2\")\n >>> model = TFVisionEncoderDecoderModel.from_pretrained(\"vit-gpt2\")\n\n >>> # generation\n >>> generated = model.generate(pixel_values, decoder_start_token_id=model.config.decoder.bos_token_id)\n ```", "source": "github_repos"} -{"code": "def reduce_sum(values, index, name='segmented_reduce_sum'):\n return _segment_reduce(values, index, tf.math.unsorted_segment_sum, name)", "docstring": "Sums a tensor over its segments. Outputs 0 for empty segments. This operations computes the sum over segments, with\n support for:\n\n - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.\n - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a sum of vectors\n rather than scalars.\n Only the middle dimensions [I1, ..., Ik] are reduced by the operation.\n\nArgs:\n values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be\n averaged.\n index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments.\n name: Name for the TensorFlow ops.\n\nReturns:\n A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments,\n V1, V2, ..] 
and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments].", "source": "github_repos"} -{"code": "def __init__(self, sess_creator):\n self._sess_creator = sess_creator\n _WrappedSession.__init__(self, self._create_session())", "docstring": "Create a new `_RecoverableSession`.\n\n The value returned by calling `sess_creator.create_session()` will be the\n session wrapped by this recoverable session.\n\nArgs:\n sess_creator: A 'SessionCreator' to be wrapped by recoverable.", "source": "github_repos"} -{"code": "def regex_full_match(input, pattern, name=None):\n if isinstance(pattern, util_compat.bytes_or_text_types):\n return gen_string_ops.static_regex_full_match(input=input, pattern=pattern, name=name)\n return gen_string_ops.regex_full_match(input=input, pattern=pattern, name=name)", "docstring": "Match elements of `input` with regex `pattern`.\n\nArgs:\n input: string `Tensor`, the source strings to process.\n pattern: string or scalar string `Tensor`, regular expression to use,\n see more details at https://github.com/google/re2/wiki/Syntax\n name: Name of the op.\n\nReturns:\n bool `Tensor` of the same shape as `input` with match results.", "source": "github_repos"} -{"code": "def request_stop(self, ex=None):\n self._coord.request_stop(ex=ex)", "docstring": "Request that the coordinator stop the threads.\n\n See `Coordinator.request_stop()`.\n\nArgs:\n ex: Optional `Exception`, or Python `exc_info` tuple as returned by\n `sys.exc_info()`. If this is the first call to `request_stop()` the\n corresponding exception is recorded and re-raised from `join()`.", "source": "github_repos"} -{"code": "def _infer_type(str_val, na_value, prev_type):\n if str_val in ('', na_value):\n return prev_type\n type_list = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string]\n type_functions = [_is_valid_int32, _is_valid_int64, lambda str_val: _is_valid_float(str_val, dtypes.float32), lambda str_val: _is_valid_float(str_val, dtypes.float64), lambda str_val: True]\n for i in range(len(type_list)):\n validation_fn = type_functions[i]\n if validation_fn(str_val) and (prev_type is None or prev_type in type_list[:i + 1]):\n return type_list[i]", "docstring": "Given a string, infers its tensor type.\n\n Infers the type of a value by picking the least 'permissive' type possible,\n while still allowing the previous type inference for this column to be valid.\n\nArgs:\n str_val: String value to infer the type of.\n na_value: Additional string to recognize as a NA/NaN CSV value.\n prev_type: Type previously inferred based on values of this column that\n we've seen up till now.\n\nReturns:\n Inferred dtype.", "source": "github_repos"} -{"code": "def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str='patches') -> None:\n super().__init__()\n if postproc_type not in ('patches',):\n raise ValueError('Invalid postproc_type!')\n self.classifier = nn.Linear(in_channels, config.samples_per_patch)", "docstring": "Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features.\n\nArgs:\n config ([*PerceiverConfig*]):\n Model configuration.\n in_channels (`int`):\n Number of channels in the input.\n postproc_type (`str`, *optional*, defaults to `\"patches\"`):\n Postprocessor type to use. 
Currently, only \"patches\" is supported.", "source": "github_repos"} -{"code": "def annotate_test_case(self, test_case: Iterator[str]) -> tuple[Iterator[str], Iterator[str]]:\n split_input = HloStreamSplitter(test_case, record_directives={DirectiveComment.RUN})\n optimizer_input, passthrough_input = itertools.tee(_fix_whitespace(split_input.non_directive_lines()))\n test_case_output = optimize_hlo(optimizer_input, self._optimizer_path, self._optimizer_args, expand_to_input=self._expand_to_input)\n test_case_checks = HloFileCheckLines(test_case_output)\n transformed_test_case = itertools.chain(test_case_checks, ['\\n'], passthrough_input)\n run_directives = split_input.directive_history(DirectiveComment.RUN)\n return (transformed_test_case, run_directives)", "docstring": "Transforms a test case, inserting FileCheck directives above it.\n\nArgs:\n test_case: An iterator over the lines of a single test case.\n\nReturns:\n A tuple of two iterators. The first iterator yields the annotated test\n case containing FileCheck directives followed by the original HLO IR. The\n second iterator yields any RUN directives that were stripped from the\n original test case.", "source": "github_repos"} -{"code": "def report_fhir_path_error(self, element_path: str, fhir_path_constraint: str, msg: str) -> None:", "docstring": "Reports a FHIRPath constraint error during validation and/or encoding.\n\n The base implementation logs to the `error` context and raises `e` by\n default. Subclasses should override this behavior as necessary.\n\nArgs:\n element_path: The path to the field that the constraint is defined on.\n fhir_path_constraint: The FHIRPath constraint expression.\n msg: The error message produced.", "source": "github_repos"} -{"code": "def create_proxy_api_files(output_files, proxy_module_root, output_dir):\n for file_path in output_files:\n module = get_module(os.path.dirname(file_path), output_dir)\n if not os.path.isdir(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n contents = f'from {proxy_module_root}.{module} import *'\n with open(file_path, 'w') as fp:\n fp.write(contents)", "docstring": "Creates __init__.py files in proxy format for the Python API.\n\nArgs:\n output_files: List of __init__.py file paths to create.\n proxy_module_root: Module root for proxy-import format. 
If specified, proxy\n files with content like `from proxy_module_root.proxy_module import *`\n will be created to enable import resolution under TensorFlow.\n output_dir: output API root directory.", "source": "github_repos"} -{"code": "def __init__(self, expr_token):\n self.expr_token = expr_token\n self.for_token = None\n self.has_split_at_for = False\n self.has_interior_split = False", "docstring": "Maintains the state of list comprehension formatting decisions.\n\n A stack of ComprehensionState objects are kept to ensure that list\n comprehensions are wrapped with well-defined rules.\n\nAttributes:\n expr_token: The first token in the comprehension.\n for_token: The first 'for' token of the comprehension.\n opening_bracket: The opening bracket of the list comprehension.\n closing_bracket: The closing bracket of the list comprehension.\n has_split_at_for: Whether there is a newline immediately before the\n for_token.\n has_interior_split: Whether there is a newline within the comprehension.\n That is, a split somewhere after expr_token or before closing_bracket.", "source": "github_repos"} -{"code": "def __init__(self, config: Llama4VisionConfig):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([Llama4VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n self.config = config", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n [`Llama4VisionEncoderLayer`].\n\nArgs:\n config: Llama4VisionConfig", "source": "github_repos"} -{"code": "def __init__(self, serialized=None, mesh_shape=None, device_coordinates=None):\n self._serialized = serialized\n if serialized:\n self._parse_topology(serialized)\n else:\n self._mesh_shape = numpy_compat.np_asarray(mesh_shape, dtype=np.int32)\n self._device_coordinates = numpy_compat.np_asarray(device_coordinates, dtype=np.int32)\n if len(self._mesh_shape) != 4 or any(self._mesh_shape < 1):\n raise ValueError(f'`mesh_shape` must be a sequence of 4 positive entries; got `mesh_shape={self._mesh_shape}`')\n if len(self._device_coordinates.shape) != 3 or self._device_coordinates.shape[2] != len(self._mesh_shape):\n raise ValueError('`device_coordinates` must be a rank 3 int32 array with minor dimension equal to the `mesh_shape` rankgot device_coordinates.shape={} len(device_coordinates.shape)={} device_coordinates.shape[2]={} mesh_shape={}, len(mesh_shape)={}'.format(self._device_coordinates.shape, len(self._device_coordinates.shape), self._device_coordinates.shape[2], self._mesh_shape, len(self._mesh_shape)))\n self._topology_tasks, self._topology_devices = self._invert_topology()\n self._missing_devices = np.argwhere(self._topology_tasks < 0)", "docstring": "Builds a Topology object.\n\n If `serialized` is not `None`, the topology is parsed from `serialized` and\n the other arguments are ignored. Otherwise, the topology is computed from\n `mesh_shape` and `device_coordinates`.\n\nArgs:\n serialized: A serialized `TopologyProto`, or `None`. If not `None`, the\n serialized proto is parsed to discover the topology.\n mesh_shape: A sequence of 4 positive integers, or `None`. If not `None`,\n the shape of the TPU topology, in number of cores. Ignored if\n `serialized` is not `None`.\n device_coordinates: A rank 3 numpy array that describes the mapping from\n TensorFlow TPU devices to TPU fabric coordinates, or `None`. If\n specified, array is a rank 3 int32 array with shape\n `[tasks, devices, axis]`. 
`tasks` is the number of tasks in the TPU\n cluster, `devices` is the number of TPU devices per task, and `axis` is\n the number of axes in the TPU cluster topology. Each entry gives the\n `axis`-th coordinate in the topology of a task/device pair. TPU\n topologies are 4-dimensional, with dimensions `(x, y, z, core number)`.\n This arg is ignored if `serialized is not `None`.\n\nRaises:\n ValueError: If `serialized` does not describe a well-formed topology.\n ValueError: If `serialized` is `None` and `mesh_shape` is not a sequence\n of 4 positive integers.\n ValueError: If `serialized` is `None` and `device_coordinates` is not a\n rank 3 numpy int32 array that describes a valid coordinate mapping.", "source": "github_repos"} -{"code": "def make_one_shot_iterator(self) -> Union[iterator_ops.Iterator, iterator_ops.OwnedIterator]:\n return self._make_one_shot_iterator()", "docstring": "Creates an iterator for elements of this dataset.\n\n Note: The returned iterator will be initialized automatically.\n A \"one-shot\" iterator does not currently support re-initialization. For\n that see `make_initializable_iterator`.\n\nExample:\n ```python\n # Building graph ...\n dataset = ...\n next_value = dataset.make_one_shot_iterator().get_next()\n\n # ... from within a session ...\n try:\n while True:\n value = sess.run(next_value)\n ...\n except tf.errors.OutOfRangeError:\n pass\n ```\n\nReturns:\n An `tf.data.Iterator` for elements of this dataset.", "source": "github_repos"} -{"code": "def retrieve_collected_errors():\n serialized_message_list = wrap_converter.wrapped_retrieve_collected_errors()\n return list(map(converter_error_data_pb2.ConverterErrorData.FromString, serialized_message_list))", "docstring": "Returns and clears the list of collected errors in ErrorCollector.\n\n The RetrieveCollectedErrors function in C++ returns a list of serialized proto\n messages. This function will convert them to ConverterErrorData instances.\n\nReturns:\n A list of ConverterErrorData.", "source": "github_repos"} -{"code": "def send_eager_tracebacks(destinations, origin_stack, send_source=True):\n _send_call_tracebacks(destinations, origin_stack, is_eager_execution=True, send_source=send_source)", "docstring": "Send the tracebacks of an eager execution call to debug server(s).\n\nArgs:\n destinations: gRPC destination addresses, a `str` or a `list` of `str`s,\n e.g., \"localhost:4242\". 
If a `list`, gRPC requests containing the same\n origin_stack: The traceback of the eager operation invocation.\n send_source: Whether the source files involved in the op tracebacks but\n outside the TensorFlow library are to be sent.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_base_image_embeds: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, OwlViTOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)\n text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n text_embeds = text_outputs[1]\n text_embeds = self.text_projection(text_embeds)\n image_embeds = vision_outputs[1]\n image_embeds = self.visual_projection(image_embeds)\n image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)\n text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)\n logit_scale = self.logit_scale.exp().to(image_embeds.device)\n logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale\n logits_per_image = logits_per_text.t()\n loss = None\n if return_loss:\n loss = owlvit_loss(logits_per_text)\n text_embeds = text_embeds_norm\n if not return_dict:\n output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)\n return (loss,) + output if loss is not None else output\n return OwlViTOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)", "docstring": "return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n return_base_image_embeds (`bool`, *optional*):\n Whether or not to return the base image embeddings.\n\nExample:\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, OwlViTModel\n\n >>> model = OwlViTModel.from_pretrained(\"google/owlvit-base-patch32\")\n >>> processor = AutoProcessor.from_pretrained(\"google/owlvit-base-patch32\")\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n >>> inputs = processor(text=[[\"a photo of a cat\", \"a photo of a dog\"]], images=image, return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score\n >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities\n ```", "source": "github_repos"} -{"code": "def zeros(shape, dtype=None, name=None):\n with ops.init_scope():\n 
if dtype is None:\n dtype = floatx()\n tf_dtype = dtypes_module.as_dtype(dtype)\n v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)\n if py_all(v.shape.as_list()):\n return variable(v, dtype=dtype, name=name)\n return v", "docstring": "Instantiates an all-zeros variable and returns it.\n\nArgs:\n shape: Tuple or list of integers, shape of returned Keras variable\n dtype: data type of returned Keras variable\n name: name of returned Keras variable\n\nReturns:\n A variable (including Keras metadata), filled with `0.0`.\n Note that if `shape` was symbolic, we cannot return a variable,\n and will return a dynamically-shaped tensor instead.\n\nExample:\n >>> kvar = tf.keras.backend.zeros((3,4))\n >>> tf.keras.backend.eval(kvar)\n array([[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]], dtype=float32)\n >>> A = tf.constant([1,2,3])\n >>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.]\n >>> tf.keras.backend.eval(kvar2)\n array([0., 0., 0.], dtype=float32)\n >>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32)\n >>> tf.keras.backend.eval(kvar3)\n array([0, 0, 0], dtype=int32)\n >>> kvar4 = tf.keras.backend.zeros([2,3])\n >>> tf.keras.backend.eval(kvar4)\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)", "source": "github_repos"} -{"code": "def _step(time, output_ta_t, prev_output, *states):\n current_input = tuple((ta[time] for ta in input_ta))\n current_input = tree.pack_sequence_as(inputs, current_input)\n mask_t = masking_fn(time)\n output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n flat_output = tree.flatten(output)\n flat_mask_output = flat_zero_output if zero_output_for_mask else tree.flatten(prev_output)\n flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)\n flat_state = tree.flatten(states)\n flat_new_state = tree.flatten(new_states)\n flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)\n new_states = tree.pack_sequence_as(new_states, flat_final_state)\n ta_index_to_write = time if return_all_outputs else 0\n for ta, out in zip(output_ta_t, flat_new_output):\n ta[ta_index_to_write] = out\n return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\n time: Current timestep value.\n output_ta_t: TensorArray.\n prev_output: tuple of outputs from time - 1.\n *states: List of states.\n\nReturns:\n Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`", "source": "github_repos"} -{"code": "def _get_dir_size(self, path: str='.'):\n total = 0\n for root, _, files in os.walk(path):\n for filename in files:\n total += os.path.getsize(os.path.join(root, filename))\n return total", "docstring": "Get the total size of files and sub-directories under the path.\n\nArgs:\n path: Path of a directory or a file to calculate the total size.\n\nReturns:\n Total size of the directory or a file.", "source": "github_repos"} -{"code": "def AddCalledComponent(self, component, target, args, filename, lineno, capacity, action=CALLED_CALLABLE):\n element = FireTraceElement(component=component, action=action, target=target, args=args, filename=filename, lineno=lineno, capacity=capacity)\n self.elements.append(element)", "docstring": "Adds an element to the trace indicating that a component was called.\n\n Also applies to instantiating a class.\n\nArgs:\n component: The result of calling the callable.\n target: The name of the callable.\n args: The args consumed in order to call this callable.\n filename: The file in which the 
callable is defined, or None if N/A.\n lineno: The line number on which the callable is defined, or None if N/A.\n capacity: (bool) Whether the callable could have accepted additional args.\n action: The value to include as the action in the FireTraceElement.", "source": "github_repos"} -{"code": "def _google_section(line_info):\n colon_index = line_info.remaining.find(':')\n possible_title = line_info.remaining[:colon_index]\n return _section_from_possible_title(possible_title)", "docstring": "Checks whether the current line is the start of a new Google-style section.\n\n This docstring is a Google-style docstring. Google-style sections look like\n this:\n\n Section Name:\n section body goes here\n\nArgs:\n line_info: Information about the current line.\n\nReturns:\n A Section type if one matches, or None if no section type matches.", "source": "github_repos"} -{"code": "def _stop_profiler(self, save=True):\n if not self._profiler_started:\n return\n try:\n backend.tensorboard.stop_trace(save=save)\n except Exception as e:\n logging.error('Failed to stop profiler: %s', e)\n finally:\n self._profiler_started = False", "docstring": "Stops the profiler if currently active.\n\nArgs:\n save: Whether to save the profiler results to TensorBoard.", "source": "github_repos"} -{"code": "def hertz_to_mel(frequencies_hertz):\n return _MEL_HIGH_FREQUENCY_Q * np.log(1.0 + frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)", "docstring": "Convert frequencies to mel scale using HTK formula.\n\n Copied from\n https://github.com/tensorflow/models/blob/master/research/audioset/mel_features.py.\n\nArgs:\n frequencies_hertz: Scalar or np.array of frequencies in hertz.\n\nReturns:\n Object of same size as frequencies_hertz containing corresponding values\n on the mel scale.", "source": "github_repos"} -{"code": "def __init__(self, x, y=None, **kwargs):\n if not self.can_handle(x, y):\n raise ValueError('{} Cannot handle input {}, {}'.format(self.__class__, x, y))", "docstring": "Create a DataAdapter based on data inputs.\n\n The caller must make sure to call `can_handle()` first before invoking this\n method. Provide unsupported data type will result into unexpected behavior.\n\nArgs:\n x: input features.\n y: target labels. Note that y could be None in the case of prediction.\n **kwargs: Other keyword arguments for DataAdapter during the construction\n of the tf.dataset.Dataset. For example:\n - Numpy data might have `sample_weights` which will be used for\n weighting the loss function during training.\n - Numpy data might need to have `batch_size` parameter when constructing\n the dataset and iterator.\n - Certain input might need to be distribution strategy aware. 
When\n `distribution_strategy` is passed, the created dataset need to respect\n the strategy.\n DataAdapter might choose to ignore any keyword argument if it doesn't\n use it, or raise exception if any required argument is not provide.", "source": "github_repos"} -{"code": "def __init__(self, session_creator, hooks, should_recover, stop_grace_period_secs=120):\n self._graph_was_finalized = ops.get_default_graph().finalized\n self._hooks = hooks or []\n for h in self._hooks:\n h.begin()\n worker_context = distribute_coordinator_context.get_current_worker_context()\n if not session_creator and worker_context:\n session_creator = worker_context.session_creator()\n self._coordinated_creator = self._CoordinatedSessionCreator(session_creator=session_creator or ChiefSessionCreator(), hooks=self._hooks, stop_grace_period_secs=stop_grace_period_secs)\n if should_recover:\n self._sess = _RecoverableSession(self._coordinated_creator)\n else:\n self._sess = self._coordinated_creator.create_session()", "docstring": "Sets up a Monitored or Hooked Session.\n\nArgs:\n session_creator: A factory object to create session. Typically a\n `ChiefSessionCreator` or a `WorkerSessionCreator`.\n hooks: An iterable of `SessionRunHook' objects.\n should_recover: A bool. Indicates whether to recover from `AbortedError`\n and `UnavailableError` or not.\n stop_grace_period_secs: Number of seconds given to threads to stop after\n `close()` has been called.", "source": "github_repos"} -{"code": "def _init_values_from_proto(self, values_def, import_scope=None):\n assert isinstance(values_def, control_flow_pb2.ValuesDef)\n self._values = set((ops.prepend_name_scope(value, import_scope) for value in values_def.values))\n g = ops.get_default_graph()\n self._external_values = {}\n for k, v in values_def.external_values.items():\n k = ops.prepend_name_scope(k, import_scope)\n self._external_values[k] = g.as_graph_element(ops.prepend_name_scope(v, import_scope))\n op_names = set([op.split(':')[0] for op in self._values - set(self._external_values.keys())])\n for op in op_names:\n g.as_graph_element(op)._set_control_flow_context(self)", "docstring": "Initializes values and external_values from `ValuesDef` protocol buffer.\n\nArgs:\n values_def: `ValuesDef` protocol buffer.\n import_scope: Optional `string`. Name scope to add.", "source": "github_repos"} -{"code": "def assert_sources_equal_reference_source(reference_source_info, sources_info):\n if not (isinstance(reference_source_info, tuple) and len(reference_source_info) == 3 and isinstance(reference_source_info[0], iobase.BoundedSource)):\n raise ValueError('reference_source_info must a three-tuple where firstitem of the tuple gives a iobase.BoundedSource. Received: %r' % reference_source_info)\n reference_records = read_from_source(*reference_source_info)\n source_records = []\n for source_info in sources_info:\n assert isinstance(source_info, tuple)\n assert len(source_info) == 3\n if not (isinstance(source_info, tuple) and len(source_info) == 3 and isinstance(source_info[0], iobase.BoundedSource)):\n raise ValueError('source_info must a three tuple where firstitem of the tuple gives a iobase.BoundedSource. Received: %r' % source_info)\n if type(reference_source_info[0].default_output_coder()) != type(source_info[0].default_output_coder()):\n raise ValueError('Reference source %r and the source %r must use the same coder. They are using %r and %r respectively instead.' 
% (reference_source_info[0], source_info[0], type(reference_source_info[0].default_output_coder()), type(source_info[0].default_output_coder())))\n source_records.extend(read_from_source(*source_info))\n if len(reference_records) != len(source_records):\n raise ValueError('Reference source must produce the same number of records as the list of sources. Number of records were %d and %d instead.' % (len(reference_records), len(source_records)))\n if equal_to(reference_records)(source_records):\n raise ValueError('Reference source and provided list of sources must produce the same set of records.')", "docstring": "Tests if a reference source is equal to a given set of sources.\n\n Given a reference source (a :class:`~apache_beam.io.iobase.BoundedSource`\n and a position range) and a list of sources, assert that the union of the\n records read from the list of sources is equal to the records read from the\n reference source.\n\nArgs:\n reference_source_info (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):\n a three-tuple that gives the reference\n :class:`~apache_beam.io.iobase.BoundedSource`, position to start\n reading at, and position to stop reading at.\n sources_info (Iterable[Tuple[~apache_beam.io.iobase.BoundedSource, int, int]]):\n a set of sources. Each source is a three-tuple that is of the same\n format described above.\n\nRaises:\n ValueError: if the set of data produced by the reference source\n and the given set of sources are not equivalent.", "source": "github_repos"} -{"code": "def __init__(self, input_energy: energy.BitstringEnergy, num_expectation_samples: int, initial_seed: Union[None, tf.Tensor]=None, name: Union[None, str]=None):\n super().__init__(input_energy, initial_seed, name)\n self.num_expectation_samples = num_expectation_samples", "docstring": "Initializes an EnergyInference.\n\nArgs:\n input_energy: The parameterized energy function which defines this\n distribution via the equations of an energy based model. This class\n assumes that all parameters of `energy` are `tf.Variable`s and that\n they are all returned by `energy.variables`.\n num_expectation_samples: Number of samples to draw and use for estimating\n the expectation value.\n initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This\n seed will be used in the `sample` method. If None, the seed is updated\n after every inference call. Otherwise, the seed is fixed.\n name: Optional name for the model.", "source": "github_repos"} -{"code": "def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')\n return [1 if token in self.all_special_ids else 0 for token in token_ids_0]\n mask = [1] + [0] * len(token_ids_0) + [1]\n if token_ids_1 is not None:\n mask += [0] * len(token_ids_1) + [1]\n return mask", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of ids of the first sequence.\n token_ids_1 (`List[int]`, *optional*):\n List of ids of the second sequence.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github_repos"} -{"code": "def make_batched_images(images) -> List[List[ImageInput]]:\n if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):\n return [img for img_list in images for img in img_list]\n elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):\n return images\n elif is_valid_image(images):\n return [images]\n raise ValueError(f'Could not make batched images from {images}')", "docstring": "Accepts images in list or nested list format, and makes a list of images for preprocessing.\n\nArgs:\n images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):\n The input image.\n\nReturns:\n list: A list of images.", "source": "github_repos"} -{"code": "def load_raw_type(self, typ: type[Any]) -> abstract.BaseValue:\n if typ is type(None):\n return self.consts[None]\n pytd_node = self._pytd_loader.lookup_pytd(typ.__module__, typ.__name__)\n return self._load_pytd_node(pytd_node)", "docstring": "Converts a raw type to an abstract value.\n\n For convenience, this method can also be called via ctx.types[typ].\n\nArgs:\n typ: The type.\n\nReturns:\n The abstract representation of the type. For example, when passed `int`,\n this function returns `abstract.SimpleClass(int)`.", "source": "github_repos"} -{"code": "def recursive_create_dir_v2(path):\n _pywrap_file_io.RecursivelyCreateDir(compat.path_to_bytes(path))", "docstring": "Creates a directory and all parent/intermediate directories.\n\n It succeeds if path already exists and is writable.\n\nArgs:\n path: string, name of the directory to be created\n\nRaises:\n errors.OpError: If the operation fails.", "source": "github_repos"} -{"code": "def conv(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n scale = [1.0] * self.out_channel_size\n offset = [0.5] * self.out_channel_size\n mean, variance = (scale, offset)\n out = nn_ops.conv2d(input_tensor, self.filters, strides=strides, dilations=dilations, padding=padding, data_format='NHWC')\n if has_bias:\n out = nn_ops.bias_add(out, self.bias, data_format='NHWC')\n if has_batch_norm:\n out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(out, scale, offset, mean, variance, is_training=False)\n if activation_fn is not None:\n out = activation_fn(out)\n return {'output': out}", "docstring": "Performs a 2D convolution operation.\n\nArgs:\n input_tensor: Input tensor to perform convolution on.\n\nReturns:\n A map of: output key -> output result.", "source": "github_repos"} -{"code": "def _make_class_weight_map_fn(class_weight):\n class_ids = list(sorted(class_weight.keys()))\n expected_class_ids = list(range(len(class_ids)))\n if class_ids != expected_class_ids:\n error_msg = 'Expected `class_weight` to be a dict with keys from 0 to one less than the number of classes, found {}'.format(class_weight)\n raise ValueError(error_msg)\n class_weight_tensor = tensor_conversion.convert_to_tensor_v2_with_dispatch([class_weight[int(c)] for c in class_ids])\n\n def 
_class_weights_map_fn(*data):\n \"\"\"Convert `class_weight` to `sample_weight`.\"\"\"\n x, y, sw = unpack_x_y_sample_weight(data)\n if nest.is_nested(y):\n raise ValueError('`class_weight` is only supported for Models with a single output.')\n if y.shape.rank > 2:\n raise ValueError('`class_weight` not supported for 3+ dimensional targets.')\n y_classes = smart_cond.smart_cond(y.shape.rank == 2 and backend.shape(y)[1] > 1, lambda: backend.argmax(y, axis=1), lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))\n cw = array_ops.gather_v2(class_weight_tensor, y_classes)\n if sw is not None:\n cw = math_ops.cast(cw, sw.dtype)\n sw, cw = expand_1d((sw, cw))\n sw = sw * cw\n else:\n sw = cw\n return (x, y, sw)\n return _class_weights_map_fn", "docstring": "Applies class weighting to a `Dataset`.\n\n The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where\n `y` must be a single `Tensor`.\n\nArgs:\n class_weight: A map where the keys are integer class ids and values are\n the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`\n\nReturns:\n A function that can be used with `tf.data.Dataset.map` to apply class\n weighting.", "source": "github_repos"} -{"code": "def rgb_to_yuv(images):\n images = ops.convert_to_tensor(images, name='images')\n kernel = ops.convert_to_tensor(_rgb_to_yuv_kernel, dtype=images.dtype, name='kernel')\n ndims = images.get_shape().ndims\n return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])", "docstring": "Converts one or more images from RGB to YUV.\n\n Outputs a tensor of the same shape as the `images` tensor, containing the YUV\n value of the pixels.\n The output is only well defined if the value in images are in [0, 1].\n There are two ways of representing an image: [0, 255] pixel values range or\n [0, 1] (as float) pixel values range. Users need to convert the input image\n into a float [0, 1] range.\n\nArgs:\n images: 2-D or higher rank. Image data to convert. Last dimension must be\n size 3.\n\nReturns:\n images: tensor with the same shape as `images`.", "source": "github_repos"} -{"code": "def _ImageDimensions(image, rank):\n if image.get_shape().is_fully_defined():\n return image.get_shape().as_list()\n else:\n static_shape = image.get_shape().with_rank(rank).as_list()\n dynamic_shape = array_ops_stack.unstack(array_ops.shape(image), rank)\n return [s if s is not None else d for s, d in zip(static_shape, dynamic_shape)]", "docstring": "Returns the dimensions of an image tensor.\n\nArgs:\n image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.\n rank: The expected rank of the image\n\nReturns:\n A list of corresponding to the dimensions of the\n input image. Dimensions that are statically known are python integers,\n otherwise, they are integer scalar tensors.", "source": "github_repos"} -{"code": "def get_help(self, cmd_prefix=None):\n if not cmd_prefix:\n help_info = RichTextLines([])\n if self._help_intro:\n help_info.extend(self._help_intro)\n sorted_prefixes = sorted(self._handlers)\n for cmd_prefix in sorted_prefixes:\n lines = self._get_help_for_command_prefix(cmd_prefix)\n lines.append('')\n lines.append('')\n help_info.extend(RichTextLines(lines))\n return help_info\n else:\n return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))", "docstring": "Compile help information into a RichTextLines object.\n\nArgs:\n cmd_prefix: Optional command prefix. As the prefix itself or one of its\n aliases.\n\nReturns:\n A RichTextLines object containing the help information. 
If cmd_prefix\n is None, the return value will be the full command-line help. Otherwise,\n it will be the help information for the specified command.", "source": "github_repos"} -{"code": "def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding)\n pooled_output = vision_outputs.pooler_output\n return pooled_output", "docstring": "Returns:\n image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by\n applying the projection layer to the pooled output of [`SiglipVisionModel`].\n\nExample:\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, AutoModel\n >>> import torch\n\n >>> model = AutoModel.from_pretrained(\"google/siglip-base-patch16-224\")\n >>> processor = AutoProcessor.from_pretrained(\"google/siglip-base-patch16-224\")\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = processor(images=image, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... image_features = model.get_image_features(**inputs)\n ```", "source": "github_repos"} -{"code": "def forward(self, embedding: torch.Tensor) -> torch.Tensor:\n embedding = self.linear(self.dropout(embedding))\n if self.use_cls_token:\n embedding = embedding[:, :, 1:, :]\n return embedding", "docstring": "Parameters:\n embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or\n `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):\n Embedding from the model\n\nReturns:\n `torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or\n `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True", "source": "github_repos"} -{"code": "def get_extended_attention_mask(self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]):\n attention_mask = word_attention_mask\n if entity_attention_mask is not None:\n attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1)\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(f'Wrong shape for attention_mask (shape {attention_mask.shape})')\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min\n return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArgs:\n word_attention_mask (`torch.LongTensor`):\n Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.\n entity_attention_mask (`torch.LongTensor`, *optional*):\n Attention mask for entity tokens with ones indicating tokens to 
attend to, zeros for tokens to ignore.\n\nReturns:\n `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.", "source": "github_repos"} -{"code": "def _retrieve_variables_impl(config: Text, hosts: List[Tuple[int, Text]], variables: Dict[Text, Dict[Text, tf_variables.Variable]], table_config: tpu_embedding_v2_utils.TableConfig):\n for host_id, host in enumerate(hosts):\n with ops.device(host):\n for table in table_config:\n retrieved = table.optimizer._retrieve()(table_name=table.name, num_shards=len(hosts), shard_id=host_id, config=config)\n if not isinstance(retrieved, tuple):\n retrieved = (retrieved,)\n for i, slot in enumerate(['parameters'] + table.optimizer._slot_names()):\n sharded_var = variables[table.name][slot]\n if host_id < len(sharded_var.variables):\n sharded_var.variables[host_id].assign(retrieved[i])\n config = None", "docstring": "Retrieve embedding tables from TPU to host memory.\n\nArgs:\n config: A serialized TPUEmbeddingConfiguration proto.\n hosts: A list of all the host CPU devices.\n variables: A dictionary of dictionaries of TPUEmbeddingVariables. First key\n is the table name, second key is 'parameters' or the optimizer slot name.\n table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.", "source": "github_repos"} -{"code": "def __init__(self, name, func=None):\n NamedObject.__init__(self, name)\n self.func = func\n self.args = []", "docstring": "External function call.\n\nAttributes:\n func: The callable function this wraps and runs.\n args: List of arguments for given function with |name|.", "source": "github_repos"} -{"code": "def path_of_module(self, mod: nn.Module) -> str:\n try:\n return super().path_of_module(mod)\n except NameError as e:\n if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and (len(list(mod.buffers())) == 0):\n path = self._insert_module_as_submodule(mod)\n return path\n raise e", "docstring": "Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has\n a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the\n string \"foo.bar\".\n\nArgs:\n mod (str): The `Module` to retrieve the qualified name for.", "source": "github_repos"} -{"code": "def interpolate_graph(message, graph):\n parsed_messaged, _, node_tags = parse_message(message)\n error_message = ['Graph execution error:', '']\n for tag in node_tags:\n try:\n op = graph.get_operation_by_name(tag.name)\n except KeyError:\n continue\n else:\n error_message.append(_build_node_error_message(op))\n error_message.append(parsed_messaged.strip())\n return '\\n'.join(error_message)", "docstring": "Interpolates an error message.\n\n The error message can contain tags of form `{{node_type node_name}}`\n which will be parsed to identify the tf.Graph and op. If the op contains\n traceback, the traceback will be attached to the error message.\n\nArgs:\n message: A string to interpolate.\n graph: ops.Graph object containing all nodes referenced in the error\n message.\n\nReturns:\n The error message string with node definition traceback.", "source": "github_repos"} -{"code": "def get_input_shape_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'input_shapes', 'input shape')", "docstring": "Retrieves the input shape(s) of a layer at a given node.\n\nArgs:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. 
`node_index=0` will correspond to the\n first time the layer was called.\n\nReturns:\n A shape tuple\n (or list of shape tuples if the layer has multiple inputs).\n\nRaises:\n RuntimeError: If called in Eager mode.", "source": "github_repos"} -{"code": "def wait_on_failure(self, on_failure_fn=None, on_transient_failure_fn=None, on_recovery_fn=None, worker_device_name='(unknown)'):\n assert self._should_preemption_thread_run\n try:\n yield\n except (errors.OpError, ClosureInputError, ClosureAbortedError) as e:\n with self._next_task_state_cond:\n self._next_task_state_cond.wait(_POLL_FREQ_IN_SEC * 1.25)\n with self._next_task_state_cond:\n self._next_task_state_cond.wait(_POLL_FREQ_IN_SEC * 1.25)\n if not self._task_states:\n self._log_ps_failure_and_raise(e, 0)\n worker_states = self._task_states[:self._num_workers]\n ps_states = self._task_states[self._num_workers:]\n if any(ps_states):\n failed_ps_index = [ix for ix, ps_state in enumerate(ps_states) if ps_state]\n self._log_ps_failure_and_raise(e, failed_ps_index[0])\n worker_ix = int(worker_device_name.split(':')[-1])\n if worker_states[worker_ix]:\n if self._cluster.closure_queue._cancellation_mgr.is_cancelled:\n if isinstance(e, errors.CancelledError):\n raise e\n else:\n raise errors.CancelledError(None, None, 'The corresponding function was cancelled while attempting to recover from worker failure.')\n self._handle_failure_and_recovery(e, on_failure_fn, on_transient_failure_fn, on_recovery_fn, worker_device_name)\n return\n if self._cluster._record_and_ignore_transient_timeouts(e):\n logging.error('Remote function on worker %s failed with %r:%s\\nThis derived error is ignored and not reported to users.', worker_device_name, e, e)\n if on_transient_failure_fn:\n on_transient_failure_fn()\n return\n raise e", "docstring": "Catches errors during closure execution and handles them.\n\nArgs:\n on_failure_fn: an optional function to run if preemption happens.\n on_transient_failure_fn: an optional function to run if transient failure\n happens.\n on_recovery_fn: an optional function to run when a worker is recovered\n from preemption.\n worker_device_name: the device name of the worker instance that is passing\n through the failure.\n\nYields:\n None.", "source": "github_repos"} -{"code": "def filter_def_file(def_file: str, filter_file: str, filtered_file: str) -> None:\n with open(filter_file, 'r', encoding='utf-8') as filter_file_handle:\n filter_json: Dict[str, Any] = json.load(filter_file_handle)\n inclusion_patterns: List[str] = filter_json['global'] + ['EXPORTS', '*;*']\n incl_patterns: List[Pattern[str]] = [re.compile(re.escape(p).replace('\\\\*', '.*')) for p in inclusion_patterns]\n exclusion_patterns: List[str] = filter_json['local']\n excl_patterns: List[Pattern[str]] = [re.compile(re.escape(p).replace('\\\\*', '.*')) for p in exclusion_patterns]\n with open(def_file, 'r') as orig_file, open(filtered_file, 'w') as filt_file:\n for l in orig_file:\n if not matches_any(excl_patterns, l) or matches_any(incl_patterns, l):\n filt_file.write(l)", "docstring": "Filters a windows .def file based on a filter .json.\n\nArgs:\n def_file: The path to the input windows .def file.\n filter_file: The path to the filter file (JSON format).\n filtered_file: The path to the output filtered windows .def file.", "source": "github_repos"} -{"code": "def convert_tensor_tf_type_to_tflite_type(tf_type: dtypes.DType, usage: str='') -> _types_pb2.IODataType:\n mapping = {dtypes.float16: _types_pb2.FLOAT16, dtypes.float32: _types_pb2.FLOAT, 
dtypes.float64: _types_pb2.FLOAT64, dtypes.int8: _types_pb2.INT8, dtypes.int16: _types_pb2.INT16, dtypes.uint16: _types_pb2.UINT16, dtypes.int32: _types_pb2.INT32, dtypes.int64: _types_pb2.INT64, dtypes.uint8: _types_pb2.UINT8, dtypes.uint32: _types_pb2.UINT32, dtypes.uint64: _types_pb2.UINT64, dtypes.string: _types_pb2.STRING, dtypes.bool: _types_pb2.BOOL, dtypes.complex64: _types_pb2.COMPLEX64, dtypes.complex128: _types_pb2.COMPLEX128}\n tflite_type = mapping.get(tf_type)\n if tflite_type is None:\n raise ValueError('Unsupported TensorFlow type `{0}` provided for the {1}'.format(tf_type, usage))\n return tflite_type", "docstring": "Convert tensor type from tf type to tflite type.\n\nArgs:\n tf_type: TensorFlow type.\n usage: Text describing the reason for invoking this function.\n\nRaises:\n ValueError: If `tf_type` is unsupported.\n\nReturns:\n tflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto.", "source": "github_repos"} -{"code": "def load(path, compile=True, options=None):\n metadata = saved_metadata_pb2.SavedMetadata()\n meta_graph_def = loader_impl.parse_saved_model(path).meta_graphs[0]\n object_graph_def = meta_graph_def.object_graph_def\n path_to_metadata_pb = os.path.join(path, constants.SAVED_METADATA_PATH)\n if gfile.Exists(path_to_metadata_pb):\n try:\n with gfile.GFile(path_to_metadata_pb, 'rb') as f:\n file_content = f.read()\n metadata.ParseFromString(file_content)\n except message.DecodeError as e:\n raise IOError('Cannot parse keras metadata {}: {}.'.format(path_to_metadata_pb, str(e)))\n else:\n logging.warning('SavedModel saved prior to TF 2.5 detected when loading Keras model. Please ensure that you are saving the model with model.save() or tf.keras.models.save_model(), *NOT* tf.saved_model.save(). To confirm, there should be a file named \"keras_metadata.pb\" in the SavedModel directory.')\n _read_legacy_metadata(object_graph_def, metadata)\n if not metadata.nodes:\n return tf_load.load(path, options=options)\n keras_loader = KerasObjectLoader(metadata, object_graph_def)\n keras_loader.load_layers(compile=compile)\n nodes_to_load = {'root': None}\n for node_id, loaded_node in keras_loader.loaded_nodes.items():\n nodes_to_load[keras_loader.get_path(node_id)] = loaded_node\n loaded = tf_load.load_partial(path, nodes_to_load, options=options)\n keras_loader.finalize_objects()\n keras_loader.del_tracking()\n model = loaded['root']\n if isinstance(model, training_lib.Model) and compile:\n training_config = model._serialized_attributes['metadata'].get('training_config', None)\n if training_config is not None:\n model.compile(**saving_utils.compile_args_from_training_config(training_config), from_serialized=True)\n saving_utils.try_build_compiled_arguments(model)\n if isinstance(model.optimizer, optimizer_v2.OptimizerV2):\n if model.optimizer.get_slot_names():\n logging.warning('Your optimizer uses slots. Slots cannot be restored from saved_model, as a result, your model is starting with a new initialized optimizer.')\n else:\n logging.warning('No training configuration found in save file, so the model was *not* compiled. Compile it manually.')\n if not context.executing_eagerly():\n sess = backend.get_session()\n sess.run(ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS))\n return model", "docstring": "Loads Keras objects from a SavedModel.\n\n Any Keras layer or model saved to the SavedModel will be loaded back\n as Keras objects. 
Other objects are loaded as regular trackable objects (same\n as `tf.saved_model.load`).\n\n Currently, Keras saving/loading only retains the Keras object's weights,\n losses, and call function.\n\n The loaded model can be re-compiled, but the original optimizer, compiled loss\n functions, and metrics are not retained. This is temporary, and `model.save`\n will soon be able to serialize compiled models.\n\nArgs:\n path: Path to SavedModel.\n compile: If true, compile the model after loading it.\n options: Optional `tf.saved_model.LoadOptions` object that specifies\n options for loading from SavedModel.\n\nReturns:\n Object loaded from SavedModel.", "source": "github_repos"} -{"code": "def __init__(self, config: PatchTSMixerConfig):\n super().__init__()\n self.norm = PatchTSMixerNormLayer(config)\n self.self_attn = config.self_attn\n self.gated_attn = config.gated_attn\n self.mlp = PatchTSMixerMLP(in_features=config.num_patches, out_features=config.num_patches, config=config)\n if config.gated_attn:\n self.gating_block = PatchTSMixerGatedAttention(in_size=config.num_patches, out_size=config.num_patches)\n if config.self_attn:\n self.self_attn_layer = PatchTSMixerAttention(embed_dim=config.d_model, num_heads=config.self_attn_heads, dropout=config.dropout, config=config)\n self.norm_attn = PatchTSMixerNormLayer(config)", "docstring": "This module mixes the patch dimension.\n\nArgs:\n config (`PatchTSMixerConfig`):\n Configuration.", "source": "github_repos"} -{"code": "def split(self, path: str) -> Tuple[str, str]:\n raise NotImplementedError", "docstring": "Splits the given path into two parts.\n\n Splits the path into a pair (head, tail) such that tail contains the last\n component of the path and head contains everything up to that.\n\n For file-systems other than the local file-system, head should include the\n prefix.\n\nArgs:\n path: path as a string\n\nReturns:\n a pair of path components as strings.", "source": "github_repos"} -{"code": "def data_gen_sig2() -> repr_dataset.RepresentativeDataset:\n for _ in range(4):\n yield {'conv_input': random_ops.random_uniform(shape=(1, 3, 4, 3))}", "docstring": "Generates tuple-style samples for signature 'sig2'.\n\n The first element of the tuple identifies the signature key the input data\n is for.\n\nYields:\n Representative sample for 'sig2'.", "source": "github_repos"} -{"code": "def add_to_collection(self, name, value) -> None:\n self._check_not_finalized()\n with self._lock:\n if name not in self._collections:\n self._collections[name] = [value]\n else:\n self._collections[name].append(value)", "docstring": "Stores `value` in the collection with the given `name`.\n\n Note that collections are not sets, so it is possible to add a value to\n a collection several times.\n\nArgs:\n name: The key for the collection. 
The `GraphKeys` class contains many\n standard names for collections.\n value: The value to add to the collection.", "source": "github_repos"} -{"code": "def __init__(self):\n self._last_step_outputs = {}\n self._last_step_outputs_reduce_ops = {}\n self._non_tensor_outputs = {}", "docstring": "Initialize an output context.\n\nReturns:\n A context object.", "source": "github_repos"} -{"code": "def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple:\n original_height, original_width = original_size\n best_fit = None\n max_effective_resolution = 0\n min_wasted_resolution = float('inf')\n for height, width in possible_resolutions:\n scale = min(width / original_width, height / original_height)\n downscaled_width, downscaled_height = (int(original_width * scale), int(original_height * scale))\n effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)\n wasted_resolution = width * height - effective_resolution\n if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):\n max_effective_resolution = effective_resolution\n min_wasted_resolution = wasted_resolution\n best_fit = (height, width)\n return best_fit", "docstring": "Selects the best resolution from a list of possible resolutions based on the original size.\n\n This is done by calculating the effective and wasted resolution for each possible resolution.\n\n The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.\n\nArgs:\n original_size (tuple):\n The original size of the image in the format (height, width).\n possible_resolutions (list):\n A list of possible resolutions in the format [(height1, width1), (height2, width2), ...].\n\nReturns:\n tuple: The best fit resolution in the format (height, width).", "source": "github_repos"} -{"code": "def get_gradients(self, loss, params):\n params = nest.flatten(params)\n with backend.get_graph().as_default(), backend.name_scope(self._name + '/gradients'):\n grads = gradients.gradients(loss, params)\n for grad, param in zip(grads, params):\n if grad is None:\n raise ValueError('Variable {} has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.'.format(param))\n return grads", "docstring": "Returns gradients of `loss` with respect to `params`.\n\n Should be used only in legacy v1 graph mode.\n\nArgs:\n loss: Loss tensor.\n params: List of variables.\n\nReturns:\n List of gradient tensors.\n\nRaises:\n ValueError: In case any gradient cannot be computed (e.g. if gradient\n function not implemented).", "source": "github_repos"} -{"code": "def __init__(self, input_ids: torch.LongTensor, assistant_model: 'PreTrainedModel', generation_config: 'GenerationConfig', model_kwargs: Dict, inputs_tensor: Optional[torch.Tensor]=None, logits_processor: 'LogitsProcessorList'=None):\n super().__init__(input_ids=input_ids, assistant_model=assistant_model, generation_config=generation_config, model_kwargs=model_kwargs, inputs_tensor=inputs_tensor, logits_processor=logits_processor)\n self.assistant_early_exit = self.generation_config.assistant_early_exit\n self.generation_config.assistant_early_exit = None", "docstring": "`CandidateGenerator` class to be used for assisted generation and speculative decoding. 
This class generates\n candidates through the use of **the model itself**, exiting early. Can only be used with models that support early\n exit, e.g., `facebook/layerskip-llama3.2-1B`.\n\nArgs:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\n assistant_model (`PreTrainedModel`):\n The original model. This model must support early exit (i.e. is trained to compute logits in earlier\n layers).\n generation_config (`~generation.GenerationConfig`, *optional*):\n The generation configuration to be used as base parametrization for the generation call.\n logits_processor (`LogitsProcessorList`):\n An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]\n used to modify the prediction scores of the language modeling head applied at each generation step.\n model_kwargs (`Dict`):\n The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant\n model as well.\n inputs_tensor (`torch.Tensor`, *optional*):\n The model input tensor. In encoder-decoder models, this is the encoder input.", "source": "github_repos"} -{"code": "def build_results(self, session, tensor_values):\n full_values = []\n assert len(self._final_fetches) == len(tensor_values)\n i = 0\n j = 0\n for is_op in self._ops:\n if is_op:\n full_values.append(None)\n else:\n if self._fetches[i].ref() in self._feed_handles:\n value = self._feed_handles[self._fetches[i].ref()].eval()\n else:\n value = self._feeds.get(self._fetches[i].ref())\n if value is None:\n value = tensor_values[j]\n j += 1\n dtype = self._fetch_handles.get(self._fetches[i].ref())\n if dtype:\n full_values.append(session_ops.TensorHandle(value, dtype, session))\n else:\n full_values.append(value)\n i += 1\n assert j == len(tensor_values)\n return self._fetch_mapper.build_results(full_values)", "docstring": "Build results matching the original fetch shape.\n\n `tensor_values` must be a list of the same length as\n the one returned by `fetches()`, and holding the requested\n fetch values.\n\n This method builds a struct with the same shape as the original `fetches`\n passed to the constructor, in which the fetches are replaced by their\n fetched value.\n\nArgs:\n session: The enclosing session. 
Used for tensor handles.\n tensor_values: List of values matching the list returned by fetches().\n\nReturns:\n A structure of the same shape as the original `fetches` argument but\n containing tensors or None (for fetched ops).", "source": "github_repos"} -{"code": "def __init__(self, original_lm_head, assistant_overlap_token_ids):\n super().__init__()\n self.pruned_lm_head = prune_linear_layer(original_lm_head, assistant_overlap_token_ids).to(original_lm_head.weight.dtype)", "docstring": "A class to prune and reindex the language model head.\n\n This class prunes the language model head to only include the specified token IDs and reindexes the logits\n to map back to the original vocabulary.\n\nArgs:\n original_lm_head (nn.Module): The original language model head.\n token_ids (list[int]): The list of token IDs to keep.", "source": "github_repos"} -{"code": "def _is_executing(self) -> bool:\n executors = self._executor.transform_executor_services.executors\n if not executors:\n return False\n for transform_executor in executors:\n if not transform_executor.blocked:\n return True\n return False", "docstring": "Checks whether the job is still executing.\n\nReturns:\n True if there is at least one non-blocked TransformExecutor active.", "source": "github_repos"} -{"code": "def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[Tuple, PerceiverClassifierOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n loss = None\n if labels is not None:\n raise NotImplementedError('Optical flow training is not yet supported')\n outputs = self.perceiver(inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n logits = outputs.logits if return_dict else outputs[0]\n if not return_dict:\n output = (logits,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return PerceiverClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "inputs (`torch.FloatTensor`):\n Inputs to the perceiver. Can be anything: images, text, audio, video, etc.\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the optical flow loss. 
Indices should be in `[0, ..., config.num_labels - 1]`.\n\nExample:\n ```python\n >>> from transformers import PerceiverForOpticalFlow\n >>> import torch\n\n >>> model = PerceiverForOpticalFlow.from_pretrained(\"deepmind/optical-flow-perceiver\")\n\n >>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,\n >>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)\n >>> # patches have shape (batch_size, num_frames, num_channels, height, width)\n >>> # the authors train on resolutions of 368 x 496\n >>> patches = torch.randn(1, 2, 27, 368, 496)\n >>> outputs = model(inputs=patches)\n >>> logits = outputs.logits\n >>> list(logits.shape)\n [1, 368, 496, 2]\n ```", "source": "github_repos"} -{"code": "def fn(x: list[list[Union[str, int]]]):\n return x", "docstring": "Test function\n\nArgs:\n x: The input", "source": "github_repos"} -{"code": "def __init__(self, total: int, prefix: Optional[str]=None, leave: bool=True, parent: Optional['NotebookTrainingTracker']=None, width: int=300):\n self.total = total\n self.prefix = '' if prefix is None else prefix\n self.leave = leave\n self.parent = parent\n self.width = width\n self.last_value = None\n self.comment = None\n self.output = None\n self.value = None\n self.label = None\n if 'VSCODE_PID' in os.environ:\n self.update_every = 0.5", "docstring": "A progress par for display in a notebook.\n\n Class attributes (overridden by derived classes)\n\n - **warmup** (`int`) -- The number of iterations to do at the beginning while ignoring `update_every`.\n - **update_every** (`float`) -- Since calling the time takes some time, we only do it every presumed\n `update_every` seconds. The progress bar uses the average time passed up until now to guess the next value\n for which it will call the update.\n\nArgs:\n total (`int`):\n The total number of iterations to reach.\n prefix (`str`, *optional*):\n A prefix to add before the progress bar.\n leave (`bool`, *optional*, defaults to `True`):\n Whether or not to leave the progress bar once it's completed. You can always call the\n [`~utils.notebook.NotebookProgressBar.close`] method to make the bar disappear.\n parent ([`~notebook.NotebookTrainingTracker`], *optional*):\n A parent object (like [`~utils.notebook.NotebookTrainingTracker`]) that spawns progress bars and handle\n their display. If set, the object passed must have a `display()` method.\n width (`int`, *optional*, defaults to 300):\n The width (in pixels) that the bar will take.\n\nExample:\n ```python\n import time\n\n pbar = NotebookProgressBar(100)\n for val in range(100):\n pbar.update(val)\n time.sleep(0.07)\n pbar.update(100)\n ```", "source": "github_repos"} -{"code": "def create_and_fill_np_array(start_or_end_logits, dataset, max_len):\n step = 0\n logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32)\n for i, output_logit in enumerate(start_or_end_logits):\n batch_size = output_logit.shape[0]\n cols = output_logit.shape[1]\n if step + batch_size < len(dataset):\n logits_concat[step:step + batch_size, :cols] = output_logit\n else:\n logits_concat[step:, :cols] = output_logit[:len(dataset) - step]\n step += batch_size\n return logits_concat", "docstring": "Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor\n\nArgs:\n start_or_end_logits(:obj:`tensor`):\n This is the output predictions of the model. 
We can only enter either start or end logits.\n eval_dataset: Evaluation dataset\n max_len(:obj:`int`):\n The maximum length of the output tensor. ( See the model.eval() part for more details )", "source": "github_repos"} -{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = math_ops.cast(y_true, self._dtype)\n y_pred = math_ops.cast(y_pred, self._dtype)\n [y_pred, y_true], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values([y_pred, y_true], sample_weight)\n y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n y_pred, self.normalizer = losses_utils.remove_squeezable_dimensions(y_pred, self.normalizer)\n y_pred.shape.assert_is_compatible_with(y_true.shape)\n relative_errors = math_ops.div_no_nan(math_ops.abs(y_true - y_pred), self.normalizer)\n return super(MeanRelativeError, self).update_state(relative_errors, sample_weight=sample_weight)", "docstring": "Accumulates metric statistics.\n\nArgs:\n y_true: The ground truth values.\n y_pred: The predicted values.\n sample_weight: Optional weighting of each example. Defaults to 1. Can be a\n `Tensor` whose rank is either 0, or the same rank as `y_true`, and must\n be broadcastable to `y_true`.\n\nReturns:\n Update op.", "source": "github_repos"} -{"code": "def gradient_tensor(self, x_tensor):\n x_tensor_name = self._get_tensor_name(x_tensor)\n if x_tensor_name not in self._gradient_tensors:\n raise LookupError('This GradientsDebugger has not received any gradient tensor for x-tensor %s' % x_tensor_name)\n return self._gradient_tensors[x_tensor_name]", "docstring": "Get the gradient tensor of an x-tensor.\n\nArgs:\n x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its\n name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor\n on the denominator of the differentiation.\n\nReturns:\n If found, the gradient tensor.\n\nRaises:\n TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.\n LookupError: If the `x_tensor` has not been registered with a gradient\n tensor.", "source": "github_repos"} -{"code": "def _is_variable(node_def: node_def_pb2.NodeDef) -> bool:\n return node_def.op == 'VarHandleOp'", "docstring": "Determines whether `node_def` is a variable node.\n\nArgs:\n node_def: `NodeDef` to test whether it is a variable or not.\n\nReturns:\n Returns True if it is a variable.", "source": "github_repos"} -{"code": "def _collect_layer_statistics(self) -> Dict[str, Dict[str, float]]:\n layer_statistics = collections.defaultdict(lambda: collections.defaultdict(list))\n initialize = True\n for tensor_data in self._data_gen():\n self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)\n initialize = False\n self._quant_interpreter.invoke()\n for tensor_detail in self._get_numeric_verify_tensor_details():\n tensor_name = tensor_detail['name']\n diffs = self._quant_interpreter.get_tensor(tensor_detail['index'])\n for metric_name, metric_fn in self._layer_debug_metrics.items():\n layer_statistics[tensor_name][metric_name].append(metric_fn(diffs))\n if self._debug_options.layer_direct_compare_metrics is not None:\n for tensor_detail in self._get_numeric_verify_tensor_details():\n tensor_name = tensor_detail['name']\n op_idx = self._defining_op[tensor_detail['index']]\n op_detail = self._quant_interpreter._get_op_details(op_idx)\n q_idx, f_idx = op_detail['inputs']\n quant_input_detail = self._quant_interpreter._get_tensor_details(q_idx, subgraph_index=0)\n for metric_name, metric_fn in 
self._debug_options.layer_direct_compare_metrics.items():\n layer_statistics[tensor_name][metric_name].append(metric_fn(self._quant_interpreter.get_tensor(f_idx), self._quant_interpreter.get_tensor(q_idx), quant_input_detail['quantization_parameters']['scales'][0], quant_input_detail['quantization_parameters']['zero_points'][0]))\n for metrics in layer_statistics.values():\n for metric_name in metrics:\n metrics[metric_name] = np.nanmean(metrics[metric_name])\n return layer_statistics", "docstring": "Collects layer statistics by applying layer debug metrics.\n\n For all data from the given RepresentativeDataset, collect statistics per\n example by getting the NumericVerify op results in _quant_interpreter\n and calculating layer debug metrics on the results.\n\nReturns:\n aggregated per-layer statistics of NumericVerify results.\n {layer_name: {metric_name: metric}}", "source": "github_repos"} -{"code": "def trace(self, name='trace'):\n with self._name_scope(name):\n return self._trace()", "docstring": "Trace of the linear operator, equal to sum of `self.diag_part()`.\n\n If the operator is square, this is also the sum of the eigenvalues.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.", "source": "github_repos"} -{"code": "def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None):\n q_ids = torch.arange(0, query_size, device=device)\n k_ids = torch.arange(0, key_size, device=device)\n rel_pos_ids = q_ids[:, None] - k_ids[None, :]\n if bucket_size > 0 and max_position > 0:\n rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)\n rel_pos_ids = rel_pos_ids.to(torch.long)\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = rel_pos_ids.unsqueeze(0)\n return rel_pos_ids", "docstring": "Build relative position according to the query and key\n\n We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key\n \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -\n P_k\\)\n\nArgs:\n query_size (int): the length of query\n key_size (int): the length of key\n bucket_size (int): the size of position bucket\n max_position (int): the maximum allowed absolute position\n device (`torch.device`): the device on which tensors will be created.\n\nReturns:\n `torch.LongTensor`: A tensor with shape [1, query_size, key_size]", "source": "github_repos"} -{"code": "def _with_dependencies(self, dependencies):\n new_row_splits = control_flow_ops.with_dependencies(dependencies, self._row_splits)\n return RowPartition(row_splits=new_row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)", "docstring": "Returns a new RowPartition equal to self with control dependencies.\n\n Specifically, self._row_splits is gated by the given control dependencies.\n Used to add sanity checks to the constructors.\n\nArgs:\n dependencies: a list of tensors to use as dependencies.\n\nReturns:\n A new RowPartition object.", "source": "github_repos"} -{"code": "def assert_true(expr, msg, extras=None):\n if not expr:\n fail(msg, extras)", "docstring": "Assert an expression evaluates to true, otherwise fail the test.\n\nArgs:\n expr: The expression that is evaluated.\n msg: A string explaining the details in case of failure.\n extras: An optional field for extra information to be 
included in\n test result.", "source": "github_repos"} -{"code": "def calendar_day_of_week(self: EventSetOrNode, tz: Union[str, float, int]=0) -> EventSetOrNode:\n from temporian.core.operators.calendar.day_of_week import calendar_day_of_week\n return calendar_day_of_week(self, tz)", "docstring": "Obtains the day of the week the timestamps in an\n [`EventSet`][temporian.EventSet]'s sampling are in.\n\n Features in the input are ignored, only the timestamps are used and\n they must be unix timestamps (`is_unix_timestamp=True`).\n\n Output feature contains numbers from 0 (Monday) to 6 (Sunday).\n\n By default, the timezone is UTC unless the `tz` argument is specified,\n as an offset in hours or a timezone name. See\n [`EventSet.calendar_hour()`][temporian.EventSet.calendar_hour] for an\n example using timezones.\n\n Usage example:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[\"2023-06-19\", \"2023-06-21\", \"2023-06-25\", \"2023-07-03\"],\n ... )\n >>> b = a.calendar_day_of_week()\n >>> b\n indexes: ...\n features: [('calendar_day_of_week', int32)]\n events:\n (4 events):\n timestamps: [...]\n 'calendar_day_of_week': [0 2 6 0]\n ...\n\n ```\n\nArgs:\n tz: timezone name (see `pytz.all_timezones`) or UTC offset in hours.\n\nReturns:\n EventSet with a single feature with the day of the week each timestamp\n in `sampling` belongs to.", "source": "github_repos"} -{"code": "def __init__(self, token_ids: List[int]):\n super(Constraint, self).__init__()\n if not isinstance(token_ids, list) or len(token_ids) == 0:\n raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.')\n if any((not isinstance(token_id, int) or token_id < 0 for token_id in token_ids)):\n raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.')\n self.token_ids = token_ids\n self.seqlen = len(self.token_ids)\n self.fulfilled_idx = -1\n self.completed = False", "docstring": "[`Constraint`] enforcing that an ordered sequence of tokens is included in the output.\n\nArgs:\n token_ids (`List[int]`):\n The id of the token that must be generated by the output.", "source": "github_repos"} -{"code": "def replace_flat_tensors_for_gradients(xs, flat_grads):\n xs_structure = [_get_tensors_for_gradient(x) for x in xs]\n grads = nest.pack_sequence_as(xs_structure, flat_grads)\n return [_replace_tensors_for_gradient(x, grad) for x, grad in zip(xs, grads)]", "docstring": "Replaces Tensors that should be differentiated in `xs` with `flat_grads`.\n\nArgs:\n xs: A list of `Tensor`s or `CompositeTensor`s.\n flat_grads: A list of `Tensor`.\n\nReturns:\n A list of `Tensor` or `CompositeTensor`.", "source": "github_repos"} -{"code": "def _get_bounding_box(self, box: 'torch.Tensor') -> Dict[str, int]:\n if self.framework != 'pt':\n raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')\n xmin, ymin, xmax, ymax = box.int().tolist()\n bbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}\n return bbox", "docstring": "Turns list [xmin, xmax, ymin, ymax] into dict { \"xmin\": xmin, ... 
}\n\nArgs:\n box (`torch.Tensor`): Tensor containing the coordinates in corners format.\n\nReturns:\n bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.", "source": "github_repos"} -{"code": "def update_pcollection_stats(self, pcollection_stats):\n edge_dict = {}\n for pcoll_id, stats in pcollection_stats.items():\n attrs = {}\n pcoll_list = stats['sample']\n if pcoll_list:\n attrs['label'] = format_sample(pcoll_list, 1)\n attrs['labeltooltip'] = format_sample(pcoll_list, 10)\n else:\n attrs['label'] = '?'\n edge_dict[pcoll_id] = attrs\n self._update_graph(edge_dict=edge_dict)", "docstring": "Updates PCollection stats.\n\nArgs:\n pcollection_stats: (dict of dict) maps PCollection IDs to informations. In\n particular, we only care about the field 'sample' which should be a\n the PCollection result in as a list.", "source": "github_repos"} -{"code": "def before_starting_server(self):\n self._validate_snippet_app_on_device()\n self._disable_hidden_api_blocklist()", "docstring": "Performs the preparation steps before starting the remote server.\n\n This function performs following preparation steps:\n * Validate that the Mobly Snippet app is available on the device.\n * Disable hidden api blocklist if necessary and possible.\n\nRaises:\n errors.ServerStartPreCheckError: if the server app is not installed\n for the current user.", "source": "github_repos"} -{"code": "def container_type_mismatch(self, stack, cls, mutations, name):\n details = f'Container: {self._pp.print_generic_type(cls)}\\n'\n allowed_contained = ''\n new_contained = ''\n for formal in cls.formal_type_parameters.keys():\n if formal in mutations:\n params, values, _ = mutations[formal]\n allowed_content = self._pp.print_type_of_instance(cls.get_formal_type_parameter(formal))\n new_content = self._pp.join_printed_types(sorted((self._pp.print_type(v) for v in set(values.data) - set(params.data))))\n allowed_contained += f' {formal}: {allowed_content}\\n'\n new_contained += f' {formal}: {new_content}\\n'\n annotation = self._pp.print_type_of_instance(cls)\n details += 'Allowed contained types (from annotation %s):\\n%sNew contained types:\\n%s' % (annotation, allowed_contained, new_contained)\n suffix = '' if name is None else ' for ' + name\n err_msg = f'New container type{suffix} does not match type annotation'\n self.error(stack, err_msg, details=details)", "docstring": "Invalid combination of annotation and mutation.\n\nArgs:\n stack: the frame stack\n cls: the container type\n mutations: a dict of {parameter name: (annotated types, new types)}\n name: the variable name (or None)", "source": "github_repos"} -{"code": "def process(self, feed_item):\n item = self.get(feed_item)\n if item:\n self._process_update(item, feed_item)\n self._clean(item)\n self._update(item, feed_item)\n else:\n new_item = self._process_new(feed_item)\n self._clean(new_item)\n item = self._insert(new_item, feed_item)\n if self._id_field and feed_item.get(self._id_field, '').startswith('ext'):\n store.map(self._entity, feed_item.get(self._id_field), item['id'])\n store.set(self._entity, [feed_item[self._id_field]], item)\n if self._search_field and feed_item.get(self._search_field, ''):\n store.map(self._entity, feed_item.get(self._search_field), item['id'])\n store.set(self._entity, [feed_item[self._search_field]], item)\n if item:\n feed_item[self._id_field] = item['id']\n store.set(self._entity, [item['id']], item)\n self._post_process(feed_item, item)\n return item", "docstring": "Processes a Bulkdozer feed item.\n\n This 
method identifies if the item needs to be inserted or updated, cleans\n it, performs the CM operations required, and update the feed item with newly\n created ids and name lookups so that the feed can be updated.\n\nArgs:\n feed_item: Bulkdozer feed item to process.\n\nReturns:\n Newly created or updated CM object.", "source": "github_repos"} -{"code": "def _execute_bundle(self, runner_execution_context: execution.FnApiRunnerExecutionContext, bundle_context_manager: execution.BundleContextManager, bundle_input: DataInput) -> beam_fn_api_pb2.InstructionResponse:\n worker_handler_manager = runner_execution_context.worker_handler_manager\n worker_handler_manager.register_process_bundle_descriptor(bundle_context_manager.process_bundle_descriptor)\n bundle_manager = self._get_bundle_manager(bundle_context_manager)\n last_result, deferred_inputs, newly_set_timers, watermark_updates = self._run_bundle(runner_execution_context, bundle_context_manager, bundle_input, bundle_context_manager.stage_data_outputs, bundle_context_manager.stage_timer_outputs, bundle_manager)\n for pc_name, watermark in watermark_updates.items():\n _BUNDLE_LOGGER.debug('Update: %s %s', pc_name, watermark)\n runner_execution_context.watermark_manager.set_pcoll_watermark(pc_name, watermark)\n if deferred_inputs:\n assert runner_execution_context.watermark_manager.get_stage_node(bundle_context_manager.stage.name).output_watermark() < timestamp.MAX_TIMESTAMP, 'wrong timestamp for %s. ' % runner_execution_context.watermark_manager.get_stage_node(bundle_context_manager.stage.name)\n runner_execution_context.queues.ready_inputs.enque((bundle_context_manager.stage.name, DataInput(deferred_inputs, {})))\n self._enqueue_set_timers(runner_execution_context, bundle_context_manager, newly_set_timers, bundle_input)\n if not deferred_inputs and (not newly_set_timers):\n for _, output_pc in bundle_context_manager.stage_data_outputs.items():\n _, update_output_pc = translations.split_buffer_id(output_pc)\n runner_execution_context.watermark_manager.get_pcoll_node(update_output_pc).set_produced_watermark(timestamp.MAX_TIMESTAMP)\n data_side_input = runner_execution_context.side_input_descriptors_by_stage.get(bundle_context_manager.stage.name, {})\n runner_execution_context.commit_side_inputs_to_state(data_side_input)\n buffers_to_clean = set()\n known_consumers = set()\n for transform_id, buffer_id in bundle_context_manager.stage_data_outputs.items():\n for consuming_stage_name, consuming_transform in runner_execution_context.buffer_id_to_consumer_pairs.get(buffer_id, []):\n buffer = runner_execution_context.pcoll_buffers.get(buffer_id, None)\n if buffer_id in runner_execution_context.pcoll_buffers and buffer_id not in buffers_to_clean:\n buffers_to_clean.add(buffer_id)\n elif buffer and buffer_id in buffers_to_clean:\n runner_execution_context.pcoll_buffers[buffer_id] = buffer.copy()\n buffer = runner_execution_context.pcoll_buffers[buffer_id]\n if buffer_id not in runner_execution_context.pcoll_buffers:\n buffer = bundle_context_manager.get_buffer(buffer_id, transform_id)\n runner_execution_context.pcoll_buffers[buffer_id] = buffer\n buffers_to_clean.add(buffer_id)\n if (consuming_stage_name, consuming_transform, buffer_id) in known_consumers:\n continue\n else:\n known_consumers.add((consuming_stage_name, consuming_transform, buffer_id))\n runner_execution_context.queues.watermark_pending_inputs.enque(((consuming_stage_name, timestamp.MAX_TIMESTAMP), DataInput({consuming_transform: buffer}, {})))\n for bid in buffers_to_clean:\n if bid in 
runner_execution_context.pcoll_buffers:\n del runner_execution_context.pcoll_buffers[bid]\n return last_result", "docstring": "Execute a bundle end-to-end.\n\nArgs:\n runner_execution_context (execution.FnApiRunnerExecutionContext): An\n object containing execution information for the pipeline.\n bundle_context_manager (execution.BundleContextManager): A description of\n the stage to execute, and its context.\n bundle_input: The set of buffers to input into this bundle", "source": "github_repos"} -{"code": "def __init__(self, resume_delay=0):\n self.resume_delay = resume_delay", "docstring": "Initializes a ProcessContinuation object.\n\nArgs:\n resume_delay: indicates the minimum time, in seconds, that should elapse\n before re-invoking process() method for resuming the invocation of the\n current element.", "source": "github_repos"} -{"code": "def parse_args(test: ArgList=None) -> argparse.Namespace:\n parser = argparse.ArgumentParser(prog='budoux', formatter_class=lambda prog: BudouxHelpFormatter(prog, **{'width': shutil.get_terminal_size(fallback=(120, 50)).columns, 'max_help_position': 30}), description=textwrap.dedent(' BudouX is the successor to Budou,\\n the machine learning powered line break organizer tool.'), epilog='\\n- '.join(['supported languages of `-l`, `--lang`:', *langs.keys()]))\n parser.add_argument('text', metavar='TXT', nargs='?', type=str, help='text')\n parser.add_argument('-H', '--html', action='store_true', help='HTML mode')\n model_select_group = parser.add_mutually_exclusive_group()\n model_select_group.add_argument('-m', '--model', metavar='JSON', type=check_file, default=check_lang('ja'), help='custom model file path')\n model_select_group.add_argument('-l', '--lang', metavar='LANG', type=check_lang, help='language of custom model')\n parser.add_argument('-s', '--sep', metavar='STR', type=str, default='\\n', help='output phrase separator in TEXT mode')\n parser.add_argument('-d', '--delim', metavar='STR', type=str, default='---', help='output sentence delimiter in TEXT mode')\n parser.add_argument('-V', '--version', action='version', version='%(prog)s {}'.format(budoux.__version__))\n if test is not None:\n return parser.parse_args(test)\n else:\n return parser.parse_args()", "docstring": "Parse commandline arguments.\n\nArgs:\n test (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None.\n\nReturns:\n argparse.Namespace: Parsed data of args.", "source": "github_repos"} -{"code": "def parse_indices(indices_string):\n indices_string = re.sub('\\\\s+', '', indices_string)\n if indices_string.startswith('[') and indices_string.endswith(']'):\n indices_string = indices_string[1:-1]\n return [int(element) for element in indices_string.split(',')]", "docstring": "Parse a string representing indices.\n\n For example, if the input is \"[1, 2, 3]\", the return value will be a list of\n indices: [1, 2, 3]\n\nArgs:\n indices_string: (str) a string representing indices. 
Can optionally be\n surrounded by a pair of brackets.\n\nReturns:\n (list of int): Parsed indices.", "source": "github_repos"} -{"code": "def deregister_context(self, context_words):\n for context_word in context_words:\n if context_word not in self._comp_dict:\n raise KeyError('Cannot deregister unregistered context word \"%s\"' % context_word)\n for context_word in context_words:\n del self._comp_dict[context_word]", "docstring": "Deregister a list of context words.\n\nArgs:\n context_words: A list of context words to deregister, as a list of str.\n\nRaises:\n KeyError: if there are word(s) in context_words that do not correspond\n to any registered contexts.", "source": "github_repos"} -{"code": "def as_videos(content: ProcessorContentTypes, *, ignore_unsupported_types: bool=False) -> list[ProcessorPart]:\n return _as_format_helper(content, lambda mime: mime.startswith('video/'), ignore_unsupported_types)", "docstring": "Returns the video parts from the content.\n\nArgs:\n content: Input content.\n ignore_unsupported_types: By default if content contains non-video parts a\n ValueError would be raised. This argument allows ingoring such parts.\n\nReturns:\n A list of video parts.", "source": "github_repos"} -{"code": "def _get_func_name():\n return tf_inspect.stack()[1][3]", "docstring": "Get the name of current function.\n\nReturns:\n String that is the name of current function.", "source": "github_repos"} -{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:\n if input_ids is None and inputs_embeds is None:\n raise ValueError('Need to provide either `input_ids` or `input_embeds`.')\n if input_ids is not None:\n check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n input_shape = shape_list(inputs_embeds)[:-1]\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0)\n position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\n final_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github_repos"} -{"code": "def create_detector(model_uri: str, **kwargs) -> OfflineDetector:\n model_handler = KeyedModelHandler(PyODModelHandler(model_uri=model_uri)).with_postprocess_fn(OfflineDetector.score_prediction_adapter)\n m = model_handler.load_model()\n assert isinstance(m, PyODBaseDetector)\n threshold = float(m.threshold_)\n detector = OfflineDetector(model_handler, threshold_criterion=FixedThreshold(threshold), **kwargs)\n return detector", "docstring": "A utility function to create OfflineDetector for a PyOD model.\n\n **NOTE:** This API and its implementation are currently under active\n development and may not be backward compatible.\n\nArgs:\n model_uri: The URI specifying the location of the pickled PyOD model.\n 
**kwargs: Additional keyword arguments.", "source": "github_repos"} -{"code": "def is_registered(self, prefix):\n return self._resolve_prefix(prefix) is not None", "docstring": "Test if a command prefix or its alias is has a registered handler.\n\nArgs:\n prefix: A prefix or its alias, as a str.\n\nReturns:\n True iff a handler is registered for prefix.", "source": "github_repos"} -{"code": "def report_list(config, auth, account):\n account_id, advertiser_id = parse_account(config, auth, account)\n is_superuser, profile_id = get_profile_for_api(config, auth, account_id)\n kwargs = {'profileId': profile_id, 'accountId': account_id} if is_superuser else {'profileId': profile_id}\n for report in API_DCM(auth, iterate=True, internal=is_superuser).reports().list(**kwargs).execute():\n yield report", "docstring": "Lists all the DCM report configurations for an account given the current credentials.\n\n Bulletproofing:\n https://developers.google.com/doubleclick-advertisers/v3.2/reports/list\n\nArgs:\n * auth: (string) Either user or service.\n * account: (string) [account:advertiser@profile] token.\n\nReturns:\n * Iterator of JSONs.", "source": "github_repos"} -{"code": "def forward(self, pixel_values: Tensor, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> MaskFormerModelOutput:\n if pixel_values is None:\n raise ValueError('You have to specify pixel_values')\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n batch_size, _, height, width = pixel_values.shape\n if pixel_mask is None:\n pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)\n pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states, return_dict=return_dict)\n image_features = pixel_level_module_output[0]\n pixel_embeddings = pixel_level_module_output[1]\n transformer_module_output = self.transformer_module(image_features, output_hidden_states, output_attentions)\n queries = transformer_module_output.last_hidden_state\n encoder_hidden_states = None\n pixel_decoder_hidden_states = None\n transformer_decoder_hidden_states = None\n hidden_states = None\n if output_hidden_states:\n encoder_hidden_states = pixel_level_module_output[2]\n pixel_decoder_hidden_states = pixel_level_module_output[3]\n transformer_decoder_hidden_states = transformer_module_output[1]\n hidden_states = encoder_hidden_states + pixel_decoder_hidden_states + transformer_decoder_hidden_states\n output = MaskFormerModelOutput(encoder_last_hidden_state=image_features, pixel_decoder_last_hidden_state=pixel_embeddings, transformer_decoder_last_hidden_state=queries, encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, hidden_states=hidden_states, attentions=transformer_module_output.attentions)\n if not return_dict:\n output = tuple((v for v in output.values()))\n return output", "docstring": "Examples:\n\n ```python\n >>> from transformers import AutoImageProcessor, MaskFormerModel\n >>> from PIL import Image\n >>> import requests\n\n >>> # load MaskFormer fine-tuned on ADE20k semantic segmentation\n >>> image_processor = 
AutoImageProcessor.from_pretrained(\"facebook/maskformer-swin-base-ade\")\n >>> model = MaskFormerModel.from_pretrained(\"facebook/maskformer-swin-base-ade\")\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = image_processor(image, return_tensors=\"pt\")\n\n >>> # forward pass\n >>> outputs = model(**inputs)\n\n >>> # the decoder of MaskFormer outputs hidden states of shape (batch_size, num_queries, hidden_size)\n >>> transformer_decoder_last_hidden_state = outputs.transformer_decoder_last_hidden_state\n >>> list(transformer_decoder_last_hidden_state.shape)\n [1, 100, 256]\n ```", "source": "github_repos"} -{"code": "def argmax(x, axis=-1):\n return math_ops.argmax(x, axis)", "docstring": "Returns the index of the maximum value along an axis.\n\nArgs:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n\nReturns:\n A tensor.", "source": "github_repos"} -{"code": "def forward_rates(df_start_dates, df_end_dates, daycount_fractions, dtype=None, name=None):\n name = name or 'forward_rates'\n with tf.name_scope(name):\n df_start_dates = tf.convert_to_tensor(df_start_dates, dtype, name='df_start_dates')\n dtype = dtype or df_start_dates.dtype\n df_end_dates = tf.convert_to_tensor(df_end_dates, dtype, name='df_end_dates')\n daycount_fractions = tf.convert_to_tensor(daycount_fractions, dtype, name='daycount_fractions')\n return tf.math.divide_no_nan(tf.math.divide_no_nan(df_start_dates, df_end_dates) - 1, daycount_fractions)", "docstring": "Computes forward rates from daycount fractions and discount factors.\n\n #### Example\n ```python\n # Discount factors at start dates\n df_start_dates = [[0.95, 0.9, 0.75], [0.95, 0.99, 0.85]]\n # Discount factors at end dates\n df_end_dates = [[0.8, 0.6, 0.5], [0.8, 0.9, 0.5]]\n # Daycount fractions between the dates\n daycount_fractions = [[0.5, 1.0, 2], [0.6, 0.4, 4.0]]\n # Expected:\n # [[0.375 , 0.5 , 0.25 ],\n # [0.3125, 0.25 , 0.175 ]]\n forward_rates(df_start_dates, df_end_dates, daycount_fractions,\n dtype=tf.float64)\n ```\n\nArgs:\n df_start_dates: A real `Tensor` representing discount factors at the start\n dates.\n df_end_dates: A real `Tensor` representing discount factors at the end\n dates.\n daycount_fractions: A real `Tensor` representing year fractions for the\n coupon accrual.\n dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.\n Default value: None which maps to the default dtype inferred from\n `df_start_dates`.\n name: Python str. The name to give to the ops created by this function.\n Default value: None which maps to 'forward_rates'.\n\n Returns:", "source": "github_repos"} -{"code": "def executions(self, digest=False, begin=None, end=None):\n digests = self._execution_digests\n if begin is not None or end is not None:\n begin = begin or 0\n end = end or len(digests)\n digests = digests[begin:end]\n if digest:\n return digests\n else:\n return [self.read_execution(digest) for digest in digests]", "docstring": "Get `Execution`s or `ExecutionDigest`s this reader has read so far.\n\nArgs:\n digest: Whether the results are returned in a digest form, i.e.,\n `ExecutionDigest` format, instead of the more detailed `Execution`\n format.\n begin: Optional beginning index for the requested execution data objects\n or their digests. Python-style negative indices are supported.\n end: Optional ending index for the requested execution data objects or\n their digests. 
Python-style negative indices are supported.\n\nReturns:\n If `digest`: a `list` of `ExecutionDigest` objects.\n Else: a `list` of `Execution` objects.", "source": "github_repos"} -{"code": "def np_asarray(values, dtype=None, order=None, copy=None):\n if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0':\n if dtype is not None and np.issubdtype(dtype, np.number):\n return np.asarray(values, order=order, copy=copy).astype(dtype, copy=copy)\n else:\n return np.asarray(values, dtype=dtype, order=order, copy=copy)\n else:\n return np.asarray(values, dtype=dtype, order=order)", "docstring": "Converts input values to a NumPy array.\n\n It will not make a copy.\n\n In NumPy 2.x and later, strict type casting can lead to errors when values\n overflow the specified dtype. This function addresses this by replacing direct\n np.array(..., dtype=...) calls with np.array(...).astype(...). This allows for\n intended overflows, aligning with the behavior of older NumPy versions.\n\nArgs:\n values: Array_like objects. E.g., a python list, tuple, or an object whose\n __array__ method returns an array.\n dtype: The desired numpy data type for the array.\n order: {‘C’, ‘F’, ‘A’, ‘K’}.\n copy: bool. If True, then the object is copied. If None then the object is\n copied only if needed, i.e. if __array__ returns a copy, if obj is a\n nested sequence, or if a copy is needed to satisfy any of the other\n requirements (dtype, order, etc.). For False it raises a ValueError if a\n copy cannot be avoided.\n\nReturns:\n A NumPy array with the specified data type.", "source": "github_repos"} -{"code": "def is_layouts_same(self, embedding_layouts) -> bool:\n if self._checkpoint_layouts.keys() != embedding_layouts.keys():\n raise ValueError('Layouts in checkpoint and embedding must have the same keys. 
found {} and {}'.format(self._checkpoint_layouts.keys(), embedding_layouts.keys()))\n for key, layout in self._checkpoint_layouts.items():\n if not compare.ProtoEq(layout, embedding_layouts[key]):\n logging.info('Layouts do not match for %s this will require resharding; %s vs %s', key, layout, embedding_layouts[key])\n return False\n return True", "docstring": "Returns True if the all the embedding and checkpoint layouts are the same.\n\nArgs:\n embedding_layouts: dict of layouts for embedding tables.\n\n Raises: ValueError if the embedding layouts and checkpoint layouts do not\n have the same keys.\n Returns: Bool representing if the embedding layouts match the layouts in\n checkpoint.", "source": "github_repos"} -{"code": "def _set_verbosity_from(posarg):\n\n def decorator(f):\n\n def wrapper(*args, **kwargs):\n options = kwargs.get('options', args[posarg])\n with config.verbosity_from(options):\n return f(*args, **kwargs)\n return wrapper\n return decorator", "docstring": "Decorator to set the verbosity for a function that takes an options arg.\n\n Assumes that the function has an argument named `options` that is a\n config.Options object.\n\nArgs:\n posarg: The index of `options` in the positional arguments.\n\nReturns:\n The decorator.", "source": "github_repos"} -{"code": "def stddev(self, name='stddev'):\n with self._name_scope(name):\n try:\n return self._stddev()\n except NotImplementedError as original_exception:\n try:\n return math_ops.sqrt(self._variance())\n except NotImplementedError:\n raise original_exception", "docstring": "Standard deviation.\n\n Standard deviation is defined as,\n\n ```none\n stddev = E[(X - E[X])**2]**0.5\n ```\n\n where `X` is the random variable associated with this distribution, `E`\n denotes expectation, and `stddev.shape = batch_shape + event_shape`.\n\nArgs:\n name: Python `str` prepended to names of ops created by this function.\n\nReturns:\n stddev: Floating-point `Tensor` with shape identical to\n `batch_shape + event_shape`, i.e., the same shape as `self.mean()`.", "source": "github_repos"} -{"code": "def summarize(self, highlight=None):\n lines = [RL('Command-line configuration:', 'bold'), RL('')]\n for name, val in self._config.items():\n highlight_attr = 'bold' if name == highlight else None\n line = RL(' ')\n line += RL(name, ['underline', highlight_attr])\n line += RL(': ')\n line += RL(str(val), font_attr=highlight_attr)\n lines.append(line)\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)", "docstring": "Get a text summary of the config.\n\nArgs:\n highlight: A property name to highlight in the output.\n\nReturns:\n A `RichTextLines` output.", "source": "github_repos"} -{"code": "def merge_summaries(prev_summary, next_summary, epsilon):\n merged = np.concatenate((prev_summary, next_summary), axis=1)\n merged = np.take(merged, np.argsort(merged[0]), axis=1)\n return compress_summary(merged, epsilon)", "docstring": "Weighted merge sort of summaries.\n\n Given two summaries of distinct data, this function merges (and compresses)\n them to stay within `epsilon` error tolerance.\n\nArgs:\n prev_summary: 2D `np.ndarray` summary to be merged with `next_summary`.\n next_summary: 2D `np.ndarray` summary to be merged with `prev_summary`.\n epsilon: A float that determines the approximate desired precision.\n\nReturns:\n A 2-D `np.ndarray` that is a merged summary. 
First column is the\n interpolated partition values, the second is the weights (counts).", "source": "github_repos"} -{"code": "def split(self, path):\n return os.path.split(os.path.abspath(path))", "docstring": "Splits the given path into two parts.\n\n Splits the path into a pair (head, tail) such that tail contains the last\n component of the path and head contains everything up to that.\n\nArgs:\n path: path as a string\n\nReturns:\n a pair of path components as strings.", "source": "github_repos"} -{"code": "def delete_recursively_v2(path):\n _pywrap_file_io.DeleteRecursively(compat.path_to_bytes(path))", "docstring": "Deletes everything under path recursively.\n\nArgs:\n path: string, a path\n\nRaises:\n errors.OpError: If the operation fails.", "source": "github_repos"} -{"code": "def eq(left: Any, right: Any) -> bool:\n if left is right:\n return True\n if isinstance(left, list) and isinstance(right, list) or (isinstance(left, tuple) and isinstance(right, tuple)):\n if len(left) != len(right):\n return False\n for x, y in zip(left, right):\n if ne(x, y):\n return False\n return True\n elif isinstance(left, dict):\n if not isinstance(right, dict) or len(left) != len(right) or set(left.keys()) != set(right.keys()):\n return False\n left_items = left.sym_items if isinstance(left, Symbolic) else left.items\n right_item = right.sym_getattr if isinstance(right, Symbolic) else right.__getitem__\n for k, v in left_items():\n if ne(v, right_item(k)):\n return False\n return True\n elif hasattr(left, 'sym_eq') and (not inspect.isclass(left)) and (left.sym_eq.__code__ is not Symbolic.sym_eq.__code__):\n return left.sym_eq(right)\n elif hasattr(right, 'sym_eq') and (not inspect.isclass(right)) and (right.sym_eq.__code__ is not Symbolic.sym_eq.__code__):\n return right.sym_eq(left)\n return pg_typing.callable_eq(left, right)", "docstring": "Compares if two values are equal. Use symbolic equality if possible.\n\n Example::\n\n @pg.members([\n ('x', pg.typing.Any())\n ])\n class A(pg.Object):\n def sym_eq(self, right):\n if super().sym_eq(right):\n return True\n return pg.eq(self.x, right)\n\n class B:\n pass\n\n assert pg.eq(1, 1)\n assert pg.eq(A(1), A(1))\n # This is True since A has override `sym_eq`.\n assert pg.eq(A(1), 1)\n # Objects of B are compared by references.\n assert not pg.eq(A(B()), A(B()))\n\nArgs:\n left: The left-hand value to compare.\n right: The right-hand value to compare.\n\nReturns:\n True if left and right is equal or symbolically equal. 
Otherwise False.", "source": "github_repos"} -{"code": "def RegisterImplementation(cache_name, map_name, cache):\n global _cache_implementations\n if cache_name not in _cache_implementations:\n logging.info('Registering [%s] cache for [%s].', cache_name, map_name)\n _cache_implementations[cache_name] = {}\n _cache_implementations[cache_name][map_name] = cache", "docstring": "Register a Cache implementation with the CacheFactory.\n\n Child modules are expected to call this method in the file-level scope\n so that the CacheFactory is aware of them.\n\nArgs:\n cache_name: (string) The name of the NSS backend.\n map_name: (string) The name of the map handled by this Cache.\n cache: A class type that is a subclass of Cache.\n\n Returns: Nothing", "source": "github_repos"} -{"code": "def get_resource(self, feature_column, name):\n del feature_column, name\n raise NotImplementedError('StateManager.get_resource')", "docstring": "Returns an already created resource.\n\n Resources can be things such as tables, variables, trackables, etc.\n\nArgs:\n feature_column: A `FeatureColumn` object this variable corresponds to.\n name: Name of the resource.", "source": "github_repos"} -{"code": "def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)", "docstring": "Rescale the image by the given factor. image = image * rescale_factor.\n\nArgs:\n image (`np.ndarray`):\n Image to rescale.\n rescale_factor (`float`):\n The value to use for rescaling.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. If unset, is inferred from the input image. Can be\n one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.", "source": "github_repos"} -{"code": "def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:\n if encoder_attention_mask.dim() == 3:\n encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\n if encoder_attention_mask.dim() == 2:\n encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\n encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)\n encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min\n return encoder_extended_attention_mask", "docstring": "Invert an attention mask (e.g., switches 0. 
and 1.).\n\nArgs:\n encoder_attention_mask (`torch.Tensor`): An attention mask.\n\nReturns:\n `torch.Tensor`: The inverted attention mask.", "source": "github_repos"} -{"code": "def get_use_xla_spmd(device_type):\n return device_type == 'TPU' and '0' != os.environ.get('DTENSOR_TEST_USE_XLA_SPMD', '0')", "docstring": "Returns True when device_type is TPU and environment variable is set.\n\nArgs:\n device_type: A str representing the type of device on the mesh.\n\nReturns:\n bool: True when device_type is TPU and environment variable is set.", "source": "github_repos"} -{"code": "def conv_output_shape(input_shape, kernel_shape, strides, padding):\n dims = range(len(kernel_shape))\n output_shape = [conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d]) for d in dims]\n output_shape = tuple([0 if input_shape[d] == 0 else output_shape[d] for d in dims])\n return output_shape", "docstring": "Return the output shape of an N-D convolution.\n\n Forces dimensions where input is empty (size 0) to remain empty.\n\nArgs:\n input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the\n input.\n kernel_shape: tuple of size N, spatial shape of the convolutional kernel /\n receptive field.\n strides: tuple of size N, strides along each spatial dimension.\n padding: type of padding, string `\"same\"` or `\"valid\"`.\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n\nReturns:\n tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.", "source": "github_repos"} -{"code": "def get_layer(self, name=None, index=None):\n if index is not None and name is not None:\n raise ValueError('Provide only a layer name or a layer index.')\n if index is not None:\n if len(self.layers) <= index:\n raise ValueError('Was asked to retrieve layer at index ' + str(index) + ' but model only has ' + str(len(self.layers)) + ' layers.')\n else:\n return self.layers[index]\n if name is not None:\n for layer in self.layers:\n if layer.name == name:\n return layer\n raise ValueError('No such layer: ' + name + '.')\n raise ValueError('Provide either a layer name or layer index.')", "docstring": "Retrieves a layer based on either its name (unique) or index.\n\n If `name` and `index` are both provided, `index` will take precedence.\n Indices are based on order of horizontal graph traversal (bottom-up).\n\nArgs:\n name: String, name of layer.\n index: Integer, index of layer.\n\nReturns:\n A layer instance.\n\nRaises:\n ValueError: In case of invalid layer name or index.", "source": "github_repos"} -{"code": "def __init__(self, config: FastSpeech2ConformerConfig, num_layers=2, num_chans=384, kernel_size=3, dropout_rate=0.5):\n super().__init__()\n self.conv_layers = nn.ModuleList()\n for idx in range(num_layers):\n input_channels = config.hidden_size if idx == 0 else num_chans\n layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, kernel_size, dropout_rate)\n self.conv_layers.append(layer)\n self.linear = nn.Linear(num_chans, 1)", "docstring": "Initialize variance predictor module.\n\nArgs:\n input_dim (`int`): Input dimension.\n num_layers (`int`, *optional*, defaults to 2): Number of convolutional layers.\n num_chans (`int`, *optional*, defaults to 384): Number of channels of convolutional layers.\n kernel_size (`int`, *optional*, defaults to 3): Kernel size of convolutional layers.\n dropout_rate (`float`, *optional*, defaults to 0.5): Dropout 
rate.", "source": "github_repos"} -{"code": "def _get_saver_def_or_none(exported_model: exported_model_pb2.ExportedModel) -> Optional[saver_pb2.SaverDef]:\n if exported_model.HasField('saver_def'):\n return exported_model.saver_def\n return None", "docstring": "Returns the SaverDef from ExportedModel, None otherwise.\n\nArgs:\n exported_model: ExportedModel to take the SaverDef from.\n\nReturns:\n SaverDef instance if the field `saver_def` is set. None otherwise.", "source": "github_repos"} -{"code": "def get_attr(self, name):\n fields = ('s', 'i', 'f', 'b', 'type', 'shape', 'tensor', 'func')\n try:\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n except errors.InvalidArgumentError as e:\n raise ValueError(e.message)\n x = attr_value_pb2.AttrValue()\n x.ParseFromString(data)\n oneof_value = x.WhichOneof('value')\n if oneof_value is None:\n return []\n if oneof_value == 'list':\n for f in fields:\n if getattr(x.list, f):\n if f == 'type':\n return [dtypes.as_dtype(t) for t in x.list.type]\n else:\n return list(getattr(x.list, f))\n return []\n if oneof_value == 'type':\n return dtypes.as_dtype(x.type)\n assert oneof_value in fields, 'Unsupported field type in ' + str(x)\n return getattr(x, oneof_value)", "docstring": "Returns the value of the attr of this op with the given `name`.\n\nArgs:\n name: The name of the attr to fetch.\n\nReturns:\n The value of the attr, as a Python object.\n\nRaises:\n ValueError: If this op does not have an attr with the given `name`.", "source": "github_repos"} -{"code": "def _extract_type_spec_recursively(value):\n if isinstance(value, composite_tensor.CompositeTensor):\n return value._type_spec\n if isinstance(value, variables.Variable):\n return resource_variable_ops.VariableSpec(value.shape, dtype=value.dtype, trainable=value.trainable)\n if tensor_util.is_tensor(value):\n return tensor_spec.TensorSpec(value.shape, value.dtype)\n if isinstance(value, list):\n return list((_extract_type_spec_recursively(v) for v in value))\n if isinstance(value, data_structures.TrackableDataStructure):\n return _extract_type_spec_recursively(value.__wrapped__)\n if isinstance(value, tuple):\n return type(value)((_extract_type_spec_recursively(x) for x in value))\n if isinstance(value, dict):\n return type(value)(((k, _extract_type_spec_recursively(v)) for k, v in value.items()))\n return value", "docstring": "Return (collection of) `TypeSpec`(s) for `value` if it includes `Tensor`s.\n\n If `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If\n `value` is a collection containing `Tensor` values, recursively supplant them\n with their respective `TypeSpec`s in a collection of parallel stucture.\n\n If `value` is none of the above, return it unchanged.\n\nArgs:\n value: a Python `object` to (possibly) turn into a (collection of)\n `tf.TypeSpec`(s).\n\nReturns:\n spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`\n or `value`, if no `Tensor`s are found.", "source": "github_repos"} -{"code": "def concatenate(xs, axis=0):\n if any_symbolic_tensors(xs):\n return Concatenate(axis=axis).symbolic_call(xs)\n return backend.numpy.concatenate(xs, axis=axis)", "docstring": "Join a sequence of tensors along an existing axis.\n\nArgs:\n xs: The sequence of tensors to concatenate.\n axis: The axis along which the tensors will be joined. 
Defaults to `0`.\n\nReturns:\n The concatenated tensor.", "source": "github_repos"} -{"code": "def get_registered_kernels_for_op(name):\n buf = c_api.TF_GetRegisteredKernelsForOp(name)\n data = c_api.TF_GetBuffer(buf)\n kernel_list = kernel_def_pb2.KernelList()\n kernel_list.ParseFromString(compat.as_bytes(data))\n return kernel_list", "docstring": "Returns a KernelList proto of registered kernels for a given op.\n\nArgs:\n name: A string representing the name of the op whose kernels to retrieve.", "source": "github_repos"} -{"code": "def _determine_and_instrument_traced_tensors(self, graph_order, ops_in_exec_path, tensor_trace_points, report_handler):\n traced_tensors = []\n checkpoint_operations = set([tensor.op for tensor, _ in tensor_trace_points])\n for op_id, op in enumerate(graph_order.operations):\n if checkpoint_operations and op not in checkpoint_operations:\n continue\n if self._skip_op(op_id, op, ops_in_exec_path, report_handler):\n continue\n for i in range(len(op.outputs)):\n out_tensor = op.outputs[i]\n if not self._skip_tensor(op_id, out_tensor, report_handler):\n traced_tensors.append(out_tensor)\n return traced_tensors", "docstring": "Determines the tensors to trace and instruments the trace details.\n\nArgs:\n graph_order: graph_order tuple containing graph (tf.graph), operations\n (list of operations), op_to_idx (op id mapping), (tensors) list of\n tensors, tensor_to_idx (tensor id mapping), contains_cycle (whether\n there is a cycle in the graph), topological_order_or_cycle (list of ops\n in topological order or list of ops creating a cycle).\n ops_in_exec_path: Set of ops in the execution path.\n tensor_trace_points: Collection of programatic tensor trace points.\n report_handler: An instance of tensor_tracer_report.TTReportHandle.\n\nReturns:\n List of tensors to be traced.", "source": "github_repos"} -{"code": "def _get_profile_data_generator(self):\n node_to_file_path = {}\n node_to_line_number = {}\n node_to_func_name = {}\n node_to_op_type = {}\n for op in self._graph.get_operations():\n for trace_entry in reversed(op.traceback):\n file_path = trace_entry[0]\n line_num = trace_entry[1]\n func_name = trace_entry[2]\n if not source_utils.guess_is_tensorflow_py_library(file_path):\n break\n node_to_file_path[op.name] = file_path\n node_to_line_number[op.name] = line_num\n node_to_func_name[op.name] = func_name\n node_to_op_type[op.name] = op.type\n\n def profile_data_generator(device_step_stats):\n for node_stats in device_step_stats.node_stats:\n if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':\n continue\n yield profiling.ProfileDatum(device_step_stats.device, node_stats, node_to_file_path.get(node_stats.node_name, ''), node_to_line_number.get(node_stats.node_name, 0), node_to_func_name.get(node_stats.node_name, ''), node_to_op_type.get(node_stats.node_name, ''))\n return profile_data_generator", "docstring": "Get function that generates `ProfileDatum` objects.\n\nReturns:\n A function that generates `ProfileDatum` objects.", "source": "github_repos"} -{"code": "def __init__(self, dump_root, tfdbg_run_id, circular_buffer_size=DEFAULT_CIRCULAR_BUFFER_SIZE):\n if not dump_root:\n raise ValueError('Empty or None dump root')\n self._dump_root = dump_root\n self._tfdbg_run_id = tfdbg_run_id\n _pywrap_debug_events_writer.Init(self._dump_root, self._tfdbg_run_id, circular_buffer_size)", "docstring": "Construct a DebugEventsWriter object.\n\n NOTE: Given the same `dump_root`, all objects from this constructor\n will point to the same underlying set 
of writers. In other words, they\n will write to the same set of debug events files in the `dump_root`\n folder.\n\nArgs:\n dump_root: The root directory for dumping debug data. If `dump_root` does\n not exist as a directory, it will be created.\n tfdbg_run_id: Debugger Run ID.\n circular_buffer_size: Size of the circular buffer for each of the two\n execution-related debug events files: with the following suffixes: -\n .execution - .graph_execution_traces If <= 0, the circular-buffer\n behavior will be abolished in the constructed object.", "source": "github_repos"} -{"code": "def __init__(self, model: PreTrainedModel):\n super().__init__()\n if model.generation_config is None:\n raise AssertionError('The model must have a generation config to be exported with static caching. Please set `generation_config`.')\n if not model.generation_config.use_cache:\n raise AssertionError('The model must have caching enabled to be exported with static caching. Please set `generation_config.use_cache=True`.')\n if model.generation_config.cache_implementation != 'static':\n raise AssertionError(\"The model must use a 'static' caching implementation to be exported with static caching. Please set `generation_config.cache_implementation='static'`.\")\n self.model = model\n self.static_cache = StaticCache(config=self.model.config, max_batch_size=self.model.generation_config.cache_config.batch_size, max_cache_len=self.model.generation_config.cache_config.max_cache_len, device=self.model.generation_config.cache_config.device, dtype=self.model.dtype)\n for i in range(len(self.static_cache.key_cache)):\n self.register_buffer(f'key_cache_{i}', self.static_cache.key_cache[i], persistent=False)\n self.register_buffer(f'value_cache_{i}', self.static_cache.value_cache[i], persistent=False)", "docstring": "Initializes the wrapper module with the pretrained model.\n\nArgs:\n model (`PreTrainedModel`): The pretrained model to wrap. The model must have caching\n enabled and use a 'static' caching implementation.\n\nRaises:\n AssertionError: If the pretrained model does not have caching enabled or if it does\n not use a 'static' caching implementation in `model.generation_config`.", "source": "github_repos"} -{"code": "def strides(self) -> List[int]:\n return _compute_mesh_strides(self.shape())", "docstring": "Returns the strides tensor array for this mesh.\n\n If the mesh shape is `[a, b, c, d]`, then the strides array can be computed\n as `[b*c*d, c*d, d, 1]`. This array can be useful in computing local device\n offsets given a device ID. Using the same example, the device coordinates of\n the mesh can be computed as:\n\n ```\n [(device_id / (b*c*d)) % a,\n (device_id / (c*d)) % b,\n (device_id / (d)) % c,\n (device_id) % d]\n ```\n\n This is the same as `(device_id // mesh.strides) % mesh.shape`.\n\nReturns:\n The mesh strides as an integer tensor.", "source": "github_repos"} -{"code": "def forward(**super_kwargs):\n super().forward(**super_kwargs)", "docstring": "Example:\n\n ```python\n >>> import torch\n >>> from transformers import AutoProcessor, AutoModelForImageTextToText\n\n >>> torch_device = \"cuda\"\n >>> processor = AutoProcessor.from_pretrained(\"OpenGVLab/InternVL3-1B-hf\")\n >>> model = AutoModelForImageTextToText.from_pretrained(\n ... \"OpenGVLab/InternVL3-1B-hf\", torch_dtype=torch.bfloat16, device_map=torch_device\n ... )\n\n >>> messages = [\n ... {\n ... \"role\": \"user\",\n ... \"content\": [\n ... {\n ... \"type\": \"image\",\n ... 
\"url\": \"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg\",\n ... },\n ... {\n ... \"type\": \"image\",\n ... \"url\": \"https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg\",\n ... },\n ... {\"type\": \"text\", \"text\": \"These images depict two different landmarks. Can you identify them?\"},\n ... ],\n ... },\n ... ]\n\n >>> inputs = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=\"pt\").to(torch_device)\n >>> generate_ids = model.generate(**inputs, max_new_tokens=200)\n >>> print(processor.decode(generate_ids[0, inputs[\"input_ids\"].shape[1] :], skip_special_tokens=True))\n The images depict the Statue of Liberty and the Golden Gate Bridge.\n ```", "source": "github_repos"} -{"code": "def sanity_check_tensor_sync(tensor: torch.Tensor, mesh: DeviceMesh, rtol: float=0.0001, atol: float=0.0001, not_sync: bool=False) -> None:\n if not dist.is_initialized() or mesh.size() == 1:\n return\n pg = mesh.get_group()\n if hasattr(tensor, 'to_local'):\n local_tensor = tensor.to_local()\n else:\n local_tensor = tensor\n world_size = dist.get_world_size(pg)\n gathered_tensors = [torch.empty_like(local_tensor) for _ in range(world_size)]\n dist.all_gather(gathered_tensors, local_tensor, group=pg)\n for i in range(1, world_size):\n try:\n torch.testing.assert_close(gathered_tensors[0], gathered_tensors[i], rtol=rtol, atol=atol)\n except AssertionError as e:\n if not_sync:\n continue\n raise e", "docstring": "Verify that a tensor is synchronized (or not synchronized) across all processes in the mesh's process group.\n Handles both regular tensors and DTensors.\n\nArgs:\n tensor (torch.Tensor): The tensor to check for synchronization (can be DTensor)\n mesh (DeviceMesh): The device mesh containing the process group\n rtol (float): Relative tolerance for comparison\n atol (float): Absolute tolerance for comparison\n not_sync (bool): If True, asserts that tensors are NOT synchronized. 
If False, asserts they are synchronized.", "source": "github_repos"} -{"code": "def dense_labels_to_sparse(dense, length):\n flat_values = array_ops.reshape(dense, [-1])\n flat_indices = math_ops.range(array_ops.shape(flat_values, out_type=dtypes.int64)[0])\n mask = array_ops.sequence_mask(length, maxlen=array_ops.shape(dense)[1])\n flat_mask = array_ops.reshape(mask, [-1])\n indices = array_ops.expand_dims(array_ops.boolean_mask(flat_indices, flat_mask), 1)\n values = array_ops.boolean_mask(flat_values, flat_mask)\n sparse = sparse_tensor.SparseTensor(indices=indices, values=math_ops.cast(values, dtypes.int32), dense_shape=array_ops.shape(flat_values, out_type=dtypes.int64))\n reshaped = sparse_ops.sparse_reshape(sparse, array_ops.shape(dense))\n max_length = math_ops.reduce_max(length)\n return sparse_tensor.SparseTensor(indices=reshaped.indices, values=reshaped.values, dense_shape=[math_ops.cast(reshaped.dense_shape[0], dtypes.int64), math_ops.cast(max_length, dtypes.int64)])", "docstring": "Convert dense labels with sequence lengths to sparse tensor.\n\nArgs:\n dense: tensor of shape [batch, max_length]\n length: int tensor of shape [batch] The length of each sequence in dense.\n\nReturns:\n tf.sparse.SparseTensor with values only for the valid elements of sequences.", "source": "github_repos"} -{"code": "def dtype(x):\n return backend.standardize_dtype(x.dtype)", "docstring": "Return the dtype of the tensor input as a standardized string.\n\n Note that due to the standardization, the dtype will not compare equal\n to the backend-specific version of the dtype.\n\nArgs:\n x: A tensor. This function will try to access the `dtype` attribute of\n the input tensor.\n\nReturns:\n A string indicating the dtype of the input tensor, e.g. `\"float32\"`.\n\nExample:\n >>> x = keras.ops.zeros((8, 12))\n >>> keras.ops.dtype(x)\n 'float32'", "source": "github_repos"} -{"code": "def __init__(self, max_length: int, eos_token_id: int):\n self.max_length = max_length\n if eos_token_id < 0:\n raise ValueError(f'The forced eos token id must be a non-negative integer, got {eos_token_id}')\n self.eos_token_id = eos_token_id", "docstring": "[`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.\n\nArgs:\n max_length (`int`):\n The maximum length of the sequence to be generated.\n eos_token_id (`int`):\n The id of the token to force as the last generated token when `max_length` is reached.", "source": "github_repos"} -{"code": "def _MaybeCaptured(t):\n if not isinstance(t, ops.EagerTensor) and _IsFunction(t.op.graph) and (t.op.type == 'Placeholder'):\n for input_t, placeholder_t in _Captures(t.op.graph):\n if t is placeholder_t:\n return _MaybeCaptured(input_t)\n return t", "docstring": "If t is a captured value placeholder, returns the original captured value.\n\nArgs:\n t: Tensor\n\nReturns:\n A tensor, potentially from a different Graph/FuncGraph.", "source": "github_repos"} -{"code": "def last_updated(self, path):\n return self._gcsIO().last_updated(path)", "docstring": "Get UNIX Epoch time in seconds on the FileSystem.\n\nArgs:\n path: string path of file.\n\n Returns: float UNIX Epoch time\n\nRaises:\n ``BeamIOError``: if path doesn't exist.", "source": "github_repos"} -{"code": "def cos(x):\n if any_symbolic_tensors((x,)):\n return Cos().symbolic_call(x)\n return backend.numpy.cos(x)", "docstring": "Cosine, element-wise.\n\nArgs:\n x: Input tensor.\n\nReturns:\n The corresponding cosine values.", "source": "github_repos"} -{"code": "def 
make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):\n if isinstance(ts, ops.Graph):\n if allow_graph:\n return get_tensors(ts)\n else:\n raise TypeError('allow_graph is False: cannot convert a tf.Graph.')\n else:\n if not is_iterable(ts):\n ts = [ts]\n if not ts:\n return []\n if check_graph:\n check_types = None if ignore_ops else tensor_lib.Tensor\n get_unique_graph(ts, check_types=check_types)\n return [t for t in ts if isinstance(t, tensor_lib.Tensor)]", "docstring": "Convert ts to a list of `tf.Tensor`.\n\nArgs:\n ts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor.\n check_graph: if `True` check if all the tensors belong to the same graph.\n allow_graph: if `False` a `tf.Graph` cannot be converted.\n ignore_ops: if `True`, silently ignore `tf.Operation`.\n\nReturns:\n A newly created list of `tf.Tensor`.\n\nRaises:\n TypeError: if `ts` cannot be converted to a list of `tf.Tensor` or,\n if `check_graph` is `True`, if all the ops do not belong to the same graph.", "source": "github_repos"} -{"code": "def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):\n if not context.executing_eagerly() and ops.get_default_graph()._get_control_flow_context() is not None:\n with ops.init_scope():\n self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name='keys')\n self._values = ops.convert_to_tensor(values, dtype=value_dtype, name='values')\n else:\n self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name='keys')\n self._values = ops.convert_to_tensor(values, dtype=value_dtype, name='values')\n self._name = name if name is not None else 'key_value_init'\n if context.executing_eagerly():\n self._name += str(ops.uid())\n super(KeyValueTensorInitializer, self).__init__(self._keys.dtype, self._values.dtype)", "docstring": "Constructs a table initializer object based on keys and values tensors.\n\nArgs:\n keys: The tensor for the keys.\n values: The tensor for the values.\n key_dtype: The `keys` data type. Used when `keys` is a python array.\n value_dtype: The `values` data type. Used when `values` is a python array.\n name: A name for the operation (optional).", "source": "github_repos"} -{"code": "def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:\n super().__init__()\n self.pool_scales = pool_scales\n self.align_corners = align_corners\n self.in_channels = in_channels\n self.channels = channels\n self.blocks = []\n for i, pool_scale in enumerate(pool_scales):\n block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)\n self.blocks.append(block)\n self.add_module(str(i), block)", "docstring": "Pyramid Pooling Module (PPM) used in PSPNet.\n\nArgs:\n pool_scales (`Tuple[int]`):\n Pooling scales used in Pooling Pyramid Module.\n in_channels (`int`):\n Input channels.\n channels (`int`):\n Channels after modules, before conv_seg.\n align_corners (`bool`):\n align_corners argument of F.interpolate.", "source": "github_repos"} -{"code": "def __init__(self, name=None):\n self._name = name\n self._items = []", "docstring": "Menu constructor.\n\nArgs:\n name: (str or None) name of this menu.", "source": "github_repos"} -{"code": "def __sub__(self, period_tensor):\n return self + periods.PeriodTensor(-period_tensor.quantity(), period_tensor.period_type())", "docstring": "Subtracts a tensor of periods.\n\n When subtracting months or years, the resulting day of the month is\n decreased to the largest valid value if necessary. E.g. 
31.03.2020 - 1 month\n = 29.02.2020, 29.02.2020 - 1 year = 28.02.2019.\n\nArgs:\n period_tensor: a PeriodTensor object broadcastable to the shape of \"self\".\n\nReturns:\n The new instance of DateTensor.", "source": "github_repos"} -{"code": "def _get_target_dtype(self, from_dtype: Optional[_NpDType]) -> Optional[_NpDType]:", "docstring": "Validate and normalize the numpy dtype.\n\nArgs:\n from_dtype: DType of the array to cast\n\nReturns:\n to_dtype: DType of the array after casting", "source": "github_repos"} -{"code": "def try_listify_dict_with_int_keys(src: Dict[Any, Any], convert_when_sparse: bool=False) -> Tuple[Union[List[Any], Dict[Any, Any]], bool]:\n if not src:\n return (src, False)\n min_key = None\n max_key = None\n for key in src.keys():\n if not isinstance(key, int):\n return (src, False)\n if min_key is None or min_key > key:\n min_key = key\n if max_key is None or max_key < key:\n max_key = key\n if convert_when_sparse or (min_key == 0 and max_key == len(src) - 1):\n return ([src[key] for key in sorted(src.keys())], True)\n return (src, False)", "docstring": "Try to convert a dictionary with consequentive integer keys to a list.\n\nArgs:\n src: A dict whose keys may be int type and their range form a perfect\n range(0, N) list unless convert_when_sparse is set to True.\n convert_when_sparse: When src is a int-key dict, force convert\n it to a list ordered by key, even it's sparse.\n\nReturns:\n converted list or src unchanged.", "source": "github_repos"} -{"code": "def transform_feature(self, transformation_cache, state_manager):\n input_tensor = transformation_cache.get(self.key, state_manager)\n if self.normalizer_fn is not None:\n input_tensor = self.normalizer_fn(input_tensor)\n return input_tensor", "docstring": "See `FeatureColumn` base class.\n\n In this case, we apply the `normalizer_fn` to the input tensor.\n\nArgs:\n transformation_cache: A `FeatureTransformationCache` object to access\n features.\n state_manager: A `StateManager` to create / access resources such as\n lookup tables.\n\nReturns:\n Normalized input tensor.", "source": "github_repos"} -{"code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n self._validate_kwargs(kwargs, support_partition=False)\n dtype = _assert_float_dtype(dtype)\n if len(shape) < 2:\n raise ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape={shape}')\n num_rows = 1\n for dim in shape[:-1]:\n num_rows *= dim\n num_cols = shape[-1]\n flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))\n a = self._random_generator.random_normal(flat_shape, dtype=dtype)\n q, r = gen_linalg_ops.qr(a, full_matrices=False)\n d = array_ops.diag_part(r)\n q *= math_ops.sign(d)\n if num_rows < num_cols:\n q = array_ops.matrix_transpose(q)\n return self.gain * array_ops.reshape(q, shape)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported.\n **kwargs: Additional keyword arguments.\n\nRaises:\n ValueError: If the dtype is not floating point or the input shape is not\n valid.", "source": "github_repos"} -{"code": "def is_null_merge(self):\n return not bool(self._spec.to_string())", "docstring": "Indicate whether the wrapped spec is empty.\n\n In the degenerate case where self._spec is an empty specification, a caller\n may wish to skip a merge step entirely. 
(However this class does not have\n enough information to make that determination.)\n\nReturns:\n A boolean indicating whether a device merge will be trivial.", "source": "github_repos"} -{"code": "def __init__(self, name='mean_tensor', dtype=None, shape=None):\n super(MeanTensor, self).__init__(name=name, dtype=dtype)\n self._shape = None\n self._total = None\n self._count = None\n self._built = False\n if shape is not None:\n self._build(shape)", "docstring": "Computes the element-wise (weighted) mean of the given tensors.\n\n `MeanTensor` returns a tensor with the same shape of the input tensors. The\n mean value is updated by keeping local variables `total` and `count`. The\n `total` tracks the sum of the weighted values, and `count` stores the sum of\n the weighted counts.\n\nArgs:\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n shape: (Optional) A list of integers, a tuple of integers, or a 1-D Tensor\n of type int32. If not specified, the shape is inferred from the values at\n the first call of update_state.\n\n Standalone usage:\n\n >>> m = tf.keras.metrics.MeanTensor()\n >>> m.update_state([0, 1, 2, 3])\n >>> m.update_state([4, 5, 6, 7])\n >>> m.result().numpy()\n array([2., 3., 4., 5.], dtype=float32)\n\n >>> m.update_state([12, 10, 8, 6], sample_weight= [0, 0.2, 0.5, 1])\n >>> m.result().numpy()\n array([2. , 3.6363635, 4.8 , 5.3333335], dtype=float32)\n\n >>> m = tf.keras.metrics.MeanTensor(dtype=tf.float64, shape=(1, 4))\n >>> m.result().numpy()\n array([[0., 0., 0., 0.]])\n >>> m.update_state([[0, 1, 2, 3]])\n >>> m.update_state([[4, 5, 6, 7]])\n >>> m.result().numpy()\n array([[2., 3., 4., 5.]])", "source": "github_repos"} -{"code": "def flatten_with_joined_string_paths(structure, separator='/', expand_composites=False):\n flat_paths = yield_flat_paths(structure, expand_composites=expand_composites)\n\n def stringify_and_join(path_elements):\n return separator.join((str(path_element) for path_element in path_elements))\n flat_string_paths = (stringify_and_join(path) for path in flat_paths)\n return list(zip(flat_string_paths, flatten(structure, expand_composites=expand_composites)))", "docstring": "Returns a list of (string path, atom) tuples.\n\n The order of tuples produced matches that of `nest.flatten`. This allows you\n to flatten a nested structure while keeping information about where in the\n structure each atom was located. 
See `nest.yield_flat_paths`\n for more information.\n\nArgs:\n structure: the nested structure to flatten.\n separator: string to separate levels of hierarchy in the results, defaults\n to '/'.\n expand_composites: If true, then composite tensors such as\n `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their\n component tensors.\n\nReturns:\n A list of (string, atom) tuples.", "source": "github_repos"} -{"code": "def where(condition, x1=None, x2=None):\n if x1 is None and x2 is not None or (x1 is not None and x2 is None):\n raise ValueError('`x1` and `x2` either both should be `None` or both should have non-None value.')\n if any_symbolic_tensors((condition, x1, x2)):\n return Where().symbolic_call(condition, x1, x2)\n return backend.numpy.where(condition, x1, x2)", "docstring": "Return elements chosen from `x1` or `x2` depending on `condition`.\n\nArgs:\n condition: Where `True`, yield `x1`, otherwise yield `x2`.\n x1: Values from which to choose when `condition` is `True`.\n x2: Values from which to choose when `condition` is `False`.\n\nReturns:\n A tensor with elements from `x1` where `condition` is `True`, and\n elements from `x2` where `condition` is `False`.", "source": "github_repos"} -{"code": "def _measure_tensor_list_column_widths(self, data):\n max_timestamp_width = 0\n if data:\n max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0\n max_timestamp_width = len('[%.3f] ' % max_rel_time_ms) + 1\n max_timestamp_width = max(max_timestamp_width, len(self._TIMESTAMP_COLUMN_HEAD) + 1)\n max_dump_size_width = 0\n for dump in data:\n dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)\n if len(dump_size_str) + 1 > max_dump_size_width:\n max_dump_size_width = len(dump_size_str) + 1\n max_dump_size_width = max(max_dump_size_width, len(self._DUMP_SIZE_COLUMN_HEAD) + 1)\n max_op_type_width = 0\n for dump in data:\n op_type = self._debug_dump.node_op_type(dump.node_name)\n if len(op_type) + 1 > max_op_type_width:\n max_op_type_width = len(op_type) + 1\n max_op_type_width = max(max_op_type_width, len(self._OP_TYPE_COLUMN_HEAD) + 1)\n return (max_timestamp_width, max_dump_size_width, max_op_type_width)", "docstring": "Determine the maximum widths of the timestamp and op-type column.\n\n This method assumes that data is sorted in the default order, i.e.,\n by ascending timestamps.\n\nArgs:\n data: (list of DebugTensorDaum) the data based on which the maximum\n column widths will be determined.\n\nReturns:\n (int) maximum width of the timestamp column. 0 if data is empty.\n (int) maximum width of the dump size column. 0 if data is empty.\n (int) maximum width of the op type column. 0 if data is empty.", "source": "github_repos"} -{"code": "def broadcast(self, tensor, destinations):\n validate_destinations(destinations)\n return self.broadcast_implementation(tensor, destinations)", "docstring": "Broadcast `tensor` to `destinations`.\n\n This can only be called in the cross-replica context.\n\nArgs:\n tensor: a `tf.Tensor` like object. The value to broadcast.\n destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a\n `tf.Tensor` alike object, or a device string. It specifies the devices\n to broadcast to. 
Note that if it's a `tf.Variable`, the value is\n broadcasted to the devices of that variable, this method doesn't update\n the variable.\n\nReturns:\n A `tf.Tensor` or `tf.distribute.DistributedValues`.", "source": "github_repos"} -{"code": "def __init__(self, losses):\n super().__init__()\n self.loss_map = {'iou': self.loss_iou, 'distance': self.loss_distance, 'duration': self.loss_duration}\n for loss in losses:\n if loss not in self.loss_map:\n raise ValueError(f'Loss {loss} not supported')\n self.losses = losses", "docstring": "This class computes the losses for `TvpForVideoGrounding`. The process happens in two steps: 1) we compute\n hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched\n ground-truth / prediction (supervise class and box).\n\nArgs:\n losses (`List[str]`):\n List of all the losses to be applied.", "source": "github_repos"} -{"code": "def fn(x: int) -> int:\n return x", "docstring": "Test function\n\nArgs:\n x: The input", "source": "github_repos"} -{"code": "def compute_optimal_blocks(device: torch.device, config: PretrainedConfig, generation_config: GenerationConfig, inputs: List[List[int]], dtype: torch.dtype=torch.bfloat16, safety_margin: float=0.9, median_prefill_length: Optional[int]=None):\n head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)\n num_kv_heads = getattr(config, 'num_key_value_heads', config.num_attention_heads)\n num_hidden_layers = getattr(config, 'num_hidden_layers', 40)\n if device.type == 'cuda':\n device_properties = torch.cuda.get_device_properties(device)\n total_memory = device_properties.total_memory\n allocated_memory = torch.cuda.memory_allocated(device)\n reserved_memory = torch.cuda.memory_reserved(device)\n available_memory = total_memory - max(allocated_memory, reserved_memory)\n elif device.type == 'mps':\n logger.warning('MPS memory estimation is approximate. Using conservative defaults.')\n return (2048, 256)\n else:\n logger.warning(f'Unsupported device type {device.type} for optimal block calculation. Using defaults.')\n return (32, 128)\n available_memory = int(available_memory * safety_margin)\n if available_memory <= 0:\n logger.warning('Not enough available memory. 
Using minimum configuration.')\n return (8, 128)\n dtype_size = torch.tensor([], dtype=dtype).element_size()\n memory_per_token = 2 * num_kv_heads * head_dim * dtype_size * num_hidden_layers\n tokens_to_generate = getattr(generation_config, 'max_new_tokens', 20)\n if median_prefill_length is None and inputs:\n non_empty_inputs = [len(seq) for seq in inputs if seq]\n median_prefill_length = int(statistics.median(non_empty_inputs)) if non_empty_inputs else 64\n elif median_prefill_length is None:\n median_prefill_length = 64\n seq_length = median_prefill_length + tokens_to_generate\n MIN_BLOCK_SIZE = 16\n per_sequence_memory = seq_length * memory_per_token\n max_concurrent_sequences = max(1, int(available_memory // per_sequence_memory))\n total_tokens = available_memory // memory_per_token\n initial_block_size = max(MIN_BLOCK_SIZE, total_tokens // (max_concurrent_sequences * 2))\n block_size = 1 << (initial_block_size - 1).bit_length()\n num_blocks = max(1, total_tokens // block_size)\n logger.info(f'Optimal cache: {num_blocks} blocks of size {block_size} (can handle ~{num_blocks * block_size // seq_length} sequences of length {seq_length})')\n return (int(num_blocks), int(block_size))", "docstring": "Calculate optimal number and size of blocks for the KV cache.\n\nArgs:\n device: The device where the model runs\n config: The model configuration\n generation_config: The generation configuration\n inputs: Sample input sequences to estimate memory requirements\n dtype: Data type for cache tensors\n safety_margin: Fraction of available memory to use\n median_prefill_length: Override for median prefill length calculation\n\nReturns:\n Tuple of (num_blocks, block_size)", "source": "github_repos"} -{"code": "def price(self, valuation_date, market, model=None, pricing_context=None, name=None):\n del model, pricing_context\n name = name or self._name + '_price'\n with tf.name_scope(name):\n discount_curve = market.discount_curve\n reference_curve = market.reference_curve\n libor_rate = rc.get_rate_index(market, self._start_date, rc.RateIndexType.LIBOR, dtype=self._dtype)\n libor_rate = tf.repeat(tf.convert_to_tensor(libor_rate, dtype=self._dtype), self._num_cashflows)\n discount_factors = discount_curve.get_discount_factor(self._payment_dates)\n forward_rates = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fractions)\n forward_rates = tf.where(self._daycount_fractions > 0.0, forward_rates, tf.zeros_like(forward_rates))\n forward_rates = tf.where(self._coupon_end_dates < valuation_date, tf.constant(0.0, dtype=self._dtype), tf.where(self._coupon_start_dates < valuation_date, libor_rate, forward_rates))\n coupon_rate = self._coupon_multiplier * (forward_rates + self._coupon_basis)\n cashflow_pvs = self._notional * (self._daycount_fractions * coupon_rate * discount_factors)\n return tf.math.reduce_sum(tf.reshape(cashflow_pvs, (self._batch_size, self._num_cashflows)), axis=1)", "docstring": "Returns the present value of the stream on the valuation date.\n\nArgs:\n valuation_date: A scalar `DateTensor` specifying the date on which\n valuation is being desired.\n market: A namedtuple of type `InterestRateMarket` which contains the\n necessary information for pricing the cashflow stream.\n model: Reserved for future use.\n pricing_context: Additional context relevant for pricing.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'price'.\n\nReturns:\n A Rank 1 `Tensor` of real type containing the modeled price of each stream\n contract based on the input market data.", "source": "github_repos"} -{"code": "def __call__(self, shape, dtype=None):\n dtype = standardize_dtype(dtype)\n return ops.zeros(shape, dtype=dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes\n are supported. If not specified, `keras.backend.floatx()`\n is used, which default to `float32` unless you configured it\n otherwise (via `keras.backend.set_floatx(float_dtype)`).", "source": "github_repos"} -{"code": "def assign(self, value, use_locking=None, name=None, read_value=True):\n with _handle_graph(self.handle):\n value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)\n if not self._shape.is_compatible_with(value_tensor.shape):\n if self.name is None:\n tensor_name = ''\n else:\n tensor_name = ' ' + str(self.name)\n raise ValueError(f\"Cannot assign value to variable '{tensor_name}': Shape mismatch.The variable shape {self._shape}, and the assigned value shape {value_tensor.shape} are incompatible.\")\n kwargs = {}\n if forward_compat.forward_compatible(2022, 3, 23):\n validate_shape = self._validate_shape and self._shape.is_fully_defined()\n kwargs['validate_shape'] = validate_shape\n assign_op = gen_resource_variable_ops.assign_variable_op(self.handle, value_tensor, name=name, **kwargs)\n if read_value:\n return self._lazy_read(assign_op)\n return assign_op", "docstring": "Assigns a new value to this variable.\n\nArgs:\n value: A `Tensor`. The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name to use for the assignment.\n read_value: A `bool`. Whether to read and return the new value of the\n variable or not.\n\nReturns:\n If `read_value` is `True`, this method will return the new value of the\n variable after the assignment has completed. 
Otherwise, when in graph mode\n it will return the `Operation` that does the assignment, and when in eager\n mode it will return `None`.", "source": "github_repos"} -{"code": "def GetFile(self, map_name, dst_file, current_file, location=None):\n if map_name == config.MAP_PASSWORD:\n return self.GetPasswdFile(dst_file, current_file)\n elif map_name == config.MAP_GROUP:\n return self.GetGroupFile(dst_file, current_file)\n elif map_name == config.MAP_SHADOW:\n return self.GetShadowFile(dst_file, current_file)\n elif map_name == config.MAP_NETGROUP:\n return self.GetNetgroupFile(dst_file, current_file)\n elif map_name == config.MAP_AUTOMOUNT:\n return self.GetAutomountFile(dst_file, current_file, location=location)\n raise error.UnsupportedMap('Source can not fetch %s' % map_name)", "docstring": "Retrieve a file from this source.\n\nArgs:\n map_name: A string representation of the map whose file you want\n dst_file: Temporary filename to write to.\n current_file: Path to the current cache.\n location: optional field used by automounts to indicate a specific map\n\nReturns:\n path to new file\n\nRaises:\n UnsupportedMap: for unknown source maps", "source": "github_repos"} -{"code": "def lookup(self, keys, name=None):\n if keys.dtype.base_dtype != self._key_dtype:\n raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n values = keys\n if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):\n values = keys.values\n if self._table and self._table.key_dtype.base_dtype == dtypes.int64:\n values = math_ops.cast(values, dtypes.int64)\n if self._num_oov_buckets == 0:\n ids = self._table.lookup(values, name=name)\n else:\n with ops.name_scope(name, '%s_Lookup' % self.name):\n str_to_hash_bucket = self._get_string_to_hash_bucket_fn(self._hasher_spec)\n buckets = str_to_hash_bucket(_as_string(values), num_buckets=self._num_oov_buckets, name='hash_bucket')\n if self._table:\n ids = self._table.lookup(values)\n buckets = math_ops.add(buckets, self._table.size())\n is_id_non_default = math_ops.not_equal(ids, self._table.default_value)\n ids = array_ops.where_v2(is_id_non_default, ids, buckets)\n else:\n ids = buckets\n if isinstance(keys, sparse_tensor.SparseTensor):\n return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)\n elif isinstance(keys, internal.RaggedTensor):\n return keys.with_values(ids)\n return ids", "docstring": "Looks up `keys` in the table, outputs the corresponding values.\n\n It assigns out-of-vocabulary keys to buckets based in their hashes.\n\nArgs:\n keys: Keys to look up. 
May be either a `SparseTensor` or dense `Tensor`.\n name: Optional name for the op.\n\nReturns:\n A `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged,\n otherwise a dense `Tensor`.\n\nRaises:\n TypeError: when `keys` doesn't match the table key data type.", "source": "github_repos"} -{"code": "def report_run(config, auth, account, report_id=None, name=None):\n account_id, advertiser_id = parse_account(config, auth, account)\n is_superuser, profile_id = get_profile_for_api(config, auth, account_id)\n kwargs = {'profileId': profile_id, 'accountId': account_id} if is_superuser else {'profileId': profile_id}\n if config.verbose:\n print('DCM REPORT RUN INIT', report_id or name)\n if report_id is None:\n report = report_get(config, auth, account, name=name)\n if report is None:\n raise Exception('Report does not exist:', name)\n else:\n report_id = report['id']\n kwargs = {'profileId': profile_id, 'accountId': account_id} if is_superuser else {'profileId': profile_id}\n kwargs['reportId'] = report_id\n files = report_files(config, auth, account, report_id)\n latest_file_json = next(files, None)\n if latest_file_json == None or latest_file_json['status'] != 'PROCESSING':\n if config.verbose:\n print('RUNNING REPORT', report_id or name)\n API_DCM(config, auth, internal=is_superuser).reports().run(**kwargs).execute()\n return True\n if config.verbose:\n print('REPORT RUN SKIPPED', report_id or name)\n return False", "docstring": "Trigger a DCM report to run by name or ID.\n\n Will do nothing if report is currently in progress.\n\n Bulletproofing:\n https://developers.google.com/doubleclick-advertisers/v3.3/reports/run\n\nArgs:\n * auth: (string) Either user or service.\n * account: (string) [account:advertiser@profile] token.\n * report_id: (int) ID of DCm report to fetch ( either or name ).\n * name: (string) Name of report to fetch ( either or report_id ).\n\nReturns:\n * True if report run is executed\n * False otherwise", "source": "github_repos"} -{"code": "def send_graph_tracebacks(destinations, run_key, origin_stack, graph, send_source=True):\n _send_call_tracebacks(destinations, origin_stack, is_eager_execution=False, call_key=run_key, graph=graph, send_source=send_source)", "docstring": "Send the tracebacks of a graph execution call to debug server(s).\n\nArgs:\n destinations: gRPC destination addresses, a `str` or a `list` of `str`s,\n e.g., \"localhost:4242\". 
If a `list`, gRPC requests containing the same\n `CallTraceback` proto payload will be sent to all the destinations.\n run_key: A string describing the feeds, fetches (and targets) names of the\n `tf.Session.run` call.\n origin_stack: The traceback of the `tf.Session.run()` invocation.\n graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`),\n which contains op tracebacks.\n send_source: Whether the source files involved in the op tracebacks but\n outside the TensorFlow library are to be sent.", "source": "github_repos"} -{"code": "def get_creds(use_personal_account: bool, service_account: str, private_key: str) -> credentials.Credentials:\n if service_account and private_key:\n try:\n with open_local(private_key) as local_path:\n creds = ee.ServiceAccountCredentials(service_account, local_path)\n except Exception:\n raise RuntimeError(f'Unable to open the private key {private_key}.')\n elif use_personal_account:\n ee.Authenticate()\n creds, _ = default()\n elif is_compute_engine():\n creds = compute_engine.Credentials()\n else:\n creds, _ = default()\n creds.refresh(requests.Request())\n return creds", "docstring": "Fetches credentials for authentication.\n\n If the `use_personal_account` argument is true then it will authenticate with pop-up\n browser window using personal account. Otherwise, if the application is running\n in compute engine, it will use credentials of service account bound to the VM.\n Otherwise, it will try to use user credentials.\n\nArgs:\n use_personal_account: A flag to use personal account for ee authentication.\n service_account: Service account address when using a private key for earth engine authentication.\n private_key: A private key path to authenticate earth engine using private key.\n\nReturns:\n cred: Credentials object.", "source": "github_repos"} -{"code": "def dismantle_graph(graph) -> None:\n graph._functions.clear()\n graph.Dismantle()", "docstring": "Cleans up reference cycles from a `Graph`.\n\n Helpful for making sure the garbage collector doesn't need to run after a\n temporary `Graph` is no longer needed.\n\nArgs:\n graph: A `Graph` object to destroy. 
Neither it nor any of its ops are usable\n after this function runs.", "source": "github_repos"} -{"code": "def get_metrics(pred: jax.Array, actual: jax.Array) -> Result:\n tp: int = jnp.sum(jnp.logical_and(pred == 1, actual == 1))\n tn: int = jnp.sum(jnp.logical_and(pred == 0, actual == 0))\n fp: int = jnp.sum(jnp.logical_and(pred == 1, actual == 0))\n fn: int = jnp.sum(jnp.logical_and(pred == 0, actual == 1))\n accuracy = (tp + tn) / (tp + tn + fp + fn + EPS)\n precision = tp / (tp + fp + EPS)\n recall = tp / (tp + fn + EPS)\n fscore = 2 * precision * recall / (precision + recall + EPS)\n return Result(tp=tp, tn=tn, fp=fp, fn=fn, accuracy=accuracy, precision=precision, recall=recall, fscore=fscore)", "docstring": "Gets evaluation metrics from the prediction and the actual target.\n\nArgs:\n pred (jax.Array): A prediction of the target.\n actual (jax.Array): The actual target.\n\nReturns:\n result (Result): A result.", "source": "github_repos"} -{"code": "def _gather_saveables_for_checkpoint(self) -> Dict[str, Callable[..., Any]]:\n\n def _saveable_factory(name=self._common_name):\n saveables = []\n num_shards = len(self.values)\n for shard_id in range(num_shards):\n saveables.append(TPUEmbeddingShardedSaveable(self.values[shard_id], shard_id, num_shards, self.shard_dim, name))\n return saveables\n return {base.VARIABLE_VALUE_KEY: _saveable_factory}", "docstring": "Overrides Trackable method.\n\nReturns:\n A dictionary mapping attribute names to `SaveableObject` factories.", "source": "github_repos"} -{"code": "def get_image_tokens(self, pixel_values: torch.FloatTensor):\n batch_size = pixel_values.shape[0]\n _, _, image_toks = self.vqmodel.encode(pixel_values)\n bpe_toks = self.vocabulary_mapping.convert_img2bpe(image_toks)\n bpe_toks = bpe_toks.view(batch_size, -1)\n return bpe_toks", "docstring": "Tokenizes images into discrete tokens with VQGAN module. Converts\n obtained image tokens into BPE tokens and wraps with \"boi\" and \"eoi\"\n special tokens.\n\nArgs:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):\n The tensors corresponding to the input images.", "source": "github_repos"} -{"code": "def get_collection(key, scope=None) -> list[Any]:\n return get_default_graph().get_collection(key, scope)", "docstring": "Wrapper for `Graph.get_collection()` using the default graph.\n\n See `tf.Graph.get_collection`\n for more details.\n\nArgs:\n key: The key for the collection. For example, the `GraphKeys` class contains\n many standard names for collections.\n scope: (Optional.) If supplied, the resulting list is filtered to include\n only items whose `name` attribute matches using `re.match`. Items without\n a `name` attribute are never returned if a scope is supplied and the\n choice or `re.match` means that a `scope` without special tokens filters\n by prefix.\n\nReturns:\n The list of values in the collection with the given `name`, or\n an empty list if no value has been added to that collection. 
The\n list contains the values in the order under which they were\n collected.\n\n @compatibility(eager)\n Collections are not supported when eager execution is enabled.\n @end_compatibility", "source": "github_repos"} -{"code": "def _get_value_type(cls, value):\n type_ = cls.typeDict.get(type(value))\n if type_ is None:\n type_ = 'CLASS' if inspect.isclass(value) else None\n if type_ is None and value is None:\n type_ = 'STRING'\n return type_", "docstring": "Infers the type of a given value.\n\nArgs:\n value: The value whose type needs to be inferred. For 'DURATION' and\n 'TIMESTAMP', the corresponding Python type is datetime.timedelta and\n datetime.datetime respectively. For Python classes, the API type is\n just 'STRING' at the moment.\n\nReturns:\n One of 'STRING', 'INTEGER', 'FLOAT', 'CLASS', 'DURATION', or\n 'TIMESTAMP', depending on the type of the value.", "source": "github_repos"} -{"code": "def _add_train_op(self, train_op):\n if train_op is not None:\n if not isinstance(train_op, tensor.Tensor) and (not isinstance(train_op, ops.Operation)):\n raise TypeError(f'`train_op` {train_op} needs to be a Tensor or Op.')\n ops.add_to_collection(constants.TRAIN_OP_KEY, train_op)", "docstring": "Add train op to the SavedModel.\n\n Note that this functionality is in development, and liable to be\n moved elsewhere.\n\nArgs:\n train_op: Op or group of ops that are used for training. These are stored\n as a collection with key TRAIN_OP_KEY, but not executed.\n\nRaises:\n TypeError if Train op is not of type `Operation`.", "source": "github_repos"} -{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return token_ids_0 + [self.sep_token_id]\n sep = [self.sep_token_id]\n return token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A XLMProphetNet sequence has the following format:\n\n - single sequence: `X [SEP]`\n - pair of sequences: `A [SEP] B [SEP]`\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github_repos"} -{"code": "def __getitem__(self, indices):\n return self.array[indices]", "docstring": "Select elements in the 0th dimension.\n\nArgs:\n indices: the indices to select. Only needs to support one dimension,\n the 0th dimension. 
Should support a `slice` or a list, tuple,\n `np.array` or 1D tensor.\n Returns: A slice of `self.array`.", "source": "github_repos"} -{"code": "def _assert_sample_values_all_close(self, sess: session.Session, repr_ds_1: repr_dataset.RepresentativeDataset, repr_ds_2: repr_dataset.RepresentativeDataset) -> None:\n for sample_1, sample_2 in zip(repr_ds_1, repr_ds_2):\n self.assertCountEqual(sample_1.keys(), sample_2.keys())\n for input_key in sample_1:\n self._assert_tensorlike_all_close(sess, sample_1[input_key], sample_2[input_key])", "docstring": "Asserts that the sample values are \"all close\" between the two datasets.\n\n This assumes that the order of corresponding samples is preserved and the\n size of the two datasets are equal.\n\nArgs:\n sess: Session instance used to evaluate any tf.Tensors.\n repr_ds_1: A RepresentativeDataset.\n repr_ds_2: A RepresentativeDataset.", "source": "github_repos"} -{"code": "def better_logging():\n app.call_after_init(_better_logging)", "docstring": "Improve Python logging when running locally.\n\n * Display Python logs by default (even when user forgot `--logtostderr`),\n without being polluted by hundreds of C++ logs.\n * Cleaner minimal log format (e.g. `I 15:04:05 [main.py:24]:`)\n * Avoid visual artifacts between TQDM & `logging`\n * Clickable hyperlinks redirecting to code search (require terminal support)\n\n Usage:\n\n ```python\n if __name__ == '__main__':\n eapp.better_logging()\n app.run(main)\n ```\n\n Note this has only effect when user run locally and without `--logtostderr`.", "source": "github_repos"} -{"code": "def create_database_view(self, view: views.View, view_name: str) -> None:\n view_sql = f'CREATE OR REPLACE VIEW {self._view_dataset}.{view_name} AS\\n{self.to_sql(view)}'\n self._engine.execute(view_sql).fetchall()", "docstring": "Creates a Spark view with the given name in the runner's view_dataset.\n\nArgs:\n view: the FHIR view that creates\n view_name: the view name passed to the CREATE OR REPLACE VIEW statement.", "source": "github_repos"} -{"code": "def distribution(namespace: Union[Type, str], name: str) -> 'Metrics.DelegatingDistribution':\n namespace = Metrics.get_namespace(namespace)\n return Metrics.DelegatingDistribution(MetricName(namespace, name))", "docstring": "Obtains or creates a Distribution metric.\n\n Distribution metrics are restricted to integer-only distributions.\n\nArgs:\n namespace: A class or string that gives the namespace to a metric\n name: A string that gives a unique name to a metric\n\nReturns:\n A Distribution object.", "source": "github_repos"} -{"code": "def join(path, *paths):\n path_ = compat.as_str_any(compat.path_to_str(path))\n if '://' in path_[1:]:\n return urljoin(path, *paths)\n return os.path.join(path, *paths)", "docstring": "Join one or more path components intelligently.\n\n TensorFlow specific filesystems will be joined\n like a url (using \"/\" as the path seperator) on all platforms:\n\n On Windows or Linux/Unix-like:\n >>> tf.io.gfile.join(\"gcs://folder\", \"file.py\")\n 'gcs://folder/file.py'\n\n >>> tf.io.gfile.join(\"ram://folder\", \"file.py\")\n 'ram://folder/file.py'\n\n But the native filesystem is handled just like os.path.join:\n\n >>> path = tf.io.gfile.join(\"folder\", \"file.py\")\n >>> if os.name == \"nt\":\n ... expected = \"folder\\\\file.py\" # Windows\n ... else:\n ... 
expected = \"folder/file.py\" # Linux/Unix-like\n >>> path == expected\n True\n\nArgs:\n path: string, path to a directory\n paths: string, additional paths to concatenate\n\nReturns:\n path: the joined path.", "source": "github_repos"} -{"code": "def convert_nested_bidirectional(weights):\n num_weights_per_layer = len(weights) // 2\n forward_weights = preprocess_weights_for_loading(layer.forward_layer, weights[:num_weights_per_layer], original_keras_version, original_backend)\n backward_weights = preprocess_weights_for_loading(layer.backward_layer, weights[num_weights_per_layer:], original_keras_version, original_backend)\n return forward_weights + backward_weights", "docstring": "Converts layers nested in `Bidirectional` wrapper.\n\n This function uses `preprocess_weights_for_loading()` for converting\n layers.\n\nArgs:\n weights: List of weights values (Numpy arrays).\n\nReturns:\n A list of weights values (Numpy arrays).", "source": "github_repos"} -{"code": "def __init__(self, graph_parents=None, is_constant_jacobian=False, validate_args=False, dtype=None, forward_min_event_ndims=None, inverse_min_event_ndims=None, name=None):\n self._graph_parents = graph_parents or []\n if forward_min_event_ndims is None and inverse_min_event_ndims is None:\n raise ValueError('Must specify at least one of `forward_min_event_ndims` and `inverse_min_event_ndims`.')\n elif inverse_min_event_ndims is None:\n inverse_min_event_ndims = forward_min_event_ndims\n elif forward_min_event_ndims is None:\n forward_min_event_ndims = inverse_min_event_ndims\n if not isinstance(forward_min_event_ndims, int):\n raise TypeError('Expected forward_min_event_ndims to be of type int, got {}'.format(type(forward_min_event_ndims).__name__))\n if not isinstance(inverse_min_event_ndims, int):\n raise TypeError('Expected inverse_min_event_ndims to be of type int, got {}'.format(type(inverse_min_event_ndims).__name__))\n if forward_min_event_ndims < 0:\n raise ValueError('forward_min_event_ndims must be a non-negative integer.')\n if inverse_min_event_ndims < 0:\n raise ValueError('inverse_min_event_ndims must be a non-negative integer.')\n self._forward_min_event_ndims = forward_min_event_ndims\n self._inverse_min_event_ndims = inverse_min_event_ndims\n self._is_constant_jacobian = is_constant_jacobian\n self._constant_ildj_map = {}\n self._validate_args = validate_args\n self._dtype = dtype\n self._from_y = {}\n self._from_x = {}\n if name:\n self._name = name\n else:\n\n def camel_to_snake(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', name)\n return re.sub('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1).lower()\n self._name = camel_to_snake(type(self).__name__.lstrip('_'))\n for i, t in enumerate(self._graph_parents):\n if t is None or not tensor_util.is_tf_type(t):\n raise ValueError('Graph parent item %d is not a Tensor; %s.' % (i, t))", "docstring": "Constructs Bijector.\n\n A `Bijector` transforms random variables into new random variables.\n\nExample:\n ```python\n # Create the Y = g(X) = X transform.\n identity = Identity()\n\n # Create the Y = g(X) = exp(X) transform.\n exp = Exp()\n ```\n\n See `Bijector` subclass docstring for more details and specific examples.\n\nArgs:\n graph_parents: Python list of graph prerequisites of this `Bijector`.\n is_constant_jacobian: Python `bool` indicating that the Jacobian matrix is\n not a function of the input.\n validate_args: Python `bool`, default `False`. Whether to validate input\n with asserts. 
If `validate_args` is `False`, and the inputs are invalid,\n correct behavior is not guaranteed.\n dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not\n enforced.\n forward_min_event_ndims: Python `integer` indicating the minimum number of\n dimensions `forward` operates on.\n inverse_min_event_ndims: Python `integer` indicating the minimum number of\n dimensions `inverse` operates on. Will be set to\n `forward_min_event_ndims` by default, if no value is provided.\n name: The name to give Ops created by the initializer.\n\nRaises:\n ValueError: If neither `forward_min_event_ndims` and\n `inverse_min_event_ndims` are specified, or if either of them is\n negative.\n ValueError: If a member of `graph_parents` is not a `Tensor`.", "source": "github_repos"} -{"code": "def auto_shard_dataset(dataset, num_shards, index, num_replicas_in_sync=None):\n if isinstance(dataset, distribute_types.DistributedDatasetInterface):\n return dataset.auto_shard(num_shards, index)\n if dataset.options().experimental_distribute.auto_shard_policy != AutoShardPolicy.OFF:\n if num_replicas_in_sync is None:\n num_replicas_in_sync = 1\n if isinstance(dataset, data_types.DatasetV1):\n return distribute._AutoShardDatasetV1(dataset, num_shards, index, num_replicas_in_sync)\n else:\n return distribute._AutoShardDataset(dataset, num_shards, index, num_replicas_in_sync)\n else:\n return dataset", "docstring": "Shard the input pipeline by sharding the underlying list of files.\n\nArgs:\n dataset: A `tf.data.Dataset` instance, typically the result of a bunch of\n dataset transformations.\n num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of\n shards operating in parallel. Same usage as in `tf.data.Dataset.shard`.\n index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.\n Same usage as in `tf.data.Dataset.shard`.\n num_replicas_in_sync: An integer representing the total number of replicas\n across all workers. This is used in the rewrite when sharding by data.\n\nReturns:\n A modified `Dataset` obtained by updating the pipeline sharded by the\n files. The input dataset will be returned if we cannot automatically\n determine a good way to shard the input dataset.", "source": "github_repos"} -{"code": "def index_add(x, idx, y):\n return _index_update_helper(tf_np.ndarray._with_index_add, x, idx, y)", "docstring": "Pure equivalent of `x[idx] += y`.\n\n Returns the value of x that would result from the NumPy-style indexed\n assignment `x[idx] += y`. Because it's a pure function, `x` itself won't be\n changed.\n\nArgs:\n x: an array with the values to be updated.\n idx: a Numpy-style index, consisting of `None`, integers, slice objects,\n ellipses, ndarrays with integer dtypes, or a tuple of the above.\n y: the array of updates. 
`y` must be broadcastable to the shape of the array\n that would be returned by `x[idx]`.\n\nReturns:\n The updated version of `x`.", "source": "github_repos"} -{"code": "def rename(self, source_file_names, destination_file_names):\n err_msg = 'source_file_names and destination_file_names should be equal in length'\n assert len(source_file_names) == len(destination_file_names), err_msg\n\n def _rename_file(source, destination):\n \"\"\"Rename a single file object\"\"\"\n try:\n os.rename(source, destination)\n except OSError as err:\n raise IOError(err)\n exceptions = {}\n for source, destination in zip(source_file_names, destination_file_names):\n try:\n _rename_file(source, destination)\n except Exception as e:\n exceptions[source, destination] = e\n if exceptions:\n raise BeamIOError('Rename operation failed', exceptions)", "docstring": "Rename the files at the source list to the destination list.\n Source and destination lists should be of the same size.\n\nArgs:\n source_file_names: List of file paths that need to be moved\n destination_file_names: List of destination_file_names for the files\n\nRaises:\n ``BeamIOError``: if any of the rename operations fail", "source": "github_repos"} -{"code": "def shape(input, name=None, out_type=None):\n if out_type is None:\n if flags.config().tf_shape_default_int64.value():\n out_type = dtypes.int64\n else:\n out_type = dtypes.int32\n return shape_internal(input, name, optimize=True, out_type=out_type)", "docstring": "Returns the shape of a tensor.\n\n This operation returns a 1-D integer tensor representing the shape of `input`.\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.shape(t) # [2, 2, 3]\n ```\n\nArgs:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified output type of the operation (`int32`\n or `int64`). Defaults to `tf.int32`.\n\nReturns:\n A `Tensor` of type `out_type`.", "source": "github_repos"} -{"code": "def _powerset(iterable):\n s = list(iterable)\n return itertools.chain.from_iterable((itertools.combinations(s, r) for r in range(len(s) + 1)))", "docstring": "Helper for generating all possible reduction_axes arguments.\n\n Example: powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)\n\nArgs:\n iterable: An iterable of items to generate the powerset of.\n\nReturns:\n The powerset of all items in iterable.", "source": "github_repos"} -{"code": "def max_steps_per_epoch():\n return _MAX_STEPS_PER_EPOCH", "docstring": "Get the maximum number of steps for any call to fit/evaluate/predict.\n\n Retrieves the limit on the number of epochs set by\n `keras.config.set_max_steps_per_epoch` or the `KERAS_MAX_STEPS_PER_EPOCH`\n environment variable.\n\nArgs:\n max_epochs: The integer limit on the number of epochs or `None`. 
If\n `None`, no limit is applied.", "source": "github_repos"} -{"code": "def _apply_scores(self, scores, value, scores_mask=None, training=False):\n if scores_mask is not None:\n padding_mask = ops.logical_not(scores_mask)\n max_value = 65504.0 if scores.dtype == 'float16' else 1000000000.0\n scores -= max_value * ops.cast(padding_mask, dtype=scores.dtype)\n weights = ops.softmax(scores, axis=-1)\n if training and self.dropout > 0:\n weights = backend.random.dropout(weights, self.dropout, seed=self.seed_generator)\n return (ops.matmul(weights, value), weights)", "docstring": "Applies attention scores to the given value tensor.\n\n To use this method in your attention layer, follow the steps:\n\n * Use `query` tensor of shape `(batch_size, Tq)` and `key` tensor of\n shape `(batch_size, Tv)` to calculate the attention `scores`.\n * Pass `scores` and `value` tensors to this method. The method applies\n `scores_mask`, calculates\n `attention_distribution = softmax(scores)`, then returns\n `matmul(attention_distribution, value).\n * Apply `query_mask` and return the result.\n\nArgs:\n scores: Scores float tensor of shape `(batch_size, Tq, Tv)`.\n value: Value tensor of shape `(batch_size, Tv, dim)`.\n scores_mask: A boolean mask tensor of shape `(batch_size, 1, Tv)`\n or `(batch_size, Tq, Tv)`. If given, scores at positions where\n `scores_mask==False` do not contribute to the result. It must\n contain at least one `True` value in each line along the last\n dimension.\n training: Python boolean indicating whether the layer should behave\n in training mode (adding dropout) or in inference mode\n (no dropout).\n\nReturns:\n Tensor of shape `(batch_size, Tq, dim)`.\n Attention scores after masking and softmax with shape\n `(batch_size, Tq, Tv)`.", "source": "github_repos"} -{"code": "def _generate_pickle_name(gt):\n grammar_textfile_name = os.path.basename(gt)\n head, tail = os.path.splitext(grammar_textfile_name)\n if tail == '.txt':\n tail = ''\n cache_dir = user_cache_dir(appname='YAPF', appauthor='Google', version=yapf_version)\n return cache_dir + os.sep + head + tail + '-py' + '.'.join(map(str, sys.version_info)) + '.pickle'", "docstring": "Get the filepath to write a pickle file to\n given the path of a grammar textfile.\n\n The returned filepath should be in a user-specific cache directory.\n\nArgs:\n gt (str): path to grammar text file\n\nReturns:\n str: path to pickle file", "source": "github_repos"} -{"code": "def _CheckGrayscaleImage(image, require_static=True):\n try:\n if image.get_shape().ndims is None:\n image_shape = image.get_shape().with_rank(2)\n else:\n image_shape = image.get_shape().with_rank_at_least(2)\n except ValueError:\n raise ValueError('A grayscale image (shape %s) must be at least two-dimensional.' 
% image.shape)\n if require_static and (not image_shape.is_fully_defined()):\n raise ValueError(\"'image' must be fully defined.\")\n if image_shape.is_fully_defined():\n if image_shape[-1] != 1:\n raise ValueError('Last dimension of a grayscale image should be size 1.')\n if not image_shape.is_fully_defined():\n return [check_ops.assert_equal(array_ops.shape(image)[-1], 1, message='Last dimension of a grayscale image should be size 1.'), check_ops.assert_greater_equal(array_ops.rank(image), 3, message='A grayscale image must be at least two-dimensional.')]\n else:\n return []", "docstring": "Assert that we are working with properly shaped grayscale image.\n\nArgs:\n image: >= 2-D Tensor of size [*, 1]\n require_static: Boolean, whether static shape is required.\n\nRaises:\n ValueError: if image.shape is not a [>= 2] vector or if\n last dimension is not size 1.\n\nReturns:\n An empty list, if `image` has fully defined dimensions. Otherwise, a list\n containing an assert op is returned.", "source": "github_repos"} -{"code": "def convert_to_rgb(video: np.array, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:\n if not isinstance(video, np.ndarray):\n raise ValueError(f'Video has to be a numpy array to convert to RGB format, but found {type(video)}')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(video)\n video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_channel_dim=input_data_format)\n if video.shape[-3] == 3:\n return video\n if video.shape[-3] == 1:\n return video.repeat(3, -3)\n if not (video[..., 3, :, :] < 255).any():\n return video\n alpha = video[..., 3, :, :] / 255.0\n video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., 3, :, :]\n return video", "docstring": "Convert video to RGB by blending the transparency layer if it's in RGBA format, otherwise simply returns it.\n\nArgs:\n video (`np.array`):\n The video to convert.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output video. If unset, will use the inferred format from the input.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input video. If unset, will use the inferred format from the input.", "source": "github_repos"} -{"code": "def parallel_part_functions(fns: Sequence[PartFn], match_fns: Sequence[MatchFn] | None=None, with_default_output: bool=False, with_always_output: bool=False) -> PartFn:\n return functools.partial(_parallel_part_functions, _to_tuple_fns(fns, match_fns), with_default_output=with_default_output, with_always_output=with_always_output)", "docstring": "Combine `fns` to execute on _T in parallel across the `fns`.\n\nArgs:\n fns: sequence of part functions to chain.\n match_fns: sequence of functions that return True if the part should be\n processed by the part function. When the part should not be processed, the\n part function will not be called and nothing will be yielded by the part\n function. When match_fns is not provided, all parts are processed by\n default.\n with_default_output: True when the parallel execution should fallback to\n return the input part as is when fns do not return any output part.\n with_always_output: True when the parallel execution should always return\n the input part as is independent of the output of the fns. This is a\n stronger condition than `with_default_output`. 
When `with_always_output`\n is True, `with_default_output` is basically ignored.\n\nReturns:\n Part function that runs all functions 'fns' in parallel. The output stream\n will keep the order of the input parts:\n\n f_0(c) = c00, c01\n f_1(c) = c10, c11, c12, c14\n\n f_0(c) // f_1(c) = c00, c01, c10, c11, c12", "source": "github_repos"} -{"code": "def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool=False) -> tf.Tensor:\n scale = (height * width // attentions.shape[2]) ** 0.5\n if height > width:\n feat_width = int(np.round(width / scale))\n feat_height = shape_list(attentions)[2] // feat_width\n else:\n feat_height = int(np.round(height / scale))\n feat_width = shape_list(attentions)[2] // feat_height\n batch_size = shape_list(attentions)[0]\n groups = shape_list(attentions)[1]\n attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width))\n attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))\n if align_corners:\n attentions = tf.compat.v1.image.resize(attentions, size=(height, width), method='bilinear', align_corners=align_corners)\n else:\n attentions = tf.image.resize(attentions, size=(height, width), method='bilinear')\n attentions = tf.transpose(attentions, perm=(0, 3, 1, 2))\n return attentions", "docstring": "Args:\n attentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]\n height (`int`): height of the output attention map\n width (`int`): width of the output attention map\n align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.\n\nReturns:\n `tf.Tensor`: resized attention map of shape [batch_size, groups, height, width]", "source": "github_repos"} -{"code": "def waitAndGet(self, event_name, timeout=DEFAULT_TIMEOUT):\n if timeout:\n if timeout > MAX_TIMEOUT:\n raise Error(self._ad, 'Specified timeout %s is longer than max timeout %s.' % (timeout, MAX_TIMEOUT))\n try:\n raw_event = self._callEventWaitAndGet(self._id, event_name, timeout)\n except Exception as e:\n if 'EventSnippetException: timeout.' in str(e):\n raise TimeoutError(self._ad, 'Timed out after waiting %ss for event \"%s\" triggered by %s (%s).' 
% (timeout, event_name, self._method_name, self._id))\n raise\n return snippet_event.from_dict(raw_event)", "docstring": "Blocks until an event of the specified name has been received and\n return the event, or timeout.\n\nArgs:\n event_name: string, name of the event to get.\n timeout: float, the number of seconds to wait before giving up.\n\nReturns:\n SnippetEvent, the oldest entry of the specified event.\n\nRaises:\n Error: If the specified timeout is longer than the max timeout\n supported.\n TimeoutError: The expected event does not occur within time limit.", "source": "github_repos"} -{"code": "def RegisterImplementation(source):\n global _source_implementations\n if 'name' not in source.__dict__:\n raise RuntimeError(\"'name' not defined in Source %r\" % (source,))\n _source_implementations[source.name] = source", "docstring": "Register a Source implementation with the factory method.\n\n Sources being registered are expected to have a name attribute,\n unique to themselves.\n\n Child modules are expected to call this method in the file-level\n scope.\n\nArgs:\n source: A class type that is a subclass of Source\n\nReturns:\n Nothing\n\nRaises:\n RuntimeError: no 'name' entry in this source.", "source": "github_repos"} -{"code": "def infer_graph(inputs: Optional[Set[EventSetNode]], outputs: Set[EventSetNode]) -> Graph:\n graph = Graph()\n graph.outputs.update(outputs)\n pending_nodes: Set[EventSetNode] = outputs.copy()\n done_nodes: Set[EventSetNode] = set()\n missing_nodes: Set[EventSetNode] = set()\n while pending_nodes:\n node = next(iter(pending_nodes))\n pending_nodes.remove(node)\n assert node not in done_nodes\n graph.add_node(node)\n if inputs is not None and node in inputs:\n graph.inputs.add(node)\n continue\n if node.creator is None:\n if inputs is not None:\n missing_nodes.add(node)\n else:\n graph.inputs.add(node)\n continue\n graph.add_operator(node.creator)\n for input_node in node.creator.inputs.values():\n if input_node in done_nodes:\n continue\n pending_nodes.add(input_node)\n for output_node in node.creator.outputs.values():\n graph.add_node(output_node)\n if missing_nodes:\n raise ValueError(f'The following input nodes are required but not provided as input:\\n{missing_nodes}')\n for e in graph.nodes:\n graph.add_sampling(e.sampling_node)\n for f in e.feature_nodes:\n graph.add_feature(f)\n return graph", "docstring": "Extracts the nodes in between the output and input nodes.\n\n If inputs is set, fails if outputs cannot be computed from `inputs`.\n If inputs is not set, infers the required set of inputs.\n\nArgs:\n inputs: Set of available input nodes. 
If None, inputs are inferred.\n outputs: Set of expected output nodes.\n\nReturns:\n The inferred graph.\n\nRaises:\n ValueError: If there are repeated nodes in the `inputs`; an\n unexpected type of input is provided; an unnamed node is inferred\n as input; or some nodes are required but not provided.", "source": "github_repos"} -{"code": "def adjoint(self, name: str='adjoint') -> 'LinearOperator':\n if self.is_self_adjoint is True:\n return self\n with self._name_scope(name):\n return self._linop_adjoint()", "docstring": "Returns the adjoint of the current `LinearOperator`.\n\n Given `A` representing this `LinearOperator`, return `A*`.\n Note that calling `self.adjoint()` and `self.H` are equivalent.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n `LinearOperator` which represents the adjoint of this `LinearOperator`.", "source": "github_repos"} -{"code": "def shape_tensor(self, name='shape_tensor'):\n with self._name_scope(name):\n if self.shape.is_fully_defined():\n return linear_operator_util.shape_tensor(self.shape.as_list())\n else:\n return self._shape_tensor()", "docstring": "Shape of this `LinearOperator`, determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n `int32` `Tensor`", "source": "github_repos"} -{"code": "def __init__(self, l2=0.01, **kwargs):\n l2 = kwargs.pop('l', l2)\n if kwargs:\n raise TypeError('Argument(s) not recognized: %s' % (kwargs,))\n l2 = 0.01 if l2 is None else l2\n _check_penalty_number(l2)\n self.l2 = backend.cast_to_floatx(l2)", "docstring": "A regularizer that applies a L2 regularization penalty.\n\n The L2 regularization penalty is computed as:\n `loss = l2 * reduce_sum(square(x))`\n\n L2 may be passed to a layer as a string identifier:\n\n >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l2')\n\n In this case, the default value used is `l2=0.01`.\n\nAttributes:\n l2: Float; L2 regularization factor.", "source": "github_repos"} -{"code": "def GetAutomountMasterMap(self):\n search_base = self.conf['base']\n search_scope = ldap.SCOPE_SUBTREE\n search_filter = '(&(objectclass=automountMap)(ou=auto.master))'\n self.log.debug('retrieving automount master map.')\n self.Search(search_base=search_base, search_filter=search_filter, search_scope=search_scope, attrs=['dn'])\n search_base = None\n for obj in self:\n search_base = obj['dn']\n if search_base is None:\n self.log.critical('Could not find automount master map!')\n raise error.EmptyMap\n self.log.debug('found ou=auto.master at %s', search_base)\n master_map = self.GetAutomountMap(location=search_base)\n for map_entry in master_map:\n map_entry.location = map_entry.location.split(':')[2]\n map_entry.location = map_entry.location.split(' ')[0]\n self.log.debug('master map has: %s' % map_entry.location)\n return master_map", "docstring": "Return the autmount master map from this source.\n\n The automount master map is a special-case map which points to a dynamic\n list of additional maps. 
We currently support only the schema outlined at\n http://docs.sun.com/source/806-4251-10/mapping.htm commonly used by linux\n automount clients, namely ou=auto.master and objectclass=automount entries.\n\nReturns:\n an instance of maps.AutomountMap", "source": "github_repos"} -{"code": "def string_split_v2(source, sep=None, maxsplit=-1):\n if sep is None:\n sep = ''\n sep = ops.convert_to_tensor(sep, dtype=dtypes.string)\n source = ops.convert_to_tensor(source, dtype=dtypes.string)\n indices, values, shape = gen_string_ops.string_split_v2(source, sep=sep, maxsplit=maxsplit)\n indices.set_shape([None, 2])\n values.set_shape([None])\n shape.set_shape([2])\n return sparse_tensor.SparseTensor(indices, values, shape)", "docstring": "Split elements of `source` based on `sep` into a `SparseTensor`.\n\n Let N be the size of source (typically N will be the batch size). Split each\n element of `source` based on `sep` and return a `SparseTensor`\n containing the split tokens. Empty tokens are ignored.\n\n For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',\n then the output will be\n\n st.indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\n st.shape = [2, 3]\n st.values = ['hello', 'world', 'a', 'b', 'c']\n\n If `sep` is given, consecutive delimiters are not grouped together and are\n deemed to delimit empty strings. For example, source of `\"1<>2<><>3\"` and\n sep of `\"<>\"` returns `[\"1\", \"2\", \"\", \"3\"]`. If `sep` is None or an empty\n string, consecutive whitespace are regarded as a single separator, and the\n result will contain no empty strings at the start or end if the string has\n leading or trailing whitespace.\n\n Note that the above mentioned behavior matches python's str.split.\n\nArgs:\n source: `1-D` string `Tensor`, the strings to split.\n sep: `0-D` string `Tensor`, the delimiter character.\n maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.\n\nRaises:\n ValueError: If sep is not a string.\n\nReturns:\n A `SparseTensor` of rank `2`, the strings split according to the delimiter.\n The first column of the indices corresponds to the row in `source` and the\n second column corresponds to the index of the split component in this row.", "source": "github_repos"} -{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return self.bos_token_id + token_ids_0 + self.eos_token_id\n return self.bos_token_id + token_ids_0 + token_ids_1 + self.eos_token_id", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. The special tokens depend on calling set_lang.\n\n An NLLB sequence has the following format, where `X` represents the sequence:\n\n - `input_ids` (for encoder) `X [eos, src_lang_code]`\n - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\n BOS is never used. 
Pairs of sequences are not the expected use case, but they will be handled without a\n separator.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github_repos"} -{"code": "def __init__(self, path_elements: List[Union[str, int]], parent: Optional['Key']=None, project: Optional[str]=None, namespace: Optional[str]=None):\n self.path_elements = tuple(path_elements)\n self.parent = parent\n self.namespace = namespace\n self.project = project", "docstring": "Represents a Datastore key.\n\n The partition ID is represented by its components: namespace and project.\n If key has a parent, project and namespace should either be unset or match\n the parent's.\n\nArgs:\n path_elements: (list of str and int) Key path: an alternating sequence of\n kind and identifier. The kind must be of type ``str`` and identifier may\n be a ``str`` or an ``int``.\n If the last identifier is omitted this is an incomplete key, which is\n unsupported in ``WriteToDatastore`` and ``DeleteFromDatastore``.\n See :class:`google.cloud.datastore.key.Key` for more details.\n parent: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Key`)\n (optional) Parent for this key.\n project: (str) Project ID. Required unless set by parent.\n namespace: (str) (optional) Namespace ID", "source": "github_repos"} -{"code": "def custom_gradient(fun):\n\n def __init__(self, fun):\n warnings.warn('`custom_gradient` for the openvino backend acts as a pass-through to support the forward pass. No gradient computation or modification takes place.')\n self.fun = fun\n\n def __call__(self, *args, **kwargs):\n outputs, _ = self.fun(*args, **kwargs)\n return outputs", "docstring": "Decorator for custom gradients.\n\nArgs:\n fun: Forward pass function.", "source": "github_repos"} -{"code": "def write(self, index, value, name=None):\n return self._implementation.write(index, value, name=name)", "docstring": "Write `value` into index `index` of the TensorArray.\n\nArgs:\n index: 0-D. int32 scalar with the index to write to.\n value: N-D. Tensor of type `dtype`. 
The Tensor to write to this index.\n name: A name for the operation (optional).\n\nReturns:\n A new TensorArray object with flow that ensures the write occurs.\n Use this object for all subsequent operations.\n\nRaises:\n ValueError: if there are more writers than specified.", "source": "github_repos"} -{"code": "def _process_update(self, item, feed_item):\n item['name'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_NAME, None)\n item['url'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_URL, None)", "docstring": "Updates an landing page based on the values from the feed.\n\nArgs:\n item: Object representing the landing page to be updated, this object is\n updated directly.\n feed_item: Feed item representing landing page values from the Bulkdozer\n feed.", "source": "github_repos"} -{"code": "def minimum(inputs, **kwargs):\n return Minimum(**kwargs)(inputs)", "docstring": "Functional interface to the `Minimum` layer.\n\nArgs:\n inputs: A list of input tensors (at least 2).\n **kwargs: Standard layer keyword arguments.\n\nReturns:\n A tensor, the element-wise minimum of the inputs.", "source": "github_repos"} -{"code": "def dice(y_true, y_pred, axis=None):\n y_pred = ops.convert_to_tensor(y_pred)\n y_true = ops.cast(y_true, y_pred.dtype)\n inputs = y_true\n targets = y_pred\n intersection = ops.sum(inputs * targets, axis=axis)\n dice = ops.divide(2.0 * intersection, ops.sum(y_true, axis=axis) + ops.sum(y_pred, axis=axis) + backend.epsilon())\n return 1 - dice", "docstring": "Computes the Dice loss value between `y_true` and `y_pred`.\n\n Formula:\n ```python\n loss = 1 - (2 * sum(y_true * y_pred)) / (sum(y_true) + sum(y_pred))\n ```\n\nArgs:\n y_true: tensor of true targets.\n y_pred: tensor of predicted targets.\n axis: tuple for which dimensions the loss is calculated\n\nReturns:\n Dice loss value.\n\nExample:\n >>> y_true = [[[[1.0], [1.0]], [[0.0], [0.0]]],\n ... [[[1.0], [1.0]], [[0.0], [0.0]]]]\n >>> y_pred = [[[[0.0], [1.0]], [[0.0], [1.0]]],\n ... 
[[[0.4], [0.0]], [[0.0], [0.9]]]]\n >>> axis = (1, 2, 3)\n >>> loss = keras.losses.dice(y_true, y_pred, axis=axis)\n >>> assert loss.shape == (2,)\n >>> loss\n array([0.5, 0.75757575], shape=(2,), dtype=float32)\n\n >>> loss = keras.losses.dice(y_true, y_pred)\n >>> assert loss.shape == ()\n >>> loss\n array(0.6164384, shape=(), dtype=float32)", "source": "github_repos"} -{"code": "def _compose_output_rep(lhs_rep, rhs_rep, lhs_contraction, rhs_contraction, lhs_batch, rhs_batch):\n output_rep = []\n for dim in lhs_batch:\n output_rep.append(lhs_rep[dim])\n for i in _minus(range(len(lhs_rep)), lhs_batch + lhs_contraction):\n output_rep.append(lhs_rep[i])\n for i in _minus(range(len(rhs_rep)), rhs_batch + rhs_contraction):\n output_rep.append(rhs_rep[i])\n return ''.join(output_rep)", "docstring": "Compose the output string representation.\n\n e.g., ij, jk, (((1,), (0,)), ((), ())) -> ik\n aij, ajk, (((2,), (1,)), ((0,), (0,))) -> aik\n\nArgs:\n lhs_rep: A string representation for the left-hand side input array\n rhs_rep: A string representation for the right-hand side input array\n lhs_contraction: Sequence[int] (the contraction dimensions of lhs)\n rhs_contraction: Sequence[int] (the contraction dimensions of rhs)\n lhs_batch: Sequence[int] (the batch dimensions of lhs)\n rhs_batch: Sequence[int] (the batch dimensions of rhs)\n\nReturns:\n A string representation of the result array.", "source": "github_repos"} -{"code": "def BuildParams(self, graph_fn, dtype, input_shapes, output_shapes):\n input_mask = [[False] + [True] * (len(shape) - 1) for shape in input_shapes]\n output_mask = [[False] + [True] * (len(shape) - 1) if shape else [] for shape in output_shapes]\n return self.BuildParamsWithMask(graph_fn, dtype, input_shapes, output_shapes, input_mask, output_mask, [], [])", "docstring": "Build test parameters.\n\n The input_shapes and output_shapes arguments are known (static) shapes that\n can be used to generate test data. To define the model, we also specify\n corresponding input/output TensorSpecs. These are defined using the shape\n arguments. For each input tensor we define:\n\n input_spec = [None] + input_shape[1:]\n\n and similarly for output shapes. 
This means that we leave the first (batch)\n dimension unknown, the rest is just copied from the shapes arg.\n\nArgs:\n graph_fn: The function to build the graph.\n dtype: The element type.\n input_shapes: The input shapes.\n output_shapes: The output shapes.\n\nReturns:\n The test parameters.", "source": "github_repos"} -{"code": "def __init__(self, path, file_naming=None, destination=None, temp_directory=None, sink=None, shards=None, output_fn=None, max_writers_per_bundle=MAX_NUM_WRITERS_PER_BUNDLE):\n self.path = path if isinstance(path, ValueProvider) else StaticValueProvider(str, path)\n self.file_naming_fn = file_naming or default_file_naming('output')\n self.destination_fn = self._get_destination_fn(destination)\n self._temp_directory = temp_directory\n self.sink_fn = self._get_sink_fn(sink)\n self.shards = shards or WriteToFiles.DEFAULT_SHARDING\n self.output_fn = output_fn or (lambda x: x)\n self._max_num_writers_per_bundle = max_writers_per_bundle", "docstring": "Initializes a WriteToFiles transform.\n\nArgs:\n path (str, ValueProvider): The directory to write files into.\n file_naming (callable): A callable that takes in a window, pane,\n shard_index, total_shards and compression; and returns a file name.\n destination (callable): If this argument is provided, the sink parameter\n must also be a callable.\n temp_directory (str, ValueProvider): To ensure atomicity in the transform,\n the output is written into temporary files, which are written to a\n directory that is meant to be temporary as well. Once the whole output\n has been written, the files are moved into their final destination, and\n given their final names. By default, the temporary directory will be\n within the temp_location of your pipeline.\n sink (callable, ~apache_beam.io.fileio.FileSink): The sink to use to write\n into a file. It should implement the methods of a ``FileSink``. Pass a\n class signature or an instance of FileSink to this parameter. If none is\n provided, a ``TextSink`` is used.\n shards (int): The number of shards per destination and trigger firing.\n output_fn (callable, optional): A callable to process the output. 
This\n parameter is currently unused and retained for backward compatibility.\n max_writers_per_bundle (int): The number of writers that can be open\n concurrently in a single worker that's processing one bundle.", "source": "github_repos"} -{"code": "def random_mixed_density_matrix(num_qubits, num_mixtures=5):\n pre_probs = tf.random.uniform([num_mixtures], 1e-09)\n mixture_probabilities = pre_probs / tf.reduce_sum(pre_probs)\n random_unitary = random_unitary_matrix(num_qubits)\n dim = 2 ** num_qubits\n final_state = tf.zeros([dim, dim], tf.complex128)\n for i in range(num_mixtures):\n pure_state = tf.one_hot(i, dim, 1.0, 0.0, 0, tf.complex128)\n evolved_pure_state = tf.linalg.matvec(random_unitary, pure_state)\n adjoint_evolved_pure_state = tf.squeeze(tf.linalg.adjoint(tf.expand_dims(evolved_pure_state, 0)))\n final_state = final_state + tf.cast(mixture_probabilities[i], tf.complex128) * tf.einsum('i,j->ij', evolved_pure_state, adjoint_evolved_pure_state)\n return (final_state, mixture_probabilities)", "docstring": "Returns a random pure density matrix.\n\n Applies a common random unitary to `num_mixtures` orthogonal states, then\n mixes them with random weights.\n\nArgs:\n num_qubits: Number of qubits on which the matrix acts.\n num_mixtures: The number of orthogonal pure states to mix.\n\nReturns:\n final_state: The mixed density matrix.\n mixture_probabilities: The probability of each state in the mixture.", "source": "github_repos"} -{"code": "def wrap_cached_variables(concrete_function):\n outer_graph = func_graph_module.FuncGraph('{}_no_cache'.format(concrete_function.graph.name))\n mapped_captures = None\n remapped_captures = {}\n with outer_graph.as_default():\n for capture, placeholder in concrete_function.graph.captures:\n cached_variable = getattr(capture, '_cached_variable', None)\n if cached_variable is None:\n continue\n cached_variable = cached_variable()\n new_cached_value = cached_variable.read_value()\n key = id(capture)\n external = concrete_function.graph.function_captures.by_val_external[key]\n internal = concrete_function.graph.function_captures.by_val_internal[key]\n remapped_captures[key] = [external, internal]\n concrete_function.graph.function_captures.add_or_replace(key=key, external=new_cached_value, internal=placeholder, is_by_ref=False)\n mapped_captures = True\n if not mapped_captures:\n return concrete_function\n inner_concrete = defun.ConcreteFunction.from_func_graph(concrete_function.graph, concrete_function.function_type, {})\n\n def wrap_function(*args):\n return inner_concrete._call_flat(list(args), inner_concrete.captured_inputs)\n args = nest.flatten(concrete_function.structured_input_signature, expand_composites=True)\n func_graph_module.func_graph_from_py_func(None, wrap_function, args=tuple(args), kwargs={}, func_graph=outer_graph)\n fn = defun.ConcreteFunction.from_func_graph(outer_graph, concrete_function.function_type, {})\n fn._arg_keywords = concrete_function._arg_keywords\n fn._num_positional_args = concrete_function._num_positional_args\n for key, capture in remapped_captures.items():\n external, internal = capture\n concrete_function.graph._function_captures.add_or_replace(key=key, external=external, internal=internal, is_by_ref=False)\n return fn", "docstring": "Wraps the concrete function if it uses cached read tensors.\n\n This function creates a new concrete function that captures variables\n instead of the cached read tensors.\n\nArgs:\n concrete_function: A Concrete function that maybe captures cached read\n tensors.\n\nReturns:\n A 
concrete function that wraps the original concrete function, which\n captures variables instead. If the original function did not capture any\n cached values, then the function is not wrapped and the original object is\n returned.", "source": "github_repos"} -{"code": "def _build_system_message(self, error: str) -> LogMessage:\n return self._base_log.copy() | LogMessage(log_type=LogType.SYSTEM.value, error=error)", "docstring": "Adds system error information to base log message.\n\nArgs:\n * error: error that occurred\n\nReturns:\n * Log: dictionary containing log data", "source": "github_repos"} -{"code": "def post_process_grounded_object_detection(self, outputs: 'OwlViTObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, List[Tuple]]]=None, text_labels: Optional[List[List[str]]]=None):\n output = self.image_processor.post_process_object_detection(outputs=outputs, threshold=threshold, target_sizes=target_sizes)\n if text_labels is not None and len(text_labels) != len(output):\n raise ValueError('Make sure that you pass in as many lists of text labels as images')\n if text_labels is not None:\n for image_output, image_text_labels in zip(output, text_labels):\n object_text_labels = [image_text_labels[i] for i in image_output['labels']]\n image_output['text_labels'] = object_text_labels\n else:\n for image_output in output:\n image_output['text_labels'] = None\n return output", "docstring": "Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,\n bottom_right_x, bottom_right_y) format.\n\nArgs:\n outputs ([`OwlViTObjectDetectionOutput`]):\n Raw outputs of the model.\n threshold (`float`, *optional*, defaults to 0.1):\n Score threshold to keep object detection predictions.\n target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\n Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n `(height, width)` of each image in the batch. If unset, predictions will not be resized.\n text_labels (`List[List[str]]`, *optional*):\n List of lists of text labels for each image in the batch. 
If unset, \"text_labels\" in output will be\n set to `None`.\n\nReturns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the following keys:\n - \"scores\": The confidence scores for each predicted box on the image.\n - \"labels\": Indexes of the classes predicted by the model on the image.\n - \"boxes\": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.\n - \"text_labels\": The text labels for each predicted bounding box on the image.", "source": "github_repos"} -{"code": "def assert_non_singular(self, name='assert_non_singular'):\n with self._name_scope(name):\n return self._assert_non_singular()", "docstring": "Returns an `Op` that asserts this operator is non singular.\n\n This operator is considered non-singular if\n\n ```\n ConditionNumber < max{100, range_dimension, domain_dimension} * eps,\n eps := np.finfo(self.dtype.as_numpy_dtype).eps\n ```\n\nArgs:\n name: A string name to prepend to created ops.\n\nReturns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is singular.", "source": "github_repos"} -{"code": "def __init__(self, market_data_dict: Dict[str, Any], dtype: Optional[tf.DType]=None):\n self._valuation_date = dateslib.convert_to_date_tensor(market_data_dict['reference_date'])\n self._market_data_dict = market_data_dict\n self._dtype = dtype or tf.float64", "docstring": "Market data constructor.\n\n The dictionary must follow a format outlined in the doc:\n https://github.com/google/tf-quant-finance/tree/master/tf_quant_finance/experimental/pricing_platform/framework/market_data/market_data.pdf\n\nArgs:\n market_data_dict: Market data dictionary.\n dtype: A `dtype` to use for float-like `Tensor`s.\n Default value: `tf.float64`.", "source": "github_repos"} -{"code": "def max_pool(x, pool_size, strides, padding):\n x = tf_np.asarray(x)\n return tf_np.asarray(nn_ops.pool(input=x, window_shape=pool_size, pooling_type='MAX', strides=strides, padding=padding))", "docstring": "Performs an N-D max pooling.\n\nArgs:\n x: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +\n [num_channels]`. Pooling happens over the spatial dimensions only.\n pool_size: sequence of N ints.\n strides: sequence of N ints.\n padding: a string, the padding algorithm. 
Must be \"SAME\" or \"VALID\".\n\nReturns:\n An (N+2)-D array, of shape\n [batch_size] + output_spatial_shape + [num_channels],\n where `output_spatial_shape` depends on the value of padding:\n If padding = \"SAME\":\n output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])\n If padding = \"VALID\":\n output_spatial_shape[i] =\n ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).", "source": "github_repos"} -{"code": "def fail(msg, extras=None):\n raise signals.TestFailure(msg, extras)", "docstring": "Explicitly fail a test.\n\nArgs:\n msg: A string explaining the details of the failure.\n extras: An optional field for extra information to be included in\n test result.\n\nRaises:\n signals.TestFailure: Mark a test as failed.", "source": "github_repos"} -{"code": "def __init__(self, grpc_debug_server_addresses, thread_name_filter=None, send_traceback_and_source_code=True):\n\n def _gated_grpc_watch_fn(fetches, feeds):\n del fetches, feeds\n return framework.WatchOptions(debug_ops=['DebugIdentity(gated_grpc=true)'])\n super(TensorBoardDebugHook, self).__init__(grpc_debug_server_addresses, watch_fn=_gated_grpc_watch_fn, thread_name_filter=thread_name_filter)\n self._grpc_debug_server_addresses = grpc_debug_server_addresses\n self._send_traceback_and_source_code = send_traceback_and_source_code\n self._sent_graph_version = -1\n grpc_wrapper.register_signal_handler()", "docstring": "Constructor of TensorBoardDebugHook.\n\nArgs:\n grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a\n `str` or a `list` of `str`s. E.g., \"localhost:2333\",\n \"grpc://localhost:2333\", [\"192.168.0.7:2333\", \"192.168.0.8:2333\"].\n thread_name_filter: Optional filter for thread names.\n send_traceback_and_source_code: Whether traceback of graph elements and\n the source code are to be sent to the debug server(s).", "source": "github_repos"} -{"code": "def add(self, word: str):\n if not word:\n return\n self._tokens.add(word)\n ref = self.data\n for char in word:\n ref[char] = ref.setdefault(char, {})\n ref = ref[char]\n ref[self._termination_char] = 1", "docstring": "Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation.\n The special key `\"\"` in `self._termination_char` is used to represent termination.\n\n This function is idempotent, adding twice the same word will leave the trie unchanged\n\nExample:\n ```python\n >>> trie = Trie()\n >>> trie.add(\"Hello 友達\")\n >>> trie.data\n {\"H\": {\"e\": {\"l\": {\"l\": {\"o\": {\" \": {\"友\": {\"達\": {\"\": 1}}}}}}}}}\n\n >>> trie.add(\"Hello\")\n >>> trie.data\n {\"H\": {\"e\": {\"l\": {\"l\": {\"o\": {\"\": 1, \" \": {\"友\": {\"達\": {\"\": 1}}}}}}}}}\n ```", "source": "github_repos"} -{"code": "def __lt__(self: EventSetOrNode, other: Any) -> EventSetOrNode:\n if isinstance(other, self.__class__):\n from temporian.core.operators.binary import less\n return less(input_left=self, input_right=other)\n if isinstance(other, T_SCALAR):\n from temporian.core.operators.scalar import less_scalar\n return less_scalar(input=self, value=other)\n self._raise_error('compare', other, '(int,float)')\n assert False", "docstring": "Computes less (`self < other`) element-wise with another\n [`EventSet`][temporian.EventSet] or a scalar value.\n\n If an EventSet, each feature in `self` is compared element-wise to the\n feature in `other` in the same position. 
`self` and `other` must have\n the same sampling and the same number of features.\n\n If a scalar value, each item in each feature in `input` is compared to\n `value`.\n\n Note that it will always return False on NaN elements.\n\n Example with EventSet:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f1\": [0, 100, 200]}\n ... )\n >>> b = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f2\": [-10, 100, 5]},\n ... same_sampling_as=a\n ... )\n\n >>> c = a < b\n >>> c\n indexes: []\n features: [('f1', bool_)]\n events:\n (3 events):\n timestamps: [1. 2. 3.]\n 'f1': [False False False]\n ...\n\n ```\n\n Example with scalar:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f1\": [0, 100, 200], \"f2\": [-10, 100, 5]}\n ... )\n\n >>> b = a < 100\n >>> b\n indexes: []\n features: [('f1', bool_), ('f2', bool_)]\n events:\n (3 events):\n timestamps: [1. 2. 3.]\n 'f1': [ True False False]\n 'f2': [ True False True]\n ...\n\n ```\n\nArgs:\n other: EventSet or scalar value.\n\nReturns:\n Result of the comparison.", "source": "github_repos"} -{"code": "def empty(self) -> 'Builder':\n return self._to_builder(_evaluation.EmptyFunction(self.node.context, self.node, []))", "docstring": "The FHIRPath empty() function.\n\nReturns:\n An expression that evaluates to True if the parent evaluates to empty.", "source": "github_repos"} -{"code": "def as_default(self) -> ContextManager['Graph']:\n return _default_graph_stack.get_controller(self)", "docstring": "Returns a context manager that makes this `Graph` the default graph.\n\n This method should be used if you want to create multiple graphs\n in the same process. For convenience, a global default graph is\n provided, and all ops will be added to this graph if you do not\n create a new graph explicitly.\n\n Use this method with the `with` keyword to specify that ops created within\n the scope of a block should be added to this graph. In this case, once\n the scope of the `with` is exited, the previous default graph is set again\n as default. There is a stack, so it's ok to have multiple nested levels\n of `as_default` calls.\n\n The default graph is a property of the current thread. If you\n create a new thread, and wish to use the default graph in that\n thread, you must explicitly add a `with g.as_default():` in that\n thread's function.\n\n The following code examples are equivalent:\n\n ```python\n # 1. Using Graph.as_default():\n g = tf.Graph()\n with g.as_default():\n c = tf.constant(5.0)\n assert c.graph is g\n\n # 2. 
Constructing and making default:\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0)\n assert c.graph is g\n ```\n\n If eager execution is enabled ops created under this context manager will be\n added to the graph instead of executed eagerly.\n\nReturns:\n A context manager for using this graph as the default graph.", "source": "github_repos"} -{"code": "def experimental_tpu_fit_loop(model, dataset, epochs=100, verbose=1, callbacks=None, initial_epoch=0, steps_per_epoch=None, val_dataset=None, validation_steps=None, validation_freq=1):\n mode = ModeKeys.TRAIN\n current_strategy = model._distribution_strategy\n iteration_value = min(steps_per_epoch, current_strategy.extended.steps_per_run)\n steps_per_run = backend.variable(value=iteration_value, dtype='int32', name='steps_per_run')\n iterator = dist_utils.get_iterator(dataset, current_strategy)\n scope = dist_utils.distributed_scope(strategy=current_strategy, learning_phase=1)\n scope.__enter__()\n out_labels = model.metrics_names or []\n step_fn = _make_train_step_fn(model, ModeKeys.TRAIN, current_strategy, out_labels)\n initial_loop_values = {}\n initial_loop_values['loss'] = constant_op.constant(10000000.0)\n for m in model._get_training_eval_metrics():\n tensor = m.result()\n initial_loop_values[m.name] = array_ops.zeros(tensor.shape, tensor.dtype)\n ctx = current_strategy.extended.experimental_run_steps_on_iterator(step_fn, iterator, iterations=steps_per_run, initial_loop_values=initial_loop_values)\n train_op = ctx.run_op\n output_tensors = ctx.last_step_outputs\n do_validation = bool(validation_steps)\n if model._compile_distribution:\n dist_utils._copy_weights_to_distributed_model(model, mode)\n callbacks = cbks.configure_callbacks(callbacks, model, do_validation=do_validation, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose, count_mode='steps', mode=mode)\n steps_to_run = [current_strategy.extended.steps_per_run] * (steps_per_epoch // current_strategy.extended.steps_per_run)\n if steps_per_epoch % current_strategy.extended.steps_per_run:\n steps_to_run.append(steps_per_epoch % current_strategy.extended.steps_per_run)\n target_steps = len(steps_to_run)\n callbacks._call_begin_hook(mode)\n initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode)\n for epoch in range(initial_epoch, epochs):\n dist_utils._reset_metrics(model)\n callbacks.on_epoch_begin(epoch)\n epoch_logs = {}\n step_index = 0\n prev_step_count = None\n current_step = 0\n while current_step < target_steps:\n step_count = steps_to_run[current_step]\n batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}\n callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs)\n if prev_step_count is None or step_count != prev_step_count:\n backend.get_session().run(steps_per_run.assign(step_count))\n prev_step_count = step_count\n try:\n _, outputs = backend.batch_get_value([train_op, output_tensors])\n except errors.OutOfRangeError:\n logging.warning('Your dataset iterator ran out of data; interrupting training. Make sure that your dataset can generate at least `steps_per_epoch * epochs` batches (in this case, %d batches).' 
% steps_per_epoch * epochs)\n break\n batch_logs.update(outputs)\n callbacks._call_batch_hook(mode, 'end', step_index, batch_logs)\n step_index = step_index + step_count\n current_step += 1\n if callbacks.model.stop_training:\n break\n if do_validation and training_utils_v1.should_run_validation(validation_freq, epoch):\n logging.info('Running validation at fit epoch: %s', epoch)\n if model._compile_distribution:\n dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN)\n val_outs = experimental_tpu_test_loop(model, val_dataset, steps=validation_steps, verbose=verbose, callbacks=callbacks)\n if not isinstance(val_outs, list):\n val_outs = [val_outs]\n for label, val_out in zip(out_labels, val_outs):\n epoch_logs['val_' + label] = val_out\n callbacks.on_epoch_end(epoch, epoch_logs)\n if callbacks.model.stop_training:\n break\n model._successful_loop_finish = True\n callbacks._call_end_hook(mode)\n if model._compile_distribution:\n dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN)\n scope.__exit__(None, None, None)\n return model.history", "docstring": "Fit loop for training with TPU tf.distribute.Strategy.\n\nArgs:\n model: Keras Model instance.\n dataset: Dataset that returns inputs and targets\n epochs: Number of times to iterate over the data\n verbose: Integer, Verbosity mode, 0, 1 or 2\n callbacks: List of callbacks to be called during training\n initial_epoch: Epoch at which to start training\n (useful for resuming a previous training run)\n steps_per_epoch: Total number of steps (batches of samples)\n before declaring one epoch finished and starting the\n next epoch. Ignored with the default value of `None`.\n val_dataset: Dataset for validation data.\n validation_steps: Number of steps to run validation for\n (only if doing validation from data tensors).\n Ignored with the default value of `None`.\n validation_freq: Only relevant if validation data is provided. Integer or\n `collections.abc.Container` instance (e.g. list, tuple, etc.). If an\n integer, specifies how many training epochs to run before a new\n validation run is performed, e.g. `validation_freq=2` runs\n validation every 2 epochs. If a Container, specifies the epochs on\n which to run validation, e.g. `validation_freq=[1, 2, 10]` runs\n validation at the end of the 1st, 2nd, and 10th epochs.\n\nReturns:\n Returns `None`.\n\nRaises:\n ValueError: in case of invalid arguments.", "source": "github_repos"} -{"code": "def fn_args(fn):\n if isinstance(fn, functools.partial):\n args = fn_args(fn.func)\n args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])]\n else:\n if _is_callable_object(fn):\n fn = fn.__call__\n args = tf_inspect.getfullargspec(fn).args\n if _is_bound_method(fn) and args:\n args.pop(0)\n return tuple(args)", "docstring": "Get argument names for function-like object.\n\nArgs:\n fn: Function, or function-like object (e.g., result of `functools.partial`).\n\nReturns:\n `tuple` of string argument names.\n\nRaises:\n ValueError: if partial function has positionally bound arguments", "source": "github_repos"} -{"code": "def GetBullets(self):\n return self._bullets", "docstring": "Returns the bullet characters list.\n\n Use the list elements in order for best appearance in nested bullet lists,\n wrapping back to the first element for deep nesting. 
The list size depends\n on the console implementation.\n\nReturns:\n A tuple of bullet characters.", "source": "github_repos"} -{"code": "def collect(self):\n if self.is_external:\n raise ValueError(f'`collect` cannot be called on a dynamic evaluation context that is using an external DNASpec: {self._dna_spec}.')\n _dynamic_evaluation_stack.ensure_thread_safety(self)\n self._hyper_dict = {}\n with dynamic_evaluate(self.add_decision_point, per_thread=self._per_thread):\n try:\n _dynamic_evaluation_stack.push(self)\n yield self._hyper_dict\n finally:\n self._dna_spec = None\n _dynamic_evaluation_stack.pop(self)", "docstring": "A context manager for collecting hyper primitives within this context.\n\n Example::\n\n context = DynamicEvaluationContext()\n with context.collect():\n x = pg.oneof([1, 2, 3]) + pg.oneof([4, 5, 6])\n\n # Will print 1 + 4 = 5. Meanwhile 2 hyper primitives will be registered\n # in the search space represented by the context.\n print(x)\n\nYields:\n The hyper dict representing the search space.", "source": "github_repos"} -{"code": "def list_summaries(logdir):\n result = _SummaryFile()\n for dirpath, _, filenames in os.walk(logdir):\n for filename in filenames:\n if not filename.startswith('events.out.'):\n continue\n path = os.path.join(dirpath, filename)\n for event in _SummaryIterator(path):\n if event.graph_def:\n result.graph_defs.append(event.graph_def)\n if not event.summary:\n continue\n for value in event.summary.value:\n tag = value.tag\n kind = value.WhichOneof('value')\n container = {'simple_value': result.scalars, 'image': result.images, 'histo': result.histograms, 'tensor': result.tensors}.get(kind)\n if container is None:\n raise ValueError('Unexpected summary kind %r in event file %s:\\n%r' % (kind, path, event))\n elif kind == 'tensor' and tag != 'keras':\n plugin_name = value.metadata.plugin_data.plugin_name\n container = {'images': result.images, 'histograms': result.histograms, 'scalars': result.scalars}.get(plugin_name)\n if container is not None:\n result.convert_from_v2_summary_proto = True\n else:\n container = result.tensors\n container.add(_ObservedSummary(logdir=dirpath, tag=tag))\n return result", "docstring": "Read all summaries under the logdir into a `_SummaryFile`.\n\nArgs:\n logdir: A path to a directory that contains zero or more event\n files, either as direct children or in transitive subdirectories.\n Summaries in these events must only contain old-style scalars,\n images, and histograms. 
Non-summary events, like `graph_def`s, are\n ignored.\n\nReturns:\n A `_SummaryFile` object reflecting all summaries written to any\n event files in the logdir or any of its descendant directories.\n\nRaises:\n ValueError: If an event file contains an summary of unexpected kind.", "source": "github_repos"} -{"code": "def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding)\n return vision_outputs", "docstring": "Returns:\n vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`):\n The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that\n contains the image features, the pooled image features and the hidden states if\n `output_hidden_states=True`.\n\nExample:\n ```python\n >>> import torch\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, Blip2Model\n\n >>> model = Blip2Model.from_pretrained(\"Salesforce/blip2-opt-2.7b\")\n\n >>> processor = AutoProcessor.from_pretrained(\"Salesforce/blip2-opt-2.7b\")\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n >>> inputs = processor(images=image, return_tensors=\"pt\")\n >>> image_outputs = model.get_image_features(**inputs)\n ```", "source": "github_repos"} -{"code": "def _parse_line(self, instrumentation_block, line):\n if instrumentation_block.state == _InstrumentationBlockStates.METHOD:\n return self._parse_method_block_line(instrumentation_block, line)\n elif instrumentation_block.state == _InstrumentationBlockStates.RESULT:\n return self._parse_result_block_line(instrumentation_block, line)\n else:\n return self._parse_unknown_block_line(instrumentation_block, line)", "docstring": "Parses an arbitrary line from the instrumentation output based upon\n the current parser state.\n\nArgs:\n instrumentation_block: _InstrumentationBlock, an instrumentation\n block with any of the possible parser states.\n line: string, the raw instrumentation output line to parse\n appropriately.\n\nReturns:\n The next instrumenation block to continue parsing with.", "source": "github_repos"} -{"code": "def stripped_op_list_for_graph(graph_def):\n used_ops = ops_used_by_graph_def(graph_def)\n op_defs = []\n for op in sorted(used_ops):\n op_def = op_def_registry.get(op)\n if op_def is not None:\n op_defs.append(op_def)\n return op_def_pb2.OpList(op=op_defs)", "docstring": "Collect the stripped OpDefs for ops used by a graph.\n\n This function computes the `stripped_op_list` field of `MetaGraphDef` and\n similar protos. 
The result can be communicated from the producer to the\n consumer, which can then use the C++ function\n `RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.\n\nArgs:\n graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.\n\nReturns:\n An `OpList` of ops used by the graph.", "source": "github_repos"} -{"code": "def _setup_rotation_strategy(self, creative_rotation, feed_item):\n option = feed_item.get(FieldMap.CREATIVE_ROTATION, 'Even').upper()\n if option == 'EVEN':\n creative_rotation['type'] = 'CREATIVE_ROTATION_TYPE_RANDOM'\n creative_rotation['weightCalculationStrategy'] = 'WEIGHT_STRATEGY_EQUAL'\n elif option == 'SEQUENTIAL':\n creative_rotation['type'] = 'CREATIVE_ROTATION_TYPE_SEQUENTIAL'\n creative_rotation['weightCalculationStrategy'] = None\n elif option == 'CUSTOM':\n creative_rotation['type'] = 'CREATIVE_ROTATION_TYPE_RANDOM'\n creative_rotation['weightCalculationStrategy'] = 'WEIGHT_STRATEGY_CUSTOM'\n elif option == 'CLICK-THROUGH RATE':\n creative_rotation['type'] = 'CREATIVE_ROTATION_TYPE_RANDOM'\n creative_rotation['weightCalculationStrategy'] = 'WEIGHT_STRATEGY_HIGHEST_CTR'\n elif option == 'OPTIMIZED':\n creative_rotation['type'] = 'CREATIVE_ROTATION_TYPE_RANDOM'\n creative_rotation['weightCalculationStrategy'] = 'WEIGHT_STRATEGY_OPTIMIZED'", "docstring": "Analyzes the feed and sets up rotation strategy for the ad.\n\n For better user experience, the creative rotaion values that come from the\n feed map directly to values in the UI, this function is responsible for\n translating that to API specific values for the creative rotation objects\n under the ad.\n\nArgs:\n creative_rotation: Feed item representing the creative rotation setup from\n the feed.\n feed_item: Feed item representing the ad.", "source": "github_repos"} -{"code": "def _convert_type(self, t, as_instance=False):\n src = textwrap.dedent(f'\\n from typing import Any, Callable, Iterator, Tuple, Type, Union\\n from protocols import Sequence, SupportsLower\\n x = ... # type: {t}\\n ')\n filename = str(hash((t, as_instance)))\n x = self._parse_and_lookup(src, 'x', filename).type\n if as_instance:\n x = abstract_utils.AsInstance(x)\n return self.ctx.convert.constant_to_value(x, {}, self.ctx.root_node)", "docstring": "Convenience function for turning a string into an abstract value.\n\n Note that this function cannot be called more than once per test with\n the same arguments, since we hash the arguments to get a filename for\n the temporary pyi.\n\nArgs:\n t: The string representation of a type.\n as_instance: Whether to convert as an instance.\n\nReturns:\n A BaseValue.", "source": "github_repos"} -{"code": "def __init__(self, thresholds=None, name=None, dtype=None):\n super().__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES, thresholds=thresholds, name=name, dtype=dtype)", "docstring": "Calculates the number of false negatives.\n\n If `sample_weight` is given, calculates the sum of the weights of\n false negatives. This metric creates one local variable, `accumulator`\n that is used to keep track of the number of false negatives.\n\n If `sample_weight` is `None`, weights default to 1.\n Use `sample_weight` of 0 to mask values.\n\nArgs:\n thresholds: (Optional) Defaults to `0.5`. A float value, or a Python\n list/tuple of float threshold values in `[0, 1]`. 
A threshold is\n compared with prediction values to determine the truth value of\n predictions (i.e., above the threshold is `True`, below is `False`).\n If used with a loss function that sets `from_logits=True` (i.e. no\n sigmoid applied to predictions), `thresholds` should be set to 0.\n One metric value is generated for each threshold value.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExample:\n >>> m = keras.metrics.FalseNegatives()\n >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])\n >>> m.result()\n 2.0\n\n >>> m.reset_state()\n >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])\n >>> m.result()\n 1.0", "source": "github_repos"} -{"code": "def _from_definition(fdef, grad_func=None):\n func = None\n argnames = [arg.name for arg in fdef.signature.input_arg]\n input_types = tuple((dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg))\n func_name = fdef.signature.name\n python_grad_func = None\n out_names = [arg.name for arg in fdef.signature.output_arg]\n result = _DefinedFunction(func, argnames, input_types, func_name, grad_func, python_grad_func, out_names)\n if is_oss:\n serialized = fdef.SerializeToString()\n c_func = c_api.TF_FunctionImportFunctionDef(serialized)\n else:\n c_func = c_api.TF_FunctionImportFunctionDefNoSerialization(fdef)\n result._c_func = c_api_util.ScopedTFFunction(c_func, func_name)\n result._extra_inputs = []\n result._op_def = fdef.signature\n return result", "docstring": "Creates a _DefinedFunction initialized from a FunctionDef proto.\n\nArgs:\n fdef: a FunctionDef\n grad_func: a _DefinedFunction or None\n\nReturns:\n A _DefinedFunction representing fdef", "source": "github_repos"} -{"code": "def get_model_files(model_type: str, frameworks: Optional[List[str]]=None) -> Dict[str, Union[Path, List[Path]]]:\n module_name = model_type_to_module_name(model_type)\n model_module = TRANSFORMERS_PATH / 'models' / module_name\n model_files = list(model_module.glob('*.py'))\n model_files = filter_framework_files(model_files, frameworks=frameworks)\n doc_file = REPO_PATH / 'docs' / 'source' / 'en' / 'model_doc' / f'{model_type}.md'\n test_files = [f'test_modeling_{module_name}.py', f'test_modeling_tf_{module_name}.py', f'test_modeling_flax_{module_name}.py', f'test_tokenization_{module_name}.py', f'test_image_processing_{module_name}.py', f'test_feature_extraction_{module_name}.py', f'test_processor_{module_name}.py']\n test_files = filter_framework_files(test_files, frameworks=frameworks)\n test_files = [REPO_PATH / 'tests' / 'models' / module_name / f for f in test_files]\n test_files = [f for f in test_files if f.exists()]\n return {'doc_file': doc_file, 'model_files': model_files, 'module_name': module_name, 'test_files': test_files}", "docstring": "Retrieves all the files associated to a model.\n\nArgs:\n model_type (`str`): A valid model type (like \"bert\" or \"gpt2\")\n frameworks (`List[str]`, *optional*):\n If passed, will only keep the model files corresponding to the passed frameworks.\n\nReturns:\n `Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys:\n - **doc_file** -- The documentation file for the model.\n - **model_files** -- All the files in the model module.\n - **test_files** -- The test files for the model.", "source": "github_repos"} -{"code": "def _has_kwargs(fn):\n if isinstance(fn, functools.partial):\n fn = fn.func\n elif _is_callable_object(fn):\n fn = fn.__call__\n elif not callable(fn):\n raise TypeError('fn should be a 
function-like object, but is of type {}.'.format(type(fn)))\n return tf_inspect.getfullargspec(fn).varkw is not None", "docstring": "Returns whether the passed callable has **kwargs in its signature.\n\nArgs:\n fn: Function, or function-like object (e.g., result of `functools.partial`).\n\nReturns:\n `bool`: if `fn` has **kwargs in its signature.\n\nRaises:\n `TypeError`: If fn is not a Function, or function-like object.", "source": "github_repos"} -{"code": "def call(self, inputs: List[Any], global_state: pg.geno.AttributeDict, step: int=0) -> List[Any]:\n raise NotImplementedError()", "docstring": "Subclasses should override this method.\n\n The `global_state` and `step` are optional for the subclasses' call\n signature.\n\nArgs:\n inputs: A list of values as inputs.\n global_state: An `AttributeDict` object as the global state container,\n which is readable/writable during the operation.\n step: Number of examples historically proposed, which can be used for\n determining a cross over schedule.\n\nReturns:\n A list of values as output of current operation.", "source": "github_repos"} -{"code": "def read(self, size):\n data_list = []\n bytes_read = 0\n last_block_position = self.position\n while bytes_read < size:\n bytes_from_remaining = min(size - bytes_read, len(self.remaining))\n data_list.append(self.remaining[0:bytes_from_remaining])\n self.remaining = self.remaining[bytes_from_remaining:]\n self.position += bytes_from_remaining\n bytes_read += bytes_from_remaining\n if not self.remaining:\n try:\n self.remaining = self.conn.recv_bytes()\n except EOFError:\n break\n last_block = b''.join(data_list)\n if last_block:\n self.last_block_position = last_block_position\n self.last_block = last_block\n return last_block", "docstring": "Read data from the wrapped pipe connection.\n\nArgs:\n size: Number of bytes to read. 
Actual number of bytes read is always\n equal to size unless EOF is reached.\n\nReturns:\n data read as str.", "source": "github_repos"} -{"code": "def run_ninja(targets, fail_collector=None, fail_fast=False, verbose=False):\n cmd = ['ninja', '-k', '1' if fail_fast else '100000'] + targets\n with subprocess.Popen(cmd, cwd=OUT_DIR, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) as process:\n failed_targets = []\n print_if_verbose = False\n with open(NINJA_LOG, 'w') as ninja_log:\n while True:\n line = process.stdout.readline()\n if not line:\n break\n ninja_log.write(line)\n msg_type, modname, logfile = parse_ninja_output_line(line)\n if msg_type == _NINJA_FAILURE_MSG:\n failed_targets.append(line[len(NINJA_FAILURE_PREFIX):].strip())\n print_if_verbose = True\n if msg_type == _TEST_MODULE_PASS_MSG or msg_type == _TEST_MODULE_FAIL_MSG:\n print(line)\n if msg_type == _TEST_MODULE_FAIL_MSG:\n fail_collector.add_failure(modname, logfile)\n print_if_verbose = False\n if verbose and print_if_verbose:\n print(line.rstrip())\n if failed_targets:\n summary_hdr = '>>> Found Ninja target failures (includes test failures):'\n print('\\n' + summary_hdr)\n ninja_log.write('\\n' + summary_hdr + '\\n')\n for t in failed_targets:\n target = f' - {t}'\n print(target)\n ninja_log.write(target + '\\n')\n process.wait()\n if process.returncode == 0:\n return True\n else:\n print(f\">>> FAILED: Ninja command '{shlex.join(cmd)}'.\")\n print(\">>> Run it in the 'out' directory to reproduce.\")\n print(f\">>> Full Ninja output is available in '{NINJA_LOG}'.\")\n print('>>> Failing test modules (if any) will be reported below.')\n return False", "docstring": "Run ninja over the list of specified targets.\n\nArgs:\n targets: The list of targets to run.\n fail_collector: A FailCollector object to collect failures.\n fail_fast: If True, abort at the first target failure.\n verbose: If True, print verbose output.\n\nReturns:\n True if no target fails. 
False, otherwise.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **loss_kwargs) -> CausalLMOutputWithPast:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position)\n hidden_states = outputs.last_hidden_state\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)\n return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, NemotronForCausalLM\n\n >>> model = NemotronForCausalLM.from_pretrained(\"nvidia/nemotron-3-8b-base-4k-hf\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"nvidia/nemotron-3-8b-base-4k-hf\")\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```", "source": "github_repos"} -{"code": "def get_models_in_diff():\n fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')\n modified_files = subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()\n relevant_modified_files = [x for x in modified_files if '/models/' in x and x.endswith('.py')]\n model_names = set()\n for file_path in relevant_modified_files:\n model_name = file_path.split('/')[-2]\n model_names.add(model_name)\n return model_names", "docstring": "Finds all models that have been modified in the diff.\n\nReturns:\n A set containing the names of the models that have been modified (e.g. 
{'llama', 'whisper'}).", "source": "github_repos"} -{"code": "def __init__(self, config: RoFormerConfig):\n super().__init__()\n self.summary_type = getattr(config, 'summary_type', 'last')\n if self.summary_type == 'attn':\n raise NotImplementedError\n self.summary = nn.Identity()\n if hasattr(config, 'summary_use_proj') and config.summary_use_proj:\n if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and (config.num_labels > 0):\n num_classes = config.num_labels\n else:\n num_classes = config.hidden_size\n self.summary = nn.Linear(config.hidden_size, num_classes)\n activation_string = getattr(config, 'summary_activation', None)\n self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()\n self.first_dropout = nn.Identity()\n if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:\n self.first_dropout = nn.Dropout(config.summary_first_dropout)\n self.last_dropout = nn.Identity()\n if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:\n self.last_dropout = nn.Dropout(config.summary_last_dropout)", "docstring": "Compute a single vector summary of a sequence hidden states.\n\nArgs:\n config ([`RoFormerConfig`]):\n The config used by the model. Relevant arguments in the config class of the model are (refer to the actual\n config class of your model for the default values it uses):\n\n - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:\n\n - `\"last\"` -- Take the last token hidden state (like XLNet)\n - `\"first\"` -- Take the first token hidden state (like Bert)\n - `\"mean\"` -- Take the mean of all tokens hidden states\n - `\"cls_index\"` -- Supply a Tensor of classification token position (GPT/GPT-2)\n - `\"attn\"` -- Not implemented now, use multi-head attention\n\n - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.\n - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes\n (otherwise to `config.hidden_size`).\n - **summary_activation** (`Optional[str]`) -- Set to `\"tanh\"` to add a tanh activation to the output,\n another string or `None` will add no activation.\n - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.\n - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.", "source": "github_repos"} -{"code": "def __init__(self, dataset: str, level: int=logging.INFO) -> None:\n \"\"\"Clients are initialized with the general CLI configuration.\"\"\"\n self.dataset = dataset\n self.logger = logging.getLogger(f'{__name__}.{type(self).__name__}')\n self.logger.setLevel(level)", "docstring": "Weather data provider client interface.\n\n Defines methods and properties required to efficiently interact with weather\n data providers.\n\nAttributes:\n config: A config that contains pipeline parameters, such as API keys.\n level: Default log level for the client.", "source": "github_repos"} -{"code": "def expand_dims(x, axis=-1):\n return array_ops.expand_dims(x, axis)", "docstring": "Adds a 1-sized dimension at index \"axis\".\n\nArgs:\n x: A tensor or variable.\n axis: Position where to add a new axis.\n\nReturns:\n A tensor with expanded dimensions.", "source": "github_repos"} -{"code": "def AsyncPopenArgs(self):\n args = {}\n if self.operating_system == OperatingSystem.WINDOWS:\n args['close_fds'] = True\n detached_process = 8\n 
create_new_process_group = 512\n args['creationflags'] = detached_process | create_new_process_group\n else:\n args['preexec_fn'] = os.setsid\n args['close_fds'] = True\n args['stdin'] = subprocess.PIPE\n args['stdout'] = subprocess.PIPE\n args['stderr'] = subprocess.PIPE\n return args", "docstring": "Returns the args for spawning an async process using Popen on this OS.\n\n Make sure the main process does not wait for the new process. On windows\n this means setting the 0x8 creation flag to detach the process.\n\n Killing a group leader kills the whole group. Setting creation flag 0x200 on\n Windows or running setsid on *nix makes sure the new process is in a new\n session with the new process the group leader. This means it can't be killed\n if the parent is killed.\n\n Finally, all file descriptors (FD) need to be closed so that waiting for the\n output of the main process does not inadvertently wait for the output of the\n new process, which means waiting for the termination of the new process.\n If the new process wants to write to a file, it can open new FDs.\n\nReturns:\n {str:}, The args for spawning an async process using Popen on this OS.", "source": "github_repos"} -{"code": "def __eq__(self, other):\n return isinstance(other, ArgumentPlaceholder)", "docstring": "Tests for equality of two placeholder objects.\n\nArgs:\n other: Another placeholder object to compare to.\n\n This method is used only for test code. All placeholder objects are\n equal to each other.", "source": "github_repos"} -{"code": "def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:\n if labels is not None and self.config.num_labels == 1:\n raise ValueError('The number of labels should be greater than one')\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n outputs = self.backbone.forward_with_filtered_kwargs(pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)\n features = outputs.feature_maps\n logits = self.decode_head(features)\n logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)\n auxiliary_logits = None\n if self.auxiliary_head is not None:\n auxiliary_logits = self.auxiliary_head(features)\n auxiliary_logits = nn.functional.interpolate(auxiliary_logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)\n loss = loss_fct(logits, labels)\n if auxiliary_logits is not None:\n auxiliary_loss = loss_fct(auxiliary_logits, labels)\n loss += self.config.auxiliary_loss_weight * auxiliary_loss\n if not return_dict:\n if output_hidden_states:\n output = (logits,) + outputs[1:]\n else:\n output = (logits,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n Ground truth semantic 
segmentation maps for computing the loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).\n\nExample:\n ```python\n >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation\n >>> from PIL import Image\n >>> from huggingface_hub import hf_hub_download\n\n >>> image_processor = AutoImageProcessor.from_pretrained(\"openmmlab/upernet-convnext-tiny\")\n >>> model = UperNetForSemanticSegmentation.from_pretrained(\"openmmlab/upernet-convnext-tiny\")\n\n >>> filepath = hf_hub_download(\n ... repo_id=\"hf-internal-testing/fixtures_ade20k\", filename=\"ADE_val_00000001.jpg\", repo_type=\"dataset\"\n ... )\n >>> image = Image.open(filepath).convert(\"RGB\")\n\n >>> inputs = image_processor(images=image, return_tensors=\"pt\")\n\n >>> outputs = model(**inputs)\n\n >>> logits = outputs.logits # shape (batch_size, num_labels, height, width)\n >>> list(logits.shape)\n [1, 150, 512, 512]\n ```", "source": "github_repos"} -{"code": "def _benchmarkFetch(self, name, target, size, iters):\n times = []\n with ops.Graph().as_default():\n v = variables.Variable(random_ops.random_normal([size]))\n with session.Session(target) as sess:\n sess.run(v.initializer)\n sess.run(v)\n for _ in range(iters):\n start_time = time.time()\n sess.run(v)\n end_time = time.time()\n times.append(end_time - start_time)\n print('%s %d %f' % (name, size, np.median(times)))\n self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", "docstring": "Runs a microbenchmark to measure the cost of fetching a tensor.\n\n Reports the median cost of fetching a tensor of `size` * `sizeof(float)`\n bytes.\n\nArgs:\n name: A human-readable name for logging the output.\n target: The session target to use for the benchmark.\n size: The number of floating-point numbers to be fetched.\n iters: The number of iterations to perform.", "source": "github_repos"} -{"code": "def __init__(self, argv_or_options, command_line=False):\n argument_parser = make_parser()\n if command_line:\n assert isinstance(argv_or_options, list)\n options = argument_parser.parse_args(argv_or_options)\n else:\n if isinstance(argv_or_options, list):\n raise TypeError('Do not construct an Options object directly; call Options.create() instead.')\n options = argv_or_options\n for name, default in _LIBRARY_ONLY_OPTIONS.items():\n if not hasattr(options, name):\n setattr(options, name, default)\n names = set(vars(options))\n opt_map = {k: v.option_strings[-1] for k, v in argument_parser.actions.items() if v.option_strings}\n try:\n Postprocessor(names, opt_map, options, self).process()\n except PostprocessingError as e:\n if command_line:\n argument_parser.error(str(e))\n else:\n raise", "docstring": "Parse and encapsulate the configuration options.\n\n Also sets up some basic logger configuration.\n\n IMPORTANT: If creating an Options object from code, do not construct it\n directly! 
Call Options.create() instead.\n\nArgs:\n argv_or_options: Either sys.argv[1:] (sys.argv[0] is the main script), or\n already parsed options object returned by ArgumentParser.parse_args.\n command_line: Set this to true when argv_or_options == sys.argv[1:].\n\nRaises:\n sys.exit(2): bad option or input filenames.", "source": "github_repos"} -{"code": "def _on_fail(self, record):\n self.on_fail(record)", "docstring": "Proxy function to guarantee the base implementation of on_fail is\n called.\n\nArgs:\n record: records.TestResultRecord, a copy of the test record for\n this test, containing all information of the test execution\n including exception objects.", "source": "github_repos"} -{"code": "def _get_test_methods(self, test_names):\n test_methods = []\n for test_name in test_names:\n if test_name.startswith(TEST_SELECTOR_REGEX_PREFIX):\n regex_matching_methods = self._get_regex_matching_test_methods(test_name.removeprefix(TEST_SELECTOR_REGEX_PREFIX))\n test_methods += regex_matching_methods\n continue\n self._assert_valid_test_name(test_name)\n if test_name not in self.get_existing_test_names():\n raise Error(f'{self.TAG} does not have test method {test_name}.')\n if hasattr(self, test_name):\n test_method = getattr(self, test_name)\n elif test_name in self._generated_test_table:\n test_method = self._generated_test_table[test_name]\n test_methods.append((test_name, test_method))\n return test_methods", "docstring": "Resolves test method names to bound test methods.\n\nArgs:\n test_names: A list of strings, each string is a test method name or a\n regex for matching test names.\n\nReturns:\n A list of tuples of (string, function). String is the test method\n name, function is the actual python method implementing its logic.\n\nRaises:\n Error: The test name does not follow naming convention 'test_*'.\n This can only be caused by user input.", "source": "github_repos"} -{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n image_height, image_width = get_image_size(image, input_data_format)\n target_height, target_width = (size['height'], size['width'])\n if image_width <= target_width and image_height <= target_height:\n return image\n height_scale_factor = target_height / image_height\n width_scale_factor = target_width / image_width\n optimal_scale_factor = min(height_scale_factor, width_scale_factor)\n new_height = int(image_height * optimal_scale_factor)\n new_width = int(image_width * optimal_scale_factor)\n scaled_image = resize(image=image, size=(new_height, new_width), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)\n return scaled_image", "docstring": "Resize an image to `(size[\"height\"], size[\"width\"])`.\n\nArgs:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\nReturns:\n `np.ndarray`: The resized image.", "source": "github_repos"} -{"code": "def response_utf8_stream(response, chunksize):\n leftovers = b''\n while True:\n chunk = leftovers + response.read(chunksize)\n position = find_utf8_split(chunk)\n if position < len(chunk):\n leftovers = chunk[position:]\n chunk = chunk[:position]\n else:\n leftovers = b''\n if chunk:\n yield chunk.decode('UTF-8')\n else:\n break", "docstring": "Re-aligns a streaming buffer with UTF-8 boundraries.\n\n Buffers incomplete utf-8 characters to next chunk. Chunks are always returned as <= chunksize.\n In the future may add bytes as input type in addition to io using type detect.\n\n Parameters:\n reponse (io) - buffer containing bytes data.\n\nReturns:\n (bytes generator) - the reponse buffer in chunksize aligned to utf-8 boundries.", "source": "github_repos"} -{"code": "def max(self):\n if self.is_quantized or self.base_dtype in (bool, string, complex64, complex128):\n raise TypeError(f'Cannot find maximum value of {self} with {('quantized type' if self.is_quantized else 'type')} {self.base_dtype}.')\n try:\n return ml_dtypes.finfo(self.as_numpy_dtype).max\n except:\n try:\n return ml_dtypes.iinfo(self.as_numpy_dtype).max\n except:\n raise TypeError(f'Cannot find maximum value of {self}.')", "docstring": "Returns the maximum representable value in this data type.\n\nRaises:\n TypeError: if this is a non-numeric, unordered, or quantized type.", "source": "github_repos"} -{"code": "def serialize_state(self, name=None):\n if self._reader_ref.dtype == dtypes.resource:\n return gen_io_ops.reader_serialize_state_v2(self._reader_ref, name=name)\n else:\n return gen_io_ops.reader_serialize_state(self._reader_ref, name=name)", "docstring": "Produce a string tensor that encodes the state of a reader.\n\n Not all Readers support being serialized, so this can produce an\n Unimplemented error.\n\nArgs:\n name: A name for the operation (optional).\n\nReturns:\n A string Tensor.", "source": "github_repos"} -{"code": "def _flatten_and_filter_composite(maybe_composite, non_composite_output, composite_output=None):\n if isinstance(maybe_composite, composite_tensor.CompositeTensor):\n num_components = len(nest.flatten(maybe_composite, expand_composites=True))\n return (composite_output,) * num_components\n return non_composite_output", "docstring": "For an input, replaced the input by a tuple if the input is composite.\n\n If `maybe_composite` is not composite, return the parameter\n `non_composite_output` otherwise return a tuple which consists of the value of\n the parameter `composite_output` the same number of times as there are\n components of the composite tensor.\n\n This is useful for computing a mask when flattening nested data with\n `expand_composites=True`. 
For example\n\n ```python\n nest.flatten(data, expand_composites=True)\n ```\n\n and\n\n ```python\n nest.flatten(nest.map(\n data, lambda x: _flatten_and_filter_composite(x, False, True)))\n ```\n\n will have the same length and second will be True if the tensor in the first\n is derived from a expanding a composite tensor.\n\nArgs:\n maybe_composite: A value to test for being a composite tensor.\n non_composite_output: The value to return when `maybe_composite` is not a\n composite.\n composite_output: the value to fill the output tuple with if\n `maybe_composite` is a composite.\n\nReturns:\n `non_composite_output` or a tuple with multiple copies of\n `composite_output`.", "source": "github_repos"} -{"code": "def run_task_tests(self, task, torch_dtype='float32'):\n if task not in self.pipeline_model_mapping:\n self.skipTest(f'{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: `{task}` is not in `self.pipeline_model_mapping` for `{self.__class__.__name__}`.')\n model_architectures = self.pipeline_model_mapping[task]\n if not isinstance(model_architectures, tuple):\n model_architectures = (model_architectures,)\n at_least_one_model_is_tested = False\n for model_architecture in model_architectures:\n model_arch_name = model_architecture.__name__\n model_type = model_architecture.config_class.model_type\n for _prefix in ['Flax', 'TF']:\n if model_arch_name.startswith(_prefix):\n model_arch_name = model_arch_name[len(_prefix):]\n break\n if model_arch_name not in tiny_model_summary:\n continue\n tokenizer_names = tiny_model_summary[model_arch_name]['tokenizer_classes']\n image_processor_names = []\n feature_extractor_names = []\n processor_classes = tiny_model_summary[model_arch_name]['processor_classes']\n for cls_name in processor_classes:\n if 'ImageProcessor' in cls_name:\n image_processor_names.append(cls_name)\n elif 'FeatureExtractor' in cls_name:\n feature_extractor_names.append(cls_name)\n processor_names = PROCESSOR_MAPPING_NAMES.get(model_type, None)\n if not isinstance(processor_names, (list, tuple)):\n processor_names = [processor_names]\n commit = None\n if model_arch_name in tiny_model_summary and 'sha' in tiny_model_summary[model_arch_name]:\n commit = tiny_model_summary[model_arch_name]['sha']\n repo_name = f'tiny-random-{model_arch_name}'\n if TRANSFORMERS_TINY_MODEL_PATH != 'hf-internal-testing':\n repo_name = model_arch_name\n self.run_model_pipeline_tests(task, repo_name, model_architecture, tokenizer_names=tokenizer_names, image_processor_names=image_processor_names, feature_extractor_names=feature_extractor_names, processor_names=processor_names, commit=commit, torch_dtype=torch_dtype)\n at_least_one_model_is_tested = True\n if task in task_to_pipeline_and_spec_mapping:\n pipeline, hub_spec = task_to_pipeline_and_spec_mapping[task]\n compare_pipeline_args_to_hub_spec(pipeline, hub_spec)\n if not at_least_one_model_is_tested:\n self.skipTest(f'{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: Could not find any model architecture in the tiny models JSON file for `{task}`.')", "docstring": "Run pipeline tests for a specific `task`\n\nArgs:\n task (`str`):\n A task name. This should be a key in the mapping `pipeline_test_mapping`.\n torch_dtype (`str`, `optional`, defaults to `'float32'`):\n The torch dtype to use for the model. 
Can be used for FP16/other precision inference.", "source": "github_repos"} -{"code": "def replace_batch_norm(model):\n for name, module in model.named_children():\n if isinstance(module, nn.BatchNorm2d):\n new_module = DFineFrozenBatchNorm2d(module.num_features)\n if not module.weight.device == torch.device('meta'):\n new_module.weight.data.copy_(module.weight)\n new_module.bias.data.copy_(module.bias)\n new_module.running_mean.data.copy_(module.running_mean)\n new_module.running_var.data.copy_(module.running_var)\n model._modules[name] = new_module\n if len(list(module.children())) > 0:\n replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `DFineFrozenBatchNorm2d`.\n\nArgs:\n model (torch.nn.Module):\n input model", "source": "github_repos"} -{"code": "def __init__(self, columns: list[str]) -> None:\n self.columns = columns", "docstring": "Base Opertation class data processing transformations.\n\nArgs:\n columns: List of column names to apply the transformation.", "source": "github_repos"} -{"code": "def recover_session(self, master: str, saver: saver_lib.Saver=None, checkpoint_dir: str=None, checkpoint_filename_with_path: str=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None) -> Tuple[session.Session, bool]:\n sess, is_loaded_from_checkpoint = self._restore_checkpoint(master, saver, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config)\n local_init_success, msg = self._try_run_local_init_op(sess)\n if not is_loaded_from_checkpoint:\n return (sess, False)\n restoring_file = checkpoint_dir or checkpoint_filename_with_path\n if not local_init_success:\n logging.info('Restoring model from %s did not make model ready for local init: %s', restoring_file, msg)\n return (sess, False)\n is_ready, msg = self._model_ready(sess)\n if not is_ready:\n logging.info('Restoring model from %s did not make model ready: %s', restoring_file, msg)\n return (sess, False)\n logging.info('Restored model from %s', restoring_file)\n return (sess, is_loaded_from_checkpoint)", "docstring": "Creates a `Session`, recovering if possible.\n\n Creates a new session on 'master'. If the session is not initialized\n and can be recovered from a checkpoint, recover it.\n\nArgs:\n master: `String` representation of the TensorFlow master to use.\n saver: A `Saver` object used to restore a model.\n checkpoint_dir: Path to the checkpoint files. 
The latest checkpoint in the\n dir will be used to restore.\n checkpoint_filename_with_path: Full file name path to the checkpoint file.\n wait_for_checkpoint: Whether to wait for checkpoint to become available.\n max_wait_secs: Maximum time to wait for checkpoints to become available.\n config: Optional `ConfigProto` proto used to configure the session.\n\nReturns:\n A pair (sess, initialized) where 'initialized' is `True` if\n the session could be recovered and initialized, `False` otherwise.\n\nRaises:\n ValueError: If both checkpoint_dir and checkpoint_filename_with_path are\n set.", "source": "github_repos"} -{"code": "def _strip_layer_names(self, summaries, model_type):\n result = set()\n for s in summaries:\n if '/' not in s.tag:\n raise ValueError(f'tag has no layer name: {s.tag!r}')\n start_from = 2 if 'subclass' in model_type else 1\n new_tag = '/'.join(s.tag.split('/')[start_from:])\n result.add(s._replace(tag=new_tag))\n return result", "docstring": "Deduplicate summary names modulo layer prefix.\n\n This removes the first slash-component of each tag name: for\n instance, \"foo/bar/baz\" becomes \"bar/baz\".\n\nArgs:\n summaries: A `set` of `_ObservedSummary` values.\n model_type: The model type currently being tested.\n\nReturns:\n A new `set` of `_ObservedSummary` values with layer prefixes\n removed.", "source": "github_repos"} -{"code": "def _broadcast_half(ac_0: _LayerBroadcaster, a_1: RowPartition) -> Tuple[_LayerBroadcaster, RowPartition]:\n c_1 = ac_0.broadcast_row_partition(a_1)\n old_value_rowids = array_ops.gather(ac_0.gather_index, c_1.value_rowids())\n old_row_starts = array_ops.gather(a_1.row_splits(), old_value_rowids)\n gather_index = old_row_starts + c_1.offsets_in_rows()\n return [_LayerBroadcaster.from_gather_index(gather_index), c_1]", "docstring": "Does a NOOP broadcast of a_1.\n\n *-ac_0-->*\n | |\n a_1 c_1\n | |\n V V\n *-ac_1-->*\n\n Note that by definition this cannot fail: there is always a well-defined\n NOOP broadcast. This is usually intended as half of broadcasting two shapes\n together.\n\nArgs:\n ac_0: previous LayerBroadcaster\n a_1: previous RowPartition\n\nReturns:\n [ac_1, c_1] where ac_1 is the next LayerBroadcaster, and c_1 is the\n broadcast RowPartition", "source": "github_repos"} -{"code": "def gauge(namespace: Union[Type, str], name: str) -> 'Metrics.DelegatingGauge':\n namespace = Metrics.get_namespace(namespace)\n return Metrics.DelegatingGauge(MetricName(namespace, name))", "docstring": "Obtains or creates a Gauge metric.\n\n Gauge metrics are restricted to integer-only values.\n\nArgs:\n namespace: A class or string that gives the namespace to a metric\n name: A string that gives a unique name to a metric\n\nReturns:\n A Distribution object.", "source": "github_repos"} -{"code": "def __le__(self: EventSetOrNode, other: Any) -> EventSetOrNode:\n if isinstance(other, self.__class__):\n from temporian.core.operators.binary import less_equal\n return less_equal(input_left=self, input_right=other)\n if isinstance(other, T_SCALAR):\n from temporian.core.operators.scalar import less_equal_scalar\n return less_equal_scalar(input=self, value=other)\n self._raise_error('compare', other, '(int,float)')\n assert False", "docstring": "Computes less equal (`self <= other`) element-wise with another\n [`EventSet`][temporian.EventSet] or a scalar value.\n\n If an EventSet, each feature in `self` is compared element-wise to the\n feature in `other` in the same position. 
`self` and `other` must have\n the same sampling and the same number of features.\n\n If a scalar value, each item in each feature in `input` is compared to\n `value`.\n\n Note that it will always return False on NaN elements.\n\n Example with EventSet:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f1\": [0, 100, 200]}\n ... )\n >>> b = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f2\": [-10, 100, 5]},\n ... same_sampling_as=a\n ... )\n\n >>> c = a <= b\n >>> c\n indexes: []\n features: [('f1', bool_)]\n events:\n (3 events):\n timestamps: [1. 2. 3.]\n 'f1': [False True False]\n ...\n\n ```\n\n Example with scalar:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f1\": [0, 100, 200], \"f2\": [-10, 100, 5]}\n ... )\n\n >>> b = a <= 100\n >>> b\n indexes: []\n features: [('f1', bool_), ('f2', bool_)]\n events:\n (3 events):\n timestamps: [1. 2. 3.]\n 'f1': [ True True False]\n 'f2': [ True True True]\n ...\n\n ```\n\nArgs:\n other: EventSet or scalar value.\n\nReturns:\n Result of the comparison.", "source": "github_repos"} -{"code": "def load_checkpoint(ckpt_dir_or_file):\n filename = _get_checkpoint_filename(ckpt_dir_or_file)\n if filename is None:\n raise ValueError(\"Couldn't find 'checkpoint' file or checkpoints in given directory %s\" % ckpt_dir_or_file)\n return py_checkpoint_reader.NewCheckpointReader(filename)", "docstring": "Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.\n\n If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,\n reader for the latest checkpoint is returned.\n\n Example usage:\n\n ```python\n import tensorflow as tf\n a = tf.Variable(1.0)\n b = tf.Variable(2.0)\n ckpt = tf.train.Checkpoint(var_list={'a': a, 'b': b})\n ckpt_path = ckpt.save('tmp-ckpt')\n reader= tf.train.load_checkpoint(ckpt_path)\n print(reader.get_tensor('var_list/a/.ATTRIBUTES/VARIABLE_VALUE')) # 1.0\n ```\n\nArgs:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint\n file.\n\nReturns:\n `CheckpointReader` object.\n\nRaises:\n ValueError: If `ckpt_dir_or_file` resolves to a directory with no\n checkpoints.", "source": "github_repos"} -{"code": "def _assert_weights_created(self):\n if self.dynamic:\n return\n if 'build' in self.__class__.__dict__ and self.__class__ != Model and (not self.built):\n raise ValueError('Weights for model %s have not yet been created. Weights are created when the Model is first called on inputs or `build()` is called with an `input_shape`.' % self.name)", "docstring": "Asserts that all the weights for the model have been created.\n\n For a non-dynamic model, the weights must already be created after the\n layer has been called. For a dynamic model, the exact list of weights can\n never be known for certain since it may change at any time during execution.\n\n We run this check right before accessing weights or getting the Numpy value\n for the current weights. Otherwise, if the layer has never been called,\n the user would just get an empty list, which is misleading.\n\nRaises:\n ValueError: if the weights of the network has not yet been created.", "source": "github_repos"} -{"code": "def rot90(array, k=1, axes=(0, 1)):\n array = convert_to_tensor(array)\n if array.shape.rank < 2:\n raise ValueError(f'Input array must have at least 2 dimensions. Received: array.ndim={array.shape.rank}')\n if len(axes) != 2 or axes[0] == axes[1]:\n raise ValueError(f'Invalid axes: {axes}. 
Axes must be a tuple of two different dimensions.')\n k = k % 4\n if k == 0:\n return array\n axes = tuple((axis if axis >= 0 else array.shape.rank + axis for axis in axes))\n perm = [i for i in range(array.shape.rank) if i not in axes]\n perm.extend(axes)\n array = tf.transpose(array, perm)\n shape = tf.shape(array)\n non_rot_shape = shape[:-2]\n h, w = (shape[-2], shape[-1])\n array = tf.reshape(array, tf.concat([[-1], [h, w]], axis=0))\n array = tf.reverse(array, axis=[2])\n array = tf.transpose(array, [0, 2, 1])\n if k % 2 == 1:\n final_h, final_w = (w, h)\n else:\n final_h, final_w = (h, w)\n if k > 1:\n array = tf.reshape(array, tf.concat([[-1], [final_h, final_w]], axis=0))\n for _ in range(k - 1):\n array = tf.reverse(array, axis=[2])\n array = tf.transpose(array, [0, 2, 1])\n final_shape = tf.concat([non_rot_shape, [final_h, final_w]], axis=0)\n array = tf.reshape(array, final_shape)\n inv_perm = [0] * len(perm)\n for i, p in enumerate(perm):\n inv_perm[p] = i\n array = tf.transpose(array, inv_perm)\n return array", "docstring": "Rotate an array by 90 degrees in the specified plane.\n\nArgs:\n array: Input tensor\n k: Number of 90-degree rotations (default=1)\n axes: Tuple of two axes that define the plane of rotation.\n Defaults to (0, 1).\n\nReturns:\n Rotated tensor with correct shape transformation", "source": "github_repos"} -{"code": "def __len__(self):\n raise NotImplementedError", "docstring": "Number of batch in the Sequence.\n\nReturns:\n The number of batches in the Sequence.", "source": "github_repos"} -{"code": "def copy(self, source_file_names, destination_file_names):\n raise NotImplementedError", "docstring": "Recursively copy the file tree from the source to the destination\n\nArgs:\n source_file_names: list of source file objects that needs to be copied\n destination_file_names: list of destination of the new object\n\nRaises:\n ``BeamIOError``: if any of the copy operations fail", "source": "github_repos"} -{"code": "def _merge_choice_field(self, json_value: Any, choice_field: descriptor.FieldDescriptor, field_name: str, parent: message.Message) -> None:\n choice_field_name = _get_choice_field_name(choice_field, field_name)\n choice_field_map = _get_field_map(choice_field.message_type)\n choice_value_field = choice_field_map.get(choice_field_name)\n if choice_value_field is None:\n raise ValueError(f'Cannot find {choice_field_name!r} on {choice_field.full_name}')\n choice_message = proto_utils.set_in_parent_or_add(parent, choice_field)\n self._merge_field(json_value, choice_value_field, choice_message)", "docstring": "Creates a Message based on the choice_field Descriptor and json_value.\n\n The resulting message is merged into parent.\n\nArgs:\n json_value: The JSON value to merge into a message of the type described\n by choice_field.\n choice_field: The field descriptor of the FHIR choice type on parent.\n field_name: The nested field name of the choice type, e.g.: _valueBoolean.\n parent: The parent Message to merge into.", "source": "github_repos"} -{"code": "def print_layer_summary_with_connections(layer):\n try:\n output_shape = layer.output_shape\n except AttributeError:\n output_shape = 'multiple'\n connections = []\n for node in layer._inbound_nodes:\n if relevant_nodes and node not in relevant_nodes:\n continue\n for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():\n connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index, tensor_index))\n name = layer.name\n cls_name = layer.__class__.__name__\n if not 
connections:\n first_connection = ''\n else:\n first_connection = connections[0]\n fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection]\n print_row(fields, positions)\n if len(connections) > 1:\n for i in range(1, len(connections)):\n fields = ['', '', '', connections[i]]\n print_row(fields, positions)", "docstring": "Prints a summary for a single layer (including topological connections).\n\nArgs:\n layer: target layer.", "source": "github_repos"} -{"code": "def as_default(self, step=None):\n return _SummaryContextManager(self, step)", "docstring": "Returns a context manager that enables summary writing.\n\n For convenience, if `step` is not None, this function also sets a default\n value for the `step` parameter used in summary-writing functions elsewhere\n in the API so that it need not be explicitly passed in every such\n invocation. The value can be a constant or a variable.\n\n Note: when setting `step` in a @tf.function, the step value will be\n captured at the time the function is traced, so changes to the step outside\n the function will not be reflected inside the function unless using\n a `tf.Variable` step.\n\n For example, `step` can be used as:\n\n ```python\n with writer_a.as_default(step=10):\n tf.summary.scalar(tag, value) # Logged to writer_a with step 10\n with writer_b.as_default(step=20):\n tf.summary.scalar(tag, value) # Logged to writer_b with step 20\n tf.summary.scalar(tag, value) # Logged to writer_a with step 10\n ```\n\nArgs:\n step: An `int64`-castable default step value, or `None`. When not `None`,\n the current step is captured, replaced by a given one, and the original\n one is restored when the context manager exits. When `None`, the current\n step is not modified (and not restored when the context manager exits).\n\nReturns:\n The context manager.", "source": "github_repos"} -{"code": "def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, boxes: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, word_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n\n def _is_valid_text_input(t):\n if isinstance(t, str):\n return True\n elif isinstance(t, (list, tuple)):\n if len(t) == 0:\n return True\n elif isinstance(t[0], str):\n return True\n elif isinstance(t[0], (list, tuple)):\n return len(t[0]) == 0 or isinstance(t[0][0], str)\n else:\n return False\n else:\n return False\n if text_pair is not None:\n if not _is_valid_text_input(text):\n raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). 
')\n if not isinstance(text_pair, (list, tuple)):\n raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n elif not isinstance(text, (list, tuple)):\n raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n if text_pair is not None:\n is_batched = isinstance(text, (list, tuple))\n else:\n is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))\n words = text if text_pair is None else text_pair\n if boxes is None:\n raise ValueError('You must provide corresponding bounding boxes')\n if is_batched:\n if len(words) != len(boxes):\n raise ValueError('You must provide words and boxes for an equal amount of examples')\n for words_example, boxes_example in zip(words, boxes):\n if len(words_example) != len(boxes_example):\n raise ValueError('You must provide as many words as there are bounding boxes')\n elif len(words) != len(boxes):\n raise ValueError('You must provide as many words as there are bounding boxes')\n if is_batched:\n if text_pair is not None and len(text) != len(text_pair):\n raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')\n batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text\n is_pair = bool(text_pair is not None)\n return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n else:\n return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of\n sequences with word-level normalized bounding boxes and optional labels.\n\nArgs:\n text (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings\n (words of a single example or questions of a batch of examples) or a list of list of strings (batch of\n words).\n text_pair (`List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence should be a list of strings\n (pretokenized string).\n boxes (`List[List[int]]`, `List[List[List[int]]]`):\n Word-level bounding boxes. 
Each bounding box should be normalized to be on a 0-1000 scale.\n word_labels (`List[int]`, `List[List[int]]`, *optional*):\n Word-level integer labels (for token classification tasks such as FUNSD, CORD).", "source": "github_repos"} -{"code": "def __init__(self, custom_op_registerers=None, **kwargs):\n self._custom_op_registerers = custom_op_registerers or []\n super(InterpreterWithCustomOps, self).__init__(**kwargs)", "docstring": "Constructor.\n\nArgs:\n custom_op_registerers: List of str (symbol names) or functions that take a\n pointer to a MutableOpResolver and register a custom op. When passing\n functions, use a pybind function that takes a uintptr_t that can be\n recast as a pointer to a MutableOpResolver.\n **kwargs: Additional arguments passed to Interpreter.\n\nRaises:\n ValueError: If the interpreter was unable to create.", "source": "github_repos"} -{"code": "def forward(self, input_ids: torch.LongTensor=None, pixel_values: torch.FloatTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[KwargsForCausalLM]) -> Union[Tuple, GotOcr2CausalLMOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)\n hidden_states = outputs[0]\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)\n return GotOcr2CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, GotOcr2ForConditionalGeneration, TextStreamer\n\n >>> model = GotOcr2ForConditionalGeneration.from_pretrained(\"stepfun-ai/GOT-OCR-2.0-hf\").to(\"cuda\")\n >>> processor = AutoProcessor.from_pretrained(\"stepfun-ai/GOT-OCR-2.0-hf\")\n\n >>> url = \"https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = processor(image, return_tensors=\"pt\", color=\"green\").to(\"cuda\")\n\n >>> # Generate\n >>> streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)\n >>> generate_ids = model.generate(\n ... **inputs,\n ... do_sample=False,\n ... tokenizer = processor.tokenizer,\n ... stop_strings='<|im_end|>',\n ... streamer=streamer,\n ... max_new_tokens=4096,\n ... )\n \"You should keep in mind what features from the module should be used, especially\n when you're planning to sell a template.\"\n ```", "source": "github_repos"} -{"code": "def setup_mock_socket_file(self, mock_create_connection, resp=MOCK_RESP):\n fake_file = self.MockSocketFile(resp)\n fake_conn = mock.MagicMock()\n fake_conn.makefile.return_value = fake_file\n mock_create_connection.return_value = fake_conn\n return fake_file", "docstring": "Sets up a fake socket file from the mock connection.\n\nArgs:\n mock_create_connection: The mock method for creating a method.\n resp: (str) response to give. MOCK_RESP by default.\n\nReturns:\n The mock file that will be injected into the code.", "source": "github_repos"} -{"code": "def rescale(self, image: 'torch.Tensor', scale: float, offset: Optional[bool]=True, **kwargs) -> 'torch.Tensor':\n rescaled_image = image * scale\n if offset:\n rescaled_image -= 1\n return rescaled_image", "docstring": "Rescale an image by a scale factor.\n\n If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. 
If `scale` is\n 1/127.5, the image is rescaled between [-1, 1].\n image = image * scale - 1\n\n If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].\n image = image * scale\n\nArgs:\n image (`torch.Tensor`):\n Image to rescale.\n scale (`float`):\n The scaling factor to rescale pixel values by.\n offset (`bool`, *optional*):\n Whether to scale the image in both negative and positive directions.\n\nReturns:\n `torch.Tensor`: The rescaled image.", "source": "github_repos"} -{"code": "def convert_to_sliceable(arrays, target_backend=None):\n\n def convert_single_array(x):\n if x is None:\n return x\n if isinstance(x, np.ndarray) and str(x.dtype) == 'object' and (backend.backend() == 'tensorflow') and all((isinstance(e, str) for e in x)):\n x = tf.convert_to_tensor(x, dtype='string')\n if isinstance(x, np.ndarray):\n sliceable_class = NumpySliceable\n elif data_adapter_utils.is_tensorflow_tensor(x):\n if data_adapter_utils.is_tensorflow_ragged(x):\n sliceable_class = TensorflowRaggedSliceable\n elif data_adapter_utils.is_tensorflow_sparse(x):\n sliceable_class = TensorflowSparseSliceable\n else:\n sliceable_class = TensorflowSliceable\n elif data_adapter_utils.is_jax_array(x):\n if data_adapter_utils.is_jax_sparse(x):\n sliceable_class = JaxSparseSliceable\n else:\n x = np.asarray(x)\n sliceable_class = NumpySliceable\n elif data_adapter_utils.is_torch_tensor(x):\n sliceable_class = TorchSliceable\n elif pandas is not None and isinstance(x, pandas.DataFrame):\n sliceable_class = PandasDataFrameSliceable\n elif pandas is not None and isinstance(x, pandas.Series):\n sliceable_class = PandasSeriesSliceable\n elif data_adapter_utils.is_scipy_sparse(x):\n sliceable_class = ScipySparseSliceable\n elif hasattr(x, '__array__'):\n x = np.asarray(x)\n sliceable_class = NumpySliceable\n else:\n raise ValueError(f'Expected a NumPy array, tf.Tensor, tf.RaggedTensor, tf.SparseTensor, jax.np.ndarray, jax.experimental.sparse.JAXSparse, torch.Tensor, Pandas Dataframe, or Pandas Series. Received invalid input: {x} (of type {type(x)})')\n\n def is_non_floatx_float(dtype):\n return dtype is not object and backend.is_float_dtype(dtype) and (not backend.standardize_dtype(dtype) == backend.floatx())\n cast_dtype = None\n if pandas is not None and isinstance(x, pandas.DataFrame):\n if any((is_non_floatx_float(d) for d in x.dtypes.values)):\n cast_dtype = backend.floatx()\n elif is_non_floatx_float(x.dtype):\n cast_dtype = backend.floatx()\n if cast_dtype is not None:\n x = sliceable_class.cast(x, cast_dtype)\n if target_backend is None:\n return sliceable_class(x)\n if target_backend == 'tensorflow':\n return sliceable_class.convert_to_tf_dataset_compatible(x)\n if target_backend == 'jax' and sliceable_class in (TensorflowSliceable, TorchSliceable):\n x = np.asarray(x)\n sliceable_class = NumpySliceable\n return sliceable_class(x)\n return tree.map_structure(convert_single_array, arrays)", "docstring": "Convert a structure of arrays into `Sliceable` instances\n\nArgs:\n arrays: the arrays to convert.\n target_backend: the target backend for the output:\n - `None` indicates that `arrays` will be wrapped into `Sliceable`s\n as-is without using a different representation. This is used by\n `train_validation_split()`.\n - `tensorflow` indicates that\n `Sliceable.convert_to_tf_dataset_compatible` will be called. 
The\n returned structure therefore contains arrays, not `Sliceable`s.\n - `numpy`, `jax` or `torch` indices that the arrays will eventually\n be converted to this backend type after slicing. In this case,\n the intermediary `Sliceable`s may use a different representation\n from the input `arrays` for better performance.\n Returns: the same structure with `Sliceable` instances or arrays.", "source": "github_repos"} -{"code": "def before(self: EventSetOrNode, timestamp: Union[int, float, datetime]) -> EventSetOrNode:\n from temporian.core.operators.filter import before\n return before(self, timestamp=timestamp)", "docstring": "Filters events [`EventSet`][temporian.EventSet] that happened before\n a particular timestamp.\n\n The timestamp can be a datetime if the EventSet's timestamps are unix\n timestamps.\n\n The comparison is strict, meaning that the obtained timestamps would be\n less than (`<`) the provided timestamp.\n\n This operation is equivalent to:\n `input.filter(input.timestamps() < timestamp)`\n\n Usage example:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[0, 1, 5, 6],\n ... features={\"f1\": [0, 10, 50, 60]},\n ... )\n\n >>> a.before(5)\n indexes: []\n features: [('f1', int64)]\n events:\n (2 events):\n timestamps: [0. 1.]\n 'f1': [ 0 10]\n ...\n\n >>> from datetime import datetime\n >>> a = tp.event_set(\n ... timestamps=[datetime(2022, 1, 1), datetime(2022, 1, 2)],\n ... features={\"f1\": [1, 2]},\n ... )\n\n >>> a.before(datetime(2022, 1, 1, 12))\n indexes: []\n features: [('f1', int64)]\n events:\n (1 events):\n timestamps: ['2022-01-01T00:00:00']\n 'f1': [1]\n ...\n\n ```\n\nArgs:\n timestamp: EventSet with a single boolean feature.\n\nReturns:\n Filtered EventSet.", "source": "github_repos"} -{"code": "def compute_gradients(self, loss, var_list=None, **kwargs):\n num_shards = tpu_function.get_tpu_context().number_of_shards\n if num_shards is None:\n logging.warning('CrossShardOptimizer should be used within a tpu_shard_context, but got unset number_of_shards. Assuming 1.')\n num_shards = 1\n subgroup_size = self._verify_and_get_subgroup_size(self._group_assignment, num_shards)\n if num_shards > 1 and self._reduction == losses.Reduction.MEAN:\n if self._group_assignment:\n scale = 1.0 / subgroup_size\n else:\n scale = 1.0 / num_shards\n loss *= scale\n return self._opt.compute_gradients(loss, var_list=var_list, **kwargs)", "docstring": "Compute gradients of \"loss\" for the variables in \"var_list\".\n\n This simply wraps `compute_gradients()` from the real optimizer. The\n gradients will be aggregated in `apply_gradients()` so that user can\n modify the gradients like clipping with per replica global norm if needed.\n The global norm with aggregated gradients can be bad as one replica's huge\n gradients can hurt the gradients from other replicas.\n\n When the CrossShardOptimizer is constructed with\n `reduction == losses.Reduction.MEAN` (default), this function scales the\n loss by `1.0 / num_shards` before computing the gradients. Assuming the\n optimizer uses the default implementation of `compute_gradients()`, the\n gradients of the scaled loss are scaled by `1.0 / num_shards` compared to\n the gradients of the original loss. This scaling factor is important because\n `apply_gradients()` sums gradients across shards, rather than averaging\n them. 
However, the scaling factor must be taken into account when clipping\n the norm of the gradients or performing other postprocessing.\n\nArgs:\n loss: A Tensor containing the value to minimize.\n var_list: Optional list or tuple of `tf.Variable` to update to minimize\n `loss`. Defaults to the list of variables collected in the graph\n under the key `GraphKey.TRAINABLE_VARIABLES`.\n **kwargs: Keyword arguments for compute_gradients().\n\nReturns:\n A list of (gradient, variable) pairs.\n\nRaises:\n ValueError: If not within a tpu_shard_context or group_assignment is\n invalid.", "source": "github_repos"} -{"code": "def get_associated_prs(api: github_api.GitHubAPI, commit_hashes: Sequence[str]) -> Generator[int, None, None]:\n regex = re.compile('PR #(\\\\d+)')\n for commit_hash in commit_hashes:\n response = api.get_commit('openxla/xla', commit_hash)\n message = response['commit']['message']\n if (maybe_match := regex.match(message)):\n pr_number = maybe_match.group(1)\n print(f'Found PR #{pr_number} associated with commit hash {commit_hash}')\n yield int(pr_number)\n print(f\"Didn't find any PRs associated with commit hashes: {commit_hashes}\")", "docstring": "Finds PRs associated with commits.\n\nArgs:\n api: GitHubAPI object which will be used to make requests\n commit_hashes: A sequence of SHAs which may have PRs associated with them\n\nYields:\n Associated pairs of (PR number, SHA), both as strings", "source": "github_repos"} -{"code": "def object_graph_key_mapping(checkpoint_path):\n reader = py_checkpoint_reader.NewCheckpointReader(checkpoint_path)\n object_graph_string = reader.get_tensor(trackable.OBJECT_GRAPH_PROTO_KEY)\n object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()\n object_graph_proto.ParseFromString(object_graph_string)\n names_to_keys = {}\n for node in object_graph_proto.nodes:\n for attribute in node.attributes:\n names_to_keys[attribute.full_name] = attribute.checkpoint_key\n return names_to_keys", "docstring": "Return name to key mappings from the checkpoint.\n\nArgs:\n checkpoint_path: string, path to object-based checkpoint\n\nReturns:\n Dictionary mapping tensor names to checkpoint keys.", "source": "github_repos"} -{"code": "def _gather_implementation(self, per_replica_value, destinations, axis, options):\n raise NotImplementedError('_gather method must be implemented in descendants.')", "docstring": "Implementation of `gather` method of `tf.distribute.CrossDeviceOps`.\n\n Overriding this method is useful for subclass implementers.\n\nArgs:\n per_replica_value: a `tf.distribute.DistributedValues`, or a `tf.Tensor`\n like object.\n destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a\n `tf.Tensor` alike object, or a device string. It specifies the devices\n to gather to. To perform an all-gather, pass the same to `value` and\n `destinations`. Note that if it's a `tf.Variable`, the value is gathered\n to the devices of that variable, this method doesn't update the\n variable.\n axis: specifies the dimension to gather along within each replica's\n tensor.\n options: a `tf.distribute.experimental.CommunicationOptions`. 
See\n `tf.distribute.experimental.CommunicationOptions` for details.\n\nReturns:\n A `tf.Tensor` or `tf.distribute.DistributedValues`.\n\nRaises:\n ValueError: if per_replica_value can't be converted to a\n `tf.distribute.DistributedValues` or if destinations is not a string,\n `tf.Variable` or `tf.distribute.DistributedValues`.", "source": "github_repos"} -{"code": "def binary_crossentropy(target, output, from_logits=False):\n if any_symbolic_tensors((target, output)):\n return BinaryCrossentropy(from_logits=from_logits).symbolic_call(target, output)\n return backend.nn.binary_crossentropy(target, output, from_logits=from_logits)", "docstring": "Computes binary cross-entropy loss between target and output tensor.\n\n The binary cross-entropy loss is commonly used in binary\n classification tasks where each input sample belongs to one\n of the two classes. It measures the dissimilarity between the\n target and output probabilities or logits.\n\nArgs:\n target: The target tensor representing the true binary labels.\n Its shape should match the shape of the `output` tensor.\n output: The output tensor representing the predicted probabilities\n or logits. Its shape should match the shape of the\n `target` tensor.\n from_logits: (optional) Whether `output` is a tensor of logits or\n probabilities.\n Set it to `True` if `output` represents logits; otherwise,\n set it to `False` if `output` represents probabilities.\n Defaults to `False`.\n\nReturns:\n Integer tensor: The computed binary cross-entropy loss between\n `target` and `output`.\n\nExample:\n >>> target = keras.ops.convert_to_tensor([0, 1, 1, 0])\n >>> output = keras.ops.convert_to_tensor([0.1, 0.9, 0.8, 0.2])\n >>> binary_crossentropy(target, output)\n array([0.10536054 0.10536054 0.22314355 0.22314355],\n shape=(4,), dtype=float32)", "source": "github_repos"} -{"code": "def _run_calibration(saved_model_path: str, signature_keys: Sequence[str], tags: Collection[str], force_graph_mode_calibration: bool, representative_dataset_file_map: Mapping[str, quantization_options_pb2.RepresentativeDatasetFile]) -> bool:\n repr_dataset_map = rd.TfRecordRepresentativeDatasetLoader(representative_dataset_file_map).load()\n _run_graph_for_calibration(saved_model_path, signature_keys, tags, repr_dataset_map, force_graph_mode_calibration)\n return True", "docstring": "Runs calibration and adds calibration statistics to exported model.\n\nArgs:\n saved_model_path: Path to the SavedModel to run calibration.\n signature_keys: List of signature keys corresponding to SignatureDefs to run\n calibration on.\n tags: A set of tags that identify the MetaGraphDef.\n force_graph_mode_calibration: If True, runs the calibration in graph mode.\n representative_dataset_file_map: Signature key ->\n `RepresentativeDatasetFile` mapping for running the calibration step. 
Each\n dataset file stores the representative dataset for the function matching\n the signature key.\n\nReturns:\n `True` upon successfully running calibration.", "source": "github_repos"} -{"code": "def create_slot_with_initializer(primary, initializer, shape, dtype, name, colocate_with_primary=True, *, copy_xla_sharding=False):\n validate_shape = shape.is_fully_defined()\n if isinstance(primary, variables.Variable):\n prefix = primary._shared_name\n else:\n prefix = primary.op.name\n with variable_scope.variable_scope(None, prefix + '/' + name):\n if colocate_with_primary:\n distribution_strategy = distribute_lib.get_strategy()\n with distribution_strategy.extended.colocate_vars_with(primary):\n return _create_slot_var(primary, initializer, '', validate_shape, shape, dtype, copy_xla_sharding=copy_xla_sharding)\n else:\n return _create_slot_var(primary, initializer, '', validate_shape, shape, dtype, copy_xla_sharding=copy_xla_sharding)", "docstring": "Creates a slot initialized using an `Initializer`.\n\n The type of the slot is determined by the given value.\n\nArgs:\n primary: The primary `Variable` or `Tensor`.\n initializer: An `Initializer`. The initial value of the slot.\n shape: Shape of the initial value of the slot.\n dtype: Type of the value of the slot.\n name: Name to use for the slot variable.\n colocate_with_primary: Boolean. If True the slot is located\n on the same device as `primary`.\n copy_xla_sharding: Boolean. If True also copies XLA sharding\n from primary.\n\nReturns:\n A `Variable` object.", "source": "github_repos"} -{"code": "def generate_row(fake: Faker, config: Config) -> Row:\n row: Row = {}\n for column in config:\n row[column.bq_name] = column.value(fake)\n return row", "docstring": "Generates a Row of Faker data, conforming to the config.\n\nArgs:\n * fake: Faker instance\n * config: List of Columns\n\nReturns:\n * Row of Faker data", "source": "github_repos"} -{"code": "def generate_examples(options):\n _prepare_dir(options)\n out = options.zip_to_output\n if options.multi_gen_state:\n test_name = options.multi_gen_state.test_name\n else:\n test_name = re.sub('(_(|with-flex|forward-compat|edgetpu|mlir-quant))?(_xnnpack)?\\\\.zip$', '', out, count=1)\n test_function_name = 'make_%s_tests' % test_name\n test_function = get_test_function(test_function_name)\n if test_function is None:\n raise RuntimeError(\"Can't find a test function to create %r. 
Tried %r\" % (out, test_function_name))\n if options.make_forward_compat_test:\n future_date = datetime.date.today() + datetime.timedelta(days=30)\n with tf.compat.forward_compatibility_horizon(future_date.year, future_date.month, future_date.day):\n test_function(options)\n else:\n test_function(options)", "docstring": "Generate examples for a test set.\n\nArgs:\n options: Options containing information to generate examples.\n\nRaises:\n RuntimeError: if the test function cannot be found.", "source": "github_repos"} -{"code": "def saved_model_format_scope(value, **kwargs):\n previous_format = _thread_local_data.saved_model_format\n previous_kwargs = _thread_local_data.save_kwargs\n try:\n _thread_local_data.saved_model_format = value\n _thread_local_data.save_kwargs = kwargs\n yield\n finally:\n _thread_local_data.saved_model_format = previous_format\n _thread_local_data.save_kwargs = previous_kwargs", "docstring": "Provides a scope within which the savde model format to test is `value`.\n\n The saved model format gets restored to its original value upon exiting the\n scope.\n\nArgs:\n value: saved model format value\n **kwargs: optional kwargs to pass to the save function.\n\nYields:\n The provided value.", "source": "github_repos"} -{"code": "def post_process(self, outputs, target_sizes):\n logging.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.')\n out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n if len(out_logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n if target_sizes.shape[1] != 2:\n raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')\n prob = out_logits.sigmoid()\n topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1)\n scores = topk_values\n topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')\n labels = topk_indexes % out_logits.shape[2]\n boxes = center_to_corners_format(out_bbox)\n boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n boxes = boxes * scale_fct[:, None, :]\n results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n return results", "docstring": "Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the Pascal VOC format (xmin, ymin, xmax, ymax).\n Only supports PyTorch.\n\nArgs:\n outputs ([`ConditionalDetrObjectDetectionOutput`]):\n Raw outputs of the model.\n target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):\n Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original\n image size (before any data augmentation). 
For visualization, this should be the image size after data\n augment, but before padding.\n\nReturns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\n in the batch as predicted by the model.", "source": "github_repos"} -{"code": "def __init__(self, config: UnivNetConfig):\n super().__init__()\n self.channels = config.model_in_channels\n self.kernel_size = config.kernel_predictor_conv_size\n self.dropout_prob = config.kernel_predictor_dropout\n self.leaky_relu_slope = config.leaky_relu_slope\n padding = (self.kernel_size - 1) // 2\n self.dropout = nn.Dropout(self.dropout_prob)\n self.conv1 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True)\n self.conv2 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True)", "docstring": "Implementation of the residual block for the kernel predictor network inside each location variable convolution\n block (LVCBlock).\n\n Parameters:\n config: (`UnivNetConfig`):\n Config for the `UnivNetModel` model.", "source": "github_repos"} -{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, next_sentence_label: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFNextSentencePredictorOutput]:\n outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n pooled_output = outputs[1]\n seq_relationship_scores = self.cls(pooled_output)\n next_sentence_loss = None if next_sentence_label is None else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores)\n if not return_dict:\n output = (seq_relationship_scores,) + outputs[2:]\n return (next_sentence_loss,) + output if next_sentence_loss is not None else output\n return TFNextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Return:\n\nExample:\n ```python\n >>> import tensorflow as tf\n >>> from transformers import AutoTokenizer, TFMobileBertForNextSentencePrediction\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"google/mobilebert-uncased\")\n >>> model = TFMobileBertForNextSentencePrediction.from_pretrained(\"google/mobilebert-uncased\")\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> next_sentence = \"The sky is blue due to the shorter wavelength of blue light.\"\n >>> encoding = tokenizer(prompt, next_sentence, return_tensors=\"tf\")\n\n >>> logits = model(encoding[\"input_ids\"], token_type_ids=encoding[\"token_type_ids\"])[0]\n ```", "source": "github_repos"} -{"code": "def run_inside_wrap_function_in_eager_mode(graph_function):\n\n def wrap_and_execute(self):\n if context.executing_eagerly():\n wrapped = wrap_function.wrap_function(graph_function, [self])\n wrapped()\n else:\n graph_function(self)\n return wrap_and_execute", 
"docstring": "Decorator to execute the same graph code in eager and graph modes.\n\n In graph mode, we just execute the graph_function passed as argument. In eager\n mode, we wrap the function using wrap_function and then execute the wrapped\n result.\n\nArgs:\n graph_function: python function containing graph code to be wrapped\n\nReturns:\n decorated function", "source": "github_repos"} -{"code": "def to_code_v1(entity, recursive=True, arg_values=None, arg_types=None, indentation=' ', experimental_optional_features=None):\n del arg_values\n del arg_types\n del indentation\n return to_code(entity, recursive=recursive, experimental_optional_features=experimental_optional_features)", "docstring": "Returns the source code generated by AutoGraph, as a string.\n\n Example usage:\n\n >>> def f(x):\n ... if x < 0:\n ... x = -x\n ... return x\n >>> tf.autograph.to_code(f)\n \"...def tf__f(x):...\"\n\n Also see: `tf.autograph.to_graph`.\n\n Note: If a function has been decorated with `tf.function`, pass its\n underlying Python function, rather than the callable that `tf.function\n creates:\n\n >>> @tf.function\n ... def f(x):\n ... if x < 0:\n ... x = -x\n ... return x\n >>> tf.autograph.to_code(f.python_function)\n \"...def tf__f(x):...\"\n\nArgs:\n entity: Python callable or class.\n recursive: Whether to recursively convert any functions that the converted\n function may call.\n arg_values: Deprecated.\n arg_types: Deprecated.\n indentation: Deprecated.\n experimental_optional_features: `None`, a tuple of, or a single\n `tf.autograph.experimental.Feature` value.\n\nReturns:\n The converted code as string.", "source": "github_repos"} -{"code": "def scan_meta_graph_def(meta_graph_def, op_denylist):\n ops_in_metagraph = set(meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))\n denylisted_ops = op_denylist & ops_in_metagraph\n if denylisted_ops:\n print('MetaGraph with tag set %s contains the following denylisted ops:' % meta_graph_def.meta_info_def.tags, denylisted_ops)\n else:\n print('MetaGraph with tag set %s does not contain the default denylisted ops:' % meta_graph_def.meta_info_def.tags, op_denylist)", "docstring": "Scans meta_graph_def and reports if there are ops on denylist.\n\n Print ops if they are on denylist, or print success if no denylisted ops\n found.\n\nArgs:\n meta_graph_def: MetaGraphDef protocol buffer.\n op_denylist: set of ops to scan for.", "source": "github_repos"} -{"code": "def _run_inline_graph_optimization(func, lower_control_flow, aggressive_inlining):\n graph_def = func.graph.as_graph_def()\n if not lower_control_flow:\n graph_def = disable_lower_using_switch_merge(graph_def)\n for function in graph_def.library.function:\n if 'api_implements' in function.attr:\n del function.attr['api_implements']\n meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)\n for name in ['variables', 'model_variables', 'trainable_variables', 'local_variables']:\n raw_list = []\n for raw in meta_graph.collection_def['variables'].bytes_list.value:\n variable = variable_pb2.VariableDef()\n variable.ParseFromString(raw)\n variable.ClearField('initializer_name')\n raw_list.append(variable.SerializeToString())\n meta_graph.collection_def[name].bytes_list.value[:] = raw_list\n fetch_collection = meta_graph_pb2.CollectionDef()\n for array in func.inputs + func.outputs:\n fetch_collection.node_list.value.append(array.name)\n meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)\n config = config_pb2.ConfigProto()\n rewrite_options = 
config.graph_options.rewrite_options\n rewrite_options.min_graph_nodes = -1\n rewrite_options.optimizers.append('function')\n if aggressive_inlining:\n rewrite_options.function_optimization = rewriter_config_pb2.RewriterConfig.AGGRESSIVE\n return tf_optimizer.OptimizeGraph(config, meta_graph)", "docstring": "Apply function inline optimization to the graph.\n\n Returns the GraphDef after Grappler's function inlining optimization is\n applied. This optimization does not work on models with control flow.\n\nArgs:\n func: ConcreteFunction.\n lower_control_flow: Boolean indicating whether or not to lower control flow\n ops such as If and While. (default True)\n aggressive_inlining: Boolean indicating whether or not to do aggressive\n function inlining (might be unsafe if function has stateful ops not\n properly connected to control outputs).\n\nReturns:\n GraphDef", "source": "github_repos"} -{"code": "def np_where(condition, x=None, y=None):\n if x is None and y is None:\n if np.lib.NumpyVersion(np.__version__) >= '2.1.0.rc0':\n return np.atleast_1d(np.asarray(condition)).nonzero()\n return np.where(condition)\n return np.where(condition, x, y)", "docstring": "Return elements chosen from x or y depending on condition.\n\n When only condition is provided, np.where(condition) is a shorthand for\n np.asarray(condition).nonzero(). See\n https://numpy.org/doc/stable/reference/generated/numpy.where.html. NumPy\n 2.1.0rc0 disallows 0D input arrays in nonzero, so np.atleast_1d is used here\n to remain compatible with NumPy 1.x. See\n https://github.com/numpy/numpy/pull/26268.\n\nArgs:\n condition: Array_like, bool. Where True, yield x, otherwise yield y.\n x: Array_like. Values from which to choose. x, y and condition need to be\n broadcastable to some shape.\n y: Array_like. Values from which to choose. x, y and condition need to be\n broadcastable to some shape.\n\nReturns:\n An array with elements from x where condition is True, and elements from y\n elsewhere. Or the indices of the elements that are non-zero.", "source": "github_repos"} -{"code": "def _track_partially_matched_protocols(self):\n old_protocol_cache = set(self._protocol_cache)\n yield\n self._protocol_cache = old_protocol_cache", "docstring": "Context manager for handling the protocol cache.\n\n Some protocols have methods that return instances of the protocol, e.g.\n Iterator.next returns Iterator. This will cause an infinite loop, which can\n be avoided by tracking partially matched protocols. To prevent collisions,\n keys are removed from the cache as soon as match is completed.\n\nYields:\n Into the protocol matching context.", "source": "github_repos"} -{"code": "def run(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', dest='input', default='gs://apache-beam-samples/nyc_trip/avro/fhvhv_tripdata_2023-02.avro', help='Input file of NYC FHV data to process. 
Larger dataset can be found here: gs://apache-beam-samples/nyc_trip/avro/*')\n parser.add_argument('--output', dest='output', help='Output file to write results to.', required=True)\n known_args, pipeline_args = parser.parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n with beam.Pipeline(options=pipeline_options) as p:\n p | ReadFromAvro(known_args.input) | beam.Filter(lambda record: all((record[k] is not None for k in ('hvfhs_license_num', 'trip_miles', 'trip_time', 'base_passenger_fare', 'tips', 'driver_pay'))) and any((record[k] is not None for k in ('request_datetime', 'on_scene_datetime', 'pickup_datetime', 'dropoff_datetime')))) | beam.ParDo(CreateKeyWithServiceAndDay()) | beam.CombinePerKey(CalculatePricePerAttribute()) | beam.Map(flatten_group) | WriteToAvro(known_args.output, SCHEMA, file_name_suffix='.avro')", "docstring": "Runs the New York City trips pipeline.\n\nArgs:\n argv: Pipeline options as a list of arguments.", "source": "github_repos"} -{"code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Any:\n return cpu_embedding_lookup(features, weights, self.embedding_tables, self._feature_config)", "docstring": "Apply standard lookup ops on CPU.\n\nArgs:\n features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or\n `tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs\n will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`\n or `tf.RaggedTensor` is supported per call.\n weights: If not `None`, a nested structure of `tf.Tensor`s,\n `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except\n that the tensors should be of float type (and they will be downcast to\n `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the\n same for the parallel entries from `features` and similarly for\n `tf.RaggedTensor`s we assume the row_splits are the same.\n\nReturns:\n A nested structure of Tensors with the same structure as input features.", "source": "github_repos"} -{"code": "def __init__(self, values, reshuffle=True):\n super().__init__()\n if isinstance(values, (str, bytes)):\n raise TypeError('PTransform Create: Refusing to treat string as an iterable. (string=%r)' % values)\n elif isinstance(values, dict):\n values = values.items()\n self.values = tuple(values)\n self.reshuffle = reshuffle\n self._coder = typecoders.registry.get_coder(self.get_output_type())", "docstring": "Initializes a Create transform.\n\nArgs:\n values: An object of values for the PCollection", "source": "github_repos"} -{"code": "def report_clean(rows):\n print('DCM REPORT CLEAN')\n first = True\n last = False\n for row in rows:\n if row and row[0] == 'Report Fields':\n break\n for row in rows:\n if 'No data returned by the reporting service.' 
in row:\n break\n if not row or row[0] == 'Grand Total:':\n break\n if first:\n try:\n date_column = row.index('Date')\n row[date_column] = 'Report_Day'\n except ValueError:\n pass\n row = [column_header_sanitize(cell) for cell in row]\n row = ['' if cell.strip() in ('(not set)', '-') else cell for cell in row]\n yield row\n first = False", "docstring": "Helper to fix DCM report issues for BigQuery and ensure schema compliance.\n\n Memory efficiently cleans each row by fixing:\n * Strips header and footer to preserve only data rows.\n * Changes 'Date' to 'Report_Day' to avoid using reserved name in BigQuery.\n * removes '-' as columns\n * Changes data format to match data studio if datastusio=True.\n\n Usage example:\n\n ```\n filename, report = report_file(...)\n rows = report_to_rows(report)\n rows = report_clean(rows)\n ```\n\nArgs:\n * rows: (iterator) Rows to clean.\n\nReturns:\n * Iterator of cleaned rows.", "source": "github_repos"} -{"code": "def cosh(x):\n if any_symbolic_tensors((x,)):\n return Cosh().symbolic_call(x)\n return backend.numpy.cosh(x)", "docstring": "Hyperbolic cosine, element-wise.\n\nArgs:\n x: Input tensor.\n\nReturns:\n Output tensor of same shape as `x`.", "source": "github_repos"} -{"code": "def keras_model_summary(name, data, step=None):\n summary_metadata = summary_pb2.SummaryMetadata()\n summary_metadata.plugin_data.plugin_name = 'graph_keras_model'\n summary_metadata.plugin_data.content = b'1'\n try:\n json_string = data.to_json()\n except Exception as exc:\n logging.warning('Model failed to serialize as JSON. Ignoring... %s', exc)\n return False\n with summary_ops_v2.summary_scope(name, 'graph_keras_model', [data, step]) as (tag, _):\n with ops.device('cpu:0'):\n tensor = constant_op.constant(json_string, dtype=dtypes.string)\n return summary_ops_v2.write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)", "docstring": "Writes a Keras model as JSON to as a Summary.\n\n Writing the Keras model configuration allows the TensorBoard graph plugin to\n render a conceptual graph, as opposed to graph of ops. In case the model fails\n to serialize as JSON, it ignores and returns False.\n\nArgs:\n name: A name for this summary. The summary tag used for TensorBoard will be\n this name prefixed by any active name scopes.\n data: A Keras Model to write.\n step: Explicit `int64`-castable monotonic step value for this summary. 
If\n omitted, this defaults to `tf.summary.experimental.get_step()`, which must\n not be None.\n\nReturns:\n True on success, or False if no summary was written because no default\n summary writer was available.\n\nRaises:\n ValueError: if a default writer exists, but no step was provided and\n `tf.summary.experimental.get_step()` is None.", "source": "github_repos"} -{"code": "def from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, encoding='utf-8') as reader:\n text = reader.read()\n image_processor_dict = json.loads(text)\n return cls(**image_processor_dict)", "docstring": "Instantiates a image processor of type [`~image_processing_utils.ImageProcessingMixin`] from the path to a JSON\n file of parameters.\n\nArgs:\n json_file (`str` or `os.PathLike`):\n Path to the JSON file containing the parameters.\n\nReturns:\n A image processor of type [`~image_processing_utils.ImageProcessingMixin`]: The image_processor object\n instantiated from that JSON file.", "source": "github_repos"} -{"code": "def window_partition(hidden_states, window_size):\n batch_size, height, width, num_channels = hidden_states.shape\n hidden_states = hidden_states.view(batch_size, height // window_size, window_size, width // window_size, window_size, num_channels)\n windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)\n return windows", "docstring": "Returns the resized hidden states. The output shape should be `(batch_size * num_windows, window_size, window_size,\n num_channels)`\n\nArgs:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, height, width, num_channels)`):\n Input hidden states\n window_size (`int`):\n Window size", "source": "github_repos"} -{"code": "def get_size_dict(size: Optional[Union[int, Iterable[int], dict[str, int]]]=None, max_size: Optional[int]=None, height_width_order: bool=True, default_to_square: bool=True, param_name='size') -> dict:\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(f'{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}. Converted to {size_dict}.')\n else:\n size_dict = size\n if not is_valid_size_dict(size_dict):\n raise ValueError(f'{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}')\n return size_dict", "docstring": "Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\nArgs:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.", "source": "github_repos"} -{"code": "def get_visible_devices(device_type=None):\n return context.context().get_visible_devices(device_type)", "docstring": "Get the list of visible physical devices.\n\n Returns the list of `PhysicalDevice`s currently marked as visible to the\n runtime. A visible device will have at least one `LogicalDevice` associated\n with it once the runtime is initialized.\n\n The following example verifies all visible GPUs have been disabled:\n\n >>> physical_devices = tf.config.list_physical_devices('GPU')\n >>> try:\n ... # Disable all GPUS\n ... tf.config.set_visible_devices([], 'GPU')\n ... visible_devices = tf.config.get_visible_devices()\n ... for device in visible_devices:\n ... assert device.device_type != 'GPU'\n ... except:\n ... # Invalid device or cannot modify virtual devices once initialized.\n ... pass\n\nArgs:\n device_type: (optional string) Only include devices matching this device\n type. For example \"CPU\" or \"GPU\".\n\nReturns:\n List of visible `PhysicalDevice`s", "source": "github_repos"} -{"code": "def _aggregate_grads(gradients):\n assert gradients, 'No gradients to aggregate'\n if len(gradients) == 1:\n return gradients[0]\n if all((isinstance(g, tensor_lib.Tensor) for g in gradients)):\n return gen_math_ops.add_n(gradients)\n else:\n assert all((isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) for g in gradients))\n return backprop_util.AggregateIndexedSlicesGradients(gradients)", "docstring": "Aggregate gradients from multiple sources.\n\nArgs:\n gradients: A list of 'Tensor' or 'IndexedSlices' gradients.\n\nReturns:\n If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.\n Otherwise returns an aggregated 'IndexedSlices'.", "source": "github_repos"} -{"code": "def _least_upper_bound(*nodes):\n N = set(nodes)\n UB = LATTICE_UPPER_BOUNDS\n try:\n bounds = [UB[n] for n in N]\n except KeyError:\n dtype = next((n for n in N if n not in UB))\n raise ValueError(f'dtype={dtype!r} is not a valid dtype for Keras type promotion.')\n CUB = set.intersection(*bounds)\n LUB = CUB & N or {c for c in CUB if CUB.issubset(UB[c])}\n if len(LUB) == 1:\n return LUB.pop()\n elif len(LUB) == 0:\n msg = f'Input dtypes {tuple((str(n) for n in nodes))} have no available implicit dtype promotion path. Try explicitly casting inputs to the desired output type.'\n raise ValueError(msg)\n else:\n raise ValueError(f\"Internal Type Promotion error: {nodes} do not have a unique least upper bound on the specified lattice; options are {LUB}. 
This is an unexpected error in Keras's internal logic; please report it to the maintainers.\")", "docstring": "Compute the least upper bound of a set of nodes.\n\nArgs:\n nodes: sequence of entries from dtypes + weak_types\n\nReturns:\n The type representing the least upper bound of the input nodes on the\n promotion lattice.", "source": "github_repos"} -{"code": "def __init__(self, dump_root, partition_graphs=None, validate=True):\n if not gfile.IsDirectory(dump_root):\n raise IOError('Dump root directory %s does not exist' % dump_root)\n self._core_metadata = []\n self._dump_root = dump_root\n self._load_core_metadata()\n self._load_fetches_info()\n self._load_feeds_info()\n self._load_all_device_dumps(partition_graphs, validate)\n self._python_graph = None", "docstring": "`DebugDumpDir` constructor.\n\nArgs:\n dump_root: (`str`) path to the dump root directory.\n partition_graphs: A repeated field of GraphDefs representing the\n partition graphs executed by the TensorFlow runtime.\n validate: (`bool`) whether the dump files are to be validated against the\n partition graphs.\n\nRaises:\n IOError: If dump_root does not exist as a directory.\n ValueError: If more than one core metadata file is found under the dump\n root directory.", "source": "github_repos"} -{"code": "def get_float(self, min_float=_MIN_FLOAT, max_float=_MAX_FLOAT):\n return self.fdp.ConsumeFloatInRange(min_float, max_float)", "docstring": "Consume a float with given constraints.\n\nArgs:\n min_float: Minimum allowed float.\n max_float: Maximum allowed float.\n\nReturns:\n Consumed float based on input bytes and constraints.", "source": "github_repos"} -{"code": "def _ContainsAll(self, verb, expected):\n actual_list = list(self._actual)\n missing = _DuplicateCounter()\n actual_not_in_order = set()\n ordered = True\n for i in expected:\n try:\n index = actual_list.index(i)\n for _ in six.moves.xrange(index):\n actual_element = actual_list.pop(0)\n if _IsHashable(actual_element) and isinstance(actual_not_in_order, collections_abc.Set):\n actual_not_in_order.add(actual_element)\n else:\n if isinstance(actual_not_in_order, collections_abc.Set):\n actual_not_in_order = list(actual_not_in_order)\n if actual_element not in actual_not_in_order:\n actual_not_in_order.append(actual_element)\n actual_list.pop(0)\n except ValueError:\n if not _IsHashable(i) and isinstance(actual_not_in_order, collections_abc.Set):\n actual_not_in_order = list(actual_not_in_order)\n if i in actual_not_in_order:\n actual_not_in_order.remove(i)\n ordered = False\n else:\n missing.Increment(i)\n if missing:\n self._FailWithBadResults(verb, expected, 'is missing', missing)\n if ordered:\n return _InOrder()\n else:\n return _NotInOrder(self._actual, 'contains all elements in order', expected)", "docstring": "Determines if the subject contains all the expected elements.\n\n Helper function for ContainsAllIn() and ContainsAllOf().\n\nArgs:\n verb: string describing how the expected elements should be contained.\n expected: iterable of objects that should be contained in the subject.\n\nReturns:\n If the subject does contain all the expected elements, returns an\n _Ordered predicate on which .InOrder() can be subsequently called.\n\nRaises:\n TruthAssertionError: the subject is missing any of the expected elements.", "source": "github_repos"} -{"code": "def convert_to_rgb(self, video: 'torch.Tensor') -> VideoInput:\n video = F.grayscale_to_rgb(video)\n if video.shape[-3] == 3 or not (video[..., 3, :, :] < 255).any():\n return video\n alpha = video[..., 3, :, 
:] / 255.0\n video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :]\n return video", "docstring": "Converts a video to RGB format.\n\nArgs:\n video (`\"torch.Tensor\"`):\n The video to convert.\n\nReturns:\n `torch.Tensor`: The converted video.", "source": "github_repos"} -{"code": "def get_profiles(self, cmd):\n if cmd not in self._views:\n raise ValueError('No autoprofiler for command: {}, was run'.format(cmd))\n return self._views[cmd]", "docstring": "Returns profiling results for each step at which `cmd` was run.\n\nArgs:\n cmd: string, profiling command used in an `add_auto_profiling` call.\n\nReturns:\n dict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which\n the profiling command was run. Values are the outputs of profiling.\n For \"code\" and \"op\" commands this will be a `MultiGraphNodeProto`, for\n \"scope\" and \"graph\" commands this will be a `GraphNodeProto.\n\nRaises:\n ValueError: if `cmd` was never run (either because no session.run call was\n made or because there was no `add_auto_profiling` call with the specified\n `cmd`.", "source": "github_repos"} -{"code": "def _create_partition_config(option: t.Tuple, config: Config) -> Config:\n copy = cp.deepcopy(config.selection)\n out = cp.deepcopy(config)\n for idx, key in enumerate(config.partition_keys):\n copy[key] = [option[idx]]\n if 'hdate' in copy:\n copy['hdate'] = [generate_hdate(copy['date'][0], v) for v in copy['hdate']]\n out.selection = copy\n return out", "docstring": "Create a config for a single partition option.\n\n Output a config dictionary, overriding the range of values for\n each key with the partition instance in 'selection'.\n Continuing the example from prepare_partitions, the selection section\n would be:\n { 'foo': ..., 'year': ['2020'], 'month': ['01'], ... }\n { 'foo': ..., 'year': ['2020'], 'month': ['02'], ... }\n { 'foo': ..., 'year': ['2020'], 'month': ['03'], ... 
}\n\nArgs:\n option: A single item in the range of partition_keys.\n config: The download config, including the parameters and selection sections.\n\nReturns:\n A configuration with that selects a single download partition.", "source": "github_repos"} -{"code": "def __init__(self, num_rows, multiplier, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=True, assert_proper_shapes=False, name='LinearOperatorScaledIdentity'):\n parameters = dict(num_rows=num_rows, multiplier=multiplier, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, assert_proper_shapes=assert_proper_shapes, name=name)\n self._assert_proper_shapes = assert_proper_shapes\n with ops.name_scope(name, values=[multiplier, num_rows]):\n self._multiplier = linear_operator_util.convert_nonref_to_tensor(multiplier, name='multiplier')\n if not self._multiplier.dtype.is_complex:\n if is_self_adjoint is False:\n raise ValueError('A real diagonal operator is always self adjoint.')\n else:\n is_self_adjoint = True\n if not is_square:\n raise ValueError('A ScaledIdentity operator is always square.')\n linear_operator_util.assert_not_ref_type(num_rows, 'num_rows')\n super(LinearOperatorScaledIdentity, self).__init__(dtype=self._multiplier.dtype.base_dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)\n self._num_rows = linear_operator_util.shape_tensor(num_rows, name='num_rows')\n self._num_rows_static = tensor_util.constant_value(self._num_rows)\n self._check_num_rows_possibly_add_asserts()\n self._num_rows_cast_to_dtype = math_ops.cast(self._num_rows, self.dtype)\n self._num_rows_cast_to_real_dtype = math_ops.cast(self._num_rows, self.dtype.real_dtype)", "docstring": "Initialize a `LinearOperatorScaledIdentity`.\n\n The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which\n determines the size of each identity matrix, and a `multiplier`,\n which defines `dtype`, batch shape, and scale of each matrix.\n\n This operator is able to broadcast the leading (batch) dimensions.\n\nArgs:\n num_rows: Scalar non-negative integer `Tensor`. Number of rows in the\n corresponding identity matrix.\n multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar).\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n assert_proper_shapes: Python `bool`. 
If `False`, only perform static\n checks that initialization and method arguments have proper shape.\n If `True`, and static checks are inconclusive, add asserts to the graph.\n name: A name for this `LinearOperator`\n\nRaises:\n ValueError: If `num_rows` is determined statically to be non-scalar, or\n negative.", "source": "github_repos"} -{"code": "def _compute_direction_numbers(dim):\n m = np.empty((dim, 32), dtype=np.int32)\n m[0, :] = np.ones(32, dtype=np.int32)\n for k in range(dim - 1):\n a_k = _PRIMITIVE_POLYNOMIAL_COEFFICIENTS[k]\n deg = np.int32(np.floor(np.log2(a_k)))\n m[k + 1, :deg] = _INITIAL_DIRECTION_NUMBERS[:deg, k]\n for j in range(deg, 32):\n m[k + 1, j] = m[k + 1, j - deg]\n for i in range(deg):\n if a_k >> i & 1:\n m[k + 1, j] = np.bitwise_xor(m[k + 1, j], m[k + 1, j - deg + i] << deg - i)\n return m", "docstring": "Returns array of direction numbers for dimension dim.\n\n These are the m_kj values in the Joe & Kuo notes[1], not the v_kj values. So\n these refer to the 'abuse of notation' mentioned in the notes -- it is a\n matrix of integers, not floats. The variable names below are intended to match\n the notation in the notes as closely as possible.\n\nArgs:\n dim: int, dimension.\n\nReturns:\n `numpy.array` of direction numbers with `shape` [dim, 32].", "source": "github_repos"} -{"code": "def _get_help_for_command_prefix(self, cmd_prefix):\n lines = []\n resolved_prefix = self._resolve_prefix(cmd_prefix)\n if not resolved_prefix:\n lines.append('Invalid command prefix: \"%s\"' % cmd_prefix)\n return lines\n lines.append(resolved_prefix)\n if resolved_prefix in self._prefix_to_aliases:\n lines.append(HELP_INDENT + 'Aliases: ' + ', '.join(self._prefix_to_aliases[resolved_prefix]))\n lines.append('')\n help_lines = self._prefix_to_help[resolved_prefix].split('\\n')\n for line in help_lines:\n lines.append(HELP_INDENT + line)\n return lines", "docstring": "Compile the help information for a given command prefix.\n\nArgs:\n cmd_prefix: Command prefix, as the prefix itself or one of its aliases.\n\nReturns:\n A list of str as the help information for cmd_prefix. If the cmd_prefix\n does not exist, the returned list of str will indicate that.", "source": "github_repos"} -{"code": "def __init__(self, initial_learning_rate, first_decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0, name=None):\n super(CosineDecayRestarts, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.first_decay_steps = first_decay_steps\n self._t_mul = t_mul\n self._m_mul = m_mul\n self.alpha = alpha\n self.name = name", "docstring": "Applies cosine decay with restarts to the learning rate.\n\nArgs:\n initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python\n number. The initial learning rate.\n first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python\n number. Number of steps to decay over.\n t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.\n Used to derive the number of iterations in the i-th period\n m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.\n Used to derive the initial learning rate of the i-th period:\n alpha: A scalar `float32` or `float64` Tensor or a Python number.\n Minimum learning rate value as a fraction of the initial_learning_rate.\n name: String. Optional name of the operation. 
Defaults to 'SGDRDecay'.", "source": "github_repos"} -{"code": "def forward(self, hidden_states, output_router_logits):\n forwarded_states, router_tuple = self.mlp(hidden_states)\n forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))\n output = hidden_states + self.norm(forwarded_states)\n if output_router_logits and router_tuple is not None:\n return (output, router_tuple)\n else:\n return output", "docstring": "Args:\n hidden_states (`torch.Tensor`) :\n [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.\n output_router_logits (`bool`) :\n output experts router output.\n\nReturns:\n torch.Tensor[num_groups, tokens_per_group, hidden_dim]", "source": "github_repos"} -{"code": "def _cast_indexed_slice_indices(a, b):\n if isinstance(a, indexed_slices.IndexedSlices) and isinstance(b, indexed_slices.IndexedSlices) and (a.indices.dtype != b.indices.dtype):\n a._indices = math_ops.cast(a.indices, dtypes.int64)\n b._indices = math_ops.cast(b.indices, dtypes.int64)", "docstring": "Cast IndexedSlice.indices from int32 to int64 where necessary.\n\n If `a` and `b` are both IndexedSlices, and their indices have different\n dtypes, then cast both their dtypes to `int64` (modifies `a` and `b`\n in-place). Otherwise, does nothing.\n\nArgs:\n a: A value, which may be an IndexedSlices.\n b: A value, which may be an IndexedSlices.", "source": "github_repos"} -{"code": "def center_crop(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n size = get_size_dict(size)\n if 'height' not in size or 'width' not in size:\n raise ValueError(f\"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}\")\n return center_crop(image, size=(size['height'], size['width']), data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Center crop an image to `(size[\"height\"], size[\"width\"])`. If the input size is smaller than `crop_size` along\n any edge, the image is padded with 0's and then center cropped.\n\nArgs:\n image (`np.ndarray`):\n Image to center crop.\n size (`Dict[str, int]`):\n Size of the output image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.", "source": "github_repos"} -{"code": "def forward(self, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return self.audio_encoder(input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)", "docstring": "input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Input audio features. This should be returned by the [`ClapFeatureExtractor`] class that you can also\n retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.\n is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*):\n Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance\n the features.\n\nExample:\n ```python\n >>> from datasets import load_dataset\n >>> from transformers import AutoProcessor, ClapAudioModel\n\n >>> dataset = load_dataset(\"hf-internal-testing/ashraq-esc50-1-dog-example\")\n >>> audio_sample = dataset[\"train\"][\"audio\"][0][\"array\"]\n\n >>> model = ClapAudioModel.from_pretrained(\"laion/clap-htsat-fused\")\n >>> processor = AutoProcessor.from_pretrained(\"laion/clap-htsat-fused\")\n\n >>> inputs = processor(audios=audio_sample, return_tensors=\"pt\")\n\n >>> outputs = model(**inputs)\n >>> last_hidden_state = outputs.last_hidden_state\n ```", "source": "github_repos"} -{"code": "def __init__(self, feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], optimizer: Optional[tpu_embedding_v2_utils._Optimizer], pipeline_execution_with_tensor_core: bool=False):\n self._strategy = distribute_lib.get_strategy()\n self._using_tpu = isinstance(self._strategy, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2))\n self._pipeline_execution_with_tensor_core = pipeline_execution_with_tensor_core\n self._feature_config = feature_config\n self._output_shapes = []\n for feature in nest.flatten(feature_config):\n self._output_shapes.append(feature.output_shape)\n device_assignment = getattr(self._strategy.extended, '_device_assignment', None)\n self._num_cores_per_replica = device_assignment.num_cores_per_replica if device_assignment else None\n self._table_config = []\n for feature in nest.flatten(feature_config):\n if feature.table not in self._table_config:\n self._table_config.append(feature.table)\n table_names = []\n for i, table in enumerate(self._table_config):\n if table.optimizer is None:\n table.optimizer = optimizer\n if (table.optimizer is not None or self._using_tpu) and (not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)):\n raise ValueError('{} is an unsupported optimizer class. 
Please pass an instance of one of the optimizer classes under tf.tpu.experimental.embedding.'.format(type(table.optimizer)))\n if table.name is None:\n table.name = 'table_{}'.format(i)\n if table.name in table_names:\n raise ValueError(f'Tables must have a unique name. Multiple tables with name {table.name} found.')\n table_names.append(table.name)\n if self._using_tpu:\n self._dynamic_learning_rates = []\n for table in self._table_config:\n if callable(table.optimizer.learning_rate) and table.optimizer.learning_rate not in self._dynamic_learning_rates:\n self._dynamic_learning_rates.append(table.optimizer.learning_rate)\n self._hosts = tpu_embedding_v2_utils.get_list_of_hosts(self._strategy)\n self._built = False\n self._verify_output_shapes_on_enqueue = True", "docstring": "Creates the TPUEmbedding mid level API object.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=tf.tpu.experimental.embedding.FeatureConfig(\n table=tf.tpu.experimental.embedding.TableConfig(\n dim=...,\n vocabulary_size=...)))\n ```\n\nArgs:\n feature_config: A nested structure of\n `tf.tpu.experimental.embedding.FeatureConfig` configs.\n optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`,\n `tf.tpu.experimental.embedding.Adagrad` or\n `tf.tpu.experimental.embedding.Adam`. When not created under\n TPUStrategy may be set to None to avoid the creation of the optimizer\n slot variables, useful for optimizing memory consumption when exporting\n the model for serving where slot variables aren't needed.\n pipeline_execution_with_tensor_core: If True, the TPU embedding\n computations will overlap with the TensorCore computations (and hence\n will be one step old). 
Set to True for improved performance.\n\nRaises:\n ValueError: If optimizer is not one of tf.tpu.experimental.embedding.(SGD,\n Adam or Adagrad) or None when created under a TPUStrategy.", "source": "github_repos"} -{"code": "def delete_directory(directory):\n filesystems.FileSystems.delete([directory])", "docstring": "Delete a directory in a filesystem.\n\nArgs:\n directory: Full path to a directory supported by Beam filesystems (e.g.\n \"gs://mybucket/mydir/\", \"s3://...\", ...)", "source": "github_repos"} -{"code": "def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n hidden_states = inputs_embeds\n for encoder_layer in self.layers:\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github_repos"} -{"code": "def _find_op(graph: ops.Graph, op_name: Optional[str]) -> Optional[ops.Operation]:\n if not op_name:\n return None\n init_op = graph.get_operation_by_name(op_name)\n logging.debug('Op found in the graph: %s', op_name)\n return init_op", "docstring": "Finds the operation with `op_name`.\n\nArgs:\n graph: The graph to find from.\n op_name: Name of the node.\n\nReturns:\n The operation that corresponds to `op_name`. 
Returns None iff op_name is an\n empty string or None.\n\nRaises:\n ValueError: `op_name` is malformed.", "source": "github_repos"} -{"code": "def squeeze(x, axis=None):\n if any_symbolic_tensors((x,)):\n return Squeeze(axis=axis).symbolic_call(x)\n return backend.numpy.squeeze(x, axis=axis)", "docstring": "Remove axes of length one from `x`.\n\nArgs:\n x: Input tensor.\n axis: Select a subset of the entries of length one in the shape.\n\nReturns:\n The input tensor with all or a subset of the dimensions of\n length 1 removed.", "source": "github_repos"} -{"code": "def from_json(cls, json_value: Dict[str, Any], *, allow_partial: bool=False, root_path: Optional[utils.KeyPath]=None) -> 'DNA':\n cloneable_metadata_keys = json_value.pop('_cloneable_metadata_keys', None)\n if json_value.get('format', None) == 'compact':\n with symbolic.enable_type_check(False):\n dna = DNA.parse(symbolic.from_json(json_value.get('value')))\n if 'metadata' in json_value:\n dna.rebind(metadata=symbolic.from_json(json_value.get('metadata')), raise_on_no_change=False, skip_notification=True)\n else:\n dna = super(DNA, cls).from_json(json_value, allow_partial=allow_partial, root_path=root_path)\n assert isinstance(dna, DNA)\n if cloneable_metadata_keys:\n dna._cloneable_metadata_keys = set(cloneable_metadata_keys)\n return dna", "docstring": "Class method that load a DNA from a JSON value.\n\nArgs:\n json_value: Input JSON value, only JSON dict is acceptable.\n allow_partial: Whether to allow elements of the list to be partial.\n root_path: KeyPath of loaded object in its object tree.\n\nReturns:\n A DNA object.", "source": "github_repos"} -{"code": "def _save_model(self):\n if not file_utils.exists(self.backup_dir):\n file_utils.makedirs(self.backup_dir)\n if self.double_checkpoint and file_utils.exists(self._weights_path):\n file_utils.copy(self._weights_path, self._prev_weights_path)\n if self.double_checkpoint and file_utils.exists(self._training_metadata_path):\n file_utils.copy(self._training_metadata_path, self._prev_training_metadata_path)\n self.model.save_weights(filepath=self._weights_path, overwrite=True)\n with file_utils.File(self._training_metadata_path, 'w') as f:\n training_metadata = {'epoch': self._current_epoch, 'batch': self._last_batch_seen}\n f.write(json.dumps(training_metadata))", "docstring": "Saves the model.\n\nArgs:\n epoch: the epoch this iteration is in.\n batch: the batch this iteration is in. 
`None` if the `save_freq`\n is set to `\"epoch\"`.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.", "source": "github_repos"} -{"code": "def __call__(self, x: core.Tensor) -> Mapping[str, core.Tensor]:\n out = nn_ops.conv2d(x, self.filters, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC')\n return {'output': out}", "docstring": "Performs a 2D convolution operation.\n\nArgs:\n x: Input tensor to perform convolution on.\n\nReturns:\n A map of: output key -> output result.", "source": "github_repos"} -{"code": "def __init__(self, packages: Optional[List[FhirPackage[_StructDefT, _SearchParameterT, _CodeSystemT, _ValueSetT]]]=None) -> None:\n self.packages = packages or []", "docstring": "Manages access to a collection of FhirPackage instances.\n\n Allows users to add packages to the package manager and then search all of\n them for a particular resource.\n\nAttributes:\n packages: The packages added to the package manager.", "source": "github_repos"} -{"code": "def forward(self, outputs, targets):\n batch_size, num_queries = outputs['logits'].shape[:2]\n out_prob = outputs['logits'].flatten(0, 1).softmax(-1)\n out_bbox = outputs['pred_boxes'].flatten(0, 1)\n target_ids = torch.cat([v['class_labels'] for v in targets])\n target_bbox = torch.cat([v['boxes'] for v in targets])\n class_cost = -out_prob[:, target_ids]\n bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)\n giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))\n cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost\n cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()\n sizes = [len(v['boxes']) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]", "docstring": "Args:\n outputs (`dict`):\n A dictionary that contains at least these entries:\n * \"logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n * \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.\n targets (`List[dict]`):\n A list of targets (len(targets) = batch_size), where each target is a dict containing:\n * \"class_labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of\n ground-truth\n objects in the target) containing the class labels\n * \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.\n\nReturns:\n `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)", "source": "github_repos"} -{"code": "def pred(scores: jax.Array, rows: jax.Array, cols: jax.Array, N: int) -> jax.Array:\n r: jax.Array = 2 * jax.ops.segment_sum(scores.take(cols), rows, N) - scores.sum()\n return r > 0", "docstring": "Predicts the target output from the learned scores and input entries.\n\nArgs:\n scores (jax.Array): Contribution scores of features.\n rows (jax.Array): Row indices of True values in the input.\n cols (jax.Array): Column indices of True values in the input.\n N (int): The number of input entries.\n\nReturns:\n res (jax.Array): A 
prediction of the target.", "source": "github_repos"} -{"code": "def flip_back(output_flipped, flip_pairs, target_type='gaussian-heatmap'):\n if target_type not in ['gaussian-heatmap', 'combined-target']:\n raise ValueError('target_type should be gaussian-heatmap or combined-target')\n if output_flipped.ndim != 4:\n raise ValueError('output_flipped should be [batch_size, num_keypoints, height, width]')\n batch_size, num_keypoints, height, width = output_flipped.shape\n channels = 1\n if target_type == 'combined-target':\n channels = 3\n output_flipped[:, 1::3, ...] = -output_flipped[:, 1::3, ...]\n output_flipped = output_flipped.reshape(batch_size, -1, channels, height, width)\n output_flipped_back = output_flipped.clone()\n for left, right in flip_pairs.tolist():\n output_flipped_back[:, left, ...] = output_flipped[:, right, ...]\n output_flipped_back[:, right, ...] = output_flipped[:, left, ...]\n output_flipped_back = output_flipped_back.reshape((batch_size, num_keypoints, height, width))\n output_flipped_back = output_flipped_back.flip(-1)\n return output_flipped_back", "docstring": "Flip the flipped heatmaps back to the original form.\n\nArgs:\n output_flipped (`torch.tensor` of shape `(batch_size, num_keypoints, height, width)`):\n The output heatmaps obtained from the flipped images.\n flip_pairs (`torch.Tensor` of shape `(num_keypoints, 2)`):\n Pairs of keypoints which are mirrored (for example, left ear -- right ear).\n target_type (`str`, *optional*, defaults to `\"gaussian-heatmap\"`):\n Target type to use. Can be gaussian-heatmap or combined-target.\n gaussian-heatmap: Classification target with gaussian distribution.\n combined-target: The combination of classification target (response map) and regression target (offset map).\n Paper ref: Huang et al. 
The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n\nReturns:\n torch.Tensor: heatmaps that flipped back to the original image", "source": "github_repos"} -{"code": "def __init__(self, pretrained_cfg: Dict[str, Any], architecture: Optional[str]=None, **kwargs):\n requires_backends(self, 'timm')\n super().__init__(architecture=architecture)\n self.data_config = timm.data.resolve_data_config(pretrained_cfg, model=None, verbose=False)\n self.val_transforms = timm.data.create_transform(**self.data_config, is_training=False)\n self.train_transforms = timm.data.create_transform(**self.data_config, is_training=True)\n self._not_supports_tensor_input = any((transform.__class__.__name__ == 'ToTensor' for transform in self.val_transforms.transforms))", "docstring": "Wrapper class for timm models to be used within transformers.\n\nArgs:\n pretrained_cfg (`Dict[str, Any]`):\n The configuration of the pretrained model used to resolve evaluation and\n training transforms.\n architecture (`Optional[str]`, *optional*):\n Name of the architecture of the model.", "source": "github_repos"} -{"code": "def __init__(self, model_name: str, columns: list[str], api_key: Optional[str]=None, organization: Optional[str]=None, dimensions: Optional[int]=None, user: Optional[str]=None, max_batch_size: Optional[int]=None, **kwargs):\n self.model_name = model_name\n self.api_key = api_key\n self.organization = organization\n self.dimensions = dimensions\n self.user = user\n self.max_batch_size = max_batch_size\n super().__init__(columns=columns, **kwargs)", "docstring": "Embedding Config for OpenAI Text Embedding models.\n Text Embeddings are generated for a batch of text using the OpenAI API.\n\nArgs:\n model_name: Name of the OpenAI embedding model\n columns: The columns where the embeddings will be stored in the output\n api_key: OpenAI API key\n organization: OpenAI organization ID\n dimensions: Specific embedding dimensions to use (if model supports it)\n user: End-user identifier for tracking and rate limit calculations\n max_batch_size: Maximum batch size for requests to OpenAI API", "source": "github_repos"} -{"code": "def _safe_exec_func(self, func, *args):\n try:\n return func(*args)\n except signals.TestAbortAll:\n raise\n except Exception:\n logging.exception('Exception happened when executing %s in %s.', func.__name__, self.TAG)", "docstring": "Executes a function with exception safeguard.\n\n This will let signals.TestAbortAll through so abort_all works in all\n procedure functions.\n\nArgs:\n func: Function to be executed.\n args: Arguments to be passed to the function.\n\nReturns:\n Whatever the function returns.", "source": "github_repos"} -{"code": "def run_model_pipeline_tests(self, task, repo_name, model_architecture, tokenizer_names, image_processor_names, feature_extractor_names, processor_names, commit, torch_dtype='float32'):\n pipeline_test_class_name = pipeline_test_mapping[task]['test'].__name__\n tokenizer_names = tokenizer_names or [None]\n image_processor_names = image_processor_names or [None]\n feature_extractor_names = feature_extractor_names or [None]\n processor_names = processor_names or [None]\n test_cases = [{'tokenizer_name': tokenizer_name, 'image_processor_name': image_processor_name, 'feature_extractor_name': feature_extractor_name, 'processor_name': processor_name} for tokenizer_name in tokenizer_names for image_processor_name in image_processor_names for feature_extractor_name in feature_extractor_names for processor_name 
in processor_names]\n for test_case in test_cases:\n tokenizer_name = test_case['tokenizer_name']\n image_processor_name = test_case['image_processor_name']\n feature_extractor_name = test_case['feature_extractor_name']\n processor_name = test_case['processor_name']\n do_skip_test_case = self.is_pipeline_test_to_skip(pipeline_test_class_name, model_architecture.config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name)\n if do_skip_test_case:\n logger.warning(f'{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: test is currently known to fail for: model `{model_architecture.__name__}` | tokenizer `{tokenizer_name}` | image processor `{image_processor_name}` | feature extractor {feature_extractor_name}.')\n continue\n self.run_pipeline_test(task, repo_name, model_architecture, tokenizer_name=tokenizer_name, image_processor_name=image_processor_name, feature_extractor_name=feature_extractor_name, processor_name=processor_name, commit=commit, torch_dtype=torch_dtype)", "docstring": "Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class names\n\nArgs:\n task (`str`):\n A task name. This should be a key in the mapping `pipeline_test_mapping`.\n repo_name (`str`):\n A model repository id on the Hub.\n model_architecture (`type`):\n A subclass of `PretrainedModel` or `PretrainedModel`.\n tokenizer_names (`List[str]`):\n A list of names of a subclasses of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`.\n image_processor_names (`List[str]`):\n A list of names of subclasses of `BaseImageProcessor`.\n feature_extractor_names (`List[str]`):\n A list of names of subclasses of `FeatureExtractionMixin`.\n processor_names (`List[str]`):\n A list of names of subclasses of `ProcessorMixin`.\n commit (`str`):\n The commit hash of the model repository on the Hub.\n torch_dtype (`str`, `optional`, defaults to `'float32'`):\n The torch dtype to use for the model. 
Can be used for FP16/other precision inference.", "source": "github_repos"} -{"code": "def env():\n return _env", "docstring": "Returns the object holds the test environment information.\n\n Tests should modify this in the main process if needed, and it will be passed\n to the worker processes each time a test case is run.\n\nReturns:\n a TestEnvironment object.", "source": "github_repos"} -{"code": "def __init__(self, counters=None, distributions=None, gauges=None, string_sets=None, bounded_tries=None):\n self.counters = counters or {}\n self.distributions = distributions or {}\n self.gauges = gauges or {}\n self.string_sets = string_sets or {}\n self.bounded_tries = bounded_tries or {}", "docstring": "Create a MetricUpdates object.\n\nArgs:\n counters: Dictionary of MetricKey:MetricUpdate updates.\n distributions: Dictionary of MetricKey:MetricUpdate objects.\n gauges: Dictionary of MetricKey:MetricUpdate objects.\n string_sets: Dictionary of MetricKey:MetricUpdate objects.\n bounded_tries: Dictionary of MetricKey:MetricUpdate objects.", "source": "github_repos"} -{"code": "def __init__(self, dataset=None, components=None, element_spec=None):\n super(OwnedIterator, self).__init__()\n if dataset is None:\n if components is None or element_spec is None:\n raise ValueError('When `dataset` is not provided, both `components` and `element_spec` must be specified.')\n self._element_spec = element_spec\n self._flat_output_types = structure.get_flat_tensor_types(self._element_spec)\n self._flat_output_shapes = structure.get_flat_tensor_shapes(self._element_spec)\n self._components = components\n self._iterator_resource, = components\n else:\n if components is not None or element_spec is not None:\n raise ValueError('When `dataset` is provided, `element_spec` and `components` must not be specified.')\n self._create_iterator(dataset)\n self._get_next_call_count = 0", "docstring": "Creates a new iterator from the given dataset.\n\n If `dataset` is not specified, the iterator will be created from the given\n tensor components and element structure. In particular, the alternative for\n constructing the iterator is used when the iterator is reconstructed from\n it `CompositeTensor` representation.\n\nArgs:\n dataset: A `tf.data.Dataset` object.\n components: Tensor components to construct the iterator from.\n element_spec: A (nested) structure of `TypeSpec` objects that\n represents the type specification of elements of the iterator.\n\nRaises:\n ValueError: If `dataset` is not provided and either `components` or\n `element_spec` is not provided. Or `dataset` is provided and either\n `components` and `element_spec` is provided.", "source": "github_repos"} -{"code": "def exp(x):\n return math_ops.exp(x)", "docstring": "Element-wise exponential.\n\nArgs:\n x: Tensor or variable.\n\nReturns:\n A tensor.", "source": "github_repos"} -{"code": "def random_shuffle(value, seed=None, name=None):\n seed1, seed2 = random_seed.get_seed(seed)\n return gen_random_ops.random_shuffle(value, seed=seed1, seed2=seed2, name=name)", "docstring": "Randomly shuffles a tensor along its first dimension.\n\n The tensor is shuffled along dimension 0, such that each `value[j]` is mapped\n to one and only one `output[i]`. For example, a mapping that might occur for a\n 3x2 tensor is:\n\n ```python\n [[1, 2], [[5, 6],\n [3, 4], ==> [1, 2],\n [5, 6]] [3, 4]]\n ```\n\nArgs:\n value: A Tensor to be shuffled.\n seed: A Python integer. 
Used to create a random seed for the distribution.\n See\n `tf.random.set_seed`\n for behavior.\n name: A name for the operation (optional).\n\nReturns:\n A tensor of same shape and type as `value`, shuffled along its first\n dimension.", "source": "github_repos"} -{"code": "def _is_device_list_single_worker(devices):\n specs = []\n for d in devices:\n name = d.name if isinstance(d, context.LogicalDevice) else d\n specs.append(tf_device.DeviceSpec.from_string(name))\n num_workers = len({(d.job, d.task, d.replica) for d in specs})\n all_local = all((d.job in (None, 'localhost') for d in specs))\n any_local = any((d.job in (None, 'localhost') for d in specs))\n if any_local and (not all_local):\n raise ValueError(\"Local device should have only 'localhost' in the job field in device string. E.g. 'job:localhost' in /job:localhost/replica:0/task:0/device:CPU:0Devices cannot have mixed list of device strings containing both localhost and other job types such as worker, ps etc. \")\n if num_workers == 1 and (not all_local):\n if any((d.task is None for d in specs)):\n raise ValueError(\"Remote device string must have task specified.E.g. 'task:0' in /job:worker/replica:0/task:0/device:CPU:0\")\n return num_workers == 1", "docstring": "Checks whether the devices list is for single or multi-worker.\n\nArgs:\n devices: a list of device strings or tf.config.LogicalDevice objects, for\n either local or for remote devices.\n\nReturns:\n a boolean indicating whether these device strings are for local or for\n remote.\n\nRaises:\n ValueError: if device strings are not consistent.", "source": "github_repos"} -{"code": "def on_test_batch_end(self, batch, logs=None):", "docstring": "Called at the end of a batch in `evaluate` methods.\n\n Also called at the end of a validation batch in the `fit`\n methods, if validation data is provided.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `tf.keras.Model` is set to `N`, this method will only be called every `N`\n batches.\n\nArgs:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. 
Aggregated metric results up until this batch.", "source": "github_repos"} -{"code": "def make_reduce_tests(reduce_op, min_value=-10, max_value=10, boolean_tensor_only=False, allow_fully_quantize=False):\n\n def f(options):\n \"\"\"Actual function that generates examples.\"\"\"\n test_parameters = [{'input_dtype': [tf.float32, tf.int32, tf.int64], 'input_shape': [[3, 3, 2, 4]], 'axis': [0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0], [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1], [-1, 0], [-1, -2, -3]], 'const_axis': [True, False], 'keepdims': [True, False], 'fully_quantize': [False]}, {'input_dtype': [tf.float32], 'input_shape': [[1, 8, 8, 3]], 'axis': [0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3], [3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2, -3, -4, [0, -2], [2, 3, 1, 0], [3, 1, 2], [3, -4]], 'const_axis': [True, False], 'keepdims': [True, False], 'fully_quantize': [False]}, {'input_dtype': [tf.float32], 'input_shape': [[], [1, 8, 8, 3], [3, 2, 4]], 'axis': [[]], 'const_axis': [False], 'keepdims': [True, False], 'fully_quantize': [False]}, {'input_dtype': [tf.float32], 'input_shape': [[], [1, 8, 8, 3], [3, 2, 4]], 'axis': [None], 'const_axis': [True], 'keepdims': [True, False], 'fully_quantize': [False]}, {'input_dtype': [tf.float32], 'input_shape': [[3, 3, 2, 4]], 'axis': [0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0], [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1], [-1, 0], [-1, -2, -3]], 'const_axis': [True], 'keepdims': [True, False], 'fully_quantize': [True]}, {'input_dtype': [tf.float32], 'input_shape': [[1, 8, 8, 4], [1, 8, 8, 3]], 'axis': [0, 1, 2, 3, [0], [1], [2], [3], [-1], [-2], [-3], [1, 2], [0, 3], [1, 2, 3], [1, 3], [2, 3]], 'const_axis': [True], 'keepdims': [True, False], 'fully_quantize': [True]}, {'input_dtype': [tf.float32, tf.int32], 'input_shape': [[2, 0, 2], [0]], 'axis': [0], 'const_axis': [True], 'keepdims': [True, False], 'fully_quantize': [False]}]\n if not allow_fully_quantize:\n test_parameters = [test_parameter for test_parameter in test_parameters if True not in test_parameter['fully_quantize']]\n\n def build_graph(parameters):\n \"\"\"Build the mean op testing graph.\"\"\"\n dtype = parameters['input_dtype']\n if boolean_tensor_only:\n dtype = tf.bool\n input_tensor = tf.compat.v1.placeholder(dtype=dtype, name='input', shape=parameters['input_shape'])\n if parameters['const_axis']:\n axis = parameters['axis']\n input_tensors = [input_tensor]\n else:\n if isinstance(parameters['axis'], list):\n shape = [len(parameters['axis'])]\n else:\n shape = []\n axis = tf.compat.v1.placeholder(dtype=tf.int32, name='axis', shape=shape)\n input_tensors = [input_tensor, axis]\n out = reduce_op(input_tensor, axis=axis, keepdims=parameters['keepdims'])\n return (input_tensors, [out])\n\n def build_inputs(parameters, sess, inputs, outputs):\n \"\"\"Build the inputs for reduced operators.\"\"\"\n dtype = parameters['input_dtype']\n if boolean_tensor_only:\n dtype = tf.bool\n values = [create_tensor_data(dtype, parameters['input_shape'], min_value=min_value, max_value=max_value)]\n if not parameters['const_axis']:\n values.append(np.array(parameters['axis']))\n return (values, sess.run(outputs, feed_dict=dict(zip(inputs, values))))\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n return f", "docstring": "Make a set of tests to do reduce operation.\n\nArgs:\n reduce_op: TensorFlow reduce operation to test, i.e. 
`tf.reduce_mean`.\n min_value: min value for created tensor data.\n max_value: max value for created tensor data.\n boolean_tensor_only: If true, will only generate tensor with boolean value.\n allow_fully_quantize: bool, whether fully_quantize is allowed.\n\nReturns:\n a function representing the true generator with `reduce_op_in` curried.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, lang_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n outputs = self.roberta(input_ids, lang_ids=lang_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n lm_loss = None\n if labels is not None:\n lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return (lm_loss,) + output if lm_loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "lang_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of the language adapters that should be activated for each sample, respectively. Default: the index\n that corresponds to `self.config.default_language`.\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, XmodForCausalLM, AutoConfig\n >>> import torch\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"FacebookAI/xlm-roberta-base\")\n >>> config = AutoConfig.from_pretrained(\"facebook/xmod-base\")\n >>> config.is_decoder = True\n >>> model = XmodForCausalLM.from_pretrained(\"facebook/xmod-base\", config=config)\n >>> model.set_default_language(\"en_XX\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```", "source": "github_repos"} -{"code": "def forward(self, x):\n embeddings = self.embedding_convPxP(x).flatten(2)\n embeddings = nn.functional.pad(embeddings, (1, 0))\n embeddings = embeddings.permute(0, 2, 1)\n batch_size, sequence_length, embedding_dim = embeddings.shape\n embeddings = embeddings + self.positional_encoding_1d(batch_size, sequence_length, embedding_dim, device=embeddings.device, dtype=embeddings.dtype)\n for i in range(4):\n embeddings = self.transformer_encoder[i](embeddings)\n return embeddings", "docstring": "Forward pass\n\nArgs:\n x (torch.Tensor - NCHW): Input feature tensor\n\nReturns:\n torch.Tensor - Transformer output embeddings of shape (batch_size, sequence_length, embedding_dim)", "source": "github_repos"} -{"code": "def __init__(self, config: XLMConfig):\n super().__init__()\n self.summary_type = getattr(config, 'summary_type', 'last')\n if self.summary_type == 'attn':\n raise NotImplementedError\n self.summary = nn.Identity()\n if hasattr(config, 'summary_use_proj') and config.summary_use_proj:\n if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and (config.num_labels > 0):\n num_classes = config.num_labels\n else:\n num_classes = config.hidden_size\n self.summary = nn.Linear(config.hidden_size, num_classes)\n activation_string = getattr(config, 'summary_activation', None)\n self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()\n self.first_dropout = nn.Identity()\n if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:\n self.first_dropout = nn.Dropout(config.summary_first_dropout)\n self.last_dropout = nn.Identity()\n if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:\n self.last_dropout = nn.Dropout(config.summary_last_dropout)", "docstring": "Compute a single vector summary of a sequence hidden states.\n\nArgs:\n config ([`XLMConfig`]):\n The config used by the model. Relevant arguments in the config class of the model are (refer to the actual\n config class of your model for the default values it uses):\n\n - **summary_type** (`str`) -- The method to use to make this summary. 
Accepted values are:\n\n - `\"last\"` -- Take the last token hidden state (like XLNet)\n - `\"first\"` -- Take the first token hidden state (like Bert)\n - `\"mean\"` -- Take the mean of all tokens hidden states\n - `\"cls_index\"` -- Supply a Tensor of classification token position (GPT/GPT-2)\n - `\"attn\"` -- Not implemented now, use multi-head attention\n\n - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.\n - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes\n (otherwise to `config.hidden_size`).\n - **summary_activation** (`Optional[str]`) -- Set to `\"tanh\"` to add a tanh activation to the output,\n another string or `None` will add no activation.\n - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.\n - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.", "source": "github_repos"} -{"code": "def debug_string(self, indent: int=0) -> str:\n self_repr = f'{'| ' * indent}{self.__class__.__name__}<{repr(self)}>'\n if self._children:\n child_repr = '\\n'.join((child.debug_string(indent=indent + 1) for child in self._children))\n self_repr = f'{self_repr}\\n{child_repr}'\n return self_repr", "docstring": "Returns a debug string for the tree rooted at this node.\n\nArgs:\n indent: The level of indentation to begin printing the tree.\n\nReturns:\n A string representing this node and its descendant nodes.", "source": "github_repos"} -{"code": "def hash_file(fpath, algorithm='sha256', chunk_size=65535):\n if isinstance(algorithm, str):\n hasher = resolve_hasher(algorithm)\n else:\n hasher = algorithm\n with open(fpath, 'rb') as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n hasher.update(chunk)\n return hasher.hexdigest()", "docstring": "Calculates a file sha256 or md5 hash.\n\nExample:\n >>> hash_file('/path/to/file.zip')\n 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'\n\nArgs:\n fpath: Path to the file being validated.\n algorithm: Hash algorithm, one of `\"auto\"`, `\"sha256\"`, or `\"md5\"`.\n The default `\"auto\"` detects the hash algorithm in use.\n chunk_size: Bytes to read at a time, important for large files.\n\nReturns:\n The file hash.", "source": "github_repos"} -{"code": "def _validate_or_infer_batch_size(self, batch_size, steps, x):\n if isinstance(x, (data_types.DatasetV1, data_types.DatasetV2, data_utils.Sequence)) or tf_inspect.isgenerator(x):\n if batch_size is not None:\n raise ValueError('The `batch_size` argument must not be specified for the given input type. 
Received input: {}, batch_size: {}'.format(x, batch_size))\n return\n layers = self._flatten_layers(include_self=False, recursive=False)\n first_layer = next(layers, None)\n if first_layer:\n static_batch_size = training_utils.get_static_batch_size(first_layer)\n if static_batch_size is not None:\n if self._distribution_strategy and distributed_training_utils.global_batch_size_supported(self._distribution_strategy):\n num_splits_for_ds = self._distribution_strategy.num_replicas_in_sync\n else:\n num_splits_for_ds = 1\n if batch_size is not None:\n if batch_size % num_splits_for_ds != 0:\n raise ValueError('The `batch_size` argument ({}) must be divisible the by number of replicas ({})'.format(batch_size, num_splits_for_ds))\n per_replica_batch_size = batch_size // num_splits_for_ds\n if per_replica_batch_size != static_batch_size:\n raise ValueError('The `batch_size` argument value {} is incompatible with the specified batch size of your Input Layer: {}'.format(per_replica_batch_size, static_batch_size))\n if isinstance(x, (data_types.DatasetV2, iterator_ops.Iterator, iterator_ops.IteratorBase)):\n ds_batch_size = tensor_shape.Dimension(nest.flatten(dataset_ops.get_legacy_output_shapes(x))[0][0]).value\n if ds_batch_size is not None:\n if ds_batch_size % num_splits_for_ds != 0:\n raise ValueError('The batch output shape of your `Dataset` {} cannot be divisible by number of replicas {}'.format(ds_batch_size, num_splits_for_ds))\n ds_per_replica_batch_size = ds_batch_size // num_splits_for_ds\n if ds_per_replica_batch_size != static_batch_size:\n raise ValueError('The batch output shape of your `Dataset` is {}, which is incompatible with the specified batch size of your Input Layer: {}'.format(ds_per_replica_batch_size, static_batch_size))\n if steps is None:\n batch_size = static_batch_size * num_splits_for_ds\n if batch_size is None and steps is None:\n batch_size = 32\n return batch_size", "docstring": "Validates that the `batch_size` provided is consistent with InputLayer.\n\n It's possible that the user specified a static batch size in their\n InputLayer. If so, this method checks the provided `batch_size` and `x`\n arguments are consistent with this static batch size. Also, if\n `batch_size` is `None`, this method will attempt to infer the batch size\n from the static batch size of the InputLayer. Lastly, ValueError will be\n raised if `x` is a tf.data.Dataset and `batch_size` is specified as we\n expect users to provide batched datasets.\n\nArgs:\n batch_size: The batch_size provided as an argument to\n fit/evaluate/predict.\n steps: The steps provided as an argument to fit/evaluate/predict.\n x: The data passed as `x` to fit/evaluate/predict.\n\nReturns:\n The validated batch_size, auto-inferred from the first layer if not\n provided.", "source": "github_repos"} -{"code": "def input_shape(self):\n return nest.map_structure(backend.int_shape, self.input)", "docstring": "Retrieves the input shape(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. 
if it is connected to one incoming layer, or if all inputs\n have the same shape.\n\nReturns:\n Input shape, as an integer shape tuple\n (or list of shape tuples, one tuple per input tensor).\n\nRaises:\n AttributeError: if the layer has no defined input_shape.\n RuntimeError: if called in Eager mode.", "source": "github_repos"} -{"code": "def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op=None):\n input_tensors, shape = _flatten_tensors(input_tensors)\n dst_devices = [t.device for t in input_tensors]\n reduced_shards = _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op)\n output_tensors = _build_shuffle_scatter(reduced_shards, dst_devices)\n if len(shape) != 1:\n output_tensors = _reshape_tensors(output_tensors, shape)\n return output_tensors", "docstring": "Construct a subgraph for shuffle all-reduce.\n\n Shuffle reduce is essentially the algorithm implemented when using\n parameter servers. Suppose tensor length is n, there are d devices\n and g gather shards. Each device sends a n/g length sub-tensor to\n each gather shard. The gather shards perform a reduction across d\n fragments, then broadcast the result back to each device. The\n devices then join the g fully reduced fragments they receive from\n the shards. The gather shards could perform d-1 pairwise\n reductions, or one d-way reduction. The first is better where\n reduction Op time is low compared to transmission time, the second\n better in the other case.\n\nArgs:\n input_tensors: list of `tf.Tensor` values to be reduced.\n gather_devices: list of names of devices on which reduction shards\n should be placed.\n red_op: an n-array elementwise reduction Op\n un_op: optional elementwise unary Op to be applied to fully-reduced values.\n\nReturns:\n list of `tf.Tensor` which are the fully reduced tensors.", "source": "github_repos"} -{"code": "def _find_children_hints_in_while_loop(function_def, nodes_mapping):\n new_nodes = []\n for node in function_def.node_def:\n for i, _ in enumerate(node.input):\n if node.input[i] in nodes_mapping:\n node.input[i] = nodes_mapping[node.input[i]]\n new_nodes.append(_copy.deepcopy(node))\n name_to_seq_num = _extract_topology_sequence_mapping(function_def.node_def)\n children_hints = _find_all_hints_in_nodes(new_nodes)\n children_hints_q = []\n for hint in children_hints.values():\n _, output_names = hint.flattened_inputs_and_outputs()\n seq = name_to_seq_num[output_names[0]]\n for output_name in output_names:\n seq = min(seq, name_to_seq_num[output_name])\n children_hints_q.append((seq, hint))\n children_hints_q.sort(key=lambda tup: tup[0])\n ordered_children_hints = [x[1] for x in children_hints_q]\n return (ordered_children_hints, new_nodes)", "docstring": "Find children hints and all nodes inside the while loop.\n\nArgs:\n function_def: Function def of the while loop.\n nodes_mapping: While loop input_arg : real node name.\n\nReturns:\n Ordered children hints and all re-mapped nodes inside the while loop.", "source": "github_repos"} -{"code": "def _benchmarkRunOpPrebuilt(self, name, target, iters):\n times = []\n with ops.Graph().as_default():\n v = variables.Variable(random_ops.random_normal([]))\n with session.Session(target) as sess:\n sess.run(v.initializer)\n runner = sess.make_callable(v.op)\n runner()\n for _ in range(iters):\n start_time = time.time()\n runner()\n end_time = time.time()\n times.append(end_time - start_time)\n print('%s %f' % (name, np.median(times)))\n self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", 
"docstring": "Runs a microbenchmark to measure the cost of running an op.\n\n Reports the median cost of running a trivial (Variable) op.\n\nArgs:\n name: A human-readable name for logging the output.\n target: The session target to use for the benchmark.\n iters: The number of iterations to perform.", "source": "github_repos"} -{"code": "def __init__(self, weights='int8', activations=None, modules_to_not_convert: Optional[List]=None, **kwargs):\n self.quant_method = QuantizationMethod.QUANTO\n self.weights = weights\n self.activations = activations\n self.modules_to_not_convert = modules_to_not_convert\n self.post_init()", "docstring": "This is a wrapper class about all possible attributes and features that you can play with a model that has been\n loaded using `quanto`.\n\nArgs:\n weights (`str`, *optional*, defaults to `\"int8\"`):\n The target dtype for the weights after quantization. Supported values are (\"float8\",\"int8\",\"int4\",\"int2\")\n activations (`str`, *optional*):\n The target dtype for the activations after quantization. Supported values are (None,\"int8\",\"float8\")\n modules_to_not_convert (`list`, *optional*, default to `None`):\n The list of modules to not quantize, useful for quantizing models that explicitly require to have\n some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).", "source": "github_repos"} -{"code": "def make_raw_assign_fn(raw_assign_fn, use_handle=True):\n\n def assign_fn(var, value, use_locking=False, name=None, read_value=True):\n del use_locking\n handle = var.handle if use_handle else var\n with _maybe_enter_graph(handle), _maybe_on_device(var):\n op = raw_assign_fn(handle, ops.convert_to_tensor(value, dtype=var.dtype), name=name)\n with ops.control_dependencies([op]):\n if read_value:\n return var._read_variable_op() if use_handle else var.read_value()\n else:\n return op\n return assign_fn", "docstring": "Wrap `raw_assign_fn` with the proper graph context and device scope.\n\nArgs:\n raw_assign_fn: the function to be wrapped.\n use_handle: if True, the `raw_assign_fn` will be applied to the handle of a\n variable; otherwise it will be applied to the variable itself.\n\nReturns:\n The wrapped function.", "source": "github_repos"} -{"code": "def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, past_observed_mask: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForClassificationOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n model_output = self.model(past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True)\n y_hat = self.head(model_output.last_hidden_state)\n loss_val = None\n if target_values is not None:\n loss = nn.CrossEntropyLoss()\n loss_val = loss(y_hat, target_values)\n if not return_dict:\n outputs = (y_hat,) + model_output[1:-3]\n outputs = (loss_val,) + outputs if loss_val is not None else outputs\n return outputs\n return PatchTSTForClassificationOutput(loss=loss_val, prediction_logits=y_hat, hidden_states=model_output.hidden_states, attentions=model_output.attentions)", "docstring": "past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):\n Input sequence to the model\n target_values (`torch.Tensor`, *optional*):\n Labels associates with 
the `past_values`\n past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):\n Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected\n in `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).\n\nExample:\n ```python\n >>> from transformers import PatchTSTConfig, PatchTSTForClassification\n\n >>> # classification task with two input channel2 and 3 classes\n >>> config = PatchTSTConfig(\n ... num_input_channels=2,\n ... num_targets=3,\n ... context_length=512,\n ... patch_length=12,\n ... stride=12,\n ... use_cls_token=True,\n ... )\n >>> model = PatchTSTForClassification(config=config)\n\n >>> # during inference, one only provides past values\n >>> past_values = torch.randn(20, 512, 2)\n >>> outputs = model(past_values=past_values)\n >>> labels = outputs.prediction_logits\n ```", "source": "github_repos"} -{"code": "def with_transform(self, transform: MLTransformProvider):\n self._validate_transform(transform)\n self.transforms.append(transform)\n return self", "docstring": "Add a transform to the MLTransform pipeline.\n\nArgs:\n transform: A BaseOperation instance.\n\nReturns:\n A MLTransform instance.", "source": "github_repos"} -{"code": "def from_np_datetimes(np_datetimes):\n ordinals = tf.constant(np_datetimes, dtype=tf.int32) + _ORDINAL_OF_1_1_1970\n return from_ordinals(ordinals, validate=False)", "docstring": "Creates DateTensor from a Numpy array of dtype datetime64.\n\nArgs:\n np_datetimes: Numpy array of dtype datetime64.\n\nReturns:\n DateTensor object.\n\n #### Example\n\n ```python\n import datetime\n import numpy as np\n\n date_tensor_np = np.array(\n [[datetime.date(2019, 3, 25), datetime.date(2020, 6, 2)],\n [datetime.date(2020, 9, 15), datetime.date(2020, 12, 27)]],\n dtype=np.datetime64)\n\n date_tensor = tff.datetime.dates_from_np_datetimes(date_tensor_np)\n ```", "source": "github_repos"} -{"code": "def get_cross_attention_token_mask(input_ids: List[int], image_token_id: int) -> List[List[int]]:\n image_token_locations = [i for i, token in enumerate(input_ids) if token == image_token_id]\n if len(image_token_locations) == 0:\n return []\n if len(image_token_locations) == 1:\n return [[image_token_locations[0], -1]]\n vision_masks = [[loc1, loc2] for loc1, loc2 in zip(image_token_locations[:-1], image_token_locations[1:])]\n vision_masks.append([image_token_locations[-1], len(input_ids)])\n last_mask_end = vision_masks[-1][1]\n for vision_mask in vision_masks[::-1]:\n if vision_mask[0] == vision_mask[1] - 1:\n vision_mask[1] = last_mask_end\n last_mask_end = vision_mask[1]\n return vision_masks", "docstring": "Generate a cross-attention token mask for image tokens in the input sequence.\n\n This function identifies the positions of image tokens in the input sequence and creates\n a mask that defines which subsequent tokens each image token should attend to.\n\nArgs:\n input_ids (List[int]): A list of token ids representing the input sequence.\n image_token_id (int): The id of the token used to represent images in the sequence.\n\nReturns:\n List[List[int]]: A list of [start, end] pairs, where each pair represents the range\n of tokens an image token should attend to.\n\nNote:\n - If no image tokens are present, an empty list is returned.\n - For a single image token, it attends to all subsequent tokens until the end of the sequence.\n - For multiple image tokens, each attends to tokens up 
to the next image token or the end of the sequence.\n - Consecutive image tokens are treated as a group and attend to all subsequent tokens together.", "source": "github_repos"} -{"code": "def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n key_states = key_states.to(self.key_cache[layer_idx].dtype)\n value_states = value_states.to(self.value_cache[layer_idx].dtype)\n if layer_idx == 0:\n self._seen_tokens += key_states.shape[-2]\n k_out = self.key_cache[0]\n v_out = self.value_cache[0]\n else:\n if self._prefetch_stream is not None:\n torch.cuda.default_stream(self.device).wait_stream(self._prefetch_stream)\n k_out = self._device_key_cache[layer_idx & 1]\n v_out = self._device_value_cache[layer_idx & 1]\n self._prefetch_layer(layer_idx + 1)\n cache_position = cache_kwargs.get('cache_position') if cache_kwargs is not None else None\n if cache_position is None:\n k_out.copy_(key_states)\n v_out.copy_(value_states)\n if layer_idx == 0:\n self.key_cache[layer_idx].copy_(key_states.to(self.offload_device))\n self.value_cache[layer_idx].copy_(value_states.to(self.offload_device))\n else:\n try:\n k_out.index_copy_(2, cache_position, key_states)\n v_out.index_copy_(2, cache_position, value_states)\n except NotImplementedError:\n k_out[:, :, cache_position] = key_states\n v_out[:, :, cache_position] = value_states\n if layer_idx != 0:\n cache_position = cache_position.to(self.offload_device)\n key_states = key_states.to(self.offload_device)\n value_states = value_states.to(self.offload_device)\n try:\n self.key_cache[layer_idx].index_copy_(2, cache_position, key_states)\n self.value_cache[layer_idx].index_copy_(2, cache_position, value_states)\n except NotImplementedError:\n self.key_cache[layer_idx][:, :, cache_position] = key_states\n self.value_cache[layer_idx][:, :, cache_position] = value_states\n return (k_out, v_out)", "docstring": "Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.\n It is VERY important to index using a tensor, otherwise you introduce a copy to the device.\n\n Parameters:\n key_states (`torch.Tensor`):\n The new key states to cache.\n value_states (`torch.Tensor`):\n The new value states to cache.\n layer_idx (`int`):\n The index of the layer to cache the states for.\n cache_kwargs (`Dict[str, Any]`, *optional*):\n Additional arguments for the cache subclass. 
The `OffloadedStaticCache` needs the\n `cache_position` input to know how where to write in the cache.\n\nReturns:\n A tuple containing the updated key and value states.", "source": "github_repos"} -{"code": "def to_tensors(self, value: Any) -> List[core.Tensor]:\n del value\n return []", "docstring": "Breaks down a value of this type into Tensors.\n\n For a TraceType instance, the number of tensors generated for corresponding\n value should be constant.\n\nArgs:\n value: A value belonging to this TraceType\n\nReturns:\n List of Tensors.", "source": "github_repos"} -{"code": "def _create_valueset_codes_table_if_not_exists(self) -> bigquery.table.Table:\n schema = [bigquery.SchemaField('valueseturi', 'STRING', mode='REQUIRED'), bigquery.SchemaField('valuesetversion', 'STRING', mode='NULLABLE'), bigquery.SchemaField('system', 'STRING', mode='REQUIRED'), bigquery.SchemaField('code', 'STRING', mode='REQUIRED')]\n table = bigquery.Table(self._value_set_codes_table, schema=schema)\n table.clustering_fields = ['valueseturi', 'code']\n return self._client.create_table(table, exists_ok=True)", "docstring": "Creates a table for storing value set code mappings.\n\n Creates a table named after the `value_set_codes_table` provided at class\n initialization as described by\n https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md#valueset-support\n\n If the table already exists, no action is taken.\n\nReturns:\n An bigquery.Table object representing the created table.", "source": "github_repos"} -{"code": "def _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds, class_func, seed, name=None) -> DatasetV2:\n\n def maybe_warn_on_large_rejection(accept_dist, initial_dist):\n proportion_rejected = math_ops.reduce_sum((1 - accept_dist) * initial_dist)\n return cond.cond(math_ops.less(proportion_rejected, 0.5), lambda: accept_dist, lambda: logging_ops.Print(accept_dist, [proportion_rejected, initial_dist, accept_dist], message='Proportion of examples rejected by sampler is high: ', summarize=100, first_n=10))\n acceptance_dist_ds = DatasetV2.zip((acceptance_dist_ds, initial_dist_ds), name=name).map(maybe_warn_on_large_rejection, name=name)\n\n def _gather_and_copy(acceptance_prob, data):\n if isinstance(data, tuple):\n class_val = class_func(*data)\n else:\n class_val = class_func(data)\n return (class_val, array_ops.gather(acceptance_prob, class_val), data)\n current_probabilities_and_class_and_data_ds = DatasetV2.zip((acceptance_dist_ds, dataset), name=name).map(_gather_and_copy, name=name)\n\n def _reject(unused_class_val, p, unused_data):\n return random_ops.random_uniform([], seed=seed, dtype=p.dtype) < p\n filtered_ds = current_probabilities_and_class_and_data_ds.filter(_reject, name=name)\n return filtered_ds.map(lambda class_value, _, data: (class_value, data), name=name)", "docstring": "Filters a dataset based on per-class acceptance probabilities.\n\nArgs:\n dataset: The dataset to be filtered.\n acceptance_dist_ds: A dataset of acceptance probabilities.\n initial_dist_ds: A dataset of the initial probability distribution, given or\n estimated.\n class_func: A function mapping an element of the input dataset to a scalar\n `tf.int32` tensor. Values should be in `[0, num_classes)`.\n seed: (Optional.) Python integer seed for the resampler.\n name: (Optional.) 
A name for the tf.data operation.\n\nReturns:\n A dataset of (class value, data) after filtering.", "source": "github_repos"} -{"code": "def testGradient(self, params, indices, expected_out, out_grad, expected_grad, params_ragged_rank=None):\n if context.executing_eagerly():\n return\n params = ragged_factory_ops.constant(params, dtype=dtypes.float32, ragged_rank=params_ragged_rank)\n indices = constant_op.constant(indices, dtype=dtypes.int32)\n out_ragged_rank = params.ragged_rank + indices.shape.ndims - 1\n out_grad = ragged_factory_ops.constant(out_grad, dtype=dtypes.float32, ragged_rank=out_ragged_rank)\n expected_out = ragged_factory_ops.constant(expected_out, dtype=dtypes.float32, ragged_rank=out_ragged_rank)\n expected_grad = ragged_factory_ops.constant(expected_grad, dtype=dtypes.float32, ragged_rank=params.ragged_rank)\n out = ragged_gather_ops.gather(params, indices)\n self.assertAllClose(out, expected_out)\n grads = gradients_impl.gradients(out.flat_values, params.nested_row_splits + (params.flat_values, indices), out_grad.flat_values)\n param_nested_splits_grads = grads[:-2]\n params_flat_values_grad = grads[-2]\n indices_grad = grads[-1]\n self.assertEqual(indices_grad, None)\n for splits_grad in param_nested_splits_grads:\n self.assertEqual(splits_grad, None)\n self.assertIsInstance(params_flat_values_grad, indexed_slices.IndexedSlices)\n params_flat_values_grad = ops.convert_to_tensor(params_flat_values_grad)\n params_grad = params.with_flat_values(params_flat_values_grad)\n self.assertAllClose(params_grad, expected_grad, atol=2e-06, rtol=2e-06)", "docstring": "Tests that ragged_gather generates the right gradient.\n\nArgs:\n params: The `params` that should be passed to `gather`.\n indices: The `indices` that should be passed to `gather`.\n expected_out: The expected value of `gather(params, indices)`.\n `expected_out.shape = indices.shape + params.shape[1:]`.\n out_grad: The value that should be fed in as the gradient for `out`\n when testing the gradient of `ragged_gather`. Must have the same\n shape as `expected_out`.\n expected_grad: The expected gradient for that should be returned for\n `params`. Must have hte same shape as `params`.\n params_ragged_rank: The ragged_rank of `params`.", "source": "github_repos"} -{"code": "def compute_token_logits(sequence_output, temperature, output_weights, output_bias):\n logits = (torch.einsum('bsj,j->bs', sequence_output, output_weights) + output_bias) / temperature\n return logits", "docstring": "Computes logits per token\n\nArgs:\n sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Also known as last_hidden_state. 
Sequence of hidden-states at the output of the last layer of the model.\n temperature (`float`):\n Temperature for the Bernoulli distribution.\n output_weights (`torch.FloatTensor` of shape `(hidden_size,)`):\n Weights of the linear layer for cell selection.\n output_bias (`torch.FloatTensor` of shape `()`):\n Bias of the linear layer for cell selection\n\nReturns:\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token.", "source": "github_repos"} -{"code": "def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, Tuple[int, int, int]]=0) -> 'torch.Tensor':\n height, width = get_image_size(images, ChannelDimension.FIRST)\n if height == width:\n return images\n num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0]\n if isinstance(background_color, int):\n background_color = [background_color] + [0] * (num_channels - 1)\n elif len(background_color) != num_channels:\n raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels')\n max_dim = max(height, width)\n paste_x_left = (max_dim - width) // 2\n paste_y_left = (max_dim - height) // 2\n paste_x_right = max_dim - width - paste_x_left\n paste_y_right = max_dim - height - paste_y_left\n padded_images = F.pad(images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color)\n return padded_images", "docstring": "Pads an image to a square based on the longest edge.\n\nArgs:\n images (`np.ndarray`):\n The images to pad.\n background_color (`int` or `Tuple[int, int, int]`, *optional*, defaults to 0):\n The color to use for the padding. Can be an integer for single channel or a\n tuple of integers representing for multi-channel images. If passed as integer\n in mutli-channel mode, it will default to `0` in subsequent channels.\n\nReturns:\n `torch.Tensor`: The padded images.", "source": "github_repos"} -{"code": "def __init__(self, default: typing.Optional[bool]=MISSING_VALUE, is_noneable: bool=False, frozen: bool=False):\n super().__init__(bool, default, is_noneable=is_noneable, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\n default: Default value for the value spec.\n is_noneable: If True, None is acceptable.\n frozen: If True, values other than the default value is not accceptable.", "source": "github_repos"} -{"code": "def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:\n if return_attention_mask is None:\n return_attention_mask = 'attention_mask' in self.model_input_names\n if padding_strategy == PaddingStrategy.LONGEST:\n max_length = len(encoded_inputs['input_ids'])\n if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of\n needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs['input_ids']) != max_length\n if return_attention_mask and 'attention_mask' not in encoded_inputs:\n encoded_inputs['attention_mask'] = [1] * len(encoded_inputs['input_ids'])\n if needs_to_be_padded:\n difference = max_length - len(encoded_inputs['input_ids'])\n padding_side = padding_side if padding_side is not None else self.padding_side\n if padding_side == 'right':\n if 
return_attention_mask:\n encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference\n if 'token_type_ids' in encoded_inputs:\n encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [[self.pad_token_type_id] * 7] * difference\n if 'labels' in encoded_inputs:\n encoded_inputs['labels'] = encoded_inputs['labels'] + [0] * difference\n if 'numeric_values' in encoded_inputs:\n encoded_inputs['numeric_values'] = encoded_inputs['numeric_values'] + [float('nan')] * difference\n if 'numeric_values_scale' in encoded_inputs:\n encoded_inputs['numeric_values_scale'] = encoded_inputs['numeric_values_scale'] + [1.0] * difference\n if 'special_tokens_mask' in encoded_inputs:\n encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference\n encoded_inputs['input_ids'] = encoded_inputs['input_ids'] + [self.pad_token_id] * difference\n elif padding_side == 'left':\n if return_attention_mask:\n encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']\n if 'token_type_ids' in encoded_inputs:\n encoded_inputs['token_type_ids'] = [[self.pad_token_type_id] * 7] * difference + encoded_inputs['token_type_ids']\n if 'labels' in encoded_inputs:\n encoded_inputs['labels'] = [0] * difference + encoded_inputs['labels']\n if 'numeric_values' in encoded_inputs:\n encoded_inputs['numeric_values'] = [float('nan')] * difference + encoded_inputs['numeric_values']\n if 'numeric_values_scale' in encoded_inputs:\n encoded_inputs['numeric_values_scale'] = [1.0] * difference + encoded_inputs['numeric_values_scale']\n if 'special_tokens_mask' in encoded_inputs:\n encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']\n encoded_inputs['input_ids'] = [self.pad_token_id] * difference + encoded_inputs['input_ids']\n else:\n raise ValueError('Invalid padding strategy:' + str(padding_side))\n return encoded_inputs", "docstring": "Pad encoded inputs (on left/right and up to predefined length or max length in the batch)\n\nArgs:\n encoded_inputs:\n Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n max_length: maximum length of the returned list and optionally padding length (see below).\n Will truncate by taking into account the special tokens.\n padding_strategy: PaddingStrategy to use for padding.\n\n - PaddingStrategy.LONGEST Pad to the longest sequence in the batch\n - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)\n - PaddingStrategy.DO_NOT_PAD: Do not pad\n The tokenizer padding sides are defined in self.padding_side:\n\n - 'left': pads on the left of the sequences\n - 'right': pads on the right of the sequences\n pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability\n `>= 7.5` (Volta).\n padding_side:\n The side on which the model should have padding applied. 
Should be selected between ['right', 'left'].\n Default value is picked from the class attribute of the same name.\n return_attention_mask:\n (optional) Set to False to avoid returning attention mask (default: set to model specifics)", "source": "github_repos"} -{"code": "def _lookup_dependency(self, name, cached_dependencies=None):\n if cached_dependencies:\n return cached_dependencies.get(name)\n return self._self_unconditional_dependency_names.get(name)", "docstring": "Look up a dependency by name.\n\n May be overridden to include conditional dependencies.\n\nArgs:\n name: The local name of the dependency.\n cached_dependencies: Optional dict containing all computed dependencies\n returned by `self._trackable_children()`.\n\nReturns:\n A `Trackable` object, or `None` if no dependency by this name was\n found.", "source": "github_repos"} -{"code": "def __init__(self, **kwargs):\n super(_Merge, self).__init__(**kwargs)\n self.supports_masking = True", "docstring": "Initializes a Merge layer.\n\nArgs:\n **kwargs: standard layer keyword arguments.", "source": "github_repos"} -{"code": "def __init__(self, runtime_list, metric_id):\n value = self._prepare_runtime_metrics(runtime_list)\n submit_timestamp = time.time()\n label = runtime_list[0].key.metric.namespace + '_' + RUNTIME_METRIC\n super().__init__(submit_timestamp, metric_id, value, None, label)", "docstring": "The Distribution Metric in ready-to-publish format.\n\nArgs:\n runtime_list: list of distributions metrics from MetricResult\n with runtime name\n metric_id(uuid): unique id to identify test run", "source": "github_repos"} -{"code": "def __setattr__(self, name: str, value: Any) -> None:\n if name.startswith('_'):\n super().__setattr__(name, value)\n else:\n self[name] = value", "docstring": "Set attribute of this Dict.\n\n NOTE(daiyip): When setting attributes, public attributes (not started with\n '_') are set as dict fields, while private attributes (started with '_') are\n set on the object instance.\n\nArgs:\n name: Name of attribute.\n value: Value of attribute.", "source": "github_repos"} -{"code": "def pad(x, pad_width, mode='constant', constant_values=None):\n return Pad(pad_width, mode=mode)(x, constant_values=constant_values)", "docstring": "Pad a tensor.\n\nArgs:\n x: Tensor to pad.\n pad_width: Number of values padded to the edges of each axis.\n `((before_1, after_1), ...(before_N, after_N))` unique pad\n widths for each axis.\n `((before, after),)` yields same before and after pad for\n each axis.\n `(pad,)` or `int` is a shortcut for `before = after = pad`\n width for all axes.\n mode: One of `\"constant\"`, `\"edge\"`, `\"linear_ramp\"`,\n `\"maximum\"`, `\"mean\"`, `\"median\"`, `\"minimum\"`,\n `\"reflect\"`, `\"symmetric\"`, `\"wrap\"`, `\"empty\"`,\n `\"circular\"`. Defaults to `\"constant\"`.\n constant_values: value to pad with if `mode == \"constant\"`.\n Defaults to `0`. 
A `ValueError` is raised if not None and\n `mode != \"constant\"`.\n\nNote:\n Torch backend only supports modes `\"constant\"`, `\"reflect\"`,\n `\"symmetric\"` and `\"circular\"`.\n Only Torch backend supports `\"circular\"` mode.\n\nNote:\n Tensorflow backend only supports modes `\"constant\"`, `\"reflect\"`\n and `\"symmetric\"`.\n\nReturns:\n Padded tensor.", "source": "github_repos"} -{"code": "def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return backend.sparse_categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=axis)", "docstring": "Computes the sparse categorical crossentropy loss.\n\n Standalone usage:\n\n >>> y_true = [1, 2]\n >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]\n >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> loss.numpy()\n array([0.0513, 2.303], dtype=float32)\n\nArgs:\n y_true: Ground truth values.\n y_pred: The predicted values.\n from_logits: Whether `y_pred` is expected to be a logits tensor. By default,\n we assume that `y_pred` encodes a probability distribution.\n axis: Defaults to -1. The dimension along which the entropy is\n computed.\n\nReturns:\n Sparse categorical crossentropy loss value.", "source": "github_repos"} -{"code": "def _get_dependent_variables(input_ops, output_ops):\n output_ops = nest.map_structure(gen_array_ops.identity, output_ops)\n inbetween_ops = op_selector.get_backward_walk_ops(seed_ops=output_ops, stop_at_ts=input_ops, inclusive=False, only_differentiable=True)\n var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES)\n var_names = (op.name for op in var_ops)\n tf_vars = (get_variable_by_name(var_name) for var_name in var_names)\n tf_vars = [v for v in tf_vars if v is not None]\n return tf_vars", "docstring": "Finds variables involved in the subgraph between input_ops and output_ops.\n\nArgs:\n input_ops: Flattened list of input ops\n output_ops: Flattened list of output ops\n\nReturns:\n A list of variables", "source": "github_repos"} -{"code": "def _default_layout(self, layout: layout_lib.Layout):\n previous_default = None\n previous_graph_size = None\n graph = None\n self._register_mesh(layout.mesh)\n try:\n previous_default = self._current_output_layout\n self._current_output_layout = layout.to_string().encode('utf-8')\n _pywrap_dtensor_device.ExperimentalSetDefaultLayout(self._device_info, self._current_output_layout)\n if context.executing_eagerly():\n with ops.device(self.name):\n yield\n else:\n graph = ops.get_default_graph()\n previous_graph_size = len(graph.get_operations())\n yield\n finally:\n if graph is not None:\n for operation in graph.get_operations()[previous_graph_size:]:\n operation._set_attr('_layout', attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(s=[self._current_output_layout])))\n operation._set_attr('_mesh', attr_value_pb2.AttrValue(s=layout.mesh.to_string().encode('utf-8')))\n self._current_output_layout = previous_default\n if self._current_output_layout is None:\n _pywrap_dtensor_device.ExperimentalClearDefaultLayout(self._device_info)\n else:\n _pywrap_dtensor_device.ExperimentalSetDefaultLayout(self._device_info, self._current_output_layout.decode('utf-8'))", "docstring": "Sets a default output layout for all ops in the scope.\n\n Note: This is an internal helper method, which is not user facing api.\n\n Useful for requesting a specific 
layout for ops which would have no inferred\n layout, e.g. tf.zeros.\n\n Caveats:\n\n - Currently only affects the first output of an op. For Op with multiple\n outputs, this does not support yet.\n\n - All Ops in the scope will be attached with the same layout. This might not\n be valid as the rank is different. The current suggestion is: Try to wrap\n the raw op wheneven possible.\n\nArgs:\n layout: A Layout for the outputs of all operations in this scope.\n\nYields:\n Nothing.", "source": "github_repos"} -{"code": "def create_feed_dict_from_input_data(input_data: RepresentativeSample, signature_def: meta_graph_pb2.SignatureDef) -> Mapping[str, np.ndarray]:\n feed_dict = {}\n for input_key, input_value in input_data.items():\n input_tensor_name = signature_def.inputs[input_key].name\n value = input_value\n if isinstance(input_value, core.Tensor):\n value = input_value.eval()\n feed_dict[input_tensor_name] = value\n return feed_dict", "docstring": "Constructs a feed_dict from input data.\n\n Note: This function should only be used in graph mode.\n\n This is a helper function that converts an 'input key -> input value' mapping\n to a feed dict. A feed dict is an 'input tensor name -> input value' mapping\n and can be directly passed to the `feed_dict` argument of `sess.run()`.\n\nArgs:\n input_data: Input key -> input value mapping. The input keys should match\n the input keys of `signature_def`.\n signature_def: A SignatureDef representing the function that `input_data` is\n an input to.\n\nReturns:\n Feed dict, which is intended to be used as input for `sess.run`. It is\n essentially a mapping: input tensor name -> input value. Note that the input\n value in the feed dict is not a `Tensor`.", "source": "github_repos"} -{"code": "def extract_model_metrics(model):\n if getattr(model, '_compile_metrics', None):\n return {m.name: m for m in model._compile_metric_functions}\n return None", "docstring": "Convert metrics from a Keras model `compile` API to dictionary.\n\n This is used for converting Keras models to SavedModels.\n\nArgs:\n model: A `tf.keras.Model` object.\n\nReturns:\n Dictionary mapping metric names to metric instances. May return `None` if\n the model does not contain any metrics.", "source": "github_repos"} -{"code": "def _convert_bytes_to_cc_source(data, array_name, max_line_width=80, include_guard=None, include_path=None, use_tensorflow_license=False):\n starting_pad = ' '\n array_lines = []\n array_line = starting_pad\n for value in bytearray(data):\n if len(array_line) + 4 > max_line_width:\n array_lines.append(array_line + '\\n')\n array_line = starting_pad\n array_line += ' 0x%02x,' % value\n if len(array_line) > len(starting_pad):\n array_lines.append(array_line + '\\n')\n array_values = ''.join(array_lines)\n if include_guard is None:\n include_guard = 'TENSORFLOW_LITE_UTIL_' + array_name.upper() + '_DATA_H_'\n if include_path is not None:\n include_line = '#include \"{include_path}\"\\n'.format(include_path=include_path)\n else:\n include_line = ''\n if use_tensorflow_license:\n license_text = '\\n/* Copyright {year} The TensorFlow Authors. 
All Rights Reserved.\\n\\nLicensed under the Apache License, Version 2.0 (the \"License\");\\nyou may not use this file except in compliance with the License.\\nYou may obtain a copy of the License at\\n\\n http://www.apache.org/licenses/LICENSE-2.0\\n\\nUnless required by applicable law or agreed to in writing, software\\ndistributed under the License is distributed on an \"AS IS\" BASIS,\\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\nSee the License for the specific language governing permissions and\\nlimitations under the License.\\n==============================================================================*/\\n'.format(year=datetime.date.today().year)\n else:\n license_text = ''\n source_template = '{license_text}\\n// This is a binary file that has been converted into a C++ data array using the\\n// //tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py\\n// script. This form is useful for compiling into a binary to simplify\\n// deployment on mobile devices\\n\\n{include_line}\\n// We need to keep the data array aligned on some architectures.\\n#ifdef __has_attribute\\n#define HAVE_ATTRIBUTE(x) __has_attribute(x)\\n#else\\n#define HAVE_ATTRIBUTE(x) 0\\n#endif\\n#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))\\n#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(16)))\\n#else\\n#define DATA_ALIGN_ATTRIBUTE\\n#endif\\n\\nextern const unsigned char {array_name}[] DATA_ALIGN_ATTRIBUTE = {{\\n{array_values}}};\\nextern const int {array_name}_len = {array_length};\\n'\n source_text = source_template.format(array_name=array_name, array_length=len(data), array_values=array_values, license_text=license_text, include_line=include_line)\n header_template = '\\n{license_text}\\n\\n// This is a binary file that has been converted into a C++ data array using the\\n// //tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py\\n// script. This form is useful for compiling into a binary to simplify\\n// deployment on mobile devices\\n\\n#ifndef {include_guard}\\n#define {include_guard}\\n\\nextern const unsigned char {array_name}[];\\nextern const int {array_name}_len;\\n\\n#endif // {include_guard}\\n'\n header_text = header_template.format(array_name=array_name, include_guard=include_guard, license_text=license_text)\n return (source_text, header_text)", "docstring": "Returns strings representing a C++ constant array containing `data`.\n\nArgs:\n data: Byte array that will be converted into a C++ constant.\n array_name: String to use as the variable name for the constant array.\n max_line_width: The longest line length, for formatting purposes.\n include_guard: Name to use for the include guard macro definition.\n include_path: Optional path to include in the source file.\n use_tensorflow_license: Whether to include the standard TensorFlow Apache2\n license in the generated files.\n\nReturns:\n Text that can be compiled as a C++ source file to link in the data as a\n literal array of values.\n Text that can be used as a C++ header file to reference the literal array.", "source": "github_repos"} -{"code": "def recipe_video(config, auth_read, sheet, tab, project, dataset, table):\n sheets(config, {'__comment__': 'Copy the tamplate sheet to the users sheet. 
If it already exists, nothing happens.', 'auth': auth_read, 'template': {'sheet': 'https://docs.google.com/spreadsheets/d/1BXRHWz-1P3gNS92WZy-3sPZslU8aalXa8heOgygWEFs/edit#gid=0', 'tab': 'Video'}, 'sheet': sheet, 'tab': tab})\n video(config, {'__comment__': 'Read video effects and values from sheet and/or bigquery.', 'auth': auth_read, 'sheets': {'sheet': sheet, 'tab': tab}, 'bigquery': {'project': project, 'dataset': dataset, 'table': table}})", "docstring": "Add images, text, and audio to videos.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n sheet (string) - Name or URL of sheet.\n tab (string) - Name of sheet tab.\n project (string) - Google Cloud Project Identifier.\n dataset (string) - Name of dataset.\n table (string) - Name of table.", "source": "github_repos"} -{"code": "def __init__(self, nbits: int=4, group_size: int=64, view_as_float: bool=False, axis: Optional[int]=None, dynamic_config: Optional[dict]=None, skip_modules: List[str]=['lm_head'], **kwargs):\n if is_hqq_available():\n from hqq.core.quantize import BaseQuantizeConfig as HQQBaseQuantizeConfig\n else:\n raise ImportError('A valid HQQ version (>=0.2.1) is not available. Please follow the instructions to install it: `https://github.com/mobiusml/hqq/`.')\n for deprecated_key in ['quant_zero', 'quant_scale', 'offload_meta']:\n if deprecated_key in kwargs:\n logger.info(deprecated_key + ' is deprecated. This parameter will be ignored in quantization settings.')\n if axis is None:\n axis = 1\n logger.info('Setting axis=1 as faster backends such as TorchAO or BitBlas are only compatible with it.')\n if axis not in [0, 1]:\n raise ValueError('Invalid axis value. Only 0 and 1 are allowed.')\n if dynamic_config is not None:\n self.quant_config = {}\n for key in dynamic_config:\n self.quant_config[key] = HQQBaseQuantizeConfig(**dynamic_config[key])\n else:\n self.quant_config = HQQBaseQuantizeConfig(**{'nbits': nbits, 'group_size': group_size, 'view_as_float': view_as_float, 'axis': axis})\n self.quant_method = QuantizationMethod.HQQ\n self.skip_modules = skip_modules\n self.post_init()", "docstring": "This is wrapper around hqq's BaseQuantizeConfig.\n\nArgs:\n nbits (`int`, *optional*, defaults to 4):\n Number of bits. Supported values are (8, 4, 3, 2, 1).\n group_size (`int`, *optional*, defaults to 64):\n Group-size value. Supported values are any value that is divisible by weight.shape[axis]).\n view_as_float (`bool`, *optional*, defaults to `False`):\n View the quantized weight as float (used in distributed training) if set to `True`.\n axis (`Optional[int]`, *optional*):\n Axis along which grouping is performed. Supported values are 0 or 1.\n dynamic_config (dict, *optional*):\n Parameters for dynamic configuration. 
The key is the name tag of the layer and the value is a quantization config.\n If set, each layer specified by its id will use its dedicated quantization configuration.\n skip_modules (`List[str]`, *optional*, defaults to `['lm_head']`):\n List of `nn.Linear` layers to skip.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional parameters from which to initialize the configuration object.", "source": "github_repos"} -{"code": "def conv3d(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n out = nn_ops.conv3d(input_tensor, self.filters, strides=[1, 1, 2, 1, 1], dilations=[1, 1, 1, 1, 1], padding=padding, data_format='NDHWC')\n if has_bias:\n out = nn_ops.bias_add(out, self.bias)\n if activation_fn is not None:\n out = activation_fn(out)\n return {'output': out}", "docstring": "Performs a 3D convolution operation.\n\nArgs:\n input_tensor: Input tensor to perform convolution on.\n\nReturns:\n A map of: output key -> output result.", "source": "github_repos"} -{"code": "def _SwitchRefOrTensor(data, pred, name='Switch'):\n data = ops.convert_to_tensor_or_composite(data, name='data')\n with ops.colocate_with(data, ignore_existing=True):\n if isinstance(data, tensor_lib.Tensor):\n if data.dtype._is_ref_dtype:\n return ref_switch(data, pred, name=name)\n return switch(data, pred, name=name)", "docstring": "Forwards `data` to an output determined by `pred`.\n\n If `pred` is false, the `data` input is forwarded to the first output.\n Otherwise, the data goes to the second output.\n\n This op handles `Tensor`s and `IndexedSlices`.\n\nArgs:\n data: The tensor to be forwarded to the appropriate output.\n pred: A scalar that specifies which output port will receive data.\n name: A name for this operation (optional).\n\nReturns:\n `(output_false, output_true)`: If `pred` is true, data will be forwarded to\n `output_true`, otherwise it goes to `output_false`.\n\nRaises:\n TypeError: if data is not a Tensor or IndexedSlices", "source": "github_repos"} -{"code": "def load_wav_file(filename):\n with tf.compat.v1.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(wav_filename_placeholder)\n wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)\n return sess.run(wav_decoder, feed_dict={wav_filename_placeholder: filename}).audio.flatten()", "docstring": "Loads an audio file and returns a float PCM-encoded array of samples.\n\nArgs:\n filename: Path to the .wav file to load.\n\nReturns:\n Numpy array holding the sample data as floats between -1.0 and 1.0.", "source": "github_repos"} -{"code": "def run_query(query: str) -> None:\n try:\n result = parse_query(query)\n except Exception as e:\n result = f'ERROR: {type(e).__name__}: {e.__str__()}.'\n return result\n return filter_records(convert_to_dataframe(result), query)", "docstring": "Run a query and display the result.\n\nArgs:\n query (str): The query to be executed.", "source": "github_repos"} -{"code": "def min(x, axis=None, keepdims=False, initial=None):\n if any_symbolic_tensors((x,)):\n return Min(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(x)\n return backend.numpy.min(x, axis=axis, keepdims=keepdims, initial=initial)", "docstring": "Return the minimum of a tensor or minimum along an axis.\n\nArgs:\n x: Input tensor.\n axis: Axis or axes along which to operate. By default, flattened input\n is used.\n keepdims: If this is set to `True`, the axes which are reduced are left\n in the result as dimensions with size one. 
Defaults to `False`.\n initial: The maximum value of an output element. Defaults to `None`.\n\nReturns:\n Minimum of `x`.", "source": "github_repos"} -{"code": "def __init__(self, config: GraniteMoeConfig):\n super(GraniteMoeMoE, self).__init__()\n self.input_size = config.hidden_size\n self.hidden_size = config.intermediate_size\n self.activation = ACT2FN[config.hidden_act]\n self.input_linear = GraniteMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)\n self.output_linear = GraniteMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)\n self.router = GraniteMoeTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok)", "docstring": "A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n\nArgs:\n config:\n Configuration object with model hyperparameters.", "source": "github_repos"} -{"code": "def _input_array(num_dims):\n formatter = '{:0%db}' % num_dims\n strings = [formatter.format(i) for i in range(2 ** num_dims)]\n return np.array(strings, dtype='S%d' % num_dims).reshape([2] * num_dims)", "docstring": "Creates an ndarray where each element is the binary of its linear index.\n\nArgs:\n num_dims: The number of dimensions to create.\n\nReturns:\n An ndarray of shape [2] * num_dims.", "source": "github_repos"} -{"code": "def _SanitizedMRO(obj):\n return_list = []\n for cls in tf_inspect.getmro(obj):\n if cls.__name__ == '_NewClass':\n continue\n str_repr = _NormalizeType(str(cls))\n return_list.append(str_repr)\n if 'tensorflow' not in str_repr and 'keras' not in str_repr:\n break\n if 'StubOutForTesting' in str_repr:\n break\n return return_list", "docstring": "Get a list of superclasses with minimal amount of non-TF classes.\n\n Based on many parameters like python version, OS, protobuf implementation\n or changes in google core libraries the list of superclasses of a class\n can change. We only return the first non-TF class to be robust to non API\n affecting changes. The Method Resolution Order returned by `tf_inspect.getmro`\n is still maintained in the return value.\n\nArgs:\n obj: A python routine for us the create the sanitized arspec of.\n\nReturns:\n list of strings, string representation of the class names.", "source": "github_repos"} -{"code": "def _test_tensorflow_vs_numpy(self, x_np):\n y_np = self._total_variation_np(x_np)\n self._test(x_np, y_np)", "docstring": "Test the TensorFlow implementation against a numpy implementation.\n\nArgs:\n x_np: Numpy array with 3 or 4 dimensions.", "source": "github_repos"} -{"code": "def Colorize(self, string, color, justify=None):\n if justify:\n string = justify(string)\n if self._csi and color in self._ANSI_COLOR:\n return '{csi}{color_code}{string}{csi}{reset_code}'.format(csi=self._csi, color_code=self._ANSI_COLOR[color], reset_code=self._ANSI_COLOR_RESET, string=string)\n return string", "docstring": "Generates a colorized string, optionally justified.\n\nArgs:\n string: The string to write.\n color: The color name -- must be in _ANSI_COLOR.\n justify: The justification function, no justification if None. 
For\n example, justify=lambda s: s.center(10)\n\nReturns:\n str, The colorized string that can be printed to the console.", "source": "github_repos"} -{"code": "def create_database_view(self, view: views.View, view_name: str) -> None:\n dataset = f'{self._view_dataset.project}.{self._view_dataset.dataset_id}'\n view_sql = f'CREATE OR REPLACE VIEW `{dataset}.{view_name}` AS\\n{self.to_sql(view)}'\n self._client.query(view_sql).result()", "docstring": "Creates a BigQuery view with the given name in the runner's view_dataset.\n\nArgs:\n view: the FHIR view that creates\n view_name: the view name passed to the CREATE OR REPLACE VIEW statement.\n\nRaises:\n google.cloud.exceptions.GoogleAPICallError if the job failed.", "source": "github_repos"} -{"code": "def sum(x, axis=None, keepdims=False):\n if any_symbolic_tensors((x,)):\n return Sum(axis=axis, keepdims=keepdims).symbolic_call(x)\n return backend.numpy.sum(x, axis=axis, keepdims=keepdims)", "docstring": "Sum of a tensor over the given axes.\n\nArgs:\n x: Input tensor.\n axis: Axis or axes along which the sum is computed. The default is to\n compute the sum of the flattened tensor.\n keepdims: If this is set to `True`, the axes which are reduced are left\n in the result as dimensions with size one.\n\nReturns:\n Output tensor containing the sum.", "source": "github_repos"} -{"code": "def run_inference(self, batch: Sequence[numpy.ndarray], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n return self._inference_fn(batch, model, inference_args)", "docstring": "Runs inferences on a batch of 2d numpy arrays.\n\nArgs:\n batch: A sequence of examples as 2d numpy arrays. Each\n row in an array is a single example. The dimensions\n must match the dimensions of the data used to train\n the model.\n model: XGBoost booster or XBGModel (sklearn interface). Must\n implement predict(X). Where the parameter X is a 2d numpy array.\n inference_args: Any additional arguments for an inference.\n\nReturns:\n An Iterable of type PredictionResult.", "source": "github_repos"} -{"code": "def __init__(self, feature_extractor, tokenizer):\n super().__init__(feature_extractor, tokenizer)\n self.current_processor = self.feature_extractor\n self._in_target_context_manager = False", "docstring": "Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a\n single processor.\n\n [`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and\n [`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more\n information.\n\nArgs:\n feature_extractor (`Speech2TextFeatureExtractor`):\n An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input.\n tokenizer (`Speech2TextTokenizer`):\n An instance of [`Speech2TextTokenizer`]. 
The tokenizer is a required input.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[Tuple, AltCLIPOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)\n image_embeds = vision_outputs[1]\n image_embeds = self.visual_projection(image_embeds)\n text_embeds = text_outputs[1]\n text_embeds = self.text_projection(text_embeds)\n image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)\n text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)\n logit_scale = self.logit_scale.exp()\n logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale\n logits_per_image = logits_per_text.T\n loss = None\n if return_loss:\n loss = clip_loss(logits_per_text)\n if not return_dict:\n output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)\n return (loss,) + output if loss is not None else output\n return AltCLIPOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)", "docstring": "return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n\nExample:\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, AltCLIPModel\n\n >>> model = AltCLIPModel.from_pretrained(\"BAAI/AltCLIP\")\n >>> processor = AutoProcessor.from_pretrained(\"BAAI/AltCLIP\")\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n >>> inputs = processor(\n ... text=[\"a photo of a cat\", \"a photo of a dog\"], images=image, return_tensors=\"pt\", padding=True\n ... 
)\n >>> outputs = model(**inputs)\n >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score\n >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities\n ```", "source": "github_repos"} -{"code": "def get_size_with_aspect_ratio(image_size: Tuple[int, int], size: int, max_size: Optional[int]=None, mod_size: int=16) -> Tuple[int, int]:\n height, width = image_size\n raw_size = None\n if max_size is not None:\n min_original_size = float(min((height, width)))\n max_original_size = float(max((height, width)))\n if max_original_size / min_original_size * size > max_size:\n raw_size = max_size * min_original_size / max_original_size\n size = int(round(raw_size))\n if width < height:\n ow = size\n if max_size is not None and raw_size is not None:\n oh = int(raw_size * height / width)\n else:\n oh = int(size * height / width)\n elif height <= width and height == size or (width <= height and width == size):\n oh, ow = (height, width)\n else:\n oh = size\n if max_size is not None and raw_size is not None:\n ow = int(raw_size * width / height)\n else:\n ow = int(size * width / height)\n if mod_size is not None:\n ow_mod = np.mod(ow, mod_size)\n oh_mod = np.mod(oh, mod_size)\n ow = ow - ow_mod\n oh = oh - oh_mod\n return (oh, ow)", "docstring": "Computes the output image size given the input image size and the desired output size with multiple of divisible_size.\n\nArgs:\n image_size (`Tuple[int, int]`):\n The input image size.\n size (`int`):\n The desired output size.\n max_size (`int`, *optional*):\n The maximum allowed output size.\n mod_size (`int`, *optional*):\n The size to make multiple of mod_size.", "source": "github_repos"} -{"code": "def _call_unittest_assertion(assertion_method, *args, msg=None, extras=None, **kwargs):\n my_msg = None\n try:\n assertion_method(*args, **kwargs)\n except AssertionError as e:\n my_msg = str(e)\n if msg:\n my_msg = f'{my_msg} {msg}'\n if my_msg is not None:\n raise signals.TestFailure(my_msg, extras=extras)", "docstring": "Wrapper for converting a unittest assertion into a Mobly one.\n\nArgs:\n assertion_method: unittest.TestCase assertion method to call.\n *args: Positional arguments for the assertion call.\n msg: A string that adds additional info about the failure.\n extras: An optional field for extra information to be included in\n test result.\n **kwargs: Keyword arguments for the assertion call.", "source": "github_repos"} -{"code": "def get_temp_dir():\n return _googletest.GetTempDir()", "docstring": "Returns a temporary directory for use during tests.\n\n There is no need to delete the directory after the test.\n\n @compatibility(TF2)\n This function is removed in TF2. 
Please use `TestCase.get_temp_dir` instead\n in a test case.\n Outside of a unit test, obtain a temporary directory through Python's\n `tempfile` module.\n @end_compatibility\n\nReturns:\n The temporary directory.", "source": "github_repos"} -{"code": "def __init__(self, missing_modules: Collection[str]=()):\n if os.getenv('TYPESHED_HOME'):\n self._store = ExternalTypeshedFs(missing_file=self.MISSING_FILE)\n else:\n self._store = InternalTypeshedFs(missing_file=self.MISSING_FILE)\n self._missing = self._load_missing().union(missing_modules)\n self._stdlib_versions = self._load_stdlib_versions()\n self._third_party_packages = self._load_third_party_packages()", "docstring": "Initializer.\n\nArgs:\n missing_modules: A collection of modules in the format\n 'stdlib/module_name', which will be combined with the contents of\n MISSING_FILE to form a set of missing modules for which pytype will not\n report errors.", "source": "github_repos"} -{"code": "def initialize_multi_client_cluster(job_name: str, dtensor_jobs: List[str], client_id: int, collective_leader: str, port: Optional[int]=None, gpu_use_nccl_communication: bool=False, enable_coordination_service: bool=True):\n assert context.executing_eagerly()\n if not collective_leader.startswith('/job:'):\n collective_leader = '/job:' + collective_leader\n context.context().configure_collective_ops(use_nccl_communication=gpu_use_nccl_communication, collective_leader=collective_leader)\n if enable_coordination_service:\n context.context().configure_coordination_service(service_type='standalone', service_leader=collective_leader)\n config_proto = context.get_config()\n cluster_def = cluster_pb2.ClusterDef()\n cluster_def.job.add(name=job_name, tasks=dict(enumerate(dtensor_jobs)))\n server_def = tensorflow_server_pb2.ServerDef(cluster=cluster_def, default_session_config=config_proto, job_name=job_name, task_index=client_id, protocol=remote_utils.get_default_communication_protocol(), port=port)\n server_def.default_session_config.rpc_options.num_channels_per_target = 4\n server_def.default_session_config.experimental.recv_buf_max_chunk = -1\n logging.info('Enabling collectives with server_def: %s', server_def)\n context.context().enable_collective_ops(server_def)\n context.ensure_initialized()", "docstring": "Initialize GRPC servers and collectives for multi-client DTensor setup.\n\n This function can be used to initialize a multi-client cluster and enable\n collective ops. GRPC servers are necessary in the multi-client mode, even\n when the number of clientis is 1.\n\n NOTE: this function must be called in an eager context.\n\nArgs:\n job_name: The job name used by all clients in the DTensor cluster.\n dtensor_jobs: A list of the DTensor client jobs participating in the\n cluster. Must be strings of the form \"hostname:port\".\n client_id: The ID of the DTensor client this function is being called in.\n collective_leader: The job/task that will be used to run collectives.\n port: The port this client's GRPC server will run on. 
If omitted, use the\n port from dtensor_jobs for this client.\n gpu_use_nccl_communication: if True, configure TensorFlow to use NCCL by\n default.\n enable_coordination_service: If true, enable distributed coordination\n service to make sure that workers know the devices on each other, a\n prerequisite for data transfer through cross-worker rendezvous.\n\nRaises:\n RuntimeError: If running inside a tf.function.", "source": "github_repos"} -{"code": "def __init__(self, mapping: PythonDict[Hashable, trace.TraceType], placeholder_type: Optional[Type[Any]]=None):\n self.mapping = mapping\n self._placeholder_type = placeholder_type", "docstring": "Represents a dictionary of TraceType objects.\n\nAttributes:\n mapping: A mapping from keys to corresponding TraceTypes of the dict values.", "source": "github_repos"} -{"code": "def save_subset_weights_to_hdf5_group(f, weights):\n weight_values = [backend.convert_to_numpy(w) for w in weights]\n weight_names = [str(w.path).encode('utf8') for w in weights]\n save_attributes_to_hdf5_group(f, 'weight_names', weight_names)\n for name, val in zip(weight_names, weight_values):\n param_dset = f.create_dataset(name, val.shape, dtype=val.dtype)\n if not val.shape:\n param_dset[()] = val\n else:\n param_dset[:] = val", "docstring": "Save top-level weights of a model to a HDF5 group.\n\nArgs:\n f: HDF5 group.\n weights: List of weight variables.", "source": "github_repos"} -{"code": "def _join_test_cases_n_ary(self, test_cases: Iterator[tuple[Iterator[str], ...]], delimiters: ListOrTuple[str]) -> tuple[Iterator[str], ...]:\n output_stream_tuple, delimiter_stream_tuple = zip(*((io.StringIO(), io.StringIO(delimiter)) for delimiter in delimiters))\n for delimiter_stream in delimiter_stream_tuple:\n delimiter_stream.seek(0, os.SEEK_END)\n for test_case_stream_tuple in test_cases:\n for test_case_stream, delimiter_stream, output_stream in zip(test_case_stream_tuple, delimiter_stream_tuple, output_stream_tuple):\n output_stream.writelines(delimiter_stream)\n output_stream.writelines(test_case_stream)\n delimiter_stream.seek(0)\n for output_stream in output_stream_tuple:\n output_stream.seek(0)\n return output_stream_tuple", "docstring": "Joins each output[i] from all `test_cases`, delimited by `delimiters[i]`.\n\n Given an iterator to tuples of iterators, this function concatenates along\n the first \"dimension\" and returns a tuple of concatenated iterators. The\n concatenated iterators comprising `output_stream_tuple[i]` are delimited by\n `delimiters[i]`.\n\n For example, given the following input:\n `test_cases = [(\"a\", \"b\"), (\"c\", \"d\")]`\n `delimiters = (\"-\", \"_\")`\n This function will return a tuple of two iterators:\n `[(\"a-\", \"b-\"), (\"c_\", \"d_\")]`\n\nArgs:\n test_cases: An iterator over the test cases to join. Each test case is a\n tuple of N iterators yielding lines of text.\n delimiters: A list or tuple of N delimiter strings, one for each member of\n the returned tuple `output_stream_tuple`.\n\nReturns:\n A tuple of N output-stream iterators. 
For each tuple-element index `i`,\n `output_stream_tuple[i]` is the concatenation of `test_cases[:][i]`,\n delimited by `delimiters[i]`.", "source": "github_repos"} -{"code": "def take(self, count, name=None) -> 'DatasetV2':\n from tensorflow.python.data.ops import take_op\n return take_op._take(self, count, name=name)", "docstring": "Creates a `Dataset` with at most `count` elements from this dataset.\n\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.take(3)\n >>> [a.item() for a in dataset.as_numpy_iterator()]\n [0, 1, 2]\n\nArgs:\n count: A `tf.int64` scalar `tf.Tensor`, representing the number of\n elements of this dataset that should be taken to form the new dataset.\n If `count` is -1, or if `count` is greater than the size of this\n dataset, the new dataset will contain all elements of this dataset.\n name: (Optional.) A name for the tf.data operation.\n\nReturns:\n A new `Dataset` with the transformation applied as described above.", "source": "github_repos"} -{"code": "def invoke_process_element(self, sdf_invoker, output_processor, element, restriction, watermark_estimator_state, *args, **kwargs):\n assert isinstance(sdf_invoker, DoFnInvoker)\n\n class CheckpointState(object):\n\n def __init__(self):\n self.checkpointed = None\n self.residual_restriction = None\n checkpoint_state = CheckpointState()\n\n def initiate_checkpoint():\n with self._checkpoint_lock:\n if checkpoint_state.checkpointed:\n return\n checkpoint_state.checkpointed = object()\n split = sdf_invoker.try_split(0)\n if split:\n _, checkpoint_state.residual_restriction = split\n else:\n checkpoint_state.checkpointed = None\n output_processor.reset()\n Timer(self._max_duration, initiate_checkpoint).start()\n sdf_invoker.invoke_process(element, additional_args=args, restriction=restriction, watermark_estimator_state=watermark_estimator_state)\n assert output_processor.output_iter is not None\n output_count = 0\n process_continuation = None\n for output in output_processor.output_iter:\n assert not process_continuation\n if isinstance(output, ProcessContinuation):\n initiate_checkpoint()\n process_continuation = output\n continue\n yield output\n output_count += 1\n if self._max_num_outputs and output_count >= self._max_num_outputs:\n initiate_checkpoint()\n result = SDFProcessElementInvoker.Result(residual_restriction=checkpoint_state.residual_restriction) if checkpoint_state.residual_restriction else SDFProcessElementInvoker.Result()\n yield result", "docstring": "Invokes `process()` method of a Splittable `DoFn` for a given element.\n\nArgs:\n sdf_invoker: a `DoFnInvoker` for the Splittable `DoFn`.\n element: the element to process\n\nReturns:\n a `SDFProcessElementInvoker.Result` object.", "source": "github_repos"} -{"code": "def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n raise NotImplementedError", "docstring": "Calls the wrapped cell and performs the wrapping logic.\n\n This method is called from the wrapper's `call` or `__call__` methods.\n\nArgs:\n inputs: A tensor with wrapped cell's input.\n state: A tensor or tuple of tensors with wrapped cell's state.\n cell_call_fn: Wrapped cell's method to use for step computation (cell's\n `__call__` or 'call' method).\n **kwargs: Additional arguments.\n\nReturns:\n A pair containing:\n - Output: A tensor with cell's output.\n - New state: A tensor or tuple of tensors with new wrapped cell's state.", "source": "github_repos"} -{"code": "def _generate_schedule(self, cpn_frequency, roll_convention):\n if self._first_coupon_date is 
None and self._penultimate_coupon_date is None:\n cpn_dates = dates.PeriodicSchedule(start_date=self._start_date, end_date=self._end_date, tenor=cpn_frequency, roll_convention=roll_convention).dates()\n is_regular_cpn = tf.constant(True, dtype=bool, shape=cpn_dates[:, :-1].shape)\n elif self._first_coupon_date is not None:\n cpn_dates = dates.PeriodicSchedule(start_date=self._first_coupon_date, end_date=self._end_date, tenor=cpn_frequency, roll_convention=roll_convention).dates()\n cpn_dates = dates.DateTensor.concat([self._start_date.expand_dims(-1), cpn_dates], axis=1)\n is_irregular_cpn = tf.constant(False, dtype=bool, shape=self._start_date.shape)\n is_regular_cpn = tf.concat([tf.expand_dims(is_irregular_cpn, axis=-1), tf.constant(True, dtype=bool, shape=cpn_dates[:, :-2].shape)], axis=1)\n else:\n cpn_dates = dates.PeriodicSchedule(start_date=self._start_date, end_date=self._penultimate_coupon_date, backward=True, tenor=cpn_frequency, roll_convention=roll_convention).dates()\n cpn_dates = dates.DateTensor.concat([cpn_dates, self._end_date.expand_dims(-1)], axis=1)\n is_irregular_cpn = tf.constant(False, dtype=bool, shape=self._end_date.shape)\n is_regular_cpn = tf.concat([tf.constant(True, dtype=bool, shape=cpn_dates[:, :-2].shape), tf.expand_dims(is_irregular_cpn, axis=-1)], axis=1)\n return (cpn_dates, is_regular_cpn)", "docstring": "Method to generate coupon dates.\n\nArgs:\n cpn_frequency: A `PeriodTensor` specifying the frequency of coupon\n payments.\n roll_convention: Scalar of type `BusinessDayConvention` specifying how\n dates are rolled if they fall on holidays.\n\nReturns:\n A tuple containing the generated date schedule and a boolean `Tensor`\n of the same shape as the schedule specifying whether the coupons are\n regular coupons.", "source": "github_repos"} -{"code": "def breakpoints(self):\n return self._breakpoints", "docstring": "Get a set of the currently-activated breakpoints.\n\nReturns:\n A `set` of 3-tuples: (node_name, output_slot, debug_op), e.g.,\n {(\"MatMul\", 0, \"DebugIdentity\")}.", "source": "github_repos"} -{"code": "def _merge_with(self, other: 'DynamicRaggedShape.Spec') -> 'DynamicRaggedShape.Spec':\n max_num_row_partitions = max(self.num_row_partitions, other.num_row_partitions)\n a = self._with_num_row_partitions(max_num_row_partitions)\n b = other._with_num_row_partitions(max_num_row_partitions)\n new_rp = [a._merge_with(b) for a, b in zip(a._row_partitions, b._row_partitions)]\n new_static_inner_shape = a._static_inner_shape.merge_with(b._static_inner_shape)\n dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64\n return DynamicRaggedShape.Spec(new_rp, new_static_inner_shape, dtype=dtype)", "docstring": "Merges all information between two specs.\n\n Specs are expected to represent the same information modulo\n num_row_partitons.\n\n If the specs are of different ranks, then fail.\n\nArgs:\n other: another Spec of the same rank.\n\nReturns:\n a Spec with the union of information.", "source": "github_repos"} -{"code": "def debug_string(self, with_typing: bool=False, indent: int=0) -> str:\n operand_name = f'{self} '\n operand_prints = ''.join(('\\n' + op.debug_string(with_typing, indent + 1) for op in self.operands))\n type_print = f' type={self.return_type}' if with_typing else ''\n return f'{'| ' * indent}+ {operand_name}<{self.__class__.__name__}{type_print}> ({operand_prints})'", "docstring": "Builds a string describing the expression tree starting from this node.\n\nArgs:\n with_typing: If true, includes the type each node evaluates 
to.\n indent: The initial number of spaces to use as indentation for the debug\n string.\n\nReturns:\n A string which recursively describes this node and its operands.", "source": "github_repos"} -{"code": "def FromTimestampToHttp(self, ts):\n ts = time.gmtime(ts)\n return time.strftime('%a, %d %b %Y %H:%M:%S GMT', ts)", "docstring": "Converts internal nss_cache timestamp to HTTP timestamp.\n\nArgs:\n ts: number of seconds since epoch\n\nReturns:\n HTTP format timestamp string", "source": "github_repos"} -{"code": "def _get_trainable_state(self):\n layers = self._flatten_layers(include_self=False, recursive=False)\n trainable_state = {self: self.trainable}\n for l in layers:\n trainable_state.update(l._get_trainable_state())\n return trainable_state", "docstring": "Get the `trainable` state of each sublayer.\n\nReturns:\n A dict mapping all sublayers to their `trainable` value.", "source": "github_repos"} -{"code": "def _unique_parameters(self) -> 'list[cfg.Variable]':\n return []", "docstring": "Get unique parameter subtypes as variables.\n\n This will retrieve 'children' of this value that contribute to the\n type of it. So it will retrieve type parameters, but not attributes. To\n keep the number of possible combinations reasonable, when we encounter\n multiple instances of the same type, we include only one.\n\nReturns:\n A list of variables.", "source": "github_repos"} -{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Tuple[tf.Tensor] | None=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n present_key_value = present_key_value + cross_attn_present_key_value\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value)", "docstring": "Args:\n hidden_states (`tf.Tensor`): input to 
the layer of shape *(batch, seq_len, embed_dim)*\n attention_mask (`tf.Tensor`): attention mask of size\n *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\n encoder_hidden_states (`tf.Tensor`):\n cross attention input to the layer of shape *(batch, seq_len, embed_dim)*\n encoder_attention_mask (`tf.Tensor`): encoder attention mask of size\n *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\n layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n *(decoder_attention_heads,)*\n cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.\n *(decoder_attention_heads,)*\n past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states", "source": "github_repos"} -{"code": "def __init__(self, name='mean', dtype=None):\n super(Mean, self).__init__(reduction=metrics_utils.Reduction.WEIGHTED_MEAN, name=name, dtype=dtype)", "docstring": "Computes the (weighted) mean of the given values.\n\n For example, if values is [1, 3, 5, 7] then the mean is 4.\n If the weights were specified as [1, 1, 0, 0] then the mean would be 2.\n\n This metric creates two variables, `total` and `count` that are used to\n compute the average of `values`. This average is ultimately returned as `mean`\n which is an idempotent operation that simply divides `total` by `count`.\n\n If `sample_weight` is `None`, weights default to 1.\n Use `sample_weight` of 0 to mask values.\n\nArgs:\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\n Standalone usage:\n\n >>> m = tf.keras.metrics.Mean()\n >>> m.update_state([1, 3, 5, 7])\n >>> m.result().numpy()\n 4.0\n >>> m.reset_state()\n >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])\n >>> m.result().numpy()\n 2.0\n\n Usage with `compile()` API:\n\n ```python\n model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))\n model.compile(optimizer='sgd', loss='mse')\n ```", "source": "github_repos"} -{"code": "def __init__(self, quantile_tracker: Optional[QuantileTracker]=None):\n self._quantile_tracker = quantile_tracker or BufferedSlidingQuantileTracker(DEFAULT_WINDOW_SIZE, 0.5)\n assert self._quantile_tracker._q == 0.5, 'quantile_tracker must be initialized with q = 0.5'", "docstring": "Tracks the median of a stream of values using a quantile tracker.\n\n This wrapper class encapsulates a `QuantileTracker` configured specifically\n for the 0.5 quantile (median).\n\nArgs:\n quantile_tracker: An optional `QuantileTracker` instance. If not provided,\n a `BufferedSlidingQuantileTracker` with a default window size 1000 and\n q=0.5 is created.\n\nRaises:\n AssertionError: If the provided quantile_tracker is not initialized with\n q=0.5.", "source": "github_repos"} -{"code": "def _check_trt_version_compatibility():\n if not _pywrap_py_utils.is_tensorrt_enabled():\n logging.error('Tensorflow needs to be built with TensorRT support enabled to allow TF-TRT to operate.')\n raise RuntimeError('Tensorflow has not been built with TensorRT support.')\n if platform.system() == 'Windows':\n logging.warn('Windows support is provided experimentally. No guarantee is made regarding functionality or engineering support. 
Use at your own risk.')\n linked_version = _pywrap_py_utils.get_linked_tensorrt_version()\n loaded_version = _pywrap_py_utils.get_loaded_tensorrt_version()\n logging.info('Linked TensorRT version: %s', str(linked_version))\n logging.info('Loaded TensorRT version: %s', str(loaded_version))\n\n def raise_trt_version_deprecated(version_type, trt_version):\n assert version_type in ['linked', 'loaded'], \"Incorrect value received for version_type: %s. Accepted: ['linked', 'loaded']\" % version_type\n logging.error('The {version_type} version of TensorRT: `{trt_version}` has now been removed. Please upgrade to TensorRT 7 or more recent.'.format(version_type=version_type, trt_version=trt_utils.version_tuple_to_string(trt_version)))\n raise RuntimeError('Incompatible %s TensorRT versions' % version_type)\n if not trt_utils.is_linked_tensorrt_version_greater_equal(7, 0, 0):\n raise_trt_version_deprecated('linked', linked_version)\n if not trt_utils.is_loaded_tensorrt_version_greater_equal(7, 0, 0):\n raise_trt_version_deprecated('loaded', loaded_version)\n if loaded_version[0] != linked_version[0] or not trt_utils.is_loaded_tensorrt_version_greater_equal(*linked_version):\n logging.error('Loaded TensorRT %s but linked TensorFlow against TensorRT %s. A few requirements must be met:\\n\\t-It is required to use the same major version of TensorRT during compilation and runtime.\\n\\t-TensorRT does not support forward compatibility. The loaded version has to be equal or more recent than the linked version.', trt_utils.version_tuple_to_string(loaded_version), trt_utils.version_tuple_to_string(linked_version))\n raise RuntimeError('Incompatible TensorRT major version')\n elif loaded_version != linked_version:\n logging.info('Loaded TensorRT %s and linked TensorFlow against TensorRT %s. This is supported because TensorRT minor/patch upgrades are backward compatible.', trt_utils.version_tuple_to_string(loaded_version), trt_utils.version_tuple_to_string(linked_version))", "docstring": "Check compatibility of TensorRT version.\n\nRaises:\n RuntimeError: if the TensorRT library version is incompatible.", "source": "github_repos"} -{"code": "def default_session(session):\n return _default_session_stack.get_controller(session)", "docstring": "Python \"with\" handler for defining a default session.\n\n This function provides a means of registering a session for handling\n Tensor.eval() and Operation.run() calls. It is primarily intended for use\n by session.Session, but can be used with any object that implements\n the Session.run() interface.\n\n Use with the \"with\" keyword to specify that Tensor.eval() and Operation.run()\n invocations within the scope of a block should be executed by a particular\n session.\n\n The default session applies to the current thread only, so it is always\n possible to inspect the call stack and determine the scope of a default\n session. If you create a new thread, and wish to use the default session\n in that thread, you must explicitly add a \"with ops.default_session(sess):\"\n block in that thread's function.\n\nExample:\n The following code examples are equivalent:\n\n # 1. Using the Session object directly:\n sess = ...\n c = tf.constant(5.0)\n sess.run(c)\n\n # 2. Using default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n result = c.eval()\n\n # 3. 
Overriding default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n with ops.default_session(...):\n c.eval(session=sess)\n\nArgs:\n session: The session to be installed as the default session.\n\nReturns:\n A context manager for the default session.", "source": "github_repos"} -{"code": "def repeat(x, repeats, axis=None):\n if any_symbolic_tensors((x,)):\n return Repeat(repeats, axis=axis).symbolic_call(x)\n return backend.numpy.repeat(x, repeats, axis=axis)", "docstring": "Repeat each element of a tensor after themselves.\n\nArgs:\n x: Input tensor.\n repeats: The number of repetitions for each element.\n axis: The axis along which to repeat values. By default, use\n the flattened input array, and return a flat output array.\n\nReturns:\n Output tensor.", "source": "github_repos"} -{"code": "def _get_truncated_table_rows(self, query_tokens: List[str], tokenized_table: TokenizedTable, num_rows: int, num_columns: int, max_length: int, truncation_strategy: Union[str, TapasTruncationStrategy]) -> Tuple[int, int]:\n if not isinstance(truncation_strategy, TapasTruncationStrategy):\n truncation_strategy = TapasTruncationStrategy(truncation_strategy)\n if max_length is None:\n max_length = self.model_max_length\n if truncation_strategy == TapasTruncationStrategy.DROP_ROWS_TO_FIT:\n while True:\n num_tokens = self._get_max_num_tokens(query_tokens, tokenized_table, num_rows=num_rows, num_columns=num_columns, max_length=max_length)\n if num_tokens is not None:\n break\n num_rows -= 1\n if num_rows < 1:\n break\n elif truncation_strategy != TapasTruncationStrategy.DO_NOT_TRUNCATE:\n raise ValueError(f'Unknown truncation strategy {truncation_strategy}.')\n return (num_rows, num_tokens or 1)", "docstring": "Truncates a sequence pair in-place following the strategy.\n\nArgs:\n query_tokens (`List[str]`):\n List of strings corresponding to the tokenized query.\n tokenized_table (`TokenizedTable`):\n Tokenized table\n num_rows (`int`):\n Total number of table rows\n num_columns (`int`):\n Total number of table columns\n max_length (`int`):\n Total maximum length.\n truncation_strategy (`str` or [`TapasTruncationStrategy]`):\n Truncation strategy to use. Seeing as this method should only be called when truncating, the only\n available strategy is the `\"drop_rows_to_fit\"` strategy.\n\nReturns:\n `Tuple(int, int)`: tuple containing the number of rows after truncation, and the number of tokens available\n for each table element.", "source": "github_repos"} -{"code": "def squared_hinge(y_true, y_pred):\n y_pred = ops.convert_to_tensor(y_pred)\n y_true = ops.cast(y_true, y_pred.dtype)\n y_true = convert_binary_labels_to_hinge(y_true)\n return ops.mean(ops.square(ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1)", "docstring": "Computes the squared hinge loss between `y_true` & `y_pred`.\n\n Formula:\n\n ```python\n loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)\n ```\n\nArgs:\n y_true: The ground truth values. `y_true` values are expected to be -1\n or 1. If binary (0 or 1) labels are provided we will convert them\n to -1 or 1 with shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.\n\nReturns:\n Squared hinge loss values with shape = `[batch_size, d0, .. 
dN-1]`.\n\nExample:\n >>> y_true = np.random.choice([-1, 1], size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = keras.losses.squared_hinge(y_true, y_pred)", "source": "github_repos"} -{"code": "def learn_one(self, x: beam.Row) -> None:\n if len(x.__dict__) != 1:\n raise ValueError('RobustZScore.learn_one expected univariate input, but got %s', str(x))\n v = next(iter(x))\n self._mad_tracker.push(v)", "docstring": "Updates the `MadTracker` with a new data point.\n\nArgs:\n x: A `beam.Row` containing a single numerical value.", "source": "github_repos"} -{"code": "def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):\n del shape\n if dtype.is_floating:\n initializer = init_ops.glorot_uniform_initializer()\n initializing_from_value = False\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool or (dtype == dtypes.string):\n initializer = init_ops.zeros_initializer()\n initializing_from_value = False\n else:\n raise ValueError('An initializer for variable %s of %s is required' % (name, dtype.base_dtype))\n return (initializer, initializing_from_value)", "docstring": "Provide a default initializer and a corresponding value.\n\nArgs:\n name: see get_variable.\n shape: see get_variable.\n dtype: see get_variable.\n\nReturns:\n initializer and initializing_from_value. See get_variable above.\n\nRaises:\n ValueError: When giving unsupported dtype.", "source": "github_repos"} -{"code": "def test_fail(self, e=None):\n self._test_end(TestResultEnums.TEST_RESULT_FAIL, e)", "docstring": "To mark the test as failed in this record.\n\n Only test_fail does instance check because we want 'assert xxx' to also\n fail the test same way assert_true does.\n\nArgs:\n e: An exception object. It can be an instance of AssertionError or\n mobly.base_test.TestFailure.", "source": "github_repos"} -{"code": "def set_save_handler(save_handler: Optional[Callable[..., Any]]) -> Optional[Callable[..., Any]]:\n if save_handler and (not callable(save_handler)):\n raise ValueError('`save_handler` must be callable.')\n global _SAVE_HANDLER\n old_handler = _SAVE_HANDLER\n _SAVE_HANDLER = save_handler\n return old_handler", "docstring": "Sets global save handler.\n\nArgs:\n save_handler: A callable object that takes at least one argument as value to\n save. 
`symbolic.save` method will pass through all arguments to this\n handler and return its return value.\n\nReturns:\n Previous global save handler.", "source": "github_repos"} -{"code": "def call(self, pixel_values: tf.Tensor, labels: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TFSemanticSegmenterOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n if labels is not None and (not self.config.num_labels > 1):\n raise ValueError('The number of labels should be greater than one')\n outputs = self.segformer(pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)\n encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]\n logits = self.decode_head(encoder_hidden_states)\n loss = None\n if labels is not None:\n loss = self.hf_compute_loss(logits=logits, labels=labels)\n logits = tf.transpose(logits, perm=[0, 3, 1, 2])\n if not return_dict:\n if output_hidden_states:\n output = (logits,) + outputs[1:]\n else:\n output = (logits,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return TFSemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)", "docstring": "labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*):\n Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels > 1`, a (per-pixel) classification loss is computed\n (Cross-Entropy).\n\nReturns:\n\nExample:\n ```python\n >>> from transformers import AutoImageProcessor, TFSegformerForSemanticSegmentation\n >>> from PIL import Image\n >>> import requests\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_processor = AutoImageProcessor.from_pretrained(\"nvidia/segformer-b0-finetuned-ade-512-512\")\n >>> model = TFSegformerForSemanticSegmentation.from_pretrained(\"nvidia/segformer-b0-finetuned-ade-512-512\")\n\n >>> inputs = image_processor(images=image, return_tensors=\"tf\")\n >>> outputs = model(**inputs, training=False)\n >>> # logits are of shape (batch_size, num_labels, height/4, width/4)\n >>> logits = outputs.logits\n >>> list(logits.shape)\n [1, 150, 128, 128]\n ```", "source": "github_repos"} -{"code": "def __neg__(self: EventSetOrNode) -> EventSetOrNode:\n from temporian.core.operators.scalar import multiply_scalar\n return multiply_scalar(input=self, value=-1)", "docstring": "Negates an [`EventSet`][temporian.EventSet] element-wise.\n\nExample:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2],\n ... features={\"M\": [1, -5], \"N\": [-1.0, 5.5]},\n ... )\n >>> -a\n indexes: ...\n 'M': [-1 5]\n 'N': [ 1. 
-5.5]\n ...\n\n ```\n\nReturns:\n Negated EventSet.", "source": "github_repos"} -{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not\n make use of token type ids, therefore a list of zeros is returned.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of zeros.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor):\n if hidden_states.size(-1) != self.dim_norm:\n raise AssertionError('hidden_states.size(-1) != self.dim_norm')\n old_dtype = hidden_states.dtype\n variance = hidden_states.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)\n hidden_states = (hidden_states * torch.rsqrt(variance + self.eps)).to(old_dtype) * self.weight\n return hidden_states", "docstring": "Args:\n hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)", "source": "github_repos"} -{"code": "def _gelu_new(x):\n x = tf.convert_to_tensor(x)\n pi = tf.cast(math.pi, x.dtype)\n coeff = tf.cast(0.044715, x.dtype)\n cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))\n return x * cdf", "docstring": "Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://huggingface.co/papers/1606.0841\n\nArgs:\n x: float Tensor to perform activation\n\nReturns:\n `x` with the GELU activation applied.", "source": "github_repos"} -{"code": "def forward(self, masks_queries_logits: torch.Tensor, class_queries_logits: torch.Tensor, mask_labels: torch.Tensor, class_labels: torch.Tensor) -> List[Tuple[Tensor]]:\n indices: List[Tuple[np.array]] = []\n batch_size = masks_queries_logits.shape[0]\n for i in range(batch_size):\n pred_probs = class_queries_logits[i].softmax(-1)\n pred_mask = masks_queries_logits[i]\n cost_class = -pred_probs[:, class_labels[i]]\n target_mask = mask_labels[i].to(pred_mask)\n target_mask = target_mask[:, None]\n pred_mask = pred_mask[:, None]\n point_coordinates = torch.rand(1, self.num_points, 2, device=pred_mask.device)\n target_coordinates = point_coordinates.repeat(target_mask.shape[0], 1, 1)\n target_mask = sample_point(target_mask, target_coordinates, align_corners=False).squeeze(1)\n pred_coordinates = point_coordinates.repeat(pred_mask.shape[0], 1, 1)\n pred_mask = sample_point(pred_mask, pred_coordinates, align_corners=False).squeeze(1)\n cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask)\n cost_dice = pair_wise_dice_loss(pred_mask, target_mask)\n cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice\n cost_matrix = torch.minimum(cost_matrix, torch.tensor(10000000000.0))\n cost_matrix = torch.maximum(cost_matrix, torch.tensor(-10000000000.0))\n cost_matrix = torch.nan_to_num(cost_matrix, 0)\n assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())\n indices.append(assigned_indices)\n matched_indices = [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n return matched_indices", "docstring": "Params:\n masks_queries_logits (`torch.Tensor`):\n A 
tensor of dim `batch_size, num_queries, num_labels` with the classification logits.\n class_queries_logits (`torch.Tensor`):\n A tensor of dim `batch_size, num_queries, height, width` with the predicted masks.\n class_labels (`torch.Tensor`):\n A tensor of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in the\n target) containing the class labels.\n mask_labels (`torch.Tensor`):\n A tensor of dim `num_target_boxes, height, width` containing the target masks.\n\nReturns:\n matched_indices (`List[Tuple[Tensor]]`): A list of size batch_size, containing tuples of (index_i, index_j)\n where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected labels (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes).", "source": "github_repos"} -{"code": "def __init__(self, write_config: Dict[str, Any], *, schema_config: Optional[SchemaConfig]=None):\n if 'table' not in write_config:\n raise ValueError(\"write_config must be provided with 'table' specified\")\n self.write_config = write_config\n self.schema_config = schema_config", "docstring": "Configuration for writing vectors to BigQuery using managed transforms.\n\n Supports both default schema (id, embedding, content, metadata columns) and\n custom schemas through SchemaConfig.\n\n Example with default schema:\n >>> config = BigQueryVectorWriterConfig(\n ... write_config={'table': 'project.dataset.embeddings'})\n\n Example with custom schema:\n >>> schema_config = SchemaConfig(\n ... schema={\n ... 'fields': [\n ... {'name': 'id', 'type': 'STRING'},\n ... {'name': 'embedding', 'type': 'FLOAT64', 'mode': 'REPEATED'},\n ... {'name': 'source_url', 'type': 'STRING'}\n ... ]\n ... },\n ... chunk_to_dict_fn=lambda chunk: {\n ... 'id': chunk.id,\n ... 'embedding': chunk.embedding.dense_embedding,\n ... 'source_url': chunk.metadata.get('url')\n ... }\n ... )\n >>> config = BigQueryVectorWriterConfig(\n ... write_config={'table': 'project.dataset.embeddings'},\n ... schema_config=schema_config\n ... )\n\nArgs:\n write_config: BigQuery write configuration dict. 
Must include 'table'.\n Other options like create_disposition, write_disposition can be\n specified.\n schema_config: Optional configuration for custom schema and row\n conversion.\n If not provided, uses default schema with id, embedding, content and\n metadata columns.\n\nRaises:\n ValueError: If write_config doesn't include table specification.", "source": "github_repos"} -{"code": "def make_pool_tests(pool_op_in, allow_fully_quantize=False):\n pool_op = pool_op_in\n\n def f(options, expected_tf_failures=0):\n \"\"\"Actual function that generates examples.\n\n Args:\n options: An Options instance.\n expected_tf_failures: number of expected tensorflow failures.\n \"\"\"\n test_parameters = [{'ksize': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'strides': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'input_shape': [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [False], 'quant_16x8': [False]}, {'ksize': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'strides': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'input_shape': [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [True], 'quant_16x8': [False]}, {'ksize': [[1, 1, 1, 1]], 'strides': [[1, 1, 1, 1]], 'input_shape': [[1, 1, 1, 1]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [True], 'quant_16x8': [True]}]\n if not allow_fully_quantize:\n test_parameters = [test_parameter for test_parameter in test_parameters if True not in test_parameter['fully_quantize']]\n\n def build_graph(parameters):\n input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=parameters['input_shape'])\n out = pool_op(input_tensor, ksize=parameters['ksize'], strides=parameters['strides'], data_format=parameters['data_format'], padding=parameters['padding'])\n return ([input_tensor], [out])\n\n def build_inputs(parameters, sess, inputs, outputs):\n if allow_fully_quantize:\n input_values = create_tensor_data(tf.float32, parameters['input_shape'], min_value=-1, max_value=1)\n else:\n input_values = create_tensor_data(tf.float32, parameters['input_shape'])\n return ([input_values], sess.run(outputs, feed_dict=dict(zip(inputs, [input_values]))))\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=expected_tf_failures)\n return f", "docstring": "Make a set of tests to do average pooling.\n\nArgs:\n pool_op_in: TensorFlow pooling operation to test i.e. 
`tf.nn.avg_pool2d`.\n allow_fully_quantize: bool, whether fully_quantize is allowed.\n\nReturns:\n A function representing the true generator (after curried pool_op_in).", "source": "github_repos"} -{"code": "def _GetMaps(name, commands, default_options):\n global_options = copy.copy(default_options)\n options_map = collections.defaultdict(lambda: copy.copy(default_options))\n subcommands_map = collections.defaultdict(set)\n for command in commands:\n if len(command) == 1:\n if _IsOption(command[0]):\n global_options.add(command[0])\n else:\n subcommands_map[name].add(command[0])\n elif command:\n subcommand = command[-2]\n arg = _FormatForCommand(command[-1])\n if _IsOption(arg):\n args_map = options_map\n else:\n args_map = subcommands_map\n args_map[subcommand].add(arg)\n args_map[subcommand.replace('_', '-')].add(arg)\n return (global_options, options_map, subcommands_map)", "docstring": "Returns sets of subcommands and options for each command.\n\nArgs:\n name: The first token in the commands, also the name of the command.\n commands: A list of all possible commands that tab completion can complete\n to. Each command is a list or tuple of the string tokens that make up\n that command.\n default_options: A dict of options that can be used with any command. Use\n this if there are flags that can always be appended to a command.\n\nReturns:\n global_options: A set of all options of the first token of the command.\n subcommands_map: A dict storing set of subcommands for each\n command/subcommand.\n options_map: A dict storing set of options for each subcommand.", "source": "github_repos"} -{"code": "def convert_variable_to_constant(self, incoming_edge, tensor_data):\n index = incoming_edge.destination.index\n for edge in self.outgoing_edges:\n if edge.source.index == index:\n edge.destination.convertible.convert_variable_to_constant(edge, tensor_data)\n function = self.converted_self().function\n function.signature.input_arg[index].type = tensor_data.dtype\n if '_input_shapes' in function.attr:\n function.attr['_input_shapes'].list.shape[index].unknown_rank = True\n del function.attr['_input_shapes'].list.shape[index].dim[:]\n arg_attrs = function.arg_attr[index].attr\n if '_output_shapes' in arg_attrs:\n arg_attrs['_output_shapes'].list.shape[0].unknown_rank = True\n del arg_attrs['_output_shapes'].list.shape[0].dim[:]", "docstring": "Converts one function argument into a constant.\n\nArgs:\n incoming_edge: The edge into the argument to be converted.\n tensor_data: The constant value.", "source": "github_repos"} -{"code": "def byte_swap_tflite_model_obj(model, from_endiness, to_endiness):\n if model is None:\n return\n buffer_swapped = []\n types_of_16_bits = [schema_fb.TensorType.FLOAT16, schema_fb.TensorType.INT16, schema_fb.TensorType.UINT16]\n types_of_32_bits = [schema_fb.TensorType.FLOAT32, schema_fb.TensorType.INT32, schema_fb.TensorType.COMPLEX64, schema_fb.TensorType.UINT32]\n types_of_64_bits = [schema_fb.TensorType.INT64, schema_fb.TensorType.FLOAT64, schema_fb.TensorType.COMPLEX128, schema_fb.TensorType.UINT64]\n for subgraph in model.subgraphs:\n for tensor in subgraph.tensors:\n if tensor.buffer > 0 and tensor.buffer < len(model.buffers) and (tensor.buffer not in buffer_swapped) and (model.buffers[tensor.buffer].data is not None):\n if tensor.type == schema_fb.TensorType.STRING:\n byte_swap_string_content(model.buffers[tensor.buffer], from_endiness, to_endiness)\n elif tensor.type in types_of_16_bits:\n byte_swap_buffer_content(model.buffers[tensor.buffer], 2, 
from_endiness, to_endiness)\n elif tensor.type in types_of_32_bits:\n byte_swap_buffer_content(model.buffers[tensor.buffer], 4, from_endiness, to_endiness)\n elif tensor.type in types_of_64_bits:\n byte_swap_buffer_content(model.buffers[tensor.buffer], 8, from_endiness, to_endiness)\n else:\n continue\n buffer_swapped.append(tensor.buffer)", "docstring": "Byte swaps the buffers field in a TFLite model.\n\nArgs:\n model: TFLite model object of from_endiness format.\n from_endiness: The original endianness format of the buffers in model.\n to_endiness: The destined endianness format of the buffers in model.", "source": "github_repos"} -{"code": "def broadcast(tensor):\n _check_device(tensor)\n with ops.device(tensor.device):\n return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)", "docstring": "Returns a tensor that can be efficiently transferred to other devices.\n\nArgs:\n tensor: The tensor to send; must be assigned to a GPU device.\n\nReturns:\n A tensor with the value of `src_tensor`, which can be used as input to\n ops on other GPU devices.", "source": "github_repos"} -{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n deps = []\n if not self._built:\n self._build(tensor_shape.TensorShape(y_pred.shape))\n if self.multi_label or self.label_weights is not None:\n shapes = [(y_true, ('N', 'L'))]\n if self.multi_label:\n shapes.extend([(self.true_positives, ('T', 'L')), (self.true_negatives, ('T', 'L')), (self.false_positives, ('T', 'L')), (self.false_negatives, ('T', 'L'))])\n if self.label_weights is not None:\n shapes.append((self.label_weights, ('L',)))\n deps = [check_ops.assert_shapes(shapes, message='Number of labels is not consistent.')]\n label_weights = None if self.multi_label else self.label_weights\n if self._from_logits:\n y_pred = activations.sigmoid(y_pred)\n with ops.control_dependencies(deps):\n return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, self._thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight, multi_label=self.multi_label, label_weights=label_weights)", "docstring": "Accumulates confusion matrix statistics.\n\nArgs:\n y_true: The ground truth values.\n y_pred: The predicted values.\n sample_weight: Optional weighting of each example. Defaults to 1. 
Can be a\n `Tensor` whose rank is either 0, or the same rank as `y_true`, and must\n be broadcastable to `y_true`.\n\nReturns:\n Update op.", "source": "github_repos"} -{"code": "def _get_base_converter_args(self):\n args = {'input_format': constants.TENSORFLOW_GRAPHDEF, 'allow_custom_ops': self.allow_custom_ops, 'debug_info': self._debug_info, 'target_ops': self.target_spec.supported_ops, 'select_user_tf_ops': self.target_spec.experimental_select_user_tf_ops, 'supported_backends': self.target_spec.experimental_supported_backends, 'unfold_batchmatmul': self.unfold_batchmatmul, 'legalize_custom_tensor_list_ops': self.legalize_custom_tensor_list_ops, 'lower_tensor_list_ops': self._experimental_lower_tensor_list_ops, 'unfold_large_splat_constant': self._experimental_unfold_large_splat_constant, 'default_to_single_batch_in_tensor_list_ops': self._experimental_default_to_single_batch_in_tensor_list_ops, 'tf_quantization_mode': self._experimental_tf_quantization_mode, 'experimental_enable_resource_variables': self.experimental_enable_resource_variables, 'enable_dynamic_update_slice': self._experimental_enable_dynamic_update_slice, 'preserve_assert_op': self._experimental_preserve_assert_op, 'guarantee_all_funcs_one_use': self._experimental_guarantee_all_funcs_one_use, 'allow_all_select_tf_ops': self._experimental_allow_all_select_tf_ops, 'disable_fuse_mul_and_fc': self._experimental_disable_fuse_mul_and_fc, 'quantization_options': self._experimental_quantization_options, 'ir_dump_dir': self.ir_dump_dir, 'ir_dump_pass_regex': self.ir_dump_pass_regex, 'ir_dump_func_regex': self.ir_dump_func_regex, 'enable_timing': self.enable_timing, 'print_ir_before': self.print_ir_before, 'print_ir_after': self.print_ir_after, 'print_ir_module_scope': self.print_ir_module_scope, 'elide_elementsattrs_if_larger': self.elide_elementsattrs_if_larger, 'use_buffer_offset': self._experimental_use_buffer_offset, 'reduce_type_precision': self._experimental_reduce_type_precision, 'use_stablehlo_quantizer': self.experimental_use_stablehlo_quantizer, 'stablehlo_quantizer_config': self.experimental_stablehlo_quantizer_config, 'qdq_conversion_mode': self._experimental_qdq_conversion_mode, 'strict_qdq_mode': self._experimental_strict_qdq, 'disable_per_channel_quantization_for_dense_layers': self._experimental_disable_per_channel_quantization_for_dense_layers, 'enable_composite_direct_lowering': self._experimental_enable_composite_direct_lowering, 'model_origin_framework': self.model_origin_framework, 'canonicalizing_inf_as_min_max_float': self.canonicalizing_inf_as_min_max_float, 'serialize_debug_metadata': self.serialize_debug_metadata, 'unsafe_fuse_dynamic_shaped_broadcast': self._experimental_unsafe_fuse_dynamic_shaped_broadcast}\n if self.saved_model_dir:\n args.update({'saved_model_dir': self.saved_model_dir, 'saved_model_version': self._saved_model_version, 'saved_model_tags': self._saved_model_tags, 'saved_model_exported_names': self._saved_model_exported_names})\n if self._experimental_quantization_options:\n logging.warning('Configs from custom methods in experimental_quantization_options may not produce a valid tflite model. Note that currently this option only supports StableHLO path. 
Setting this option in TFLite path will be a no-op.')\n if self.experimental_use_stablehlo_quantizer:\n self._assign_stablehlo_quantization_config_or_populate_default(args)\n elif self.experimental_stablehlo_quantizer_config is not None:\n raise ValueError('QuantizationConfig should be provided only when experimental_use_stablehlo_quantizer is set to true.')\n return args", "docstring": "Returns the base converter args.\n\nReturns:\n {key str: val}", "source": "github_repos"} -{"code": "def __init__(self, qas_id, question_text, context_text, answer_text, start_position_character, title, answers=[], is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.context_text = context_text\n self.answer_text = answer_text\n self.title = title\n self.is_impossible = is_impossible\n self.answers = answers\n self.start_position, self.end_position = (0, 0)\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in self.context_text:\n if _is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n self.doc_tokens = doc_tokens\n self.char_to_word_offset = char_to_word_offset\n if start_position_character is not None and (not is_impossible):\n self.start_position = char_to_word_offset[start_position_character]\n self.end_position = char_to_word_offset[min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)]", "docstring": "A single training/test example for the Squad dataset, as loaded from disk.\n\nArgs:\n qas_id: The example's unique identifier\n question_text: The question string\n context_text: The context string\n answer_text: The answer string\n start_position_character: The character position of the start of the answer\n title: The title of the example\n answers: None by default, this is used during evaluation. Holds answers as well as their start positions.\n is_impossible: False by default, set to True if the example has no possible answer.", "source": "github_repos"} -{"code": "def __init__(self, q1_tracker: Optional[QuantileTracker]=None, q3_tracker: Optional[QuantileTracker]=None, **kwargs):\n if 'threshold_criterion' not in kwargs:\n kwargs['threshold_criterion'] = FixedThreshold(1.5)\n super().__init__(**kwargs)\n self._q1_tracker = q1_tracker or BufferedSlidingQuantileTracker(DEFAULT_WINDOW_SIZE, 0.25)\n assert self._q1_tracker._q == 0.25, 'q1_tracker must be initialized with q = 0.25'\n self._q3_tracker = q3_tracker or SecondaryBufferedQuantileTracker(self._q1_tracker, 0.75)\n assert self._q3_tracker._q == 0.75, 'q3_tracker must be initialized with q = 0.75'", "docstring": "Interquartile Range (IQR) anomaly detector.\n\n This class implements an anomaly detection algorithm based on the\n Interquartile Range (IQR) [#]_ . It calculates the IQR using quantile trackers\n for Q1 (25th percentile) and Q3 (75th percentile) and scores data points based\n on their deviation from these quartiles.\n\n The score is calculated as follows:\n\n * If a data point is above Q3, the score is (value - Q3) / IQR.\n * If a data point is below Q1, the score is (Q1 - value) / IQR.\n * If a data point is within the IQR (Q1 <= value <= Q3), the score is 0.\n Initializes the IQR anomaly detector.\n\nArgs:\n q1_tracker: Optional QuantileTracker for Q1 (25th percentile). 
If None, a\n BufferedSlidingQuantileTracker with a default window size is used.\n q3_tracker: Optional QuantileTracker for Q3 (75th percentile). If None, a\n SecondaryBufferedQuantileTracker based on q1_tracker is used.\n threshold_criterion: Optional ThresholdFn to apply on the score. Defaults\n to `FixedThreshold(1.5)` since outliers are commonly defined as data\n points that fall below Q1 - 1.5 IQR or above Q3 + 1.5 IQR.\n **kwargs: Additional keyword arguments.\n\n .. [#] https://en.wikipedia.org/wiki/Interquartile_range", "source": "github_repos"} -{"code": "def dequantize_higgs(model, current_key_name=None):\n with torch.no_grad():\n for name, module in model.named_children():\n if current_key_name is None:\n current_key_name = []\n current_key_name.append(name)\n if isinstance(module, HiggsLinear):\n in_features = module.in_features\n out_features = module.out_features\n model._modules[name] = torch.nn.Linear(in_features, out_features, bias=module.bias is not None, device=module.scales.device, dtype=module.scales.dtype)\n model._modules[name].weight.data = module(torch.eye(in_features, device=module.scales.device, dtype=module.scales.dtype)).T.contiguous()\n if len(list(module.children())) > 0:\n _ = dequantize_higgs(module, current_key_name=current_key_name)\n current_key_name.pop(-1)\n return model", "docstring": "Dequantizes the HiggsLinear layers in the given model by replacing them with standard torch.nn.Linear layers.\n\nArgs:\n model (torch.nn.Module): The model containing HiggsLinear layers to be dequantized.\n current_key_name (list, optional): A list to keep track of the current module names during recursion. Defaults to None.\n\nReturns:\n torch.nn.Module: The model with HiggsLinear layers replaced by torch.nn.Linear layers.", "source": "github_repos"} -{"code": "def add_extension_to_message(extension: message.Message, msg: message.Message) -> None:\n desc = msg.DESCRIPTOR\n fields_by_url = {get_inlined_extension_url(field): field for field in desc.fields if field.name != 'id'}\n id_field = desc.fields_by_name.get('id')\n if proto_utils.field_is_set(extension, id_field):\n proto_utils.set_value_at_field(msg, id_field, cast(Any, extension).id)\n if proto_utils.field_is_set(extension, 'value'):\n if len(fields_by_url) != 1:\n raise fhir_errors.InvalidFhirError(f'Expected a single field, found {len(fields_by_url)}; {desc.full_name} is an invalid extension type.')\n field = list(fields_by_url.items())[0][1]\n if proto_utils.field_is_repeated(field):\n raise fhir_errors.InvalidFhirError(f'Expected {field.full_name} to be a singular field. 
{desc.full_name} is an invalid extension type.')\n _add_extension_value_to_message(extension, msg, field)\n return\n child_extensions = proto_utils.get_value_at_field(extension, 'extension')\n for child_extension in child_extensions:\n field = fields_by_url.get(child_extension.url.value)\n if field is None:\n raise ValueError(f'Message of type: {desc.full_name} has no field with name: {child_extension.url.value}.')\n if proto_utils.field_is_set(child_extension, 'value'):\n _add_extension_value_to_message(child_extension, msg, field)\n continue\n if not proto_utils.field_is_repeated(field):\n if proto_utils.field_is_set(msg, field):\n raise ValueError(f'Field: {field.full_name} is already set on message: {desc.full_name}.')\n if proto_utils.field_content_length(child_extension, 'extension') > 1:\n raise ValueError(f'Cardinality mismatch between field: {field.full_name} and extension: {desc.full_name}.')\n child_message = proto_utils.set_in_parent_or_add(msg, field)\n add_extension_to_message(child_extension, child_message)", "docstring": "Recursively parses extension and adds to message.\n\nArgs:\n extension: The FHIR extension to serialize and add.\n msg: The message to add the extension onto\n\nRaises:\n InvalidFhirError: In the event that a value is set on the extension, but the\n corresponding message field to copy it to is repeated (extension values are\n singular only).", "source": "github_repos"} -{"code": "def early_stop_by_value(step_values: List[Tuple[int, float]], metric: Union[str, Callable[[pg.tuning.Measurement], float]]='reward', maximize: bool=True):\n assert isinstance(step_values, list), step_values\n for v in step_values:\n if not isinstance(v, tuple) or len(v) != 2 or (not isinstance(v[0], int)) or (not isinstance(v[1], numbers.Number)):\n raise ValueError(f'Invalid definition in `step_values`: {v}. 
Expect a tuple of 2 elements: (step: int, threshold: float).')\n\n def _cmp(x, y) -> bool:\n return x < y if maximize else x > y\n\n def _value(m: pg.tuning.Measurement) -> float:\n if isinstance(metric, str):\n return m.reward if metric == 'reward' else m.metrics[metric]\n assert callable(metric), metric\n return metric(m)\n\n def _make_predicate(threshold: float):\n\n def _predicate(m: pg.tuning.Measurement, unused_history):\n v = _value(m)\n ret = _cmp(v, threshold)\n return ret\n return _predicate\n return StepWise([(step, _make_predicate(threshold)) for step, threshold in step_values])", "docstring": "Step-wise early stopping policy based on the value of reward/metric.\n\n Example::\n\n policy = early_stop_by_value([\n # Stop at step 1 if trial reward is less than 0.2.\n (1, 0.2),\n\n # Stop at step 2 if trial reward is less than 0.8.\n (2, 0.8),\n ])()\n\nArgs:\n step_values: A list of tuple (gating step, value threshold).\n gating step - At which step this rule will be triggered.\n value threshold - A float number indicating the threshold value for\n early stopping.\n metric: Based on which metric the value should be compared against.\n Use str for metric name or a callable object that takes a measurement\n object at a given step as input and returns a float value.\n maximize: If True, reward or metric value below the threshold will be\n stopped, otherwise trials with values above the threshold will be stopped.\n\nReturns:\n A `StepWise` early stopping policy.", "source": "github_repos"} -{"code": "def __rmul__(self, other):\n return self * other", "docstring": "Returns the product of `self` and `other`.\n\nArgs:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\n A Dimension whose value is the product of `self` and `other`.", "source": "github_repos"} -{"code": "def reparameterization_type(self):\n return self._reparameterization_type", "docstring": "Describes how samples from the distribution are reparameterized.\n\n Currently this is one of the static instances\n `distributions.FULLY_REPARAMETERIZED`\n or `distributions.NOT_REPARAMETERIZED`.\n\nReturns:\n An instance of `ReparameterizationType`.", "source": "github_repos"} -{"code": "def CanSplit(self, must_split):\n current = self.next_token\n previous = current.previous_token\n if current.is_pseudo:\n return False\n if not must_split and subtypes.DICTIONARY_KEY_PART in current.subtypes and (subtypes.DICTIONARY_KEY not in current.subtypes) and (not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')):\n return False\n if not must_split and subtypes.DICTIONARY_VALUE in current.subtypes and (not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')):\n return False\n if previous and previous.value == '(' and (current.value == ')'):\n token = previous.previous_token\n while token:\n prev = token.previous_token\n if not prev or prev.name not in {'NAME', 'DOT'}:\n break\n token = token.previous_token\n if token and subtypes.DICTIONARY_VALUE in token.subtypes:\n if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'):\n return False\n if previous and previous.value == '.' 
and (current.value == '.'):\n return False\n return current.can_break_before", "docstring": "Determine if we can split before the next token.\n\nArgs:\n must_split: (bool) A newline was required before this token.\n\nReturns:\n True if the line can be split before the next token.", "source": "github_repos"} -{"code": "def to_code(entity, recursive=True, experimental_optional_features=None):\n source = tf_inspect.getsource(to_graph(entity, recursive=recursive, experimental_optional_features=experimental_optional_features))\n return textwrap.dedent(source)", "docstring": "Returns the source code generated by AutoGraph, as a string.\n\n Example usage:\n\n >>> def f(x):\n ... if x < 0:\n ... x = -x\n ... return x\n >>> tf.autograph.to_code(f)\n \"...def tf__f(x):...\"\n\n Also see: `tf.autograph.to_graph`.\n\n Note: If a function has been decorated with `tf.function`, pass its\n underlying Python function, rather than the callable that `tf.function\n creates:\n\n >>> @tf.function\n ... def f(x):\n ... if x < 0:\n ... x = -x\n ... return x\n >>> tf.autograph.to_code(f.python_function)\n \"...def tf__f(x):...\"\n\nArgs:\n entity: Python callable or class to convert.\n recursive: Whether to recursively convert any functions that the converted\n function may call.\n experimental_optional_features: `None`, a tuple of, or a single\n `tf.autograph.experimental.Feature` value.\n\nReturns:\n The converted code as string.", "source": "github_repos"} -{"code": "def year_month_day_to_ordinal(year, month, day):\n with tf.compat.v1.name_scope(None, 'ymd2o', [year, month, day]):\n year = tf.convert_to_tensor(year, tf.int32, name='year')\n month = tf.convert_to_tensor(month, tf.int32, name='month')\n day = tf.convert_to_tensor(day, tf.int32, name='day')\n year -= tf.compat.v2.where(month <= 2, 1, 0)\n month += tf.compat.v2.where(month > 2, -3, 9)\n era = year // _YEARS_IN_ERA\n year_of_era = year % _YEARS_IN_ERA\n day_of_year = _days_in_year_before_month(month) + day - 1\n day_of_era = year_of_era * _DAYS_IN_YEAR + year_of_era // 4 - year_of_era // 100 + day_of_year\n return era * _DAYS_IN_ERA + day_of_era + _ORDINAL_OF_1_3_0000", "docstring": "Calculates ordinals Tensor given years, months and dates.\n\nArgs:\n year: Tensor of int32 type. Elements should be positive.\n month: Tensor of int32 type of same shape as `year`. Elements should be in\n range `[1, 12]`.\n day: Tensor of int32 type of same shape as `year`. Elements should be in\n range `[1, 31]` and represent valid dates together with corresponding\n elements of `month` and `year` Tensors.\n\nReturns:\n Tensor of int32 type. Each element is number of days since 1 Jan 0001. 1 Jan\n 0001 has `ordinal = 1`.", "source": "github_repos"} -{"code": "def smart_constant_value(pred):\n if isinstance(pred, tensor.Tensor):\n pred_value = tensor_util.constant_value(pred)\n if pred_value is None:\n pred_value = tensor_util.try_evaluate_constant(pred)\n elif pred in {0, 1}:\n pred_value = bool(pred)\n elif isinstance(pred, bool):\n pred_value = pred\n else:\n raise TypeError(f'Argument `pred` must be a Tensor, or a Python bool, or 1 or 0. 
Received: pred={pred} of type {type(pred).__name__}')\n return pred_value", "docstring": "Return the bool value for `pred`, or None if `pred` had a dynamic value.\n\nArgs:\n pred: A scalar, either a Python bool or tensor.\n\nReturns:\n True or False if `pred` has a constant boolean value, None otherwise.\n\nRaises:\n TypeError: If `pred` is not a Tensor or bool.", "source": "github_repos"} -{"code": "def FormatFiles(filenames, lines, style_config=None, no_local_style=False, in_place=False, print_diff=False, parallel=False, quiet=False, verbose=False, print_modified=False):\n changed = False\n if parallel:\n import concurrent.futures\n import multiprocessing\n workers = min(multiprocessing.cpu_count(), len(filenames))\n with concurrent.futures.ProcessPoolExecutor(workers) as executor:\n future_formats = [executor.submit(_FormatFile, filename, lines, style_config, no_local_style, in_place, print_diff, quiet, verbose, print_modified) for filename in filenames]\n for future in concurrent.futures.as_completed(future_formats):\n changed |= future.result()\n else:\n for filename in filenames:\n changed |= _FormatFile(filename, lines, style_config, no_local_style, in_place, print_diff, quiet, verbose, print_modified)\n return changed", "docstring": "Format a list of files.\n\nArgs:\n filenames: (list of unicode) A list of files to reformat.\n lines: (list of tuples of integers) A list of tuples of lines, [start, end],\n that we want to format. The lines are 1-based indexed. This argument\n overrides the 'args.lines'. It can be used by third-party code (e.g.,\n IDEs) when reformatting a snippet of code.\n style_config: (string) Style name or file path.\n no_local_style: (string) If style_config is None don't search for\n directory-local style configuration.\n in_place: (bool) Modify the files in place.\n print_diff: (bool) Instead of returning the reformatted source, return a\n diff that turns the formatted source into reformatter source.\n parallel: (bool) True if should format multiple files in parallel.\n quiet: (bool) True if should output nothing.\n verbose: (bool) True if should print out filenames while processing.\n print_modified: (bool) True if should print out filenames of modified files.\n\nReturns:\n True if the source code changed in any of the files being formatted.", "source": "github_repos"} -{"code": "def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):\n cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)\n model = AutoModelForSeq2SeqLM.from_config(cfg)\n model.save_pretrained(save_dir)\n AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)\n return model", "docstring": "Save a randomly initialized version of a model using a pretrained config.\n\nArgs:\n config_name: which config to use\n save_dir: where to save the resulting model and tokenizer\n config_kwargs: Passed to AutoConfig\n\n Usage::\n save_randomly_initialized_version(\"facebook/bart-large-cnn\", \"distilbart_random_cnn_6_3\", encoder_layers=6, decoder_layers=3, num_beams=3)", "source": "github_repos"} -{"code": "def get_prompt_embeddings(self, input_points: tf.Tensor | None=None, input_labels: tf.Tensor | None=None, input_boxes: tf.Tensor | None=None, input_masks: tf.Tensor | None=None):\n prompt_output = self.prompt_encoder(input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks)\n return prompt_output", "docstring": "Returns the prompt embeddings by passing the input points, labels, boxes and masks through 
the prompt encoder.\n\nArgs:\n input_points (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):\n Optional input points for the prompt encoder. The padding of the point is automatically done by the\n processor. `point_batch_size` refers to the number of masks that we want the model to predict per\n point. The model will output `point_batch_size` times 3 masks in total.\n input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):\n Optional input labels for the prompt encoder. The padding of the labels is automatically done by the\n processor, or can be fed by the user.\n input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes_per_image, 4)`):\n Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the\n processor. users can also pass manually the input boxes.\n input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`):\n Optional input masks for the prompt encoder.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ClapTextModelOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output\n text_embeds = self.text_projection(pooled_output)\n if not return_dict:\n outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]\n return tuple((output for output in outputs if output is not None))\n return ClapTextModelOutput(text_embeds=text_embeds, last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions)", "docstring": "Examples:\n\n ```python\n >>> from transformers import AutoTokenizer, ClapTextModelWithProjection\n\n >>> model = ClapTextModelWithProjection.from_pretrained(\"laion/clap-htsat-unfused\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"laion/clap-htsat-unfused\")\n\n >>> inputs = tokenizer([\"a sound of a cat\", \"a sound of a dog\"], padding=True, return_tensors=\"pt\")\n\n >>> outputs = model(**inputs)\n >>> text_embeds = outputs.text_embeds\n ```", "source": "github_repos"} -{"code": "def For(start, limit, delta, inputs, body, name=None, hostmem=None, rewrite_with_while=None):\n if rewrite_with_while:\n return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem)\n if body.captured_inputs:\n ret = gen_functional_ops._for(start, limit, delta, inputs + body.captured_inputs, _LoopBodyCaptureWrapper(body), name=name)\n ret = ret[:-len(body.captured_inputs)]\n else:\n ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name)\n if hostmem:\n num_for_params = 3\n input_attr = attr_value_pb2.AttrValue()\n input_attr.list.i.extend([num_for_params + i for i in hostmem])\n ret[0].op._set_attr('_input_hostmem', input_attr)\n output_attr = attr_value_pb2.AttrValue()\n output_attr.list.i.extend(hostmem)\n ret[0].op._set_attr('_output_hostmem', output_attr)\n return ret", "docstring": "out = input; for i in range(start, limit, delta) out = body(i, out).\n\nArgs:\n start: A 
`Tensor` of type `int32`.\n limit: A `Tensor` of type `int32`.\n delta: A `Tensor` of type `int32`.\n inputs: A list of `Tensor` objects. A list of input tensors whose types are\n T.\n body: A function takes a list of tensors and returns another list of\n tensors. Both lists have the same types as (int32, T...).\n name: A name for the operation (optional).\n hostmem: A list of integer. If i is in the list, inputs[i] is a host memory\n tensor. In other words, (i+1)-th argument of the body function is\n expecting a host memory.\n rewrite_with_while: If True, using While op to implement the For.\n\nReturns:\n A list of `Tensor` objects. Has the same type as `input`.\n A list of output tensors whose types are T.", "source": "github_repos"} -{"code": "def extract_sequences(x, sequence_length, sequence_stride):\n if any_symbolic_tensors((x,)):\n return ExtractSequences(sequence_length, sequence_stride).symbolic_call(x)\n return backend.math.extract_sequences(x, sequence_length, sequence_stride)", "docstring": "Expands the dimension of last axis into sequences of `sequence_length`.\n\n Slides a window of size `sequence_length` over the last axis of the input\n with a stride of `sequence_stride`, replacing the last axis with\n `[num_sequences, sequence_length]` sequences.\n\n If the dimension along the last axis is N, the number of sequences can be\n computed by:\n\n `num_sequences = 1 + (N - sequence_length) // sequence_stride`\n\nArgs:\n x: Input tensor.\n sequence_length: An integer representing the sequences length.\n sequence_stride: An integer representing the sequences hop size.\n\nReturns:\n A tensor of sequences with shape [..., num_sequences, sequence_length].\n\nExample:\n >>> x = keras.ops.convert_to_tensor([1, 2, 3, 4, 5, 6])\n >>> extract_sequences(x, 3, 2)\n array([[1, 2, 3],\n [3, 4, 5]])", "source": "github_repos"} -{"code": "def set_callback_parameters(callback_list, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, mode=ModeKeys.TRAIN):\n metric_names = model.metrics_names\n for cbk in callback_list:\n if isinstance(cbk, (BaseLogger, ProgbarLogger)):\n cbk.stateful_metrics = metric_names[1:]\n callback_metrics = []\n if mode != ModeKeys.PREDICT:\n callback_metrics = copy.copy(metric_names)\n if do_validation:\n callback_metrics += ['val_' + n for n in metric_names]\n callback_params = {'batch_size': batch_size, 'epochs': epochs, 'steps': steps_per_epoch, 'samples': samples, 'verbose': verbose, 'do_validation': do_validation, 'metrics': callback_metrics}\n callback_list.set_params(callback_params)", "docstring": "Sets callback parameters.\n\nArgs:\n callback_list: CallbackList instance.\n model: Model being trained.\n do_validation: Whether or not validation loop will be run.\n batch_size: Number of samples per batch.\n epochs: Number of epoch to train.\n steps_per_epoch: Number of batches to run per training epoch.\n samples: Number of training samples.\n verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.\n mode: String. 
One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.\n Which loop mode to configure callbacks for.", "source": "github_repos"} -{"code": "def remove_hallucinated_references(self, text: str) -> str:\n lines = text.split('\\n')\n if len(lines) == 0:\n return ''\n clean_lines = remove_numbers(lines)\n slices = get_slices(lines, clean_lines)\n to_delete = []\n for slice in slices:\n to_delete.append(remove_slice_from_lines(lines, clean_lines, slice))\n for to_delete in reversed(to_delete):\n text = text.replace(to_delete, '\\n\\n[MISSING_PAGE_POST]\\n\\n')\n text = re.sub('## References\\\\n+\\\\[MISSING_PAGE_POST(:\\\\d+)?\\\\]', '\\n\\n[MISSING_PAGE_POST\\\\1]', text)\n return text", "docstring": "Remove hallucinated or missing references from the text.\n\n This function identifies and removes references that are marked as missing or hallucinated from the input text.\n\nArgs:\n text (`str`):\n The input text containing references.\n\nReturns:\n `str`: The text with hallucinated references removed.", "source": "github_repos"} -{"code": "def tpu_core_locations_to_ids(self, tpu_core_locations):\n return _pywrap_dtensor_device.TPUCoreLocationsToIDs(context.context()._handle, self._device_info, tpu_core_locations)", "docstring": "Translates TPU core locations to TPU core IDs.\n\nArgs:\n tpu_core_locations: A list of TPU core locations. Each one is a list of\n four unsigned integers, [x, y, z, core].\n\nReturns:\n A list of corresponding TPU core IDs.", "source": "github_repos"} -{"code": "def add_debug_tensor_watch(run_options, node_name, output_slot=0, debug_ops='DebugIdentity', debug_urls=None, tolerate_debug_op_creation_failures=False, global_step=-1):\n watch_opts = run_options.debug_options.debug_tensor_watch_opts\n run_options.debug_options.global_step = global_step\n watch = watch_opts.add()\n watch.tolerate_debug_op_creation_failures = tolerate_debug_op_creation_failures\n watch.node_name = node_name\n watch.output_slot = output_slot\n if isinstance(debug_ops, str):\n debug_ops = [debug_ops]\n watch.debug_ops.extend(debug_ops)\n if debug_urls:\n if isinstance(debug_urls, str):\n debug_urls = [debug_urls]\n watch.debug_urls.extend(debug_urls)", "docstring": "Add watch on a `Tensor` to `RunOptions`.\n\n N.B.:\n 1. Under certain circumstances, the `Tensor` may not get actually watched\n (e.g., if the node of the `Tensor` is constant-folded during runtime).\n 2. For debugging purposes, the `parallel_iteration` attribute of all\n `tf.while_loop`s in the graph are set to 1 to prevent any node from\n being executed multiple times concurrently. This change does not affect\n subsequent non-debugged runs of the same `tf.while_loop`s.\n\nArgs:\n run_options: An instance of `config_pb2.RunOptions` to be modified.\n node_name: (`str`) name of the node to watch.\n output_slot: (`int`) output slot index of the tensor from the watched node.\n debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s). Can be a\n `list` of `str` or a single `str`. 
The latter case is equivalent to a\n `list` of `str` with only one element.\n For debug op types with customizable attributes, each debug op string can\n optionally contain a list of attribute names, in the syntax of:\n debug_op_name(attr_name_1=attr_value_1;attr_name_2=attr_value_2;...)\n debug_urls: (`str` or `list` of `str`) URL(s) to send debug values to,\n e.g., `file:///tmp/tfdbg_dump_1`, `grpc://localhost:12345`.\n tolerate_debug_op_creation_failures: (`bool`) Whether to tolerate debug op\n creation failures by not throwing exceptions.\n global_step: (`int`) Optional global_step count for this debug tensor\n watch.", "source": "github_repos"} -{"code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n raise NotImplementedError", "docstring": "Subtracts `tf.IndexedSlices` from this variable.\n\nArgs:\n sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\nReturns:\n The updated variable.\n\nRaises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github_repos"} -{"code": "def _preprocess_token_ids(self, token_ids, skip_special_tokens: bool=False):\n if skip_special_tokens:\n prompt_token_id = self.convert_tokens_to_ids('<|startofprev|>')\n decoder_start_token_id = self.convert_tokens_to_ids('<|startoftranscript|>')\n token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id)\n return token_ids", "docstring": "Pre-process the token ids for decoding by removing the prompt tokens ids and timestamp token ids.\n\nArgs:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Typically, obtained using the `__call__` method of the tokenizer.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens from the token ids. 
If `True`, the prompt token ids will be\n removed.", "source": "github_repos"} -{"code": "def recipe_sheets_copy(config, auth_read, from_sheet, from_tab, to_sheet, to_tab):\n sheets(config, {'auth': auth_read, 'template': {'sheet': from_sheet, 'tab': from_tab}, 'sheet': to_sheet, 'tab': to_tab})", "docstring": "Copy tab from a sheet to a sheet.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n from_sheet (string) - NA\n from_tab (string) - NA\n to_sheet (string) - NA\n to_tab (string) - NA", "source": "github_repos"} -{"code": "def __init__(self, conf, map_name, automount_mountpoint=None):\n super(Cache, self).__init__()\n self.log = logging.getLogger(__name__)\n self.conf = conf\n self.output_dir = conf.get('dir', '.')\n self.automount_mountpoint = automount_mountpoint\n self.map_name = map_name\n if map_name == config.MAP_PASSWORD:\n self.data = passwd.PasswdMap()\n elif map_name == config.MAP_SSHKEY:\n self.data = sshkey.SshkeyMap()\n elif map_name == config.MAP_GROUP:\n self.data = group.GroupMap()\n elif map_name == config.MAP_SHADOW:\n self.data = shadow.ShadowMap()\n elif map_name == config.MAP_NETGROUP:\n self.data = netgroup.NetgroupMap()\n elif map_name == config.MAP_AUTOMOUNT:\n self.data = automount.AutomountMap()\n else:\n raise error.UnsupportedMap('Cache does not support %s' % map_name)", "docstring": "Initialise the Cache object.\n\nArgs:\n conf: A dictionary of key/value pairs\n map_name: A string representation of the map type\n automount_mountpoint: A string containing the automount mountpoint,\n used only by automount maps.\n\nRaises:\n UnsupportedMap: for map types we don't know about", "source": "github_repos"} -{"code": "def get(self):\n return self._get_helper(self._sorted_items, self._q)", "docstring": "Returns the current quantile value using the sorted list.\n\n Calculates the quantile using linear interpolation on the sorted values.\n\nReturns:\n float: The calculated quantile value. Returns NaN if the window is empty.", "source": "github_repos"} -{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is None:\n return [1] + [0] * len(token_ids_0)\n return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1)", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github_repos"} -{"code": "def keyword_args_only(func):\n decorator_utils.validate_callable(func, 'keyword_args_only')\n\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n \"\"\"Keyword args only wrapper.\"\"\"\n if args:\n raise ValueError(f'The function {func.__name__} only accepts keyword arguments. Do not pass positional arguments. 
Received the following positional arguments: {args}')\n return func(**kwargs)\n return new_func", "docstring": "Decorator for marking specific function accepting keyword args only.\n\n This decorator raises a `ValueError` if the input `func` is called with any\n non-keyword args. This prevents the caller from providing the arguments in\n wrong order.\n\nArgs:\n func: The function or method needed to be decorated.\n\nReturns:\n Decorated function or method.\n\nRaises:\n ValueError: If `func` is not callable.", "source": "github_repos"} -{"code": "def get_operation_by_name(self, name) -> 'Operation':\n if not isinstance(name, str):\n raise TypeError('Operation names are strings (or similar), not %s.' % type(name).__name__)\n op = cast(Operation, self.as_graph_element(name, allow_tensor=False, allow_operation=True))\n return op", "docstring": "Returns the `Operation` with the given `name`.\n\n This method may be called concurrently from multiple threads.\n\nArgs:\n name: The name of the `Operation` to return.\n\nReturns:\n The `Operation` with the given `name`.\n\nRaises:\n TypeError: If `name` is not a string.\n KeyError: If `name` does not correspond to an operation in this graph.", "source": "github_repos"} -{"code": "def visit_boolean_op(self, boolean_logic: _evaluation.BooleanOperatorNode) -> _sql_data_types.Select:\n lhs_result = self.visit(boolean_logic.left)\n rhs_result = self.visit(boolean_logic.right)\n if lhs_result.sql_data_type != _sql_data_types.Boolean:\n lhs_result = lhs_result.is_not_null()\n if rhs_result.sql_data_type != _sql_data_types.Boolean:\n rhs_result = rhs_result.is_not_null()\n lhs_subquery = lhs_result.as_operand()\n rhs_subquery = rhs_result.as_operand()\n if boolean_logic.op == _ast.BooleanLogic.Op.IMPLIES:\n sql_value = f'NOT {lhs_subquery} OR {rhs_subquery}'\n elif boolean_logic.op == _ast.BooleanLogic.Op.XOR:\n sql_value = f'{lhs_subquery} <> {rhs_subquery}'\n else:\n sql_value = f'{lhs_subquery} {boolean_logic.op.upper()} {rhs_subquery}'\n return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=_sql_data_types.Boolean, _sql_alias='logic_'), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Translates a FHIRPath Boolean logic operation to Spark SQL.\n\n Note that evaluation for Boolean logic is only supported for Boolean\n operands of scalar cardinality.\n\nArgs:\n boolean_logic: The FHIRPath AST `BooleanLogic` node.\n\nReturns:\n A compiled Spark SQL expression.", "source": "github_repos"} -{"code": "def compute_output_signature(self, input_signature):\n\n def check_type_return_shape(s):\n if not isinstance(s, tensor.TensorSpec):\n raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s))\n return s.shape\n input_shape = nest.map_structure(check_type_return_shape, input_signature)\n output_shape = self.compute_output_shape(input_shape)\n dtype = self._compute_dtype\n if dtype is None:\n input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n dtype = input_dtypes[0]\n return nest.map_structure(lambda s: tensor.TensorSpec(dtype=dtype, shape=s), output_shape)", "docstring": "Compute the output tensor signature of the layer based on the inputs.\n\n Unlike a TensorShape object, a TensorSpec object contains both shape\n and dtype information for a tensor. 
This method allows layers to provide\n output dtype information if it is different from the input dtype.\n For any layer that doesn't implement this function,\n the framework will fall back to use `compute_output_shape`, and will\n assume that the output dtype matches the input dtype.\n\nArgs:\n input_signature: Single TensorSpec or nested structure of TensorSpec\n objects, describing a candidate input for the layer.\n\nReturns:\n Single TensorSpec or nested structure of TensorSpec objects, describing\n how the layer would transform the provided input.\n\nRaises:\n TypeError: If input_signature contains a non-TensorSpec object.", "source": "github_repos"} -{"code": "def matches(self, applied_ptransform):\n raise NotImplementedError", "docstring": "Determines whether the given AppliedPTransform matches.\n\n Note that the matching will happen *after* Runner API proto translation.\n If matching is done via type checks, to/from_runner_api[_parameter] methods\n must be implemented to preserve the type (and other data) through proto\n serialization.\n\n Consider URN-based translation instead.\n\nArgs:\n applied_ptransform: AppliedPTransform to be matched.\n\nReturns:\n a bool indicating whether the given AppliedPTransform is a match.", "source": "github_repos"} -{"code": "def _GetAxisFromLabel(subscripts, label):\n splits = subscripts.split(ellipsis)\n index = splits[0].find(label)\n if index != -1:\n return index\n if len(splits) < 2:\n return None\n index = splits[1].find(label)\n if index != -1:\n return index - len(splits[1])\n return None", "docstring": "Returns the axis (possibly negative) corresponding to a label.\n\n Returns the axis index of the axis label if it is before an ellipsis (or if\n the ellipsis is not present), and the negative index if it occurs after the\n ellipsis. E.g. index of `b` in `ab...cd`, is `1`, but that of `c` is `-2`.\n\n For multiple occurrences, returns the leftmost one. If not found, returns\n None.\n\nArgs:\n subscripts: A string denoting the einsum subscript (e.g. 
`ab...cd`)\n label: The single character axis label.", "source": "github_repos"} -{"code": "def _list_profile_sort_key(profile_datum, sort_by):\n if sort_by == SORT_OPS_BY_OP_NAME:\n return profile_datum.node_exec_stats.node_name\n elif sort_by == SORT_OPS_BY_OP_TYPE:\n return profile_datum.op_type\n elif sort_by == SORT_OPS_BY_LINE:\n return profile_datum.file_line_func\n elif sort_by == SORT_OPS_BY_OP_TIME:\n return profile_datum.op_time\n elif sort_by == SORT_OPS_BY_EXEC_TIME:\n return profile_datum.node_exec_stats.all_end_rel_micros\n else:\n return profile_datum.node_exec_stats.all_start_micros", "docstring": "Get a profile_datum property to sort by in list_profile command.\n\nArgs:\n profile_datum: A `ProfileDatum` object.\n sort_by: (string) indicates a value to sort by.\n Must be one of SORT_BY* constants.\n\nReturns:\n profile_datum property to sort by.", "source": "github_repos"} -{"code": "def __init__(self, meshes: List[layout_lib.Mesh], is_async=True, in_flight_nodes_limit=8):\n if any((not isinstance(mesh, layout_lib.Mesh) for mesh in meshes)):\n raise TypeError('Expected a flat list of Mesh objects, got {}'.format(meshes))\n global _next_device_number\n ctx = context.context()\n with _next_device_number_lock:\n self.name = '{}/device:CUSTOM:{}'.format(ctx.host_address_space(), _next_device_number)\n _next_device_number += 1\n device, device_info = _pywrap_dtensor_device.Allocate(self.name, is_async, in_flight_nodes_limit)\n context.register_custom_device(device, self.name, device_info)\n self._device_info = device_info\n self._current_output_layout = None\n self._current_default_mesh = None\n self._meshes = set()\n self._mesh_lock = threading.Lock()\n for mesh in meshes:\n self._register_mesh(mesh)", "docstring": "Create a new DTensorDevice which executes ops on `underlying_device`.\n\nArgs:\n meshes: A list of `Mesh` objects indicating groups of devices to execute\n on. These may also be registered lazily.\n is_async: Indicates whether DTensor operations on this client will return\n immediately (with \"non-ready\" handles) or block until executed. This is\n on by default and is exposed as an option for ease of debugging.\n in_flight_nodes_limit: Indicates the limit of in-flight nodes before\n enqueueing of async operations to DTensorDevice is blocked. This limit\n is per mesh. 0 for no limits from DTensor. 
Default is 8.", "source": "github_repos"} -{"code": "def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None):\n vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy\n batch_size, frames, channels, height, width = pixel_values.shape\n pixel_values = pixel_values.reshape(batch_size * frames, channels, height, width)\n video_features = self.vision_tower(pixel_values, output_hidden_states=True)\n if isinstance(vision_feature_layer, int):\n selected_video_features = video_features.hidden_states[vision_feature_layer]\n else:\n hs_pool = [video_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n selected_video_features = torch.cat(hs_pool, dim=-1)\n if vision_feature_select_strategy == 'default':\n selected_video_features = selected_video_features[:, 1:]\n elif vision_feature_select_strategy == 'full':\n selected_video_features = selected_video_features\n video_features = self.vision_resampler(selected_video_features)\n video_features = self.multi_modal_projector(video_features)\n video_features = torch.split(video_features, frames, dim=0)\n return video_features", "docstring": "Obtains video last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\n pixel_values (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`)\n The tensors corresponding to the input video.\n vision_feature_layer (`Union[int, List[int]]`, *optiona;*):\n The index of the layer to select the vision feature. If multiple indices are provided,\n the vision feature of the corresponding indices will be concatenated to form the\n vision features.\n vision_feature_select_strategy (`str`, *optional*):\n The feature selection strategy used to select the vision feature from the vision backbone.\n Can be one of `\"default\"` or `\"full\"`\n\nReturns:\n video_features (List[`torch.Tensor`]): List of video feature tensor, each contains all the visual feature of all patches\n and are of shape `(num_videos, video_length, embed_dim)`).", "source": "github_repos"} -{"code": "def cd(path):\n if not path:\n yield\n return\n curdir = path_utils.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(curdir)", "docstring": "Context manager. Change the directory, and restore it afterwards.\n\n Example usage:\n with cd(\"/path\"):\n ...\n\nArgs:\n path: The directory to change to. 
If empty, this function is a no-op.\n\nYields:\n Executes your code, in a changed directory.", "source": "github_repos"} -{"code": "def _MakeParseFn(fn, metadata):\n fn_spec = inspectutils.GetFullArgSpec(fn)\n num_required_args = len(fn_spec.args) - len(fn_spec.defaults)\n required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)\n\n def _ParseFn(args):\n \"\"\"Parses the list of `args` into (varargs, kwargs), remaining_args.\"\"\"\n kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(args, fn_spec)\n parsed_args, kwargs, remaining_args, capacity = _ParseArgs(fn_spec.args, fn_spec.defaults, num_required_args, kwargs, remaining_args, metadata)\n if fn_spec.varargs or fn_spec.varkw:\n capacity = True\n extra_kw = set(kwargs) - set(fn_spec.kwonlyargs)\n if fn_spec.varkw is None and extra_kw:\n raise FireError('Unexpected kwargs present:', extra_kw)\n missing_kwonly = set(required_kwonly) - set(kwargs)\n if missing_kwonly:\n raise FireError('Missing required flags:', missing_kwonly)\n if fn_spec.varargs is not None:\n varargs, remaining_args = (remaining_args, [])\n else:\n varargs = []\n for index, value in enumerate(varargs):\n varargs[index] = _ParseValue(value, None, None, metadata)\n varargs = parsed_args + varargs\n remaining_args += remaining_kwargs\n consumed_args = args[:len(args) - len(remaining_args)]\n return ((varargs, kwargs), consumed_args, remaining_args, capacity)\n return _ParseFn", "docstring": "Creates a parse function for fn.\n\nArgs:\n fn: The function or class to create the parse function for.\n metadata: Additional metadata about the component the parse function is for.\n\nReturns:\n A parse function for fn. The parse function accepts a list of arguments\n and returns (varargs, kwargs), remaining_args. The original function fn\n can then be called with fn(*varargs, **kwargs). 
The remaining_args are\n the leftover args from the arguments to the parse function.", "source": "github_repos"} -{"code": "def _FindNodeWithStandaloneLineParent(node):\n if pytree_utils.NodeName(node.parent) in _STANDALONE_LINE_NODES:\n return node\n else:\n return _FindNodeWithStandaloneLineParent(node.parent)", "docstring": "Find a node whose parent is a 'standalone line' node.\n\n See the comment above _STANDALONE_LINE_NODES for more details.\n\nArgs:\n node: node to start from\n\nReturns:\n Suitable node that's either the node itself or one of its ancestors.", "source": "github_repos"} -{"code": "def _encode_reference_type_constraints(self, builder: expressions.Builder, elem: message.Message) -> List[validation_pb2.SqlRequirement]:\n field_name = _last_path_token(builder)\n constraint_key = f'{field_name}-resource-type-exclusivity'\n if constraint_key in self._options.skip_keys:\n return []\n element_definition = cast(Any, elem)\n type_codes = _utils.element_type_codes(element_definition)\n if type_codes != ['Reference']:\n return []\n allowed_reference_types = [target_profile.value for target_profile in element_definition.type[0].target_profile]\n if len(allowed_reference_types) <= 1:\n return []\n num_references_exist: expressions.Builder = _num_fields_exist((builder.getReferenceKey(reference_type) for reference_type in sorted(allowed_reference_types)))\n constraint: expressions.Builder = num_references_exist <= 1\n if _fhir_path_data_types.is_collection(builder.return_type):\n constraint: expressions.Builder = builder.all(constraint)\n constraint_sql = self._encode_fhir_path_builder_constraint(constraint, builder.get_parent_builder())\n if constraint_sql is None:\n return []\n reference_type_path = self._abs_path_invocation(builder)\n column_name = f'{_path_to_sql_column_name(reference_type_path)}_{_key_to_sql_column_name(constraint_key)}'\n parent_path = self._abs_path_invocation(builder.get_parent_builder())\n description = f'Reference type {reference_type_path} links to multiple resources or to resources of a type restricted by the profile.'\n return [validation_pb2.SqlRequirement(column_name=column_name, sql_expression=constraint_sql.sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_REFERENCE_TYPE, element_path=parent_path, description=description, fhir_path_key=constraint_key, fhir_path_expression=constraint_sql.builder.fhir_path, fields_referenced_by_expression=[field_name])]", "docstring": "Generates constraints for reference types.\n\n Ensures that a reference type only has a value for one of the resourceId\n columns across each of the possible resources the reference can link.\n\nArgs:\n builder: The builder to the reference type for which to encode\n constraints.\n elem: Element definition of the builder.\n\nReturns:\n A constraint enforcing the above requirements for the given reference\n type.", "source": "github_repos"} -{"code": "def expand(self, pcoll: beam.PCollection[Chunk]) -> beam.PTransform[Chunk, Any]:\n write_transform = self.database_config.create_write_transform()\n return pcoll | write_transform", "docstring": "Creates and applies the database-specific write transform.\n\nArgs:\n pcoll: PCollection of Chunks with embeddings to write to the\n vector database. 
Each Chunk must have:\n - An embedding\n - An ID\n - Metadata used to filter results as specified by database config\n\nReturns:\n Result of writing to database (implementation specific).", "source": "github_repos"} -{"code": "def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):\n row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n self._dtype = dtype\n self._ragged_rank = ragged_rank\n self._row_splits_dtype = row_splits_dtype", "docstring": "Initializes a RaggedTensorType object.\n\nArgs:\n dtype: data type of the `RaggedTensor`'s inner values.\n ragged_rank: ragged_rank of the declared `RaggedTensor`.\n row_splits_dtype: data type for the `RaggedTensor`'s row splits.\n One of: `tf.int32` or `tf.int64`.", "source": "github_repos"} -{"code": "def emit_pid(self, name: str, pid: int) -> None:\n event = {}\n event['name'] = 'process_name'\n event['ph'] = 'M'\n event['pid'] = pid\n event['args'] = {'name': name}\n self._metadata.append(event)", "docstring": "Adds a process metadata event to the trace.\n\nArgs:\n name: The process name as a string.\n pid: Identifier of the process as an integer.", "source": "github_repos"} -{"code": "def check_missing_deps() -> Iterator[None]:\n try:\n yield\n except ImportError as e:\n reraise_utils.reraise(e, suffix='\\nEach etils sub-modules require deps to be installed separately (e.g. `from etils import ecolab` -> `pip install etils[ecolab]`)')", "docstring": "Raise a better error message in case of `ImportError`.\n\n Usage:\n\n ```python\n from etils.epy import _internal\n\n with _internal.check_missing_deps():\n # pylint: disable=g-import-not-at-top\n import xyz\n # pylint: enable=g-import-not-at-top\n ```\n\nYields:\n None", "source": "github_repos"} -{"code": "def dummy_inputs(self) -> Dict[str, tf.Tensor]:\n dummies = {}\n for key, spec in self.input_signature.items():\n dummy_shape = [dim if dim is not None else 2 for dim in spec.shape]\n if spec.shape[0] is None:\n dummy_shape[0] = 1\n dummies[key] = tf.ones(shape=dummy_shape, dtype=spec.dtype)\n if key == 'token_type_ids':\n dummies[key] = tf.zeros_like(dummies[key])\n if self.config.add_cross_attention and 'encoder_hidden_states' in inspect.signature(self.call).parameters:\n if 'encoder_hidden_states' not in dummies:\n if self.main_input_name == 'input_ids':\n dummies['encoder_hidden_states'] = tf.ones(shape=(1, 2, self.config.hidden_size), dtype=tf.float32, name='encoder_hidden_states')\n else:\n raise NotImplementedError(\"Model has cross-attention but we couldn't infer the shape for the encoder hidden states. 
Please manually override dummy_inputs!\")\n return dummies", "docstring": "Dummy inputs to build the network.\n\nReturns:\n `Dict[str, tf.Tensor]`: The dummy inputs.", "source": "github_repos"} -{"code": "def create_accumulator(self, *args, **kwargs):\n raise NotImplementedError(str(self))", "docstring": "Return a fresh, empty accumulator for the combine operation.\n\nArgs:\n *args: Additional arguments and side inputs.\n **kwargs: Additional arguments and side inputs.", "source": "github_repos"} -{"code": "def GetMap(self, cache_info, data):\n for obj in json.loads(cache_info.read()):\n key = obj.get('Key', '')\n value = obj.get('Value', '')\n if not value or not key:\n continue\n map_entry = self._ReadEntry(key, value)\n if map_entry is None:\n self.log.warning('Could not create entry from line %r in cache, skipping', value)\n continue\n if not data.Add(map_entry):\n self.log.warning('Could not add entry %r read from line %r in cache', map_entry, value)\n return data", "docstring": "Returns a map from a cache.\n\nArgs:\n cache_info: file like object containing the cache.\n data: a Map to populate.\n\nReturns:\n A child of Map containing the cache data.", "source": "github_repos"} -{"code": "def get_resize_output_image_size(video, resolution_max_side: int) -> tuple[int, int]:\n height, width = video.size()[-2:]\n resolution_max_side = min(MAX_IMAGE_SIZE, resolution_max_side)\n resolution_max_side = max(height, width) if resolution_max_side is None else resolution_max_side\n aspect_ratio = width / height\n if width >= height:\n width = resolution_max_side\n height = int(width / aspect_ratio)\n if height % 2 != 0:\n height += 1\n elif height > width:\n height = resolution_max_side\n width = int(height * aspect_ratio)\n if width % 2 != 0:\n width += 1\n height = max(height, 1)\n width = max(width, 1)\n return (height, width)", "docstring": "Get the output size of the video after resizing given a dictionary specifying the max and min sizes.\n\nArgs:\n video (`np.ndarray`):\n Video to resize.\n resolution_max_side (`int`):\n The longest edge of the video will be resized to this value. The shortest edge will be resized to keep the\n input aspect ratio.\n\nReturns:\n The output size of the video after resizing.", "source": "github_repos"} -{"code": "def get_temp_dir(self) -> str:\n if not self._tempdir:\n self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())\n return self._tempdir", "docstring": "Returns a unique temporary directory for the test to use.\n\n If you call this method multiple times during in a test, it will return the\n same folder. However, across different runs the directories will be\n different. 
This will ensure that across different runs tests will not be\n able to pollute each others environment.\n If you need multiple unique directories within a single test, you should\n use tempfile.mkdtemp as follows:\n tempfile.mkdtemp(dir=self.get_temp_dir()):\n\nReturns:\n string, the path to the unique temporary directory created for this test.", "source": "github_repos"} -{"code": "def _merge_precomputed_encodings(self, other, validate=True):\n if self is other or (self._row_splits is other._row_splits and self._row_lengths is other._row_lengths and (self._value_rowids is other._value_rowids) and (self._nrows is other._nrows) and (self._nvals is other._nvals) and (self._uniform_row_length is other._uniform_row_length)):\n return self\n nrows, nrows_validated = _merge_tensors(self._nrows, other._nrows, 'nrows', validate)\n nvals, _ = _merge_tensors(self._nvals, other._nvals, 'nvals', validate)\n uniform_row_length, uniform_row_length_validated = _merge_tensors(self._uniform_row_length, other._uniform_row_length, 'uniform_row_length', validate)\n if uniform_row_length_validated and nrows_validated:\n validate = False\n row_splits, row_splits_validated = _merge_tensors(self._row_splits, other._row_splits, 'row_splits', validate)\n if row_splits_validated:\n validate = False\n row_lengths, row_lengths_validated = _merge_tensors(self._row_lengths, other._row_lengths, 'row_lengths', validate)\n if row_lengths_validated:\n validate = False\n value_rowids, value_rowids_validated = _merge_tensors(self._value_rowids, other._value_rowids, 'value_rowids', validate)\n if value_rowids_validated and nrows_validated:\n validate = False\n if row_splits is self._row_splits and row_lengths is self._row_lengths and (value_rowids is self._value_rowids) and (nrows is self._nrows) and (uniform_row_length is self._uniform_row_length):\n return self\n if row_splits is other._row_splits and row_lengths is other._row_lengths and (value_rowids is other._value_rowids) and (nrows is other._nrows) and (uniform_row_length is other._uniform_row_length):\n return other\n return RowPartition(row_splits=row_splits, row_lengths=row_lengths, value_rowids=value_rowids, nrows=nrows, uniform_row_length=uniform_row_length, nvals=nvals, internal=_row_partition_factory_key)", "docstring": "Returns a RowPartition that merges encodings from `self` and `other`.\n\n Requires that `self` and `other` describe the same partition.\n\nArgs:\n other: A `RowPartition` that encodes the same partition as `self`.\n validate: If true, then add runtime checks to verify that `self` and\n `other` encode the same row partition.\n\nReturns:\n A `RowPartition`.", "source": "github_repos"} -{"code": "def report_fetch(config, auth, account, report_id=None, name=None, timeout=60):\n if config.verbose:\n print('DCM REPORT FILE', report_id or name)\n if report_id is None:\n report = report_get(config, auth, account, name=name)\n if report is None:\n raise Exception('Report does not exist:', name)\n else:\n report_id = report['id']\n running = False\n while timeout >= 0:\n for file_json in report_files(config, auth, account, report_id):\n if file_json.get('status') in ('PROCESSING', 'QUEUED', None):\n if config.verbose:\n print('REPORT PROCESSING WILL WAIT')\n running = True\n if timeout > 0:\n break\n elif file_json.get('status') == 'REPORT_AVAILABLE':\n if config.verbose:\n print('REPORT DONE')\n return file_json\n if not running:\n break\n if timeout > 0:\n if config.verbose:\n print('WAITING MINUTES', timeout)\n sleep(60)\n timeout -= 1\n if 
config.verbose:\n print('NO REPORT FILES')\n return running", "docstring": "Retrieves most recent DCM file JSON by name or ID, if in progress, waits for it to complete.\n\n Bulletproofing:\n https://developers.google.com/doubleclick-advertisers/v3.2/files/get\n\n Timeout is in minutes ( retries will happen at 1 minute interval, default\n total time is 60 minutes )\n\nArgs:\n * auth: (string) Either user or service.\n * account: (string) [account:advertiser@profile] token.\n * report_id: (int) ID of DCm report to fetch ( either or name ).\n * name: (string) Name of report to fetch ( either or report_id ).\n * timeout: (int) Minutes to wait for in progress report before giving up.\n\nReturns:\n * Report JSON if report exists and is ready.\n * True if report is in progress but not ready.\n * False if report does not exist.", "source": "github_repos"} -{"code": "def find_all_build_files(dir: str) -> List[Tuple[str, str]]:\n build_file_dirs = []\n for root, _, files in os.walk(dir):\n for file in files:\n if file in BUILD_FILENAMES:\n root = root.strip('./')\n build_file_dirs.append((root, file))\n return build_file_dirs", "docstring": "List all the BUILD files.\n\nReturns:\n The list of (directory, filename) of all BUILD files.", "source": "github_repos"} -{"code": "def __init__(self, min_batch_size: int, max_batch_size: int, *, inference_fn: TensorRTInferenceFn=_default_tensorRT_inference_fn, large_model: bool=False, model_copies: Optional[int]=None, max_batch_duration_secs: Optional[int]=None, **kwargs):\n self.min_batch_size = min_batch_size\n self.max_batch_size = max_batch_size\n self.max_batch_duration_secs = max_batch_duration_secs\n self.inference_fn = inference_fn\n if 'engine_path' in kwargs:\n self.engine_path = kwargs.get('engine_path')\n elif 'onnx_path' in kwargs:\n self.onnx_path = kwargs.get('onnx_path')\n self._env_vars = kwargs.get('env_vars', {})\n self._share_across_processes = large_model or model_copies is not None\n self._model_copies = model_copies or 1", "docstring": "Implementation of the ModelHandler interface for TensorRT.\n\n Example Usage::\n\n pcoll | RunInference(\n TensorRTEngineHandlerNumPy(\n min_batch_size=1,\n max_batch_size=1,\n engine_path=\"my_uri\"))\n\n **NOTE:** This API and its implementation are under development and\n do not provide backward compatibility guarantees.\n\nArgs:\n min_batch_size: minimum accepted batch size.\n max_batch_size: maximum accepted batch size.\n inference_fn: the inference function to use on RunInference calls.\n default: _default_tensorRT_inference_fn\n large_model: set to true if your model is large enough to run into\n memory pressure if you load multiple copies. Given a model that\n consumes N memory and a machine with W cores and M memory, you should\n set this to True if N*W > M.\n model_copies: The exact number of models that you would like loaded\n onto your machine. This can be useful if you exactly know your CPU or\n GPU capacity and want to maximize resource utilization.\n max_batch_duration_secs: the maximum amount of time to buffer\n a batch before emitting; used in streaming contexts.\n kwargs: Additional arguments like 'engine_path' and 'onnx_path' are\n currently supported. 
'env_vars' can be used to set environment variables\n before loading the model.\n\n See https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/\n for details", "source": "github_repos"} -{"code": "def __init__(self, variable_holder=None, **kwargs):\n self._variable_holder = variable_holder or VariableHolder(share_variables=True)\n name = kwargs.pop('name', 'wrapped_function_graph')\n collections = kwargs.pop('collections', {})\n self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)\n self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)\n self._functions = {}", "docstring": "Class for wrapping multiple TF 1.X functions in a single graph.\n\n Maintains a dictionary mapping names to wrapped functions. See\n `tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.\n\n Functions wrapped using this class have access to variables and collections\n created in other wrapped functions, using the standard TF 1.X API (\n `tf.compat.v1.get_variable` or\n `tf.compat.v1.get_default_graph().get_collection(...)`)\n\n Outside a function, variables and collections may be accessed using the\n `variables` and `graph` properties.\n\nExample:\n ```\n def add_v1(x):\n with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):\n v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)\n return v + x\n\n def increment_var_v1(x):\n with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):\n v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)\n return v.assign_add(x)\n\n g = WrappedGraph()\n add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])\n increment_var = g.wrap_function(increment_var_v1,\n [tf.TensorSpec([], tf.int32)])\n\n assert len(g.variables) == 1\n assert g.variables[0].numpy() == 0\n increment_var(tf.constant(5))\n assert g.variables[0].numpy() == 5\n\n ```", "source": "github_repos"} -{"code": "def remove_padding_from_sc(value_in_checkpoint: tensor.Tensor, variable_shape: tuple[int, int]) -> tensor.Tensor:\n checkpoint_value_shape = value_in_checkpoint.shape.as_list()\n is_init_value_padded = all([i >= j for i, j in zip(checkpoint_value_shape, variable_shape)])\n if not is_init_value_padded:\n return value_in_checkpoint\n begin = [0] * len(checkpoint_value_shape)\n return array_ops.slice(value_in_checkpoint, begin=begin, size=variable_shape)", "docstring": "Removes padding, if any, from sparsecore checkpoint.\n\nArgs:\n value_in_checkpoint: input tensor value, usually from checkpoint.\n variable_shape: Expected shape of tensor after removing padding.\n\nReturns:\n A slice of the input tensor to match the variable_shape if the\n variable shape is a valid slice if the input tensor.", "source": "github_repos"} -{"code": "def __ne__(self: EventSetOrNode, other: Any) -> EventSetOrNode:\n if isinstance(other, self.__class__):\n from temporian.core.operators.binary import not_equal\n return not_equal(input_1=self, input_2=other)\n if isinstance(other, T_SCALAR + (bool, str)):\n from temporian.core.operators.scalar import not_equal_scalar\n return not_equal_scalar(input=self, value=other)\n self._raise_error('ne', other, 'int,float,bool,str')\n assert False", "docstring": "Computes not equal (`self != other`) element-wise with another\n [`EventSet`][temporian.EventSet] or a scalar value.\n\n If an EventSet, each feature in `self` is compared element-wise to\n the feature in `other` in the same position. 
`self` and `other`\n must have the same sampling and the same number of features.\n\n If a scalar value, each item in each feature in `self` is compared to\n `other`.\n\n Note that it will always return True on NaNs (even if both are).\n\n Example with EventSet:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f1\": [0, 100, 200]}\n ... )\n >>> b = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f2\": [-10, 100, 5]},\n ... same_sampling_as=a\n ... )\n\n >>> c = a != b\n >>> c\n indexes: []\n features: [('f1', bool_)]\n events:\n (3 events):\n timestamps: [1. 2. 3.]\n 'f1': [ True False True]\n ...\n\n ```\n\n Example with scalar value:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"f1\": [0, 100, 200], \"f2\": [-10, 100, 5]}\n ... )\n\n >>> b = a != 100\n >>> b\n indexes: []\n features: [('f1', bool_), ('f2', bool_)]\n events:\n (3 events):\n timestamps: [1. 2. 3.]\n 'f1': [ True False True]\n 'f2': [ True False True]\n ...\n\n ```\n\nArgs:\n other: EventSet or scalar value.\n\nReturns:\n Result of the comparison.", "source": "github_repos"} -{"code": "def check_output(expected: List[str]):\n\n def _check_inner(actual: List[PCollection[str]]):\n formatted_actual = actual | beam.Flatten() | beam.Map(lambda row: str(beam.Row(**row._asdict())))\n assert_matches_stdout(formatted_actual, expected)\n return _check_inner", "docstring": "Helper function to check the output of a pipeline against expected values.\n\n This function takes a list of expected output strings and returns a\n callable that can be used within a Beam pipeline to assert that the\n actual output matches the expected output.\n\nArgs:\n expected: A list of strings representing the expected output elements.\n\nReturns:\n A callable that takes a list of PCollections and asserts their combined\n elements match the expected output.", "source": "github_repos"} -{"code": "def self_adjoint_eigvals(tensor, name=None):\n e, _ = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=False, name=name)\n return e", "docstring": "Computes the eigenvalues of one or more self-adjoint matrices.\n\n Note: If your program backpropagates through this function, you should replace\n it with a call to tf.linalg.eigh (possibly ignoring the second output) to\n avoid computing the eigen decomposition twice. This is because the\n eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See\n _SelfAdjointEigV2Grad in linalg_grad.py.\n\nArgs:\n tensor: `Tensor` of shape `[..., N, N]`.\n name: string, optional name of the operation.\n\nReturns:\n e: Eigenvalues. Shape is `[..., N]`. 
The vector `e[..., :]` contains the `N`\n eigenvalues of `tensor[..., :, :]`.", "source": "github_repos"} -{"code": "def create_source_map(nodes, code, filepath):\n reparsed_nodes = parser.parse(code, preamble_len=0, single_node=False)\n for node in reparsed_nodes:\n resolve(node, code, filepath, node.lineno, node.col_offset)\n source_map = {}\n try:\n for before, after in ast_util.parallel_walk(nodes, reparsed_nodes):\n origin_info = anno.getanno(before, anno.Basic.ORIGIN, default=None)\n final_info = anno.getanno(after, anno.Basic.ORIGIN, default=None)\n if origin_info is None or final_info is None:\n continue\n line_loc = LineLocation(final_info.loc.filename, final_info.loc.lineno)\n existing_origin = source_map.get(line_loc)\n if existing_origin is not None:\n if existing_origin.loc.line_loc == origin_info.loc.line_loc:\n if existing_origin.loc.lineno >= origin_info.loc.lineno:\n continue\n if existing_origin.loc.col_offset <= origin_info.loc.col_offset:\n continue\n source_map[line_loc] = origin_info\n except ValueError as err:\n new_msg = 'Inconsistent ASTs detected. This is a bug. Cause: \\n'\n new_msg += str(err)\n new_msg += 'Diff:\\n'\n for n, rn in zip(nodes, reparsed_nodes):\n nodes_str = pretty_printer.fmt(n, color=False, noanno=True)\n reparsed_nodes_str = pretty_printer.fmt(rn, color=False, noanno=True)\n diff = difflib.context_diff(nodes_str.split('\\n'), reparsed_nodes_str.split('\\n'), fromfile='Original nodes', tofile='Reparsed nodes', n=7)\n diff = '\\n'.join(diff)\n new_msg += diff + '\\n'\n raise ValueError(new_msg)\n return source_map", "docstring": "Creates a source map between an annotated AST and the code it compiles to.\n\n Note: this function assumes nodes nodes, code and filepath correspond to the\n same code.\n\nArgs:\n nodes: Iterable[ast.AST, ...], one or more AST modes.\n code: Text, the source code in which nodes are found.\n filepath: Text\n\nReturns:\n Dict[LineLocation, OriginInfo], mapping locations in code to locations\n indicated by origin annotations in node.", "source": "github_repos"} -{"code": "def set_global_step(self, new_global_step, name=None):\n return gen_data_flow_ops.accumulator_set_global_step(self._accumulator_ref, math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64), name=name)", "docstring": "Sets the global time step of the accumulator.\n\n The operation logs a warning if we attempt to set to a time step that is\n lower than the accumulator's own time step.\n\nArgs:\n new_global_step: Value of new time step. 
Can be a variable or a constant\n name: Optional name for the operation.\n\nReturns:\n Operation that sets the accumulator's time step.", "source": "github_repos"} -{"code": "def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank, inner_shape):\n if ragged_tensor.is_ragged(pylist):\n raise TypeError('pylist may not be a RaggedTensor or RaggedTensorValue.')\n if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0:\n if ragged_rank is not None and ragged_rank != 0:\n raise ValueError('Invalid pylist=%r: incompatible with ragged_rank=%d' % (pylist, ragged_rank))\n if inner_shape is not None and inner_shape:\n raise ValueError('Invalid pylist=%r: incompatible with dim(inner_shape)=%d' % (pylist, len(inner_shape)))\n return inner_factory(pylist, dtype, ())\n if ragged_rank is not None and ragged_rank < 0:\n raise ValueError('Invalid ragged_rank=%r: must be nonnegative' % ragged_rank)\n scalar_depth, max_depth = _find_scalar_and_max_depth(pylist)\n if scalar_depth is not None:\n if max_depth > scalar_depth:\n raise ValueError('Invalid pylist=%r: empty list nesting is greater than scalar value nesting' % pylist)\n if ragged_rank is not None and max_depth < ragged_rank:\n raise ValueError(f'Invalid pylist={pylist}, max depth smaller than ragged_rank={ragged_rank}')\n if inner_shape is not None and ragged_rank is not None:\n expected_depth = ragged_rank + len(inner_shape) + 1\n if scalar_depth is not None and expected_depth != scalar_depth or (scalar_depth is None and expected_depth < max_depth):\n raise ValueError('Invalid pylist=%r: incompatible with ragged_rank=%d and dim(inner_shape)=%d' % (pylist, ragged_rank, len(inner_shape)))\n if ragged_rank == 0 or (ragged_rank is None and (max_depth < 2 or (inner_shape is not None and max_depth - len(inner_shape) < 2))):\n return inner_factory(pylist, dtype, inner_shape)\n if inner_shape is None:\n if ragged_rank is None:\n inner_shape = ()\n else:\n inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank)\n if ragged_rank is None:\n if scalar_depth is None:\n ragged_rank = max(1, max_depth - 1)\n else:\n ragged_rank = max(1, scalar_depth - 1 - len(inner_shape))\n nested_splits = []\n values = pylist\n for dim in range(ragged_rank):\n nested_splits.append([0])\n concatenated_values = []\n for row in values:\n nested_splits[dim].append(nested_splits[dim][-1] + len(row))\n concatenated_values.extend(row)\n values = concatenated_values\n values = inner_factory(values, dtype=dtype, shape=(len(values),) + inner_shape, name='values')\n for row_splits in reversed(nested_splits):\n values = ragged_factory(values, row_splits)\n return values", "docstring": "Constructs a constant RaggedTensor or RaggedTensorValue.\n\nArgs:\n ragged_factory: A factory function with the signature:\n `ragged_factory(values, row_splits)`\n inner_factory: A factory function with the signature: `inner_factory(pylist,\n dtype, shape, name)`\n pylist: A nested `list`, `tuple` or `np.ndarray`.\n dtype: Data type for returned value.\n ragged_rank: Ragged rank for returned value.\n inner_shape: Inner value shape for returned value.\n\nReturns:\n A value returned by `ragged_factory` or `inner_factory`.\n\nRaises:\n ValueError: If the scalar values in `pylist` have inconsistent nesting\n depth; or if ragged_rank or inner_shape are incompatible with `pylist`.", "source": "github_repos"} -{"code": "def _estimate_initial_position(log_moneyness, total_variance):\n dtype = total_variance.dtype\n minvol_index = tf.argmin(total_variance, axis=1)\n m = 
tf.gather(log_moneyness, minvol_index, axis=1, batch_dims=1)\n sigma = 0.5 * tf.ones_like(minvol_index, dtype=dtype)\n rho = tf.zeros_like(minvol_index, dtype=dtype)\n y = total_variance\n x = tf.sqrt((log_moneyness - m[:, None]) ** 2 + sigma[:, None] ** 2)\n e_x = tf.math.reduce_mean(x, axis=1)\n e_y = tf.math.reduce_mean(y, axis=1)\n e_xy = tf.math.reduce_mean(x * y, axis=1)\n var_x = tf.math.reduce_variance(x, axis=1)\n b = (e_xy - e_x * e_y) / var_x\n a = e_y - b * e_x\n initial_position = tf.transpose([a, b, rho, m, sigma])\n return initial_position", "docstring": "Provides a heuristic initial guess for the SVI parameters.\n\n The values for `rho` and `sigma` are predetermined.\n `rho = 0` enforces the symmetry of the initial skew.\n `sigma = 0.5` enforces certain smoothness at the vertex of the skew.\n\n The value for `m` estimated as the position of the vertex of the skew:\n `m` is the log-moneyness corresponding to the smallest input variance.\n\n The values for `a`, `b` are computed using a simple linear regression, using\n the input data and the above estimates for `rho`, `sigma` and `m`.\n\nArgs:\n log_moneyness: A rank 2 real `Tensor` of shape [batch_size, num_strikes].\n The log-moneyness `k := log(K/F)` of the options.\n total_variance: A rank 2 real `Tensor` of shape [batch_size, num_strikes].\n The target total variance to be approximated by the SVI model.\n\nReturns:\n A rank 2 real `Tensor` of shape [batch_size, 5], representing an initial\n guess for the SVI parameter optimization.", "source": "github_repos"} -{"code": "def _format_data_list_with_options(self, data_list):\n if self._options and self._options.experimental_replication_mode == InputReplicationMode.PER_REPLICA and (not self._options.experimental_fetch_to_device):\n return [data_list]\n else:\n return data_list", "docstring": "Change the data in to a list type if required.\n\n The OwnedMultiDeviceIterator returns the list data type,\n while the PER_REPLICA iterator (when used with prefetch disabled)\n returns without the enclosed list. 
This is to fix the inconsistency.\n\nArgs:\n data_list: data_list\n\nReturns:\n list", "source": "github_repos"} -{"code": "def build_relative_position(query_size, key_size):\n q_ids = tf.range(query_size, dtype=tf.int32)\n k_ids = tf.range(key_size, dtype=tf.int32)\n rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)\n return tf.cast(rel_pos_ids, tf.int64)", "docstring": "Build relative position according to the query and key\n\n We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key\n \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -\n P_k\\)\n\nArgs:\n query_size (int): the length of query\n key_size (int): the length of key\n\nReturns:\n `tf.Tensor`: A tensor with shape [1, query_size, key_size]", "source": "github_repos"} -{"code": "def fram_wave(waveform: np.array, hop_length: int=160, fft_window_size: int=400, center: bool=True):\n warnings.warn('The function `fram_wave` is deprecated and will be removed in version 4.31.0 of Transformers', FutureWarning)\n frames = []\n for i in range(0, waveform.shape[0] + 1, hop_length):\n if center:\n half_window = (fft_window_size - 1) // 2 + 1\n start = i - half_window if i > half_window else 0\n end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0]\n frame = waveform[start:end]\n if start == 0:\n padd_width = (-i + half_window, 0)\n frame = np.pad(frame, pad_width=padd_width, mode='reflect')\n elif end == waveform.shape[0]:\n padd_width = (0, i - waveform.shape[0] + half_window)\n frame = np.pad(frame, pad_width=padd_width, mode='reflect')\n else:\n frame = waveform[i:i + fft_window_size]\n frame_width = frame.shape[0]\n if frame_width < waveform.shape[0]:\n frame = np.lib.pad(frame, pad_width=(0, fft_window_size - frame_width), mode='constant', constant_values=0)\n frames.append(frame)\n frames = np.stack(frames, 0)\n return frames", "docstring": "In order to compute the short time fourier transform, the waveform needs to be split in overlapping windowed\n segments called `frames`.\n\n The window length (window_length) defines how much of the signal is contained in each frame, while the hop length\n defines the step between the beginning of each new frame.\n\nArgs:\n waveform (`np.array` of shape `(sample_length,)`):\n The raw waveform which will be split into smaller chunks.\n hop_length (`int`, *optional*, defaults to 160):\n Step between each window of the waveform.\n fft_window_size (`int`, *optional*, defaults to 400):\n Defines the size of the window.\n center (`bool`, defaults to `True`):\n Whether or not to center each frame around the middle of the frame. 
Centering is done by reflecting the\n waveform on the left and on the right.\n\nReturns:\n framed_waveform (`np.array` of shape `(waveform.shape // hop_length , fft_window_size)`):\n The framed waveforms that can be fed to `np.fft`.", "source": "github_repos"} -{"code": "def __init__(self, func, argnames, func_name=None, grad_func=None, python_grad_func=None, out_names=None, **kwargs):\n self._func = func\n self._argnames = argnames\n self._func_name = func_name\n assert grad_func is None or isinstance(grad_func, _OverloadedFunction)\n self._grad_func = grad_func\n self._python_grad_func = python_grad_func\n self._out_names = out_names\n self._extra_kwargs = kwargs\n self._overload = {}", "docstring": "Creates _DefinedFunction.\n\nArgs:\n func: A python callable which constructs a tf function body.\n argnames: A list of strings for function argument names.\n func_name: The function name. Defaults to None, in which derives from\n 'func'.\n grad_func: This function's gradient function, if not None. Defaults\n to None.\n python_grad_func: A python callable implementing the gradient of\n the function python-side.\n out_names: A list of strings for the function return value names.\n **kwargs: The keyword arguments. **kwargs is passed to every call\n site of this function.\n\nRaises:\n ValueError: The function definition is invalid.", "source": "github_repos"} -{"code": "def __init__(self, checkpoint_dir: Text, save_secs: Optional[int]=None, save_steps: Optional[int]=None, saver: Optional[saver_lib.Saver]=None, checkpoint_basename: Text='model.ckpt', scaffold: Optional[monitored_session.Scaffold]=None, listeners: Optional[List[basic_session_run_hooks.CheckpointSaverListener]]=None):\n save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n logging.info('Create AsyncCheckpointSaverHook saving to path\\n%s', save_path)\n if listeners:\n logging.info(' with %d listener(s).', len(listeners))\n if saver is not None and scaffold is not None:\n raise ValueError('You cannot provide both saver and scaffold.')\n self._saver = saver\n self._save_thread = None\n self._write_graph_thread = None\n self._checkpoint_dir = checkpoint_dir\n self._save_path = save_path\n self._scaffold = scaffold\n self._timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)\n self._listeners = listeners or []\n self._steps_per_run = 1\n self._summary_writer = None\n self._global_step_tensor = None\n self._last_checkpoint_step = None\n global _END_TIME_OF_LAST_WRITE\n with _END_TIME_OF_LAST_WRITE_LOCK:\n if _END_TIME_OF_LAST_WRITE is None:\n _END_TIME_OF_LAST_WRITE = time.time()", "docstring": "Initializes a `CheckpointSaverHook`.\n\nArgs:\n checkpoint_dir: `str`, base directory for the checkpoint files.\n save_secs: `int`, save every N secs.\n save_steps: `int`, save every N steps.\n saver: `Saver` object, used for saving.\n checkpoint_basename: `str`, base name for the checkpoint files.\n scaffold: `Scaffold`, use to get saver object.\n listeners: List of `CheckpointSaverListener` subclass instances. 
Used for\n callbacks that run immediately before or after this hook saves the\n checkpoint.\n\nRaises:\n ValueError: One of `save_steps` or `save_secs` should be set.\n ValueError: At most one of `saver` or `scaffold` should be set.", "source": "github_repos"} -{"code": "def _update(self, item, feed_item):\n self._api().update(profileId=self.profile_id, body=item).execute()", "docstring": "Updates a new item in CM.\n\nArgs:\n item: The CM object to update.\n feed_item: The feed item from the Bulkdozer feed representing the item to\n update.", "source": "github_repos"} -{"code": "def smart_cond(pred, true_fn=None, false_fn=None, name=None):\n if isinstance(pred, variables.Variable):\n return cond.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)\n return smart_module.smart_cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)", "docstring": "Return either `true_fn()` if predicate `pred` is true else `false_fn()`.\n\n If `pred` is a bool or has a constant value, we return either `true_fn()`\n or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.\n\nArgs:\n pred: A scalar determining whether to return the result of `true_fn` or\n `false_fn`.\n true_fn: The callable to be performed if pred is true.\n false_fn: The callable to be performed if pred is false.\n name: Optional name prefix when using `tf.cond`.\n\nReturns:\n Tensors returned by the call to either `true_fn` or `false_fn`.\n\nRaises:\n TypeError: If `true_fn` or `false_fn` is not callable.", "source": "github_repos"} -{"code": "def _get_attrs_items(obj):\n attrs = getattr(obj.__class__, '__attrs_attrs__')\n attr_names = (a.name for a in attrs)\n return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]", "docstring": "Returns a list of (name, value) pairs from an attrs instance.\n\n TODO(b/268078256): check if this comment is valid, and if so, ensure it's\n handled in the function below.\n The list will be sorted by name.\n\nArgs:\n obj: an object.\n\nReturns:\n A list of (attr_name, attr_value) pairs, sorted by attr_name.", "source": "github_repos"} -{"code": "def get_next(self):\n raise NotImplementedError('Iterator.get_next()')", "docstring": "Returns the next element.\n\n >>> dataset = tf.data.Dataset.from_tensors(42)\n >>> iterator = iter(dataset)\n >>> print(iterator.get_next())\n tf.Tensor(42, shape=(), dtype=int32)\n\nReturns:\n A (nested) structure of values matching `tf.data.Iterator.element_spec`.\n\nRaises:\n `tf.errors.OutOfRangeError`: If the end of the iterator has been reached.", "source": "github_repos"} -{"code": "def register(self, candidate, name=None):\n if not name:\n name = candidate.__name__\n if name in self._registry:\n frame = self._registry[name][_LOCATION_TAG]\n raise KeyError(\"Registering two %s with name '%s'! 
(Previous registration was in %s %s:%d)\" % (self._name, name, frame.name, frame.filename, frame.lineno))\n logging.vlog(1, 'Registering %s (%s) in %s.', name, candidate, self._name)\n stack = traceback.extract_stack(limit=3)\n stack_index = min(2, len(stack) - 1)\n if stack_index >= 0:\n location_tag = stack[stack_index]\n else:\n location_tag = ('UNKNOWN', 'UNKNOWN', 'UNKNOWN', 'UNKNOWN', 'UNKNOWN')\n self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: location_tag}", "docstring": "Registers a Python object \"candidate\" for the given \"name\".\n\nArgs:\n candidate: The candidate object to add to the registry.\n name: An optional string specifying the registry key for the candidate.\n If None, candidate.__name__ will be used.\n\nRaises:\n KeyError: If same name is used twice.", "source": "github_repos"} -{"code": "def on_pass(self, record):", "docstring": "A function that is executed upon a test passing.\n\n Implementation is optional.\n\nArgs:\n record: records.TestResultRecord, a copy of the test record for\n this test, containing all information of the test execution\n including exception objects.", "source": "github_repos"} -{"code": "def freeze_saved_model(saved_model_dir, input_arrays, input_shapes, output_arrays, tag_set, signature_key):\n meta_graph = get_meta_graph_def(saved_model_dir, tag_set)\n signature_def = get_signature_def(meta_graph, signature_key)\n inputs, outputs = get_inputs_outputs(signature_def)\n collection_def = meta_graph.collection_def\n if constants.ASSETS_KEY in collection_def:\n raise ValueError('SavedModels with assets/ directory are not supported.')\n graph = ops.Graph()\n with session.Session(graph=graph) as sess:\n loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)\n in_tensors = _get_tensors(graph, inputs, input_arrays)\n out_tensors = _get_tensors(graph, outputs, output_arrays)\n util.set_tensor_shapes(in_tensors, input_shapes)\n frozen_graph_def = util.freeze_graph(sess, in_tensors, out_tensors)\n return (frozen_graph_def, in_tensors, out_tensors, sess.graph)", "docstring": "Converts a SavedModel to a frozen graph.\n\nArgs:\n saved_model_dir: SavedModel directory to convert.\n input_arrays: List of input tensors to freeze graph with. Uses input arrays\n from SignatureDef when none are provided.\n input_shapes: Dict of strings representing input tensor names to list of\n integers representing input shapes (e.g., {\"foo\": : [1, 16, 16, 3]}).\n Automatically determined when input shapes is None (e.g., {\"foo\" : None}).\n output_arrays: List of output tensors to freeze graph with. Uses output\n arrays from SignatureDef when none are provided.\n tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to\n analyze. 
All tags in the tag set must be present.\n signature_key: Key identifying SignatureDef containing inputs and outputs.\n\nReturns:\n frozen_graph_def: Frozen GraphDef.\n in_tensors: List of input tensors for the graph.\n out_tensors: List of output tensors for the graph.\n graph: `Graph` object.\n\nRaises:\n ValueError:\n SavedModel doesn't contain a MetaGraphDef identified by tag_set.\n signature_key is not in the MetaGraphDef.\n assets/ directory is in the MetaGraphDef.\n input_shapes does not match the length of input_arrays.\n input_arrays or output_arrays are not valid.", "source": "github_repos"} -{"code": "def words_string(fake: Faker, n: int) -> str:\n return ' '.join(fake.words(n))", "docstring": "Provide Faker words as a joined string.\n\nArgs:\n * fake: Faker instance\n * n: number of words\n\nReturns:\n * string of n words joined by spaces", "source": "github_repos"} -{"code": "def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)", "docstring": "Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\nArgs:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n\n Call arguments:\n inputs: A 3D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)", "source": "github_repos"} -{"code": "def _parse_resource(self, uri: str, json_obj: Dict[str, Any]) -> Optional[_T]:\n json_parser = _json_parser.JsonParser(self.handler, self.resource_time_zone)\n resource_type = json_obj.get('resourceType')\n if resource_type is None:\n raise ValueError(f'JSON for URI {uri} does not have a resource type.')\n if resource_type == 'Bundle':\n json_value = _find_resource_in_bundle(uri, json_obj)\n if json_value is None:\n return None\n else:\n target = self.proto_cls()\n json_parser.merge_value(json_value, target)\n return target\n else:\n target = self.proto_cls()\n json_parser.merge_value(json_obj, target)\n return target", "docstring": "Parses a protocol buffer for the given JSON object.\n\nArgs:\n uri: The URI of the resource to parse.\n json_obj: The JSON object to parse into a proto.\n\nReturns:\n The protocol buffer for the resource or `None` if it can not be found.", "source": "github_repos"} -{"code": "def experimental_run(self, fn, input_iterator=None):\n return super(StrategyV1, self).experimental_run(fn, input_iterator)", "docstring": "Runs ops in `fn` on each replica, with inputs from `input_iterator`.\n\n DEPRECATED: This method is not available in TF 2.x. Please switch\n to using `run` instead.\n\n When eager execution is enabled, executes ops specified by `fn` on each\n replica. 
Otherwise, builds a graph to execute the ops on each replica.\n\n Each replica will take a single, different input from the inputs provided by\n one `get_next` call on the input iterator.\n\n `fn` may call `tf.distribute.get_replica_context()` to access members such\n as `replica_id_in_sync_group`.\n\n IMPORTANT: Depending on the `tf.distribute.Strategy` implementation being\n used, and whether eager execution is enabled, `fn` may be called one or more\n times (once for each replica).\n\nArgs:\n fn: The function to run. The inputs to the function must match the outputs\n of `input_iterator.get_next()`. The output must be a `tf.nest` of\n `Tensor`s.\n input_iterator: (Optional) input iterator from which the inputs are taken.\n\nReturns:\n Merged return value of `fn` across replicas. The structure of the return\n value is the same as the return value from `fn`. Each element in the\n structure can either be `PerReplica` (if the values are unsynchronized),\n `Mirrored` (if the values are kept in sync), or `Tensor` (if running on a\n single replica).", "source": "github_repos"} -{"code": "def _testReduceSum(self, expected_result, dtype, test_inputs, rtol=0.001, atol=0.0001):\n for test_input in test_inputs:\n with self.session() as sess:\n with self.test_scope():\n a = array_ops.placeholder(dtype)\n index = array_ops.placeholder(dtypes.int32)\n out = math_ops.reduce_sum(a, index)\n result = sess.run(out, {a: np.array(test_input, dtype=dtype), index: [0]})\n self.assertAllClose(np.float32(result), np.float32(expected_result), rtol=rtol, atol=atol)", "docstring": "Tests reduce sum on a list of input arrays.\n\n For each array in test_inputs, check that performing reduce sum on the array\n produces a value that is close to the expected result.\n\nArgs:\n expected_result: the expected result.\n dtype: the data type of the reduce sum operation.\n test_inputs: a list of input arrays for the reduce sum operation.\n rtol: the relative error.\n atol: the absolute error.", "source": "github_repos"} -{"code": "def __init__(self, granularity: Granularity) -> None:\n super().__init__()\n self.chunks = ['']\n self.row = 0\n self.col = 0\n self.current_word = ''\n self.on_split_row = False\n self.granularity = granularity", "docstring": "Initializes the HTML parser for the KNBC corpus.\n\nArgs:\n granularity: Granularity of the output chunks.", "source": "github_repos"} -{"code": "def fn(x: Optional[int]):\n return x", "docstring": "Test function\n\nArgs:\n x: The input", "source": "github_repos"} -{"code": "def softplus(x):\n return ops.softplus(x)", "docstring": "Softplus activation function.\n\n It is defined as: `softplus(x) = log(exp(x) + 1)`.\n\nArgs:\n x: Input tensor.", "source": "github_repos"} -{"code": "def count_tornadoes(input_data):\n return input_data | 'months with tornadoes' >> beam.FlatMap(lambda row: [(int(row['month']), 1)] if row['tornado'] else []) | 'monthly count' >> beam.CombinePerKey(sum) | 'format' >> beam.Map(lambda k_v: {'month': k_v[0], 'tornado_count': k_v[1]})", "docstring": "Workflow computing the number of tornadoes for each month that had one.\n\nArgs:\n input_data: a PCollection of dictionaries representing table rows. 
Each\n dictionary will have a 'month' and a 'tornado' key as described in the\n module comment.\n\nReturns:\n A PCollection of dictionaries containing 'month' and 'tornado_count' keys.\n Months without tornadoes are skipped.", "source": "github_repos"} -{"code": "def set_number_of_partitions(self, number_of_partitions):\n if self._frozen:\n if self._number_of_partitions != number_of_partitions:\n raise ValueError(f\"Can't set number_of_partitions to {number_of_partitions} since it has been frozen to use {self._number_of_partitions}.\")\n else:\n self._number_of_partitions = number_of_partitions", "docstring": "Sets the number of partitions for the current policy.\n\n If the policy has been frozen then shard_dimension must match the\n existing setting.\n\nArgs:\n number_of_partitions: The number of partitions to use in the policy.\n\nRaises:\n ValueError: If the policy has been frozen and shard_dimension\n differs from the frozen value.", "source": "github_repos"} -{"code": "def get_atten(self, idx=0):\n if not self.is_open:\n raise attenuator.Error('Connection to attenuator at %s is not open!' % self._telnet_client.host)\n if idx + 1 > self.path_count or idx < 0:\n raise IndexError('Attenuator index out of range!', self.path_count, idx)\n telnet_cmd = ':ATT?' if self.path_count == 1 else 'CHAN:%s:ATT?' % (idx + 1)\n atten_val_str = self._telnet_client.cmd(telnet_cmd)\n atten_val = float(atten_val_str)\n return atten_val", "docstring": "This function returns the current attenuation from an attenuator at a\n given index in the instrument.\n\nArgs:\n idx: This zero-based index is the identifier for a particular\n attenuator in an instrument.\n\nRaises:\n Error: The underlying telnet connection to the instrument is not\n open.\n\nReturns:\n A float that is the current attenuation value.", "source": "github_repos"} -{"code": "def read_init() -> Dict[str, List[str]]:\n with open(os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), 'r', encoding='utf-8', newline='\\n') as f:\n lines = f.readlines()\n line_index = 0\n while not lines[line_index].startswith('if TYPE_CHECKING'):\n line_index += 1\n backend_specific_objects = {}\n while line_index < len(lines):\n backend = find_backend(lines[line_index])\n if backend is not None:\n while not lines[line_index].startswith(' else:'):\n line_index += 1\n line_index += 1\n objects = []\n while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):\n line = lines[line_index]\n single_line_import_search = _re_single_line_import.search(line)\n if single_line_import_search is not None:\n objects.extend(single_line_import_search.groups()[0].split(', '))\n elif line.startswith(' ' * 12):\n objects.append(line[12:-2])\n line_index += 1\n backend_specific_objects[backend] = objects\n else:\n line_index += 1\n return backend_specific_objects", "docstring": "Read the init and extract backend-specific objects.\n\nReturns:\n Dict[str, List[str]]: A dictionary mapping backend name to the list of object names requiring that backend.", "source": "github_repos"} -{"code": "def solve(self, rhs, adjoint=False, adjoint_arg=False, name='solve'):\n if self.is_non_singular is False:\n raise NotImplementedError('Exact solve not implemented for an operator that is expected to be singular.')\n if self.is_square is False:\n raise NotImplementedError('Exact solve not implemented for an operator that is expected to not be square.')\n\n def _check_operators_agree(r, l, message):\n if r.range_dimension is not None and l.domain_dimension is not None and (r.range_dimension 
!= l.domain_dimension):\n raise ValueError(message)\n if isinstance(rhs, linear_operator.LinearOperator):\n left_operator = self.adjoint() if adjoint else self\n right_operator = rhs.adjoint() if adjoint_arg else rhs\n _check_operators_agree(right_operator, left_operator, 'Operators are incompatible. Expected `x` to have dimension {} but got {}.'.format(left_operator.domain_dimension, right_operator.range_dimension))\n if isinstance(right_operator, LinearOperatorBlockDiag):\n if len(left_operator.operators) != len(right_operator.operators):\n raise ValueError('Can not efficiently solve `LinearOperatorBlockDiag` when number of blocks differ.')\n for o1, o2 in zip(left_operator.operators, right_operator.operators):\n _check_operators_agree(o2, o1, 'Blocks are incompatible. Expected `x` to have dimension {} but got {}.'.format(o1.domain_dimension, o2.range_dimension))\n with self._name_scope(name):\n return self._linop_solve(left_operator, right_operator)\n with self._name_scope(name):\n block_dimensions = self._block_domain_dimensions() if adjoint else self._block_range_dimensions()\n arg_dim = -1 if adjoint_arg else -2\n blockwise_arg = linear_operator_util.arg_is_blockwise(block_dimensions, rhs, arg_dim)\n if blockwise_arg:\n split_rhs = rhs\n for i, block in enumerate(split_rhs):\n if not isinstance(block, linear_operator.LinearOperator):\n block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)\n self._check_input_dtype(block)\n block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])\n split_rhs[i] = block\n else:\n rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(rhs, name='rhs')\n self._check_input_dtype(rhs)\n op_dimension = self.domain_dimension if adjoint else self.range_dimension\n op_dimension.assert_is_compatible_with(rhs.shape[arg_dim])\n split_dim = -1 if adjoint_arg else -2\n split_rhs = linear_operator_util.split_arg_into_blocks(self._block_domain_dimensions(), self._block_domain_dimension_tensors, rhs, axis=split_dim)\n solution_list = []\n for index, operator in enumerate(self.operators):\n solution_list += [operator.solve(split_rhs[index], adjoint=adjoint, adjoint_arg=adjoint_arg)]\n if blockwise_arg:\n return solution_list\n solution_list = linear_operator_util.broadcast_matrix_batch_dims(solution_list)\n return array_ops.concat(solution_list, axis=-2)", "docstring": "Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.\n\n The returned `Tensor` will be close to an exact solution if `A` is well\n conditioned. Otherwise closeness will vary. See class docstring for details.\n\nExample:\n ```python\n # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n operator.shape = [..., M, N]\n\n # Solve R > 0 linear systems for every member of the batch.\n RHS = ... # shape [..., M, R]\n\n X = operator.solve(RHS)\n # X[..., :, r] is the solution to the r'th linear system\n # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]\n\n operator.matmul(X)\n ==> RHS\n ```\n\nArgs:\n rhs: `Tensor` with same `dtype` as this operator and compatible shape,\n or a list of `Tensor`s (for blockwise operators). `Tensor`s are treated\n like a [batch] matrices meaning for every set of leading dimensions, the\n last two dimensions defines a matrix.\n See class docstring for definition of compatibility.\n adjoint: Python `bool`. If `True`, solve the system involving the adjoint\n of this `LinearOperator`: `A^H X = rhs`.\n adjoint_arg: Python `bool`. 
If `True`, solve `A X = rhs^H` where `rhs^H`\n is the hermitian transpose (transposition and complex conjugation).\n name: A name scope to use for ops added by this method.\n\nReturns:\n `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.\n\nRaises:\n NotImplementedError: If `self.is_non_singular` or `is_square` is False.", "source": "github_repos"} -{"code": "def __str__(self):\n return self.str_internal()", "docstring": "Generates a useful string for this object.\n\n Compactly displays interesting fields. In particular, pickled\n fields are not displayed. Note that we collapse the fields of the\n contained Worker* object into this object, since there is a 1-1\n mapping between Operation and operation_specs.Worker*.\n\nReturns:\n Compact string representing this object.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions: bool=False):\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n self_attention_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=from_blocked_mask, to_blocked_mask=to_blocked_mask)\n hidden_states = self_attention_outputs[0]\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attention_outputs[1],)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.", "source": "github_repos"} -{"code": "def _setup_mock_socket_file(mock_socket_create_conn, resp):\n fake_file = mock.Mock()\n fake_file.readline.side_effect = resp\n fake_conn = mock.Mock()\n fake_conn.makefile.return_value = fake_file\n mock_socket_create_conn.return_value = fake_conn\n return fake_file", "docstring": "Sets up a mock socket file from the mock connection.\n\nArgs:\n mock_socket_create_conn: The mock method for creating a socket connection.\n resp: iterable, the side effect of the `readline` function of the mock\n socket file.\n\nReturns:\n The mock socket file that will be injected into the code.", "source": "github_repos"} -{"code": "def serialize_to_string(self):\n return print_mdl.SerializeToString()", "docstring": "Serialize the ProfileProto to a binary string.\n\n Users can write it to file for offline analysis by tfprof commandline\n or graphical interface.\n\nReturns:\n ProfileProto binary string.", "source": "github_repos"} -{"code": "def add_optimizer_variables(self, trainable_variables, name, initializer='zeros'):\n name_list = name\n initializer_list = initializer\n if isinstance(name, str):\n name_list = [name]\n initializer_list = [initializer]\n elif isinstance(initializer, str) or isinstance(initializer, initializers.Initializer):\n initializer_list = [initializer] * len(name_list)\n if len(name_list) != len(initializer_list):\n raise ValueError(f\"The number of provided names must match the number of provided initializers. Received name='{name}', initializer='{initializer}'\")\n optimizer_variables = tuple(([] for _ in name_list))\n for variable in trainable_variables:\n if not self._overwrite_variable_with_gradient(variable):\n for i, (var_name, var_init) in enumerate(zip(name_list, initializer_list)):\n optimizer_variables[i].append(self.add_variable_from_reference(variable, name=var_name, initializer=var_init))\n else:\n for i in range(len(name_list)):\n optimizer_variables[i].append(None)\n if isinstance(name, str):\n return optimizer_variables[0]\n return optimizer_variables", "docstring": "Add optimizer variables from the list of trainable model variables.\n\n Create an optimizer variable based on the information of the supplied\n model variables. For example, in SGD optimizer momemtum, for each model\n variable, a corresponding momemtum variable is created of the same shape\n and dtype.\n\n Note that trainable variables with `v.overwrite_with_gradient == True`\n will insert `None`, into the output list, since the optimizer variable\n will not be used anyways, and could be wasteful.\n\nArgs:\n trainable_variables: `keras.Variable`, the corresponding model\n variable to the optimizer variable to be created.\n name: The name prefix(es) of the optimizer variable(s) to be\n created. Can be a single string or list of strings. If a\n list of strings, will create an optimizer variable for each\n prefix. The variable name will follow the pattern\n `{variable_name}_{trainable_variable.name}`, e.g.,\n `momemtum/dense_1`.\n initializer: Initializer object(s) to use to populate the initial\n variable value(s), or string name of a built-in initializer\n (e.g. `\"random_normal\"`). 
If unspecified, defaults to\n `\"zeros\"`.\n\nReturns:\n A list of optimizer variables, in the format of `keras.Variable`s.\n If multiple names are provide, returns a tuple of lists.", "source": "github_repos"} -{"code": "def _Check(self):\n success = True\n for path in self._paths:\n if not os.path.isfile(path):\n logging.error('No such file: %s', path)\n success = False\n elif not os.access(path, os.R_OK):\n logging.error('No read access: %s', path)\n success = False\n elif not FLAGS.output and (not os.access(path, os.W_OK)):\n logging.error('No write access: %s', path)\n success = False\n return success", "docstring": "Verifies the existence and read+write access to all paths.\n\nReturns:\n Boolean, True if all paths are OK, otherwise False.", "source": "github_repos"} -{"code": "def conv_output_length(input_length, filter_size, padding, stride, dilation=1):\n if input_length is None:\n return None\n assert padding in {'same', 'valid', 'full', 'causal'}\n dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n if padding in ['same', 'causal']:\n output_length = input_length\n elif padding == 'valid':\n output_length = input_length - dilated_filter_size + 1\n elif padding == 'full':\n output_length = input_length + dilated_filter_size - 1\n return (output_length + stride - 1) // stride", "docstring": "Determines output length of a convolution given input length.\n\nArgs:\n input_length: integer.\n filter_size: integer.\n padding: one of \"same\", \"valid\", \"full\", \"causal\"\n stride: integer.\n dilation: dilation rate, integer.\n\nReturns:\n The output length (integer).", "source": "github_repos"} -{"code": "def set_vocabulary(self, vocabulary, idf_weights=None):\n if self.output_mode == 'tf_idf':\n if idf_weights is None:\n raise ValueError(\"`idf_weights` must be set if output_mode is 'tf_idf'.\")\n elif idf_weights is not None:\n raise ValueError(f\"`idf_weights` should only be set if output_mode is `'tf_idf'`. Received: output_mode={self.output_mode} and idf_weights={idf_weights}\")\n if isinstance(vocabulary, str):\n if not tf.io.gfile.exists(vocabulary):\n raise ValueError(f'Vocabulary file {vocabulary} does not exist.')\n if self.output_mode == 'tf_idf':\n raise ValueError(\"output_mode `'tf_idf'` does not support loading a vocabulary from file.\")\n self.lookup_table = self._lookup_table_from_file(vocabulary)\n self._record_vocabulary_size()\n return\n if not tf.executing_eagerly() and (tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)):\n raise RuntimeError(f'Cannot set a tensor vocabulary on layer {self.name} when not executing eagerly. Create this layer or call `set_vocabulary()` outside of any traced function.')\n if tf.is_tensor(vocabulary):\n vocabulary = self._tensor_vocab_to_numpy(vocabulary)\n elif isinstance(vocabulary, (list, tuple)):\n vocabulary = np.array(vocabulary)\n if tf.is_tensor(idf_weights):\n idf_weights = idf_weights.numpy()\n elif isinstance(idf_weights, (list, tuple)):\n idf_weights = np.array(idf_weights)\n if vocabulary.size == 0:\n raise ValueError(f'Cannot set an empty vocabulary. 
Received: vocabulary={vocabulary}')\n oov_start = self._oov_start_index()\n token_start = self._token_start_index()\n special_tokens = [self.mask_token] * oov_start + [self.oov_token] * self.num_oov_indices\n found_special_tokens = np.array_equal(special_tokens, vocabulary[:token_start])\n if found_special_tokens:\n tokens = vocabulary[token_start:]\n else:\n tokens = vocabulary\n repeated_tokens = self._find_repeated_tokens(tokens)\n if repeated_tokens:\n raise ValueError(f'The passed vocabulary has at least one repeated term. Please uniquify your dataset. The repeated terms are: {repeated_tokens}')\n if self.mask_token is not None and self.mask_token in tokens:\n mask_index = np.argwhere(vocabulary == self.mask_token)[-1]\n raise ValueError(f'Found reserved mask token at unexpected location in `vocabulary`. Note that passed `vocabulary` does not need to include the OOV and mask tokens. Either remove all mask and OOV tokens, or include them only at the start of the vocabulary in precisely this order: {special_tokens}. Received: mask_token={self.mask_token} at vocabulary index {mask_index}')\n if self.oov_token is not None and self.invert and (self.oov_token in tokens):\n oov_index = np.argwhere(vocabulary == self.oov_token)[-1]\n raise ValueError(f'Found reserved OOV token at unexpected location in `vocabulary`. Note that passed `vocabulary` does not need to include the OOV and mask tokens. Either remove all mask and OOV tokens, or include them only at the start of the vocabulary in precisely this order: {special_tokens}. Received: oov_token={self.oov_token} at vocabulary index {oov_index}')\n new_vocab_size = token_start + len(tokens)\n if self.max_tokens is not None and new_vocab_size > self.max_tokens:\n raise ValueError(f'Attempted to set a vocabulary larger than the maximum vocab size. Received vocabulary size is {new_vocab_size}; `max_tokens` is {self.max_tokens}.')\n self.lookup_table = self._lookup_table_from_tokens(tokens)\n self._record_vocabulary_size()\n if self.output_mode == 'tf_idf' and idf_weights is not None:\n if len(vocabulary) != len(idf_weights):\n raise ValueError(f'`idf_weights` must be the same length as vocabulary. len(idf_weights) is {len(idf_weights)}; len(vocabulary) is {len(vocabulary)}')\n idf_weights = self._convert_to_ndarray(idf_weights)\n if idf_weights.ndim != 1:\n raise ValueError(f'TF-IDF data must be a 1-index array. Received: type(idf_weights)={type(idf_weights)}')\n if found_special_tokens:\n front_padding = 0\n front_padding_value = 0\n else:\n front_padding = token_start\n front_padding_value = np.average(idf_weights)\n back_padding_value = 0\n if self.pad_to_max_tokens and self.max_tokens is not None:\n back_padding = self.max_tokens - front_padding - len(idf_weights)\n else:\n back_padding = 0\n weights = np.pad(idf_weights, (front_padding, back_padding), 'constant', constant_values=(front_padding_value, back_padding_value))\n weights = tf.convert_to_tensor(weights, dtype=backend.floatx())\n self.idf_weights = tf.Variable(weights, trainable=False)\n self.idf_weights_const = self.idf_weights.value()", "docstring": "Sets vocabulary (and optionally document frequency) for this layer.\n\n This method sets the vocabulary and idf weights for this layer directly,\n instead of analyzing a dataset through `adapt`. It should be used\n whenever the vocab (and optionally document frequency) information is\n already known. 
If vocabulary data is already present in the layer, this\n method will replace it.\n\nArgs:\n vocabulary: Either an array or a string path to a text file.\n If passing an array, can pass a tuple, list,\n 1D numpy array, or 1D tensor containing the vocbulary terms.\n If passing a file path, the file should contain one line\n per term in the vocabulary.\n idf_weights: A tuple, list, 1D numpy array, or 1D tensor\n of inverse document frequency weights with equal\n length to vocabulary. Must be set if `output_mode`\n is `\"tf_idf\"`. Should not be set otherwise.", "source": "github_repos"} -{"code": "def get_updates_for(self, inputs):\n if inputs is None:\n return [u for u in self.updates if u._unconditional_update]\n updates = [u for u in self.updates if not u._unconditional_update]\n inputs = nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, updates)\n return [u for u in updates if u in reachable]", "docstring": "Retrieves updates relevant to a specific set of inputs.\n\nArgs:\n inputs: Input tensor or list/tuple of input tensors.\n\nReturns:\n List of update ops of the layer that depend on `inputs`.", "source": "github_repos"} -{"code": "def __init__(self, config: ClvpConfig):\n super().__init__()\n self.summary_type = getattr(config, 'summary_type', 'last')\n if self.summary_type == 'attn':\n raise NotImplementedError\n self.summary = nn.Identity()\n if hasattr(config, 'summary_use_proj') and config.summary_use_proj:\n if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and (config.num_labels > 0):\n num_classes = config.num_labels\n else:\n num_classes = config.hidden_size\n self.summary = nn.Linear(config.hidden_size, num_classes)\n activation_string = getattr(config, 'summary_activation', None)\n self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()\n self.first_dropout = nn.Identity()\n if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:\n self.first_dropout = nn.Dropout(config.summary_first_dropout)\n self.last_dropout = nn.Identity()\n if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:\n self.last_dropout = nn.Dropout(config.summary_last_dropout)", "docstring": "Compute a single vector summary of a sequence hidden states.\n\nArgs:\n config ([`ClvpConfig`]):\n The config used by the model. Relevant arguments in the config class of the model are (refer to the actual\n config class of your model for the default values it uses):\n\n - **summary_type** (`str`) -- The method to use to make this summary. 
Accepted values are:\n\n - `\"last\"` -- Take the last token hidden state (like XLNet)\n - `\"first\"` -- Take the first token hidden state (like Bert)\n - `\"mean\"` -- Take the mean of all tokens hidden states\n - `\"cls_index\"` -- Supply a Tensor of classification token position (GPT/GPT-2)\n - `\"attn\"` -- Not implemented now, use multi-head attention\n\n - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.\n - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes\n (otherwise to `config.hidden_size`).\n - **summary_activation** (`Optional[str]`) -- Set to `\"tanh\"` to add a tanh activation to the output,\n another string or `None` will add no activation.\n - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.\n - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.", "source": "github_repos"} -{"code": "def _validate_children_inputs_mappings(self, children_inputs_mappings):\n assert isinstance(children_inputs_mappings, dict)\n assert 'parent_first_child_input' in children_inputs_mappings\n assert 'parent_last_child_output' in children_inputs_mappings\n assert 'internal_children_input_output' in children_inputs_mappings\n\n def assert_dictlist_has_keys(dictlist, keys):\n for dikt in dictlist:\n assert isinstance(dikt, dict)\n for key in keys:\n assert key in dikt\n assert_dictlist_has_keys(children_inputs_mappings['parent_first_child_input'], ['parent_ophint_input_index', 'first_child_ophint_input_index'])\n assert_dictlist_has_keys(children_inputs_mappings['parent_last_child_output'], ['parent_output_index', 'child_output_index'])\n assert_dictlist_has_keys(children_inputs_mappings['internal_children_input_output'], ['child_input_index', 'child_output_index'])", "docstring": "Validate children inputs mappings is in the right format.\n\nArgs:\n children_inputs_mappings: the Children ophint inputs/outputs mapping.", "source": "github_repos"} -{"code": "def preprocess_histories(self, max_coarse_history: int, semantic_to_coarse_ratio: int, batch_size: int, semantic_generation_config: int, codebook_size: int, history_prompt: Optional[Dict[str, torch.Tensor]]=None):\n if history_prompt is not None:\n x_semantic_history = torch.repeat_interleave(history_prompt['semantic_prompt'][None], batch_size, dim=0)\n x_coarse_history = history_prompt['coarse_prompt'].clone()\n if codebook_size is not None:\n for n in range(1, x_coarse_history.shape[0]):\n x_coarse_history[n, :] += codebook_size * n\n x_coarse_history = torch.transpose(x_coarse_history, 0, 1).reshape(-1)\n x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size\n x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0)\n max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))\n n_semantic_hist_provided = min([max_semantic_history, x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio))])\n n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))\n x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int()\n x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int()\n x_coarse_history = x_coarse_history[:, :-2]\n else:\n x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int, device=self.device)\n x_coarse_history = 
torch.tensor([[]] * batch_size, dtype=torch.int, device=self.device)\n return (x_semantic_history, x_coarse_history)", "docstring": "Preprocess the optional `Bark` speaker prompts before `self.generate`.\n\nArgs:\n max_coarse_history (`int`):\n Maximum size of coarse tokens used.\n semantic_to_coarse_ratio (`int`):\n Ratio of semantic to coarse frequency\n batch_size (`int`):\n Batch size, i.e the number of samples.\n semantic_generation_config (`BarkSemanticGenerationConfig`):\n Generation config indicating how to generate the semantic tokens.\n codebook_size (`int`):\n Codebook channel size, i.e. the size of the output vocabulary per codebook channel.\n history_prompt (`Optional[Dict[str,torch.Tensor]]`):\n Optional `Bark` speaker prompt.\n Returns: Returns:\n `tuple(torch.FloatTensor)`:\n - **x_semantic_history** (`torch.FloatTensor` -- Processed semantic speaker prompt.\n - **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt.", "source": "github_repos"} -{"code": "def encode_categorical_inputs(inputs, output_mode, depth, dtype, sparse=False, count_weights=None, backend_module=None):\n backend_module = backend_module or backend\n if output_mode == 'int':\n return backend_module.cast(inputs, dtype=dtype)\n rank_of_inputs = len(backend_module.shape(inputs))\n if rank_of_inputs == 0:\n inputs = backend_module.numpy.expand_dims(inputs, -1)\n rank_of_inputs = 1\n if backend_module.__name__.endswith('tensorflow') and rank_of_inputs <= 2 and (output_mode in ('multi_hot', 'count')):\n try:\n return tf_utils.tf_encode_categorical_inputs(inputs, output_mode, depth, dtype=dtype, sparse=sparse, count_weights=count_weights)\n except ValueError:\n pass\n if output_mode == 'multi_hot':\n return backend_module.nn.multi_hot(inputs, depth, dtype=dtype, sparse=sparse)\n elif output_mode == 'one_hot':\n input_shape = backend_module.core.shape(inputs)\n if input_shape is not None and len(input_shape) > 1 and (input_shape[-1] == 1):\n newshape = tuple(input_shape[:-1])\n inputs = backend_module.numpy.reshape(inputs, newshape)\n return backend_module.nn.one_hot(inputs, depth, dtype=dtype, sparse=sparse)\n elif output_mode == 'count':\n reduction_axis = 1 if len(inputs.shape) > 1 else 0\n if count_weights is not None:\n dtype = count_weights.dtype\n one_hot_encoding = backend_module.nn.one_hot(inputs, depth, dtype=dtype, sparse=sparse)\n if count_weights is not None:\n count_weights = backend_module.numpy.expand_dims(count_weights, -1)\n one_hot_encoding = one_hot_encoding * count_weights\n outputs = backend_module.numpy.sum(one_hot_encoding, axis=reduction_axis)\n return outputs", "docstring": "Encodes categorical inputs according to output_mode.\n\nArgs:\n inputs: the inputs to encode.\n output_mode: one of `\"int\"`, `\"one_hot\"`, `\"multi_hot\"`, or `\"count\"`.\n depth: number of classes, this will be the last dimension of the output.\n dtype: the dtype of the output, unless `count_weights` is not `None`.\n sparse: whether the output should be sparse for backends supporting it.\n count_weights: weights to apply if `output_mode` is `\"count\"`.\n backend_module: the backend to use instead of the current one.\n\n Returns: the encoded inputs.", "source": "github_repos"} -{"code": "def get_formal_type_parameter(self, t: str) -> 'BaseValue':\n del t\n return self.ctx.convert.unsolvable", "docstring": "Get the class's type for the type parameter.\n\n Treating self as a class_mixin.Class, gets its formal type for the given\n type parameter. 
For the real implementation, see\n ParameterizedClass.get_formal_type_parameter.\n\nArgs:\n t: The name of the type parameter.\n\nReturns:\n A formal type.", "source": "github_repos"} -{"code": "def apply(self, pts: torch.Tensor) -> torch.Tensor:\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)", "docstring": "Apply the current Rotation as a rotation matrix to a set of 3D coordinates.\n\nArgs:\n pts:\n A [*, 3] set of points\n\nReturns:\n [*, 3] rotated points", "source": "github_repos"} -{"code": "def GetMap(self, cache_info):\n return self.GetParser().GetMap(cache_info, self.CreateMap())", "docstring": "Creates a Map from the cache_info data.\n\nArgs:\n cache_info: file-like object containing the data to parse\n\nReturns:\n A child of Map containing the cache data.", "source": "github_repos"} -{"code": "def _get_prob_original_static(initial_dist_t, target_dist_t):\n init_static = tensor_util.constant_value(initial_dist_t)\n target_static = tensor_util.constant_value(target_dist_t)\n if init_static is None or target_static is None:\n return None\n else:\n return np.min(target_static / init_static)", "docstring": "Returns the static probability of sampling from the original.\n\n `tensor_util.constant_value(prob_of_original)` returns `None` if it encounters\n an Op that it isn't defined for. We have some custom logic to avoid this.\n\nArgs:\n initial_dist_t: A tensor of the initial distribution.\n target_dist_t: A tensor of the target distribution.\n\nReturns:\n The probability of sampling from the original distribution as a constant,\n if it is a constant, or `None`.", "source": "github_repos"} -{"code": "def no_manual_dependency_tracking_scope(obj):\n previous_value = getattr(obj, '_manual_tracking', True)\n obj._manual_tracking = False\n try:\n yield\n finally:\n obj._manual_tracking = previous_value", "docstring": "A context that disables manual dependency tracking for the given `obj`.\n\n Sometimes library methods might track objects on their own and we might want\n to disable that and do the tracking on our own. 
One can then use this context\n manager to disable the tracking the library method does and do your own\n tracking.\n\n For example:\n\n class TestLayer(tf.keras.Layer):\n def build():\n with no_manual_dependency_tracking_scope(self):\n var = self.add_variable(\"name1\") # Creates a var and doesn't track it\n self._track_trackable(\"name2\", var) # We track variable with name `name2`\n\nArgs:\n obj: A trackable object.\n\nYields:\n a scope in which the object doesn't track dependencies manually.", "source": "github_repos"} -{"code": "def CheckInputFromValidContext(op, input_op):\n op_ctxt = op._get_control_flow_context()\n input_ctxt = GetOutputContext(input_op)\n valid = False\n if not input_ctxt:\n valid = True\n elif op_ctxt is input_ctxt:\n valid = True\n else:\n while_ctxt = GetContainingWhileContext(op_ctxt)\n input_while_ctxt = GetContainingWhileContext(input_ctxt)\n if while_ctxt is None:\n if input_while_ctxt is None:\n valid = True\n if IsLoopEnter(op):\n valid = True\n if IsSwitch(op):\n valid = True\n elif IsContainingContext(while_ctxt, input_while_ctxt):\n valid = True\n elif while_ctxt.grad_state and IsContainingContext(while_ctxt.grad_state.forward_context, input_while_ctxt):\n valid = True\n elif while_ctxt.grad_state and while_ctxt.grad_state.forward_context is input_while_ctxt._outer_context:\n valid = True\n elif input_while_ctxt.grad_state and input_while_ctxt.grad_state.forward_context is while_ctxt:\n valid = True\n elif input_while_ctxt.grad_state and input_ctxt.grad_state.forward_context.grad_state and (input_ctxt.grad_state.forward_context.grad_state.forward_context is while_ctxt):\n valid = True\n if not valid:\n if while_ctxt:\n error_msg = f\"Cannot use '{input_op.name}' as input to '{op.name}' because they are in different while loops.\"\n else:\n error_msg = f\"Cannot use '{input_op.name}' as input to '{op.name}' because '{input_op.name}' is in a while loop.\"\n log_msg = error_msg\n log_msg += '\\n\\n%s while context: %s' % (op.name, while_ctxt)\n log_msg += '\\n%s while context: %s' % (input_op.name, input_while_ctxt)\n log_msg += '\\n\\nTraceback for %s:\\n%s\\nTraceback for %s:\\n%s\\n' % (op.name, ''.join(traceback.format_list(op.traceback)), input_op.name, ''.join(traceback.format_list(input_op.traceback)))\n logging.info(log_msg)\n raise ValueError(error_msg + ' See info log for more details.')", "docstring": "Returns whether `input_op` can be used from `op`s context.\n\n Conceptually, only inputs from op's while context or any ancestor while\n context (including outside of any context) are valid. 
In practice, there are\n many other edge cases as well.\n\nArgs:\n op: Operation\n input_op: Operation\n\nRaises:\n ValueError: if input_op is from an invalid context.", "source": "github_repos"} -{"code": "def sharded_save(mesh: layout_lib.Mesh, file_prefix: Union[str, tensor_lib.Tensor], tensor_names: Union[List[str], tensor_lib.Tensor], shape_and_slices: Union[List[str], tensor_lib.Tensor], tensors: List[Union[tensor_lib.Tensor, tf_variables.Variable]]):\n with ops.device(api.device_name()):\n io_ops.save_v2(file_prefix, tensor_names, shape_and_slices, tensors)\n mesh_util.barrier(mesh.host_mesh(), 'SaveV2')\n with api.default_mesh(mesh.host_mesh()):\n merge_op = io_ops.MergeV2Checkpoints(checkpoint_prefixes=[file_prefix], destination_prefix=file_prefix, delete_old_dirs=True)\n mesh_util.barrier(mesh.host_mesh(), 'MergeV2Checkpoints')\n return merge_op", "docstring": "Saves given named tensor slices in a sharded, multi-client safe fashion.\n\n The method makes sure the checkpoint directory state is correct in a sharded\n mutli-client saving. Namely, we place a barrier after SaveV2 to make sure\n every client has done writing the files. And another one after\n MergeV2Checkpoints to make sure all Metadata is properly merged.\n\n Upon existing, the checkpoint is completed and the all directory operations\n are done.\n\nArgs:\n mesh: The Mesh that contains the Tensors to save.\n file_prefix: The prefix of checkpoint.\n tensor_names: a list of tensor names used in save op.\n shape_and_slices: a list of shape and slice specification used in save op.\n The only supported value is \"\" as we don't support distributed saving with\n slices yet.\n tensors: a list of tensors used in save op. The order should match\n tensor_names.\n\nReturns:\n A MergeV2Checkpoints op that merged all Metadata.", "source": "github_repos"} -{"code": "def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor):\n image_queries = contrastive_queries_logits.float()\n image_queries = nn.functional.normalize(image_queries.flatten(1), dim=-1)\n text_queries = nn.functional.normalize(text_queries.flatten(1), dim=-1)\n logit_scale = torch.clamp(self.logit_scale.exp(), max=100)\n logits_per_text = torch.matmul(text_queries, image_queries.t()) * logit_scale\n logits_per_img = logits_per_text.t()\n loss_img = nn.functional.cross_entropy(logits_per_img, torch.arange(len(logits_per_img), device=logits_per_text.device))\n loss_text = nn.functional.cross_entropy(logits_per_text, torch.arange(len(logits_per_text), device=logits_per_text.device))\n loss_contrastive = loss_img + loss_text\n losses = {'loss_contrastive': loss_contrastive}\n return losses", "docstring": "Compute the query-text contrastive loss.\n\nArgs:\n contrastive_queries_logits (`torch.Tensor`):\n A tensor of shape `batch_size, num_queries, hidden_dim`\n text_queries (`torch.Tensor`):\n A tensor of shape `batch_size, num_queries, hidden_dim`\n\nReturns:\n `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:\n - **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries\n and text queries derived from input text list.", "source": "github_repos"} -{"code": "def _FractionalAvgPoolGrad(op: ops.Operation, grad_0, unused_grad_1, unused_grad_2):\n return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0, op.outputs[1], op.outputs[2], op.get_attr('overlapping'))", "docstring": "Returns gradient for FractionalAvgPool.\n\n Since FractionalAvgPool has three outputs, there are 
three gradients passed in\n for each of the outputs. Only the first one is useful, the other two gradients\n are empty.\n\nArgs:\n op: The FractionalAvgPoolOp.\n grad_0: Gradient with respect to op.outputs[0]\n unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.\n unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.\n\nReturns:\n Input backprop for FractionalAvgPool op.", "source": "github_repos"} -{"code": "def of(seconds: TimestampTypes) -> 'Timestamp':\n if isinstance(seconds, Timestamp):\n return seconds\n elif isinstance(seconds, (int, float)):\n return Timestamp(seconds)\n elif isinstance(seconds, datetime.datetime):\n return Timestamp.from_utc_datetime(seconds)\n else:\n raise TypeError('Cannot interpret %s %s as Timestamp.' % (seconds, type(seconds)))", "docstring": "Return the Timestamp for the given number of seconds.\n\n If the input is already a Timestamp, the input itself will be returned.\n\nArgs:\n seconds: Number of seconds as int, float, long, or Timestamp.\n\nReturns:\n Corresponding Timestamp object.", "source": "github_repos"} -{"code": "def Match(self, encoded):\n logging.log(1, 'Decoding %s: %s', self.name, encoded)\n decoded = self.msg.encoding.ParseFromString(encoded, self.msg)\n logging.info('Matching message value:\\nExpected: %s\\nActual: %s\\n', self.value_dict_or_array, decoded)\n return MessageValue._MatchValue(self.value_dict_or_array, decoded)", "docstring": "Whether or not |encoded| is compatible with this message instance.\n\n If |encoded| has all required fields, and values of all fields are same to\n those of this message instance, it is compatible. Otherwise, i.e\n 1) it doesn't have some required fields\n 2) it has some values of fields different from specified in |value_dict| of\n this message instance\n\nArgs:\n encoded: A string expected to be encoded with same encoding method of\n this message instance.\n\nReturns:\n Whether or not |encoded| is compatible with this message instance.", "source": "github_repos"} -{"code": "def trivial_reward(example):\n return example", "docstring": "Reward for the trivial search space.\n\n The reward (i.e. fitness) is the value itself. 
The goal of the search,\n therefore, is to find the value 1.\n\nArgs:\n example: a materialized value.\n\nReturns:\n The corresponding reward.", "source": "github_repos"} -{"code": "def _is_apk_install_success(stdout: bytes, stderr: str) -> bool:\n if utils.grep('Failure', stdout):\n return False\n return any([not stderr, stderr == 'Success', 'waiting for device' in stderr])", "docstring": "Checks output of the adb install command and decides if install succeeded.\n\nArgs:\n stdout: string, the standard out output of an adb install command.\n stderr: string, the standard error output of an adb install command.\n\nReturns:\n True if the installation succeeded; False otherwise.", "source": "github_repos"} -{"code": "def recipe_sa360_web_query(config, recipe_name):\n drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https://docs.google.com/spreadsheets/d/1S9os1VO3dBW_EUFvAq4SxlxddZYAdUkvr5H9iTPQT_s/edit?resourcekey=0-jdR3mdYYWSVSAEmwuxMKbQ#gid=0', 'destination': recipe_name}})", "docstring": "Download SA360 reports into a Google Sheet.\n\nArgs:\n recipe_name (string) - Name of document to deploy to.", "source": "github_repos"} -{"code": "def is_interactive_logging_enabled():\n return global_state.get_global_attribute('interactive_logging', True)", "docstring": "Check if interactive logging is enabled.\n\n To switch between writing logs to stdout and `absl.logging`, you may use\n `keras.config.enable_interactive_logging()` and\n `keras.config.disable_interactive_logging()`.\n\nReturns:\n Boolean, `True` if interactive logging is enabled,\n and `False` otherwise.", "source": "github_repos"} -{"code": "def get_config_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:\n cls._set_token_in_kwargs(kwargs)\n original_kwargs = copy.deepcopy(kwargs)\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n if config_dict is None:\n return ({}, kwargs)\n if '_commit_hash' in config_dict:\n original_kwargs['_commit_hash'] = config_dict['_commit_hash']\n if 'configuration_files' in config_dict:\n configuration_file = get_configuration_file(config_dict['configuration_files'])\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs)\n return (config_dict, kwargs)", "docstring": "From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a\n [`PretrainedConfig`] using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n\nReturns:\n `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.", "source": "github_repos"} -{"code": "def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):\n batch_size = enc_output.shape[0]\n proposals = []\n _cur = 0\n level_ids = []\n for level, (height, width) in enumerate(spatial_shapes):\n mask_flatten_ = padding_mask[:, _cur:_cur + height * width].view(batch_size, height, width, 1)\n valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n grid_y, grid_x = meshgrid(torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing='ij')\n grid = torch.cat([grid_x.unsqueeze(-1), 
grid_y.unsqueeze(-1)], -1)\n scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale\n width_height = torch.ones_like(grid) * 0.05 * 2.0 ** level\n proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)\n proposals.append(proposal)\n _cur += height * width\n level_ids.append(grid.new_ones(height * width, dtype=torch.long) * level)\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n output_proposals = torch.log(output_proposals / (1 - output_proposals))\n output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf'))\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n object_query = enc_output\n object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))\n object_query = object_query.masked_fill(~output_proposals_valid, float(0))\n object_query = self.enc_output_norm(self.enc_output(object_query))\n level_ids = torch.cat(level_ids)\n return (object_query, output_proposals, level_ids)", "docstring": "Generate the encoder output proposals from encoded enc_output.\n\nArgs:\n enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.\n padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.\n spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps.\n\nReturns:\n `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.\n - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to\n directly predict a bounding box. 
(without the need of a decoder)\n - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse\n sigmoid.", "source": "github_repos"} -{"code": "def __init__(self, chunks: typing.List[str], separator: str):\n \"\"\"Initializes the parser.\n\nArgs:\n chunks (List[str]): The chunks to resolve.\n separator (str): The separator string.\n\"\"\"\n HTMLParser.__init__(self)\n self.chunks_joined = SEP.join(chunks)\n self.separator = separator\n self.to_skip = False\n self.scan_index = 0\n self.element_stack: queue.LifoQueue[ElementState] = queue.LifoQueue()", "docstring": "An HTML parser to resolve the given HTML string and semantic chunks.\n\nAttributes:\n output (str): The HTML string to output.", "source": "github_repos"} -{"code": "def _try_handling_undefineds(body, get_state, set_state, init_vars, nulls, shape_invariants, symbol_names):\n state_modified = False\n first_iter_vars = None\n failure_message = None\n try:\n with func_graph.FuncGraph('tmp').as_default():\n\n def autocast_to_tensor(v):\n if isinstance(v, (int, float, bool, str, list, tuple, np.ndarray, np.generic)):\n init_val = tensor_conversion.convert_to_tensor_v2(v)\n return array_ops.placeholder(init_val.dtype, init_val.shape)\n return v\n autocast_init_vars = nest.map_structure(autocast_to_tensor, init_vars)\n set_state(autocast_init_vars)\n state_modified = True\n body()\n first_iter_vars = get_state()\n inits_and_invariants = tuple((_placeholder_value(iv, i, v) if n else (v, None) for v, n, iv, i in zip(init_vars, nulls, first_iter_vars, shape_invariants)))\n init_vars, extra_shape_invariants = zip(*inits_and_invariants)\n success = True\n except (UnboundLocalError, TypeError, ValueError, KeyError):\n ag_logging.log(1, 'Caught error while staging loop body', exc_info=True)\n exc = sys.exc_info()\n failure_message = 'Note: AutoGraph tried to define it automatically, but ran into a {}: {}'.format(exc[0].__name__, exc[1])\n finally:\n if state_modified:\n set_state(init_vars)\n verify_loop_init_vars(init_vars, symbol_names, first_iter_vars, extra_message=failure_message)\n return (success, init_vars, extra_shape_invariants)", "docstring": "Makes a best-effort attempt to substitute undefineds with placeholders.\n\n Note: this substitution requires two things to happen:\n 1. the types of loop variables could be inferred (usually by staging one\n iteration)\n 2. these types could be replaced by placeholders (e.g. zero values, for\n tensors).\n\nArgs:\n body: a function representing the loop body. See while_stmt.\n get_state: state getter for the loop statement. See while_stmt.\n set_state: state getter for the loop statement. See while_stmt.\n init_vars: loop variables before entering the loop. See while_stmt.\n nulls: list of boolean flags indicating whether the corresponding loop var\n is None or undefined.\n shape_invariants: user-specified shape invariant for each loop variable.\n symbol_names: list of loop variable names. 
See while_stmt.\n\nReturns:\n A tuple (success, new_init_vars, extra_shape_invariants, failure_message):\n * success is a boolean flag indicating\n whether types could be successfully inferred (step 1 above)\n * new_init_vars contains the loop vars, with None or undefined values\n replaced by default values, where possible (step 2 above)\n * extra_shape_invariants contains shape invariants that would be needed\n by while_stmt, for instance if the placeholder values had a shape\n different from the corresponding loop outputs", "source": "github_repos"} -{"code": "def _process_update(self, item, feed_item):\n item['name'] = feed_item.get(FieldMap.CREATIVE_NAME, None)\n self._associate_third_party_urls(feed_item, item)\n self._associate_click_tags(feed_item, item)", "docstring": "Updates a creative based on the values from the feed.\n\nArgs:\n item: Object representing the creative to be updated, this object is\n updated directly.\n feed_item: Feed item representing creative values from the Bulkdozer feed.", "source": "github_repos"} -{"code": "def __init__(self, num_heads=None, relative_attention_num_buckets=32, bidirectional=True, scaling_factor=1, max_distance=128, level='tokens', augmentation=False, prefix_bucket=False, expand=False):\n super(RelativePositionBiasBase, self).__init__()\n self.prefix_bucket = prefix_bucket\n self.augmentation = augmentation\n self.level = level\n self.max_distance = max_distance\n self.scaling_factor = scaling_factor\n self.bidirectional = bidirectional\n self.num_heads = num_heads\n self.expand = expand\n self.relative_attention_num_buckets = relative_attention_num_buckets\n extra_head = 2 if prefix_bucket and (not self.expand) else 0\n self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets + extra_head, self.num_heads)", "docstring": "Base class of relative biases.\n\nArgs:\n num_heads (`int`):\n Number of attention heads in the model, it will create embeddings of size `num_heads`, which will be added to the scores of each token pair.\n relative_attention_num_buckets (`int`, *optional*, defaults to 32):\n Pair token metric (distance in the sequence, distance in pixels etc.) will be bucketed, parameter is defining number of such\n buckets.\n bidirectional (`bool`, *optional*, defaults to `True`):\n Whether the distance should be bidirectional for a pair of tokens. 
If `False`, then distance(tok1, tok2) == distance(tok2, tok1).\n scaling_factor (`int`, *optional*, defaults to 1):\n Defining factor which will be used to scale relative distance.\n max_distance (`int`, *optional*, defaults to 128):\n All distances above this value will end up in the one/same bucket.\n augmentation (`bool`, *optional*, defaults to `False`):\n Whether to multiply relative distances by a random scalar.\n expand (`bool`, *optional*, defaults to `False`):\n Whether to expand an existing pretrained model with subsequent additions of prefix_bucket.", "source": "github_repos"} -{"code": "def device(self) -> str:\n return pywrap_tf_session.TF_OperationDevice(self._c_op)", "docstring": "The name of the device to which this op has been assigned, if any.\n\nReturns:\n The string name of the device to which this op has been\n assigned, or an empty string if it has not been assigned to a\n device.", "source": "github_repos"} -{"code": "def conjugate(x):\n if any_symbolic_tensors((x,)):\n return Conjugate().symbolic_call(x)\n return backend.numpy.conjugate(x)", "docstring": "Returns the complex conjugate, element-wise.\n\n The complex conjugate of a complex number is obtained by changing the sign\n of its imaginary part.\n\n `keras.ops.conj` is a shorthand for this function.\n\nArgs:\n x: Input tensor.\n\nReturns:\n The complex conjugate of each element in `x`.", "source": "github_repos"} -{"code": "def _init_from_proto(self, variable_def, import_scope=None):\n assert isinstance(variable_def, variable_pb2.VariableDef)\n g = ops.get_default_graph()\n self._variable = g.as_graph_element(ops.prepend_name_scope(variable_def.variable_name, import_scope=import_scope))\n self._name = self._variable.name\n self._initializer_op = g.as_graph_element(ops.prepend_name_scope(variable_def.initializer_name, import_scope=import_scope))\n if hasattr(variable_def, 'initial_value_name') and variable_def.initial_value_name:\n self._initial_value = g.as_graph_element(ops.prepend_name_scope(variable_def.initial_value_name, import_scope=import_scope))\n else:\n self._initial_value = None\n synchronization, aggregation, trainable = variables.validate_synchronization_aggregation_trainable(variable_def.synchronization, variable_def.aggregation, variable_def.trainable, variable_def.variable_name)\n self._synchronization = synchronization\n self._aggregation = aggregation\n self._trainable = trainable\n self._snapshot = g.as_graph_element(ops.prepend_name_scope(variable_def.snapshot_name, import_scope=import_scope))\n if variable_def.HasField('save_slice_info_def'):\n self._save_slice_info = variables.Variable.SaveSliceInfo(save_slice_info_def=variable_def.save_slice_info_def, import_scope=import_scope)\n else:\n self._save_slice_info = None\n self._caching_device = None\n self._constraint = None", "docstring": "Recreates the Variable object from a `VariableDef` protocol buffer.\n\nArgs:\n variable_def: `VariableDef` protocol buffer, describing a variable whose\n nodes already exists in the graph.\n import_scope: Optional `string`. 
Name scope to add.", "source": "github_repos"} -{"code": "def _analyze_tab_complete_input(self, text):\n text = text.lstrip()\n if not text:\n context = ''\n prefix = ''\n except_last_word = ''\n else:\n items = text.split(' ')\n if len(items) == 1:\n context = ''\n prefix = items[0]\n except_last_word = ''\n else:\n context = items[0]\n prefix = items[-1]\n except_last_word = ' '.join(items[:-1]) + ' '\n return (context, prefix, except_last_word)", "docstring": "Analyze raw input to tab-completer.\n\nArgs:\n text: (str) the full, raw input text to be tab-completed.\n\nReturns:\n context: (str) the context str. For example,\n If text == \"print_tensor softmax\", returns \"print_tensor\".\n If text == \"print\", returns \"\".\n If text == \"\", returns \"\".\n prefix: (str) the prefix to be tab-completed, from the last word.\n For example, if text == \"print_tensor softmax\", returns \"softmax\".\n If text == \"print\", returns \"print\".\n If text == \"\", returns \"\".\n except_last_word: (str) the input text, except the last word.\n For example, if text == \"print_tensor softmax\", returns \"print_tensor\".\n If text == \"print_tensor -a softmax\", returns \"print_tensor -a\".\n If text == \"print\", returns \"\".\n If text == \"\", returns \"\".", "source": "github_repos"} -{"code": "def __getitem__(self, index: Any) -> Rotation:\n if type(index) is not tuple:\n index = (index,)\n if self._rot_mats is not None:\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif self._quats is not None:\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError('Both rotations are None')", "docstring": "Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape\n property.\n\nArgs:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n\nReturns:\n The indexed rotation", "source": "github_repos"} -{"code": "def seed(s):\n try:\n s = int(s)\n except TypeError:\n raise ValueError(f'Argument `s` got an invalid value {s}. 
Only integers are supported.')\n random_seed.set_seed(s)", "docstring": "Sets the seed for the random number generator.\n\n Uses `tf.set_random_seed`.\n\nArgs:\n s: an integer.", "source": "github_repos"} -{"code": "def incomplete_size(self, name=None):\n if name is None:\n name = '%s_incomplete_size' % self._name\n return self._incomplete_size_fn(shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)", "docstring": "Returns the number of incomplete elements in the staging area.\n\nArgs:\n name: A name for the operation (optional)\n\nReturns:\n The created op", "source": "github_repos"} -{"code": "def get_dev_examples(self, data_dir, filename=None):\n if data_dir is None:\n data_dir = ''\n if self.dev_file is None:\n raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')\n with open(os.path.join(data_dir, self.dev_file if filename is None else filename), 'r', encoding='utf-8') as reader:\n input_data = json.load(reader)['data']\n return self._create_examples(input_data, 'dev')", "docstring": "Returns the evaluation example from the data directory.\n\nArgs:\n data_dir: Directory containing the data files used for training and evaluating.\n filename: None by default, specify this if the evaluation file has a different name than the original one\n which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> CausalLMOutputWithPast:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs)\n hidden_states = outputs.last_hidden_state\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)\n return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, PersimmonForCausalLM\n\n >>> model = PersimmonForCausalLM.from_pretrained(\"adept/persimmon-8b-base\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"adept/persimmon-8b-base\")\n\n >>> prompt = \"human: Hey, what should I eat for dinner?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n 'human: Hey, what should I eat for dinner?\\n\\ncat: 🐱\\n\\nhuman: 😐\\n\\n'\n ```", "source": "github_repos"} -{"code": "def __init__(self, namespace: str, prefix: str=''):\n if prefix:\n prefix = f'{prefix}_'\n self._inference_counter = beam.metrics.Metrics.counter(namespace, prefix + 'num_inferences')\n self.failed_batches_counter = beam.metrics.Metrics.counter(namespace, prefix + 'failed_batches_counter')\n self._inference_request_batch_size = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_request_batch_size')\n self._inference_request_batch_byte_size = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_request_batch_byte_size')\n self._inference_batch_latency_micro_secs = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_batch_latency_micro_secs')\n self._model_byte_size = beam.metrics.Metrics.distribution(namespace, prefix + 'model_byte_size')\n self._load_model_latency_milli_secs = beam.metrics.Metrics.distribution(namespace, prefix + 'load_model_latency_milli_secs')\n self._load_model_latency_milli_secs_cache = None\n self._model_byte_size_cache = None", "docstring": "Args:\n namespace: Namespace for the metrics.\n prefix: Unique identifier for metrics, used when models\n are updated using side input.", "source": "github_repos"} -{"code": "def _v2_get_resized_lm_head_bias(self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int) -> Dict[str, tf.Tensor]:\n new_lm_head_bias = {}\n for attr, weight in old_lm_head_bias.items():\n first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)\n size_diff = new_num_tokens - old_num_tokens\n if old_num_tokens > new_num_tokens:\n new_bias = weight.value()[..., :new_num_tokens]\n else:\n padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]\n new_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape))\n new_lm_head_bias[attr] = new_bias\n return new_lm_head_bias", "docstring": "Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.\n Reducing the size will remove vectors from the end\n\nArgs:\n old_lm_head_bias (`Dict[str, tf.Variable]`):\n Old lm head bias to be resized.\n new_num_tokens (`int`):\n New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at\n the end. 
Reducing the size will remove vectors from the end.\n\nReturns:\n `tf.Tensor`: Values for the resized bias.", "source": "github_repos"} -{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n model_handler = VertexAIModelHandlerJSON(endpoint_id=known_args.endpoint, project=known_args.project, location=known_args.location, experiment=known_args.experiment, network=known_args.vpc_network, private=known_args.private)\n pipeline = test_pipeline\n if not test_pipeline:\n pipeline = beam.Pipeline(options=pipeline_options)\n read_glob = pipeline | 'Get glob' >> beam.Create([known_args.input])\n read_image_name = read_glob | 'Get Image Paths' >> fileio.MatchAll()\n load_image = read_image_name | 'Read Image' >> beam.Map(lambda image_name: read_image(image_name.path))\n preprocess = load_image | 'Preprocess Image' >> beam.MapTuple(lambda img_name, img: (img_name, preprocess_image(img)))\n predictions = preprocess | 'RunInference' >> RunInference(KeyedModelHandler(model_handler))\n process_output = predictions | 'Process Predictions' >> beam.ParDo(PostProcessor())\n _ = process_output | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n result = pipeline.run()\n result.wait_until_finish()\n return result", "docstring": "Args:\n argv: Command line arguments defined for this example.\n save_main_session: Used for internal testing.\n test_pipeline: Used for internal testing.", "source": "github_repos"} -{"code": "def _pad_for_batching(self, pixel_values: List[torch.Tensor], image_sizes: List[List[int]]):\n max_shape = (max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes]))\n pixel_values = [torch.nn.functional.pad(image, pad=(0, max_shape[1] - size[1], 0, max_shape[0] - size[0])) for image, size in zip(pixel_values, image_sizes)]\n return torch.stack(pixel_values)", "docstring": "Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.\n\nArgs:\n pixel_values (`List[torch.Tensor]`):\n An array of pixel values of each images of shape (`batch_size`, `channels`, `height`, `width`)\n image_sizes (`List[List[int]]`):\n A list of sizes for each image in `pixel_values` in (height, width) format.\n\nReturns:\n List[`torch.Tensor`]: The padded images.", "source": "github_repos"} -{"code": "def _extract_attrs(op, keys):\n kwargs = {}\n not_found = object()\n for k in keys:\n srcs = [getattr(op, k, not_found), getattr(op, '_' + k, not_found), getattr(op, 'parameters', {}).get(k, not_found)]\n if any((v is not not_found for v in srcs)):\n kwargs[k] = [v for v in srcs if v is not not_found][0]\n else:\n raise ValueError(f\"Could not determine an appropriate value for field `{k}` in object `{op}`. Looked for \\n 1. an attr called `{k}`,\\n 2. an attr called `_{k}`,\\n 3. 
an entry in `op.parameters` with key '{k}'.\")\n if k in op._composite_tensor_prefer_static_fields and kwargs[k] is not None:\n if tensor_util.is_tensor(kwargs[k]):\n static_val = tensor_util.constant_value(kwargs[k])\n if static_val is not None:\n kwargs[k] = static_val\n if isinstance(kwargs[k], (np.ndarray, np.generic)):\n kwargs[k] = kwargs[k].tolist()\n return kwargs", "docstring": "Extract constructor kwargs to reconstruct `op`.\n\nArgs:\n op: A `LinearOperator` instance.\n keys: A Python `tuple` of strings indicating the names of the constructor\n kwargs to extract from `op`.\n\nReturns:\n kwargs: A Python `dict` of kwargs to `op`'s constructor, keyed by `keys`.", "source": "github_repos"} -{"code": "def execute_tests(self):\n with open(self.notebook_path, 'r') as nb_f:\n nb = nbformat.read(nb_f, as_version=4)\n ExecutePreprocessor.timeout = self.timeout_secs\n ep = ExecutePreprocessor(allow_errors=True)\n exec_nb, _ = ep.preprocess(nb, {'metadata': {'path': self.dir + '/'}})\n test_count = 0\n error_count = 0\n errors = OrderedDict()\n code_cells = {}\n for cell in exec_nb['cells']:\n if cell['cell_type'] == 'code':\n code_cells[cell['execution_count']] = cell\n for cell_num in sorted(self.tests.keys()):\n if cell_num not in code_cells:\n test_count += 1\n error_count += 1\n errors[cell_num, '', ''] = 'Given cell does not exist.'\n else:\n cell = code_cells[cell_num]\n for test in self.tests[cell_num]:\n cls, setup = list(test.items())[0]\n test_count += 1\n try:\n getattr(sys.modules['testlib'], cls)(setup).check(cell)\n except Exception as e:\n error_count += 1\n errors[cell_num, cls, setup] = str(e)\n return (test_count, error_count, errors)", "docstring": "Executes notebook and compares to test spec.\n\nReturns:\n # of tests, # of errors, error_dict\n where error_dict maps (cell number, test class, expected output) to string", "source": "github_repos"} -{"code": "def __init__(self, y_tensor=None):\n self._uuid = uuid.uuid4().hex\n _gradient_debuggers[self._uuid] = self\n self._gradient_tensors = {}\n self._y_tensor = y_tensor\n self._graph = None\n if y_tensor:\n self._graph = y_tensor.graph\n self._is_active_context = False", "docstring": "Constructor of GradientsDebugger.\n\nArgs:\n y_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor\n on the numerator of the differentiation.", "source": "github_repos"} -{"code": "def round(x, name=None):\n x = ops.convert_to_tensor(x, name='x')\n if x.dtype.is_integer:\n return x\n else:\n return gen_math_ops.round(x, name=name)", "docstring": "Rounds the values of a tensor to the nearest integer, element-wise.\n\n Rounds half to even. Also known as bankers rounding. 
If you want to round\n according to the current system rounding mode use tf::cint.\n For example:\n\n ```python\n x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])\n tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]\n ```\n\nArgs:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` of same shape and type as `x`.", "source": "github_repos"} -{"code": "def __call__(self, audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], sampling_rate: Union[int, List[int]], steps_per_beat: int=2, resample: Optional[bool]=True, return_attention_mask: Optional[bool]=False, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature:\n requires_backends(self, ['librosa'])\n is_batched = bool(isinstance(audio, (list, tuple)) and isinstance(audio[0], (np.ndarray, tuple, list)))\n if is_batched:\n if not isinstance(sampling_rate, list):\n raise ValueError(f'Please give sampling_rate of each audio separately when you are passing multiple raw_audios at the same time. Received {sampling_rate}, expected [audio_1_sr, ..., audio_n_sr].')\n return_attention_mask = True if return_attention_mask is None else return_attention_mask\n else:\n audio = [audio]\n sampling_rate = [sampling_rate]\n return_attention_mask = False if return_attention_mask is None else return_attention_mask\n batch_input_features, batch_beatsteps, batch_ext_beatstep = ([], [], [])\n for single_raw_audio, single_sampling_rate in zip(audio, sampling_rate):\n bpm, beat_times, confidence, estimates, essentia_beat_intervals = self.extract_rhythm(audio=single_raw_audio)\n beatsteps = self.interpolate_beat_times(beat_times=beat_times, steps_per_beat=steps_per_beat, n_extend=1)\n if self.sampling_rate != single_sampling_rate and self.sampling_rate is not None:\n if resample:\n single_raw_audio = librosa.core.resample(single_raw_audio, orig_sr=single_sampling_rate, target_sr=self.sampling_rate, res_type='kaiser_best')\n else:\n warnings.warn(f'The sampling_rate of the provided audio is different from the target sampling_rate of the Feature Extractor, {self.sampling_rate} vs {single_sampling_rate}. In these cases it is recommended to use `resample=True` in the `__call__` method to get the optimal behaviour.')\n single_sampling_rate = self.sampling_rate\n start_sample = int(beatsteps[0] * single_sampling_rate)\n end_sample = int(beatsteps[-1] * single_sampling_rate)\n input_features, extrapolated_beatstep = self.preprocess_mel(single_raw_audio[start_sample:end_sample], beatsteps - beatsteps[0])\n mel_specs = self.mel_spectrogram(input_features.astype(np.float32))\n log_mel_specs = np.log(np.clip(mel_specs, a_min=1e-06, a_max=None))\n input_features = np.transpose(log_mel_specs, (0, -1, -2))\n batch_input_features.append(input_features)\n batch_beatsteps.append(beatsteps)\n batch_ext_beatstep.append(extrapolated_beatstep)\n output = BatchFeature({'input_features': batch_input_features, 'beatsteps': batch_beatsteps, 'extrapolated_beatstep': batch_ext_beatstep})\n output = self.pad(output, is_batched=is_batched, return_attention_mask=return_attention_mask, return_tensors=return_tensors)\n return output", "docstring": "Main method to featurize and prepare for the model.\n\nArgs:\n audio (`np.ndarray`, `List`):\n The audio or batch of audio to be processed. 
Each audio can be a numpy array, a list of float values, a\n list of numpy arrays or a list of list of float values.\n sampling_rate (`int`):\n The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass\n `sampling_rate` at the forward call to prevent silent errors.\n steps_per_beat (`int`, *optional*, defaults to 2):\n This is used in interpolating `beat_times`.\n resample (`bool`, *optional*, defaults to `True`):\n Determines whether to resample the audio to `sampling_rate` or not before processing. Must be True\n during inference.\n return_attention_mask (`bool` *optional*, defaults to `False`):\n Denotes if attention_mask for input_features, beatsteps and extrapolated_beatstep will be given as\n output or not. Automatically set to True for batched inputs.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n If nothing is specified, it will return list of `np.ndarray` arrays.", "source": "github_repos"} -{"code": "def get_import(self, file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool) -> str:\n module_import_path = _get_import_path(self.exported_symbol.file_name, file_prefixes_to_strip, module_prefix)\n alias = ''\n symbol_name = self.exported_symbol.symbol_name\n if self.name != symbol_name:\n alias = f' as {self.name}'\n if not use_lazy_loading:\n return f'from {module_import_path} import {symbol_name}{alias} # line: {self.exported_symbol.line_no}'\n else:\n return f\" '{self.name}': ('{module_import_path}', '{symbol_name}'), # line: {self.exported_symbol.line_no}\"", "docstring": "Returns the import statement for this entrypoint.\n\nArgs:\n file_prefixes_to_strip: List of prefixes to strip from the file name.\n module_prefix: A prefix to add to the import.\n use_lazy_loading: Whether to use lazy loading or not.", "source": "github_repos"} -{"code": "def __init__(self, funcs, trackable_obj=None):\n super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\n funcs: List of TensorFlow ConcreteFunctions. The list should not contain\n duplicate elements.\n trackable_obj: tf.AutoTrackable object associated with `funcs`. A\n reference to this object needs to be maintained so that Variables do not\n get garbage collected since functions have a weak reference to\n Variables. This is only required when the tf.AutoTrackable object is not\n maintained by the user (e.g. `from_saved_model`).", "source": "github_repos"} -{"code": "def get_build_config(self):\n if self._build_shapes_dict is not None:\n if len(self._build_shapes_dict) == 1:\n return {'input_shape': tuple(self._build_shapes_dict.values())[0]}\n else:\n return {'shapes_dict': self._build_shapes_dict}", "docstring": "Returns a dictionary with the layer's input shape.\n\n This method returns a config dict that can be used by\n `build_from_config(config)` to create all states (e.g. Variables and\n Lookup tables) needed by the layer.\n\n By default, the config only contains the input shape that the layer\n was built with. 
If you're writing a custom layer that creates state in\n an unusual way, you should override this method to make sure this state\n is already created when Keras attempts to load its value upon model\n loading.\n\nReturns:\n A dict containing the input shape associated with the layer.", "source": "github_repos"} -{"code": "def forward(self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool=False, _z_reference_list: Optional[Sequence[torch.Tensor]]=None) -> torch.Tensor:\n z = [z]\n q = self.linear_q(s)\n kv = self.linear_kv(s)\n q = q.view(q.shape[:-1] + (self.num_heads, -1))\n kv = kv.view(kv.shape[:-1] + (self.num_heads, -1))\n k, v = torch.split(kv, self.hidden_dim, dim=-1)\n q_pts = self.linear_q_points(s)\n q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1)\n q_pts = torch.stack(q_pts, dim=-1)\n q_pts = r[..., None].apply(q_pts)\n q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3))\n kv_pts = self.linear_kv_points(s)\n kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1)\n kv_pts = torch.stack(kv_pts, dim=-1)\n kv_pts = r[..., None].apply(kv_pts)\n kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3))\n k_pts, v_pts = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2)\n b = self.linear_b(z[0])\n if _offload_inference:\n assert sys.getrefcount(z[0]) == 2\n z[0] = z[0].cpu()\n if is_fp16_enabled():\n with torch.cuda.amp.autocast(enabled=False):\n a = torch.matmul(permute_final_dims(q.float(), (1, 0, 2)), permute_final_dims(k.float(), (1, 2, 0)))\n else:\n a = torch.matmul(permute_final_dims(q, (1, 0, 2)), permute_final_dims(k, (1, 2, 0)))\n a *= math.sqrt(1.0 / (3 * self.hidden_dim))\n a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))\n pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)\n pt_att = pt_att ** 2\n pt_att = sum(torch.unbind(pt_att, dim=-1))\n head_weights = self.softplus(self.head_weights).view(*(1,) * len(pt_att.shape[:-2]) + (-1, 1))\n head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2)))\n pt_att = pt_att * head_weights\n pt_att = torch.sum(pt_att, dim=-1) * -0.5\n square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)\n square_mask = self.config.inf * (square_mask - 1)\n pt_att = permute_final_dims(pt_att, (2, 0, 1))\n a = a + pt_att\n a = a + square_mask.unsqueeze(-3)\n a = self.softmax(a)\n o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3)\n o = flatten_final_dims(o, 2)\n o_pt = torch.sum(a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :], dim=-2)\n o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))\n o_pt = r[..., None, None].invert_apply(o_pt)\n o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.config.epsilon), 2)\n o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)\n if _offload_inference:\n z[0] = z[0].to(o_pt.device)\n o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))\n o_pair = flatten_final_dims(o_pair, 2)\n s = self.linear_out(torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype))\n return s", "docstring": "Args:\n s:\n [*, N_res, C_s] single representation\n z:\n [*, N_res, N_res, C_z] pair representation\n r:\n [*, N_res] transformation object\n mask:\n [*, N_res] mask\n\nReturns:\n [*, N_res, C_s] single representation update", "source": "github_repos"} -{"code": "def filter_out_non_signature_kwargs(extra: Optional[list]=None):\n extra = extra or []\n extra_params_to_pass = 
set(extra)\n\n def decorator(func):\n sig = inspect.signature(func)\n function_named_args = set(sig.parameters.keys())\n valid_kwargs_to_pass = function_named_args.union(extra_params_to_pass)\n is_instance_method = 'self' in function_named_args\n is_class_method = 'cls' in function_named_args\n func._filter_out_non_signature_kwargs = True\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n valid_kwargs = {}\n invalid_kwargs = {}\n for k, v in kwargs.items():\n if k in valid_kwargs_to_pass:\n valid_kwargs[k] = v\n else:\n invalid_kwargs[k] = v\n if invalid_kwargs:\n invalid_kwargs_names = [f\"'{k}'\" for k in invalid_kwargs.keys()]\n invalid_kwargs_names = ', '.join(invalid_kwargs_names)\n if is_instance_method:\n cls_prefix = args[0].__class__.__name__ + '.'\n elif is_class_method:\n cls_prefix = args[0].__name__ + '.'\n else:\n cls_prefix = ''\n warnings.warn(f'The following named arguments are not valid for `{cls_prefix}{func.__name__}` and were ignored: {invalid_kwargs_names}', UserWarning, stacklevel=2)\n return func(*args, **valid_kwargs)\n return wrapper\n return decorator", "docstring": "Decorator to filter out named arguments that are not in the function signature.\n\n This decorator ensures that only the keyword arguments that match the function's signature, or are specified in the\n `extra` list, are passed to the function. Any additional keyword arguments are filtered out and a warning is issued.\n\n Parameters:\n extra (`Optional[list]`, *optional*):\n A list of extra keyword argument names that are allowed even if they are not in the function's signature.\n\nReturns:\n Callable:\n A decorator that wraps the function and filters out invalid keyword arguments.\n\n Example usage:\n\n ```python\n @filter_out_non_signature_kwargs(extra=[\"allowed_extra_arg\"])\n def my_function(arg1, arg2, **kwargs):\n print(arg1, arg2, kwargs)\n\n my_function(arg1=1, arg2=2, allowed_extra_arg=3, invalid_arg=4)\n # This will print: 1 2 {\"allowed_extra_arg\": 3}\n # And issue a warning: \"The following named arguments are not valid for `my_function` and were ignored: 'invalid_arg'\"\n ```", "source": "github_repos"} -{"code": "def set_prefix_tokens(self, language: Optional[str]=None, task: Optional[str]=None, predict_timestamps: Optional[bool]=None):\n self.language = language if language is not None else self.language\n self.task = task if task is not None else self.task\n self.predict_timestamps = predict_timestamps if predict_timestamps is not None else self.predict_timestamps", "docstring": "Override the prefix tokens appended to the start of the label sequence. This method can be used standalone to\n update the prefix tokens as required when fine-tuning. 
Example:\n\n ```python\n >>> # instantiate the tokenizer and set the prefix token to Spanish\n >>> tokenizer = WhisperTokenizer.from_pretrained(\"openai/whisper-tiny\", language=\"spanish\")\n >>> # now switch the prefix token from Spanish to French\n >>> tokenizer.set_prefix_tokens(language=\"french\")\n ```\n\nArgs:\n language (`str`, *optional*, defaults to `None`):\n The language of the transcription text.\n task (`str`, *optional*, defaults to `None`):\n Task identifier to append at the start of sequence (if any).\n predict_timestamps (`bool`, *optional*, defaults to `None`):\n Whether to omit the `<|notimestamps|>` token at the start of the sequence.", "source": "github_repos"} -{"code": "def make_coordinated_read_dataset(self, cluster, num_consumers, sharding_policy=data_service_ops.ShardingPolicy.OFF):\n if sharding_policy not in [data_service_ops.ShardingPolicy.OFF, data_service_ops.ShardingPolicy.DYNAMIC]:\n raise ValueError(f'Unsupported sharding policy: {sharding_policy}')\n ds = dataset_ops.Dataset.from_tensors(math_ops.cast(0, dtypes.int64))\n ds = ds.concatenate(dataset_ops.Dataset.random())\n\n def make_group(x):\n x = x % 2 ** 32\n return dataset_ops.Dataset.range(x * num_consumers, (x + 1) * num_consumers)\n ds = ds.flat_map(make_group)\n consumers = []\n for consumer_index in range(num_consumers):\n consumers.append(self.make_distributed_dataset(ds, cluster, job_name='test', processing_mode=sharding_policy, consumer_index=consumer_index, num_consumers=num_consumers))\n ds = dataset_ops.Dataset.from_tensor_slices(consumers)\n ds = ds.interleave(lambda x: x, cycle_length=num_consumers, num_parallel_calls=num_consumers)\n return ds", "docstring": "Creates a dataset that performs coordinated reads.\n\n The dataset simulates `num_consumers` consumers by using parallel\n interleave to read with `num_consumers` threads, one for each consumer. The\n nth element of the dataset is produced by consumer `n % num_consumers`.\n\n The dataset executed on each worker will produce groups of `num_consumers`\n sequentially increasing numbers. For example, if `num_consumers=3` a worker\n dataset could produce [0, 1, 2, 9, 10, 11, 21, 22, 23]. This enables\n `checkCoordinatedReadGroups` below to assess whether the values received in\n each step came from the same group.\n\nArgs:\n cluster: A tf.data service `TestCluster`.\n num_consumers: The number of consumers to simulate.\n sharding_policy: The sharding policy to use. Currently only OFF and\n DYNAMIC are supported.\n\nReturns:\n A dataset that simulates reading with `num_consumers` consumers.", "source": "github_repos"} -{"code": "def __init__(self, thresholds=None, name=None, dtype=None):\n super().__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES, thresholds=thresholds, name=name, dtype=dtype)", "docstring": "Calculates the number of true negatives.\n\n If `sample_weight` is given, calculates the sum of the weights of\n true negatives. This metric creates one local variable, `accumulator`\n that is used to keep track of the number of true negatives.\n\n If `sample_weight` is `None`, weights default to 1.\n Use `sample_weight` of 0 to mask values.\n\nArgs:\n thresholds: (Optional) Defaults to `0.5`. A float value, or a Python\n list/tuple of float threshold values in `[0, 1]`. A threshold is\n compared with prediction values to determine the truth value of\n predictions (i.e., above the threshold is `True`, below is `False`).\n If used with a loss function that sets `from_logits=True` (i.e. 
no\n sigmoid applied to predictions), `thresholds` should be set to 0.\n One metric value is generated for each threshold value.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExample:\n >>> m = keras.metrics.TrueNegatives()\n >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])\n >>> m.result()\n 2.0\n\n >>> m.reset_state()\n >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])\n >>> m.result()\n 1.0", "source": "github_repos"} -{"code": "def __init__(self, target, converter_target_spec=None, converter_allow_custom_ops=None, raise_exception=False):\n functools.update_wrapper(self, target)\n self._func = target\n self._obj_func = None\n self._verified = False\n self._log_messages = []\n self._raise_exception = raise_exception\n self._converter_target_spec = converter_target_spec\n self._converter_allow_custom_ops = converter_allow_custom_ops", "docstring": "Initialize the decorator object.\n\n Here is the description of the object variables.\n - _func : decorated function.\n - _obj_func : for class object, we need to use this object to provide `self`\n instance as 1 first argument.\n - _verified : whether the compatibility is checked or not.\n\nArgs:\n target: decorated function.\n converter_target_spec : target_spec of TFLite converter parameter.\n converter_allow_custom_ops : allow_custom_ops of TFLite converter\n parameter.\n raise_exception : to raise an exception on compatibility issues.\n User need to use get_compatibility_log() to check details.", "source": "github_repos"} -{"code": "def _overwrite_model_variables_with_average_value(self, trainable_variables):\n trainable_variables = [v.value if isinstance(v, backend.Variable) else v for v in trainable_variables]\n for var, average_var in zip(trainable_variables, self._model_variables_moving_average):\n self._distribution_strategy.extended.update(var, lambda a, b: a.assign(b), args=(average_var,))", "docstring": "Overwrite model variables with their moving average values.\n\n This function overwrites variables on each device.\n\nArgs:\n var_list: list of model variables.", "source": "github_repos"} -{"code": "def _replay(self, trial_id: int, dna: DNA, reward: Union[None, float, Tuple[float]]):\n del trial_id\n if reward is not None:\n self._feedback(dna, reward)", "docstring": "Replay a single DNA from the history for state recovery.\n\n The default implementation to call `DNAGenerator._feedback`. Subclasses that\n have states and can be recovered from replaying the history should override\n this method. See class `Sweeping` as an example.\n\nArgs:\n trial_id: A zero-based integer as the trial ID for the DNA.\n dna: A historically proposed DNA.\n reward: The reward for the DNA. 
If None, the reward is not yet fed back\n to the optimizer.", "source": "github_repos"} -{"code": "def __init__(self, num_shards):\n self._num_shards = num_shards", "docstring": "Creates a new `FixedShardsPartitioner`.\n\nArgs:\n num_shards: `int`, number of shards to partition.", "source": "github_repos"} -{"code": "def sheets_tab_copy(config, auth, from_sheet_url_or_name, from_sheet_tab, to_sheet_url_or_name, to_sheet_tab, overwrite=False):\n if config.verbose:\n print('SHEETS COPY', from_sheet_url_or_name, from_sheet_tab, to_sheet_url_or_name, to_sheet_tab)\n from_sheet_id, from_tab_id = sheets_tab_id(config, auth, from_sheet_url_or_name, from_sheet_tab)\n to_sheet_id, to_tab_id = sheets_tab_id(config, auth, to_sheet_url_or_name, to_sheet_tab)\n if overwrite or to_tab_id is None:\n copy_sheet = API_Sheets(config, auth).spreadsheets().sheets().copyTo(spreadsheetId=from_sheet_id, sheetId=from_tab_id, body={'destinationSpreadsheetId': to_sheet_id}).execute()\n body = {'requests': []}\n if to_tab_id:\n body['requests'].append({'deleteSheet': {'sheetId': to_tab_id}})\n body['requests'].append({'updateSheetProperties': {'properties': {'sheetId': copy_sheet['sheetId'], 'title': to_sheet_tab}, 'fields': 'title'}})\n API_Sheets(config, auth).spreadsheets().batchUpdate(spreadsheetId=to_sheet_id, body=body).execute()", "docstring": "Copy a tab to a specific name, without the 'Copy Of...'.\n\nArgs:\n config - see starthinker/util/configuration.py\n auth - user or service\n from_url_or_name - one of: URL, document title, or id\n from_sheet_tab - name of tab to get id for\n to_url_or_name - one of: URL, document title, or id\n to_sheet_tab - name of tab to get id for\n overwrite - if false will not perform operation if destination exists.\n\n No Return", "source": "github_repos"} -{"code": "def _process_debug_graph_node(self, node):\n if is_debug_node(node.name):\n return\n if node.name in self._node_inputs:\n raise ValueError(\"Duplicate node name on device %s: '%s'\" % (self._device_name, node.name))\n self._node_attributes[node.name] = node.attr\n self._node_inputs[node.name] = []\n self._node_ctrl_inputs[node.name] = []\n self._node_recipients[node.name] = []\n self._node_ctrl_recipients[node.name] = []\n if node.name not in self._node_devices:\n self._node_devices[node.name] = set()\n self._node_devices[node.name].add(node.device if node.device else self._device_name)\n self._node_op_types[node.name] = node.op\n self._ref_args[node.name] = self._get_ref_args(node)\n for inp in node.input:\n if is_copy_node(inp) and (node.op == '_Send' or node.op == '_Retval'):\n self._copy_send_nodes.append(node.name)\n if inp.startswith('^'):\n cinp = inp[1:]\n self._node_ctrl_inputs[node.name].append(cinp)\n else:\n self._node_inputs[node.name].append(inp)", "docstring": "Process a node from the debug GraphDef.\n\nArgs:\n node: (NodeDef) A partition-graph node to be processed.\n\nRaises:\n ValueError: If duplicate node names are encountered.", "source": "github_repos"} -{"code": "def indicator_column(categorical_column):\n if not isinstance(categorical_column, (CategoricalColumn, fc_old._CategoricalColumn)):\n raise ValueError('Unsupported input type. Input must be a CategoricalColumn. Given: {}'.format(categorical_column))\n return IndicatorColumn(categorical_column)", "docstring": "Represents multi-hot representation of given categorical column.\n\n - For DNN model, `indicator_column` can be used to wrap any\n `categorical_column_*` (e.g., to feed to DNN). 
Consider to Use\n `embedding_column` if the number of buckets/unique(values) are large.\n\n - For Wide (aka linear) model, `indicator_column` is the internal\n representation for categorical column when passing categorical column\n directly (as any element in feature_columns) to `linear_model`. See\n `linear_model` for details.\n\n ```python\n name = indicator_column(categorical_column_with_vocabulary_list(\n 'name', ['bob', 'george', 'wanda']))\n columns = [name, ...]\n features = tf.io.parse_example(..., features=make_parse_example_spec(columns))\n dense_tensor = input_layer(features, columns)\n\n dense_tensor == [[1, 0, 0]] # If \"name\" bytes_list is [\"bob\"]\n dense_tensor == [[1, 0, 1]] # If \"name\" bytes_list is [\"bob\", \"wanda\"]\n dense_tensor == [[2, 0, 0]] # If \"name\" bytes_list is [\"bob\", \"bob\"]\n ```\n\nArgs:\n categorical_column: A `CategoricalColumn` which is created by\n `categorical_column_with_*` or `crossed_column` functions.\n\nReturns:\n An `IndicatorColumn`.\n\nRaises:\n ValueError: If `categorical_column` is not CategoricalColumn type.", "source": "github_repos"} -{"code": "def get_metadata(self, key: str, per_trial: bool=True) -> Optional[Any]:", "docstring": "Gets metadata for current trial or current sampling.\n\nArgs:\n key: A string as key to metadata.\n per_trial: If True, the key is retrieved per curent trial. Otherwise, it\n is retrieved per current sampling.\n\nReturns:\n A value that can be deserialized by `pg.from_json_str`.", "source": "github_repos"} -{"code": "def load_and_use(path):\n example_cond, example_a, example_b = _get_example_tensors()\n restored = tf.saved_model.load(path)\n return restored.use_multiplex(example_cond, example_a, example_b)", "docstring": "Load and used a model that was previously created by `save()`.\n\nArgs:\n path: Directory to load model from, typically the same directory that was\n used by save().\n\nReturns:\n A tensor that is the result of using the multiplex op that is\n tf.constant([1, 20, 3, 40, 5], dtype=tf.int64).", "source": "github_repos"} -{"code": "def _build_select_and_next_from_expressions(self, builders: Tuple[column_expression_builder.ColumnExpressionBuilder, ...], child_builders: MutableSequence[column_expression_builder.ColumnExpressionBuilder], columns_selected: MutableSequence[str]) -> Tuple[MutableSequence[str], MutableSequence[str]]:\n select_expressions, next_from_expressions = ([], [])\n for column_name in columns_selected:\n select_expressions.append(f'(SELECT {column_name}) AS {column_name}')\n for builder in builders:\n child_builders.extend(builder.children)\n column_alias = _get_column_alias(builder)\n if builder.column_name:\n columns_selected.append(column_alias)\n needs_unnest = builder.needs_unnest or builder.children\n select_expression = self._encode(builder=builder, select_scalars_as_array=needs_unnest)\n if needs_unnest:\n select_expression = f'{select_expression} AS {column_alias}_needs_unnest_'\n next_from_expressions.append(self._build_next_from_expression(column_alias))\n else:\n select_expression = f'{select_expression} AS {column_alias}'\n select_expressions.append(select_expression)\n return (select_expressions, next_from_expressions)", "docstring": "Build select expressions and next from expressions from the builders.\n\nArgs:\n builders: the immutable current builders to compute select expressions.\n child_builders: collects the current given builders' children for the next\n round.\n columns_selected: accumulatively collects columns which has already been\n handled 
completely.\n\nReturns:\n The select expressions and next from expressions computed form the given\n builders.", "source": "github_repos"} -{"code": "def info(msg: str, *args, **kwargs) -> None:\n _DEFAULT_LOGGER.info(msg, *args, **kwargs)", "docstring": "Logs info message.\n\nArgs:\n msg: Message with possible format string.\n *args: Values for variables in the format string.\n **kwargs: Keyword arguments for the logger.", "source": "github_repos"} -{"code": "def _decorator(func):\n opname = func.__name__\n cap_sym_name = sym_name.capitalize()\n func.__doc__ = '\\n Assert the condition `x {sym}` holds element-wise.\\n\\n When running in graph mode, you should add a dependency on this operation\\n to ensure that it runs. Example of adding a dependency to an operation:\\n\\n ```python\\n with tf.control_dependencies([tf.debugging.{opname}(x, y)]):\\n output = tf.reduce_sum(x)\\n ```\\n\\n {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.\\n If `x` is empty this is trivially satisfied.\\n\\n Args:\\n x: Numeric `Tensor`.\\n data: The tensors to print out if the condition is False. Defaults to\\n error message and first few entries of `x`.\\n summarize: Print this many entries of each tensor.\\n message: A string to prefix to the default message.\\n name: A name for this operation (optional). Defaults to \"{opname}\".\\n\\n Returns:\\n Op that raises `InvalidArgumentError` if `x {sym}` is False.\\n @compatibility(eager)\\n returns None\\n @end_compatibility\\n\\n Raises:\\n InvalidArgumentError: if the check can be performed immediately and\\n `x {sym}` is False. The check can be performed immediately during\\n eager execution or if `x` is statically known.\\n '.format(sym=sym, sym_name=cap_sym_name, opname=opname)\n return func", "docstring": "Generated decorator that adds the appropriate docstring to the function for symbol `sym`.\n\nArgs:\n func: Function for a TensorFlow op\n\nReturns:\n Version of `func` with documentation attached.", "source": "github_repos"} -{"code": "def __init__(self, key_dtype, value_dtype):\n self._key_dtype = dtypes.as_dtype(key_dtype)\n self._value_dtype = dtypes.as_dtype(value_dtype)\n super(LookupInterface, self).__init__()", "docstring": "Construct a lookup table interface.\n\nArgs:\n key_dtype: The table key type.\n value_dtype: The table value type.", "source": "github_repos"} -{"code": "def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states\n output_attentions = output_attentions if output_attentions else self.config.output_attentions\n return_dict = return_dict if return_dict else self.config.return_dict\n outputs = self.hubert(input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n return outputs", "docstring": "Returns:\n\nExample:\n ```python\n >>> from transformers import AutoProcessor, TFHubertModel\n >>> from datasets import 
load_dataset\n >>> import soundfile as sf\n\n >>> processor = AutoProcessor.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n >>> model = TFHubertModel.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n\n\n >>> def map_to_array(batch):\n ... speech, _ = sf.read(batch[\"file\"])\n ... batch[\"speech\"] = speech\n ... return batch\n\n\n >>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n >>> ds = ds.map(map_to_array)\n\n >>> input_values = processor(ds[\"speech\"][0], return_tensors=\"tf\").input_values # Batch size 1\n >>> hidden_states = model(input_values).last_hidden_state\n ```", "source": "github_repos"} -{"code": "def __init__(self, given, enum_type, options):\n super(InvalidEnumValue, self).__init__('Could not parse [{0}] into a valid {1}. Valid values are [{2}]'.format(given, enum_type, ', '.join(options)))", "docstring": "Constructs a new exception.\n\nArgs:\n given: str, The given string that could not be parsed.\n enum_type: str, The human readable name of the enum you were trying to\n parse.\n options: list(str), The valid values for this enum.", "source": "github_repos"} -{"code": "def broadcast_to(rt_input, shape: DynamicRaggedShape):\n if not isinstance(shape, DynamicRaggedShape):\n raise TypeError('shape must be a DynamicRaggedShape')\n rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n origin_shape = None\n if ragged_tensor.is_ragged(rt_input):\n if shape.num_row_partitions != 0:\n if rt_input.row_splits.dtype != shape.dtype:\n raise ValueError('Cannot coerce row_splits.dtype')\n else:\n shape = shape.with_dtype(rt_input.row_splits.dtype)\n origin_shape = DynamicRaggedShape.from_tensor(rt_input)\n elif shape.num_row_partitions != 0:\n origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=shape.dtype)\n else:\n origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=dtypes.int64)\n shape = shape.with_dtype(dtype=dtypes.int64)\n broadcaster = _get_broadcaster(origin_shape, shape)\n return broadcaster.broadcast(rt_input)", "docstring": "Broadcasts a potentially ragged tensor to a ragged shape.\n\n Tiles `rt_input` as necessary to match the given shape.\n\n Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`.\n\nArgs:\n rt_input: The potentially ragged tensor to broadcast.\n shape: A `DynamicRaggedShape`\n\nReturns:\n A potentially ragged tensor whose values are taken from\n `rt_input`, and whose shape matches `shape`.", "source": "github_repos"} -{"code": "def _wait_for_glob(self, pattern, timeout_secs, for_checkpoint=True):\n end_time = time.time() + timeout_secs\n while time.time() < end_time:\n if for_checkpoint:\n if checkpoint_management.checkpoint_exists(pattern):\n return\n elif len(gfile.Glob(pattern)) >= 1:\n return\n time.sleep(0.05)\n self.assertFalse(True, 'Glob never matched any file: %s' % pattern)", "docstring": "Wait for a checkpoint file to appear.\n\nArgs:\n pattern: A string.\n timeout_secs: How long to wait for in seconds.\n for_checkpoint: whether we're globbing for checkpoints.", "source": "github_repos"} -{"code": "def get_logits_and_probs(logits=None, probs=None, multidimensional=False, validate_args=False, name='get_logits_and_probs', dtype=None):\n with ops.name_scope(name, values=[probs, logits]):\n if (probs is None) == (logits is None):\n raise ValueError('Must pass probs or logits, but not both.')\n if probs is None:\n logits = ops.convert_to_tensor(logits, name='logits', dtype=dtype)\n if not logits.dtype.is_floating:\n raise 
TypeError('logits must having floating type.')\n if multidimensional:\n if validate_args:\n logits = embed_check_categorical_event_shape(logits)\n return (logits, nn.softmax(logits, name='probs'))\n return (logits, math_ops.sigmoid(logits, name='probs'))\n probs = ops.convert_to_tensor(probs, name='probs', dtype=dtype)\n if not probs.dtype.is_floating:\n raise TypeError('probs must having floating type.')\n if validate_args:\n with ops.name_scope('validate_probs'):\n one = constant_op.constant(1.0, probs.dtype)\n dependencies = [check_ops.assert_non_negative(probs)]\n if multidimensional:\n probs = embed_check_categorical_event_shape(probs)\n dependencies += [check_ops.assert_near(math_ops.reduce_sum(probs, -1), one, message='probs does not sum to 1.')]\n else:\n dependencies += [check_ops.assert_less_equal(probs, one, message='probs has components greater than 1.')]\n probs = control_flow_ops.with_dependencies(dependencies, probs)\n with ops.name_scope('logits'):\n if multidimensional:\n return (math_ops.log(probs), probs)\n return (math_ops.log(probs) - math_ops.log1p(-1.0 * probs), probs)", "docstring": "Converts logit to probabilities (or vice-versa), and returns both.\n\nArgs:\n logits: Floating-point `Tensor` representing log-odds.\n probs: Floating-point `Tensor` representing probabilities.\n multidimensional: Python `bool`, default `False`. If `True`, represents\n whether the last dimension of `logits` or `probs`, a `[N1, N2, ... k]`\n dimensional tensor, representing the logit or probability of `shape[-1]`\n classes.\n validate_args: Python `bool`, default `False`. When `True`, either assert `0\n <= probs <= 1` (if not `multidimensional`) or that the last dimension of\n `probs` sums to one.\n name: A name for this operation (optional).\n dtype: `tf.DType` to prefer when converting args to `Tensor`s.\n\nReturns:\n logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or\n `1`, then the corresponding entry in the returned logit will be `-Inf` and\n `Inf` respectively.\n\nRaises:\n ValueError: if neither `probs` nor `logits` were passed in, or both were.", "source": "github_repos"} -{"code": "def make_seeds(self, count=1):\n alg = self.algorithm\n if alg in (a.value for a in random_ops_util.Algorithm):\n keys = self._make_int64_keys(shape=[count])\n zeros = array_ops.zeros_like(keys)\n return array_ops_stack.stack([keys, zeros])\n else:\n raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))", "docstring": "Generates seeds for stateless random ops.\n\n For example:\n\n ```python\n seeds = get_global_generator().make_seeds(count=10)\n for i in range(10):\n seed = seeds[:, i]\n numbers = stateless_random_normal(shape=[2, 3], seed=seed)\n ...\n ```\n\nArgs:\n count: the number of seed pairs (note that stateless random ops need a\n pair of seeds to invoke).\n\nReturns:\n A tensor of shape [2, count] and dtype int64.", "source": "github_repos"} -{"code": "def output_mask(self):\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)", "docstring": "Retrieves the output mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. 
if it is connected to one incoming layer.\n\nReturns:\n Output mask tensor (potentially None) or list of output\n mask tensors.\n\nRaises:\n AttributeError: if the layer is connected to\n more than one incoming layers.", "source": "github_repos"} -{"code": "def get_master_port(real_launcher=False):\n master_port_base = os.environ.get('DS_TEST_PORT', DEFAULT_MASTER_PORT)\n if not real_launcher:\n master_port_base = str(int(master_port_base) + 1)\n return master_port_base", "docstring": "When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed)\n the issue is that once the port is tied it can't be used anywhere else outside of this process,\n since torch.dist doesn't free the port until the process exits. Therefore for the sake of being\n able to run both emulated launcher and normal launcher tests we need 2 distinct ports.\n\n This function will give the right port in the right context. For real launcher it'll give the\n base port, for emulated launcher it'll give the base port + 1. In both cases a string is\n returned.\n\nArgs:\n `real_launcher`: whether a real launcher is going to be used, or the emulated one", "source": "github_repos"} -{"code": "def __init__(self, seed=None, name=None, **kwargs):\n if name is None:\n name = auto_name(self.__class__.__name__)\n self.name = name\n custom_backend = kwargs.pop('backend', None)\n if kwargs:\n raise ValueError(f'Unrecognized keyword arguments: {kwargs}')\n if custom_backend is not None:\n self.backend = custom_backend\n else:\n self.backend = backend\n self._initial_seed = seed\n if seed is None:\n seed = make_default_seed()\n if not isinstance(seed, int):\n raise ValueError(f'Argument `seed` must be an integer. Received: seed={seed}')\n\n def seed_initializer(*args, **kwargs):\n dtype = kwargs.get('dtype', None)\n return self.backend.convert_to_tensor([seed, 0], dtype=dtype)\n with self.backend.name_scope(self.name, caller=self):\n self.state = self.backend.Variable(seed_initializer, shape=(2,), dtype=self.backend.random_seed_dtype(), trainable=False, aggregation='none', name='seed_generator_state')", "docstring": "Generates variable seeds upon each call to a function generating\n random numbers.\n\n In Keras, all random number generators (such as\n `keras.random.normal()`) are stateless, meaning that if you pass an\n integer seed to them (such as `seed=42`), they will return the same\n values for repeated calls. 
To get different values for each\n call, a `SeedGenerator` providing the state of the random generator\n has to be used.\n\n Note that all the random number generators have a default seed of None,\n which implies that an internal global SeedGenerator is used.\n If you need to decouple the RNG from the global state you can provide\n a local `StateGenerator` with either a deterministic or random initial\n state.\n\n Remark concerning the JAX backen: Note that the use of a local\n `StateGenerator` as seed argument is required for JIT compilation of\n RNG with the JAX backend, because the use of global state is not\n supported.\n\nExample:\n ```python\n seed_gen = keras.random.SeedGenerator(seed=42)\n values = keras.random.normal(shape=(2, 3), seed=seed_gen)\n new_values = keras.random.normal(shape=(2, 3), seed=seed_gen)\n ```\n\n Usage in a layer:\n\n ```python\n class Dropout(keras.Layer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.seed_generator = keras.random.SeedGenerator(1337)\n\n def call(self, x, training=False):\n if training:\n return keras.random.dropout(\n x, rate=0.5, seed=self.seed_generator\n )\n return x\n ```", "source": "github_repos"} -{"code": "def nondominated_sort(inputs: List[pg.DNA]) -> List[List[pg.DNA]]:\n dependency_graph = [[] for i in range(len(inputs))]\n indegree = [0] * len(inputs)\n queue = []\n for i in range(len(inputs)):\n for j in range(len(inputs)):\n if dominates(base.get_fitness(inputs[i]), base.get_fitness(inputs[j])):\n dependency_graph[i].append(j)\n elif dominates(base.get_fitness(inputs[j]), base.get_fitness(inputs[i])):\n indegree[i] += 1\n if indegree[i] == 0:\n queue.append(i)\n result = []\n while queue:\n l = len(queue)\n frontier = []\n for i in range(l):\n parent = queue.pop(0)\n frontier.append(inputs[parent])\n for child in dependency_graph[parent]:\n indegree[child] -= 1\n if indegree[child] == 0:\n queue.append(child)\n result.append(frontier)\n return result", "docstring": "Algorithm fast-non-dominated-sort implementation using topological sort.\n\n Please see section III A in the original paper.\n\nArgs:\n inputs: A list of DNA that need to be sorted.\n\nReturns:\n A list of sorted frontier (a list of DNA).", "source": "github_repos"} -{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], patch_size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n if 'longest_edge' in size:\n size = (size['longest_edge'], size['longest_edge'])\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n else:\n raise ValueError(\"size must contain either 'longest_edge' or 'height' and 'width'.\")\n if 'height' in patch_size and 'width' in patch_size:\n patch_size = (patch_size['height'], patch_size['width'])\n else:\n raise ValueError(\"patch_size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size, input_data_format=input_data_format)\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. 
The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\nArgs:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dict containing the longest possible edge of the image.\n patch_size (`Dict[str, int]`):\n Patch size used to calculate the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.", "source": "github_repos"} -{"code": "def nrows(self):\n if self.rank == 0:\n return None\n return self._ragged_shape[0]", "docstring": "The number of rows in this StructuredTensor (if rank>0).\n\n This means the length of the outer-most dimension of the StructuredTensor.\n\n Notice that if `self.rank > 1`, then this equals the number of rows\n of the first row partition. That is,\n `self.nrows() == self.row_partitions[0].nrows()`.\n\n Otherwise `self.nrows()` will be the first dimension of the field values.\n\nReturns:\n A scalar integer `Tensor` (or `None` if `self.rank == 0`).", "source": "github_repos"} -{"code": "def preprocess(train_data_path: str, feature_thres: int, val_data_path: typing.Optional[str]=None) -> typing.Tuple[Dataset, typing.List[str], typing.Optional[Dataset]]:\n features = extract_features(train_data_path, feature_thres)\n feature_index = dict(((feature, i) for i, feature in enumerate(features)))\n train_dataset = load_dataset(train_data_path, feature_index)\n val_dataset = load_dataset(val_data_path, feature_index) if val_data_path else None\n return (train_dataset, features, val_dataset)", "docstring": "Loads entries and translates them into JAX arrays. The boolean matrix of\n the input data is represented by row indices and column indices of True values\n instead of the matrix itself for memory efficiency, assuming the matrix is\n highly sparse. 
Row and column indices are not guaranteed to be sorted.\n\nArgs:\n train_data_path (str): A file path to the training data file.\n feature_thres (str): A threshold to filter out features whose number of\n occurances does not exceed the value.\n val_data_path (str, optional): A file path to the validation data file.\n\nReturns:\n A tuple of following items:\n - train_dataset (Dataset): The training dataset.\n - features (List[str]): The list of features.\n - val_dataset (Optional[Dataset]): The validation dataset.\n This becomes None if val_data_path is None.", "source": "github_repos"} -{"code": "def hard_tanh(x):\n return ops.hard_tanh(x)", "docstring": "HardTanh activation function.\n\n It is defined as:\n `hard_tanh(x) = -1 for x < -1`,\n `hard_tanh(x) = x for -1 <= x <= 1`,\n `hard_tanh(x) = 1 for x > 1`.\n\nArgs:\n x: Input tensor.", "source": "github_repos"} -{"code": "def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0.0, **kwargs):\n super(Adadelta, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.lr = backend.variable(lr, name='lr')\n self.decay = backend.variable(decay, name='decay')\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n if epsilon is None:\n epsilon = backend.epsilon()\n self.rho = rho\n self.epsilon = epsilon\n self.initial_decay = decay", "docstring": "Adadelta optimizer.\n\n Adadelta is a more robust extension of Adagrad\n that adapts learning rates based on a moving window of gradient updates,\n instead of accumulating all past gradients. This way, Adadelta continues\n learning even when many updates have been done. Compared to Adagrad, in the\n original version of Adadelta you don't have to set an initial learning\n rate. In this version, initial learning rate and decay factor can\n be set, as in most other Keras optimizers.\n\n It is recommended to leave the parameters of this optimizer\n at their default values.\n\nArgs:\n lr: float >= 0. Initial learning rate, defaults to 1.\n It is recommended to leave it at the default value.\n rho: float >= 0. Adadelta decay factor, corresponding to fraction of\n gradient to keep at each time step.\n epsilon: float >= 0. Fuzz factor.\n If `None`, defaults to `backend.epsilon()`.\n decay: float >= 0. 
Initial learning rate decay.\n\n References:\n - [Adadelta - an adaptive learning rate\n method](http://arxiv.org/abs/1212.5701)", "source": "github_repos"} -{"code": "def _process_new(self, feed_item):\n return {'assetIdentifier': {'name': feed_item.get(FieldMap.CREATIVE_ASSET_FILE_NAME, None), 'type': feed_item.get(FieldMap.CREATIVE_TYPE, None)}}", "docstring": "Creates a new creative asset DCM object from a feed item representing a creative asset from the Bulkdozer feed.\n\n This function simply creates the object to be inserted later by the BaseDAO\n object.\n\nArgs:\n feed_item: Feed item representing the creative asset from the Bulkdozer\n feed.\n\nReturns:\n A creative asset object ready to be inserted in DCM through the API.", "source": "github_repos"} -{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n model_loader = TFModelHandlerTensor(model_uri=known_args.model_path).with_preprocess_fn(lambda image_name: read_image(image_name, known_args.image_dir))\n pipeline = test_pipeline\n if not test_pipeline:\n pipeline = beam.Pipeline(options=pipeline_options)\n image = pipeline | 'ReadImageNames' >> beam.io.ReadFromText(known_args.input) | 'FilterEmptyLines' >> beam.ParDo(filter_empty_lines)\n predictions = image | 'RunInference' >> RunInference(model_loader) | 'PostProcessOutputs' >> beam.ParDo(PostProcessor())\n _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n result = pipeline.run()\n result.wait_until_finish()\n return result", "docstring": "Args:\n argv: Command line arguments defined for this example.\n save_main_session: Used for internal testing.\n test_pipeline: Used for internal testing.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], ElectraForPreTrainingOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n discriminator_sequence_output = discriminator_hidden_states[0]\n logits = self.discriminator_predictions(discriminator_sequence_output)\n loss = None\n if labels is not None:\n loss_fct = nn.BCEWithLogitsLoss()\n if attention_mask is not None:\n active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1\n active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]\n active_labels = labels[active_loss]\n loss = loss_fct(active_logits, active_labels.float())\n else:\n loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())\n if not return_dict:\n output = (logits,) + discriminator_hidden_states[1:]\n return 
(loss,) + output if loss is not None else output\n return ElectraForPreTrainingOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see `input_ids` docstring)\n Indices should be in `[0, 1]`:\n\n - 0 indicates the token is an original token,\n - 1 indicates the token was replaced.\n\nExample:\n ```python\n >>> from transformers import ElectraForPreTraining, AutoTokenizer\n >>> import torch\n\n >>> discriminator = ElectraForPreTraining.from_pretrained(\"google/electra-base-discriminator\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"google/electra-base-discriminator\")\n\n >>> sentence = \"The quick brown fox jumps over the lazy dog\"\n >>> fake_sentence = \"The quick brown fox fake over the lazy dog\"\n\n >>> fake_tokens = tokenizer.tokenize(fake_sentence, add_special_tokens=True)\n >>> fake_inputs = tokenizer.encode(fake_sentence, return_tensors=\"pt\")\n >>> discriminator_outputs = discriminator(fake_inputs)\n >>> predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2)\n\n >>> fake_tokens\n ['[CLS]', 'the', 'quick', 'brown', 'fox', 'fake', 'over', 'the', 'lazy', 'dog', '[SEP]']\n\n >>> predictions.squeeze().tolist()\n [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n ```", "source": "github_repos"} -{"code": "def affine_transform(self, image: np.array, center: Tuple[float], scale: Tuple[float], rotation: float, size: Dict[str, int], data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:\n data_format = input_data_format if data_format is None else data_format\n size = (size['width'], size['height'])\n transformation = get_warp_matrix(rotation, center * 2.0, np.array(size) - 1.0, scale * 200.0)\n image = image if input_data_format == ChannelDimension.LAST else to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)\n image = scipy_warp_affine(src=image, M=transformation, size=(size[1], size[0]))\n image = to_channel_dimension_format(image, data_format, ChannelDimension.LAST)\n return image", "docstring": "Apply an affine transformation to an image.\n\nArgs:\n image (`np.array`):\n Image to transform.\n center (`Tuple[float]`):\n Center of the bounding box (x, y).\n scale (`Tuple[float]`):\n Scale of the bounding box with respect to height/width.\n rotation (`float`):\n Rotation angle in degrees.\n size (`Dict[str, int]`):\n Size of the destination image.\n data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format of the output image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the input image.", "source": "github_repos"} -{"code": "def copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph=None):\n if func_name and (not is_function(func_name, default_graph)):\n raise ValueError(f'Function {func_name} was not found. 
Please make sure the FunctionDef `fdef` is correct.')\n if func_name in copied_functions:\n return\n copied_functions.add(func_name)\n func_def = get_function_def(func_name, default_graph)\n graph_def.library.function.add().CopyFrom(func_def)\n for node_def in func_def.node_def:\n op_def = default_graph.op_def_for_type(node_def.op)\n for attr in op_def.attr:\n if attr.type == 'func':\n func_name = node_def.attr[attr.name].func.name\n copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph)\n elif attr.type == 'list(func)':\n for fn in node_def.attr[attr.name].list.func:\n func_name = fn.name\n copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph)", "docstring": "Recursively copies `FunctionDef`s to `GraphDef`.\n\n It copies the outermost `FunctionDef` and all nested `FunctionDef`s to\n `graph_def`. The `copied_function` enforces that every `FunctionDef` will be\n copied at most once. The `FunctionDef`s will be found from `default_graph` if\n this function was called in graph mode or from eager context if this function\n was called in eager mode.\n\nArgs:\n func_name: The signature name of FunctionDef to be copied to `graph_def`.\n graph_def: The GraphDef that will contain all `FunctionDef`s in its library.\n copied_functions: A set contains all copied function names.\n default_graph: The `tf.Graph` where all `FunctionDef`s will be found\n in graph mode. Not used in eager mode.", "source": "github_repos"} -{"code": "def add_object(self, object_path, weights):\n if not isinstance(weights, dict):\n raise ValueError(f\"Argument `weights` should be a dict where keys are weight names (usually '0', '1', etc.) and values are NumPy arrays. Received: type(weights)={type(weights)}\")\n if '/' in object_path:\n elements = object_path.split('/')\n partial_path = '/'.join(elements[:-1])\n weights_dict = self.weights_dict\n for e in elements[:-1]:\n if e not in weights_dict:\n raise ValueError(f\"Path '{partial_path}' not found in model.\")\n weights_dict = weights_dict[e]\n weights_dict[elements[-1]] = weights\n else:\n self.weights_dict[object_path] = weights", "docstring": "Add a new object to the file (e.g. a layer).\n\nArgs:\n object_path: String, full path of the\n object to add (e.g. `\"layers/dense_2\"`).\n weights: Dict mapping weight names to weight\n values (arrays),\n e.g. `{\"0\": kernel_value, \"1\": bias_value}`.", "source": "github_repos"} -{"code": "def grad(f, has_aux=False):\n\n def check_loss_shape(np_loss):\n if not isinstance(np_loss, tf_np.ndarray):\n raise ValueError('The result of the function to take gradient must be an ndarray.')\n if not np_loss.shape.is_compatible_with([]):\n raise ValueError('The result of the function to take gradient must be a scalar.')\n\n def _f(params, *args):\n \"\"\"The gradient function to be returned.\"\"\"\n with backprop.GradientTape() as g:\n g.watch(nest.flatten(params))\n outputs = f(params, *args)\n if has_aux:\n np_loss, aux = outputs\n else:\n np_loss = outputs\n check_loss_shape(np_loss)\n tf_grads = g.gradient(np_loss, params)\n if has_aux:\n res = (tf_grads, aux)\n else:\n res = tf_grads\n return _tf_to_np(res)\n return _f", "docstring": "Returns a function that computes gradient of f.\n\n Gradients can only be computed through numpy and tensorflow operations and not\n through python float operations and values.\n\nArgs:\n f: a function of type (params, *args) -> scalar. 
'params' can be a nested\n structure (made of lists and tuples) of ndarrays and the gradient is\n evaluated against it. `scalar` is a scalar ndarray.\n has_aux: bool, indicates whether fun returns a pair where the first element\n is considered the output of the mathematical function to be differentiated\n and the second element is auxiliary data.\n\nReturns:\n A gradient function of type (params, *args) -> gradients, where the result\n 'gradients' has the same structure and shapes as 'params'.", "source": "github_repos"} -{"code": "def _build_map(outputs):\n finished_nodes = set()\n nodes_in_progress = set()\n nodes_in_decreasing_depth = []\n layer_indices = {}\n for output in nest.flatten(outputs):\n _build_map_helper(output, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, layer_indices)\n return (nodes_in_decreasing_depth, layer_indices)", "docstring": "This method topologically sorts nodes in order from inputs to outputs.\n\n It uses a depth-first search to topologically sort nodes that appear in the\n _keras_history connectivity metadata of `outputs`.\n\nArgs:\n outputs: the output tensors whose _keras_history metadata should be walked.\n This may be an arbitrary nested structure.\n\nReturns:\n A tuple like (ordered_nodes, layer_to_first_traversal_index)\n ordered_nodes: list of nodes appearing in the keras history, topologically\n sorted from original inputs to the `outputs`.\n (If outputs have different sets of ancestors, the inputs to one output\n may appear after a different output).\n layer_to_first_traversal_index:\n A dict mapping layer to the traversal index in the DFS where it is\n seen. Note: if a layer is shared by several nodes, the dict will only\n store the index corresponding to the *first* time the layer seen.", "source": "github_repos"} -{"code": "def limit_string_length(string, max_len=50):\n if max_len is None or len(string) <= max_len:\n return string\n else:\n return '...' + string[len(string) - max_len:]", "docstring": "Limit the length of input string.\n\nArgs:\n string: Input string.\n max_len: (int or None) If int, the length limit. If None, no limit.\n\nReturns:\n Possibly length-limited string.", "source": "github_repos"} -{"code": "def parameters(self, use_literal_values: bool=False) -> Dict[str, str]:\n value_type = 'choice_and_literal' if use_literal_values else 'choice'\n return self.to_dict(value_type=value_type)", "docstring": "Returns parameters for this DNA to emit based on its spec.\n\n Deprecated: use `to_dict` instead.\n\nArgs:\n use_literal_values: If True, use literal values from DNASpec for Choices,\n otherwise returns '{choice}/{num_candidates} ({literal})'. 
Otherwise\n returns '{choice}/{num_candidates}'.\n\nReturns:\n Dict of parameter names to their values mapped from this DNA.\n\nRaises:\n RuntimeError: If DNA is not associated with a DNASpec.", "source": "github_repos"} -{"code": "def _get_context_id(self, context):\n if context in self._context_to_id:\n return self._context_to_id[context]\n graph_is_new = False\n with self._context_lock:\n if context not in self._context_to_id:\n graph_is_new = True\n context_id = _get_id()\n self._context_to_id[context] = context_id\n if graph_is_new:\n self.get_writer().WriteDebuggedGraph(debug_event_pb2.DebuggedGraph(graph_id=context_id, graph_name=getattr(context, 'name', None), outer_context_id=self._get_outer_context_id(context)))\n return self._context_to_id[context]", "docstring": "Get a unique ID for an op-construction context (e.g., a graph).\n\n If the graph has been encountered before, reuse the same unique ID.\n When encountering a new context (graph), this methods writes a DebugEvent\n proto with the debugged_graph field to the proper DebugEvent file.\n\nArgs:\n context: A context to get the unique ID for. Must be hashable. E.g., a\n Graph object.\n\nReturns:\n A unique ID for the context.", "source": "github_repos"} -{"code": "def on_element(self, element, window, context):\n pass", "docstring": "Called when a new element arrives in a window.\n\nArgs:\n element: the element being added\n window: the window to which the element is being added\n context: a context (e.g. a TriggerContext instance) for managing state\n and setting timers", "source": "github_repos"} -{"code": "def cached_session(self, graph=None, config=None, target=None):\n config = self._create_config(config)\n if target is None:\n target = self._default_target\n if getattr(self._thread_local, 'cached_session', None) is None:\n self._thread_local.cached_session = session.Session(graph=None, config=config, target=target)\n sess = self._thread_local.cached_session\n with sess.graph.as_default(), sess.as_default():\n yield sess", "docstring": "Create a test session with master target set to the testing cluster.\n\n Creates a test session that connects to the local testing cluster.\n The session is only created once per test and then reused.\n\nArgs:\n graph: Optional graph to use during the returned session.\n config: An optional config_pb2.ConfigProto to use to configure the\n session.\n target: the target of session to connect to.\n\nYields:\n A Session object that should be used as a context manager to surround\n the graph building and execution code in a test case. Note that the\n session will live until the end of the test.", "source": "github_repos"} -{"code": "def create_input_data_based_on_hw_requirement(num_chip, max_unique_ids_per_partition, per_sc_vocab_size, per_sc_sample_count, num_minibatches_per_physical_sparse_core):\n num_sc_per_chip = 4\n num_physical_replica = num_chip * num_sc_per_chip\n col_ids = []\n row_ids = []\n gains = []\n smallest_num_division = np.power(2, np.ceil(np.log2(num_minibatches_per_physical_sparse_core)))\n division_size = (per_sc_vocab_size + smallest_num_division - 1) // smallest_num_division\n assert division_size >= max_unique_ids_per_partition, f'The max_unique_ids_per_partition is set to {max_unique_ids_per_partition} and the number of minibatches per sparse core is set to {num_minibatches_per_physical_sparse_core}. 
But the vocab size per sparse core is {per_sc_vocab_size} which is not going to fit that many minibatches, consider setting the number of minibatches smaller.'\n per_sc_per_minibatch_id_nums_for_each_replica = np.random.randint(max_unique_ids_per_partition * (num_minibatches_per_physical_sparse_core - 1) + 1, max_unique_ids_per_partition * num_minibatches_per_physical_sparse_core + 1, size=num_physical_replica)\n per_chip_sample_count = per_sc_sample_count * num_sc_per_chip\n for chip_id in range(num_chip):\n for sc_id in range(num_sc_per_chip):\n np.random.shuffle(per_sc_per_minibatch_id_nums_for_each_replica)\n for physical_replica_id in range(num_physical_replica):\n physical_replica_id_nums = per_sc_per_minibatch_id_nums_for_each_replica[physical_replica_id]\n local_col_ids = np.array([])\n for i in range(num_minibatches_per_physical_sparse_core):\n local_col_ids_minibatch_size = max_unique_ids_per_partition\n if i == num_minibatches_per_physical_sparse_core - 1:\n local_col_ids_minibatch_size = physical_replica_id_nums - i * max_unique_ids_per_partition\n local_col_ids = np.append(local_col_ids, np.random.choice(np.arange(division_size), size=local_col_ids_minibatch_size, replace=False) + i * division_size)\n local_row_ids = np.random.randint(low=0, high=per_sc_sample_count, size=physical_replica_id_nums)\n row_ids += list(local_row_ids + chip_id * per_chip_sample_count + sc_id * per_sc_sample_count)\n col_ids += list(local_col_ids * num_physical_replica + physical_replica_id)\n gains += list(np.random.random(size=physical_replica_id_nums))\n return (np.array(row_ids), np.array(col_ids), np.array(gains))", "docstring": "Create the coo tensor based on hardware requirements.\n\nArgs:\n num_chip: number of chips in the tpu system.\n max_unique_ids_per_partition: max unique ids per physical replica\n per_sc_vocab_size: per sc shard of table size.\n per_sc_sample_count: per sc sample count.\n num_minibatches_per_physical_sparse_core: per sc minibatch number.\n\nReturns:\n row_ids, col_ids, gains and splits", "source": "github_repos"} -{"code": "def solve_triangular(a, b, lower=False):\n if any_symbolic_tensors((a, b)):\n return SolveTriangular(lower).symbolic_call(a, b)\n return _solve_triangular(a, b, lower)", "docstring": "Solves a linear system of equations given by `a x = b`.\n\nArgs:\n a: A tensor of shape `(..., M, M)` representing the coefficients matrix.\n b: A tensor of shape `(..., M)` or `(..., M, N)` representing the\n right-hand side or \"dependent variable\" matrix.\n\nReturns:\n A tensor of shape `(..., M)` or `(..., M, N)` representing the solution\n of the linear system. Returned shape is identical to `b`.", "source": "github_repos"} -{"code": "def update_state_wrapper(update_state_fn):\n\n def decorated(metric_obj, *args, **kwargs):\n \"\"\"Decorated function with `add_update()`.\"\"\"\n strategy = distribute_lib.get_strategy()\n for weight in metric_obj.weights:\n if backend.is_tpu_strategy(strategy) and (not strategy.extended.variable_created_in_scope(weight)) and (not distribute_lib.in_cross_replica_context()):\n raise ValueError('Trying to run metric.update_state in replica context when the metric was not created in TPUStrategy scope. Make sure the keras Metric is created in TPUstrategy scope. 
')\n with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):\n update_op = update_state_fn(*args, **kwargs)\n if update_op is not None:\n metric_obj.add_update(update_op)\n return update_op\n return tf_decorator.make_decorator(update_state_fn, decorated)", "docstring": "Decorator to wrap metric `update_state()` with `add_update()`.\n\nArgs:\n update_state_fn: function that accumulates metric statistics.\n\nReturns:\n Decorated function that wraps `update_state_fn()` with `add_update()`.", "source": "github_repos"} -{"code": "def __init__(self, underlying_result, pipeline_instrument):\n super().__init__(underlying_result.state)\n self._underlying_result = underlying_result\n self._pipeline_instrument = pipeline_instrument", "docstring": "Constructor of PipelineResult.\n\nArgs:\n underlying_result: (PipelineResult) the result returned by the underlying\n runner running the pipeline.\n pipeline_instrument: (PipelineInstrument) pipeline instrument describing\n the pipeline being executed with interactivity applied and related\n metadata including where the interactivity-backing cache lies.", "source": "github_repos"} -{"code": "def recipe_dv360_segmentology(config, auth_read, recipe_timezone, auth_write, recipe_name, date_range, recipe_slug, partners, advertisers):\n dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})\n bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})\n dbm(config, {'auth': auth_read, 'report': {'filters': {'FILTER_PARTNER': {'values': partners}, 'FILTER_ADVERTISER': {'values': advertisers}}, 'body': {'timezoneCode': recipe_timezone, 'metadata': {'title': recipe_name, 'dataRange': date_range, 'format': 'CSV'}, 'params': {'type': 'TYPE_CROSS_PARTNER', 'groupBys': ['FILTER_PARTNER', 'FILTER_PARTNER_NAME', 'FILTER_ADVERTISER', 'FILTER_ADVERTISER_NAME', 'FILTER_MEDIA_PLAN', 'FILTER_MEDIA_PLAN_NAME', 'FILTER_ZIP_POSTAL_CODE'], 'metrics': ['METRIC_BILLABLE_IMPRESSIONS', 'METRIC_CLICKS', 'METRIC_TOTAL_CONVERSIONS']}, 'schedule': {'frequency': 'WEEKLY'}}}})\n dbm(config, {'auth': auth_read, 'report': {'name': recipe_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV360_KPI', 'header': True, 'schema': [{'name': 'Partner_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Partner', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Advertiser_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Advertiser', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Campaign_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Campaign', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Zip', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'Impressions', 'type': 'FLOAT', 'mode': 'NULLABLE'}, {'name': 'Clicks', 'type': 'FLOAT', 'mode': 'NULLABLE'}, {'name': 'Conversions', 'type': 'FLOAT', 'mode': 'NULLABLE'}]}}})\n bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\\n Partner_Id,\\n Partner,\\n Advertiser_Id,\\n Advertiser,\\n Campaign_Id,\\n Campaign,\\n Zip,\\n SAFE_DIVIDE(Impressions, SUM(Impressions) OVER(PARTITION BY Advertiser_Id)) AS Impression,\\n SAFE_DIVIDE(Clicks, Impressions) AS Click,\\n SAFE_DIVIDE(Conversions, Impressions) AS Conversion,\\n Impressions AS Impressions FROM\\n `{dataset}.DV360_KPI`; ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'DV360_KPI_Normalized'}})\n census(config, {'auth': auth_write, 'normalize': 
{'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})\n census(config, {'auth': auth_write, 'correlate': {'join': 'Zip', 'pass': ['Partner_Id', 'Partner', 'Advertiser_Id', 'Advertiser', 'Campaign_Id', 'Campaign'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion'], 'dataset': recipe_slug, 'table': 'DV360_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})", "docstring": "DV360 funnel analysis using Census data.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n recipe_timezone (timezone) - Timezone for report dates.\n auth_write (authentication) - Authorization used for writing data.\n recipe_name (string) - Name of report, not needed if ID used.\n date_range (choice) - Timeframe to run the report for.\n recipe_slug (string) - Name of Google BigQuery dataset to create.\n partners (integer_list) - DV360 partner id.\n advertisers (integer_list) - Comma delimited list of DV360 advertiser ids.", "source": "github_repos"} -{"code": "def get_cross_replica_context():\n return _get_per_thread_mode().cross_replica_context", "docstring": "Returns the current tf.distribute.Strategy if in a cross-replica context.\n\n DEPRECATED: Please use `in_cross_replica_context()` and\n `get_strategy()` instead.\n\nReturns:\n Returns the current `tf.distribute.Strategy` object in a cross-replica\n context, or `None`.\n\n Exactly one of `get_replica_context()` and `get_cross_replica_context()`\n will return `None` in a particular block.", "source": "github_repos"} -{"code": "def __init__(self, cache_config: CacheConfig) -> None:\n super().__init__(cache_config)\n if is_optimum_quanto_available():\n optimum_quanto_version = version.parse(importlib.metadata.version('optimum-quanto'))\n if optimum_quanto_version <= version.parse('0.2.5'):\n raise ImportError(f'You need optimum-quanto package version to be greater or equal than 0.2.5 to use `QuantoQuantizedCache`. Detected version {optimum_quanto_version}.')\n from optimum.quanto import MaxOptimizer, qint2, qint4\n if self.nbits not in [2, 4]:\n raise ValueError(f'`nbits` for `quanto` backend has to be one of [`2`, `4`] but got {self.nbits}')\n if self.axis_key not in [0, -1]:\n raise ValueError(f'`axis_key` for `quanto` backend has to be one of [`0`, `-1`] but got {self.axis_key}')\n if self.axis_value not in [0, -1]:\n raise ValueError(f'`axis_value` for `quanto` backend has to be one of [`0`, `-1`] but got {self.axis_value}')\n self.qtype = qint4 if self.nbits == 4 else qint2\n self.optimizer = MaxOptimizer()", "docstring": "Quantized Cache class that uses `quanto` as a backend to perform quantization. 
Current implementation supports `int2` and `int4` dtypes only.\n\n Parameters:\n cache_config (`QuantizedCacheConfig`):\n A configuration containing all the arguments to be used by the quantizer, including axis, qtype and group size.\n\nExample:\n ```python\n >>> # Run pip install quanto first if you don't have it yet\n >>> from transformers import AutoTokenizer, AutoModelForCausalLM, QuantoQuantizedCache, QuantizedCacheConfig\n\n >>> model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen2-0.5B-Instruct\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen2-0.5B-Instruct\")\n\n >>> inputs = tokenizer(text=\"My name is Qwen2\", return_tensors=\"pt\")\n\n >>> # Prepare a cache class and pass it to model's forward\n >>> cache_config = QuantizedCacheConfig(nbits=4)\n >>> past_key_values = QuantoQuantizedCache(cache_config=cache_config)\n >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)\n >>> outputs.past_key_values # access cache filled with key/values from generation\n QuantoQuantizedCache()\n ```", "source": "github_repos"} -{"code": "def get_config(self):\n all_args = tf_inspect.getfullargspec(self.__init__).args\n config = {'name': self.name, 'trainable': self.trainable}\n if hasattr(self, '_batch_input_shape'):\n config['batch_input_shape'] = self._batch_input_shape\n config['dtype'] = policy.serialize(self._dtype_policy)\n if hasattr(self, 'dynamic'):\n if self.dynamic:\n config['dynamic'] = self.dynamic\n elif 'dynamic' in all_args:\n all_args.remove('dynamic')\n expected_args = config.keys()\n extra_args = [arg for arg in all_args if arg not in expected_args]\n if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):\n raise NotImplementedError('Layer %s has arguments in `__init__` and therefore must override `get_config`.' % self.__class__.__name__)\n return config", "docstring": "Returns the config of the layer.\n\n A layer config is a Python dictionary (serializable)\n containing the configuration of a layer.\n The same layer can be reinstantiated later\n (without its trained weights) from this configuration.\n\n The config of a layer does not include connectivity\n information, nor the layer class name. These are handled\n by `Network` (one layer of abstraction above).\n\n Note that `get_config()` does not guarantee to return a fresh copy of dict\n every time it is called. 
The callers should make a copy of the returned dict\n if they want to modify it.\n\nReturns:\n Python dictionary.", "source": "github_repos"} -{"code": "def resource_path(package: Union[str, types.ModuleType]) -> abstract_path.Path:\n try:\n path = importlib_resources.files(package)\n except AttributeError:\n is_adhoc = True\n else:\n if isinstance(path, importlib_resources._adapters.CompatibilityFiles.SpecPath):\n is_adhoc = True\n else:\n is_adhoc = False\n if is_adhoc:\n if isinstance(package, types.ModuleType):\n package = getattr(package.__spec__, 'name', package.__name__)\n path = pathlib.Path(sys.modules[package].__file__)\n if path.name == '__init__.py':\n path = path.parent\n if isinstance(path, pathlib.Path):\n return abstract_path.Path(path)\n elif isinstance(path, zipfile.Path):\n path = ResourcePath(path.root, path.at)\n return typing.cast(abstract_path.Path, path)\n elif isinstance(path, importlib_resources.abc.Traversable):\n return typing.cast(abstract_path.Path, path)\n else:\n raise TypeError(f'Unknown resource path: {type(path)}: {path}')", "docstring": "Returns read-only root directory path of the module.\n\n Used to access module resource files.\n\n Usage:\n\n ```python\n path = epath.resource_path('tensorflow_datasets') / 'README.md'\n content = path.read_text()\n ```\n\n This is compatible with everything, including zipapp (`.par`).\n\n Resource files should be in the `data=` of the `py_library(` (when using\n bazel).\n\n To write to your project (e.g. automatically update your code), read-only\n resource paths can be converted to read-write paths with\n `epath.to_write_path(path)`.\n\nArgs:\n package: Module or module name.\n\nReturns:\n The read-only path to the root module directory", "source": "github_repos"} -{"code": "def register_command_handler(self, prefix, handler, help_info, prefix_aliases=None):\n if not prefix:\n raise ValueError('Empty command prefix')\n if prefix in self._handlers:\n raise ValueError('A handler is already registered for command prefix \"%s\"' % prefix)\n if not callable(handler):\n raise ValueError('handler is not callable')\n if not isinstance(help_info, str):\n raise ValueError('help_info is not a str')\n if prefix_aliases:\n for alias in prefix_aliases:\n if self._resolve_prefix(alias):\n raise ValueError('The prefix alias \"%s\" clashes with existing prefixes or aliases.' % alias)\n self._alias_to_prefix[alias] = prefix\n self._prefix_to_aliases[prefix] = prefix_aliases\n self._handlers[prefix] = handler\n self._prefix_to_help[prefix] = help_info", "docstring": "Register a callable as a command handler.\n\nArgs:\n prefix: Command prefix, i.e., the first word in a command, e.g.,\n \"print\" as in \"print tensor_1\".\n handler: A callable of the following signature:\n foo_handler(argv, screen_info=None),\n where argv is the argument vector (excluding the command prefix) and\n screen_info is a dictionary containing information about the screen,\n such as number of columns, e.g., {\"cols\": 100}.\n The callable should return:\n 1) a RichTextLines object representing the screen output.\n\n The callable can also raise an exception of the type CommandLineExit,\n which if caught by the command-line interface, will lead to its exit.\n The exception can optionally carry an exit token of arbitrary type.\n help_info: A help string.\n prefix_aliases: Aliases for the command prefix, as a list of str. 
E.g.,\n shorthands for the command prefix: [\"p\", \"pr\"]\n\nRaises:\n ValueError: If\n 1) the prefix is empty, or\n 2) handler is not callable, or\n 3) a handler is already registered for the prefix, or\n 4) elements in prefix_aliases clash with existing aliases.\n 5) help_info is not a str.", "source": "github_repos"} -{"code": "def tpu_devices(devices=None):\n return find_devices('TPU', devices)", "docstring": "Gets TPU devices out of `devices`.\n\nArgs:\n devices: A device list (as a list of strings). If None, the list of all\n available devices will be used for it.\n\nReturns:\n Those in `devices` that are TPUs.", "source": "github_repos"} -{"code": "def run_suite(test_classes, argv=None):\n args = _parse_cli_args(argv)\n for test_class in test_classes:\n if not issubclass(test_class, base_test.BaseTestClass):\n logging.error('Test class %s does not extend mobly.base_test.BaseTestClass', test_class)\n sys.exit(1)\n if args.list_tests:\n _print_test_names(test_classes)\n sys.exit(0)\n test_configs = config_parser.load_test_config_file(args.config, args.test_bed)\n selected_tests = compute_selected_tests(test_classes, args.tests)\n console_level = logging.DEBUG if args.verbose else logging.INFO\n ok = True\n for config in test_configs:\n runner = test_runner.TestRunner(config.log_path, config.testbed_name)\n with runner.mobly_logger(console_level=console_level):\n for test_class, tests in selected_tests.items():\n runner.add_test_class(config, test_class, tests)\n try:\n runner.run()\n ok = runner.results.is_all_pass and ok\n except signals.TestAbortAll:\n pass\n except Exception:\n logging.exception('Exception when executing %s.', config.testbed_name)\n ok = False\n if not ok:\n sys.exit(1)", "docstring": "Executes multiple test classes as a suite.\n\n This is the default entry point for running a test suite script file\n directly.\n\nArgs:\n test_classes: List of python classes containing Mobly tests.\n argv: A list that is then parsed as cli args. If None, defaults to cli\n input.", "source": "github_repos"} -{"code": "def clean_code(content: str) -> str:\n splits = content.split('\"\"\"')\n content = ''.join(splits[::2])\n splits = content.split(\"'''\")\n content = ''.join(splits[::2])\n lines_to_keep = []\n for line in content.split('\\n'):\n line = re.sub('#.*$', '', line)\n if len(line) != 0 and (not line.isspace()):\n lines_to_keep.append(line)\n return '\\n'.join(lines_to_keep)", "docstring": "Remove docstrings, empty line or comments from some code (used to detect if a diff is real or only concern\n comments or docstings).\n\nArgs:\n content (`str`): The code to clean\n\nReturns:\n `str`: The cleaned code.", "source": "github_repos"} -{"code": "def resize_position_embeddings(self, new_num_position_embeddings: int):\n self.distilbert.resize_position_embeddings(new_num_position_embeddings)", "docstring": "Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.\n\nArgs:\n new_num_position_embeddings (`int`):\n The number of new position embedding matrix. If position embeddings are learned, increasing the size\n will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the\n end. 
If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the\n size will add correct vectors at the end following the position encoding algorithm, whereas reducing\n the size will remove vectors from the end.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BertForPreTrainingOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n total_loss = None\n if labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return (total_loss,) + output if total_loss is not None else output\n return BertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),\n the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence\n pair (see `input_ids` docstring) Indices should be in `[0, 1]`:\n\n - 0 indicates sequence B is a continuation of sequence A,\n - 1 indicates sequence B is a random sequence.\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, BertForPreTraining\n >>> import torch\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"google-bert/bert-base-uncased\")\n >>> model = BertForPreTraining.from_pretrained(\"google-bert/bert-base-uncased\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.seq_relationship_logits\n ```", "source": "github_repos"} -{"code": "def make_rhs(self, operator, adjoint, with_batch=True):\n raise NotImplementedError('make_rhs is not defined.')", "docstring": "Make a rhs appropriate for calling operator.solve(rhs).\n\nArgs:\n operator: A `LinearOperator`\n adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the\n adjoint operator.\n with_batch: Python `bool`. If `True`, create `rhs` with the same batch\n shape as operator, and otherwise create a matrix without any batch\n shape.\n\nReturns:\n A `Tensor`", "source": "github_repos"} -{"code": "def generate_link(flag, np_fun_name):\n if flag == 'dev':\n template = 'https://numpy.org/devdocs/reference/generated/numpy.%s.html'\n elif flag == 'stable':\n template = 'https://numpy.org/doc/stable/reference/generated/numpy.%s.html'\n elif re.match('\\\\d+(\\\\.\\\\d+(\\\\.\\\\d+)?)?$', flag):\n template = f'https://numpy.org/doc/{flag}/reference/generated/numpy.%s.html'\n else:\n return None\n return template % np_fun_name", "docstring": "Generates link from numpy function name.\n\nArgs:\n flag: the flag to control link form. See `set_np_doc_form`.\n np_fun_name: the numpy function name.\n\nReturns:\n A string.", "source": "github_repos"} -{"code": "def __init__(self, text='', font_attr=None):\n \"\"\"Construct a RichLine with no rich attributes or a single attribute.\n\nArgs:\n text: Raw text string\n font_attr: If specified, a single font attribute to be applied to the\n entire text. Extending this object via concatenation allows creation\n of text with varying attributes.\n\"\"\"\n self.text = text\n if font_attr:\n self.font_attr_segs = [(0, len(text), font_attr)]\n else:\n self.font_attr_segs = []", "docstring": "Rich single-line text.\n\nAttributes:\n text: A plain string, the raw text represented by this object. 
Should not\n contain newlines.\n font_attr_segs: A list of (start, end, font attribute) triples, representing\n richness information applied to substrings of text.", "source": "github_repos"} -{"code": "def _get_normalizations(prices, forwards, strikes, discount_factors):\n strikes_abs = tf.abs(strikes)\n forwards_abs = tf.abs(forwards)\n orientations = strikes_abs >= forwards_abs\n normalization = tf.where(orientations, strikes_abs, forwards_abs)\n normalization = tf.where(tf.equal(normalization, 0), tf.ones_like(normalization), normalization)\n normalized_prices = prices / normalization\n if discount_factors is not None:\n normalized_prices /= discount_factors\n else:\n discount_factors = tf.ones_like(normalized_prices)\n return (normalized_prices, normalization, discount_factors)", "docstring": "Returns the normalized prices, normalization factors, and discount_factors.\n\n The normalization factors is the larger of strikes and forwards.\n If `discount_factors` is not None, these are the discount factors to expiry.\n If None, no discounting is applied and 1's are returned.\n\nArgs:\n prices: A real `Tensor` of any shape. The observed market prices of the\n assets.\n forwards: A real `Tensor` of the same shape and dtype as `prices`. The\n current forward prices to expiry.\n strikes: A real `Tensor` of the same shape and dtype as `prices`. The strike\n prices of the options.\n discount_factors: A real `Tensor` of same dtype as the `prices`.\n\nReturns:\n the normalized prices, normalization factors, and discount_factors.", "source": "github_repos"} -{"code": "def get_rot_mats(self) -> torch.Tensor:\n if self._rot_mats is not None:\n return self._rot_mats\n elif self._quats is not None:\n return quat_to_rot(self._quats)\n else:\n raise ValueError('Both rotations are None')", "docstring": "Returns the underlying rotation as a rotation matrix tensor.\n\nReturns:\n The rotation as a rotation matrix tensor", "source": "github_repos"} -{"code": "def __init__(self, proxy, include_indexes=False):\n self._proxy = proxy\n self._include_indexes = include_indexes", "docstring": "A transform that explodes a PCollection of DataFrame or Series. DataFrame\n is converterd to a schema-aware PCollection, while Series is converted to its\n underlying type.\n\nArgs:\n include_indexes: (optional, default: False) When unbatching a DataFrame\n if include_indexes=True, attempt to include index columns in the output\n schema for expanded DataFrames. 
Raises an error if any of the index\n levels are unnamed (name=None), or if any of the names are not unique\n among all column and index names.", "source": "github_repos"} -{"code": "def _split_list_into_bundles(self, output_pcollection, elements, max_element_per_bundle, element_size_fn):\n bundle = self._evaluation_context.create_bundle(output_pcollection)\n bundle_size = 0\n bundles = [bundle]\n for element in elements:\n if max_element_per_bundle and bundle_size >= max_element_per_bundle:\n bundle = self._evaluation_context.create_bundle(output_pcollection)\n bundle_size = 0\n bundles.append(bundle)\n bundle.output(element)\n bundle_size += element_size_fn(element)\n return bundles", "docstring": "Splits elements, an iterable, into multiple output bundles.\n\nArgs:\n output_pcollection: PCollection that the elements belong to.\n elements: elements to be chunked into bundles.\n max_element_per_bundle: (approximately) the maximum element per bundle.\n If it is None, only a single bundle will be produced.\n element_size_fn: Function to return the size of a given element.\n\nReturns:\n List of output uncommitted bundles with at least one bundle.", "source": "github_repos"} -{"code": "def __init__(self):\n object.__setattr__(self, '_options', {})\n object.__setattr__(self, '_mutable', True)", "docstring": "Base class for representing a set of tf.data options.\n\nAttributes:\n _options: Stores the option values.", "source": "github_repos"} -{"code": "def copy(self, stateful=False):\n raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')", "docstring": "Creates a new instance of this constraint.\n\nArgs:\n stateful(`bool`): Whether to not only copy the constraint for new instance, but also its state.\n\nReturns:\n constraint(`Constraint`): The same constraint as the one being called from.", "source": "github_repos"} -{"code": "def _init_from_converter(self, options: QuantizationDebugOptions, converter: TFLiteConverter, calibrated_model: Optional[bytes]=None, float_model: Optional[bytes]=None) -> None:\n self.quant_model = convert.mlir_quantize(calibrated_model, disable_per_channel=converter._experimental_disable_per_channel, fully_quantize=options.fully_quantize, enable_numeric_verify=True, denylisted_ops=options.denylisted_ops, denylisted_nodes=options.denylisted_nodes)\n self._quant_interpreter = _interpreter.Interpreter(model_content=self.quant_model)\n self._float_interpreter = None\n if float_model is not None:\n self._float_interpreter = _interpreter.Interpreter(model_content=float_model)", "docstring": "Convert the model and apply options.\n\n Converts the quantized model and initializes a quantized model interpreter\n with the quantized model. Returns a float model interpreter if float model\n is provided.\n\nArgs:\n options: a QuantizationDebugOptions object.\n converter: an initialized tf.lite.TFLiteConverter.\n calibrated_model: Calibrated model bytes.\n float_model: Float model bytes.", "source": "github_repos"} -{"code": "def _block_orth(self, p1, p2, p3):\n p1_shape = p1.shape.as_list()\n if p1_shape != p2.shape.as_list() or p1_shape != p3.shape.as_list():\n raise ValueError(f'The dimension of the matrices must be the same. 
Received p1.shape={p1.shape}, p2.shape={p2.shape} and p3.shape={p3.shape}.')\n n = p1_shape[0]\n eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n kernel2x2x2 = {}\n\n def matmul(p1, p2, p3):\n return math_ops.matmul(math_ops.matmul(p1, p2), p3)\n\n def cast(i, p):\n \"\"\"Return p or (1-p).\"\"\"\n return i * p + (1 - i) * (eye - p)\n for i in [0, 1]:\n for j in [0, 1]:\n for k in [0, 1]:\n kernel2x2x2[i, j, k] = matmul(cast(i, p1), cast(j, p2), cast(k, p3))\n return kernel2x2x2", "docstring": "Construct a 3 x 3 kernel.\n\n Used to construct orthgonal kernel.\n\nArgs:\n p1: A symmetric projection matrix.\n p2: A symmetric projection matrix.\n p3: A symmetric projection matrix.\n\nReturns:\n A 2 x 2 x 2 kernel.\n\nRaises:\n ValueError: If the dimensions of p1, p2 and p3 are different.", "source": "github_repos"} -{"code": "def l2_normalize(x, axis=None):\n return nn.l2_normalize(x, axis=axis)", "docstring": "Normalizes a tensor wrt the L2 norm alongside the specified axis.\n\nArgs:\n x: Tensor or variable.\n axis: axis along which to perform normalization.\n\nReturns:\n A tensor.", "source": "github_repos"} -{"code": "def parser(self, column: str, parser: str, error: str, value: Any) -> None:\n log = self._build_parser_message(column, parser, error, value)\n self.queue_log_message(log)", "docstring": "Adds parser error information to base log message and\n sends it to the logger for writing.\n\nArgs:\n * column: column where the rule is applied\n * parser: parser function that failed and raises this message\n * error: error that occurred\n * value: value that fails to parse\n\nReturns:\n * None", "source": "github_repos"} -{"code": "def actual_360(*, start_date, end_date, schedule_info=None, dtype=None, name=None):\n del schedule_info\n with tf.name_scope(name or 'actual_360'):\n end_date = dt.convert_to_date_tensor(end_date)\n start_date = dt.convert_to_date_tensor(start_date)\n dtype = dtype or tf.constant(0.0).dtype\n actual_days = tf.cast(start_date.days_until(end_date), dtype=dtype)\n return actual_days / 360", "docstring": "Computes the year fraction between the specified dates.\n\n The actual/360 convention specifies the year fraction between the start and\n end date as the actual number of days between the two dates divided by 360.\n\n Note that the schedule info is not needed for this convention and is ignored\n if supplied.\n\n For more details see:\n https://en.wikipedia.org/wiki/Day_count_convention#Actual/360\n\nArgs:\n start_date: A `DateTensor` object of any shape.\n end_date: A `DateTensor` object of compatible shape with `start_date`.\n schedule_info: The schedule info. Ignored for this convention.\n dtype: The dtype of the result. Either `tf.float32` or `tf.float64`. If not\n supplied, `tf.float32` is returned.\n name: Python `str` name prefixed to ops created by this function. If not\n supplied, `actual_360` is used.\n\nReturns:\n A real `Tensor` of supplied `dtype` and shape of `start_date`. 
The year\n fraction between the start and end date as computed by Actual/360\n convention.", "source": "github_repos"} -{"code": "def save_to_file(json_data, filename):\n if filename[-5:] != '.json':\n print('filename: %s' % filename)\n filename += '.json'\n with open(PATH_TO_DIR + '/' + filename, 'w') as f:\n json.dump(json_data, f, sort_keys=True, indent=4)\n print(' Successfully wrote configs to file `%s`.\\n' % filename)", "docstring": "Saves all detected configuration(s) into a JSON file.\n\nArgs:\n json_data: Dict of all configurations found.\n filename: String that is the name of the output JSON file.", "source": "github_repos"} -{"code": "def stop(self):\n self.log.debug('Stopping snippet package %s.', self.package)\n self.close_connection()\n self._stop_server()\n self._destroy_event_client()\n self.log.debug('Snippet package %s stopped.', self.package)", "docstring": "Releases all the resources acquired in `initialize`.\n\n This function releases following resources:\n * Close the socket connection.\n * Stop forwarding the device port to host.\n * Stop the standing server subprocess running on the host side.\n * Stop the snippet server running on the device side.\n * Stop the event client and set `self._event_client` to None.\n\nRaises:\n android_device_lib_errors.DeviceError: if the server exited with errors on\n the device side.", "source": "github_repos"} -{"code": "def _CheckLineJoining(self, code, join_lines):\n llines = yapf_test_helper.ParseAndUnwrap(code)\n self.assertCodeEqual(line_joiner.CanMergeMultipleLines(llines), join_lines)", "docstring": "Check that the given LogicalLines are joined as expected.\n\nArgs:\n code: The code to check to see if we can join it.\n join_lines: True if we expect the lines to be joined.", "source": "github_repos"} -{"code": "def attach_tracer(tracer_name_template=None):\n if not _has_opentelemetry:\n return lambda cls: cls\n\n def decorator(cls):\n original_init = cls.__init__\n\n @functools.wraps(original_init)\n def init_with_tracer(self, *args, **kwargs):\n original_init(self, *args, **kwargs)\n module_name = cls.__module__\n class_name = cls.__qualname__\n if tracer_name_template is None:\n if module_name.startswith('transformers.'):\n tracer_name = f'{module_name}.{class_name}'\n else:\n tracer_name = f'transformers.{module_name}.{class_name}'\n else:\n tracer_name = tracer_name_template.format(module=module_name, class_name=class_name)\n self.tracer = get_tracer(tracer_name)\n cls.__init__ = init_with_tracer\n return cls\n return decorator", "docstring": "Decorator that attaches a tracer to a class.\n\n This decorator should be applied to classes that need OpenTelemetry tracing.\n It adds a tracer attribute to the class instance that can be used by the traced decorator.\n\nArgs:\n tracer_name_template: Optional template string for the tracer name.\n If provided, it should contain {module} which will be replaced with the class's full module path\n and {class_name} for the class name.\n If None, a default naming scheme will be used where:\n - If the module already starts with \"transformers.\", it will use that directly\n - Otherwise, it will prepend \"transformers.\" to the module name\n\nReturns:\n Class decorator function", "source": "github_repos"} -{"code": "def get_init_tokens_op(self, num_tokens=-1):\n if self._gradients_applied is False:\n raise ValueError('get_init_tokens_op() should be called after apply_gradients().')\n tokens_needed = self._replicas_to_aggregate - self._total_num_replicas\n if num_tokens == -1:\n num_tokens 
= self._replicas_to_aggregate\n elif num_tokens < tokens_needed:\n raise ValueError('Too few tokens to finish the first step: %d (given) vs %d (needed)' % (num_tokens, tokens_needed))\n if num_tokens > 0:\n with ops.device(self._global_step.device), ops.name_scope(''):\n tokens = array_ops.fill([num_tokens], self._global_step)\n init_tokens = self._sync_token_queue.enqueue_many((tokens,))\n else:\n init_tokens = control_flow_ops.no_op(name='no_init_tokens')\n return init_tokens", "docstring": "Returns the op to fill the sync_token_queue with the tokens.\n\n This is supposed to be executed in the beginning of the chief/sync thread\n so that even if the total_num_replicas is less than replicas_to_aggregate,\n the model can still proceed as the replicas can compute multiple steps per\n variable update. Make sure:\n `num_tokens >= replicas_to_aggregate - total_num_replicas`.\n\nArgs:\n num_tokens: Number of tokens to add to the queue.\n\nReturns:\n An op for the chief/sync replica to fill the token queue.\n\nRaises:\n ValueError: If this is called before apply_gradients().\n ValueError: If num_tokens are smaller than replicas_to_aggregate -\n total_num_replicas.", "source": "github_repos"} -{"code": "def get_extended_attention_mask(self, attention_mask: tf.Tensor, input_shape: Tuple[int], is_decoder: bool) -> tf.Tensor:\n if not isinstance(attention_mask, tf.Tensor):\n attention_mask = tf.convert_to_tensor(attention_mask)\n if attention_mask.shape.rank == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.shape.rank == 2:\n if is_decoder:\n batch_size, seq_length = input_shape\n seq_ids = tf.range(seq_length, dtype=attention_mask.dtype)\n causal_mask = tf.broadcast_to(seq_ids, (batch_size, seq_length, seq_length)) <= seq_ids[None, :, None]\n if shape_list(causal_mask)[1] < shape_list(attention_mask)[1]:\n prefix_seq_len = tf.shape(attention_mask)[1] - tf.shape(causal_mask)[1]\n causal_mask = tf.concat([tf.ones((batch_size, seq_length, prefix_seq_len), dtype=causal_mask.dtype), causal_mask], axis=-1)\n extended_attention_mask = tf.cast(causal_mask[:, None, :, :], attention_mask.dtype) * attention_mask[:, None, None, :]\n else:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape))\n extended_attention_mask = tf.cast(extended_attention_mask, self.dtype)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArgs:\n attention_mask (`tf.Tensor`):\n Mask with ones indicating tokens to attend to, zeros for tokens to ignore.\n input_shape (`Tuple[int]`):\n The shape of the input to the model.\n is_decoder (`bool`):\n Whether the model is used as a decoder.\n\nReturns:\n `tf.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.", "source": "github_repos"} -{"code": "def _add_state_variable(self, name, shape, dtype, initializer=None, partitioner=None, use_resource=None, **kwargs):\n weight = self.add_weight(name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=None, trainable=False, constraint=None, partitioner=partitioner, use_resource=use_resource, **kwargs)\n self.state_variables[name] = weight\n return weight", "docstring": "Add a variable that can hold state which is updated during adapt().\n\nArgs:\n name: Variable 
name.\n shape: Variable shape. Defaults to scalar if unspecified.\n dtype: The type of the variable. Defaults to `self.dtype` or `float32`.\n initializer: initializer instance (callable).\n partitioner: Partitioner to be passed to the `Trackable` API.\n use_resource: Whether to use `ResourceVariable`\n **kwargs: Additional keyword arguments. Accepted values are `getter` and\n `collections`.\n\nReturns:\n The created variable.", "source": "github_repos"} -{"code": "def get_calibration_min_max_value(self, calibration_statistics_serialized: bytes, calibration_options_serialized: bytes) -> Optional[tuple[float, float]]:\n statistics = calibration_statistics_pb2.CalibrationStatistics.FromString(calibration_statistics_serialized)\n options = stablehlo_quant_config_pb2.CalibrationOptions.FromString(calibration_options_serialized)\n return _call_and_return_none_on_error(functools.partial(calibration_algorithm.get_min_max_value, statistics, options), error_msg=f'Retrieving calibrated min / max failed. Options: {options}.')", "docstring": "Calculates min and max values from statistics.\n\nArgs:\n calibration_statistics_serialized: Serialized `CalibrationStatistics`.\n This will be the source to calculate min and max values from.\n calibration_options_serialized: Serialized `CalibrationOptions`. Specifies\n how the min / max should be calculated.\n\nReturns:\n (min_value, max_value): Min and max calculated using calib_opts. `None`\n upon error.", "source": "github_repos"} -{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n requirements_dir = os.path.dirname(os.path.realpath(__file__))\n pipeline_options.view_as(SetupOptions).requirements_file = f'{requirements_dir}/sklearn_examples_requirements.txt'\n model_loader = KeyedModelHandler(SklearnModelHandlerNumpy(model_file_type=ModelFileType.PICKLE, model_uri=known_args.model_path, large_model=known_args.large_model))\n pipeline = test_pipeline\n if not test_pipeline:\n pipeline = beam.Pipeline(options=pipeline_options)\n label_pixel_tuple = pipeline | 'ReadFromInput' >> beam.io.ReadFromText(known_args.input) | 'PreProcessInputs' >> beam.Map(process_input)\n predictions = label_pixel_tuple | 'RunInference' >> RunInference(model_loader) | 'PostProcessOutputs' >> beam.ParDo(PostProcessor())\n _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n result = pipeline.run()\n result.wait_until_finish()\n return result", "docstring": "Args:\n argv: Command line arguments defined for this example.\n save_main_session: Used for internal testing.\n test_pipeline: Used for internal testing.", "source": "github_repos"} -{"code": "def _dedup_strings(device_strs):\n new_device_strs = []\n for device_str, vals in itertools.groupby(device_strs):\n num = len(list(vals))\n if num == 1:\n new_device_strs.append(device_str)\n else:\n new_device_strs.append('%s (x%d)' % (device_str, num))\n return new_device_strs", "docstring": "Groups together consecutive identical strings.\n\n For example, given:\n ['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3']\n This function returns:\n ['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)']\n\nArgs:\n device_strs: A list of strings, each representing a device.\n\nReturns:\n A copy of the input, but identical consecutive strings are merged into a\n 
single string.", "source": "github_repos"} -{"code": "def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:\n hidden_states = self.conv_pre(input_embeds)\n for i in range(self.num_upsamples):\n hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)\n hidden_states = self.upsampler[i](hidden_states)\n res_state = self.resblocks[i * self.num_kernels](hidden_states)\n for j in range(1, self.num_kernels):\n res_state += self.resblocks[i * self.num_kernels + j](hidden_states)\n hidden_states = res_state / self.num_kernels\n hidden_states = nn.functional.leaky_relu(hidden_states)\n hidden_states = self.conv_post(hidden_states)\n hidden_states = torch.tanh(hidden_states)\n waveform = hidden_states.squeeze(1)\n return waveform", "docstring": "Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch\n of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech\n waveform.\n\nArgs:\n spectrogram (`torch.FloatTensor`):\n Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,\n model_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim`\n is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`.\n\nReturns:\n `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of\n shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.", "source": "github_repos"} -{"code": "def _finish(self, update_ops, name_scope):\n return control_flow_ops.group(*update_ops, name=name_scope)", "docstring": "Do what is needed to finish the update.\n\n This is called with the `name_scope` using the \"name\" that\n users have chosen for the application of gradients.\n\nArgs:\n update_ops: List of `Operation` objects to update variables. This list\n contains the values returned by the `_apply_dense()` and\n `_apply_sparse()` calls.\n name_scope: String. 
Name to use for the returned operation.\n\nReturns:\n The operation to apply updates.", "source": "github_repos"} -{"code": "def recipe_dbm_to_bigquery(config, auth_read, auth_write, dbm_report_id, dbm_report_name, dbm_dataset, dbm_table, dbm_schema, is_incremental_load):\n dbm(config, {'auth': auth_read, 'report': {'report_id': dbm_report_id, 'name': dbm_report_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': dbm_dataset, 'table': dbm_table, 'schema': dbm_schema, 'header': True, 'is_incremental_load': is_incremental_load}}})", "docstring": "Move existing DV360 reports into a BigQuery table.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n auth_write (authentication) - Authorization used for writing data.\n dbm_report_id (integer) - DV360 report ID given in UI, not needed if name used.\n dbm_report_name (string) - Name of report, not needed if ID used.\n dbm_dataset (string) - Existing BigQuery dataset.\n dbm_table (string) - Table to create from this report.\n dbm_schema (json) - Schema provided in JSON list format or empty value to auto detect.\n is_incremental_load (boolean) - Clear data in destination table during this report's time period, then append report data to destination table.", "source": "github_repos"} -{"code": "def get_generating_ops(ts):\n ts = make_list_of_t(ts, allow_graph=False)\n return [t.op for t in ts]", "docstring": "Return all the generating ops of the tensors in `ts`.\n\nArgs:\n ts: a list of `tf.Tensor`\n\nReturns:\n A list of all the generating `tf.Operation` of the tensors in `ts`.\n\nRaises:\n TypeError: if `ts` cannot be converted to a list of `tf.Tensor`.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor, freqs_ci: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for encoder_layer in self.layers:\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, freqs_ci, attention_mask, output_attentions)\n else:\n layer_outputs = encoder_layer(hidden_state=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, freqs_ci=freqs_ci)\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n hidden_states = layer_outputs[0]\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert 
`input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github_repos"} -{"code": "def random_dna(self, random_generator: Union[types.ModuleType, random.Random, None]=None, attach_spec: bool=True, previous_dna: Optional['DNA']=None) -> 'DNA':\n random_generator = random_generator or random\n dna = self._random_dna(random_generator, previous_dna)\n if attach_spec:\n dna.use_spec(self)\n return dna", "docstring": "Returns a random DNA based on current spec.\n\nArgs:\n random_generator: An optional Random object. If None, the global random\n module will be used.\n attach_spec: If True, current spec will be attached to the returned DNA.\n previous_dna: An optional DNA representing previous DNA. This field might\n be useful for generating stateful random DNAs.\n\nReturns:\n A random DNA based on current spec.", "source": "github_repos"} -{"code": "def session_creator(self, scaffold=None, config=None, checkpoint_dir=None, checkpoint_filename_with_path=None, max_wait_secs=7200):\n if config:\n session_config = copy.deepcopy(config)\n session_config.MergeFrom(self._session_config)\n else:\n session_config = self._session_config\n if not self._strategy or self._strategy.extended.experimental_should_init:\n logging.info('Creating chief session creator with config: %r', config)\n return monitored_session.ChiefSessionCreator(scaffold, master=self.master_target, config=session_config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)\n else:\n logging.info('Creating worker session creator with config: %r', config)\n return monitored_session.WorkerSessionCreator(scaffold, master=self.master_target, config=session_config, max_wait_secs=max_wait_secs)", "docstring": "Returns a session creator.\n\n The returned session creator will be configured with the correct master\n target and session configs. It will also run either init ops or ready ops\n by querying the `strategy` object when `create_session` is called on it.\n\nArgs:\n scaffold: A `Scaffold` used for gathering or building supportive ops. If\n not specified a default one is created. It's used to finalize the graph.\n config: `ConfigProto` proto used to configure the session.\n checkpoint_dir: A string. 
Optional path to a directory where to restore\n variables.\n checkpoint_filename_with_path: Full file name path to the checkpoint file.\n Only one of `checkpoint_dir` or `checkpoint_filename_with_path` can be\n specified.\n max_wait_secs: Maximum time to wait for the session to become available.\n\nReturns:\n a descendant of SessionCreator.", "source": "github_repos"} -{"code": "def get_tensor_layout(self, path):\n raise NotImplementedError()", "docstring": "Retrieve the `TensorLayout` for the intermediate tensor.\n\nArgs:\n path: a string path for the corresponding tensor.\n\nReturns:\n The `TensorLayout` for the intermediate tensor, which can be used\n by `backend.relayout()` to reshard the tensor. Could also return\n None.", "source": "github_repos"} -{"code": "def train_model(preprocessed_dataset_path: str, trained_model_path: str, base_artifact_path: str):\n timestamp = time.time()\n model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg16', pretrained=True)\n target_path = f'{base_artifact_path}/training/trained_model_{timestamp}.pt'\n target_path_gcsfuse = target_path.replace('gs://', '/gcs/')\n Path(target_path_gcsfuse).parent.mkdir(parents=True, exist_ok=True)\n torch.save(model.state_dict(), target_path_gcsfuse)\n Path(trained_model_path).parent.mkdir(parents=True, exist_ok=True)\n with open(trained_model_path, 'w') as f:\n f.write(target_path)", "docstring": "Placeholder method to load a model from the torch hub and save it.\n\nArgs:\n preprocessed_dataset_path (str): Path to the preprocessed dataset\n trained_model_path (str): Output path for the trained model\n base_artifact_path (str): path to the base directory of where artifacts can be stored for\n this component", "source": "github_repos"} -{"code": "def symmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, per_channel=False):\n with torch.no_grad():\n n = 2 ** (num_bits - 1) - 1\n if per_channel:\n scale, _ = torch.max(torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1)\n scale = torch.clamp(scale, min=1e-08) / n\n else:\n scale = max(saturation_min.abs(), saturation_max.abs())\n scale = torch.clamp(scale, min=1e-08) / n\n return scale", "docstring": "Compute the scaling factor with the given quantization range for symmetric quantization.\n\nArgs:\n saturation_min (`torch.Tensor`):\n Lower bound for quantization range.\n saturation_max (`torch.Tensor`):\n Upper bound for quantization range.\n per_channel (`bool`, *optional*, defaults to `False`):\n Whether to or not use channel-wise quantization.\n\nReturns:\n `torch.Tensor`: Scaling factor that linearly quantizes the given range between *saturation_min* and\n *saturation_max*.", "source": "github_repos"} -{"code": "def _snapshot_device_function_stack_metadata(self) -> list[traceable_stack.TraceableObject]:\n snapshot = []\n for obj in self._device_function_stack.peek_traceable_objs():\n obj_copy = obj.copy_metadata()\n obj_copy.obj = obj.obj.display_name\n snapshot.append(obj_copy)\n return snapshot", "docstring": "Return device function stack as a list of TraceableObjects.\n\nReturns:\n [traceable_stack.TraceableObject, ...] 
where each TraceableObject's .obj\n member is a displayable name for the user's argument to Graph.device, and\n the filename and lineno members point to the code location where\n Graph.device was called directly or indirectly by the user.", "source": "github_repos"} -{"code": "def node_name(self):\n return self._node_name", "docstring": "Name of the node from which the tensor value was dumped.\n\nReturns:\n (`str`) name of the node watched by the debug op.", "source": "github_repos"} -{"code": "def set_session(session):\n global _SESSION\n _SESSION.session = session", "docstring": "Sets the global TensorFlow session.\n\nArgs:\n session: A TF Session.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n lm_loss = None\n if labels is not None:\n lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return (lm_loss,) + output if lm_loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`\n\nExample:\n ```python\n >>> from transformers import AutoTokenizer, MegatronBertForCausalLM, MegatronBertConfig\n >>> import torch\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"nvidia/megatron-bert-cased-345m\")\n >>> model = MegatronBertForCausalLM.from_pretrained(\"nvidia/megatron-bert-cased-345m\", is_decoder=True)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```", "source": "github_repos"} -{"code": "def recipe_hello(config, auth_read, say_first, say_second, error, sleep):\n hello(config, {'auth': auth_read, 'say': say_first, 'error': error, 'sleep': sleep})\n hello(config, {'auth': auth_read, 'say': say_second, 'sleep': sleep})", "docstring": "Recipe template for say hello.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n say_first (string) - Type in a greeting.\n say_second (string) - Type in a greeting.\n error (string) - Optional error for testing.\n sleep (integer) - Seconds to sleep.", "source": "github_repos"} -{"code": "def forward(self, trajectories: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, targets: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TrajectoryTransformerOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n if past_key_values is None:\n past_key_values = tuple([None] * len(self.blocks))\n batch_size, sequence_length = trajectories.size()\n if sequence_length > self.block_size:\n raise ValueError('Cannot forward, model block size is exhausted.')\n offset_trajectories = self.offset_tokens(trajectories)\n token_embeddings = self.tok_emb(offset_trajectories)\n position_embeddings = self.pos_emb[:, :sequence_length, :]\n hidden_states = self.drop(token_embeddings + position_embeddings)\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...')\n use_cache = False\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.blocks, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n if self.gradient_checkpointing and self.training:\n outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, layer_past, use_cache, output_attentions)\n else:\n outputs = block(hidden_states, layer_past, use_cache, output_attentions)\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n hidden_state = self.ln_f(hidden_states)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n hidden_states_pad, n_pad = self.pad_to_full_observation(hidden_state)\n logits = self.head(hidden_states_pad)\n logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1)\n logits = logits[:, :sequence_length]\n if targets is not None:\n loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction='none')\n if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1:\n n_states = int(np.ceil(sequence_length / self.transition_dim))\n weights = torch.cat([torch.ones(self.observation_dim, device=trajectories.device), torch.ones(self.action_dim, device=trajectories.device) * self.action_weight, torch.ones(1, device=trajectories.device) * self.reward_weight, torch.ones(1, device=trajectories.device) * self.value_weight])\n weights = weights.repeat(n_states)\n weights = weights[1:].repeat(batch_size, 1)\n loss = loss * weights.view(-1)\n loss = (loss * attention_mask.view(-1)).mean()\n else:\n loss = None\n if not return_dict:\n return tuple((v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None))\n return TrajectoryTransformerOutput(loss=loss, logits=logits, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions)", "docstring": "Returns:\n\nExample:\n ```python\n >>> from transformers import TrajectoryTransformerModel\n >>> import torch\n\n >>> model = TrajectoryTransformerModel.from_pretrained(\n ... \"CarlCochet/trajectory-transformer-halfcheetah-medium-v2\"\n ... )\n >>> model.to(device)\n >>> model.eval()\n\n >>> observations_dim, action_dim, batch_size = 17, 6, 256\n >>> seq_length = observations_dim + action_dim + 1\n\n >>> trajectories = torch.LongTensor([np.random.permutation(self.seq_length) for _ in range(batch_size)]).to(\n ... device\n ... )\n >>> targets = torch.LongTensor([np.random.permutation(self.seq_length) for _ in range(batch_size)]).to(device)\n\n >>> outputs = model(\n ... trajectories,\n ... targets=targets,\n ... use_cache=True,\n ... output_attentions=True,\n ... output_hidden_states=True,\n ... return_dict=True,\n ... )\n ```", "source": "github_repos"} -{"code": "def __init__(self, thresholds=None, name=None, dtype=None):\n super(FalseNegatives, self).__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES, thresholds=thresholds, name=name, dtype=dtype)", "docstring": "Calculates the number of false negatives.\n\n If `sample_weight` is given, calculates the sum of the weights of\n false negatives. 
This metric creates one local variable, `accumulator`\n that is used to keep track of the number of false negatives.\n\n If `sample_weight` is `None`, weights default to 1.\n Use `sample_weight` of 0 to mask values.\n\nArgs:\n thresholds: (Optional) Defaults to 0.5. A float value or a python\n list/tuple of float threshold values in [0, 1]. A threshold is compared\n with prediction values to determine the truth value of predictions\n (i.e., above the threshold is `true`, below is `false`). One metric\n value is generated for each threshold value.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\n Standalone usage:\n\n >>> m = tf.keras.metrics.FalseNegatives()\n >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])\n >>> m.result().numpy()\n 2.0\n\n >>> m.reset_state()\n >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])\n >>> m.result().numpy()\n 1.0\n\n Usage with `compile()` API:\n\n ```python\n model.compile(optimizer='sgd',\n loss='mse',\n metrics=[tf.keras.metrics.FalseNegatives()])\n ```", "source": "github_repos"} -{"code": "def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu, dtype):\n strides = [1] + strides + [1]\n rates = [1] + rates + [1]\n with self.cached_session(use_gpu=use_gpu):\n out_tensor = nn_ops.dilation2d(constant_op.constant(image, dtype=dtype), constant_op.constant(kernel, dtype=dtype), strides=strides, rates=rates, padding=padding, name='dilation2d')\n self.assertAllCloseAccordingToType(out, self.evaluate(out_tensor))", "docstring": "Verifies the output values of the dilation function.\n\nArgs:\n image: Input tensor with shape: [batch, in_height, in_width, channels].\n kernel: Filter tensor with shape: [filter_height, filter_width, channels].\n strides: Output strides, specified as [stride_height, stride_width].\n rates: Atrous rates, specified as [rate_height, rate_width].\n padding: Padding type.\n out: Expected output.\n use_gpu: Whether we are running on GPU.", "source": "github_repos"} -{"code": "def set_tuple_shapes(self, tuple_shapes):\n if len(tuple_shapes) != self.number_of_tuple_elements:\n raise ValueError(f'tuple_shapes is {str(tuple_shapes)}, but must be a list of length {self.number_of_tuple_elements}')\n try:\n tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes]\n except (ValueError, TypeError) as e:\n raise TypeError(f'tuple_shapes is {str(tuple_shapes)}, but must be a list of elements each convertible to TensorShape: got error {str(e)}') from e\n if self._frozen:\n for frozen, updated in zip(self._tuple_shapes, tuple_shapes):\n if frozen != updated:\n raise ValueError(f'Trying to update InfeedQueue with frozen configuration with an incompatible shape. 
Frozen shapes are {str(self._tuple_shapes)}, updated shapes are {str(tuple_shapes)}')\n else:\n self._tuple_shapes = tuple_shapes\n self._validate()", "docstring": "Sets the shape of each element of the queue.\n\n tuple_shapes must be a list of length\n self.number_of_tuple_elements, and each element must be\n convertible to a TensorShape.\n\nArgs:\n tuple_shapes: the shapes of each queue element.\n\nRaises:\n ValueError: if tuple_shapes is not of length\n self.number_of_tuple_elements.\n TypeError: if an element of tuple_shapes cannot be converted to\n a TensorShape.", "source": "github_repos"} -{"code": "def NHWCToNCHW(input_tensor):\n if isinstance(input_tensor, tensor.Tensor):\n return array_ops.transpose(input_tensor, [0, 3, 1, 2])\n else:\n return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]", "docstring": "Convert the input from NHWC format to NCHW.\n\nArgs:\n input_tensor: a 4-D tensor, or a 4-element array representing the same.\n\nReturns:\n the converted tensor or a shape array", "source": "github_repos"} -{"code": "def set_options(cls, pipeline_options):\n cls._pipeline_options = pipeline_options", "docstring": "Set filesystem options.\n\nArgs:\n pipeline_options: Instance of ``PipelineOptions``.", "source": "github_repos"} -{"code": "def __init__(self, config: FastSpeech2ConformerConfig, module_config):\n super().__init__()\n input_channels = config.hidden_size\n hidden_channels = module_config['linear_units']\n kernel_size = config.positionwise_conv_kernel_size\n self.conv1 = nn.Conv1d(input_channels, hidden_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)\n self.conv2 = nn.Conv1d(hidden_channels, input_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)\n self.dropout = nn.Dropout(module_config['dropout_rate'])", "docstring": "Initialize FastSpeech2ConformerMultiLayeredConv1d module.\n\nArgs:\n input_channels (`int`): Number of input channels.\n hidden_channels (`int`): Number of hidden channels.\n kernel_size (`int`): Kernel size of conv1d.\n dropout_rate (`float`): Dropout rate.", "source": "github_repos"} -{"code": "def test_gradient(self, shape, rt_value, rt_grad, default_value, default_grad, output_value, output_grad, ragged_rank=None):\n rt_value = ragged_factory_ops.constant(rt_value, dtype=dtypes.float32, ragged_rank=ragged_rank)\n rt_grad = ragged_factory_ops.constant(rt_grad, dtype=dtypes.float32, ragged_rank=ragged_rank)\n default_value = constant_op.constant(default_value, dtype=dtypes.float32)\n default_grad = constant_op.constant(default_grad, dtype=dtypes.float32)\n output_value = constant_op.constant(output_value, dtype=dtypes.float32, shape=shape)\n output_grad = constant_op.constant(output_grad, dtype=dtypes.float32, shape=shape)\n shape = tensor_shape.as_shape(shape)\n for partition_type in ['row_splits', 'value_rowids']:\n rt_val = self.rt_with_partition_type(rt_value, partition_type)\n if context.executing_eagerly():\n self._test_gradient_helper(rt_val, default_value, shape, output_grad, output_value, rt_grad, default_grad)\n else:\n for shape_info in ['known', 'unknown_dims', 'unknown_rank']:\n rt_val = self.wrap_in_placeholder(rt_val, shape_info)\n default_val = self.wrap_in_placeholder(default_value, shape_info)\n shape_val = self.wrap_in_placeholder(shape, shape_info)\n self._test_gradient_helper(rt_val, default_val, shape_val, output_grad, output_value, rt_grad, default_grad)", "docstring": "Tests that ragged_to_dense generates the right gradient.\n\nArgs:\n shape: The `shape` arg for 
`ragged_to_dense`.\n rt_value: The `rt_input` arg for `ragged_to_dense`.\n rt_grad: The expected gradient for `rt_value`. Corresponds 1:1 with\n `rt_value`.\n default_value: The `default_value` arg for `ragged_to_dense`.\n default_grad: The expected gradient for `default_value`. Corresponds 1:1\n with `default_value`.\n output_value: The expected output of `ragged_to_dense`.\n output_grad: The gradient for the output (used to generate the gradients\n `rt_grad` and `default_grad`). Corresponds 1:1 with `output_value`.\n ragged_rank: Ragged rank for `rt_value`.", "source": "github_repos"} -{"code": "def outer(x1, x2):\n if any_symbolic_tensors((x1, x2)):\n return Outer().symbolic_call(x1, x2)\n return backend.numpy.outer(x1, x2)", "docstring": "Compute the outer product of two vectors.\n\n Given two vectors `x1` and `x2`, the outer product is:\n\n ```\n out[i, j] = x1[i] * x2[j]\n ```\n\nArgs:\n x1: First input tensor.\n x2: Second input tensor.\n\nReturns:\n Outer product of `x1` and `x2`.", "source": "github_repos"} -{"code": "def select(self, children: List['ColumnExpressionBuilder']) -> 'ColumnExpressionBuilder':\n if self._column_name:\n raise AttributeError(f'select() must not be called on a builder with name set already. Got select called on {str(self)}.')\n if self._builder.return_type.returns_collection() and (not self._needs_unnest):\n raise AttributeError(f'select() must not be called on a builder which returns collection. Got select called on {str(self)}.')\n if not isinstance(self._builder.return_type, _fhir_path_data_types.StructureDataType):\n raise AttributeError(f'select() can only be called on a FHIR path which returns a StructureDataType. Got type of {self._builder.return_type} from {str(self)}.')\n for child_builder in children:\n if not child_builder.column_name and (not child_builder.children):\n raise AttributeError(f'select() child builders must either have name names or children. Got {str(child_builder)}')\n if not child_builder.fhir_path.startswith(self._builder.fhir_path):\n raise AttributeError(f'select() child builders must be built by starting with their parent FHIR path. 
Got {str(child_builder)} in {self._builder.fhir_path}.')\n path_to_replace = self._builder.fhir_path\n reference_node = self._builder.node\n child_builder.node.replace_operand(path_to_replace, _evaluation.ReferenceNode(self._builder.node.context, reference_node, unnested=True))\n return ColumnExpressionBuilder(self._builder, self._column_name, children, self._needs_unnest, True)", "docstring": "The select() function.\n\n Selects the child fields from a FHIR path which returns a StructureDataType.\n Once this function is called, the FHIR path is sealed to be immutable.\n\nArgs:\n children: A list of selected FHIR Path.\n\nReturns:\n A new ColumnExpressionBuilder with the given children.", "source": "github_repos"} -{"code": "def process_update(x):\n if callable(x):\n update = lambda: process_update(x())\n return update()\n elif isinstance(x, ops.Operation):\n update = x\n elif hasattr(x, 'op'):\n update = x.op\n else:\n update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)\n reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])\n update._unconditional_update = update not in reachable\n return update", "docstring": "Standardize update ops.\n\nArgs:\n x: Tensor, op, or callable.\n\nReturns:\n An update op.", "source": "github_repos"} -{"code": "def create_profiler_ui(graph, run_metadata, ui_type='readline', on_ui_exit=None, config=None):\n del config\n analyzer = ProfileAnalyzer(graph, run_metadata)\n cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit)\n cli.register_command_handler('list_profile', analyzer.list_profile, analyzer.get_help('list_profile'), prefix_aliases=['lp'])\n cli.register_command_handler('print_source', analyzer.print_source, analyzer.get_help('print_source'), prefix_aliases=['ps'])\n return cli", "docstring": "Create an instance of ReadlineUI based on a `tf.Graph` and `RunMetadata`.\n\nArgs:\n graph: Python `Graph` object.\n run_metadata: A `RunMetadata` protobuf object.\n ui_type: (str) requested UI type, e.g., \"readline\".\n on_ui_exit: (`Callable`) the callback to be called when the UI exits.\n config: An instance of `cli_config.CLIConfig`.\n\nReturns:\n (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer\n commands and tab-completions registered.", "source": "github_repos"} -{"code": "def update(self, grads):\n pass", "docstring": "Updates the value of the loss scale.\n\n The loss scale will be potentially updated, based on the value of `grads`.\n The tensor returned by calling this class is only updated when this function\n is evaluated.\n\n In eager mode, this directly updates the loss scale, so that calling\n `__call__` will return the newly updated loss scale. In graph mode,\n this returns an op that, when evaluated, updates the loss scale.\n\n This function also returns a `should_apply_gradients` bool. If False,\n gradients should not be applied to the variables that step, as nonfinite\n gradients were found, and the loss scale has been be updated to reduce the\n chance of finding nonfinite gradients in the next step. Some loss scale\n classes will always return True, as they cannot adjust themselves in\n response to nonfinite gradients.\n\n When a DistributionStrategy is used, this function may only be called in a\n cross-replica context.\n\nArgs:\n grads: A nested structure of unscaled gradients, each which is the\n gradient of the loss with respect to a weight. The gradients should have\n already been divided by the loss scale being before passed to this\n function. 
'None' gradients are accepted, and are ignored.\n\nReturns:\n update_op: In eager mode, None. In graph mode, an op to update the loss\n scale.\n should_apply_gradients: Either a bool or a scalar boolean tensor. If\n False, the caller should skip applying `grads` to the variables this\n step.", "source": "github_repos"} -{"code": "def assert_equal(first, second, msg=None, extras=None):\n _call_unittest_assertion(_pyunit_proxy.assertEqual, first, second, msg=msg, extras=extras)", "docstring": "Asserts the equality of objects, otherwise fail the test.\n\n Error message is \"first != second\" by default. Additional explanation can\n be supplied in the message.\n\nArgs:\n first: The first object to compare.\n second: The second object to compare.\n msg: A string that adds additional info about the failure.\n extras: An optional field for extra information to be included in\n test result.", "source": "github_repos"} -{"code": "def sigmoid(x):\n output = ops.sigmoid(x)\n try:\n output._keras_logits = x\n except AttributeError:\n pass\n return output", "docstring": "Sigmoid activation function.\n\n It is defined as: `sigmoid(x) = 1 / (1 + exp(-x))`.\n\n For small values (<-5),\n `sigmoid` returns a value close to zero, and for large values (>5)\n the result of the function gets close to 1.\n\n Sigmoid is equivalent to a 2-element softmax, where the second element is\n assumed to be zero. The sigmoid function always returns a value between\n 0 and 1.\n\nArgs:\n x: Input tensor.", "source": "github_repos"} -{"code": "def on_value_event(self, event):\n if self._dump_dir:\n self._write_value_event(event)\n else:\n value = event.summary.value[0]\n tensor_value = debug_data.load_tensor_from_event(event)\n self._event_listener_servicer.debug_tensor_values[value.node_name].append(tensor_value)\n items = event.summary.value[0].node_name.split(':')\n node_name = items[0]\n output_slot = int(items[1])\n debug_op = items[2]\n if (node_name, output_slot, debug_op) in self._event_listener_servicer.breakpoints:\n return debug_service_pb2.EventReply()", "docstring": "Implementation of the tensor value-carrying Event proto callback.\n\n Writes the Event proto to the file system for testing. The path written to\n follows the same pattern as the file:// debug URLs of tfdbg, i.e., the\n name scope of the op becomes the directory structure under the dump root\n directory.\n\nArgs:\n event: The Event proto carrying a tensor value.\n\nReturns:\n If the debug node belongs to the set of currently activated breakpoints,\n a `EventReply` proto will be returned.", "source": "github_repos"} -{"code": "def _aggregate_gradients(self, grads_and_vars):\n return self.gradient_aggregator(grads_and_vars)", "docstring": "Called in `apply_gradients` to aggregate gradients across devices.\n\n Note that user subclasses may override this, so the interface should not be\n changed.\n\nArgs:\n grads_and_vars: List of (gradient, variable) pairs.\n\nReturns:\n A list of (aggregated_gradient, variable) pairs. By default, this calls\n `self.gradient_aggregator`.", "source": "github_repos"} -{"code": "def add_test_class(self, config, test_class, tests=None, name_suffix=None):\n if self._log_dir != config.log_path:\n raise Error('TestRunner\\'s log folder is \"%s\", but a test config with a different log folder (\"%s\") was added.' % (self._log_dir, config.log_path))\n if self._testbed_name != config.testbed_name:\n raise Error('TestRunner\\'s test bed is \"%s\", but a test config with a different test bed (\"%s\") was added.' 
% (self._testbed_name, config.testbed_name))\n self._test_run_infos.append(TestRunner._TestRunInfo(config=config, test_class=test_class, tests=tests, test_class_name_suffix=name_suffix))", "docstring": "Adds tests to the execution plan of this TestRunner.\n\nArgs:\n config: config_parser.TestRunConfig, configuration to execute this\n test class with.\n test_class: class, test class to execute.\n tests: list of strings, optional list of test names within the\n class to execute.\n name_suffix: string, suffix to append to the class name for\n reporting. This is used for differentiating the same class\n executed with different parameters in a suite.\n\nRaises:\n Error: if the provided config has a log_path or testbed_name which\n differs from the arguments provided to this TestRunner's\n constructor.", "source": "github_repos"} -{"code": "def find(self, predicate, first_n=0, device_name=None, exclude_node_names=None):\n if exclude_node_names:\n exclude_node_names = re.compile(exclude_node_names)\n matched_data = []\n for device in self._dump_tensor_data if device_name is None else (self._dump_tensor_data[device_name],):\n for datum in self._dump_tensor_data[device]:\n if exclude_node_names and exclude_node_names.match(datum.node_name):\n continue\n if predicate(datum, datum.get_tensor()):\n matched_data.append(datum)\n if first_n > 0 and len(matched_data) >= first_n:\n return matched_data\n return matched_data", "docstring": "Find dumped tensor data by a certain predicate.\n\nArgs:\n predicate: A callable that takes two input arguments:\n\n ```python\n def predicate(debug_tensor_datum, tensor):\n # returns a bool\n ```\n\n where `debug_tensor_datum` is an instance of `DebugTensorDatum`, which\n carries the metadata, such as the `Tensor`'s node name, output slot\n timestamp, debug op name, etc.; and `tensor` is the dumped tensor value\n as a `numpy.ndarray`.\n first_n: (`int`) return only the first n `DebugTensotDatum` instances (in\n time order) for which the predicate returns True. 
To return all the\n `DebugTensotDatum` instances, let first_n be <= 0.\n device_name: optional device name.\n exclude_node_names: Optional regular expression to exclude nodes with\n names matching the regular expression.\n\nReturns:\n A list of all `DebugTensorDatum` objects in this `DebugDumpDir` object\n for which predicate returns True, sorted in ascending order of the\n timestamp.", "source": "github_repos"} -{"code": "def index_of(self, value_str):\n if value_str is None:\n value_str = ''\n if value_str in self._string_to_index:\n return self._string_to_index[value_str]\n index = len(self._string_table)\n self._string_table.append(value_str)\n self._string_to_index[value_str] = index\n return index", "docstring": "Get index of value_str in the string table.\n\n If value_str is not in the string table, we will add it at the end\n and then return the new index.\n\nArgs:\n value_str: (string) Value to lookup/add in/to the string table.\n\nReturns:\n Index of value_str in the string table.", "source": "github_repos"} -{"code": "def assertOutputStateMatches(self, **has_output):\n output_types = {'stdout', 'stderr', 'returncode'}\n assert len(output_types) == len(has_output)\n for output_type in output_types:\n output_value = getattr(self, output_type)\n if has_output[output_type]:\n self.assertTrue(output_value, output_type + ' unexpectedly empty')\n else:\n value = str(output_value)\n if len(value) > 50:\n value = value[:47] + '...'\n self.assertFalse(output_value, f'Unexpected output to {output_type}: {value!r}')", "docstring": "Check that the output state matches expectations.\n\n If, for example, you expect the program to print something to stdout and\n nothing to stderr before exiting with an error code, you would write\n assertOutputStateMatches(stdout=True, stderr=False, returncode=True).\n\nArgs:\n **has_output: Whether each output type should have output.", "source": "github_repos"} -{"code": "def __call__(self, shape, dtype=None):\n raise NotImplementedError('Initializer subclasses must implement the `__call__()` method.')", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor.", "source": "github_repos"} -{"code": "def assert_shallow_structure(shallow_tree, input_tree, check_types=True, expand_composites=False):\n nest_util.assert_shallow_structure(nest_util.Modality.CORE, shallow_tree, input_tree, check_types, expand_composites)", "docstring": "Asserts that `shallow_tree` is a shallow structure of `input_tree`.\n\n That is, this function tests if the `input_tree` structure can be created from\n the `shallow_tree` structure by replacing its leaf nodes with deeper\n tree structures.\n\nExample:\n The following code will raise an exception:\n ```python\n shallow_tree = {\"a\": \"A\", \"b\": \"B\"}\n input_tree = {\"a\": 1, \"c\": 2}\n assert_shallow_structure(shallow_tree, input_tree)\n ```\n\n The following code will raise an exception:\n ```python\n shallow_tree = [\"a\", \"b\"]\n input_tree = [\"c\", [\"d\", \"e\"], \"f\"]\n assert_shallow_structure(shallow_tree, input_tree)\n ```\n\nArgs:\n shallow_tree: an arbitrarily nested structure.\n input_tree: an arbitrarily nested structure.\n check_types: if `True` (default) the sequence types of `shallow_tree` and\n `input_tree` have to be the same. 
Note that even with check_types==True,\n this function will consider two different namedtuple classes with the same\n name and _fields attribute to be the same class.\n expand_composites: If true, then composite tensors such as\n `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their\n component tensors.\n\nRaises:\n TypeError: If `shallow_tree` is a sequence but `input_tree` is not.\n TypeError: If the sequence types of `shallow_tree` are different from\n `input_tree`. Only raised if `check_types` is `True`.\n ValueError: If the sequence lengths of `shallow_tree` are different from\n `input_tree`.", "source": "github_repos"} -{"code": "def __init__(self, opening_bracket, newline, opening_column):\n self.opening_bracket = opening_bracket\n self.has_split_before_first_param = newline\n self.opening_column = opening_column\n self.parameters = opening_bracket.parameters\n self.split_before_closing_bracket = False", "docstring": "Maintains the state of function parameter list formatting decisions.\n\nAttributes:\n opening_bracket: The opening bracket of the parameter list.\n closing_bracket: The closing bracket of the parameter list.\n has_typed_return: True if the function definition has a typed return.\n ends_in_comma: True if the parameter list ends in a comma.\n last_token: Returns the last token of the function declaration.\n has_default_values: True if the parameters have default values.\n has_split_before_first_param: Whether there is a newline before the first\n parameter.\n opening_column: The position of the opening parameter before a newline.\n parameters: A list of parameter objects (Parameter).\n split_before_closing_bracket: Split before the closing bracket. Sometimes\n needed if the indentation would collide.", "source": "github_repos"} -{"code": "def _var_key(var):\n if hasattr(var, '_distributed_container'):\n var = var._distributed_container()\n if var._in_graph_mode:\n return var._shared_name\n return var._unique_id", "docstring": "Key for representing a primary variable, for looking up slots.\n\n In graph mode the name is derived from the var shared name.\n In eager mode the name is derived from the var unique id.\n If distribution strategy exists, get the primary variable first.\n\nArgs:\n var: the variable.\n\nReturns:\n the unique name of the variable.", "source": "github_repos"} -{"code": "def preprocess_dataset(ingested_dataset_path: str, preprocessed_dataset_path: str, base_artifact_path: str, gcp_project_id: str, region: str, dataflow_staging_root: str, beam_runner: str):\n timestamp = time.time()\n target_path = f'{base_artifact_path}/preprocessing/preprocessed_dataset_{timestamp}'\n Path(preprocessed_dataset_path).parent.mkdir(parents=True, exist_ok=True)\n with open(preprocessed_dataset_path, 'w') as f:\n f.write(target_path)\n pipeline_options = PipelineOptions(runner=beam_runner, project=gcp_project_id, job_name=f'preprocessing-{int(time.time())}', temp_location=dataflow_staging_root, region=region, requirements_file='/requirements.txt', save_main_session=True)\n with beam.Pipeline(options=pipeline_options) as pipeline:\n pipeline | 'Read input jsonlines file' >> beam.io.ReadFromText(ingested_dataset_path) | 'Load json' >> beam.Map(json.loads) | 'Filter licenses' >> beam.Filter(valid_license) | 'Download image from URL' >> beam.FlatMap(download_image_from_url) | 'Resize image' >> beam.Map(resize_image, size=IMAGE_SIZE) | 'Clean Text' >> beam.Map(clean_text) | 'Serialize Example' >> beam.Map(serialize_example) | 'Write to Avro files' >> 
beam.io.WriteToAvro(file_path_prefix=target_path, schema={'namespace': 'preprocessing.example', 'type': 'record', 'name': 'Sample', 'fields': [{'name': 'id', 'type': 'int'}, {'name': 'caption', 'type': 'string'}, {'name': 'image', 'type': 'bytes'}]}, file_name_suffix='.avro')", "docstring": "Preprocess the ingested raw dataset and write the result to avro format.\n\nArgs:\n ingested_dataset_path (str): Path to the ingested dataset\n preprocessed_dataset_path (str): Path to where the preprocessed dataset will be saved\n base_artifact_path (str): path to the base directory of where artifacts can be stored for\n this component.\n gcp_project_id (str): ID for the google cloud project to deploy the pipeline to.\n region (str): Region in which to deploy the pipeline.\n dataflow_staging_root (str): Path to staging directory for the dataflow runner.\n beam_runner (str): Beam runner: DataflowRunner or DirectRunner.", "source": "github_repos"} -{"code": "def absolute(x):\n if any_symbolic_tensors((x,)):\n return Absolute().symbolic_call(x)\n return backend.numpy.absolute(x)", "docstring": "Compute the absolute value element-wise.\n\n `keras.ops.abs` is a shorthand for this function.\n\nArgs:\n x: Input tensor.\n\nReturns:\n An array containing the absolute value of each element in `x`.\n\nExample:\n >>> x = keras.ops.convert_to_tensor([-1.2, 1.2])\n >>> keras.ops.absolute(x)\n array([1.2, 1.2], dtype=float32)", "source": "github_repos"} -{"code": "def multiplier_with_docstring(num, rate=2):\n return num * rate", "docstring": "Multiplies num by rate.\n\nArgs:\n num (int): the num you want to multiply\n rate (int): the rate for multiplication\n\nReturns:\n Multiplication of num by rate", "source": "github_repos"} -{"code": "def __init__(self, source_shape, target_shape, layer_broadcasters, dtype=None):\n if not isinstance(source_shape, DynamicRaggedShape):\n raise TypeError('source_shape is not a DynamicRaggedShape')\n if not isinstance(target_shape, DynamicRaggedShape):\n raise TypeError('target_shape is not a DynamicRaggedShape')\n if not isinstance(layer_broadcasters, list):\n raise TypeError('layer_broadcasters not a list: ' + str(layer_broadcasters))\n for bc in layer_broadcasters:\n if not isinstance(bc, _LayerBroadcaster):\n raise TypeError('Not a LayerBroadcaster: ' + str(bc))\n dtype = _find_dtype(source_shape, dtype)\n dtype = _find_dtype(target_shape, dtype)\n dtype = _find_dtype_iterable(layer_broadcasters, dtype)\n dtype = _find_dtype(dtypes.int64, dtype)\n self._source_shape = source_shape.with_dtype(dtype)\n self._target_shape = target_shape.with_dtype(dtype)\n self._layer_broadcasters = [x.with_dtype(dtype) for x in layer_broadcasters]", "docstring": "Create a broadcaster.\n\n Do not call directly.\n The source_shape, target_shape, and layer_broadcasters are converted\n to have the same dtype.\n\n Note: source_shape.rank and target_shape.rank must be known.\n\nArgs:\n source_shape: the source DynamicRaggedShape\n target_shape: the target DynamicRaggedShape\n layer_broadcasters: List[_LayerBroadcaster] of length source_shape.rank.\n dtype: the preferred dtype of the broadcaster.\n\nRaises:\n TypeError: if the input types don't match.", "source": "github_repos"} -{"code": "def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float=1.0, name: Optional[str]=None):\n super().__init__()\n self.initial_learning_rate = initial_learning_rate\n self.warmup_steps = warmup_steps\n self.power = power\n self.decay_schedule_fn = decay_schedule_fn\n 
self.name = name", "docstring": "Applies a warmup schedule on a given learning rate decay schedule.\n\nArgs:\n initial_learning_rate (`float`):\n The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end\n of the warmup).\n decay_schedule_fn (`Callable`):\n The schedule function to apply after the warmup for the rest of training.\n warmup_steps (`int`):\n The number of steps for the warmup part of training.\n power (`float`, *optional*, defaults to 1.0):\n The power to use for the polynomial warmup (defaults is a linear warmup).\n name (`str`, *optional*):\n Optional name prefix for the returned tensors during the schedule.", "source": "github_repos"} -{"code": "def wait_for_session(self, master: str, config=None, max_wait_secs=float('Inf')) -> Optional[session.Session]:\n self._target = master\n if max_wait_secs is None:\n max_wait_secs = float('Inf')\n timer = _CountDownTimer(max_wait_secs)\n while True:\n sess = session.Session(self._target, graph=self._graph, config=config)\n not_ready_msg = None\n not_ready_local_msg = None\n local_init_success, not_ready_local_msg = self._try_run_local_init_op(sess)\n if local_init_success:\n is_ready, not_ready_msg = self._model_ready(sess)\n if is_ready:\n return sess\n self._safe_close(sess)\n remaining_ms_after_wait = timer.secs_remaining() - self._recovery_wait_secs\n if remaining_ms_after_wait < 0:\n raise errors.DeadlineExceededError(None, None, 'Session was not ready after waiting %d secs.' % (max_wait_secs,))\n logging.info('Waiting for model to be ready. Ready_for_local_init_op: %s, ready: %s', not_ready_local_msg, not_ready_msg)\n time.sleep(self._recovery_wait_secs)", "docstring": "Creates a new `Session` and waits for model to be ready.\n\n Creates a new `Session` on 'master'. Waits for the model to be\n initialized or recovered from a checkpoint. It's expected that\n another thread or process will make the model ready, and that this\n is intended to be used by threads/processes that participate in a\n distributed training configuration where a different thread/process\n is responsible for initializing or recovering the model being trained.\n\n NB: The amount of time this method waits for the session is bounded\n by max_wait_secs. By default, this function will wait indefinitely.\n\nArgs:\n master: `String` representation of the TensorFlow master to use.\n config: Optional ConfigProto proto used to configure the session.\n max_wait_secs: Maximum time to wait for the session to become available.\n\nReturns:\n A `Session`. May be None if the operation exceeds the timeout\n specified by config.operation_timeout_in_ms.\n\nRaises:\n tf.DeadlineExceededError: if the session is not available after\n max_wait_secs.", "source": "github_repos"} -{"code": "def query(self, src: Any, use_inferred: bool=False) -> Any:\n return self._query(0, src, use_inferred)", "docstring": "Query the value from the source object based on current path.\n\n Example::\n\n @pg.members([\n ('x', pg.typing.Int()),\n ('y', pg.typing.Str())\n ])\n class A(pg.Object):\n pass\n\n @pg.members([\n ('z', pg.typing.Object(A))\n ])\n class B(pg.Object):\n pass\n\n b = B(z=A(x=1, y='foo'))\n assert pg.KeyPath.parse('z.x').query(b) == 1\n\nArgs:\n src: Source value to query.\n use_inferred: If True, infer `pg.Inferential` values. Otherwise returns\n their symbolic form. 
Applicable only for symbolic values.\n\nReturns:\n Value from src if path exists.\n\nRaises:\n KeyError: Path doesn't exist in src.\n RuntimeError: Called on a KeyPath that is considered as removed.", "source": "github_repos"} -{"code": "def send_rpc_request(self, request):", "docstring": "Sends the JSON RPC request to the server and gets a response.\n\n Note that the request and response are both in string format. So if the\n connection with server provides interfaces in bytes format, please\n transform them to string in the implementation of this function.\n\nArgs:\n request: str, a string of the RPC request.\n\nReturns:\n A string of the RPC response.\n\nRaises:\n errors.ProtocolError: something went wrong when exchanging data with the\n server.", "source": "github_repos"} -{"code": "def _StatelessRandomGammaV3Grad(op: ops.Operation, grad):\n shape = op.inputs[0]\n alpha = op.inputs[4]\n sample = op.outputs[0]\n with ops.control_dependencies([grad]):\n return (None, None, None, None, _StatelessGammaGradAlpha(shape, alpha, sample, grad))", "docstring": "Returns the gradient of a Gamma sample w.r.t. alpha.\n\n The gradient is computed using implicit differentiation\n (Figurnov et al., 2018).\n\nArgs:\n op: A `StatelessRandomGamma` operation. We assume that the inputs to the\n operation are `shape`, `key`, `counter`, `alg`, and `alpha` tensors, and\n the output is the `sample` tensor.\n grad: The incoming gradient `dloss / dsample` of the same shape as\n `op.outputs[0]`.\n\nReturns:\n A `Tensor` with derivatives `dloss / dalpha`.\n\n References:\n Implicit Reparameterization Gradients:\n [Figurnov et al., 2018]\n (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)\n ([pdf]\n (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))", "source": "github_repos"} -{"code": "def _create_hash_str(self, input_arg, output_arg, node_def):\n hasher = hashlib.sha1()\n\n def update_num(n):\n hasher.update(compat.as_bytes('%x' % n))\n\n def update_str(s):\n update_num(len(s))\n hasher.update(compat.as_bytes(s))\n\n def update_strs(slist):\n update_num(len(slist))\n for s in slist:\n update_str(s)\n for adef in input_arg:\n update_str(adef.SerializeToString())\n for adef in output_arg:\n update_str(adef.SerializeToString())\n for n in sorted(node_def, key=lambda n: n.name):\n update_str(n.name)\n update_str(n.op)\n update_strs(n.input)\n update_num(len(n.attr))\n for k in sorted(n.attr):\n update_str(k)\n update_str(n.attr[k].SerializeToString())\n return hasher.hexdigest()[:8]", "docstring": "Creates an 8-character string unique to this input.\n\nArgs:\n input_arg: the input_arg field of an OpDef\n (e.g. self._definition.signature.input_arg)\n output_arg: the output_arg field of an OpDef\n (e.g. self._definition.signature.output_arg)\n node_def: the node_def field of a FunctionDef\n (e.g. 
self._definition.node_def)\n\nReturns:\n The unique string for this input", "source": "github_repos"} -{"code": "def parallel_walk(node, other):\n if isinstance(node, (list, tuple)):\n node_stack = list(node)\n else:\n node_stack = [node]\n if isinstance(other, (list, tuple)):\n other_stack = list(other)\n else:\n other_stack = [other]\n while node_stack and other_stack:\n assert len(node_stack) == len(other_stack)\n n = node_stack.pop()\n o = other_stack.pop()\n if not isinstance(n, (ast.AST, gast.AST, str)) and n is not None or (not isinstance(o, (ast.AST, gast.AST, str)) and n is not None) or n.__class__.__name__ != o.__class__.__name__:\n raise ValueError('inconsistent nodes: {} ({}) and {} ({})'.format(n, n.__class__.__name__, o, o.__class__.__name__))\n yield (n, o)\n if isinstance(n, str):\n assert isinstance(o, str), 'The check above should have ensured this'\n continue\n if n is None:\n assert o is None, 'The check above should have ensured this'\n continue\n for f in n._fields:\n n_child = getattr(n, f, None)\n o_child = getattr(o, f, None)\n if f.startswith('__') or n_child is None or o_child is None:\n continue\n if isinstance(n_child, (list, tuple)):\n if not isinstance(o_child, (list, tuple)) or len(n_child) != len(o_child):\n raise ValueError('inconsistent values for field {}: {} and {}'.format(f, n_child, o_child))\n node_stack.extend(n_child)\n other_stack.extend(o_child)\n elif isinstance(n_child, (gast.AST, ast.AST)):\n node_stack.append(n_child)\n other_stack.append(o_child)\n elif n_child != o_child:\n raise ValueError('inconsistent values for field {}: {} and {}'.format(f, n_child, o_child))", "docstring": "Walks two ASTs in parallel.\n\n The two trees must have identical structure.\n\nArgs:\n node: Union[ast.AST, Iterable[ast.AST]]\n other: Union[ast.AST, Iterable[ast.AST]]\n\nYields:\n Tuple[ast.AST, ast.AST]\n\nRaises:\n ValueError: if the two trees don't have identical structure.", "source": "github_repos"} -{"code": "def _skip_remaining_tests(self, exception):\n for test_name in self.results.requested:\n if not self.results.is_test_executed(test_name):\n test_record = records.TestResultRecord(test_name, self.TAG)\n test_record.test_skip(exception)\n self.results.add_record(test_record)\n self.summary_writer.dump(test_record.to_dict(), records.TestSummaryEntryType.RECORD)", "docstring": "Marks any requested test that has not been executed in a class as\n skipped.\n\n This is useful for handling abort class signal.\n\nArgs:\n exception: The exception object that was thrown to trigger the\n skip.", "source": "github_repos"} -{"code": "def _read_protocol_line(self):\n while True:\n line = self._proc.stdout.readline().decode('utf-8')\n if not line:\n raise jsonrpc_client_base.AppStartError(self._ad, 'Unexpected EOF waiting for app to start')\n line = line.strip()\n if line.startswith('INSTRUMENTATION_RESULT:') or line.startswith('SNIPPET '):\n self.log.debug('Accepted line from instrumentation output: \"%s\"', line)\n return line\n self.log.debug('Discarded line from instrumentation output: \"%s\"', line)", "docstring": "Reads the next line of instrumentation output relevant to snippets.\n\n This method will skip over lines that don't start with 'SNIPPET' or\n 'INSTRUMENTATION_RESULT'.\n\nReturns:\n (str) Next line of snippet-related instrumentation output, stripped.\n\nRaises:\n jsonrpc_client_base.AppStartError: If EOF is reached without any\n protocol lines being read.", "source": "github_repos"} -{"code": "def _get_unpad_data(attention_mask: torch.Tensor) -> 
tuple[torch.Tensor, torch.Tensor, int]:\n seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)\n indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()\n max_seqlen_in_batch = seqlens_in_batch.max().item()\n cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))\n return (indices, cu_seqlens, max_seqlen_in_batch)", "docstring": "Retrieves indexing data required to repad unpadded (ragged) tensors.\n\nArgs:\n attention_mask (`torch.Tensor`):\n Boolean or int tensor of shape (batch_size, sequence_length), 1 means valid and 0 means not valid.\n\nReturns:\n indices (`torch.Tensor`):\n The indices of non-masked tokens from the flattened input sequence.\n cu_seqlens (`torch.Tensor`):\n The cumulative sequence lengths, used to index into ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).\n max_seqlen_in_batch (`int`):\n Maximum sequence length in batch.", "source": "github_repos"} -{"code": "def load_model(filepath, custom_objects=None, compile=True, safe_mode=True):\n is_keras_zip = str(filepath).endswith('.keras') and zipfile.is_zipfile(filepath)\n is_keras_dir = file_utils.isdir(filepath) and file_utils.exists(file_utils.join(filepath, 'config.json'))\n is_hf = str(filepath).startswith('hf://')\n if file_utils.is_remote_path(filepath) and (not file_utils.isdir(filepath)) and (not is_keras_zip) and (not is_hf):\n local_path = file_utils.join(saving_lib.get_temp_dir(), os.path.basename(filepath))\n file_utils.copy(filepath, local_path)\n if zipfile.is_zipfile(local_path):\n filepath = local_path\n is_keras_zip = True\n if is_keras_zip or is_keras_dir or is_hf:\n return saving_lib.load_model(filepath, custom_objects=custom_objects, compile=compile, safe_mode=safe_mode)\n if str(filepath).endswith(('.h5', '.hdf5')):\n return legacy_h5_format.load_model_from_hdf5(filepath, custom_objects=custom_objects, compile=compile)\n elif str(filepath).endswith('.keras'):\n raise ValueError(f'File not found: filepath={filepath}. Please ensure the file is an accessible `.keras` zip file.')\n else:\n raise ValueError(f\"File format not supported: filepath={filepath}. Keras 3 only supports V3 `.keras` files and legacy H5 format files (`.h5` extension). Note that the legacy SavedModel format is not supported by `load_model()` in Keras 3. In order to reload a TensorFlow SavedModel as an inference-only layer in Keras 3, use `keras.layers.TFSMLayer({filepath}, call_endpoint='serving_default')` (note that your `call_endpoint` might have a different name).\")", "docstring": "Loads a model saved via `model.save()`.\n\nArgs:\n filepath: `str` or `pathlib.Path` object, path to the saved model file.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n compile: Boolean, whether to compile the model after loading.\n safe_mode: Boolean, whether to disallow unsafe `lambda` deserialization.\n When `safe_mode=False`, loading an object has the potential to\n trigger arbitrary code execution. This argument is only\n applicable to the Keras v3 model format. Defaults to `True`.\n\nReturns:\n A Keras model instance. If the original model was compiled,\n and the argument `compile=True` is set, then the returned model\n will be compiled. 
Otherwise, the model will be left uncompiled.\n\nExample:\n ```python\n model = keras.Sequential([\n keras.layers.Dense(5, input_shape=(3,)),\n keras.layers.Softmax()])\n model.save(\"model.keras\")\n loaded_model = keras.saving.load_model(\"model.keras\")\n x = np.random.random((10, 3))\n assert np.allclose(model.predict(x), loaded_model.predict(x))\n ```\n\n Note that the model variables may have different name values\n (`var.name` property, e.g. `\"dense_1/kernel:0\"`) after being reloaded.\n It is recommended that you use layer attributes to\n access specific variables, e.g. `model.get_layer(\"dense_1\").kernel`.", "source": "github_repos"} -{"code": "def get_platform():\n global PLATFORM\n cmd = 'uname'\n out, err = run_shell_cmd(cmd)\n platform_detected = out.strip().lower()\n if platform_detected != 'linux':\n if err and FLAGS.debug:\n print('Error in detecting platform:\\n %s' % str(err))\n print('Error: Detected unsupported operating system.\\nStopping...')\n sys.exit(1)\n else:\n PLATFORM = platform_detected\n return PLATFORM", "docstring": "Retrieves platform information.\n\n Currently the script only support linux. If other platoforms such as Windows\n or MacOS is detected, it throws an error and terminates.\n\nReturns:\n String that is platform type.\n e.g. 'linux'", "source": "github_repos"} -{"code": "def solve(self):\n hierarchy = type_match.get_all_subclasses([self.ast, self.builtins])\n factory_protocols = type_match.TypeMatch(hierarchy)\n factory_partial = type_match.TypeMatch(hierarchy)\n solver_protocols = factory_protocols.solver\n solver_partial = factory_partial.solver\n unknown_classes = set()\n partial_classes = set()\n complete_classes = set()\n for cls in self.ast.classes:\n if is_unknown(cls):\n solver_protocols.register_variable(cls.name)\n solver_partial.register_variable(cls.name)\n unknown_classes.add(cls)\n elif is_partial(cls):\n partial_classes.add(cls)\n else:\n complete_classes.add(cls)\n protocol_classes_and_aliases = set(self.protocols.classes)\n for alias in self.protocols.aliases:\n if not isinstance(alias.type, pytd.AnythingType) and alias.name != 'protocols.Protocol':\n protocol_classes_and_aliases.add(alias.type.cls)\n for protocol in protocol_classes_and_aliases:\n for unknown in unknown_classes:\n self.match_unknown_against_protocol(factory_protocols, solver_protocols, unknown, protocol)\n for complete in complete_classes.union(self.builtins.classes):\n for partial in partial_classes:\n if escape.unpack_partial(partial.name) == complete.name:\n self.match_partial_against_complete(factory_partial, solver_partial, partial, complete)\n partial_functions = set()\n complete_functions = set()\n for f in self.ast.functions:\n if is_partial(f):\n partial_functions.add(f)\n else:\n complete_functions.add(f)\n for partial in partial_functions:\n for complete in complete_functions.union(self.builtins.functions):\n if escape.unpack_partial(partial.name) == complete.name:\n self.match_call_record(factory_partial, solver_partial, partial, complete)\n log.info('=========== Equations to solve =============\\n%s', solver_protocols)\n log.info('=========== Equations to solve (end) =======')\n solved_protocols = solver_protocols.solve()\n log.info('=========== Call trace equations to solve =============\\n%s', solver_partial)\n log.info('=========== Call trace equations to solve (end) =======')\n solved_partial = solver_partial.solve()\n merged_solution = {}\n for unknown in itertools.chain(solved_protocols, solved_partial):\n if unknown in solved_protocols and 
unknown in solved_partial:\n merged_solution[unknown] = solved_protocols[unknown].union(solved_partial[unknown])\n merged_solution[unknown].discard('?')\n elif unknown in solved_protocols:\n merged_solution[unknown] = solved_protocols[unknown]\n else:\n merged_solution[unknown] = solved_partial[unknown]\n return merged_solution", "docstring": "Solve the equations generated from the pytd.\n\nReturns:\n A dictionary (str->str), mapping unknown class names to known class names.\n\nRaises:\n AssertionError: If we detect an internal error.", "source": "github_repos"} -{"code": "def _mark_func_graph_as_unsaveable(graph, learning_phase):\n if graph.building_function and is_placeholder(learning_phase):\n graph.mark_as_unsaveable('The keras learning phase placeholder was used inside a function. Exporting placeholders is not supported when saving out a SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` in the function to set the learning phase to a constant value.')", "docstring": "Mark func graph as unsaveable due to use of symbolic keras learning phase.\n\n Functions that capture the symbolic learning phase cannot be exported to\n SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised\n if it is exported.\n\nArgs:\n graph: Graph or FuncGraph object.\n learning_phase: Learning phase placeholder or int defined in the graph.", "source": "github_repos"} -{"code": "def format_speech_generation_kwargs(kwargs):\n kwargs_text = {}\n kwargs_speech = {}\n for key, value in kwargs.items():\n if key.startswith('text_'):\n key = key[len('text_'):]\n kwargs_text[key] = value\n elif key.startswith('speech_'):\n key = key[len('speech_'):]\n kwargs_speech[key] = value\n elif key == 'generation_config':\n kwargs_text[key] = value\n else:\n if key not in kwargs_text:\n kwargs_text[key] = value\n if key not in kwargs_speech:\n kwargs_speech[key] = value\n return (kwargs_text, kwargs_speech)", "docstring": "Format kwargs for SeamlessM4T models that generate speech, attribute kwargs to either the text generation or the\n speech generation models.\n\nArgs:\n kwargs (`dict`)`:\n Keyword arguments are of two types:\n\n - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,\n except for `decoder_input_ids` which will only be passed through the text components.\n - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the\n text model and speech model respectively. It has the priority over the keywords without a prefix.\n\n This means you can, for example, specify a generation strategy for one generation but not for the\n other.", "source": "github_repos"} -{"code": "def assign_add(self, delta, use_locking=None, name=None, read_value=True):\n with _handle_graph(self.handle), self._assign_dependencies():\n assign_add_op = gen_resource_variable_ops.assign_add_variable_op(self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name)\n if read_value:\n return self._lazy_read(assign_add_op)\n return assign_add_op", "docstring": "Adds a value to this variable.\n\nArgs:\n delta: A `Tensor`. The value to add to this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name to use for the operation.\n read_value: A `bool`. Whether to read and return the new value of the\n variable or not.\n\nReturns:\n If `read_value` is `True`, this method will return the new value of the\n variable after the assignment has completed. 
Otherwise, when in graph mode\n it will return the `Operation` that does the assignment, and when in eager\n mode it will return `None`.", "source": "github_repos"} -{"code": "def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None) -> None:\n if to_proto and (not callable(to_proto)):\n raise TypeError('to_proto must be callable.')\n if from_proto and (not callable(from_proto)):\n raise TypeError('from_proto must be callable.')\n _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name)", "docstring": "Registers `to_proto` and `from_proto` functions for collection_name.\n\n `to_proto` function converts a Python object to the corresponding protocol\n buffer, and returns the protocol buffer.\n\n `from_proto` function converts protocol buffer into a Python object, and\n returns the object..\n\nArgs:\n collection_name: Name of the collection.\n proto_type: Protobuf type, such as `saver_pb2.SaverDef`,\n `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`..\n to_proto: Function that implements Python object to protobuf conversion.\n from_proto: Function that implements protobuf to Python object conversion.", "source": "github_repos"} -{"code": "def counter(namespace: Union[Type, str], name: str) -> 'Metrics.DelegatingCounter':\n namespace = Metrics.get_namespace(namespace)\n return Metrics.DelegatingCounter(MetricName(namespace, name))", "docstring": "Obtains or creates a Counter metric.\n\nArgs:\n namespace: A class or string that gives the namespace to a metric\n name: A string that gives a unique name to a metric\n\nReturns:\n A Counter object.", "source": "github_repos"} -{"code": "def run(code: str, *, global_vars: Optional[Dict[str, Any]]=None, permission: Optional[permissions.CodePermission]=None, returns_stdout: bool=False, outputs_intermediate: bool=False, sandbox: Optional[bool]=None, timeout: Optional[float]=None) -> Union[Any, Dict[str, Any]]:\n return maybe_sandbox_call(evaluate, code=code, global_vars=global_vars, permission=permission, returns_stdout=returns_stdout, outputs_intermediate=outputs_intermediate, sandbox=sandbox, timeout=timeout)", "docstring": "Executes Python code.\n\n Features:\n * Fine-grained execution policy for limiting what APIs could be executed.\n This eliminates the need for sandboxing.\n * It exposes both the final results and intermediate results (variables).\n\nArgs:\n code: Python code to run.\n global_vars: An optional dict of\n permission: Permission for the Python code to run.\n returns_stdout: If True, the stdout (a str) will be returned.\n outputs_intermediate: Applicable when returns_stdout is False. If True,\n intermediate output will be outputted as a dict, with the last line's\n value accessible by key '__result__' and the std output accessible by\n key '__stdout__'. Otherwise the value of the last line will be returned.\n sandbox: If True, run code in sandbox; If False, run code in current\n process. If None, run in sandbox first, if the output could not be\n serialized and pass to current process, run the code again in current\n process.\n timeout: Execution timeout in seconds. If None, wait the code the complete.\n\nReturns:\n The value of the last line of the code block. Or a dict of variable\n names of all locals to their evaluated values as the output of the code to\n run. The value for the last line can be accessed by key '__result__'. 
Or the\n stdout as a str.\n\nRaises:\n TimeoutError: If the execution time exceeds the timeout.\n Exception: Exception that are raised from the code.", "source": "github_repos"} -{"code": "def scramble_generating_matrices(generating_matrices: types.IntTensor, scrambling_matrices: types.IntTensor, num_digits: types.IntTensor, validate_args: bool=False, dtype: tf.DType=None, name: str=None):\n with tf.name_scope(name or 'scramble_generating_matrices'):\n if dtype is None:\n generating_matrices = tf.convert_to_tensor(generating_matrices)\n dtype = dtype or generating_matrices.dtype\n num_digits = tf.convert_to_tensor(num_digits, dtype=dtype, name='num_digits')\n generating_matrices = tf.cast(generating_matrices, dtype=dtype, name='generating_matrices')\n scrambling_matrices = tf.cast(scrambling_matrices, dtype=dtype, name='scrambling_matrices')\n control_deps = []\n if validate_args:\n control_deps.append(tf.debugging.assert_equal(tf.rank(generating_matrices), tf.rank(scrambling_matrices), message='input matrices must have the same rank'))\n control_deps.append(tf.debugging.assert_positive(num_digits, message='num_digits must be positive'))\n with tf.control_dependencies(control_deps):\n\n def loop_predicate_fn(matrix, shift):\n del matrix\n return shift < num_digits\n\n def loop_body_fn(matrix, shift):\n shifted_scrambling_matrices = tf.bitwise.right_shift(tf.gather(scrambling_matrices, [shift], axis=1), shift)\n updated_matrix = tf.bitwise.bitwise_xor(matrix, utils.filter_tensor(shifted_scrambling_matrices, generating_matrices, num_digits - 1 - shift))\n return (updated_matrix, shift + 1)\n matrix, _ = tf.while_loop(loop_predicate_fn, loop_body_fn, loop_vars=(tf.zeros_like(generating_matrices), tf.constant(0, dtype=dtype)), maximum_iterations=tf.cast(num_digits, tf.int32))\n return matrix", "docstring": "Scrambles a generating matrix.\n\n #### Examples\n\n ```python\n import tf_quant_finance as tff\n\n # Example: Scrambling the 2D Sobol generating matrices.\n\n dim = 2\n num_results = 1000\n num_digits = 10\n seed = (2, 3)\n\n tff.math.qmc.scramble_generating_matrices(\n tff.math.qmc.sobol_generating_matrices(dim, num_results, num_digits),\n tff.math.qmc.random_scrambling_matrices(dim, num_digits, seed=seed),\n num_digits)\n # ==> tf.Tensor([\n # [586, 505, 224, 102, 34, 31, 13, 6, 2, 1],\n # [872, 695, 945, 531, 852, 663, 898, 568, 875, 693],\n # ], shape=(2, 10), dtype=int32)\n ```\n\nArgs:\n generating_matrices: Positive scalar `Tensor` of integers.\n scrambling_matrices: Positive Scalar `Tensor` of integers with the same\n `shape` as `generating_matrices`.\n num_digits: Positive scalar `Tensor` of integers with rank 0. The base-2\n precision of the points which can be sampled from `generating_matrices`.\n validate_args: Python `bool` indicating whether to validate arguments.\n Default value: `False`.\n dtype: Optional `dtype`. 
The `dtype` of the output `Tensor` (either `int32`\n or `int64`).\n Default value: `None` which maps to `generating_matrices.dtype`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` which maps to `scramble_generating_matrices`.\n\nReturns:\n A `Tensor` with the same `shape` and `dtype` as `generating_matrices`.", "source": "github_repos"} -{"code": "def resize(self, image: torch.Tensor, size: SizeDict, patch_size: SizeDict, interpolation: 'F.InterpolationMode'=None, **kwargs) -> torch.Tensor:\n interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR\n if size.longest_edge:\n size = (size.longest_edge, size.longest_edge)\n elif size.height and size.width:\n size = (size.height, size.width)\n else:\n raise ValueError(\"size must contain either 'longest_edge' or 'height' and 'width'.\")\n if patch_size.height and patch_size.width:\n patch_size = (patch_size.height, patch_size.width)\n else:\n raise ValueError(\"patch_size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size)\n return F.resize(image, size=output_size, interpolation=interpolation, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\nArgs:\n image (`torch.Tensor`):\n Image to resize.\n size (`SizeDict`):\n Dict containing the longest possible edge of the image.\n patch_size (`SizeDict`):\n Patch size used to calculate the size of the output image.\n interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):\n Resampling filter to use when resiizing the image.", "source": "github_repos"} -{"code": "def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs = self.glpn(pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)\n hidden_states = outputs.hidden_states if return_dict else outputs[1]\n out = self.decoder(hidden_states)\n predicted_depth = self.head(out)\n loss = None\n if labels is not None:\n loss_fct = SiLogLoss()\n loss = loss_fct(predicted_depth, labels)\n if not return_dict:\n if output_hidden_states:\n output = (predicted_depth,) + outputs[1:]\n else:\n output = (predicted_depth,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return DepthEstimatorOutput(loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)", "docstring": "labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):\n Ground truth depth estimation maps for computing the loss.\n\nExample:\n ```python\n >>> from transformers import AutoImageProcessor, GLPNForDepthEstimation\n >>> import torch\n >>> import numpy as np\n >>> from PIL import Image\n >>> import requests\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_processor = 
AutoImageProcessor.from_pretrained(\"vinvino02/glpn-kitti\")\n >>> model = GLPNForDepthEstimation.from_pretrained(\"vinvino02/glpn-kitti\")\n\n >>> # prepare image for the model\n >>> inputs = image_processor(images=image, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... outputs = model(**inputs)\n\n >>> # interpolate to original size\n >>> post_processed_output = image_processor.post_process_depth_estimation(\n ... outputs,\n ... target_sizes=[(image.height, image.width)],\n ... )\n\n >>> # visualize the prediction\n >>> predicted_depth = post_processed_output[0][\"predicted_depth\"]\n >>> depth = predicted_depth * 255 / predicted_depth.max()\n >>> depth = depth.detach().cpu().numpy()\n >>> depth = Image.fromarray(depth.astype(\"uint8\"))\n ```", "source": "github_repos"} -{"code": "def _update_sample_weight_modes(self, sample_weights=None):\n if not self._is_compiled:\n return\n if sample_weights and any((s is not None for s in sample_weights)):\n for endpoint in self._training_endpoints:\n endpoint.sample_weight_mode = endpoint.sample_weight_mode or 'samplewise'\n else:\n for endpoint in self._training_endpoints:\n endpoint.sample_weight_mode = None", "docstring": "Updates sample weight modes based on training/eval inputs.\n\n Sample weight placeholders will be created for all or no outputs\n based on whether sample_weight is provided for any output.\n\n If model contains `_sample_weight_modes` we check if the input\n `sample_weights` corresponds to the sample weight modes.\n 1. Set sample weight mode to be 'temporal' for output i, if `compile`\n sample_weight_mode was set to `temporal` and sample weight inputs\n are given for one or more outputs.\n 2. Set sample weight mode to be 'samplewise' for output i, if `compile`\n sample_weight_mode was not set and sample weight inputs are given for\n one or more outputs.\n 3. Reset sample weight mode to None for output i if sample weight mode\n was set but there is no sample weight input.\n\nArgs:\n sample_weights: List of sample weights of the same length as model outputs\n or None.", "source": "github_repos"} -{"code": "def materialize_value_sets(self, value_set_protos: Iterable[value_set_pb2.ValueSet], batch_size: int=500) -> None:\n self._create_valueset_codes_table_if_not_exists()\n columns = [sqlalchemy.column('valueseturi'), sqlalchemy.column('valuesetversion'), sqlalchemy.column('system'), sqlalchemy.column('code')]\n sa_table = sqlalchemy.table(self._value_set_codes_table, *columns)\n queries = value_set_tables.valueset_codes_insert_statement_for(value_set_protos, sa_table, batch_size=batch_size)\n for query in queries:\n with self._client.connect() as curs:\n curs.execute(query)", "docstring": "Materialize the given value sets into the value_set_codes_table.\n\n Then writes these expanded codes into the database\n named after the `value_set_codes_table` provided at class initialization.\n Builds a valueset_codes table as described by\n https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md#valueset-support\n\n The table will be created if it does not already exist.\n\n The function will avoid inserting duplicate rows if some of the codes are\n already present in the given table. It will not attempt to perform an\n 'upsert' or modify any existing rows.\n\n Note that value sets provided to this function should already be expanded,\n in that they contain the code values to write. 
Users should also see\n `materialize_value_set_expansion` below to retrieve an expanded set from\n a terminology server.\n\nArgs:\n value_set_protos: An iterable of FHIR ValueSet protos.\n batch_size: The maximum number of rows to insert in a single query.", "source": "github_repos"} -{"code": "def loop(self, timer_interval_secs, target, args=None, kwargs=None):\n looper = coordinator.LooperThread(self._coord, timer_interval_secs, target=target, args=args, kwargs=kwargs)\n looper.start()\n return looper", "docstring": "Start a LooperThread that calls a function periodically.\n\n If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`\n repeatedly. Otherwise it calls it every `timer_interval_secs`\n seconds. The thread terminates when a stop is requested.\n\n The started thread is added to the list of threads managed by the supervisor\n so it does not need to be passed to the `stop()` method.\n\nArgs:\n timer_interval_secs: Number. Time boundaries at which to call `target`.\n target: A callable object.\n args: Optional arguments to pass to `target` when calling it.\n kwargs: Optional keyword arguments to pass to `target` when calling it.\n\nReturns:\n The started thread.", "source": "github_repos"} -{"code": "def __init__(self, dim, means=0.0, volatilities=1.0, corr_matrix=None, dtype=None, name=None):\n self._name = name or 'multivariate_geometric_brownian_motion'\n with tf.name_scope(self._name):\n self._means = tf.convert_to_tensor(means, dtype=dtype, name='means')\n self._dtype = self._means.dtype\n self._vols = tf.convert_to_tensor(volatilities, dtype=self._dtype, name='volatilities')\n self._dim = dim\n if corr_matrix is None:\n self._corr_matrix = None\n else:\n self._corr_matrix = tf.convert_to_tensor(corr_matrix, dtype=self._dtype, name='corr_matrix')\n if self._corr_matrix.shape.as_list() != [dim, dim]:\n raise ValueError('`corr_matrix` must be of shape [{0}, {0}] but is of shape {1}'.format(dim, self._corr_matrix.shape.as_list()))", "docstring": "Initializes the Multivariate Geometric Brownian Motion.\n\nArgs:\n dim: A Python scalar. The dimensionality of the process\n means: A real `Tensor` of shape broadcastable to `[dim]`.\n Corresponds to the vector of means of the GBM components `X_i`.\n Default value: 0.0.\n volatilities: A `Tensor` of the same `dtype` as `means` and of shape\n broadcastable to `[dim]`. Corresponds to the volatilities of the GBM\n components `X_i`.\n Default value: 1.0.\n corr_matrix: An optional `Tensor` of the same `dtype` as `means` and of\n shape `[dim, dim]`. Correlation of the GBM components `W_i`.\n Default value: `None` which maps to a process with\n independent GBM components `X_i`.\n dtype: The default dtype to use when converting values to `Tensor`s.\n Default value: `None` which means that default dtypes inferred by\n TensorFlow are used.\n name: Python string. 
The name to give to the ops created by this class.\n Default value: `None` which maps to the default name\n 'multivariate_geometric_brownian_motion'.\n\nRaises:\n ValueError: If `corr_matrix` is supplied and is not of shape `[dim, dim]`", "source": "github_repos"} -{"code": "def forward(self, patch_input: torch.Tensor):\n if self.mask_type == 'random':\n masked_input, mask = random_masking(inputs=patch_input, mask_ratio=self.random_mask_ratio, unmasked_channel_indices=self.unmasked_channel_indices, channel_consistent_masking=self.channel_consistent_masking, mask_value=self.mask_value)\n elif self.mask_type == 'forecast':\n masked_input, mask = forecast_masking(inputs=patch_input, num_forecast_mask_patches=self.num_forecast_mask_patches, unmasked_channel_indices=self.unmasked_channel_indices, mask_value=self.mask_value)\n else:\n raise ValueError(f'Invalid mask type {self.mask_type}.')\n mask = mask.bool()\n return (masked_input, mask)", "docstring": "Parameters:\n patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):\n Patch input\n\nReturns:\n masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)\n Masked patched input\n mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)\n Bool tensor indicating True on masked points", "source": "github_repos"} -{"code": "def to_variable(self, node: 'cfg.CFGNode') -> 'cfg.Variable':\n return self.ctx.program.NewVariable([self], source_set=[], where=node)", "docstring": "Build a variable out of this abstract value.\n\nArgs:\n node: The current CFG node.\n\nReturns:\n A cfg.Variable.", "source": "github_repos"} -{"code": "def flip_channel_order(self, image):\n self._ensure_format_supported(image)\n if isinstance(image, PIL.Image.Image):\n image = self.to_numpy_array(image)\n return image[::-1, :, :]", "docstring": "Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of\n `image` to a NumPy array if it's a PIL Image.\n\nArgs:\n image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\n The image whose color channels to flip. 
If `np.ndarray` or `torch.Tensor`, the channel dimension should\n be first.", "source": "github_repos"} -{"code": "def _format_value(cls, value, type_):\n res = value\n if type_ == 'CLASS':\n res = '{}.{}'.format(value.__module__, value.__name__)\n elif type_ == 'DURATION':\n res = value.total_seconds() * 1000\n elif type_ == 'TIMESTAMP':\n res = calendar.timegm(value.timetuple()) * 1000 + value.microsecond // 1000\n return res", "docstring": "Returns the API representation of a value given its type.\n\nArgs:\n value: The value of the item that needs to be shortened.\n type_(string): The type of the value.\n\nReturns:\n A formatted value in the form of a float, int, or string.", "source": "github_repos"} -{"code": "def find_labels(model_class):\n model_name = model_class.__name__\n framework = infer_framework(model_class)\n if framework == 'tf':\n signature = inspect.signature(model_class.call)\n elif framework == 'pt':\n signature = inspect.signature(model_class.forward)\n else:\n signature = inspect.signature(model_class.__call__)\n if 'QuestionAnswering' in model_name:\n return [p for p in signature.parameters if 'label' in p or p in ('start_positions', 'end_positions')]\n else:\n return [p for p in signature.parameters if 'label' in p]", "docstring": "Find the labels used by a given model.\n\nArgs:\n model_class (`type`): The class of the model.", "source": "github_repos"} -{"code": "def get_merged_audio_embeddings(self, input_ids: torch.Tensor, audio_features: torch.Tensor, input_features_mask: Optional[torch.Tensor]=None) -> torch.Tensor:\n is_audio_index = input_ids == self.config.audio_token_id\n llm_input_ids = torch.where(is_audio_index, 0, input_ids)\n inputs_embeds = self.language_model.get_input_embeddings()(llm_input_ids)\n special_audio_mask = is_audio_index.unsqueeze(-1)\n audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)\n if input_features_mask is not None:\n if torch.all(is_audio_index.int().sum(dim=1) != input_features_mask.int().sum(dim=1)).item():\n raise ValueError('Number of audio tokens does not match number of audio features')\n audio_features = audio_features[input_features_mask]\n inputs_embeds = inputs_embeds.masked_scatter(special_audio_mask, audio_features)\n return inputs_embeds", "docstring": "Adds the audio token to the model's LLM vocabulary so that we can pass it\n through the tokenizer; it's assumed that the embeddings corresponding to the\n <|audio|> token will be clobbered with speech features.\n\nArgs:\n input_ids (`torch.Tensor`):\n Input IDs containing one or more audio tokens.\n audio_features (`torch.Tensor`):\n Audio features to be masked into the language embeddings to form multimodal embeddings.\n input_features_mask (`torch.Tensor`, *optional*, defaults to `None`)\n Mask to be applied to audio features prior to scattering into the language embeddings.", "source": "github_repos"} -{"code": "def predict_signature_def(inputs, outputs):\n if inputs is None or not inputs:\n raise ValueError('Prediction `inputs` cannot be None or empty.')\n if outputs is None or not outputs:\n raise ValueError('Prediction `outputs` cannot be None or empty.')\n signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}\n signature_outputs = {key: utils.build_tensor_info(tensor) for key, tensor in outputs.items()}\n signature_def = build_signature_def(signature_inputs, signature_outputs, signature_constants.PREDICT_METHOD_NAME)\n return signature_def", "docstring": "Creates prediction signature from given 
inputs and outputs.\n\n This function produces signatures intended for use with the TensorFlow Serving\n Predict API (tensorflow_serving/apis/prediction_service.proto). This API\n imposes no constraints on the input and output types.\n\nArgs:\n inputs: dict of string to `Tensor`.\n outputs: dict of string to `Tensor`.\n\nReturns:\n A prediction-flavored signature_def.\n\nRaises:\n ValueError: If inputs or outputs is `None`.", "source": "github_repos"} -{"code": "def is_copy_node(node_name):\n return node_name.startswith('__copy_')", "docstring": "Determine whether a node name is that of a debug Copy node.\n\n Such nodes are inserted by TensorFlow core upon request in\n RunOptions.debug_options.debug_tensor_watch_opts.\n\nArgs:\n node_name: Name of the node.\n\nReturns:\n A bool indicating whether the input argument is the name of a debug Copy\n node.", "source": "github_repos"} -{"code": "def send_log_message(self, message: LogMessage) -> None:\n self.send_log_messages([message])", "docstring": "Sends the log message to BigQuery.\n\nArgs:\n * message: LogMessage dictionary\n\nReturns:\n * None\n\nRaises:\n * RuntimeError: if BigQuery insert fails", "source": "github_repos"} -{"code": "def view_options(**kwargs) -> Iterator[Dict[str, Any]]:\n parent_options = utils.thread_local_peek(_TLS_KEY_VIEW_OPTIONS, {})\n options = utils.merge([parent_options, kwargs])\n utils.thread_local_push(_TLS_KEY_VIEW_OPTIONS, options)\n try:\n yield options\n finally:\n utils.thread_local_pop(_TLS_KEY_VIEW_OPTIONS)", "docstring": "Context manager to inject rendering args to view.\n\nExample:\n with pg.view_options(enable_summary_tooltip=False):\n MyObject().to_html()\n\nArgs:\n **kwargs: Keyword arguments for View.render method.\n\nYields:\n The merged keyword arguments.", "source": "github_repos"} -{"code": "def wait_until_finish(self, duration=None):\n if not PipelineState.is_terminal(self._state):\n raise NotImplementedError()", "docstring": "Waits until the pipeline finishes and returns the final status.\n\nArgs:\n duration (int): The time to wait (in milliseconds) for job to finish.\n If it is set to :data:`None`, it will wait indefinitely until the job\n is finished.\n\nRaises:\n IOError: If there is a persistent problem getting job\n information.\n NotImplementedError: If the runner does not support this\n operation.\n\nReturns:\n The final state of the pipeline, or :data:`None` on timeout.", "source": "github_repos"} -{"code": "def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef):\n del fused_op_name, output_index, out_graphdef\n raise RuntimeError('Unimplemented abstract method.')", "docstring": "Add node(s) to graph representing output operands and returns type.\n\nArgs:\n fused_op_name: name of the fused op stub name.\n output_index: Output index that we are currently processing from stub.\n out_graphdef: The destination graphdef we are currently building up.\n\nReturns:\n The datatype of this identity.\n\nRaises:\n RuntimeError: if the method is not implemented.", "source": "github_repos"} -{"code": "def interpolate(self, x: types.RealTensor, y: types.RealTensor, name: str=None):\n name = name or self._name + '_interpolate'\n with tf.name_scope(name):\n x = tf.convert_to_tensor(x, dtype=self._dtype, name='x')\n y = tf.convert_to_tensor(y, dtype=self._dtype, name='y')\n y = tf.expand_dims(y, axis=-2)\n xy = cubic.interpolate(y, self._spline_yz, name='interpolation_in_y_direction')\n xy_rank = xy.shape.rank\n perm = [xy_rank - 1] + list(range(xy_rank - 1))\n yx = 
tf.transpose(xy, perm=perm)\n perm_original = list(range(1, xy_rank)) + [0]\n x = tf.expand_dims(tf.transpose(x, [xy_rank - 2] + list(range(xy_rank - 2))), axis=-1)\n z_values = linear.interpolate(x, self._xdata, yx)\n return tf.squeeze(tf.transpose(z_values, perm=perm_original), axis=-2)", "docstring": "Performs 2-D interpolation on a specified set of points.\n\nArgs:\n x: Real-valued `Tensor` of shape `batch_shape + [num_points]`.\n Defines the x-coordinates at which the interpolation should be\n performed. Note that `batch_shape` should be the same as in the\n underlying data.\n y: A `Tensor` of the same shape and `dtype` as `x`.\n Defines the y-coordinates at which the interpolation should be\n performed.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` which is mapped to the default name\n `interpolate`.\n\nReturns:\n A `Tensor` of the same shape and `dtype` as `x`. Represents the\n interpolated values of the function on for the coordinates\n `(x, y)`.", "source": "github_repos"} -{"code": "def elementwise_division(func):\n\n @functools.wraps(func)\n def sparse_wrapper(x1, x2):\n if isinstance(x1, tf.SparseTensor):\n if isinstance(x2, tf.SparseTensor):\n x1 = sparse_to_dense(x1)\n x2 = sparse_to_dense(x2)\n elif not hasattr(x2, 'shape') or len(x2.shape) == 0:\n return sparse_with_values(x1, func(x1.values, x2))\n else:\n x2_zeros_and_nans = tf.equal(x2, 0)\n if not tf.as_dtype(x2.dtype).is_integer:\n x2_zeros_and_nans = tf.math.logical_or(x2_zeros_and_nans, tf.math.is_nan(x2))\n\n def func_for_x1_indices():\n return sparse_with_values(x1, func(x1.values, tf.gather_nd(x2, x1.indices)))\n\n def func_for_union_indices():\n x2_zeros_and_nan_indices = tf.where(x2_zeros_and_nans)\n union_indices, x1_values_for_union, _ = sparse_union_indices_and_values(x1, x2_zeros_and_nan_indices)\n output = tf.SparseTensor(union_indices, func(x1_values_for_union, tf.gather_nd(x2, union_indices)), x1.dense_shape)\n output.set_shape(x1.shape)\n return output\n return tf.cond(tf.reduce_any(x2_zeros_and_nans), func_for_union_indices, func_for_x1_indices)\n elif isinstance(x2, tf.SparseTensor):\n x2 = sparse_to_dense(x2)\n elif isinstance(x1, tf.IndexedSlices):\n if isinstance(x2, tf.IndexedSlices):\n x1 = tf.convert_to_tensor(x1)\n x2 = tf.convert_to_tensor(x2)\n elif not hasattr(x2, 'shape') or len(x2.shape) == 0:\n return tf.IndexedSlices(func(x1.values, x2), x1.indices, x1.dense_shape)\n else:\n x2_zeros_and_nans = tf.equal(x2, 0)\n if not tf.as_dtype(x2.dtype).is_integer:\n x2_zeros_and_nans = tf.math.logical_or(x2_zeros_and_nans, tf.math.is_nan(x2))\n x2_zeros_and_nans = tf.reduce_any(x2_zeros_and_nans, axis=tuple(range(1, x2.shape.rank)))\n\n def func_for_x1_indices():\n return tf.IndexedSlices(func(x1.values, tf.gather(x2, x1.indices)), x1.indices, x1.dense_shape)\n\n def func_for_union_indices():\n x2_zeros_and_nan_indices = tf.squeeze(tf.where(x2_zeros_and_nans), axis=-1)\n union_indices, x1_values_for_union, _ = indexed_slices_union_indices_and_values(x1, x2_zeros_and_nan_indices)\n return tf.IndexedSlices(func(x1_values_for_union, tf.gather(x2, union_indices)), union_indices, x1.dense_shape)\n return tf.cond(tf.reduce_any(x2_zeros_and_nans), func_for_union_indices, func_for_x1_indices)\n elif isinstance(x2, tf.IndexedSlices):\n x2 = tf.convert_to_tensor(x2)\n return func(x1, x2)\n return sparse_wrapper", "docstring": "Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to\n element-wise binary division and related operators.\n\n This 
decorator is designed for operations related to the division of two\n operands (e.g. `divide`). It accepts `tf.SparseTensor` and\n `tf.IndexedSlices` for both the dividend and the divisor, but handles them\n differently based on whether they are the dividend or the divisor.\n\n - If the divisor is a `tf.SparseTensor` or `tf.IndexedSlices`, it is\n densified and the result is dense because the result contains Inf or Nan\n outside of the indices of the dividend.\n - If the dividend is a `tf.SparseTensor` or `tf.IndexedSlices` and the\n divisor is dense, it finds occurrences of zeros and NaNs in the divisor.\n The result may therefore have more indices than there were in the dividend\n to return correct values where the divisor was zero or NaN.\n - If the dividend is a `tf.SparseTensor` or `tf.IndexedSlices` and the\n divisor is a scalar, it does the division element-wise. Note that the\n result is incorrectly sparse if the scalar divisor is zero.\n\nArgs:\n func: The function to wrap.\n\nReturns:\n Wrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`.", "source": "github_repos"} -{"code": "def _valid_dtypes(self):\n return _DEFAULT_VALID_DTYPES", "docstring": "Valid types for loss, variables and gradients.\n\n Subclasses should override to allow other float types.\n\nReturns:\n Valid types for loss, variables and gradients.", "source": "github_repos"} -{"code": "def _scalar(tf_fn, x, promote_to_float=False):\n x = np_array_ops.asarray(x)\n if promote_to_float and (not np.issubdtype(x.dtype.as_numpy_dtype, np.inexact)):\n x = x.astype(np_utils.result_type(float))\n return tf_fn(x)", "docstring": "Computes the tf_fn(x) for each element in `x`.\n\nArgs:\n tf_fn: function that takes a single Tensor argument.\n x: array_like. Could be an ndarray, a Tensor or any object that can be\n converted to a Tensor using `ops.convert_to_tensor`.\n promote_to_float: whether to cast the argument to a float dtype if it is not\n already.\n\nReturns:\n An ndarray with the same shape as `x`. The default output dtype is\n determined by `np_utils.result_type(float)`, unless x is an ndarray with a\n floating point type, in which case the output type is same as x.dtype.", "source": "github_repos"} -{"code": "def export(self, input_ids: Optional[torch.Tensor]=None, cache_position: Optional[torch.Tensor]=None, dynamic_shapes: Optional[dict]=None, strict: Optional[bool]=None) -> torch.export.ExportedProgram:\n ALL_MASK_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', sdpa_mask_without_vmap)\n ALL_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', ALL_ATTENTION_FUNCTIONS['sdpa'])\n self.model.model.config._attn_implementation = 'sdpa_without_vmap'\n example_input_ids = input_ids if input_ids is not None else torch.tensor([[1]], dtype=torch.long)\n example_cache_position = cache_position if cache_position is not None else torch.tensor([0], dtype=torch.long)\n exported_program = torch.export.export(self.model, args=(example_input_ids, example_cache_position), kwargs={}, dynamic_shapes=dynamic_shapes, strict=strict if strict is not None else True)\n return exported_program", "docstring": "Export the wrapped module using `torch.export`.\n\nArgs:\n input_ids (`Optional[torch.Tensor]`):\n Tensor representing current input token id to the module. If not provided, a default tensor will be used.\n cache_position (`Optional[torch.Tensor]`):\n Tensor representing current input position in the cache. 
If not provided, a default tensor will be used.\n dynamic_shapes (`Optional[dict]`):\n Dynamic shapes to use for export if specified.\n strict(`Optional[bool]`):\n Flag to instruct `torch.export` to use `torchdynamo`.", "source": "github_repos"} -{"code": "def __init__(self, aggregation=None, metric_name=None, **kwargs):\n super(AddMetric, self).__init__(**kwargs)\n self.aggregation = aggregation\n self.metric_name = metric_name", "docstring": "Adds its inputs as a metric.\n\nAttributes:\n aggregation: 'mean' or None. How the inputs should be aggregated.\n metric_name: The name to use for this metric.", "source": "github_repos"} -{"code": "def to_proto(self, export_scope=None):\n if context.executing_eagerly():\n raise RuntimeError('This operation is not supported when eager execution is enabled.')\n if export_scope is None or self.handle.name.startswith(export_scope):\n var_def = variable_pb2.VariableDef()\n var_def.variable_name = ops.strip_name_scope(self.handle.name, export_scope)\n if self._initial_value is not None:\n var_def.initial_value_name = ops.strip_name_scope(self._initial_value.name, export_scope)\n var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope)\n if self._cached_value is not None:\n var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name, export_scope)\n else:\n var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name, export_scope)\n var_def.is_resource = True\n var_def.trainable = self.trainable\n var_def.synchronization = self.synchronization.value\n var_def.aggregation = self.aggregation.value\n if self._save_slice_info:\n var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(export_scope=export_scope))\n return var_def\n else:\n return None", "docstring": "Converts a `ResourceVariable` to a `VariableDef` protocol buffer.\n\nArgs:\n export_scope: Optional `string`. Name scope to remove.\n\nRaises:\n RuntimeError: If run in EAGER mode.\n\nReturns:\n A `VariableDef` protocol buffer, or `None` if the `Variable` is not\n in the specified name scope.", "source": "github_repos"} -{"code": "def _dump_file_name_to_datum(self, dir_name, file_name):\n debug_dump_rel_path = os.path.join(os.path.relpath(dir_name, self._dump_root), file_name)\n return DebugTensorDatum(self._dump_root, debug_dump_rel_path)", "docstring": "Obtain a DebugTensorDatum from the directory and file name.\n\nArgs:\n dir_name: (`str`) Name of the directory in which the dump file resides.\n file_name: (`str`) Base name of the dump file.\n\nReturns:\n (`DebugTensorDatum`) The `DebugTensorDatum` loaded from the dump file.", "source": "github_repos"} -{"code": "def assert_compatible_matrix_dimensions(operator, x):\n assert_same_dd = check_ops.assert_equal(array_ops.shape(x)[-2], operator.domain_dimension_tensor(), message='Dimensions are not compatible. shape[-2] of argument to be the same as this operator')\n return assert_same_dd", "docstring": "Assert that an argument to solve/matmul has proper domain dimension.\n\n If `operator.shape[-2:] = [M, N]`, and `x.shape[-2:] = [Q, R]`, then\n `operator.matmul(x)` is defined only if `N = Q`. This `Op` returns an\n `Assert` that \"fires\" if this is not the case. 
Static checks are already\n done by the base class `LinearOperator`.\n\nArgs:\n operator: `LinearOperator`.\n x: `Tensor`.\n\nReturns:\n `Assert` `Op`.", "source": "github_repos"} -{"code": "def __getattr__(self, name: str):\n if name.startswith('__'):\n raise AttributeError(name)\n attr = getattr(self._builder, name)\n if isinstance(attr, expressions.Builder) and self._sealed:\n raise self._fhir_path_sealed_error(name)\n return ColumnExpressionBuilder._wrap_any(self, attr)", "docstring": "Redirects to the expressions.Builder when the attribute is not here.\n\n Note that in Python, '__getattribute__' always gets called first (the\n highest priority). Thus for attributes which has already been defined in\n this class, they won't be redirected to the expressions.Builder.\n\nArgs:\n name: The attribute name as a string.\n\nReturns:\n The attribute get from expressions.Builder wrapped with _wrap_any.\n\nRaises:\n AttributeError: if the FHIR path in this class is already sealed, or if\n getting the attribute from self._builder fails.", "source": "github_repos"} -{"code": "def __init__(self, agg_func: Callable[[Iterable[float]], float], agg_model_id: Optional[str]=None, include_source_predictions: bool=False):\n self._agg = agg_func\n _AggModelIdMixin.__init__(self, agg_model_id)\n _SourcePredictionMixin.__init__(self, include_source_predictions)", "docstring": "Aggregates anomaly predictions based on their scores.\n\n This is an abstract base class for `AggregationFn`s that combine multiple\n `AnomalyPrediction` objects into a single `AnomalyPrediction` based on\n the scores of the input predictions.\n\nArgs:\n agg_func (Callable[[Iterable[float]], float]): A function that aggregates\n a collection of anomaly scores (floats) into a single score.\n agg_model_id (Optional[str]): The model id used in aggregated predictions.\n Defaults to None.\n include_source_predictions (bool): If True, include the input predictions in\n the `source_predictions` of the output. 
Defaults to False.", "source": "github_repos"} -{"code": "def empty(shape, dtype=None):\n return backend.numpy.empty(shape, dtype=dtype)", "docstring": "Return a tensor of given shape and type filled with uninitialized data.\n\nArgs:\n shape: Shape of the empty tensor.\n dtype: Desired data type of the empty tensor.\n\nReturns:\n The empty tensor.", "source": "github_repos"} -{"code": "def make_vjp(f, params=None, persistent=True):\n\n def decorated(*args, **kwds):\n \"\"\"Computes the value and gradient of the decorated function.\"\"\"\n parameter_positions = _get_arg_spec(f, params, args)\n assert not kwds, \"The gradient function can't take keyword arguments.\"\n this_tape = tape.push_new_tape(persistent=persistent)\n try:\n sources = []\n args = [ops.convert_to_tensor(arg) if i in parameter_positions else arg for i, arg in enumerate(args)]\n args = _ensure_unique_tensor_objects(parameter_positions, args)\n for i in parameter_positions:\n if getattr(args[i], 'is_packed', False):\n raise ValueError('GradientTape.gradient is not supported on packed EagerTensorsyet.')\n sources.append(args[i])\n tape.watch(this_tape, args[i])\n result = f(*args)\n if result is None:\n raise ValueError('Cannot differentiate a function that returns None; did you forget to return a value from {}?'.format(f.__name__))\n flat_result = nest.flatten(result)\n flat_result = [gen_array_ops.identity(x) for x in flat_result]\n result = nest.pack_sequence_as(result, flat_result)\n finally:\n tape.pop_tape(this_tape)\n\n def vjp(dy=None):\n if dy is not None:\n dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]\n return imperative_grad.imperative_grad(this_tape, nest.flatten(result), sources, output_gradients=dy)\n return (result, vjp)\n return decorated", "docstring": "Returns a function that computes f and its vjp w.r.t.\n\n params.\n\n The term \"vjp\" here is an abbreviation for vector-jacobian product.\n\nArgs:\n f: the function to be differentiated.\n params: the parameters (numbers or names) to differentiate with respect to.\n A value of None will differentiate with respect to all parameters.\n persistent: Boolean controlling whether the VJP function can be re-used.\n Must be True or False.\n\nReturns:\n A function, which when called, returns a tuple (value, vjp), where:\n - value is the result of calling f.\n - vjp is a function, which takes a vector as an argument and\n returns the product of that vector with the Jacobian of f.\n Providing no argument to vjp is equivalent to providing a\n vector of ones.\n\n For example,\n ```python\n def f(x):\n return x * x\n\n wrapped_fn = tfe.make_vjp(f)\n result, vjp = wrapped_fn(tf.constant(3.0))\n # result is 9.0\n vjp() # the vjp function returns 6.0\n\nRaises:\n ValueError: if `f` returns None.", "source": "github_repos"} -{"code": "def dense_block(x, blocks, name):\n for i in range(blocks):\n x = conv_block(x, 32, name=name + '_block' + str(i + 1))\n return x", "docstring": "A dense block.\n\nArgs:\n x: input tensor.\n blocks: integer, the number of building blocks.\n name: string, block label.\n\nReturns:\n Output tensor for the block.", "source": "github_repos"} -{"code": "def restore(self, file_prefix: str, options: Optional[checkpoint_options.CheckpointOptions]=None) -> Dict[str, ops.Operation]:\n if options is not None and options.experimental_io_device is not None:\n raise ValueError('Specified experimental_io_device in DTensor checkpoint is not supported.')\n del options\n restore_specs = []\n tensor_structure = []\n for saveable in 
self._saveable_objects:\n saveable_tensor_structure = []\n tensor_structure.append(saveable_tensor_structure)\n for spec in saveable.specs:\n saveable_tensor_structure.append(spec.name)\n if isinstance(spec, d_variable.DSaveSpec):\n restore_specs.append((spec.name, spec.slice_spec, spec.dtype, spec.layout, spec.global_shape))\n elif isinstance(spec, saveable_object.SaveSpec):\n restore_specs.append((spec.name, spec.slice_spec, spec.dtype, layout.Layout.replicated(self._mesh.host_mesh(), spec.tensor.shape.rank).to_string(), spec.tensor.shape.as_list()))\n tensor_names, tensor_slices, tensor_dtypes, layouts, global_shapes = zip(*restore_specs)\n with ops.device(api.device_name()):\n restored_tensors = gen_dtensor_ops.d_tensor_restore_v2(prefix=file_prefix, tensor_names=tensor_names, shape_and_slices=tensor_slices, input_shapes=global_shapes, input_layouts=layouts, dtypes=tensor_dtypes)\n structured_restored_tensors = nest.pack_sequence_as(tensor_structure, restored_tensors)\n restore_ops = {}\n for saveable, restored_tensors in zip(self._saveable_objects, structured_restored_tensors):\n restore_ops[saveable.name] = saveable.restore(restored_tensors, restored_shapes=None)\n return restore_ops", "docstring": "Restore the saveable objects from a checkpoint with `file_prefix`.\n\nArgs:\n file_prefix: A string or scalar string Tensor containing the prefix for\n files to read from.\n options: Optional `CheckpointOptions` object. This is unused in DTensor.\n\nReturns:\n A dictionary mapping from SaveableObject names to restore operations.", "source": "github_repos"} -{"code": "def insert_or_update(table, columns, values):\n rows = len(values)\n cells = len(columns) * len(values)\n return _Mutator(mutation=Mutation(insert_or_update=batch._make_write_pb(table, columns, values)), operation=WriteMutation._OPERATION_INSERT_OR_UPDATE, rows=rows, cells=cells, kwargs={'table': table, 'columns': columns, 'values': values})", "docstring": "Insert/update one or more table rows.\n\nArgs:\n table: Name of the table to be modified.\n columns: Name of the table columns to be modified.\n values: Values to be modified.", "source": "github_repos"} -{"code": "def unwrap(maybe_tf_decorator):\n decorators = []\n cur = maybe_tf_decorator\n while True:\n if isinstance(cur, TFDecorator):\n decorators.append(cur)\n elif _has_tf_decorator_attr(cur):\n decorators.append(getattr(cur, '_tf_decorator'))\n else:\n break\n if not hasattr(decorators[-1], 'decorated_target'):\n break\n cur = decorators[-1].decorated_target\n return (decorators, cur)", "docstring": "Unwraps an object into a list of TFDecorators and a final target.\n\nArgs:\n maybe_tf_decorator: Any callable object.\n\nReturns:\n A tuple whose first element is an list of TFDecorator-derived objects that\n were applied to the final callable target, and whose second element is the\n final undecorated callable target. If the `maybe_tf_decorator` parameter is\n not decorated by any TFDecorators, the first tuple element will be an empty\n list. 
The `TFDecorator` list is ordered from outermost to innermost\n decorators.", "source": "github_repos"} -{"code": "def _finish_parsing(self, instrumentation_block):\n formatter = _InstrumentationBlockFormatter(instrumentation_block)\n return formatter.has_completed_result_block_format(self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)", "docstring": "Finishes parsing the instrumentation result block for the final\n instrumentation run status.\n\nArgs:\n instrumentation_block: _InstrumentationBlock, the instrumentation\n result block for the instrumenation run. Potentially, thisi\n could actually be method block if the instrumentation outputi\n is malformed.\n\nReturns:\n A boolean indicating whether the instrumentation run completed\n with all the tests passing.\n\nRaises:\n signals.TestError: Error raised if the instrumentation failed to\n complete with either a pass or fail status.", "source": "github_repos"} -{"code": "def wrap_flash_attention(query, key, value, decoder_segment_ids, custom_mask=None, attn_logits_soft_cap=None, head_shards=1, q_seq_shards=1):\n if decoder_segment_ids is not None:\n assert query.shape[2] == decoder_segment_ids.q.shape[1], 'Sharding along sequence dimension not allowed in TPU kernel attention'\n if custom_mask is not None:\n mask = splash_attention_mask.NumpyMask(array=custom_mask)\n else:\n mask = splash_attention_mask.CausalMask(shape=(query.shape[2], query.shape[2]))\n multi_head_mask = splash_attention_mask.MultiHeadMask(masks=(mask,) * query.shape[1])\n splash_kernel = splash_attention_kernel.make_splash_mha(mask=multi_head_mask, head_shards=head_shards, q_seq_shards=q_seq_shards, attn_logits_soft_cap=attn_logits_soft_cap)\n return jax.vmap(splash_kernel)(query, key, value, segment_ids=decoder_segment_ids)", "docstring": "Applies a wrapped flash attention mechanism using the Splash kernel.\n This function prepares the appropriate attention mask (causal or custom),\n constructs a multi-head mask, and applies the Splash multi-head attention\n kernel to the provided query, key, and value tensors. It supports optional\n sharding and soft capping of attention logits.\n\nArgs:\n query: jax.Array. The query tensor of shape\n (batch, num_heads, seq_len, head_dim).\n key: jax.Array. The key tensor of shape\n (batch, num_heads, seq_len, head_dim).\n value: jax.Array. The value tensor of shape\n (batch, num_heads, seq_len, head_dim).\n decoder_segment_ids: Optional. Segment IDs for the decoder, used for\n sharding or masking.\n custom_mask: Optional[jax.Array]. A custom attention mask to apply. If\n None, a causal mask is used.\n attn_logits_soft_cap: Optional[float]. If provided, applies a soft cap\n to the attention logits.\n head_shards: int, default=1. Number of shards for the attention heads.\n q_seq_shards: int, default=1. 
Number of shards for the query sequence\n dimension.\n\nReturns:\n jax.Array: The result of applying the Splash multi-head attention\n kernel to the inputs.\n\nRaises:\n AssertionError: If sharding along the sequence dimension is attempted\n with decoder_segment_ids.", "source": "github_repos"} -{"code": "def _list_inputs_or_outputs(self, recursive, node_name, depth, control, op_type, do_outputs=False):\n if do_outputs:\n tracker = self._debug_dump.node_recipients\n type_str = 'Recipients of'\n short_type_str = 'recipients'\n else:\n tracker = self._debug_dump.node_inputs\n type_str = 'Inputs to'\n short_type_str = 'inputs'\n lines = []\n font_attr_segs = {}\n node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name)\n if not self._debug_dump.node_exists(node_name):\n return cli_shared.error('There is no node named \"%s\" in the partition graphs' % node_name)\n if recursive:\n max_depth = depth\n else:\n max_depth = 1\n if control:\n include_ctrls_str = ', control %s included' % short_type_str\n else:\n include_ctrls_str = ''\n line = '%s node \"%s\"' % (type_str, node_name)\n font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, 'bold')]\n lines.append(line + ' (Depth limit = %d%s):' % (max_depth, include_ctrls_str))\n command_template = 'lo -c -r %s' if do_outputs else 'li -c -r %s'\n self._dfs_from_node(lines, font_attr_segs, node_name, tracker, max_depth, 1, [], control, op_type, command_template=command_template)\n lines.append('')\n lines.append('Legend:')\n lines.append(' (d): recursion depth = d.')\n if control:\n lines.append(' (Ctrl): Control input.')\n if op_type:\n lines.append(' [Op]: Input node has op type Op.')\n return debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)", "docstring": "Helper function used by list_inputs and list_outputs.\n\n Format a list of lines to display the inputs or output recipients of a\n given node.\n\nArgs:\n recursive: Whether the listing is to be done recursively, as a boolean.\n node_name: The name of the node in question, as a str.\n depth: Maximum recursion depth, applies only if recursive == True, as an\n int.\n control: Whether control inputs or control recipients are included, as a\n boolean.\n op_type: Whether the op types of the nodes are to be included, as a\n boolean.\n do_outputs: Whether recipients, instead of input nodes are to be\n listed, as a boolean.\n\nReturns:\n Input or recipient tree formatted as a RichTextLines object.", "source": "github_repos"} -{"code": "def _preprocess(self, inputs: Sequence[torch.Tensor], freq: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n input_ts, input_padding, inp_freq = ([], [], [])\n for i, ts in enumerate(inputs):\n input_len = ts.shape[0]\n padding = torch.zeros(input_len + self.horizon_len, dtype=ts.dtype, device=ts.device)\n if input_len < self.context_len:\n num_front_pad = self.context_len - input_len\n ts = torch.cat([torch.zeros(num_front_pad, dtype=ts.dtype, device=ts.device), ts], dim=0)\n padding = torch.cat([torch.ones(num_front_pad, dtype=ts.dtype, device=padding.device), padding], dim=0)\n elif input_len > self.context_len:\n ts = ts[-self.context_len:]\n padding = padding[-(self.context_len + self.horizon_len):]\n input_ts.append(ts)\n input_padding.append(padding)\n inp_freq.append(freq[i])\n return (torch.stack(input_ts, dim=0), torch.stack(input_padding, dim=0), torch.tensor(inp_freq, dtype=torch.int32).reshape(-1, 1))", "docstring": "Formats and pads raw inputs to feed into the model.\n\n This function both pads 
each time series to match the context length, and\n pads the inputs to meet the SPMD shape requirement.\n\nArgs:\n inputs: A list of 1d Tensors. Each Tensor is the context time series of\n a single forecast task.\n freq: list of frequencies\n\nReturns:\n A tuple of:\n - the padded input time series to meet the model required context.\n - the padding indicator.\n - the number of padded examples for SPMD so that each core has the same\n number (a multiple of `batch_size`) of examples.", "source": "github_repos"} -{"code": "def get_absolute_positions(self, abs_pos_embeddings, has_cls_token, height, width):\n if has_cls_token:\n abs_pos_embeddings = abs_pos_embeddings[:, 1:]\n num_position = abs_pos_embeddings.shape[1]\n size = int(math.sqrt(num_position))\n if size * size != num_position:\n raise ValueError('Absolute position embeddings must be a square number.')\n if torch.jit.is_tracing() or (size != height or size != width):\n new_abs_pos_embeddings = nn.functional.interpolate(abs_pos_embeddings.reshape(1, size, size, -1).permute(0, 3, 1, 2), size=(height, width), mode='bicubic', align_corners=False)\n return new_abs_pos_embeddings.permute(0, 2, 3, 1)\n else:\n return abs_pos_embeddings.reshape(1, height, width, -1)", "docstring": "Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token dimension for the\n original embeddings.\n\nArgs:\n abs_pos_embeddings (`torch.Tensor`):\n Absolute positional embeddings with (1, num_position, num_channels).\n has_cls_token (`bool`):\n If true, has 1 embedding in abs_pos_embeddings for cls token.\n height (`int`):\n Height of input image tokens.\n width (`int`):\n Width of input image tokens.\n\nReturns:\n Absolute positional embeddings after processing with shape (1, height, width, num_channels)", "source": "github_repos"} -{"code": "def add_link(self, name: str, url: str) -> None:", "docstring": "Adds a related link to current trial.\n\n Added links can be retrieved from the `Trial.related_links` property via\n `pg.poll_result`.\n\nArgs:\n name: Name for the related link.\n url: URL for this link.", "source": "github_repos"} -{"code": "def at(dataset, index):\n return structure.from_tensor_list(dataset.element_spec, gen_experimental_dataset_ops.get_element_at_index(dataset._variant_tensor, index, output_types=structure.get_flat_tensor_types(dataset.element_spec), output_shapes=structure.get_flat_tensor_shapes(dataset.element_spec)))", "docstring": "Returns the element at a specific index in a datasest.\n\n Currently, random access is supported for the following tf.data operations:\n\n - `tf.data.Dataset.from_tensor_slices`,\n - `tf.data.Dataset.from_tensors`,\n - `tf.data.Dataset.shuffle`,\n - `tf.data.Dataset.batch`,\n - `tf.data.Dataset.shard`,\n - `tf.data.Dataset.map`,\n - `tf.data.Dataset.range`,\n - `tf.data.Dataset.zip`,\n - `tf.data.Dataset.skip`,\n - `tf.data.Dataset.repeat`,\n - `tf.data.Dataset.list_files`,\n - `tf.data.Dataset.SSTableDataset`,\n - `tf.data.Dataset.concatenate`,\n - `tf.data.Dataset.enumerate`,\n - `tf.data.Dataset.parallel_map`,\n - `tf.data.Dataset.prefetch`,\n - `tf.data.Dataset.take`,\n - `tf.data.Dataset.cache` (in-memory only)\n\n Users can use the cache operation to enable random access for any dataset,\n even one comprised of transformations which are not on this list.\n E.g., to get the third element of a TFDS dataset:\n\n ```python\n ds = tfds.load(\"mnist\", split=\"train\").cache()\n elem = tf.data.Dataset.experimental.at(ds, 3)\n ```\n\nArgs:\n dataset: A `tf.data.Dataset` to 
determine whether it supports random access.\n index: The index at which to fetch the element.\n\nReturns:\n A (nested) structure of values matching `tf.data.Dataset.element_spec`.\n\nRaises:\n UnimplementedError: If random access is not yet supported for a dataset.", "source": "github_repos"} -{"code": "def collect_per_output_metric_info(metrics, output_names, output_shapes, loss_fns, from_serialized=False, is_weighted=False):\n if not metrics:\n return [{} for _ in output_names]\n if isinstance(metrics, list):\n any_sub_list = any((isinstance(m, list) for m in metrics))\n if any_sub_list:\n if len(metrics) != len(output_names):\n raise ValueError('When passing a list of lists as `metrics`, it should have one entry per model output. The model has ' + str(len(output_names)) + ' outputs, but you passed metrics=' + str(metrics))\n nested_metrics = [generic_utils.to_list(m) for m in metrics]\n elif len(output_names) > 1:\n nested_metrics = []\n for _ in output_names:\n nested_metrics.append([metrics_module.clone_metric(m) for m in metrics])\n else:\n nested_metrics = [metrics]\n elif isinstance(metrics, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)\n nested_metrics = []\n for name in output_names:\n output_metrics = generic_utils.to_list(metrics.get(name, []))\n nested_metrics.append(output_metrics)\n else:\n raise TypeError('Type of `metrics` argument not understood. Expected a list or dictionary, found: ' + str(metrics))\n per_output_metrics = []\n for i, metrics in enumerate(nested_metrics):\n metrics_dict = collections.OrderedDict()\n for metric in metrics:\n metric_name = get_metric_name(metric, is_weighted)\n metric_fn = get_metric_function(metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])\n metric_fn._from_serialized = from_serialized\n if not isinstance(metric_fn, metrics_module.Metric):\n metric_fn = metrics_module.MeanMetricWrapper(metric_fn, name=metric_name)\n metric_fn._from_serialized = False\n metrics_dict[metric_name] = metric_fn\n per_output_metrics.append(metrics_dict)\n return per_output_metrics", "docstring": "Maps metric names and functions to model outputs.\n\nArgs:\n metrics: a list or a list of lists or a dict of metric functions.\n output_names: a list of the names (strings) of model outputs.\n output_shapes: a list of the shapes (strings) of model outputs.\n loss_fns: a list of the loss functions corresponding to the model outputs.\n from_serialized: whether the model the metrics are being sourced from is\n being initialized from a serialized format.\n is_weighted: Boolean indicating whether the given metrics are weighted.\n\nReturns:\n A list (one entry per model output) of dicts.\n For instance, if the model has 2 outputs, and for the first output\n we want to compute \"binary_accuracy\" and \"binary_crossentropy\",\n and just \"binary_accuracy\" for the second output,\n the list would look like: `[{\n 'acc': binary_accuracy(),\n 'ce': binary_crossentropy(),\n }, {\n 'acc': binary_accuracy(),\n }]`\n\nRaises:\n TypeError: if an incorrect type is passed for the `metrics` argument.", "source": "github_repos"} -{"code": "def _fill_meta_graph_def(meta_graph_def: meta_graph_pb2.MetaGraphDef, saveable_view: _SaveableView, signature_functions: Dict[str, Callable[..., Any]], namespace_whitelist: List[str], save_custom_gradients: bool, create_saver: bool, enable_debug_stripper: bool, defaults=None) -> Tuple[_AssetInfo, ops.Graph]:\n resource_initializers = saveable_view.get_concrete_resource_initializers()\n 
exported_graph = ops.Graph()\n resource_initializer_ops = []\n with exported_graph.as_default():\n object_map, tensor_map, asset_info = saveable_view.map_resources()\n signatures = _generate_signatures(signature_functions, object_map, defaults)\n if save_custom_gradients:\n _trace_gradient_functions(exported_graph, saveable_view)\n with exported_graph.as_default():\n for resource_initializer_function in resource_initializers:\n asset_dependencies = []\n for capture in resource_initializer_function.graph.external_captures:\n asset_initializer = asset_info.asset_initializers_by_resource.get(capture, None)\n if asset_initializer is not None:\n asset_dependencies.append(asset_initializer)\n with ops.control_dependencies(asset_dependencies):\n mapped_initializer = object_map[resource_initializer_function]\n resource_initializer_ops.append(mapped_initializer())\n resource_initializer_ops.extend(asset_info.asset_initializers_by_resource.values())\n with ops.control_dependencies(resource_initializer_ops):\n init_op = control_flow_ops.no_op()\n meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(init_op.name)\n meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(signature_def_utils.op_signature_def(init_op, constants.INIT_OP_SIGNATURE_KEY))\n\n def call_with_mapped_captures(function, args):\n if function in object_map:\n return object_map[function](*args)\n return saved_model_exported_concrete.ExportedConcreteFunction(function, tensor_map)(*args)\n for obj in object_map.values():\n obj._maybe_initialize_trackable()\n named_saveable_objects, registered_savers = save_util_v1.frozen_saveables_and_savers(graph_view=saveable_view.augmented_graph_view, object_map=object_map, to_graph=exported_graph, call_with_mapped_captures=call_with_mapped_captures)\n if create_saver:\n saver = functional_saver.MultiDeviceSaver.from_saveables(named_saveable_objects, registered_savers, call_with_mapped_captures)\n with exported_graph.as_default():\n saver_def = saver.to_proto()\n meta_graph_def.saver_def.CopyFrom(saver_def)\n _dependency_sorted_node_ids(saveable_view)\n graph_def, _ = exported_graph._as_graph_def(add_shapes=True, use_pybind11_proto=False)\n graph_def.library.registered_gradients.extend(saveable_view.gradient_defs)\n _verify_ops(graph_def, namespace_whitelist)\n meta_graph_def.graph_def.CopyFrom(graph_def)\n meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)\n if saveable_view.options.extra_tags:\n for tag in saveable_view.options.extra_tags:\n meta_graph_def.meta_info_def.tags.append(tag)\n meta_graph_def.meta_info_def.tensorflow_version = versions.__version__\n meta_graph_def.meta_info_def.tensorflow_git_version = versions.__git_version__\n meta_graph_def.meta_info_def.stripped_default_attrs = True\n meta_graph_def.asset_file_def.extend(asset_info.asset_defs)\n for signature_key, signature in signatures.items():\n meta_graph_def.signature_def[signature_key].CopyFrom(signature)\n meta_graph.strip_graph_default_valued_attrs(meta_graph_def)\n if sys.byteorder == 'big':\n utils_impl.swap_function_tensor_content(meta_graph_def, 'big', 'little')\n if enable_debug_stripper:\n _strip_debug_nodes(meta_graph_def)\n meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def))\n return (asset_info, exported_graph)", "docstring": "Generates a MetaGraph which calls `signature_functions`.\n\nArgs:\n meta_graph_def: The MetaGraphDef proto to fill.\n saveable_view: The _SaveableView being exported.\n 
signature_functions: A dictionary mapping signature keys to concrete\n functions containing signatures to add to the MetaGraph.\n namespace_whitelist: List of strings containing whitelisted op namespaces.\n save_custom_gradients: Whether to save custom gradients.\n create_saver: Whether to add SavedModel's native save and restore ops.\n enable_debug_stripper: Whether to strip the debug nodes from the graph.\n defaults: A dictionary mapping signature_key to dictionary of\n user_specified_name to Tensor representing default values.\n\nReturns:\n A tuple of (_AssetInfo, Graph) containing the captured assets and\n exported Graph generated from tracing the saveable_view.", "source": "github_repos"} -{"code": "def type_parameter(self, unknown: _UnknownType, base_class: pytd.Class, item: pytd.TemplateItem) -> StrictType:\n assert is_unknown(unknown)\n name = unknown.name + '.' + base_class.name + '.' + item.type_param.name\n return StrictType(name)", "docstring": "This generates the type parameter when matching against a generic type.\n\n For example, when we match ~unknown1 against list[T], we need an additional\n type to model the T in \"~unknown1[T]\". This type would have the name\n \"~unknown1.list.T\".\n\nArgs:\n unknown: An unknown type. This is the type that's matched against\n base_class[T].\n base_class: The base class of the generic we're matching the unknown\n against. E.g. \"list\".\n item: The actual type parameter. (\"T\" in the examples above).\n\nReturns:\n A type (pytd.Node) to represent this type parameter.", "source": "github_repos"} -{"code": "def __init__(self, embedding_shape, initializer, weight_collections=None, trainable=True, name=None, **kwargs):\n super(_EmbeddingColumnLayer, self).__init__(trainable=trainable, name=name, **kwargs)\n self._embedding_shape = embedding_shape\n self._initializer = initializer\n self._weight_collections = weight_collections", "docstring": "Constructor.\n\nArgs:\n embedding_shape: Shape of the embedding variable used for lookup.\n initializer: A variable initializer function to be used in embedding\n variable initialization.\n weight_collections: A list of collection names to which the Variable will\n be added. 
Note that, variables will also be added to collections\n `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: Name of the layer\n **kwargs: keyword named properties.", "source": "github_repos"} -{"code": "def __init__(self, config: CvtConfig, stage: int, **kwargs):\n super().__init__(**kwargs)\n self.config = config\n self.stage = stage\n if self.config.cls_token[self.stage]:\n self.cls_token = self.add_weight(shape=(1, 1, self.config.embed_dim[-1]), initializer=get_initializer(self.config.initializer_range), trainable=True, name='cvt.encoder.stages.2.cls_token')\n self.embedding = TFCvtEmbeddings(self.config, patch_size=config.patch_sizes[self.stage], num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1], stride=config.patch_stride[self.stage], embed_dim=config.embed_dim[self.stage], padding=config.patch_padding[self.stage], dropout_rate=config.drop_rate[self.stage], name='embedding')\n drop_path_rates = tf.linspace(0.0, config.drop_path_rate[self.stage], config.depth[stage])\n drop_path_rates = [x.numpy().item() for x in drop_path_rates]\n self.layers = [TFCvtLayer(config, num_heads=config.num_heads[self.stage], embed_dim=config.embed_dim[self.stage], kernel_size=config.kernel_qkv[self.stage], stride_q=config.stride_q[self.stage], stride_kv=config.stride_kv[self.stage], padding_q=config.padding_q[self.stage], padding_kv=config.padding_kv[self.stage], qkv_projection_method=config.qkv_projection_method[self.stage], qkv_bias=config.qkv_bias[self.stage], attention_drop_rate=config.attention_drop_rate[self.stage], drop_rate=config.drop_rate[self.stage], mlp_ratio=config.mlp_ratio[self.stage], drop_path_rate=drop_path_rates[self.stage], with_cls_token=config.cls_token[self.stage], name=f'layers.{j}') for j in range(config.depth[self.stage])]", "docstring": "Cvt stage (encoder block). Each stage has 2 parts :\n - (1) A Convolutional Token Embedding layer\n - (2) A Convolutional Transformer Block (layer).\n The classification token is added only in the last stage.\n\nArgs:\n config ([`CvtConfig`]): Model configuration class.\n stage (`int`): Stage number.", "source": "github_repos"} -{"code": "def pmean(tensor, axis_name=None):\n if axis_name != _pmap_config.axis_name():\n raise ValueError('axis_name (%s) is not equal to that of the surrounding pmap (%s)' % (axis_name, _pmap_config.axis_name()))\n devices = _pmap_config.devices()\n if devices is None:\n raise ValueError(\"Can't retrieve the device list from the surrounding pmap\")\n if tpu_devices(devices):\n raise ValueError('pmean for TPU is not supported yet.')\n else:\n return gen_collective_ops.collective_reduce(input=tensor, group_size=len(devices), group_key=_GROUP_KEY, instance_key=_get_instance_key(), merge_op='Add', final_op='Div', subdiv_offsets=(0,))", "docstring": "Mean all-reduction.\n\nArgs:\n tensor: A tensor.\n axis_name: The axis name to reduce. Must equal to that of the surrounding\n pmap.\n\nReturns:\n The mean of the `tensor` replicas on each participating devices.", "source": "github_repos"} -{"code": "def __init__(self, num_evals, steps_per_run=1):\n self._num_evals = num_evals\n self._evals_completed = None\n self._steps_per_run_initial_value = steps_per_run", "docstring": "Constructs the run hook.\n\nArgs:\n num_evals: The number of evaluations to run for. 
if set to None, will\n iterate the dataset until all inputs are exhausted.\n steps_per_run: Number of steps executed per run call.", "source": "github_repos"} -{"code": "def calendar_month(self: EventSetOrNode, tz: Union[str, float, int]=0) -> EventSetOrNode:\n from temporian.core.operators.calendar.month import calendar_month\n return calendar_month(self, tz)", "docstring": "Obtains the month the timestamps in an\n [`EventSet`][temporian.EventSet]'s sampling are in.\n\n Features in the input are ignored, only the timestamps are used and\n they must be unix timestamps (`is_unix_timestamp=True`).\n\n Output feature contains numbers between 1 and 12.\n\n By default, the timezone is UTC unless the `tz` argument is specified,\n as an offset in hours or a timezone name. See\n [`EventSet.calendar_hour()`][temporian.EventSet.calendar_hour] for an\n example using timezones.\n\n Usage example:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[\"2023-02-04\", \"2023-02-20\", \"2023-03-01\", \"2023-05-07\"],\n ... name='special_events'\n ... )\n >>> b = a.calendar_month()\n >>> b\n indexes: ...\n features: [('calendar_month', int32)]\n events:\n (4 events):\n timestamps: [...]\n 'calendar_month': [2 2 3 5]\n ...\n\n ```\n\nArgs:\n tz: timezone name (see `pytz.all_timezones`) or UTC offset in hours.\n\nReturns:\n EventSet with a single feature with the month each timestamp in\n `sampling` belongs to.", "source": "github_repos"} -{"code": "def repeat(n: int, body: Callable[..., Union[core_types.TensorLike, Iterable]], inputs: Optional[List[core_types.TensorLike]]=None, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, name: Any=None) -> List[core_types.TensorLike]:\n\n def _convert_to_list(xs):\n if not isinstance(xs, (list, tuple)):\n return [xs]\n else:\n return list(xs)\n\n def cond(i, *args):\n del args\n return i < n\n\n def body_wrapper(i, *args):\n return [i + 1] + _convert_to_list(body(*args))\n inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)\n outputs = while_loop(cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)\n outputs = _convert_to_list(outputs)\n if len(outputs) == 1:\n return outputs[0].op\n else:\n return outputs[1:]", "docstring": "Builds a training loop that executes a fixed number of iterations.\n\n The set of loop-carried tensors correspond to `inputs`.\n `body` must be a function that takes and returns the values of the\n loop-carried tensors.\n\nArgs:\n n: the number of loop iterations\n body: a Python function that builds the loop body.\n inputs: a list of initial values passed into the training loop or None\n (equivalent to an empty list).\n infeed_queue: if not None, the infeed queue from which to append a tuple of\n arguments as inputs to condition.\n name: (Deprecated) Does nothing.\n\nReturns:\n The final values of the loop-carried tensors.\n\nRaises:\n ValueError: if there is a type error.", "source": "github_repos"} -{"code": "def GetUpdates(self, source, url, since):\n proto = url.split(':')[0]\n if proto not in ('http', 'https'):\n raise error.ConfigurationError('Unsupported protocol %s' % proto)\n conn = source.conn\n conn.setopt(pycurl.OPT_FILETIME, 1)\n conn.setopt(pycurl.ENCODING, 'bzip2, gzip')\n if since is not None:\n conn.setopt(pycurl.TIMEVALUE, int(since))\n conn.setopt(pycurl.TIMECONDITION, pycurl.TIMECONDITION_IFMODSINCE)\n retry_count = 0\n resp_code = 500\n while retry_count < source.conf['retry_max']:\n try:\n source.log.debug('fetching %s', url)\n resp_code, headers, body_bytes = curl.CurlFetch(url, conn, 
self.log)\n self.log.debug('response code: %s', resp_code)\n finally:\n if resp_code < 400:\n if resp_code == 304:\n return []\n if resp_code == 200:\n break\n retry_count += 1\n self.log.warning('Failed connection: attempt #%s.', retry_count)\n if retry_count == source.conf['retry_max']:\n self.log.debug('max retries hit')\n raise error.SourceUnavailable('Max retries exceeded.')\n time.sleep(source.conf['retry_delay'])\n headers = headers.split('\\r\\n')\n last_modified = conn.getinfo(pycurl.INFO_FILETIME)\n self.log.debug('last modified: %s', last_modified)\n if last_modified == -1:\n for header in headers:\n if header.lower().startswith('last-modified'):\n self.log.debug('%s', header)\n http_ts_string = header[header.find(':') + 1:].strip()\n last_modified = self.FromHttpToTimestamp(http_ts_string)\n break\n else:\n http_ts_string = ''\n else:\n http_ts_string = self.FromTimestampToHttp(last_modified)\n self.log.debug('Last-modified is: %s', http_ts_string)\n try:\n body_bytes = bz2.decompress(body_bytes)\n self.log.debug('bzip encoding found')\n except IOError:\n self.log.debug('bzip encoding not found')\n response = StringIO(body_bytes.decode('utf-8'))\n data_map = self.GetMap(cache_info=response)\n if http_ts_string:\n http_ts = self.FromHttpToTimestamp(http_ts_string)\n self.log.debug('setting last modified to: %s', http_ts)\n data_map.SetModifyTimestamp(http_ts)\n return data_map", "docstring": "Get updates from a source.\n\nArgs:\n source: A data source\n url: url to the data we want\n since: a timestamp representing the last change (None to force-get)\n\nReturns:\n A tuple containing the map of updates and a maximum timestamp\n\nRaises:\n ValueError: an object in the source map is malformed\n ConfigurationError:", "source": "github_repos"} -{"code": "def _GetBcastSubshape(subscripts):\n start = subscripts.find(ellipsis)\n if start == -1:\n return (0, 0)\n remaining = len(subscripts) - (start + len(ellipsis))\n end = -remaining if remaining > 0 else None\n return (start, end)", "docstring": "Returns a tuple denoting the slice mapping to ellipsis.\n\n For a given subscript, returns a tuple (start, end) denoting the start\n axis index and the (negative) end axis index respectively. For any input\n Tensor `x` described by the subscript, `x[start:end]` would be the slice\n represented by the ellipsis. E.g. 
For `ab...cd` returns `[1, -2]`.\n\n If ellipsis is not present in `subscripts`, returns `(0, 0)`.\n\nArgs:\n subscripts: A string denoting the einsum subscript.", "source": "github_repos"} -{"code": "def Run(self, conf, args):\n try:\n options, args = self.parser.parse_args(args)\n except SystemExit as e:\n return e.code\n if options.maps:\n self.log.info('Setting configured maps to %s', options.maps)\n conf.maps = options.maps\n if not options.incremental:\n self.log.debug('performing FULL update of caches')\n else:\n self.log.debug('performing INCREMENTAL update of caches')\n if options.delay:\n self.log.info('Delaying %d seconds before executing', options.delay)\n time.sleep(options.delay)\n return self.UpdateMaps(conf, incremental=options.incremental, force_write=options.force_write, force_lock=options.force_lock)", "docstring": "Run the Update command.\n\n See Command.Run() for full documentation on the Run() method.\n\nArgs:\n conf: a nss_cache.config.Config object\n args: a list of arguments to be parsed by this command\n\nReturns:\n 0 on success, nonzero on error", "source": "github_repos"} -{"code": "def VisitUnionType(self, union):\n intersection = self.hierarchy.ExpandSuperClasses(str(union.type_list[0]))\n for t in union.type_list[1:]:\n intersection.intersection_update(self.hierarchy.ExpandSuperClasses(str(t)))\n new_type_list = tuple((pytd.NamedType(cls) for cls in intersection if not self.hierarchy.HasSubClassInSet(cls, intersection)))\n if not new_type_list:\n return union\n return pytd_utils.JoinTypes(new_type_list)", "docstring": "Given a union type, try to find a simplification by using superclasses.\n\n This is a lossy optimization that tries to map a list of types to a common\n base type. For example, int and bool are both base classes of int, so it\n would convert \"Union[int, bool]\" to \"int\".\n\nArgs:\n union: A union type.\n\nReturns:\n A simplified type, if available.", "source": "github_repos"} -{"code": "def blackman(x):\n if any_symbolic_tensors((x,)):\n return Blackman().symbolic_call(x)\n return backend.numpy.blackman(x)", "docstring": "Blackman window function.\n The Blackman window is a taper formed by using a weighted cosine.\n\nArgs:\n x: Scalar or 1D Tensor. 
Window length.\n\nReturns:\n A 1D tensor containing the Blackman window values.\n\nExample:\n >>> x = keras.ops.convert_to_tensor(5)\n >>> keras.ops.blackman(x)\n array([-1.3877788e-17, 3.4000000e-01, 1.0000000e+00, 3.4000000e-01,\n -1.3877788e-17], dtype=float32)", "source": "github_repos"} -{"code": "def _get_parameter_info(param_name, documented_params, source_args_dict, param_type, optional):\n description = None\n shape = None\n shape_string = ''\n is_documented = True\n additional_info = None\n if param_name in documented_params:\n if param_type == '' and documented_params[param_name].get('type', None) is not None:\n param_type = documented_params[param_name]['type']\n optional = documented_params[param_name]['optional']\n shape = documented_params[param_name]['shape']\n shape_string = shape if shape else ''\n additional_info = documented_params[param_name]['additional_info'] or ''\n description = f'{documented_params[param_name]['description']}\\n'\n elif param_name in source_args_dict:\n shape = source_args_dict[param_name]['shape']\n shape_string = ' ' + shape if shape else ''\n description = source_args_dict[param_name]['description']\n additional_info = None\n else:\n is_documented = False\n optional_string = ', *optional*' if optional else ''\n return (param_type, optional_string, shape_string, additional_info, description, is_documented)", "docstring": "Get parameter documentation details from the appropriate source.\n Tensor shape, optional status and description are taken from the custom docstring in priority if available.\n Type is taken from the function signature first, then from the custom docstring if missing from the signature\n\nArgs:\n param_name (`str`): Name of the parameter\n documented_params (`dict`): Dictionary of documented parameters (manually specified in the docstring)\n source_args_dict (`dict`): Default source args dictionary to use if not in documented_params\n param_type (`str`): Current parameter type (may be updated)\n optional (`bool`): Whether the parameter is optional (may be updated)", "source": "github_repos"} -{"code": "def yield_value(modality, iterable):\n if modality == Modality.CORE:\n yield from _tf_core_yield_value(iterable)\n elif modality == Modality.DATA:\n yield from _tf_data_yield_value(iterable)\n else:\n raise ValueError('Unknown modality used {} for nested structure'.format(modality))", "docstring": "Yield elements of `iterable` in a deterministic order.\n\nArgs:\n modality: enum value of supported modality [Modality.CORE or Modality.DATA]\n iterable: an iterable.\n\nYields:\n The iterable elements in a deterministic order.", "source": "github_repos"} -{"code": "def _calculate_regression_loss(answer, aggregate_mask, dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config):\n expected_result = _calculate_expected_result(dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config)\n answer_masked = tf.where(tf.math.is_nan(answer), tf.zeros_like(answer), answer)\n if config.use_normalized_answer_loss:\n normalizer = tf.stop_gradient(tf.math.maximum(tf.math.abs(expected_result), tf.math.abs(answer_masked)) + EPSILON_ZERO_DIVISION)\n normalized_answer_masked = answer_masked / normalizer\n normalized_expected_result = expected_result / normalizer\n per_example_answer_loss = tf.compat.v1.losses.huber_loss(normalized_answer_masked * aggregate_mask, normalized_expected_result * aggregate_mask, delta=tf.cast(1.0, tf.float32), reduction=tf.losses.Reduction.NONE)\n 
else:\n per_example_answer_loss = tf.compat.v1.losses.huber_loss(answer_masked * aggregate_mask, expected_result * aggregate_mask, delta=tf.cast(config.huber_loss_delta, tf.float32), reduction=tf.losses.Reduction.NONE)\n if config.answer_loss_cutoff is None:\n large_answer_loss_mask = tf.ones_like(per_example_answer_loss, dtype=tf.float32)\n else:\n large_answer_loss_mask = tf.where(per_example_answer_loss > config.answer_loss_cutoff, tf.zeros_like(per_example_answer_loss, dtype=tf.float32), tf.ones_like(per_example_answer_loss, dtype=tf.float32))\n per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask)\n return (per_example_answer_loss_scaled, large_answer_loss_mask)", "docstring": "Calculates the regression loss per example.\n\nArgs:\n answer (`tf.Tensor` of shape `(batch_size,)`):\n Answer for every example in the batch. Nan if there is no scalar answer.\n aggregate_mask (`tf.Tensor` of shape `(batch_size,)`):\n A mask set to 1 for examples that should use aggregation functions.\n dist_per_cell (`torch.distributions.Bernoulli`):\n Cell selection distribution for each cell.\n numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`):\n Numeric values of every token. Nan for tokens which are not numeric values.\n numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`):\n Scale of the numeric values of every token.\n input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`):\n Mask for the table, without question tokens and table headers.\n logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):\n Logits per aggregation operation.\n config ([`TapasConfig`]):\n Model configuration class with all the parameters of the model\n\nReturns:\n per_example_answer_loss_scaled (`tf.Tensor` of shape `(batch_size,)`): Scales answer loss for each example in\n the batch. large_answer_loss_mask (`tf.Tensor` of shape `(batch_size,)`): A mask which is 1 for examples for\n which their answer loss is larger than the answer_loss_cutoff.", "source": "github_repos"} -{"code": "def assert_input_compatibility(input_spec, inputs, layer_name):\n if not input_spec:\n return\n input_spec = tree.flatten(input_spec)\n if isinstance(inputs, dict):\n names = [spec.name for spec in input_spec]\n if all(names):\n list_inputs = []\n for name in names:\n if name not in inputs:\n raise ValueError(f'Missing data for input \"{name}\". You passed a data dictionary with keys {list(inputs.keys())}. Expected the following keys: {names}')\n list_inputs.append(inputs[name])\n inputs = list_inputs\n inputs = tree.flatten(inputs)\n if len(inputs) != len(input_spec):\n raise ValueError(f'Layer \"{layer_name}\" expects {len(input_spec)} input(s), but it received {len(inputs)} input tensors. Inputs received: {inputs}')\n for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):\n if spec is None:\n continue\n if x is None and spec.optional:\n continue\n if not hasattr(x, 'shape'):\n raise ValueError(f\"Inputs to a layer should be tensors. Got '{x}' (of type {type(x)}) as input for layer '{layer_name}'.\")\n shape = backend.standardize_shape(x.shape)\n ndim = len(shape)\n if spec.ndim is not None and (not spec.allow_last_axis_squeeze):\n if ndim != spec.ndim:\n raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected ndim={spec.ndim}, found ndim={ndim}. 
Full shape received: {shape}')\n if spec.max_ndim is not None:\n if ndim is not None and ndim > spec.max_ndim:\n raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected max_ndim={spec.max_ndim}, found ndim={ndim}')\n if spec.min_ndim is not None:\n if ndim is not None and ndim < spec.min_ndim:\n raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected min_ndim={spec.min_ndim}, found ndim={ndim}. Full shape received: {shape}')\n if spec.dtype is not None:\n dtype = backend.standardize_dtype(x.dtype)\n if dtype != spec.dtype:\n raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected dtype={spec.dtype}, found dtype={dtype}')\n if spec.axes:\n for axis, value in spec.axes.items():\n if value is not None and shape[axis] not in {value, None}:\n raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected axis {axis} of input shape to have value {value}, but received input with shape {shape}')\n if spec.shape is not None:\n spec_shape = spec.shape\n if spec.allow_last_axis_squeeze:\n if shape and shape[-1] == 1:\n shape = shape[:-1]\n if spec_shape and spec_shape[-1] == 1:\n spec_shape = spec_shape[:-1]\n for spec_dim, dim in zip(spec_shape, shape):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected shape={spec.shape}, found shape={shape}')", "docstring": "Checks compatibility between the layer and provided inputs.\n\n This checks that the tensor(s) `inputs` verify the input assumptions\n of a layer (if any). If not, a clear and actional exception gets raised.\n\nArgs:\n input_spec: An InputSpec instance, list of InputSpec instances, a nested\n structure of InputSpec instances, or None.\n inputs: Input tensor, list of input tensors, or a nested structure of\n input tensors.\n layer_name: String, name of the layer (for error message formatting).\n\nRaises:\n ValueError: in case of mismatch between\n the provided inputs and the expectations of the layer.", "source": "github_repos"} -{"code": "def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):\n if not self.supports_gradient_checkpointing:\n raise ValueError(f'{self.__class__.__name__} does not support gradient checkpointing.')\n if gradient_checkpointing_kwargs is None:\n gradient_checkpointing_kwargs = {'use_reentrant': True}\n gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs)\n _is_using_old_format = 'value' in inspect.signature(self._set_gradient_checkpointing).parameters\n if not _is_using_old_format:\n self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func)\n else:\n self.apply(partial(self._set_gradient_checkpointing, value=True))\n logger.warning('You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it).Please update to the new format on your modeling file. 
To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model.')\n if getattr(self, '_hf_peft_config_loaded', False):\n self.enable_input_require_grads()", "docstring": "Activates gradient checkpointing for the current model.\n\n Note that in other frameworks this feature can be referred to as \"activation checkpointing\" or \"checkpoint\n activations\".\n\n We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of\n the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2\n\nArgs:\n gradient_checkpointing_kwargs (dict, *optional*):\n Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function.", "source": "github_repos"} -{"code": "def cholesky(self, name: str='cholesky') -> 'LinearOperator':\n if not self._can_use_cholesky():\n raise ValueError('Cannot take the Cholesky decomposition: Not a positive definite self adjoint matrix.')\n with self._name_scope(name):\n return self._linop_cholesky()", "docstring": "Returns a Cholesky factor as a `LinearOperator`.\n\n Given `A` representing this `LinearOperator`, if `A` is positive definite\n self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky\n decomposition.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n `LinearOperator` which represents the lower triangular matrix\n in the Cholesky decomposition.\n\nRaises:\n ValueError: When the `LinearOperator` is not hinted to be positive\n definite and self adjoint.", "source": "github_repos"} -{"code": "def RemoveSubtypeAnnotation(node, value):\n attr = GetNodeAnnotation(node, Annotation.SUBTYPE)\n if attr and value in attr:\n attr.remove(value)\n SetNodeAnnotation(node, Annotation.SUBTYPE, attr)", "docstring": "Removes an annotation value from the subtype annotations on the node.\n\nArgs:\n node: the node.\n value: annotation value to remove.", "source": "github_repos"} -{"code": "def add_import(self, symbol, source_module_name, source_name, dest_module_name, dest_name):\n if source_module_name.endswith('python.modules_with_exports'):\n source_module_name = symbol.__module__\n import_str = self.format_import(source_module_name, source_name, dest_name)\n full_api_name = dest_name\n if dest_module_name:\n full_api_name = dest_module_name + '.' 
+ full_api_name\n symbol_id = -1 if not symbol else id(symbol)\n self._check_already_imported(symbol_id, full_api_name)\n if not dest_module_name and dest_name.startswith('_'):\n self._underscore_names_in_root.add(dest_name)\n priority = 0\n if symbol:\n if hasattr(symbol, '__module__'):\n priority = int(source_module_name == symbol.__module__)\n if hasattr(symbol, '__name__'):\n priority += int(source_name == symbol.__name__)\n self._module_imports[dest_module_name][full_api_name].add((import_str, priority))", "docstring": "Adds this import to module_imports.\n\nArgs:\n symbol: TensorFlow Python symbol.\n source_module_name: (string) Module to import from.\n source_name: (string) Name of the symbol to import.\n dest_module_name: (string) Module name to add import to.\n dest_name: (string) Import the symbol using this name.\n\nRaises:\n SymbolExposedTwiceError: Raised when an import with the same\n dest_name has already been added to dest_module_name.", "source": "github_repos"} -{"code": "def forward(self, pixel_values: torch.Tensor, dataset_index: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n embedding_output = self.embeddings(pixel_values)\n outputs = self.encoder(embedding_output, dataset_index=dataset_index, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)\n hidden_states = outputs.hidden_states if return_dict else outputs[1]\n feature_maps = ()\n for stage, hidden_state in zip(self.stage_names, hidden_states):\n if stage in self.out_features:\n hidden_state = self.layernorm(hidden_state)\n feature_maps += (hidden_state,)\n if not return_dict:\n if output_hidden_states:\n output = (feature_maps,) + outputs[1:]\n else:\n output = (feature_maps,) + outputs[2:]\n return output\n return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)", "docstring": "dataset_index (`torch.Tensor` of shape `(batch_size,)`):\n Index to use in the Mixture-of-Experts (MoE) blocks of the backbone.\n\n This corresponds to the dataset index used during training, e.g. 
index 0 refers to COCO.\n\nExample:\n ```python\n >>> from transformers import VitPoseBackboneConfig, VitPoseBackbone\n >>> import torch\n\n >>> config = VitPoseBackboneConfig(out_indices=[-1])\n >>> model = VitPoseBackbone(config)\n\n >>> pixel_values = torch.randn(1, 3, 256, 192)\n >>> dataset_index = torch.tensor([1])\n >>> outputs = model(pixel_values, dataset_index)\n ```", "source": "github_repos"} -{"code": "def is_deterministic(x: Any) -> bool:\n return not contains(x, type=NonDeterministic)", "docstring": "Returns if the input value is deterministic.\n\n Example::\n\n @pg.symbolize\n def foo(x, y):\n pass\n\n assert pg.is_deterministic(1)\n assert pg.is_deterministic(foo(1, 2))\n assert not pg.is_deterministic(pg.oneof([1, 2]))\n assert not pg.is_deterministic(foo(pg.oneof([1, 2]), 3))\n\nArgs:\n x: Value to query against.\n\nReturns:\n True if value itself is not NonDeterministic and its child and nested\n child fields do not contain NonDeterministic values.", "source": "github_repos"} -{"code": "def __init__(self, label, element=None, state=None):\n self.label = label\n self.state = state\n if element is not None:\n self.set_element(element)", "docstring": "Initialize a processing context object with an element and state.\n\n The element represents one value from a PCollection that will be accessed\n by a DoFn object during pipeline execution, and state is an arbitrary object\n where counters and other pipeline state information can be passed in.\n\n DoFnProcessContext objects are also used as inputs to PartitionFn instances.\n\nArgs:\n label: label of the PCollection whose element is being processed.\n element: element of a PCollection being processed using this context.\n state: a DoFnState object with state to be passed in to the DoFn object.", "source": "github_repos"} -{"code": "def _embedding_lookup_for_sparse_tensor(self, inp: sparse_tensor.SparseTensor, weight: Optional[sparse_tensor.SparseTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:\n\n def sparse_to_dense_computation(inp, weight):\n if weight is None:\n weight = sparse_tensor.SparseTensor(inp.indices, array_ops.ones_like(inp.values, dtype=dtypes.float32), dense_shape=inp.dense_shape)\n inp = sparse_ops.sparse_tensor_to_dense(inp)\n weight = sparse_ops.sparse_tensor_to_dense(weight)\n return (inp, weight)\n inp, weight = tpu_replication.outside_compilation(sparse_to_dense_computation, inp=inp, weight=weight)\n embeddings = embedding_ops.embedding_lookup_v2(table, inp)\n weight = array_ops.expand_dims(weight, -1)\n embeddings *= weight\n if not feature.output_shape and feature.max_sequence_length > 0:\n embeddings = self._pad_or_truncate_with_sequence_length(embeddings, feature.max_sequence_length)\n else:\n embeddings = self._apply_combiner_to_embeddings(embeddings, weight, feature.table.combiner)\n return embeddings", "docstring": "Embedding lookup for sparse tensor based on its feature config.\n\nArgs:\n inp: a single SparseTensor input.\n weight: None or SparseTensor which has the same shape of the input.\n table: a table variable.\n feature: a feature config.\n\nReturns:\n Embedding lookup result.", "source": "github_repos"} -{"code": "def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, ...]:\n if output_attentions or head_mask is not None:\n logger.warning_once('DistilBertSdpaAttention is used but 
`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation=\"eager\"` when loading the model.')\n return super().forward(query, key, value, mask, head_mask, output_attentions)\n batch_size, _, _ = query.size()\n dim_per_head = self.dim // self.n_heads\n\n def shape(x: torch.Tensor) -> torch.Tensor:\n \"\"\"separate heads\"\"\"\n return x.view(batch_size, -1, self.n_heads, dim_per_head).transpose(1, 2)\n\n def unshape(x: torch.Tensor) -> torch.Tensor:\n \"\"\"group heads\"\"\"\n return x.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * dim_per_head)\n q = shape(self.q_lin(query))\n k = shape(self.k_lin(key))\n v = shape(self.v_lin(value))\n if self.require_contiguous_qkv and q.device.type == 'cuda' and (mask is not None):\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n attn_output = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=self.dropout_prob if self.training else 0.0, is_causal=False)\n attn_output = unshape(attn_output)\n attn_output = self.out_lin(attn_output)\n return (attn_output,)", "docstring": "Parameters:\n query: torch.tensor(bs, seq_length, dim)\n key: torch.tensor(bs, seq_length, dim)\n value: torch.tensor(bs, seq_length, dim)\n mask: torch.tensor(bs, seq_length)\n\nReturns:\n weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,\n seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`", "source": "github_repos"} -{"code": "def __init__(self, *, target_profiles: CollectionType[str], structure_definition: message.Message, base_type: str, element_type: str, backbone_element_path: Optional[str], _child_defs: ChildDefinitions, _slices: Tuple[Slice, ...], _raw_url: str, cardinality: Cardinality=Cardinality.SCALAR, root_element_definition: Optional[message.Message]=None) -> None:\n super().__init__(structure_definition=structure_definition, backbone_element_path=backbone_element_path, base_type=base_type, element_type=element_type, _child_defs=_child_defs, _slices=_slices, _raw_url=_raw_url, root_element_definition=root_element_definition, cardinality=cardinality)\n object.__setattr__(self, 'target_profiles', target_profiles)", "docstring": "Represents a FHIR Reference.\n\n See more at: https://build.fhir.org/references.html\n\nAttributes:\n target_profiles: The types of resources to which the reference may link. 
See\n more at:\n https://build.fhir.org/elementdefinition-definitions.html#ElementDefinition.type.targetProfile", "source": "github_repos"} -{"code": "def make_fn(shared_variable_store, device_id):\n variable_scope_access_index = {}\n assert isinstance(device_id, int)\n\n def create_new_variable(next_creator, **kwargs):\n \"\"\"Create the variable using `next_creator` and store it.\"\"\"\n canonical_name = _canonicalize_variable_name(kwargs.get('name'))\n v = next_creator(**kwargs)\n if canonical_name not in shared_variable_store:\n shared_variable_store[canonical_name] = []\n shared_variable_store[canonical_name].append(v)\n return v\n\n def reuse_variable(next_creator, **kwargs):\n \"\"\"Re-use existing variable from store with same name (in order).\"\"\"\n del next_creator\n name = kwargs.get('name')\n canonical_name = _canonicalize_variable_name(name)\n try:\n variable_index = variable_scope_access_index.get(canonical_name, 0)\n v = shared_variable_store[canonical_name][variable_index]\n variable_scope_access_index[canonical_name] = variable_index + 1\n return v\n except (KeyError, IndexError):\n raise RuntimeError('Tried to create variable {} with mismatching name on device {}'.format(name, device_id))\n if device_id == 0:\n return create_new_variable\n else:\n return reuse_variable", "docstring": "Construct the variable creator function for device `device_id`.\n\n Constructs custom variable creator functions for the given device.\n On first device (device_id == 0), it creates the variable using the\n `next_creator`, and stores it in the provided `shared_variable_store`.\n On all other devices (device_id > 0), it tries to re-use the variable\n already created with the same name. If no such variable exists, it throws an\n error.\n Additionally, we de-uniquify variable names before checking for matches. This\n helps re-use variables which are intended to be the same but have different\n names due to variable uniquification happening upstream. 
Since this might\n mean we may have multiple variables with the same canonical name, we store\n them in a list per canonical name and return them in the same order as well.\n\nArgs:\n shared_variable_store: A dictionary that we will use to store variables\n created on the first device, and re-used by creators for other devices.\n device_id: Integer index of the device whose creator should be\n constructed.\n\nReturns:\n An appropriate creator function based on device_id.", "source": "github_repos"} -{"code": "def dummy_inputs(self):\n input_ids = tf.constant(DUMMY_INPUTS, dtype=tf.int32)\n batch_size, seq_len = input_ids.shape\n VISION_DUMMY_INPUTS = tf.random.uniform(shape=(batch_size, self.config.vision_config.num_channels, self.config.vision_config.image_size, self.config.vision_config.image_size), dtype=tf.float32)\n pixel_values = tf.constant(VISION_DUMMY_INPUTS)\n dummy = {'pixel_values': pixel_values, 'input_ids': input_ids}\n return dummy", "docstring": "Dummy inputs to build the network.\n\nReturns:\n `Dict[str, tf.Tensor]`: The dummy inputs.", "source": "github_repos"} -{"code": "def call(self, input_features: TFModelInputType | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, decoder_inputs_embeds: Optional[Tuple[Union[np.ndarray, tf.Tensor]]]=None, labels: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n outputs = self.model(input_features, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n decoder_last_hidden_state = outputs[0]\n lm_logits = tf.matmul(decoder_last_hidden_state, self.get_output_embeddings().weights, transpose_b=True)\n loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n return TFSeq2SeqLMOutput(loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", 
"docstring": "labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`\n or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is\n only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:\n\nExample:\n ```python\n >>> import tensorflow as tf\n >>> from transformers import AutoProcessor, TFWhisperForConditionalGeneration\n >>> from datasets import load_dataset\n\n >>> processor = AutoProcessor.from_pretrained(\"openai/whisper-tiny.en\")\n >>> model = TFWhisperForConditionalGeneration.from_pretrained(\"openai/whisper-tiny.en\")\n\n >>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n\n >>> inputs = processor(ds[0][\"audio\"][\"array\"], return_tensors=\"tf\")\n >>> input_features = inputs.input_features\n\n >>> generated_ids = model.generate(input_features=input_features)\n\n >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]\n >>> transcription\n ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'\n ```", "source": "github_repos"} -{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor:\n assert not (input_ids is None and inputs_embeds is None)\n if input_ids is not None:\n check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n input_shape = shape_list(inputs_embeds)[:-1]\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = inputs_embeds + token_type_embeds\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\n final_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github_repos"} -{"code": "def w8a8_block_fp8_matmul_triton(A: torch.Tensor, B: torch.Tensor, As: torch.Tensor, Bs: torch.Tensor, block_size: List[int], output_dtype: torch.dtype=torch.float32) -> torch.Tensor:\n assert len(block_size) == 2\n block_n, block_k = (block_size[0], block_size[1])\n assert A.shape[-1] == B.shape[-1]\n assert A.shape[:-1] == As.shape[:-1] and A.is_contiguous()\n assert triton.cdiv(A.shape[-1], block_k) == As.shape[-1]\n M = A.numel() // A.shape[-1]\n assert B.ndim == 2 and B.is_contiguous() and (Bs.ndim == 2)\n N, K = B.shape\n assert triton.cdiv(N, block_n) == Bs.shape[0]\n assert triton.cdiv(K, block_k) == Bs.shape[1]\n C_shape = A.shape[:-1] + (N,)\n C = A.new_empty(C_shape, dtype=output_dtype)\n BLOCK_SIZE_M = 128\n if M < BLOCK_SIZE_M:\n BLOCK_SIZE_M = triton.next_power_of_2(M)\n BLOCK_SIZE_M = max(BLOCK_SIZE_M, 16)\n BLOCK_SIZE_K = block_k\n assert block_k % BLOCK_SIZE_K == 0\n BLOCK_SIZE_N = block_n\n\n def grid(META):\n return (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),)\n _w8a8_block_fp8_matmul[grid](A, B, C, As, Bs, M, N, K, block_n, block_k, A.stride(-2), A.stride(-1), B.stride(1), B.stride(0), C.stride(-2), C.stride(-1), As.stride(-2), As.stride(-1), Bs.stride(1), Bs.stride(0), 
BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE_M=8)\n return C", "docstring": "This function performs matrix multiplication with block-wise\n quantization.\n It takes two input tensors `A` and `B` with scales `As` and `Bs`.\n The output is returned in the specified `output_dtype`.\n\nArgs:\n A: The input tensor, e.g., activation.\n B: The input tensor, e.g., weight.\n As: The per-token-group quantization scale for `A`.\n Bs: The per-block quantization scale for `B`.\n block_size: The block size for per-block quantization. It should\n be 2-dim, e.g., [128, 128].\n output_dytpe: The dtype of the returned tensor.\n\nReturns:\n torch.Tensor: The result of matmul.", "source": "github_repos"} -{"code": "def assign(self, value, use_locking=False, name=None, read_value=True):\n assign = state_ops.assign(self._variable, value, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op", "docstring": "Assigns a new value to the variable.\n\n This is essentially a shortcut for `assign(self, value)`.\n\nArgs:\n value: A `Tensor`. The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\nReturns:\n A `Tensor` that will hold the new value of this variable after\n the assignment has completed.", "source": "github_repos"} -{"code": "def copy(self) -> 'TraceableStack[T]':\n return TraceableStack(self._stack)", "docstring": "Return a copy of self referencing the same objects but in a new list.\n\n This method is implemented to support thread-local stacks.\n\nReturns:\n TraceableStack with a new list that holds existing objects.", "source": "github_repos"} -{"code": "def get_log_line_timestamp(delta=None):\n return _get_timestamp('%m-%d %H:%M:%S.%f', delta)", "docstring": "Returns a timestamp in the format used by log lines.\n\n Default is current time. If a delta is set, the return value will be\n the current time offset by delta seconds.\n\nArgs:\n delta: Number of seconds to offset from current time; can be negative.\n\nReturns:\n A timestamp in log line format with an offset.", "source": "github_repos"} -{"code": "def select(self, inputs: List[Any], global_state: pg.geno.AttributeDict, step: int) -> List[Any]:", "docstring": "Select a list of outputs from the inputs.\n\n A selector has two use cases:\n\n * Used as parents selector, which selects individuals from the population\n as parents for recombination. It will be called before the recombination\n step within the :meth:`pyglove.evolution.Evolution.propose` method.\n\n * Used as a population updater, which selects individuals from previous\n population as a new population. 
It will be called everytime the\n population is updated, triggered by the\n :meth:`pyglove.evolution.Evolution.feedback` method.\n\nArgs:\n inputs: a list of objects as input.\n global_state: An `AttributeDict` object as the global state container,\n which is readable/writable during the operation.\n step: Number of examples historically proposed, which can be used for\n determining a cross over schedule.", "source": "github_repos"} -{"code": "def __init__(self, backend=None):\n self._backend = backend or backend_module.backend()", "docstring": "A class that can be used to switch from one backend to another.\n\nExample:\n ```python\n backend = DynamicBackend(\"tensorflow\")\n y = backend.square(tf.constant(...))\n backend.set_backend(\"jax\")\n y = backend.square(jax.numpy.array(...))\n ```\n\nArgs:\n backend: Initial backend to use (string).", "source": "github_repos"} -{"code": "def output_stream(self, instruction_id, transform_id):\n raise NotImplementedError(type(self))", "docstring": "Returns an output stream writing elements to transform_id.\n\nArgs:\n instruction_id: which instruction this stream belongs to\n transform_id: the transform_id of the returned stream", "source": "github_repos"} -{"code": "def assert_self_adjoint(self, name='assert_self_adjoint'):\n with self._name_scope(name):\n return self._assert_self_adjoint()", "docstring": "Returns an `Op` that asserts this operator is self-adjoint.\n\n Here we check that this operator is *exactly* equal to its hermitian\n transpose.\n\nArgs:\n name: A string name to prepend to created ops.\n\nReturns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is not self-adjoint.", "source": "github_repos"} -{"code": "def _safe_scalar_div(numerator, denominator, name):\n numerator.get_shape().with_rank_at_most(1)\n denominator.get_shape().with_rank_at_most(1)\n return math_ops.div_no_nan(numerator, denominator, name=name)", "docstring": "Divides two values, returning 0 if the denominator is 0.\n\nArgs:\n numerator: A scalar `float64` `Tensor`.\n denominator: A scalar `float64` `Tensor`.\n name: Name for the returned op.\n\nReturns:\n 0 if `denominator` == 0, else `numerator` / `denominator`", "source": "github_repos"} -{"code": "def convert_to_bq_name(name: str) -> str:\n return BQ_REGEX.sub('_', name).lower()", "docstring": "Tranform the given string into a valid BigQuery name -\n convert non-alphanumeric characters to an underscore (_)\n and lowercase the result for consistency.\n\nArgs:\n * name: original name\n\nReturns:\n * Transformed valid name", "source": "github_repos"} -{"code": "def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:\n if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:\n return _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n return _fused_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n elif sorted(reduction_axes) == list(range(ndim(x)))[:-1]:\n return _regular_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n else:\n return _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)", "docstring": "Computes mean and std for batch then apply batch_normalization on batch.\n\nArgs:\n x: Input tensor or variable.\n gamma: Tensor by which to scale the input.\n beta: Tensor with which to center the input.\n reduction_axes: iterable of 
integers,\n axes over which to normalize.\n epsilon: Fuzz factor.\n\nReturns:\n A tuple length of 3, `(normalized_tensor, mean, variance)`.", "source": "github_repos"} -{"code": "def size(self, url):\n return self.metadata(url).size_in_bytes", "docstring": "Fetches file size for a URL.\n\nReturns:\n int size of path according to the FileSystem.\n\nRaises:\n ``BeamIOError``: if url doesn't exist.", "source": "github_repos"} -{"code": "def _partitioner(shape, dtype):\n if not isinstance(shape, tensor_shape.TensorShape):\n raise ValueError(f'shape is not a TensorShape: {shape}')\n if not shape.is_fully_defined():\n raise ValueError(f'shape is not fully defined: {shape}')\n dtype = dtypes.as_dtype(dtype)\n if dtype.base_dtype == dtypes.string:\n element_size = bytes_per_string_element\n else:\n element_size = dtype.size\n partitions = [1] * shape.ndims\n bytes_per_slice = 1.0 * (shape.num_elements() / shape.dims[axis].value) * element_size\n slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n axis_shards = int(math.ceil(1.0 * shape.dims[axis].value / slices_per_shard))\n if max_shards:\n axis_shards = min(max_shards, axis_shards)\n partitions[axis] = axis_shards\n return partitions", "docstring": "Partitioner that partitions shards to have max_shard_bytes total size.\n\nArgs:\n shape: A `TensorShape`.\n dtype: A `DType`.\n\nReturns:\n A tuple representing how much to slice each axis in shape.\n\nRaises:\n ValueError: If shape is not a fully defined `TensorShape` or dtype is not\n a `DType`.", "source": "github_repos"} -{"code": "def forecast_masking(inputs: torch.Tensor, num_forecast_mask_patches: Union[list, int], unmasked_channel_indices: Optional[list]=None, mask_value: int=0):\n if isinstance(num_forecast_mask_patches, int):\n num_forecast_mask_patches = [num_forecast_mask_patches]\n forecast_mask_ratios = [1 for _ in num_forecast_mask_patches]\n batch_size, num_channels, sequence_length, num_features = inputs.shape\n mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device)\n t_list = []\n total_length = 0\n total_ratio = sum(forecast_mask_ratios)\n for patch_length, ratio in zip(num_forecast_mask_patches, forecast_mask_ratios):\n if patch_length <= 0 or patch_length >= sequence_length:\n raise ValueError(f'num_forecast_mask_patches {patch_length} should be greater than 0 and less than total patches.')\n temp_len = int(batch_size * ratio / total_ratio)\n t_list.append([patch_length, ratio, temp_len])\n total_length += temp_len\n t_list = sorted(t_list, key=lambda x: x[2])\n if total_length < batch_size:\n t_list[0][2] = t_list[0][2] + (batch_size - total_length)\n elif total_length > batch_size:\n t_list[-1][2] = t_list[-1][2] + (total_length - batch_size)\n batch1 = 0\n for patch_len, _, temp_len in t_list:\n batch2 = batch1 + temp_len\n mask[batch1:batch2, :, -patch_len:] = 1\n batch1 = batch2\n perm = torch.randperm(mask.shape[0])\n mask = mask[perm]\n mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features)\n if unmasked_channel_indices is not None:\n mask[:, unmasked_channel_indices, :, :] = 0\n inputs_mask = inputs.masked_fill(mask.bool(), mask_value)\n return (inputs_mask, mask[..., 0])", "docstring": "Forecast masking that masks the last K patches where K is from the num_forecast_mask_patches.\n If num_forecast_mask_patches is a list, samples in the batch will be randomly masked by numbers defined in the list.\n\n Parameters:\n inputs (`torch.Tensor`):\n Input of shape `(bs, num_channels, num_patch, patch_length)`\n 
num_forecast_mask_patches (`list`):\n Number of patches to be masked at the end of each batch sample. e.g. 4 or [3, 5].\n unmasked_channel_indices (`list`, *optional*):\n Indices of channels that are not masked.\n mask_value (`int`, *optional*, defaults to 0):\n Values in the masked patches will be filled by `mask_value`.\n\nReturns:\n `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as inputs Tensor and Mask tensor of shape `(bs,\n num_channels , num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)`", "source": "github_repos"} -{"code": "def normalize_tuple(value, n, name):\n if isinstance(value, int):\n return (value,) * n\n else:\n try:\n value_tuple = tuple(value)\n except TypeError:\n raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} integers. Received: {str(value)}')\n if len(value_tuple) != n:\n raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} integers. Received: {str(value)}')\n for single_value in value_tuple:\n try:\n int(single_value)\n except (ValueError, TypeError):\n raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} integers. Received: {str(value)} including element {str(single_value)} of type {str(type(single_value))}')\n return value_tuple", "docstring": "Transforms a single integer or iterable of integers into an integer tuple.\n\nArgs:\n value: The value to validate and convert. Could an int, or any iterable\n of ints.\n n: The size of the tuple to be returned.\n name: The name of the argument being validated, e.g. \"strides\" or\n \"kernel_size\". This is only used to format error messages.\n\nReturns:\n A tuple of n integers.\n\nRaises:\n ValueError: If something else than an int/long or iterable thereof was\n passed.", "source": "github_repos"} -{"code": "def size(self, path: str) -> int:\n raise NotImplementedError", "docstring": "Get size in bytes of a file on the FileSystem.\n\nArgs:\n path: string filepath of file.\n\n Returns: int size of file according to the FileSystem.\n\nRaises:\n ``BeamIOError``: if path doesn't exist.", "source": "github_repos"} -{"code": "def pad_to_square(self, image: np.ndarray, background_color: Union[int, Tuple[int, int, int]]=0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:\n height, width = get_image_size(image, input_data_format)\n num_channels = image.shape[0] if input_data_format == ChannelDimension.FIRST else image.shape[-1]\n if height == width:\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image\n max_dim = max(height, width)\n if isinstance(background_color, int):\n background_color = [background_color]\n elif len(background_color) != num_channels:\n raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels')\n if input_data_format == ChannelDimension.FIRST:\n result = np.zeros((num_channels, max_dim, max_dim), dtype=image.dtype)\n for i, color in enumerate(background_color):\n result[i, :, :] = color\n if width > height:\n start = (max_dim - height) // 2\n result[:, start:start + height, :] = image\n else:\n start = (max_dim - width) // 2\n result[:, :, start:start + width] = image\n else:\n result = np.zeros((max_dim, max_dim, num_channels), dtype=image.dtype)\n for i, color in enumerate(background_color):\n result[:, :, i] = color\n if width > height:\n start = (max_dim - height) // 2\n result[start:start + height, :, :] = image\n else:\n 
start = (max_dim - width) // 2\n result[:, start:start + width, :] = image\n image = to_channel_dimension_format(result, data_format, input_data_format) if data_format is not None else result\n return image", "docstring": "Pads an image to a square based on the longest edge.\n\nArgs:\n image (`np.ndarray`):\n The image to pad.\n background_color (`int` or `Tuple[int, int, int]`, *optional*, defaults to 0):\n The color to use for the padding. Can be an integer for single channel or a\n tuple of integers representing for multi-channel images. If passed as integer\n in mutli-channel mode, it will default to `0` in subsequent channels.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\nReturns:\n `np.ndarray`: The padded image.", "source": "github_repos"} -{"code": "def empty_function(function: _evaluation.EmptyFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n del params_result\n if operand_result is None:\n raise ValueError('empty() cannot be called without an operand.')\n sql_alias = 'empty_'\n sql_data_type = _sql_data_types.Boolean\n if not _fhir_path_data_types.returns_collection(function.parent_node.return_type):\n return dataclasses.replace(operand_result, select_part=operand_result.select_part.is_null(_sql_alias=sql_alias))\n else:\n return _sql_data_types.Select(select_part=_sql_data_types.RawExpression('CASE WHEN COUNT(*) = 0 THEN TRUE ELSE FALSE END', _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=str(operand_result.to_subquery()), where_part=f'{operand_result.sql_alias} IS NOT NULL', sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Generates Spark SQL representing the FHIRPath empty() function.\n\n Returns `TRUE` if the input collection is empty and `FALSE` otherwise.\n\n This is the opposite of `_ExistsFunction`. If the operand is empty, then the\n result is `TRUE`.\n\n The returned SQL expression is a table of cardinality 1, whose value is of\n `BOOL` type. 
By default, `_EmptyFunction` will return `FALSE` if given no\n operand.\n\nArgs:\n function: The FHIRPath AST `EmptyFunction` node\n operand_result: The expression which is being evaluated\n params_result: The parameter passed in to function\n\nReturns:\n A compiled Spark SQL expression.\n\nRaises:\n ValueError: When the function is called without an operand", "source": "github_repos"} -{"code": "def write_mutations(self, throttler, rpc_stats_callback, throttle_delay=1):\n while throttler.throttle_request(time.time() * 1000):\n _LOGGER.info('Delaying request for %ds due to previous failures', throttle_delay)\n time.sleep(throttle_delay)\n rpc_stats_callback(throttled_secs=throttle_delay)\n if self._batch is None:\n self._batch = self._client.batch()\n self._batch.begin()\n for element in self._batch_elements:\n self.add_to_batch(element)\n resource = resource_identifiers.DatastoreNamespace(self._project, '')\n labels = {monitoring_infos.SERVICE_LABEL: 'Datastore', monitoring_infos.METHOD_LABEL: 'BatchDatastoreWrite', monitoring_infos.RESOURCE_LABEL: resource, monitoring_infos.DATASTORE_NAMESPACE_LABEL: '', monitoring_infos.DATASTORE_PROJECT_ID_LABEL: self._project, monitoring_infos.STATUS_LABEL: 'ok'}\n service_call_metric = ServiceCallMetric(request_count_urn=monitoring_infos.API_REQUEST_COUNT_URN, base_labels=labels)\n try:\n start_time = time.time()\n self._batch.commit()\n end_time = time.time()\n service_call_metric.call('ok')\n rpc_stats_callback(successes=1)\n throttler.successful_request(start_time * 1000)\n commit_time_ms = int((end_time - start_time) * 1000)\n return commit_time_ms\n except (ClientError, GoogleAPICallError) as e:\n self._batch = None\n service_call_metric.call(e.code.value)\n rpc_stats_callback(errors=1)\n raise\n except HttpError as e:\n service_call_metric.call(e)\n rpc_stats_callback(errors=1)\n raise", "docstring": "Writes a batch of mutations to Cloud Datastore.\n\n If a commit fails, it will be retried up to 5 times. All mutations in the\n batch will be committed again, even if the commit was partially\n successful. If the retry limit is exceeded, the last exception from\n Cloud Datastore will be raised.\n\n Assumes that the Datastore client library does not perform any retries on\n commits. 
It has not been determined how such retries would interact with\n the retries and throttler used here.\n See ``google.cloud.datastore_v1.gapic.datastore_client_config`` for\n retry config.\n\nArgs:\n rpc_stats_callback: a function to call with arguments `successes` and\n `failures` and `throttled_secs`; this is called to record successful\n and failed RPCs to Datastore and time spent waiting for throttling.\n throttler: (``apache_beam.io.gcp.datastore.v1new.adaptive_throttler.\n AdaptiveThrottler``)\n Throttler instance used to select requests to be throttled.\n throttle_delay: (:class:`float`) time in seconds to sleep when\n throttled.\n\nReturns:\n (int) The latency of the successful RPC in milliseconds.", "source": "github_repos"} -{"code": "def _calculate_expected_result(dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config):\n if config.use_gumbel_for_cells:\n gumbel_dist = torch.distributions.RelaxedBernoulli(temperature=config.temperature, logits=dist_per_cell.logits * config.temperature)\n scaled_probability_per_cell = gumbel_dist.sample()\n else:\n scaled_probability_per_cell = dist_per_cell.probs\n scaled_probability_per_cell = scaled_probability_per_cell / numeric_values_scale * input_mask_float\n count_result = torch.sum(scaled_probability_per_cell, dim=1)\n numeric_values_masked = torch.where(torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values)\n sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1)\n avg_approximation = config.average_approximation_function\n if avg_approximation == AverageApproximationFunction.RATIO:\n average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)\n elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:\n ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1\n average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1)\n elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:\n ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1\n pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)\n var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var\n multiplier = (var / torch.square(ex) + 1) / ex\n average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1)\n else:\n raise ValueError(f'Invalid average_approximation_function: {config.average_approximation_function}')\n if config.use_gumbel_for_aggregation:\n gumbel_dist = torch.distributions.RelaxedOneHotCategorical(config.aggregation_temperature, logits=logits_aggregation[:, 1:])\n aggregation_op_only_probs = gumbel_dist.sample()\n else:\n aggregation_op_only_probs = nn.functional.softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1)\n all_results = torch.cat([torch.unsqueeze(sum_result, dim=1), torch.unsqueeze(average_result, dim=1), torch.unsqueeze(count_result, dim=1)], dim=1)\n expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1)\n return expected_result", "docstring": "Calculates the expected result given cell and aggregation probabilities.\n\nArgs:\n dist_per_cell (`torch.distributions.Bernoulli`):\n Cell selection distribution for each cell.\n numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):\n Numeric values of every token. 
Nan for tokens which are not numeric values.\n numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):\n Scale of the numeric values of every token.\n input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):\n Mask for the table, without question tokens and table headers.\n logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):\n Logits per aggregation operation.\n config ([`TapasConfig`]):\n Model configuration class with all the hyperparameters of the model\n\nReturns:\n expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example.", "source": "github_repos"} -{"code": "def _apply_sequence_to_tensor_op(cls, op_fn, tensor_wrappers):\n raise NotImplementedError()", "docstring": "Applies given sequence-to-tensor op.\n\n This method is used for implementing ops that take a sequence of tensors and\n return a new tensor, such as tf.concat and tf.stack. Implementing wrappers\n should apply `op_fn` to the backing tensor(s) and return an new wrapper\n instance with the combined backing tensor.\n\nArgs:\n op_fn: Callable that applies sequence-to-tensor op to the given sequence\n of Tensors. E.g. applies tf.concat.\n tensor_wrappers: a sequence of tensor wrappers to be transformed. All\n elements have the type of the implementing TensorWrapper class.\n\nReturns:\n A TensorWrapper instance with combined backing tensor(s).", "source": "github_repos"} -{"code": "def __init__(self, initial_learning_rate, decay_steps, num_periods=0.5, alpha=0.0, beta=0.001, name=None):\n super(LinearCosineDecay, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.num_periods = num_periods\n self.alpha = alpha\n self.beta = beta\n self.name = name", "docstring": "Applies linear cosine decay to the learning rate.\n\nArgs:\n initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python\n number. The initial learning rate.\n decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Number of steps to decay over.\n num_periods: Number of periods in the cosine part of the decay.\n See computation above.\n alpha: See computation above.\n beta: See computation above.\n name: String. Optional name of the operation. 
Defaults to\n 'LinearCosineDecay'.", "source": "github_repos"} -{"code": "def convert(value, dtype=None, name=None, as_ref=False, preferred_dtype=None, accepted_result_types=(core.Symbol,)):\n if dtype is not None:\n dtype = dtypes.as_dtype(dtype)\n if preferred_dtype is not None:\n preferred_dtype = dtypes.as_dtype(preferred_dtype)\n overload = getattr(value, '__tf_tensor__', None)\n if overload is not None:\n return overload(dtype, name)\n for base_type, conversion_func in get(type(value)):\n ret = None\n if dtype is None and preferred_dtype is not None:\n try:\n ret = conversion_func(value, dtype=preferred_dtype, name=name, as_ref=as_ref)\n except (TypeError, ValueError):\n pass\n else:\n if ret is not NotImplemented and ret.dtype.base_dtype != preferred_dtype.base_dtype:\n raise RuntimeError(_add_error_prefix(f'Conversion function {conversion_func!r} for type {base_type} returned incompatible dtype: requested = {preferred_dtype.base_dtype.name}, actual = {ret.dtype.base_dtype.name}', name=name))\n if ret is None:\n ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)\n if ret is NotImplemented:\n continue\n if isinstance(ret, core.Tensor):\n to_tensor = getattr(ret, '__tf_tensor__', None)\n ret = to_tensor() if to_tensor is not None else ret\n if not isinstance(ret, accepted_result_types):\n raise RuntimeError(_add_error_prefix(f'Conversion function {conversion_func!r} for type {base_type} returned non-Tensor: {ret!r}', name=name))\n if dtype and (not dtype.is_compatible_with(ret.dtype)):\n raise RuntimeError(_add_error_prefix(f'Conversion function {conversion_func} for type {base_type} returned incompatible dtype: requested = {dtype.name}, actual = {ret.dtype.name}', name=name))\n return ret\n raise TypeError(_add_error_prefix(f'Cannot convert {value!r} with type {type(value)} to Tensor: no conversion function registered.', name=name))", "docstring": "Converts `value` to a `Tensor` using registered conversion functions.\n\nArgs:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n name: Optional name to use if a new `Tensor` is created.\n as_ref: Optional boolean specifying if the returned value should be a\n reference-type `Tensor` (e.g. Variable). Pass-through to the registered\n conversion function. Defaults to `False`.\n preferred_dtype: Optional element type for the returned tensor.\n Used when dtype is None. In some cases, a caller may not have a dtype\n in mind when converting to a tensor, so `preferred_dtype` can be used\n as a soft preference. If the conversion to `preferred_dtype` is not\n possible, this argument has no effect.\n accepted_result_types: Optional collection of types as an allow-list\n for the returned value. 
If a conversion function returns an object\n which is not an instance of some type in this collection, that value\n will not be returned.\n\nReturns:\n A `Tensor` converted from `value`.\n\nRaises:\n ValueError: If `value` is a `Tensor` and conversion is requested\n to a `Tensor` with an incompatible `dtype`.\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.", "source": "github_repos"} -{"code": "def add_decomposed_rel_pos(self, attn: torch.Tensor, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int]) -> torch.Tensor:\n query_height, query_width = q_size\n key_height, key_width = k_size\n relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h)\n relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w)\n batch_size, _, dim = query.shape\n reshaped_query = query.reshape(batch_size, query_height, query_width, dim)\n rel_h = torch.einsum('bhwc,hkc->bhwk', reshaped_query, relative_position_height)\n rel_w = torch.einsum('bhwc,wkc->bhwk', reshaped_query, relative_position_width)\n attn = attn.reshape(batch_size, query_height, query_width, key_height, key_width)\n attn = attn + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]\n attn = attn.reshape(batch_size, query_height * query_width, key_height * key_width)\n return attn", "docstring": "Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.\n https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py\n\nArgs:\n attn (`torch.Tensor`):\n attention map.\n query (`torch.Tensor`):\n query q in the attention layer with shape (batch_size, query_height * query_width, channel).\n rel_pos_h (`torch.Tensor`):\n relative position embeddings (Lh, channel) for height axis.\n rel_pos_w (`torch.Tensor`):\n relative position embeddings (Lw, channel) for width axis.\n q_size (tuple):\n spatial sequence size of query q with (query_height, query_width).\n k_size (tuple):\n spatial sequence size of key k with (key_height, key_width).\n\nReturns:\n attn (`torch.Tensor`):\n attention map with added relative positional embeddings.", "source": "github_repos"} -{"code": "def SplitLine(self, line, width):\n lines = []\n chunk = ''\n w = 0\n keep = False\n for normal, control in self.SplitIntoNormalAndControl(line):\n keep = True\n while True:\n n = width - w\n w += len(normal)\n if w <= width:\n break\n lines.append(chunk + normal[:n])\n chunk = ''\n keep = False\n w = 0\n normal = normal[n:]\n chunk += normal + control\n if chunk or keep:\n lines.append(chunk)\n return lines", "docstring": "Splits line into width length chunks.\n\nArgs:\n line: The line to split.\n width: The width of each chunk except the last which could be smaller than\n width.\n\nReturns:\n A list of chunks, all but the last with display width == width.", "source": "github_repos"} -{"code": "def If(cond, inputs, then_branch, else_branch, name=None):\n if isinstance(then_branch, function._DefinedFunction):\n tlist = [_.type for _ in then_branch.definition.signature.output_arg]\n return gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name=name)\n then_out = then_branch.structured_outputs\n else_out = else_branch.structured_outputs\n nest.assert_same_structure(then_out, else_out, expand_composites=True)\n tlist = nest.flatten(then_branch.output_dtypes)\n ret = 
gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name=name)\n return nest.pack_sequence_as(then_out, ret, expand_composites=True)", "docstring": "output = Cond(inputs) ?\n\n then_branch(inputs) : else_branch(inputs).\n\nArgs:\n cond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is\n converted to a boolean according to the following rule: if the scalar is a\n numerical value, non-zero means True and zero means False; if the scalar\n is a string, non-empty means True and empty means False.\n inputs: A list of input tensors.\n then_branch: A function takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns.\n else_branch: A function takes 'inputs' and returns a list of tensors. whose\n types are the same as what then_branch returns.\n name: A name for the operation (optional).\n\nReturns:\n A list of tensors returned by either then_branch(inputs)\n or else_branch(inputs).", "source": "github_repos"} -{"code": "def path_to_bytes(path):\n if hasattr(path, '__fspath__'):\n path = path.__fspath__()\n return as_bytes(path)", "docstring": "Converts input which is a `PathLike` object to `bytes`.\n\n Converts from any python constant representation of a `PathLike` object\n or `str` to bytes.\n\nArgs:\n path: An object that can be converted to path representation.\n\nReturns:\n A `bytes` object.\n\n Usage:\n In case a simplified `bytes` version of the path is needed from an\n `os.PathLike` object.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n hidden_states, self_attn_weights, past_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states, cross_attn_weights, past_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, output_attentions=output_attentions, cache_position=cache_position)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n 
hidden_states = self.final_layer_norm(hidden_states)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n if use_cache:\n outputs += (past_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (`torch.FloatTensor`):\n cross attention input to the layer of shape `(batch, seq_len, embed_dim)`\n encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of\n size `(decoder_attention_heads,)`.\n past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. It is used to update the\n cache in the correct position and to infer the complete sequence length.", "source": "github_repos"} -{"code": "def init_cache(self, batch_size, max_length):\n input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n attention_mask = jnp.ones_like(input_ids)\n init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, return_dict=False, init_cache=True)\n return unfreeze(init_variables['cache'])", "docstring": "Args:\n batch_size (`int`):\n batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\n max_length (`int`):\n maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\n cache.", "source": "github_repos"} -{"code": "def _resolve_grad_captures(body_graph, body_grad_graph, while_op):\n new_capture_inputs = []\n for t in body_grad_graph.external_captures:\n if t.graph == body_graph:\n for i, output in enumerate(t.graph.outputs):\n if output is t:\n t = while_op.outputs[i]\n break\n assert t.graph == body_graph.outer_graph\n new_capture_inputs.append(t)\n return new_capture_inputs", "docstring": "Returns the tensors to pass as captured inputs to `body_grad_graph`.\n\n `body_grad_graph` may have external references to:\n 1. Its outer graph containing the input gradients. These are left as-is.\n 2. Accumulators captured from the forward-pass graph. These should have been\n added as `while_op` outputs after the gradient graph was built. We replace\n these with the corresponding output of `while_op`, i.e. a tensor in\n `body_graph.outer_graph`. In the case of nested control flow or functions,\n the gradient logic handling `body_grad_graph.outer_graph` will make sure\n the tensor from `body_graph.outer_graph` is also correctly captured.\n\nArgs:\n body_graph: FuncGraph. The forward-pass body function.\n body_grad_graph: FuncGraph. 
The body gradients function.\n while_op: The forward-pass While Operation calling `body_graph`.\n\nReturns:\n A list of input tensors to be passed as the captured inputs to\n `body_grad_graph`.", "source": "github_repos"} -{"code": "def _from_pyval(cls, pyval, typespec, path_so_far):\n if isinstance(pyval, dict):\n return cls._from_pydict(pyval, typespec, path_so_far)\n elif isinstance(pyval, (list, tuple)):\n keys = set()\n rank = _pyval_find_struct_keys_and_depth(pyval, keys)\n if rank is not None:\n return cls._from_pylist_of_dict(pyval, keys, rank, typespec, path_so_far)\n else:\n return cls._from_pylist_of_value(pyval, typespec, path_so_far)\n else:\n return cls._from_pyscalar(pyval, typespec, path_so_far)", "docstring": "Helper function for from_pyval.\n\nArgs:\n pyval: The nested Python structure that should be used to create the new\n `StructuredTensor`.\n typespec: A `StructuredTensor.Spec` specifying the expected type for each\n field. If not specified, then all nested dictionaries are turned into\n StructuredTensors, and all nested lists are turned into Tensors (if\n rank<2) or RaggedTensors (if rank>=2).\n path_so_far: the path of fields that led here (for error messages).\n\nReturns:\n A `StructuredTensor`.", "source": "github_repos"} -{"code": "def _key2seed(a):\n\n def int64_to_int32s(a):\n \"\"\"Converts an int64 tensor of shape [] to an int32 tensor of shape [2].\"\"\"\n a = math_ops.cast(a, dtypes.uint64)\n fst = math_ops.cast(a, dtypes.uint32)\n snd = math_ops.cast(gen_bitwise_ops.right_shift(a, constant_op.constant(32, dtypes.uint64)), dtypes.uint32)\n a = [fst, snd]\n a = nest.map_structure(lambda x: math_ops.cast(x, dtypes.int32), a)\n a = array_ops_stack.stack(a)\n return a\n return int64_to_int32s(a)", "docstring": "Converts an RNG key to an RNG seed.\n\nArgs:\n a: an RNG key, an ndarray of shape [] and dtype `np.int64`.\n\nReturns:\n an RNG seed, a tensor of shape [2] and dtype `tf.int32`.", "source": "github_repos"} -{"code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Any:\n if not self._built:\n self.build()\n nest.assert_same_structure(features, self._feature_config)\n flat_inputs = nest.flatten(features)\n flat_weights = [None] * len(flat_inputs)\n if weights is not None:\n nest.assert_same_structure(features, weights)\n flat_weights = nest.flatten(weights)\n flat_features = nest.flatten_with_joined_string_paths(self._feature_config)\n outputs = []\n for inp, weight, (path, feature) in zip(flat_inputs, flat_weights, flat_features):\n table = self.embedding_tables[feature.table]\n if weight is not None:\n if isinstance(inp, tensor.Tensor):\n raise ValueError('Weight specified for {}, but input is dense.'.format(path))\n elif type(weight) is not type(inp):\n raise ValueError('Weight for {} is of type {} but it does not match type of the input which is {}.'.format(path, type(weight), type(inp)))\n elif feature.max_sequence_length > 0:\n raise ValueError('Weight specified for {}, but this is a sequence feature.'.format(path))\n if isinstance(inp, tensor.Tensor):\n if feature.max_sequence_length > 0:\n raise ValueError('Feature {} is a sequence feature but a dense tensor was passed.'.format(path))\n outputs.append(embedding_ops.embedding_lookup_v2(table, inp))\n elif isinstance(inp, sparse_tensor.SparseTensor):\n outputs.append(self._embedding_lookup_for_sparse_tensor(inp, weight, table, feature))\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n outputs.append(self._embedding_lookup_for_ragged_tensor(inp, weight, table, 
feature))\n else:\n raise ValueError('Input {} is type {}. Tensor, SparseTensor or RaggedTensor expected.'.format(path, type(inp)))\n return nest.pack_sequence_as(self._feature_config, outputs)", "docstring": "Apply embedding lookup on TPUs using Tensorcore.\n\n Note that all the sparse and ragged tensors will be converted to dense\n tensors on CPU and then passed to the TPU to do embedding look up. Large\n embedding lookup is not supported by this API, use the TPUEmbedding mid\n level api instead.\n\nArgs:\n features: a nested structure of Tensors, SparseTensors or RaggedTensors.\n weights: a nested structure of Tensors, SparseTensors or RaggedTensors or\n None for no weights. If not None, structure must match that of inputs,\n but entries are allowed to be None.\n\nReturns:\n A nested structure of Tensors with the same structure as inputs.", "source": "github_repos"} -{"code": "def __init__(self, model: PreTrainedModel, max_batch_size: int=1, max_cache_len: int=4096):\n super().__init__()\n if not hasattr(model.config, 'use_cache') or model.config.use_cache is False:\n raise ValueError('The model must have caching enabled to be performant.')\n if hasattr(model.config, 'layer_types') and getattr(model.config, 'sliding_window', None) is not None:\n self.model = TorchExportableModuleWithHybridCache(model, max_batch_size, max_cache_len)\n else:\n logging.info('Using `StaticCache` for export as `layer_types` is not specified or `sliding_window` is `null` in the config.')\n self.model = TorchExportableModuleWithStaticCache(model)", "docstring": "Initializes the exportable module with `HybridCache`.\n\nArgs:\n model (`PreTrainedModel`): The pretrained model to wrap.\n max_batch_size (int): Maximum batch size for the cache.\n max_cache_len (int): Maximum sequence length for the cache.\n\nRaises:\n ValueError: If the model is configured with a unsupported cache implementation.", "source": "github_repos"} -{"code": "def xxd_output_to_object(input_cc_file):\n model_bytes = xxd_output_to_bytes(input_cc_file)\n return convert_bytearray_to_object(model_bytes)", "docstring": "Converts xxd output C++ source file to object.\n\nArgs:\n input_cc_file: Full path name to th C++ source file dumped by xxd\n\nRaises:\n RuntimeError: If input_cc_file path is invalid.\n IOError: If input_cc_file cannot be opened.\n\nReturns:\n A python object corresponding to the input tflite file.", "source": "github_repos"} -{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = math_ops.cast(y_true, self._dtype)\n y_pred = math_ops.cast(y_pred, self._dtype)\n [y_true, y_pred], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values([y_true, y_pred], sample_weight)\n y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())\n matches = ag_fn(y_true, y_pred, **self._fn_kwargs)\n return super(MeanMetricWrapper, self).update_state(matches, sample_weight=sample_weight)", "docstring": "Accumulates metric statistics.\n\n `y_true` and `y_pred` should have the same shape.\n\nArgs:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n sample_weight: Optional `sample_weight` acts as a\n coefficient for the metric. If a scalar is provided, then the metric is\n simply scaled by the given value. 
If `sample_weight` is a tensor of size\n `[batch_size]`, then the metric for each sample of the batch is rescaled\n by the corresponding element in the `sample_weight` vector. If the shape\n of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted\n to this shape), then each metric element of `y_pred` is scaled by the\n corresponding value of `sample_weight`. (Note on `dN-1`: all metric\n functions reduce by 1 dimension, usually the last axis (-1)).\n\nReturns:\n Update op.", "source": "github_repos"} -{"code": "def _NonEagerInputs(op: ops.Operation, xs_set):\n return [t for t in _Inputs(op, xs_set) if not isinstance(t, ops.EagerTensor)]", "docstring": "Returns the inputs of op, crossing closure boundaries where necessary.\n\n Does not return any captured EagerTensors, i.e., the number of tensors\n returned may be less than the actual number of inputs.\n\nArgs:\n op: Operation\n xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.\n\nReturns:\n A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op\n is in a FuncGraph and has captured inputs.", "source": "github_repos"} -{"code": "def __init__(self, x: int, *args, y: str, **kwargs) -> float:\n del x, y, args, kwargs", "docstring": "Constructor.\n\nArgs:\n x: Input 1.\n *args: Variable positional args.\n y: Input 2.\n **kwargs: Variable keyword args.\n\nReturns:\n The result.", "source": "github_repos"} -{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: Optional[bool]=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is None:\n return [0] * len(token_ids_0) + [1]\n return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github_repos"} -{"code": "def _output_ast(ast: pytd.TypeDeclUnit, options: config.Options | None=None) -> str:\n options = options or config.Options.create()\n with config.verbosity_from(options):\n result = pytd_utils.Print(ast)\n log.info('=========== pyi optimized =============')\n log.info('\\n%s', result)\n log.info('========================================')\n result += '\\n'\n if options.quick:\n result = '# (generated with --quick)\\n\\n' + result\n return result", "docstring": "Transforms the given analysis result into a pyi representation.\n\nArgs:\n ast: pytd.TypeDeclUnit to output in pyi format.\n options: config.Options object.\n\nReturns:\n A pyi representation of the given AST as a string.", "source": "github_repos"} -{"code": "def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]:\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)", "docstring": "Example:\n\n ```python\n >>> import requests\n >>> from PIL import Image\n >>> from transformers import AutoProcessor, MLCDVisionModel\n >>> model = MLCDVisionModel.from_pretrained(\"DeepGlint-AI/mlcd-vit-bigG-patch14-448\")\n >>> processor = AutoProcessor.from_pretrained(\"DeepGlint-AI/mlcd-vit-bigG-patch14-448\")\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n >>> inputs = processor(images=image, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... 
outputs = model(**inputs, output_attentions=True)\n\n >>> features = outputs.last_hidden_state\n >>> print(f\"Extracted features shape: {features.shape}\")\n >>> print(f\"Number of attention layers: {len(outputs.attentions)}\")\n >>> print(f\"Attention shape: {outputs.attentions[0].shape}\")\n ```", "source": "github_repos"} -{"code": "def VerifyMaps(self, conf):\n retval = 0\n for map_name in conf.maps:\n self.log.info('Verifying map: %s.', map_name)\n if map_name == config.MAP_NETGROUP:\n self.log.info('The netgroup map does not support enumeration, skipping.')\n continue\n if map_name == config.MAP_AUTOMOUNT:\n self.log.info('The automount map does not support enumeration, skipping.')\n continue\n try:\n nss_map = nss.GetMap(map_name)\n except error.UnsupportedMap:\n self.log.warning('Verification of %s map is unsupported!', map_name)\n continue\n self.log.debug('built NSS map of %d entries', len(nss_map))\n cache_options = conf.options[map_name].cache\n cache = cache_factory.Create(cache_options, map_name)\n try:\n cache_map = cache.GetMap()\n except error.CacheNotFound:\n self.log.error('Cache missing!')\n retval += 1\n continue\n self.log.debug('built cache map of %d entries', len(cache_map))\n missing_entries = 0\n for map_entry in cache_map:\n if map_entry not in nss_map:\n self.log.info('The following entry is present in the cache but not availible via NSS! %s', map_entry.name)\n self.log.debug('missing entry data: %s', map_entry)\n missing_entries += 1\n if missing_entries > 0:\n self.log.warning('Missing %d entries in %s map', missing_entries, map_name)\n retval += 1\n return retval", "docstring": "Compare each configured map against data retrieved from NSS.\n\n For each configured map, build a Map object from NSS and compare\n it against a Map object retrieved directly from the cache. We\n expect the cache Map to be a subset of the nss Map due to possible\n inclusion of other NSS map types (e.g. 
files, nis, ldap, etc).\n\n This could be done via series of get*nam calls, however at this\n time it appears to be more efficient to grab them in bulk and use\n the Map.__contains__() membership test.\n\nArgs:\n conf: nss_cache.config.Config object\n\nReturns:\n count of failures when verifying", "source": "github_repos"} -{"code": "def __clean__(struct: Union[dict, list]) -> Union[dict, list]:\n if isinstance(struct, dict):\n for key, value in struct.items():\n if isinstance(value, bytes):\n struct[key] = base64.standard_b64encode(value).decode('ascii')\n elif isinstance(value, date):\n struct[key] = str(value)\n else:\n API.__clean__(value)\n elif isinstance(struct, list):\n for index, value in enumerate(struct):\n if isinstance(value, bytes):\n struct[index] = base64.standard_b64encode(value).decode('ascii')\n elif isinstance(value, date):\n struct[index] = str(value)\n else:\n API.__clean__(value)\n return struct", "docstring": "Helper to recursively clean up JSON data for API call.\n\n Converts bytes -> base64.\n Converts date -> str (yyyy-mm-dd).\n TODO: Add Converts datetime, time -> string.\n\nArgs:\n struct: The kwargs being cleaned up.\n\nReturns:\n struct: The kwargs with replacments.", "source": "github_repos"} -{"code": "def fftshift(x, axes=None, name=None):\n with _ops.name_scope(name, 'fftshift') as name:\n x = _ops.convert_to_tensor(x)\n if axes is None:\n axes = tuple(range(x.shape.ndims))\n shift = _array_ops.shape(x) // 2\n elif isinstance(axes, int):\n shift = _array_ops.shape(x)[axes] // 2\n else:\n rank = _array_ops.rank(x)\n axes = _array_ops.where(_math_ops.less(axes, 0), axes + rank, axes)\n shift = _array_ops.gather(_array_ops.shape(x), axes) // 2\n return manip_ops.roll(x, shift, axes, name)", "docstring": "Shift the zero-frequency component to the center of the spectrum.\n\n This function swaps half-spaces for all axes listed (defaults to all).\n Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.\n\n @compatibility(numpy)\n Equivalent to numpy.fft.fftshift.\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html\n @end_compatibility\n\n For example:\n\n ```python\n x = tf.signal.fftshift([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])\n x.numpy() # array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])\n ```\n\nArgs:\n x: `Tensor`, input tensor.\n axes: `int` or shape `tuple`, optional Axes over which to shift. Default is\n None, which shifts all axes.\n name: An optional name for the operation.\n\nReturns:\n A `Tensor`, The shifted tensor.", "source": "github_repos"} -{"code": "def UpdateFromSource(self, source, incremental=True, force_write=False):\n cache = cache_factory.Create(self.cache_options, self.map_name)\n return self.UpdateCacheFromSource(cache, source, incremental, force_write, location=None)", "docstring": "Update this map's cache from the source provided.\n\n The FileMapUpdater expects to fetch as single map from the source\n and write/merge it to disk. 
We create a cache to write to, and then call\n UpdateCacheFromSource() with that cache.\n\n Note that AutomountUpdater also calls UpdateCacheFromSource() for each\n cache it is writing, hence the distinct seperation.\n\nArgs:\n source: A nss_cache.sources.Source object.\n incremental: A boolean flag indicating that an incremental update should\n be performed, defaults to True.\n force_write: A boolean flag forcing empty map updates, defaults to False.\n\nReturns:\n An int indicating success of update (0 == good, fail otherwise).", "source": "github_repos"} -{"code": "def _add_variable_with_custom_getter(self, name, shape=None, dtype=dtypes.float32, initializer=None, getter=None, overwrite=False, **kwargs_for_getter):\n self._maybe_initialize_trackable()\n with ops.init_scope():\n if context.executing_eagerly():\n checkpoint_initializer = self._preload_simple_restoration(name=name)\n else:\n checkpoint_initializer = None\n if checkpoint_initializer is not None and (not (isinstance(initializer, CheckpointInitialValueCallable) and initializer.restore_uid > checkpoint_initializer.restore_uid)):\n initializer = checkpoint_initializer\n new_variable = getter(name=name, shape=shape, dtype=dtype, initializer=initializer, **kwargs_for_getter)\n if not overwrite or isinstance(new_variable, Trackable):\n return self._track_trackable(new_variable, name=name, overwrite=overwrite)\n else:\n return new_variable", "docstring": "Restore-on-create for a variable be saved with this `Trackable`.\n\n If the user has requested that this object or another `Trackable` which\n depends on this object be restored from a checkpoint (deferred loading\n before variable object creation), `initializer` may be ignored and the value\n from the checkpoint used instead.\n\nArgs:\n name: A name for the variable. Must be unique within this object.\n shape: The shape of the variable.\n dtype: The data type of the variable.\n initializer: The initializer to use. Ignored if there is a deferred\n restoration stored in the Trackable.\n getter: The getter to wrap which actually fetches the variable.\n overwrite: If True, disables unique name and type checks.\n **kwargs_for_getter: Passed to the getter.\n\nReturns:\n The new variable object.\n\nRaises:\n ValueError: If the variable name is not unique.", "source": "github_repos"} -{"code": "def __init__(self, l1=0.0, l2=0.0, **kwargs):\n super().__init__(activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n self._build_at_init()", "docstring": "Layer that applies an update to the cost function based input activity.\n\nArgs:\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.", "source": "github_repos"} -{"code": "def _print_extension(self, extension: message.Message) -> None:\n if not fhir_types.is_type_or_profile_of_extension(extension):\n raise ValueError(f'Message of type: {extension.DESCRIPTOR.full_name} is not a FHIR Extension.')\n if self.json_format == _FhirJsonFormat.ANALYTIC:\n self.generator.push(f'\"{cast(Any, extension).url.value}\"')\n else:\n self._print_message(extension)", "docstring": "Pushes the Extension into the JSON text generator.\n\n If the _FhirJsonFormat is set to ANALYTIC, this method only prints the url.\n\nArgs:\n extension: The Extension to print.", "source": "github_repos"} -{"code": "def _maybe_colocate_with(self, value):\n if not self._colocate_with_first_write_call:\n yield\n else:\n if not self._colocate_with:\n self._colocate_with.append(value)\n with ops.colocate_with(self._colocate_with[0]):\n yield", "docstring": "Colocate operations with an internal colocation group or `value`.\n\nArgs:\n value: `Tensor`, the tensor to try to colocate with.\n\nYields:\n Does not yield anything, but the new context is a colocation context.\n\n If no internal colocation group is set, colocate with `value` and set\n the internal colocation group to be value.", "source": "github_repos"} -{"code": "def _expand_variables(input_str, cmake_vars):\n\n def replace(match):\n if match.group(1) in cmake_vars:\n return cmake_vars[match.group(1)]\n return ''\n return _CMAKE_ATVAR_REGEX.sub(replace, _CMAKE_VAR_REGEX.sub(replace, input_str))", "docstring": "Expands ${VARIABLE}s and @VARIABLE@s in 'input_str', using dictionary 'cmake_vars'.\n\nArgs:\n input_str: the string containing ${VARIABLE} or @VARIABLE@ expressions to expand.\n cmake_vars: a dictionary mapping variable names to their values.\n\nReturns:\n The expanded string.", "source": "github_repos"} -{"code": "def __init__(self, name=None):\n rr = gen_io_ops.identity_reader_v2(name=name)\n super(IdentityReader, self).__init__(rr, supports_serialize=True)", "docstring": "Create a IdentityReader.\n\nArgs:\n name: A name for the operation (optional).", "source": "github_repos"} -{"code": "def get_adafactor_schedule(optimizer, initial_lr=0.0):\n return AdafactorSchedule(optimizer, initial_lr)", "docstring": "Get a proxy schedule for [`~optimization.Adafactor`]\n\nArgs:\n optimizer ([`~torch.optim.Optimizer`]):\n The optimizer for which to schedule the learning rate.\n initial_lr (`float`, *optional*, defaults to 0.0):\n Initial lr\n\nReturns:\n [`~optimization.Adafactor`] proxy schedule object.", "source": "github_repos"} -{"code": "def from_proto(context_def, import_scope=None):\n ret = WhileContext(context_def=context_def, import_scope=import_scope)\n ret.Enter()\n for nested_def in context_def.nested_contexts:\n from_control_flow_context_def(nested_def, import_scope=import_scope)\n ret.Exit()\n return ret", "docstring": "Returns a `WhileContext` object created from `context_def`.\n\nArgs:\n context_def: A `WhileContextDef` protocol buffer.\n import_scope: Optional `string`. 
Name scope to add.\n\nReturns:\n A `WhileContext` Python object.", "source": "github_repos"} -{"code": "def append_value_at_field(msg: message.Message, field: Union[descriptor.FieldDescriptor, str], value: Any):\n if isinstance(field, str):\n field = _field_descriptor_for_name(msg, field)\n if not field_is_repeated(field):\n raise ValueError(f'{field.name} is not repeated. Unable to append value.')\n getattr(msg, field.name).append(value)", "docstring": "Appends the provided value to the repeated field.\n\nArgs:\n msg: The message whose field to append to.\n field: The FieldDescriptor or name of the field to append to.\n value: The value to append.\n\nRaises:\n ValueError: In the event that the field is not repeated.", "source": "github_repos"} -{"code": "def dispatch_command(self, prefix, argv, screen_info=None):\n if not prefix:\n raise ValueError('Prefix is empty')\n resolved_prefix = self._resolve_prefix(prefix)\n if not resolved_prefix:\n raise ValueError('No handler is registered for command prefix \"%s\"' % prefix)\n handler = self._handlers[resolved_prefix]\n try:\n output = handler(argv, screen_info=screen_info)\n except CommandLineExit as e:\n raise e\n except SystemExit as e:\n lines = ['Syntax error for command: %s' % prefix, 'For help, do \"help %s\"' % prefix]\n output = RichTextLines(lines)\n except BaseException as e:\n lines = ['Error occurred during handling of command: %s %s:' % (resolved_prefix, ' '.join(argv)), '%s: %s' % (type(e), str(e))]\n lines.append('')\n lines.extend(traceback.format_exc().split('\\n'))\n output = RichTextLines(lines)\n if not isinstance(output, RichTextLines) and output is not None:\n raise ValueError('Return value from command handler %s is not None or a RichTextLines instance' % str(handler))\n return output", "docstring": "Handles a command by dispatching it to a registered command handler.\n\nArgs:\n prefix: Command prefix, as a str, e.g., \"print\".\n argv: Command argument vector, excluding the command prefix, represented\n as a list of str, e.g.,\n [\"tensor_1\"]\n screen_info: A dictionary containing screen info, e.g., {\"cols\": 100}.\n\nReturns:\n An instance of RichTextLines or None. 
If any exception is caught during\n the invocation of the command handler, the RichTextLines will wrap the\n error type and message.\n\nRaises:\n ValueError: If\n 1) prefix is empty, or\n 2) no command handler is registered for the command prefix, or\n 3) the handler is found for the prefix, but it fails to return a\n RichTextLines or raise any exception.\n CommandLineExit:\n If the command handler raises this type of exception, this method will\n simply pass it along.", "source": "github_repos"} -{"code": "def get_coder_from_spec(coder_spec):\n assert coder_spec is not None\n ignored_wrappers = 'com.google.cloud.dataflow.sdk.util.TimerOrElement$TimerOrElementCoder'\n if coder_spec['@type'] in ignored_wrappers:\n assert len(coder_spec['component_encodings']) == 1\n coder_spec = coder_spec['component_encodings'][0]\n return get_coder_from_spec(coder_spec)\n if coder_spec['@type'] == 'kind:pair':\n assert len(coder_spec['component_encodings']) == 2\n component_coders = [get_coder_from_spec(c) for c in coder_spec['component_encodings']]\n return coders.TupleCoder(component_coders)\n elif coder_spec['@type'] == 'kind:stream':\n assert len(coder_spec['component_encodings']) == 1\n return coders.IterableCoder(get_coder_from_spec(coder_spec['component_encodings'][0]))\n elif coder_spec['@type'] == 'kind:windowed_value':\n assert len(coder_spec['component_encodings']) == 2\n value_coder, window_coder = [get_coder_from_spec(c) for c in coder_spec['component_encodings']]\n return coders.coders.WindowedValueCoder(value_coder, window_coder=window_coder)\n elif coder_spec['@type'] == 'kind:interval_window':\n assert 'component_encodings' not in coder_spec or not coder_spec['component_encodings']\n return coders.coders.IntervalWindowCoder()\n elif coder_spec['@type'] == 'kind:global_window':\n assert 'component_encodings' not in coder_spec or not coder_spec['component_encodings']\n return coders.coders.GlobalWindowCoder()\n elif coder_spec['@type'] == 'kind:varint':\n assert 'component_encodings' not in coder_spec or len(coder_spec['component_encodings'] == 0)\n return coders.coders.VarIntCoder()\n elif coder_spec['@type'] == 'kind:length_prefix':\n assert len(coder_spec['component_encodings']) == 1\n return coders.coders.LengthPrefixCoder(get_coder_from_spec(coder_spec['component_encodings'][0]))\n elif coder_spec['@type'] == 'kind:bytes':\n assert 'component_encodings' not in coder_spec or len(coder_spec['component_encodings'] == 0)\n return coders.BytesCoder()\n return coders.coders.deserialize_coder(coder_spec['@type'].encode('ascii'))", "docstring": "Return a coder instance from a coder spec.\n\nArgs:\n coder_spec: A dict where the value of the '@type' key is a pickled instance\n of a Coder instance.\n\nReturns:\n A coder instance (has encode/decode methods).", "source": "github_repos"} -{"code": "def _process_v1_graph_mode_tensor(self, op_type, tensor, debug_tensor, tensor_debug_mode):\n if op_type in ('Placeholder', 'PlaceholderWithDefault'):\n self._placeholder_to_debug_tensor[tensor] = debug_tensor\n return tensor\n elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR and op_type != 'Const':\n self._tensor_aliases[debug_tensor.name] = tensor.name\n return debug_tensor\n else:\n with self._symbolic_tensor_counter_lock:\n identity_name = 'tfdbg_identity_%d' % self._symbolic_tensor_counter\n identity = array_ops.identity(tensor, name=identity_name)\n identity.op._add_control_input(debug_tensor.op)\n self._tensor_aliases[identity.name] = tensor.name\n return identity", "docstring": 
"For V1 graph mode, determine what tensor to output from callback.\n\nArgs:\n op_type: Type of the op that outputs the original symbolic tensor.\n tensor: The original output symbolic tensor.\n debug_tensor: The debugger-instrumented tensor.\n tensor_debug_mode: Debug mode used, a tfdbg TensorDebugMode enum.\n\nReturns:\n A symbolic tensor to be returned by the dumping op_callback.", "source": "github_repos"} -{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n default_to_square = True\n if 'shortest_edge' in size:\n size = size['shortest_edge']\n default_to_square = False\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\nArgs:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.", "source": "github_repos"} -{"code": "def get_generic_distributions(generic_dists, metric_id):\n return sum((get_all_distributions_by_type(dist, metric_id) for dist in generic_dists), [])", "docstring": "Creates flatten list of distributions per its value type.\n A generic distribution is the one which is not processed but saved in\n the most raw version.\n\nArgs:\n generic_dists: list of distributions to be saved\n metric_id(uuid): id of the current test run\n\nReturns:\n list of dictionaries made from :class:`DistributionMetric`", "source": "github_repos"} -{"code": "def _parallel_part_processors(part_processors: Sequence[PartProcessorWithMatchFn]) -> PartProcessorFn:\n\n async def part_processor(content: ProcessorPart) -> AsyncIterable[ProcessorPart]:\n output_queue = asyncio.Queue()\n processors = []\n match_fns = []\n passthrough_fallback = False\n passthrough_always = False\n for p in part_processors:\n if p is PASSTHROUGH_FALLBACK:\n passthrough_fallback = True\n continue\n if p is PASSTHROUGH_ALWAYS:\n passthrough_always = True\n continue\n processors.append(_CaptureReservedSubstreams(output_queue, p))\n match_fns.append(p.match)\n parallel_processor = _CaptureReservedSubstreams(output_queue, map_processor.parallel_part_functions(processors, match_fns, with_default_output=passthrough_fallback, with_always_output=passthrough_always))\n content = parallel_processor(content)\n create_task(_enqueue_content(content, output_queue))\n while (part := (await output_queue.get())) is not None:\n yield part\n output_queue.task_done()\n return part_processor", "docstring": "Combine **part processors** in parallel.\n\n Adds debug and status streams to the output.\n\n NOTE: Substreams debug and status are yielded immediately instead of passing\n them to the next processor.\n\nArgs:\n part_processors: sequence of part processors to compute concurrently.\n\nReturns:\n Part processor that computes the output of the provided sequence of part\n processors concurrently.", "source": "github_repos"} -{"code": "def _StopOps(from_ops: list[ops.Operation], stop_gradient_ops: list[ops.Operation], pending_count, xs_set):\n stop_ops = set()\n for op in from_ops:\n is_stop_op = True\n for inp in _NonEagerInputs(op, xs_set):\n if pending_count[inp.op] > 0:\n is_stop_op = False\n break\n if is_stop_op:\n stop_ops.add(op)\n stop_ops.update((op for op in stop_gradient_ops))\n return stop_ops", "docstring": "The set of ops that terminate the gradient computation.\n\n This computes the frontier of the forward graph *before* which backprop\n should stop. Operations in the returned set will not be differentiated.\n This set is defined as the subset of `from_ops` containing ops that have\n no predecessor in `from_ops`. `pending_count` is the result of\n `_PendingCount(xs, from_ops)`. 
An 'op' has predecessors in `from_ops`\n iff pending_count[op] > 0.\n\n In addition, none of `stop_gradient_ops` will be differentiated.\n\nArgs:\n from_ops: list of Operations.\n stop_gradient_ops: list of Operations never to backprop through.\n pending_count: mapping from operation to number of backprop inputs.\n xs_set: ObjectIdentitySet of Tensors.\n\nReturns:\n The set of operations.", "source": "github_repos"} -{"code": "def _ref(self):\n return self._variable", "docstring": "Returns a reference to this variable.\n\n You usually do not need to call this method as all ops that need a reference\n to the variable call it automatically.\n\n Returns is a `Tensor` which holds a reference to the variable. You can\n assign a new value to the variable by passing the tensor to an assign op.\n See `tf.Variable.value` if you want to get the value of the\n variable.\n\nReturns:\n A `Tensor` that is a reference to the variable.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n hidden_states = self.input_layernorm(hidden_states)\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if use_cache:\n outputs += (present_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*):\n attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,\n query_sequence_length, key_sequence_length)` if default attention is used.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence\n kwargs (`dict`, *optional*):\n Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code\n into the model", "source": "github_repos"} -{"code": "def _is_guaranteed_const(tensor):\n if isinstance(tensor, ops.EagerTensor):\n return False\n\n class Work(object):\n\n def __init__(self, op: ops.Operation, leaving):\n self.op = op\n self.leaving = leaving\n is_guaranteed_const = lambda op: op.node_def.op == 'GuaranteeConst'\n constants = set([])\n\n def all_inputs_const(op: ops.Operation):\n return op.inputs and all((inp.op in constants for inp in op.inputs))\n visited = set([])\n stack = [Work(tensor.op, leaving=False)]\n while stack:\n work = stack.pop()\n if work.leaving:\n if all_inputs_const(work.op):\n constants.add(work.op)\n continue\n visited.add(work.op)\n if is_guaranteed_const(work.op):\n constants.add(work.op)\n continue\n stack.append(Work(work.op, leaving=True))\n for inp in work.op.inputs:\n if inp.op not in visited:\n stack.append(Work(inp.op, leaving=False))\n return tensor.op in constants", "docstring": "Determines whether `tensor` is guaranteed to be a constant.\n\n A tensor is guaranteed to be a constant if either it was produced by\n a `GuaranteeConst` op or if all of its children are guaranteed to be\n constants.\n\nArgs:\n tensor: The tensor for which to determine const-ness.\n\nReturns:\n True if `tensor` is guaranteed to be a constant, False otherwise.", "source": "github_repos"} -{"code": "def encode(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]:\n encoded_inputs = self.encode_plus(table, query=query, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs)\n return encoded_inputs['input_ids']", "docstring": "Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc.\n which are necessary for the model to work correctly. Use that method if you want to build your processing on\n your own, otherwise refer to `__call__`.\n\nArgs:\n table (`pd.DataFrame`):\n Table containing tabular data. Note that all cell values must be text. 
Use *.astype(str)* on a Pandas\n dataframe to convert it to string.\n query (`str` or `List[str]`):\n Question related to a table to be encoded.", "source": "github_repos"} -{"code": "def num_parameters(self, only_trainable: bool=False) -> int:\n if only_trainable:\n return int(sum((np.prod(w.shape.as_list()) for w in self.trainable_variables)))\n else:\n return self.count_params()", "docstring": "Get the number of (optionally, trainable) parameters in the model.\n\nArgs:\n only_trainable (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of trainable parameters\n\nReturns:\n `int`: The number of parameters.", "source": "github_repos"} -{"code": "def __init__(self, saved_model_dir, saved_model_tags, saved_model_exported_names, experimental_debug_info_func=None):\n \"\"\"Constructor for TFLiteConverter.\n\nArgs:\n saved_model_dir: Directory of the SavedModel.\n saved_model_tags: Set of tags identifying the MetaGraphDef within the\n SavedModel to analyze. All tags in the tag set must be present. (default\n {tf.saved_model.SERVING}).\n saved_model_exported_names: Names to be exported when the saved model\n import path is on.\n experimental_debug_info_func: An experimental function to retrieve the\n graph debug info for a set of nodes from the `graph_def`.\n\nRaises:\n ValueError: Invalid arguments.\n\"\"\"\n super(TFLiteSavedModelConverter, self).__init__(experimental_debug_info_func)\n self.saved_model_dir = saved_model_dir\n self._saved_model_tags = saved_model_tags\n self._saved_model_exported_names = saved_model_exported_names\n if len(self._saved_model_exported_names) != 1:\n raise ValueError('Only supports a single signature key.')\n signature_key = self._saved_model_exported_names[0]\n result = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, signature_key)\n self._graph_def = result[0]\n self._input_tensors = result[1]\n self._output_tensors = result[2]\n self._parse_saved_model_args()", "docstring": "Converts the given SavedModel into TensorFlow Lite model.\n\nAttributes:\n saved_model_dir: Directory of the SavedModel.", "source": "github_repos"} -{"code": "def _shape_invariant_to_type_spec(self, shape):\n raise NotImplementedError(f'{type(self).__name__}._shape_invariant_to_type_spec')", "docstring": "Returns a TypeSpec given a shape invariant (used by `tf.while_loop`).\n\nArgs:\n shape: A `tf.TensorShape` object. 
The shape invariant for this\n `CompositeTensor`, or `None` if a default shape invariant should be used\n (based on the value of this `CompositeTensor`).\n\nReturns:\n A nested structure whose values are `tf.TensorShape` objects, specifying\n the shape invariants for the tensors that comprise this `CompositeTensor`.", "source": "github_repos"} -{"code": "def reconstruct_non_debug_graph_def(debug_graph_def):\n return DebugGraph(debug_graph_def).non_debug_graph_def", "docstring": "Reconstruct original (non-debugger-decorated) partition GraphDef.\n\n This method strips the input `tf.compat.v1.GraphDef` of the Copy* and\n Debug*-type nodes inserted by the debugger.\n\n The reconstructed partition graph is identical to the original (i.e.,\n non-debugger-decorated) partition graph except in the following respects:\n 1) The exact names of the runtime-inserted internal nodes may differ.\n These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops.\n 2) As a consequence of 1, the nodes that receive input directly from such\n send- and recv-type ops will have different input names.\n 3) The parallel_iteration attribute of while-loop Enter ops are set to 1.\n\nArgs:\n debug_graph_def: The debugger-decorated `tf.compat.v1.GraphDef`, with the\n debugger-inserted Copy* and Debug* nodes.\n\nReturns:\n The reconstructed `tf.compat.v1.GraphDef` stripped of the debugger-inserted\n nodes.", "source": "github_repos"} -{"code": "def abs(self: EventSetOrNode) -> EventSetOrNode:\n from temporian.core.operators.unary import abs\n return abs(self)", "docstring": "Gets the absolute value of an [`EventSet`][temporian.EventSet]'s\n features.\n\nExample:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"M\":[np.nan, -1., 2.], \"N\": [-1, -3, 5]},\n ... )\n >>> a.abs()\n indexes: ...\n 'M': [nan 1. 
2.]\n 'N': [1 3 5]\n ...\n\n ```\n\nReturns:\n EventSet with positive valued features.", "source": "github_repos"} -{"code": "def patch_retry(testcase, module):\n from mock import Mock\n from mock import patch\n real_retry_with_exponential_backoff = retry.with_exponential_backoff\n\n def patched_retry_with_exponential_backoff(**kwargs):\n \"\"\"A patch for retry decorator to use a mock dummy clock and logger.\"\"\"\n kwargs.update(logger=Mock(), clock=Mock())\n return real_retry_with_exponential_backoff(**kwargs)\n patch.object(retry, 'with_exponential_backoff', side_effect=patched_retry_with_exponential_backoff).start()\n importlib.reload(module)\n\n def remove_patches():\n patch.stopall()\n importlib.reload(module)\n testcase.addCleanup(remove_patches)", "docstring": "A function to patch retry module to use mock clock and logger.\n\n Clock and logger that defined in retry decorator will be replaced in test\n in order to skip sleep phase when retry happens.\n\nArgs:\n testcase: An instance of unittest.TestCase that calls this function to\n patch retry module.\n module: The module that uses retry and need to be replaced with mock\n clock and logger in test.", "source": "github_repos"} -{"code": "def tokenize(self, text):\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = ''.join(chars[start:end])\n if start > 0:\n substr = '##' + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "docstring": "Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform\n tokenization using the given vocabulary.\n\n For example, `input = \"unaffable\"` will return as output `[\"un\", \"##aff\", \"##able\"]`.\n\nArgs:\n text: A single token or whitespace separated tokens. 
This should have\n already been passed through *BasicTokenizer*.\n\nReturns:\n A list of wordpiece tokens.", "source": "github_repos"} -{"code": "def broadcast_tensor(self, tensor):\n return array_ops.gather(tensor, self.gather_index)", "docstring": "Broadcast from a dense tensor.\n\n It is assumed that the first axis of the dense tensor is indexed by the\n source shape, and at the end, the first axis of the dense tensor is\n indexed by the destination shape.\n\nArgs:\n tensor: a dense tensor.\n\nReturns:\n A dense tensor.", "source": "github_repos"} -{"code": "def convert_segmentation_to_rle(segmentation):\n segment_ids = torch.unique(segmentation)\n run_length_encodings = []\n for idx in segment_ids:\n mask = torch.where(segmentation == idx, 1, 0)\n rle = binary_mask_to_rle(mask)\n run_length_encodings.append(rle)\n return run_length_encodings", "docstring": "Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.\n\nArgs:\n segmentation (`torch.Tensor` or `numpy.array`):\n A segmentation map of shape `(height, width)` where each value denotes a segment or class id.\n\nReturns:\n `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.", "source": "github_repos"} -{"code": "def __init__(self, x_data: types.RealTensor, y_data: types.RealTensor, z_data: types.RealTensor, dtype: tf.DType=None, name: str=None):\n name = name or 'interpolation_2d'\n with tf.name_scope(name):\n self._xdata = tf.convert_to_tensor(x_data, dtype=dtype, name='x_data')\n self._dtype = dtype or self._xdata.dtype\n self._ydata = tf.convert_to_tensor(y_data, dtype=self._dtype, name='y_data')\n self._zdata = tf.convert_to_tensor(z_data, dtype=self._dtype, name='z_data')\n self._name = name\n self._spline_yz = cubic.build_spline(self._ydata, self._zdata, name='spline_y_direction')", "docstring": "Initialize the 2d-interpolation object.\n\nArgs:\n x_data: A `Tensor` of real `dtype` and shape\n `batch_shape + [num_x_data_points]`.\n Defines the x-coordinates of the input data. `num_x_data_points` should\n be >= 2. The elements of `x_data` should be in a non-decreasing order.\n y_data: A `Tensor` of the same `dtype` as `x_data` and shape\n `batch_shape + [num_x_data_points, num_y_data_points]`. Defines the\n y-coordinates of the input data. `num_y_data_points` should be >= 2.\n The elements of `y_data` should be in a non-decreasing order along last\n dimension.\n z_data: A `Tensor` of the same shape and `dtype` as `y_data`. 
Defines the\n z-coordinates of the input data (i.e., the function values).\n dtype: Optional dtype for the input `Tensor`s.\n Default value: `None` which maps to the default dtype inferred by\n TensorFlow.\n name: Python `str` name prefixed to ops created by this class.\n Default value: `None` which is mapped to the default name\n `interpolation_2d`.", "source": "github_repos"} -{"code": "def split(cls, tensor, split_dimension, num_devices, input_shape=None):\n if input_shape:\n shape = input_shape\n else:\n shape = tensor.shape.as_list()\n if shape[split_dimension] is not None and shape[split_dimension] < num_devices:\n raise ValueError('Split dimension was smaller than the required number of splits: shape=%r, dimension=%r, num_devices=%r' % (shape, split_dimension, num_devices))\n tile_assignment_dims = [1] * len(shape)\n tile_assignment_dims[split_dimension] = num_devices\n return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=tile_assignment_dims, tile_assignment_devices=range(num_devices)))", "docstring": "Returns a Sharding that splits a tensor across a dimension.\n\n This creates a Tiled attribute, similar to tile(), but easier to use for the\n common case of tiling a tensor N ways in one dimension.\n\nArgs:\n tensor: A tf.Tensor to split.\n split_dimension: The dimension number to split.\n num_devices: The number of cores to split `tensor` over.\n input_shape: The shape of the original tensor.\n\nRaises:\n ValueError: The tensor to split was smaller in the split dimension than\n the number of devices to split over.", "source": "github_repos"} -{"code": "def node_op_type(self, node_name, device_name=None):\n if not self._debug_graphs:\n raise LookupError('Node op types are not loaded from partition graphs yet.')\n device_name = self._infer_device_name(device_name, node_name)\n return self._debug_graphs[device_name].node_op_types[node_name]", "docstring": "Get the op type of given node.\n\nArgs:\n node_name: (`str`) name of the node.\n device_name: (`str`) name of the device. If there is only one device or if\n node_name exists on only one device, this argument is optional.\n\nReturns:\n (`str`) op type of the node.\n\nRaises:\n LookupError: If node op types have not been loaded\n from partition graphs yet.", "source": "github_repos"} -{"code": "def VisitNamedType(self, node):\n return pytd.ClassType(node.name)", "docstring": "Converts a named type to a class type, to be filled in later.\n\nArgs:\n node: The NamedType. This type only has a name.\n\nReturns:\n A ClassType. This ClassType will (temporarily) only have a name.", "source": "github_repos"} -{"code": "def __init__(self, dtensor_components: Tuple[tensor.Tensor], global_element_spec: tensor_spec.TensorSpec, layouts: Any):\n [self._iterator_resource_dtensor] = dtensor_components\n self._global_element_spec = global_element_spec\n self._layouts = layouts\n self._layouts_str = nest.map_structure(lambda layout: layout.to_string(), layouts)\n super().__init__(components=dtensor_components, element_spec=global_element_spec)", "docstring": "Initializes a distributed iterator for DTensor datasets.\n\n This iterator encapsulates tf.data iterators for the underlying devices, and\n treats it as a packed DTensor of iterator resource tensors.\n\nArgs:\n dtensor_components: a tuple containing the underlying iterator resources\n packed into a DTensor. 
This is expected to be a tuple with a single\n element.\n global_element_spec: the underlying dataset's element spec from a global\n view.\n layouts: a structure of DTensor layouts to be applied to the elements\n returned by the underlying iterators. This can be a single layout or\n (possibly nested) tuples or dictionaries of layouts, and the structure\n must match the structure of the iterator elements.", "source": "github_repos"} -{"code": "def _calculate_scores(self, query, key):\n q_reshaped = array_ops.expand_dims(query, axis=-2)\n k_reshaped = array_ops.expand_dims(key, axis=-3)\n if self.use_scale:\n scale = self.scale\n else:\n scale = 1.0\n return math_ops.reduce_sum(scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1)", "docstring": "Calculates attention scores as a nonlinear sum of query and key.\n\nArgs:\n query: Query tensor of shape `[batch_size, Tq, dim]`.\n key: Key tensor of shape `[batch_size, Tv, dim]`.\n\nReturns:\n Tensor of shape `[batch_size, Tq, Tv]`.", "source": "github_repos"} -{"code": "def visit_Import(self, node):\n for import_alias in node.names:\n if import_alias.name == 'tensorflow.compat.v1' and import_alias.asname == 'tf':\n import_alias.name = 'tensorflow'\n self.generic_visit(node)", "docstring": "Handle visiting an import node in the AST.\n\nArgs:\n node: Current Node", "source": "github_repos"} -{"code": "def to_yaml(self, **kwargs):\n raise RuntimeError('Method `model.to_yaml()` has been removed due to security risk of arbitrary code execution. Please use `model.to_json()` instead.')", "docstring": "Returns a yaml string containing the network configuration.\n\n Note: Since TF 2.6, this method is no longer supported and will raise a\n RuntimeError.\n\n To load a network from a yaml save file, use\n `keras.models.model_from_yaml(yaml_string, custom_objects={})`.\n\n `custom_objects` should be a dictionary mapping\n the names of custom losses / layers / etc to the corresponding\n functions / classes.\n\nArgs:\n **kwargs: Additional keyword arguments\n to be passed to `yaml.dump()`.\n\nReturns:\n A YAML string.\n\nRaises:\n RuntimeError: announces that the method poses a security risk", "source": "github_repos"} -{"code": "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n if distribute_lib.in_cross_replica_context():\n raise ValueError('apply_gradients() must be called in a replica context.')\n if not self._doing_dynamic_loss_scaling():\n return self._optimizer.apply_gradients(grads_and_vars, global_step, name)\n replica_context = distribute_lib.get_replica_context()\n grads_and_vars = tuple(grads_and_vars)\n return replica_context.merge_call(self._distributed_apply, args=(grads_and_vars, global_step, name))", "docstring": "Apply gradients to variables.\n\n This is the second part of `minimize()`. It returns an `Operation` that\n conditionally applies gradients if all gradient values are finite.\n Otherwise no update is performed (nor is `global_step` incremented).\n\nArgs:\n grads_and_vars: List of (gradient, variable) pairs as returned by\n `compute_gradients()`.\n global_step: Optional `Variable` to increment by one after the variables\n have been updated.\n name: Optional name for the returned operation. Default to the name\n passed to the `Optimizer` constructor.\n\nReturns:\n An `Operation` that conditionally applies the specified gradients. 
If\n `global_step` was not None, that operation also increments `global_step`.\n\nRaises:\n RuntimeError: If you should use `_distributed_apply()` instead.", "source": "github_repos"} -{"code": "def compute_gradient(f, x, delta=None):\n if not isinstance(x, (list, tuple)):\n raise ValueError('`x` must be a list or tuple of values convertible to a Tensor (arguments to `f`), not a %s' % type(x))\n if delta is None:\n delta = 1.0 / 1024\n return _compute_gradient_list(f, x, delta)", "docstring": "Computes the theoretical and numeric Jacobian of `f`.\n\n With y = f(x), computes the theoretical and numeric Jacobian dy/dx.\n\nArgs:\n f: the function.\n x: the arguments for the function as a list or tuple of values convertible\n to a Tensor.\n delta: (optional) perturbation used to compute numeric Jacobian.\n\nReturns:\n A pair of lists, where the first is a list of 2-d numpy arrays representing\n the theoretical Jacobians for each argument, and the second list is the\n numerical ones. Each 2-d array has \"y_size\" rows\n and \"x_size\" columns where \"x_size\" is the number of elements in the\n corresponding argument and \"y_size\" is the number of elements in f(x).\n\nRaises:\n ValueError: If result is empty but the gradient is nonzero.\n ValueError: If x is not list, but any other type.\n\nExample:\n >>> @tf.function\n ... def test_func(x):\n ... return x*x\n ...\n >>>\n >>> class MyTest(tf.test.TestCase):\n ...\n ... def test_gradient_of_test_func(self):\n ... theoretical, numerical = tf.test.compute_gradient(test_func, [1.0])\n ... # ((array([[2.]], dtype=float32),),\n ... # (array([[2.000004]], dtype=float32),))\n ... self.assertAllClose(theoretical, numerical)", "source": "github_repos"} -{"code": "def _get_index_proposal_probs(self, x):\n x_float = tf.cast(x, tf.float32)\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(x_float)\n current_energy = tf.squeeze(self._energy(tf.expand_dims(x_float, 0)))\n f_grad = -1.0 * tape.gradient(current_energy, x_float)\n approx_energy_diff = -(2 * tf.cast(x, tf.float32) - 1) * f_grad\n return tf.nn.softmax(approx_energy_diff / 2.0)", "docstring": "Returns the value of equation 6 of the paper.\n\n Given current state x, the locally-informed proposal evaluates f(x') - f(x)\n for each x' in a Hamming ball of radius 1 around x, written H(x). Let\n d(x) = [f(x') - f(x) for x' in H(x)]. The locally-informed conditional\n distribution is then q(i | x) = C * e^(d(x) / T) for some temperature T,\n where C is the normalization constant (T=2 is optimal under a standard\n criterion).\n\n The insight of the paper is that d(x) can be approximated using a Taylor\n series, yielding d(x) ~ -(2x - 1) * df(x)/dx. This function computes the\n locally-informed conditional probabilities q(i | x) using the approximation.\n\n Note that in terms of the unnormalized log probability of the state, f(x),\n used in the paper, the energy of the state is: E(x) = -f(x). So we have\n d(x) ~ (2x - 1) * dE(x)/dx\n\nArgs:\n x: 1D tensor which is the current state of the chain.\n\nReturns:\n The conditional probabilities q(i | x) given in equation 6.", "source": "github_repos"} -{"code": "def join(self, basepath, *paths):\n if not basepath.startswith(GCSFileSystem.GCS_PREFIX):\n raise ValueError('Basepath %r must be GCS path.' 
% basepath)\n path = basepath\n for p in paths:\n path = path.rstrip('/') + '/' + p.lstrip('/')\n return path", "docstring": "Join two or more pathname components for the filesystem\n\nArgs:\n basepath: string path of the first component of the path\n paths: path components to be added\n\n Returns: full path after combining all the passed components", "source": "github_repos"} -{"code": "def fn(x: int, y: int):\n pass", "docstring": "Test function\n\nArgs:\n x: The first input\n\n y: The second input. This is a longer description\n that spans multiple lines with indentation and stuff.\n\nReturns:\n God knows what", "source": "github_repos"} -{"code": "def categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n label_smoothing = tensor_conversion.convert_to_tensor_v2_with_dispatch(label_smoothing, dtype=backend.floatx())\n\n def _smooth_labels():\n num_classes = math_ops.cast(array_ops.shape(y_true)[-1], y_pred.dtype)\n return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes\n y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels, lambda: y_true)\n return backend.categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=axis)", "docstring": "Computes the categorical crossentropy loss.\n\n Standalone usage:\n\n >>> y_true = [[0, 1, 0], [0, 0, 1]]\n >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]\n >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> loss.numpy()\n array([0.0513, 2.303], dtype=float32)\n\nArgs:\n y_true: Tensor of one-hot true targets.\n y_pred: Tensor of predicted targets.\n from_logits: Whether `y_pred` is expected to be a logits tensor. By default,\n we assume that `y_pred` encodes a probability distribution.\n label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For\n example, if `0.1`, use `0.1 / num_classes` for non-target labels\n and `0.9 + 0.1 / num_classes` for target labels.\n axis: Defaults to -1. The dimension along which the entropy is\n computed.\n\nReturns:\n Categorical crossentropy loss value.", "source": "github_repos"} -{"code": "def request(self, batch: Sequence[ExampleT], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionT]:\n raise NotImplementedError(type(self))", "docstring": "Makes a request to a remote inference service and returns the response.\n Should raise an exception of some kind if there is an error to enable the\n retry and client-side throttling logic to work. Returns an iterable of the\n desired prediction type. 
This method should return the values directly, as\n handling return values as a generator can prevent the retry logic from\n functioning correctly.\n\nArgs:\n batch: A sequence of examples or features.\n model: The model used to make inferences.\n inference_args: Extra arguments for models whose inference call requires\n extra parameters.\n\nReturns:\n An Iterable of Predictions.", "source": "github_repos"} -{"code": "def build_model_flags(change_concat_input_ranges=False, allow_nonexistent_arrays=False, saved_model_dir=None, saved_model_version=0, saved_model_tags=None, saved_model_exported_names=None, **_):\n model_flags = _model_flags_pb2.ModelFlags()\n model_flags.change_concat_input_ranges = change_concat_input_ranges\n model_flags.allow_nonexistent_arrays = allow_nonexistent_arrays\n if saved_model_dir:\n model_flags.saved_model_dir = saved_model_dir\n model_flags.saved_model_version = saved_model_version\n if saved_model_tags:\n model_flags.saved_model_tags.extend(saved_model_tags)\n if saved_model_exported_names:\n model_flags.saved_model_exported_names.extend(saved_model_exported_names)\n return model_flags", "docstring": "Builds the model flags object from params.\n\nArgs:\n change_concat_input_ranges: Boolean to change behavior of min/max ranges for\n inputs and outputs of the concat operator for quantized models. Changes\n the ranges of concat operator overlap when true. (default False)\n allow_nonexistent_arrays: Allow specifying array names that don't exist or\n are unused in the final graph. (default False)\n saved_model_dir: Filepath of the saved model to be converted. This value\n will be non-empty only when the saved model import path will be used.\n Otherwises, the graph def-based conversion will be processed.\n saved_model_version: SavedModel file format version of The saved model file\n to be converted. This value will be set only when the SavedModel import\n path will be used.\n saved_model_tags: Set of string saved model tags, formatted in the\n comma-separated value. This value will be set only when the SavedModel\n import path will be used.\n saved_model_exported_names: Names to be exported (default: export all) when\n the saved model import path is on. 
This value will be set only when the\n SavedModel import path will be used.\n\nReturns:\n model_flags: protocol buffer describing the model.", "source": "github_repos"} -{"code": "def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)\n loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)\n return (data, loc, scale)", "docstring": "Parameters:\n data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):\n input for Batch norm calculation\n\nReturns:\n tuple of `torch.Tensor` of shapes\n (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,\n `(batch_size, 1, num_input_channels)`)", "source": "github_repos"} -{"code": "def convert_debug_info_func(saved_debug_info):\n\n def f(original_nodes):\n \"\"\"Function to create `GraphDebugInfo` for the given `original_nodes`.\"\"\"\n del original_nodes\n return saved_debug_info\n return f", "docstring": "Returns a method to retrieve the `GraphDebugInfo` from the original graph.\n\nArgs:\n saved_debug_info: The `GraphDebugInfo` containing all the debug info.\n\nReturns:\n A function which retrieves the stack traces from the original graph and\n converts them to a `GraphDebugInfo` for a given set of nodes.", "source": "github_repos"} -{"code": "def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):\n return", "docstring": "Updates the candidate generation strategy based on the outcomes.\n\nArgs:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using\n beam search or log softmax for each vocabulary token when using beam search\n num_matches (`int`):\n The number of matches between the candidate sequences and the model predictions.", "source": "github_repos"} -{"code": "def __init__(self, min_length: int, eos_token_id: Union[int, List[int], torch.Tensor], device: str='cpu'):\n if not isinstance(min_length, int) or min_length < 0:\n raise ValueError(f'`min_length` has to be a non-negative integer, but is {min_length}')\n if not isinstance(eos_token_id, torch.Tensor):\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n eos_token_id = torch.tensor(eos_token_id, device=device)\n self.min_length = min_length\n self.eos_token_id = eos_token_id", "docstring": "[`LogitsProcessor`] enforcing a min-length by setting EOS probability to 0. 
Note that, for decoder-only models\n like most LLMs, the length includes the prompt.\n\nArgs:\n min_length (`int`):\n The minimum length below which the score of `eos_token_id` is set to `-float(\"Inf\")`.\n eos_token_id (`Union[int, List[int], torch.Tensor]`):\n The id(s) of the *end-of-sequence* token.\n device (`str`, *optional*, defaults to `\"cpu\"`):\n The device to allocate the tensors.\n\nExample:\n ```python\n >>> from transformers import AutoModelForCausalLM, AutoTokenizer\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"bigscience/bloomz-560m\")\n >>> model = AutoModelForCausalLM.from_pretrained(\"bigscience/bloomz-560m\")\n\n >>> inputs = tokenizer(\"A number:\", return_tensors=\"pt\")\n >>> gen_out = model.generate(**inputs)\n >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])\n A number: one\n\n >>> # setting `min_length` to a value smaller than the uncontrolled output length has no impact\n >>> gen_out = model.generate(**inputs, min_length=3)\n >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])\n A number: one\n\n >>> # setting a larger `min_length` will force the model to generate beyond its natural ending point, which is not\n >>> # necessarily incorrect\n >>> gen_out = model.generate(**inputs, min_length=10)\n >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])\n A number: one thousand, nine hundred and ninety-four\n ```", "source": "github_repos"} -{"code": "def to_diff_dict(self) -> dict[str, Any]:\n config_dict = self.to_dict()\n default_config_dict = PretrainedConfig().to_dict()\n class_config_dict = self.__class__().to_dict() if not self.has_no_defaults_at_init else {}\n serializable_config_dict = {}\n for key, value in config_dict.items():\n if isinstance(getattr(self, key, None), PretrainedConfig) and key in class_config_dict and isinstance(class_config_dict[key], dict) or key in self.sub_configs:\n diff = recursive_diff_dict(value, default_config_dict, config_obj=getattr(self, key, None))\n if 'model_type' in value:\n diff['model_type'] = value['model_type']\n serializable_config_dict[key] = diff\n elif key not in default_config_dict or key == 'transformers_version' or key == 'vocab_file' or (value != default_config_dict[key]) or (key in default_config_dict and value != class_config_dict.get(key, value)):\n serializable_config_dict[key] = value\n self._remove_keys_not_serialized(serializable_config_dict)\n if '_name_or_path' in serializable_config_dict:\n del serializable_config_dict['_name_or_path']\n if hasattr(self, 'quantization_config'):\n serializable_config_dict['quantization_config'] = self.quantization_config.to_dict() if not isinstance(self.quantization_config, dict) else self.quantization_config\n self.dict_torch_dtype_to_str(serializable_config_dict)\n return serializable_config_dict", "docstring": "Removes all attributes from the configuration that correspond to the default config attributes for\n better readability, while always retaining the `config` attribute from the class. 
Serializes to a\n Python dictionary.\n\nReturns:\n Dict[str, Any]: Dictionary of all the attributes that make up this configuration instance.", "source": "github_repos"} -{"code": "def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):\n lines = []\n font_attr_segs = {}\n lines.append('')\n lines.append(' %d %s(s) + %d control %s(s):' % (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))\n lines.append(' %d %s(s):' % (len(non_ctrls), neighbor_type))\n for non_ctrl in non_ctrls:\n line = ' [%s] %s' % (self._debug_dump.node_op_type(non_ctrl), non_ctrl)\n lines.append(line)\n font_attr_segs[len(lines) - 1] = [(len(line) - len(non_ctrl), len(line), debugger_cli_common.MenuItem(None, 'ni -a -d -t %s' % non_ctrl))]\n if ctrls:\n lines.append('')\n lines.append(' %d control %s(s):' % (len(ctrls), neighbor_type))\n for ctrl in ctrls:\n line = ' [%s] %s' % (self._debug_dump.node_op_type(ctrl), ctrl)\n lines.append(line)\n font_attr_segs[len(lines) - 1] = [(len(line) - len(ctrl), len(line), debugger_cli_common.MenuItem(None, 'ni -a -d -t %s' % ctrl))]\n return debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)", "docstring": "List neighbors (inputs or recipients) of a node.\n\nArgs:\n neighbor_type: (\"input\" | \"recipient\")\n non_ctrls: Non-control neighbor node names, as a list of str.\n ctrls: Control neighbor node names, as a list of str.\n\nReturns:\n A RichTextLines object.", "source": "github_repos"} -{"code": "def for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):\n try:\n for_fn = for_loop_registry.lookup(iter_)\n except LookupError:\n for_fn = _py_for_stmt\n if tensor_util.is_tf_type(iter_):\n if tensors.is_range_tensor(iter_):\n for_fn = _tf_range_for_stmt\n else:\n for_fn = _known_len_tf_for_stmt\n elif isinstance(iter_, distribute.Iterator):\n for_fn = _tf_iterator_for_stmt\n elif isinstance(iter_, distribute.Iterable):\n for_fn = _tf_distributed_iterable_for_stmt\n for_fn(iter_, extra_test, body, get_state, set_state, symbol_names, opts)", "docstring": "Functional form of a for statement.\n\n The loop operates on a state, which includes all symbols that are\n variant across loop iterations, excluding the variables local to the loop.\n\n For example, given the loop below that calculates the geometric and\n arithmetic means or some numbers:\n\n ```\n geo_mean = 1\n arith_mean = 0\n for i in range(n):\n a = numbers[i]\n geo_mean *= a\n arith_mean += a\n ```\n\n The state is represented by the variables named geo_mean and arith_mean. The\n `extra_test`, `body`, `get_state` and `set_state` functions must bind to the\n original `geo_mean` and `arith_mean` symbols, using `nonlocal`.\n\n The inputs and outputs of the callables representing the loop blocks are not\n explicit - instead, these functions must use nonlocal/global for side effects.\n The inputs and outputs are instead controlled by the set_state/get_state\n functions.\n\nArgs:\n iter_: The entity being iterated over.\n extra_test: Callable with boolean return type. An additional loop condition.\n body: Callable representing the actual loop body.\n get_state: Additional callable which can capture additional state (such as\n the values of composite symbols). This is only useful when staging the\n loop.\n set_state: Additional callable which save values captured by get_state back\n into the Python environment. 
This is only useful when staging the loop.\n symbol_names: Tuple containing names of the loop variables returned by\n get_state.\n opts: Optional dict of extra loop parameters.", "source": "github_repos"} -{"code": "def apply(self, inputs, *args, **kwargs):\n warnings.warn('`layer.apply` is deprecated and will be removed in a future version. Please use `layer.__call__` method instead.')\n return self.__call__(inputs, *args, **kwargs)", "docstring": "Deprecated, do NOT use!\n\n This is an alias of `self.__call__`.\n\nArgs:\n inputs: Input tensor(s).\n *args: additional positional arguments to be passed to `self.call`.\n **kwargs: additional keyword arguments to be passed to `self.call`.\n\nReturns:\n Output tensor(s).", "source": "github_repos"} -{"code": "def from_dim_sizes(dim_sizes):\n with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes', [dim_sizes]):\n dim_sizes = tuple((ops.convert_to_tensor(size, preferred_dtype=dtypes.int64, name='dim_sizes') for size in dim_sizes))\n inner_split = 0\n for dim, dim_size in enumerate(dim_sizes):\n if dim_size.shape.ndims == 1:\n inner_split = dim + 1\n elif dim_size.shape.ndims != 0:\n raise ValueError('Each dim_size must be a scalar or a vector')\n return RaggedTensorDynamicShape(dim_sizes[:inner_split], dim_sizes[inner_split:])", "docstring": "Constructs a ragged shape from a list of dimension sizes.\n\n This list contains a single tensor for each dimension, where the tensor\n is a scalar if the dimension is uniform, or a vector if the dimension is\n ragged.\n\nArgs:\n dim_sizes: List of int32 or int64 scalars or vectors.\n\nReturns:\n A RaggedTensorDynamicShape.", "source": "github_repos"} -{"code": "def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs = self.encoder(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)\n hidden_states = outputs.hidden_states\n feature_maps = ()\n for idx, stage in enumerate(self.stage_names):\n if stage in self.out_features:\n feature_maps += (hidden_states[idx],)\n if not return_dict:\n output = (feature_maps,)\n if output_hidden_states:\n output += (outputs.hidden_states,)\n return output\n return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)", "docstring": "Examples:\n\n ```python\n >>> from transformers import AutoImageProcessor, AutoBackbone\n >>> import torch\n >>> from PIL import Image\n >>> import requests\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> processor = AutoImageProcessor.from_pretrained(\"OpenGVLab/pvt_v2_b0\")\n >>> model = AutoBackbone.from_pretrained(\n ... \"OpenGVLab/pvt_v2_b0\", out_features=[\"stage1\", \"stage2\", \"stage3\", \"stage4\"]\n ... 
)\n\n >>> inputs = processor(image, return_tensors=\"pt\")\n\n >>> outputs = model(**inputs)\n >>> feature_maps = outputs.feature_maps\n >>> list(feature_maps[-1].shape)\n [1, 256, 7, 7]\n ```", "source": "github_repos"} -{"code": "def fixed_field_for_type_code(type_code: str) -> str:\n url_type_code = _TYPE_CODE_URI_RE.search(type_code)\n if url_type_code is not None:\n type_code = url_type_code[1]\n fixed_field = stringcase.snakecase(type_code[:1].lower() + type_code[1:])\n return 'string_value' if fixed_field == 'string' else fixed_field", "docstring": "Retrieves the `ElementDefinition.fixed.choice` oneof field for `type_code`.\n\nArgs:\n type_code: The FHIR type code to look up. Could be a value like 'boolean' or\n URL like 'http://hl7.org/fhirpath/System.Boolean'\n\nReturns:\n The attribute corresponding to this type code on the\n ElementDefinition.FixedX.choice oneof.", "source": "github_repos"} -{"code": "def _test_end(self, result, e):\n if self.begin_time is not None:\n self.end_time = utils.get_current_epoch_time()\n self.result = result\n if e:\n self.termination_signal = ExceptionRecord(e)", "docstring": "Marks the end of the test logic.\n\nArgs:\n result: One of the TEST_RESULT enums in TestResultEnums.\n e: A test termination signal (usually an exception object). It can\n be any exception instance or of any subclass of\n mobly.signals.TestSignal.", "source": "github_repos"} -{"code": "def configTestMesh(device_type_mesh_map: typing.Dict[typing.Text, layout_lib.Mesh]) -> layout_lib.Mesh:\n reset_context()\n\n def get_mesh(device_type):\n mesh = device_type_mesh_map.get(device_type, None)\n if mesh is None:\n raise ValueError('Requires a %s mesh to run test on %s.' % (device_type, device_type))\n return mesh\n mesh = None\n if is_tpu_present():\n mesh = get_mesh('TPU')\n reset_context()\n accelerator_util.initialize_accelerator_system('TPU')\n elif tf_config.list_physical_devices('GPU'):\n mesh = get_mesh('GPU')\n reset_logical_devices('GPU', np.prod(mesh.shape()))\n accelerator_util.initialize_accelerator_system('GPU')\n else:\n mesh = get_mesh('CPU')\n reset_logical_devices('CPU', np.prod(mesh.shape()))\n accelerator_util.initialize_accelerator_system('CPU')\n return mesh", "docstring": "Configs corresponding mesh given test context.\n\n If runs on a CPU mesh, set virtual device on CPU.\n If runs on a GPU mesh, sets virtual device on GPU with proper memory limits.\n if runs on a TPU mesh, initializes TPU system.\n\nArgs:\n device_type_mesh_map: A dictionary containing device_type -> mesh mapping.\n\nReturns:\n A properly configured mesh for use in test.", "source": "github_repos"} -{"code": "def _process_parameters_section(func_documentation, sig, func, class_name, model_name_lowercase, parent_class, indent_level):\n docstring = set_min_indent('Args:\\n', indent_level + 4)\n undocumented_parameters = []\n documented_params = {}\n documented_kwargs = {}\n if func_documentation is not None:\n documented_params, func_documentation = parse_docstring(func_documentation)\n if model_name_lowercase is not None:\n documented_params = format_args_docstring(documented_params, model_name_lowercase)\n param_docstring, missing_args = _process_regular_parameters(sig, func, class_name, documented_params, indent_level, undocumented_parameters)\n docstring += param_docstring\n kwargs_docstring = _process_kwargs_parameters(sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters)\n docstring += kwargs_docstring\n if len(undocumented_parameters) > 
0:\n print('\\n'.join(undocumented_parameters))\n return docstring", "docstring": "Process the parameters section of the docstring.\n\nArgs:\n func_documentation (`str`): Existing function documentation (manually specified in the docstring)\n sig (`inspect.Signature`): Function signature\n func (`function`): Function the parameters belong to\n class_name (`str`): Name of the class the function belongs to\n model_name_lowercase (`str`): Lowercase model name\n parent_class (`class`): Parent class of the function (if any)\n indent_level (`int`): Indentation level", "source": "github_repos"} -{"code": "def check_collective_ops_peer_health(self, task, timeout_in_ms):\n self.ensure_initialized()\n pywrap_tfe.TFE_CollectiveOpsCheckPeerHealth(self._handle, task, timeout_in_ms)", "docstring": "Check collective peer health.\n\n This probes each task to see if they're still alive. Note that restarted\n tasks are considered a different one, and they're considered not healthy.\n\n This should only be used in multi client multi worker training.\n\nArgs:\n task: a task string, must be in the format of /job:xxx/replica:0/task:N.\n timeout_in_ms: an integer, the timeout. If zero, there's no timeout.\n\nRaises:\n tf.errors.UnavailableError: when a peer is down.\n tf.errors.FailedPreconditionError: when a peer is a different one from the\n one this task has talked to, e.g. the peer has restarted.\n tf.errors.InvalidArgumentError: when the task string is invalid.", "source": "github_repos"} -{"code": "def parallel(processor_list: Sequence[PartProcessor]) -> PartProcessor:\n if not processor_list:\n raise ValueError('processor_list is empty')\n return _ParallelPartProcessor(processor_list)", "docstring": "Create a sequence of part processors to be run in parallel.\n\nArgs:\n processor_list: list of part processors.\n\nReturns:\n A processor consisting of the parallel run of all the processors in the\n list. The execution is sequential from the first processor to the last but\n parts are processed concurrently overall.", "source": "github_repos"} -{"code": "def _html_tree_view(self, *, view: 'HtmlTreeView', name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, **kwargs) -> Html:\n return view.render(self, name=name, parent=parent, root_path=root_path, **kwargs)", "docstring": "Returns the topmost HTML representation of this extension.\n\nArgs:\n view: The view to render the object.\n name: The name of the object.\n parent: The parent of the object.\n root_path: The key path of the object relative to the root.\n **kwargs: kwargs to pass to the view. 
See `_html_tree_view_config` for\n the builtin arguments.\n\nReturns:\n The rendered HTML.", "source": "github_repos"} -{"code": "def _get_kind_name(item):\n if isinstance(item, (str, bytes)):\n kind = 'bytes_list'\n elif isinstance(item, int):\n kind = 'int64_list'\n elif isinstance(item, float):\n kind = 'float_list'\n elif isinstance(item, Any):\n kind = 'any_list'\n else:\n kind = 'node_list'\n return kind", "docstring": "Returns the kind name in CollectionDef.\n\nArgs:\n item: A data item.\n\nReturns:\n The string representation of the kind in CollectionDef.", "source": "github_repos"} -{"code": "def restore_op(self, filename_tensor, saveable, preferred_shard):\n tensors = []\n for spec in saveable.specs:\n tensors.append(io_ops.restore_v2(filename_tensor, [spec.name], [spec.slice_spec], [spec.dtype])[0])\n return tensors", "docstring": "Create ops to restore 'saveable'.\n\n This is intended to be overridden by subclasses that want to generate\n different Ops.\n\nArgs:\n filename_tensor: String Tensor.\n saveable: A BaseSaverBuilder.SaveableObject object.\n preferred_shard: Int. Shard to open first when loading a sharded file.\n\nReturns:\n A list of Tensors resulting from reading 'saveable' from\n 'filename'.", "source": "github_repos"} -{"code": "def _pyval_empty_list_depth(pyval):\n if isinstance(pyval, list):\n if not pyval:\n return 1\n depths = [_pyval_empty_list_depth(v) for v in pyval]\n if any((depth is None for depth in depths)):\n return None\n else:\n return max(depths) + 1\n else:\n return None", "docstring": "Find the max depth for nested empty lists.\n\nArgs:\n pyval: A nested python list.\n\nReturns:\n The maximum depth of empty lists in `pyval`, or None if `pyval` contains\n anything other than nested empty lists.", "source": "github_repos"} -{"code": "def __init__(self, elements: Iterable[Any]):\n self._elements = elements", "docstring": "Asserts that the input contains exactly the elements provided.\n\n This is primarily used for testing; it will cause the entire pipeline to\n fail if the input to this transform is not exactly the set of `elements`\n given in the config parameter.\n\n As with Create, YAML/JSON-style mappings are interpreted as Beam rows,\n e.g.::\n\n type: AssertEqual\n input: SomeTransform\n config:\n elements:\n - {a: 0, b: \"foo\"}\n - {a: 1, b: \"bar\"}\n\n would ensure that `SomeTransform` produced exactly two elements with values\n `(a=0, b=\"foo\")` and `(a=1, b=\"bar\")` respectively.\n\nArgs:\n elements: The set of elements that should belong to the PCollection.\n YAML/JSON-style mappings will be interpreted as Beam rows.", "source": "github_repos"} -{"code": "def _grappler_enabled_session_config():\n rewriter_config = rewriter_config_pb2.RewriterConfig(disable_model_pruning=False, arithmetic_optimization=rewriter_config_pb2.RewriterConfig.ON)\n graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)\n return config_pb2.ConfigProto(graph_options=graph_options)", "docstring": "Constructs a Session config proto that explicitly enables Grappler.\n\nReturns:\n A config proto that obtains extra safety for the unit tests in this\n file by ensuring that the relevant Grappler rewrites are always enabled.", "source": "github_repos"} -{"code": "def view_as_real(x):\n if any_symbolic_tensors((x,)):\n return ViewAsReal().symbolic_call(x)\n x = backend.convert_to_tensor(x)\n real_part = backend.numpy.real(x)\n imag_part = backend.numpy.imag(x)\n return backend.numpy.stack((real_part, imag_part), axis=-1)", "docstring": "Converts a 
complex tensor to a real tensor with shape `(..., 2)`,\n where the last dimension represents the real and imaginary components.\n\nArgs:\n x: A complex tensor.\n\nReturns:\n A real tensor where the last dimension contains the\n real and imaginary parts.\n\nExample:\n ```\n >>> import numpy as np\n >>> from keras import ops\n\n >>> complex_tensor = np.array([1 + 2j, 3 + 4j])\n >>> real = ops.view_as_real(complex_tensor)\n >>> real\n array([[1., 2.],\n [3., 4.]])\n ```", "source": "github_repos"} -{"code": "def __init__(self, file_pattern, batch_size=1, buffer_size=1, parallelism=1, shift_ratio=0, seed=0, name=None, batches=None, compression_type=None):\n self._batch_size = batch_size\n if batches is not None:\n self._batch_size *= batches\n self._batches = batches\n self._file_pattern = file_pattern\n self._buffer_size = buffer_size\n self._parallelism = parallelism\n self._shift_ratio = shift_ratio\n self._seed = seed\n self._name = name\n self._compression_type = python_io.TFRecordCompressionType.NONE\n if compression_type is not None:\n self._compression_type = compression_type", "docstring": "Constructs a RecordInput Op.\n\nArgs:\n file_pattern: File path to the dataset, possibly containing wildcards.\n All matching files will be iterated over each epoch.\n batch_size: How many records to return at a time.\n buffer_size: The maximum number of records the buffer will contain.\n parallelism: How many reader threads to use for reading from files.\n shift_ratio: What percentage of the total number files to move the start\n file forward by each epoch.\n seed: Specify the random number seed used by generator that randomizes\n records.\n name: Optional name for the operation.\n batches: None by default, creating a single batch op. Otherwise specifies\n how many batches to create, which are returned as a list when\n `get_yield_op()` is called. An example use case is to split processing\n between devices on one computer.\n compression_type: The type of compression for the file. Currently ZLIB and\n GZIP are supported. Defaults to none.\n\nRaises:\n ValueError: If one of the arguments is invalid.", "source": "github_repos"} -{"code": "def mean_absolute_error(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return backend.mean(math_ops.abs(y_pred - y_true), axis=-1)", "docstring": "Computes the mean absolute error between labels and predictions.\n\n `loss = mean(abs(y_true - y_pred), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.randint(0, 2, size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))\n\nArgs:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\nReturns:\n Mean absolute error values. shape = `[batch_size, d0, .. 
dN-1]`.", "source": "github_repos"} -{"code": "def forward(self, pixel_values: Tensor, task_inputs: Tensor, text_inputs: Optional[Tensor]=None, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> OneFormerModelOutput:\n if pixel_values is None:\n raise ValueError('You have to specify pixel_values')\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n batch_size, _, height, width = pixel_values.shape\n if pixel_mask is None:\n pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)\n pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states)\n multi_scale_features = pixel_level_module_output.decoder_features\n mask_features = pixel_level_module_output.decoder_last_feature\n task_token = self.task_encoder(task_inputs.to(self.dtype))\n if self.is_training:\n text_queries = self.text_mapper(text_inputs)\n else:\n text_queries = None\n transformer_module_output = self.transformer_module(multi_scale_features=multi_scale_features, mask_features=mask_features, task_token=task_token, output_attentions=output_attentions)\n queries = transformer_module_output.object_queries\n encoder_hidden_states = None\n pixel_decoder_hidden_states = None\n transformer_decoder_hidden_states = None\n if output_hidden_states:\n encoder_hidden_states = pixel_level_module_output.encoder_features\n pixel_decoder_hidden_states = (pixel_level_module_output.decoder_last_feature,)\n for f in pixel_level_module_output.decoder_features:\n pixel_decoder_hidden_states += (f,)\n transformer_decoder_hidden_states = transformer_module_output.auxiliary_predictions\n output = OneFormerModelOutput(encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, transformer_decoder_object_queries=queries, transformer_decoder_contrastive_queries=transformer_module_output.contrastive_logits, transformer_decoder_mask_predictions=transformer_module_output.prediction_masks, transformer_decoder_class_predictions=transformer_module_output.prediction_class, transformer_decoder_auxiliary_predictions=transformer_module_output.auxiliary_predictions, text_queries=text_queries, task_token=task_token, attentions=transformer_module_output.attentions)\n if not return_dict:\n output = tuple((v for v in output.values()))\n return output", "docstring": "task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Task inputs. Task inputs can be obtained using [`AutoImageProcessor`]. 
See [`OneFormerProcessor.__call__`]\n for details.\n text_inputs (`List[torch.Tensor]`, *optional*):\n Tensor fof shape `(num_queries, sequence_length)` to be fed to a model\n\nExample:\n ```python\n >>> import torch\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import OneFormerProcessor, OneFormerModel\n\n >>> # download texting image\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> # load processor for preprocessing the inputs\n >>> processor = OneFormerProcessor.from_pretrained(\"shi-labs/oneformer_ade20k_swin_tiny\")\n >>> model = OneFormerModel.from_pretrained(\"shi-labs/oneformer_ade20k_swin_tiny\")\n >>> inputs = processor(image, [\"semantic\"], return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... outputs = model(**inputs)\n\n >>> mask_predictions = outputs.transformer_decoder_mask_predictions\n >>> class_predictions = outputs.transformer_decoder_class_predictions\n\n >>> f\"👉 Mask Predictions Shape: {list(mask_predictions.shape)}, Class Predictions Shape: {list(class_predictions.shape)}\"\n '👉 Mask Predictions Shape: [1, 150, 128, 171], Class Predictions Shape: [1, 150, 151]'\n ```", "source": "github_repos"} -{"code": "def Print(input_, data, message=None, first_n=None, summarize=None, name=None):\n return gen_logging_ops._print(input_, data, message, first_n, summarize, name)", "docstring": "Prints a list of tensors.\n\n This is an identity op (behaves like `tf.identity`) with the side effect\n of printing `data` when evaluating.\n\n Note: This op prints to the standard error. It is not currently compatible\n with jupyter notebook (printing to the notebook *server's* output, not into\n the notebook).\n\n @compatibility(TF2)\n This API is deprecated. Use `tf.print` instead. `tf.print` does not need the\n `input_` argument.\n\n `tf.print` works in TF2 when executing eagerly and inside a `tf.function`.\n\n In TF1-styled sessions, an explicit control dependency declaration is needed\n to execute the `tf.print` operation. Refer to the documentation of\n `tf.print` for more details.\n @end_compatibility\n\nArgs:\n input_: A tensor passed through this op.\n data: A list of tensors to print out when op is evaluated.\n message: A string, prefix of the error message.\n first_n: Only log `first_n` number of times. Negative numbers log always;\n this is the default.\n summarize: Only print this many entries of each tensor. If None, then a\n maximum of 3 elements are printed per input tensor.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor`. 
Has the same type and contents as `input_`.\n\n ```python\n sess = tf.compat.v1.Session()\n with sess.as_default():\n tensor = tf.range(10)\n print_op = tf.print(tensor)\n with tf.control_dependencies([print_op]):\n out = tf.add(tensor, tensor)\n sess.run(out)\n ```", "source": "github_repos"} -{"code": "def ee_initialize(use_personal_account: bool=False, enforce_high_volume: bool=False, service_account: t.Optional[str]=None, private_key: t.Optional[str]=None, project_id: t.Optional[str]=None) -> None:\n creds = get_creds(use_personal_account, service_account, private_key)\n on_compute_engine = is_compute_engine()\n if on_compute_engine:\n if project_id is None and use_personal_account:\n raise RuntimeError('Project_name should not be None!')\n params = {'credentials': creds, 'opt_url': 'https://earthengine-highvolume.googleapis.com'}\n if project_id:\n params['project'] = project_id\n ee.Initialize(**params)\n elif enforce_high_volume and (not on_compute_engine):\n raise RuntimeError('Must run on a compute engine VM to use the high volume earth engine api.')\n else:\n ee.Initialize(creds)", "docstring": "Initializes earth engine with the high volume API when using a compute engine VM.\n\nArgs:\n use_personal_account: A flag to use personal account for ee authentication. Default: False.\n enforce_high_volume: A flag to use the high volume API when using a compute engine VM. Default: False.\n service_account: Service account address when using a private key for earth engine authentication.\n private_key: A private key path to authenticate earth engine using private key. Default: None.\n Project ID: An identifier that represents the name of a project present in Earth Engine.\n\nRaises:\n RuntimeError: Earth Engine did not initialize.", "source": "github_repos"} -{"code": "def SendCapture(self, request, global_params=None):\n config = self.GetMethodConfig('SendCapture')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Send encoded debug capture data for component.\n\nArgs:\n request: (DataflowProjectsLocationsJobsDebugSendCaptureRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n\nReturns:\n (SendDebugCaptureResponse) The response message.", "source": "github_repos"} -{"code": "def __init__(self, name='mean', dtype=None):\n super().__init__(name=name, dtype=dtype)\n self.total = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='total')\n self.count = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='count')", "docstring": "Compute the (weighted) mean of the given values.\n\n For example, if values is `[1, 3, 5, 7]` then the mean is 4.\n If `sample_weight` was specified as `[1, 1, 0, 0]` then the mean would be 2.\n\n This metric creates two variables, `total` and `count`.\n The mean value returned is simply `total` divided by `count`.\n\nArgs:\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExample:\n >>> m = Mean()\n >>> m.update_state([1, 3, 5, 7])\n >>> m.result()\n 4.0\n\n >>> m.reset_state()\n >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])\n >>> m.result()\n 2.0", "source": "github_repos"} -{"code": "def __init__(self, target_shape, **kwargs):\n super().__init__(**kwargs)\n self.target_shape = tuple(target_shape)", "docstring": "Layer that reshapes inputs into the given shape.\n\nArgs:\n target_shape: Target shape. 
Tuple of integers, does not include the\n samples dimension (batch size).\n\n Input shape:\n Arbitrary, although all dimensions in the input shape must be\n known/fixed. Use the keyword argument `input_shape` (tuple of integers,\n does not include the samples/batch size axis) when using this layer as\n the first layer in a model.\n\n Output shape:\n `(batch_size, *target_shape)`\n\nExample:\n >>> x = keras.Input(shape=(12,))\n >>> y = keras.layers.Reshape((3, 4))(x)\n >>> y.shape\n (None, 3, 4)\n\n >>> # also supports shape inference using `-1` as dimension\n >>> y = keras.layers.Reshape((-1, 2, 2))(x)\n >>> y.shape\n (None, 3, 2, 2)", "source": "github_repos"} -{"code": "def outer_graph(self):\n current = self._weak_outer_graph()\n if current is None:\n return self._fallback_outer_graph\n return current", "docstring": "The Graph this FuncGraph is nested in.\n\n Functions may capture Tensors from graphs they are nested in (transitive).\n\nReturns:\n A Graph object. Initially set to the current default graph when the\n FuncGraph was created. If the previous `outer_graph` was deleted because\n the function that owns it was deleted, `outer_graph` is reset to the\n outermost default graph active when the FuncGraph was created. This\n FuncGraph won't have captured anything from the new `outer_graph` (and\n likely not from the previous setting, since that would have created a\n strong reference), but it is returned so that FuncGraphs always have a\n parent.", "source": "github_repos"} -{"code": "def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, initial_token_labels: Optional[torch.Tensor]=None, subsequent_token_labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BrosSpadeOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.bros(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n last_hidden_states = outputs[0]\n last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()\n initial_token_logits = self.initial_token_classifier(last_hidden_states).transpose(0, 1).contiguous()\n subsequent_token_logits = self.subsequent_token_classifier(last_hidden_states, last_hidden_states).squeeze(0)\n inv_attention_mask = 1 - attention_mask\n batch_size, max_seq_length = inv_attention_mask.shape\n device = inv_attention_mask.device\n invalid_token_mask = torch.cat([inv_attention_mask, torch.zeros([batch_size, 1]).to(device)], axis=1).bool()\n subsequent_token_logits = subsequent_token_logits.masked_fill(invalid_token_mask[:, None, :], torch.finfo(subsequent_token_logits.dtype).min)\n self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device=device, dtype=torch.bool)\n subsequent_token_logits = subsequent_token_logits.masked_fill(self_token_mask[None, :, :], torch.finfo(subsequent_token_logits.dtype).min)\n subsequent_token_mask = attention_mask.view(-1).bool()\n loss = 
None\n if initial_token_labels is not None and subsequent_token_labels is not None:\n loss_fct = CrossEntropyLoss()\n initial_token_labels = initial_token_labels.view(-1)\n if bbox_first_token_mask is not None:\n bbox_first_token_mask = bbox_first_token_mask.view(-1)\n initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels)[bbox_first_token_mask], initial_token_labels[bbox_first_token_mask])\n else:\n initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels), initial_token_labels)\n subsequent_token_labels = subsequent_token_labels.view(-1)\n subsequent_token_loss = loss_fct(subsequent_token_logits.view(-1, max_seq_length + 1)[subsequent_token_mask], subsequent_token_labels[subsequent_token_mask])\n loss = initial_token_loss + subsequent_token_loss\n if not return_dict:\n output = (initial_token_logits, subsequent_token_logits) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return BrosSpadeOutput(loss=loss, initial_token_logits=initial_token_logits, subsequent_token_logits=subsequent_token_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "bbox ('torch.FloatTensor' of shape '(batch_size, num_boxes, 4)'):\n Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values\n (x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the\n bounding box.\n bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n initial_token_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for the initial token classification.\n subsequent_token_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for the subsequent token classification.\n\nExample:\n ```python\n >>> import torch\n >>> from transformers import BrosProcessor, BrosSpadeEEForTokenClassification\n\n >>> processor = BrosProcessor.from_pretrained(\"jinho8345/bros-base-uncased\")\n\n >>> model = BrosSpadeEEForTokenClassification.from_pretrained(\"jinho8345/bros-base-uncased\")\n\n >>> encoding = processor(\"Hello, my dog is cute\", add_special_tokens=False, return_tensors=\"pt\")\n >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding[\"input_ids\"].shape[-1], 1)\n >>> encoding[\"bbox\"] = bbox\n\n >>> outputs = model(**encoding)\n ```", "source": "github_repos"} -{"code": "def in_top_k(predictions, targets, k):\n return nn.in_top_k(predictions, targets, k)", "docstring": "Returns whether the `targets` are in the top `k` `predictions`.\n\nArgs:\n predictions: A tensor of shape `(batch_size, classes)` and type `float32`.\n targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.\n k: An `int`, number of top elements to consider.\n\nReturns:\n A 1D tensor of length `batch_size` and type `bool`.\n `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`\n values of `predictions[i]`.", "source": "github_repos"} -{"code": "def load_graph_from_args(pipeline_name: str, framework: str, model: str, tokenizer: Optional[str]=None, **models_kwargs) -> Pipeline:\n if tokenizer is None:\n tokenizer = model\n if framework == 'pt' and (not is_torch_available()):\n raise Exception('Cannot convert because PyTorch is not installed. 
Please install torch first.')\n if framework == 'tf' and (not is_tf_available()):\n raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')\n print(f'Loading pipeline (model: {model}, tokenizer: {tokenizer})')\n return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)", "docstring": "Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model\n\nArgs:\n pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)\n framework: The actual model to convert the pipeline from (\"pt\" or \"tf\")\n model: The model name which will be loaded by the pipeline\n tokenizer: The tokenizer name which will be loaded by the pipeline, default to the model's value\n\n Returns: Pipeline object", "source": "github_repos"} -{"code": "def _prop(self):\n return 'prop_with_doc'", "docstring": "prop doc.\n\nReturns:\n String.", "source": "github_repos"} -{"code": "def get(self):\n raise NotImplementedError", "docstring": "Creates a generator to extract data from the queue.\n\n Skip the data if it is `None`.\n\n This method is called from the main thread.\n\nYields:\n The next element in the queue, i.e. a tuple\n `(inputs, targets)` or\n `(inputs, targets, sample_weights)`.", "source": "github_repos"} -{"code": "def get_dense_tensor(self, transformation_cache, state_manager):\n if isinstance(self.categorical_column, SequenceCategoricalColumn):\n raise ValueError('In indicator_column: {}. categorical_column must not be of type SequenceCategoricalColumn. Suggested fix A: If you wish to use DenseFeatures, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use SequenceFeatures instead of DenseFeatures. 
Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n return transformation_cache.get(self, state_manager)", "docstring": "Returns dense `Tensor` representing feature.\n\nArgs:\n transformation_cache: A `FeatureTransformationCache` object to access\n features.\n state_manager: A `StateManager` to create / access resources such as\n lookup tables.\n\nReturns:\n Dense `Tensor` created within `transform_feature`.\n\nRaises:\n ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.", "source": "github_repos"} -{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, next_sentence_label: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFMobileBertForPreTrainingOutput]:\n outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n sequence_output, pooled_output = outputs[:2]\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n total_loss = None\n if labels is not None and next_sentence_label is not None:\n d_labels = {'labels': labels}\n d_labels['next_sentence_label'] = next_sentence_label\n total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return (total_loss,) + output if total_loss is not None else output\n return TFMobileBertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Return:\n\nExample:\n ```python\n >>> import tensorflow as tf\n >>> from transformers import AutoTokenizer, TFMobileBertForPreTraining\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"google/mobilebert-uncased\")\n >>> model = TFMobileBertForPreTraining.from_pretrained(\"google/mobilebert-uncased\")\n >>> input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\"))[None, :] # Batch size 1\n >>> outputs = model(input_ids)\n >>> prediction_scores, seq_relationship_scores = outputs[:2]\n ```", "source": "github_repos"} -{"code": "def track_and_add_endpoint(self, name, resource, input_signature, **kwargs):\n if name in self._endpoint_names:\n raise ValueError(f\"Endpoint name '{name}' is already taken.\")\n if not isinstance(resource, layers.Layer):\n raise ValueError(f'Invalid resource type. Expected an instance of a Keras `Layer` or `Model`. Received: resource={resource} (of type {type(resource)})')\n if not resource.built:\n raise ValueError('The layer provided has not yet been built. It must be built before export.')\n if backend.backend() != 'jax':\n if 'jax2tf_kwargs' in kwargs or 'is_static' in kwargs:\n raise ValueError(f\"'jax2tf_kwargs' and 'is_static' are only supported with the jax backend. 
Current backend: {backend.backend()}\")\n input_signature = tree.map_structure(make_tf_tensor_spec, input_signature)\n if not hasattr(BackendExportArchive, 'track_and_add_endpoint'):\n self.track(resource)\n return self.add_endpoint(name, resource.__call__, input_signature, **kwargs)\n else:\n decorated_fn = super().track_and_add_endpoint(name, resource, input_signature, **kwargs)\n self._endpoint_signatures[name] = input_signature\n setattr(self._tf_trackable, name, decorated_fn)\n self._endpoint_names.append(name)\n return decorated_fn", "docstring": "Track the variables and register a new serving endpoint.\n\n This function combines the functionality of `track` and `add_endpoint`.\n It tracks the variables of the `resource` (either a layer or a model)\n and registers a serving endpoint using `resource.__call__`.\n\nArgs:\n name: `str`. The name of the endpoint.\n resource: A trackable Keras resource, such as a layer or model.\n input_signature: Optional. Specifies the shape and dtype of `fn`.\n Can be a structure of `keras.InputSpec`, `tf.TensorSpec`,\n `backend.KerasTensor`, or backend tensor (see below for an\n example showing a `Functional` model with 2 input arguments). If\n not provided, `fn` must be a `tf.function` that has been called\n at least once. Defaults to `None`.\n **kwargs: Additional keyword arguments:\n - Specific to the JAX backend:\n - `is_static`: Optional `bool`. Indicates whether `fn` is\n static. Set to `False` if `fn` involves state updates\n (e.g., RNG seeds).\n - `jax2tf_kwargs`: Optional `dict`. Arguments for\n `jax2tf.convert`. See [`jax2tf.convert`](\n https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md).\n If `native_serialization` and `polymorphic_shapes` are\n not provided, they are automatically computed.", "source": "github_repos"} -{"code": "def __init__(self, sess, hooks):\n _WrappedSession.__init__(self, sess)\n self._hooks = hooks\n self._should_stop = False", "docstring": "Initializes a _HookedSession object.\n\nArgs:\n sess: A `tf.compat.v1.Session` or a `_WrappedSession` object.\n hooks: An iterable of `SessionRunHook' objects.", "source": "github_repos"} -{"code": "def explicit_pass(msg, extras=None):\n raise signals.TestPass(msg, extras)", "docstring": "Explicitly pass a test.\n\n This will pass the test explicitly regardless of any other error happened\n in the test body. E.g. even if errors have been recorded with `expects`,\n the test will still be marked pass if this is called.\n\n A test without uncaught exception will pass implicitly so this should be\n used scarcely.\n\nArgs:\n msg: A string explaining the details of the passed test.\n extras: An optional field for extra information to be included in\n test result.\n\nRaises:\n signals.TestPass: Mark a test as passed.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n x = self.dense(hidden_states).squeeze(-1)\n if p_mask is not None:\n if p_mask.dtype == torch.float16:\n x = x * (1 - p_mask) - 65500 * p_mask\n else:\n x = x * (1 - p_mask) - 1e+30 * p_mask\n return x", "docstring": "Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):\n The final hidden states of the model.\n p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):\n Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token\n should be masked.\n\nReturns:\n `torch.FloatTensor`: The start logits for SQuAD.", "source": "github_repos"} -{"code": "def _convert_tf1_model(flags):\n if flags.custom_opdefs:\n register_custom_opdefs(_parse_array(flags.custom_opdefs))\n converter = _get_tflite_converter(flags)\n if flags.inference_type:\n converter.inference_type = _parse_inference_type(flags.inference_type, 'inference_type')\n if flags.inference_input_type:\n converter.inference_input_type = _parse_inference_type(flags.inference_input_type, 'inference_input_type')\n if flags.output_format:\n converter.output_format = _toco_flags_pb2.FileFormat.Value(flags.output_format)\n if flags.mean_values and flags.std_dev_values:\n input_arrays = converter.get_input_arrays()\n std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)\n if converter.inference_type == dtypes.float32:\n mean_values = _parse_array(flags.mean_values, type_fn=float)\n else:\n mean_values = _parse_array(flags.mean_values, type_fn=int)\n quant_stats = list(zip(mean_values, std_dev_values))\n if not flags.input_arrays and len(input_arrays) > 1 or len(input_arrays) != len(quant_stats):\n raise ValueError(\"Mismatching --input_arrays, --std_dev_values, and --mean_values. The flags must have the same number of items. The current input arrays are '{0}'. --input_arrays must be present when specifying --std_dev_values and --mean_values with multiple input tensors in order to map between names and values.\".format(','.join(input_arrays)))\n converter.quantized_input_stats = dict(list(zip(input_arrays, quant_stats)))\n if flags.default_ranges_min is not None and flags.default_ranges_max is not None:\n converter.default_ranges_stats = (flags.default_ranges_min, flags.default_ranges_max)\n if flags.drop_control_dependency:\n converter.drop_control_dependency = flags.drop_control_dependency\n if flags.reorder_across_fake_quant:\n converter.reorder_across_fake_quant = flags.reorder_across_fake_quant\n if flags.change_concat_input_ranges:\n converter.change_concat_input_ranges = flags.change_concat_input_ranges == 'TRUE'\n if flags.allow_custom_ops:\n converter.allow_custom_ops = flags.allow_custom_ops\n if flags.target_ops:\n ops_set_options = lite.OpsSet.get_options()\n converter.target_spec.supported_ops = set()\n for option in flags.target_ops.split(','):\n if option not in ops_set_options:\n raise ValueError('Invalid value for --target_ops. Options: {0}'.format(','.join(ops_set_options)))\n converter.target_spec.supported_ops.add(lite.OpsSet(option))\n if flags.experimental_select_user_tf_ops:\n if lite.OpsSet.SELECT_TF_OPS not in converter.target_spec.supported_ops:\n raise ValueError('--experimental_select_user_tf_ops can only be set if --target_ops contains SELECT_TF_OPS.')\n user_op_set = set()\n for op_name in flags.experimental_select_user_tf_ops.split(','):\n user_op_set.add(op_name)\n converter.target_spec.experimental_select_user_tf_ops = list(user_op_set)\n if flags.post_training_quantize:\n converter.optimizations = [lite.Optimize.DEFAULT]\n if converter.inference_type != dtypes.float32:\n print('--post_training_quantize quantizes a graph of inference_type FLOAT. 
Overriding inference_type to FLOAT.')\n converter.inference_type = dtypes.float32\n if flags.quantize_to_float16:\n converter.target_spec.supported_types = [dtypes.float16]\n if not flags.post_training_quantize:\n print('--quantize_to_float16 will only take effect with the --post_training_quantize flag enabled.')\n if flags.dump_graphviz_dir:\n converter.dump_graphviz_dir = flags.dump_graphviz_dir\n if flags.dump_graphviz_video:\n converter.dump_graphviz_vode = flags.dump_graphviz_video\n if flags.conversion_summary_dir:\n converter.conversion_summary_dir = flags.conversion_summary_dir\n converter.experimental_new_converter = flags.experimental_new_converter\n if flags.experimental_new_quantizer is not None:\n converter.experimental_new_quantizer = flags.experimental_new_quantizer\n output_data = converter.convert()\n with gfile.GFile(flags.output_file, 'wb') as f:\n f.write(output_data)", "docstring": "Calls function to convert the TensorFlow 1.X model into a TFLite model.\n\nArgs:\n flags: argparse.Namespace object.\n\nRaises:\n ValueError: Invalid flags.", "source": "github_repos"} -{"code": "def reduce_sum(self, x):\n return self.reduce(lambda y: math_ops.reduce_sum(y, axis=0), x)", "docstring": "Performs a sum reduction on `x` across pfor iterations.\n\n Note that this currently may not work inside a control flow construct.\n\nArgs:\n x: an unvectorized Tensor.\n\nReturns:\n A Tensor that has same rank as `x`. The value is the sum of the values\n of `x` across the pfor iterations.", "source": "github_repos"} -{"code": "def _autopacking_helper(list_or_tuple, dtype, name):\n if context.executing_eagerly():\n if all((isinstance(elem, core.Tensor) for elem in list_or_tuple)):\n return gen_array_ops.pack(list_or_tuple, name=name)\n must_pack = False\n converted_elems = []\n with ops.name_scope(name) as scope:\n for i, elem in enumerate(list_or_tuple):\n if isinstance(elem, core.Tensor):\n if dtype is not None and elem.dtype.base_dtype != dtype:\n raise TypeError(f'Cannot convert a list containing a tensor of dtype {elem.dtype} to {dtype} (Tensor is: {elem!r})')\n converted_elems.append(elem)\n must_pack = True\n elif isinstance(elem, (list, tuple)):\n converted_elem = _autopacking_helper(elem, dtype, str(i))\n if isinstance(converted_elem, core.Tensor):\n must_pack = True\n converted_elems.append(converted_elem)\n else:\n converted_elems.append(elem)\n if must_pack:\n elems_as_tensors = []\n for i, elem in enumerate(converted_elems):\n if isinstance(elem, core.Tensor):\n elems_as_tensors.append(elem)\n else:\n elems_as_tensors.append(constant_op.constant(elem, dtype=dtype, name=str(i)))\n return gen_array_ops.pack(elems_as_tensors, name=scope)\n else:\n return converted_elems", "docstring": "Converts the given list or tuple to a tensor by packing.\n\nArgs:\n list_or_tuple: A (possibly nested) list or tuple containing a tensor.\n dtype: The element type of the returned tensor.\n name: A name for the returned tensor.\n\nReturns:\n A `tf.Tensor` with value equivalent to `list_or_tuple`.", "source": "github_repos"} -{"code": "def _bigbird_block_rand_mask_with_head(self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, indices_prng_key: Optional[jax.random.PRNGKey]=None, deterministic: Optional[bool]=True, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1):\n if from_seq_length // from_block_size != to_seq_length // to_block_size:\n raise 
ValueError('Error the number of blocks needs to be same!')\n if from_seq_length not in plan_from_length:\n raise ValueError('Error from sequence length not in plan!')\n num_blocks = from_seq_length // from_block_size\n plan_block_length = jnp.array(plan_from_length) // from_block_size\n max_plan_idx = plan_from_length.index(from_seq_length)\n rand_attn = [jnp.zeros((num_blocks, sum(plan_num_rand_blocks[:max_plan_idx + 1])), dtype=jnp.int32) for i in range(num_heads)]\n if deterministic:\n for nh in range(num_heads):\n rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks - global_block_bottom, :]\n return rand_attn\n for plan_idx in range(max_plan_idx + 1):\n rnd_r_cnt = 0\n if plan_idx > 0:\n if plan_num_rand_blocks[plan_idx] > 0:\n rnd_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx]))\n curr_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx + 1]))\n for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):\n for h in range(num_heads):\n single_block_row_attention = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key)\n rand_attn[h] = rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention)\n for pl_id in range(plan_idx):\n if plan_num_rand_blocks[pl_id] == 0:\n continue\n for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):\n rnd_r_cnt = 0\n to_start_block_id = 0\n if pl_id > 0:\n rnd_r_cnt = int(sum(plan_num_rand_blocks[:pl_id]))\n to_start_block_id = plan_block_length[pl_id - 1]\n curr_r_cnt = int(sum(plan_num_rand_blocks[:pl_id + 1]))\n for h in range(num_heads):\n single_block_row_attention = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key)\n rand_attn[h] = rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention)\n if plan_num_rand_blocks[plan_idx] == 0:\n continue\n curr_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx + 1]))\n from_start_block_id = global_block_top\n to_start_block_id = 0\n if plan_idx > 0:\n rnd_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx]))\n from_start_block_id = plan_block_length[plan_idx - 1]\n to_start_block_id = plan_block_length[plan_idx - 1]\n for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):\n for h in range(num_heads):\n single_block_row_attention = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key)\n rand_attn[h] = rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention)\n for nh in range(num_heads):\n rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks - global_block_bottom, :]\n return rand_attn", "docstring": "Create adjacency list 
of random attention.\n\nArgs:\n from_seq_length: int. length of from sequence.\n to_seq_length: int. length of to sequence.\n from_block_size: int. size of block in from sequence.\n to_block_size: int. size of block in to sequence.\n num_heads: int. total number of heads.\n plan_from_length: list. plan from length where num_random_blocks are chosen from.\n plan_num_rand_blocks: list. number of rand blocks within the plan.\n indices_prng_key: jax.random.PRNGKey. PRNG key that is used to perform random jax operations.\n deterministic: bool. When False random attention will be used.\n window_block_left: int. number of blocks of window to left of a block.\n window_block_right: int. number of blocks of window to right of a block.\n global_block_top: int. number of blocks at the top.\n global_block_bottom: int. number of blocks at the bottom.\n global_block_left: int. Number of blocks globally used to the left.\n global_block_right: int. Number of blocks globally used to the right.\n\nReturns:\n adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by\n num_rand_blocks", "source": "github_repos"} -{"code": "def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform, final_table_name_with_ptransform):\n final_table_name = final_table_name_no_ptransform\n with beam.Pipeline(options=PipelineOptions()) as pipeline:\n kvs = pipeline | 'CreateKVs' >> beam.Create(KVs)\n kvs | 'WriteToSimpleKV' >> beam.io.Write(SimpleKVSink(simplekv, 'http://url_to_simple_kv/', final_table_name))\n final_table_name = final_table_name_with_ptransform\n with beam.Pipeline(options=PipelineOptions()) as pipeline:\n kvs = pipeline | 'CreateKVs' >> beam.core.Create(KVs)\n kvs | 'WriteToSimpleKV' >> WriteToKVSink(simplekv, 'http://url_to_simple_kv/', final_table_name)", "docstring": "Demonstrates creating a new custom sink and using it in a pipeline.\n\n Uses the new sink in an example pipeline.\n\n Additionally demonstrates how a sink should be implemented using a\n ``PTransform``. 
This is the recommended way to develop sinks that are to be\n distributed to a large number of end users.\n\n This method runs two pipelines.\n\n (1) A pipeline that uses ``SimpleKVSink`` directly using the ``df.Write``\n transform.\n (2) A pipeline that uses a custom ``PTransform`` that wraps\n ``SimpleKVSink``.\n\nArgs:\n simplekv: an object that mocks the key-value storage.\n\n KVs: the set of key-value pairs to be written in the example pipeline.\n\n final_table_name_no_ptransform: the prefix of final set of tables to be\n created by the example pipeline that uses\n ``SimpleKVSink`` directly.\n\n final_table_name_with_ptransform: the prefix of final set of tables to be\n created by the example pipeline that uses\n a ``PTransform`` that wraps\n ``SimpleKVSink``.", "source": "github_repos"} -{"code": "def _benchmarkFeed(self, name, target, size, iters):\n feed_val = np.random.rand(size).astype(np.float32)\n times = []\n with ops.Graph().as_default():\n p = array_ops.placeholder(dtypes.float32, shape=[size])\n no_op = array_ops.identity(p).op\n with session.Session(target) as sess:\n sess.run(no_op, feed_dict={p: feed_val})\n for _ in range(iters):\n start_time = time.time()\n sess.run(no_op, feed_dict={p: feed_val})\n end_time = time.time()\n times.append(end_time - start_time)\n print('%s %d %f' % (name, size, np.median(times)))\n self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", "docstring": "Runs a microbenchmark to measure the cost of feeding a tensor.\n\n Reports the median cost of feeding a tensor of `size` * `sizeof(float)`\n bytes.\n\nArgs:\n name: A human-readable name for logging the output.\n target: The session target to use for the benchmark.\n size: The number of floating-point numbers to be feed.\n iters: The number of iterations to perform.", "source": "github_repos"} -{"code": "def calc_control_outputs(self, graph):\n control_outputs = {}\n for op in graph.get_operations():\n for control_input in op.control_inputs:\n if control_input not in control_outputs:\n control_outputs[control_input] = set()\n control_outputs[control_input].add(op)\n return control_outputs", "docstring": "Returns the map of control_outputs for a given graph.\n\nArgs:\n graph: The graph to parse.\n\nReturns:\n A map of the control outputs.", "source": "github_repos"} -{"code": "def erf(x):\n if any_symbolic_tensors((x,)):\n return Erf().symbolic_call(x)\n x = backend.convert_to_tensor(x)\n return backend.math.erf(x)", "docstring": "Computes the error function of `x`, element-wise.\n\nArgs:\n x: Input tensor.\n\nReturns:\n A tensor with the same dtype as `x`.\n\nExample:\n >>> x = np.array([-3.0, -2.0, -1.0, 0.0, 1.0])\n >>> keras.ops.erf(x)\n array([-0.99998 , -0.99532, -0.842701, 0., 0.842701], dtype=float32)", "source": "github_repos"} -{"code": "def with_options(self, options, name=None) -> 'DatasetV2':\n return _OptionsDataset(self, options, name=name)", "docstring": "Returns a new `tf.data.Dataset` with the given options set.\n\n The options are \"global\" in the sense they apply to the entire dataset.\n If options are set multiple times, they are merged as long as different\n options do not use different non-default values.\n\n >>> ds = tf.data.Dataset.range(5)\n >>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5),\n ... cycle_length=3,\n ... 
num_parallel_calls=3)\n >>> options = tf.data.Options()\n >>> # This will make the interleave order non-deterministic.\n >>> options.deterministic = False\n >>> ds = ds.with_options(options)\n\nArgs:\n options: A `tf.data.Options` that identifies the options the use.\n name: (Optional.) A name for the tf.data operation.\n\nReturns:\n A new `Dataset` with the transformation applied as described above.\n\nRaises:\n ValueError: when an option is set more than once to a non-default value", "source": "github_repos"} -{"code": "def create_primary_api_files(output_files, packages, packages_to_ignore, root_init_template, output_dir, output_package, api_name, api_version, compat_api_versions, compat_init_templates, lazy_loading=_LAZY_LOADING, use_relative_imports=False):\n module_name_to_file_path = {}\n for output_file in output_files:\n module_name = get_module(os.path.dirname(output_file), output_dir)\n module_name_to_file_path[module_name] = os.path.normpath(output_file)\n for module, file_path in module_name_to_file_path.items():\n if not os.path.isdir(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n open(file_path, 'a').close()\n module_text_map, deprecation_footer_map, root_module_footer = get_api_init_text(packages, packages_to_ignore, output_package, api_name, api_version, compat_api_versions, lazy_loading, use_relative_imports)\n missing_output_files = []\n root_module = ''\n compat_module_to_template = {_COMPAT_MODULE_TEMPLATE % v: t for v, t in zip(compat_api_versions, compat_init_templates)}\n for v in compat_api_versions:\n compat_module_to_template.update({_SUBCOMPAT_MODULE_TEMPLATE % (v, vs): t for vs, t in zip(compat_api_versions, compat_init_templates)})\n for module, text in module_text_map.items():\n if module not in module_name_to_file_path:\n module_file_path = '\"%s/__init__.py\"' % module.replace('.', '/')\n missing_output_files.append(module_file_path)\n continue\n contents = ''\n if module == root_module and root_init_template:\n with open(root_init_template, 'r') as root_init_template_file:\n contents = root_init_template_file.read()\n contents = contents.replace('# API IMPORTS PLACEHOLDER', text)\n contents = contents.replace('# __all__ PLACEHOLDER', root_module_footer)\n elif module in compat_module_to_template:\n with open(compat_module_to_template[module], 'r') as init_template_file:\n contents = init_template_file.read()\n contents = contents.replace('# API IMPORTS PLACEHOLDER', text)\n else:\n contents = _GENERATED_FILE_HEADER % get_module_docstring(module, packages[0], api_name) + text + _GENERATED_FILE_FOOTER\n if module in deprecation_footer_map:\n if '# WRAPPER_PLACEHOLDER' in contents:\n contents = contents.replace('# WRAPPER_PLACEHOLDER', deprecation_footer_map[module])\n else:\n contents += deprecation_footer_map[module]\n with open(module_name_to_file_path[module], 'w') as fp:\n fp.write(contents)\n if missing_output_files:\n missing_files = ',\\n'.join(sorted(missing_output_files))\n raise ValueError(f'Missing outputs for genrule:\\n{missing_files}. 
Be sure to add these targets to tensorflow/python/tools/api/generator/api_init_files_v1.bzl and tensorflow/python/tools/api/generator/api_init_files.bzl (tensorflow repo), or tf_keras/api/api_init_files.bzl (tf_keras repo)')", "docstring": "Creates __init__.py files for the Python API.\n\nArgs:\n output_files: List of __init__.py file paths to create.\n packages: Base python packages containing python with target tf_export\n decorators.\n packages_to_ignore: python packages to be ignored when checking for\n tf_export decorators.\n root_init_template: Template for top-level __init__.py file. \"# API IMPORTS\n PLACEHOLDER\" comment in the template file will be replaced with imports.\n output_dir: output API root directory.\n output_package: Base output package where generated API will be added.\n api_name: API you want to generate Currently, only `tensorflow`.\n api_version: API version to generate (`v1` or `v2`).\n compat_api_versions: Additional API versions to generate in compat/\n subdirectory.\n compat_init_templates: List of templates for top level compat init files in\n the same order as compat_api_versions.\n lazy_loading: Boolean flag. If True, a lazy loading `__init__.py` file is\n produced and if `False`, static imports are used.\n use_relative_imports: True if we should use relative imports when import\n submodules.\n\nRaises:\n ValueError: if output_files list is missing a required file.", "source": "github_repos"} -{"code": "def matmul(self, matmul_input: core.Tensor) -> Mapping[str, core.Tensor]:\n out = math_ops.matmul(matmul_input, self.matmul_filters)\n return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nArgs:\n matmul_input: Input tensor to matmul with the filter.\n\nReturns:\n A map of: output key -> output result.", "source": "github_repos"} -{"code": "def layer_statistics_dump(self, file: IO[str]) -> None:\n fields = ['op_name', 'tensor_idx'] + list(self._layer_debug_metrics.keys())\n if self._debug_options.layer_direct_compare_metrics is not None:\n fields += list(self._debug_options.layer_direct_compare_metrics.keys())\n fields += ['scale', 'zero_point', 'tensor_name']\n writer = csv.DictWriter(file, fields)\n writer.writeheader()\n if self.layer_statistics:\n for name, metrics in self.layer_statistics.items():\n data = metrics.copy()\n data['tensor_name'], _ = self._get_operand_name_and_index(name)\n data['tensor_idx'] = self._numeric_verify_op_details[name]['inputs'][0]\n data['op_name'] = self._quant_interpreter._get_op_details(self._defining_op[data['tensor_idx']])['op_name']\n details = self._quant_interpreter._get_tensor_details(data['tensor_idx'], subgraph_index=0)\n data['scale'], data['zero_point'] = (details['quantization_parameters']['scales'][0], details['quantization_parameters']['zero_points'][0])\n writer.writerow(data)", "docstring": "Dumps layer statistics into file, in csv format.\n\nArgs:\n file: file, or file-like object to write.", "source": "github_repos"} -{"code": "def _tensordot_reshape(a, axes, flipped=False):\n if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):\n shape_a = a.get_shape().as_list()\n axes = [i if i >= 0 else i + len(shape_a) for i in axes]\n free = [i for i in builtins.range(len(shape_a)) if i not in axes]\n free_dims = [shape_a[i] for i in free]\n prod_free = int(np.prod([shape_a[i] for i in free]))\n prod_axes = int(np.prod([shape_a[i] for i in axes]))\n perm = list(axes) + free if flipped else free + list(axes)\n new_shape = [prod_axes, prod_free] if flipped else [prod_free, 
prod_axes]\n if (perm != np.arange(len(shape_a))).any():\n a_trans = array_ops.transpose(a, perm)\n else:\n a_trans = a\n if a_trans.get_shape().as_list() != new_shape:\n reshaped_a = array_ops.reshape(a_trans, new_shape)\n else:\n reshaped_a = a_trans\n return (reshaped_a, free_dims, free_dims)\n else:\n if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):\n shape_a = a.get_shape().as_list()\n axes = [i if i >= 0 else i + len(shape_a) for i in axes]\n free = [i for i in builtins.range(len(shape_a)) if i not in axes]\n axes_dims = [shape_a[i] for i in axes]\n free_dims = [shape_a[i] for i in free]\n free_dims_static = free_dims\n axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name='axes')\n free = ops.convert_to_tensor(free, dtype=dtypes.int32, name='free')\n shape_a = array_ops.shape(a)\n else:\n free_dims_static = None\n shape_a = array_ops.shape(a)\n rank_a = array_ops.rank(a)\n axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name='axes')\n axes = array_ops.where(axes >= 0, axes, axes + rank_a)\n free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)\n free_dims = array_ops.gather(shape_a, free)\n axes_dims = array_ops.gather(shape_a, axes)\n prod_free_dims = reduce_prod(free_dims)\n prod_axes_dims = reduce_prod(axes_dims)\n if flipped:\n perm = array_ops.concat([axes, free], 0)\n new_shape = array_ops_stack.stack([prod_axes_dims, prod_free_dims])\n else:\n perm = array_ops.concat([free, axes], 0)\n new_shape = array_ops_stack.stack([prod_free_dims, prod_axes_dims])\n reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)\n return (reshaped_a, free_dims, free_dims_static)", "docstring": "Helper method to perform transpose and reshape for contraction op.\n\n This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`\n using `array_ops.transpose` and `array_ops.reshape`. The method takes a\n tensor and performs the correct transpose and reshape operation for a given\n set of indices. It returns the reshaped tensor as well as a list of indices\n necessary to reshape the tensor again after matrix multiplication.\n\nArgs:\n a: `Tensor`.\n axes: List or `int32` `Tensor` of unique indices specifying valid axes of\n `a`.\n flipped: An optional `bool`. Defaults to `False`. 
If `True`, the method\n assumes that `a` is the second argument in the contraction operation.\n\nReturns:\n A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is\n the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is\n either a list of integers or an `int32` `Tensor`, depending on whether\n the shape of a is fully specified, and free_dims_static is either a list\n of integers and None values, or None, representing the inferred\n static shape of the free dimensions", "source": "github_repos"} -{"code": "def from_vision_text_configs(cls, vision_config: PretrainedConfig, text_config: PretrainedConfig, **kwargs):\n return cls(vision_config=vision_config.to_dict(), text_config=text_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`VisionTextDualEncoderConfig`] (or a derived class) from text model configuration and vision\n model configuration.\n\nReturns:\n [`VisionTextDualEncoderConfig`]: An instance of a configuration object", "source": "github_repos"} -{"code": "def _safe_close(self, sess: session.Session):\n try:\n sess.close()\n except Exception:\n pass", "docstring": "Closes a session without raising an exception.\n\n Just like sess.close() but ignores exceptions.\n\nArgs:\n sess: A `Session`.", "source": "github_repos"} -{"code": "def _wrap_call_and_conditional_losses(layer):\n layer_call = _get_layer_call_method(layer)\n\n def call_and_return_conditional_losses(*args, **kwargs):\n \"\"\"Returns layer (call_output, conditional losses) tuple.\"\"\"\n call_output = layer_call(*args, **kwargs)\n if version_utils.is_v1_layer_or_model(layer):\n conditional_losses = layer.get_losses_for(_filtered_inputs([args, kwargs]))\n else:\n conditional_losses = [l for l in layer.losses if not hasattr(l, '_unconditional_loss')]\n return (call_output, conditional_losses)\n return _create_call_fn_decorator(layer, call_and_return_conditional_losses)", "docstring": "Wraps call function that returns a tuple of (outputs, losses).\n\n The losses returned are conditional on the inputs passed to the call function.\n Unconditional losses (e.g. 
weight regularizeration) are wrapped separately.\n\nArgs:\n layer: a Keras layer object\n\nReturns:\n python call function that returns outputs and conditional losses -- excludes\n activity regularizer", "source": "github_repos"} -{"code": "def get_impacted_files_from_tiny_model_summary(diff_with_last_commit: bool=False) -> List[str]:\n repo = Repo(PATH_TO_REPO)\n folder = Path(repo.working_dir)\n if not diff_with_last_commit:\n print(f'main is at {repo.refs.main.commit}')\n print(f'Current head is at {repo.head.commit}')\n commits = repo.merge_base(repo.refs.main, repo.head)\n for commit in commits:\n print(f'Branching commit: {commit}')\n else:\n print(f'main is at {repo.head.commit}')\n commits = repo.head.commit.parents\n for commit in commits:\n print(f'Parent commit: {commit}')\n if not os.path.isfile(folder / 'tests/utils/tiny_model_summary.json'):\n return []\n files = set()\n for commit in commits:\n with checkout_commit(repo, commit):\n with open(folder / 'tests/utils/tiny_model_summary.json', 'r', encoding='utf-8') as f:\n old_content = f.read()\n with open(folder / 'tests/utils/tiny_model_summary.json', 'r', encoding='utf-8') as f:\n new_content = f.read()\n old_content = json.loads(old_content)\n new_content = json.loads(new_content)\n old_keys = set(old_content.keys())\n new_keys = set(new_content.keys())\n keys_with_diff = old_keys.symmetric_difference(new_keys)\n common_keys = old_keys.intersection(new_keys)\n for key in common_keys:\n if old_content[key] != new_content[key]:\n keys_with_diff.add(key)\n impacted_model_classes = []\n for key in keys_with_diff:\n if key in new_keys:\n impacted_model_classes.extend(new_content[key]['model_classes'])\n with open(folder / 'src/transformers/__init__.py') as fp:\n lines = fp.readlines()\n new_lines = []\n for line in lines:\n if line == '_import_structure = {\\n':\n new_lines.append(line)\n elif line == '# Direct imports for type-checking\\n':\n break\n elif len(new_lines) > 0:\n line = re.sub('is_.+_available\\\\(\\\\)', 'True', line)\n line = line.replace('OptionalDependencyNotAvailable', 'Exception')\n line = line.replace('Exception()', 'Exception')\n new_lines.append(line)\n with tempfile.TemporaryDirectory() as tmpdirname:\n with open(os.path.join(tmpdirname, 'temp_init.py'), 'w') as fp:\n fp.write(''.join(new_lines))\n spec = importlib.util.spec_from_file_location('temp_init', os.path.join(tmpdirname, 'temp_init.py'))\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n import_structure = module._import_structure\n reversed_structure = {}\n for key, values in import_structure.items():\n for value in values:\n reversed_structure[value] = key\n for model_class in impacted_model_classes:\n module = reversed_structure[model_class]\n framework = ''\n if model_class.startswith('TF'):\n framework = 'tf'\n elif model_class.startswith('Flax'):\n framework = 'flax'\n fn = f'modeling_{module.split('.')[-1]}.py' if framework == '' else f'modeling_{framework}_{module.split('.')[-1]}.py'\n files.add(f'src.transformers.{module}.{fn}'.replace('.', os.path.sep).replace(f'{os.path.sep}py', '.py'))\n return sorted(files)", "docstring": "Return a list of python modeling files that are impacted by the changes of `tiny_model_summary.json` in between:\n\n - the current head and the main branch if `diff_with_last_commit=False` (default)\n - the current head and its parent commit otherwise.\n\nReturns:\n `List[str]`: The list of Python modeling files that are impacted by the changes of `tiny_model_summary.json`.", 
"source": "github_repos"} -{"code": "def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):\n if not masks.shape[0] == scores.shape[0] == labels.shape[0]:\n raise ValueError('mask, scores and labels must have the same shape!')\n to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)\n return (masks[to_keep], scores[to_keep], labels[to_keep])", "docstring": "Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and\n `labels`.\n\nArgs:\n masks (`torch.Tensor`):\n A tensor of shape `(num_queries, height, width)`.\n scores (`torch.Tensor`):\n A tensor of shape `(num_queries)`.\n labels (`torch.Tensor`):\n A tensor of shape `(num_queries)`.\n object_mask_threshold (`float`):\n A number between 0 and 1 used to binarize the masks.\n\nRaises:\n `ValueError`: Raised when the first dimension doesn't match in all input tensors.\n\nReturns:\n `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region\n < `object_mask_threshold`.", "source": "github_repos"} -{"code": "def __call__(self, image: Union[str, List[str], 'Image.Image', List['Image.Image']], candidate_labels: List[str], **kwargs: Any) -> Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]]:\n if 'images' in kwargs:\n image = kwargs.pop('images')\n if image is None:\n raise ValueError('Cannot call the zero-shot-image-classification pipeline without an images argument!')\n return super().__call__(image, candidate_labels=candidate_labels, **kwargs)", "docstring": "Assign labels to the image(s) passed as inputs.\n\nArgs:\n image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):\n The pipeline handles three types of images:\n\n - A string containing a http link pointing to an image\n - A string containing a local path to an image\n - An image loaded in PIL directly\n\n candidate_labels (`List[str]`):\n The candidate labels for this image. They will be formatted using *hypothesis_template*.\n\n hypothesis_template (`str`, *optional*, defaults to `\"This is a photo of {}\"`):\n The format used in conjunction with *candidate_labels* to attempt the image classification by\n replacing the placeholder with the candidate_labels. Pass \"{}\" if *candidate_labels* are\n already formatted.\n\n timeout (`float`, *optional*, defaults to None):\n The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and\n the call may block forever.\n\nReturns:\n A list of dictionaries containing one entry per proposed label. Each dictionary contains the\n following keys:\n - **label** (`str`) -- One of the suggested *candidate_labels*.\n - **score** (`float`) -- The score attributed by the model to that label. 
It is a value between\n 0 and 1, computed as the `softmax` of `logits_per_image`.", "source": "github_repos"} -{"code": "def save(self, file_prefix, options=None):\n if options and options.experimental_enable_async_checkpoint:\n self._checkpoint_options = options\n if checkpoint_context.in_preemption_save_context():\n if self._async_checkpointer_impl is not None:\n self._async_checkpointer_impl.sync()\n logging.warning('Switching to regular sync checkpoint for preemption checkpoint.')\n elif context.executing_eagerly():\n return self._async_checkpointer().save(file_prefix, options)\n else:\n logging.warning('Saving async checkpoint in graph mode is currently not supported; switching to regular sync checkpoint instead.')\n if isinstance(file_prefix, os.PathLike):\n file_prefix = os.fspath(file_prefix)\n options = copy.copy(options) or checkpoint_options.CheckpointOptions()\n graph_building = not context.executing_eagerly()\n if graph_building:\n if ops.inside_function():\n raise NotImplementedError('Calling tf.train.Checkpoint.save() from a function is not supported, as save() modifies saving metadata in ways not supported by TensorFlow Operations. Consider using tf.train.Checkpoint.write(), a lower-level API which does not update metadata. tf.train.latest_checkpoint and related APIs will not see this checkpoint.')\n session = get_session()\n if self._save_counter is None:\n session.run(self.save_counter.initializer)\n if not graph_building or self._save_assign_op is None:\n with ops.colocate_with(self.save_counter):\n assign_op = self.save_counter.assign_add(1, read_value=True)\n if graph_building:\n self._save_assign_op = data_structures.NoDependency(assign_op)\n if graph_building:\n checkpoint_number = session.run(self._save_assign_op)\n else:\n checkpoint_number = assign_op.numpy()\n if options.experimental_write_callbacks is None:\n options.experimental_write_callbacks = [_update_checkpoint_state_internal]\n else:\n options.experimental_write_callbacks.append(_update_checkpoint_state_internal)\n return self._write('%s-%d' % (file_prefix, checkpoint_number), options=options)", "docstring": "Saves a training checkpoint and provides basic checkpoint management.\n\n The saved checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.save()` is\n called.\n\n `save` is a basic convenience wrapper around the `write` method,\n sequentially numbering checkpoints using `save_counter` and updating the\n metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint\n management, for example garbage collection and custom numbering, may be\n provided by other utilities which also wrap `write` and `read`.\n (`tf.train.CheckpointManager` for example).\n\n ```\n step = tf.Variable(0, name=\"step\")\n checkpoint = tf.train.Checkpoint(step=step)\n checkpoint.save(\"/tmp/ckpt\")\n\n # Later, read the checkpoint with restore()\n checkpoint.restore(\"/tmp/ckpt-1\")\n\n # You can also pass options to save() and restore(). For example this\n # runs the IO ops on the localhost:\n options = tf.train.CheckpointOptions(experimental_io_device=\"/job:localhost\")\n checkpoint.save(\"/tmp/ckpt\", options=options)\n\n # Later, read the checkpoint with restore()\n checkpoint.restore(\"/tmp/ckpt-1\", options=options)\n ```\n\nArgs:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). 
Names are generated based on this\n prefix and `Checkpoint.save_counter`.\n options: Optional `tf.train.CheckpointOptions` object.\n\nReturns:\n The full path to the checkpoint.", "source": "github_repos"} -{"code": "def _batched_mask_to_box(masks: 'torch.Tensor'):\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n shape = masks.shape\n height, width = shape[-2:]\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + height * ~in_height\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + width * ~in_width\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n out = out.reshape(*shape[:-2], 4)\n return out", "docstring": "Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which\n corresponds the following required indices:\n - LEFT: left hand side of the bounding box\n - TOP: top of the bounding box\n - RIGHT: right of the bounding box\n - BOTTOM: bottom of the bounding box\n\n Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape\n is channel_1 x channel_2 x ... x 4.\n\nArgs:\n - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)", "source": "github_repos"} -{"code": "def get_num_bytes(self, batch: Sequence[scipy.sparse.csr_matrix]) -> int:\n return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\n The number of bytes of data for a batch.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)\n hidden_states = self.input_layernorm(hidden_states)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.mlp(hidden_states)\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if use_cache:\n outputs += (present_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`):\n input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*):\n attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,\n query_sequence_length, key_sequence_length)` if default 
attention is used.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence.", "source": "github_repos"} -{"code": "def _sym_missing(self) -> Dict[str, Any]:\n missing = dict()\n for k, v in self._sym_attributes.items():\n if pg_typing.MISSING_VALUE != v and isinstance(v, base.Symbolic):\n missing_child = v.sym_missing(flatten=False)\n if missing_child:\n missing[k] = missing_child\n return missing", "docstring": "Returns missing values for Functor.\n\n Semantically unbound arguments are not missing, thus we only return partial\n bound arguments in `sym_missing`. As a result, a functor is partial only\n when any of its bound arguments is partial.\n\nReturns:\n A dict of missing key (or path) to missing value.", "source": "github_repos"} -{"code": "def checksum(path):\n filesystem = FileSystems.get_filesystem(path)\n return filesystem.checksum(path)", "docstring": "Fetch checksum metadata of a file on the\n :class:`~apache_beam.io.filesystem.FileSystem`.\n\n This operation returns checksum metadata as stored in the underlying\n FileSystem. It should not read any file data. Checksum type and format are\n FileSystem dependent and are not compatible between FileSystems.\n\nArgs:\n path: string path of a file.\n\n Returns: string containing checksum\n\nRaises:\n ``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github_repos"} -{"code": "def set_weight_collections(self, weight_collections):\n self._weight_collections = weight_collections", "docstring": "Sets the weight collections for the layer.\n\nArgs:\n weight_collections: A list of collection names to which the Variable will\n be added.", "source": "github_repos"} -{"code": "def extrapolation_scheme(value_grid, t1, t2, equation_params_fn):\n first_half_step = implicit_scheme(value_grid, t1, (t1 + t2) / 2, equation_params_fn)\n two_half_steps = implicit_scheme(first_half_step, (t1 + t2) / 2, t2, equation_params_fn)\n full_step = implicit_scheme(value_grid, t1, t2, equation_params_fn)\n return 2 * two_half_steps - full_step", "docstring": "Constructs extrapolation implicit-explicit scheme.\n\n Performs two implicit half-steps, one full implicit step, and combines them\n with such coefficients that ensure second-order errors. More computationally\n expensive than Crank-Nicolson scheme, but provides a better approximation for\n high-wavenumber components, which results in absence of oscillations typical\n for Crank-Nicolson scheme in case of non-smooth initial conditions. See [1]\n for details.\n\n #### References:\n [1]: D. Lawson, J & Ll Morris, J. The Extrapolation of First Order Methods\n for Parabolic Partial Differential Equations. I. 1978\n SIAM Journal on Numerical Analysis. 15. 1212-1224.\n https://epubs.siam.org/doi/abs/10.1137/0715082\n\nArgs:\n value_grid: A `Tensor` of real dtype. 
Grid of solution values at the current\n time.\n t1: Time before the step.\n t2: Time after the step.\n equation_params_fn: A callable that takes a scalar `Tensor` argument\n representing time and constructs the tridiagonal matrix `A`\n (a tuple of three `Tensor`s, main, upper, and lower diagonals)\n and the inhomogeneous term `b`. All of the `Tensor`s are of the same\n `dtype` as `inner_value_grid` and of the shape broadcastable with the\n shape of `inner_value_grid`.\n\nReturns:\n A `Tensor` of the same shape and `dtype` a\n `values_grid` and represents an approximate solution `u(t2)`.", "source": "github_repos"} -{"code": "def __init__(self, use_variable):\n super(GatherModel, self).__init__()\n w_val = np.random.randn(128, 32).astype('f4')\n if use_variable:\n self.w = variables.Variable(w_val)\n else:\n self.w = w_val", "docstring": "Initializes a GatherModel.\n\nArgs:\n use_variable: If True, creates a variable for weight.", "source": "github_repos"} -{"code": "def _write_object_proto(self, proto, options):\n write_object_proto_for_resource_variable(self, proto, options)", "docstring": "Writes additional information of the variable into the SavedObject proto.\n\n Subclasses of ResourceVariables could choose to override this method to\n customize extra information to provide when saving a SavedModel.\n\n Ideally, this should contain the logic in\n write_object_proto_for_resource_variable but `DistributedValue` is an\n outlier at the momemnt. Once `DistributedValue` becomes a proper\n ResourceVariable, we should remove the helper method below.\n\nArgs:\n proto: `SavedObject` proto to update.\n options: A `SaveOption` instance that configures save behavior.", "source": "github_repos"} -{"code": "def stop_standing_subprocess(proc):\n logging.debug('Stopping standing subprocess %d', proc.pid)\n _kill_process_tree(proc)\n if proc.stdout:\n proc.stdout.close()\n if proc.stderr:\n proc.stderr.close()\n proc.wait()\n logging.debug('Stopped standing subprocess %d', proc.pid)", "docstring": "Stops a subprocess started by start_standing_subprocess.\n\n Before killing the process, we check if the process is running, if it has\n terminated, Error is raised.\n\n Catches and ignores the PermissionError which only happens on Macs.\n\nArgs:\n proc: Subprocess to terminate.\n\nRaises:\n Error: if the subprocess could not be stopped.", "source": "github_repos"} -{"code": "def elementwise_division(func):\n sparse_func = jax_sparse.sparsify(func)\n\n @functools.wraps(func)\n def sparse_wrapper(x1, x2):\n if isinstance(x1, jax_sparse.JAXSparse):\n if isinstance(x2, jax_sparse.JAXSparse):\n x1 = x1.todense()\n x2 = x2.todense()\n elif isinstance(x1, jax_sparse.BCOO):\n if not hasattr(x2, 'shape') or len(x2.shape) == 0:\n return jax_sparse.BCOO((func(x1.data, x2), x1.indices), shape=x1.shape, indices_sorted=x1.indices_sorted, unique_indices=x1.unique_indices)\n else:\n if not jax_utils.is_in_jax_tracing_scope(x2):\n x2_zeros_and_nans = jnp.equal(x2, 0)\n if not jnp.issubdtype(x2.dtype, jnp.integer):\n x2_zeros_and_nans = jnp.logical_or(x2_zeros_and_nans, jnp.isnan(x2))\n x2_zeros_and_nans = jax_sparse.bcoo_fromdense(x2_zeros_and_nans, n_batch=x1.n_batch, n_dense=x1.n_dense, index_dtype=x1.indices.dtype)\n x1 = bcoo_add_indices(x1, x2_zeros_and_nans, sum_duplicates=True)\n return sparse_func(x1, x2)\n else:\n raise ValueError(f'Unsupported sparse format: {x1.__class__}')\n elif isinstance(x2, jax_sparse.JAXSparse):\n x2 = x2.todense()\n return func(x1, x2)\n return sparse_wrapper", "docstring": "Decorator to 
add support for `BCOO` sparse tensors to element-wise binary\n division and related operators.\n\n This decorator is designed for operations related to the division of two\n two operands (e.g. `divide`). It accepts `BCOO` tensors for both the\n dividend and the divisor, but handles them differently based on whether they\n are the dividend or the divisor.\n\n - If the divisor is sparse, it is densified and the result is dense because\n the result contains Inf or Nan outside of the indices of the dividend.\n - If the dividend is sparse and the divisor is dense, it finds occurrences\n of zeros and NaNs in the divisor. The result may therefore have more\n indices than there were in the dividend to return correct values where the\n divisor was zero or NaN.\n - If the dividend is sparse and the divisor is a scalar, it does the\n division element-wise. Note that the result is incorrectly sparse if the\n scalar divisor is zero.\n\nArgs:\n func: The function to wrap.\n\nReturns:\n Wrapped function that supports `BCOO` sparse tensors.", "source": "github_repos"} -{"code": "def remove_objects_from_args(args: Iterable[Any], kwargs: Dict[str, Any], pvalue_class: Union[Type[T], Tuple[Type[T], ...]]) -> Tuple[List[Any], Dict[str, Any], List[T]]:\n pvals = []\n\n def swapper(value):\n pvals.append(value)\n return ArgumentPlaceholder()\n new_args = [swapper(v) if isinstance(v, pvalue_class) else v for v in args]\n new_kwargs = dict(((k, swapper(v)) if isinstance(v, pvalue_class) else (k, v) for k, v in sorted(kwargs.items())))\n return (new_args, new_kwargs, pvals)", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\n Replaces all objects of a given type in args/kwargs with a placeholder.\n\nArgs:\n args: A list of positional arguments.\n kwargs: A dictionary of keyword arguments.\n pvalue_class: A class object representing the types of arguments that must\n be replaced with a placeholder value (instance of ArgumentPlaceholder).\n\nReturns:\n A 3-tuple containing a modified list of positional arguments, a modified\n dictionary of keyword arguments, and a list of all objects replaced with\n a placeholder value.", "source": "github_repos"} -{"code": "def get_registered_object(name, custom_objects=None, module_objects=None):\n if name in _GLOBAL_CUSTOM_OBJECTS:\n return _GLOBAL_CUSTOM_OBJECTS[name]\n elif custom_objects and name in custom_objects:\n return custom_objects[name]\n elif module_objects and name in module_objects:\n return module_objects[name]\n return None", "docstring": "Returns the class associated with `name` if it is registered with Keras.\n\n This function is part of the Keras serialization and deserialization\n framework. 
It maps strings to the objects associated with them for\n serialization/deserialization.\n\nExample:\n ```\n def from_config(cls, config, custom_objects=None):\n if 'my_custom_object_name' in config:\n config['hidden_cls'] = tf.keras.utils.get_registered_object(\n config['my_custom_object_name'], custom_objects=custom_objects)\n ```\n\nArgs:\n name: The name to look up.\n custom_objects: A dictionary of custom objects to look the name up in.\n Generally, custom_objects is provided by the user.\n module_objects: A dictionary of custom objects to look the name up in.\n Generally, module_objects is provided by midlevel library implementers.\n\nReturns:\n An instantiable class associated with 'name', or None if no such class\n exists.", "source": "github_repos"} -{"code": "def InsertNodesBefore(new_nodes, target):\n for node in new_nodes:\n _InsertNodeAt(node, target, after=False)", "docstring": "Insert new_nodes before the given target location in the tree.\n\nArgs:\n new_nodes: a sequence of new nodes to insert (the nodes should not be in the\n tree).\n target: the target node before which the new node node will be inserted.\n\nRaises:\n RuntimeError: if the tree is corrupted, or the insertion would corrupt it.", "source": "github_repos"} -{"code": "def non_slot_devices(self, var_list):\n raise NotImplementedError('must be implemented in descendants')", "docstring": "Device(s) for non-slot variables.\n\n DEPRECATED: TF 1.x ONLY.\n\n This method returns non-slot devices where non-slot variables are placed.\n Users can create non-slot variables on these devices by using a block:\n\n ```python\n with tf.distribute.StrategyExtended.colocate_vars_with(tf.distribute.StrategyExtended.non_slot_devices(...)):\n ...\n ```\n\nArgs:\n var_list: The list of variables being optimized, needed with the\n default `tf.distribute.Strategy`.\n\nReturns:\n A sequence of devices for non-slot variables.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor:\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, output_attentions=output_attentions)\n hidden_states = self.attn_dropout(hidden_states)\n hidden_states = residual + hidden_states\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.cross_attention_layer_norm(hidden_states)\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_value=cross_attn_past_key_value, attention_mask=encoder_attention_mask, output_attentions=output_attentions)\n hidden_states = self.attn_dropout(hidden_states)\n hidden_states = residual + hidden_states\n present_key_value += cross_attn_present_key_value\n residual = hidden_states\n hidden_states = 
self.ffn_layer_norm(hidden_states)\n hidden_states = self.ffn(hidden_states)\n hidden_states = self.ffn_dropout(hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states, present_key_value)\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`):\n input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`):\n attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very\n large negative values.\n encoder_hidden_states (`torch.FloatTensor`):\n cross attention input to the layer of shape `(batch, seq_len, embed_dim)`\n encoder_attention_mask (`torch.FloatTensor`):\n encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by\n very large negative values.\n past_key_value (`Tuple(torch.FloatTensor)`):\n cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.", "source": "github_repos"} -{"code": "def _run_graph(self, device, input_shape, variable, num_inputs, axis, grad, num_iters):\n graph = ops.Graph()\n with graph.as_default():\n outputs = build_graph(device, input_shape, variable, num_inputs, axis, grad)\n config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(optimizer_options=config_pb2.OptimizerOptions(opt_level=config_pb2.OptimizerOptions.L0)))\n with session_lib.Session(graph=graph, config=config) as session:\n variables.global_variables_initializer().run()\n _ = session.run(outputs)\n start_time = time.time()\n for _ in range(num_iters):\n _ = session.run(outputs)\n duration = time.time() - start_time\n print('%s shape:%d/%d var: %r #inputs:%d axis:%d grad:%r - %f secs - %f GB/sec' % (device, input_shape[0], input_shape[1], variable, num_inputs, axis, grad, duration / num_iters, num_inputs * input_shape[0] * input_shape[1] * 4 * 2 * 100 / (duration / num_iters) / 1000000000.0))\n name_template = 'concat_bench_{device}_input_shape_{shape}_variable_{variable}_num_inputs_{num_inputs}_axis_{axis}_grad_{grad}'\n self.report_benchmark(name=name_template.format(device=device, num_inputs=num_inputs, variable=variable, grad=grad, shape=str(input_shape).replace(' ', ''), axis=str(axis), iters=num_iters))\n return duration", "docstring": "Run the graph and print its execution time.\n\nArgs:\n device: string, the device to run on.\n input_shape: shape of the input tensors.\n variable: whether or not the input shape should be fixed\n num_inputs: the number of inputs to concat\n axis: axis to be concat'ed\n grad: if True compute the gradient\n num_iters: number of steps to run.\n\nReturns:\n The duration of the run in seconds.", "source": "github_repos"} -{"code": "def write(self, value, *labels):\n raise NotImplementedError", "docstring": "Writes the value to the given cache.\n\nArgs:\n value: An encodable (with corresponding PCoder) value\n *labels: List of labels for PCollection instance", "source": "github_repos"} -{"code": "def get_min_max_value(self) -> tuple[float, float]:\n average_min_max_statistics = self._statistics.average_min_max_statistics\n num_samples = average_min_max_statistics.num_samples\n if num_samples == 0:\n raise ValueError(f'num_samples must not be 0 when calibration method is AverageMinMax: {self._calib_opts}')\n min_value, max_value = 
(average_min_max_statistics.min_sum / num_samples, average_min_max_statistics.max_sum / num_samples)\n return (min_value, max_value)", "docstring": "Calculates the average of min and max values.\n\nReturns:\n (min_value, max_value): Min and max calculated using AverageMinMax\n\nRaises:\n ValueError: num_samples is 0.", "source": "github_repos"} -{"code": "def all_matches(pcoll, regex):\n regex = Regex._regex_compile(regex)\n\n def _process(element):\n m = regex.match(element)\n if m:\n yield [m.group(ix) for ix in range(m.lastindex + 1)]\n return pcoll | FlatMap(_process)", "docstring": "Returns all matches (groups) if zero or more characters at the beginning\n of string match the regular expression.\n\nArgs:\n regex: the regular expression string or (re.compile) pattern.", "source": "github_repos"} -{"code": "def recipe_manual(config, auth_read):\n hello(config, {'auth': auth_read, 'hour': [], 'say': 'Hello Manual', 'sleep': 0})", "docstring": "Used by tests.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.", "source": "github_repos"} -{"code": "def matches_alias(self, alias: str) -> bool:\n del self\n del alias\n return False", "docstring": "Indicates whether the expression will be selected as the given alias.\n\n Intended to be over-ridden by sub-classes which can safely implement it.\n Given an expression and an alias, indicates whether the expression will be\n SELECT'd as the given alias. For example, an expression like `SELECT a.b`\n matches the alias 'b', making it equivalent to the expression\n `SELECT a.b AS b`.\n\nArgs:\n alias: The alias to compare the expression against.\n\nReturns:\n True when the expression evaluates to the same name as the alias and False\n otherwise.", "source": "github_repos"} -{"code": "def discount_bond_price(self, state: types.RealTensor, times: types.RealTensor, maturities: types.RealTensor, name: str=None) -> types.RealTensor:\n name = name or self._name + '_discount_bond_prices'\n with tf.name_scope(name):\n x_t = tf.convert_to_tensor(state, self._dtype)\n times = tf.convert_to_tensor(times, self._dtype)\n maturities = tf.convert_to_tensor(maturities, self._dtype)\n input_shape_times = tf.shape(times)\n mean_reversion = self._mean_reversion\n y_t = self.state_y(times)\n y_t = tf.reshape(tf.transpose(y_t), tf.concat([input_shape_times, [self._dim, self._dim]], axis=0))\n values = self._bond_reconstitution(times, maturities, mean_reversion, x_t, y_t, 1, tf.shape(times)[0])\n return values[0][0]", "docstring": "Returns zero-coupon bond prices `P(t,T)` conditional on `x(t)`.\n\nArgs:\n state: A `Tensor` of real dtype and shape compatible with\n `(num_times, dim)` specifying the state `x(t)`.\n times: A `Tensor` of real dtype and shape `(num_times,)`. The time `t`\n at which discount bond prices are computed.\n maturities: A `Tensor` of real dtype and shape `(num_times,)`. The time\n to maturity of the discount bonds.\n name: Str. 
The name to give this op.\n Default value: `discount_bond_prices`.\n\nReturns:\n A `Tensor` of real dtype and the same shape as `(num_times,)`\n containing the price of zero-coupon bonds.", "source": "github_repos"} -{"code": "def _pad_all_input(inputs: Iterable[core_types.Tensor], padded_shapes: List[Optional[tensor_shape.TensorShape]], padding_spec: PaddingSpec) -> Tuple[List[List[Any]], List[dynamic_padding.PaddingMap]]:\n maximum_static_shapes = []\n need_padding = []\n input_shape_tensors = []\n for core_idx, inputs_per_core in enumerate(inputs):\n for idx, input_tensor in enumerate(inputs_per_core):\n input_shape = input_tensor.get_shape().as_list()\n if core_idx == 0:\n input_shape_tensors.append([])\n maximum_static_shapes.append(input_shape)\n need_padding.append(np.full_like(input_shape, False, dtype=bool))\n else:\n for i, s in enumerate(input_shape):\n if s is None or s != maximum_static_shapes[idx][i]:\n need_padding[idx][i] = True\n maximum_static_shapes[idx] = max(input_shape, maximum_static_shapes[idx])\n real_input_shape = array_ops.shape(input_tensor)\n real_input_shape.op._set_attr(_POST_DEVICE_REWRITE_ATTR, attr_value_pb2.AttrValue(b=True))\n input_shape_tensors[idx].append(real_input_shape)\n maximum_shapes = []\n for shapes_per_input in input_shape_tensors:\n maximum_shapes.append(math_ops.reduce_max(array_ops_stack.stack(shapes_per_input), axis=0))\n padded_inputs = []\n real_shapes = []\n padding_maps = []\n for core_idx, inputs_per_core in enumerate(inputs):\n padded_inputs.append([])\n real_shapes.append([])\n real_shape_idx = len(inputs_per_core) - 1\n for idx, input_tensor in enumerate(inputs_per_core):\n input_shape_tensor = input_shape_tensors[idx][core_idx]\n input_shape = input_tensor.get_shape().as_list()\n padded_shape = padded_shapes[idx]\n if any(need_padding[idx]) and padded_shape is not None:\n for i, s in enumerate(input_shape):\n if need_padding[idx][i]:\n if core_idx == 0:\n real_shape_idx += 1\n padding_map = dynamic_padding.PaddingMap()\n padding_map.arg_index = idx\n padding_map.shape_index = i\n padding_map.padding_arg_index = real_shape_idx\n padding_maps.append(padding_map)\n real_shapes[core_idx].append(math_ops.cast(input_shape_tensor[i], dtypes.int32))\n paddings = []\n for i, s in enumerate(padded_shape.dims):\n if need_padding[idx][i]:\n minimum_dynamic_dim_size = 2\n if s.value is not None:\n max_dim_size = max(s.value, minimum_dynamic_dim_size)\n else:\n max_dim_size = math_ops.maximum(maximum_shapes[idx][i], minimum_dynamic_dim_size)\n if padding_spec == PaddingSpec.POWER_OF_TWO:\n max_dim_size = _ceil_to_pow_of_n(max_dim_size, 2)\n padding = [0, max_dim_size - input_shape_tensor[i]]\n else:\n padding = [0, 0]\n paddings.append(padding)\n if input_tensor.get_shape().is_fully_defined():\n padded_input = cond.cond(array_ops.constant(True), lambda: array_ops.pad(input_tensor, paddings), lambda: input_tensor)\n else:\n padded_input = array_ops.pad(input_tensor, paddings)\n padded_input.op._set_attr(_POST_DEVICE_REWRITE_ATTR, attr_value_pb2.AttrValue(b=True))\n padded_inputs[core_idx].append(padded_input)\n else:\n padded_inputs[core_idx].append(input_tensor)\n num_replicas = len(padded_inputs)\n for i in range(num_replicas):\n padded_inputs[i].extend(real_shapes[i])\n return (padded_inputs, padding_maps)", "docstring": "Pad all input tensors given padded_shapes.\n\n The real shape tensors will be concatenated with the padded original inputs.\n\nArgs:\n inputs: The original inputs.\n padded_shapes: A list of padded shapes for each input. 
If an entry is None,\n no padding is performed.\n padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the\n padding policy when the `inputs` to `tf.tpu.replicate` is dynamic.\n One usage is to enable automatic bucketizing on the inputs by setting the\n value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the\n recompilation in the XLA side.\n\nReturns:\n The padded inputs and a PaddingMap list which maps the padded input\n dimension to the real shape argument index.", "source": "github_repos"} -{"code": "def test_antithetic_sample_paths_mean_2d(self, random_type, seed):\n mu = np.array([0.2, 0.7])\n a = np.array([[0.4, 0.1], [0.3, 0.2]])\n b = np.array([[0.33, -0.03], [0.21, 0.5]])\n\n def drift_fn(t, x):\n del x\n return mu * tf.sqrt(t)\n\n def vol_fn(t, x):\n del x\n return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)\n times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])\n num_samples = 5000\n x0 = np.array([0.1, -1.1])\n paths = self.evaluate(euler_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=num_samples, initial_state=x0, random_type=random_type, time_step=0.01, seed=seed))\n self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)\n means = np.mean(paths, axis=0)\n times = np.reshape(times, [-1, 1])\n expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5)\n self.assertAllClose(means, expected_means, rtol=0.005, atol=0.005)", "docstring": "Tests path properties for 2-dimentional anthithetic variates method.\n\n The same test as above but with `PSEUDO_ANTITHETIC` random type.\n We construct the following Ito processes.\n\n dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2\n dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2\n\n mu_1, mu_2 are constants.\n s_ij = a_ij t + b_ij\n\n For this process expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.\n\nArgs:\n random_type: Random number type defined by tff.math.random.RandomType\n enum.\n seed: Random seed.", "source": "github_repos"} -{"code": "def _colocation_dict(self) -> dict[str, traceable_stack.TraceableObject]:\n locations_dict = self._colocation_code_locations or {}\n return locations_dict.copy()", "docstring": "Code locations for colocation context managers active at op creation.\n\n This property will return a dictionary for which the keys are nodes with\n which this Operation is colocated, and for which the values are\n traceable_stack.TraceableObject instances. 
The TraceableObject instances\n record the location of the relevant colocation context manager but have the\n \"obj\" field set to None to prevent leaking private data.\n\n For example, suppose file_a contained these lines:\n\n file_a.py:\n 14: node_a = tf.constant(3, name='NODE_A')\n 15: with tf.compat.v1.colocate_with(node_a):\n 16: node_b = tf.constant(4, name='NODE_B')\n\n Then a TraceableObject t_obj representing the colocation context manager\n would have these member values:\n\n t_obj.obj -> None\n t_obj.filename = 'file_a.py'\n t_obj.lineno = 15\n\n and node_b.op._colocation_dict would return the dictionary\n\n { 'NODE_A': t_obj }\n\nReturns:\n {str: traceable_stack.TraceableObject} as per this method's description,\n above.", "source": "github_repos"} -{"code": "def __init__(self, gain=1.0, dtype=dtypes.float32):\n self.gain = gain\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))", "docstring": "Initializer that generates the identity matrix.\n\n Only use for 2D matrices.\n\nArgs:\n gain: Multiplicative factor to apply to the identity matrix.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. Only floating point types are supported.", "source": "github_repos"} -{"code": "def np_array(values, dtype=None, copy=True, order='K'):\n if dtype is not None and np.issubdtype(dtype, np.number):\n return np.array(values, copy=copy, order=order).astype(dtype)\n else:\n return np.array(values, dtype=dtype, copy=copy, order=order)", "docstring": "Creates a NumPy array containing input values.\n\n It will make a copy of the object.\n\n In NumPy 2.x and later, strict type casting can lead to errors when values\n overflow the specified dtype. This function addresses this by replacing direct\n np.array(..., dtype=...) calls with np.array(...).astype(...). This allows for\n intended overflows, aligning with the behavior of older NumPy versions.\n\nArgs:\n values: Array_like objects. E.g., a python list, tuple, or an object whose\n __array__ method returns an array.\n dtype: The desired numpy data type for the array.\n copy: Bool. If True (default), then the array data is copied. If None, a\n copy will only be made if __array__ returns a copy, if obj is a nested\n sequence, or if a copy is needed to satisfy any of the other requirements\n (dtype, order, etc.). 
Note that any copy of the data is shallow, i.e., for\n arrays with object dtype, the new array will point to the same objects.\n For False it raises a ValueError if a copy cannot be avoided.\n order: {‘K’, ‘A’, ‘C’, ‘F’}.\n\nReturns:\n A NumPy array with the specified data type.", "source": "github_repos"} -{"code": "def __init__(self, serialized_tensors: Mapping[base.Trackable, sharding_util.Shard], registered_savers: 'RegisteredSaversDict | None'=None, call_with_mapped_captures: 'MappedCapturesCallable | None'=None):\n self._shardable_tensors_by_task: MutableMapping[device_lib.DeviceSpec, MutableSequence[sharding_util.ShardableTensor]] = {}\n self._keys_to_restore_fn: MutableMapping[TensorKeyAndSliceSpec, RestoreFn] = {}\n self._restore_fn_to_keys: MutableMapping[RestoreFn, MutableSequence[TensorKeyAndSliceSpec]] = {}\n unique_tasks = set()\n for obj, tensor_dict in serialized_tensors.items():\n restore_fn = _restore_noop if obj is None else obj._restore_from_tensors\n for checkpoint_key, tensor_slice_dict in tensor_dict.items():\n if not isinstance(tensor_slice_dict, dict):\n tensor_slice_dict = {'': tensor_slice_dict}\n for slice_spec, tensor_save_spec in tensor_slice_dict.items():\n tensor_value = None\n if not isinstance(tensor_save_spec, saveable_object.SaveSpec):\n tensor_value = tensor_save_spec\n tensor_save_spec = saveable_object.SaveSpec(tensor=tensor_value, slice_spec=slice_spec, name=checkpoint_key, dtype=tensor_save_spec.dtype, device=tensor_save_spec.device)\n if (checkpoint_key, slice_spec) in self._keys_to_restore_fn:\n raise ValueError('Received multiple tensors with the same checkpoint key and slice spec. This is invalid because one will overwrite the other in the checkpoint. This indicates a bug in the Checkpoint key-generation.')\n self._keys_to_restore_fn[checkpoint_key, slice_spec] = restore_fn\n self._restore_fn_to_keys.setdefault(restore_fn, []).append((checkpoint_key, slice_spec))\n if isinstance(tensor_save_spec.device, str):\n device = device_lib.DeviceSpec.from_string(tensor_save_spec.device)\n task = device_lib.DeviceSpec.from_string(saveable_object_util.set_cpu0(tensor_save_spec.device))\n else:\n device = tensor_save_spec.device\n task = device_lib.DeviceSpec.from_string(saveable_object_util.set_cpu0(device.to_string()))\n self._shardable_tensors_by_task.setdefault(task, []).append(sharding_util.ShardableTensor(_tensor_save_spec=tensor_save_spec, tensor=tensor_value, dtype=tensor_save_spec.dtype, device=device, name=tensor_save_spec.name, shape=None, slice_spec=slice_spec.strip(), checkpoint_key=checkpoint_key, trackable=obj))\n unique_tasks.add(saveable_object_util.set_cpu0(device.to_string()))\n self._num_unique_tasks = len(unique_tasks)\n self._registered_savers = {}\n if registered_savers:\n for registered_name, trackables in registered_savers.items():\n save_fn = _get_mapped_registered_save_fn(registration.get_save_function(registered_name), trackables, call_with_mapped_captures)\n restore_fn = _get_mapped_registered_restore_fn(registration.get_restore_function(registered_name), trackables, call_with_mapped_captures)\n self._registered_savers[registered_name] = (save_fn, restore_fn)", "docstring": "Specify a list of `SaveableObject`s to save and restore.\n\nArgs:\n serialized_tensors: A dictionary mapping `Trackable` to a tensor dict,\n which maps checkpoint_key -> (slice_spec ->) -> Tensor/SaveSpec. 
The\n `Trackable` key is used to get the `restore_from_tensors` function,\n and may be `None` if the tensor is not meant to be restored.\n registered_savers: A dictionary mapping `registration.RegisteredSaver`\n namedtuples to a dictionary of named Trackables. The keys of the\n Trackable dictionary are string names that uniquely identify the\n Trackable in the checkpoint.\n call_with_mapped_captures: TODO", "source": "github_repos"} -{"code": "def from_proto(saver_def, import_scope=None):\n return Saver(saver_def=saver_def, name=import_scope)", "docstring": "Returns a `Saver` object created from `saver_def`.\n\nArgs:\n saver_def: a `SaverDef` protocol buffer.\n import_scope: Optional `string`. Name scope to use.\n\nReturns:\n A `Saver` built from saver_def.", "source": "github_repos"} -{"code": "def data_gen_sig1() -> repr_dataset.RepresentativeDataset:\n for _ in range(4):\n yield {'matmul_input': random_ops.random_uniform(shape=(1, 4))}", "docstring": "Generates tuple-style samples for signature 'sig1'.\n\n The first element of the tuple identifies the signature key the input data\n is for.\n\nYields:\n Representative sample for 'sig1'.", "source": "github_repos"} -{"code": "def __init__(self, config: Mask2FormerConfig, weight_dict: Dict[str, float]):\n super().__init__()\n requires_backends(self, ['scipy'])\n self.num_labels = config.num_labels\n self.weight_dict = weight_dict\n self.eos_coef = config.no_object_weight\n empty_weight = torch.ones(self.num_labels + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer('empty_weight', empty_weight)\n self.num_points = config.train_num_points\n self.oversample_ratio = config.oversample_ratio\n self.importance_sample_ratio = config.importance_sample_ratio\n self.matcher = Mask2FormerHungarianMatcher(cost_class=1.0, cost_dice=config.dice_weight, cost_mask=config.mask_weight, num_points=self.num_points)", "docstring": "The Mask2Former Loss. The loss is computed very similar to DETR. The process happens in two steps: 1) we\n compute hungarian assignment between ground truth masks and the outputs of the model 2) we supervise each pair\n of matched ground-truth / prediction (supervise class and mask)\n\nArgs:\n config (`Mask2FormerConfig`):\n The configuration for Mask2Former model also containing loss calculation specific parameters.\n weight_dict (`Dict[str, float]`):\n A dictionary of weights to be applied to the different losses.", "source": "github_repos"} -{"code": "def Approve(self, request, global_params=None):\n config = self.GetMethodConfig('Approve')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Approves or rejects a pending build. If approved, the returned LRO will be analogous to the LRO returned from a CreateBuild call. 
If rejected, the returned LRO will be immediately done.\n\nArgs:\n request: (CloudbuildProjectsBuildsApproveRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\n\nReturns:\n (Operation) The response message.", "source": "github_repos"} -{"code": "def parse_ranges(range_string):\n range_string = range_string.strip()\n if not range_string:\n return []\n if 'inf' in range_string:\n range_string = re.sub('inf', repr(sys.float_info.max), range_string)\n ranges = ast.literal_eval(range_string)\n if isinstance(ranges, list) and (not isinstance(ranges[0], list)):\n ranges = [ranges]\n for item in ranges:\n if len(item) != 2:\n raise ValueError('Incorrect number of elements in range')\n elif not isinstance(item[0], (int, float)):\n raise ValueError('Incorrect type in the 1st element of range: %s' % type(item[0]))\n elif not isinstance(item[1], (int, float)):\n raise ValueError('Incorrect type in the 2nd element of range: %s' % type(item[0]))\n return ranges", "docstring": "Parse a string representing numerical range(s).\n\nArgs:\n range_string: (str) A string representing a numerical range or a list of\n them. For example:\n \"[-1.0,1.0]\", \"[-inf, 0]\", \"[[-inf, -1.0], [1.0, inf]]\"\n\nReturns:\n (list of list of float) A list of numerical ranges parsed from the input\n string.\n\nRaises:\n ValueError: If the input doesn't represent a range or a list of ranges.", "source": "github_repos"} -{"code": "def is_tf_type(x):\n return isinstance(x, tf_type_classes)", "docstring": "Checks whether `x` is a TF-native type that can be passed to many TF ops.\n\n Use `is_tensor` to differentiate types that can ingested by TensorFlow ops\n without any conversion (e.g., `tf.Tensor`, `tf.SparseTensor`, and\n `tf.RaggedTensor`) from types that need to be converted into tensors before\n they are ingested (e.g., numpy `ndarray` and Python scalars).\n\n For example, in the following code block:\n\n ```python\n if not tf.is_tensor(t):\n t = tf.convert_to_tensor(t)\n return t.shape, t.dtype\n ```\n\n we check to make sure that `t` is a tensor (and convert it if not) before\n accessing its `shape` and `dtype`. 
(But note that not all TensorFlow native\n types have shapes or dtypes; `tf.data.Dataset` is an example of a TensorFlow\n native type that has neither shape nor dtype.)\n\nArgs:\n x: A python object to check.\n\nReturns:\n `True` if `x` is a TensorFlow-native type.", "source": "github_repos"} -{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor:\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n present_key_value = present_key_value + cross_attn_present_key_value\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n if use_cache:\n outputs += (present_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (`torch.FloatTensor`):\n cross attention input to the layer of shape `(batch, seq_len, embed_dim)`\n encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of\n size 
`(decoder_attention_heads,)`.\n past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.", "source": "github_repos"} -{"code": "def _find_executable_or_die(executable_name: str, executable_path: Optional[str]=None) -> str:\n if executable_path:\n return str(pathlib.Path(executable_path).resolve(strict=True))\n resolved_path_to_exe = _find_executable(executable_name)\n if resolved_path_to_exe is None:\n raise RuntimeError(f'Could not find executable `{executable_name}`! Please change your $PATH or pass the path directly like`--{executable_name}_path=path/to/executable.')\n logging.info('Found path to %s at %s', executable_name, resolved_path_to_exe)\n return resolved_path_to_exe", "docstring": "Finds executable and resolves symlinks or raises RuntimeError.\n\n Resolving symlinks is sometimes necessary for finding system headers.\n\nArgs:\n executable_name: The name of the executable that we want to find.\n executable_path: If not None, the path to the executable.\n\nReturns:\n The path to the executable we are looking for, after symlinks are resolved.\n\nRaises:\n RuntimeError: if path to the executable cannot be found.", "source": "github_repos"} -{"code": "def AddBackpropAccumulatedValue(self, history_value, value, dead_branch=False):\n history_ctxt = history_value.op._get_control_flow_context()\n cond_ctxt = None\n value_ctxt = value.op._get_control_flow_context()\n while value_ctxt and value_ctxt != history_ctxt:\n if isinstance(value_ctxt, control_flow_ops.CondContext):\n cond_ctxt = value_ctxt\n break\n value_ctxt = value_ctxt.outer_context\n with ops.control_dependencies(None):\n self.grad_context.Enter()\n if cond_ctxt:\n grad_state = self\n pred = None\n while pred is None and grad_state:\n pred = grad_state.history_map.get(cond_ctxt.pred.name)\n grad_state = grad_state.outer_grad_state\n if pred is None:\n pred = cond_ctxt.pred\n branch = 1 - cond_ctxt.branch if dead_branch else cond_ctxt.branch\n history_value = control_flow_ops._SwitchRefOrTensor(history_value, pred)[branch]\n pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.base_dtype)\n pop.set_shape(value.get_shape())\n self.grad_context.Exit()\n parallel_iterations = self.grad_context.parallel_iterations\n if parallel_iterations > 1:\n self.grad_sync._add_control_input(pop.op)\n return pop", "docstring": "Add the getter for an accumulated value in the grad context.\n\n This is added to the backprop loop. Called in the grad context to\n get the value of an accumulated value. 
The stack pop op must be guarded\n by the pred of the controlling cond.\n\nArgs:\n history_value: The history (a stack) of a value.\n value: The value that is pushed onto the stack.\n dead_branch: True iff the tensor is on a dead branch of a cond.\n\nReturns:\n The current value (the top of the stack).", "source": "github_repos"} -{"code": "def _consume_line(line_info, state):\n _update_section_state(line_info, state)\n if state.section.title is None:\n if state.summary.permitted:\n if line_info.remaining:\n state.summary.lines.append(line_info.remaining)\n elif state.summary.lines:\n state.summary.permitted = False\n else:\n state.description.lines.append(line_info.remaining_raw)\n else:\n state.summary.permitted = False\n if state.section.new and state.section.format == Formats.RST:\n directive = _get_directive(line_info)\n directive_tokens = directive.split()\n if state.section.title == Sections.ARGS:\n name = directive_tokens[-1]\n arg = _get_or_create_arg_by_name(state, name, is_kwarg=directive_tokens[0] == 'key')\n if len(directive_tokens) == 3:\n arg.type.lines.append(directive_tokens[1])\n state.current_arg = arg\n elif state.section.title == Sections.TYPE:\n name = directive_tokens[-1]\n arg = _get_or_create_arg_by_name(state, name)\n state.current_arg = arg\n if state.section.format == Formats.NUMPY and _line_is_hyphens(line_info.remaining):\n return\n if state.section.title == Sections.ARGS:\n if state.section.format == Formats.GOOGLE:\n _consume_google_args_line(line_info, state)\n elif state.section.format == Formats.RST:\n state.current_arg.description.lines.append(line_info.remaining.strip())\n elif state.section.format == Formats.NUMPY:\n line_stripped = line_info.remaining.strip()\n if _is_arg_name(line_stripped):\n arg = _get_or_create_arg_by_name(state, line_stripped)\n state.current_arg = arg\n elif _line_is_numpy_parameter_type(line_info):\n possible_args, type_data = line_stripped.split(':', 1)\n arg_names = _as_arg_names(possible_args)\n if arg_names:\n for arg_name in arg_names:\n arg = _get_or_create_arg_by_name(state, arg_name)\n arg.type.lines.append(type_data)\n state.current_arg = arg\n elif state.current_arg:\n state.current_arg.description.lines.append(line_info.remaining.strip())\n else:\n pass\n elif state.current_arg:\n state.current_arg.description.lines.append(line_info.remaining.strip())\n else:\n pass\n elif state.section.title == Sections.RETURNS:\n state.returns.lines.append(line_info.remaining.strip())\n elif state.section.title == Sections.YIELDS:\n state.yields.lines.append(line_info.remaining.strip())\n elif state.section.title == Sections.RAISES:\n state.raises.lines.append(line_info.remaining.strip())\n elif state.section.title == Sections.TYPE:\n if state.section.format == Formats.RST:\n assert state.current_arg is not None\n state.current_arg.type.lines.append(line_info.remaining.strip())\n else:\n pass", "docstring": "Consumes one line of text, updating the state accordingly.\n\n When _consume_line is called, part of the line may already have been processed\n for header information.\n\nArgs:\n line_info: Information about the current and next line of the docstring.\n state: The state of the docstring parser.", "source": "github_repos"} -{"code": "def decode_raw_v1(input_bytes=None, out_type=None, little_endian=True, name=None, bytes=None):\n input_bytes = deprecation.deprecated_argument_lookup('input_bytes', input_bytes, 'bytes', bytes)\n if out_type is None:\n raise ValueError(\"decode_raw_v1() missing 1 positional argument: 'out_type'\")\n 
return gen_parsing_ops.decode_raw(input_bytes, out_type, little_endian=little_endian, name=name)", "docstring": "Convert raw byte strings into tensors.\n\nArgs:\n input_bytes:\n Each element of the input Tensor is converted to an array of bytes.\n out_type:\n `DType` of the output. Acceptable types are `half`, `float`, `double`,\n `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`.\n little_endian:\n Whether the `input_bytes` data is in little-endian format. Data will be\n converted into host byte order if necessary.\n name: A name for the operation (optional).\n bytes: Deprecated parameter. Use `input_bytes` instead.\n\nReturns:\n A `Tensor` object storing the decoded bytes.", "source": "github_repos"} -{"code": "def __init__(self, shape=None, dtype=dtypes.float32):\n self._shape = tensor_shape.as_shape(shape)\n self._dtype = dtypes.as_dtype(dtype)", "docstring": "Constructs a type specification for a `tf.sparse.SparseTensor`.\n\nArgs:\n shape: The dense shape of the `SparseTensor`, or `None` to allow any dense\n shape.\n dtype: `tf.DType` of values in the `SparseTensor`.", "source": "github_repos"} -{"code": "def report_build(config, auth, body):\n report = report_get(config, auth, name=body['metadata']['title'])\n if not report:\n body.setdefault('schedule', {})\n body['schedule'].setdefault('nextRunTimezoneCode', str(config.timezone))\n body['schedule'].setdefault('frequency', 'DAILY')\n if body['schedule']['frequency'] != 'ONE_TIME':\n body['schedule'].setdefault('startDate', {'day': config.date.day, 'month': config.date.month, 'year': config.date.year})\n body['schedule'].setdefault('endDate', {'day': config.date.day, 'month': config.date.month, 'year': config.date.year + 1})\n report = API_DBM(config, auth).queries().create(body=body).execute()\n API_DBM(config, auth).queries().run(queryId=report['queryId'], synchronous=False).execute()\n elif config.verbose:\n print('DBM Report Exists:', body['metadata']['title'])\n return report", "docstring": "Creates a DBM report given a JSON definition.\n\n The report will be automatically run the first time.\n\n The body JSON provided will have the following fields added if not present:\n * schedule - set to run daily and expire in one year.\n\nArgs:\n * auth: (string) Either user or service.\n * body: (json) As defined in:\n https://developers.google.com/doubleclick-advertisers/v3.2/reports#resource\n\nReturns:\n * JSON definition of report created or existing.", "source": "github_repos"} -{"code": "def from_numbers(cls, dna_values: List[Union[int, float, str]], dna_spec: DNASpec) -> 'DNA':\n context = dict(index=0)\n\n def _next_decision():\n if context['index'] >= len(dna_values):\n raise ValueError(f'The input {dna_values!r} is too short for {dna_spec!r}.')\n decision = dna_values[context['index']]\n context['index'] += 1\n return decision\n\n def _bind_decisions(dna_spec):\n value = None\n children = None\n if dna_spec.is_space:\n children = [_bind_decisions(elem) for elem in dna_spec.elements]\n elif dna_spec.is_categorical:\n if dna_spec.num_choices == 1:\n value = _next_decision()\n if value < 0 or value >= len(dna_spec.candidates):\n raise ValueError(f\"Candidate index out of range at choice '{dna_spec.name or dna_spec.id}'. 
Index={value}, Number of candidates={len(dna_spec.candidates)}.\")\n children = [_bind_decisions(dna_spec.candidates[value])]\n else:\n children = [_bind_decisions(spec) for spec in dna_spec.choice_specs]\n else:\n value = _next_decision()\n return DNA(value, children, spec=dna_spec)\n dna = _bind_decisions(dna_spec)\n if context['index'] != len(dna_values):\n end_pos = context['index']\n raise ValueError(f'The input {dna_values!r} is too long for {dna_spec!r}. Remaining: {dna_values[end_pos:]!r}.')\n return dna", "docstring": "Create a DNA from a flattened list of dna values.\n\nArgs:\n dna_values: A list of DNA values.\n dna_spec: DNASpec that interprets the dna values.\n\nReturns:\n A DNA object.", "source": "github_repos"} -{"code": "def main():\n pip_package_dependencies = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', PIP_PACKAGE_QUERY_EXPRESSION])\n if isinstance(pip_package_dependencies, bytes):\n pip_package_dependencies = pip_package_dependencies.decode('utf-8')\n pip_package_dependencies_list = pip_package_dependencies.strip().split('\\n')\n pip_package_dependencies_list = [x.split()[0] for x in pip_package_dependencies_list]\n print('Pip package superset size: %d' % len(pip_package_dependencies_list))\n tf_py_test_dependencies = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', PY_TEST_QUERY_EXPRESSION])\n if isinstance(tf_py_test_dependencies, bytes):\n tf_py_test_dependencies = tf_py_test_dependencies.decode('utf-8')\n tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split('\\n')\n tf_py_test_dependencies_list = [x.split()[0] for x in tf_py_test_dependencies.strip().split('\\n')]\n print('Pytest dependency subset size: %d' % len(tf_py_test_dependencies_list))\n missing_dependencies = []\n ignore_extensions = ['_test', '_test.py', '_test_cpu', '_test_cpu.py', '_test_gpu', '_test_gpu.py', '_test_lib']\n ignored_files_count = 0\n denylisted_dependencies_count = len(DEPENDENCY_DENYLIST)\n for dependency in tf_py_test_dependencies_list:\n if dependency and dependency.startswith('//tensorflow'):\n ignore = False\n if any((dependency.endswith(ext) for ext in ignore_extensions)):\n ignore = True\n ignored_files_count += 1\n if not (ignore or dependency in pip_package_dependencies_list or dependency in DEPENDENCY_DENYLIST):\n missing_dependencies.append(dependency)\n print('Ignored files count: %d' % ignored_files_count)\n print('Denylisted dependencies count: %d' % denylisted_dependencies_count)\n if missing_dependencies:\n print('Missing the following dependencies from pip_packages:')\n for missing_dependency in missing_dependencies:\n print('\\nMissing dependency: %s ' % missing_dependency)\n print('Affected Tests:')\n rdep_query = 'rdeps(kind(py_test, %s), %s)' % (' + '.join(PYTHON_TARGETS), missing_dependency)\n affected_tests = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', rdep_query])\n affected_tests_list = affected_tests.split('\\n')[:-2]\n print('\\n'.join(affected_tests_list))\n raise RuntimeError('\\n One or more added test dependencies are not in the pip package.\\nIf these test dependencies need to be in TensorFlow pip package, please add them to //tensorflow/tools/pip_package/BUILD.\\nElse add no_pip tag to the test.')\n else:\n print('TEST PASSED')", "docstring": "This script runs the pip smoke test.\n\nRaises:\n RuntimeError: If any dependencies for py_tests exist in subSet\n\n Prerequisites:\n 1. Bazel is installed.\n 2. 
Running in github repo of tensorflow.\n 3. Configure has been run.", "source": "github_repos"} -{"code": "def __init__(self, config: Idefics3Config):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([Idefics3EncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n [`Idefics3EncoderLayer`].\n\nArgs:\n config: Idefics3Config", "source": "github_repos"} -{"code": "def get(self, path):\n if self.mode != 'r':\n raise ValueError('`get` is only allowed in read mode.')\n self._h5_entry_path = path\n self._h5_entry_group = {}\n if not path:\n if 'vars' in self.h5_file:\n self._h5_entry_group = self.h5_file['vars']\n elif path in self.h5_file and 'vars' in self.h5_file[path]:\n self._h5_entry_group = self.h5_file[path]['vars']\n elif '_layer_checkpoint_dependencies' in self.h5_file:\n path = path.replace('layers', '_layer_checkpoint_dependencies')\n if path in self.h5_file and 'vars' in self.h5_file[path]:\n self._h5_entry_group = self.h5_file[path]['vars']\n self._h5_entry_initialized = True\n return self", "docstring": "Get the H5 entry group.\n\n This method is only available in read mode.\n\nArgs:\n path: `str`. The variable path.", "source": "github_repos"} -{"code": "def policy_scope(policy):\n old_policy = _global_policy\n try:\n set_global_policy(policy)\n yield\n finally:\n set_global_policy(old_policy)", "docstring": "A context manager that sets the global Policy under it.\n\nArgs:\n policy: A Policy, or a string that will be converted to a Policy..\n\nYields:\n Nothing.", "source": "github_repos"} -{"code": "def GetTestConfigs(include_nchw_vect_c=False, one_dimensional=False):\n if one_dimensional:\n test_configs = [('NWC', False), ('NWC', True)]\n if test.is_gpu_available(cuda_only=True):\n test_configs += [('NCW', True)]\n return test_configs\n test_configs = [('NHWC', False), ('NHWC', True)]\n if not test.is_gpu_available(cuda_only=True):\n tf_logging.info('NCHW and NCHW_VECT_C tests skipped because not run with --config=cuda or no GPUs available.')\n return test_configs\n test_configs += [('NCHW', True)]\n if include_nchw_vect_c:\n if test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=(6, 1)):\n test_configs += [('NCHW_VECT_C', True)]\n else:\n tf_logging.info('NCHW_VECT_C test skipped because no GPUs with compute capability >= 6.1 are available.')\n return test_configs", "docstring": "Get all the valid tests configs to run.\n\nArgs:\n include_nchw_vect_c: Whether to include NCHW_VECT_C in the test configs.\n one_dimensional: If it's a 1D test\n\nReturns:\n all the valid test configs as tuples of data_format and use_gpu.", "source": "github_repos"} -{"code": "def partition_graphs(self):\n if not self._debug_graphs:\n raise LookupError('No partition graphs have been loaded.')\n return [self._debug_graphs[key].debug_graph_def for key in self._debug_graphs]", "docstring": "Get the partition graphs.\n\nReturns:\n Partition graphs as a list of GraphDef.\n\nRaises:\n LookupError: If no partition graphs have been loaded.", "source": "github_repos"} -{"code": "def queryString_required_ClassVersion(strList):\n\n\tdef _dec(function):\n\t\t@wraps(function)\n\t\tdef _wrap(classInstance, request, *args, **kwargs):\n\t\t\tfor i in strList:\n\t\t\t\tif i not in request.GET:\n\t\t\t\t\traise Http404(\"api does not exist\")\n\t\t\treturn function(classInstance, request, *args, **kwargs)\n\t\treturn 
_wrap\n\treturn _dec", "docstring": "An decorator checking whether queryString key is valid or not\n\nArgs:\n str: allowed queryString key\n\nReturns:\n if contains invalid queryString key, it will raise exception.", "source": "juraj_google_style"} -{"code": "def ignore_path(path):\n\n ignore = False\n for name in ['.tox', 'dist', 'build', 'node_modules', 'htmlcov']:\n if path.find(name) >= 0:\n ignore = True\n break\n return ignore", "docstring": "Verify whether to ignore a path.\n\nArgs:\n path (str): path to check.\n\nReturns:\n bool: True when to ignore given path.", "source": "juraj_google_style"} -{"code": "def claim(self, unclaimed_file_readers):\n\n claimed_vcf_readers = []\n for caller in self._callers:\n (unclaimed_file_readers,\n translated_vcf_readers) = caller.claim(unclaimed_file_readers)\n claimed_vcf_readers.extend(translated_vcf_readers)\n\n return unclaimed_file_readers, claimed_vcf_readers", "docstring": "Allows each caller to claim incoming files as they are recognized.\n\nArgs:\n unclaimed_file_readers: Usually, all files in the input dir.\n\nReturns:\n A tuple of unclaimed file readers and claimed VcfReaders. The\n presence of any unclaimed file readers could indicate stray files\n in the input dir.", "source": "juraj_google_style"} -{"code": "def change_t(self, t):\n\n t = super().change_t(t)\n self.__now_cycles += 1\n if self.__now_cycles % self.__reannealing_per == 0:\n t = t * self.__thermostat\n\n if t < self.__t_min:\n t = self.__t_default\n return t", "docstring": "Change temperature.\n\n Override.\n\nArgs:\n t: Now temperature.\n\nReturns:\n Next temperature.", "source": "juraj_google_style"} -{"code": "def connection_made(self, transport):\n\n self.transport = transport\n self.responders = [self.make_responder(self)]\n\n try:\n good_func = callable(self.responders[0].on_data)\n except AttributeError:\n good_func = False\n\n if not good_func:\n err_str = \"Provided responder MUST implement an 'on_data' method\"\n raise TypeError(err_str)\n\n log_info = (id(self), self.remote_hostname, self.remote_port)\n log.info(\"{:d} connection from {}:{}\", *log_info)", "docstring": "(asyncio.Protocol member)\n\n Called upon when there is a new socket connection.\n This creates a new responder (as determined by the member\n 'responder_type') and stores in a list.\n Incoming data from this connection will always call on_data\n to the last element of this list.\n\nArgs:\n transport (asyncio.Transport): The Transport handling the\n socket communication", "source": "juraj_google_style"} -{"code": "def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]\n\n iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)\n if iso3 is not None:\n return cls.get_country_name_from_iso3(iso3, exception=exception)\n return None", "docstring": "Get country name from ISO2 code\n\nArgs:\n iso2 (str): ISO2 code for which to get country name\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. 
Defaults to None.\n\nReturns:\n Optional[str]: Country name", "source": "juraj_google_style"} -{"code": "def devno_free_if_allocated(self, devno):\n\n devno_int = int(devno, 16)\n self._devno_pool.free_if_allocated(devno_int)", "docstring": "Free a device number allocated with :meth:`devno_alloc`.\n\n If the device number is not currently allocated or not in the pool\n range, nothing happens.\n\n Parameters:\n devno (string): The device number as four hexadecimal digits.", "source": "juraj_google_style"} -{"code": "def get_varname_from_locals(val, locals_, default='varname-not-found',\n strict=False, cmpfunc_=operator.is_):\n\n if val is None or isinstance(val, (int, float, bool)):\n # Cannot work on primative types\n return default\n try:\n for count, val_ in enumerate(six.itervalues(locals_)):\n if cmpfunc_(val, val_):\n index_ = count\n varname = six.text_type(list(locals_.keys())[index_])\n except NameError:\n varname = default\n if strict:\n raise\n return varname", "docstring": "Finds the string name which has where locals_[name] is val\n\n Check the varname is in the parent namespace\n This will only work with objects not primatives\n\nArgs:\n val (): some value\n locals_ (dict): local dictionary to search\n default (str):\n strict (bool):\n\nReturns:\n str: the varname which is Val (if it exists)", "source": "juraj_google_style"} -{"code": "def get_subscribers(object_type: str) -> List[str]:\n\n return DB.get_list(_keys.subscribers(object_type))", "docstring": "Get the list of subscribers to events of the object type.\n\nArgs:\n object_type (str): Type of object.\n\nReturns:\n List[str], list of subscriber names.", "source": "juraj_google_style"} -{"code": "def group_device_names(devices, group_size):\n\n num_devices = len(devices)\n if group_size > num_devices:\n raise ValueError(\n \"only %d devices, but group_size=%d\" % (num_devices, group_size))\n num_groups = (\n num_devices // group_size + (1 if\n (num_devices % group_size != 0) else 0))\n groups = [[] for i in range(num_groups)]\n for i in range(0, num_groups * group_size):\n groups[i % num_groups].append(devices[i % num_devices])\n return groups", "docstring": "Group device names into groups of group_size.\n\nArgs:\n devices: list of strings naming devices.\n group_size: int >= 1\n\nReturns:\n list of lists of devices, where each inner list is group_size long,\n and each device appears at least once in an inner list. If\n len(devices) % group_size = 0 then each device will appear\n exactly once.\n\nRaises:\n ValueError: group_size > len(devices)", "source": "juraj_google_style"} -{"code": "def poll(self, channel_id=None, json=None, **kwargs): # pragma: no cover\n\n path = \"/event-service/v1/channels/{}/poll\".format(channel_id)\n r = self._httpclient.request(\n method=\"POST\",\n url=self.url,\n json=json,\n path=path,\n **kwargs\n )\n return r", "docstring": "Read one or more events from a channel.\n\n Reads events (log records) from the identified channel. 
Events\n are read in chronological order.\n\nArgs:\n channel_id (str): The channel ID.\n json (dict): Payload/request body.\n **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\nReturns:\n requests.Response: Requests Response() object.\n\nExample:\n Refer to ``event_poll.py`` example.", "source": "juraj_google_style"} -{"code": "def delay(self, methodname, *args, **kwargs):\n\n ID = str(uuid.uuid4())\n self.send_query(ID, methodname, False, *args, **kwargs)\n return ID", "docstring": "调用但不要求返回结果,而是通过系统方法getresult来获取.\n\n Parameters:\n methodname (str): - 要调用的方法名\n args (Any): - 要调用的方法的位置参数\n kwargs (Any): - 要调用的方法的关键字参数", "source": "juraj_google_style"} -{"code": "def source_required(src_file):\n\n if not src_file.exists():\n return True\n\n required = True\n hash_file = src_file.with_suffix(\".hash\", depth=0)\n LOG.debug(\"Hash file location: %s\", hash_file)\n if hash_file.exists():\n new_hash = get_hash_of_dirs(src_file)\n with open(hash_file, 'r') as h_file:\n old_hash = h_file.readline()\n required = not new_hash == old_hash\n if required:\n from benchbuild.utils.cmd import rm\n rm(\"-r\", src_file)\n rm(hash_file)\n if required:\n LOG.info(\"Source required for: %s\", src_file)\n LOG.debug(\"Reason: src-exists: %s hash-exists: %s\", src_file.exists(),\n hash_file.exists())\n return required", "docstring": "Check, if a download is required.\n\nArgs:\n src_file: The filename to check for.\n src_root: The path we find the file in.\n\nReturns:\n True, if we need to download something, False otherwise.", "source": "juraj_google_style"} -{"code": "def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:\n\n examples_len = len(y_true)\n correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)])\n return correct / examples_len if examples_len else 0", "docstring": "Calculate accuracy in terms of absolute coincidence\n\nArgs:\n y_true: array of true values\n y_predicted: array of predicted values\n\nReturns:\n portion of absolutely coincidental samples", "source": "juraj_google_style"} -{"code": "def run_custom_bird_reconfigure(operation):\n\n log = logging.getLogger(PROGRAM_NAME)\n if isinstance(operation, AddOperation):\n status = 'up'\n else:\n status = 'down'\n cmd = shlex.split(operation.bird_reconfigure_cmd + \" \" + status)\n log.info(\"reconfiguring BIRD by running custom command %s\", ' '.join(cmd))\n try:\n proc = subprocess.Popen(cmd,\n start_new_session=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n _, errs = proc.communicate(\n timeout=operation.bird_reconfigure_timeout\n )\n except OSError as exc:\n log.error(\"reconfiguring BIRD failed with: %s\", exc)\n except subprocess.TimeoutExpired as exc:\n log.error(\"reconfiguring bird timed out\")\n if proc.poll() is None: # if process is still alive\n try:\n os.killpg(os.getpgid(proc.pid), signal.SIGTERM)\n except PermissionError as exc:\n log.error(\"failed to terminate custom bird command: %s\", exc)\n else:\n if proc.returncode != 0:\n log.error(\"reconfiguring BIRD failed with return code: %s and \"\n \"stderr: %s\", proc.returncode, errs)\n else:\n log.info(\"custom command successfully reconfigured Bird\")", "docstring": "Reconfigure BIRD daemon by running a custom command.\n\n It adds one argument to the command, either \"up\" or \"down\".\n If command times out then we kill it. 
In order to avoid leaving any orphan\n processes, that may have been started by the command, we start a new\n session when we invoke the command and then we kill process group of that\n session.\n\nArgs:\n operation (obj): Either a AddOperation or DeleteOperation object.", "source": "juraj_google_style"} -{"code": "def plot_probabilities_histogram(Y_p, title=None):\n\n if Y_p.ndim > 1:\n msg = (\n f\"Arg Y_p should be a 1-dimensional np.ndarray, not of shape \"\n f\"{Y_p.shape}.\"\n )\n raise ValueError(msg)\n plt.hist(Y_p, bins=20)\n plt.xlim((0, 1.025))\n plt.xlabel(\"Probability\")\n plt.ylabel(\"# Predictions\")\n if isinstance(title, str):\n plt.title(title)\n plt.show()", "docstring": "Plot a histogram from a numpy array of probabilities\n\nArgs:\n Y_p: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1])", "source": "juraj_google_style"} -{"code": "def Main(url):\n\n # The object of Web-Scraping.\n web_scrape = WebScraping()\n # Execute Web-Scraping.\n document = web_scrape.scrape(url)\n # The object of automatic summarization with N-gram.\n auto_abstractor = NgramAutoAbstractor()\n # n-gram object\n auto_abstractor.n_gram = Ngram()\n # n of n-gram\n auto_abstractor.n = 3\n # Set tokenizer. This is japanese tokenizer with MeCab.\n auto_abstractor.tokenizable_doc = MeCabTokenizer()\n # Object of abstracting and filtering document.\n abstractable_doc = TopNRankAbstractor()\n # Execute summarization.\n result_dict = auto_abstractor.summarize(document, abstractable_doc)\n\n # Output 3 summarized sentences.\n limit = 3\n i = 1\n for sentence in result_dict[\"summarize_result\"]:\n print(sentence)\n if i >= limit:\n break\n i += 1", "docstring": "Entry Point.\n\nArgs:\n url: target url.", "source": "juraj_google_style"} -{"code": "def GetAllUsers(self, pagination_size=10):\n\n next_page_token, accounts = self.rpc_helper.DownloadAccount(\n None, pagination_size)\n while accounts:\n for account in accounts:\n yield GitkitUser.FromApiResponse(account)\n next_page_token, accounts = self.rpc_helper.DownloadAccount(\n next_page_token, pagination_size)", "docstring": "Gets all user info from Gitkit server.\n\nArgs:\n pagination_size: int, how many users should be returned per request.\n The account info are retrieved in pagination.\n\nYields:\n A generator to iterate all users.", "source": "juraj_google_style"} -{"code": "def fill(self, text):\n\n def _fill(elem): # pylint: disable=missing-docstring\n elem.clear()\n elem.send_keys(text)\n\n self.map(_fill, u'fill({!r})'.format(text)).execute()", "docstring": "Set the text value of each matched element to `text`.\n\n Example usage:\n\n .. code:: python\n\n # Set the text of the first element matched by the query to \"Foo\"\n q.first.fill('Foo')\n\nArgs:\n text (str): The text used to fill the element (usually a text field or text area).\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def pull_datapackage(descriptor, name, backend, **backend_options):\n\n\n # Deprecated\n warnings.warn(\n 'Functions \"push/pull_datapackage\" are deprecated. 
'\n 'Please use \"Package\" class',\n UserWarning)\n\n # Save datapackage name\n datapackage_name = name\n\n # Get storage\n plugin = import_module('jsontableschema.plugins.%s' % backend)\n storage = plugin.Storage(**backend_options)\n\n # Iterate over tables\n resources = []\n for table in storage.buckets:\n\n # Prepare\n schema = storage.describe(table)\n base = os.path.dirname(descriptor)\n path, name = _restore_path(table)\n fullpath = os.path.join(base, path)\n\n # Write data\n helpers.ensure_dir(fullpath)\n with io.open(fullpath, 'wb') as file:\n model = Schema(deepcopy(schema))\n data = storage.iter(table)\n writer = csv.writer(file, encoding='utf-8')\n writer.writerow(model.headers)\n for row in data:\n writer.writerow(row)\n\n # Add resource\n resource = {'schema': schema, 'path': path}\n if name is not None:\n resource['name'] = name\n resources.append(resource)\n\n # Write descriptor\n mode = 'w'\n encoding = 'utf-8'\n if six.PY2:\n mode = 'wb'\n encoding = None\n resources = _restore_resources(resources)\n helpers.ensure_dir(descriptor)\n with io.open(descriptor,\n mode=mode,\n encoding=encoding) as file:\n descriptor = {\n 'name': datapackage_name,\n 'resources': resources,\n }\n json.dump(descriptor, file, indent=4)\n return storage", "docstring": "Pull Data Package from storage.\n\n All parameters should be used as keyword arguments.\n\nArgs:\n descriptor (str): path where to store descriptor\n name (str): name of the pulled datapackage\n backend (str): backend name like `sql` or `bigquery`\n backend_options (dict): backend options mentioned in backend docs", "source": "juraj_google_style"} -{"code": "def mapped_repr_stripping_underscores(\n obj: Any, attrnames: List[str],\n with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:\n\n attributes = []\n for attr_name in attrnames:\n if attr_name.startswith('_'):\n init_param_name = attr_name[1:]\n else:\n init_param_name = attr_name\n attributes.append((attr_name, init_param_name))\n return mapped_repr(obj, attributes, with_addr=with_addr, joiner=joiner)", "docstring": "Convenience function for :func:`__repr__`.\n Here, you pass a list of internal attributes, and it assumes that the\n :func:`__init__` parameter names have the leading underscore dropped.\n\nArgs:\n obj: object to display\n attrnames: list of attribute names\n with_addr: include the memory address of ``obj``\n joiner: string with which to join the elements\n\nReturns:\n string: :func:`repr`-style representation", "source": "juraj_google_style"} -{"code": "def pprint_cell(self, row, col):\n\n ndims = self.ndims\n if col >= self.cols:\n raise Exception(\"Maximum column index is %d\" % self.cols-1)\n elif row >= self.rows:\n raise Exception(\"Maximum row index is %d\" % self.rows-1)\n elif row == 0:\n if col >= ndims:\n if self.vdims:\n return self.vdims[col - ndims].pprint_label\n else:\n return ''\n return self.kdims[col].pprint_label\n else:\n dim = self.get_dimension(col)\n return dim.pprint_value(self.iloc[row-1, col])", "docstring": "Formatted contents of table cell.\n\nArgs:\n row (int): Integer index of table row\n col (int): Integer index of table column\n\nReturns:\n Formatted table cell contents", "source": "juraj_google_style"} -{"code": "def get_upstream_artifacts_full_paths_per_task_id(context):\n\n upstream_artifacts = context.task['payload']['upstreamArtifacts']\n task_ids_and_relative_paths = [\n (artifact_definition['taskId'], artifact_definition['paths'])\n for artifact_definition in upstream_artifacts\n ]\n\n optional_artifacts_per_task_id = 
get_optional_artifacts_per_task_id(upstream_artifacts)\n\n upstream_artifacts_full_paths_per_task_id = {}\n failed_paths_per_task_id = {}\n for task_id, paths in task_ids_and_relative_paths:\n for path in paths:\n try:\n path_to_add = get_and_check_single_upstream_artifact_full_path(context, task_id, path)\n add_enumerable_item_to_dict(\n dict_=upstream_artifacts_full_paths_per_task_id,\n key=task_id, item=path_to_add\n )\n except ScriptWorkerTaskException:\n if path in optional_artifacts_per_task_id.get(task_id, []):\n log.warning('Optional artifact \"{}\" of task \"{}\" not found'.format(path, task_id))\n add_enumerable_item_to_dict(\n dict_=failed_paths_per_task_id,\n key=task_id, item=path\n )\n else:\n raise\n\n return upstream_artifacts_full_paths_per_task_id, failed_paths_per_task_id", "docstring": "List the downloaded upstream artifacts.\n\nArgs:\n context (scriptworker.context.Context): the scriptworker context.\n\nReturns:\n dict, dict: lists of the paths to upstream artifacts, sorted by task_id.\n First dict represents the existing upstream artifacts. The second one\n maps the optional artifacts that couldn't be downloaded\n\nRaises:\n scriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.", "source": "juraj_google_style"} -{"code": "def rho_hv(scatterer):\n\n Z = scatterer.get_Z()\n a = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2\n b = (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n c = (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])\n return np.sqrt(a / (b*c))", "docstring": "Copolarized correlation (rho_hv) for the current setup.\n\nArgs:\n scatterer: a Scatterer instance.\n\nReturns:\n rho_hv.", "source": "juraj_google_style"} -{"code": "def transform(self, args):\n\n if self.parse_error():\n # Write an empty hash so next run will check the config file against the entire command table again\n AliasManager.write_alias_config_hash(empty_hash=True)\n return args\n\n # Only load the entire command table if it detects changes in the alias config\n if self.detect_alias_config_change():\n self.load_full_command_table()\n self.collided_alias = AliasManager.build_collision_table(self.alias_table.sections())\n build_tab_completion_table(self.alias_table)\n else:\n self.load_collided_alias()\n\n transformed_commands = []\n alias_iter = enumerate(args, 1)\n for alias_index, alias in alias_iter:\n is_collided_alias = alias in self.collided_alias and alias_index in self.collided_alias[alias]\n # Check if the current alias is a named argument\n # index - 2 because alias_iter starts counting at index 1\n is_named_arg = alias_index > 1 and args[alias_index - 2].startswith('-')\n is_named_arg_flag = alias.startswith('-')\n excluded_commands = is_alias_command(['remove', 'export'], transformed_commands)\n if not alias or is_collided_alias or is_named_arg or is_named_arg_flag or excluded_commands:\n transformed_commands.append(alias)\n continue\n\n full_alias = self.get_full_alias(alias)\n\n if self.alias_table.has_option(full_alias, 'command'):\n cmd_derived_from_alias = self.alias_table.get(full_alias, 'command')\n telemetry.set_alias_hit(full_alias)\n else:\n transformed_commands.append(alias)\n continue\n\n pos_args_table = build_pos_args_table(full_alias, args, alias_index)\n if pos_args_table:\n logger.debug(POS_ARG_DEBUG_MSG, full_alias, cmd_derived_from_alias, pos_args_table)\n transformed_commands += render_template(cmd_derived_from_alias, pos_args_table)\n\n # Skip the next arg(s) because they have been already consumed as a positional argument above\n for pos_arg in 
pos_args_table: # pylint: disable=unused-variable\n next(alias_iter)\n else:\n logger.debug(DEBUG_MSG, full_alias, cmd_derived_from_alias)\n transformed_commands += shlex.split(cmd_derived_from_alias)\n\n return self.post_transform(transformed_commands)", "docstring": "Transform any aliases in args to their respective commands.\n\nArgs:\n args: A list of space-delimited command input extracted directly from the console.\n\nReturns:\n A list of transformed commands according to the alias configuration file.", "source": "juraj_google_style"} -{"code": "def _finish_disconnection_action(self, action):\n\n\n success = action.data['success']\n conn_key = action.data['id']\n\n if self._get_connection_state(conn_key) != self.Disconnecting:\n self._logger.error(\"Invalid finish_disconnection action on a connection whose state is not Disconnecting, conn_key=%s\", str(conn_key))\n return\n\n # Cannot be None since we checked above to make sure it exists\n data = self._get_connection(conn_key)\n callback = data['callback']\n\n conn_id = data['conn_id']\n int_id = data['int_id']\n\n if success is False:\n reason = action.data['reason']\n if reason is None:\n reason = \"No reason was given\"\n\n data['state'] = self.Idle\n data['microstate'] = None\n data['callback'] = None\n callback(conn_id, self.id, False, reason)\n else:\n del self._connections[conn_id]\n del self._int_connections[int_id]\n callback(conn_id, self.id, True, None)", "docstring": "Finish a disconnection attempt\n\n There are two possible outcomes:\n - if we were successful at disconnecting, we transition to disconnected\n - if we failed at disconnecting, we transition back to idle\n\nArgs:\n action (ConnectionAction): the action object describing what we are\n disconnecting from and what the result of the operation was", "source": "juraj_google_style"} -{"code": "def _find_address_range(addresses):\n\n it = iter(addresses)\n first = last = next(it)\n for ip in it:\n if ip._ip != last._ip + 1:\n yield first, last\n first = ip\n last = ip\n yield first, last", "docstring": "Find a sequence of sorted deduplicated IPv#Address.\n\nArgs:\n addresses: a list of IPv#Address objects.\n\nYields:\n A tuple containing the first and last IP addresses in the sequence.", "source": "juraj_google_style"} -{"code": "def blocks(self, name):\n\n b = self._blocks(name)\n if b:\n return b\n return self._blocks(name.replace('?>?', ' '))", "docstring": "Search for defined blocks recursively.\n Allow '>' to be ignored. 
'.a .b' == '.a > .b'\n\nArgs:\n name (string): Search term\n\nReturns:\n Block object OR False", "source": "juraj_google_style"} -{"code": "def decode_offset_response(cls, data):\n\n ((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)\n\n for _ in range(num_topics):\n (topic, cur) = read_short_string(data, cur)\n ((num_partitions,), cur) = relative_unpack('>i', data, cur)\n\n for _ in range(num_partitions):\n ((partition, error, num_offsets,), cur) = \\\n relative_unpack('>ihi', data, cur)\n\n offsets = []\n for k in range(num_offsets):\n ((offset,), cur) = relative_unpack('>q', data, cur)\n offsets.append(offset)\n\n yield OffsetResponse(topic, partition, error, tuple(offsets))", "docstring": "Decode bytes to an OffsetResponse\n\nArgs:\n data: bytes to decode", "source": "juraj_google_style"} -{"code": "def set_query_parameter(url, param_name, param_value):\n\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n\n query_params[param_name] = [param_value]\n new_query_string = urlencode(query_params, doseq=True)\n\n return urlunsplit((scheme, netloc, path, new_query_string, fragment))", "docstring": "Given a URL, set or replace a query parameter and return the modified URL.\n\nArgs:\n url: a given URL\n param_name: the parameter name to add\n param_value: the parameter value\n\nReturns:\n URL with the added parameter", "source": "juraj_google_style"} -{"code": "def increment(self, size: int):\n\n assert size >= 0, size\n\n self.files += 1\n self.size += size\n self.bandwidth_meter.feed(size)", "docstring": "Increment the number of files downloaded.\n\nArgs:\n size: The size of the file", "source": "juraj_google_style"} -{"code": "def load_steps(working_dir=None, steps_dir=None, step_file=None,\n step_list=None):\n\n if steps_dir is not None:\n step_files = glob.glob(os.path.join(steps_dir, '*.cwl'))\n elif step_file is not None:\n step_files = [step_file]\n elif step_list is not None:\n step_files = []\n for path in step_list:\n if os.path.isdir(path):\n step_files += glob.glob(os.path.join(path, '*.cwl'))\n else:\n step_files.append(path)\n else:\n step_files = []\n\n if working_dir is not None:\n step_files = sort_loading_order(step_files)\n\n steps = {}\n for f in step_files:\n if working_dir is not None:\n # Copy file to working_dir\n if not working_dir == os.path.dirname(f) and not is_url(f):\n copied_file = os.path.join(working_dir, os.path.basename(f))\n shutil.copy2(f, copied_file)\n f = copied_file\n\n # Create steps\n try:\n s = Step(f)\n steps[s.name] = s\n except (NotImplementedError, ValidationException,\n PackedWorkflowException) as e:\n logger.warning(e)\n\n return steps", "docstring": "Return a dictionary containing Steps read from file.\n\nArgs:\n steps_dir (str, optional): path to directory containing CWL files.\n step_file (str, optional): path or http(s) url to a single CWL file.\n step_list (list, optional): a list of directories, urls or local file\n paths to CWL files or directories containing CWL files.\n\nReturns:\n dict containing (name, Step) entries.", "source": "juraj_google_style"} -{"code": "def dense_message_pass(node_states, edge_matrices):\n\n batch_size, num_nodes, node_dim = common_layers.shape_list(node_states)\n\n # Stack the nodes as a big column vector.\n h_flat = tf.reshape(\n node_states, [batch_size, num_nodes * node_dim, 1], name=\"h_flat\")\n\n messages = tf.reshape(\n tf.matmul(edge_matrices, h_flat), [batch_size * num_nodes, node_dim],\n name=\"messages_matmul\")\n\n 
message_bias = tf.get_variable(\"message_bias\", shape=node_dim)\n messages = messages + message_bias\n messages = tf.reshape(messages, [batch_size, num_nodes, node_dim])\n return messages", "docstring": "Computes a_t from h_{t-1}, see bottom of page 3 in the paper.\n\nArgs:\n node_states: [B, L, D] tensor (h_{t-1})\n edge_matrices (tf.float32): [B, L*D, L*D]\n\nReturns:\n messages (tf.float32): [B, L, D] For each pair\n of nodes in the graph a message is sent along both the incoming and\n outgoing edge.", "source": "juraj_google_style"} -{"code": "def Emulation_setCPUThrottlingRate(self, rate):\n\n\t\tassert isinstance(rate, (float, int)\n\t\t ), \"Argument 'rate' must be of type '['float', 'int']'. Received type: '%s'\" % type(\n\t\t rate)\n\t\tsubdom_funcs = self.synchronous_command('Emulation.setCPUThrottlingRate',\n\t\t rate=rate)\n\t\treturn subdom_funcs", "docstring": "Function path: Emulation.setCPUThrottlingRate\n Domain: Emulation\n Method name: setCPUThrottlingRate\n\n WARNING: This function is marked 'Experimental'!\n\n Parameters:\n Required arguments:\n 'rate' (type: number) -> Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc).\n No return value.\n\n Description: Enables CPU throttling to emulate slow CPUs.", "source": "juraj_google_style"} -{"code": "def pow(self, other, axis=\"columns\", level=None, fill_value=None):\n\n return self._binary_op(\n \"pow\", other, axis=axis, level=level, fill_value=fill_value\n )", "docstring": "Pow this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\n other: The object to use to apply the pow against this.\n axis: The axis to pow over.\n level: The Multilevel index level to apply pow over.\n fill_value: The value to fill NaNs with.\n\nReturns:\n A new DataFrame with the Pow applied.", "source": "juraj_google_style"} -{"code": "def set_inode(self, ino):\n # type: (inode.Inode) -> None\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')\n self.inode = ino", "docstring": "A method to set the Inode associated with this El Torito Entry.\n\n Parameters:\n ino - The Inode object corresponding to this entry.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def _write_to_hdx(self, action, data, id_field_name, file_to_upload=None):\n # type: (str, Dict, str, Optional[str]) -> Dict\n\n file = None\n try:\n if file_to_upload:\n file = open(file_to_upload, 'rb')\n files = [('upload', file)]\n else:\n files = None\n return self.configuration.call_remoteckan(self.actions()[action], data, files=files)\n except Exception as e:\n raisefrom(HDXError, 'Failed when trying to %s %s! (POST)' % (action, data[id_field_name]), e)\n finally:\n if file_to_upload and file:\n file.close()", "docstring": "Creates or updates an HDX object in HDX and return HDX object metadata dict\n\nArgs:\n action (str): Action to perform eg. 'create', 'update'\n data (Dict): Data to write to HDX\n id_field_name (str): Name of field containing HDX object identifier or None\n file_to_upload (Optional[str]): File to upload to HDX\n\nReturns:\n Dict: HDX object metadata", "source": "juraj_google_style"} -{"code": "def Sign(verifiable, keypair):\n\n prikey = bytes(keypair.PrivateKey)\n hashdata = verifiable.GetHashData()\n res = Crypto.Default().Sign(hashdata, prikey)\n return res", "docstring": "Sign the `verifiable` object with the private key from `keypair`.\n\nArgs:\n verifiable:\n keypair (neocore.KeyPair):\n\nReturns:\n bool: True if successfully signed. 
False otherwise.", "source": "juraj_google_style"} -{"code": "def get_cluster_interfaces(cluster, extra_cond=lambda nic: True):\n\n nics = get_nics(cluster)\n # NOTE(msimonin): Since 05/18 nics on g5k nodes have predictable names but\n # the api description keep the legacy name (device key) and the new\n # predictable name (key name). The legacy names is still used for api\n # request to the vlan endpoint This should be fixed in\n # https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=9272\n # When its fixed we should be able to only use the new predictable name.\n nics = [(nic['device'], nic['name']) for nic in nics\n if nic['mountable']\n and nic['interface'] == 'Ethernet'\n and not nic['management']\n and extra_cond(nic)]\n nics = sorted(nics)\n return nics", "docstring": "Get the network interfaces names corresponding to a criteria.\n\n Note that the cluster is passed (not the individual node names), thus it is\n assumed that all nodes in a cluster have the same interface names same\n configuration. In addition to ``extra_cond``, only the mountable and\n Ehernet interfaces are returned.\n\nArgs:\n cluster(str): the cluster to consider\n extra_cond(lambda): boolean lambda that takes the nic(dict) as\n parameter", "source": "juraj_google_style"} -{"code": "def WriteEventBody(self, event):\n\n if not hasattr(event, 'timestamp'):\n return\n\n # TODO: preserve dfdatetime as an object.\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=event.timestamp)\n posix_timestamp = date_time.CopyToPosixTimestamp()\n if not posix_timestamp:\n posix_timestamp = 0\n\n source = self._FormatSource(event)\n hostname = self._FormatHostname(event)\n username = self._FormatUsername(event)\n description = self._FormatDescription(event)\n notes = self._FormatNotes(event)\n\n out_write = '{0:d}|{1:s}|{2:s}|{3:s}|{4:s}|{5!s}|{6!s}\\n'.format(\n posix_timestamp, source, hostname, username, description,\n self._output_mediator.timezone, notes)\n\n self._output_writer.Write(out_write)", "docstring": "Writes the body of an event object to the output.\n\nArgs:\n event (EventObject): event.", "source": "juraj_google_style"} -{"code": "def download_file(self, url):\n\n response = requests.get(url, stream=True)\n response.raise_for_status()\n return (int(response.headers.get('content-length', 0)), response)", "docstring": "Initiate a streaming download\n\nArgs:\n url (str): The url to download\n\nReturns:\n A tuple of the content length and the streaming response", "source": "juraj_google_style"} -{"code": "def Open(self, file_object):\n\n self._file_object = file_object\n self._regf_file.open_file_object(self._file_object)\n return True", "docstring": "Opens the Windows Registry file using a file-like object.\n\nArgs:\n file_object (file): file-like object.\n\nReturns:\n bool: True if successful or False if not.", "source": "juraj_google_style"} -{"code": "def save_images(images, filenames, output_dir):\n\n for i, filename in enumerate(filenames):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png')", "docstring": "Saves images to the output directory.\n\nArgs:\n images: array with minibatch of images\n filenames: list of filenames without path\n If number of file names in this list less than number of images in\n the minibatch then only first len(filenames) images will be saved.\n output_dir: directory where to 
save images", "source": "juraj_google_style"} -{"code": "def handle(self, message):\n\n\n logger.debug(message)\n if Utilities.isNotEmpty(message['metadata']['opts']):\n target = message['metadata']['opts']['target']\n self.botThread.create_message(target, message['text'])", "docstring": "Attempts to send a message to the specified destination in Discord.\n Extends Legobot.Lego.handle()\n\nArgs:\n message (Legobot.Message): message w/ metadata to send.", "source": "juraj_google_style"} -{"code": "def state_estimation_ensemble(data, k, n_runs=10, M_list=[], **se_params):\n\n if len(M_list)==0:\n M_list = []\n for i in range(n_runs):\n M, W, ll = poisson_estimate_state(data, k, **se_params)\n M_list.append(M)\n M_stacked = np.hstack(M_list)\n M_new, W_new, ll = poisson_estimate_state(M_stacked, k, **se_params)\n W_new = np.dot(data.T, M_new)\n W_new = W_new/W_new.sum(0)\n return M_new, W_new, ll", "docstring": "Runs an ensemble method on the list of M results...\n\nArgs:\n data: genes x cells array\n k: number of classes\n n_runs (optional): number of random initializations of state estimation\n M_list (optional): list of M arrays from state estimation\n se_params (optional): optional poisson_estimate_state params\n\nReturns:\n M_new\n W_new\n ll", "source": "juraj_google_style"} -{"code": "def pxbounds(self, geom, clip=False):\n\n\n try:\n if isinstance(geom, dict):\n if 'geometry' in geom:\n geom = shape(geom['geometry'])\n else:\n geom = shape(geom)\n elif isinstance(geom, BaseGeometry):\n geom = shape(geom)\n else:\n geom = wkt.loads(geom)\n except:\n raise TypeError (\"Invalid geometry object\")\n\n # if geometry doesn't overlap the image, return an error\n if geom.disjoint(shape(self)):\n raise ValueError(\"Geometry outside of image bounds\")\n # clip to pixels within the image\n (xmin, ymin, xmax, ymax) = ops.transform(self.__geo_transform__.rev, geom).bounds\n _nbands, ysize, xsize = self.shape\n if clip:\n xmin = max(xmin, 0)\n ymin = max(ymin, 0)\n xmax = min(xmax, xsize)\n ymax = min(ymax, ysize)\n\n return (xmin, ymin, xmax, ymax)", "docstring": "Returns the bounds of a geometry object in pixel coordinates\n\nArgs:\n geom: Shapely geometry object or GeoJSON as Python dictionary or WKT string\n clip (bool): Clip the bounds to the min/max extent of the image\n\nReturns:\n list: bounds in pixels [min x, min y, max x, max y] clipped to image bounds", "source": "juraj_google_style"} -{"code": "def produce_csv_output(filehandle: TextIO,\n fields: Sequence[str],\n values: Iterable[str]) -> None:\n\n output_csv(filehandle, fields)\n for row in values:\n output_csv(filehandle, row)", "docstring": "Produce CSV output, without using ``csv.writer``, so the log can be used\n for lots of things.\n\n - ... eh? What was I talking about?\n - POOR; DEPRECATED.\n\nArgs:\n filehandle: file to write to\n fields: field names\n values: values", "source": "juraj_google_style"} -{"code": "def get_user(self, user_id=None, user_name=None):\n\n\n if user_id:\n endpoint = '/api/user_id/{0}'.format(user_id)\n elif user_name:\n endpoint = '/api/user_name/{0}'.format(user_name)\n else:\n # Return currently authorized user\n endpoint = '/api/user'\n\n data = self._make_request(verb=\"GET\", endpoint=endpoint)\n\n try:\n return User.NewFromJSON(data)\n except:\n return data", "docstring": "Get a user object from the API. 
If no ``user_id`` or ``user_name``\n is specified, it will return the User object for the currently\n authenticated user.\n\nArgs:\n user_id (int): User ID of the user for whom you want to get\n information. [Optional]\n user_name(str): Username for the user for whom you want to get\n information. [Optional]\n\nReturns:\n A User object.", "source": "juraj_google_style"} -{"code": "def write_rtt(jlink):\n\n try:\n while jlink.connected():\n bytes = list(bytearray(input(), \"utf-8\") + b\"\\x0A\\x00\")\n bytes_written = jlink.rtt_write(0, bytes)\n except Exception:\n print(\"IO write thread exception, exiting...\")\n thread.interrupt_main()\n raise", "docstring": "Writes kayboard input to JLink RTT buffer #0.\n\n This method is a loop that blocks waiting on stdin. When enter is pressed,\n LF and NUL bytes are added to the input and transmitted as a byte list.\n If the JLink is disconnected, it will exit gracefully. If any other\n exceptions are raised, they will be caught and re-raised after interrupting\n the main thread.\n\nArgs:\n jlink (pylink.JLink): The JLink to write to.\n\nRaises:\n Exception on error.", "source": "juraj_google_style"} -{"code": "def _publish_status(self, slug, data):\n\n\n status_topic = self.topics.prefix + 'devices/{}/data/status'.format(slug)\n\n self._logger.debug(\"Publishing status message: (topic=%s) (message=%s)\", status_topic, str(data))\n self.client.publish(status_topic, data)", "docstring": "Publish a status message for a device\n\nArgs:\n slug (string): The device slug that we are publishing on behalf of\n data (dict): The status message data to be sent back to the caller", "source": "juraj_google_style"} -{"code": "def _build_encryption_key_information(self, value):\n\n if value is None:\n return None\n if not isinstance(value, dict):\n raise TypeError(\"Encryption key information must be a dictionary.\")\n\n cryptographic_parameters = value.get('cryptographic_parameters')\n if cryptographic_parameters:\n cryptographic_parameters = self._build_cryptographic_parameters(\n cryptographic_parameters\n )\n encryption_key_information = cobjects.EncryptionKeyInformation(\n unique_identifier=value.get('unique_identifier'),\n cryptographic_parameters=cryptographic_parameters\n )\n return encryption_key_information", "docstring": "Build an EncryptionKeyInformation struct from a dictionary.\n\nArgs:\n value (dict): A dictionary containing the key/value pairs for a\n EncryptionKeyInformation struct.\n\nReturns:\n EncryptionKeyInformation: an EncryptionKeyInformation struct\n\nRaises:\n TypeError: if the input argument is invalid", "source": "juraj_google_style"} -{"code": "def __init__(self, connection):\n\n self._connection = connection\n\n self._latest_state_delta_event = None\n self._subscribers = []\n self._subscriber_lock = asyncio.Lock()\n self._delta_task = None\n self._listening = False\n self._accepting = True\n\n self._connection.on_connection_state_change(\n ConnectionEvent.DISCONNECTED,\n self._handle_disconnect)\n self._connection.on_connection_state_change(\n ConnectionEvent.RECONNECTED,\n self._handle_reconnection)", "docstring": "Constructs this handler on a given validator connection.\n\nArgs:\n connection (messaging.Connection): the validator connection", "source": "juraj_google_style"} -{"code": "def xeval(source, optimize=True):\n\n native = xcompile(source, optimize=optimize)\n return native()", "docstring": "Compiles to native Python bytecode and runs program, returning the\n topmost value on the stack.\n\nArgs:\n optimize: Whether to 
optimize the code after parsing it.\n\nReturns:\n None: If the stack is empty\n obj: If the stack contains a single value\n [obj, obj, ...]: If the stack contains many values", "source": "juraj_google_style"} -{"code": "def write(self, data, report_id=0):\n\n\n if not self._is_open:\n raise HIDException(\"HIDDevice not open\")\n\n write_data = bytearray([report_id]) + bytearray(data)\n cdata = ffi.new(\"const unsigned char[]\", bytes(write_data))\n num_written = hidapi.hid_write(self._device, cdata, len(write_data))\n if num_written < 0:\n raise HIDException(\"Failed to write to HID device: \" + str(num_written))\n else:\n return num_written", "docstring": "Writes data to the HID device on its endpoint.\n\n Parameters:\n data: data to send on the HID endpoint\n report_id: the report ID to use.\n\nReturns:\n The number of bytes written including the report ID.", "source": "juraj_google_style"} -{"code": "def setPadding(self, padding):\n\n self._pad = padding\n self._zfill = self.__class__.getPaddingNum(self._pad)", "docstring": "Set new padding characters for the sequence.\n i.e. \"#\" or \"@@@\" or '%04d', or an empty string to disable range formatting.\n\nArgs:\n padding (str): sequence padding to set", "source": "juraj_google_style"} -{"code": "def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:\n\n with self.graph.as_default():\n with tf.name_scope('initial_state'):\n self._initialize_initial_state_fluents()\n if batch_size is None:\n return self.initial_state_fluents\n return self._compile_batch_fluents(self.initial_state_fluents, batch_size)", "docstring": "Returns a tuple of tensors representing the initial state fluents.\n\nArgs:\n batch_size (Optional[int]): The batch size.\n\nReturns:\n Sequence[tf.Tensor]: A tuple of tensors.", "source": "juraj_google_style"} -{"code": "def compute_output(self, o, output_shape=None):\n\n if self.combine_dims:\n o = mtf.transpose(o, o.shape - self.o_dims + self.o_dims)\n o = mtf.replace_dimensions(o, self.o_dims, self.wo.shape.dims[0])\n reduced_dims = [self.wo.shape.dims[0]]\n else:\n reduced_dims = self.o_dims\n return mtf.einsum(\n [o, self.wo], output_shape=output_shape, reduced_dims=reduced_dims)", "docstring": "Compute output of multihead attention.\n\nArgs:\n o: a Tensor with dimensions\n query_heads_dims + {value_dim} + other_dims\n output_shape: an optional Shape\n\nReturns:\n a Tensor with shape:\n {output_dim} + other_dims", "source": "juraj_google_style"} -{"code": "def _batch_transpose(mat):\n\n n = distribution_util.prefer_static_rank(mat)\n perm = tf.range(n)\n perm = tf.concat([perm[:-2], [perm[-1], perm[-2]]], axis=0)\n return tf.transpose(a=mat, perm=perm)", "docstring": "Transpose a possibly batched matrix.\n\nArgs:\n mat: A `tf.Tensor` of shape `[..., n, m]`.\n\nReturns:\n A tensor of shape `[..., m, n]` with matching batch dimensions.", "source": "juraj_google_style"} -{"code": "def parse_display_name(chrom, pos, ref, alt, variant_type):\n\n return '_'.join([chrom, pos, ref, alt, variant_type])", "docstring": "Parse the variant id for a variant\n\n This is used to display the variant in scout.\n\nArgs:\n chrom(str)\n pos(str)\n ref(str)\n alt(str)\n variant_type(str): 'clinical' or 'research'\n\nReturns:\n variant_id(str): The variant id in human readable format", "source": "juraj_google_style"} -{"code": "def update_by_id(self, data, table, id_value):\n\n key_map = {}\n if len(data) == 0:\n print('请确保data被正确传入了')\n return -1\n datas = {}\n updates = ''\n for k, v in data.items():\n # 
防止sql注入\n datas.update({k: pymysql.escape_string(str(v))})\n for d in datas:\n updates += \"{}='{}',\".format(str(d), str(datas[d]))\n if len(updates) <= 0:\n return -1\n # 生成sql语句\n sql = \"update {} set {} where id = '{}'\".format(\n table, updates[:-1], str(id_value))\n res = self.query(sql)\n return 1 if res != -1 else res", "docstring": "通过id更新记录\n\nArgs:\n table: 表名字 str\n data: 记录 dict\n id_value: id值\n\nReturns:\n 成功: 1\n 失败: -1 并打印返回报错信息\n 每条记录都以一个字典的形式传进来", "source": "juraj_google_style"} -{"code": "def shared_s3_app_bucket(self, include_region=False):\n\n if include_region:\n shared_s3_app_bucket = self.format['shared_s3_app_region_bucket'].format(**self.data)\n else:\n shared_s3_app_bucket = self.format['shared_s3_app_bucket'].format(**self.data)\n return shared_s3_app_bucket", "docstring": "Generate shared s3 application bucket name.\n\nArgs:\n include_region (bool): Include region in the name generation.", "source": "juraj_google_style"} -{"code": "def is_user_enrolled(cls, user, course_id, course_mode):\n\n enrollment_client = EnrollmentApiClient()\n try:\n enrollments = enrollment_client.get_course_enrollment(user.username, course_id)\n if enrollments and course_mode == enrollments.get('mode'):\n return True\n except HttpClientError as exc:\n logging.error(\n 'Error while checking enrollment status of user %(user)s: %(message)s',\n dict(user=user.username, message=str(exc))\n )\n except KeyError as exc:\n logging.warning(\n 'Error while parsing enrollment data of user %(user)s: %(message)s',\n dict(user=user.username, message=str(exc))\n )\n return False", "docstring": "Query the enrollment API and determine if a learner is enrolled in a given course run track.\n\nArgs:\n user: The user whose enrollment needs to be checked\n course_mode: The mode with which the enrollment should be checked\n course_id: course id of the course where enrollment should be checked.\n\nReturns:\n Boolean: Whether or not enrollment exists", "source": "juraj_google_style"} -{"code": "def process_links(include_match, block_map, link_stack, source_path):\n\n leading_whitespace = include_match.group(1)\n include_path = include_match.group(2)\n\n # Optional block name. If match is None, block name was ommitted (default to 'all').\n block_name = include_match.group(3)\n if block_name is not None:\n block_name = block_name.lstrip(':')\n else:\n block_name = ALL_BLOCK_NAME\n\n return retrieve_block_from_map(\n source_path,\n include_path.strip(),\n block_name.strip(),\n leading_whitespace,\n block_map,\n link_stack)", "docstring": "Process a string of content for include tags.\n\n This function assumes there are no blocks in the content. 
The content is split into segments,\n with include tags being replaced by Block objects.\n\n PARAMETERS:\n content -- str; content to be converted into a Block.\n block_map -- BlockMap\n link_stack -- LinkStack\n source_path -- str; the filepath of the file from which this content came.\n\nReturns:\n list of str; segments that the comprise the content.", "source": "juraj_google_style"} -{"code": "def _update_step(self, sequence):\n\n observ, action, old_policy_params, reward, advantage = sequence['sequence']\n length = sequence['length']\n old_policy = self._policy_type(**old_policy_params)\n value_loss, value_summary = self._value_loss(observ, reward, length)\n network = self._network(observ, length)\n policy_loss, policy_summary = self._policy_loss(\n old_policy, network.policy, action, advantage, length)\n network_loss = network.get('loss', 0.0)\n loss = policy_loss + value_loss + tf.reduce_mean(network_loss)\n gradients, variables = (\n zip(*self._optimizer.compute_gradients(loss)))\n optimize = self._optimizer.apply_gradients(\n zip(gradients, variables))\n summary = tf.summary.merge([\n value_summary, policy_summary,\n tf.summary.histogram('network_loss', network_loss),\n tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)),\n tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),\n utility.gradient_summaries(zip(gradients, variables))])\n with tf.control_dependencies([optimize]):\n return [tf.identity(x) for x in (value_loss, policy_loss, summary)]", "docstring": "Compute the current combined loss and perform a gradient update step.\n\n The sequences must be a dict containing the keys `length` and `sequence`,\n where the latter is a tuple containing observations, actions, parameters of\n the behavioral policy, rewards, and advantages.\n\nArgs:\n sequence: Sequences of episodes or chunks of episodes.\n\nReturns:\n Tuple of value loss, policy loss, and summary tensor.", "source": "juraj_google_style"} -{"code": "def _get_music_services_data_xml(soco=None):\n\n device = soco or discovery.any_soco()\n log.debug(\"Fetching music services data from %s\", device)\n available_services = device.musicServices.ListAvailableServices()\n descriptor_list_xml = available_services[\n 'AvailableServiceDescriptorList']\n log.debug(\"Services descriptor list: %s\", descriptor_list_xml)\n return descriptor_list_xml", "docstring": "Fetch the music services data xml from a Sonos device.\n\nArgs:\n soco (SoCo): a SoCo instance to query. If none is specified, a\n random device will be used. 
Defaults to `None`.\n\nReturns:\n str: a string containing the music services data xml", "source": "juraj_google_style"} -{"code": "def prepare_minibatch(self, audio_paths, texts, overwrite=False,\n is_bi_graphemes=False, seq_length=-1, save_feature_as_csvfile=False):\n\n assert len(audio_paths) == len(texts),\\\n \"Inputs and outputs to the network must be of the same number\"\n # Features is a list of (timesteps, feature_dim) arrays\n # Calculate the features for each audio clip, as the log of the\n # Fourier Transform of the audio\n features = [self.featurize(a, overwrite=overwrite, save_feature_as_csvfile=save_feature_as_csvfile) for a in audio_paths]\n input_lengths = [f.shape[0] for f in features]\n feature_dim = features[0].shape[1]\n mb_size = len(features)\n # Pad all the inputs so that they are all the same length\n if seq_length == -1:\n x = np.zeros((mb_size, self.max_seq_length, feature_dim))\n else:\n x = np.zeros((mb_size, seq_length, feature_dim))\n y = np.zeros((mb_size, self.max_label_length))\n labelUtil = LabelUtil.getInstance()\n label_lengths = []\n for i in range(mb_size):\n feat = features[i]\n feat = self.normalize(feat) # Center using means and std\n x[i, :feat.shape[0], :] = feat\n if is_bi_graphemes:\n label = generate_bi_graphemes_label(texts[i])\n label = labelUtil.convert_bi_graphemes_to_num(label)\n y[i, :len(label)] = label\n else:\n label = labelUtil.convert_word_to_num(texts[i])\n y[i, :len(texts[i])] = label\n label_lengths.append(len(label))\n return {\n 'x': x, # (0-padded features of shape(mb_size,timesteps,feat_dim)\n 'y': y, # list(int) Flattened labels (integer sequences)\n 'texts': texts, # list(str) Original texts\n 'input_lengths': input_lengths, # list(int) Length of each input\n 'label_lengths': label_lengths, # list(int) Length of each label\n }", "docstring": "Featurize a minibatch of audio, zero pad them and return a dictionary\n Params:\n audio_paths (list(str)): List of paths to audio files\n texts (list(str)): List of texts corresponding to the audio files\n\nReturns:\n dict: See below for contents", "source": "juraj_google_style"} -{"code": "def _check_root_tag(self, root):\n\n supported = self.supported_tags()\n if root.tag in supported:\n return\n\n error = \"Document root element ({0}) not one of ({1})\"\n raise UnsupportedRootElementError(\n message=error.format(root.tag, supported),\n expected=supported,\n found=root.tag,\n )", "docstring": "Check that the XML element tree has a supported root element.\n\nArgs:\n root (etree.Element)\n\nRaises:\n UnsupportedRootElementError", "source": "juraj_google_style"} -{"code": "def _render_fluent_timestep(self,\n fluent_type: str,\n fluents: Sequence[Tuple[str, np.array]],\n fluent_variables: Sequence[Tuple[str, List[str]]]) -> None:\n\n for fluent_pair, variable_list in zip(fluents, fluent_variables):\n name, fluent = fluent_pair\n _, variables = variable_list\n print(name)\n fluent = fluent.flatten()\n for variable, value in zip(variables, fluent):\n print('- {}: {} = {}'.format(fluent_type, variable, value))\n print()", "docstring": "Prints `fluents` of given `fluent_type` as list of instantiated variables\n with corresponding values.\n\nArgs:\n fluent_type (str): Fluent type.\n fluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values).\n fluent_variables (Sequence[Tuple[str, List[str]]]): List of pairs (fluent_name, args).", "source": "juraj_google_style"} -{"code": "def list2str(self, l: List, joiner: str) -> str:\n\n result = str()\n for item in l:\n if 
isinstance(item, list):\n result = result + self.list2str(item, joiner) + joiner\n elif isinstance(item, dict):\n result = result + self.dict2str(item, joiner) + joiner\n elif item:\n result = result + str(item) + joiner\n return result", "docstring": "Convert list to str as input for tokenizer\n\nArgs:\n l (list): list for converting\n joiner (str): join the elements using this string to separate them.\n\n Returns: the value of the list as a string", "source": "juraj_google_style"} -{"code": "def _setup_index(index):\n\n index = int(index)\n if index > 0:\n index -= 1\n elif index == 0:\n # Zero indicies should not be allowed by default.\n raise ValueError\n return index", "docstring": "Shifts indicies as needed to account for one based indexing\n\n Positive indicies need to be reduced by one to match with zero based\n indexing.\n\n Zero is not a valid input, and as such will throw a value error.\n\nArgs:\n index - index to shift", "source": "juraj_google_style"} -{"code": "def __setstate__(self, state):\n\n self._api = state['api']\n self._path = state['path']\n self.name = api_utils._unquote_filename(self._path)\n self._buffer_size = state['buffer_size']\n self._max_request_size = state['request_size']\n self._etag = state['etag']\n self._file_size = state['size']\n self._offset = state['offset']\n self._buffer = _Buffer()\n self.closed = state['closed']\n self._buffer_future = None\n if self._remaining() and not self.closed:\n self._request_next_buffer()", "docstring": "Restore state as part of deserialization/unpickling.\n\nArgs:\n state: the dictionary from a __getstate__ call\n\n Along with restoring the state, pre-fetch the next read buffer.", "source": "juraj_google_style"} -{"code": "def feed(self, data_len, feed_time=None):\n\n self._bytes_transferred += data_len\n self._collected_bytes_transferred += data_len\n\n time_now = feed_time or time.time()\n time_diff = time_now - self._last_feed_time\n\n if time_diff < self._sample_min_time:\n return\n\n self._last_feed_time = time.time()\n\n if data_len == 0 and time_diff >= self._stall_time:\n self._stalled = True\n return\n\n self._samples.append((time_diff, self._collected_bytes_transferred))\n self._collected_bytes_transferred = 0", "docstring": "Update the bandwidth meter.\n\nArgs:\n data_len (int): The number of bytes transfered since the last\n call to :func:`feed`.\n feed_time (float): Current time.", "source": "juraj_google_style"} -{"code": "def read_probes(self, key):\n\n assert key in list(self._PROBES.keys())\n\n if key == 'output':\n value = self._output\n\n return value", "docstring": "requestes value from the instrument and returns it\n\nArgs:\n key: name of requested value\n\n Returns: reads values from instrument", "source": "juraj_google_style"} -{"code": "def transform_to_mods_multimono(marc_xml, uuid, url):\n\n marc_xml = _read_content_or_path(marc_xml)\n\n transformed = xslt_transformation(\n marc_xml,\n _absolute_template_path(\"MARC21toMultiMonographTitle.xsl\")\n )\n\n return _apply_postprocessing(\n marc_xml=marc_xml,\n xml=transformed,\n func=mods_postprocessor.postprocess_multi_mono,\n uuid=uuid,\n url=url,\n )", "docstring": "Convert `marc_xml` to multimonograph MODS data format.\n\nArgs:\n marc_xml (str): Filename or XML string. 
Don't use ``\\\\n`` in case of\n filename.\n uuid (str): UUID string giving the package ID.\n url (str): URL of the publication (public or not).\n\nReturns:\n list: Collection of transformed xml strings.", "source": "juraj_google_style"} -{"code": "def _AddExtractionProcessStatusTableRow(self, process_status, table_view):\n\n used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)\n\n sources = ''\n if (process_status.number_of_produced_sources is not None and\n process_status.number_of_produced_sources_delta is not None):\n sources = '{0:d} ({1:d})'.format(\n process_status.number_of_produced_sources,\n process_status.number_of_produced_sources_delta)\n\n events = ''\n if (process_status.number_of_produced_events is not None and\n process_status.number_of_produced_events_delta is not None):\n events = '{0:d} ({1:d})'.format(\n process_status.number_of_produced_events,\n process_status.number_of_produced_events_delta)\n\n # TODO: shorten display name to fit in 80 chars and show the filename.\n\n table_view.AddRow([\n process_status.identifier, process_status.pid, process_status.status,\n used_memory, sources, events, process_status.display_name])", "docstring": "Adds an extraction process status table row.\n\nArgs:\n process_status (ProcessStatus): processing status.\n table_view (CLITabularTableView): table view.", "source": "juraj_google_style"} -{"code": "def choose(self, locator=None, allow_label_click=None, **kwargs):\n\n\n self._check_with_label(\n \"radio_button\", True, locator=locator, allow_label_click=allow_label_click, **kwargs)", "docstring": "Find a radio button and mark it as checked. The radio button can be found via name, id, or\n label text. ::\n\n page.choose(\"Male\")\n\nArgs:\n locator (str, optional): Which radio button to choose.\n allow_label_click (bool, optional): Attempt to click the label to toggle state if\n element is non-visible. 
Defaults to :data:`capybara.automatic_label_click`.\n **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.", "source": "juraj_google_style"} -{"code": "def update_variant_rank(self, case_obj, variant_type='clinical', category='snv'):\n\n # Get all variants sorted by rank score\n variants = self.variant_collection.find({\n 'case_id': case_obj['_id'],\n 'category': category,\n 'variant_type': variant_type,\n }).sort('rank_score', pymongo.DESCENDING)\n\n LOG.info(\"Updating variant_rank for all variants\")\n\n requests = []\n\n for index, var_obj in enumerate(variants):\n if len(requests) > 5000:\n try:\n self.variant_collection.bulk_write(requests, ordered=False)\n requests = []\n except BulkWriteError as err:\n LOG.warning(\"Updating variant rank failed\")\n raise err\n\n operation = pymongo.UpdateOne(\n {'_id': var_obj['_id']},\n {\n '$set': {\n 'variant_rank': index + 1,\n }\n })\n requests.append(operation)\n\n #Update the final bulk\n try:\n self.variant_collection.bulk_write(requests, ordered=False)\n except BulkWriteError as err:\n LOG.warning(\"Updating variant rank failed\")\n raise err\n\n LOG.info(\"Updating variant_rank done\")", "docstring": "Updates the manual rank for all variants in a case\n\n Add a variant rank based on the rank score\n Whenever variants are added or removed from a case we need to update the variant rank\n\nArgs:\n case_obj(Case)\n variant_type(str)", "source": "juraj_google_style"} -{"code": "def task_done(self, message):\n\n topic_partition = (message.topic, message.partition)\n if topic_partition not in self._topics:\n logger.warning('Unrecognized topic/partition in task_done message: '\n '{0}:{1}'.format(*topic_partition))\n return False\n\n offset = message.offset\n\n # Warn on non-contiguous offsets\n prev_done = self._offsets.task_done[topic_partition]\n if prev_done is not None and offset != (prev_done + 1):\n logger.warning('Marking task_done on a non-continuous offset: %d != %d + 1',\n offset, prev_done)\n\n # Warn on smaller offsets than previous commit\n # \"commit\" offsets are actually the offset of the next message to fetch.\n prev_commit = self._offsets.commit[topic_partition]\n if prev_commit is not None and ((offset + 1) <= prev_commit):\n logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d',\n offset, prev_commit)\n\n self._offsets.task_done[topic_partition] = offset\n\n # Check for auto-commit\n if self._does_auto_commit_messages():\n self._incr_auto_commit_message_count()\n\n if self._should_auto_commit():\n self.commit()\n\n return True", "docstring": "Mark a fetched message as consumed.\n\n Offsets for messages marked as \"task_done\" will be stored back\n to the kafka cluster for this consumer group on commit()\n\nArgs:\n message (KafkaMessage): the message to mark as complete\n\nReturns:\n True, unless the topic-partition for this message has not\n been configured for the consumer. In normal operation, this\n should not happen. 
But see github issue 364.", "source": "juraj_google_style"} -{"code": "def get_keys(self, alias_name, key_format):\n\n uri = self.URI + \"/keys/\" + alias_name + \"?format=\" + key_format\n return self._client.get(uri)", "docstring": "Retrieves the contents of PKCS12 file in the format specified.\n This PKCS12 formatted file contains both the certificate as well as the key file data.\n Valid key formats are Base64 and PKCS12.\n\nArgs:\n alias_name: Key pair associated with the RabbitMQ\n key_format: Valid key formats are Base64 and PKCS12.\n\nReturns:\n dict: RabbitMQ certificate", "source": "juraj_google_style"} -{"code": "def _tzinfome(tzinfo):\n\n if not isinstance(tzinfo, datetime.tzinfo):\n try:\n tzinfo = pytz.timezone(tzinfo)\n assert tzinfo.zone in pytz.all_timezones\n except AttributeError:\n raise pytz.UnknownTimeZoneError(\"Unknown timezone! %s\" % tzinfo)\n return tzinfo", "docstring": "Gets a tzinfo object from a string.\n\nArgs:\n tzinfo: A string (or string like) object, or a datetime.tzinfo object.\n\nReturns:\n An datetime.tzinfo object.\n\nRaises:\n UnknownTimeZoneError: If the timezone given can't be decoded.", "source": "juraj_google_style"} -{"code": "def append(self, header, f, _left=False):\n\n self.items_length += len(header)\n if _left:\n self.deque.appendleft((header, f))\n else:\n self.deque.append((header, f))", "docstring": "Add a column to the table.\n\nArgs:\n header (str):\n Column header\n\n f (function(datum)->str):\n Makes the row string from the datum. Str returned by f should\n have the same width as header.", "source": "juraj_google_style"} -{"code": "def color_palette_dict(self, alpha=0.35):\n\n\n color_dict = {}\n for hkl in self.all_slab_entries.keys():\n rgb_indices = [0, 1, 2]\n color = [0, 0, 0, 1]\n random.shuffle(rgb_indices)\n for i, ind in enumerate(rgb_indices):\n if i == 2:\n break\n color[ind] = np.random.uniform(0, 1)\n\n # Get the clean (solid) colors first\n clean_list = np.linspace(0, 1, len(self.all_slab_entries[hkl]))\n for i, clean in enumerate(self.all_slab_entries[hkl].keys()):\n c = copy.copy(color)\n c[rgb_indices[2]] = clean_list[i]\n color_dict[clean] = c\n\n # Now get the adsorbed (transparent) colors\n for ads_entry in self.all_slab_entries[hkl][clean]:\n c_ads = copy.copy(c)\n c_ads[3] = alpha\n color_dict[ads_entry] = c_ads\n\n return color_dict", "docstring": "Helper function to assign each facet a unique color using a dictionary.\n\nArgs:\n alpha (float): Degree of transparency\n\n return (dict): Dictionary of colors (r,g,b,a) when plotting surface\n energy stability. 
The keys are individual surface entries where\n clean surfaces have a solid color while the corresponding adsorbed\n surface will be transparent.", "source": "juraj_google_style"} -{"code": "def __str__(self, talker='GP'):\n\n if not len(talker) == 2:\n raise ValueError('Talker ID must be two characters %r' % talker)\n data = ['%sGLL' % talker]\n data.extend(nmea_latitude(self.latitude))\n data.extend(nmea_longitude(self.longitude))\n data.append('%s.%02i' % (self.time.strftime('%H%M%S'),\n self.time.microsecond / 1000000))\n data.append('A' if self.status else 'V')\n if self.mode:\n data.append(self.mode)\n data = ','.join(data)\n return '$%s*%02X\\r' % (data, calc_checksum(data))", "docstring": "Pretty printed position string.\n\nArgs:\n talker (str): Talker ID\n\nReturns:\n str: Human readable string representation of ``Position`` object", "source": "juraj_google_style"} -{"code": "def equal(x, y):\n\n if PY_3:\n return test_case().assertEqual(x, y) or True\n\n assert x == y", "docstring": "Shortcut function for ``unittest.TestCase.assertEqual()``.\n\nArgs:\n x (mixed)\n y (mixed)\n\nRaises:\n AssertionError: in case of assertion error.\n\nReturns:\n bool", "source": "juraj_google_style"} -{"code": "def random( self ):\n\n j = np.searchsorted( self.cumulative_probabilities(), random.random() )\n return self.jumps[ j ]", "docstring": "Select a jump at random with appropriate relative probabilities.\n\nArgs:\n None\n\nReturns:\n (Jump): The randomly selected Jump.", "source": "juraj_google_style"} -{"code": "def __init__(self, submission_id, submissions, storage_bucket):\n\n self.submission_id = submission_id\n self.storage_bucket = storage_bucket\n self.type = None\n self.submission = None\n if submission_id in submissions.attacks:\n self.type = TYPE_NONTARGETED\n self.submission = submissions.attacks[submission_id]\n elif submission_id in submissions.targeted_attacks:\n self.type = TYPE_TARGETED\n self.submission = submissions.targeted_attacks[submission_id]\n elif submission_id in submissions.defenses:\n self.type = TYPE_DEFENSE\n self.submission = submissions.defenses[submission_id]\n else:\n raise WorkerError(\n 'Submission with ID \"{0}\" not found'.format(submission_id))\n self.submission_dir = None\n self.extracted_submission_dir = None", "docstring": "Initializes ExecutableSubmission.\n\nArgs:\n submission_id: ID of the submissions\n submissions: instance of CompetitionSubmissions with all submissions\n storage_bucket: storage bucket where all submissions are stored\n\nRaises:\n WorkerError: if submission was not found", "source": "juraj_google_style"} -{"code": "def generate_nodes(tpm, cm, network_state, indices, node_labels=None):\n\n if node_labels is None:\n node_labels = NodeLabels(None, indices)\n\n node_state = utils.state_of(indices, network_state)\n\n return tuple(Node(tpm, cm, index, state, node_labels)\n for index, state in zip(indices, node_state))", "docstring": "Generate |Node| objects for a subsystem.\n\nArgs:\n tpm (np.ndarray): The system's TPM\n cm (np.ndarray): The corresponding CM.\n network_state (tuple): The state of the network.\n indices (tuple[int]): Indices to generate nodes for.\n\n Keyword Args:\n node_labels (|NodeLabels|): Textual labels for each node.\n\nReturns:\n tuple[Node]: The nodes of the system.", "source": "juraj_google_style"} -{"code": "def save(self, fname, mode=None, validate=True, encoding='utf-8',\n wd=False, inline=False, relative=False, pack=False):\n\n self._closed()\n\n if mode is None:\n mode = 'abs'\n if pack:\n mode = 'pack'\n 
elif wd:\n mode = 'wd'\n elif relative:\n mode = 'rel'\n\n msg = 'Using deprecated save method. Please save the workflow ' \\\n 'with: wf.save(\\'{}\\', mode=\\'{}\\'). Redirecting to new ' \\\n 'save method.'.format(fname, mode)\n warnings.warn(msg, DeprecationWarning)\n\n modes = ('rel', 'abs', 'wd', 'inline', 'pack')\n if mode not in modes:\n msg = 'Illegal mode \"{}\". Choose one of ({}).'\\\n .format(mode, ','.join(modes))\n raise ValueError(msg)\n\n if validate:\n self.validate()\n\n dirname = os.path.dirname(os.path.abspath(fname))\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if mode == 'inline':\n msg = ('Inline saving is deprecated. Please save the workflow '\n 'using mode=\\'pack\\'. Setting mode to pack.')\n warnings.warn(msg, DeprecationWarning)\n mode = 'pack'\n\n if mode == 'rel':\n relpath = dirname\n save_yaml(fname=fname, wf=self, pack=False, relpath=relpath,\n wd=False)\n\n if mode == 'abs':\n save_yaml(fname=fname, wf=self, pack=False, relpath=None,\n wd=False)\n\n if mode == 'pack':\n self._pack(fname, encoding)\n\n if mode == 'wd':\n if self.get_working_dir() is None:\n raise ValueError('Working directory not set.')\n else:\n # save in working_dir\n bn = os.path.basename(fname)\n wd_file = os.path.join(self.working_dir, bn)\n save_yaml(fname=wd_file, wf=self, pack=False, relpath=None,\n wd=True)\n # and copy workflow file to other location (as though all steps\n # are in the same directory as the workflow)\n try:\n shutil.copy2(wd_file, fname)\n except shutil.Error:\n pass", "docstring": "Save the workflow to file.\n\n Save the workflow to a CWL file that can be run with a CWL runner.\n\nArgs:\n fname (str): file to save the workflow to.\n mode (str): one of (rel, abs, wd, inline, pack)\n encoding (str): file encoding to use (default: ``utf-8``).", "source": "juraj_google_style"} -{"code": "def find(self, soup):\n\n for tag in soup.recursiveChildGenerator():\n if self.match_criterion(tag):\n yield tag", "docstring": "Yield tags matching the tag criterion from a soup.\n\n There is no need to override this if you are satisfied with finding\n tags that match match_criterion.\n\nArgs:\n soup: A BeautifulSoup to search through.\n\nYields:\n BeautifulSoup Tags that match the criterion.", "source": "juraj_google_style"} -{"code": "def indicator(self, data):\n\n # handle hashes in form md5 : sha1 : sha256\n data = self.get_first_hash(data)\n super(File, self).indicator(data)", "docstring": "Update the request URI to include the Indicator for specific indicator retrieval.\n\nArgs:\n data (string): The indicator value", "source": "juraj_google_style"} -{"code": "def flatten(schedule: ScheduleComponent, name: str = None) -> Schedule:\n\n if name is None:\n name = schedule.name\n\n return Schedule(*schedule.instructions, name=name)", "docstring": "Create a flattened schedule.\n\nArgs:\n schedule: Schedules to flatten\n name: Name of the new schedule. 
Defaults to first element of `schedules`", "source": "juraj_google_style"} -{"code": "def has_arg(fn, arg_name):\n\n if sys.version_info < (3,):\n if isinstance(fn, types.FunctionType) or isinstance(fn, types.MethodType):\n arg_spec = inspect.getargspec(fn)\n else:\n try:\n arg_spec = inspect.getargspec(fn.__call__)\n except AttributeError:\n return False\n return (arg_name in arg_spec.args)\n elif sys.version_info < (3, 6):\n arg_spec = inspect.getfullargspec(fn)\n return (arg_name in arg_spec.args or\n arg_name in arg_spec.kwonlyargs)\n else:\n try:\n signature = inspect.signature(fn)\n except ValueError:\n # handling Cython\n signature = inspect.signature(fn.__call__)\n parameter = signature.parameters.get(arg_name)\n if parameter is None:\n return False\n return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,\n inspect.Parameter.KEYWORD_ONLY))", "docstring": "Checks if a callable accepts a given keyword argument.\n\nArgs:\n fn: callable to inspect\n arg_name: string, keyword argument name to check\n\nReturns:\n bool, whether `fn` accepts a `arg_name` keyword argument.", "source": "juraj_google_style"} -{"code": "def save_images(images, filenames, output_dir):\n\n for i, filename in enumerate(filenames):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)\n Image.fromarray(img).save(f, format='PNG')", "docstring": "Saves images to the output directory.\n\nArgs:\n images: array with minibatch of images\n filenames: list of filenames without path\n If number of file names in this list less than number of images in\n the minibatch then only first len(filenames) images will be saved.\n output_dir: directory where to save images", "source": "juraj_google_style"} -{"code": "def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):\n\n query_hash = hash(query)\n\n cookie_name = self._GetRowValue(query_hash, row, 'name')\n cookie_data = self._GetRowValue(query_hash, row, 'value')\n\n hostname = self._GetRowValue(query_hash, row, 'host_key')\n if hostname.startswith('.'):\n hostname = hostname[1:]\n\n httponly = self._GetRowValue(query_hash, row, 'httponly')\n path = self._GetRowValue(query_hash, row, 'path')\n persistent = self._GetRowValue(query_hash, row, 'persistent')\n secure = self._GetRowValue(query_hash, row, 'secure')\n\n if secure:\n scheme = 'https'\n else:\n scheme = 'http'\n\n url = '{0:s}://{1:s}{2:s}'.format(scheme, hostname, path)\n\n event_data = ChromeCookieEventData()\n event_data.cookie_name = cookie_name\n event_data.data = cookie_data\n event_data.host = hostname\n event_data.httponly = bool(httponly)\n event_data.path = path\n event_data.persistent = bool(persistent)\n event_data.query = query\n event_data.secure = bool(secure)\n event_data.url = url\n\n timestamp = self._GetRowValue(query_hash, row, 'creation_utc')\n date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = self._GetRowValue(query_hash, row, 'last_access_utc')\n date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = 
self._GetRowValue(query_hash, row, 'expires_utc')\n if timestamp:\n date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n for plugin in self._cookie_plugins:\n if cookie_name != plugin.COOKIE_NAME:\n continue\n\n try:\n plugin.UpdateChainAndProcess(\n parser_mediator, cookie_data=cookie_data, cookie_name=cookie_name,\n url=url)\n\n except Exception as exception: # pylint: disable=broad-except\n parser_mediator.ProduceExtractionWarning(\n 'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(\n plugin.NAME, exception))", "docstring": "Parses a cookie row.\n\nArgs:\n parser_mediator (ParserMediator): parser mediator.\n query (str): query that created the row.\n row (sqlite3.Row): row resulting from the query.", "source": "juraj_google_style"} -{"code": "def remove_node(self, node):\n\n if node not in self.node_list:\n return\n self.node_list.remove(node)\n # Remove links pointing to the deleted node\n for n in self.node_list:\n n.link_list = [link for link in n.link_list if\n link.target != node]", "docstring": "Remove a node from ``self.node_list`` and links pointing to it.\n\n If ``node`` is not in the graph, do nothing.\n\nArgs:\n node (Node): The node to be removed\n\n Returns: None\n\nExample:\n >>> from blur.markov.node import Node\n >>> node_1 = Node('One')\n >>> graph = Graph([node_1])\n >>> graph.remove_node(node_1)\n >>> len(graph.node_list)\n 0", "source": "juraj_google_style"} -{"code": "def __le__(self, other):\n\n if not isinstance(other, interface.DateTimeValues):\n raise ValueError('Other not an instance of DateTimeValues')\n\n return isinstance(other, Never)", "docstring": "Determines if the date time values are less than or equal to other.\n\nArgs:\n other (DateTimeValues): date time values to compare against.\n\nReturns:\n bool: True if the date time values are greater than or equal to other.\n\nRaises:\n ValueError: if other is not an instance of DateTimeValues.", "source": "juraj_google_style"} -{"code": "def call(self, url, method=None, args=None):\n\n if not args:\n args = {}\n\n if sys.version_info.major == 3:\n data = urllib.parse.urlparse(url)\n path = data.path.rstrip('/') + '/'\n _args = dict(urllib.parse.parse_qs(data.query,\n keep_blank_values=True))\n elif sys.version_info.major == 2:\n data = urlparse.urlparse(url)\n path = data.path.rstrip('/') + '/'\n _args = dict(urlparse.parse_qs(data.query,\n keep_blank_values=True))\n\n for elem in self._data_store:\n pattern = elem['pattern']\n function = elem['function']\n _method = elem['method']\n type_cast = elem['type_cast']\n\n result = re.match(pattern, path)\n\n # Found matching method\n if result and _method == method:\n _args = dict(_args, **result.groupdict())\n\n # Unpack value lists (due to urllib.parse.parse_qs) in case\n # theres only one value available\n for key, val in _args.items():\n if isinstance(_args[key], list) and len(_args[key]) == 1:\n _args[key] = _args[key][0]\n\n # Apply typ-casting if necessary\n for key, val in type_cast.items():\n\n # Not within available _args, no type-cast required\n if key not in _args:\n continue\n\n # Is None or empty, no type-cast required\n if not _args[key]:\n continue\n\n # Try and cast the values\n if isinstance(_args[key], list):\n for i, _val in enumerate(_args[key]):\n _args[key][i] = self._cast(_val, val)\n else:\n _args[key] = self._cast(_args[key], val)\n\n requiered_args 
= self._get_function_args(function)\n for key, val in args.items():\n if key in requiered_args:\n _args[key] = val\n\n return function(**_args)\n\n return None", "docstring": "Calls the first function matching the urls pattern and method.\n\nArgs:\n url (str): Url for which to call a matching function.\n method (str, optional): The method used while registering a\n function.\n Defaults to None\n args (dict, optional): Additional args to be passed to the\n matching function.\n\nReturns:\n The functions return value or `None` if no function was called.", "source": "juraj_google_style"} -{"code": "def list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group, vmss_name,\n link=None):\n\n if link is None:\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', resource_group,\n '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n '/virtualMachines?$expand=instanceView&$select=instanceView',\n '&api-version=', COMP_API])\n else:\n endpoint = link\n return do_get(endpoint, access_token)", "docstring": "Gets one page of a paginated list of scale set VM instance views.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n vmss_name (str): Name of the virtual machine scale set.\n link (str): Optional link to URI to get list (as part of a paginated API query).\n\nReturns:\n HTTP response. JSON body of list of VM instance views.", "source": "juraj_google_style"} -{"code": "def tokenize(self, s, pattern=None, active=None):\n\n if pattern is None:\n if self.tokenize_pattern is None:\n pattern = r'[ \\t]+'\n else:\n pattern = self.tokenize_pattern\n if active is None:\n active = self.active\n return self.group.tokenize(s, pattern=pattern, active=active)", "docstring": "Rewrite and tokenize the input string *s*.\n\nArgs:\n s (str): the input string to process\n pattern (str, optional): the regular expression pattern on\n which to split tokens; defaults to `[ \\t]+`\n active (optional): a collection of external module names\n that may be applied if called\n\nReturns:\n a :class:`~delphin.tokens.YyTokenLattice` containing the\n tokens and their characterization information", "source": "juraj_google_style"} -{"code": "def is_array(self, data_type):\n\n\n # Split off any brackets\n data_type = data_type.split('[')[0].strip()\n\n return data_type.lower() in self.array_types", "docstring": "Check if a type is a known array type\n\nArgs:\n data_type (str): Name of type to check\n\nReturns:\n True if ``data_type`` is a known array type.", "source": "juraj_google_style"} -{"code": "def return_item_count_on_page(self, page=1, total_items=1):\n\n up_to_page = ((page - 1) * self.page_items)\n # Number of items up to the page in question\n\n if total_items > up_to_page:\n # Remove all the items up to the page in question\n #\n count = total_items - up_to_page\n\n if count >= self.page_items:\n # The remaining items are greater than the items per page\n # so the answer is a full page\n return self.page_items\n else:\n # There are less items than a full page,\n #\n return count", "docstring": "Return the number of items on page.\n\nArgs:\n * page = The Page to test for\n * total_items = the total item count\n\nReturns:\n * Integer - Which represents the calculated number of items on page.", "source": "juraj_google_style"} -{"code": "def __call__(self, *args, **kwargs):\n\n try:\n self.logger.debug(\"Received args=%r 
kwargs=%r.\", args, kwargs)\n start = time.perf_counter()\n return self.fn(*args, **kwargs)\n finally:\n delta = time.perf_counter() - start\n self.logger.debug(\"Completed after %.02fms.\", delta * 1000)", "docstring": "Synchronously call this actor.\n\n Parameters:\n *args: Positional arguments to send to the actor.\n **kwargs: Keyword arguments to send to the actor.\n\nReturns:\n Whatever the underlying function backing this actor returns.", "source": "juraj_google_style"} -{"code": "def _create_handler(self, config):\n\n if config is None:\n raise ValueError('No handler config to create handler from.')\n\n if 'name' not in config:\n raise ValueError('Handler name is required.')\n\n handler_name = config['name']\n # try to create handler\n module_name = handler_name.rsplit('.', 1)[0]\n class_name = handler_name.rsplit('.', 1)[-1]\n module = import_module(module_name)\n handler_class = getattr(module, class_name)\n instance = handler_class(**config)\n\n return instance", "docstring": "Creates a handler from its config.\n\n Params:\n config: handler config\n\nReturns:\n handler instance", "source": "juraj_google_style"} -{"code": "def filter_by_pattern(self, pattern):\n\n _filt_values, _filt_datetimes = self._filter_by_pattern(pattern)\n collection = HourlyDiscontinuousCollection(\n self.header.duplicate(), _filt_values, _filt_datetimes)\n collection._validated_a_period = True\n return collection", "docstring": "Filter the Data Collection based on a list of booleans.\n\nArgs:\n pattern: A list of True/False values. Typically, this is a list\n with a length matching the length of the Data Collections values\n but it can also be a pattern to be repeated over the Data Collection.\n\nReturns:\n A new Data Collection with filtered data", "source": "juraj_google_style"} -{"code": "def __call__(self, index, graph):\n\n for c in self.criteria:\n if c(index, graph):\n return True\n return False", "docstring": "Evaluates all the criteria and applies an OR opartion\n\nArgs:\n | ``index`` -- the index of the vertex/edge on which the criterion\n is applied\n | ``graph`` -- the graph on which the criterion is tested", "source": "juraj_google_style"} -{"code": "def get_result_xml(result):\n\n result_xml = Element('result')\n for name, value in [('name', result['name']),\n ('type', ResultType.get_str(result['type'])),\n ('severity', result['severity']),\n ('host', result['host']),\n ('test_id', result['test_id']),\n ('port', result['port']),\n ('qod', result['qod'])]:\n result_xml.set(name, str(value))\n result_xml.text = result['value']\n return result_xml", "docstring": "Formats a scan result to XML format.\n\nArgs:\n result (dict): Dictionary with a scan result.\n\nReturns:\n Result as xml element object.", "source": "juraj_google_style"} -{"code": "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n\n init_params = super(Estimator, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)\n\n init_params['image_name'] = init_params.pop('image')\n return init_params", "docstring": "Convert the job description to init params that can be handled by the class constructor\n\nArgs:\n job_details: the returned job details from a describe_training_job API call.\n model_channel_name (str): Name of the channel where pre-trained model data will be downloaded\n\nReturns:\n dictionary: The transformed init_params", "source": "juraj_google_style"} -{"code": "def serialize_skycoord(o):\n\n representation = o.representation.get_name()\n frame = 
o.frame.name\n\n r = o.represent_as('spherical')\n\n d = dict(\n _type='astropy.coordinates.SkyCoord',\n frame=frame,\n representation=representation,\n lon=r.lon,\n lat=r.lat)\n\n if len(o.distance.unit.to_string()):\n d['distance'] = r.distance\n\n return d", "docstring": "Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.\n\nArgs:\n o (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be serialized.\n\nReturns:\n A dictionary that can be passed to :obj:`json.dumps`.", "source": "juraj_google_style"} -{"code": "def get_lowest_decomposition(self, composition):\n\n\n entries_list = []\n elements = [e.symbol for e in composition.elements]\n for i in range(len(elements)):\n for combi in itertools.combinations(elements, i + 1):\n chemsys = [Element(e) for e in combi]\n x = self.costdb.get_entries(chemsys)\n entries_list.extend(x)\n try:\n pd = PhaseDiagram(entries_list)\n return pd.get_decomposition(composition)\n except IndexError:\n raise ValueError(\"Error during PD building; most likely, \"\n \"cost data does not exist!\")", "docstring": "Get the decomposition leading to lowest cost\n\nArgs:\n composition:\n Composition as a pymatgen.core.structure.Composition\n\nReturns:\n Decomposition as a dict of {Entry: amount}", "source": "juraj_google_style"} -{"code": "def get_template(template_file='', **kwargs):\n\n template = get_template_object(template_file)\n\n LOG.info('Rendering template %s', template.filename)\n for key, value in kwargs.items():\n LOG.debug('%s => %s', key, value)\n\n rendered_json = template.render(**kwargs)\n LOG.debug('Rendered JSON:\\n%s', rendered_json)\n\n return rendered_json", "docstring": "Get the Jinja2 template and renders with dict _kwargs_.\n\nArgs:\n template_file (str): name of the template file\n kwargs: Keywords to use for rendering the Jinja2 template.\n\nReturns:\n String of rendered JSON template.", "source": "juraj_google_style"} -{"code": "def easeInOutExpo(n):\n\n _checkRange(n)\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n n = n * 2\n if n < 1:\n return 0.5 * 2**(10 * (n - 1))\n else:\n n -= 1\n # 0.5 * (-() + 2)\n return 0.5 * (-1 * (2 ** (-10 * n)) + 2)", "docstring": "An exponential tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\n n (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n (float) The line progress, starting at 0.0 and ending at 1.0. 
Suitable for passing to getPointOnLine().", "source": "juraj_google_style"} -{"code": "def get_current_round(self, tournament=1):\n\n # zero is an alias for the current round!\n query = \n arguments = {'tournament': tournament}\n data = self.raw_query(query, arguments)['data']['rounds'][0]\n if data is None:\n return None\n round_num = data[\"number\"]\n return round_num", "docstring": "Get number of the current active round.\n\nArgs:\n tournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\n int: number of the current active round\n\nExample:\n >>> NumerAPI().get_current_round()\n 104", "source": "juraj_google_style"} -{"code": "def create_model_table(self, model):\n\n try:\n return db_model_factory(self.Base, model, self.models)\n except Exception as exc:\n raise ModelError(\n model.name,\n message=\"failed to create in-memory table.\",\n orig_exc=exc,\n context=self.error_context\n )", "docstring": "Creates the table for the given model.\n\nArgs:\n model: A StatikModel instance.\n\nReturns:\n A SQLAlchemy model instance for the table corresponding to this\n particular model.", "source": "juraj_google_style"} -{"code": "def _update_trial_info(self, expr_dir):\n\n trial_id = expr_dir[-8:]\n\n meta_file = os.path.join(expr_dir, EXPR_META_FILE)\n meta = parse_json(meta_file)\n\n result_file = os.path.join(expr_dir, EXPR_RESULT_FILE)\n offset = self._result_offsets.get(trial_id, 0)\n results, new_offset = parse_multiple_json(result_file, offset)\n self._add_results(results, trial_id)\n self._result_offsets[trial_id] = new_offset\n\n if meta:\n TrialRecord.objects \\\n .filter(trial_id=trial_id) \\\n .update(trial_status=meta[\"status\"],\n end_time=timestamp2date(meta.get(\"end_time\", None)))\n elif len(results) > 0:\n metrics = {\n \"episode_reward\": results[-1].get(\"episode_reward_mean\", None),\n \"accuracy\": results[-1].get(\"mean_accuracy\", None),\n \"loss\": results[-1].get(\"loss\", None)\n }\n if results[-1].get(\"done\"):\n TrialRecord.objects \\\n .filter(trial_id=trial_id) \\\n .update(trial_status=\"TERMINATED\",\n end_time=results[-1].get(\"date\", None),\n metrics=str(metrics))\n else:\n TrialRecord.objects \\\n .filter(trial_id=trial_id) \\\n .update(metrics=str(metrics))", "docstring": "Update information for given trial.\n\n Meta file will be loaded if exists, and the trial information\n in db backend will be updated.\n\nArgs:\n expr_dir(str)", "source": "juraj_google_style"} -{"code": "def __init__(self, left, right):\n\n if isinstance(left, Dist) and len(left) > 1:\n if (not isinstance(left, J) or\n evaluation.get_dependencies(*list(left.inverse_map))):\n raise StochasticallyDependentError(\n \"Joint distribution with dependencies not supported.\")\n if isinstance(right, Dist) and len(right) > 1:\n if (not isinstance(right, J) or\n evaluation.get_dependencies(*list(right.inverse_map))):\n raise StochasticallyDependentError(\n \"Joint distribution with dependencies not supported.\")\n\n assert isinstance(left, Dist) or isinstance(right, Dist)\n Dist.__init__(self, left=left, right=right)", "docstring": "Constructor.\n\nArgs:\n left (Dist, numpy.ndarray) : Left hand side.\n right (Dist, numpy.ndarray) : Right hand side.", "source": "juraj_google_style"} -{"code": "def _del_conversation(self, conversation_key: str) -> None:\n\n if conversation_key in self.conversations.keys():\n del self.conversations[conversation_key]\n log.info(f'Deleted conversation, key: {conversation_key}')", "docstring": "Deletes Conversation instance.\n\nArgs:\n 
conversation_key: Conversation key.", "source": "juraj_google_style"} -{"code": "def parse_cytoband(lines):\n\n cytobands = {}\n for line in lines:\n line = line.rstrip()\n splitted_line = line.split('\\t')\n chrom = splitted_line[0].lstrip('chr')\n start = int(splitted_line[1])\n stop = int(splitted_line[2])\n name = splitted_line[3]\n if chrom in cytobands:\n # Add interval to existing tree\n cytobands[chrom][start:stop] = name\n else:\n # Create a new interval tree\n new_tree = intervaltree.IntervalTree()\n # create the interval\n new_tree[start:stop] = name\n # Add the interval tree \n cytobands[chrom] = new_tree\n\n return cytobands", "docstring": "Parse iterable with cytoband coordinates\n\nArgs:\n lines(iterable): Strings on format \"chr1\\t2300000\\t5400000\\tp36.32\\tgpos25\"\n\nReturns:\n cytobands(dict): Dictionary with chromosome names as keys and\n interval trees as values", "source": "juraj_google_style"} -{"code": "def get_available_references(self, datas):\n\n names = []\n\n for k, v in datas.items():\n if k.startswith(RULE_REFERENCE):\n names.append(k[len(RULE_REFERENCE)+1:])\n\n return names", "docstring": "Get available manifest reference names.\n\n Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``\n are available references.\n\n Only name validation is performed on these references.\n\nArgs:\n datas (dict): Data where to search for reference declarations.\n\nReturns:\n list: List of every available reference names. This is the real\n name unprefixed.", "source": "juraj_google_style"} -{"code": "def __init__(self, model, ncats, alpha_lambda=1.0, beta_lambda=2.0,\n freeparams=['alpha_lambda', 'beta_lambda']):\n\n\n # set new limits so the maximum value of `beta` is equal to or\n # greater than the maximum `beta` inferred from the gamma distribution\n # with the constrained `alpha_beta` and `beta_beta` parameters\n new_max_beta = DiscreteGamma(self.PARAMLIMITS[\"alpha_lambda\"][1],\n self.PARAMLIMITS[\"beta_lambda\"][0], ncats)[-1]\n new_limits = model.PARAMLIMITS\n new_limits[\"beta\"] = (new_limits[\"beta\"][0], new_max_beta)\n model.PARAMLIMITS = new_limits\n\n super(GammaDistributedBetaModel, self).__init__(model, \"beta\",\n ncats, alpha_lambda=1.0, beta_lambda=2.0,\n freeparams=['alpha_lambda', 'beta_lambda'])\n\n assert all([scipy.allclose(new_max_beta, m.PARAMLIMITS[\"beta\"][1])\n for m in self._models]), (\"{0}\\n{1}\".format(\n new_max_beta, '\\n'.join([m.PARAMLIMITS[\"beta\"][1]\n for m in self._models])))", "docstring": "Initialize an `GammaDistributedModel` object.\n\n The `lambda_param` is set to \"beta\".\n\nArgs:\n `model` `ncats`,`alpha_lambda`, `beta_lambda`, `freeparams`\n Meaning described in main class doc string for\n `GammaDistributedModel`.", "source": "juraj_google_style"} -{"code": "def __init__(self, *args, **kwargs):\n\n if 'style_class' in kwargs:\n my_class = '%s alert alert-success' % kwargs['style_class']\n kwargs['style_class'] = my_class\n super(SuccessParagraph, self).__init__(**kwargs)\n self.text = Text(*args)", "docstring": "Creates an important paragraph object.\n\nArgs:\n String text, a string to add to the message\n\nReturns:\n None\n\nRaises:\n Errors are propagated\n\n We pass the kwargs on to the base class so an exception is raised\n if invalid keywords were passed. 
See:\n\n http://stackoverflow.com/questions/13124961/\n how-to-pass-arguments-efficiently-kwargs-in-python", "source": "juraj_google_style"} -{"code": "def get_resource_from_handle(self, resource_handle):\n\n repo_type = resource_handle.get(\"repository_type\")\n location = resource_handle.get(\"location\")\n if not (repo_type and location):\n raise ValueError(\"PackageRepositoryManager requires \"\n \"resource_handle objects to have a \"\n \"repository_type and location defined\")\n path = \"%s@%s\" % (repo_type, location)\n repo = self.get_repository(path)\n resource = repo.get_resource_from_handle(resource_handle)\n return resource", "docstring": "Get a resource.\n\nArgs:\n resource_handle (`ResourceHandle`): Handle of the resource.\n\nReturns:\n `PackageRepositoryResource` instance.", "source": "juraj_google_style"} -{"code": "def __init__(self, key, value, timeout):\n\n self.key = key\n self.value = value\n self.expiration = time.clock() * 1000 + timeout", "docstring": "Creates instance of the cache entry.\n\nArgs:\n key: the unique key used to identify and locate the value.\n value: the cached value.\n timeout: time to live for the object in milliseconds", "source": "juraj_google_style"} -{"code": "def ParseOptions(cls, options, configuration_object):\n\n if not isinstance(configuration_object, tools.CLITool):\n raise errors.BadConfigObject(\n 'Configuration object is not an instance of CLITool')\n\n process_memory_limit = cls._ParseNumericOption(\n options, 'process_memory_limit')\n\n if process_memory_limit and process_memory_limit < 0:\n raise errors.BadConfigOption(\n 'Invalid process memory limit value cannot be negative.')\n\n setattr(configuration_object, '_process_memory_limit', process_memory_limit)", "docstring": "Parses and validates options.\n\nArgs:\n options (argparse.Namespace): parser options.\n configuration_object (CLITool): object to be configured by the argument\n helper.\n\nRaises:\n BadConfigObject: when the configuration object is of the wrong type.\n BadConfigOption: when a configuration parameter fails validation.", "source": "juraj_google_style"} -{"code": "def YiqToRgb(y, i, q):\n\n r = y + (i * 0.9562) + (q * 0.6210)\n g = y - (i * 0.2717) - (q * 0.6485)\n b = y - (i * 1.1053) + (q * 1.7020)\n return (r, g, b)", "docstring": "Convert the color from YIQ coordinates to RGB.\n\n Parameters:\n :y:\n Tte Y component value [0...1]\n :i:\n The I component value [0...1]\n :q:\n The Q component value [0...1]\n\nReturns:\n The color as an (r, g, b) tuple in the range:\n r[0...1],\n g[0...1],\n b[0...1]\n\n >>> '(%g, %g, %g)' % Color.YiqToRgb(0.592263, 0.458874, -0.0499818)\n '(1, 0.5, 5.442e-07)'", "source": "juraj_google_style"} -{"code": "def device_type_from_string(cl_device_type_str):\n\n cl_device_type_str = cl_device_type_str.upper()\n if hasattr(cl.device_type, cl_device_type_str):\n return getattr(cl.device_type, cl_device_type_str)\n return None", "docstring": "Converts values like ``gpu`` to a pyopencl device type string.\n\n Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. 
If ``all`` is given, None is returned.\n\nArgs:\n cl_device_type_str (str): The string we want to convert to a device type.\n\nReturns:\n cl.device_type: the pyopencl device type.", "source": "juraj_google_style"} -{"code": "def __init__(self,\n validate_args=False,\n name=\"exp\"):\n\n # forward_min_event_ndims = 0.\n # No forward_min_event_ndims specified as this is done in PowerTransform.\n super(Exp, self).__init__(\n validate_args=validate_args,\n name=name)", "docstring": "Instantiates the `Exp` bijector.\n\nArgs:\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.", "source": "juraj_google_style"} -{"code": "def automatic_linemode(divisions, ibz):\n\n kpoints = list()\n labels = list()\n for path in ibz.kpath[\"path\"]:\n kpoints.append(ibz.kpath[\"kpoints\"][path[0]])\n labels.append(path[0])\n for i in range(1, len(path) - 1):\n kpoints.append(ibz.kpath[\"kpoints\"][path[i]])\n labels.append(path[i])\n kpoints.append(ibz.kpath[\"kpoints\"][path[i]])\n labels.append(path[i])\n\n kpoints.append(ibz.kpath[\"kpoints\"][path[-1]])\n labels.append(path[-1])\n\n return Kpoints(\"Line_mode KPOINTS file\",\n style=Kpoints.supported_modes.Line_mode,\n coord_type=\"Reciprocal\",\n kpts=kpoints,\n labels=labels,\n num_kpts=int(divisions))", "docstring": "Convenient static constructor for a KPOINTS in mode line_mode.\n gamma centered Monkhorst-Pack grids and the number of subdivisions\n along each reciprocal lattice vector determined by the scheme in the\n VASP manual.\n\nArgs:\n divisions: Parameter determining the number of k-points along each\n hight symetry lines.\n ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)\n\nReturns:\n Kpoints object", "source": "juraj_google_style"} -{"code": "def Selector(fields):\n\n\n check_user_facing_fields_dict(fields, 'Selector')\n\n class _Selector(_ConfigSelector):\n def __init__(self):\n key = 'Selector.' + str(DictCounter.get_next_count())\n super(_Selector, self).__init__(\n key=key,\n name=None,\n fields=fields,\n # description='A configuration dictionary with typed fields',\n type_attributes=ConfigTypeAttributes(is_builtin=True),\n )\n\n return _Selector", "docstring": "Selectors are used when you want to be able present several different options to the user but\n force them to select one. 
For example, it would not make much sense to allow them\n to say that a single input should be sourced from a csv and a parquet file: They must choose.\n\n Note that in other type systems this might be called an \"input union.\"\n\nArgs:\n fields (Dict[str, Field]):", "source": "juraj_google_style"} -{"code": "def flatten(lst):\n\n for elm in lst:\n if isinstance(elm, collections.Iterable) and not isinstance(\n elm, string_types):\n for sub in flatten(elm):\n yield sub\n else:\n yield elm", "docstring": "Flatten list.\n\nArgs:\n lst (list): List to flatten\n\nReturns:\n generator", "source": "juraj_google_style"} -{"code": "def GetMessages(self, formatter_mediator, event):\n\n if self.DATA_TYPE != event.data_type:\n raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n event.data_type))\n\n event_values = event.CopyToDict()\n\n read_receipt = event_values.get('read_receipt', None)\n if read_receipt is not None:\n event_values['read_receipt'] = (\n self._READ_RECEIPT.get(read_receipt, 'UNKNOWN'))\n\n message_type = event_values.get('message_type', None)\n if message_type is not None:\n event_values['message_type'] = (\n self._MESSAGE_TYPE.get(message_type, 'UNKNOWN'))\n\n return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\n formatter_mediator (FormatterMediator): mediates the interactions\n between formatters and other components, such as storage and Windows\n EventLog resources.\n event (EventObject): event.\n\nReturns:\n tuple(str, str): formatted message string and short message string.\n\nRaises:\n WrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj_google_style"} -{"code": "def to_string(cls, error_code):\n\n if error_code == cls.RTT_ERROR_CONTROL_BLOCK_NOT_FOUND:\n return 'The RTT Control Block has not yet been found (wait?)'\n return super(JLinkRTTErrors, cls).to_string(error_code)", "docstring": "Returns the string message for the given error code.\n\nArgs:\n cls (JLinkRTTErrors): the ``JLinkRTTErrors`` class\n error_code (int): error code to convert\n\nReturns:\n An error string corresponding to the error code.\n\nRaises:\n ValueError: if the error code is invalid.", "source": "juraj_google_style"} -{"code": "def handle_block(\n mediator_state: MediatorTransferState,\n state_change: Block,\n channelidentifiers_to_channels: ChannelMap,\n pseudo_random_generator: random.Random,\n) -> TransitionResult[MediatorTransferState]:\n\n expired_locks_events = events_to_remove_expired_locks(\n mediator_state,\n channelidentifiers_to_channels,\n state_change.block_number,\n pseudo_random_generator,\n )\n\n secret_reveal_events = events_for_onchain_secretreveal_if_dangerzone(\n channelmap=channelidentifiers_to_channels,\n secrethash=mediator_state.secrethash,\n transfers_pair=mediator_state.transfers_pair,\n block_number=state_change.block_number,\n block_hash=state_change.block_hash,\n )\n\n unlock_fail_events = events_for_expired_pairs(\n channelidentifiers_to_channels=channelidentifiers_to_channels,\n transfers_pair=mediator_state.transfers_pair,\n waiting_transfer=mediator_state.waiting_transfer,\n block_number=state_change.block_number,\n )\n\n iteration = TransitionResult(\n mediator_state,\n unlock_fail_events + secret_reveal_events + expired_locks_events,\n )\n\n return iteration", "docstring": "After Raiden learns about a new block this function must be called to\n handle expiration of the hash time locks.\n\nArgs:\n state: The current 
state.\n\nReturns:\n TransitionResult: The resulting iteration", "source": "juraj_google_style"} -{"code": "def get_session(self, app_path, session_id):\n\n if app_path not in self._applications:\n raise ValueError(\"Application %s does not exist on this server\" % app_path)\n return self._applications[app_path].get_session(session_id)", "docstring": "Get an active a session by name application path and session ID.\n\nArgs:\n app_path (str) :\n The configured application path for the application to return\n a session for.\n\n session_id (str) :\n The session ID of the session to retrieve.\n\nReturns:\n ServerSession", "source": "juraj_google_style"} -{"code": "def size_filter(labeled_grid, min_size):\n\n out_grid = np.zeros(labeled_grid.shape, dtype=int)\n slices = find_objects(labeled_grid)\n j = 1\n for i, s in enumerate(slices):\n box = labeled_grid[s]\n size = np.count_nonzero(box.ravel() == (i + 1))\n if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:\n out_grid[np.where(labeled_grid == i + 1)] = j\n j += 1\n return out_grid", "docstring": "Remove labeled objects that do not meet size threshold criteria.\n\nArgs:\n labeled_grid: 2D output from label method.\n min_size: minimum size of object in pixels.\n\nReturns:\n labeled grid with smaller objects removed.", "source": "juraj_google_style"} -{"code": "def convert(model_path: str, out_file: str):\n\n print('Converting', model_path, 'to', out_file, '...')\n\n import tensorflow as tf\n from precise.model import load_precise_model\n from keras import backend as K\n\n out_dir, filename = split(out_file)\n out_dir = out_dir or '.'\n os.makedirs(out_dir, exist_ok=True)\n\n K.set_learning_phase(0)\n model = load_precise_model(model_path)\n\n out_name = 'net_output'\n tf.identity(model.output, name=out_name)\n print('Output node name:', out_name)\n print('Output folder:', out_dir)\n\n sess = K.get_session()\n\n # Write the graph in human readable\n tf.train.write_graph(sess.graph.as_graph_def(), out_dir, filename + 'txt', as_text=True)\n print('Saved readable graph to:', filename + 'txt')\n\n # Write the graph in binary .pb file\n from tensorflow.python.framework import graph_util\n from tensorflow.python.framework import graph_io\n\n cgraph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [out_name])\n graph_io.write_graph(cgraph, out_dir, filename, as_text=False)\n\n if isfile(model_path + '.params'):\n copyfile(model_path + '.params', out_file + '.params')\n\n print('Saved graph to:', filename)\n\n del sess", "docstring": "Converts an HD5F file from Keras to a .pb for use with TensorFlow\n\nArgs:\n model_path: location of Keras model\n out_file: location to write protobuf", "source": "juraj_google_style"} -{"code": "def prepare(path, name): # type: (str, str) -> None\n\n setup_path = os.path.join(path, 'setup.py')\n if not os.path.exists(setup_path):\n data = textwrap.dedent( % name)\n\n logger.info('Module %s does not provide a setup.py. 
\\nGenerating setup.py' % name)\n\n _files.write_file(setup_path, data)\n\n data = textwrap.dedent()\n\n logger.info('Generating setup.cfg')\n\n _files.write_file(os.path.join(path, 'setup.cfg'), data)\n\n data = textwrap.dedent()\n\n logger.info('Generating MANIFEST.in')\n\n _files.write_file(os.path.join(path, 'MANIFEST.in'), data)", "docstring": "Prepare a Python script (or module) to be imported as a module.\n If the script does not contain a setup.py file, it creates a minimal setup.\n\nArgs:\n path (str): path to directory with the script or module.\n name (str): name of the script or module.", "source": "juraj_google_style"} -{"code": "def _req(self, req):\n\n logger.debug('DUT> %s', req)\n self._log and self.pause()\n times = 3\n res = None\n\n while times:\n times = times - 1\n try:\n self._sendline(req)\n self._expect(req)\n\n line = None\n res = []\n\n while True:\n line = self._readline()\n logger.debug('Got line %s', line)\n\n if line == 'Done':\n break\n\n if line:\n res.append(line)\n break\n\n except:\n logger.exception('Failed to send command')\n self.close()\n self._init()\n\n self._log and self.resume()\n return res", "docstring": "Send command and wait for response.\n\n The command will be repeated 3 times at most in case data loss of serial port.\n\nArgs:\n req (str): Command to send, please do not include new line in the end.\n\nReturns:\n [str]: The output lines", "source": "juraj_google_style"} -{"code": "def tfms_from_model(f_model, sz, aug_tfms=None, max_zoom=None, pad=0, crop_type=CropType.RANDOM,\n tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, norm_y=True, scale=None):\n\n stats = inception_stats if f_model in inception_models else imagenet_stats\n return tfms_from_stats(stats, sz, aug_tfms, max_zoom=max_zoom, pad=pad, crop_type=crop_type,\n tfm_y=tfm_y, sz_y=sz_y, pad_mode=pad_mode, norm_y=norm_y, scale=scale)", "docstring": "Returns separate transformers of images for training and validation.\n Transformers are constructed according to the image statistics given by the model. 
(See tfms_from_stats)\n\nArgs:\n f_model: model, pretrained or not pretrained", "source": "juraj_google_style"} -{"code": "def omega(self, structure, n, u):\n\n l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)\n l0 *= 1e-10 # in A\n weight = float(structure.composition.weight) * 1.66054e-27 # in kg\n vol = structure.volume * 1e-30 # in m^3\n vel = (1e9 * self[0].einsum_sequence([n, u, n, u])\n / (weight / vol)) ** 0.5\n return vel / l0", "docstring": "Finds directional frequency contribution to the heat\n capacity from direction and polarization\n\nArgs:\n structure (Structure): Structure to be used in directional heat\n capacity determination\n n (3x1 array-like): direction for Cv determination\n u (3x1 array-like): polarization direction, note that\n no attempt for verification of eigenvectors is made", "source": "juraj_google_style"} -{"code": "def __init__(\n self,\n deconvolution_layer_list,\n opt_params=None,\n learning_rate=1e-05,\n verbose_mode=False\n ):\n\n for deconvolution_layer in deconvolution_layer_list:\n if isinstance(deconvolution_layer, DeconvolutionLayer) is False:\n raise TypeError()\n\n if opt_params is None:\n opt_params = Adam()\n opt_params.dropout_rate = 0.0\n\n if isinstance(opt_params, OptParams) is False:\n raise TypeError()\n\n logger = getLogger(\"pydbm\")\n handler = StreamHandler()\n if verbose_mode is True:\n handler.setLevel(DEBUG)\n logger.setLevel(DEBUG)\n else:\n handler.setLevel(ERROR)\n logger.setLevel(ERROR)\n\n logger.addHandler(handler)\n\n self.__deconvolution_layer_list = deconvolution_layer_list\n self.__learning_rate = learning_rate\n self.__attenuate_epoch = 50\n self.__opt_params = opt_params\n self.__logger = logger", "docstring": "Init.\n\nArgs:\n deconvolution_layer_list: `list` of `DeconvolutionLayer`.\n opt_params: is-a `OptParams`. 
If `None`, this value will be `Adam`.\n learning_rate: Learning rate.\n verbose_mode: Verbose mode or not.", "source": "juraj_google_style"} -{"code": "def add_string_pairs_from_text_view_element(xib_file, results, text_view, special_ui_components_prefix):\n\n text_view_entry_comment = extract_element_internationalized_comment(text_view)\n if text_view_entry_comment is None:\n return\n\n if text_view.hasAttribute('usesAttributedText') and text_view.attributes['usesAttributedText'].value == 'YES':\n add_string_pairs_from_attributed_ui_element(results, text_view, text_view_entry_comment)\n else:\n try:\n text_view_entry_key = text_view.attributes['text'].value\n results.append((text_view_entry_key, text_view_entry_comment + ' default text value'))\n except KeyError:\n pass\n warn_if_element_not_of_class(text_view, 'TextView', special_ui_components_prefix)", "docstring": "Adds string pairs from a textview element.\n\nArgs:\n xib_file (str): Path to the xib file.\n results (list): The list to add the results to.\n text_view(element): The textview element from the xib, to extract the string pairs from.\n special_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)", "source": "juraj_google_style"} -{"code": "def set_data_length(self, length):\n # type: (int) -> None\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')\n\n len_diff = length - self.info_len\n if len_diff > 0:\n # If we are increasing the length, update the last alloc_desc up\n # to the max of 0x3ffff800, and throw an exception if we overflow.\n new_len = self.alloc_descs[-1][0] + len_diff\n if new_len > 0x3ffff800:\n raise pycdlibexception.PyCdlibInvalidInput('Cannot increase the size of a UDF file beyond the current descriptor')\n self.alloc_descs[-1][0] = new_len\n elif len_diff < 0:\n # We are decreasing the length. 
It's possible we are removing one\n # or more alloc_descs, so run through the list updating all of the\n # descriptors and remove any we no longer need.\n len_left = length\n alloc_descs_needed = 0\n index = 0\n while len_left > 0:\n this_len = min(len_left, 0x3ffff800)\n alloc_descs_needed += 1\n self.alloc_descs[index][0] = this_len\n index += 1\n len_left -= this_len\n\n self.alloc_descs = self.alloc_descs[:alloc_descs_needed]\n\n self.info_len = length", "docstring": "A method to set the length of the data that this UDF File Entry\n points to.\n\n Parameters:\n length - The new length for the data.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def serialize_ndarray_npy(o):\n\n with io.BytesIO() as f:\n np.save(f, o)\n f.seek(0)\n serialized = json.dumps(f.read().decode('latin-1'))\n return dict(\n _type='np.ndarray',\n npy=serialized)", "docstring": "Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save` function.\n This produces totally unreadable (and very un-JSON-like) results (in \"npy\"\n format), but it's basically guaranteed to work in 100% of cases.\n\nArgs:\n o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.\n\nReturns:\n A dictionary that can be passed to :obj:`json.dumps`.", "source": "juraj_google_style"} -{"code": "def extend(self, trajectory):\n\n if self.time_step != trajectory.time_step:\n raise ValueError('Trajectory not extended: Time steps of trajectories is incompatible')\n\n if len(self.species) != len(trajectory.species) and self.species != trajectory.species:\n raise ValueError('Trajectory not extended: species in trajectory do not match')\n\n self.to_positions()\n trajectory.to_positions()\n\n self.frac_coords = np.concatenate((self.frac_coords, trajectory.frac_coords), axis=0)\n self.lattice, self.constant_lattice = self._combine_attribute(self.lattice, trajectory.lattice,\n self.frac_coords.shape[0],\n trajectory.frac_coords.shape[0])\n self.site_properties = self._combine_attribute(self.site_properties, trajectory.site_properties,\n self.frac_coords.shape[0], trajectory.frac_coords.shape[0])", "docstring": "Concatenate another trajectory\n\nArgs:\n trajectory (Trajectory): Trajectory to add", "source": "juraj_google_style"} -{"code": "def get_all(self, attrs: Iterable[FetchAttribute]) \\\n -> Sequence[Tuple[FetchAttribute, MaybeBytes]]:\n\n ret: List[Tuple[FetchAttribute, MaybeBytes]] = []\n for attr in attrs:\n try:\n ret.append((attr.for_response, self.get(attr)))\n except NotFetchable:\n pass\n return ret", "docstring": "Return a list of tuples containing the attribute iself and the bytes\n representation of that attribute from the message.\n\nArgs:\n attrs: The fetch attributes.", "source": "juraj_google_style"} -{"code": "def parse_mapping(mapping_file: Optional[str]) -> configparser.ConfigParser:\n\n LOGGER.debug('Parsing mapping file. Command line: %s', mapping_file)\n\n def parse(mapping_file):\n config = configparser.ConfigParser()\n config.read_file(mapping_file)\n return config\n\n # give precedence to the user-specified file\n if mapping_file is not None:\n LOGGER.debug('Parsing command line mapping file')\n return parse(mapping_file)\n\n # fall back on XDG config location\n xdg_config_dir = xdg.BaseDirectory.load_first_config('pass-git-helper')\n if xdg_config_dir is None:\n raise RuntimeError(\n 'No mapping configured so far at any XDG config location. 
'\n 'Please create {config_file}'.format(\n config_file=DEFAULT_CONFIG_FILE))\n mapping_file = os.path.join(xdg_config_dir, CONFIG_FILE_NAME)\n LOGGER.debug('Parsing mapping file %s', mapping_file)\n with open(mapping_file, 'r') as file_handle:\n return parse(file_handle)", "docstring": "Parse the file containing the mappings from hosts to pass entries.\n\nArgs:\n mapping_file:\n Name of the file to parse. If ``None``, the default file from the\n XDG location is used.", "source": "juraj_google_style"} -{"code": "def mtz(n,c):\n\n\n model = Model(\"atsp - mtz\")\n\n x,u = {},{}\n for i in range(1,n+1):\n u[i] = model.addVar(lb=0, ub=n-1, vtype=\"C\", name=\"u(%s)\"%i)\n for j in range(1,n+1):\n if i != j:\n x[i,j] = model.addVar(vtype=\"B\", name=\"x(%s,%s)\"%(i,j))\n\n for i in range(1,n+1):\n model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, \"Out(%s)\"%i)\n model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, \"In(%s)\"%i)\n\n for i in range(1,n+1):\n for j in range(2,n+1):\n if i != j:\n model.addCons(u[i] - u[j] + (n-1)*x[i,j] <= n-2, \"MTZ(%s,%s)\"%(i,j))\n\n model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), \"minimize\")\n\n model.data = x,u\n return model", "docstring": "mtz: Miller-Tucker-Zemlin's model for the (asymmetric) traveling salesman problem\n (potential formulation)\n Parameters:\n - n: number of nodes\n - c[i,j]: cost for traversing arc (i,j)\n Returns a model, ready to be solved.", "source": "juraj_google_style"} -{"code": "def not_evaluator(conditions, leaf_evaluator):\n\n if not len(conditions) > 0:\n return None\n\n result = evaluate(conditions[0], leaf_evaluator)\n return None if result is None else not result", "docstring": "Evaluates a list of conditions as if the evaluator had been applied\n to a single entry and NOT was applied to the result.\n\nArgs:\n conditions: List of conditions ex: [operand_1, operand_2].\n leaf_evaluator: Function which will be called to evaluate leaf condition values.\n\nReturns:\n Boolean:\n - True if the operand evaluates to False.\n - False if the operand evaluates to True.\n None: if conditions is empty or condition couldn't be evaluated.", "source": "juraj_google_style"} -{"code": "def songs_iter(self, *, page_size=250):\n\n\n\t\tstart_token = None\n\n\t\twhile True:\n\t\t\tresponse = self._call(\n\t\t\t\tmc_calls.TrackFeed,\n\t\t\t\tmax_results=page_size,\n\t\t\t\tstart_token=start_token\n\t\t\t)\n\t\t\titems = response.body.get('data', {}).get('items', [])\n\n\t\t\tif items:\n\t\t\t\tyield items\n\n\t\t\tstart_token = response.body.get('nextPageToken')\n\t\t\tif start_token is None:\n\t\t\t\tbreak", "docstring": "Get a paged iterator of library songs.\n\n Parameters:\n page_size (int, Optional): The maximum number of results per returned page.\n Max allowed is ``49995``.\n Default: ``250``\n\nYields:\n list: Song dicts.", "source": "juraj_google_style"} -{"code": "def get(self, request, *args, **kwargs):\n\n context = self.get_context_data(**kwargs)\n context.update(self.extra_context)\n context['crumbs'] = self.get_crumbs()\n context['title'] = self.title\n context['suit'] = 'suit' in settings.INSTALLED_APPS\n if context.get('dashboard_grid', None) is None and self.grid:\n context['dashboard_grid'] = self.grid\n return self.render_to_response(context)", "docstring": "Django view get function.\n\n Add items of extra_context, crumbs and grid to context.\n\nArgs:\n request (): Django's request object.\n *args (): request args.\n **kwargs (): request kwargs.\n\nReturns:\n response: render 
to response with context.", "source": "juraj_google_style"} -{"code": "def get_smeared_densities(self, sigma):\n\n from scipy.ndimage.filters import gaussian_filter1d\n smeared_dens = {}\n diff = [self.energies[i + 1] - self.energies[i]\n for i in range(len(self.energies) - 1)]\n avgdiff = sum(diff) / len(diff)\n for spin, dens in self.densities.items():\n smeared_dens[spin] = gaussian_filter1d(dens, sigma / avgdiff)\n return smeared_dens", "docstring": "Returns the Dict representation of the densities, {Spin: densities},\n but with a Gaussian smearing of std dev sigma applied about the fermi\n level.\n\nArgs:\n sigma: Std dev of Gaussian smearing function.\n\nReturns:\n Dict of Gaussian-smeared densities.", "source": "juraj_google_style"} -{"code": "def init_from_acceptor(self, acceptor):\n\n self.states = copy.deepcopy(acceptor.states)\n self.alphabet = copy.deepcopy(acceptor.alphabet)\n self.osyms = copy.deepcopy(acceptor.osyms)\n self.isyms = copy.deepcopy(acceptor.isyms)", "docstring": "Adds a sink state\n\nArgs:\n alphabet (list): The input alphabet\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def ScanForStorageMediaImage(self, source_path_spec):\n\n try:\n type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(\n source_path_spec, resolver_context=self._resolver_context)\n except RuntimeError as exception:\n raise errors.BackEndError((\n 'Unable to process source path specification with error: '\n '{0!s}').format(exception))\n\n if not type_indicators:\n # The RAW storage media image type cannot be detected based on\n # a signature so we try to detect it based on common file naming schemas.\n file_system = resolver.Resolver.OpenFileSystem(\n source_path_spec, resolver_context=self._resolver_context)\n raw_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)\n\n try:\n # The RAW glob function will raise a PathSpecError if the path\n # specification is unsuitable for globbing.\n glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)\n except errors.PathSpecError:\n glob_results = None\n\n file_system.Close()\n\n if not glob_results:\n return None\n\n return raw_path_spec\n\n if len(type_indicators) > 1:\n raise errors.BackEndError(\n 'Unsupported source found more than one storage media image types.')\n\n return path_spec_factory.Factory.NewPathSpec(\n type_indicators[0], parent=source_path_spec)", "docstring": "Scans the path specification for a supported storage media image format.\n\nArgs:\n source_path_spec (PathSpec): source path specification.\n\nReturns:\n PathSpec: storage media image path specification or None if no supported\n storage media image type was found.\n\nRaises:\n BackEndError: if the source cannot be scanned or more than one storage\n media image type is found.", "source": "juraj_google_style"} -{"code": "def generate(self, text):\n\n if not text:\n raise Exception(\"No text to speak\")\n\n if len(text) >= self.MAX_CHARS:\n raise Exception(\"Number of characters must be less than 2000\")\n\n params = self.__params.copy()\n params[\"text\"] = text\n self._data = requests.get(self.TTS_URL, params=params,\n stream=False).iter_content()", "docstring": "Try to get the generated file.\n\nArgs:\n text: The text that you want to generate.", "source": "juraj_google_style"} -{"code": "def WMITimeStrToRDFDatetime(self, timestr):\n\n # We use manual parsing here because the time functions provided (datetime,\n # dateutil) do not properly deal with timezone information.\n 
offset_minutes = timestr[21:]\n year = timestr[:4]\n month = timestr[4:6]\n day = timestr[6:8]\n hours = timestr[8:10]\n minutes = timestr[10:12]\n seconds = timestr[12:14]\n microseconds = timestr[15:21]\n\n unix_seconds = calendar.timegm(\n tuple(map(int, [year, month, day, hours, minutes, seconds])))\n unix_seconds -= int(offset_minutes) * 60\n return rdfvalue.RDFDatetime(unix_seconds * 1e6 + int(microseconds))", "docstring": "Return RDFDatetime from string like 20140825162259.000000-420.\n\nArgs:\n timestr: WMI time string\n\nReturns:\n rdfvalue.RDFDatetime\n\n We have some timezone manipulation work to do here because the UTC offset is\n in minutes rather than +-HHMM", "source": "juraj_google_style"} -{"code": "def set_name(self, name, anyway=False):\n\n set_name(self.startEA, name, anyway=anyway)", "docstring": "Set Function Name.\n\n Default behavior throws an exception when setting to a name that already exists in\n the IDB. to make IDA automatically add a counter to the name (like in the GUI,)\n use `anyway=True`.\n\nArgs:\n name: Desired name.\n anyway: `True` to set anyway.", "source": "juraj_google_style"} -{"code": "def __init__(self,\n base_url: str = None,\n *,\n timeout_connection: int = 30\n ) -> None:\n\n if base_url is None:\n base_url = \"http://127.0.0.1:6060\"\n self.__api = APIClient(base_url, timeout_connection=timeout_connection)\n self.__bugs = BugManager(self.__api)\n self.__containers = ContainerManager(self.__api)\n self.__files = FileManager(self.__api, self.__bugs)\n self.__docker = DockerManager(self.__api)", "docstring": "Constructs a new client for communicating with a BugZoo server.\n\n Parameters:\n base_url: the base URL of the BugZoo server.\n timeout_connection: the maximum number of seconds to wait whilst\n attempting to connect to the server before declaring the\n connection to have failed.\n\nRaises:\n ConnectionFailure: if a connection to the server could not be\n established within the timeout window.", "source": "juraj_google_style"} -{"code": "def checkPermissions(permissions=[], obj=None):\n\n if not obj:\n return False\n sm = getSecurityManager()\n for perm in permissions:\n if not sm.checkPermission(perm, obj):\n return ''\n return True", "docstring": "Checks if a user has permissions for a given object.\n\nArgs:\n permissions: The permissions the current user must be compliant with\n obj: The object for which the permissions apply\n\nReturns:\n 1 if the user complies with all the permissions for the given object.\n Otherwise, it returns empty.", "source": "juraj_google_style"} -{"code": "def swo_disable(self, port_mask):\n\n res = self._dll.JLINKARM_SWO_DisableTarget(port_mask)\n if res != 0:\n raise errors.JLinkException(res)\n return None", "docstring": "Disables ITM & Stimulus ports.\n\nArgs:\n self (JLink): the ``JLink`` instance\n port_mask (int): mask specifying which ports to disable\n\nReturns:\n ``None``\n\nRaises:\n JLinkException: on error", "source": "juraj_google_style"} -{"code": "def _environment_variables(**kwargs):\n # type: (Any) -> Any\n\n\n hdx_key = os.getenv('HDX_KEY')\n if hdx_key is not None:\n kwargs['hdx_key'] = hdx_key\n hdx_url = os.getenv('HDX_URL')\n if hdx_url is not None:\n kwargs['hdx_url'] = hdx_url\n else:\n hdx_site = os.getenv('HDX_SITE')\n if hdx_site is not None:\n kwargs['hdx_site'] = hdx_site\n return kwargs", "docstring": "Overwrite keyword arguments with environment variables\n\nArgs:\n **kwargs: See below\n hdx_url (str): HDX url to use. Overrides hdx_site.\n hdx_site (str): HDX site to use eg. 
prod, test. Defaults to test.\n hdx_key (str): Your HDX key. Ignored if hdx_read_only = True.\n\nReturns:\n kwargs: Changed keyword arguments", "source": "juraj_google_style"} -{"code": "def _get_parsed_args(command_name, doc, argv):\n # type: (str, str, typing.List[str]) -> typing.Dict[str, typing.Any]\n\n _LOGGER.debug('Parsing docstring: with arguments %s.', doc, argv)\n args = docopt(doc, argv=argv)\n if command_name == settings.command:\n args[command_name] = True\n return args", "docstring": "Parse the docstring with docopt.\n\nArgs:\n command_name: The name of the subcommand to parse.\n doc: A docopt-parseable string.\n argv: The list of arguments to pass to docopt during parsing.\n\nReturns:\n The docopt results dictionary. If the subcommand has the same name as\n the primary command, the subcommand value will be added to the\n dictionary.", "source": "juraj_google_style"} -{"code": "def _process_book(book_url):\n\n data = DOWNER.download(book_url)\n dom = dhtmlparser.parseString(data)\n\n details_tags = dom.find(\"div\", {\"id\": \"contentDetail\"})\n\n assert details_tags, \"Can't find details of the book.\"\n\n details = details_tags[0]\n\n # parse required informations\n title = _parse_title(dom, details)\n authors = _parse_authors(details)\n publisher = _parse_publisher(details)\n price = _parse_price(details)\n pages, binding = _parse_pages_binding(details)\n\n pub = Publication(\n title,\n authors,\n price,\n publisher\n )\n\n # parse optional informations\n pub.optionals.URL = book_url\n pub.optionals.binding = binding\n\n pub.optionals.pages = pages\n pub.optionals.ISBN, pub.optionals.EAN = _parse_ISBN_EAN(details)\n pub.optionals.edition = _parse_edition(details)\n pub.optionals.description = _parse_description(details)\n\n return pub", "docstring": "Parse available informations about book from the book details page.\n\nArgs:\n book_url (str): Absolute URL of the book.\n\nReturns:\n obj: :class:`structures.Publication` instance with book details.", "source": "juraj_google_style"} -{"code": "def get_pipeline_path(pipeline_name, working_directory):\n\n logger.debug(\"starting\")\n\n # look for name.yaml in the pipelines/ sub-directory\n logger.debug(f\"current directory is {working_directory}\")\n\n # looking for {cwd}/pipelines/[pipeline_name].yaml\n pipeline_path = os.path.abspath(os.path.join(\n working_directory,\n 'pipelines',\n pipeline_name + '.yaml'))\n\n if os.path.isfile(pipeline_path):\n logger.debug(f\"Found {pipeline_path}\")\n else:\n logger.debug(f\"{pipeline_name} not found in current \"\n \"directory/pipelines folder. Looking in pypyr install \"\n \"directory instead.\")\n pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n logger.debug(f\"pypyr installation directory is: {pypyr_dir}\")\n pipeline_path = os.path.abspath(os.path.join(\n pypyr_dir,\n 'pipelines',\n pipeline_name + '.yaml'))\n\n if os.path.isfile(pipeline_path):\n logger.debug(f\"Found {pipeline_path}\")\n else:\n raise PipelineNotFoundError(f\"{pipeline_name}.yaml not found in \"\n f\"either \"\n f\"{working_directory}/pipelines \"\n f\"or {pypyr_dir}/pipelines\")\n\n logger.debug(\"done\")\n return pipeline_path", "docstring": "Look for the pipeline in the various places it could be.\n\n First checks the cwd. Then checks pypyr/pipelines dir.\n\nArgs:\n pipeline_name: string. Name of pipeline to find\n working_directory: string. 
Path in which to look for pipeline_name.yaml\n\nReturns:\n Absolute path to the pipeline_name.yaml file\n\nRaises:\n PipelineNotFoundError: if pipeline_name.yaml not found in working_dir\n or in {pypyr install dir}/pipelines.", "source": "juraj_google_style"} -{"code": "def fresh(t, non_generic):\n\n\n mappings = {} # A mapping of TypeVariables to TypeVariables\n\n def freshrec(tp):\n p = prune(tp)\n if isinstance(p, TypeVariable):\n if is_generic(p, non_generic):\n if p not in mappings:\n mappings[p] = TypeVariable()\n return mappings[p]\n else:\n return p\n elif isinstance(p, dict):\n return p # module\n elif isinstance(p, Collection):\n return Collection(*[freshrec(x) for x in p.types])\n elif isinstance(p, Scalar):\n return Scalar([freshrec(x) for x in p.types])\n elif isinstance(p, TypeOperator):\n return TypeOperator(p.name, [freshrec(x) for x in p.types])\n elif isinstance(p, MultiType):\n return MultiType([freshrec(x) for x in p.types])\n else:\n assert False, \"missing freshrec case {}\".format(type(p))\n\n return freshrec(t)", "docstring": "Makes a copy of a type expression.\n\n The type t is copied. The generic variables are duplicated and the\n non_generic variables are shared.\n\nArgs:\n t: A type to be copied.\n non_generic: A set of non-generic TypeVariables", "source": "juraj_google_style"} -{"code": "def profile_view(request, user_id=None):\n\n if request.user.is_eighthoffice and \"full\" not in request.GET and user_id is not None:\n return redirect(\"eighth_profile\", user_id=user_id)\n\n if user_id is not None:\n try:\n profile_user = User.objects.get(id=user_id)\n\n if profile_user is None:\n raise Http404\n except User.DoesNotExist:\n raise Http404\n else:\n profile_user = request.user\n\n num_blocks = 6\n\n eighth_schedule = []\n start_block = EighthBlock.objects.get_first_upcoming_block()\n\n blocks = []\n if start_block:\n blocks = [start_block] + list(start_block.next_blocks(num_blocks - 1))\n\n for block in blocks:\n sch = {\"block\": block}\n try:\n sch[\"signup\"] = EighthSignup.objects.get(scheduled_activity__block=block, user=profile_user)\n except EighthSignup.DoesNotExist:\n sch[\"signup\"] = None\n except MultipleObjectsReturned:\n client.captureException()\n sch[\"signup\"] = None\n eighth_schedule.append(sch)\n\n if profile_user.is_eighth_sponsor:\n sponsor = EighthSponsor.objects.get(user=profile_user)\n start_date = get_start_date(request)\n eighth_sponsor_schedule = (EighthScheduledActivity.objects.for_sponsor(sponsor).filter(block__date__gte=start_date).order_by(\n \"block__date\", \"block__block_letter\"))\n eighth_sponsor_schedule = eighth_sponsor_schedule[:10]\n else:\n eighth_sponsor_schedule = None\n\n admin_or_teacher = (request.user.is_eighth_admin or request.user.is_teacher)\n can_view_eighth = (profile_user.can_view_eighth or request.user == profile_user)\n eighth_restricted_msg = (not can_view_eighth and admin_or_teacher)\n\n if not can_view_eighth and not request.user.is_eighth_admin and not request.user.is_teacher:\n eighth_schedule = []\n\n has_been_nominated = profile_user.username in [\n u.nominee.username for u in request.user.nomination_votes.filter(position__position_name=settings.NOMINATION_POSITION)\n ]\n context = {\n \"profile_user\": profile_user,\n \"eighth_schedule\": eighth_schedule,\n \"can_view_eighth\": can_view_eighth,\n \"eighth_restricted_msg\": eighth_restricted_msg,\n \"eighth_sponsor_schedule\": eighth_sponsor_schedule,\n \"nominations_active\": settings.NOMINATIONS_ACTIVE,\n \"nomination_position\": 
settings.NOMINATION_POSITION,\n \"has_been_nominated\": has_been_nominated\n }\n return render(request, \"users/profile.html\", context)", "docstring": "Displays a view of a user's profile.\n\nArgs:\n user_id\n The ID of the user whose profile is being viewed. If not\n specified, show the user's own profile.", "source": "juraj_google_style"} -{"code": "def _parse_alt_url(html_chunk):\n\n url_list = html_chunk.find(\"a\", fn=has_param(\"href\"))\n url_list = map(lambda x: x.params[\"href\"], url_list)\n url_list = filter(lambda x: not x.startswith(\"autori/\"), url_list)\n\n if not url_list:\n return None\n\n return normalize_url(BASE_URL, url_list[0])", "docstring": "Parse URL from alternative location if not found where it should be.\n\nArgs:\n html_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\n str: Book's URL.", "source": "juraj_google_style"} -{"code": "def _ParseRecords(self, parser_mediator, evt_file):\n\n # To handle errors when parsing a Windows EventLog (EVT) file in the most\n # granular way the following code iterates over every event record. The\n # call to evt_file.get_record() and access to members of evt_record should\n # be called within a try-except.\n\n for record_index in range(evt_file.number_of_records):\n if parser_mediator.abort:\n break\n\n try:\n evt_record = evt_file.get_record(record_index)\n self._ParseRecord(parser_mediator, record_index, evt_record)\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to parse event record: {0:d} with error: {1!s}'.format(\n record_index, exception))\n\n for record_index in range(evt_file.number_of_recovered_records):\n if parser_mediator.abort:\n break\n\n try:\n evt_record = evt_file.get_recovered_record(record_index)\n self._ParseRecord(\n parser_mediator, record_index, evt_record, recovered=True)\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning((\n 'unable to parse recovered event record: {0:d} with error: '\n '{1!s}').format(record_index, exception))", "docstring": "Parses Windows EventLog (EVT) records.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n evt_file (pyevt.file): Windows EventLog (EVT) file.", "source": "juraj_google_style"} -{"code": "def __init__(self, artifacts_registry, knowledge_base):\n\n super(ArtifactDefinitionsFilterHelper, self).__init__()\n self._artifacts_registry = artifacts_registry\n self._knowledge_base = knowledge_base\n\n self.file_system_artifact_names = set()\n self.file_system_find_specs = []\n self.registry_artifact_names = set()\n self.registry_find_specs = []", "docstring": "Initializes an artifact definitions filter helper.\n\nArgs:\n artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifact\n definitions registry.\n knowledge_base (KnowledgeBase): contains information from the source\n data needed for filtering.", "source": "juraj_google_style"} -{"code": "def _check_and_parse_eltorito(self, br):\n # type: (headervd.BootRecord) -> None\n\n if br.boot_system_identifier != b'EL TORITO SPECIFICATION'.ljust(32, b'\\x00'):\n return\n\n if self.eltorito_boot_catalog is not None:\n raise pycdlibexception.PyCdlibInvalidISO('Only one El Torito boot record is allowed')\n\n # According to the El Torito specification, section 2.0, the El\n # Torito boot record must be at extent 17.\n if br.extent_location() != 17:\n raise pycdlibexception.PyCdlibInvalidISO('El Torito Boot Record must be at extent 17')\n\n # 
Now that we have verified that the BootRecord is an El Torito one\n # and that it is sane, we go on to parse the El Torito Boot Catalog.\n # Note that the Boot Catalog is stored as a file in the ISO, though\n # we ignore that for the purposes of parsing.\n\n self.eltorito_boot_catalog = eltorito.EltoritoBootCatalog(br)\n eltorito_boot_catalog_extent, = struct.unpack_from('=L', br.boot_system_use[:4], 0)\n\n old = self._cdfp.tell()\n self._cdfp.seek(eltorito_boot_catalog_extent * self.pvd.logical_block_size())\n data = self._cdfp.read(32)\n while not self.eltorito_boot_catalog.parse(data):\n data = self._cdfp.read(32)\n self._cdfp.seek(old)", "docstring": "An internal method to examine a Boot Record and see if it is an\n El Torito Boot Record. If it is, parse the El Torito Boot Catalog,\n verification entry, initial entry, and any additional section entries.\n\n Parameters:\n br - The boot record to examine for an El Torito signature.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def __init__(self, path):\n\n # if list, do a pathjoin\n if isinstance(path, list):\n path = os.path.join(*path)\n\n # store as pathlib.Path, to avoid unexpected relative\n # path redirection\n self.path = Path(path).resolve()\n\n # ensure that path exists\n if not self.path.is_dir():\n log.error(\"No path exists at {}\".format(self.path))\n err_msg = \"Path '{}' is not a directory.\".format(self.path)\n raise NotADirectoryError(err_msg)\n\n log.info(\"%d Serving static files out of %s\" % (id(self), self.path))", "docstring": "Construct Static method.\n\nArgs:\n path (str or list): The directory path to search for files.\n If this is a list, the paths will be path-joined\n automatically.", "source": "juraj_google_style"} -{"code": "def _is_valid_netmask(self, prefixlen):\n\n try:\n prefixlen = int(prefixlen)\n except ValueError:\n return False\n return 0 <= prefixlen <= self._max_prefixlen", "docstring": "Verify that the netmask/prefixlen is valid.\n\nArgs:\n prefixlen: A string, the netmask in prefix length format.\n\nReturns:\n A boolean, True if the prefix represents a valid IPv6\n netmask.", "source": "juraj_google_style"} -{"code": "def record(self, crc_bytes):\n # type: (bytes) -> bytes\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('UDF Descriptor Tag not initialized')\n\n crc_byte_len = len(crc_bytes)\n if self.desc_crc_length >= 0:\n crc_byte_len = self.desc_crc_length\n\n # We need to compute the checksum, but we'll do that by first creating\n # the output buffer with the csum field set to 0, computing the csum,\n # and then setting that record back as usual.\n rec = bytearray(struct.pack(self.FMT, self.tag_ident, self.desc_version,\n 0, 0, self.tag_serial_number,\n crc_ccitt(crc_bytes[:crc_byte_len]),\n crc_byte_len, self.tag_location))\n\n rec[4] = _compute_csum(rec)\n\n return bytes(rec)", "docstring": "A method to generate the string representing this UDF Descriptor Tag.\n\n Parameters:\n crc_bytes - The string to compute the CRC over.\n\nReturns:\n A string representing this UDF Descriptor Tag.", "source": "juraj_google_style"} -{"code": "def content_create(self, key, model, contentid, meta, protected=False):\n\n params = {'id': contentid, 'meta': meta}\n if protected is not False:\n params['protected'] = 'true'\n data = urlencode(params)\n path = PROVISION_MANAGE_CONTENT + model + '/'\n return self._request(path,\n key, data, 'POST', self._manage_by_cik)", "docstring": "Creates a content entity bucket with the given `contentid`.\n\n This method 
maps to\n https://github.com/exosite/docs/tree/master/provision#post---create-content-entity.\n\nArgs:\n key: The CIK or Token for the device\n model:\n contentid: The ID used to name the entity bucket\n meta:\n protected: Whether or not this is restricted to certain device serial numbers only.", "source": "juraj_google_style"} -{"code": "def update_dynamic_gene_list(self, case, hgnc_symbols=None, hgnc_ids=None,\n phenotype_ids=None, build='37'):\n\n dynamic_gene_list = []\n res = []\n if hgnc_ids:\n LOG.info(\"Fetching genes by hgnc id\")\n res = self.hgnc_collection.find({'hgnc_id': {'$in': hgnc_ids}, 'build': build})\n elif hgnc_symbols:\n LOG.info(\"Fetching genes by hgnc symbols\")\n res = []\n for symbol in hgnc_symbols:\n for gene_obj in self.gene_by_alias(symbol=symbol, build=build):\n res.append(gene_obj)\n\n for gene_obj in res:\n dynamic_gene_list.append(\n {\n 'hgnc_symbol': gene_obj['hgnc_symbol'],\n 'hgnc_id': gene_obj['hgnc_id'],\n 'description': gene_obj['description'],\n }\n )\n\n LOG.info(\"Update dynamic gene panel for: %s\", case['display_name'])\n updated_case = self.case_collection.find_one_and_update(\n {'_id': case['_id']},\n {'$set': {'dynamic_gene_list': dynamic_gene_list,\n 'dynamic_panel_phenotypes': phenotype_ids or []}},\n return_document=pymongo.ReturnDocument.AFTER\n )\n LOG.debug(\"Case updated\")\n return updated_case", "docstring": "Update the dynamic gene list for a case\n\n Adds a list of dictionaries to case['dynamic_gene_list'] that looks like\n\n {\n hgnc_symbol: str,\n hgnc_id: int,\n description: str\n }\n\nArgs:\n case (dict): The case that should be updated\n hgnc_symbols (iterable): A list of hgnc_symbols\n hgnc_ids (iterable): A list of hgnc_ids\n\nReturns:\n updated_case(dict)", "source": "juraj_google_style"} -{"code": "def file_exists(file_path, credentials=None):\n\n if file_path.startswith('gs://'):\n return _file_exists_in_gcs(file_path, credentials)\n else:\n return os.path.isfile(file_path)", "docstring": "Check whether the file exists, on local disk or GCS.\n\nArgs:\n file_path: The target file path; should have the 'gs://' prefix if in gcs.\n credentials: Optional credential to be used to load the file from gcs.\n\nReturns:\n True if the file's there.", "source": "juraj_google_style"} -{"code": "def _open_streaming_interface(self, connection_id, callback):\n\n\n try:\n context = self.connections.get_context(connection_id)\n except ArgumentError:\n callback(connection_id, self.id, False, \"Could not find connection information\")\n return\n\n self._logger.info(\"Attempting to enable streaming\")\n self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))\n\n try:\n characteristic = context['services'][TileBusService][StreamingChar]\n except KeyError:\n self.connections.finish_operation(\n connection_id,\n False,\n \"Can't find characteristic to open streaming interface\"\n )\n return\n\n context['parser'] = IOTileReportParser(report_callback=self._on_report, error_callback=self._on_report_error)\n context['parser'].context = connection_id\n\n def on_report_chunk_received(report_chunk):\n\n context['parser'].add_data(report_chunk)\n\n # Register our callback function in the notifications callbacks\n self._register_notification_callback(\n context['connection_handle'],\n characteristic.value_handle,\n on_report_chunk_received\n )\n\n self.bable.set_notification(\n enabled=True,\n connection_handle=context['connection_handle'],\n characteristic=characteristic,\n 
on_notification_set=[self._on_interface_opened, context],\n on_notification_received=self._on_notification_received,\n timeout=1.0,\n sync=False\n )", "docstring": "Enable streaming interface for this IOTile device\n\nArgs:\n connection_id (int): The unique identifier for the connection\n callback (callback): Callback to be called when this command finishes\n callback(conn_id, adapter_id, success, failure_reason)", "source": "juraj_google_style"} -{"code": "def _send_offset_requests(self, timestamps):\n\n timestamps_by_node = collections.defaultdict(dict)\n for partition, timestamp in six.iteritems(timestamps):\n node_id = self._client.cluster.leader_for_partition(partition)\n if node_id is None:\n self._client.add_topic(partition.topic)\n log.debug(\"Partition %s is unknown for fetching offset,\"\n \" wait for metadata refresh\", partition)\n return Future().failure(Errors.StaleMetadata(partition))\n elif node_id == -1:\n log.debug(\"Leader for partition %s unavailable for fetching \"\n \"offset, wait for metadata refresh\", partition)\n return Future().failure(\n Errors.LeaderNotAvailableError(partition))\n else:\n timestamps_by_node[node_id][partition] = timestamp\n\n # Aggregate results until we have all\n list_offsets_future = Future()\n responses = []\n node_count = len(timestamps_by_node)\n\n def on_success(value):\n responses.append(value)\n if len(responses) == node_count:\n offsets = {}\n for r in responses:\n offsets.update(r)\n list_offsets_future.success(offsets)\n\n def on_fail(err):\n if not list_offsets_future.is_done:\n list_offsets_future.failure(err)\n\n for node_id, timestamps in six.iteritems(timestamps_by_node):\n _f = self._send_offset_request(node_id, timestamps)\n _f.add_callback(on_success)\n _f.add_errback(on_fail)\n return list_offsets_future", "docstring": "Fetch offsets for each partition in timestamps dict. 
This may send\n request to multiple nodes, based on who is Leader for partition.\n\nArgs:\n timestamps (dict): {TopicPartition: int} mapping of fetching\n timestamps.\n\nReturns:\n Future: resolves to a mapping of retrieved offsets", "source": "juraj_google_style"} -{"code": "def bars(n=3,n_categories=3,prefix='category',columns=None,mode='abc'):\n\n\tcategories=[]\n\tif not columns:\n\t\tcolumns=getName(n,mode=mode)\n\tfor i in range(n_categories):\n\t\tcategories.extend([prefix+str(i+1)])\n\tdata=dict([(x,np.random.randint(1,100,n_categories)) for x in columns])\n\treturn pd.DataFrame(data,index=categories)", "docstring": "Returns a DataFrame with the required format for\n a bar plot\n\n Parameters:\n -----------\n n : int\n Number of points for each trace\n n_categories : int\n Number of categories for each point\n prefix : string\n Name for each category\n columns : [str]\n List of column names\n mode : string\n Format for each item\n 'abc' for alphabet columns\n 'stocks' for random stock names", "source": "juraj_google_style"} -{"code": "def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:\n\n\n logger.debug(\"Yielding from zip iterator\")\n if isinstance(variables, list):\n for item in variables:\n yield list(variable_matrix(item, parent, \"zip\"))\n else:\n yield list(variable_matrix(variables, parent, \"zip\"))", "docstring": "Apply the zip operator to a set of variables.\n\n This uses the python zip iterator to combine multiple lists of variables such that\n the nth variable in each list is aligned.\n\nArgs:\n variables: The variables object\n parent: Unused", "source": "juraj_google_style"} -{"code": "def post(self, json=None):\n\n return self._call('post', url=self.endpoint, json=json)", "docstring": "Send a POST request and return the JSON decoded result.\n\nArgs:\n json (dict, optional): Object to encode and send in request.\n\nReturns:\n mixed: JSON decoded response data.", "source": "juraj_google_style"} -{"code": "def timed_display(msg):\n\n def print_header(msg, newline=True):\n\n if sys.stdout.isatty():\n print('\\r', end=Style.BRIGHT + Fore.BLUE)\n print(' {} '.format(msg).center(_ncols(), '='),\n end='\\n{}'.format(Style.RESET_ALL)\n if newline else Style.RESET_ALL)\n sys.stdout.flush()\n\n def print_message(msg):\n\n if sys.stdout.isatty():\n print('\\r', end='')\n msg = msg.ljust(_ncols())\n print(msg, end='')\n sys.stdout.flush()\n\n start = time.time()\n print_header(msg)\n with hidden_cursor():\n try:\n yield print_message\n finally:\n delta = time.time() - start\n print_header('completed in {:.2f}s'.format(delta), False)", "docstring": "A timed block to run tasks with titles and success/failure messages.\n\nArgs:\n msg: The header message to print at the beginning of the timed block.", "source": "juraj_google_style"} -{"code": "def __init__(self, callback):\n\n super(LLDP, self).__init__(callback)\n self._lldp = brcd_lldp(callback=pynos.utilities.return_xml)", "docstring": "LLDP init method.\n\nArgs:\n callback (function): Callback function that will be called for each\n action.\n\nReturns:\n LLDP Object\n\nRaises:\n None", "source": "juraj_google_style"} -{"code": "def getFingerprint(self, text):\n\n fp = self._fullClient.getFingerprintForText(text)\n return fp.positions", "docstring": "Get the semantic fingerprint of the input text.\n\nArgs:\n text, str: The text to be evaluated\n\nReturns:\n list of str: the positions of the semantic fingerprint\n\nRaises:\n CorticalioException: if the request was not successful", "source": 
"juraj_google_style"} -{"code": "def get_py_dtypes(data_frame):\n\n df_py_dtypes = data_frame.dtypes.map(get_py_dtype).to_frame('dtype').copy()\n df_py_dtypes.loc[df_py_dtypes.dtype == object, 'dtype'] = \\\n (df_py_dtypes.loc[df_py_dtypes.dtype == object].index\n .map(lambda c: str if data_frame[c]\n .map(lambda v: isinstance(v, str)).all() else object))\n\n df_py_dtypes.insert(0, 'i', range(df_py_dtypes.shape[0]))\n df_py_dtypes.index.name = 'column'\n return df_py_dtypes", "docstring": "Return a `pandas.DataFrame` containing Python type information for the\n columns in `data_frame`.\n\nArgs:\n data_frame (pandas.DataFrame) : Data frame containing data columns.\n\nReturns:\n (pandas.DataFrame) : Data frame indexed by the column names from\n `data_frame`, with the columns `'i'` and `'dtype'` indicating the\n index and Python type of the corresponding `data_frame` column,\n respectively.", "source": "juraj_google_style"} -{"code": "def __init__(self, a_schedule, b_schedule, merged_schedule,\n problem_reporter):\n\n self.a_schedule = a_schedule\n self.b_schedule = b_schedule\n self.merged_schedule = merged_schedule\n self.a_merge_map = {}\n self.b_merge_map = {}\n self.a_zone_map = {}\n self.b_zone_map = {}\n self._mergers = []\n self._idnum = max(self._FindLargestIdPostfixNumber(self.a_schedule),\n self._FindLargestIdPostfixNumber(self.b_schedule))\n\n self.problem_reporter = problem_reporter", "docstring": "Initialise the merger.\n\n Once this initialiser has been called, a_schedule and b_schedule should\n not be modified.\n\nArgs:\n a_schedule: The old schedule, an instance of transitfeed.Schedule.\n b_schedule: The new schedule, an instance of transitfeed.Schedule.\n problem_reporter: The problem reporter, an instance of\n transitfeed.ProblemReporter.", "source": "juraj_google_style"} -{"code": "def ud_grade_ipix(ipix, nside_in, nside_out, nest=False):\n\n if nside_in == nside_out: return ipix\n elif nside_in < nside_out:\n return u_grade_ipix(ipix, nside_in, nside_out, nest)\n elif nside_in > nside_out:\n return d_grade_ipix(ipix, nside_in, nside_out, nest)", "docstring": "Upgrade or degrade resolution of a pixel list.\n\n Parameters:\n -----------\n ipix:array-like\n the input pixel(s)\n\n nside_in:int\n the nside of the input pixel(s)\n\n nside_out:int\n the desired nside of the output pixel(s)\n\n order:str\n pixel ordering of input and output (\"RING\" or \"NESTED\")\n\nReturns:\n --------\n pix_out:array-like\n the upgraded or degraded pixel array", "source": "juraj_google_style"} -{"code": "def make_es_id(uri):\n\n try:\n uri = uri.clean_uri\n except AttributeError:\n pass\n return sha1(uri.encode()).hexdigest()", "docstring": "Creates the id based off of the uri value\n\nArgs:\n -----\n uri: the uri to conver to an elasticsearch id", "source": "juraj_google_style"} -{"code": "def from_dict(cls, metadata):\n\n hyperparameters = metadata.get('hyperparameters')\n tunable = metadata.get('tunable_hyperparameters')\n\n pipeline = cls(\n metadata['primitives'],\n metadata.get('init_params'),\n metadata.get('input_names'),\n metadata.get('output_names'),\n )\n\n if hyperparameters:\n pipeline.set_hyperparameters(hyperparameters)\n\n if tunable is not None:\n pipeline._tunable_hyperparameters = tunable\n\n return pipeline", "docstring": "Create a new MLPipeline from a dict specification.\n\n The dict structure is the same as the one created by the `to_dict` method.\n\nArgs:\n metadata (dict): Dictionary containing the pipeline specification.\n\nReturns:\n MLPipeline:\n A new MLPipeline 
instance with the details found in the\n given specification dictionary.", "source": "juraj_google_style"} -{"code": "def check_synced(localval, comm=None):\n\n comm = comm or MPI.COMM_WORLD\n vals = comm.gather(localval)\n if comm.rank == 0:\n assert all(val==vals[0] for val in vals[1:])", "docstring": "It's common to forget to initialize your variables to the same values, or\n (less commonly) if you update them in some other way than adam, to get them out of sync.\n This function checks that variables on all MPI workers are the same, and raises\n an AssertionError otherwise\n\nArgs:\n comm: MPI communicator\n localval: list of local variables (list of variables on current worker to be compared with the other workers)", "source": "juraj_google_style"} -{"code": "def get_foreign_id(self, idspace='musicbrainz', cache=True):\n\n if not (cache and ('foreign_ids' in self.cache) and filter(lambda d: d.get('catalog') == idspace, self.cache['foreign_ids'])):\n response = self.get_attribute('profile', bucket=['id:'+idspace])\n foreign_ids = response['artist'].get(\"foreign_ids\", [])\n self.cache['foreign_ids'] = self.cache.get('foreign_ids', []) + foreign_ids\n cval = filter(lambda d: d.get('catalog') == util.map_idspace(idspace),\n self.cache.get('foreign_ids'))\n return cval[0].get('foreign_id') if cval else None", "docstring": "Get the foreign id for this artist for a specific id space\n\nArgs:\n Kwargs:\n idspace (str): A string indicating the idspace to fetch a foreign id for.\n\nReturns:\n A foreign ID string\n\nExample:\n >>> a = artist.Artist('fabulous')\n >>> a.get_foreign_id('7digital')\n u'7digital:artist:186042'\n >>>", "source": "juraj_google_style"} -{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n\n version_value = registry_key.GetValueByName('Version')\n count_subkey = registry_key.GetSubkeyByName('Count')\n\n if not version_value:\n parser_mediator.ProduceExtractionWarning('missing version value')\n return\n\n if not version_value.DataIsInteger():\n parser_mediator.ProduceExtractionWarning(\n 'unsupported version value data type')\n return\n\n format_version = version_value.GetDataAsObject()\n if format_version not in (3, 5):\n parser_mediator.ProduceExtractionWarning(\n 'unsupported format version: {0:d}'.format(format_version))\n return\n\n if not count_subkey:\n parser_mediator.ProduceExtractionWarning('missing count subkey')\n return\n\n userassist_entry_index = 0\n\n for registry_value in count_subkey.GetValues():\n try:\n # Note that Python 2 codecs.decode() does not support keyword arguments\n # such as encodings='rot-13'.\n value_name = codecs.decode(registry_value.name, 'rot-13')\n except UnicodeEncodeError as exception:\n logger.debug((\n 'Unable to decode UserAssist string: {0:s} with error: {1!s}.\\n'\n 'Attempting piecewise decoding.').format(\n registry_value.name, exception))\n\n characters = []\n for char in registry_value.name:\n if ord(char) < 128:\n try:\n characters.append(char.decode('rot-13'))\n except UnicodeEncodeError:\n characters.append(char)\n else:\n characters.append(char)\n\n value_name = ''.join(characters)\n\n if format_version == 5:\n path_segments = value_name.split('\\\\')\n\n for segment_index, path_segment in enumerate(path_segments):\n # Remove the { } from the path segment to get the GUID.\n guid = path_segments[segment_index][1:-1]\n path_segments[segment_index] = known_folder_ids.PATHS.get(\n guid, path_segment)\n\n value_name = '\\\\'.join(path_segments)\n # Check if we might need to substitute values.\n if 
'%' in value_name:\n # TODO: fix missing self._knowledge_base\n # pylint: disable=no-member\n environment_variables = self._knowledge_base.GetEnvironmentVariables()\n value_name = path_helper.PathHelper.ExpandWindowsPath(\n value_name, environment_variables)\n\n if value_name == 'UEME_CTLSESSION':\n continue\n\n if format_version == 3:\n entry_map = self._GetDataTypeMap('user_assist_entry_v3')\n elif format_version == 5:\n entry_map = self._GetDataTypeMap('user_assist_entry_v5')\n else:\n parser_mediator.ProduceExtractionWarning(\n 'unsupported format version: {0:d}'.format(format_version))\n continue\n\n if not registry_value.DataIsBinaryData():\n parser_mediator.ProduceExtractionWarning(\n 'unsupported value data type: {0:s}'.format(\n registry_value.data_type_string))\n continue\n\n entry_data_size = entry_map.GetByteSize()\n value_data_size = len(registry_value.data)\n if entry_data_size != value_data_size:\n parser_mediator.ProduceExtractionWarning(\n 'unsupported value data size: {0:d}'.format(value_data_size))\n continue\n\n try:\n user_assist_entry = self._ReadStructureFromByteStream(\n registry_value.data, 0, entry_map)\n except (ValueError, errors.ParseError) as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to parse UserAssist entry value with error: {0!s}'.format(\n exception))\n continue\n\n event_data = UserAssistWindowsRegistryEventData()\n event_data.key_path = count_subkey.path\n event_data.number_of_executions = user_assist_entry.number_of_executions\n event_data.value_name = value_name\n\n if format_version == 3:\n if event_data.number_of_executions > 5:\n event_data.number_of_executions -= 5\n\n elif format_version == 5:\n userassist_entry_index += 1\n\n event_data.application_focus_count = (\n user_assist_entry.application_focus_count)\n event_data.application_focus_duration = (\n user_assist_entry.application_focus_duration)\n event_data.entry_index = userassist_entry_index\n\n timestamp = user_assist_entry.last_execution_time\n if not timestamp:\n date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n else:\n date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_LAST_RUN)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n registry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj_google_style"} -{"code": "def variant(self, document_id, gene_panels=None, case_id=None):\n\n query = {}\n if case_id:\n # search for a variant in a case\n query['case_id'] = case_id\n query['variant_id'] = document_id\n else:\n # search with a unique id\n query['_id'] = document_id\n\n variant_obj = self.variant_collection.find_one(query)\n if variant_obj:\n variant_obj = self.add_gene_info(variant_obj, gene_panels)\n if variant_obj['chromosome'] in ['X', 'Y']:\n ## TODO add the build here\n variant_obj['is_par'] = is_par(variant_obj['chromosome'],\n variant_obj['position'])\n return variant_obj", "docstring": "Returns the specified variant.\n\nArgs:\n document_id : A md5 key that represents the variant or \"variant_id\"\n gene_panels(List[GenePanel])\n case_id (str): case id (will search with \"variant_id\")\n\nReturns:\n variant_object(Variant): A odm variant object", "source": "juraj_google_style"} -{"code": "def find(self, _id, instance = 
None):\n\n\n if instance is None:\n # We are looking for an instance\n return self.service_instance.find(_id)\n else:\n # We are looking for a binding\n return self.service_binding.find(_id, instance)", "docstring": "Find\n\nArgs:\n _id (str): instance id or binding Id\n\n Keyword Arguments:\n instance (AtlasServiceInstance.Instance): Existing instance\n\nReturns:\n AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding.", "source": "juraj_google_style"} -{"code": "def __init__(self, inputs=None,\n outputs=None,\n assettype=AssetType.GoverningToken,\n assetname='',\n amount=Fixed8(0),\n precision=0,\n owner=None,\n admin=None):\n\n super(RegisterTransaction, self).__init__(inputs, outputs)\n self.Type = TransactionType.RegisterTransaction # 0x40\n self.AssetType = assettype\n self.Name = assetname\n self.Amount = amount # Unlimited Mode: -0.00000001\n\n if inputs is not None:\n self.inputs = inputs\n else:\n self.inputs = []\n\n if outputs is not None:\n self.outputs = outputs\n else:\n self.outputs = []\n\n if owner is not None and type(owner) is not EllipticCurve.ECPoint:\n raise Exception(\"Invalid owner, must be ECPoint instance\")\n\n self.Owner = owner\n self.Admin = admin\n self.Precision = precision", "docstring": "Create an instance.\n\nArgs:\n inputs (list):\n outputs (list):\n assettype (neo.Core.AssetType):\n assetname (str):\n amount (Fixed8):\n precision (int): number of decimals the asset has.\n owner (EllipticCurve.ECPoint):\n admin (UInt160):", "source": "juraj_google_style"} -{"code": "def _AddOption(self, name):\n\n\n # Check for duplicate option declaration\n if name in [option.name for option in self.options]:\n raise TextFSMTemplateError('Duplicate option \"%s\"' % name)\n\n # Create the option object\n try:\n option = self._options_cls.GetOption(name)(self)\n except AttributeError:\n raise TextFSMTemplateError('Unknown option \"%s\"' % name)\n\n self.options.append(option)", "docstring": "Add an option to this Value.\n\nArgs:\n name: (str), the name of the Option to add.\n\nRaises:\n TextFSMTemplateError: If option is already present or\n the option does not exist.", "source": "juraj_google_style"} -{"code": "def setUpMethods(self, port):\n\n assert isinstance(port, WSDLTools.Port), \\\n 'expecting WSDLTools.Port not: ' %type(port)\n\n sd = self._services.get(port.getService().name)\n assert sd is not None, 'failed to initialize.'\n\n binding = port.getBinding()\n portType = port.getPortType()\n action_in = ''\n for bop in binding.operations:\n try:\n op = portType.operations[bop.name]\n except KeyError, ex:\n raise WsdlGeneratorError,\\\n 'Port(%s) PortType(%s) missing operation(%s) defined in Binding(%s)' \\\n %(port.name,portType.name,bop.name,binding.name)\n\n for ext in bop.extensions:\n if isinstance(ext, WSDLTools.SoapOperationBinding):\n action_in = ext.soapAction\n break\n else:\n warnings.warn('Port(%s) operation(%s) defined in Binding(%s) missing soapAction' \\\n %(port.name,op.name,binding.name)\n )\n\n msgin = op.getInputMessage()\n msgin_name = TextProtect(msgin.name)\n method_name = self.getMethodName(op.name)\n\n m = sd.newMethod()\n print >>m, '%sdef %s(self, ps, **kw):' %(self.getIndent(level=1), method_name)\n if msgin is not None:\n print >>m, '%srequest = ps.Parse(%s.typecode)' %(self.getIndent(level=2), msgin_name)\n else:\n print >>m, '%s# NO input' %self.getIndent(level=2)\n\n msgout = op.getOutputMessage()\n if msgout is not None:\n msgout_name = TextProtect(msgout.name)\n print >>m, '%sreturn request,%s()' 
%(self.getIndent(level=2), msgout_name)\n else:\n print >>m, '%s# NO output' % self.getIndent(level=2)\n print >>m, '%sreturn request,None' % self.getIndent(level=2)\n\n print >>m, ''\n print >>m, '%ssoapAction[\\'%s\\'] = \\'%s\\'' %(self.getIndent(level=1), action_in, method_name)\n print >>m, '%sroot[(%s.typecode.nspname,%s.typecode.pname)] = \\'%s\\'' \\\n %(self.getIndent(level=1), msgin_name, msgin_name, method_name)\n\n return", "docstring": "set up all methods representing the port operations.\n Parameters:\n port -- Port that defines the operations.", "source": "juraj_google_style"} -{"code": "def install(self, connection, partition, table_name=None, columns=None, materialize=False,\n logger=None):\n\n\n partition.localize()\n\n self._add_partition(connection, partition)\n fdw_table = partition.vid\n view_table = '{}_v'.format(fdw_table)\n\n if materialize:\n with connection.cursor() as cursor:\n view_exists = self._relation_exists(connection, view_table)\n if view_exists:\n logger.debug(\n 'Materialized view of the partition already exists.\\n partition: {}, view: {}'\n .format(partition.name, view_table))\n else:\n query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'\\\n .format(view_table, fdw_table)\n logger.debug(\n 'Creating new materialized view of the partition.'\n '\\n partition: {}, view: {}, query: {}'\n .format(partition.name, view_table, query))\n cursor.execute(query)\n cursor.execute('COMMIT;')\n\n final_table = view_table if materialize else fdw_table\n\n with connection.cursor() as cursor:\n view_q = \"CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} \".format(partition.vid, final_table)\n cursor.execute(view_q)\n cursor.execute('COMMIT;')\n\n return partition.vid", "docstring": "Creates FDW or materialize view for given partition.\n\nArgs:\n connection: connection to postgresql\n partition (orm.Partition):\n materialize (boolean): if True, create read-only table. If False create virtual table.\n\nReturns:\n str: name of the created table.", "source": "juraj_google_style"} -{"code": "def follow(self, chars):\n\n chars = chars.lower()\n\n node = self.node\n for char in chars:\n node = cgaddag.gdg_follow_edge(self.gdg, node, char.encode(\"ascii\"))\n if not node:\n raise KeyError(char)\n\n return Node(self.gdg, node)", "docstring": "Traverse the GADDAG to the node at the end of the given characters.\n\nArgs:\n chars: An string of characters to traverse in the GADDAG.\n\nReturns:\n The Node which is found by traversing the tree.", "source": "juraj_google_style"} -{"code": "def __init__(self, value=KeyFormatTypeEnum.RAW):\n\n super(KeyFormatType, self).__init__(\n KeyFormatTypeEnum, value, Tags.KEY_FORMAT_TYPE)", "docstring": "Construct a KeyFormatType object.\n\nArgs:\n value (KeyFormatType): A KeyFormatType enumeration value,\n (e.g., KeyFormatType.PKCS_1). 
Optional, default to\n KeyFormatType.RAW.", "source": "juraj_google_style"} -{"code": "def merge_level_and_latent_dist(level_dist, latent_dist,\n merge_std=\"prev_level\"):\n\n level_mean, level_std = level_dist.loc, level_dist.scale\n latent_mean, latent_std = latent_dist.loc, latent_dist.scale\n new_mean = level_mean + latent_mean\n if merge_std == \"normal\":\n z_shape = common_layers.shape_list(latent_mean)\n log_scale = tf.get_variable(\n \"merge_std\", shape=z_shape, dtype=tf.float32,\n initializer=tf.zeros_initializer(), trainable=False)\n scale = tf.exp(log_scale * 3.0)\n elif merge_std == \"prev_level\":\n scale = level_std\n elif merge_std == \"prev_step\":\n scale = latent_std\n return tfp.distributions.Normal(loc=new_mean, scale=scale)", "docstring": "Merge level_dist and latent_dist.\n\n new_dist ~ N(level_dist.mean + latent_dis.mean, std) where std is determined\n according to merge_std.\n\nArgs:\n level_dist: instance of tfp.distributions.Normal\n latent_dist: instance of tfp.distributions.Normal\n merge_std: can be \"prev_level\", \"prev_step\" or \"normal\".\n\nReturns:\n merged_dist: instance of tfp.distributions.Normal", "source": "juraj_google_style"} -{"code": "def run(self, gin):\n\n with ScratchDir(\".\"):\n p = subprocess.Popen(\n self._gulp_cmd, stdout=subprocess.PIPE,\n stdin=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n out, err = p.communicate(bytearray(gin, \"utf-8\"))\n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n\n if \"Error\" in err or \"error\" in err:\n print(gin)\n print(\"----output_0---------\")\n print(out)\n print(\"----End of output_0------\\n\\n\\n\")\n print(\"----output_1--------\")\n print(out)\n print(\"----End of output_1------\")\n raise GulpError(err)\n\n # We may not need this\n if \"ERROR\" in out:\n raise GulpError(out)\n\n # Sometimes optimisation may fail to reach convergence\n conv_err_string = \"Conditions for a minimum have not been satisfied\"\n if conv_err_string in out:\n raise GulpConvergenceError()\n\n gout = \"\"\n for line in out.split(\"\\n\"):\n gout = gout + line + \"\\n\"\n return gout", "docstring": "Run GULP using the gin as input\n\nArgs:\n gin: GULP input string\n\nReturns:\n gout: GULP output string", "source": "juraj_google_style"} -{"code": "def message_factory(msg_type, msg_types=MESSAGE_TYPES, *args, **kwargs):\n\n try:\n return msg_types[msg_type.lower()](*args, **kwargs)\n except (UnknownProfileError, InvalidMessageInputError) as e:\n err_exit(\"Unable to send message: \", e)\n except KeyError:\n raise UnsupportedMessageTypeError(msg_type, msg_types)", "docstring": "Factory function to return the specified message instance.\n\nArgs:\n :msg_type: (str) the type of message to send, i.e. 'Email'\n :msg_types: (str, list, or set) the supported message types\n :kwargs: (dict) keywords arguments that are required for the\n various message types. See docstrings for each type.\n i.e. 
help(messages.Email), help(messages.Twilio), etc.", "source": "juraj_google_style"} -{"code": "def scale(self, width: int, height: int) -> None:\n\n lib.TCOD_image_scale(self.image_c, width, height)\n self.width, self.height = width, height", "docstring": "Scale this Image to the new width and height.\n\nArgs:\n width (int): The new width of the Image after scaling.\n height (int): The new height of the Image after scaling.", "source": "juraj_google_style"} -{"code": "def fit(self, train_x, train_y):\n\n if self.first_fitted:\n self.incremental_fit(train_x, train_y)\n else:\n self.first_fit(train_x, train_y)", "docstring": "Fit the regressor with more data.\n\nArgs:\n train_x: A list of NetworkDescriptor.\n train_y: A list of metric values.", "source": "juraj_google_style"} -{"code": "def add_listener(self, callback, event_type=None):\n\n listener_uid = uuid4()\n # TODO: listeners should be stored in dict and accessed/deleted directly. Add\n # convenience method such that MatrixClient.listeners.new(Listener(...)) performs\n # MatrixClient.listeners[uuid4()] = Listener(...)\n self.listeners.append(\n {\n 'uid': listener_uid,\n 'callback': callback,\n 'event_type': event_type\n }\n )\n return listener_uid", "docstring": "Add a listener that will send a callback when the client recieves\n an event.\n\nArgs:\n callback (func(roomchunk)): Callback called when an event arrives.\n event_type (str): The event_type to filter for.\n\nReturns:\n uuid.UUID: Unique id of the listener, can be used to identify the listener.", "source": "juraj_google_style"} -{"code": "def GetDecoder(cls, encoding_method):\n\n encoding_method = encoding_method.lower()\n decoder = cls._decoders.get(encoding_method, None)\n if not decoder:\n return None\n\n return decoder()", "docstring": "Retrieves the decoder object for a specific encoding method.\n\nArgs:\n encoding_method (str): encoding method identifier.\n\nReturns:\n Decoder: decoder or None if the encoding method does not exists.", "source": "juraj_google_style"} -{"code": "def loads(s, single=False, version=_default_version,\n strict=False, errors='warn'):\n\n ms = deserialize(s, version=version, strict=strict, errors=errors)\n if single:\n return next(ms)\n else:\n return ms", "docstring": "Deserialize SimpleMRS string representations\n\nArgs:\n s (str): a SimpleMRS string\n single (bool): if `True`, only return the first Xmrs object\n\nReturns:\n a generator of Xmrs objects (unless *single* is `True`)", "source": "juraj_google_style"} -{"code": "def AddClass(self, class_name, gtfs_class):\n\n if class_name in self._class_mapping:\n raise problems.DuplicateMapping(class_name)\n self._class_mapping[class_name] = gtfs_class", "docstring": "Adds an entry to the list of known classes.\n\nArgs:\n class_name: A string with name through which gtfs_class is to be made\n accessible.\n gtfs_class: The class to be added.\n\nRaises:\n DuplicateMapping if class_name is already present in the class mapping.", "source": "juraj_google_style"} -{"code": "def airborne_position_with_ref(msg, lat_ref, lon_ref):\n\n\n\n mb = common.hex2bin(msg)[32:]\n\n cprlat = common.bin2int(mb[22:39]) / 131072.0\n cprlon = common.bin2int(mb[39:56]) / 131072.0\n\n i = int(mb[21])\n d_lat = 360.0/59 if i else 360.0/60\n\n j = common.floor(lat_ref / d_lat) \\\n + common.floor(0.5 + ((lat_ref % d_lat) / d_lat) - cprlat)\n\n lat = d_lat * (j + cprlat)\n\n ni = common.cprNL(lat) - i\n\n if ni > 0:\n d_lon = 360.0 / ni\n else:\n d_lon = 360.0\n\n m = common.floor(lon_ref / d_lon) \\\n + common.floor(0.5 + 
((lon_ref % d_lon) / d_lon) - cprlon)\n\n lon = d_lon * (m + cprlon)\n\n return round(lat, 5), round(lon, 5)", "docstring": "Decode airborne position with only one message,\n knowing reference nearby location, such as previously calculated location,\n ground station, or airport location, etc. The reference position shall\n be with in 180NM of the true position.\n\nArgs:\n msg (string): even message (28 bytes hexadecimal string)\n lat_ref: previous known latitude\n lon_ref: previous known longitude\n\nReturns:\n (float, float): (latitude, longitude) of the aircraft", "source": "juraj_google_style"} -{"code": "def get_random_email(ltd=\"com\"):\n\n\n email = [\n RandomInputHelper.get_random_value(6, [string.ascii_lowercase]),\n \"@\",\n RandomInputHelper.get_random_value(6, [string.ascii_lowercase]),\n \".\",\n ltd\n ]\n\n return \"\".join(email)", "docstring": "Get a random email address with the given ltd.\n\nArgs:\n ltd (str): The ltd to use (e.g. com).\n\nReturns:\n str: The random email.", "source": "juraj_google_style"} -{"code": "def transformer_revnet_decoder(decoder_input,\n encoder_output,\n decoder_self_attention_bias,\n encoder_decoder_attention_bias,\n hparams,\n name=\"decoder\"):\n\n\n def f(x, side_input):\n\n decoder_self_attention_bias = side_input[0]\n encoder_decoder_attention_bias = side_input[1]\n encoder_output = side_input[2]\n\n old_hid_size = hparams.hidden_size\n hparams.hidden_size = old_hid_size // 2\n\n with tf.variable_scope(\"self_attention\"):\n y = common_attention.multihead_attention(\n common_layers.layer_preprocess(\n x, hparams), None, decoder_self_attention_bias,\n hparams.attention_key_channels or hparams.hidden_size,\n hparams.attention_value_channels or hparams.hidden_size,\n hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)\n y = common_layers.layer_postprocess(x, y, hparams)\n if encoder_output is not None:\n with tf.variable_scope(\"encdec_attention\"):\n y = common_attention.multihead_attention(\n common_layers.layer_preprocess(\n x, hparams), encoder_output, encoder_decoder_attention_bias,\n hparams.attention_key_channels or hparams.hidden_size,\n hparams.attention_value_channels or hparams.hidden_size,\n hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)\n y = common_layers.layer_postprocess(x, y, hparams)\n hparams.hidden_size = old_hid_size\n return y\n\n def g(x):\n\n old_hid_size = hparams.hidden_size\n hparams.hidden_size = old_hid_size // 2\n with tf.variable_scope(\"ffn\"):\n y = transformer.transformer_ffn_layer(\n common_layers.layer_preprocess(x, hparams), hparams)\n y = common_layers.layer_postprocess(x, y, hparams)\n hparams.hidden_size = old_hid_size\n return y\n\n x1, x2 = tf.split(decoder_input, 2, axis=-1)\n\n with tf.variable_scope(name):\n y1, y2 = tf.contrib.layers.rev_block(\n x1,\n x2,\n f,\n g,\n num_layers=hparams.num_hidden_layers,\n f_side_input=[\n decoder_self_attention_bias, encoder_decoder_attention_bias,\n encoder_output\n ],\n is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN)\n y = tf.concat([y1, y2], axis=-1)\n return common_layers.layer_preprocess(y, hparams)", "docstring": "A stack of transformer layers.\n\nArgs:\n decoder_input: a Tensor\n encoder_output: a Tensor\n decoder_self_attention_bias: bias Tensor for self-attention\n (see common_attention.attention_bias())\n encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention\n (see common_attention.attention_bias())\n hparams: hyperparameters for model\n name: a string\n\nReturns:\n y: a Tensors", 
"source": "juraj_google_style"} -{"code": "def heatmap(n_x=5,n_y=10):\n\n\tx=['x_'+str(_) for _ in range(n_x)]\n\ty=['y_'+str(_) for _ in range(n_y)]\n\treturn pd.DataFrame(surface(n_x-1,n_y-1).values,index=x,columns=y)", "docstring": "Returns a DataFrame with the required format for\n a heatmap plot\n\n Parameters:\n -----------\n n_x : int\n Number of x categories\n n_y : int\n Number of y categories", "source": "juraj_google_style"} -{"code": "def ParseInteger(text, is_signed=False, is_long=False):\n\n # Do the actual parsing. Exception handling is propagated to caller.\n result = _ParseAbstractInteger(text, is_long=is_long)\n\n # Check if the integer is sane. Exceptions handled by callers.\n checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]\n checker.CheckValue(result)\n return result", "docstring": "Parses an integer.\n\nArgs:\n text: The text to parse.\n is_signed: True if a signed integer must be parsed.\n is_long: True if a long integer must be parsed.\n\nReturns:\n The integer value.\n\nRaises:\n ValueError: Thrown Iff the text is not a valid integer.", "source": "juraj_google_style"} -{"code": "def parse_config_file(config_file, skip_unknown=False):\n\n for reader, existence_check in _FILE_READERS:\n if existence_check(config_file):\n with reader(config_file) as f:\n parse_config(f, skip_unknown=skip_unknown)\n return\n raise IOError('Unable to open file: {}'.format(config_file))", "docstring": "Parse a Gin config file.\n\nArgs:\n config_file: The path to a Gin config file.\n skip_unknown: A boolean indicating whether unknown configurables and imports\n should be skipped instead of causing errors (alternatively a list of\n configurable names to skip if unknown). See `parse_config` for additional\n details.\n\nRaises:\n IOError: If `config_file` cannot be read using any register file reader.", "source": "juraj_google_style"} -{"code": "def __call__(self, name, value):\n\n if not isinstance(value, self.base_type):\n raise ValueError(\"%s must be %s, not %s\" % (name, self.base_type, value.__class__))", "docstring": "Call method.\n\nArgs:\n name (str): the value's name.\n value (object): the value to check.\n\nRaises:\n ValueError: if value is not type base_type.", "source": "juraj_google_style"} -{"code": "def flush_redis_unsafe(redis_client=None):\n\n if redis_client is None:\n ray.worker.global_worker.check_connected()\n redis_client = ray.worker.global_worker.redis_client\n\n # Delete the log files from the primary Redis shard.\n keys = redis_client.keys(\"LOGFILE:*\")\n if len(keys) > 0:\n num_deleted = redis_client.delete(*keys)\n else:\n num_deleted = 0\n print(\"Deleted {} log files from Redis.\".format(num_deleted))\n\n # Delete the event log from the primary Redis shard.\n keys = redis_client.keys(\"event_log:*\")\n if len(keys) > 0:\n num_deleted = redis_client.delete(*keys)\n else:\n num_deleted = 0\n print(\"Deleted {} event logs from Redis.\".format(num_deleted))", "docstring": "This removes some non-critical state from the primary Redis shard.\n\n This removes the log files as well as the event log from Redis. This can\n be used to try to address out-of-memory errors caused by the accumulation\n of metadata in Redis. 
However, it will only partially address the issue as\n much of the data is in the task table (and object table), which are not\n flushed.\n\nArgs:\n redis_client: optional, if not provided then ray.init() must have been\n called.", "source": "juraj_google_style"} -{"code": "def run_tpm(tpm, time_scale):\n\n sbs_tpm = convert.state_by_node2state_by_state(tpm)\n if sparse(tpm):\n tpm = sparse_time(sbs_tpm, time_scale)\n else:\n tpm = dense_time(sbs_tpm, time_scale)\n return convert.state_by_state2state_by_node(tpm)", "docstring": "Iterate a TPM by the specified number of time steps.\n\nArgs:\n tpm (np.ndarray): A state-by-node tpm.\n time_scale (int): The number of steps to run the tpm.\n\nReturns:\n np.ndarray", "source": "juraj_google_style"} -{"code": "def from_json_file(cls, file_name):\n\n with open(file_name) as json_data:\n config = json.load(json_data)\n\n return cls(config)", "docstring": "Construct OneViewClient using a json file.\n\nArgs:\n file_name: json full path.\n\nReturns:\n OneViewClient:", "source": "juraj_google_style"} -{"code": "def symlink_to_bytes(symlink_target):\n # type: (str) -> bytes\n\n symlink_data = bytearray()\n for comp in symlink_target.split('/'):\n if comp == '':\n # If comp is empty, then we know this is the leading slash\n # and we should make an absolute entry (double slashes and\n # such are weeded out by the earlier utils.normpath).\n symlink_data.extend(b'\\x02\\x00\\x00\\x00')\n elif comp == '.':\n symlink_data.extend(b'\\x04\\x00\\x00\\x00')\n elif comp == '..':\n symlink_data.extend(b'\\x03\\x00\\x00\\x00')\n else:\n symlink_data.extend(b'\\x05')\n ostaname = _ostaunicode(comp)\n symlink_data.append(len(ostaname))\n symlink_data.extend(b'\\x00\\x00')\n symlink_data.extend(ostaname)\n\n return symlink_data", "docstring": "A function to generate UDF symlink data from a Unix-like path.\n\n Parameters:\n symlink_target - The Unix-like path that is the symlink.\n\nReturns:\n The UDF data corresponding to the symlink.", "source": "juraj_google_style"} -{"code": "def _TerminateProcessByPid(self, pid):\n\n self._RaiseIfNotRegistered(pid)\n\n process = self._processes_per_pid[pid]\n\n self._TerminateProcess(process)\n self._StopMonitoringProcess(process)", "docstring": "Terminate a process that's monitored by the engine.\n\nArgs:\n pid (int): process identifier (PID).\n\nRaises:\n KeyError: if the process is not registered with and monitored by the\n engine.", "source": "juraj_google_style"} -{"code": "def import_image_from_url(self, url, repository=None, tag=None,\n changes=None):\n\n return self.import_image(\n src=url, repository=repository, tag=tag, changes=changes\n )", "docstring": "Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only\n supports importing from a URL.\n\nArgs:\n url (str): A URL pointing to a tar file.\n repository (str): The repository to create\n tag (str): The tag to apply", "source": "juraj_google_style"} -{"code": "def add_time_dimension(padded_inputs, seq_lens):\n\n\n # Sequence lengths have to be specified for LSTM batch inputs. The\n # input batch must be padded to the max seq length given here. 
That is,\n # batch_size == len(seq_lens) * max(seq_lens)\n padded_batch_size = tf.shape(padded_inputs)[0]\n max_seq_len = padded_batch_size // tf.shape(seq_lens)[0]\n\n # Dynamically reshape the padded batch to introduce a time dimension.\n new_batch_size = padded_batch_size // max_seq_len\n new_shape = ([new_batch_size, max_seq_len] +\n padded_inputs.get_shape().as_list()[1:])\n return tf.reshape(padded_inputs, new_shape)", "docstring": "Adds a time dimension to padded inputs.\n\nArgs:\n padded_inputs (Tensor): a padded batch of sequences. That is,\n for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where\n A, B, C are sequence elements and * denotes padding.\n seq_lens (Tensor): the sequence lengths within the input batch,\n suitable for passing to tf.nn.dynamic_rnn().\n\nReturns:\n Reshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].", "source": "juraj_google_style"} -{"code": "def interpret_obj(\n self,\n obj,\n v_level_indexes,\n h_level_indexes,\n v_level_visibility,\n h_level_visibility,\n v_level_sort_keys,\n h_level_sort_keys,\n v_level_titles,\n h_level_titles,\n ):\n\n if not isinstance(obj, NonStringIterable):\n raise self.error(\"Cannot make a table from object {!r}\".format(obj))\n\n rectangular_rows = tabulate(\n obj,\n v_level_indexes=v_level_indexes,\n h_level_indexes=h_level_indexes,\n v_level_visibility=v_level_visibility,\n h_level_visibility=h_level_visibility,\n v_level_sort_keys=v_level_sort_keys,\n h_level_sort_keys=h_level_sort_keys,\n v_level_titles=v_level_titles,\n h_level_titles=h_level_titles,\n )\n assert is_rectangular(rectangular_rows)\n num_rows, num_cols = size(rectangular_rows)\n return rectangular_rows, num_cols", "docstring": "Interpret the given Python object as a table.\n\nArgs:\n obj: A sequence (later a mapping, too)\n\nReturns:\n A list of lists represents rows of cells.\n\nRaises:\n TypeError: If the type couldn't be interpreted as a table.", "source": "juraj_google_style"} -{"code": "def __init__(self, mol):\n\n if isinstance(mol, Molecule):\n if not mol.is_ordered:\n raise ValueError(\"OpenBabel Molecule only supports ordered \"\n \"molecules.\")\n\n # For some reason, manually adding atoms does not seem to create\n # the correct OBMol representation to do things like force field\n # optimization. 
So we go through the indirect route of creating\n # an XYZ file and reading in that file.\n obmol = ob.OBMol()\n obmol.BeginModify()\n for site in mol:\n coords = [c for c in site.coords]\n atomno = site.specie.Z\n obatom = ob.OBAtom()\n obatom.thisown = 0\n obatom.SetAtomicNum(atomno)\n obatom.SetVector(*coords)\n obmol.AddAtom(obatom)\n del obatom\n obmol.ConnectTheDots()\n obmol.PerceiveBondOrders()\n obmol.SetTotalSpinMultiplicity(mol.spin_multiplicity)\n obmol.SetTotalCharge(mol.charge)\n obmol.Center()\n obmol.Kekulize()\n obmol.EndModify()\n self._obmol = obmol\n elif isinstance(mol, ob.OBMol):\n self._obmol = mol", "docstring": "Initializes with pymatgen Molecule or OpenBabel\"s OBMol.\n\nArgs:\n mol: pymatgen's Molecule or OpenBabel OBMol", "source": "juraj_google_style"} -{"code": "def __contains__(self, k):\n\n chain = ChainMap(self.scopes, self.globals)\n return chain.__contains__(k)", "docstring": "Check whether a variable has been assigned to.\n\n This is **not** the same kind of element-of as described in the\n class documentation.\n\nArgs:\n k (str): The name of the variable to check.\n\nReturns:\n bool: Whether or not the variable has been assigned to.", "source": "juraj_google_style"} -{"code": "def remove(self, *l):\n\n removeList = list(flatten(l))\n self._remove(removeList, self.value)", "docstring": "remove elements from self.value by matching.\n\n Create the exactly same single you want to delete and pass it(them) in.\n Normally this method needs to be overwrited by subclass. It only looks inside current instance's value, not recursive. There is no need for a recursive one anyway.\n\nArgs:\n *l: a single element, a bunch of element seperated by comma, or a list of elements, or any combination. Element is what you match with.", "source": "juraj_google_style"} -{"code": "def unpack(self, buff, offset=0):\n\n header = UBInt16()\n header.unpack(buff[offset:offset+2])\n self.tlv_type = header.value >> 9\n length = header.value & 511\n begin, end = offset + 2, offset + 2 + length\n sub_type = UBInt8()\n sub_type.unpack(buff[begin:begin+1])\n self.sub_type = sub_type.value\n self.sub_value = BinaryData(buff[begin+1:end])", "docstring": "Unpack a binary message into this object's attributes.\n\n Unpack the binary value *buff* and update this object attributes based\n on the results.\n\nArgs:\n buff (bytes): Binary data package to be unpacked.\n offset (int): Where to begin unpacking.\n\nRaises:\n Exception: If there is a struct unpacking error.", "source": "juraj_google_style"} -{"code": "def _CreateShapePointFolder(self, shapes_folder, shape):\n\n\n folder_name = shape.shape_id + ' Shape Points'\n folder = self._CreateFolder(shapes_folder, folder_name, visible=False)\n for (index, (lat, lon, dist)) in enumerate(shape.points):\n placemark = self._CreatePlacemark(folder, str(index+1))\n point = ET.SubElement(placemark, 'Point')\n coordinates = ET.SubElement(point, 'coordinates')\n coordinates.text = '%.6f,%.6f' % (lon, lat)\n return folder", "docstring": "Create a KML Folder containing all the shape points in a shape.\n\n The folder contains placemarks for each shapepoint.\n\nArgs:\n shapes_folder: A KML Shape Folder ElementTree.Element instance\n shape: The shape to plot.\n\nReturns:\n The Folder ElementTree.Element instance or None.", "source": "juraj_google_style"} -{"code": "def terminate_session(self, token):\n\n\n url = self.rest_url + \"/session/%s\" % token\n response = self._delete(url)\n\n # For consistency between methods use None rather than False\n # If token 
validation failed for any reason return None\n if not response.ok:\n return None\n\n # Otherwise return True\n return True", "docstring": "Terminates the session token, effectively logging out the user\n from all crowd-enabled services.\n\nArgs:\n token: The session token.\n\nReturns:\n True: If session terminated\n\n None: If session termination failed", "source": "juraj_google_style"} -{"code": "def log_correction(self, event, action):\n\n # TODO: Create CorrectionObject\n action = str(action)\n self.history.info(action)\n\n self._corrections.append(dict(\n event=event.as_dict(),\n action=action,\n ))", "docstring": "This method should be called once we have fixed the problem associated to this event.\n It adds a new entry in the correction history of the node.\n\nArgs:\n event: :class:`AbinitEvent` that triggered the correction.\n action (str): Human-readable string with info on the action perfomed to solve the problem.", "source": "juraj_google_style"} -{"code": "def __init__(self, *futures):\n\n for f in futures:\n if not isinstance(f, PipelineFuture):\n raise TypeError('May only pass PipelineFuture instances to After(). %r',\n type(f))\n self._futures = set(futures)", "docstring": "Initializer.\n\nArgs:\n *futures: PipelineFutures that all subsequent pipelines should follow.\n May be empty, in which case this statement does nothing.", "source": "juraj_google_style"} -{"code": "def connect(portname, baudrate):\n\n global SERPORT\n try:\n SERPORT = serial.Serial(portname, baudrate, timeout = 0.1)\n\n except:\n raise HerkulexError(\"could not open the serial port\")", "docstring": "Connect to the Herkulex bus\n\n Connect to serial port to which Herkulex Servos are attatched\n\nArgs:\n portname (str): The serial port name\n baudrate (int): The serial port baudrate\n\nRaises:\n SerialException: Error occured while opening serial port", "source": "juraj_google_style"} -{"code": "def batch_mutate(self, mutation_map, consistency_level):\n\n self._seqid += 1\n d = self._reqs[self._seqid] = defer.Deferred()\n self.send_batch_mutate(mutation_map, consistency_level)\n return d", "docstring": "Mutate many columns or super columns for many row keys. 
See also: Mutation.\n\n mutation_map maps key to column family to a list of Mutation objects to take place at that scope.\n *\n\n Parameters:\n - mutation_map\n - consistency_level", "source": "juraj_google_style"} -{"code": "def new_body(name=None, pos=None, **kwargs):\n\n if name is not None:\n kwargs[\"name\"] = name\n if pos is not None:\n kwargs[\"pos\"] = array_to_string(pos)\n element = ET.Element(\"body\", attrib=kwargs)\n return element", "docstring": "Creates a body element with attributes specified by @**kwargs.\n\nArgs:\n name (str): body name.\n pos: 3d position of the body frame.", "source": "juraj_google_style"} -{"code": "def git_clone(prettyname: str, url: str, directory: str,\n branch: str = None,\n commit: str = None,\n clone_options: List[str] = None,\n run_func: Callable[[List[str]], Any] = None) -> bool:\n\n run_func = run_func or subprocess.check_call\n clone_options = clone_options or [] # type: List[str]\n if os.path.isdir(directory):\n log.info(\"Not re-cloning {} Git repository: using existing source \"\n \"in {}\".format(prettyname, directory))\n return False\n log.info(\"Fetching {} source from {} into {}\",\n prettyname, url, directory)\n require_executable(GIT)\n gitargs = [GIT, \"clone\"] + clone_options\n if branch:\n gitargs += [\"--branch\", branch]\n gitargs += [url, directory]\n run_func(gitargs)\n if commit:\n log.info(\"Resetting {} local Git repository to commit {}\",\n prettyname, commit)\n run_func([GIT,\n \"-C\", directory,\n \"reset\", \"--hard\", commit])\n # Using a Git repository that's not in the working directory:\n # https://stackoverflow.com/questions/1386291/git-git-dir-not-working-as-expected # noqa\n return True", "docstring": "Fetches a Git repository, unless we have it already.\n\nArgs:\n prettyname: name to display to user\n url: URL\n directory: destination directory\n branch: repository branch\n commit: repository commit tag\n clone_options: additional options to pass to ``git clone``\n run_func: function to use to call an external command\n\nReturns:\n did we need to do anything?", "source": "juraj_google_style"} -{"code": "def __call__(self, value):\n\n fmt = self.fmt(value)\n if len(fmt) > self.col_width:\n fmt = fmt[:self.col_width - 3] + '...'\n fmt = self.just(fmt, self.col_width)\n return fmt", "docstring": "Formats a given value\n\nArgs:\n value: value to format\n\nReturns:\n str: formatted value", "source": "juraj_google_style"} -{"code": "def get(self, url, params=None, **kwargs):\n\n return self.call_api(\n \"GET\",\n url,\n params=params,\n **kwargs\n )", "docstring": "Call the API with a GET request.\n\nArgs:\n url (str): Resource location relative to the base URL.\n params (dict or None): Query-string parameters.\n\nReturns:\n ResultParser or ErrorParser.", "source": "juraj_google_style"} -{"code": "def serialize_to_transport(self, doc_format=\"xml\", *args, **kwargs):\n\n return super(ResourceMap, self).serialize(\n format=doc_format, encoding=\"utf-8\", *args, **kwargs\n )", "docstring": "Serialize ResourceMap to UTF-8 encoded XML document.\n\nArgs:\n doc_format: str\n One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,\n ``trig`` and ``nquads``.\n\n args and kwargs:\n Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().\n\nReturns:\n bytes: UTF-8 encoded XML doc.\n\nNote:\n Only the default, \"xml\", is automatically indexed by DataONE.", "source": "juraj_google_style"} -{"code": "def __init__(self, provider, template, **kwargs):\n\n super(StatikJinjaTemplate, 
self).__init__(template.filename, **kwargs)\n self.provider = provider\n self.template = template", "docstring": "Constructor.\n\nArgs:\n provider: The provider that created this template.\n template: The Jinja2 template to wrap.", "source": "juraj_google_style"} -{"code": "def parse_flags_with_usage(args):\n\n try:\n return FLAGS(args)\n except flags.Error as error:\n sys.stderr.write('FATAL Flags parsing error: %s\\n' % error)\n sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\\n')\n sys.exit(1)", "docstring": "Tries to parse the flags, print usage, and exit if unparseable.\n\nArgs:\n args: [str], a non-empty list of the command line arguments including\n program name.\n\nReturns:\n [str], a non-empty list of remaining command line arguments after parsing\n flags, including program name.", "source": "juraj_google_style"} -{"code": "def configure_logging(verbosity):\n\n\n root = logging.getLogger()\n\n formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s',\n '%y-%m-%d %H:%M:%S')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n loglevels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]\n if verbosity >= len(loglevels):\n verbosity = len(loglevels) - 1\n\n level = loglevels[verbosity]\n\n root.setLevel(level)\n root.addHandler(handler)", "docstring": "Set up the global logging level.\n\nArgs:\n verbosity (int): The logging verbosity", "source": "juraj_google_style"} -{"code": "def gen_binary_files_from_urls(\n urls: Iterable[str],\n on_disk: bool = False,\n show_info: bool = True) -> Generator[BinaryIO, None, None]:\n\n for url in urls:\n if on_disk:\n # Necessary for e.g. zip processing (random access)\n with tempfile.TemporaryDirectory() as tmpdir:\n filename = os.path.join(tmpdir, \"tempfile\")\n download(url=url, filename=filename)\n with open(filename, 'rb') as f:\n yield f\n else:\n if show_info:\n log.info(\"Reading from URL: {}\", url)\n with urllib.request.urlopen(url) as f:\n yield f\n if show_info:\n log.info(\"... 
finished reading from URL: {}\", url)", "docstring": "Generate binary files from a series of URLs (one per URL).\n\nArgs:\n urls: iterable of URLs\n on_disk: if ``True``, yields files that are on disk (permitting\n random access); if ``False``, yields in-memory files (which will\n not permit random access)\n show_info: show progress to the log?\n\nYields:\n files, each of type :class:`BinaryIO`", "source": "juraj_google_style"} -{"code": "def groups(self, group_type=None, filters=None, params=None):\n\n group = self._tcex.ti.group(group_type)\n for g in self.tc_requests.groups_from_tag(group, self.name, filters=filters, params=params):\n yield g", "docstring": "Gets all groups from a tag.\n\nArgs:\n filters:\n params:\n group_type:", "source": "juraj_google_style"} -{"code": "def _unescape_token(escaped_token):\n\n\n def match(m):\n if m.group(1) is None:\n return u\"_\" if m.group(0) == u\"\\\\u\" else u\"\\\\\"\n\n try:\n return six.unichr(int(m.group(1)))\n except (ValueError, OverflowError):\n return \"\"\n\n trimmed = escaped_token[:-1] if escaped_token.endswith(\"_\") else escaped_token\n return _UNESCAPE_REGEX.sub(match, trimmed)", "docstring": "Inverse of _escape_token().\n\nArgs:\n escaped_token: a unicode string\n\nReturns:\n token: a unicode string", "source": "juraj_google_style"} -{"code": "def from_files(path_dir, dos_spin=1):\n\n run_type, warning, efermi, gap, doping_levels = \\\n BoltztrapAnalyzer.parse_outputtrans(path_dir)\n\n vol = BoltztrapAnalyzer.parse_struct(path_dir)\n\n intrans = BoltztrapAnalyzer.parse_intrans(path_dir)\n\n if run_type == \"BOLTZ\":\n dos, pdos = BoltztrapAnalyzer.parse_transdos(\n path_dir, efermi, dos_spin=dos_spin, trim_dos=False)\n\n mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, \\\n seebeck_doping, cond_doping, kappa_doping, hall_doping, \\\n carrier_conc = BoltztrapAnalyzer. 
\\\n parse_cond_and_hall(path_dir, doping_levels)\n\n return BoltztrapAnalyzer(\n gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,\n mu_doping, seebeck_doping, cond_doping, kappa_doping,\n hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)\n\n elif run_type == \"DOS\":\n trim = True if intrans[\"dos_type\"] == \"HISTO\" else False\n dos, pdos = BoltztrapAnalyzer.parse_transdos(\n path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)\n\n return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,\n warning=warning, vol=vol)\n\n elif run_type == \"BANDS\":\n bz_kpoints = np.loadtxt(\n os.path.join(path_dir, \"boltztrap_band.dat\"))[:, -3:]\n bz_bands = np.loadtxt(\n os.path.join(path_dir, \"boltztrap_band.dat\"))[:, 1:-6]\n return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints,\n warning=warning, vol=vol)\n\n elif run_type == \"FERMI\":\n\n\n if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):\n fs_data = read_cube_file(\n os.path.join(path_dir, 'boltztrap_BZ.cube'))\n elif os.path.exists(os.path.join(path_dir, 'fort.30')):\n fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))\n else:\n raise BoltztrapError(\"No data file found for fermi surface\")\n return BoltztrapAnalyzer(fermi_surface_data=fs_data)\n\n else:\n raise ValueError(\"Run type: {} not recognized!\".format(run_type))", "docstring": "get a BoltztrapAnalyzer object from a set of files\n\nArgs:\n path_dir: directory where the boltztrap files are\n dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down\n\nReturns:\n a BoltztrapAnalyzer object", "source": "juraj_google_style"} -{"code": "def delete_subscription(self, subscription_id):\n\n return self.client._delete(self.url + 'subscriptions/{}'.format(subscription_id), headers=self.get_headers())", "docstring": "Unsubscribe, delete the relationship of the customer with the plan.\n\nArgs:\n subscription_id: Identification of the subscription.\n\n Returns:", "source": "juraj_google_style"} -{"code": "def deps_from_pydit_json(requires, runtime=True):\n\n parsed = []\n for req in requires:\n # req looks like 'some-name (>=X.Y,!=Y.X)' or 'someme-name' where\n # 'some-name' is the name of required package and '(>=X.Y,!=Y.X)'\n # are specs\n name, specs = None, None\n # len(reqs) == 1 if there are not specified versions, 2 otherwise\n reqs = req.split(' ')\n name = reqs[0]\n if len(reqs) == 2:\n specs = reqs[1]\n # try if there are more specs in spec part of the requires\n specs = specs.split(\",\")\n # strip brackets\n specs = [re.sub('[()]', '', spec) for spec in specs]\n # this will divide (>=0.1.2) to ['>=', '0', '.1.2']\n # or (0.1.2) into ['', '0', '.1.2']\n specs = [re.split('([0-9])', spec, 1) for spec in specs]\n # we have separated specs based on number as delimiter\n # so we need to join it back to rest of version number\n # e.g ['>=', '0', '.1.2'] to ['>=', '0.1.2']\n for spec in specs:\n spec[1:3] = [''.join(spec[1:3])]\n if specs:\n for spec in specs:\n if '!' 
in spec[0]:\n parsed.append(['Conflicts', name, '=', spec[1]])\n elif specs[0] == '==':\n parsed.append(['Requires', name, '=', spec[1]])\n else:\n parsed.append(['Requires', name, spec[0], spec[1]])\n else:\n parsed.append(['Requires', name])\n\n if not runtime:\n for pars in parsed:\n pars[0] = 'Build' + pars[0]\n\n return parsed", "docstring": "Parses dependencies returned by pydist.json, since versions\n uses brackets we can't use pkg_resources to parse and we need a separate\n method\n\nArgs:\n requires: list of dependencies as written in pydist.json of the package\n runtime: are the dependencies runtime (True) or build time (False)\n\nReturns:\n List of semi-SPECFILE dependecies (see dependency_to_rpm for format)", "source": "juraj_google_style"} -{"code": "def __init__(self, rate=None, burst_size=None, experimenter=None):\n\n super().__init__(MeterBandType.OFPMBT_EXPERIMENTER, rate, burst_size)\n self.experimenter = experimenter", "docstring": "Create a MeterBandExperimenter with the optional parameters below.\n\nArgs:\n rate (int): Rate for remarking packets.\n burst_size (int): Size of bursts.\n experimenter (int): Experimenter ID which takes the same form as in\n :class:`.ExperimenterHeader`.", "source": "juraj_google_style"} -{"code": "def _find_best_fit(self, pbin):\n\n fit = ((pbin.fitness(r[0], r[1]), k) for k, r in self._sorted_rect.items())\n fit = (f for f in fit if f[0] is not None)\n try:\n _, rect = min(fit, key=self.first_item)\n return rect\n except ValueError:\n return None", "docstring": "Return best fitness rectangle from rectangles packing _sorted_rect list\n\nArgs:\n pbin (PackingAlgorithm): Packing bin\n\nReturns:\n key of the rectangle with best fitness", "source": "juraj_google_style"} -{"code": "def _ListActiveBreakpoints(self, service):\n\n try:\n response = service.debuggees().breakpoints().list(\n debuggeeId=self._debuggee_id, waitToken=self._wait_token,\n successOnTimeout=True).execute()\n if not response.get('waitExpired'):\n self._wait_token = response.get('nextWaitToken')\n breakpoints = response.get('breakpoints') or []\n if self._breakpoints != breakpoints:\n self._breakpoints = breakpoints\n native.LogInfo(\n 'Breakpoints list changed, %d active, wait token: %s' % (\n len(self._breakpoints), self._wait_token))\n self.on_active_breakpoints_changed(copy.deepcopy(self._breakpoints))\n except BaseException:\n native.LogInfo('Failed to query active breakpoints: ' +\n traceback.format_exc())\n\n # Forget debuggee ID to trigger repeated debuggee registration. Once the\n # registration succeeds, the worker thread will retry this query\n self._debuggee_id = None\n\n return (True, self.list_backoff.Failed())\n\n self.list_backoff.Succeeded()\n return (False, 0)", "docstring": "Single attempt query the list of active breakpoints.\n\n Must not be called before the debuggee has been registered. If the request\n fails, this function resets self._debuggee_id, which triggers repeated\n debuggee registration.\n\nArgs:\n service: client to use for API calls\n\nReturns:\n (registration_required, delay) tuple", "source": "juraj_google_style"} -{"code": "def save_spectre_plot(self, filename=\"spectre.pdf\", img_format=\"pdf\",\n sigma=0.05, step=0.01):\n\n d, plt = self.get_spectre_plot(sigma, step)\n plt.savefig(filename, format=img_format)", "docstring": "Save matplotlib plot of the spectre to a file.\n\nArgs:\n filename: Filename to write to.\n img_format: Image format to use. 
Defaults to EPS.\n sigma: Full width at half maximum in eV for normal functions.\n step: bin interval in eV", "source": "juraj_google_style"} -{"code": "def resolve_aliases(data_type):\n\n if not is_alias(data_type):\n return data_type\n\n resolved = resolve_aliases(data_type.data_type)\n data_type.data_type = resolved\n\n return resolved", "docstring": "Resolve all chained / nested aliases. This will recursively point\n nested aliases to their resolved data type (first non-alias in the chain).\n\n Note: This differs from unwrap_alias which simply identifies/returns\n the resolved data type.\n\nArgs:\n data_type (DataType): The target DataType/Alias to resolve.\n\nReturns:\n DataType: The resolved type.", "source": "juraj_google_style"} -{"code": "def compare_mim_panels(self, existing_panel, new_panel):\n\n existing_genes = set([gene['hgnc_id'] for gene in existing_panel['genes']])\n new_genes = set([gene['hgnc_id'] for gene in new_panel['genes']])\n\n return new_genes.difference(existing_genes)", "docstring": "Check if the latest version of OMIM differs from the most recent in database\n Return all genes that where not in the previous version.\n\nArgs:\n existing_panel(dict)\n new_panel(dict)\n\nReturns:\n new_genes(set(str))", "source": "juraj_google_style"} -{"code": "def GetMetadataAttribute(self, attribute_name):\n\n table_name = 'metadata'\n\n has_table = self._database_file.HasTable(table_name)\n if not has_table:\n return None\n\n column_names = ['value']\n condition = 'name == \"{0:s}\"'.format(attribute_name)\n\n values = list(self._database_file.GetValues(\n [table_name], column_names, condition))\n\n number_of_values = len(values)\n if number_of_values == 0:\n return None\n\n if number_of_values == 1:\n return values[0]['value']\n\n raise RuntimeError('More than one value found in database.')", "docstring": "Retrieves the metadata attribute.\n\nArgs:\n attribute_name (str): name of the metadata attribute.\n\nReturns:\n str: the metadata attribute or None.\n\nRaises:\n RuntimeError: if more than one value is found in the database.", "source": "juraj_google_style"} -{"code": "def reward(self,\n state: Sequence[tf.Tensor],\n action: Sequence[tf.Tensor],\n next_state: Sequence[tf.Tensor]) -> tf.Tensor:\n\n scope = self.reward_scope(state, action, next_state)\n r = self.compile_reward(scope).tensor\n with self.graph.as_default():\n with tf.name_scope('reward'):\n return tf.expand_dims(r, -1)", "docstring": "Compiles the reward function given the current `state`, `action` and\n `next_state`.\n\nArgs:\n state (Sequence[tf.Tensor]): A tuple of current state tensors.\n action (Sequence[tf.Tensor]): A tuple of action tensors.\n next_state (Sequence[tf.Tensor]): A tuple of next state tensors.\n\nReturns:\n (:obj:`tf.Tensor`): A tensor representing the reward function.", "source": "juraj_google_style"} -{"code": "def read(self, viewport=None, components=3, *, attachment=0, alignment=1, dtype='f1') -> bytes:\n\n\n return self.mglo.read(viewport, components, attachment, alignment, dtype)", "docstring": "Read the content of the framebuffer.\n\nArgs:\n viewport (tuple): The viewport.\n components (int): The number of components to read.\n\n Keyword Args:\n attachment (int): The color attachment.\n alignment (int): The byte alignment of the pixels.\n dtype (str): Data type.\n\nReturns:\n bytes", "source": "juraj_google_style"} -{"code": "def register_menu_item(self, items):\n\n for itm in items:\n if itm.group in self.menu_items:\n # Only add the menu item if we don't already have it 
registered\n if itm not in self.menu_items[itm.group]['items']:\n self.menu_items[itm.group]['items'].append(itm)\n else:\n logger.warning('Tried registering menu item to unknown group {}'.format(itm.group))", "docstring": "Registers a views menu items into the metadata for the application. Skip if the item is already present\n\nArgs:\n items (`list` of `MenuItem`): A list of `MenuItem`s\n\nReturns:\n `None`", "source": "juraj_google_style"} -{"code": "def __init__(self, cell_ctor, *args, **kwargs):\n\n super(RNNCellWrapper, self).__init__(\n name=kwargs.get(\"name\"),\n custom_getter=kwargs.pop(\"custom_getter\", None))\n\n with self._enter_variable_scope():\n self._cell = cell_ctor(*args, **kwargs)", "docstring": "Constructs the cell, within this module's variable scope.\n\nArgs:\n cell_ctor: Callable that instantiates a `tf.contrib.rnn.RNNCell`.\n *args: Arguments to pass to `cell_ctor`.\n **kwargs: Keyword arguments to pass to `cell_ctor`.\n If `name` is provided, it is passed to `RNNCore.__init__` as well.\n If `custom_getter` is provided, it is passed to `RNNCore.__init__`\n but not to `cell_ctor`.", "source": "juraj_google_style"} -{"code": "def lf_polarities(L):\n\n polarities = [sorted(list(set(L[:, i].data))) for i in range(L.shape[1])]\n return [p[0] if len(p) == 1 else p for p in polarities]", "docstring": "Return the polarities of each LF based on evidence in a label matrix.\n\nArgs:\n L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the\n jth LF to the ith candidate", "source": "juraj_google_style"} -{"code": "def check_grandparents(self, mother = None, father = None):\n\n if mother:\n if mother.mother != '0':\n self.grandparents[mother.mother] = ''\n elif mother.father != '0':\n self.grandparents[mother.father] = ''\n if father:\n if father.mother != '0':\n self.grandparents[father.mother] = ''\n elif father.father != '0':\n self.grandparents[father.father] = ''\n return", "docstring": "Check if there are any grand parents.\n\n Set the grandparents id:s\n\nArgs:\n mother (Individual): An Individual object that represents the mother\n father (Individual): An Individual object that represents the father", "source": "juraj_google_style"} -{"code": "def learn(self, grad_arr):\n\n encoder_delta_arr, _, encoder_grads_list = self.__encoder_decoder_controller.encoder.hidden_back_propagate(\n grad_arr[:, -1]\n )\n encoder_grads_list.insert(0, None)\n encoder_grads_list.insert(0, None)\n\n self.__encoder_decoder_controller.encoder.optimize(\n encoder_grads_list, \n self.__learning_rate,\n 1\n )\n\n return encoder_delta_arr", "docstring": "Update this Discriminator by ascending its stochastic gradient.\n\nArgs:\n grad_arr: `np.ndarray` of gradients.\n\nReturns:\n `np.ndarray` of delta or gradients.", "source": "juraj_google_style"} -{"code": "def set(self, prop, value):\n\n\n prop_parts = prop.split(\".\")\n if self.copy_dict:\n new_dict = copy.deepcopy(self.obj)\n else:\n new_dict = self.obj\n pointer = None\n parts_length = len(prop_parts) - 1\n for i, part in enumerate(prop_parts):\n if pointer is None and i == parts_length:\n new_dict[part] = value\n elif pointer is None:\n pointer = new_dict.get(part)\n elif i == parts_length:\n pointer[part] = value\n else:\n pointer = pointer.get(part)\n return new_dict", "docstring": "sets the dot notated property to the passed in value\n\nArgs:\n prop: a string of the property to retreive\n \"a.b.c\" ~ dictionary['a']['b']['c']\n value: the value to set the prop object", "source": "juraj_google_style"} -{"code": "def 
make_slot_check(wanted):\n\n if isinstance(wanted, types.FunctionType):\n return wanted # just forward the slot check function\n\n if isinstance(wanted, int):\n item, meta = wanted, None\n elif isinstance(wanted, Slot):\n item, meta = wanted.item_id, wanted.damage # TODO compare NBT\n elif isinstance(wanted, (Item, Block)):\n item, meta = wanted.id, wanted.metadata\n elif isinstance(wanted, str):\n item_or_block = get_item_or_block(wanted, init=True)\n item, meta = item_or_block.id, item_or_block.metadata\n else: # wanted is (id, meta)\n try:\n item, meta = wanted\n except TypeError:\n raise ValueError('Illegal args for make_slot_check(): %s' % wanted)\n\n return lambda slot: item == slot.item_id and meta in (None, slot.damage)", "docstring": "Creates and returns a function that takes a slot\n and checks if it matches the wanted item.\n\nArgs:\n wanted: function(Slot) or Slot or itemID or (itemID, metadata)", "source": "juraj_google_style"} -{"code": "def login_with_password(self, username, password, limit=10):\n\n warn(\"login_with_password is deprecated. Use login with sync=True.\",\n DeprecationWarning)\n return self.login(username, password, limit, sync=True)", "docstring": "Deprecated. Use ``login`` with ``sync=True``.\n\n Login to the homeserver.\n\nArgs:\n username (str): Account username\n password (str): Account password\n limit (int): Deprecated. How many messages to return when syncing.\n This will be replaced by a filter API in a later release.\n\nReturns:\n str: Access token\n\nRaises:\n MatrixRequestError", "source": "juraj_google_style"} -{"code": "def incrementKeySequenceCounter(self, iIncrementValue=1):\n\n print '%s call incrementKeySequenceCounter' % self.port\n print iIncrementValue\n currentKeySeq = ''\n try:\n currentKeySeq = self.getKeySequenceCounter()\n keySequence = int(currentKeySeq, 10) + iIncrementValue\n print keySequence\n return self.setKeySequenceCounter(keySequence)\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger('incrementKeySequenceCounter() Error: ' + str(e))", "docstring": "increment the key sequence with a given value\n\nArgs:\n iIncrementValue: specific increment value to be added\n\nReturns:\n True: successful to increment the key sequence with a given value\n False: fail to increment the key sequence with a given value", "source": "juraj_google_style"} -{"code": "def generate(self, descriptors):\n\n model_ids = self.search_tree.adj_list.keys()\n\n target_graph = None\n father_id = None\n descriptors = deepcopy(descriptors)\n elem_class = Elem\n if self.optimizemode is OptimizeMode.Maximize:\n elem_class = ReverseElem\n\n # Initialize the priority queue.\n pq = PriorityQueue()\n temp_list = []\n for model_id in model_ids:\n metric_value = self.searcher.get_metric_value_by_id(model_id)\n temp_list.append((metric_value, model_id))\n temp_list = sorted(temp_list)\n for metric_value, model_id in temp_list:\n graph = self.searcher.load_model_by_id(model_id)\n graph.clear_operation_history()\n graph.clear_weights()\n pq.put(elem_class(metric_value, model_id, graph))\n\n t = 1.0\n t_min = self.t_min\n alpha = 0.9\n opt_acq = self._get_init_opt_acq_value()\n while not pq.empty() and t > t_min:\n elem = pq.get()\n if self.optimizemode is OptimizeMode.Maximize:\n temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)\n else:\n temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)\n ap = math.exp(temp_exp)\n if ap >= random.uniform(0, 1):\n for temp_graph in transform(elem.graph):\n if contain(descriptors, temp_graph.extract_descriptor()):\n 
continue\n\n temp_acq_value = self.acq(temp_graph)\n pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph))\n descriptors.append(temp_graph.extract_descriptor())\n if self._accept_new_acq_value(opt_acq, temp_acq_value):\n opt_acq = temp_acq_value\n father_id = elem.father_id\n target_graph = deepcopy(temp_graph)\n t *= alpha\n\n # Did not found a not duplicated architecture\n if father_id is None:\n return None, None\n nm_graph = self.searcher.load_model_by_id(father_id)\n for args in target_graph.operation_history:\n getattr(nm_graph, args[0])(*list(args[1:]))\n return nm_graph, father_id", "docstring": "Generate new architecture.\n\nArgs:\n descriptors: All the searched neural architectures.\n\nReturns:\n graph: An instance of Graph. A morphed neural network with weights.\n father_id: The father node ID in the search tree.", "source": "juraj_google_style"} -{"code": "def getTraitCovarStdErrors(self,term_i):\n\n assert self.init, 'GP not initialised'\n assert self.fast==False, 'Not supported for fast implementation'\n\n if self.P==1:\n out = (2*self.getScales()[term_i])**2*self._getLaplaceCovar()[term_i,term_i]\n else:\n C = self.vd.getTerm(term_i).getTraitCovar()\n n_params = C.getNumberParams()\n par_index = 0\n for term in range(term_i-1):\n par_index += self.vd.getTerm(term_i).getNumberScales()\n Sigma1 = self._getLaplaceCovar()[par_index:(par_index+n_params),:][:,par_index:(par_index+n_params)]\n out = sp.zeros((self.P,self.P))\n for param_i in range(n_params):\n out += C.Kgrad_param(param_i)**2*Sigma1[param_i,param_i]\n for param_j in range(param_i):\n out += 2*abs(C.Kgrad_param(param_i)*C.Kgrad_param(param_j))*Sigma1[param_i,param_j]\n out = sp.sqrt(out)\n return out", "docstring": "Returns standard errors on trait covariances from term_i (for the covariance estimate \\see getTraitCovar)\n\nArgs:\n term_i: index of the term we are interested in", "source": "juraj_google_style"} -{"code": "def _initialize(self):\n # type: () -> None\n\n self._cdfp = BytesIO()\n self.svds = [] # type: List[headervd.PrimaryOrSupplementaryVD]\n self.brs = [] # type: List[headervd.BootRecord]\n self.vdsts = [] # type: List[headervd.VolumeDescriptorSetTerminator]\n self.eltorito_boot_catalog = None # type: Optional[eltorito.EltoritoBootCatalog]\n self._initialized = False\n self.rock_ridge = ''\n self.isohybrid_mbr = None # type: Optional[isohybrid.IsoHybrid]\n self.xa = False\n self._managing_fp = False\n self.pvds = [] # type: List[headervd.PrimaryOrSupplementaryVD]\n self._has_udf = False\n self.udf_bea = udfmod.BEAVolumeStructure() # type: udfmod.BEAVolumeStructure\n self.udf_nsr = udfmod.NSRVolumeStructure() # type: udfmod.NSRVolumeStructure\n self.udf_tea = udfmod.TEAVolumeStructure() # type: udfmod.TEAVolumeStructure\n self.udf_anchors = [] # type: List[udfmod.UDFAnchorVolumeStructure]\n self.udf_main_descs = self._UDFDescriptors()\n self.udf_reserve_descs = self._UDFDescriptors()\n self.udf_logical_volume_integrity = udfmod.UDFLogicalVolumeIntegrityDescriptor()\n self.udf_logical_volume_integrity_terminator = udfmod.UDFTerminatingDescriptor()\n self.udf_root = None # type: Optional[udfmod.UDFFileEntry]\n self.udf_file_set = udfmod.UDFFileSetDescriptor()\n self.udf_file_set_terminator = udfmod.UDFTerminatingDescriptor()\n self._needs_reshuffle = False\n self._rr_moved_record = None # type: ignore\n self._rr_moved_name = None # type: Optional[bytes]\n self._rr_moved_rr_name = None # type: Optional[bytes]\n self.enhanced_vd = None # type: Optional[headervd.PrimaryOrSupplementaryVD]\n 
self.joliet_vd = None # type: Optional[headervd.PrimaryOrSupplementaryVD]\n self._find_iso_record.cache_clear() # pylint: disable=no-member\n self._find_rr_record.cache_clear() # pylint: disable=no-member\n self._find_joliet_record.cache_clear() # pylint: disable=no-member\n self._find_udf_record.cache_clear() # pylint: disable=no-member\n self._write_check_list = [] # type: List[PyCdlib._WriteRange]\n self.version_vd = None # type: Optional[headervd.VersionVolumeDescriptor]\n self.inodes = []", "docstring": "An internal method to re-initialize the object. Called from\n both __init__ and close.\n\n Parameters:\n None.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def copy_remote_file(web_file, destination):\n\n size = 0\n dir_name = os.path.dirname(destination)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n with open(destination, 'wb') as file_:\n chunk_size = 8 * 1024\n for chunk in web_file.iter_content(chunk_size=chunk_size):\n if chunk:\n file_.write(chunk)\n size += len(chunk)\n return size", "docstring": "Check if exist the destination path, and copy the online resource\n file to local.\n\nArgs:\n :web_file: reference to online file resource to take.\n :destination: path to store the file.", "source": "juraj_google_style"} -{"code": "def max(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':\n\n return cls._binary_op(x, y, tf.maximum, tf.float32)", "docstring": "Returns a TensorFluent for the maximum function.TensorFluent\n\nArgs:\n x: The first operand.\n y: The second operand.\n\nReturns:\n A TensorFluent wrapping the maximum function.", "source": "juraj_google_style"} -{"code": "def parse_outputtrans(path_dir):\n\n run_type = None\n warning = None\n efermi = None\n gap = None\n doping_levels = []\n\n with open(os.path.join(path_dir, \"boltztrap.outputtrans\"), 'r') \\\n as f:\n for line in f:\n if \"WARNING\" in line:\n warning = line\n elif \"Calc type:\" in line:\n run_type = line.split()[-1]\n elif line.startswith(\"VBM\"):\n efermi = Energy(line.split()[1], \"Ry\").to(\"eV\")\n elif line.startswith(\"Egap:\"):\n gap = Energy(float(line.split()[1]), \"Ry\").to(\"eV\")\n elif line.startswith(\"Doping level number\"):\n doping_levels.append(float(line.split()[6]))\n\n return run_type, warning, efermi, gap, doping_levels", "docstring": "Parses .outputtrans file\n\nArgs:\n path_dir: dir containing boltztrap.outputtrans\n\nReturns:\n tuple - (run_type, warning, efermi, gap, doping_levels)", "source": "juraj_google_style"} -{"code": "def get_function_id(sig):\n\n s = sha3.keccak_256()\n s.update(sig.encode('utf-8'))\n return int(\"0x\" + s.hexdigest()[:8], 16)", "docstring": "Return the function id of the given signature\n\nArgs:\n sig (str)\n\nReturns:\n (int)", "source": "juraj_google_style"} -{"code": "def dbmin50years(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `dbmin50years`'.format(value))\n\n self._dbmin50years = value", "docstring": "Corresponds to IDD Field `dbmin50years`\n 50-year return period values for minimum extreme dry-bulb temperature\n\nArgs:\n value (float): value for IDD Field `dbmin50years`\n Unit: C\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def dump_as_two_item_tsv(record, output_file):\n\n for ordered_stats in 
record.ordered_statistics:\n if len(ordered_stats.items_base) != 1:\n continue\n if len(ordered_stats.items_add) != 1:\n continue\n output_file.write('{0}\\t{1}\\t{2:.8f}\\t{3:.8f}\\t{4:.8f}{5}'.format(\n list(ordered_stats.items_base)[0], list(ordered_stats.items_add)[0],\n record.support, ordered_stats.confidence, ordered_stats.lift,\n os.linesep))", "docstring": "Dump a relation record as TSV only for 2 item relations.\n\nArgs:\n record -- A RelationRecord instance to dump.\n output_file -- A file to output.", "source": "juraj_google_style"} -{"code": "def _ParseInformationalOptions(self, options):\n\n self._debug_mode = getattr(options, 'debug', False)\n self._quiet_mode = getattr(options, 'quiet', False)\n\n if self._debug_mode and self._quiet_mode:\n logger.warning(\n 'Cannot use debug and quiet mode at the same time, defaulting to '\n 'debug output.')", "docstring": "Parses the informational options.\n\nArgs:\n options (argparse.Namespace): command line arguments.", "source": "juraj_google_style"} -{"code": "def from_tuplelist(tuple_list):\n\n out = Layout()\n for physical, virtual in enumerate(tuple_list):\n if virtual is None:\n continue\n elif Layout.is_virtual(virtual):\n if virtual in out._v2p:\n raise LayoutError('Duplicate values not permitted; Layout is bijective.')\n out[virtual] = physical\n else:\n raise LayoutError(\"The list should contain elements of the form\"\n \" (Register, integer) or None\")\n return out", "docstring": "Populates a Layout from a list containing virtual\n qubits---(QuantumRegister, int) tuples---, or None.\n\nArgs:\n tuple_list (list):\n e.g.: [qr[0], None, qr[2], qr[3]]\n\nReturns:\n Layout: the corresponding Layout object\n\nRaises:\n LayoutError: If the elements are not (Register, integer) or None", "source": "juraj_google_style"} -{"code": "def add_new_entry(self, entry):\n # type: (EltoritoEntry) -> None\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('El Torito Section Header not yet initialized')\n\n self.num_section_entries += 1\n\n self.section_entries.append(entry)", "docstring": "A method to add a completely new entry to the list of entries of this\n header.\n\n Parameters:\n entry - The new EltoritoEntry object to add to the list of entries.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def add_case(self, case, update=False):\n\n existing_case = self.case(case)\n if existing_case and not update:\n raise CaseError(\"Case {} already exists\".format(case['case_id']))\n if existing_case:\n self.db.case.find_one_and_replace(\n {'case_id': case['case_id']},\n case,\n )\n else:\n self.db.case.insert_one(case)\n\n return case", "docstring": "Add a case to the case collection\n\n If the case exists and update is False raise error.\n\nArgs:\n db (MongoClient): A connection to the mongodb\n case (dict): A case dictionary\n update(bool): If existing case should be updated\n\nReturns:\n mongo_case_id(ObjectId)", "source": "juraj_google_style"} -{"code": "def load_glove(file):\n\n model = {}\n with open(file, encoding=\"utf8\", errors='ignore') as f:\n for line in f:\n line = line.split(' ')\n word = line[0]\n vector = np.array([float(val) for val in line[1:]])\n model[word] = vector\n\n return model", "docstring": "Loads GloVe vectors in numpy array.\n\nArgs:\n file (str): a path to a glove file.\n\nReturns:\n dict: a dict of numpy arrays.", "source": "juraj_google_style"} -{"code": "def addSearchers(self, *searchers):\n\n self._searchers.extend(searchers)\n\n debug.logger & debug.flagCompiler and 
debug.logger(\n 'current compiled MIBs location(s): %s' % ', '.join([str(x) for x in self._searchers]))\n\n return self", "docstring": "Add more transformed MIBs repositories.\n\n MibCompiler.compile will invoke each of configured searcher objects\n in order of their addition asking each if already transformed MIB\n module already exists and is more recent than specified.\n\nArgs:\n searchers: searcher object(s)\n\nReturns:\n reference to itself (can be used for call chaining)", "source": "juraj_google_style"} -{"code": "def __init__(self, parameter, value, valid_values=None):\n # type: (str, str, typing.Sequence[str]) -> None\n\n msg = 'Invalid value \"{value}\" supplied to {parameter}.'.format(\n parameter=parameter, value=value)\n if valid_values:\n msg += ' Valid options are: {}'.format(', '.join(valid_values))\n super(InvalidCliValueError, self).__init__(msg)", "docstring": "Instantiate the exception with a descriptive message.\n\nArgs:\n parameter: The CLI parameter with the invalid value.\n value: The invalid value passed to the CLI parameter.\n valid_values: The values that would have been accepted by the\n parameter.", "source": "juraj_google_style"} -{"code": "def CaptureVariablesList(self, items, depth, empty_message, limits):\n\n v = []\n for name, value in items:\n if (self._total_size >= self.max_size) or (\n len(v) >= limits.max_list_items):\n v.append({\n 'status': {\n 'refersTo': 'VARIABLE_VALUE',\n 'description': {\n 'format':\n ('Only first $0 items were captured. Use in an '\n 'expression to see all items.'),\n 'parameters': [str(len(v))]}}})\n break\n v.append(self.CaptureNamedVariable(name, value, depth, limits))\n\n if not v:\n return [{'status': {\n 'refersTo': 'VARIABLE_NAME',\n 'description': {'format': empty_message}}}]\n\n return v", "docstring": "Captures list of named items.\n\nArgs:\n items: iterable of (name, value) tuples.\n depth: nested depth of dictionaries and vectors for items.\n empty_message: info status message to set if items is empty.\n limits: Per-object limits for capturing variable data.\n\nReturns:\n List of formatted variable objects.", "source": "juraj_google_style"} -{"code": "def _get_common_params(self, user_id, attributes):\n\n commonParams = {}\n\n commonParams[self.EventParams.PROJECT_ID] = self._get_project_id()\n commonParams[self.EventParams.ACCOUNT_ID] = self._get_account_id()\n\n visitor = {}\n visitor[self.EventParams.END_USER_ID] = user_id\n visitor[self.EventParams.SNAPSHOTS] = []\n\n commonParams[self.EventParams.USERS] = []\n commonParams[self.EventParams.USERS].append(visitor)\n commonParams[self.EventParams.USERS][0][self.EventParams.ATTRIBUTES] = self._get_attributes(attributes)\n\n commonParams[self.EventParams.SOURCE_SDK_TYPE] = 'python-sdk'\n commonParams[self.EventParams.ENRICH_DECISIONS] = True\n commonParams[self.EventParams.SOURCE_SDK_VERSION] = version.__version__\n commonParams[self.EventParams.ANONYMIZE_IP] = self._get_anonymize_ip()\n commonParams[self.EventParams.REVISION] = self._get_revision()\n\n return commonParams", "docstring": "Get params which are used same in both conversion and impression events.\n\nArgs:\n user_id: ID for user.\n attributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\n Dict consisting of parameters common to both impression and conversion events.", "source": "juraj_google_style"} -{"code": "def ServiceWorker_deliverPushMessage(self, origin, registrationId, data):\n\n\t\tassert isinstance(origin, (str,)\n\t\t ), \"Argument 'origin' must be of 
type '['str']'. Received type: '%s'\" % type(\n\t\t origin)\n\t\tassert isinstance(registrationId, (str,)\n\t\t ), \"Argument 'registrationId' must be of type '['str']'. Received type: '%s'\" % type(\n\t\t registrationId)\n\t\tassert isinstance(data, (str,)\n\t\t ), \"Argument 'data' must be of type '['str']'. Received type: '%s'\" % type(\n\t\t data)\n\t\tsubdom_funcs = self.synchronous_command('ServiceWorker.deliverPushMessage',\n\t\t origin=origin, registrationId=registrationId, data=data)\n\t\treturn subdom_funcs", "docstring": "Function path: ServiceWorker.deliverPushMessage\n Domain: ServiceWorker\n Method name: deliverPushMessage\n\n Parameters:\n Required arguments:\n 'origin' (type: string) -> No description\n 'registrationId' (type: string) -> No description\n 'data' (type: string) -> No description\n No return value.", "source": "juraj_google_style"} -{"code": "def __init__(self, desc, scope=None, syn_type=None, xref=None):\n\n if isinstance(desc, six.binary_type):\n self.desc = desc.decode('utf-8')\n elif isinstance(desc, six.text_type):\n self.desc = desc\n else:\n raise ValueError(\"desc must be bytes or str, not {}\".format(type(desc).__name__))\n\n if isinstance(scope, six.binary_type):\n self.scope = scope.decode('utf-8')\n elif isinstance(scope, six.text_type):\n self.scope = scope\n elif scope is None:\n self.scope = \"RELATED\"\n\n if syn_type is not None:\n try:\n self.syn_type = SynonymType._instances[syn_type]\n self.scope = self.syn_type.scope or self.scope or 'RELATED'\n except KeyError as e:\n raise ValueError(\"Undefined synonym type: {}\".format(syn_type))\n else:\n self.syn_type = None\n\n if self.scope not in {'EXACT', 'BROAD', 'NARROW', 'RELATED', None}:\n raise ValueError(\"scope must be 'NARROW', 'BROAD', 'EXACT', 'RELATED' or None\")\n\n self.xref = xref or []", "docstring": "Create a new synonym.\n\nArgs:\n desc (str): a description of the synonym.\n scope (str, optional): the scope of the synonym (either\n EXACT, BROAD, NARROW or RELATED).\n syn_type (SynonymType, optional): the type of synonym if\n relying on a synonym type defined in the *Typedef*\n section of the ontology.\n xref (list, optional): a list of cross-references for the\n synonym.", "source": "juraj_google_style"} -{"code": "def encode_field(self, field, value):\n\n if isinstance(field, messages.BytesField):\n if field.repeated:\n value = [base64.b64encode(byte) for byte in value]\n else:\n value = base64.b64encode(value)\n elif isinstance(field, message_types.DateTimeField):\n # DateTimeField stores its data as a RFC 3339 compliant string.\n if field.repeated:\n value = [i.isoformat() for i in value]\n else:\n value = value.isoformat()\n return value", "docstring": "Encode a python field value to a JSON value.\n\nArgs:\n field: A ProtoRPC field instance.\n value: A python value supported by field.\n\nReturns:\n A JSON serializable value appropriate for field.", "source": "juraj_google_style"} -{"code": "def create_media_service_rg(access_token, subscription_id, rgname, location, stoname, msname):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', rgname,\n '/providers/microsoft.media/mediaservices/', msname,\n '?api-version=', MEDIA_API])\n ms_body = {'name': msname}\n ms_body['location'] = location\n sub_id_str = '/subscriptions/' + subscription_id + '/resourceGroups/' + rgname + \\\n '/providers/Microsoft.Storage/storageAccounts/' + stoname\n storage_account = {'id': sub_id_str}\n storage_account['isPrimary'] = True\n properties = 
{'storageAccounts': [storage_account]}\n ms_body['properties'] = properties\n body = json.dumps(ms_body)\n return do_put(endpoint, body, access_token)", "docstring": "Create a media service in a resource group.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n rgname (str): Azure resource group name.\n location (str): Azure data center location. E.g. westus.\n stoname (str): Azure storage account name.\n msname (str): Media service name.\n\nReturns:\n HTTP response. JSON body.", "source": "juraj_google_style"} -{"code": "def __init__(self, parent=None):\n\n super(BigIntSpinbox, self).__init__(parent)\n\n self._singleStep = 1\n self._minimum = -18446744073709551616\n self._maximum = 18446744073709551615\n\n rx = QtCore.QRegExp(\"[0-9]\\\\d{0,20}\")\n validator = QtGui.QRegExpValidator(rx, self)\n\n self._lineEdit = QtGui.QLineEdit(self)\n self._lineEdit.setText('0')\n self._lineEdit.setValidator(validator)\n self.setLineEdit(self._lineEdit)", "docstring": "the __init__ method.\n\nArgs:\n parent (QObject): defaults to None. If parent is 0, the new widget becomes a window.\n If parent is another widget, this widget becomes a child window inside parent.\n The new widget is deleted when its parent is deleted.", "source": "juraj_google_style"} -{"code": "def VerifyStructure(self, parser_mediator, line):\n\n try:\n structure = self._LOG_LINE.parseString(line)\n except pyparsing.ParseException:\n logger.debug('Not a Sophos Anti-Virus log file')\n return False\n\n # Expect spaces at position 9 and 16.\n if ' ' not in (line[8], line[15]):\n logger.debug('Not a Sophos Anti-Virus log file')\n return False\n\n try:\n dfdatetime_time_elements.TimeElements(\n time_elements_tuple=structure.date_time)\n except ValueError:\n logger.debug((\n 'Not a Sophos Anti-Virus log file, invalid date and time: '\n '{0!s}').format(structure.date_time))\n return False\n\n return True", "docstring": "Verify that this file is a Sophos Anti-Virus log file.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfVFS.\n line (str): line from a text file.\n\nReturns:\n bool: True if the line is in the expected format, False if not.", "source": "juraj_google_style"} -{"code": "def download_models(self, uniprot_acc, outdir='', force_rerun=False):\n\n downloaded = []\n subset = self.get_models(uniprot_acc)\n\n for entry in subset:\n ident = '{}_{}_{}_{}'.format(uniprot_acc, entry['template'], entry['from'], entry['to'])\n outfile = op.join(outdir, ident + '.pdb')\n\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n response = requests.get(entry['url'])\n\n if response.status_code == 404:\n log.error('{}: 404 returned, no model available.'.format(ident))\n\n else:\n with open(outfile, 'w') as f:\n f.write(response.text)\n\n log.debug('{}: downloaded homology model'.format(ident))\n downloaded.append(outfile)\n else:\n downloaded.append(outfile)\n\n return downloaded", "docstring": "Download all models available for a UniProt accession number.\n\nArgs:\n uniprot_acc (str): UniProt ACC/ID\n outdir (str): Path to output directory, uses working directory if not set\n force_rerun (bool): Force a redownload the models if they already exist\n\nReturns:\n list: Paths to the downloaded models", "source": "juraj_google_style"} -{"code": "def activate_nsxcontroller(self, **kwargs):\n\n name = kwargs.pop('name')\n name_args = dict(name=name)\n method_name = 'nsx_controller_activate'\n 
method_class = self._brocade_tunnels\n nsxcontroller_attr = getattr(method_class, method_name)\n config = nsxcontroller_attr(**name_args)\n output = self._callback(config)\n return output", "docstring": "Activate NSX Controller\n\nArgs:\n name (str): nsxcontroller name\n callback (function): A function executed upon completion of the\n method.\n\nReturns:\n Return value of `callback`.\n\nRaises:\n None", "source": "juraj_google_style"} -{"code": "def _operator(attr):\n\n @functools.wraps(attr)\n def func(a, *args):\n return attr(a.value, *args)\n return func", "docstring": "Defers an operator overload to `attr`.\n\nArgs:\n attr: Operator attribute to use.\n\nReturns:\n Function calling operator attribute.", "source": "juraj_google_style"} -{"code": "def verify_account(self, email_address):\n\n request = self._get_request()\n resp = request.post(self.ACCOUNT_VERIFY_URL, {\n 'email_address': email_address\n })\n return ('account' in resp)", "docstring": "Verify whether a HelloSign Account exists\n\nArgs:\n email_address (str): Email address for the account to verify\n\nReturns:\n True or False", "source": "juraj_google_style"} -{"code": "def __init__(self, etk, cdr_document: Dict, mime_type, url, doc_id=None) -> None:\n\n\n Segment.__init__(self, json_path=\"$\", _value=cdr_document, _document=self)\n self.etk = etk\n self.cdr_document = cdr_document\n self.mime_type = mime_type\n self.url = url\n if doc_id:\n self.cdr_document[\"doc_id\"] = doc_id\n self.extraction_provenance_records = list()\n if self.etk.kg_schema:\n self.kg = KnowledgeGraph(self.etk.kg_schema, self.etk.ontology, self)\n else:\n self.kg = None\n if not self.etk.kg_schema:\n self.etk.log(\"Schema not found.\", \"warning\", self.doc_id, self.url)\n self._provenance_id_index = 0\n self._provenances = dict()\n self._jsonpath_provenances = dict()\n self._kg_provenances = dict()", "docstring": "Wrapper object for CDR documents.\n\nArgs:\n etk (ETK): embed the etk object so that docs have access to global info.\n cdr_document (JSON): the raw CDR document received in ETK.\n\n Returns: the wrapped CDR document", "source": "juraj_google_style"} -{"code": "def run_step(self, representer):\n\n assert representer, (\"ObjectRepresenter instance required to run \"\n \"ObjectRewriterStep.\")\n rewriter = ObjectRewriter(self.context.get_formatted_iterable,\n representer)\n super().run_step(rewriter)", "docstring": "Do the object in-out rewrite.\n\nArgs:\n representer: A pypyr.filesystem.ObjectRepresenter instance.", "source": "juraj_google_style"} -{"code": "def member_update(self, repl_id, member_id, params):\n\n repl = self[repl_id]\n result = repl.member_update(member_id, params)\n self[repl_id] = repl\n return result", "docstring": "apply new params to replica set member\n\nArgs:\n repl_id - replica set identity\n member_id - member index\n params - new member's params\n\n return True if operation success otherwise False", "source": "juraj_google_style"} -{"code": "def p_portfolio(I,sigma,r,alpha,beta):\n\n\n model = Model(\"p_portfolio\")\n\n x = {}\n for i in I:\n x[i] = model.addVar(vtype=\"C\", name=\"x(%s)\"%i) # quantity of i to buy\n rho = model.addVar(vtype=\"C\", name=\"rho\")\n rhoaux = model.addVar(vtype=\"C\", name=\"rhoaux\")\n\n model.addCons(rho == quicksum(r[i]*x[i] for i in I))\n model.addCons(quicksum(x[i] for i in I) == 1)\n\n model.addCons(rhoaux == (alpha - rho)*(1/phi_inv(beta))) #todo\n model.addCons(quicksum(sigma[i]**2 * x[i] * x[i] for i in I) <= rhoaux * rhoaux)\n\n model.setObjective(rho, \"maximize\")\n\n 
model.data = x\n return model", "docstring": "p_portfolio -- modified markowitz model for portfolio optimization.\n Parameters:\n - I: set of items\n - sigma[i]: standard deviation of item i\n - r[i]: revenue of item i\n - alpha: acceptance threshold\n - beta: desired confidence level\n Returns a model, ready to be solved.", "source": "juraj_google_style"} -{"code": "def __str__(self):\n\n text = super(Baken, self).__format__('dms')\n if self._locator:\n text = '%s (%s)' % (self._locator, text)\n return text", "docstring": "Pretty printed location string.\n\nArgs:\n mode (str): Coordinate formatting system to use\n\nReturns:\n str: Human readable string representation of ``Baken`` object", "source": "juraj_google_style"} -{"code": "def __call__(self, data, dtype=None):\n\n if isinstance(data, np.ndarray):\n if not data.size > 0:\n raise ValueError(\"empty array can't be serialized\")\n return _npy_serialize(data)\n\n if isinstance(data, list):\n if not len(data) > 0:\n raise ValueError(\"empty array can't be serialized\")\n return _npy_serialize(np.array(data, dtype))\n\n # files and buffers. Assumed to hold npy-formatted data.\n if hasattr(data, 'read'):\n return data.read()\n\n return _npy_serialize(np.array(data))", "docstring": "Serialize data into the request body in NPY format.\n\nArgs:\n data (object): Data to be serialized. Can be a numpy array, list, file, or buffer.\n\nReturns:\n object: NPY serialized data used for the request.", "source": "juraj_google_style"} -{"code": "def resize_imgs(self, targ, new_path, resume=True, fn=None):\n\n dest = resize_imgs(self.fnames, targ, self.path, new_path, resume, fn)\n return self.__class__(self.fnames, self.y, self.transform, dest)", "docstring": "resize all images in the dataset and save them to `new_path`\n\nArgs:\n targ (int): the target size\n new_path (string): the new folder to save the images\n resume (bool): if true (default), allow resuming a partial resize operation by checking for the existence\n of individual images rather than the existence of the directory\n fn (function): custom resizing function Img -> Img", "source": "juraj_google_style"} -{"code": "def extend(self, *iterables):\n\n\n for value in iterables:\n list.extend(self, value)\n return self", "docstring": "Add all values of all iterables at the end of the list\n\nArgs:\n iterables: iterable which content to add at the end\n\nExample:\n >>> from ww import l\n >>> lst = l([])\n >>> lst.extend([1, 2])\n [1, 2]\n >>> lst\n [1, 2]\n >>> lst.extend([3, 4]).extend([5, 6])\n [1, 2, 3, 4, 5, 6]\n >>> lst\n [1, 2, 3, 4, 5, 6]", "source": "juraj_google_style"} -{"code": "def convert_snapshot(self, shift, instruction):\n\n command_dict = {\n 'name': 'snapshot',\n 't0': shift+instruction.start_time,\n 'label': instruction.name,\n 'type': instruction.type\n }\n return self._qobj_model(**command_dict)", "docstring": "Return converted `Snapshot`.\n\nArgs:\n shift(int): Offset time.\n instruction (Snapshot): snapshot instruction.\n\nReturns:\n dict: Dictionary of required parameters.", "source": "juraj_google_style"} -{"code": "def PC_varExplained(Y,standardized=True):\n\n # figuring out the number of latent factors\n if standardized:\n Y-=Y.mean(0)\n Y/=Y.std(0)\n covY = sp.cov(Y)\n S,U = linalg.eigh(covY+1e-6*sp.eye(covY.shape[0]))\n S = S[::-1]\n rv = np.array([S[0:i].sum() for i in range(1,S.shape[0])])\n rv/= S.sum()\n return rv", "docstring": "Run PCA and calculate the cumulative fraction of variance\n\nArgs:\n Y: phenotype values\n standardize: if True, phenotypes are 
standardized\n\nReturns:\n var: cumulative distribution of variance explained", "source": "juraj_google_style"} -{"code": "def __init__(self, filesystem, os_module=None):\n\n self.filesystem = filesystem\n self._os_path = self._OS_PATH_COPY\n if os_module is None:\n warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,\n stacklevel=2)\n self._os_path.os = self.os = os_module\n self.sep = self.filesystem.path_separator\n self.altsep = self.filesystem.alternative_path_separator", "docstring": "Init.\n\nArgs:\n filesystem: FakeFilesystem used to provide file system information\n os_module: (deprecated) FakeOsModule to assign to self.os", "source": "juraj_google_style"} -{"code": "def reset(self, indices=None):\n\n if indices is None:\n indices = np.arange(len(self._envs))\n if self._blocking:\n observs = [self._envs[index].reset() for index in indices]\n else:\n observs = [self._envs[index].reset(blocking=False) for index in indices]\n observs = [observ() for observ in observs]\n observ = np.stack(observs)\n return observ", "docstring": "Reset the environment and convert the resulting observation.\n\nArgs:\n indices: The batch indices of environments to reset; defaults to all.\n\nReturns:\n Batch of observations.", "source": "juraj_google_style"} -{"code": "def file_to_list(file_name, file_location):\n\n file = __os.path.join(file_location, file_name)\n read_file = open(file, \"r\")\n temp_list = read_file.read().splitlines()\n read_file.close()\n return temp_list", "docstring": "Function to import a text file to a list\n\nArgs:\n file_name: The name of file to be import\n file_location: The location of the file, derive from the os module\n\n Returns: returns a list", "source": "juraj_google_style"} -{"code": "def save_index(self, filename):\n\n data = {}\n for f in self.files.values():\n entities = {v.entity.id: v.value for k, v in f.tags.items()}\n data[f.path] = {'domains': f.domains, 'entities': entities}\n with open(filename, 'w') as outfile:\n json.dump(data, outfile)", "docstring": "Save the current Layout's index to a .json file.\n\nArgs:\n filename (str): Filename to write to.\n\n Note: At the moment, this won't serialize directory-specific config\n files. 
This means reconstructed indexes will only work properly in\n cases where there aren't multiple layout specs within a project.", "source": "juraj_google_style"} -{"code": "def orphan_entry(self, rval: RawObject) -> \"ArrayEntry\":\n\n val = self.entry_from_raw(rval)\n return ArrayEntry(0, EmptyList(), EmptyList(), val, None, self,\n val.timestamp)", "docstring": "Return an isolated entry of the receiver.\n\nArgs:\n rval: Raw object to be used for the returned entry.", "source": "juraj_google_style"} -{"code": "def build_query_string(self, data):\n\n query = []\n keys_to_be_removed = []\n for key, value in data.items():\n if key not in ['version', 'restApi', 'resourcePath']:\n if not key == 'method':\n if key == 'points':\n value = ','.join(str(val) for val in value)\n keys_to_be_removed.append(key)\n query.append('{0}={1}'.format(key, value))\n keys_to_be_removed.append(key)\n keys_to_be_removed.append(key)\n querystring = '&'.join(query)\n data['query'] = '{0}?{1}'.format(data['method'], querystring)\n for k in list(set(keys_to_be_removed)):\n del data[k]\n return data", "docstring": "This method occurs after dumping the data into the class.\n\nArgs:\n data (dict): dictionary of all the query values\n\nReturns:\n data (dict): ordered dict of all the values", "source": "juraj_google_style"} -{"code": "def push(self, filename, data):\n\n self._queue.put(Chunk(filename, data))", "docstring": "Push a chunk of a file to the streaming endpoint.\n\nArgs:\n filename: Name of file that this is a chunk of.\n chunk_id: TODO: change to 'offset'\n chunk: File data.", "source": "juraj_google_style"} -{"code": "def get_electron_number(self, charge=0):\n\n atomic_number = constants.elements['atomic_number'].to_dict()\n return sum([atomic_number[atom] for atom in self['atom']]) - charge", "docstring": "Return the number of electrons.\n\nArgs:\n charge (int): Charge of the molecule.\n\nReturns:\n int:", "source": "juraj_google_style"} -{"code": "def get_nn(self, structure, n):\n\n\n return [e['site'] for e in self.get_nn_info(structure, n)]", "docstring": "Get near neighbors of site with index n in structure.\n\nArgs:\n structure (Structure): input structure.\n n (integer): index of site in structure for which to determine\n neighbors.\n\nReturns:\n sites (list of Site objects): near neighbors.", "source": "juraj_google_style"} -{"code": "def clean_for_storage(self, data):\n\n data = self.data_to_unicode(data)\n if isinstance(data, dict):\n for k in dict(data).keys():\n if k == '_id':\n del data[k]\n continue\n if '.' in k:\n new_k = k.replace('.', '_')\n data[new_k] = data[k]\n del data[k]\n k = new_k\n if isinstance(data[k], dict):\n data[k] = self.clean_for_storage(data[k])\n elif isinstance(data[k], list):\n data[k] = [self.clean_for_storage(item) for item in data[k]]\n return data", "docstring": "Clean data in preparation for storage.\n\n Deletes items with key having a '.' or is '_id'. 
Also deletes those items\n whose value is a dictionary or a list.\n\nArgs:\n data: Sample data dictionary to be cleaned.\n\nReturns:\n Cleaned data dictionary.", "source": "juraj_google_style"} -{"code": "def batch(self, timelimit=None):\n\n from .launcher import BatchLauncher\n # Create a batch dir from the flow.workdir.\n prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])\n prev_dir = os.path.join(os.path.sep, prev_dir)\n workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + \"_batch\")\n\n return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)", "docstring": "Run the flow in batch mode, return exit status of the job script.\n Requires a manager.yml file and a batch_adapter adapter.\n\nArgs:\n timelimit: Time limit (int with seconds or string with time given with the slurm convention:\n \"days-hours:minutes:seconds\"). If timelimit is None, the default value specified in the\n `batch_adapter` entry of `manager.yml` is used.", "source": "juraj_google_style"} -{"code": "def model_fn_sharded(self, sharded_features):\n\n dp = self._data_parallelism\n\n # [{str: Tensor}]. Transpose of 'sharded_features'.\n datashard_to_features = self._to_features_per_datashard(sharded_features)\n if self.use_body_sharded():\n if self.hparams.scheduled_sampling_prob > 0.0:\n raise NotImplementedError(\n \"Scheduled sampling for non-sharded body only.\")\n\n # MoE models override body_sharded\n transformed_features = dp(self.bottom, datashard_to_features)\n body_out = self.body_sharded(\n self._to_single_features_dict(transformed_features))\n body_out, losses = self._normalize_body_output(body_out)\n if \"training\" in losses:\n log_info(\"Skipping T2TModel top and loss because training loss \"\n \"returned from body\")\n sharded_logits = body_out\n else:\n if isinstance(body_out, dict):\n sharded_logits = collections.OrderedDict()\n sharded_losses = collections.OrderedDict()\n for k, v in sorted(six.iteritems(body_out)):\n sharded_logits[k] = dp(self.top, v, datashard_to_features)\n sharded_losses[k] = dp(self.loss, sharded_logits[k],\n datashard_to_features)\n training_loss_dict = average_sharded_losses([({\n \"training\": l\n } for l in loss) for loss in sharded_losses.values()])\n losses.update(training_loss_dict)\n else:\n sharded_logits = dp(self.top, body_out, datashard_to_features)\n sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)\n if isinstance(sharded_losses, tuple):\n nums, dens = sharded_losses\n sharded_losses = zip(nums, dens)\n training_loss_dict = average_sharded_losses([{\n \"training\": loss\n } for loss in sharded_losses])\n losses.update(training_loss_dict)\n else:\n sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)\n sharded_logits, sharded_losses = dp(\n self.maybe_scheduled_sampling,\n datashard_to_features, sharded_logits, sharded_losses)\n if isinstance(sharded_logits[0], dict):\n temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}\n for k, _ in six.iteritems(sharded_logits[0]):\n for l in sharded_logits:\n temp_dict[k].append(l[k])\n sharded_logits = temp_dict\n losses = average_sharded_losses(sharded_losses)\n\n return sharded_logits, losses", "docstring": "Estimator model_fn sharded along batch dimension.\n\nArgs:\n sharded_features: {str: [Tensor]}. Features sharded along batch dimension.\n Each list is the same length (== number of shards).\n\nReturns:\n sharded_logits: [Tensor]. Logits for each shard of examples.\n losses: {str: 0-D Tensor}. 
Loss averaged across shards.", "source": "juraj_google_style"} -{"code": "def start(self, request: Request) -> Response:\n\n if self._session_state != SessionState.ready:\n raise RuntimeError('Session already started')\n\n assert not self._request\n self._request = request\n _logger.debug(__('Client fetch request {0}.', request))\n\n connection = yield from self._acquire_request_connection(request)\n full_url = connection.proxied and not connection.tunneled\n\n self._stream = stream = self._stream_factory(connection)\n\n yield from self._stream.reconnect()\n\n request.address = connection.address\n\n self.event_dispatcher.notify(self.Event.begin_request, request)\n write_callback = functools.partial(self.event_dispatcher.notify, self.Event.request_data)\n stream.data_event_dispatcher.add_write_listener(write_callback)\n\n yield from stream.write_request(request, full_url=full_url)\n\n if request.body:\n assert 'Content-Length' in request.fields\n length = int(request.fields['Content-Length'])\n yield from stream.write_body(request.body, length=length)\n\n stream.data_event_dispatcher.remove_write_listener(write_callback)\n self.event_dispatcher.notify(self.Event.end_request, request)\n\n read_callback = functools.partial(self.event_dispatcher.notify, self.Event.response_data)\n stream.data_event_dispatcher.add_read_listener(read_callback)\n\n self._response = response = yield from stream.read_response()\n response.request = request\n\n self.event_dispatcher.notify(self.Event.begin_response, response)\n\n self._session_state = SessionState.request_sent\n\n return response", "docstring": "Begin a HTTP request\n\nArgs:\n request: Request information.\n\nReturns:\n A response populated with the HTTP headers.\n\n Once the headers are received, call :meth:`download`.\n\n Coroutine.", "source": "juraj_google_style"} -{"code": "def is_feature_enabled(self, feature_key, user_id, attributes=None):\n\n\n if not self.is_valid:\n self.logger.error(enums.Errors.INVALID_DATAFILE.format('is_feature_enabled'))\n return False\n\n if not validator.is_non_empty_string(feature_key):\n self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))\n return False\n\n if not isinstance(user_id, string_types):\n self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n return False\n\n if not self._validate_user_inputs(attributes):\n return False\n\n feature = self.config.get_feature_from_key(feature_key)\n if not feature:\n return False\n\n feature_enabled = False\n source_info = {}\n decision = self.decision_service.get_variation_for_feature(feature, user_id, attributes)\n is_source_experiment = decision.source == enums.DecisionSources.FEATURE_TEST\n\n if decision.variation:\n if decision.variation.featureEnabled is True:\n feature_enabled = True\n # Send event if Decision came from an experiment.\n if is_source_experiment:\n source_info = {\n 'experiment_key': decision.experiment.key,\n 'variation_key': decision.variation.key\n }\n self._send_impression_event(decision.experiment,\n decision.variation,\n user_id,\n attributes)\n\n if feature_enabled:\n self.logger.info('Feature \"%s\" is enabled for user \"%s\".' % (feature_key, user_id))\n else:\n self.logger.info('Feature \"%s\" is not enabled for user \"%s\".' 
% (feature_key, user_id))\n\n self.notification_center.send_notifications(\n enums.NotificationTypes.DECISION,\n enums.DecisionNotificationTypes.FEATURE,\n user_id,\n attributes or {},\n {\n 'feature_key': feature_key,\n 'feature_enabled': feature_enabled,\n 'source': decision.source,\n 'source_info': source_info\n }\n )\n\n return feature_enabled", "docstring": "Returns true if the feature is enabled for the given user.\n\nArgs:\n feature_key: The key of the feature for which we are determining if it is enabled or not for the given user.\n user_id: ID for user.\n attributes: Dict representing user attributes.\n\nReturns:\n True if the feature is enabled for the user. False otherwise.", "source": "juraj_google_style"} -{"code": "def _make_generic(of, coll):\n\n\n assert(issubclass(coll, Collection))\n key = (coll.__name__, \"%s.%s\" % (of.__module__, of.__name__))\n if key in GENERIC_TYPES:\n if GENERIC_TYPES[key].itemtype != of:\n raise exc.PropertyNotUnique(key=key)\n else:\n # oh, we get to name it? Goodie!\n generic_name = \"%s%s\" % (of.__name__, coll.suffix)\n GENERIC_TYPES[key] = type(\n generic_name, (coll, _Generic), dict(itemtype=of, generic_key=key)\n )\n mod = sys.modules[of.__module__]\n if not hasattr(mod, generic_name):\n setattr(mod, generic_name, GENERIC_TYPES[key])\n return GENERIC_TYPES[key]", "docstring": "Used to make a new Collection type, without that type having to be\n defined explicitly. Generates a new type name using the item type and a\n 'suffix' Collection class property.\n\nArgs:\n ``of=``\\ *Record type*\n The type of values of the collection\n\n ``coll=``\\ *Collection sub-class*\n The container class.", "source": "juraj_google_style"} -{"code": "def grab_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None, if_id=0):\n\n\n self.freqs = self.populate_freqs()\n self.timestamps = self.populate_timestamps()\n\n if f_start is None:\n f_start = self.freqs[0]\n if f_stop is None:\n f_stop = self.freqs[-1]\n\n i0 = np.argmin(np.abs(self.freqs - f_start))\n i1 = np.argmin(np.abs(self.freqs - f_stop))\n\n if i0 < i1:\n plot_f = self.freqs[i0:i1 + 1]\n plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + 1])\n else:\n plot_f = self.freqs[i1:i0 + 1]\n plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + 1])\n\n return plot_f, plot_data", "docstring": "Extract a portion of data by frequency range.\n\nArgs:\n f_start (float): start frequency in MHz\n f_stop (float): stop frequency in MHz\n if_id (int): IF input identification (req. 
when multiple IFs in file)\n\nReturns:\n (freqs, data) (np.arrays): frequency axis in MHz and data subset", "source": "juraj_google_style"} -{"code": "def camelize(word):\n\n return ''.join(w[0].upper() + w[1:]\n for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' '))", "docstring": "Convert a word from lower_with_underscores to CamelCase.\n\nArgs:\n word: The string to convert.\n\nReturns:\n The modified string.", "source": "juraj_google_style"} -{"code": "def map_template(category, template_list):\n\n\n if isinstance(template_list, str):\n template_list = [template_list]\n\n for template in template_list:\n path = os.path.normpath(category)\n while path is not None:\n for extension in ['', '.html', '.htm', '.xml', '.json']:\n candidate = os.path.join(path, template + extension)\n file_path = os.path.join(config.template_folder, candidate)\n if os.path.isfile(file_path):\n return Template(template, candidate, file_path)\n parent = os.path.dirname(path)\n if parent != path:\n path = parent\n else:\n path = None", "docstring": "Given a file path and an acceptable list of templates, return the\n best-matching template's path relative to the configured template\n directory.\n\nArgs:\n category -- The path to map\n template_list -- A template to look up (as a string), or a list of templates.", "source": "juraj_google_style"} -{"code": "def record(self):\n # type: () -> bytes\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog not yet initialized')\n\n outlist = [self.validation_entry.record(), self.initial_entry.record()]\n\n for sec in self.sections:\n outlist.append(sec.record())\n\n for entry in self.standalone_entries:\n outlist.append(entry.record())\n\n return b''.join(outlist)", "docstring": "A method to generate a string representing this El Torito Boot Catalog.\n\n Parameters:\n None.\n\nReturns:\n A string representing this El Torito Boot Catalog.", "source": "juraj_google_style"} -{"code": "def __init__(self, checker, message):\n\n self.checker = checker\n self.message = message\n Validator.validators_count += 1\n # Used to assert validators in the order they were registered.\n self.insertion_index = Validator.validators_count", "docstring": "Constructor to create all validators.\n\nArgs:\n checker: function to verify the constraint.\n Input of this method varies, see SingleFlagValidator and\n multi_flags_validator for a detailed description.\n message: str, error message to be shown to the user.", "source": "juraj_google_style"} -{"code": "def _separate_words(string):\n\n words = []\n separator = \"\"\n\n # Index of current character. 
Initially 1 because we don't want to check\n # if the 0th character is a boundary.\n i = 1\n # Index of first character in a sequence\n s = 0\n # Previous character.\n p = string[0:1]\n\n # Treat an all-caps stringiable as lower-case, so that every letter isn't\n # counted as a boundary.\n was_upper = False\n if string.isupper():\n string = string.lower()\n was_upper = True\n\n # Iterate over each character, checking for boundaries, or places where\n # the stringiable should divided.\n while i <= len(string):\n c = string[i:i + 1]\n\n split = False\n if i < len(string):\n # Detect upper-case letter as boundary.\n if UPPER.match(c):\n split = True\n # Detect transition from separator to not separator.\n elif NOTSEP.match(c) and SEP.match(p):\n split = True\n # Detect transition not separator to separator.\n elif SEP.match(c) and NOTSEP.match(p):\n split = True\n else:\n # The loop goes one extra iteration so that it can handle the\n # remaining text after the last boundary.\n split = True\n\n if split:\n if NOTSEP.match(p):\n words.append(string[s:i])\n else:\n # stringiable contains at least one separator.\n # Use the first one as the stringiable's primary separator.\n if not separator:\n separator = string[s:s + 1]\n\n # Use None to indicate a separator in the word list.\n words.append(None)\n # If separators weren't included in the list, then breaks\n # between upper-case sequences (\"AAA_BBB\") would be\n # disregarded; the letter-run detector would count them as one\n # sequence (\"AAABBB\").\n s = i\n\n i += 1\n p = c\n\n return words, separator, was_upper", "docstring": "Segment string on separator into list of words.\n\nArgs:\n string -- the string we want to process\n\nReturns:\n words -- list of words the string got minced to\n separator -- the separator char intersecting words\n was_upper -- whether string happened to be upper-case", "source": "juraj_google_style"} -{"code": "def encode_csv(data_dict, column_names):\n\n import csv\n import six\n values = [str(data_dict[x]) for x in column_names]\n str_buff = six.StringIO()\n writer = csv.writer(str_buff, lineterminator='')\n writer.writerow(values)\n return str_buff.getvalue()", "docstring": "Builds a csv string.\n\nArgs:\n data_dict: dict of {column_name: 1 value}\n column_names: list of column names\n\nReturns:\n A csv string version of data_dict", "source": "juraj_google_style"} -{"code": "def read_route_line(route):\n\n scrf_patt = re.compile(r\"^([sS][cC][rR][fF])\\s*=\\s*(.+)\")\n multi_params_patt = re.compile(r\"^([A-z]+[0-9]*)[\\s=]+\\((.*)\\)$\")\n functional = None\n basis_set = None\n route_params = {}\n dieze_tag = None\n if route:\n if \"/\" in route:\n tok = route.split(\"/\")\n functional = tok[0].split()[-1]\n basis_set = tok[1].split()[0]\n for tok in [functional, basis_set, \"/\"]:\n route = route.replace(tok, \"\")\n\n for tok in route.split():\n if scrf_patt.match(tok):\n m = scrf_patt.match(tok)\n route_params[m.group(1)] = m.group(2)\n elif tok.upper() in [\"#\", \"#N\", \"#P\", \"#T\"]:\n # does not store # in route to avoid error in input\n if tok == \"#\":\n dieze_tag = \"#N\"\n else:\n dieze_tag = tok\n continue\n else:\n m = re.match(multi_params_patt, tok.strip(\"#\"))\n if m:\n pars = {}\n for par in m.group(2).split(\",\"):\n p = par.split(\"=\")\n pars[p[0]] = None if len(p) == 1 else p[1]\n route_params[m.group(1)] = pars\n else:\n d = tok.strip(\"#\").split(\"=\")\n route_params[d[0]] = None if len(d) == 1 else d[1]\n\n return functional, basis_set, route_params, dieze_tag", "docstring": "read 
route line in gaussian input/output and return functional basis_set\n and a dictionary of other route parameters\n\nArgs:\n route (str) : the route line\n\n return\n functional (str) : the method (HF, PBE ...)\n basis_set (str) : the basis set\n route (dict) : dictionary of parameters", "source": "juraj_google_style"} -{"code": "def _cache_form_details(self, form):\n\n cache = FormCache()\n form['model']['form_key'] = cache.form_id\n form['model']['form_name'] = self.__class__.__name__\n cache.set(\n {\n 'model': list(form['model'].keys()), # In Python 3, dictionary keys are not serializable\n 'non_data_fields': self.non_data_fields\n }\n )", "docstring": "Caches some form details to lates process and validate incoming (response) form data\n\nArgs:\n form: form dict", "source": "juraj_google_style"} -{"code": "def signature(cert, sig, body):\n\n body = six.b(body)\n\n sig = base64.decodestring(sig)\n padder = padding.PKCS1v15()\n public_key = cert.public_key()\n try:\n public_key.verify(sig, body, padder, hashes.SHA1())\n return True\n except InvalidSignature:\n warnings.warn('Signature verification failed.')\n return False", "docstring": "Validate data request signature.\n\n See `validate.request` for additional info.\n\nArgs:\n cert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon\n signing certificate.\n sig: str. Signature header value sent by request.\n body: str. HTTPS request body.\n\nReturns:\n bool: True if valid, False otherwise.", "source": "juraj_google_style"} -{"code": "def apriori(transactions, **kwargs):\n\n # Parse the arguments.\n min_support = kwargs.get('min_support', 0.1)\n min_confidence = kwargs.get('min_confidence', 0.0)\n min_lift = kwargs.get('min_lift', 0.0)\n max_length = kwargs.get('max_length', None)\n\n # Check arguments.\n if min_support <= 0:\n raise ValueError('minimum support must be > 0')\n\n # For testing.\n _gen_support_records = kwargs.get(\n '_gen_support_records', gen_support_records)\n _gen_ordered_statistics = kwargs.get(\n '_gen_ordered_statistics', gen_ordered_statistics)\n _filter_ordered_statistics = kwargs.get(\n '_filter_ordered_statistics', filter_ordered_statistics)\n\n # Calculate supports.\n transaction_manager = TransactionManager.create(transactions)\n support_records = _gen_support_records(\n transaction_manager, min_support, max_length=max_length)\n\n # Calculate ordered stats.\n for support_record in support_records:\n ordered_statistics = list(\n _filter_ordered_statistics(\n _gen_ordered_statistics(transaction_manager, support_record),\n min_confidence=min_confidence,\n min_lift=min_lift,\n )\n )\n if not ordered_statistics:\n continue\n yield RelationRecord(\n support_record.items, support_record.support, ordered_statistics)", "docstring": "Executes Apriori algorithm and returns a RelationRecord generator.\n\nArgs:\n transactions -- A transaction iterable object\n (eg. 
[['A', 'B'], ['B', 'C']]).\n\n Keyword arguments:\n min_support -- The minimum support of relations (float).\n min_confidence -- The minimum confidence of relations (float).\n min_lift -- The minimum lift of relations (float).\n max_length -- The maximum length of the relation (integer).", "source": "juraj_google_style"} -{"code": "def contains(self, name):\n\n try:\n self._api.buckets_get(name)\n except google.datalab.utils.RequestException as e:\n if e.status == 404:\n return False\n raise e\n except Exception as e:\n raise e\n return True", "docstring": "Checks if the specified bucket exists.\n\nArgs:\n name: the name of the bucket to lookup.\n\nReturns:\n True if the bucket exists; False otherwise.\n\nRaises:\n Exception if there was an error requesting information about the bucket.", "source": "juraj_google_style"} -{"code": "def __init__(self, max_size=10):\n\n # This class implements a LRU cache which needs fast updates of the LRU\n # order for random elements. This is usually implemented by using a\n # dict for fast lookups and a linked list for quick deletions / insertions.\n self._age = LinkedList()\n self._hash = {}\n self._limit = max_size\n self.lock = threading.RLock()", "docstring": "Constructor.\n\nArgs:\n max_size: The maximum number of objects held in cache.", "source": "juraj_google_style"} -{"code": "def import_metadata(self, elements):\n\n metadata_elem = lambda name: etree.QName(GPX_NS, name)\n\n for child in elements.getchildren():\n tag_ns, tag_name = child.tag[1:].split('}')\n if not tag_ns == GPX_NS:\n continue\n if tag_name in ('name', 'desc', 'keywords'):\n setattr(self, tag_name, child.text)\n elif tag_name == 'time':\n self.time = utils.Timestamp.parse_isoformat(child.text)\n elif tag_name == 'author':\n self.author['name'] = child.findtext(metadata_elem('name'))\n aemail = child.find(metadata_elem('email'))\n if aemail:\n self.author['email'] = '%s@%s' % (aemail.get('id'),\n aemail.get('domain'))\n self.author['link'] = child.findtext(metadata_elem('link'))\n elif tag_name == 'bounds':\n self.bounds = {\n 'minlat': child.get('minlat'),\n 'maxlat': child.get('maxlat'),\n 'minlon': child.get('minlon'),\n 'maxlon': child.get('maxlon'),\n }\n elif tag_name == 'extensions':\n self.extensions = child.getchildren()\n elif tag_name == 'copyright':\n if child.get('author'):\n self.copyright['name'] = child.get('author')\n self.copyright['year'] = child.findtext(metadata_elem('year'))\n self.copyright['license'] = child.findtext(metadata_elem('license'))\n elif tag_name == 'link':\n link = {\n 'href': child.get('href'),\n 'type': child.findtext(metadata_elem('type')),\n 'text': child.findtext(metadata_elem('text')),\n }\n self.link.append(link)", "docstring": "Import information from GPX metadata.\n\nArgs:\n elements (etree.Element): GPX metadata subtree", "source": "juraj_google_style"} -{"code": "def find_stable_entry(self, pH, V):\n\n energies_at_conditions = [e.normalized_energy_at_conditions(pH, V)\n for e in self.stable_entries]\n return self.stable_entries[np.argmin(energies_at_conditions)]", "docstring": "Finds stable entry at a pH,V condition\n\nArgs:\n pH (float): pH to find stable entry\n V (float): V to find stable entry\n\n Returns:", "source": "juraj_google_style"} -{"code": "def vals2colors(vals,cmap='GnBu_d',res=100):\n\n # flatten if list of lists\n if any(isinstance(el, list) for el in vals):\n vals = list(itertools.chain(*vals))\n\n # get palette from seaborn\n palette = np.array(sns.color_palette(cmap, res))\n ranks = np.digitize(vals, 
np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1\n return [tuple(i) for i in palette[ranks, :]]", "docstring": "Maps values to colors\n\nArgs:\n values (list or list of lists) - list of values to map to colors\n cmap (str) - color map (default is 'husl')\n res (int) - resolution of the color map (default: 100)\n\nReturns:\n list of rgb tuples", "source": "juraj_google_style"} -{"code": "def gpp_soco(V,E):\n\n model = Model(\"gpp model -- soco\")\n\n x,s,z = {},{},{}\n for i in V:\n x[i] = model.addVar(vtype=\"B\", name=\"x(%s)\"%i)\n for (i,j) in E:\n s[i,j] = model.addVar(vtype=\"C\", name=\"s(%s,%s)\"%(i,j))\n z[i,j] = model.addVar(vtype=\"C\", name=\"z(%s,%s)\"%(i,j))\n\n model.addCons(quicksum(x[i] for i in V) == len(V)/2, \"Partition\")\n\n for (i,j) in E:\n model.addCons((x[i] + x[j] -1)*(x[i] + x[j] -1) <= s[i,j], \"S(%s,%s)\"%(i,j))\n model.addCons((x[j] - x[i])*(x[j] - x[i]) <= z[i,j], \"Z(%s,%s)\"%(i,j))\n model.addCons(s[i,j] + z[i,j] == 1, \"P(%s,%s)\"%(i,j))\n\n # # triangle inequalities (seem to make model slower)\n # for i in V:\n # for j in V:\n # for k in V:\n # if (i,j) in E and (j,k) in E and (i,k) in E:\n # print(\"\\t***\",(i,j,k)\n # model.addCons(z[i,j] + z[j,k] + z[i,k] <= 2, \"T1(%s,%s,%s)\"%(i,j,k))\n # model.addCons(z[i,j] + s[j,k] + s[i,k] <= 2, \"T2(%s,%s,%s)\"%(i,j,k))\n # model.addCons(s[i,j] + s[j,k] + z[i,k] <= 2, \"T3(%s,%s,%s)\"%(i,j,k))\n # model.addCons(s[i,j] + z[j,k] + s[i,k] <= 2, \"T4(%s,%s,%s)\"%(i,j,k))\n\n model.setObjective(quicksum(z[i,j] for (i,j) in E), \"minimize\")\n\n model.data = x,s,z\n return model", "docstring": "gpp -- model for the graph partitioning problem in soco\n Parameters:\n - V: set/list of nodes in the graph\n - E: set/list of edges in the graph\n Returns a model, ready to be solved.", "source": "juraj_google_style"} -{"code": "def register_function(self, name: Optional[str]=None):\n\n def wrap(function: Callable)->Any:\n nonlocal name\n if name is None:\n name = function.__name__\n self.funcs[name] = function\n return function\n return wrap", "docstring": "注册函数.\n\n Parameters:\n name (Optional[str]): - 将函数注册到的名字,如果为None,name就用其原来的名字", "source": "juraj_google_style"} -{"code": "def set_default_by_alias(self, alias):\n\n if alias not in self._aliases:\n raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))\n\n self._default_index = self._aliases[alias]", "docstring": "Set the default dataset by its alias.\n\n After changing the default dataset, all calls without explicitly specifying the\n dataset by index or alias will be redirected to this dataset.\n\nArgs:\n alias (str): The alias of the dataset that should be made the default.\n\nRaises:\n DataInvalidAlias: If the alias does not represent a valid dataset.", "source": "juraj_google_style"} -{"code": "def symbols(self, *args, **kwargs):\n\n return [self.symbol(idendifier, **kwargs) for idendifier in args]", "docstring": "Lookup equities by symbol.\n\n Parameters:\n args (iterable[str]): List of ticker symbols for the asset.\n\nReturns:\n equities (List[Equity]): The equity lookuped by the ``symbol``.\n\nRaises:\n AssetNotFound: When could not resolve the ``Asset`` by ``symbol``.", "source": "juraj_google_style"} -{"code": "def dispatch(self, state_change: StateChange) -> List[Event]:\n\n assert isinstance(state_change, StateChange)\n\n # the state objects must be treated as immutable, so make a copy of the\n # current state and pass the copy to the state machine to be modified.\n next_state = deepcopy(self.current_state)\n\n # update the 
current state by applying the change\n iteration = self.state_transition(\n next_state,\n state_change,\n )\n\n assert isinstance(iteration, TransitionResult)\n\n self.current_state = iteration.new_state\n events = iteration.events\n\n assert isinstance(self.current_state, (State, type(None)))\n assert all(isinstance(e, Event) for e in events)\n\n return events", "docstring": "Apply the `state_change` in the current machine and return the\n resulting events.\n\nArgs:\n state_change: An object representation of a state\n change.\n\nReturns:\n A list of events produced by the state transition.\n It's the upper layer's responsibility to decided how to handle\n these events.", "source": "juraj_google_style"} -{"code": "def _validate_query(query):\n\n query = deepcopy(query)\n # q is always required\n if query[\"q\"] == BLANK_QUERY[\"q\"]:\n raise ValueError(\"No query specified.\")\n\n query[\"q\"] = _clean_query_string(query[\"q\"])\n\n # limit should be set to appropriate default if not specified\n if query[\"limit\"] is None:\n query[\"limit\"] = SEARCH_LIMIT if query[\"advanced\"] else NONADVANCED_LIMIT\n # If specified, the limit should not be greater than the Search maximum\n elif query[\"limit\"] > SEARCH_LIMIT:\n warnings.warn('Reduced result limit from {} to the Search maximum: {}'\n .format(query[\"limit\"], SEARCH_LIMIT), RuntimeWarning)\n query[\"limit\"] = SEARCH_LIMIT\n\n # Remove all blank/default values\n for key, val in BLANK_QUERY.items():\n # Default for get is NaN so comparison is always False\n if query.get(key, float('nan')) == val:\n query.pop(key)\n\n # Remove unsupported fields\n to_remove = [field for field in query.keys() if field not in BLANK_QUERY.keys()]\n [query.pop(field) for field in to_remove]\n\n return query", "docstring": "Validate and clean up a query to be sent to Search.\n Cleans the query string, removes unneeded parameters, and validates for correctness.\n Does not modify the original argument.\n Raises an Exception on invalid input.\n\nArgs:\n query (dict): The query to validate.\n\nReturns:\n dict: The validated query.", "source": "juraj_google_style"} -{"code": "def ParseOptions(cls, options, configuration_object):\n\n if not isinstance(configuration_object, tools.CLITool):\n raise errors.BadConfigObject(\n 'Configuration object is not an instance of CLITool')\n\n preferred_year = cls._ParseNumericOption(options, 'preferred_year')\n\n process_archives = getattr(options, 'process_archives', False)\n process_compressed_streams = getattr(\n options, 'process_compressed_streams', True)\n\n setattr(configuration_object, '_preferred_year', preferred_year)\n setattr(configuration_object, '_process_archives', process_archives)\n setattr(\n configuration_object, '_process_compressed_streams',\n process_compressed_streams)", "docstring": "Parses and validates options.\n\nArgs:\n options (argparse.Namespace): parser options.\n configuration_object (CLITool): object to be configured by the argument\n helper.\n\nRaises:\n BadConfigObject: when the configuration object is of the wrong type.", "source": "juraj_google_style"} -{"code": "def parse_ensembl_exon_request(result):\n\n keys = [\n 'chrom',\n 'gene',\n 'transcript',\n 'exon_id',\n 'exon_chrom_start',\n 'exon_chrom_end',\n '5_utr_start',\n '5_utr_end',\n '3_utr_start',\n '3_utr_end',\n 'strand',\n 'rank'\n ]\n\n # for res in result.itertuples():\n for res in zip(result['Chromosome/scaffold name'],\n result['Gene stable ID'],\n result['Transcript stable ID'],\n result['Exon stable ID'],\n result['Exon region 
start (bp)'],\n result['Exon region end (bp)'],\n result[\"5' UTR start\"],\n result[\"5' UTR end\"],\n result[\"3' UTR start\"],\n result[\"3' UTR end\"],\n result[\"Strand\"],\n result[\"Exon rank in transcript\"]):\n ensembl_info = dict(zip(keys, res))\n\n # Recalculate start and stop (taking UTR regions into account for end exons)\n if ensembl_info['strand'] == 1:\n # highest position: start of exon or end of 5' UTR\n # If no 5' UTR make sure exon_start is allways choosen\n start = max(ensembl_info['exon_chrom_start'], ensembl_info['5_utr_end'] or -1)\n # lowest position: end of exon or start of 3' UTR\n end = min(ensembl_info['exon_chrom_end'], ensembl_info['3_utr_start'] or float('inf'))\n elif ensembl_info['strand'] == -1:\n # highest position: start of exon or end of 3' UTR\n start = max(ensembl_info['exon_chrom_start'], ensembl_info['3_utr_end'] or -1)\n # lowest position: end of exon or start of 5' UTR\n end = min(ensembl_info['exon_chrom_end'], ensembl_info['5_utr_start'] or float('inf'))\n\n ensembl_info['start'] = start\n ensembl_info['end'] = end\n\n yield ensembl_info", "docstring": "Parse a dataframe with ensembl exon information\n\nArgs:\n res(pandas.DataFrame)\n\nYields:\n gene_info(dict)", "source": "juraj_google_style"} -{"code": "def alleles_to_retrieve(df):\n\n contig_blastn_records = defaultdict(list)\n markers = df.marker.unique()\n for m in markers:\n dfsub = df[df.marker == m]\n for i, r in dfsub.iterrows():\n if r.coverage < 1.0:\n contig_blastn_records[r.stitle].append(r)\n break\n return contig_blastn_records", "docstring": "Alleles to retrieve from genome fasta\n\n Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be\n retrieved from the genome contig.\n\nArgs:\n df (pandas.DataFrame): blastn results dataframe\n\nReturns:\n {str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker\n for which the allele sequence must be retrieved from the original sequence.", "source": "juraj_google_style"} -{"code": "def FromJson(json):\n\n type = ContractParameterType.FromString(json['type'])\n\n value = json['value']\n param = ContractParameter(type=type, value=None)\n\n if type == ContractParameterType.Signature or type == ContractParameterType.ByteArray:\n param.Value = bytearray.fromhex(value)\n\n elif type == ContractParameterType.Boolean:\n param.Value = bool(value)\n\n elif type == ContractParameterType.Integer:\n param.Value = int(value)\n\n elif type == ContractParameterType.Hash160:\n param.Value = UInt160.ParseString(value)\n\n elif type == ContractParameterType.Hash256:\n param.Value = UInt256.ParseString(value)\n\n # @TODO Not sure if this is working...\n elif type == ContractParameterType.PublicKey:\n param.Value = ECDSA.decode_secp256r1(value).G\n\n elif type == ContractParameterType.String:\n param.Value = str(value)\n\n elif type == ContractParameterType.Array:\n val = [ContractParameter.FromJson(item) for item in value]\n param.Value = val\n\n return param", "docstring": "Convert a json object to a ContractParameter object\n\nArgs:\n item (dict): The item to convert to a ContractParameter object\n\nReturns:\n ContractParameter", "source": "juraj_google_style"} -{"code": "def _get_container_environment(self, **kwargs):\n\n environment = {}\n environment.update(self.primary_container['Environment'])\n environment['SAGEMAKER_BATCH'] = 'True'\n if 'MaxPayloadInMB' in kwargs:\n environment['SAGEMAKER_MAX_PAYLOAD_IN_MB'] = str(kwargs['MaxPayloadInMB'])\n\n 
if 'BatchStrategy' in kwargs:\n if kwargs['BatchStrategy'] == 'SingleRecord':\n strategy_env_value = 'SINGLE_RECORD'\n elif kwargs['BatchStrategy'] == 'MultiRecord':\n strategy_env_value = 'MULTI_RECORD'\n else:\n raise ValueError('Invalid BatchStrategy, must be \\'SingleRecord\\' or \\'MultiRecord\\'')\n environment['SAGEMAKER_BATCH_STRATEGY'] = strategy_env_value\n\n # we only do 1 max concurrent transform in Local Mode\n if 'MaxConcurrentTransforms' in kwargs and int(kwargs['MaxConcurrentTransforms']) > 1:\n logger.warning('Local Mode only supports 1 ConcurrentTransform. Setting MaxConcurrentTransforms to 1')\n environment['SAGEMAKER_MAX_CONCURRENT_TRANSFORMS'] = '1'\n\n # if there were environment variables passed to the Transformer we will pass them to the\n # container as well.\n if 'Environment' in kwargs:\n environment.update(kwargs['Environment'])\n return environment", "docstring": "Get all the Environment variables that will be passed to the container\n\n Certain input fields such as BatchStrategy have different values for the API vs the Environment\n variables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion.\n\nArgs:\n **kwargs: existing transform arguments\n\nReturns:\n dict: All the environment variables that should be set in the container", "source": "juraj_google_style"} -{"code": "def prose_wc(args):\n\n if args.file is None:\n return 1\n if args.split_hyphens:\n INTERSTITIAL_PUNCTUATION.append(re.compile(r'-'))\n content = args.file.read().decode('utf-8')\n filename = args.file.name\n body = strip_frontmatter(content)\n parsed = markdown_to_text(body)\n result = wc(filename, body, parsed=parsed,\n is_jekyll=(body != content))\n if (args.update and\n filename != '_stdin_' and\n result['counts']['type'] == 'jekyll'):\n update_file(filename, result, content, args.indent)\n else:\n _mockable_print({\n 'yaml': yaml.safe_dump(result, default_flow_style=False,\n indent=args.indent),\n 'json': json.dumps(result, indent=args.indent),\n 'default': default_dump(result),\n }[args.format])\n return 0", "docstring": "Processes data provided to print a count object, or update a file.\n\nArgs:\n args: an ArgumentParser object returned by setup()", "source": "juraj_google_style"} -{"code": "def encode_row(fields):\n\n # NOTE: str(f) only works for Python3\n unicode_fields = [unicode(f) for f in fields]\n escaped_fields = map(escape, unicode_fields)\n return _field_delimiter.join(escaped_fields)", "docstring": "Encode a list of column values into a [incr tsdb()] profile line.\n\n Encoding involves escaping special characters for each value, then\n joining the values into a single string with the field delimiter\n (`\"@\"` by default). 
It does not fill in default values (see\n make_row()).\n\nArgs:\n fields: a list of column values\n\nReturns:\n A [incr tsdb()]-encoded string", "source": "juraj_google_style"} -{"code": "def CopyFromStringTuple(self, time_elements_tuple):\n\n if len(time_elements_tuple) < 7:\n raise ValueError((\n 'Invalid time elements tuple at least 7 elements required,'\n 'got: {0:d}').format(len(time_elements_tuple)))\n\n super(TimeElementsWithFractionOfSecond, self).CopyFromStringTuple(\n time_elements_tuple)\n\n try:\n fraction_of_second = decimal.Decimal(time_elements_tuple[6])\n except (TypeError, ValueError):\n raise ValueError('Invalid fraction of second value: {0!s}'.format(\n time_elements_tuple[6]))\n\n if fraction_of_second < 0.0 or fraction_of_second >= 1.0:\n raise ValueError('Fraction of second value: {0:f} out of bounds.'.format(\n fraction_of_second))\n\n self.fraction_of_second = fraction_of_second", "docstring": "Copies time elements from string-based time elements tuple.\n\nArgs:\n time_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]):\n time elements, contains year, month, day of month, hours, minutes,\n seconds and fraction of seconds.\n\nRaises:\n ValueError: if the time elements tuple is invalid.", "source": "juraj_google_style"} -{"code": "def _SkipFieldMessage(tokenizer):\n\n\n if tokenizer.TryConsume('<'):\n delimiter = '>'\n else:\n tokenizer.Consume('{')\n delimiter = '}'\n\n while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):\n _SkipField(tokenizer)\n\n tokenizer.Consume(delimiter)", "docstring": "Skips over a field message.\n\nArgs:\n tokenizer: A tokenizer to parse the field name and values.", "source": "juraj_google_style"} -{"code": "def select_with_condition(self, condition, key=None):\n\n condition = Condition.as_condition(condition)\n new_confs = []\n\n for conf in self:\n # Select the object on which condition is applied\n obj = conf if key is None else AttrDict(conf[key])\n add_it = condition(obj=obj)\n #if key is \"vars\": print(\"conf\", conf, \"added:\", add_it)\n if add_it: new_confs.append(conf)\n\n self._confs = new_confs", "docstring": "Remove all the configurations that do not satisfy the given condition.\n\nArgs:\n condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax\n key: Selects the sub-dictionary on which condition is applied, e.g. 
key=\"vars\"\n if we have to filter the configurations depending on the values in vars", "source": "juraj_google_style"} -{"code": "def run(self, args):\n\n jlink = pylink.JLink()\n\n if args.test:\n if jlink.test():\n print('Self-test succeeded.')\n else:\n print('Self-test failed.')\n elif args.list is None or args.list in ['usb', 'ip']:\n host = pylink.JLinkHost.USB_OR_IP\n if args.list == 'usb':\n host = pylink.JLinkHost.USB\n elif args.list == 'ip':\n host = pylink.JLinkHost.IP\n\n emulators = jlink.connected_emulators(host)\n for (index, emulator) in enumerate(emulators):\n if index > 0:\n print('')\n\n print('Product Name: %s' % emulator.acProduct.decode())\n print('Serial Number: %s' % emulator.SerialNumber)\n\n usb = bool(emulator.Connection)\n if not usb:\n print('Nickname: %s' % emulator.acNickname.decode())\n print('Firmware: %s' % emulator.acFWString.decode())\n\n print('Connection: %s' % ('USB' if usb else 'IP'))\n\n if not usb:\n print('IP Address: %s' % emulator.aIPAddr)\n elif args.supported is not None:\n device = args.supported[0]\n num_supported_devices = jlink.num_supported_devices()\n for i in range(num_supported_devices):\n found_device = jlink.supported_device(i)\n if device.lower() == found_device.name.lower():\n print('Device Name: %s' % device)\n print('Core ID: %s' % found_device.CoreId)\n print('Flash Address: %s' % found_device.FlashAddr)\n print('Flash Size: %s bytes' % found_device.FlashSize)\n print('RAM Address: %s' % found_device.RAMAddr)\n print('RAM Size: %s bytes' % found_device.RAMSize)\n print('Manufacturer: %s' % found_device.manufacturer)\n break\n else:\n print('%s is not supported :(' % device)\n\n return None", "docstring": "Runs the emulator command.\n\nArgs:\n self (EmulatorCommand): the ``EmulatorCommand`` instance\n args (Namespace): arguments to parse\n\nReturns:\n ``None``", "source": "juraj_google_style"} -{"code": "def SetOption(self, section, option, value, overwrite=True):\n\n if not overwrite and self.config.has_option(section, option):\n return\n if not self.config.has_section(section):\n self.config.add_section(section)\n self.config.set(section, option, str(value))", "docstring": "Set the value of an option in the config file.\n\nArgs:\n section: string, the section of the config file to check.\n option: string, the option to set the value of.\n value: string, the value to set the option.\n overwrite: bool, True to overwrite an existing value in the config file.", "source": "juraj_google_style"} -{"code": "def assign_selective_dynamics(self, slab):\n\n sd_list = []\n sd_list = [[False, False, False] if site.properties['surface_properties'] == 'subsurface'\n else [True, True, True] for site in slab.sites]\n new_sp = slab.site_properties\n new_sp['selective_dynamics'] = sd_list\n return slab.copy(site_properties=new_sp)", "docstring": "Helper function to assign selective dynamics site_properties\n based on surface, subsurface site properties\n\nArgs:\n slab (Slab): slab for which to assign selective dynamics", "source": "juraj_google_style"} -{"code": "def read(self, size=None):\n\n data = b''\n while ((size and len(data) < size) and\n self._current_offset < self.uncompressed_data_size):\n member = self._GetMemberForOffset(self._current_offset)\n member_offset = self._current_offset - member.uncompressed_data_offset\n data_read = member.ReadAtOffset(member_offset, size)\n if data_read:\n self._current_offset += len(data_read)\n data = b''.join([data, data_read])\n\n return data", "docstring": "Reads a byte string from the gzip file 
at the current offset.\n\n The function will read a byte string up to the specified size or\n all of the remaining data if no size was specified.\n\nArgs:\n size (Optional[int]): number of bytes to read, where None is all\n remaining data.\n\nReturns:\n bytes: data read.\n\nRaises:\n IOError: if the read failed.\n OSError: if the read failed.", "source": "juraj_google_style"} -{"code": "def __init__(self, board_name, https=False, session=None):\n\n self._board_name = board_name\n self._https = https\n self._protocol = 'https://' if https else 'http://'\n self._url = Url(board_name=board_name, https=self._https)\n\n self._requests_session = session or requests.session()\n self._requests_session.headers['User-Agent'] = 'py-4chan/%s' % __version__\n\n self._thread_cache = {}", "docstring": "Creates a :mod:`basc_py4chan.Board` object.\n\nArgs:\n board_name (string): Name of the board, such as \"tg\" or \"etc\".\n https (bool): Whether to use a secure connection to 4chan.\n session: Existing requests.session object to use instead of our current one.", "source": "juraj_google_style"} -{"code": "def __init__(self, data_type_definition):\n\n super(DataTypeMap, self).__init__()\n self._data_type_definition = data_type_definition", "docstring": "Initializes a data type map.\n\nArgs:\n data_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\n FormatError: if the data type map cannot be determined from the data\n type definition.", "source": "juraj_google_style"} -{"code": "def add_section(self, section):\n\n if section in self.sections():\n raise DuplicateSectionError(section)\n if isinstance(section, str):\n # create a new section\n section = Section(section, container=self)\n elif not isinstance(section, Section):\n raise ValueError(\"Parameter must be a string or Section type!\")\n self._structure.append(section)", "docstring": "Create a new section in the configuration.\n\n Raise DuplicateSectionError if a section by the specified name\n already exists. 
Raise ValueError if name is DEFAULT.\n\nArgs:\n section (str or :class:`Section`): name or Section type", "source": "juraj_google_style"} -{"code": "def load(filename):\n\n fileObj = open(filename, 'rb')\n variable = pickle.load(fileObj)\n fileObj.close()\n return variable", "docstring": "Load variable from Pickle file\n\nArgs:\n path (str): path of the file to load\n\nReturns:\n variable read from path", "source": "juraj_google_style"} -{"code": "def fetch_weighted_complexity(self, recalculate_metrics=False):\n\n # TODO: implment metrics recalculation\n max_total = sum(\n [self.metrics_weights[metric_name] for metric_name in self.metrics_weights]\n )\n total = 0\n if recalculate_metrics:\n self.calculate_indicator_metrics()\n for metric in self.metrics.all():\n if metric.name in self.metrics_weights and metric.is_outlier:\n total += self.metrics_weights[metric.name]\n\n value = total / max_total\n\n final_value = \"{:.1f}\".format(value * 10)\n\n if final_value[-1] == \"0\":\n final_value = \"{:.0f}\".format(value * 10)\n final_value = int(final_value)\n else:\n final_value = float(final_value)\n self.value = float(final_value)\n self.is_valid = True\n self.updated_at = datetime.datetime.now()\n self.save()\n return final_value", "docstring": "Calculates indicator value according to metrics weights\n Uses metrics in database\n\nArgs:\n recalculate_metrics: If true metrics values are updated before\n using weights", "source": "juraj_google_style"} -{"code": "def cprint(\n text,\n fg=Color.normal,\n bg=Color.normal,\n fg_dark=False,\n bg_dark=False,\n underlined=False,\n parse=False,\n):\n\n if parse:\n color_re = Color.color_re()\n lines = text.splitlines()\n count = len(lines)\n for i, line in enumerate(lines):\n previous = 0\n end = len(line)\n for match in color_re.finditer(line):\n sys.stdout.write(line[previous : match.start()])\n d = match.groupdict()\n set_color(\n d[\"color\"], fg_dark=False if d[\"dark\"] is None else True\n )\n previous = match.end()\n sys.stdout.write(\n line[previous:end]\n + (\"\\n\" if (i < (count - 1) or text[-1] == \"\\n\") else \"\")\n )\n else:\n set_color(fg, bg, fg_dark, bg_dark, underlined)\n sys.stdout.write(text)\n set_color()", "docstring": "Print string in to stdout using colored font.\n\n See L{set_color} for more details about colors.\n\nArgs:\n text (str): Text that needs to be printed.", "source": "juraj_google_style"} -{"code": "def percent_point(self, y, V):\n\n self.check_fit()\n\n if self.theta < 0:\n return V\n\n else:\n a = np.power(y, self.theta / (-1 - self.theta))\n b = np.power(V, self.theta)\n u = np.power((a + b - 1) / b, -1 / self.theta)\n return u", "docstring": "Compute the inverse of conditional cumulative distribution :math:`C(u|v)^-1`\n\nArgs:\n y: `np.ndarray` value of :math:`C(u|v)`.\n v: `np.ndarray` given value of v.", "source": "juraj_google_style"} -{"code": "def create_wf_instances(self, roles=None):\n\n\n # if roles specified then create an instance for each role\n # else create only one instance\n\n if roles:\n wf_instances = [\n WFInstance(\n wf=self.wf,\n current_actor=role,\n task=self,\n name=self.wf.name\n ) for role in roles\n ]\n else:\n wf_instances = [\n WFInstance(\n wf=self.wf,\n task=self,\n name=self.wf.name\n )\n ]\n\n # if task type is not related with objects save instances immediately.\n if self.task_type in [\"C\", \"D\"]:\n return [wfi.save() for wfi in wf_instances]\n\n # if task type is related with its objects, save populate instances per object\n else:\n wf_obj_instances = []\n for wfi in 
wf_instances:\n role = wfi.current_actor if self.task_type == \"A\" else None\n keys = self.get_object_keys(role)\n wf_obj_instances.extend(\n [WFInstance(\n wf=self.wf,\n current_actor=role,\n task=self,\n name=self.wf.name,\n wf_object=key,\n wf_object_type=self.object_type\n ).save() for key in keys]\n )\n\n return wf_obj_instances", "docstring": "Creates wf instances.\n\nArgs:\n roles (list): role list\n\nReturns:\n (list): wf instances", "source": "juraj_google_style"} -{"code": "def GetFileEntryByPathSpec(self, path_spec):\n\n if not self.FileEntryExistsByPathSpec(path_spec):\n return None\n return os_file_entry.OSFileEntry(self._resolver_context, self, path_spec)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\n path_spec (PathSpec): a path specification.\n\nReturns:\n OSFileEntry: a file entry or None if not available.", "source": "juraj_google_style"} -{"code": "def WaitUntilDone(self, timeout=None):\n\n\n utils.Poll(\n generator=self.GetState,\n condition=lambda s: s != self.__class__.STATE_RUNNING,\n timeout=timeout)\n self.target_file = self.target_file.Get()\n return self", "docstring": "Wait until the operation is done.\n\nArgs:\n timeout: timeout in seconds. None means default timeout (1 hour).\n 0 means no timeout (wait forever).\n\nReturns:\n Operation object with refreshed target_file.\n\nRaises:\n PollTimeoutError: if timeout is reached.", "source": "juraj_google_style"} -{"code": "def to_base_10_int(n, input_base):\n\n return sum(c * input_base ** i for i, c in enumerate(n[::-1]))", "docstring": "Converts an integer in any base into it's decimal representation.\n\nArgs:\n n - An integer represented as a tuple of digits in the specified base.\n input_base - the base of the input number.\n\nReturns:\n integer converted into base 10.\n\nExample:\n >>> to_base_10_int((8,1), 16)\n 129", "source": "juraj_google_style"} -{"code": "def set_ignores(self, folder, *patterns):\n\n if not patterns:\n return {}\n data = {'ignore': list(patterns)}\n return self.post('ignores', params={'folder': folder}, data=data)", "docstring": "Applies ``patterns`` to ``folder``'s ``.stignore`` file.\n\nArgs:\n folder (str):\n patterns (str):\n\nReturns:\n dict", "source": "juraj_google_style"} -{"code": "def name_from_base(base, max_length=63, short=False):\n\n timestamp = sagemaker_short_timestamp() if short else sagemaker_timestamp()\n trimmed_base = base[:max_length - len(timestamp) - 1]\n return '{}-{}'.format(trimmed_base, timestamp)", "docstring": "Append a timestamp to the provided string.\n\n This function assures that the total length of the resulting string is not\n longer than the specified max length, trimming the input parameter if necessary.\n\nArgs:\n base (str): String used as prefix to generate the unique name.\n max_length (int): Maximum length for the resulting string.\n short (bool): Whether or not to use a truncated timestamp.\n\nReturns:\n str: Input parameter with appended timestamp.", "source": "juraj_google_style"} -{"code": "def filter_all_reachable_leaves_many(self, identifier_filters, language, forbidden_identifiers=None):\n\n for i, identifier_filter in enumerate(identifier_filters):\n if len(identifier_filter) == 1 and not isinstance(identifier_filter[0], list):\n identifier_filters[i] = [identifier_filter]\n item_identifiers = [\n identifier[1:] if identifier.startswith('-') else identifier\n for identifier_filter in identifier_filters\n for identifier in set(flatten(identifier_filter))\n ]\n if forbidden_identifiers is None:\n 
forbidden_identifiers = []\n for identifier in forbidden_identifiers:\n item_identifiers.append(identifier)\n translated = self.translate_identifiers(item_identifiers, language)\n forbidden_item_ids = {translated[identifier] for identifier in forbidden_identifiers}\n leaves = self.get_leaves({translated[i] for i in item_identifiers}, language=language, forbidden_item_ids=forbidden_item_ids)\n result = []\n for identifier_filter in identifier_filters:\n if len(identifier_filter) == 0:\n result.append(self.get_all_available_leaves(language=language, forbidden_item_ids=forbidden_item_ids))\n continue\n filter_result = None\n filter_neg_result = set()\n for inner_filter in identifier_filter:\n inner_result = None\n inner_neg_result = None\n if len(inner_filter) == 0:\n raise Exception('Empty nested filters are not allowed.')\n for identifier in inner_filter:\n if inner_neg_result is not None:\n raise Exception('Nested filters can not contain multiple statements.')\n if identifier.startswith('-'):\n inner_neg_result = set(leaves[translated[identifier[1:]]])\n else:\n if inner_result is None:\n inner_result = set()\n inner_result |= set(leaves[translated[identifier]])\n if inner_result is not None:\n if filter_result is None:\n filter_result = inner_result\n else:\n filter_result &= inner_result\n if inner_neg_result is not None:\n filter_neg_result != inner_neg_result\n result.append(sorted(list(filter_result - filter_neg_result)))\n return result", "docstring": "Provides the same functionality as .. py:method:: ItemManager.filter_all_reachable_leaves(),\n but for more filters in the same time.\n\nArgs:\n identifier_filters: list of identifier filters\n language (str): language used for further filtering (some objects\n for different languages share the same item\n\nReturns:\n list: list of list of item ids", "source": "juraj_google_style"} -{"code": "def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format=\"%Y%m%d-%H%M\"):\n\n merged_forecasts = pd.merge(forecasts[\"condition\"],\n forecasts[\"dist\"],\n on=[\"Step_ID\",\"Track_ID\",\"Ensemble_Member\",\"Forecast_Hour\"])\n all_members = self.data[mode][\"combo\"][\"Ensemble_Member\"]\n members = np.unique(all_members)\n all_run_dates = pd.DatetimeIndex(self.data[mode][\"combo\"][\"Run_Date\"])\n run_dates = pd.DatetimeIndex(np.unique(all_run_dates))\n print(run_dates)\n for member in members:\n for run_date in run_dates:\n mem_run_index = (all_run_dates == run_date) & (all_members == member)\n member_forecast = merged_forecasts.loc[mem_run_index]\n member_forecast.to_csv(join(csv_path, \"hail_forecasts_{0}_{1}_{2}.csv\".format(self.ensemble_name,\n member,\n run_date.strftime\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t (run_date_format))))\n return", "docstring": "Output hail forecast values to csv files by run date and ensemble member.\n\nArgs:\n forecasts:\n mode:\n csv_path:\n Returns:", "source": "juraj_google_style"} -{"code": "def get_volumes(blocks, layout_info):\n\n volumes = {}\n\n vol_blocks_lists = sort.by_vol_id(blocks, layout_info[2])\n\n for vol_rec in blocks[layout_info[0]].vtbl_recs:\n vol_name = vol_rec.name.strip(b'\\x00').decode('utf-8')\n if vol_rec.rec_index not in vol_blocks_lists:\n vol_blocks_lists[vol_rec.rec_index] = []\n volumes[vol_name] = description(vol_rec.rec_index, vol_rec, vol_blocks_lists[vol_rec.rec_index])\n\n return volumes", "docstring": "Get a list of UBI volume objects from list of blocks\n\nArgs:\n List:blocks -- List of layout block objects\n List:layout_info -- Layout info 
(indexes of layout blocks and\n associated data blocks.)\n\nReturns:\n Dict -- Of Volume objects by volume name, including any\n relevant blocks.", "source": "juraj_google_style"} -{"code": "def init_algebra(*, default_hs_cls='LocalSpace'):\n\n from qnet.algebra.core.hilbert_space_algebra import LocalSpace\n from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression\n default_hs_cls = getattr(importlib.import_module('qnet'), default_hs_cls)\n if issubclass(default_hs_cls, LocalSpace):\n QuantumExpression._default_hs_cls = default_hs_cls\n else:\n raise TypeError(\"default_hs_cls must be a subclass of LocalSpace\")", "docstring": "Initialize the algebra system\n\nArgs:\n default_hs_cls (str): The name of the :class:`.LocalSpace` subclass\n that should be used when implicitly creating Hilbert spaces, e.g.\n in :class:`.OperatorSymbol`", "source": "juraj_google_style"} -{"code": "def gather(weights, indices, dim, output_shape=None):\n\n dim = convert_to_dimension(dim)\n output_shape = convert_to_shape(output_shape)\n if weights.dtype == tf.bool:\n return cast(gather(to_float(weights), indices, dim, output_shape), tf.bool)\n return einsum([one_hot(indices, dim, dtype=weights.dtype), weights],\n reduced_dims=[dim], output_shape=output_shape)", "docstring": "Shorthand for einsum([one_hot(indices, dim)], weights, reduced_dims=[dim]).\n\nArgs:\n weights: a Tensor\n indices: a Tensor with integer type\n dim: a Dimension\n output_shape: an optional mtf.Shape\n\nReturns:\n a Tensor", "source": "juraj_google_style"} -{"code": "def _transform_col(self, x, i):\n\n\n labels = self.label_encoder._transform_col(x, i)\n label_max = self.label_encoder.label_maxes[i]\n\n # build row and column index for non-zero values of a sparse matrix\n index = np.array(range(len(labels)))\n i = index[labels > 0]\n j = labels[labels > 0] - 1 # column index starts from 0\n\n if len(i) > 0:\n return sparse.coo_matrix((np.ones_like(i), (i, j)),\n shape=(x.shape[0], label_max))\n else:\n # if there is no non-zero value, return no matrix\n return None", "docstring": "Encode one categorical column into sparse matrix with one-hot-encoding.\n\nArgs:\n x (pandas.Series): a categorical column to encode\n i (int): column index\n\nReturns:\n X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical\n variable into dummy variables", "source": "juraj_google_style"} -{"code": "def get_help(sakefile):\n\n full_string = \"You can 'sake' one of the following...\\n\\n\"\n errmes = \"target '{}' is not allowed to not have help message\\n\"\n outerlines = []\n for target in sakefile:\n if target == \"all\":\n # this doesn't have a help message\n continue\n middle_lines = []\n if \"formula\" not in sakefile[target]:\n # this means it's a meta-target\n innerstr = \"{}:\\n - {}\\n\\n\".format(escp(target),\n sakefile[target][\"help\"])\n inner = []\n for atom_target in sakefile[target]:\n if atom_target == \"help\":\n continue\n inner.append(\" {}:\\n - {}\\n\\n\".format(escp(atom_target),\n sakefile[target][atom_target][\"help\"]))\n if inner:\n innerstr += '\\n'.join(sorted(inner))\n middle_lines.append(innerstr)\n else:\n middle_lines.append(\"{}:\\n - {}\\n\\n\".format(escp(target),\n sakefile[target][\"help\"]))\n if middle_lines:\n outerlines.append('\\n'.join(sorted(middle_lines)))\n\n if outerlines:\n full_string += '\\n'.join(sorted(outerlines))\n what_clean_does = \"remove all targets' outputs and start from scratch\"\n full_string += \"\\nclean:\\n - {}\\n\\n\".format(what_clean_does)\n what_visual_does = \"output 
visual representation of project's dependencies\"\n full_string += \"visual:\\n - {}\\n\".format(what_visual_does)\n full_string = re.sub(\"\\n{3,}\", \"\\n\\n\", full_string)\n return full_string", "docstring": "Returns the prettily formatted help strings (for printing)\n\nArgs:\n A dictionary that is the parsed Sakefile (from sake.py)\n\nNote:\n the list sorting in this function is required for this\n function to be deterministic", "source": "juraj_google_style"} -{"code": "def codemirror_field_css_bundle(field):\n\n manifesto = CodemirrorAssetTagRender()\n manifesto.register_from_fields(field)\n\n try:\n bundle_name = manifesto.css_bundle_names()[0]\n except IndexError:\n msg = (\"Given field with configuration name '{}' does not have a \"\n \"Javascript bundle name\")\n raise CodeMirrorFieldBundleError(msg.format(field.config_name))\n\n return bundle_name", "docstring": "Filter to get CodeMirror CSS bundle name needed for a single field.\n\nExample:\n ::\n\n {% load djangocodemirror_tags %}\n {{ form.myfield|codemirror_field_css_bundle }}\n\nArgs:\n field (djangocodemirror.fields.CodeMirrorField): A form field.\n\nRaises:\n CodeMirrorFieldBundleError: Raised if Codemirror configuration from\n field does not have a bundle name.\n\nReturns:\n string: Bundle name to load with webassets.", "source": "juraj_google_style"} -{"code": "def get_answers_for_student(student_item):\n\n submissions = sub_api.get_submissions(student_item)\n if not submissions:\n return Answers()\n\n latest_submission = submissions[0]\n latest_answer_item = latest_submission.get('answer', {})\n return Answers(latest_answer_item.get(ANSWER_LIST_KEY, []))", "docstring": "Retrieve answers from backend for a student and question\n\nArgs:\n student_item (dict): The location of the problem this submission is\n associated with, as defined by a course, student, and item.\n\nReturns:\n Answers: answers for the student", "source": "juraj_google_style"} -{"code": "def __init__(\n self,\n tcex,\n owner,\n action=None,\n attribute_write_type=None,\n halt_on_error=True,\n playbook_triggers_enabled=None,\n ):\n\n self.tcex = tcex\n self._action = action or 'Create'\n self._attribute_write_type = attribute_write_type or 'Replace'\n self._batch_max_chunk = 5000\n self._halt_on_error = halt_on_error\n self._hash_collision_mode = None\n self._file_merge_mode = None\n self._owner = owner\n self._playbook_triggers_enabled = playbook_triggers_enabled\n\n # shelf settings\n self._group_shelf_fqfn = None\n self._indicator_shelf_fqfn = None\n\n # global overrides on batch/file errors\n self._halt_on_batch_error = None\n self._halt_on_file_error = None\n self._halt_on_poll_error = None\n\n # debug/saved flags\n self._saved_xids = None\n self._saved_groups = None # indicates groups shelf file was provided\n self._saved_indicators = None # indicates indicators shelf file was provided\n self.enable_saved_file = False\n\n # default properties\n self._batch_data_count = None\n self._poll_interval = None\n self._poll_interval_times = []\n self._poll_timeout = 3600\n\n # containers\n self._files = {}\n self._groups = None\n self._groups_shelf = None\n self._indicators = None\n self._indicators_shelf = None\n\n # build custom indicator classes\n self._gen_indicator_class()", "docstring": "Initialize Class Properties.\n\nArgs:\n tcex (obj): An instance of TcEx object.\n owner (str): The ThreatConnect owner for Batch action.\n action (str, default:Create): Action for the batch job ['Create', 'Delete'].\n attribute_write_type (str, default:Replace): 
Write type for Indicator attributes\n ['Append', 'Replace'].\n halt_on_error (bool, default:True): If True any batch error will halt the batch job.", "source": "juraj_google_style"} -{"code": "def ExamineEvent(self, mediator, event):\n\n # Only interested in filesystem events.\n if event.data_type != 'fs:stat':\n return\n\n filename = getattr(event, 'filename', None)\n if not filename:\n return\n\n # Determine if we have a Chrome extension ID.\n if 'chrome' not in filename.lower():\n return\n\n if not self._sep:\n self._sep = self._GetPathSegmentSeparator(filename)\n\n if '{0:s}Extensions{0:s}'.format(self._sep) not in filename:\n return\n\n # Now we have extension IDs, let's check if we've got the\n # folder, nothing else.\n paths = filename.split(self._sep)\n if paths[-2] != 'Extensions':\n return\n\n extension_identifier = paths[-1]\n if extension_identifier == 'Temp':\n return\n\n # Get the user and ID.\n user = mediator.GetUsernameForPath(filename)\n\n # We still want this information in here, so that we can\n # manually deduce the username.\n if not user:\n if len(filename) > 25:\n user = 'Not found ({0:s}...)'.format(filename[0:25])\n else:\n user = 'Not found ({0:s})'.format(filename)\n\n extension_string = self._GetTitleFromChromeWebStore(extension_identifier)\n if not extension_string:\n extension_string = extension_identifier\n\n self._results.setdefault(user, [])\n if (extension_string, extension_identifier) not in self._results[user]:\n self._results[user].append((extension_string, extension_identifier))", "docstring": "Analyzes an event.\n\nArgs:\n mediator (AnalysisMediator): mediates interactions between analysis\n plugins and other components, such as storage and dfvfs.\n event (EventObject): event to examine.", "source": "juraj_google_style"} -{"code": "def check_filepath(self, path, filename):\n\n settings_path = os.path.join(path, filename)\n\n if not os.path.exists(settings_path) or \\\n not os.path.isfile(settings_path):\n msg = \"Unable to find settings file: {}\"\n raise SettingsBackendError(msg.format(settings_path))\n\n return settings_path", "docstring": "Check and return the final filepath to settings\n\nArgs:\n path (str): Directory path where to search for settings file.\n filename (str): Filename to use to search for settings file.\n\nRaises:\n boussole.exceptions.SettingsBackendError: If determined filepath\n does not exists or is a directory.\n\nReturns:\n string: Settings file path, joining given path and filename.", "source": "juraj_google_style"} -{"code": "def _GetPathSegmentIndexForValueWeights(self, value_weights):\n\n largest_weight = value_weights.GetLargestWeight()\n\n if largest_weight > 0:\n value_weight_indexes = value_weights.GetIndexesForWeight(largest_weight)\n else:\n value_weight_indexes = []\n\n if value_weight_indexes:\n path_segment_index = value_weight_indexes[0]\n else:\n path_segment_index = value_weights.GetFirstAvailableIndex()\n\n if path_segment_index is None:\n raise RuntimeError('No path segment index found.')\n\n return path_segment_index", "docstring": "Retrieves the index of the path segment based on value weights.\n\nArgs:\n value_weights: the value weights object (instance of _PathSegmentWeights).\n\nReturns:\n An integer containing the path segment index.\n\nRaises:\n RuntimeError: is no path segment index can be found.", "source": "juraj_google_style"} -{"code": "def get_stored_variation(self, experiment, user_profile):\n\n\n user_id = user_profile.user_id\n variation_id = 
user_profile.get_variation_for_experiment(experiment.id)\n\n if variation_id:\n variation = self.config.get_variation_from_id(experiment.key, variation_id)\n if variation:\n self.logger.info('Found a stored decision. User \"%s\" is in variation \"%s\" of experiment \"%s\".' % (\n user_id,\n variation.key,\n experiment.key\n ))\n return variation\n\n return None", "docstring": "Determine if the user has a stored variation available for the given experiment and return that.\n\nArgs:\n experiment: Object representing the experiment for which user is to be bucketed.\n user_profile: UserProfile object representing the user's profile.\n\nReturns:\n Variation if available. None otherwise.", "source": "juraj_google_style"} -{"code": "def save(self, savepath, **kwargs):\n\n\n self.update_bbox()\n tempfile = open(savepath,\"w\")\n json.dump(self._data, tempfile, **kwargs)\n tempfile.close()", "docstring": "Saves the geojson instance to file. To save with a different text encoding use the 'encoding' argument.\n\n Parameters:\n\n - **savepath**: Filepath to save the file.", "source": "juraj_google_style"} -{"code": "def get_modname_from_modpath(module_fpath):\n\n modsubdir_list = get_module_subdir_list(module_fpath)\n modname = '.'.join(modsubdir_list)\n modname = modname.replace('.__init__', '').strip()\n modname = modname.replace('.__main__', '').strip()\n return modname", "docstring": "returns importable name from file path\n\n get_modname_from_modpath\n\nArgs:\n module_fpath (str): module filepath\n\nReturns:\n str: modname\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_path import * # NOQA\n >>> import utool as ut\n >>> module_fpath = ut.util_path.__file__\n >>> modname = ut.get_modname_from_modpath(module_fpath)\n >>> result = modname\n >>> print(result)\n utool.util_path", "source": "juraj_google_style"} -{"code": "def load_obs(self, mask_threshold=0.5):\n\n print(\"Loading obs \", self.run_date, self.model_name, self.forecast_variable)\n start_date = self.run_date + timedelta(hours=self.start_hour)\n end_date = self.run_date + timedelta(hours=self.end_hour)\n mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)\n mrms_grid.load_data()\n if len(mrms_grid.data) > 0:\n self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)\n self.period_obs[self.mrms_variable] = self.raw_obs[self.mrms_variable].max(axis=0)\n if self.obs_mask:\n mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)\n mask_grid.load_data()\n self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)\n self.period_obs[self.mask_variable] = self.raw_obs[self.mask_variable].max(axis=0)", "docstring": "Loads observations and masking grid (if needed).\n\nArgs:\n mask_threshold: Values greater than the threshold are kept, others are masked.", "source": "juraj_google_style"} -{"code": "def defer(target, args=None, kwargs=None, callback=None):\n\n\n obj = _defer(target, args, kwargs, callback)\n obj.finished.connect(lambda: _defer_cleanup(obj))\n obj.start()\n _defer_threads.append(obj)\n return obj", "docstring": "Perform operation in thread with callback\n\n Instances are cached until finished, at which point\n they are garbage collected. 
If we didn't do this,\n Python would step in and garbage collect the thread\n before having had time to finish, resulting in an\n exception.\n\nArgs:\n target (callable): Method or function to call\n callback (callable, optional): Method or function to call\n once `target` has finished.\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def expect_true(condition, msg, extras=None):\n\n try:\n asserts.assert_true(condition, msg, extras)\n except signals.TestSignal as e:\n logging.exception('Expected a `True` value, got `False`.')\n recorder.add_error(e)", "docstring": "Expects an expression evaluates to True.\n\n If the expectation is not met, the test is marked as fail after its\n execution finishes.\n\nArgs:\n expr: The expression that is evaluated.\n msg: A string explaining the details in case of failure.\n extras: An optional field for extra information to be included in test\n result.", "source": "juraj_google_style"} -{"code": "def get(self, prop):\n\n prop_parts = prop.split(\".\")\n val = None\n for part in prop_parts:\n if val is None:\n val = self.obj.get(part)\n else:\n val = val.get(part)\n return val", "docstring": "get the value off the passed in dot notation\n\nArgs:\n prop: a string of the property to retreive\n \"a.b.c\" ~ dictionary['a']['b']['c']", "source": "juraj_google_style"} -{"code": "def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):\n\n # Get the update_id for the playlist\n response, _ = self.music_library._music_lib_search(\n sonos_playlist.item_id, 0, 1)\n update_id = response['UpdateID']\n\n # Form the metadata for queueable_item\n metadata = to_didl_string(queueable_item)\n\n # Make the request\n self.avTransport.AddURIToSavedQueue([\n ('InstanceID', 0),\n ('UpdateID', update_id),\n ('ObjectID', sonos_playlist.item_id),\n ('EnqueuedURI', queueable_item.resources[0].uri),\n ('EnqueuedURIMetaData', metadata),\n # 2 ** 32 - 1 = 4294967295, this field has always this value. Most\n # likely, playlist positions are represented as a 32 bit uint and\n # this is therefore the largest index possible. Asking to add at\n # this index therefore probably amounts to adding it \"at the end\"\n ('AddAtIndex', 4294967295)\n ])", "docstring": "Adds a queueable item to a Sonos' playlist.\n\nArgs:\n queueable_item (DidlObject): the item to add to the Sonos' playlist\n sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to\n which the item should be added", "source": "juraj_google_style"} -{"code": "def __init__(self, role, train_instance_count, train_instance_type, data_location=None, **kwargs):\n\n super(AmazonAlgorithmEstimatorBase, self).__init__(role, train_instance_count, train_instance_type,\n **kwargs)\n\n data_location = data_location or \"s3://{}/sagemaker-record-sets/\".format(\n self.sagemaker_session.default_bucket())\n self.data_location = data_location", "docstring": "Initialize an AmazonAlgorithmEstimatorBase.\n\nArgs:\n data_location (str or None): The s3 prefix to upload RecordSet objects to, expressed as an\n S3 url. For example \"s3://example-bucket/some-key-prefix/\". Objects will be\n saved in a unique sub-directory of the specified location. 
If None, a default\n data location will be used.", "source": "juraj_google_style"} -{"code": "def ParseFileObject(self, parser_mediator, file_object):\n\n try:\n self._ParseFileHeader(file_object)\n except errors.ParseError as exception:\n raise errors.ParseError(\n 'Unable to parse index file header with error: {0!s}'.format(\n exception))\n # Skip over the LRU data, which is 112 bytes in size.\n file_object.seek(112, os.SEEK_CUR)\n self._ParseIndexTable(file_object)", "docstring": "Parses a file-like object.\n\nArgs:\n parser_mediator (ParserMediator): a parser mediator.\n file_object (dfvfs.FileIO): a file-like object to parse.\n\nRaises:\n ParseError: when the file cannot be parsed.", "source": "juraj_google_style"} -{"code": "def uniform_distribution(number_of_nodes):\n\n # The size of the state space for binary nodes is 2^(number of nodes).\n number_of_states = 2 ** number_of_nodes\n # Generate the maximum entropy distribution\n # TODO extend to nonbinary nodes\n return (np.ones(number_of_states) /\n number_of_states).reshape([2] * number_of_nodes)", "docstring": "Return the uniform distribution for a set of binary nodes, indexed by state\n (so there is one dimension per node, the size of which is the number of\n possible states for that node).\n\nArgs:\n nodes (np.ndarray): A set of indices of binary nodes.\n\nReturns:\n np.ndarray: The uniform distribution over the set of nodes.", "source": "juraj_google_style"} -{"code": "def _create_RSA_private_key(self,\n bytes):\n\n\n try:\n private_key = serialization.load_pem_private_key(\n bytes,\n password=None,\n backend=default_backend()\n )\n return private_key\n except Exception:\n private_key = serialization.load_der_private_key(\n bytes,\n password=None,\n backend=default_backend()\n )\n return private_key", "docstring": "Instantiates an RSA key from bytes.\n\nArgs:\n bytes (byte string): Bytes of RSA private key.\n\nReturns:\n private_key\n (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):\n RSA private key created from key bytes.", "source": "juraj_google_style"} -{"code": "def fermi_fourier_trans_inverse_conjugate_4(qubits):\n\n\n yield fswap(qubits[1], qubits[2]),\n yield fermi_fourier_trans_2(qubits[0], qubits[1])\n yield fermi_fourier_trans_2(qubits[2], qubits[3])\n yield fswap(qubits[1], qubits[2])\n yield fermi_fourier_trans_2(qubits[0], qubits[1])\n yield cirq.S(qubits[2]) ** 3\n yield fermi_fourier_trans_2(qubits[2], qubits[3])\n yield fswap(qubits[1], qubits[2])", "docstring": "We will need to map the momentum states in the reversed order for\n spin-down states to the position picture. This transformation can be\n simply implemented the complex conjugate of the former one. We only\n need to change the S gate to S* = S ** 3.\n\nArgs:\n qubits: list of four qubits", "source": "juraj_google_style"} -{"code": "def get_data_path(module_id: str) -> Path:\n\n profile = coordinator.profile\n data_path = get_base_path() / 'profiles' / profile / module_id\n if not data_path.exists():\n data_path.mkdir(parents=True)\n return data_path", "docstring": "Get the path for persistent storage of a module.\n\n This method creates the queried path if not existing.\n\nArgs:\n module_id (str): Module ID\n\nReturns:\n The data path of indicated module.", "source": "juraj_google_style"} -{"code": "def get_property(self, name):\n\n for prop in self.resource.properties:\n if prop.name == name:\n return prop\n\n raise AttributeError(name)", "docstring": "Return a named property for a resource, if available. 
Will raise an `AttributeError` if the property\n does not exist\n\nArgs:\n name (str): Name of the property to return\n\nReturns:\n `ResourceProperty`", "source": "juraj_google_style"} -{"code": "def __call__(self, observed_obj, *arg, **kw):\n\n\n bound_method = getattr(self.inst(), self.method_name)\n if self.identify_observed:\n return bound_method(observed_obj, *arg, **kw)\n else:\n return bound_method(*arg, **kw)", "docstring": "Call the function I wrap.\n\nArgs:\n *arg: The arguments passed to me by the observed object.\n **kw: The keyword args passed to me by the observed object.\n observed_obj: The observed object which called me.\n\nReturns:\n Whatever the function I wrap returns.", "source": "juraj_google_style"} -{"code": "def to_element(self, include_namespaces=False):\n\n # We piggy back on the implementation in DidlItem\n didl_item = DidlItem(\n title=\"DUMMY\",\n # This is ignored. Sonos gets the title from the item_id\n parent_id=\"DUMMY\", # Ditto\n item_id=self.item_id,\n desc=self.desc,\n resources=self.resources\n )\n return didl_item.to_element(include_namespaces=include_namespaces)", "docstring": "Return an ElementTree Element representing this instance.\n\nArgs:\n include_namespaces (bool, optional): If True, include xml\n namespace attributes on the root element\n\nReturns:\n ~xml.etree.ElementTree.Element: The (XML) Element representation of\n this object", "source": "juraj_google_style"} -{"code": "def transform_python_types(self, obj):\n\n\n # date/time values that get serialized as milliseconds\n if is_datetime_type(obj):\n return convert_datetime_type(obj)\n\n if is_timedelta_type(obj):\n return convert_timedelta_type(obj)\n\n # slice objects\n elif isinstance(obj, slice):\n return dict(start=obj.start, stop=obj.stop, step=obj.step)\n\n # NumPy scalars\n elif np.issubdtype(type(obj), np.floating):\n return float(obj)\n elif np.issubdtype(type(obj), np.integer):\n return int(obj)\n elif np.issubdtype(type(obj), np.bool_):\n return bool(obj)\n\n # Decimal values\n elif isinstance(obj, decimal.Decimal):\n return float(obj)\n\n # RelativeDelta gets serialized as a dict\n elif rd and isinstance(obj, rd.relativedelta):\n return dict(years=obj.years,\n months=obj.months,\n days=obj.days,\n hours=obj.hours,\n minutes=obj.minutes,\n seconds=obj.seconds,\n microseconds=obj.microseconds)\n\n else:\n return super(BokehJSONEncoder, self).default(obj)", "docstring": "Handle special scalars such as (Python, NumPy, or Pandas)\n datetimes, or Decimal values.\n\nArgs:\n obj (obj) :\n\n The object to encode. 
Anything not specifically handled in\n this method is passed on to the default system JSON encoder.", "source": "juraj_google_style"} -{"code": "def process_event(self, event_name: str, data: dict):\n\n if (isinstance(self.opt.get(\"learning_rate\", None), float) and\n isinstance(self.opt.get(\"learning_rate_decay\", None), float)):\n pass\n else:\n if event_name == 'after_train_log':\n if (self.get_learning_rate_variable() is not None) and ('learning_rate' not in data):\n data['learning_rate'] = float(K.get_value(self.get_learning_rate_variable()))\n # data['learning_rate'] = self._lr\n if (self.get_momentum_variable() is not None) and ('momentum' not in data):\n data['momentum'] = float(K.get_value(self.get_momentum_variable()))\n # data['momentum'] = self._mom\n else:\n super().process_event(event_name, data)", "docstring": "Process event after epoch\n\nArgs:\n event_name: whether event is send after epoch or batch.\n Set of values: ``\"after_epoch\", \"after_batch\"``\n data: event data (dictionary)\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def post(self, request, customer_uuid):\n\n enterprise_customer = EnterpriseCustomer.objects.get(uuid=customer_uuid) # pylint: disable=no-member\n manage_learners_form = ManageLearnersForm(\n request.POST,\n request.FILES,\n user=request.user,\n enterprise_customer=enterprise_customer\n )\n\n # initial form validation - check that form data is well-formed\n if manage_learners_form.is_valid():\n email_field_as_bulk_input = split_usernames_and_emails(\n manage_learners_form.cleaned_data[ManageLearnersForm.Fields.EMAIL_OR_USERNAME]\n )\n is_bulk_entry = len(email_field_as_bulk_input) > 1\n # The form is valid. Call the appropriate helper depending on the mode:\n mode = manage_learners_form.cleaned_data[ManageLearnersForm.Fields.MODE]\n if mode == ManageLearnersForm.Modes.MODE_SINGULAR and not is_bulk_entry:\n linked_learners = self._handle_singular(enterprise_customer, manage_learners_form)\n elif mode == ManageLearnersForm.Modes.MODE_SINGULAR:\n linked_learners = self._handle_bulk_upload(\n enterprise_customer,\n manage_learners_form,\n request,\n email_list=email_field_as_bulk_input\n )\n else:\n linked_learners = self._handle_bulk_upload(enterprise_customer, manage_learners_form, request)\n\n # _handle_form might add form errors, so we check if it is still valid\n if manage_learners_form.is_valid():\n course_details = manage_learners_form.cleaned_data.get(ManageLearnersForm.Fields.COURSE)\n program_details = manage_learners_form.cleaned_data.get(ManageLearnersForm.Fields.PROGRAM)\n\n notification_type = manage_learners_form.cleaned_data.get(ManageLearnersForm.Fields.NOTIFY)\n notify = notification_type == ManageLearnersForm.NotificationTypes.BY_EMAIL\n\n course_id = None\n if course_details:\n course_id = course_details['course_id']\n\n if course_id or program_details:\n course_mode = manage_learners_form.cleaned_data[ManageLearnersForm.Fields.COURSE_MODE]\n self._enroll_users(\n request=request,\n enterprise_customer=enterprise_customer,\n emails=linked_learners,\n mode=course_mode,\n course_id=course_id,\n program_details=program_details,\n notify=notify,\n )\n\n # Redirect to GET if everything went smooth.\n manage_learners_url = reverse(\"admin:\" + UrlNames.MANAGE_LEARNERS, args=(customer_uuid,))\n search_keyword = self.get_search_keyword(request)\n if search_keyword:\n manage_learners_url = manage_learners_url + \"?q=\" + search_keyword\n return HttpResponseRedirect(manage_learners_url)\n\n # if something went wrong - 
display bound form on the page\n context = self._build_context(request, customer_uuid)\n context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form})\n return render(request, self.template, context)", "docstring": "Handle POST request - handle form submissions.\n\nArgs:\n request (django.http.request.HttpRequest): Request instance\n customer_uuid (str): Enterprise Customer UUID\n\nReturns:\n django.http.response.HttpResponse: HttpResponse", "source": "juraj_google_style"} -{"code": "def __ne__(self, other):\n\n if not isinstance(other, SemanticTime):\n return True\n\n return self._SORT_ORDER != other._SORT_ORDER", "docstring": "Determines if the date time values are not equal to other.\n\nArgs:\n other (DateTimeValues): date time values to compare against.\n\nReturns:\n bool: True if the date time values are not equal to other.", "source": "juraj_google_style"} -{"code": "def QA_SU_save_etf_day(engine, client=DATABASE):\n\n\n engine = select_save_engine(engine)\n engine.QA_SU_save_etf_day(client=client)", "docstring": "save etf_day\n\nArgs:\n engine {[type]} -- [description]\n\n Keyword Arguments:\n client {[type]} -- [description] (default: {DATABASE})", "source": "juraj_google_style"} -{"code": "def run(self, dag):\n\n cx_runs = dag.collect_runs([\"cx\"])\n for cx_run in cx_runs:\n # Partition the cx_run into chunks with equal gate arguments\n partition = []\n chunk = []\n for i in range(len(cx_run) - 1):\n chunk.append(cx_run[i])\n\n qargs0 = cx_run[i].qargs\n qargs1 = cx_run[i + 1].qargs\n\n if qargs0 != qargs1:\n partition.append(chunk)\n chunk = []\n chunk.append(cx_run[-1])\n partition.append(chunk)\n # Simplify each chunk in the partition\n for chunk in partition:\n if len(chunk) % 2 == 0:\n for n in chunk:\n dag.remove_op_node(n)\n else:\n for n in chunk[1:]:\n dag.remove_op_node(n)\n return dag", "docstring": "Run one pass of cx cancellation on the circuit\n\nArgs:\n dag (DAGCircuit): the directed acyclic graph to run on.\n\nReturns:\n DAGCircuit: Transformed DAG.", "source": "juraj_google_style"} -{"code": "def get_ast_dict(belstr, component_type: str = \"\"):\n\n\n errors = []\n parsed = {}\n bels = list(belstr)\n char_locs, errors = parse_chars(bels, errors)\n parsed, errors = parse_functions(belstr, char_locs, parsed, errors)\n parsed, errors = parse_args(bels, char_locs, parsed, errors)\n parsed, errors = arg_types(parsed, errors)\n parsed, errors = parse_relations(belstr, char_locs, parsed, errors)\n parsed, errors = parse_nested(bels, char_locs, parsed, errors)\n errors = parsed_top_level_errors(parsed, errors)\n\n ast, errors = parsed_to_ast(parsed, errors, component_type=component_type)\n\n return ast, errors", "docstring": "Convert BEL string to AST dictionary\n\nArgs:\n belstr: BEL string\n component_type: Empty string or 'subject' or 'object' to indicate that we\n are parsing the subject or object field input", "source": "juraj_google_style"} -{"code": "def to_sql(cls, qc, **kwargs):\n\n # we first insert an empty DF in order to create the full table in the database\n # This also helps to validate the input against pandas\n # we would like to_sql() to complete only when all rows have been inserted into the database\n # since the mapping operation is non-blocking, each partition will return an empty DF\n # so at the end, the blocking operation will be this empty DF to_pandas\n\n empty_df = qc.head(1).to_pandas().head(0)\n empty_df.to_sql(**kwargs)\n # so each partition will append its respective DF\n kwargs[\"if_exists\"] = \"append\"\n columns = 
qc.columns\n\n def func(df, **kwargs):\n df.columns = columns\n df.to_sql(**kwargs)\n return pandas.DataFrame()\n\n map_func = qc._prepare_method(func, **kwargs)\n result = qc._map_across_full_axis(1, map_func)\n # blocking operation\n result.to_pandas()", "docstring": "Write records stored in a DataFrame to a SQL database.\n\nArgs:\n qc: the query compiler of the DF that we want to run to_sql on\n kwargs: parameters for pandas.to_sql(**kwargs)", "source": "juraj_google_style"} -{"code": "def ComponentsToPath(components):\n\n precondition.AssertIterableType(components, Text)\n for component in components:\n if not component:\n raise ValueError(\"Empty path component in: {}\".format(components))\n\n if \"/\" in component:\n raise ValueError(\"Path component with '/' in: {}\".format(components))\n\n if components:\n return \"/\" + \"/\".join(components)\n else:\n return \"\"", "docstring": "Converts a list of path components to a canonical path representation.\n\nArgs:\n components: A sequence of path components.\n\nReturns:\n A canonical MySQL path representation.", "source": "juraj_google_style"} -{"code": "def search(self, filters):\n\n records = self.__model__.search(self.__five9__, filters)\n return self.__class__(\n self.__five9__, self.__model__, records,\n )", "docstring": "Search Five9 given a filter.\n\nArgs:\n filters (dict): A dictionary of search strings, keyed by the name\n of the field to search.\n\nReturns:\n Environment: An environment representing the recordset.", "source": "juraj_google_style"} -{"code": "def __init__(self, config):\n\n super(ModelExporter, self).__init__()\n self.config = config", "docstring": "Initialise the export process.\n\nArgs:\n config (PredictConfig): the config to use.\n The graph will be built with the tower function defined by this `PredictConfig`.\n Then the input / output names will be used to export models for inference.", "source": "juraj_google_style"} -{"code": "def trace(self, predicate):\n\n self._handler = predicate\n if self.threading_support is None or self.threading_support:\n self._threading_previous = getattr(threading, '_trace_hook', None)\n threading.settrace(self)\n self._previous = sys.gettrace()\n sys.settrace(self)\n return self", "docstring": "Starts tracing with the given callable.\n\nArgs:\n predicate (callable that accepts a single :obj:`hunter.Event` argument):\n\nReturns:\n self", "source": "juraj_google_style"} -{"code": "def _GetUnsortedNotifications(self,\n queue_shard,\n notifications_by_session_id=None):\n\n if notifications_by_session_id is None:\n notifications_by_session_id = {}\n end_time = self.frozen_timestamp or rdfvalue.RDFDatetime.Now()\n for notification in self.data_store.GetNotifications(queue_shard, end_time):\n\n existing = notifications_by_session_id.get(notification.session_id)\n if existing:\n # If we have a notification for this session_id already, we only store\n # the one that was scheduled last.\n if notification.first_queued > existing.first_queued:\n notifications_by_session_id[notification.session_id] = notification\n elif notification.first_queued == existing.first_queued and (\n notification.last_status > existing.last_status):\n # Multiple notifications with the same timestamp should not happen.\n # We can still do the correct thing and use the latest one.\n logging.warning(\n \"Notifications with equal first_queued fields detected: %s %s\",\n notification, existing)\n notifications_by_session_id[notification.session_id] = notification\n else:\n 
notifications_by_session_id[notification.session_id] = notification\n\n return notifications_by_session_id", "docstring": "Returns all the available notifications for a queue_shard.\n\nArgs:\n queue_shard: urn of queue shard\n notifications_by_session_id: store notifications in this dict rather than\n creating a new one\n\nReturns:\n dict of notifications. keys are session ids.", "source": "juraj_google_style"} -{"code": "def update_port_monitor(self, resource, timeout=-1):\n\n data = resource.copy()\n if 'type' not in data:\n data['type'] = 'port-monitor'\n\n uri = \"{}{}\".format(self.data[\"uri\"], self.PORT_MONITOR_PATH)\n return self._helper.update(data, uri=uri, timeout=timeout)", "docstring": "Updates the port monitor configuration of a logical interconnect.\n\nArgs:\n resource: Port monitor configuration.\n\nReturns:\n dict: Port monitor configuration.", "source": "juraj_google_style"} -{"code": "def dump_json(json_info, json_file, overwrite=True):\n\n if overwrite:\n mode = \"w\"\n else:\n mode = \"w+\"\n\n try:\n with open(json_file, mode) as f:\n f.write(json.dumps(json_info))\n except BaseException as e:\n logging.error(e.message)", "docstring": "Dump a whole json record into the given file.\n\n Overwrite the file if the overwrite flag set.\n\nArgs:\n json_info (dict): Information dict to be dumped.\n json_file (str): File path to be dumped to.\n overwrite(boolean)", "source": "juraj_google_style"} -{"code": "def _add_validator(fv, validator_instance):\n\n for flag_name in validator_instance.get_flags_names():\n fv[flag_name].validators.append(validator_instance)", "docstring": "Register new flags validator to be checked.\n\nArgs:\n fv: flags.FlagValues, the FlagValues instance to add the validator.\n validator_instance: validators.Validator, the validator to add.\n\nRaises:\n KeyError: Raised when validators work with a non-existing flag.", "source": "juraj_google_style"} -{"code": "def get_permissions(self, grp_name, resource):\n\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_permissions(grp_name, resource)", "docstring": "Get permissions associated the group has with the given resource.\n\nArgs:\n grp_name (string): Name of group.\n resource (intern.resource.boss.Resource): Identifies which data\n model object to operate on.\n\nReturns:\n (list): List of permissions.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def get_file_download(self, resources):\n\n api_name = 'virustotal-file-download'\n api_endpoint = 'file/download'\n return self._extract_all_responses(resources, api_endpoint, api_name)", "docstring": "Retrieves a file from its a md5, sha1, and/or sha2 hash.\n\nArgs:\n resources: list of string hashes.\n\nReturns:\n a file download", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.ListEntityTypes = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/ListEntityTypes',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.ListEntityTypesRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.ListEntityTypesResponse.FromString,\n )\n self.GetEntityType = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/GetEntityType',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.GetEntityTypeRequest.SerializeToString,\n 
response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,\n )\n self.CreateEntityType = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/CreateEntityType',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.CreateEntityTypeRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,\n )\n self.UpdateEntityType = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/UpdateEntityType',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.UpdateEntityTypeRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,\n )\n self.DeleteEntityType = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/DeleteEntityType',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.DeleteEntityTypeRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.BatchUpdateEntityTypes = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchUpdateEntityTypes',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchUpdateEntityTypesRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.BatchDeleteEntityTypes = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchDeleteEntityTypes',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchDeleteEntityTypesRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.BatchCreateEntities = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchCreateEntities',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchCreateEntitiesRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.BatchUpdateEntities = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchUpdateEntities',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchUpdateEntitiesRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.BatchDeleteEntities = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchDeleteEntities',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchDeleteEntitiesRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def allconcat(self, x, mesh_axis, concat_axis):\n\n return self._collective_with_groups(\n x, [mesh_axis],\n functools.partial(allconcat_ring, concat_axis=concat_axis))", "docstring": "Grouped allconcat (like MPI allgather followed by concat).\n\nArgs:\n x: a LaidOutTensor\n mesh_axis: an integer - the mesh axis along which to group\n concat_axis: an integer (the Tensor axis along which to concatenate)\n\nReturns:\n a LaidOutTensor", "source": 
"juraj_google_style"} -{"code": "def clear_events(self, event_name):\n\n self.lock.acquire()\n try:\n q = self.get_event_q(event_name)\n q.queue.clear()\n except queue.Empty:\n return\n finally:\n self.lock.release()", "docstring": "Clear all events of a particular name.\n\nArgs:\n event_name: Name of the events to be popped.", "source": "juraj_google_style"} -{"code": "def idxmax(self, axis=0, skipna=True, *args, **kwargs):\n\n if not all(d != np.dtype(\"O\") for d in self._get_dtypes()):\n raise TypeError(\"reduction operation 'argmax' not allowed for this dtype\")\n axis = self._get_axis_number(axis)\n return self._reduce_dimension(\n self._query_compiler.idxmax(axis=axis, skipna=skipna)\n )", "docstring": "Get the index of the first occurrence of the max value of the axis.\n\nArgs:\n axis (int): Identify the max over the rows (1) or columns (0).\n skipna (bool): Whether or not to skip NA values.\n\nReturns:\n A Series with the index for each maximum value for the axis\n specified.", "source": "juraj_google_style"} -{"code": "def date_range(start, end, boo):\n\n earliest = datetime.strptime(start.replace('-', ' '), '%Y %m %d')\n latest = datetime.strptime(end.replace('-', ' '), '%Y %m %d')\n num_days = (latest - earliest).days + 1\n all_days = [latest - timedelta(days=x) for x in range(num_days)]\n all_days.reverse()\n\n output = []\n\n if boo:\n # Return as Integer, yyyymmdd\n for d in all_days:\n output.append(int(str(d).replace('-', '')[:8]))\n else:\n # Return as String, yyyy-mm-dd\n for d in all_days:\n output.append(str(d)[:10])\n return output", "docstring": "Return list of dates within a specified range, inclusive.\n\nArgs:\n start: earliest date to include, String (\"2015-11-25\")\n end: latest date to include, String (\"2015-12-01\")\n boo: if true, output list contains Numbers (20151230); if false, list contains Strings (\"2015-12-30\")\n\nReturns:\n list of either Numbers or Strings", "source": "juraj_google_style"} -{"code": "def register_service(self, short_name, long_name, allow_duplicate=True):\n\n\n self._loop.run_coroutine(self._client.register_service(short_name, long_name, allow_duplicate))", "docstring": "Register a new service with the service manager.\n\nArgs:\n short_name (string): A unique short name for this service that functions\n as an id\n long_name (string): A user facing name for this service\n allow_duplicate (boolean): Don't throw an error if this service is already\n registered. 
This is important if the service is preregistered for example.\n\nRaises:\n ArgumentError: if the short_name is already taken", "source": "juraj_google_style"} -{"code": "def get_latest_package(name, range_=None, paths=None, error=False):\n\n it = iter_packages(name, range_=range_, paths=paths)\n try:\n return max(it, key=lambda x: x.version)\n except ValueError: # empty sequence\n if error:\n # FIXME this isn't correct, since the pkg fam may exist but a pkg\n # in the range does not.\n raise PackageFamilyNotFoundError(\"No such package family %r\" % name)\n return None", "docstring": "Get the latest package for a given package name.\n\nArgs:\n name (str): Package name.\n range_ (`VersionRange`): Version range to search within.\n paths (list of str, optional): paths to search for package families,\n defaults to `config.packages_path`.\n error (bool): If True, raise an error if no package is found.\n\nReturns:\n `Package` object, or None if no package is found.", "source": "juraj_google_style"} -{"code": "def _commit_change(alias_table, export_path=None, post_commit=True):\n\n with open(export_path or GLOBAL_ALIAS_PATH, 'w+') as alias_config_file:\n alias_table.write(alias_config_file)\n if post_commit:\n alias_config_file.seek(0)\n alias_config_hash = hashlib.sha1(alias_config_file.read().encode('utf-8')).hexdigest()\n AliasManager.write_alias_config_hash(alias_config_hash)\n collided_alias = AliasManager.build_collision_table(alias_table.sections())\n AliasManager.write_collided_alias(collided_alias)\n build_tab_completion_table(alias_table)", "docstring": "Record changes to the alias table.\n Also write new alias config hash and collided alias, if any.\n\nArgs:\n alias_table: The alias table to commit.\n export_path: The path to export the aliases to. Default: GLOBAL_ALIAS_PATH.\n post_commit: True if we want to perform some extra actions after writing alias to file.", "source": "juraj_google_style"} -{"code": "def make_code_readable(s):\n\n\n s = s if isinstance(s, str) else str(s)\n\n MAP = {\",\": \",\\n\", \"{\": \"{\\n \", \"}\": \"\\n}\"}\n\n ll = []\n\n state = \"open\"\n flag_single = False\n flag_double = False\n flag_backslash = False\n for ch in s:\n if flag_backslash:\n flag_backslash = False\n continue\n\n if ch == \"\\\\\":\n flag_backslash = True\n continue\n\n if flag_single:\n if ch == \"'\":\n flag_single = False\n elif not flag_double and ch == \"'\":\n flag_single = True\n\n if flag_double:\n if ch == '\"':\n flag_double = False\n elif not flag_single and ch == '\"':\n flag_double = True\n\n if flag_single or flag_double:\n ll.append(ch)\n else:\n ll.append(MAP.get(ch, ch))\n\n return \"\".join(ll)", "docstring": "Add newlines at strategic places in code string for printing.\n\nArgs:\n s: str, piece of code. If not str, will attempt to convert to str.\n\nReturns:\n str", "source": "juraj_google_style"} -{"code": "def role_instance(self, value):\n\n if value == self._defaults['ai.cloud.roleInstance'] and 'ai.cloud.roleInstance' in self._values:\n del self._values['ai.cloud.roleInstance']\n else:\n self._values['ai.cloud.roleInstance'] = value", "docstring": "The role_instance property.\n\nArgs:\n value (string). 
the property value.", "source": "juraj_google_style"} -{"code": "def parse_GSM(filepath, entry_name=None):\n\n if isinstance(filepath, str):\n with utils.smart_open(filepath) as f:\n soft = []\n has_table = False\n for line in f:\n if \"_table_begin\" in line or (not line.startswith((\"^\", \"!\", \"#\"))):\n has_table = True\n soft.append(line.rstrip())\n else:\n soft = []\n has_table = False\n for line in filepath:\n if \"_table_begin\" in line or (not line.startswith((\"^\", \"!\", \"#\"))):\n has_table = True\n soft.append(line.rstrip())\n\n if entry_name is None:\n sets = [i for i in soft if i.startswith(\"^\")]\n if len(sets) > 1:\n raise Exception(\"More than one entry in GPL\")\n if len(sets) == 0:\n raise NoEntriesException(\n \"No entries found. Check the if accession is correct!\")\n entry_name = parse_entry_name(sets[0])\n\n columns = parse_columns(soft)\n metadata = parse_metadata(soft)\n if has_table:\n table_data = parse_table_data(soft)\n else:\n table_data = DataFrame()\n\n gsm = GSM(name=entry_name,\n table=table_data,\n metadata=metadata,\n columns=columns)\n\n return gsm", "docstring": "Parse GSM entry from SOFT file.\n\nArgs:\n filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry\n or list of lines representing GSM from GSE file.\n entry_name (:obj:`str`, optional): Name of the entry. By default it is\n inferred from the data.\n\nReturns:\n :obj:`GEOparse.GSM`: A GSM object.", "source": "juraj_google_style"} -{"code": "def _model_to_dict(obj):\n\n result = _properties_model_to_dict(obj.properties)\n for attribute in ('metadata', 'snapshot'):\n try:\n value = getattr(obj, attribute)\n except AttributeError:\n continue\n if value:\n result[attribute] = value\n return result", "docstring": "Convert object model to dict.\n\nArgs:\n obj: Object model.\n\nReturns:\n dict: Converted model.", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.ListIntents = channel.unary_unary(\n '/google.cloud.dialogflow.v2.Intents/ListIntents',\n request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.ListIntentsRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.ListIntentsResponse.FromString,\n )\n self.GetIntent = channel.unary_unary(\n '/google.cloud.dialogflow.v2.Intents/GetIntent',\n request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.GetIntentRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,\n )\n self.CreateIntent = channel.unary_unary(\n '/google.cloud.dialogflow.v2.Intents/CreateIntent',\n request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.CreateIntentRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,\n )\n self.UpdateIntent = channel.unary_unary(\n '/google.cloud.dialogflow.v2.Intents/UpdateIntent',\n request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.UpdateIntentRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,\n )\n self.DeleteIntent = channel.unary_unary(\n '/google.cloud.dialogflow.v2.Intents/DeleteIntent',\n request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.DeleteIntentRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n 
self.BatchUpdateIntents = channel.unary_unary(\n '/google.cloud.dialogflow.v2.Intents/BatchUpdateIntents',\n request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.BatchUpdateIntentsRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.BatchDeleteIntents = channel.unary_unary(\n '/google.cloud.dialogflow.v2.Intents/BatchDeleteIntents',\n request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.BatchDeleteIntentsRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def _log_band_gap_information(bs):\n\n bg_data = bs.get_band_gap()\n if not bg_data['direct']:\n logging.info('Indirect band gap: {:.3f} eV'.format(bg_data['energy']))\n\n direct_data = bs.get_direct_band_gap_dict()\n if bs.is_spin_polarized:\n direct_bg = min((spin_data['value']\n for spin_data in direct_data.values()))\n logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))\n\n for spin, spin_data in direct_data.items():\n direct_kindex = spin_data['kpoint_index']\n direct_kpoint = bs.kpoints[direct_kindex].frac_coords\n direct_kpoint = kpt_str.format(k=direct_kpoint)\n eq_kpoints = bs.get_equivalent_kpoints(direct_kindex)\n k_indices = ', '.join(map(str, eq_kpoints))\n\n # add 1 to band indices to be consistent with VASP band numbers.\n b_indices = ', '.join([str(i+1) for i in spin_data['band_indices']])\n\n logging.info(' {}:'.format(spin.name.capitalize()))\n logging.info(' k-point: {}'.format(direct_kpoint))\n logging.info(' k-point indices: {}'.format(k_indices))\n logging.info(' Band indices: {}'.format(b_indices))\n\n else:\n direct_bg = direct_data[Spin.up]['value']\n logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))\n\n direct_kindex = direct_data[Spin.up]['kpoint_index']\n direct_kpoint = kpt_str.format(k=bs.kpoints[direct_kindex].frac_coords)\n k_indices = ', '.join(map(str,\n bs.get_equivalent_kpoints(direct_kindex)))\n b_indices = ', '.join([str(i+1) for i in\n direct_data[Spin.up]['band_indices']])\n\n logging.info(' k-point: {}'.format(direct_kpoint))\n logging.info(' k-point indices: {}'.format(k_indices))\n logging.info(' Band indices: {}'.format(b_indices))", "docstring": "Log data about the direct and indirect band gaps.\n\nArgs:\n bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):", "source": "juraj_google_style"} -{"code": "def relative_probability_from_lookup_table( self, jump_lookup_table ):\n\n l1 = self.initial_site.label\n l2 = self.final_site.label\n c1 = self.initial_site.nn_occupation()\n c2 = self.final_site.nn_occupation()\n return jump_lookup_table.jump_probability[ l1 ][ l2 ][ c1 ][ c2 ]", "docstring": "Relative probability of accepting this jump from a lookup-table.\n\nArgs:\n jump_lookup_table (LookupTable): the lookup table to be used for this jump.\n\nReturns:\n (Float): relative probability of accepting this jump.", "source": "juraj_google_style"} -{"code": "def parse_vhdl_file(fname):\n\n with open(fname, 'rt') as fh:\n text = fh.read()\n return parse_vhdl(text)", "docstring": "Parse a named VHDL file\n\nArgs:\n fname(str): Name of file to parse\n\nReturns:\n Parsed objects.", "source": "juraj_google_style"} -{"code": "def gbest_idx(swarm):\n\n best = 0\n cmp = comparator(swarm[best].best_fitness)\n for (idx, particle) in enumerate(swarm):\n if 
cmp(particle.best_fitness, swarm[best].best_fitness):\n best = idx\n return best", "docstring": "gbest Neighbourhood topology function.\n\nArgs:\n swarm: list: The list of particles.\n\nReturns:\n int: The index of the gbest particle.", "source": "juraj_google_style"} -{"code": "def get_metadata(did):\n\n try:\n asset_record = dao.get(did)\n metadata = _get_metadata(asset_record['service'])\n return Response(_sanitize_record(metadata), 200, content_type='application/json')\n except Exception as e:\n logger.error(e)\n return f'{did} asset DID is not in OceanDB', 404", "docstring": "Get metadata of a particular asset\n ---\n tags:\n - metadata\n parameters:\n - name: did\n in: path\n description: DID of the asset.\n required: true\n type: string\n responses:\n 200:\n description: successful operation.\n 404:\n description: This asset DID is not in OceanDB.", "source": "juraj_google_style"} -{"code": "def try_pick_piece_of_work(self, worker_id, submission_id=None):\n\n client = self._datastore_client\n unclaimed_work_ids = None\n if submission_id:\n unclaimed_work_ids = [\n k for k, v in iteritems(self.work)\n if is_unclaimed(v) and (v['submission_id'] == submission_id)\n ]\n if not unclaimed_work_ids:\n unclaimed_work_ids = [k for k, v in iteritems(self.work)\n if is_unclaimed(v)]\n if unclaimed_work_ids:\n next_work_id = random.choice(unclaimed_work_ids)\n else:\n return None\n try:\n with client.transaction() as transaction:\n work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,\n KIND_WORK, next_work_id)\n work_entity = client.get(work_key, transaction=transaction)\n if not is_unclaimed(work_entity):\n return None\n work_entity['claimed_worker_id'] = worker_id\n work_entity['claimed_worker_start_time'] = get_integer_time()\n transaction.put(work_entity)\n except Exception:\n return None\n return next_work_id", "docstring": "Tries pick next unclaimed piece of work to do.\n\n Attempt to claim work piece is done using Cloud Datastore transaction, so\n only one worker can claim any work piece at a time.\n\nArgs:\n worker_id: ID of current worker\n submission_id: if not None then this method will try to pick\n piece of work for this submission\n\nReturns:\n ID of the claimed work piece", "source": "juraj_google_style"} -{"code": "def argmin(input_, key=None):\n\n # if isinstance(input_, dict):\n # return list(input_.keys())[argmin(list(input_.values()))]\n # elif hasattr(input_, 'index'):\n # return input_.index(min(input_))\n # else:\n # return min(enumerate(input_), key=operator.itemgetter(1))[0]\n if isinstance(input, dict):\n return list(input.keys())[argmin(list(input.values()), key=key)]\n else:\n if key is None:\n def _key(item):\n return item[1]\n else:\n def _key(item):\n return key(item[1])\n return min(enumerate(input), key=_key)[0]", "docstring": "Returns index / key of the item with the smallest value.\n\nArgs:\n input_ (dict or list):\n\nNote:\n a[argmin(a, key=key)] == min(a, key=key)", "source": "juraj_google_style"} -{"code": "def table(text):\n\n\n def table_bar(col_lengths):\n return \"+-%s-+%s\" % (\n \"-+-\".join([\"-\" * length for length in col_lengths]),\n os.linesep,\n )\n\n rows = []\n for line in text.splitlines():\n rows.append([part.strip() for part in line.split(\"|\")])\n max_cols = max(map(len, rows))\n col_lengths = [0] * max_cols\n for row in rows:\n cols = len(row)\n if cols < max_cols:\n row.extend([\"\"] * (max_cols - cols))\n for i, col in enumerate(row):\n col_length = len(col)\n if col_length > col_lengths[i]:\n col_lengths[i] = col_length\n 
text = table_bar(col_lengths)\n for i, row in enumerate(rows):\n cols = []\n for i, col in enumerate(row):\n cols.append(col.ljust(col_lengths[i]))\n text += \"| %s |%s\" % (\" | \".join(cols), os.linesep)\n text += table_bar(col_lengths)\n return text", "docstring": "Format the text as a table.\n\n Text in format:\n\n first | second\n row 2 col 1 | 4\n\n Will be formatted as::\n\n +-------------+--------+\n | first | second |\n +-------------+--------+\n | row 2 col 1 | 4 |\n +-------------+--------+\n\nArgs:\n text (str): Text that needs to be formatted.\n\nReturns:\n str: Formatted string.", "source": "juraj_google_style"} -{"code": "def get_name(node):\n\n if isinstance(node, gast.Name):\n return node.id\n elif isinstance(node, (gast.Subscript, gast.Attribute)):\n return get_name(node.value)\n else:\n raise TypeError", "docstring": "Get the name of a variable.\n\nArgs:\n node: A `Name`, `Subscript` or `Attribute` node.\n\nReturns:\n The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.", "source": "juraj_google_style"} -{"code": "def _sideral(date, longitude=0., model='mean', eop_correction=True, terms=106):\n\n\n t = date.change_scale('UT1').julian_century\n\n # Compute GMST in seconds\n theta = 67310.54841 + (876600 * 3600 + 8640184.812866) * t + 0.093104 * t ** 2\\\n - 6.2e-6 * t ** 3\n\n # Conversion from second (time) to degrees (angle)\n theta /= 240.\n\n if model == 'apparent':\n theta += equinox(date, eop_correction, terms)\n\n # Add local longitude to the sideral time\n theta += longitude\n # Force to 0-360 degrees range\n theta %= 360.\n\n return theta", "docstring": "Get the sideral time at a defined date\n\nArgs:\n date (Date):\n longitude (float): Longitude of the observer (in degrees)\n East positive/West negative.\n model (str): 'mean' or 'apparent' for GMST and GAST respectively\n\nReturns:\n float: Sideral time in degrees\n\n GMST: Greenwich Mean Sideral Time\n LST: Local Sideral Time (Mean)\n GAST: Greenwich Apparent Sideral Time", "source": "juraj_google_style"} -{"code": "def main(argv=None):\n\n\n if argv is None:\n argv = sys.argv[1:]\n\n parser = build_args()\n args = parser.parse_args(args=argv)\n\n recipe_name, _ext = os.path.splitext(os.path.basename(args.recipe))\n\n rm = RecipeManager()\n rm.add_recipe_folder(os.path.dirname(args.recipe), whitelist=[os.path.basename(args.recipe)])\n recipe = rm.get_recipe(recipe_name)\n\n if args.archive is not None:\n print(\"Archiving recipe into %s\" % args.archive)\n recipe.archive(args.archive)\n return 0\n\n if args.info:\n print(recipe)\n return 0\n\n variables = load_variables(args.define, args.config)\n\n success = 0\n\n start_time = time.time()\n\n if args.loop is None:\n try:\n recipe.run(variables)\n success += 1\n except IOTileException as exc:\n print(\"Error running recipe: %s\" % str(exc))\n return 1\n else:\n while True:\n value = input(\"Enter value for loop variable %s (return to stop): \" % args.loop)\n\n if value == '':\n break\n\n local_vars = dict(**variables)\n local_vars[args.loop] = value\n\n try:\n recipe.run(local_vars)\n success += 1\n except IOTileException as exc:\n print(\"--> ERROR processing loop variable %s: %s\" % (value, str(exc)))\n\n end_time = time.time()\n total_time = end_time - start_time\n\n if success == 0:\n per_time = 0.0\n else:\n per_time = total_time / success\n\n print(\"Performed %d runs in %.1f seconds (%.1f seconds / run)\" % (success, total_time, per_time))\n return 0", "docstring": "Main entry point for iotile-ship recipe runner.\n\n This is the iotile-ship 
command line program.\n\nArgs:\n argv (list of str): An optional set of command line\n parameters. If not passed, these are taken from\n sys.argv.", "source": "juraj_google_style"} -{"code": "def get_rng(obj=None):\n\n seed = (id(obj) + os.getpid() +\n int(datetime.now().strftime(\"%Y%m%d%H%M%S%f\"))) % 4294967295\n if _RNG_SEED is not None:\n seed = _RNG_SEED\n return np.random.RandomState(seed)", "docstring": "Get a good RNG seeded with time, pid and the object.\n\nArgs:\n obj: some object to use to generate random seed.\n\nReturns:\n np.random.RandomState: the RNG.", "source": "juraj_google_style"} -{"code": "def __init__(self, direction, edge_name, depth, within_optional_scope=False):\n\n super(Recurse, self).__init__(\n direction, edge_name, depth, within_optional_scope=within_optional_scope)\n self.direction = direction\n self.edge_name = edge_name\n self.depth = depth\n # Denotes whether the traversal is occuring after a prior @optional traversal\n self.within_optional_scope = within_optional_scope\n self.validate()", "docstring": "Create a new Recurse block which traverses the given edge up to \"depth\" times.\n\nArgs:\n direction: string, 'in' or 'out'.\n edge_name: string obeying variable name rules (see validate_safe_string).\n depth: int, always greater than or equal to 1.\n\nReturns:\n new Recurse object", "source": "juraj_google_style"} -{"code": "def dtype(self, value):\n\n for (dtype, string) in self._all:\n if string == value:\n return dtype\n\n return None", "docstring": "Gets the datatype for the given `value` (description).\n\nArgs:\n value (str): A text description for any datatype.\n\nReturns:\n numpy.dtype: The matching datatype for the given text.\n None: If no match can be found, `None` will be returned.", "source": "juraj_google_style"} -{"code": "def retrieve(url):\n\n try:\n pem_data = urlopen(url).read()\n except (ValueError, HTTPError):\n warnings.warn('Certificate URL is invalid.')\n return False\n\n if sys.version >= '3':\n try:\n pem_data = pem_data.decode()\n except(UnicodeDecodeError):\n warnings.warn('Certificate encoding is not utf-8.')\n return False\n\n return _parse_pem_data(pem_data)", "docstring": "Retrieve and parse PEM-encoded X.509 certificate chain.\n\n See `validate.request` for additional info.\n\nArgs:\n url: str. SignatureCertChainUrl header value sent by request.\n\nReturns:\n list or bool: If url is valid, returns the certificate chain as a list\n of cryptography.hazmat.backends.openssl.x509._Certificate\n certificates where certs[0] is the first certificate in the file; if\n url is invalid, returns False.", "source": "juraj_google_style"} -{"code": "def bcs_parameters(n_site, n_fermi, u, t) :\n\n\n # The wave numbers satisfy the periodic boundary condition.\n wave_num = np.linspace(0, 1, n_site, endpoint=False)\n # The hopping energy as a function of wave numbers\n hop_erg = -2 * t * np.cos(2 * np.pi * wave_num)\n # Finding the Fermi energy\n fermi_erg = hop_erg[n_fermi // 2]\n # Set the Fermi energy to zero\n hop_erg = hop_erg - fermi_erg\n\n def _bcs_gap(x):\n\n\n s = 0.\n for i in range(n_site):\n s += 1. / np.sqrt(hop_erg[i] ** 2 + x ** 2)\n return 1 + s * u / (2 * n_site)\n\n # Superconducting gap\n delta = scipy.optimize.bisect(_bcs_gap, 0.01, 10000. 
* abs(u))\n # The amplitude of the double excitation state\n bcs_v = np.sqrt(0.5 * (1 - hop_erg / np.sqrt(hop_erg ** 2 + delta ** 2)))\n # The rotational angle in the Bogoliubov transformation.\n bog_theta = np.arcsin(bcs_v)\n\n return delta, bog_theta", "docstring": "Generate the parameters for the BCS ground state, i.e., the\n superconducting gap and the rotational angles in the Bogoliubov\n transformation.\n\nArgs:\n n_site: the number of sites in the Hubbard model\n n_fermi: the number of fermions\n u: the interaction strength\n t: the tunneling strength\n\nReturns:\n float delta, List[float] bog_theta", "source": "juraj_google_style"} -{"code": "def _next_file_gen(self, root):\n\n for e in root.getiterator(common._T_CONTENTS):\n st_ctime, size, etag, key = None, None, None, None\n for child in e.getiterator('*'):\n if child.tag == common._T_LAST_MODIFIED:\n st_ctime = common.dt_str_to_posix(child.text)\n elif child.tag == common._T_ETAG:\n etag = child.text\n elif child.tag == common._T_SIZE:\n size = child.text\n elif child.tag == common._T_KEY:\n key = child.text\n yield common.GCSFileStat(self._path + '/' + key,\n size, etag, st_ctime)\n e.clear()\n yield None", "docstring": "Generator for next file element in the document.\n\nArgs:\n root: root element of the XML tree.\n\nYields:\n GCSFileStat for the next file.", "source": "juraj_google_style"} -{"code": "def as_session(name_or_func): # decorator\n\n if callable(name_or_func): # no name provided\n func = name_or_func\n name = func.__name__\n name = \"\".join([(' ' + x) if x.isupper() else x for x in name])\n name = name.replace('_', ' ')\n return as_session(name)(func) # deco(func) -> deco(name)(func)\n else:\n name = name_or_func\n\n def get_func(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n start()\n title(name)\n result = func(*args, **kwargs)\n end()\n return result\n return wrapper\n return get_func", "docstring": "print start/title/end info before and after the function call\n\nArgs:\n title: title will show after the start, if has any", "source": "juraj_google_style"} -{"code": "def status(self, order_id):\n\n\n self.logger.debug('Get status of order ' + order_id)\n url = '%(base_url)s/order/%(order_id)s' % {\n 'base_url': self.base_url, 'order_id': order_id\n }\n r = self.gbdx_connection.get(url)\n r.raise_for_status()\n return r.json().get(\"acquisitions\", {})", "docstring": "Checks imagery order status. There can be more than one image per\n order and this function returns the status of all images\n within the order.\n\nArgs:\n order_id (str): The id of the order placed.\n\nReturns:\n List of dictionaries, one per image. 
Each dictionary consists\n of the keys 'acquisition_id', 'location' and 'state'.", "source": "juraj_google_style"} -{"code": "def make_export_strategy(\n args,\n keep_target,\n assets_extra,\n features,\n schema,\n stats):\n\n target_name = feature_transforms.get_target_name(features)\n csv_header = [col['name'] for col in schema]\n if not keep_target:\n csv_header.remove(target_name)\n\n def export_fn(estimator, export_dir_base, checkpoint_path=None, eval_result=None):\n with ops.Graph().as_default() as g:\n contrib_variables.create_global_step(g)\n\n input_ops = feature_transforms.build_csv_serving_tensors_for_training_step(\n args.analysis, features, schema, stats, keep_target)\n model_fn_ops = estimator._call_model_fn(input_ops.features,\n None,\n model_fn_lib.ModeKeys.INFER)\n output_fetch_tensors = make_prediction_output_tensors(\n args=args,\n features=features,\n input_ops=input_ops,\n model_fn_ops=model_fn_ops,\n keep_target=keep_target)\n\n # Don't use signature_def_utils.predict_signature_def as that renames\n # tensor names if there is only 1 input/output tensor!\n signature_inputs = {key: tf.saved_model.utils.build_tensor_info(tensor)\n for key, tensor in six.iteritems(input_ops.default_inputs)}\n signature_outputs = {key: tf.saved_model.utils.build_tensor_info(tensor)\n for key, tensor in six.iteritems(output_fetch_tensors)}\n signature_def_map = {\n 'serving_default':\n signature_def_utils.build_signature_def(\n signature_inputs,\n signature_outputs,\n tf.saved_model.signature_constants.PREDICT_METHOD_NAME)}\n\n if not checkpoint_path:\n # Locate the latest checkpoint\n checkpoint_path = saver.latest_checkpoint(estimator._model_dir)\n if not checkpoint_path:\n raise ValueError(\"Couldn't find trained model at %s.\"\n % estimator._model_dir)\n\n export_dir = saved_model_export_utils.get_timestamped_export_dir(\n export_dir_base)\n\n if (model_fn_ops.scaffold is not None and\n model_fn_ops.scaffold.saver is not None):\n saver_for_restore = model_fn_ops.scaffold.saver\n else:\n saver_for_restore = saver.Saver(sharded=True)\n\n with tf_session.Session('') as session:\n saver_for_restore.restore(session, checkpoint_path)\n init_op = control_flow_ops.group(\n variables.local_variables_initializer(),\n resources.initialize_resources(resources.shared_resources()),\n tf.tables_initializer())\n\n # Perform the export\n builder = saved_model_builder.SavedModelBuilder(export_dir)\n builder.add_meta_graph_and_variables(\n session, [tag_constants.SERVING],\n signature_def_map=signature_def_map,\n assets_collection=ops.get_collection(\n ops.GraphKeys.ASSET_FILEPATHS),\n legacy_init_op=init_op)\n builder.save(False)\n\n # Add the extra assets\n if assets_extra:\n assets_extra_path = os.path.join(compat.as_bytes(export_dir),\n compat.as_bytes('assets.extra'))\n for dest_relative, source in assets_extra.items():\n dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),\n compat.as_bytes(dest_relative))\n dest_path = os.path.dirname(dest_absolute)\n file_io.recursive_create_dir(dest_path)\n file_io.copy(source, dest_absolute)\n\n # only keep the last 3 models\n saved_model_export_utils.garbage_collect_exports(\n export_dir_base,\n exports_to_keep=3)\n\n # save the last model to the model folder.\n # export_dir_base = A/B/intermediate_models/\n if keep_target:\n final_dir = os.path.join(args.job_dir, 'evaluation_model')\n else:\n final_dir = os.path.join(args.job_dir, 'model')\n if file_io.is_directory(final_dir):\n file_io.delete_recursively(final_dir)\n 
file_io.recursive_create_dir(final_dir)\n recursive_copy(export_dir, final_dir)\n\n return export_dir\n\n if keep_target:\n intermediate_dir = 'intermediate_evaluation_models'\n else:\n intermediate_dir = 'intermediate_prediction_models'\n\n return export_strategy.ExportStrategy(intermediate_dir, export_fn)", "docstring": "Makes prediction graph that takes json input.\n\nArgs:\n args: command line args\n keep_target: If ture, target column is returned in prediction graph. Target\n column must also exist in input data\n assets_extra: other fiels to copy to the output folder\n job_dir: root job folder\n features: features dict\n schema: schema list\n stats: stats dict", "source": "juraj_google_style"} -{"code": "def get(cls, keyval, key='id', user_id=None):\n\n if keyval is None:\n return None\n if (key in cls.__table__.columns\n and cls.__table__.columns[key].primary_key):\n # if user_id and hasattr(cls, 'user_id'):\n # return cls.query.filter_by(id=keyval, user_id=user_id).first()\n return cls.query.get(keyval)\n else:\n result = cls.query.filter(\n getattr(cls, key) == keyval)\n # if user_id and hasattr(cls, 'user_id'):\n # result = result.filter(cls.user_id == user_id)\n return result.first()", "docstring": "Fetches a single instance which has value `keyval`\n for the attribute `key`.\n\nArgs:\n keyval: The value of the attribute.\n\n key (str, optional): The attribute to search by. By default,\n it is 'id'.\n\nReturns:\n A model instance if found. Else None.\n\nExample:\n >>> User.get(35)\n user35@i.com\n\n >>> User.get('user35@i.com', key='email')\n user35@i.com", "source": "juraj_google_style"} -{"code": "def merge_csv(filenames: List[str],\n outfile: TextIO = sys.stdout,\n input_dialect: str = 'excel',\n output_dialect: str = 'excel',\n debug: bool = False,\n headers: bool = True) -> None:\n\n writer = csv.writer(outfile, dialect=output_dialect)\n written_header = False\n header_items = [] # type: List[str]\n for filename in filenames:\n log.info(\"Processing file \" + repr(filename))\n with open(filename, 'r') as f:\n reader = csv.reader(f, dialect=input_dialect)\n if headers:\n if not written_header:\n header_items = next(reader)\n if debug:\n log.debug(\"Header row: {!r}\", header_items)\n writer.writerow(header_items)\n written_header = True\n else:\n new_headers = next(reader)\n if new_headers != header_items:\n raise ValueError(\n \"Header line in file {filename} doesn't match - \"\n \"it was {new} but previous was {old}\".format(\n filename=repr(filename),\n new=repr(new_headers),\n old=repr(header_items),\n ))\n if debug:\n log.debug(\"Header row matches previous\")\n else:\n if debug:\n log.debug(\"No headers in use\")\n for row in reader:\n if debug:\n log.debug(\"Data row: {!r}\", row)\n writer.writerow(row)", "docstring": "Amalgamate multiple CSV/TSV/similar files into one.\n\nArgs:\n filenames: list of filenames to process\n outfile: file-like object to write output to\n input_dialect: dialect of input files, as passed to ``csv.reader``\n output_dialect: dialect to write, as passed to ``csv.writer``\n debug: be verbose?\n headers: do the files have header lines?", "source": "juraj_google_style"} -{"code": "def render_text(text, preformatted=False):\n\n return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))", "docstring": "Return text formatted as a HTML\n\nArgs:\n text: the text to render\n preformatted: whether the text should be rendered as preformatted", "source": "juraj_google_style"} -{"code": "def 
get_layer_timing_signal_learned_1d(channels, layer, num_layers):\n\n shape = [num_layers, 1, 1, channels]\n layer_embedding = (\n tf.get_variable(\n \"layer_embedding\",\n shape,\n initializer=tf.random_normal_initializer(0, channels**-0.5)) *\n (channels**0.5))\n return layer_embedding[layer, :, :, :]", "docstring": "get n-dimensional embedding as the layer (vertical) timing signal.\n\n Adds embeddings to represent the position of the layer in the tower.\n\nArgs:\n channels: dimension of the timing signal\n layer: layer num\n num_layers: total number of layers\n\nReturns:\n a Tensor of timing signals [1, 1, channels].", "source": "juraj_google_style"} -{"code": "def flush(self, queue_name):\n\n for name in (queue_name, dq_name(queue_name)):\n self.do_purge(name)", "docstring": "Drop all the messages from a queue.\n\n Parameters:\n queue_name(str): The queue to flush.", "source": "juraj_google_style"} -{"code": "def error_messages(self, driver_id=None):\n\n if driver_id is not None:\n assert isinstance(driver_id, ray.DriverID)\n return self._error_messages(driver_id)\n\n error_table_keys = self.redis_client.keys(\n ray.gcs_utils.TablePrefix_ERROR_INFO_string + \"*\")\n driver_ids = [\n key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):]\n for key in error_table_keys\n ]\n\n return {\n binary_to_hex(driver_id): self._error_messages(\n ray.DriverID(driver_id))\n for driver_id in driver_ids\n }", "docstring": "Get the error messages for all drivers or a specific driver.\n\nArgs:\n driver_id: The specific driver to get the errors for. If this is\n None, then this method retrieves the errors for all drivers.\n\nReturns:\n A dictionary mapping driver ID to a list of the error messages for\n that driver.", "source": "juraj_google_style"} -{"code": "def create_output(self, key, value, variable_type=None):\n\n results = None\n if key is not None:\n key = key.strip()\n key_type = '{}-{}'.format(key, variable_type)\n if self.out_variables_type.get(key_type) is not None:\n # variable key-type has been requested\n v = self.out_variables_type.get(key_type)\n self.tcex.log.info(\n u'Variable {} was requested by downstream app.'.format(v.get('variable'))\n )\n if value is not None:\n results = self.create(v.get('variable'), value)\n else:\n self.tcex.log.info(\n u'Variable {} has a none value and will not be written.'.format(key)\n )\n elif self.out_variables.get(key) is not None and variable_type is None:\n # variable key has been requested\n v = self.out_variables.get(key)\n self.tcex.log.info(\n u'Variable {} was requested by downstream app.'.format(v.get('variable'))\n )\n if value is not None:\n results = self.create(v.get('variable'), value)\n else:\n self.tcex.log.info(\n u'Variable {} has a none value and will not be written.'.format(\n v.get('variable')\n )\n )\n else:\n var_value = key\n if variable_type is not None:\n var_value = key_type\n self.tcex.log.info(\n u'Variable {} was NOT requested by downstream app.'.format(var_value)\n )\n return results", "docstring": "Wrapper for Create method of CRUD operation for working with KeyValue DB.\n\n This method will automatically check to see if provided variable was requested by\n a downstream app and if so create the data in the KeyValue DB.\n\nArgs:\n key (string): The variable to write to the DB.\n value (any): The data to write to the DB.\n variable_type (string): The variable type being written.\n\nReturns:\n (string): Result string of DB write.", "source": "juraj_google_style"} -{"code": "def ToScriptHash(self, address):\n\n if len(address) 
== 34:\n if address[0] == 'A':\n data = b58decode(address)\n if data[0] != self.AddressVersion:\n raise ValueError('Not correct Coin Version')\n\n checksum = Crypto.Default().Hash256(data[:21])[:4]\n if checksum != data[21:]:\n raise Exception('Address format error')\n return UInt160(data=data[1:21])\n else:\n raise Exception('Address format error')\n else:\n raise ValueError('Not correct Address, wrong length.')", "docstring": "Retrieve the script_hash based from an address.\n\nArgs:\n address (str): a base58 encoded address.\n\nRaises:\n ValuesError: if an invalid address is supplied or the coin version is incorrect\n Exception: if the address string does not start with 'A' or the checksum fails\n\nReturns:\n UInt160: script hash.", "source": "juraj_google_style"} -{"code": "def include_revision(revision_num, skip_factor=1.1):\n\n if skip_factor <= 1.0:\n return True\n return (int(math.log1p(revision_num) / math.log(skip_factor)) != int(\n math.log(revision_num + 2.0) / math.log(skip_factor)))", "docstring": "Decide whether to include a revision.\n\n If the number of revisions is large, we exclude some revisions to avoid\n a quadratic blowup in runtime, since the article is likely also large.\n\n We make the ratio between consecutive included revision numbers\n appproximately equal to \"factor\".\n\nArgs:\n revision_num: an integer\n skip_factor: a floating point number >= 1.0\n\nReturns:\n a boolean", "source": "juraj_google_style"} -{"code": "def Decompress(self, compressed_data):\n\n try:\n uncompressed_data = self._zlib_decompressor.decompress(compressed_data)\n remaining_compressed_data = getattr(\n self._zlib_decompressor, 'unused_data', b'')\n\n except zlib.error as exception:\n raise errors.BackEndError((\n 'Unable to decompress zlib compressed stream with error: '\n '{0!s}.').format(exception))\n\n return uncompressed_data, remaining_compressed_data", "docstring": "Decompresses the compressed data.\n\nArgs:\n compressed_data (bytes): compressed data.\n\nReturns:\n tuple(bytes, bytes): uncompressed data and remaining compressed data.\n\nRaises:\n BackEndError: if the zlib compressed stream cannot be decompressed.", "source": "juraj_google_style"} -{"code": "def set_of_vars(arg_plot):\n\n sovs = set(tuple((var + '+').split('+')[:2])\n for var in arg_plot.split(','))\n sovs.discard(('', ''))\n return sovs", "docstring": "Build set of needed field variables.\n\n Each var is a tuple, first component is a scalar field, second component is\n either:\n\n - a scalar field, isocontours are added to the plot.\n - a vector field (e.g. 
'v' for the (v1,v2,v3) vector), arrows are added to\n the plot.\n\nArgs:\n arg_plot (str): string with variable names separated with\n ``,`` (figures), and ``+`` (same plot).\n\nReturns:\n set of str: set of needed field variables.", "source": "juraj_google_style"} -{"code": "def head(self, path=None, client_kwargs=None, header=None):\n\n if header is not None:\n return header\n elif client_kwargs is None:\n client_kwargs = self.get_client_kwargs(path)\n return self._head(client_kwargs)", "docstring": "Returns object HTTP header.\n\nArgs:\n path (str): Path or URL.\n client_kwargs (dict): Client arguments.\n header (dict): Object header.\n\nReturns:\n dict: HTTP header.", "source": "juraj_google_style"} -{"code": "def ColorWithLightness(self, lightness):\n\n h, s, l = self.__hsl\n return Color((h, s, lightness), 'hsl', self.__a, self.__wref)", "docstring": "Create a new instance based on this one with a new lightness value.\n\n Parameters:\n :lightness:\n The lightness of the new color [0...1].\n\nReturns:\n A grapefruit.Color instance.\n\n >>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25)\n (0.5, 0.25, 0.0, 1.0)\n >>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25).hsl\n (30, 1, 0.25)", "source": "juraj_google_style"} -{"code": "def validate_field(self, field_name: str) -> bool:\n\n if field_name in {\"@id\", \"@type\"}:\n return True\n result = self.schema.has_field(field_name)\n if not result:\n # todo: how to comply with our error handling policies?\n raise UndefinedFieldError(\"'{}' should be present in the knowledge graph schema.\".format(field_name))\n return result", "docstring": "Complain if a field in not in the schema\n\nArgs:\n field_name:\n\n Returns: True if the field is present.", "source": "juraj_google_style"} -{"code": "def get_precursor_mz(exact_mass, precursor_type):\n\n\n # these are just taken from what was present in the massbank .msp file for those missing the exact mass\n d = {'[M-H]-': -1.007276,\n '[M+H]+': 1.007276,\n '[M+H-H2O]+': 1.007276 - ((1.007276 * 2) + 15.9949)\n }\n\n try:\n\n return exact_mass + d[precursor_type]\n except KeyError as e:\n print(e)\n return False", "docstring": "Calculate precursor mz based on exact mass and precursor type\n\nArgs:\n exact_mass (float): exact mass of compound of interest\n precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+'\n\nReturns:\n neutral mass of compound", "source": "juraj_google_style"} -{"code": "def get_error_name(error):\n\n error_type = type(error)\n if error_type.__module__ in ['__main__', 'builtins']:\n return error_type.__name__\n else:\n return f'{error_type.__module__}.{error_type.__name__}'", "docstring": "Return canonical error name as string.\n\n For builtin errors like ValueError or Exception, will return the bare\n name, like ValueError or Exception.\n\n For all other exceptions, will return modulename.errorname, such as\n arbpackage.mod.myerror\n\nArgs:\n error: Exception object.\n\nReturns:\n str. 
Canonical error name.", "source": "juraj_google_style"} -{"code": "def inference(self, observed_arr):\n\n for i in range(len(self.__deconvolution_layer_list)):\n try:\n observed_arr = self.__deconvolution_layer_list[i].forward_propagate(observed_arr)\n except:\n self.__logger.debug(\"Error raised in Deconvolution layer \" + str(i + 1))\n raise\n\n return observed_arr", "docstring": "Draws samples from the `fake` distribution.\n\nArgs:\n observed_arr: `np.ndarray` of observed data points.\n\nReturns:\n `np.ndarray` of inferenced.", "source": "juraj_google_style"} -{"code": "def kill_redis(self, check_alive=True):\n\n self._kill_process_type(\n ray_constants.PROCESS_TYPE_REDIS_SERVER, check_alive=check_alive)", "docstring": "Kill the Redis servers.\n\nArgs:\n check_alive (bool): Raise an exception if any of the processes\n were already dead.", "source": "juraj_google_style"} -{"code": "def init_app(self, app):\n\n app.config.setdefault('FEDORA_BASE_URL', 'http://localhost:8080')\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n else:\n app.teardown_request(self.teardown)", "docstring": "Initializes a Flask app object for the extension.\n\nArgs:\n app(Flask): Flask app", "source": "juraj_google_style"} -{"code": "def get_by_identifier(self, identifier):\n\n\n params = {'identifier': identifier}\n\n return self.client.get(self._url(), params=params)", "docstring": "Gets blocks by identifier\n\nArgs:\n identifier (str): Should be any of: username, phone_number, email.\n\n See: https://auth0.com/docs/api/management/v2#!/User_Blocks/get_user_blocks", "source": "juraj_google_style"} -{"code": "def _SkipField(tokenizer):\n\n if tokenizer.TryConsume('['):\n # Consume extension name.\n tokenizer.ConsumeIdentifier()\n while tokenizer.TryConsume('.'):\n tokenizer.ConsumeIdentifier()\n tokenizer.Consume(']')\n else:\n tokenizer.ConsumeIdentifier()\n\n _SkipFieldContents(tokenizer)\n\n # For historical reasons, fields may optionally be separated by commas or\n # semicolons.\n if not tokenizer.TryConsume(','):\n tokenizer.TryConsume(';')", "docstring": "Skips over a complete field (name and value/message).\n\nArgs:\n tokenizer: A tokenizer to parse the field name and values.", "source": "juraj_google_style"} -{"code": "def wrap(tensor, books=None, tensor_shape=None):\n\n if books is None:\n books = bookkeeper.for_default_graph()\n if isinstance(tensor, PrettyTensor):\n return tensor.as_layer()\n elif isinstance(tensor, UnboundVariable):\n\n def set_input_from_unbound_var(data):\n\n if data is not None:\n return wrap(data, books)\n else:\n return None\n\n return _DeferredLayer(books, set_input_from_unbound_var, [tensor], {})\n else:\n tensor = tf.convert_to_tensor(tensor, name='input')\n if tensor_shape:\n _set_shape_on_tensor(tensor, tensor_shape)\n return Layer(books, tensor=tensor, name=tensor.name)", "docstring": "Creates an input layer representing the given tensor.\n\nArgs:\n tensor: The tensor.\n books: The bookkeeper; this is usually not required unless you are building\n multiple `tf.Graphs.`\n tensor_shape: An optional shape that will be set on the Tensor or verified\n to match the tensor.\n\nReturns:\n A layer.", "source": "juraj_google_style"} -{"code": "def reset(self, ms=0, halt=True):\n\n self._dll.JLINKARM_SetResetDelay(ms)\n\n res = self._dll.JLINKARM_Reset()\n if res < 0:\n raise errors.JLinkException(res)\n elif not halt:\n self._dll.JLINKARM_Go()\n\n return res", "docstring": "Resets the target.\n\n This method resets the target, and by default toggles 
the RESET and\n TRST pins.\n\nArgs:\n self (JLink): the ``JLink`` instance\n ms (int): Amount of milliseconds to delay after reset (default: 0)\n halt (bool): if the CPU should halt after reset (default: True)\n\nReturns:\n Number of bytes read.", "source": "juraj_google_style"} -{"code": "def AddForwardedIp(self, address, interface):\n\n for ip in list(netaddr.IPNetwork(address)):\n self._RunIfconfig(args=[interface, 'alias', '%s/32' % str(ip)])", "docstring": "Configure a new IP address on the network interface.\n\nArgs:\n address: string, the IP address to configure.\n interface: string, the output device to use.", "source": "juraj_google_style"} -{"code": "def __init__(self, access_token=None, rate_limit=True):\n\n super(Search, self).__init__()\n self.session = self.get_session(access_token=access_token,\n rate_limit=rate_limit)\n self._ignore_codes = []\n if rate_limit:\n self._ignore_codes.append(429)", "docstring": "Construct a Search object.\n\nArgs:\n access_token (str): A valid Companies House API. If an\n access token isn't specified then looks for *CompaniesHouseKey*\n or COMPANIES_HOUSE_KEY environment variables. Defaults to None.", "source": "juraj_google_style"} -{"code": "def fill_dataset_tree(self, tree, data_sets):\n\n\n tree.model().removeRows(0, tree.model().rowCount())\n for index, (time, script) in enumerate(data_sets.items()):\n name = script.settings['tag']\n type = script.name\n\n item_time = QtGui.QStandardItem(str(time))\n item_name = QtGui.QStandardItem(str(name))\n item_type = QtGui.QStandardItem(str(type))\n\n item_time.setSelectable(False)\n item_time.setEditable(False)\n item_type.setSelectable(False)\n item_type.setEditable(False)\n\n tree.model().appendRow([item_time, item_name, item_type])", "docstring": "fills the tree with data sets where datasets is a dictionary of the form\n\nArgs:\n tree:\n data_sets: a dataset\n\n Returns:", "source": "juraj_google_style"} -{"code": "def get_factors(n):\n\n\n def factor(n, i, combi, res):\n\n\n while i * i <= n:\n if n % i == 0:\n res += combi + [i, int(n/i)],\n factor(n/i, i, combi+[i], res)\n i += 1\n return res\n return factor(n, 2, [], [])", "docstring": "[summary]\n\nArgs:\n n {[int]} -- [to analysed number]\n\nReturns:\n [list of lists] -- [all factors of the number n]", "source": "juraj_google_style"} -{"code": "def _CreateRouteOptions(self, **kwargs):\n\n options = {\n 'proto': self.proto_id,\n 'scope': 'host',\n }\n options.update(kwargs)\n return options", "docstring": "Create a dictionary of parameters to append to the ip route command.\n\nArgs:\n **kwargs: dict, the string parameters to update in the ip route command.\n\nReturns:\n dict, the string parameters to append to the ip route command.", "source": "juraj_google_style"} -{"code": "def sync(self, since=None, timeout_ms=30000, filter=None,\n full_state=None, set_presence=None):\n\n\n request = {\n # non-integer timeouts appear to cause issues\n \"timeout\": int(timeout_ms)\n }\n\n if since:\n request[\"since\"] = since\n\n if filter:\n request[\"filter\"] = filter\n\n if full_state:\n request[\"full_state\"] = json.dumps(full_state)\n\n if set_presence:\n request[\"set_presence\"] = set_presence\n\n return self._send(\"GET\", \"/sync\", query_params=request,\n api_path=MATRIX_V2_API_PATH)", "docstring": "Perform a sync request.\n\nArgs:\n since (str): Optional. A token which specifies where to continue a sync from.\n timeout_ms (int): Optional. 
The time in milliseconds to wait.\n filter (int|str): Either a Filter ID or a JSON string.\n full_state (bool): Return the full state for every room the user has joined\n Defaults to false.\n set_presence (str): Should the client be marked as \"online\" or\" offline\"", "source": "juraj_google_style"} -{"code": "def save(self, filename):\n\n with open(filename, 'w') as outfile:\n json.dump(self.to_json(), outfile)", "docstring": "Writes the JSON representation of this graph to the provided\n filename, such that the graph can be easily reconstructed using\n Graph(spec=filename).\n\nArgs:\n filename (str): Path at which to write out the json file.", "source": "juraj_google_style"} -{"code": "def section(self, section):\n\n if not isinstance(self._container, ConfigUpdater):\n raise ValueError(\"Sections can only be added at section level!\")\n if isinstance(section, str):\n # create a new section\n section = Section(section, container=self._container)\n elif not isinstance(section, Section):\n raise ValueError(\"Parameter must be a string or Section type!\")\n if section.name in [block.name for block in self._container\n if isinstance(block, Section)]:\n raise DuplicateSectionError(section.name)\n self._container.structure.insert(self._idx, section)\n self._idx += 1\n return self", "docstring": "Creates a section block\n\nArgs:\n section (str or :class:`Section`): name of section or object\n\nReturns:\n self for chaining", "source": "juraj_google_style"} -{"code": "def read_requirements(req_file):\n\n items = list(parse_requirements(req_file, session={}))\n result = []\n\n for item in items:\n # Get line number from item\n line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]\n if item.req:\n item.req.marker = item.markers\n result.append((item.req, line_number))\n else:\n result.append((item, line_number))\n\n return result", "docstring": "Reads a requirements file.\n\nArgs:\n req_file (str): Filename of requirements file", "source": "juraj_google_style"} -{"code": "def _CheckPythonModule(self, dependency):\n\n module_object = self._ImportPythonModule(dependency.name)\n if not module_object:\n status_message = 'missing: {0:s}'.format(dependency.name)\n return False, status_message\n\n if not dependency.version_property:\n return True, dependency.name\n\n return self._CheckPythonModuleVersion(\n dependency.name, module_object, dependency.version_property,\n dependency.minimum_version, dependency.maximum_version)", "docstring": "Checks the availability of a Python module.\n\nArgs:\n dependency (DependencyDefinition): dependency definition.\n\nReturns:\n tuple: consists:\n\n bool: True if the Python module is available and conforms to\n the minimum required version, False otherwise.\n str: status message.", "source": "juraj_google_style"} -{"code": "def login(self, username=None, password=None, android_id=None):\n\n\n\t\tcls_name = type(self).__name__\n\n\t\tif username is None:\n\t\t\tusername = input(\"Enter your Google username or email address: \")\n\n\t\tif password is None:\n\t\t\tpassword = getpass.getpass(\"Enter your Google Music password: \")\n\n\t\tif android_id is None:\n\t\t\tandroid_id = Mobileclient.FROM_MAC_ADDRESS\n\n\t\ttry:\n\t\t\tself.api.login(username, password, android_id)\n\t\texcept OSError:\n\t\t\tlogger.exception(\"{} authentication failed.\".format(cls_name))\n\n\t\tif not self.is_authenticated:\n\t\t\tlogger.warning(\"{} authentication failed.\".format(cls_name))\n\n\t\t\treturn False\n\n\t\tlogger.info(\"{} authentication 
succeeded.\\n\".format(cls_name))\n\n\t\treturn True", "docstring": "Authenticate the gmusicapi Mobileclient instance.\n\n Parameters:\n username (Optional[str]): Your Google Music username. Will be prompted if not given.\n\n password (Optional[str]): Your Google Music password. Will be prompted if not given.\n\n android_id (Optional[str]): The 16 hex digits from an Android device ID.\n Default: Use gmusicapi.Mobileclient.FROM_MAC_ADDRESS to create ID from computer's MAC address.\n\nReturns:\n ``True`` on successful login or ``False`` on unsuccessful login.", "source": "juraj_google_style"} -{"code": "def add_data(self, data):\n\n if not self._data:\n self._data = {}\n self._data.update(data)", "docstring": "Add POST data.\n\nArgs:\n data (dict): key => value dictionary", "source": "juraj_google_style"} -{"code": "def extractUnits(self, inp):\n\n inp = self._preprocess(inp)\n\n units = []\n description = \"\"\n for w in inp.split(' '):\n if self.isValidUnit(w) or w == '/':\n if description:\n description += \" \"\n description += w\n else:\n if description:\n units.append(description)\n description = \"\"\n\n if description:\n units.append(description)\n return units", "docstring": "Collects all the valid units from an inp string. Works by\n appending consecutive words from the string and cross-referncing\n them with a set of valid units.\n\nArgs:\n inp (str): Some text which hopefully contains descriptions\n of different units.\n\nReturns:\n A list of strings, each entry in which is a valid quantities\n unit.", "source": "juraj_google_style"} -{"code": "def to_dataframe(self, start_row=0, max_rows=None):\n\n fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)\n count = 0\n page_token = None\n df = None\n while True:\n page_rows, page_token = fetcher(page_token, count)\n if len(page_rows):\n count += len(page_rows)\n if df is None:\n df = pandas.DataFrame.from_records(page_rows)\n else:\n df = df.append(page_rows, ignore_index=True)\n if not page_token:\n break\n\n # Need to reorder the dataframe to preserve column ordering\n ordered_fields = [field.name for field in self.schema]\n return df[ordered_fields] if df is not None else pandas.DataFrame()", "docstring": "Exports the table to a Pandas dataframe.\n\nArgs:\n start_row: the row of the table at which to start the export (default 0)\n max_rows: an upper limit on the number of rows to export (default None)\n\nReturns:\n A Pandas dataframe containing the table data.", "source": "juraj_google_style"} -{"code": "def set_string(self, option, value):\n\n if not isinstance(value, str):\n raise TypeError(\"%s must be a string\" % option)\n\n self.options[option] = value", "docstring": "Set a string option.\n\nArgs:\n option (str): name of option.\n value (str): value of the option.\n\nRaises:\n TypeError: Value must be a string.", "source": "juraj_google_style"} -{"code": "def delete(self, version_name):\n\n name = ('%s/versions/%s' % (self._full_model_name, version_name))\n response = self._api.projects().models().versions().delete(name=name).execute()\n if 'name' not in response:\n raise Exception('Invalid response from service. 
\"name\" is not found.')\n _util.wait_for_long_running_operation(response['name'])", "docstring": "Delete a version of model.\n\nArgs:\n version_name: the name of the version in short form, such as \"v1\".", "source": "juraj_google_style"} -{"code": "def is_coord_subset(subset, superset, atol=1e-8):\n\n c1 = np.array(subset)\n c2 = np.array(superset)\n is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)\n any_close = np.any(is_close, axis=-1)\n return np.all(any_close)", "docstring": "Tests if all coords in subset are contained in superset.\n Doesn't use periodic boundary conditions\n\nArgs:\n subset, superset: List of coords\n\nReturns:\n True if all of subset is in superset.", "source": "juraj_google_style"} -{"code": "def filter(self, predicates):\n\n tys = []\n for col_name, raw_column in self.raw_columns.items():\n dtype = str(raw_column.dtype)\n if dtype == 'object' or dtype == '|S64':\n weld_type = WeldVec(WeldChar())\n else:\n weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]\n tys.append(weld_type)\n\n if len(tys) == 1:\n weld_type = tys[0]\n else:\n weld_type = WeldStruct(tys)\n\n if isinstance(predicates, SeriesWeld):\n predicates = predicates.expr\n\n return DataFrameWeldExpr(\n grizzly_impl.filter(\n grizzly_impl.zip_columns(\n self.raw_columns.values(),\n ),\n predicates\n ),\n self.raw_columns.keys(),\n weld_type\n )", "docstring": "Summary\n\nArgs:\n grouping_column_name (TYPE): Description\n\nReturns:\n TYPE: Description", "source": "juraj_google_style"} -{"code": "def get_device_name(self, cached=True):\n\n if cached and self.name is not None:\n return self.name\n\n device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)\n if device_name is None:\n logger.warn('Failed to find handle for device name')\n return None\n\n self.name = self.dongle._read_attribute(self.conn_handle, device_name)\n return self.name", "docstring": "Returns the SK8 device BLE name.\n\nArgs:\n cached (bool): if True, returns the locally cached copy of the name. If this is\n set to False, or the name is not cached, it will read from the device instead.\n\nReturns:\n str. The current device name. 
May be `None` if an error occurs.", "source": "juraj_google_style"} -{"code": "def compose(*parameter_functions):\n\n def composed_fn(var_name, variable, phase):\n for fn in parameter_functions:\n variable = fn(var_name, variable, phase)\n return variable\n return composed_fn", "docstring": "Composes multiple modification functions in order.\n\nArgs:\n *parameter_functions: The functions to compose.\n\nReturns:\n A parameter modification function that consists of applying all the provided\n functions.", "source": "juraj_google_style"} -{"code": "def unpack(self, fmt, length=1):\n\n return struct.unpack(fmt, self.stream.read(length))[0]", "docstring": "Unpack the stream contents according to the specified format in `fmt`.\n For more information about the `fmt` format see: https://docs.python.org/3/library/struct.html\n\nArgs:\n fmt (str): format string.\n length (int): amount of bytes to read.\n\nReturns:\n variable: the result according to the specified format.", "source": "juraj_google_style"} -{"code": "def bottleneck_block_v1(cnn, depth, depth_bottleneck, stride):\n\n input_layer = cnn.top_layer\n in_size = cnn.top_size\n name_key = \"resnet_v1\"\n name = name_key + str(cnn.counts[name_key])\n cnn.counts[name_key] += 1\n\n with tf.variable_scope(name):\n if depth == in_size:\n if stride == 1:\n shortcut = input_layer\n else:\n shortcut = cnn.apool(\n 1,\n 1,\n stride,\n stride,\n input_layer=input_layer,\n num_channels_in=in_size)\n else:\n shortcut = cnn.conv(\n depth,\n 1,\n 1,\n stride,\n stride,\n activation=None,\n use_batch_norm=True,\n input_layer=input_layer,\n num_channels_in=in_size,\n bias=None)\n cnn.conv(\n depth_bottleneck,\n 1,\n 1,\n stride,\n stride,\n input_layer=input_layer,\n num_channels_in=in_size,\n use_batch_norm=True,\n bias=None)\n cnn.conv(\n depth_bottleneck,\n 3,\n 3,\n 1,\n 1,\n mode=\"SAME_RESNET\",\n use_batch_norm=True,\n bias=None)\n res = cnn.conv(\n depth, 1, 1, 1, 1, activation=None, use_batch_norm=True, bias=None)\n output = tf.nn.relu(shortcut + res)\n cnn.top_layer = output\n cnn.top_size = depth", "docstring": "Bottleneck block with identity short-cut for ResNet v1.\n\nArgs:\n cnn: the network to append bottleneck blocks.\n depth: the number of output filters for this bottleneck block.\n depth_bottleneck: the number of bottleneck filters for this block.\n stride: Stride used in the first layer of the bottleneck block.", "source": "juraj_google_style"} -{"code": "def _parse_package(cls, package_string):\n\n pkg, arch = rsplit(package_string, cls._arch_sep(package_string))\n if arch not in KNOWN_ARCHITECTURES:\n pkg, arch = (package_string, None)\n pkg, release = rsplit(pkg, '-')\n name, version = rsplit(pkg, '-')\n epoch, version = version.split(':', 1) if \":\" in version else ['0', version]\n # oracleasm packages have a dash in their version string, fix that\n if name.startswith('oracleasm') and name.endswith('.el5'):\n name, version2 = name.split('-', 1)\n version = version2 + '-' + version\n return {\n 'name': name,\n 'version': version,\n 'release': release,\n 'arch': arch,\n 'epoch': epoch\n }", "docstring": "Helper method for parsing package string.\n\nArgs:\n package_string (str): dash separated package string such as 'bash-4.2.39-3.el7'\n\nReturns:\n dict: dictionary containing 'name', 'version', 'release' and 'arch' keys", "source": "juraj_google_style"} -{"code": "def GetMessages(self, files):\n\n\n result = {}\n for f in files:\n result.update(self._symbols_by_file[f])\n return result", "docstring": "Gets all the messages from a 
specified file.\n\n This will find and resolve dependencies, failing if they are not registered\n in the symbol database.\n\nArgs:\n files: The file names to extract messages from.\n\nReturns:\n A dictionary mapping proto names to the message classes. This will include\n any dependent messages as well as any messages defined in the same file as\n a specified message.\n\nRaises:\n KeyError: if a file could not be found.", "source": "juraj_google_style"} -{"code": "def of_cte(cls, header: Optional[ContentTransferEncodingHeader]) \\\n -> 'MessageDecoder':\n\n if header is None:\n return _NoopDecoder()\n hdr_str = str(header).lower()\n custom = cls.registry.get(hdr_str)\n if custom is not None:\n return custom\n elif hdr_str in ('7bit', '8bit'):\n return _NoopDecoder()\n elif hdr_str == 'quoted-printable':\n return _QuotedPrintableDecoder()\n elif hdr_str == 'base64':\n return _Base64Decoder()\n else:\n raise NotImplementedError(hdr_str)", "docstring": "Return a decoder from the CTE header value.\n\n There is built-in support for ``7bit``, ``8bit``, ``quoted-printable``,\n and ``base64`` CTE header values. Decoders can be added or overridden\n with the :attr:`.registry` dictionary.\n\nArgs:\n header: The CTE header value.", "source": "juraj_google_style"} -{"code": "def test_string(self, string: str) -> bool:\n\n if self.input.startswith(string, self.offset):\n self.offset += len(string)\n return True\n return False", "docstring": "If `string` comes next, return ``True`` and advance offset.\n\nArgs:\n string: string to test", "source": "juraj_google_style"} -{"code": "def AddLabels(self, labels):\n\n for label in labels:\n if not self._VALID_LABEL_REGEX.match(label):\n raise ValueError((\n 'Unsupported label: \"{0:s}\". A label must only consist of '\n 'alphanumeric characters or underscores.').format(label))\n\n for label in labels:\n if label not in self.labels:\n self.labels.append(label)", "docstring": "Adds labels to the event tag.\n\nArgs:\n labels (list[str]): labels.\n\nRaises:\n ValueError: if a label is malformed.", "source": "juraj_google_style"} -{"code": "def _ParsePage(self, parser_mediator, file_offset, page_data):\n\n page_header_map = self._GetDataTypeMap('binarycookies_page_header')\n\n try:\n page_header = self._ReadStructureFromByteStream(\n page_data, file_offset, page_header_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError((\n 'Unable to map page header data at offset: 0x{0:08x} with error: '\n '{1!s}').format(file_offset, exception))\n\n for record_offset in page_header.offsets:\n if parser_mediator.abort:\n break\n\n self._ParseRecord(parser_mediator, page_data, record_offset)", "docstring": "Parses a page.\n\nArgs:\n parser_mediator (ParserMediator): parser mediator.\n file_offset (int): offset of the data relative from the start of\n the file-like object.\n page_data (bytes): page data.\n\nRaises:\n ParseError: when the page cannot be parsed.", "source": "juraj_google_style"} -{"code": "def to_image(dataset):\n\n dataset = dataset.squeeze()\n if dataset.ndim < 2:\n raise ValueError(\"Need at least a 2D array to make an image.\")\n else:\n return XRImage(dataset)", "docstring": "convert ``dataset`` into a :class:`~trollimage.xrimage.XRImage` instance.\n\n Convert the ``dataset`` into an instance of the\n :class:`~trollimage.xrimage.XRImage` class. This function makes no other\n changes. 
To get an enhanced image, possibly with overlays and decoration,\n see :func:`~get_enhanced_image`.\n\nArgs:\n dataset (xarray.DataArray): Data to be converted to an image.\n\nReturns:\n Instance of :class:`~trollimage.xrimage.XRImage`.", "source": "juraj_google_style"} -{"code": "def username(self, value):\n\n self._username = value\n self._connectionXML.set('username', value)", "docstring": "Set the connection's username property.\n\nArgs:\n value: New username value. String.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def open(self, mode=None):\n\n\n if mode is None:\n mode = self.mode\n elif mode not in ['r', 'w', 'a']:\n raise ValueError('Invalid mode! Modes: [\\'a\\', \\'r\\', \\'w\\']')\n\n if self._file is None:\n self._file = h5py.File(self.path, mode=mode)", "docstring": "Open the container file.\n\nArgs:\n mode (str): Either 'r' for read-only, 'w' for truncate and write or\n 'a' for append. (default: 'a').\n If ``None``, uses ``self.mode``.", "source": "juraj_google_style"} -{"code": "def overlay(self, dimensions=None, **kwargs):\n\n dimensions = self._valid_dimensions(dimensions)\n if len(dimensions) == self.ndims:\n with item_check(False):\n return NdOverlay(self, **kwargs).reindex(dimensions)\n else:\n dims = [d for d in self.kdims if d not in dimensions]\n return self.groupby(dims, group_type=NdOverlay, **kwargs)", "docstring": "Group by supplied dimension(s) and overlay each group\n\n Groups data by supplied dimension(s) overlaying the groups\n along the dimension(s).\n\nArgs:\n dimensions: Dimension(s) of dimensions to group by\n\nReturns:\n NdOverlay object(s) with supplied dimensions", "source": "juraj_google_style"} -{"code": "def print_stats(self, reset=True):\n\n if not self.ncalls:\n return\n\n stats = self.stats\n code = self.fn.__code__\n print('--- Function Profiling ---')\n print('File \"{}\", line {}, function {}'.format(\n code.co_filename,\n code.co_firstlineno,\n self.fn.__name__))\n stats.sort_stats(*self.sort_keys)\n stats.print_stats(*self.print_restrictions)\n print('--------------------------')\n if reset:\n self.reset_stats()", "docstring": "Manually print profiling result.\n\nArgs:\n reset (bool): If False is specified, the profiling statistics so\n far is maintained. 
If ``True`` (default),\n :obj:`~reset_stats`\n is called to reset the profiling statistics.", "source": "juraj_google_style"} -{"code": "def reduce_logsumexp(x, reduced_dim, extra_logit=None, name=None):\n\n reduced_dim = convert_to_dimension(reduced_dim)\n with tf.variable_scope(name, default_name=\"reduce_logsumexp\"):\n reduced_shape = x.shape - reduced_dim\n max_logit = reduce_max(stop_gradient(x), output_shape=reduced_shape)\n if extra_logit is not None:\n if isinstance(extra_logit, Tensor):\n extra_logit = stop_gradient(extra_logit)\n max_logit = maximum(max_logit, extra_logit)\n x -= max_logit\n exp_x = exp(x)\n sum_exp_x = reduce_sum(exp_x, output_shape=reduced_shape)\n if extra_logit is not None:\n sum_exp_x += exp(extra_logit - max_logit)\n return log(sum_exp_x) + max_logit", "docstring": "Numerically stable version of log(reduce_sum(exp(x))).\n\n Unlike other reductions, the output has the same shape as the input.\n Note: with a minor change, we could allow multiple reduced dimensions.\n\nArgs:\n x: a Tensor\n reduced_dim: a dimension in x\n extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim)\n name: an optional string\n\nReturns:\n a Tensor with the same shape and dtype as x.", "source": "juraj_google_style"} -{"code": "def from_csv(cls, filename: str):\n\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=unicode2str(\",\"),\n quotechar=unicode2str(\"\\\"\"),\n quoting=csv.QUOTE_MINIMAL)\n entries = list()\n header_read = False\n elements = None\n for row in reader:\n if not header_read:\n elements = row[1:(len(row) - 1)]\n header_read = True\n else:\n name = row[0]\n energy = float(row[-1])\n comp = dict()\n for ind in range(1, len(row) - 1):\n if float(row[ind]) > 0:\n comp[Element(elements[ind - 1])] = float(row[ind])\n entries.append(PDEntry(Composition(comp), energy, name))\n return cls(entries)", "docstring": "Imports PDEntries from a csv.\n\nArgs:\n filename: Filename to import from.\n\nReturns:\n List of Elements, List of PDEntries", "source": "juraj_google_style"} -{"code": "def coalescence_times(self, backward=True):\n\n if not isinstance(backward, bool):\n raise TypeError(\"backward must be a bool\")\n for dist in sorted((d for n,d in self.distances_from_root() if len(n.children) > 1), reverse=backward):\n yield dist", "docstring": "Generator over the times of successive coalescence events\n\nArgs:\n ``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``", "source": "juraj_google_style"} -{"code": "def add_cohp(self, label, cohp):\n\n energies = cohp.energies - cohp.efermi if self.zero_at_efermi \\\n else cohp.energies\n populations = cohp.get_cohp()\n int_populations = cohp.get_icohp()\n self._cohps[label] = {\"energies\": energies, \"COHP\": populations,\n \"ICOHP\": int_populations, \"efermi\": cohp.efermi}", "docstring": "Adds a COHP for plotting.\n\nArgs:\n label: Label for the COHP. 
Must be unique.\n\n cohp: COHP object.", "source": "juraj_google_style"} -{"code": "def update_video(self, video_id, title=\"\", description=\"\", keywords=\"\", access_control=AccessControl.Unlisted):\n\n\n # Raise ApiError if not authenticated\n if not self.authenticated:\n raise ApiError(_(\"Authentication is required\"))\n\n entry = self.fetch_video(video_id)\n\n # Set Access Control\n extension = self._access_control(access_control)\n if extension:\n entry.extension_elements = extension\n\n if title:\n entry.media.title.text = title\n\n if description:\n entry.media.description.text = description\n\n #if keywords:\n # entry.media.keywords.text = keywords\n\n success = Api.yt_service.UpdateVideoEntry(entry)\n return success", "docstring": "Updates the video\n\n Authentication is required\n\n Params:\n entry: video entry fetch via 'fetch_video()'\n title: string\n description: string\n keywords: string\n\nReturns:\n a video entry on success\n None otherwise", "source": "juraj_google_style"} -{"code": "def _update_seek(self, offset, whence):\n\n with self._seek_lock:\n if whence == SEEK_SET:\n self._seek = offset\n elif whence == SEEK_CUR:\n self._seek += offset\n elif whence == SEEK_END:\n self._seek = offset + self._size\n else:\n raise ValueError('whence value %s unsupported' % whence)\n return self._seek", "docstring": "Update seek value.\n\nArgs:\n offset (int): Offset.\n whence (int): Whence.\n\nReturns:\n int: Seek position.", "source": "juraj_google_style"} -{"code": "def sils_cut(T,f,c,d,h):\n\n Ts = range(1,T+1)\n\n model = sils(T,f,c,d,h)\n y,x,I = model.data\n\n # relax integer variables\n for t in Ts:\n y[t].vtype = \"C\"\n\n # compute D[i,j] = sum_{t=i}^j d[t]\n D = {}\n for t in Ts:\n s = 0\n for j in range(t,T+1):\n s += d[j]\n D[t,j] = s\n\n EPS = 1.e-6\n cuts = True\n while cuts:\n model.optimize()\n cuts = False\n for ell in Ts:\n lhs = 0\n S,L = [],[]\n for t in range(1,ell+1):\n yt = model.getVal(y[t])\n xt = model.getVal(x[t])\n if D[t,ell]*yt < xt:\n S.append(t)\n lhs += D[t,ell]*yt\n else:\n L.append(t)\n lhs += xt\n if lhs < D[1,ell]:\n # add cutting plane constraint\n model.addCons(quicksum([x[t] for t in L]) +\\\n quicksum(D[t,ell] * y[t] for t in S)\n >= D[1,ell])\n cuts = True\n\n model.data = y,x,I\n return model", "docstring": "solve_sils -- solve the lot sizing problem with cutting planes\n - start with a relaxed model\n - add cuts until there are no fractional setup variables\n Parameters:\n - T: number of periods\n - P: set of products\n - f[t]: set-up costs (on period t)\n - c[t]: variable costs\n - d[t]: demand values\n - h[t]: holding costs\n Returns the final model solved, with all necessary cuts added.", "source": "juraj_google_style"} -{"code": "def get_value(value_proto):\n\n field = value_proto.WhichOneof('value_type')\n if field in __native_value_types:\n return getattr(value_proto, field)\n if field == 'timestamp_value':\n return from_timestamp(value_proto.timestamp_value)\n if field == 'array_value':\n return [get_value(sub_value)\n for sub_value in value_proto.array_value.values]\n return None", "docstring": "Gets the python object equivalent for the given value proto.\n\nArgs:\n value_proto: datastore.Value proto message.\n\nReturns:\n the corresponding python object value. 
timestamps are converted to\n datetime, and datastore.Value is returned for blob_key_value.", "source": "juraj_google_style"} -{"code": "def renderJsonReadsSince(self, timestamp, meter):\n\n result = \"\"\n try:\n connection = sqlite3.connect(self.m_connection_string)\n connection.row_factory = self.dict_factory\n select_cursor = connection.cursor()\n select_cursor.execute(\"select * from Meter_Reads where \" + Field.Time_Stamp +\n \" > \" + str(timestamp) + \" and \" + Field.Meter_Address +\n \"= '\" + meter + \"';\")\n reads = select_cursor.fetchall()\n result = json.dumps(reads, indent=4)\n\n except:\n ekm_log(traceback.format_exc(sys.exc_info()))\n return result", "docstring": "Simple since Time_Stamp query returned as JSON records.\n\nArgs:\n timestamp (int): Epoch time in seconds.\n meter (str): 12 character meter address to query\n\nReturns:\n str: JSON rendered read records.", "source": "juraj_google_style"} -{"code": "def AddEventSource(self, event_source):\n\n self._RaiseIfNotWritable()\n\n event_source = self._PrepareAttributeContainer(event_source)\n\n self._event_sources.append(event_source)\n self.number_of_event_sources += 1", "docstring": "Adds an event source.\n\nArgs:\n event_source (EventSource): event source.\n\nRaises:\n IOError: when the storage writer is closed.\n OSError: when the storage writer is closed.", "source": "juraj_google_style"} -{"code": "def auto_repr(obj: Any, with_addr: bool = False,\n sort_attrs: bool = True, joiner: str = COMMA_SPACE) -> str:\n\n if sort_attrs:\n keys = sorted(obj.__dict__.keys())\n else:\n keys = obj.__dict__.keys()\n elements = [\"{}={}\".format(k, repr(getattr(obj, k))) for k in keys]\n return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)", "docstring": "Convenience function for :func:`__repr__`.\n Works its way through the object's ``__dict__`` and reports accordingly.\n\nArgs:\n obj: object to display\n with_addr: include the memory address of ``obj``\n sort_attrs: sort the attributes into alphabetical order?\n joiner: string with which to join the elements\n\nReturns:\n string: :func:`repr`-style representation", "source": "juraj_google_style"} -{"code": "def _raise_if_annotated(self, func):\n\n if hasattr(func, ANNOTATED) and getattr(func, ANNOTATED):\n msg = ('Functions decorated with {!r} '\n 'should not be decorated with {!r}.\\n'\n 'Please reverse the order of the decorators!'.format(\n self.__class__.__name__, Annotate.__name__))\n raise TypeError(msg)", "docstring": "Raise TypeError if a function is decorated with Annotate, as such\n functions cause visual bugs when decorated with Animate.\n\n Animate should be wrapped by Annotate instead.\n\nArgs:\n func (function): Any callable.\n\nRaises:\n TypeError", "source": "juraj_google_style"} -{"code": "def calculate_checksum_on_bytes(\n b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM\n):\n\n checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm)\n checksum_calc.update(b)\n return checksum_calc.hexdigest()", "docstring": "Calculate the checksum of ``bytes``.\n\n Warning: This method requires the entire object to be buffered in (virtual) memory,\n which should normally be avoided in production code.\n\nArgs:\n b: bytes\n Raw bytes\n\n algorithm: str\n Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.\n\nReturns:\n str : Checksum as a hexadecimal string, with length decided by the algorithm.", "source": "juraj_google_style"} -{"code": "def show_events(self, status=None, nids=None):\n\n nrows, ncols = get_terminal_size()\n\n for task 
in self.iflat_tasks(status=status, nids=nids):\n report = task.get_event_report()\n if report:\n print(make_banner(str(task), width=ncols, mark=\"=\"))\n print(report)", "docstring": "Print the Abinit events (ERRORS, WARNIING, COMMENTS) to stdout\n\nArgs:\n status: if not None, only the tasks with this status are select\n nids: optional list of node identifiers used to filter the tasks.", "source": "juraj_google_style"} -{"code": "def setAvatar(self, image):\n\n self.conn(\"PUT\", \"{0}/users/{1}/profile/avatar\".format(SkypeConnection.API_USER, self.userId),\n auth=SkypeConnection.Auth.SkypeToken, data=image.read())", "docstring": "Update the profile picture for the current user.\n\nArgs:\n image (file): a file-like object to read the image from", "source": "juraj_google_style"} -{"code": "def build_album_art_full_uri(self, url):\n\n # Add on the full album art link, as the URI version\n # does not include the ipaddress\n if not url.startswith(('http:', 'https:')):\n url = 'http://' + self.soco.ip_address + ':1400' + url\n return url", "docstring": "Ensure an Album Art URI is an absolute URI.\n\nArgs:\n url (str): the album art URI.\n\nReturns:\n str: An absolute URI.", "source": "juraj_google_style"} -{"code": "def _handle_http_errors(response):\n\n code = response.status_code\n if 200 <= code < 400:\n return response\n elif code in (403, 404):\n raise {403: _ObjectPermissionError,\n 404: _ObjectNotFoundError}[code](response.reason)\n response.raise_for_status()", "docstring": "Check for HTTP errors and raise\n OSError if relevant.\n\nArgs:\n response (requests.Response):\n\nReturns:\n requests.Response: response", "source": "juraj_google_style"} -{"code": "def _ParseRecords(self, parser_mediator, evtx_file):\n\n # To handle errors when parsing a Windows XML EventLog (EVTX) file in the\n # most granular way the following code iterates over every event record.\n # The call to evt_file.get_record() and access to members of evt_record\n # should be called within a try-except.\n\n for record_index in range(evtx_file.number_of_records):\n if parser_mediator.abort:\n break\n\n try:\n evtx_record = evtx_file.get_record(record_index)\n self._ParseRecord(parser_mediator, record_index, evtx_record)\n\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to parse event record: {0:d} with error: {1!s}'.format(\n record_index, exception))\n\n for record_index in range(evtx_file.number_of_recovered_records):\n if parser_mediator.abort:\n break\n\n try:\n evtx_record = evtx_file.get_recovered_record(record_index)\n self._ParseRecord(\n parser_mediator, record_index, evtx_record, recovered=True)\n\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning((\n 'unable to parse recovered event record: {0:d} with error: '\n '{1!s}').format(record_index, exception))", "docstring": "Parses Windows XML EventLog (EVTX) records.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n evtx_file (pyevt.file): Windows XML EventLog (EVTX) file.", "source": "juraj_google_style"} -{"code": "def cn_occupation_energy( self, delta_occupation=None ):\n\n nn_occupations = self.site_specific_nn_occupation()\n if delta_occupation:\n for site in delta_occupation:\n assert( site in nn_occupations )\n nn_occupations[ site ] += delta_occupation[ site ]\n return sum( [ self.cn_occupation_energies[ s ][ n ] for s, n in nn_occupations.items() ] )", "docstring": "The coordination-number dependent energy for 
this site.\n\nArgs:\n delta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a change in (site-type specific) coordination number, e.g. { 'A' : 1, 'B' : -1 }.\n If this is not None, the coordination-number dependent energy is calculated including these changes in neighbour-site occupations. Defaults to None\n\nReturns:\n (Float): The coordination-number dependent energy for this site.", "source": "juraj_google_style"} -{"code": "def initialize_schema(connection):\n\n cursor = connection.cursor()\n cursor.execute(\"PRAGMA application_id={}\".format(_TENSORBOARD_APPLICATION_ID))\n cursor.execute(\"PRAGMA user_version={}\".format(_TENSORBOARD_USER_VERSION))\n with connection:\n for statement in _SCHEMA_STATEMENTS:\n lines = statement.strip('\\n').split('\\n')\n message = lines[0] + ('...' if len(lines) > 1 else '')\n logger.debug('Running DB init statement: %s', message)\n cursor.execute(statement)", "docstring": "Initializes the TensorBoard sqlite schema using the given connection.\n\nArgs:\n connection: A sqlite DB connection.", "source": "juraj_google_style"} -{"code": "def load_config(self, file_name):\n\n\n # load config or default if invalid\n\n def load_settings(file_name):\n\n\n instruments_loaded = {}\n probes_loaded = {}\n scripts_loaded = {}\n\n if os.path.isfile(file_name):\n in_data = load_b26_file(file_name)\n\n instruments = in_data['instruments'] if 'instruments' in in_data else {}\n scripts = in_data['scripts'] if 'scripts' in in_data else {}\n probes = in_data['probes'] if 'probes' in in_data else {}\n\n instruments_loaded, failed = Instrument.load_and_append(instruments)\n if len(failed) > 0:\n print(('WARNING! Following instruments could not be loaded: ', failed))\n\n scripts_loaded, failed, instruments_loaded = Script.load_and_append(\n script_dict=scripts,\n instruments=instruments_loaded,\n log_function=self.log,\n data_path=self.gui_settings['data_folder'])\n\n if len(failed) > 0:\n print(('WARNING! 
Following scripts could not be loaded: ', failed))\n\n probes_loaded, failed, instruments_loadeds = Probe.load_and_append(\n probe_dict=probes,\n probes=probes_loaded,\n instruments=instruments_loaded)\n return instruments_loaded, scripts_loaded, probes_loaded\n\n print(('loading script/instrument/probes config from {:s}'.format(file_name)))\n try:\n config = load_b26_file(file_name)['gui_settings']\n if config['settings_file'] != file_name:\n print((\n 'WARNING path to settings file ({:s}) in config file is different from path of settings file ({:s})'.format(\n config['settings_file'], file_name)))\n config['settings_file'] = file_name\n print(('loading of {:s} successful'.format(file_name)))\n except Exception:\n print(('WARNING path to settings file ({:s}) invalid use default settings'.format(file_name)))\n config = self._DEFAULT_CONFIG\n\n\n for x in list(self._DEFAULT_CONFIG.keys()):\n if x in config:\n if not os.path.exists(config[x]):\n try:\n os.makedirs(config[x])\n except Exception:\n config[x] = self._DEFAULT_CONFIG[x]\n os.makedirs(config[x])\n print(('WARNING: failed validating or creating path: set to default path'.format(config[x])))\n else:\n config[x] = self._DEFAULT_CONFIG[x]\n os.makedirs(config[x])\n print(('WARNING: path {:s} not specified set to default {:s}'.format(x, config[x])))\n\n # check if file_name is a valid filename\n if os.path.exists(os.path.dirname(file_name)):\n config['settings_file'] = file_name\n\n self.gui_settings = config\n\n self.instruments, self.scripts, self.probes = load_settings(file_name)\n\n\n self.refresh_tree(self.tree_gui_settings, self.gui_settings)\n self.refresh_tree(self.tree_scripts, self.scripts)\n self.refresh_tree(self.tree_settings, self.instruments)\n\n self._hide_parameters(file_name)", "docstring": "checks if the file is a valid config file\n\nArgs:\n file_name:", "source": "juraj_google_style"} -{"code": "def create_composite_loss(self,\n losses,\n regularize=True,\n include_marked=True,\n name='cost'):\n\n all_losses = []\n if losses:\n all_losses.extend(losses)\n if include_marked:\n all_losses.extend(self.marked_losses)\n if not all_losses:\n raise ValueError('No losses specified!')\n if regularize:\n all_losses.extend(self.regularization_losses)\n with self._g.as_default():\n result = tf.add_n(all_losses, name=name)\n self.add_scalar_summary(result)\n return result", "docstring": "Creates a loss that is the sum of all specified losses.\n\nArgs:\n losses: A sequence of losses to include.\n regularize: Whether or not to include regularization losses.\n include_marked: Whether or not to use the marked losses.\n name: The name for this variable.\n\nReturns:\n A single tensor that is the sum of all losses.\n\nRaises:\n ValueError: if there are no losses.", "source": "juraj_google_style"} -{"code": "def _check_docstring_quotes(self, quote_record):\n\n _, triple, row, col = quote_record\n if triple != TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote):\n self._invalid_docstring_quote(triple, row, col)", "docstring": "Check if the docstring quote from tokenization is valid.\n\nArgs:\n quote_record: a tuple containing the info about the string\n from tokenization, giving the (token, quote, row number).", "source": "juraj_google_style"} -{"code": "def whatIfOrder(self, contract: Contract, order: Order) -> OrderState:\n\n return self._run(self.whatIfOrderAsync(contract, order))", "docstring": "Retrieve commission and margin impact without actually\n placing the order. 
The given order will not be modified in any way.\n\n This method is blocking.\n\nArgs:\n contract: Contract to test.\n order: Order to test.", "source": "juraj_google_style"} -{"code": "def modutf7_encode(data: str) -> bytes:\n\n ret = bytearray()\n is_usascii = True\n encode_start = None\n for i, symbol in enumerate(data):\n charpoint = ord(symbol)\n if is_usascii:\n if charpoint == 0x26:\n ret.extend(b'&-')\n elif 0x20 <= charpoint <= 0x7e:\n ret.append(charpoint)\n else:\n encode_start = i\n is_usascii = False\n else:\n if 0x20 <= charpoint <= 0x7e:\n to_encode = data[encode_start:i]\n encoded = _modified_b64encode(to_encode)\n ret.append(0x26)\n ret.extend(encoded)\n ret.extend((0x2d, charpoint))\n is_usascii = True\n if not is_usascii:\n to_encode = data[encode_start:]\n encoded = _modified_b64encode(to_encode)\n ret.append(0x26)\n ret.extend(encoded)\n ret.append(0x2d)\n return bytes(ret)", "docstring": "Encode the string using modified UTF-7.\n\nArgs:\n data: The input string to encode.", "source": "juraj_google_style"} -{"code": "def ping(self, destination, length=20):\n\n print '%s call ping' % self.port\n print 'destination: %s' %destination\n try:\n cmd = 'ping %s %s' % (destination, str(length))\n print cmd\n self._sendline(cmd)\n self._expect(cmd)\n # wait echo reply\n time.sleep(1)\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger(\"ping() Error: \" + str(e))", "docstring": "send ICMPv6 echo request with a given length to a unicast destination\n address\n\nArgs:\n destination: the unicast destination address of ICMPv6 echo request\n length: the size of ICMPv6 echo request payload", "source": "juraj_google_style"} -{"code": "def MapByteStream(self, byte_stream, byte_offset=0, **kwargs):\n\n byte_stream = super(StringMap, self).MapByteStream(\n byte_stream, byte_offset=byte_offset, **kwargs)\n\n if self._HasElementsTerminator():\n # Remove the elements terminator and any trailing data from\n # the byte stream.\n elements_terminator = self._data_type_definition.elements_terminator\n elements_terminator_size = len(elements_terminator)\n\n byte_offset = 0\n byte_stream_size = len(byte_stream)\n\n while byte_offset < byte_stream_size:\n end_offset = byte_offset + elements_terminator_size\n if byte_stream[byte_offset:end_offset] == elements_terminator:\n break\n\n byte_offset += elements_terminator_size\n\n byte_stream = byte_stream[:byte_offset]\n\n try:\n return byte_stream.decode(self._data_type_definition.encoding)\n\n except Exception as exception:\n error_string = (\n 'Unable to read: {0:s} from byte stream at offset: {1:d} '\n 'with error: {2!s}').format(\n self._data_type_definition.name, byte_offset, exception)\n raise errors.MappingError(error_string)", "docstring": "Maps the data type on a byte stream.\n\nArgs:\n byte_stream (bytes): byte stream.\n byte_offset (Optional[int]): offset into the byte stream where to start.\n\nReturns:\n str: mapped values.\n\nRaises:\n MappingError: if the data type definition cannot be mapped on\n the byte stream.", "source": "juraj_google_style"} -{"code": "def update_(self, conf_dict, conf_arg=True):\n\n for section, secdict in conf_dict.items():\n self[section].update_(secdict, conf_arg)", "docstring": "Update values of configuration options with dict.\n\nArgs:\n conf_dict (dict): dict of dict indexed with section and option\n names.\n conf_arg (bool): if True, only options that can be set in a config\n file are updated.", "source": "juraj_google_style"} -{"code": "def get_head_revision_from_alembic(\n alembic_config_filename: 
str,\n alembic_base_dir: str = None,\n version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> str:\n\n if alembic_base_dir is None:\n alembic_base_dir = os.path.dirname(alembic_config_filename)\n os.chdir(alembic_base_dir) # so the directory in the config file works\n config = Config(alembic_config_filename)\n script = ScriptDirectory.from_config(config)\n with EnvironmentContext(config,\n script,\n version_table=version_table):\n return script.get_current_head()", "docstring": "Ask Alembic what its head revision is (i.e. where the Python code would\n like the database to be at).\n\nArgs:\n alembic_config_filename: config filename\n alembic_base_dir: directory to start in, so relative paths in the\n config file work.\n version_table: table name for Alembic versions", "source": "juraj_google_style"} -{"code": "def is_first(self, value):\n\n if value == self._defaults['ai.session.isFirst'] and 'ai.session.isFirst' in self._values:\n del self._values['ai.session.isFirst']\n else:\n self._values['ai.session.isFirst'] = value", "docstring": "The is_first property.\n\nArgs:\n value (string). the property value.", "source": "juraj_google_style"} -{"code": "def __init__(self, resolver, mets_url, src_dir=None, skip=None, download=False, page_strictness='strict'):\n\n self.report = ValidationReport()\n self.skip = skip if skip else []\n log.debug('resolver=%s mets_url=%s src_dir=%s', resolver, mets_url, src_dir)\n self.resolver = resolver\n self.mets_url = mets_url\n self.download = download\n self.page_strictness = page_strictness\n\n self.src_dir = src_dir\n if mets_url is None and src_dir is not None:\n mets_url = '%s/mets.xml' % src_dir\n self.workspace = None\n self.mets = None", "docstring": "Construct a new WorkspaceValidator.\n\nArgs:\n resolver (Resolver):\n mets_url (string):\n src_dir (string):\n skip (list):\n download (boolean):\n page_strictness (\"strict\"|\"lax\"|\"fix\"|\"off\"):", "source": "juraj_google_style"} -{"code": "def __call__(self, name, value):\n\n super(IterableTypeChecker, self).__call__(name, value)\n if isinstance(self.item_type, type):\n if not all(isinstance(o, self.item_type) for o in value):\n raise ValueError(\"All elements of %s must be %s\" % (name, self.item_type))\n if isinstance(self.min_length, int):\n if len(value) < self.min_length:\n raise ValueError(\"%s must be longer than %s (or equal)\" % (name, self.min_length))\n if isinstance(self.max_length, int):\n if len(value) > self.max_length:\n raise ValueError(\"%s must be shorter than %s (or equal)\" % (name, self.max_length))\n if len(value) == 0 and not self.empty:\n raise ValueError(\"%s must not be empty\" % name)", "docstring": "Call method.\n\nArgs:\n name (str): the value's name.\n value (iterable): the value to check.\n\nRaises:\n ValueError: if value is not type iter_type.\n ValueError: if any item in value is not type item_type.\n ValueError: if value's length is less than min_length.\n ValueError: if value's length is more than max_length.\n ValueError: if value's length is 0 and emptiness is not allowed.", "source": "juraj_google_style"} -{"code": "def rgb_to_hex(rgb):\n\n r, g, b = rgb[:3]\n if (r < 0) or (g < 0) or (b < 0):\n raise Exception(\"RGB values must all be 0-255 or 0-1\")\n if (r > 255) or (g > 255) or (b > 255):\n raise Exception(\"RGB values must all be 0-255 or 0-1\")\n if (0 < r < 1) or (0 < g < 1) or (0 < b < 1):\n if (r > 1) or (g > 1) or (b > 1):\n raise Exception(\"RGB values must all be 0-255 or 0-1\")\n if (0 <= r <= 1) and (0 <= g <= 1) and (0 <= b <= 1):\n rgb = 
tuple([int(round(val * 255)) for val in [r, g, b]])\n else:\n rgb = (int(r), int(g), int(b))\n result = '#%02x%02x%02x' % rgb\n return result.lower()", "docstring": "Utility function to convert (r,g,b) triples to hex.\n http://ageo.co/1CFxXpO\n\nArgs:\n rgb (tuple): A sequence of RGB values in the\n range 0-255 or 0-1.\n\nReturns:\n str: The hex code for the colour.", "source": "juraj_google_style"} -{"code": "def recursion_error(self, repeated_parser: str):\n\n if self.finished:\n return super().recursion_error(repeated_parser)\n else:\n line_index, character_index, line, pointer = self.current_line()\n\n return 'Infinite recursion detected in {}; empty string was matched and will be matched forever\\n' \\\n 'Line {}, character {}\\n\\n{}{}'.format(repeated_parser, line_index, character_index, line, pointer)", "docstring": "Generate an error to indicate that infinite recursion was encountered.\n\n A parser can supply a representation of itself to this method and the\n reader will supply the context, including the location where the\n parser stalled.\n\nArgs:\n repeated_parser: A representation of the repeated parser\n\nReturns:\n A full error message", "source": "juraj_google_style"} -{"code": "def ParseOptions(cls, options, configuration_object):\n\n if not isinstance(configuration_object, tools.CLITool):\n raise errors.BadConfigObject(\n 'Configuration object is not an instance of CLITool')\n\n temporary_directory = getattr(options, 'temporary_directory', None)\n if temporary_directory and not os.path.isdir(temporary_directory):\n raise errors.BadConfigOption(\n 'No such temporary directory: {0:s}'.format(temporary_directory))\n\n setattr(configuration_object, '_temporary_directory', temporary_directory)", "docstring": "Parses and validates options.\n\nArgs:\n options (argparse.Namespace): parser options.\n configuration_object (CLITool): object to be configured by the argument\n helper.\n\nRaises:\n BadConfigObject: when the configuration object is of the wrong type.", "source": "juraj_google_style"} -{"code": "def __init__(self, config=None):\n\n self.http = urllib3.PoolManager()\n self.serving_port = 8080\n self.config = config\n self.serving_port = get_config_value('local.serving_port', config) or 8080", "docstring": "Initializes a LocalSageMakerRuntimeClient\n\nArgs:\n config (dict): Optional configuration for this client. In particular only\n the local port is read.", "source": "juraj_google_style"} -{"code": "def unpack(self, buff, offset=0):\n\n super().unpack(buff, offset)\n if not self.is_valid():\n raise UnpackException(\"Unsupported protocols in ARP packet\")", "docstring": "Unpack a binary struct into this object's attributes.\n\n Return the values instead of the lib's basic types.\n Check if the protocols involved are Ethernet and IPv4. Other protocols\n are currently not supported.\n\nArgs:\n buff (bytes): Binary buffer.\n offset (int): Where to begin unpacking.\n\nRaises:\n :exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj_google_style"} -{"code": "def _jit_get_rotation_matrix(axis, angle):\n\n axis = _jit_normalize(axis)\n a = m.cos(angle / 2)\n b, c, d = axis * m.sin(angle / 2)\n rot_matrix = np.empty((3, 3))\n rot_matrix[0, 0] = a**2 + b**2 - c**2 - d**2\n rot_matrix[0, 1] = 2. * (b * c - a * d)\n rot_matrix[0, 2] = 2. * (b * d + a * c)\n rot_matrix[1, 0] = 2. * (b * c + a * d)\n rot_matrix[1, 1] = a**2 + c**2 - b**2 - d**2\n rot_matrix[1, 2] = 2. * (c * d - a * b)\n rot_matrix[2, 0] = 2. * (b * d - a * c)\n rot_matrix[2, 1] = 2. 
* (c * d + a * b)\n rot_matrix[2, 2] = a**2 + d**2 - b**2 - c**2\n return rot_matrix", "docstring": "Returns the rotation matrix.\n\n This function returns a matrix for the counterclockwise rotation\n around the given axis.\n The Input angle is in radians.\n\nArgs:\n axis (vector):\n angle (float):\n\nReturns:\n Rotation matrix (np.array):", "source": "juraj_google_style"} -{"code": "def delete_user(self, email):\n\n LOG.info(\"Deleting user %s\", email)\n user_obj = self.user_collection.delete_one({'_id': email})\n\n return user_obj", "docstring": "Delete a user from the database\n\nArgs:\n email(str)\n\nReturns:\n user_obj(dict)", "source": "juraj_google_style"} -{"code": "def analyze(self, text, tokenizer=str.split):\n\n if not self.tagger:\n self.tagger = Tagger(self.model,\n preprocessor=self.p,\n tokenizer=tokenizer)\n\n return self.tagger.analyze(text)", "docstring": "Analyze text and return pretty format.\n\nArgs:\n text: string, the input text.\n tokenizer: Tokenize input sentence. Default tokenizer is `str.split`.\n\nReturns:\n res: dict.", "source": "juraj_google_style"} -{"code": "def sasl_plain(self, name, password, identity=None):\n\n if identity is None:\n identity = name\n\n self.sasl('plain', name, password, identity)", "docstring": "Authenticate to a server using SASL plain, or does so on connection.\n\nArgs:\n name (str): Name to auth with.\n password (str): Password to auth with.\n identity (str): Identity to auth with (defaults to name).", "source": "juraj_google_style"} -{"code": "def ParseFromHumanReadable(self, string):\n\n if not string:\n return None\n\n match = self.REGEX.match(string.strip().lower())\n if not match:\n raise DecodeError(\"Unknown specification for ByteSize %s\" % string)\n\n multiplier = self.DIVIDERS.get(match.group(2))\n if not multiplier:\n raise DecodeError(\"Invalid multiplier %s\" % match.group(2))\n\n # The value may be represented as a float, but if not dont lose accuracy.\n value = match.group(1)\n if \".\" in value:\n value = float(value)\n else:\n value = int(value)\n\n self._value = int(value * multiplier)", "docstring": "Parse a human readable string of a byte string.\n\nArgs:\n string: The string to parse.\n\nRaises:\n DecodeError: If the string can not be parsed.", "source": "juraj_google_style"} -{"code": "def _AddEdge(self, start_node, end_node):\n\n\n self.graph[start_node].outgoing.append(end_node)\n\n # This check is necessary because an artifact can provide attributes that\n # are not covered by the graph because they are not relevant for the\n # requested artifacts.\n if end_node in self.graph:\n self.graph[end_node].incoming.append(start_node)", "docstring": "Add a directed edge to the graph.\n\n Add the end to the list of outgoing nodes of the start and the start to the\n list of incoming nodes of the end node.\n\nArgs:\n start_node: name of the start node\n end_node: name of the end node", "source": "juraj_google_style"} -{"code": "def add_review(self, reviewer, product, review, date=None):\n\n if not isinstance(reviewer, self._reviewer_cls):\n raise TypeError(\n \"Type of given reviewer isn't acceptable:\", reviewer,\n \", expected:\", self._reviewer_cls)\n elif not isinstance(product, self._product_cls):\n raise TypeError(\n \"Type of given product isn't acceptable:\", product,\n \", expected:\", self._product_cls)\n r = self._review_cls(review, date=date)\n self.graph.add_edge(reviewer, product, review=r)\n return r", "docstring": "Add a new review from a given reviewer to a given product.\n\nArgs:\n reviewer: an 
instance of Reviewer.\n product: an instance of Product.\n review: a float value.\n date: date the review issued.\n\nReturns:\n the added new review object.\n\nRaises:\n TypeError: when given reviewer and product aren't instance of\n specified reviewer and product class when this graph is constructed.", "source": "juraj_google_style"} -{"code": "def get_records(self, name):\n\n if name in self._cache:\n return self._cache[name].values()\n else:\n return []", "docstring": "Return all the records for the given name in the cache.\n\nArgs:\n name (string): The name which the required models are stored under.\n\nReturns:\n list: A list of :class:`cinder_data.model.CinderModel` models.", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.ListTraces = channel.unary_unary(\n \"/google.devtools.cloudtrace.v1.TraceService/ListTraces\",\n request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.ListTracesRequest.SerializeToString,\n response_deserializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.ListTracesResponse.FromString,\n )\n self.GetTrace = channel.unary_unary(\n \"/google.devtools.cloudtrace.v1.TraceService/GetTrace\",\n request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.GetTraceRequest.SerializeToString,\n response_deserializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.Trace.FromString,\n )\n self.PatchTraces = channel.unary_unary(\n \"/google.devtools.cloudtrace.v1.TraceService/PatchTraces\",\n request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.PatchTracesRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def Get(self, key):\n\n if key not in self._hash:\n raise KeyError(key)\n\n node = self._hash[key]\n\n self._age.Unlink(node)\n self._age.AppendNode(node)\n\n return node.data", "docstring": "Fetch the object from cache.\n\n Objects may be flushed from cache at any time. 
Callers must always\n handle the possibility of KeyError raised here.\n\nArgs:\n key: The key used to access the object.\n\nReturns:\n Cached object.\n\nRaises:\n KeyError: If the object is not present in the cache.", "source": "juraj_google_style"} -{"code": "def remove_attribute(self, attr):\n\n update = [fapi._attr_rem(attr)]\n r = fapi.update_workspace_attributes(self.namespace, self.name,\n update, self.api_url)\n self.data[\"workspace\"][\"attributes\"].pop(attr, None)\n fapi._check_response_code(r, 200)", "docstring": "Remove attribute from a workspace.\n\nArgs:\n attr (str): attribute name", "source": "juraj_google_style"} -{"code": "def probe_services(self, handle, conn_id, callback):\n\n\n self._command_task.async_command(['_probe_services', handle], callback,\n {'connection_id': conn_id, 'handle': handle})", "docstring": "Given a connected device, probe for its GATT services and characteristics\n\nArgs:\n handle (int): a handle to the connection on the BLED112 dongle\n conn_id (int): a unique identifier for this connection on the DeviceManager\n that owns this adapter.\n callback (callable): Callback to be called when this procedure finishes", "source": "juraj_google_style"} -{"code": "def __init__(self, filepaths, overrides=None, locked=False):\n\n self.filepaths = filepaths\n self._sourced_filepaths = None\n self.overrides = overrides or {}\n self.locked = locked", "docstring": "Create a config.\n\nArgs:\n filepaths (list of str): List of config files to load.\n overrides (dict): A dict containing settings that override all\n others. Nested settings are overridden with nested dicts.\n locked: If True, settings overrides in environment variables are\n ignored.", "source": "juraj_google_style"} -{"code": "def ext_xsect(scatterer, h_pol=True):\n\n\n if scatterer.psd_integrator is not None:\n try:\n return scatterer.psd_integrator.get_angular_integrated(\n scatterer.psd, scatterer.get_geometry(), \"ext_xsect\")\n except AttributeError:\n # Fall back to the usual method of computing this from S\n pass\n\n old_geom = scatterer.get_geometry()\n (thet0, thet, phi0, phi, alpha, beta) = old_geom\n try:\n scatterer.set_geometry((thet0, thet0, phi0, phi0, alpha, beta))\n S = scatterer.get_S() \n finally:\n scatterer.set_geometry(old_geom)\n\n\n\n if h_pol:\n return 2 * scatterer.wavelength * S[1,1].imag\n else:\n return 2 * scatterer.wavelength * S[0,0].imag", "docstring": "Extinction cross section for the current setup, with polarization.\n\nArgs:\n scatterer: a Scatterer instance.\n h_pol: If True (default), use horizontal polarization.\n If False, use vertical polarization.\n\nReturns:\n The extinction cross section.", "source": "juraj_google_style"} -{"code": "def ParseDownloadsRow(\n self, parser_mediator, query, row, **unused_kwargs):\n\n query_hash = hash(query)\n\n event_data = FirefoxDownloadEventData()\n event_data.full_path = self._GetRowValue(query_hash, row, 'target')\n event_data.mime_type = self._GetRowValue(query_hash, row, 'mimeType')\n event_data.name = self._GetRowValue(query_hash, row, 'name')\n event_data.offset = self._GetRowValue(query_hash, row, 'id')\n event_data.query = query\n event_data.received_bytes = self._GetRowValue(query_hash, row, 'currBytes')\n event_data.referrer = self._GetRowValue(query_hash, row, 'referrer')\n event_data.temporary_location = self._GetRowValue(\n query_hash, row, 'tempPath')\n event_data.total_bytes = self._GetRowValue(query_hash, row, 'maxBytes')\n event_data.url = self._GetRowValue(query_hash, row, 'source')\n\n timestamp = 
self._GetRowValue(query_hash, row, 'startTime')\n if timestamp:\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_START)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = self._GetRowValue(query_hash, row, 'endTime')\n if timestamp:\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_END)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a downloads row.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n query (str): query that created the row.\n row (sqlite3.Row): row.", "source": "juraj_google_style"} -{"code": "def contains(self, rect):\n\n return (rect.y >= self.y and \\\n rect.x >= self.x and \\\n rect.y+rect.height <= self.y+self.height and \\\n rect.x+rect.width <= self.x+self.width)", "docstring": "Tests if another rectangle is contained by this one\n\nArgs:\n rect (Rectangle): The other rectangle\n\nReturns:\n bool: True if it is container, False otherwise", "source": "juraj_google_style"} -{"code": "def AddArguments(cls, argument_group):\n\n default_fields = ','.join(cls._DEFAULT_FIELDS)\n argument_group.add_argument(\n '--fields', dest='fields', type=str, action='store',\n default=default_fields, help=(\n 'Defines which fields should be included in the output.'))\n\n default_fields = ', '.join(cls._DEFAULT_FIELDS)\n argument_group.add_argument(\n '--additional_fields', dest='additional_fields', type=str,\n action='store', default='', help=(\n 'Defines extra fields to be included in the output, in addition to'\n ' the default fields, which are {0:s}.'.format(default_fields)))", "docstring": "Adds command line arguments the helper supports to an argument group.\n\n This function takes an argument parser or an argument group object and adds\n to it all the command line arguments this helper supports.\n\nArgs:\n argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\n argparse group.", "source": "juraj_google_style"} -{"code": "def onkeydown(self, key, keycode, ctrl, shift, alt):\n\n return (key, keycode, ctrl, shift, alt)", "docstring": "Called when user types and releases a key.\n The widget should be able to receive the focus in order to emit the event.\n Assign a 'tabindex' attribute to make it focusable.\n\nArgs:\n key (str): the character value\n keycode (str): the numeric char code", "source": "juraj_google_style"} -{"code": "def iterwindows(self, count=64, window_shape=(256, 256)):\n\n if count is None:\n while True:\n yield self.randwindow(window_shape)\n else:\n for i in xrange(count):\n yield self.randwindow(window_shape)", "docstring": "Iterate over random windows of an image\n\nArgs:\n count (int): the number of the windows to generate. 
Defaults to 64, if `None` will continue to iterate over random windows until stopped.\n window_shape (tuple): The desired shape of each image as (height, width) in pixels.\n\nYields:\n image: an image of the given shape and same type.", "source": "juraj_google_style"} -{"code": "def get_dataset(self, dsid, dsinfo):\n\n data = self[dsinfo.get('file_key', dsid.name)]\n data.attrs.update(dsinfo)\n\n data.attrs[\"platform_name\"] = self['/attr/satellite_name']\n data.attrs[\"sensor\"] = self['/attr/instrument_name']\n\n return data", "docstring": "Get dataset function\n\nArgs:\n dsid: Dataset ID\n param2: Dataset Information\n\nReturns:\n Dask DataArray: Data", "source": "juraj_google_style"} -{"code": "def CheckDataVisiblity(self, value):\n\n if not self.data_visibility_policy:\n return None\n\n visible, reason = self.data_visibility_policy.IsDataVisible(\n DetermineType(value))\n\n if visible:\n return None\n\n return {\n 'status': {\n 'isError': True,\n 'refersTo': 'VARIABLE_NAME',\n 'description': {\n 'format': reason\n }\n }\n }", "docstring": "Returns a status object if the given name is not visible.\n\nArgs:\n value: The value to check. The actual value here is not important but the\n value's metadata (e.g. package and type) will be checked.\n\nReturns:\n None if the value is visible. A variable structure with an error status\n if the value should not be visible.", "source": "juraj_google_style"} -{"code": "def parse_ids(chrom, pos, ref, alt, case_id, variant_type):\n\n ids = {}\n pos = str(pos)\n\n ids['simple_id'] = parse_simple_id(chrom, pos, ref, alt)\n ids['variant_id'] = parse_variant_id(chrom, pos, ref, alt, variant_type)\n ids['display_name'] = parse_display_name(chrom, pos, ref, alt, variant_type)\n ids['document_id'] = parse_document_id(chrom, pos, ref, alt, variant_type, case_id)\n\n return ids", "docstring": "Construct the necessary ids for a variant\n\nArgs:\n chrom(str): Variant chromosome\n pos(int): Variant position\n ref(str): Variant reference\n alt(str): Variant alternative\n case_id(str): Unique case id\n variant_type(str): 'clinical' or 'research'\n\nReturns:\n ids(dict): Dictionary with the relevant ids", "source": "juraj_google_style"} -{"code": "def console_map_string_to_font(s: str, fontCharX: int, fontCharY: int) -> None:\n\n lib.TCOD_console_map_string_to_font_utf(_unicode(s), fontCharX, fontCharY)", "docstring": "Remap a string of codes to a contiguous set of tiles.\n\nArgs:\n s (AnyStr): A string of character codes to map to new values.\n The null character `'\\\\x00'` will prematurely end this\n function.\n fontCharX (int): The starting X tile coordinate on the loaded tileset.\n 0 is the leftmost tile.\n fontCharY (int): The starting Y tile coordinate on the loaded tileset.\n 0 is the topmost tile.", "source": "juraj_google_style"} -{"code": "def __init__(self, app, env, region, prop_path):\n\n self.app_name = app\n self.env = env\n self.region = region\n self.properties = get_properties(prop_path)\n generated = get_details(app=self.app_name)\n self.group = generated.data['project']\n\n try:\n self.pipeline = self.properties['pipeline']['lambda']\n except KeyError:\n raise RequiredKeyNotFound(\"Lambda key in pipeline.json is required.\")\n\n self.runtime = self.pipeline['runtime']\n self.description = self.pipeline['app_description']\n self.handler = self.pipeline['handler']\n self.vpc_enabled = self.pipeline['vpc_enabled']\n\n self.settings = get_properties(prop_path, env=self.env, region=self.region)\n app = self.settings['app']\n self.lambda_environment 
= app['lambda_environment']\n self.memory = app['lambda_memory']\n self.role = app.get('lambda_role') or generated.iam()['lambda_role']\n self.timeout = app['lambda_timeout']\n self.concurrency_limit = app.get('lambda_concurrency_limit')\n\n self.role_arn = get_role_arn(self.role, self.env, self.region)\n\n self.session = boto3.Session(profile_name=self.env, region_name=self.region)\n self.lambda_client = self.session.client('lambda')", "docstring": "Lambda function object.\n\nArgs:\n app (str): Application name\n env (str): Environment/Account\n region (str): AWS Region\n prop_path (str): Path of environment property file", "source": "juraj_google_style"} -{"code": "def __new__(cls, force=False):\n\n self = super().__new__(cls)\n\n if not force:\n if not _CHOSEN_PALETTE:\n self = empty_bin # None, deactivate completely\n # else: continue on unabated\n return self", "docstring": "Override new() to replace the class entirely on deactivation.\n\n Complies with palette detection, unless force is on:\n\nArgs:\n force - Force on.", "source": "juraj_google_style"} -{"code": "def fit_richness(self, atol=1.e-3, maxiter=50):\n\n # Check whether the signal probability for all objects are zero\n # This can occur for finite kernels on the edge of the survey footprint\n if np.isnan(self.u).any():\n logger.warning(\"NaN signal probability found\")\n return 0., 0., None\n\n if not np.any(self.u):\n logger.warning(\"Signal probability is zero for all objects\")\n return 0., 0., None\n\n if self.f == 0:\n logger.warning(\"Observable fraction is zero\")\n return 0., 0., None\n\n # Richness corresponding to 0, 1, and 10 observable stars\n richness = np.array([0., 1./self.f, 10./self.f])\n loglike = np.array([self.value(richness=r) for r in richness])\n\n found_maximum = False\n iteration = 0\n while not found_maximum:\n parabola = ugali.utils.parabola.Parabola(richness, 2.*loglike)\n if parabola.vertex_x < 0.:\n found_maximum = True\n else:\n richness = np.append(richness, parabola.vertex_x)\n loglike = np.append(loglike, self.value(richness=richness[-1]))\n\n if np.fabs(loglike[-1] - np.max(loglike[0: -1])) < atol:\n found_maximum = True\n iteration+=1\n if iteration > maxiter:\n logger.warning(\"Maximum number of iterations reached\")\n break\n\n index = np.argmax(loglike)\n return loglike[index], richness[index], parabola", "docstring": "Maximize the log-likelihood as a function of richness.\n\n ADW 2018-06-04: Does it make sense to set the richness to the mle?\n\n Parameters:\n -----------\n atol : absolute tolerence for conversion\n maxiter : maximum number of iterations\n\nReturns:\n --------\n loglike, richness, parabola : the maximum loglike, the mle, and the parabola", "source": "juraj_google_style"} -{"code": "def cancel(self, force=False):\n\n return self.rest_client._sc._delegator._cancel_job(self, force)", "docstring": "Cancel this job.\n\nArgs:\n force (bool, optional): Forcefully cancel this job.\n\nReturns:\n bool: True if the job was cancelled, otherwise False if an error occurred.", "source": "juraj_google_style"} -{"code": "def _convert_json(obj):\n\n if isinstance(obj, dict):\n return {_convert_json(key): _convert_json(val)\n for (key, val) in six.iteritems(obj)}\n elif isinstance(obj, list) and len(obj) == 2:\n first = obj[0]\n second = obj[1]\n if first == 'set' and isinstance(second, list):\n return [_convert_json(elem) for elem in second]\n elif first == 'map' and isinstance(second, list):\n for elem in second:\n if not isinstance(elem, list) or len(elem) != 2:\n return obj\n return 
{elem[0]: _convert_json(elem[1]) for elem in second}\n else:\n return obj\n elif isinstance(obj, list):\n return [_convert_json(elem) for elem in obj]\n else:\n return obj", "docstring": "Converts from the JSON output provided by ovs-vsctl into a usable Python\n object tree. In particular, sets and maps are converted from lists to\n actual sets or maps.\n\nArgs:\n obj: Object that shall be recursively converted.\n\nReturns:\n Converted version of object.", "source": "juraj_google_style"} -{"code": "def _make_dir(self, client_kwargs):\n\n with _handle_oss_error():\n bucket = self._get_bucket(client_kwargs)\n\n # Object\n if 'key' in client_kwargs:\n return bucket.put_object(\n key=client_kwargs['key'], data=b'')\n\n # Bucket\n return bucket.create_bucket()", "docstring": "Make a directory.\n\nArgs:\n client_kwargs (dict): Client arguments.", "source": "juraj_google_style"} -{"code": "def _async_query(self, ID, methodname, *args, **kwargs):\n\n self.send_query(ID, methodname, True, *args, **kwargs)\n task = self.tasks[ID]\n return task", "docstring": "将调用请求的ID,方法名,参数包装为请求数据后编码为字节串发送出去.并创建一个Future对象占位.\n\n Parameters:\n ID (str): - 任务ID\n methodname (str): - 要调用的方法名\n args (Any): - 要调用的方法的位置参数\n kwargs (Any): - 要调用的方法的关键字参数\n\nReturns:\n (asyncio.Future): - 返回对应ID的Future对象", "source": "juraj_google_style"} -{"code": "def _extract_match(self, candidate, offset):\n\n # Skip a match that is more likely a publication page reference or a\n # date.\n if (_SLASH_SEPARATED_DATES.search(candidate)):\n return None\n\n # Skip potential time-stamps.\n if _TIME_STAMPS.search(candidate):\n following_text = self.text[offset + len(candidate):]\n if _TIME_STAMPS_SUFFIX.match(following_text):\n return None\n\n # Try to come up with a valid match given the entire candidate.\n match = self._parse_and_verify(candidate, offset)\n if match is not None:\n return match\n\n # If that failed, try to find an \"inner match\" -- there might be a\n # phone number within this candidate.\n return self._extract_inner_match(candidate, offset)", "docstring": "Attempts to extract a match from a candidate string.\n\nArgs:\n candidate -- The candidate text that might contain a phone number.\n offset -- The offset of candidate within self.text\n Returns the match found, None if none can be found", "source": "juraj_google_style"} -{"code": "def clustering_factory(clf):\n\n required_methods = ['fit', 'fit_predict']\n\n for method in required_methods:\n if not hasattr(clf, method):\n raise TypeError('\"{}\" is not in clf. Did you '\n 'pass a clusterer instance?'.format(method))\n\n additional_methods = {\n 'plot_silhouette': plot_silhouette,\n 'plot_elbow_curve': plot_elbow_curve\n }\n\n for key, fn in six.iteritems(additional_methods):\n if hasattr(clf, key):\n warnings.warn('\"{}\" method already in clf. '\n 'Overriding anyway. 
This may '\n 'result in unintended behavior.'.format(key))\n setattr(clf, key, types.MethodType(fn, clf))\n return clf", "docstring": "Embeds scikit-plot plotting methods in an sklearn clusterer instance.\n\nArgs:\n clf: Scikit-learn clusterer instance\n\nReturns:\n The same scikit-learn clusterer instance passed in **clf** with\n embedded scikit-plot instance methods.\n\nRaises:\n ValueError: If **clf** does not contain the instance methods necessary\n for scikit-plot instance methods.", "source": "juraj_google_style"} -{"code": "def get_connection(db_type, db_pth, user=None, password=None, name=None):\n\n if db_type == 'sqlite':\n print(db_pth)\n conn = sqlite3.connect(db_pth)\n elif db_type == 'mysql':\n import mysql.connector\n conn = mysql.connector.connect(user=user, password=password, database=name)\n elif db_type == 'django_mysql':\n from django.db import connection as conn\n else:\n print('unsupported database type: {}, choices are \"sqlite\", \"mysql\" or \"django_mysql\"'.format(db_type))\n\n return conn", "docstring": "Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database\n\nExample:\n >>> from msp2db.db import get_connection\n >>> conn = get_connection('sqlite', 'library.db')\n\n If using \"mysql\" mysql.connector needs to be installed.\n\n If using \"django_mysql\" Django needs to be installed.\n\nArgs:\n db_type (str): Type of database can either be \"sqlite\", \"mysql\" or \"django_mysql\"\n\nReturns:\n sql connection object", "source": "juraj_google_style"} -{"code": "def pep8(amend: bool = False, stage: bool = False):\n\n _pep8(amend, stage)", "docstring": "Runs Pyup's Safety tool (https://pyup.io/safety/)\n\nArgs:\n amend: whether or not to commit results\n stage: whether or not to stage changes", "source": "juraj_google_style"} -{"code": "def FVDEVolumeOpen(fvde_volume, path_spec, file_object, key_chain):\n\n encrypted_root_plist = key_chain.GetCredential(\n path_spec, 'encrypted_root_plist')\n if encrypted_root_plist:\n fvde_volume.read_encrypted_root_plist(encrypted_root_plist)\n\n password = key_chain.GetCredential(path_spec, 'password')\n if password:\n fvde_volume.set_password(password)\n\n recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')\n if recovery_password:\n fvde_volume.set_recovery_password(recovery_password)\n\n fvde_volume.open_file_object(file_object)", "docstring": "Opens the FVDE volume using the path specification.\n\nArgs:\n fvde_volume (pyfvde.volume): FVDE volume.\n path_spec (PathSpec): path specification.\n file_object (FileIO): file-like object.\n key_chain (KeyChain): key chain.", "source": "juraj_google_style"} -{"code": "def resolve(self, context, provider):\n\n try:\n self._value.resolve(context, provider)\n except FailedLookup as e:\n raise FailedVariableLookup(self.name, e.lookup, e.error)", "docstring": "Recursively resolve any lookups with the Variable.\n\nArgs:\n context (:class:`stacker.context.Context`): Current context for\n building the stack\n provider (:class:`stacker.provider.base.BaseProvider`): subclass of\n the base provider", "source": "juraj_google_style"} -{"code": "def configure(self, x0, axis):\n\n self.x0 = x0\n self.axis = axis", "docstring": "Configure the 1D function for a line search\n\nArgs:\n x0 -- the reference point (q=0)\n axis -- a unit vector in the direction of the line search", "source": "juraj_google_style"} -{"code": "def profile_update_schema(profile):\n\n\n # add new \"autoclear\" field\n if profile.get('autoclear') is None:\n print(\n 
'{}{}Profile Update: Adding new \"autoclear\" parameter.'.format(\n c.Style.BRIGHT, c.Fore.YELLOW\n )\n )\n profile['autoclear'] = True\n\n # add new \"data_type\" field\n for validation in profile.get('validations') or []:\n if validation.get('data_type') is None:\n print(\n '{}{}Profile Update: Adding new \"data_type\" parameter.'.format(\n c.Style.BRIGHT, c.Fore.YELLOW\n )\n )\n validation['data_type'] = 'redis'\n\n # remove \"script\" parameter from profile\n if profile.get('install_json') is not None and profile.get('script') is not None:\n print(\n '{}{}Removing deprecated \"script\" parameter.'.format(c.Style.BRIGHT, c.Fore.YELLOW)\n )\n profile.pop('script')", "docstring": "Update profile to latest schema.\n\nArgs:\n profile (dict): The dictionary containting the profile settings.", "source": "juraj_google_style"} -{"code": "def list(self):\n\n self._initialize_list()\n interested = True\n\n response = self._cloudFormation.list_stacks()\n print('Stack(s):')\n while interested:\n if 'StackSummaries' in response:\n for stack in response['StackSummaries']:\n stack_status = stack['StackStatus']\n if stack_status != 'DELETE_COMPLETE':\n print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))\n\n next_token = response.get('NextToken', None)\n if next_token:\n response = self._cloudFormation.list_stacks(NextToken=next_token)\n else:\n interested = False\n\n return True", "docstring": "List the existing stacks in the indicated region\n\nArgs:\n None\n\nReturns:\n True if True\n\n Todo:\n Figure out what could go wrong and take steps\n to hanlde problems.", "source": "juraj_google_style"} -{"code": "def log(self, level, msg, *args, **kwargs):\n\n if level >= logging.FATAL:\n # Add property to the LogRecord created by this logger.\n # This will be used by the ABSLHandler to determine whether it should\n # treat CRITICAL/FATAL logs as really FATAL.\n extra = kwargs.setdefault('extra', {})\n extra[_ABSL_LOG_FATAL] = True\n super(ABSLLogger, self).log(level, msg, *args, **kwargs)", "docstring": "Logs a message at a cetain level substituting in the supplied arguments.\n\n This method behaves differently in python and c++ modes.\n\nArgs:\n level: int, the standard logging level at which to log the message.\n msg: str, the text of the message to log.\n *args: The arguments to substitute in the message.\n **kwargs: The keyword arguments to substitute in the message.", "source": "juraj_google_style"} -{"code": "def _send_success_response(self, response, start_response):\n\n headers = [('Content-Type', 'application/json; charset=UTF-8')]\n return util.send_wsgi_response('200 OK', headers, response, start_response)", "docstring": "Sends an HTTP 200 json success response.\n\n This calls start_response and returns the response body.\n\nArgs:\n response: A string containing the response body to return.\n start_response: A function with semantics defined in PEP-333.\n\nReturns:\n A string, the response body.", "source": "juraj_google_style"} -{"code": "def _sign_of(money):\n\n units = money.units\n nanos = money.nanos\n if units:\n if units > 0:\n return 1\n elif units < 0:\n return -1\n if nanos:\n if nanos > 0:\n return 1\n elif nanos < 0:\n return -1\n return 0", "docstring": "Determines the amount sign of a money instance\n\nArgs:\n money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the\n instance to test\n\nReturns:\n int: 1, 0 or -1", "source": "juraj_google_style"} -{"code": "def destringize(self, string):\n\n\n m = segment_destr_pattern.match(string)\n 
self.genome_id = int(m.group(1))\n self.chr_id = int(m.group(2))\n self.direction = m.group(3)\n self.left = int(m.group(4))\n self.right = int(m.group(5))", "docstring": "Get RNF values for this segment from its textual representation and\n save them into this object.\n\nArgs:\n string (str): Textual representation of a segment.", "source": "juraj_google_style"} -{"code": "def browse_stations(self, station_category_id):\n\n\n\t\tresponse = self._call(\n\t\t\tmc_calls.BrowseStations,\n\t\t\tstation_category_id\n\t\t)\n\t\tstations = response.body.get('stations', [])\n\n\t\treturn stations", "docstring": "Get the stations for a category from Browse Stations.\n\n Parameters:\n station_category_id (str): A station category ID as\n found with :meth:`browse_stations_categories`.\n\nReturns:\n list: Station dicts.", "source": "juraj_google_style"} -{"code": "def rgb_to_websafe(r, g=None, b=None, alt=False):\n\n if type(r) in [list,tuple]:\n r, g, b = r\n websafeComponent = _websafe_component\n return tuple((websafeComponent(v, alt) for v in (r, g, b)))", "docstring": "Convert the color from RGB to 'web safe' RGB\n\n Parameters:\n :r:\n The Red component value [0...1]\n :g:\n The Green component value [0...1]\n :b:\n The Blue component value [0...1]\n :alt:\n If True, use the alternative color instead of the nearest one.\n Can be used for dithering.\n\nReturns:\n The color as an (r, g, b) tuple in the range:\n the range:\n r[0...1],\n g[0...1],\n b[0...1]\n\n >>> '(%g, %g, %g)' % rgb_to_websafe(1, 0.55, 0.0)\n '(1, 0.6, 0)'", "source": "juraj_google_style"} -{"code": "def setData(self, data, setName=None):\n\n if not isinstance(data, DataFrame):\n if pd is not None and isinstance(data, pd.DataFrame):\n data = DataFrame.fromPandas(data)\n if setName is None:\n lock_and_call(\n lambda: self._impl.setData(data._impl),\n self._lock\n )\n else:\n lock_and_call(\n lambda: self._impl.setData(data._impl, setName),\n self._lock\n )", "docstring": "Assign the data in the dataframe to the AMPL entities with the names\n corresponding to the column names.\n\nArgs:\n data: The dataframe containing the data to be assigned.\n\n setName: The name of the set to which the indices values of the\n DataFrame are to be assigned.\n\nRaises:\n AMPLException: if the data assignment procedure was not successful.", "source": "juraj_google_style"} -{"code": "def compare_with_existing(self, region='us-east-1', onetime=False):\n\n pipelines = self.get_existing_pipelines()\n pipeline_id = None\n found = False\n for pipeline in pipelines:\n correct_app_and_region = (pipeline['application'] == self.app_name) and (region in pipeline['name'])\n if onetime:\n onetime_str = \"(onetime-{})\".format(self.environments[0])\n if correct_app_and_region and onetime_str in pipeline['name']:\n found = True\n elif correct_app_and_region:\n found = True\n\n if found:\n self.log.info('Existing pipeline found - %s', pipeline['name'])\n pipeline_id = pipeline['id']\n break\n else:\n self.log.info('No existing pipeline found')\n\n return pipeline_id", "docstring": "Compare desired pipeline with existing pipelines.\n\nArgs:\n region (str): Region of desired pipeline.\n onetime (bool): Looks for different pipeline if Onetime\n\nReturns:\n str: pipeline_id if existing, empty string of not.", "source": "juraj_google_style"} -{"code": "def verify_key_in_shelve(file_name, save_key, file_location):\n\n file = __os.path.join(file_location, file_name)\n shelve_store = __shelve.open(file)\n exists = shelve_store.get(save_key)\n shelve_store.close()\n if 
exists:\n return True\n\n elif not exists:\n return False", "docstring": "Function to check for a key in a shelve\n\nArgs:\n file_name: Shelve storage file name\n save_key: The name of the key the item is stored in\n file_location: The location of the file, derive from the os module\n\n Returns: returns true or false", "source": "juraj_google_style"} -{"code": "def company(symbol, token='', version=''):\n\n _raiseIfNotStr(symbol)\n return _getJson('stock/' + symbol + '/company', token, version)", "docstring": "Company reference data\n\n https://iexcloud.io/docs/api/#company\n Updates at 4am and 5am UTC every day\n\nArgs:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: result", "source": "juraj_google_style"} -{"code": "def easeOutElastic(n, amplitude=1, period=0.3):\n\n _checkRange(n)\n\n if amplitude < 1:\n amplitude = 1\n s = period / 4\n else:\n s = period / (2 * math.pi) * math.asin(1 / amplitude)\n\n return amplitude * 2**(-10*n) * math.sin((n-s)*(2*math.pi / period)) + 1", "docstring": "An elastic tween function that overshoots the destination and then \"rubber bands\" into the destination.\n\nArgs:\n n (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj_google_style"} -{"code": "def subscribe(self, callback, filter_):\n\n sub_id = \"subscriber_{uuid}\".format(uuid=uuid.uuid4())\n # register subscriber in manager\n sub = pd.DataFrame({sub_id: filter_}).T\n sub['callback'] = callback\n self.subscribers = self.subscribers.append(sub)\n\n # find metrics for subscriber using `filter`\n this_subscriber_metrics = self.__filter(self.metrics_meta, filter_)\n if this_subscriber_metrics.empty:\n logger.debug('Metrics for subscriber %s not found', sub_id)\n else:\n logger.debug('Found metrics for this subscriber, subscribing...: %s', this_subscriber_metrics)\n # attach this sub callback to discovered metrics and select id <-> callbacks\n this_subscriber_metrics['callback'] = callback\n prepared_callbacks = this_subscriber_metrics[['callback']]\n # add this subscriber callbacks to DataManager's callbacks\n self.callbacks = self.callbacks.append(prepared_callbacks)", "docstring": "Create and register metric subscriber,\n find metrics for this subscriber (using filter_) and subscribe\n\nArgs:\n callback (object method): subscriber's callback\n filter_ (dict): filter dict\n\n filter sample:\n {'type': 'metrics', 'source': 'gun'}", "source": "juraj_google_style"} -{"code": "def _compress_json(self, j):\n\n compressed_json = copy.copy(j)\n compressed_json.pop('users', None)\n\n compressed_data = zlib.compress(\n json.dumps(j['users']).encode('utf-8'),\n self.zlib_compression_strength\n )\n b64_data = base64.b64encode(compressed_data).decode('utf-8')\n\n compressed_json['blob'] = b64_data\n\n return compressed_json", "docstring": "Compress the BLOB data portion of the usernotes.\n\nArgs:\n j: the JSON in Schema v5 format (dict)\n\n Returns a dict with the 'users' key removed and 'blob' key added", "source": "juraj_google_style"} -{"code": "def plot_stacked_hist(self, key=\"wall_time\", nmax=5, ax=None, **kwargs):\n\n ax, fig, plt = get_ax_fig_plt(ax=ax)\n\n mpi_rank = \"0\"\n timers = self.timers(mpi_rank=mpi_rank)\n n = len(timers)\n\n names, values = [], []\n rest = np.zeros(n)\n\n for idx, sname in enumerate(self.section_names(ordkey=key)):\n sections = self.get_sections(sname)\n 
svals = np.asarray([s.__dict__[key] for s in sections])\n if idx < nmax:\n names.append(sname)\n values.append(svals)\n else:\n rest += svals\n\n names.append(\"others (nmax=%d)\" % nmax)\n values.append(rest)\n\n # The dataset is stored in values. Now create the stacked histogram.\n ind = np.arange(n) # the locations for the groups\n width = 0.35 # the width of the bars\n colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']\n\n bars = []\n bottom = np.zeros(n)\n for idx, vals in enumerate(values):\n color = colors[idx]\n bar = ax.bar(ind, vals, width, color=color, bottom=bottom)\n bars.append(bar)\n bottom += vals\n\n ax.set_ylabel(key)\n ax.set_title(\"Stacked histogram with the %d most important sections\" % nmax)\n\n ticks = ind + width / 2.0\n labels = [\"MPI=%d, OMP=%d\" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels, rotation=15)\n\n # Add legend.\n ax.legend([bar[0] for bar in bars], names, loc=\"best\")\n\n return fig", "docstring": "Plot stacked histogram of the different timers.\n\nArgs:\n key: Keyword used to extract data from the timers. Only the first `nmax`\n sections with largest value are show.\n mmax: Maximum nuber of sections to show. Other entries are grouped together\n in the `others` section.\n ax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns:\n `matplotlib` figure", "source": "juraj_google_style"} -{"code": "def locale(self, value):\n\n if value == self._defaults['ai.device.locale'] and 'ai.device.locale' in self._values:\n del self._values['ai.device.locale']\n else:\n self._values['ai.device.locale'] = value", "docstring": "The locale property.\n\nArgs:\n value (string). the property value.", "source": "juraj_google_style"} -{"code": "def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'):\n\n for artifact_definition in artifacts_reader.ReadDirectory(\n path, extension=extension):\n self.RegisterDefinition(artifact_definition)", "docstring": "Reads artifact definitions into the registry from files in a directory.\n\n This function does not recurse sub directories.\n\nArgs:\n artifacts_reader (ArtifactsReader): an artifacts reader.\n path (str): path of the directory to read from.\n extension (Optional[str]): extension of the filenames to read.\n\nRaises:\n KeyError: if a duplicate artifact definition is encountered.", "source": "juraj_google_style"} -{"code": "def __init__(self, value, data_type, masks=None, name='Secret Data'):\n\n super(SecretData, self).__init__()\n\n self._object_type = enums.ObjectType.SECRET_DATA\n\n self.value = value\n self.data_type = data_type\n self.names = [name]\n\n if masks:\n self.cryptographic_usage_masks = masks\n\n # All remaining attributes are not considered part of the public API\n # and are subject to change.\n\n # The following attributes are placeholders for attributes that are\n # unsupported by kmip.core\n\n self.validate()", "docstring": "Create a SecretData object.\n\nArgs:\n value(bytes): The bytes representing secret data.\n data_type(SecretDataType): An enumeration defining the type of the\n secret value.\n masks(list): A list of CryptographicUsageMask enumerations\n defining how the key will be used.\n name(string): The string name of the key.", "source": "juraj_google_style"} -{"code": "def report(self, verbose=1):\n\n lines = []\n if verbose >= 2:\n # use a multi-line format for high verbosity\n lines.append(self._status_line(tense='past'))\n if verbose >= 3:\n unit, mag = _choose_unit(self.total_time, self.unit,\n 
self._asciimode)\n lines.append(' body took: {total:.{pr}{t}} {unit}'.format(\n total=self.total_time / mag,\n t=self._precision_type,\n pr=self._precision, unit=unit))\n lines.append(' time per loop: {}'.format(self._seconds_str()))\n else:\n # use a single-line format for low verbosity\n line = 'Timed ' + self._seconds_str()\n if self.label:\n line += ' for ' + self.label\n lines.append(line)\n text = '\\n'.join(lines)\n return text", "docstring": "Creates a human readable report\n\nArgs:\n verbose (int): verbosity level. Either 1, 2, or 3.\n\nReturns:\n str: the report\n\n SeeAlso:\n timerit.Timerit.print\n\nExample:\n >>> import math\n >>> ti = Timerit(num=1).call(math.factorial, 5)\n >>> print(ti.report(verbose=1))\n Timed best=...s, mean=...s", "source": "juraj_google_style"} -{"code": "def torque_on(self):\n\n data = []\n data.append(0x0A)\n data.append(self.servoid)\n data.append(RAM_WRITE_REQ)\n data.append(TORQUE_CONTROL_RAM)\n data.append(0x01)\n data.append(0x60)\n send_data(data)", "docstring": "Enable the torques of Herkulex\n\n In this mode, position control and velocity control\n will work.\n\nArgs:\n none", "source": "juraj_google_style"} -{"code": "def Page_deleteCookie(self, cookieName, url):\n\n\t\tassert isinstance(cookieName, (str,)\n\t\t ), \"Argument 'cookieName' must be of type '['str']'. Received type: '%s'\" % type(\n\t\t cookieName)\n\t\tassert isinstance(url, (str,)\n\t\t ), \"Argument 'url' must be of type '['str']'. Received type: '%s'\" % type(\n\t\t url)\n\t\tsubdom_funcs = self.synchronous_command('Page.deleteCookie', cookieName=\n\t\t cookieName, url=url)\n\t\treturn subdom_funcs", "docstring": "Function path: Page.deleteCookie\n Domain: Page\n Method name: deleteCookie\n\n WARNING: This function is marked 'Experimental'!\n\n Parameters:\n Required arguments:\n 'cookieName' (type: string) -> Name of the cookie to remove.\n 'url' (type: string) -> URL to match cooke domain and path.\n No return value.\n\n Description: Deletes browser cookie with given name, domain and path.", "source": "juraj_google_style"} -{"code": "def __init__(self, data, limit=None):\n\n self._data = data\n self._limit = limit", "docstring": "Initialise the Action object.\n\nArgs:\n data (MultiTaskData): The processed data from the task that should be passed\n on to successor tasks.\n limit (list): A list of names of all immediate successor tasks that\n should be executed.", "source": "juraj_google_style"} -{"code": "def destroy_connection(self, connection):\n\n\n log.debug(\"Destroying connection at <{0}>\".format(hex(id(connection))))\n self._decontextualise_connection(connection)\n connection.unbind()", "docstring": "Destroys a connection. 
Removes the connection from the appcontext, and\n unbinds it.\n\nArgs:\n connection (ldap3.Connection): The connnection to destroy", "source": "juraj_google_style"} -{"code": "def plot_projectors(self, ax=None, fontsize=12, **kwargs):\n\n ax, fig, plt = get_ax_fig_plt(ax)\n title = kwargs.pop(\"title\", \"Projectors\")\n ax.grid(True)\n ax.set_xlabel('r [Bohr]')\n ax.set_ylabel(r\"$r\\tilde p\\, [Bohr]^{-\\frac{1}{2}}$\")\n\n #ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle=\"--\")\n #ax.annotate(\"$r_c$\", xy=(self.paw_radius + 0.1, 0.1))\n\n for state, rfunc in self.projector_functions.items():\n ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label=\"TPROJ: \" + state)\n\n ax.legend(loc=\"best\", shadow=True, fontsize=fontsize)\n\n return fig", "docstring": "Plot the PAW projectors.\n\nArgs:\n ax: matplotlib :class:`Axes` or None if a new figure should be created.\n\n Returns: `matplotlib` figure", "source": "juraj_google_style"} -{"code": "def load(self, profile_args):\n\n for key, value in profile_args.items():\n self.add(key, value)", "docstring": "Load provided CLI Args.\n\nArgs:\n args (dict): Dictionary of args in key/value format.", "source": "juraj_google_style"} -{"code": "def _has_old_request_ended(self, shard_state):\n\n assert shard_state.slice_start_time is not None\n assert shard_state.slice_request_id is not None\n request_ids = [shard_state.slice_request_id]\n logs = None\n try:\n logs = list(logservice.fetch(request_ids=request_ids))\n except (apiproxy_errors.FeatureNotEnabledError,\n apiproxy_errors.CapabilityDisabledError) as e:\n # Managed VMs do not have access to the logservice API\n # See https://groups.google.com/forum/#!topic/app-engine-managed-vms/r8i65uiFW0w\n logging.warning(\"Ignoring exception: %s\", e)\n\n if not logs or not logs[0].finished:\n return False\n return True", "docstring": "Whether previous slice retry has ended according to Logs API.\n\nArgs:\n shard_state: shard state.\n\nReturns:\n True if the request of previous slice retry has ended. 
False if it has\n not or unknown.", "source": "juraj_google_style"} -{"code": "def diffuse_horizontal_illuminance(self, value=999999.0):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type float '\n 'for field `diffuse_horizontal_illuminance`'.format(value))\n if value < 0.0:\n raise ValueError('value need to be greater or equal 0.0 '\n 'for field `diffuse_horizontal_illuminance`')\n\n self._diffuse_horizontal_illuminance = value", "docstring": "Corresponds to IDD Field `diffuse_horizontal_illuminance`\n will be missing if >= 999900\n\nArgs:\n value (float): value for IDD Field `diffuse_horizontal_illuminance`\n Unit: lux\n value >= 0.0\n Missing value: 999999.0\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def partial_derivative(self, X, y=0):\n\n self.check_fit()\n\n U, V = self.split_matrix(X)\n\n if self.theta == 0:\n return V\n\n else:\n num = np.multiply(self._g(U), self._g(V)) + self._g(U)\n den = np.multiply(self._g(U), self._g(V)) + self._g(1)\n return (num / den) - y", "docstring": "Compute partial derivative :math:`C(u|v)` of cumulative distribution.\n\nArgs:\n X: `np.ndarray`\n y: `float`\n\nReturns:\n np.ndarray", "source": "juraj_google_style"} -{"code": "def match_variables(self, pattern, return_type='name'):\n\n pattern = re.compile(pattern)\n vars_ = [v for v in self.variables.values() if pattern.search(v.name)]\n return vars_ if return_type.startswith('var') \\\n else [v.name for v in vars_]", "docstring": "Return columns whose names match the provided regex pattern.\n\nArgs:\n pattern (str): A regex pattern to match all variable names against.\n return_type (str): What to return. 
Must be one of:\n 'name': Returns a list of names of matching variables.\n 'variable': Returns a list of Variable objects whose names\n match.", "source": "juraj_google_style"} -{"code": "def handle_api_static_request(self, request, start_response):\n\n if request.path == PROXY_PATH:\n return util.send_wsgi_response('200 OK',\n [('Content-Type',\n 'text/html')],\n PROXY_HTML, start_response)\n else:\n _logger.debug('Unknown static url requested: %s',\n request.relative_url)\n return util.send_wsgi_response('404 Not Found', [('Content-Type',\n 'text/plain')], 'Not Found',\n start_response)", "docstring": "Handler for requests to {base_path}/static/.*.\n\n This calls start_response and returns the response body.\n\nArgs:\n request: An ApiRequest, the request from the user.\n start_response: A function with semantics defined in PEP-333.\n\nReturns:\n A string containing the response body.", "source": "juraj_google_style"} -{"code": "def delete_cookies():\n\n\n cookies = dict(request.args.items())\n r = app.make_response(redirect(url_for(\"view_cookies\")))\n for key, value in cookies.items():\n r.delete_cookie(key=key)\n\n return r", "docstring": "Deletes cookie(s) as provided by the query string and redirects to cookie list.\n ---\n tags:\n - Cookies\n parameters:\n - in: query\n name: freeform\n explode: true\n allowEmptyValue: true\n schema:\n type: object\n additionalProperties:\n type: string\n style: form\n produces:\n - text/plain\n responses:\n 200:\n description: Redirect to cookie list", "source": "juraj_google_style"} -{"code": "def object_download(self, bucket, key, start_offset=0, byte_count=None):\n\n args = {'alt': 'media'}\n headers = {}\n if start_offset > 0 or byte_count is not None:\n header = 'bytes=%d-' % start_offset\n if byte_count is not None:\n header += '%d' % byte_count\n headers['Range'] = header\n url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))\n return google.datalab.utils.Http.request(url, args=args, headers=headers,\n credentials=self._credentials, raw_response=True)", "docstring": "Reads the contents of an object as text.\n\nArgs:\n bucket: the name of the bucket containing the object.\n key: the key of the object to be read.\n start_offset: the start offset of bytes to read.\n byte_count: the number of bytes to read. If None, it reads to the end.\n\nReturns:\n The text content within the object.\n\nRaises:\n Exception if the object could not be read from.", "source": "juraj_google_style"} -{"code": "def stitch_images(images, margin=5, cols=5):\n\n if len(images) == 0:\n return None\n\n h, w, c = images[0].shape\n n_rows = int(math.ceil(len(images) / cols))\n n_cols = min(len(images), cols)\n\n out_w = n_cols * w + (n_cols - 1) * margin\n out_h = n_rows * h + (n_rows - 1) * margin\n stitched_images = np.zeros((out_h, out_w, c), dtype=images[0].dtype)\n\n for row in range(n_rows):\n for col in range(n_cols):\n img_idx = row * cols + col\n if img_idx >= len(images):\n break\n\n stitched_images[(h + margin) * row: (h + margin) * row + h,\n (w + margin) * col: (w + margin) * col + w, :] = images[img_idx]\n\n return stitched_images", "docstring": "Utility function to stitch images together with a `margin`.\n\nArgs:\n images: The array of 2D images to stitch.\n margin: The black border margin size between images (Default value = 5)\n cols: Max number of image cols. 
New row is created when number of images exceed the column size.\n (Default value = 5)\n\nReturns:\n A single numpy image array comprising of input images.", "source": "juraj_google_style"} -{"code": "def _count_objs(self, obj, path=None, **kwargs):\n\n sub_val = None\n # pdb.set_trace()\n if isinstance(obj, dict):\n for key, value in obj.items():\n if isinstance(value, (list, dict)):\n kwargs = self._count_objs(value,\n self.make_path(key, path),\n **kwargs)\n else:\n if self.make_path(key, path) == self.sub_total:\n # pdb.set_trace()\n sub_val = value\n kwargs['current'] = self._increment_prop(key,\n path,\n **kwargs)\n elif isinstance(obj, list):\n for item in obj:\n if isinstance(item, (list, dict)):\n kwargs = self._count_objs(item, path, **kwargs)\n else:\n if path == self.sub_total:\n pdb.set_trace()\n sub_val = item\n kwargs['current'] = self._increment_prop(path, **kwargs)\n else:\n kwargs['current'] = self._increment_prop(path, **kwargs)\n if path == self.sub_total:\n pdb.set_trace()\n sub_val = item\n if kwargs.get('sub_val') is None:\n kwargs['sub_val'] = sub_val\n return kwargs", "docstring": "cycles through the object and adds in count values\n\nArgs:\n -----\n obj: the object to parse\n path: the current path\n\n kwargs:\n -------\n current: a dictionary of counts for current call\n sub_val: the value to use for subtotal aggregation", "source": "juraj_google_style"} -{"code": "def _check_wires_list(self, wires, node):\n\n if len(set(wires)) != len(wires):\n raise DAGCircuitError(\"duplicate wires\")\n\n wire_tot = len(node.qargs) + len(node.cargs)\n if node.condition is not None:\n wire_tot += node.condition[0].size\n\n if len(wires) != wire_tot:\n raise DAGCircuitError(\"expected %d wires, got %d\"\n % (wire_tot, len(wires)))", "docstring": "Check that a list of wires is compatible with a node to be replaced.\n\n - no duplicate names\n - correct length for operation\n Raise an exception otherwise.\n\nArgs:\n wires (list[register, index]): gives an order for (qu)bits\n in the input circuit that is replacing the node.\n node (DAGNode): a node in the dag\n\nRaises:\n DAGCircuitError: if check doesn't pass.", "source": "juraj_google_style"} -{"code": "def get_ip(self, access='public', addr_family=None, strict=None):\n\n if addr_family not in ['IPv4', 'IPv6', None]:\n raise Exception(\"`addr_family` must be 'IPv4', 'IPv6' or None\")\n\n if access not in ['private', 'public']:\n raise Exception(\"`access` must be 'public' or 'private'\")\n\n if not hasattr(self, 'ip_addresses'):\n self.populate()\n\n # server can have several public or private IPs\n ip_addrs = [\n ip_addr for ip_addr in self.ip_addresses\n if ip_addr.access == access\n ]\n\n # prefer addr_family (or IPv4 if none given)\n preferred_family = addr_family if addr_family else 'IPv4'\n for ip_addr in ip_addrs:\n if ip_addr.family == preferred_family:\n return ip_addr.address\n\n # any IP (of the right access) will do if available and addr_family is None\n return ip_addrs[0].address if ip_addrs and not addr_family else None", "docstring": "Return the server's IP address.\n\n Params:\n - addr_family: IPv4, IPv6 or None. 
None prefers IPv4 but will\n return IPv6 if IPv4 addr was not available.\n - access: 'public' or 'private'", "source": "juraj_google_style"} -{"code": "def isiterable(obj, reject_string=True):\n\n\n iterable = hasattr(obj, '__len__')\n\n if reject_string:\n iterable = iterable and not isinstance(obj, str)\n\n return iterable", "docstring": "convenience tool to detect if something is iterable.\n in python3, strings count as iterables to we have the option to exclude them\n\n Parameters:\n -----------\n obj : object to analyse\n reject_string : bool, whether to ignore strings\n\nReturns:\n --------\n bool, if the object is itereable.", "source": "juraj_google_style"} -{"code": "def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None:\n\n\n self.mglo.bind_to_uniform_block(binding, offset, size)", "docstring": "Bind the buffer to a uniform block.\n\nArgs:\n binding (int): The uniform block binding.\n\n Keyword Args:\n offset (int): The offset.\n size (int): The size. Value ``-1`` means all.", "source": "juraj_google_style"} -{"code": "def set_messages(self, messages: Sequence[CachedMessage]) -> None:\n\n uids = {msg.uid for msg in messages}\n expunged = self._messages._uids - uids\n return self.add_updates(messages, expunged)", "docstring": "This is the non-optimized alternative to :meth:`.add_updates` for\n backend implementations that cannot detect their own updates and must\n instead compare the entire state of the mailbox.\n\n The ``messages`` list should contain the entire set of messages in the\n mailbox, ordered by UID. Any UID that previously existed and is not\n included in ``messages`` will be expunged.\n\nArgs:\n messages: The entire set of cached message objects.", "source": "juraj_google_style"} -{"code": "def _update_belief(self, belief_prop, clique, clique_potential, message=None):\n\n old_factor = belief_prop.junction_tree.get_factors(clique)\n belief_prop.junction_tree.remove_factors(old_factor)\n if message:\n if message.scope() and clique_potential.scope():\n new_factor = old_factor * message\n new_factor = new_factor / clique_potential\n else:\n new_factor = old_factor\n else:\n new_factor = old_factor * clique_potential\n belief_prop.junction_tree.add_factors(new_factor)\n belief_prop.calibrate()", "docstring": "Method for updating the belief.\n\n Parameters:\n ----------\n belief_prop: Belief Propagation\n Belief Propagation which needs to be updated.\n\n in_clique: clique\n The factor which needs to be updated corresponding to the input clique.\n\n out_clique_potential: factor\n Multiplying factor which will be multiplied to the factor corresponding to the clique.", "source": "juraj_google_style"} -{"code": "def check_denotation(target_values, predicted_values):\n\n # Check size\n if len(target_values) != len(predicted_values):\n return False\n # Check items\n for target in target_values:\n if not any(target.match(pred) for pred in predicted_values):\n return False\n return True", "docstring": "Return True if the predicted denotation is correct.\n\nArgs:\n target_values (list[Value])\n predicted_values (list[Value])\n\nReturns:\n bool", "source": "juraj_google_style"} -{"code": "def add_feature(self, obj=None, geometry=None, properties=None):\n\n properties = properties or {}\n if isinstance(obj, Feature):\n # instead of creating copy, the original feat should reference the same one that was added here\n feat = obj._data\n elif isinstance(obj, dict):\n feat = obj.copy()\n else:\n feat = Feature(geometry=geometry, 
properties=properties).__geo_interface__\n self._data[\"features\"].append(feat)", "docstring": "Adds a given feature. If obj isn't specified, geometry and properties can be set as arguments directly.\n\n Parameters:\n\n - **obj**: Another feature instance, an object with the \\_\\_geo_interface__ or a geojson dictionary of the Feature type.\n - **geometry** (optional): Anything that the Geometry instance can accept.\n - **properties** (optional): A dictionary of key-value property pairs.", "source": "juraj_google_style"} -{"code": "def dict_from_file(filename, key_type=str):\n\n mapping = {}\n with open(filename, 'r') as f:\n for line in f:\n items = line.rstrip('\\n').split()\n assert len(items) >= 2\n key = key_type(items[0])\n val = items[1:] if len(items) > 2 else items[1]\n mapping[key] = val\n return mapping", "docstring": "Load a text file and parse the content as a dict.\n\n Each line of the text file will be two or more columns splited by\n whitespaces or tabs. The first column will be parsed as dict keys, and\n the following columns will be parsed as dict values.\n\nArgs:\n filename(str): Filename.\n key_type(type): Type of the dict's keys. str is user by default and\n type conversion will be performed if specified.\n\nReturns:\n dict: The parsed contents.", "source": "juraj_google_style"} -{"code": "def get_effect_class(self, effect_name: str, package_name: str = None) -> Type['Effect']:\n\n return self._project.get_effect_class(effect_name, package_name=package_name)", "docstring": "Get an effect class by the class name\n\nArgs:\n effect_name (str): Name of the effect class\n\n Keyword Args:\n package_name (str): The package the effect belongs to. This is optional and only\n needed when effect class names are not unique.\n\nReturns:\n :py:class:`Effect` class", "source": "juraj_google_style"} -{"code": "def gen_ordered_statistics(transaction_manager, record):\n\n items = record.items\n for combination_set in combinations(sorted(items), len(items) - 1):\n items_base = frozenset(combination_set)\n items_add = frozenset(items.difference(items_base))\n confidence = (\n record.support / transaction_manager.calc_support(items_base))\n lift = confidence / transaction_manager.calc_support(items_add)\n yield OrderedStatistic(\n frozenset(items_base), frozenset(items_add), confidence, lift)", "docstring": "Returns a generator of ordered statistics as OrderedStatistic instances.\n\nArgs:\n transaction_manager -- Transactions as a TransactionManager instance.\n record -- A support record as a SupportRecord instance.", "source": "juraj_google_style"} -{"code": "def get_comments(self, sharekey=None):\n\n if not sharekey:\n raise Exception(\n \"You must specify a sharekey of the file you\"\n \"want to 'like'.\")\n\n endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)\n\n data = self._make_request(\"GET\", endpoint=endpoint)\n\n return [Comment.NewFromJSON(c) for c in data['comments']]", "docstring": "Retrieve comments on a SharedFile\n\nArgs:\n sharekey (str): Sharekey for the file from which you want to return\n the set of comments.\n\nReturns:\n List of Comment objects.", "source": "juraj_google_style"} -{"code": "def ch_start_time(self, *channels: List[Channel]) -> int:\n\n return self.timeslots.ch_start_time(*channels)", "docstring": "Return minimum start time for supplied channels.\n\nArgs:\n *channels: Supplied channels", "source": "juraj_google_style"} -{"code": "def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs):\n\n if not 
sparse_tensor_proto.HasField(\"named_tuple\"):\n raise base_errors.ModuleInfoError(\n \"Error while deserializing a SparseTensor: expected proto tuple.\")\n if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME:\n raise base_errors.ModuleInfoError(\n \"Error while deserializing a SparseTensor: The name of the tuple \"\n \"should have been {} but was {}.\".format(\n _SPARSE_TENSOR_NAME, sparse_tensor_proto.named_tuple.name))\n named_tuple_map = sparse_tensor_proto.named_tuple.map\n return tf.SparseTensor(\n indices=process_leafs(named_tuple_map[\"indices\"].value),\n values=process_leafs(named_tuple_map[\"values\"].value),\n dense_shape=process_leafs(named_tuple_map[\"dense_shape\"].value))", "docstring": "Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.\n\nArgs:\n sparse_tensor_proto: A proto representing a `tf.SparseTensor`.\n process_leafs: A function to be applied to the leaf valued of the nested\n structure.\n\nReturns:\n An instance of `tf.SparseTensor`.", "source": "juraj_google_style"} -{"code": "def plot_state_qsphere(rho, figsize=None):\n\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed.')\n rho = _validate_input_state(rho)\n if figsize is None:\n figsize = (7, 7)\n num = int(np.log2(len(rho)))\n # get the eigenvectors and eigenvalues\n we, stateall = linalg.eigh(rho)\n for _ in range(2**num):\n # start with the max\n probmix = we.max()\n prob_location = we.argmax()\n if probmix > 0.001:\n # get the max eigenvalue\n state = stateall[:, prob_location]\n loc = np.absolute(state).argmax()\n # get the element location closes to lowest bin representation.\n for j in range(2**num):\n test = np.absolute(np.absolute(state[j]) -\n np.absolute(state[loc]))\n if test < 0.001:\n loc = j\n break\n # remove the global phase\n angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)\n angleset = np.exp(-1j*angles)\n # print(state)\n # print(angles)\n state = angleset*state\n # print(state)\n state.flatten()\n # start the plotting\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n ax.axes.set_xlim3d(-1.0, 1.0)\n ax.axes.set_ylim3d(-1.0, 1.0)\n ax.axes.set_zlim3d(-1.0, 1.0)\n ax.set_aspect(\"equal\")\n ax.axes.grid(False)\n # Plot semi-transparent sphere\n u = np.linspace(0, 2 * np.pi, 25)\n v = np.linspace(0, np.pi, 25)\n x = np.outer(np.cos(u), np.sin(v))\n y = np.outer(np.sin(u), np.sin(v))\n z = np.outer(np.ones(np.size(u)), np.cos(v))\n ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k',\n alpha=0.05, linewidth=0)\n # wireframe\n # Get rid of the panes\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n # Get rid of the spines\n ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n # Get rid of the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n\n d = num\n for i in range(2**num):\n # get x,y,z points\n element = bin(i)[2:].zfill(num)\n weight = element.count(\"1\")\n zvalue = -2 * weight / d + 1\n number_of_divisions = n_choose_k(d, weight)\n weight_order = bit_string_index(element)\n # if weight_order >= number_of_divisions / 2:\n # com_key = compliment(element)\n # weight_order_temp = bit_string_index(com_key)\n # weight_order = np.floor(\n # number_of_divisions / 2) + weight_order_temp + 1\n angle = weight_order * 2 * np.pi / number_of_divisions\n xvalue = np.sqrt(1 - zvalue**2) * 
np.cos(angle)\n yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)\n ax.plot([xvalue], [yvalue], [zvalue],\n markerfacecolor=(.5, .5, .5),\n markeredgecolor=(.5, .5, .5),\n marker='o', markersize=10, alpha=1)\n # get prob and angle - prob will be shade and angle color\n prob = np.real(np.dot(state[i], state[i].conj()))\n colorstate = phase_to_color_wheel(state[i])\n a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue],\n mutation_scale=20, alpha=prob, arrowstyle=\"-\",\n color=colorstate, lw=10)\n ax.add_artist(a)\n # add weight lines\n for weight in range(d + 1):\n theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)\n z = -2 * weight / d + 1\n r = np.sqrt(1 - z**2)\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n ax.plot(x, y, z, color=(.5, .5, .5))\n # add center point\n ax.plot([0], [0], [0], markerfacecolor=(.5, .5, .5),\n markeredgecolor=(.5, .5, .5), marker='o', markersize=10,\n alpha=1)\n we[prob_location] = 0\n else:\n break\n plt.tight_layout()\n plt.close(fig)\n return fig", "docstring": "Plot the qsphere representation of a quantum state.\n\nArgs:\n rho (ndarray): State vector or density matrix representation.\n of quantum state.\n figsize (tuple): Figure size in inches.\n\nReturns:\n Figure: A matplotlib figure instance.\n\nRaises:\n ImportError: Requires matplotlib.", "source": "juraj_google_style"} -{"code": "def write_tarball(voevents, filepath):\n\n tuple_gen = ( (v.ivorn, v.xml) for v in voevents)\n return write_tarball_from_ivorn_xml_tuples(tuple_gen,\n filepath)", "docstring": "Iterate over voevent models / dbrows and write to bz'd tarball.\n\nArgs:\n voevents (iterable): An iterable (e.g. list) of e.g. Voevent db-rows,\n with access to the 'ivorn' and 'xml' attributes.\n filepath (string): Path to the new tarball to create. Typically of form\n '/path/to/foo.tar.bz2'\n Returns\n packet_count (int): Number of packets written to tarball", "source": "juraj_google_style"} -{"code": "def simple_generate_batch(cls, create, size, **kwargs):\n\n strategy = enums.CREATE_STRATEGY if create else enums.BUILD_STRATEGY\n return cls.generate_batch(strategy, size, **kwargs)", "docstring": "Generate a batch of instances.\n\n These instances will be either 'built' or 'created'.\n\nArgs:\n size (int): the number of instances to generate\n create (bool): whether to 'build' or 'create' the instances.\n\nReturns:\n object list: the generated instances", "source": "juraj_google_style"} -{"code": "def add_shadow(img, vertices_list):\n\n non_rgb_warning(img)\n input_dtype = img.dtype\n needs_float = False\n\n if input_dtype == np.float32:\n img = from_float(img, dtype=np.dtype('uint8'))\n needs_float = True\n elif input_dtype not in (np.uint8, np.float32):\n raise ValueError('Unexpected dtype {} for RandomSnow augmentation'.format(input_dtype))\n\n image_hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n mask = np.zeros_like(img)\n\n # adding all shadow polygons on empty mask, single 255 denotes only red channel\n for vertices in vertices_list:\n cv2.fillPoly(mask, vertices, 255)\n\n # if red channel is hot, image's \"Lightness\" channel's brightness is lowered\n red_max_value_ind = mask[:, :, 0] == 255\n image_hls[:, :, 1][red_max_value_ind] = image_hls[:, :, 1][red_max_value_ind] * 0.5\n\n image_rgb = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)\n\n if needs_float:\n image_rgb = to_float(image_rgb, max_value=255)\n\n return image_rgb", "docstring": "Add shadows to the image.\n\n From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library\n\nArgs:\n img (np.array):\n vertices_list (list):\n\n 
Returns:", "source": "juraj_google_style"} -{"code": "def PrintFieldValue(self, field, value):\n\n out = self.out\n if self.pointy_brackets:\n openb = '<'\n closeb = '>'\n else:\n openb = '{'\n closeb = '}'\n\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n if self.as_one_line:\n out.write(' %s ' % openb)\n self.PrintMessage(value)\n out.write(closeb)\n else:\n out.write(' %s\\n' % openb)\n self.indent += 2\n self.PrintMessage(value)\n self.indent -= 2\n out.write(' ' * self.indent + closeb)\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:\n enum_value = field.enum_type.values_by_number.get(value, None)\n if enum_value is not None:\n out.write(enum_value.name)\n else:\n out.write(str(value))\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:\n out.write('\\\"')\n if isinstance(value, six.text_type):\n out_value = value.encode('utf-8')\n else:\n out_value = value\n if field.type == descriptor.FieldDescriptor.TYPE_BYTES:\n # We need to escape non-UTF8 chars in TYPE_BYTES field.\n out_as_utf8 = False\n else:\n out_as_utf8 = self.as_utf8\n out.write(text_encoding.CEscape(out_value, out_as_utf8))\n out.write('\\\"')\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:\n if value:\n out.write('true')\n else:\n out.write('false')\n elif field.cpp_type in _FLOAT_TYPES and self.float_format is not None:\n out.write('{1:{0}}'.format(self.float_format, value))\n else:\n out.write(str(value))", "docstring": "Print a single field value (not including name).\n\n For repeated fields, the value should be a single element.\n\nArgs:\n field: The descriptor of the field to be printed.\n value: The value of the field.", "source": "juraj_google_style"} -{"code": "def import_tf_tensor(self, x, tf_x):\n\n return self.LaidOutTensor(self.make_slices(tf_x, x.shape))", "docstring": "Import a tf.Tensor, producing a LaidOutTensor.\n\nArgs:\n x: a Tensor\n tf_x: a tf.Tensor\n\nReturns:\n a LaidOutTensor", "source": "juraj_google_style"} -{"code": "def read32(self, offset):\n\n if not isinstance(offset, (int, long)):\n raise TypeError(\"Invalid offset type, should be integer.\")\n\n offset = self._adjust_offset(offset)\n self._validate_offset(offset, 4)\n return struct.unpack(\"=L\", self.mapping[offset:offset + 4])[0]", "docstring": "Read 32-bits from the specified `offset` in bytes, relative to the\n base physical address of the MMIO region.\n\nArgs:\n offset (int, long): offset from base physical address, in bytes.\n\nReturns:\n int: 32-bit value read.\n\nRaises:\n TypeError: if `offset` type is invalid.\n ValueError: if `offset` is out of bounds.", "source": "juraj_google_style"} -{"code": "def set_column(self, X, column, value):\n\n\n if isinstance(X, pd.DataFrame):\n X.loc[:, column] = value\n\n else:\n X[:, column] = value\n\n return X", "docstring": "Sets a column on the matrix X with the given value.\n\nArgs:\n X: `numpy.ndarray` or `pandas.DataFrame`.\n column: `int` or `str`.\n value: `np.ndarray` with shape (1,)\n\nReturns:\n `np.ndarray` or `pandas.DataFrame` with the inserted column.", "source": "juraj_google_style"} -{"code": "def get_schema_descendant(\n self, route: SchemaRoute) -> Optional[SchemaNode]:\n\n node = self\n for p in route:\n node = node.get_child(*p)\n if node is None:\n return None\n return node", "docstring": "Return descendant schema node or ``None`` if not found.\n\nArgs:\n route: Schema route to the descendant node\n (relative to the receiver).", "source": "juraj_google_style"} -{"code": "def compress(d, output, 
fmt='gz', logger=None):\n\n if not logger:\n logger = log.get_logger('s3')\n if type(d) not in [list, tuple]:\n d = [d, ]\n d = [os.path.expanduser(_d) for _d in d]\n print_compress_info(d, output, compress, logger)\n if fmt.lower() == 'none':\n fmt = ''\n elif fmt.lower() not in ['gz', 'bz2']:\n logger.info('Compression option (\"{}\") is invalid.\\nFalling back to uncompressed.'.format(fmt))\n fmt = ''\n output = os.path.expanduser(output)\n tar = tarfile.open(output, 'w:{}'.format(fmt))\n for obj in d:\n tar.add(obj)\n tar.close()\n return output", "docstring": "Creates a compressed/uncompressed tar file.\n\nArgs:\n d: Can be one of three things:\n\n 1. the path to a single file, as a string\n\n 2. the path to a single directory, as a string\n\n 3. an iterable of file or directory paths\n\n output (str): Output file path.\n\n fmt: Compression method. Options are ``'gz'`` (gzip),\n ``'bz2'`` (bzip2) and ``'none'`` (uncompressed). Default is ``'gz'``.", "source": "juraj_google_style"} -{"code": "def export_pipeline(url, pipeline_id, auth, verify_ssl):\n\n export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n if export_result.status_code == 404:\n logging.error('Pipeline not found: ' + pipeline_id)\n export_result.raise_for_status()\n return export_result.json()", "docstring": "Export the config and rules for a pipeline.\n\nArgs:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\nReturns:\n dict: the response json", "source": "juraj_google_style"} -{"code": "def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False):\n\n if halo_size == 0:\n return x\n\n block_size = block_size_dim.size\n partial_size = halo_size % block_size\n num_complete_blocks = halo_size // block_size\n parts = [x]\n\n for i in xrange(1, num_complete_blocks + 1):\n parts = ([shift(x, i, blocks_dim, wrap)] + parts +\n [shift(x, -i, blocks_dim, wrap)])\n if partial_size > 0:\n left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name)\n right_margin = mtf_slice(\n x, block_size_dim.size - partial_size, partial_size,\n block_size_dim.name)\n parts = (\n [shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)]\n + parts +\n [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)])\n return concat(parts, block_size_dim.name)", "docstring": "Concat each block with the margins of adjacent blocks.\n\n Get left and right blocks_dim and concatenate along block_size_dim.\n\nArgs:\n x: a Tensor.\n blocks_dim: a Dimension in x.shape\n block_size_dim: a Dimension in x.shape\n halo_size: an integer\n wrap: a boolean\n\nReturns:\n a Tensor with the same shape as x, other than in block_size_dim, whose\n size is increased by 2*halo_size.", "source": "juraj_google_style"} -{"code": "def WaitUntilNoFlowsToProcess(self, timeout=None):\n\n t = self.flow_handler_thread\n if not t:\n return\n\n start_time = time.time()\n while True:\n with self.lock:\n # If the thread is dead, or there are no requests\n # to be processed/being processed, we stop waiting\n # and return from the function.\n if (not t.isAlive() or\n (not self._GetFlowRequestsReadyForProcessing() and\n not self.flow_handler_num_being_processed)):\n return\n\n time.sleep(0.2)\n\n if timeout and time.time() - start_time > timeout:\n raise TimeOutWhileWaitingForFlowsToBeProcessedError(\n \"Flow processing didn't 
finish in time.\")", "docstring": "Waits until flow processing thread is done processing flows.\n\nArgs:\n timeout: If specified, is a max number of seconds to spend waiting.\n\nRaises:\n TimeOutWhileWaitingForFlowsToBeProcessedError: if timeout is reached.", "source": "juraj_google_style"} -{"code": "def get(self, secret_id):\n\n return self.prepare_model(self.client.api.inspect_secret(secret_id))", "docstring": "Get a secret.\n\nArgs:\n secret_id (str): Secret ID.\n\nReturns:\n (:py:class:`Secret`): The secret.\n\nRaises:\n :py:class:`docker.errors.NotFound`\n If the secret does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "juraj_google_style"} -{"code": "def tags(pode, leaf=False):\n\n fulltags = [tag for tag in pode[1]['tags']]\n if not leaf:\n return fulltags\n\n # longest first\n retn = []\n\n # brute force rather than build a tree. faster in small sets.\n for size, tag in sorted([(len(t), t) for t in fulltags], reverse=True):\n look = tag + '.'\n if any([r.startswith(look) for r in retn]):\n continue\n retn.append(tag)\n return retn", "docstring": "Get all the tags for a given node.\n\nArgs:\n pode (tuple): A packed node.\n leaf (bool): If True, only return the full tags.\n\nReturns:\n list: A list of tag strings.", "source": "juraj_google_style"} -{"code": "def filter_by_conditional_statement(self, statement):\n\n _filt_values, _filt_datetimes = self._filter_by_statement(statement)\n collection = HourlyDiscontinuousCollection(\n self.header.duplicate(), _filt_values, _filt_datetimes)\n collection._validated_a_period = True\n return collection", "docstring": "Filter the Data Collection based on a conditional statement.\n\nArgs:\n statement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).\n The variable should always be named as 'a' (without quotations).\n\nReturns:\n A new Data Collection containing only the filtered data", "source": "juraj_google_style"} -{"code": "def save_page(self, path=None):\n\n\n path = _prepare_path(path, \"html\")\n\n with open(path, \"wb\") as f:\n f.write(encode_string(self.body))\n\n return path", "docstring": "Save a snapshot of the page.\n\n If invoked without arguments, it will save a file to :data:`capybara.save_path` and the\n file will be given a randomly generated filename. 
If invoked with a relative path, the path\n will be relative to :data:`capybara.save_path`.\n\nArgs:\n path (str, optional): The path to where it should be saved.\n\nReturns:\n str: The path to which the file was saved.", "source": "juraj_google_style"} -{"code": "def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B):\n\n pos_A_in_B = pose_A_in_B[:3, 3]\n rot_A_in_B = pose_A_in_B[:3, :3]\n skew_symm = _skew_symmetric_translation(pos_A_in_B)\n force_B = rot_A_in_B.T.dot(force_A)\n torque_B = -rot_A_in_B.T.dot(skew_symm.dot(force_A)) + rot_A_in_B.T.dot(torque_A)\n return force_B, torque_B", "docstring": "Converts linear and rotational force at a point in frame A to the equivalent in frame B.\n\nArgs:\n force_A: 3-dim iterable for linear force in A\n torque_A: 3-dim iterable for rotational force (moment) in A\n pose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B\n\nReturns:\n force_B, torque_B: two numpy arrays of shape (3,) for the forces in B", "source": "juraj_google_style"} -{"code": "def broadcast_impl(self, old_slices, old_shape, new_shape):\n\n new_slice_shape = self.slice_shape(new_shape)\n def tf_fn(x):\n return (tf.zeros(new_slice_shape, dtype=x.dtype) +\n _expand_dims(x, old_shape, new_shape))\n return self.slicewise(tf_fn, old_slices)", "docstring": "Implementation of a broadcast operation.\n\nArgs:\n old_slices: LaidOutTensor.\n old_shape: Shape.\n new_shape: Shape.\n\nReturns:\n LaidOutTensor.", "source": "juraj_google_style"} -{"code": "def upgrade(self, remote=None):\n\n if self.enabled:\n raise errors.DockerError(\n 'Plugin must be disabled before upgrading.'\n )\n\n if remote is None:\n remote = self.name\n privileges = self.client.api.plugin_privileges(remote)\n for d in self.client.api.upgrade_plugin(self.name, remote, privileges):\n yield d\n self._reload()", "docstring": "Upgrade the plugin.\n\nArgs:\n remote (string): Remote reference to upgrade to. 
The\n ``:latest`` tag is optional and is the default if omitted.\n Default: this plugin's name.\n\nReturns:\n A generator streaming the decoded API logs", "source": "juraj_google_style"} -{"code": "def __init__(\n self, processing_configuration, enable_sigsegv_handler=False, **kwargs):\n\n super(MultiProcessBaseProcess, self).__init__(**kwargs)\n self._debug_output = False\n self._enable_sigsegv_handler = enable_sigsegv_handler\n self._guppy_memory_profiler = None\n self._log_filename = None\n self._memory_profiler = None\n self._original_sigsegv_handler = None\n # TODO: check if this can be replaced by self.pid or does this only apply\n # to the parent process?\n self._pid = None\n self._processing_configuration = processing_configuration\n self._process_information = None\n self._processing_profiler = None\n self._quiet_mode = False\n self._rpc_server = None\n self._serializers_profiler = None\n self._status_is_running = False\n self._storage_profiler = None\n self._tasks_profiler = None\n\n if self._processing_configuration:\n self._debug_output = self._processing_configuration.debug_output\n\n if processing_configuration.log_filename:\n log_path = os.path.dirname(self._processing_configuration.log_filename)\n log_filename = os.path.basename(\n self._processing_configuration.log_filename)\n log_filename = '{0:s}_{1:s}'.format(self._name, log_filename)\n self._log_filename = os.path.join(log_path, log_filename)\n\n # We need to share the RPC port number with the engine process.\n self.rpc_port = multiprocessing.Value('I', 0)", "docstring": "Initializes a process.\n\nArgs:\n processing_configuration (ProcessingConfiguration): processing\n configuration.\n enable_sigsegv_handler (Optional[bool]): True if the SIGSEGV handler\n should be enabled.\n kwargs (dict[str,object]): keyword arguments to pass to\n multiprocessing.Process.", "source": "juraj_google_style"} -{"code": "def get_context(self, max_frames=None, missing_entities=[]):\n\n if not max_frames or max_frames > len(self.frame_stack):\n max_frames = len(self.frame_stack)\n\n missing_entities = list(missing_entities)\n context = []\n for i in xrange(max_frames):\n frame_entities = [entity.copy() for entity in self.frame_stack[i].entities]\n for entity in frame_entities:\n entity['confidence'] = entity.get('confidence', 1.0) / (2.0 + i)\n context += frame_entities\n\n result = []\n if len(missing_entities) > 0:\n for entity in context:\n if entity.get('data') in missing_entities:\n result.append(entity)\n # NOTE: this implies that we will only ever get one\n # of an entity kind from context, unless specified\n # multiple times in missing_entities. 
Cannot get\n # an arbitrary number of an entity kind.\n missing_entities.remove(entity.get('data'))\n else:\n result = context\n\n return result", "docstring": "Constructs a list of entities from the context.\n\nArgs:\n max_frames(int): maximum number of frames to look back\n missing_entities(list of str): a list or set of tag names, as strings\n\nReturns:\n list: a list of entities", "source": "juraj_google_style"} -{"code": "def get_frequency_shift(\n self,\n grid_points,\n temperatures=np.arange(0, 1001, 10, dtype='double'),\n epsilons=None,\n output_filename=None):\n\n\n if self._interaction is None:\n self.set_phph_interaction()\n if epsilons is None:\n _epsilons = [0.1]\n else:\n _epsilons = epsilons\n self._grid_points = grid_points\n get_frequency_shift(self._interaction,\n self._grid_points,\n self._band_indices,\n _epsilons,\n temperatures,\n output_filename=output_filename,\n log_level=self._log_level)", "docstring": "Frequency shift from lowest order diagram is calculated.\n\nArgs:\n epslins(list of float):\n The value to avoid divergence. When multiple values are given\n frequency shifts for those values are returned.", "source": "juraj_google_style"} -{"code": "def Gamma(cls,\n shape: 'TensorFluent',\n scale: 'TensorFluent',\n batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:\n\n if shape.scope != scale.scope:\n raise ValueError('Gamma distribution: parameters must have same scope!')\n concentration = shape.tensor\n rate = 1 / scale.tensor\n dist = tf.distributions.Gamma(concentration, rate)\n batch = shape.batch or scale.batch\n if not batch and batch_size is not None:\n t = dist.sample(batch_size)\n batch = True\n else:\n t = dist.sample()\n scope = shape.scope.as_list()\n return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Gamma sampling op with given shape and scale parameters.\n\nArgs:\n shape: The shape parameter of the Gamma distribution.\n scale: The scale parameter of the Gamma distribution.\n batch_size: The size of the batch (optional).\n\nReturns:\n The Gamma distribution and a TensorFluent sample drawn from the distribution.\n\nRaises:\n ValueError: If parameters do not have the same scope.", "source": "juraj_google_style"} -{"code": "def disambiguate_text(self, text, language=None, entities=None):\n\n\n body = {\n \"text\": text,\n \"entities\": [],\n \"onlyNER\": \"false\",\n \"customisation\": \"generic\"\n }\n\n if language:\n body['language'] = {\"lang\": language}\n\n if entities:\n body['entities'] = entities\n\n result, status_code = self._process_query(body)\n\n if status_code != 200:\n logger.debug('Disambiguation failed.')\n\n return result, status_code", "docstring": "Call the disambiguation service in order to get meanings.\n\nArgs:\n text (str): Text to be disambiguated.\n language (str): language of text (if known)\n entities (list): list of entities or mentions to be supplied by\n the user.\n\nReturns:\n dict, int: API response and API status.", "source": "juraj_google_style"} -{"code": "def _auth(f):\n\n @wraps(f)\n def method(self, *args, **kwargs):\n if not self._auth_token or datetime.utcnow() >= self._last_auth + timedelta(minutes=10):\n # Need to get new jwt\n self.auth_refresh()\n\n return f(self, *args, **kwargs)\n return method", "docstring": "Makes sure the request has a valid authorization jwt before calling the wrapped function.\n It does this by checking the timestamp of the last jwt and if > 10 minutes have elapsed,\n it refreshes it's existing jwt from the 
server.\n\nArgs:\n f: Function to wrap\n\nReturns:\n Function, f", "source": "juraj_google_style"} -{"code": "def apply(self, func, workers=1, job_size=10000):\n\n if workers == 1:\n for lines in self.iter_chunks(job_size):\n yield func(lines)\n else:\n with ProcessPoolExecutor(max_workers=workers) as executor:\n for result in executor.map(func, self.iter_chunks(job_size)):\n yield result", "docstring": "Apply `func` to lines of text in parallel or sequential.\n\nArgs:\n func : a function that takes a list of lines.", "source": "juraj_google_style"} -{"code": "def _build_url(self, api_call):\n\n if self.api_version in ('1.13.0', '1.13.0+update.1', '1.13.0+update.2'):\n if '/' not in api_call:\n return \"{0}/{1}/index.json\".format(self.site_url, api_call)\n return \"{0}/{1}.json\".format(self.site_url, api_call)", "docstring": "Build request url.\n\n Parameters:\n api_call (str): Base API Call.\n\nReturns:\n Complete url (str).", "source": "juraj_google_style"} -{"code": "def __copy_extracted(self, path, destination):\n\n\n unpacked_dir = self.filename + '.unpacked'\n if not os.path.isdir(unpacked_dir):\n LOGGER.warn(\n 'Failed to copy extracted file %s, no extracted dir',\n path\n )\n\n return\n\n source_path = os.path.join(unpacked_dir, path)\n\n if not os.path.exists(source_path):\n LOGGER.warn(\n 'Failed to copy extracted file %s, does not exist',\n path\n )\n\n return\n\n destination_path = os.path.join(destination, path)\n shutil.copyfile(source_path, destination_path)", "docstring": "Copies a file that was already extracted to the destination directory.\n\nArgs:\n path (str):\n Relative (to the root of the archive) of the file to copy.\n\n destination (str):\n Directory to extract the archive to.", "source": "juraj_google_style"} -{"code": "def attention_bias_to_padding(attention_bias, cast_fn=tf.to_float):\n\n # `attention_bias` is a large negative number in padding positions and 0.0\n # elsewhere.\n return tf.squeeze(cast_fn(tf.less(attention_bias, -1)), axis=[1, 2])", "docstring": "Inverse of attention_bias_ignore_padding().\n\nArgs:\n attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as\n returned by attention_bias_ignore_padding().\n cast_fn: function used to cast to output type.\n\nReturns:\n a Tensor with shape [batch, memory_length] with 1.0 in padding positions\n and 0.0 in non-padding positions. Type is determined by cast_fn.", "source": "juraj_google_style"} -{"code": "def nCr(n, r):\n\n f = math.factorial\n return int(f(n) / f(r) / f(n-r))", "docstring": "Calculates nCr.\n\nArgs:\n n (int): total number of items.\n r (int): items to choose\n\nReturns:\n nCr.", "source": "juraj_google_style"} -{"code": "def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=''):\n\n key_flags = self._GetKeyFlagsForModule(module)\n if key_flags:\n self.__RenderModuleFlags(module, key_flags, output_lines, prefix)", "docstring": "Generates a help string for the key flags of a given module.\n\nArgs:\n module: A module object or a module name (a string).\n output_lines: A list of strings. 
The generated help message\n lines will be appended to this list.\n prefix: A string that is prepended to each generated help line.", "source": "juraj_google_style"} -{"code": "def get(cls, id_or_name, *, parent=None, namespace=None):\n\n return Key(cls, id_or_name, parent=parent, namespace=namespace).get()", "docstring": "Get an entity by id.\n\n Parameters:\n id_or_name(int or str): The entity's id.\n parent(anom.Key, optional): The entity's parent Key.\n namespace(str, optional): The entity's namespace.\n\nReturns:\n Model: An entity or ``None`` if the entity doesn't exist in\n Datastore.", "source": "juraj_google_style"} -{"code": "def __init__(\n self, name, data_type_definition, aliases=None, data_type=None,\n description=None, urls=None):\n\n super(StringDefinition, self).__init__(\n name, data_type_definition, aliases=aliases, data_type=data_type,\n description=description, urls=urls)\n self.encoding = 'ascii'", "docstring": "Initializes a string data type definition.\n\nArgs:\n name (str): name.\n data_type_definition (DataTypeDefinition): string element data type\n definition.\n aliases (Optional[list[str]]): aliases.\n data_type (Optional[str]): name of the string element data type.\n description (Optional[str]): description.\n urls (Optional[list[str]]): URLs.", "source": "juraj_google_style"} -{"code": "def CreateSubdivision(self, parent=None, value=None):\n\n division = {\n 'xsi_type': 'ProductPartition',\n 'partitionType': 'SUBDIVISION',\n 'id': str(self.next_id)\n }\n\n # The root has neither a parent nor a value.\n if parent is not None:\n division['parentCriterionId'] = parent['id']\n division['caseValue'] = value\n\n adgroup_criterion = {\n 'xsi_type': 'BiddableAdGroupCriterion',\n 'adGroupId': self.adgroup_id,\n 'criterion': division\n }\n\n self.CreateAddOperation(adgroup_criterion)\n self.next_id -= 1\n\n return division", "docstring": "Creates a subdivision node.\n\nArgs:\n parent: The node that should be this node's parent.\n value: The value being partitioned on.\n\nReturns:\n A new subdivision node.", "source": "juraj_google_style"} -{"code": "def update_script_from_item(self, item):\n\n\n script, path_to_script, script_item = item.get_script()\n\n # build dictionary\n # get full information from script\n dictator = list(script_item.to_dict().values())[0] # there is only one item in the dictionary\n\n for instrument in list(script.instruments.keys()):\n # update instrument\n script.instruments[instrument]['settings'] = dictator[instrument]['settings']\n # remove instrument\n del dictator[instrument]\n\n\n for sub_script_name in list(script.scripts.keys()):\n sub_script_item = script_item.get_subscript(sub_script_name)\n self.update_script_from_item(sub_script_item)\n del dictator[sub_script_name]\n\n script.update(dictator)\n # update datefolder path\n script.data_path = self.gui_settings['data_folder']", "docstring": "updates the script based on the information provided in item\n\nArgs:\n script: script to be updated\n item: B26QTreeItem that contains the new settings of the script", "source": "juraj_google_style"} -{"code": "def quantize_flow(flow, max_val=0.02, norm=True):\n\n h, w, _ = flow.shape\n dx = flow[..., 0]\n dy = flow[..., 1]\n if norm:\n dx = dx / w # avoid inplace operations\n dy = dy / h\n # use 255 levels instead of 256 to make sure 0 is 0 after dequantization.\n flow_comps = [\n quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]\n ]\n return tuple(flow_comps)", "docstring": "Quantize flow to [0, 255].\n\n After this step, the size 
of flow will be much smaller, and can be\n dumped as jpeg images.\n\nArgs:\n flow (ndarray): (h, w, 2) array of optical flow.\n max_val (float): Maximum value of flow, values beyond\n [-max_val, max_val] will be truncated.\n norm (bool): Whether to divide flow values by image width/height.\n\nReturns:\n tuple[ndarray]: Quantized dx and dy.", "source": "juraj_google_style"} -{"code": "def need(self, folder, page=None, perpage=None):\n\n assert isinstance(page, int) or page is None\n assert isinstance(perpage, int) or perpage is None\n return self.get('need', params={'folder': folder,\n 'page': page,\n 'perpage': perpage})", "docstring": "Returns lists of files which are needed by this device in order\n for it to become in sync.\n\nArgs:\n folder (str):\n page (int): If defined applies pagination accross the\n collection of results.\n perpage (int): If defined applies pagination across the\n collection of results.\n\nReturns:\n dict", "source": "juraj_google_style"} -{"code": "def create(self, key, value):\n\n key = quote(key, safe='~')\n headers = {'content-type': 'application/octet-stream'}\n url = '/internal/playbooks/keyValue/{}'.format(key)\n r = self.tcex.session.put(url, data=value, headers=headers)\n return r.content", "docstring": "Create key/value pair in remote KV store.\n\nArgs:\n key (string): The key to create in remote KV store.\n value (any): The value to store in remote KV store.\n\nReturns:\n (string): The response from the API call.", "source": "juraj_google_style"} -{"code": "def predict_raw(self, X):\n\n # b -- bias for the input and h layers\n b = np.ones((X.shape[0], 1))\n w2 = self.w[-(self.h + 1):].reshape(self.h + 1, 1)\n w1 = self.w[:-(self.h + 1)].reshape(self.i + 1, self.h)\n\n # Make X to have the same number of columns as self.i.\n # Because of the sparse matrix representation, X for prediction can\n # have a different number of columns.\n if X.shape[1] > self.i:\n # If X has more columns, cut extra columns.\n X = X[:, :self.i]\n elif X.shape[1] < self.i:\n # If X has less columns, cut the rows of the weight matrix between\n # the input and h layers instead of X itself because the SciPy\n # sparse matrix does not support .set_shape() yet.\n idx = range(X.shape[1])\n idx.append(self.i) # Include the last row for the bias\n w1 = w1[idx, :]\n\n if sparse.issparse(X):\n return np.hstack((sigm(sparse.hstack((X, b)).dot(w1)), b)).dot(w2)\n else:\n return np.hstack((sigm(np.hstack((X, b)).dot(w1)), b)).dot(w2)", "docstring": "Predict targets for a feature matrix.\n\nArgs:\n X (np.array of float): feature matrix for prediction", "source": "juraj_google_style"} -{"code": "def reserve(self, *args, **kwargs):\n\n data = self.get_data('floating_ips/',\n type=POST,\n params={'region': self.region_slug})\n\n if data:\n self.ip = data['floating_ip']['ip']\n self.region = data['floating_ip']['region']\n\n return self", "docstring": "Creates a FloatingIP in a region without assigning\n it to a specific Droplet.\n\n Note: Every argument and parameter given to this method will be\n assigned to the object.\n\nArgs:\n region_slug: str - region's slug (e.g. 
'nyc3')", "source": "juraj_google_style"} -{"code": "def nic_b(msg):\n\n tc = typecode(msg)\n\n if tc < 9 or tc > 18:\n raise RuntimeError(\"%s: Not a airborne position message, expecting 8 upper_bound:\n raise pyparsing.ParseException(\n 'Value: {0:d} exceeds upper bound: {1:d}'.format(\n check_number, upper_bound))\n\n # Since callback methods for pyparsing need to accept certain parameters\n # and there is no way to define conditions, like upper and lower bounds\n # we need to return here a method that accepts those pyparsing parameters.\n return CheckRange", "docstring": "Verify that a number is within a defined range.\n\n This is a callback method for pyparsing setParseAction\n that verifies that a read number is within a certain range.\n\n To use this method it needs to be defined as a callback method\n in setParseAction with the upper and lower bound set as parameters.\n\nArgs:\n lower_bound (int): lower bound of the range.\n upper_bound (int): upper bound of the range.\n\nReturns:\n Function: callback method that can be used by pyparsing setParseAction.", "source": "juraj_google_style"} -{"code": "def _publish_actor_class_to_key(self, key, actor_class_info):\n\n # We set the driver ID here because it may not have been available when\n # the actor class was defined.\n self._worker.redis_client.hmset(key, actor_class_info)\n self._worker.redis_client.rpush(\"Exports\", key)", "docstring": "Push an actor class definition to Redis.\n\n The is factored out as a separate function because it is also called\n on cached actor class definitions when a worker connects for the first\n time.\n\nArgs:\n key: The key to store the actor class info at.\n actor_class_info: Information about the actor class.", "source": "juraj_google_style"} -{"code": "def SetTimeZone(self, time_zone):\n\n try:\n self._time_zone = pytz.timezone(time_zone)\n except (AttributeError, pytz.UnknownTimeZoneError):\n raise ValueError('Unsupported timezone: {0!s}'.format(time_zone))", "docstring": "Sets the time zone.\n\nArgs:\n time_zone (str): time zone.\n\nRaises:\n ValueError: if the timezone is not supported.", "source": "juraj_google_style"} -{"code": "def plot(self, tag, mpl_plt, step=None, close_plot=True):\n\n if step is None:\n step = self._step\n else:\n self._step = step\n fig = mpl_plt.get_current_fig_manager()\n img_w, img_h = fig.canvas.get_width_height()\n image_buf = io.BytesIO()\n mpl_plt.savefig(image_buf, format='png')\n image_summary = Summary.Image(\n encoded_image_string=image_buf.getvalue(),\n colorspace=4, # RGBA\n height=img_h,\n width=img_w)\n summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])\n self.add_summary(summary, step)\n if close_plot:\n mpl_plt.close()", "docstring": "Saves matplotlib plot output to summary image.\n\nArgs:\n tag: str: label for this data\n mpl_plt: matplotlib stateful pyplot object with prepared plotting state\n step: int: training step\n close_plot: bool: automatically closes plot", "source": "juraj_google_style"} -{"code": "def __new__(cls, name, parents, dct):\n\n newClass = super(CommandMeta, cls).__new__(cls, name, parents, dct)\n\n if name != 'Command':\n for attribute in ['name', 'description', 'help']:\n if attribute not in dct or dct[attribute] is None:\n raise ValueError('%s cannot be None.' 
% attribute)\n CommandMeta.registry[name] = newClass\n\n return newClass", "docstring": "Creates a new Command class and validates it.\n\nArgs:\n cls (Class): the class object being created\n name (name): the name of the class being created\n parents (list): list of parent classes\n dct (dictionary): class attributes\n\nReturns:\n ``Class``", "source": "juraj_google_style"} -{"code": "def apply(self, window_length, samples=True, func1d=None):\n\n window_length /= 1 if samples else self.step\n if func1d is None:\n func1d = np.mean\n params = self.__dict__.copy()\n out = self._rolling_window(int(window_length), func1d)\n return Curve(out, params=params)", "docstring": "Runs any kind of function over a window.\n\nArgs:\n window_length (int): the window length. Required.\n samples (bool): window length is in samples. Use False for a window\n length given in metres.\n func1d (function): a function that takes a 1D array and returns a\n scalar. Default: ``np.mean()``.\n\nReturns:\n Curve.", "source": "juraj_google_style"} -{"code": "def as_dict(self, voigt=False):\n\n input_array = self.voigt if voigt else self\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"input_array\": input_array.tolist()}\n if voigt:\n d.update({\"voigt\": voigt})\n return d", "docstring": "Serializes the tensor object\n\nArgs:\n voigt (bool): flag for whether to store entries in\n voigt-notation. Defaults to false, as information\n may be lost in conversion.\n\n Returns (Dict):\n serialized format tensor object", "source": "juraj_google_style"} -{"code": "def atomic_write(path, mode='wt', permissions=None, file_factory=None, **kwargs):\n\n if permissions is None:\n permissions = apply_umask()\n # Handle stdout:\n if path == '-':\n yield sys.stdout\n else:\n base_dir = os.path.dirname(path)\n kwargs['suffix'] = os.path.basename(path)\n tf = tempfile.NamedTemporaryFile(\n dir=base_dir, mode=mode, delete=False, **kwargs)\n\n # If a file_factory is given, close, and re-open a handle using the\n # file_factory\n if file_factory is not None:\n tf.close()\n tf = file_factory(tf.name)\n try:\n with tf:\n yield tf\n # Move\n os.rename(tf.name, path)\n os.chmod(path, permissions)\n except:\n os.remove(tf.name)\n raise", "docstring": "Open a file for atomic writing.\n\n Generates a temp file, renames to value of ``path``.\n\nArgs:\n ``permissions``: Permissions to set (default: umask)\n ``file_factory``: If given, the handle yielded will be the result of\n calling file_factory(path)\n\n Additional arguments are passed to tempfile.NamedTemporaryFile", "source": "juraj_google_style"} -{"code": "def dispatch_event(event):\n\n\n try:\n if event.http_verb == enums.HTTPVerbs.GET:\n requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status()\n elif event.http_verb == enums.HTTPVerbs.POST:\n requests.post(\n event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT\n ).raise_for_status()\n\n except request_exception.RequestException as error:\n logging.error('Dispatch event failed. 
Error: %s' % str(error))", "docstring": "Dispatch the event being represented by the Event object.\n\nArgs:\n event: Object holding information about the request to be dispatched to the Optimizely backend.", "source": "juraj_google_style"} -{"code": "def _ScanFileSystemForWindowsDirectory(self, path_resolver):\n\n result = False\n for windows_path in self._WINDOWS_DIRECTORIES:\n windows_path_spec = path_resolver.ResolvePath(windows_path)\n\n result = windows_path_spec is not None\n if result:\n self._windows_directory = windows_path\n break\n\n return result", "docstring": "Scans a file system for a known Windows directory.\n\nArgs:\n path_resolver (WindowsPathResolver): Windows path resolver.\n\nReturns:\n bool: True if a known Windows directory was found.", "source": "juraj_google_style"} -{"code": "def multiply(self, other):\n\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n return SuperOp(other * self._data, self.input_dims(),\n self.output_dims())", "docstring": "Return the QuantumChannel self + other.\n\nArgs:\n other (complex): a complex number.\n\nReturns:\n SuperOp: the scalar multiplication other * self as a SuperOp object.\n\nRaises:\n QiskitError: if other is not a valid scalar.", "source": "juraj_google_style"} -{"code": "def env_problem(env_problem_name, **kwargs):\n\n\n ep_cls = Registries.env_problems[env_problem_name]\n ep = ep_cls()\n ep.initialize(**kwargs)\n return ep", "docstring": "Get and initialize the `EnvProblem` with the given name and batch size.\n\nArgs:\n env_problem_name: string name of the registered env problem.\n **kwargs: forwarded to env problem's initialize method.\n\nReturns:\n an initialized EnvProblem with the given batch size.", "source": "juraj_google_style"} -{"code": "def changes(self, **kwargs):\n\n path = '%s/%s/changes' % (self.manager.path, self.get_id())\n return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List the merge request changes.\n\nArgs:\n **kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabListError: If the list could not be retrieved\n\nReturns:\n RESTObjectList: List of changes", "source": "juraj_google_style"} -{"code": "def encode(self, s):\n\n return [int(w) + self._num_reserved_ids for w in s.split()]", "docstring": "Transform a human-readable string into a sequence of int ids.\n\n The ids should be in the range [num_reserved_ids, vocab_size). 
Ids [0,\n num_reserved_ids) are reserved.\n\n EOS is not appended.\n\nArgs:\n s: human-readable string to be converted.\n\nReturns:\n ids: list of integers", "source": "juraj_google_style"} -{"code": "def _CompareStores(self, storage_reader, compare_storage_reader):\n\n storage_counters = self._CalculateStorageCounters(storage_reader)\n compare_storage_counters = self._CalculateStorageCounters(\n compare_storage_reader)\n\n # TODO: improve comparison, currently only total numbers are compared.\n\n return storage_counters == compare_storage_counters", "docstring": "Compares the contents of two stores.\n\nArgs:\n storage_reader (StorageReader): storage reader.\n compare_storage_reader (StorageReader): storage to compare against.\n\nReturns:\n bool: True if the content of the stores is identical.", "source": "juraj_google_style"} -{"code": "def add_element(self, element):\n\n if isinstance(element, BaseExpression):\n element.set_parent(self._working_fragment)\n self._working_fragment.elements.append(element)\n return self\n else:\n return self.add_operator(element)", "docstring": "Add an element of type ``Operator``, ``Constraint``, or\n ``Expression`` to the ``Expression``.\n\nArgs:\n element: ``Constraint``, ``Expression``, or ``Operator``.\n\nReturns:\n Expression: ``self``\n\nRaises:\n FiqlObjectException: Element is not a valid type.", "source": "juraj_google_style"} -{"code": "def append_flags_into_file(self, filename):\n\n with open(filename, 'a') as out_file:\n out_file.write(self.flags_into_string())", "docstring": "Appends all flags assignments from this FlagInfo object to a file.\n\n Output will be in the format of a flagfile.\n\n NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile\n from https://github.com/gflags/gflags.\n\nArgs:\n filename: str, name of the file.", "source": "juraj_google_style"} -{"code": "def do_ams_sto_put(endpoint, body, content_length):\n\n headers = {\"Accept\": json_acceptformat,\n \"Accept-Charset\" : charset,\n \"x-ms-blob-type\" : \"BlockBlob\",\n \"x-ms-meta-m1\": \"v1\",\n \"x-ms-meta-m2\": \"v2\",\n \"x-ms-version\" : \"2015-02-21\",\n \"Content-Length\" : str(content_length)}\n return requests.put(endpoint, data=body, headers=headers)", "docstring": "Do a PUT request to the Azure Storage API and return JSON.\n\nArgs:\n endpoint (str): Azure Media Services Initial Endpoint.\n body (str): Azure Media Services Content Body.\n content_length (str): Content_length.\n\nReturns:\n HTTP response. 
JSON body.", "source": "juraj_google_style"} -{"code": "def convert_frame_change(self, shift, instruction):\n\n command_dict = {\n 'name': 'fc',\n 't0': shift+instruction.start_time,\n 'ch': instruction.channels[0].name,\n 'phase': instruction.command.phase\n }\n return self._qobj_model(**command_dict)", "docstring": "Return converted `FrameChangeInstruction`.\n\nArgs:\n shift(int): Offset time.\n instruction (FrameChangeInstruction): frame change instruction.\n\nReturns:\n dict: Dictionary of required parameters.", "source": "juraj_google_style"} -{"code": "def to_string(cls, error_code):\n\n if error_code == cls.ZONE_NOT_FOUND_ERROR:\n return 'Zone not found'\n return super(JLinkWriteErrors, cls).to_string(error_code)", "docstring": "Returns the string message for the given ``error_code``.\n\nArgs:\n cls (JLinkWriteErrors): the ``JLinkWriteErrors`` class\n error_code (int): error code to convert\n\nReturns:\n An error string corresponding to the error code.\n\nRaises:\n ValueError: if the error code is invalid.", "source": "juraj_google_style"} -{"code": "def template(self, name):\n\n self.plain, self.html = self._find_tpls(name)\n if not self.plain:\n self.plain = self._find_tpl(name)\n try:\n self.send = self._sendtpl\n yield\n finally:\n self.plain, self.html = None, None\n self.send = self._send", "docstring": "Set an active template to use with our Postman.\n\n This changes the call signature of send.\n\nArgs:\n - `name`: str\n\n Return: None\n Exceptions: None", "source": "juraj_google_style"} -{"code": "def __init__(self, session, proxy_class):\n\n assert isinstance(proxy_class, type)\n self.session = session\n self.proxy_class = proxy_class", "docstring": "Instantiate an API Authentication Proxy.\n\nArgs:\n auth (requests.Session): Authenticated requests Session.\n proxy_class (type): A class implementing the ``BaseApi``\n interface.", "source": "juraj_google_style"} -{"code": "def TSKVolumeGetBytesPerSector(tsk_volume):\n\n # Note that because pytsk3.Volume_Info does not explicitly defines info\n # we need to check if the attribute exists and has a value other\n # than None. 
Default to 512 otherwise.\n if hasattr(tsk_volume, 'info') and tsk_volume.info is not None:\n block_size = getattr(tsk_volume.info, 'block_size', 512)\n else:\n block_size = 512\n\n return block_size", "docstring": "Retrieves the number of bytes per sector from a TSK volume object.\n\nArgs:\n tsk_volume (pytsk3.Volume_Info): TSK volume information.\n\nReturns:\n int: number of bytes per sector or 512 by default.", "source": "juraj_google_style"} -{"code": "def _publish_scan_response(self, client):\n\n\n devices = self._manager.scanned_devices\n\n converted_devs = []\n for uuid, info in devices.items():\n slug = self._build_device_slug(uuid)\n\n message = {}\n message['uuid'] = uuid\n if uuid in self._connections:\n message['user_connected'] = True\n elif 'user_connected' in info:\n message['user_connected'] = info['user_connected']\n else:\n message['user_connected'] = False\n\n message['connection_string'] = slug\n message['signal_strength'] = info['signal_strength']\n\n converted_devs.append({x: y for x, y in message.items()})\n message['type'] = 'notification'\n message['operation'] = 'advertisement'\n\n self.client.publish(self.topics.gateway_topic(slug, 'data/advertisement'), message)\n\n probe_message = {}\n probe_message['type'] = 'response'\n probe_message['client'] = client\n probe_message['success'] = True\n probe_message['devices'] = converted_devs\n\n self.client.publish(self.topics.status, probe_message)", "docstring": "Publish a scan response message\n\n The message contains all of the devices that are currently known\n to this agent. Connection strings for direct connections are\n translated to what is appropriate for this agent.\n\nArgs:\n client (string): A unique id for the client that made this request", "source": "juraj_google_style"} -{"code": "def get_local_variable_from_name(self, variable_name):\n\n return next((v for v in self.variables if v.name == variable_name), None)", "docstring": "Return a local variable from a name\n\nArgs:\n varible_name (str): name of the variable\n\nReturns:\n LocalVariable", "source": "juraj_google_style"} -{"code": "def __init__(self, resolver_context):\n\n super(CPIOFile, self).__init__(resolver_context)\n self._cpio_archive_file = None\n self._cpio_archive_file_entry = None\n self._current_offset = 0\n self._file_system = None\n self._size = 0", "docstring": "Initializes a file-like object.\n\nArgs:\n resolver_context (Context): resolver context.", "source": "juraj_google_style"} -{"code": "def _adjust_penalty(self, observ, old_policy_params, length):\n\n old_policy = self._policy_type(**old_policy_params)\n with tf.name_scope('adjust_penalty'):\n network = self._network(observ, length)\n print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')\n with tf.control_dependencies([print_penalty]):\n kl_change = tf.reduce_mean(self._mask(\n tf.contrib.distributions.kl_divergence(old_policy, network.policy),\n length))\n kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')\n maybe_increase = tf.cond(\n kl_change > 1.3 * self._config.kl_target,\n # pylint: disable=g-long-lambda\n lambda: tf.Print(self._penalty.assign(\n self._penalty * 1.5), [0], 'increase penalty '),\n float)\n maybe_decrease = tf.cond(\n kl_change < 0.7 * self._config.kl_target,\n # pylint: disable=g-long-lambda\n lambda: tf.Print(self._penalty.assign(\n self._penalty / 1.5), [0], 'decrease penalty '),\n float)\n with tf.control_dependencies([maybe_increase, maybe_decrease]):\n return tf.summary.merge([\n tf.summary.scalar('kl_change', kl_change),\n 
tf.summary.scalar('penalty', self._penalty)])", "docstring": "Adjust the KL policy between the behavioral and current policy.\n\n Compute how much the policy actually changed during the multiple\n update steps. Adjust the penalty strength for the next training phase if we\n overshot or undershot the target divergence too much.\n\nArgs:\n observ: Sequences of observations.\n old_policy_params: Parameters of the behavioral policy.\n length: Batch of sequence lengths.\n\nReturns:\n Summary tensor.", "source": "juraj_google_style"} -{"code": "def get_by_provider_display_name(self, provider_display_name):\n\n san_managers = self._client.get_all()\n result = [x for x in san_managers if x['providerDisplayName'] == provider_display_name]\n return result[0] if result else None", "docstring": "Gets a SAN Manager by provider display name.\n\nArgs:\n provider_display_name: Name of the Provider Display Name\n\nReturns:\n dict: SAN Manager.", "source": "juraj_google_style"} -{"code": "def exec_function(ast, globals_map):\n\n locals_map = globals_map\n exec ast in globals_map, locals_map\n return locals_map", "docstring": "Execute a python code object in the given environment.\n\nArgs:\n globals_map: Dictionary to use as the globals context.\n\nReturns:\n locals_map: Dictionary of locals from the environment after execution.", "source": "juraj_google_style"} -{"code": "def post(self, url, params=None, data=None, files=None, **kwargs):\n\n return self.call_api(\n \"POST\",\n url,\n params=params,\n data=data,\n files=files,\n **kwargs\n )", "docstring": "Call the API with a POST request.\n\nArgs:\n url (str): Resource location relative to the base URL.\n params (dict or None): Query-string parameters.\n data (dict or None): Request body contents.\n files (dict or None: Files to be passed to the request.\n\nReturns:\n An instance of ResultParser or ErrorParser.", "source": "juraj_google_style"} -{"code": "def set_energy_upperbound(self, spins, offset=0):\n\n spin_energy = self.energy_upperbound(spins)\n self.assertions.add(GE(spin_energy, self.gap + offset))", "docstring": "Upper bound the energy of Theta with spins fixed to be greater than (gap + offset).\n\nArgs:\n spins (dict): Spin values for a subset of the variables in Theta.\n offset (float): A value that is added to the upper bound. 
Default value is 0.\n\nNote:\n Add equality constraint to assertions.", "source": "juraj_google_style"} -{"code": "def __init__(self, shape_id=None, lat=None, lon=None,seq=None, dist=None,\n field_dict=None):\n\n self._schedule = None\n if field_dict:\n if isinstance(field_dict, self.__class__):\n for k, v in field_dict.iteritems():\n self.__dict__[k] = v\n else:\n self.__dict__.update(field_dict)\n else:\n self.shape_id = shape_id\n self.shape_pt_lat = lat\n self.shape_pt_lon = lon\n self.shape_pt_sequence = seq\n self.shape_dist_traveled = dist", "docstring": "Initialize a new ShapePoint object.\n\nArgs:\n field_dict: A dictionary mapping attribute name to unicode string", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.ListUptimeCheckConfigs = channel.unary_unary(\n \"/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs\",\n request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsResponse.FromString,\n )\n self.GetUptimeCheckConfig = channel.unary_unary(\n \"/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig\",\n request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.GetUptimeCheckConfigRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString,\n )\n self.CreateUptimeCheckConfig = channel.unary_unary(\n \"/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig\",\n request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.CreateUptimeCheckConfigRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString,\n )\n self.UpdateUptimeCheckConfig = channel.unary_unary(\n \"/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig\",\n request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.UpdateUptimeCheckConfigRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString,\n )\n self.DeleteUptimeCheckConfig = channel.unary_unary(\n \"/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig\",\n request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.DeleteUptimeCheckConfigRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.ListUptimeCheckIps = channel.unary_unary(\n \"/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps\",\n request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsResponse.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def _build(self, x, prev_state):\n\n x.get_shape().with_rank(2)\n self._batch_size = x.get_shape().as_list()[0]\n self._dtype = x.dtype\n\n x_zeros = tf.concat(\n [x, tf.zeros(\n shape=(self._batch_size, 1), dtype=self._dtype)], 1)\n x_ones = tf.concat(\n [x, tf.ones(\n shape=(self._batch_size, 1), dtype=self._dtype)], 1)\n # Weights for the halting signal\n halting_linear = 
basic.Linear(name=\"halting_linear\", output_size=1)\n\n body = functools.partial(\n self._body, halting_linear=halting_linear, x_ones=x_ones)\n cumul_halting_init = tf.zeros(shape=(self._batch_size, 1),\n dtype=self._dtype)\n iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)\n core_output_size = [x.value for x in self._core.output_size]\n out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size),\n dtype=self._dtype)\n cumul_state_init = _nested_zeros_like(prev_state)\n remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)\n (unused_final_x, final_out, unused_final_state, final_cumul_state,\n unused_final_halting, final_iteration, final_remainder) = tf.while_loop(\n self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init,\n cumul_halting_init, iteration_init, remainder_init])\n\n act_output = basic.Linear(\n name=\"act_output_linear\", output_size=self._output_size)(final_out)\n\n return (act_output, (final_iteration, final_remainder)), final_cumul_state", "docstring": "Connects the core to the graph.\n\nArgs:\n x: Input `Tensor` of shape `(batch_size, input_size)`.\n prev_state: Previous state. This could be a `Tensor`, or a tuple of\n `Tensor`s.\n\nReturns:\n The tuple `(output, state)` for this core.\n\nRaises:\n ValueError: if the `Tensor` `x` does not have rank 2.", "source": "juraj_google_style"} -{"code": "def get_course_details(self, course_id):\n\n try:\n return self.client.course(course_id).get()\n except (SlumberBaseException, ConnectionError, Timeout) as exc:\n LOGGER.exception(\n 'Failed to retrieve course enrollment details for course [%s] due to: [%s]',\n course_id, str(exc)\n )\n return {}", "docstring": "Query the Enrollment API for the course details of the given course_id.\n\nArgs:\n course_id (str): The string value of the course's unique identifier\n\nReturns:\n dict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.)", "source": "juraj_google_style"} -{"code": "def create_explicit(bounds):\n\n safe_bounds = sorted(float(x) for x in bounds)\n if len(safe_bounds) != len(set(safe_bounds)):\n raise ValueError(u'Detected two elements of bounds that are the same')\n return sc_messages.Distribution(\n bucketCounts=[0] * (len(safe_bounds) + 1),\n explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))", "docstring": "Creates a new instance of distribution with explicit buckets.\n\n bounds is an iterable of ordered floats that define the explicit buckets\n\nArgs:\n bounds (iterable[float]): initializes the bounds\n\nReturns:\n :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`\n\nRaises:\n ValueError: if the args are invalid for creating an instance", "source": "juraj_google_style"} -{"code": "def GetMessages(self, formatter_mediator, event):\n\n regvalue = getattr(event, 'regvalue', {})\n # Loop over all the registry value names in the service key.\n for service_value_name in regvalue.keys():\n # A temporary variable so we can refer to this long name more easily.\n service_enums = human_readable_service_enums.SERVICE_ENUMS\n # Check if we need to can make the value more human readable.\n if service_value_name in service_enums.keys():\n service_enum = service_enums[service_value_name]\n # Find the human readable version of the name and fall back to the\n # raw value if it's not found.\n human_readable_value = service_enum.get(\n regvalue[service_value_name],\n regvalue[service_value_name])\n regvalue[service_value_name] = 
human_readable_value\n\n return super(WinRegistryServiceFormatter, self).GetMessages(\n formatter_mediator, event)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\n formatter_mediator (FormatterMediator): mediates the interactions between\n formatters and other components, such as storage and Windows EventLog\n resources.\n event (EventObject): event.\n\nReturns:\n tuple(str, str): formatted message string and short message string.\n\nRaises:\n WrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj_google_style"} -{"code": "def _example_number_anywhere_for_type(num_type):\n\n for region_code in SUPPORTED_REGIONS:\n example_numobj = example_number_for_type(region_code, num_type)\n if example_numobj is not None:\n return example_numobj\n # If there wasn't an example number for a region, try the non-geographical entities.\n for country_calling_code in COUNTRY_CODES_FOR_NON_GEO_REGIONS:\n metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)\n desc = _number_desc_by_type(metadata, num_type)\n if desc is not None and desc.example_number is not None:\n try:\n return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number, UNKNOWN_REGION)\n except NumberParseException: # pragma no cover\n pass\n\n # There are no example numbers of this type for any country in the library.\n return None", "docstring": "Gets a valid number for the specified number type (it may belong to any country).\n\nArgs:\n num_type -- The type of number that is needed.\n\n Returns a valid number for the specified type. Returns None when the\n metadata does not contain such information. This should only happen when\n no numbers of this type are allocated anywhere in the world anymore.", "source": "juraj_google_style"} -{"code": "def set_cookies():\n\n\n cookies = dict(request.args.items())\n r = app.make_response(redirect(url_for(\"view_cookies\")))\n for key, value in cookies.items():\n r.set_cookie(key=key, value=value, secure=secure_cookie())\n\n return r", "docstring": "Sets cookie(s) as provided by the query string and redirects to cookie list.\n ---\n tags:\n - Cookies\n parameters:\n - in: query\n name: freeform\n explode: true\n allowEmptyValue: true\n schema:\n type: object\n additionalProperties:\n type: string\n style: form\n produces:\n - text/plain\n responses:\n 200:\n description: Redirect to cookie list", "source": "juraj_google_style"} -{"code": "def _set_details(self, content):\n\n try:\n self.details = str(content)\n except UnicodeEncodeError:\n if sys.version_info < (3, 0):\n # If Py2 threw encode error, convert to unicode.\n self.details = unicode(content)\n else:\n # We should never hit this in Py3, if this happens, record\n # an encoded version of the content for users to handle.\n logging.error(\n 'Unable to decode \"%s\" in Py3, encoding in utf-8.',\n content)\n self.details = content.encode('utf-8')", "docstring": "Sets the `details` field.\n\nArgs:\n content: the content to extract details from.", "source": "juraj_google_style"} -{"code": "def _add_input_deps(self, executor, args, kwargs):\n\n\n # Return if the task is _*_stage_in\n if executor == 'data_manager':\n return args, kwargs\n\n inputs = kwargs.get('inputs', [])\n for idx, f in enumerate(inputs):\n if isinstance(f, File) and f.is_remote():\n inputs[idx] = self.data_manager.stage_in(f, executor)\n\n for kwarg, f in kwargs.items():\n if isinstance(f, File) and f.is_remote():\n kwargs[kwarg] = self.data_manager.stage_in(f, 
executor)\n\n newargs = list(args)\n for idx, f in enumerate(newargs):\n if isinstance(f, File) and f.is_remote():\n newargs[idx] = self.data_manager.stage_in(f, executor)\n\n return tuple(newargs), kwargs", "docstring": "Look for inputs of the app that are remote files. Submit stage_in\n apps for such files and replace the file objects in the inputs list with\n corresponding DataFuture objects.\n\nArgs:\n - executor (str) : executor where the app is going to be launched\n - args (List) : Positional args to app function\n - kwargs (Dict) : Kwargs to app function", "source": "juraj_google_style"} -{"code": "def reduce_multiline(string):\n\n string = str(string)\n return \" \".join([item.strip()\n for item in string.split(\"\\n\")\n if item.strip()])", "docstring": "reduces a multiline string to a single line of text.\n\nArgs:\n string: the text to reduce", "source": "juraj_google_style"} -{"code": "def get_database_info(db_uri):\n\n if not db_uri:\n return None, None\n scheme = urlparse.urlparse(db_uri).scheme\n if scheme == 'sqlite':\n return sqlite3, create_sqlite_connection_provider(db_uri)\n else:\n raise ValueError('Only sqlite DB URIs are supported now: ' + db_uri)", "docstring": "Returns TBContext fields relating to SQL database.\n\nArgs:\n db_uri: A string URI expressing the DB file, e.g. \"sqlite:~/tb.db\".\n\nReturns:\n A tuple with the db_module and db_connection_provider TBContext fields. If\n db_uri was empty, then (None, None) is returned.\n\nRaises:\n ValueError: If db_uri scheme is not supported.", "source": "juraj_google_style"} -{"code": "def define_lattice_from_file( self, filename, cell_lengths ):\n\n self.lattice = init_lattice.lattice_from_sites_file( filename, cell_lengths = cell_lengths )", "docstring": "Set up the simulation lattice from a file containing site data.\n Uses `init_lattice.lattice_from_sites_file`, which defines the site file spec.\n\nArgs:\n filename (Str): sites file filename.\n cell_lengths (List(x,y,z)): cell lengths for the simulation cell.\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def bulkBatch(symbols, fields=None, range_='1m', last=10, token='', version=''):\n\n fields = fields or _BATCH_TYPES\n args = []\n empty_data = []\n list_orig = empty_data.__class__\n\n if not isinstance(symbols, list_orig):\n raise PyEXception('Symbols must be of type list')\n\n for i in range(0, len(symbols), 99):\n args.append((symbols[i:i+99], fields, range_, last, token, version))\n\n pool = ThreadPool(20)\n rets = pool.starmap(batch, args)\n pool.close()\n\n ret = {}\n\n for i, d in enumerate(rets):\n symbols_subset = args[i][0]\n if len(d) != len(symbols_subset):\n empty_data.extend(list_orig(set(symbols_subset) - set(d.keys())))\n ret.update(d)\n\n for k in empty_data:\n if k not in ret:\n if isinstance(fields, str):\n ret[k] = {}\n else:\n ret[k] = {x: {} for x in fields}\n return ret", "docstring": "Optimized batch to fetch as much as possible at once\n\n https://iexcloud.io/docs/api/#batch-requests\n\nArgs:\n symbols (list); List of tickers to request\n fields (list); List of fields to request\n range_ (string); Date range for chart\n last (int);\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: results in json", "source": "juraj_google_style"} -{"code": "def __call__(self, fn):\n\n\n def output(app, *args, **kwargs):\n\n data = fn(app, *args, **kwargs)\n attr = getattr(app, self.attribute)\n if isinstance(data, list) and isinstance(attr, list):\n getattr(app, self.attribute).extend(data)\n elif 
isinstance(attr, list):\n getattr(app, self.attribute).append(data)\n else:\n setattr(app, self.attribute, data)\n return data\n\n return output", "docstring": "Implement __call__ function for decorator.\n\nArgs:\n fn (function): The decorated function.\n\nReturns:\n function: The custom decorator function.", "source": "juraj_google_style"} -{"code": "def format_error(status=None, title=None, detail=None, code=None):\n\n error = {}\n error.update({ 'title': title })\n\n if status is not None:\n error.update({ 'status': status })\n\n if detail is not None:\n error.update({ 'detail': detail })\n\n if code is not None:\n error.update({ 'code': code })\n\n return error", "docstring": "Formatting JSON API Error Object\n Constructing an error object based on JSON API standard\n ref: http://jsonapi.org/format/#error-objects\n\nArgs:\n status: Can be a http status codes\n title: A summary of error\n detail: A descriptive error message\n code: Application error codes (if any)\n\nReturns:\n A dictionary contains of status, title, detail and code", "source": "juraj_google_style"} -{"code": "def update(self, data):\n\n # If the instance was terminated, remove it\n if data.state['Name'] == 'terminated':\n self.delete(auto_commit=False)\n return True\n\n updated = self.set_property('launch_date', to_utc_date(data.launch_time).isoformat())\n updated |= self.set_property('state', data.state['Name'])\n updated |= self.set_property('instance_type', data.instance_type)\n updated |= self.set_property('public_ip', data.public_ip_address or None)\n updated |= self.set_property('public_dns', data.public_dns_name or None)\n\n tags = {x['Key']: x['Value'] for x in data.tags or {}}\n existing_tags = {x.key: x for x in self.tags}\n\n # Check for new tags\n for key, value in list(tags.items()):\n updated |= self.set_tag(key, value)\n\n # Check for updated or removed tags\n for key in list(existing_tags.keys()):\n if key not in tags:\n updated |= self.delete_tag(key)\n\n return updated", "docstring": "Updates the object information based on live data, if there were any changes made. Any changes will be\n automatically applied to the object, but will not be automatically persisted. 
You must manually call\n `db.session.add(instance)` on the object.\n\nArgs:\n data (:obj:): AWS API Resource object fetched from AWS API\n\nReturns:\n True if there were any changes to the object, else false", "source": "juraj_google_style"} -{"code": "def _CompareFields(field, other_field):\n\n field_attrs = _GetFieldAttributes(field)\n other_field_attrs = _GetFieldAttributes(other_field)\n if field_attrs != other_field_attrs:\n return False\n return field.__class__ == other_field.__class__", "docstring": "Checks if two ProtoRPC fields are \"equal\".\n\n Compares the arguments, rather than the id of the elements (which is\n the default __eq__ behavior) as well as the class of the fields.\n\nArgs:\n field: A ProtoRPC message field to be compared.\n other_field: A ProtoRPC message field to be compared.\n\nReturns:\n Boolean indicating whether the fields are equal.", "source": "juraj_google_style"} -{"code": "def __init__(self, details):\n\n\n\t\t# If details is not a dict instance\n\t\tif not isinstance(details, dict):\n\t\t\traise ValueError('details')\n\n\t\t# If the hash config is not found\n\t\tif '__hash__' not in details:\n\t\t\traise KeyError('__hash__')\n\n\t\t# If there's an optional flag somewhere in the mix\n\t\tif '__optional__' in details:\n\t\t\tbOptional = details['__optional__']\n\t\t\tdel details['__optional__']\n\t\telse:\n\t\t\tbOptional = None\n\n\t\t# If the hash is simply set to True, make it a string with no\n\t\t#\trequirements\n\t\tif details['__hash__'] is True:\n\t\t\tdetails['__hash__'] = {\"__type__\":\"string\"}\n\n\t\t# Store the key using the hash value\n\t\tself._key = Node(details['__hash__'])\n\n\t\t# Remove it from details\n\t\tdel details['__hash__']\n\n\t\t# Store the child\n\t\tself._node = _child(details)\n\n\t\t# If we had an optional flag, add it for the parent constructor\n\t\tif bOptional:\n\t\t\tdetails['__optional__'] = bOptional\n\n\t\t# Call the parent constructor\n\t\tsuper(HashNode, self).__init__(details, 'HashNode')", "docstring": "Constructor\n\n Initialises the instance\n\nArgs:\n details {dict} -- Details describing the type of values allowed for\n the node\n\nRaises:\n KeyError\n ValueError\n\nReturns:\n HashNode", "source": "juraj_google_style"} -{"code": "def get_links(self, **kw):\n\n\n links = [a for a in dir(self) if isinstance(getattr(self, a), Model)\n and not a.startswith('_model')]\n\n return [\n {\n 'field': l,\n 'mdl': getattr(self, l).__class__,\n } for l in links\n ]", "docstring": "Prepare links of form by mimicing pyoko's get_links method's result\n\nArgs:\n **kw:\n\n Returns: list of link dicts", "source": "juraj_google_style"} -{"code": "def read_handler(Model, name=None, **kwds):\n\n async def action_handler(service, action_type, payload, props, **kwds):\n # if the payload represents a new instance of `model`\n if action_type == get_crud_action('read', name or Model):\n # the props of the message\n message_props = {}\n # if there was a correlation id in the request\n if 'correlation_id' in props:\n # make sure it ends up in the reply\n message_props['correlation_id'] = props['correlation_id']\n\n try:\n # resolve the query using the service schema\n resolved = service.schema.execute(payload)\n # create the string response\n response = json.dumps({\n 'data': {key:value for key,value in resolved.data.items()},\n 'errors': resolved.errors\n })\n\n # publish the success event\n await service.event_broker.send(\n payload=response,\n action_type=change_action_status(action_type, success_status()),\n **message_props\n )\n\n 
# if something goes wrong\n except Exception as err:\n # publish the error as an event\n await service.event_broker.send(\n payload=str(err),\n action_type=change_action_status(action_type, error_status()),\n **message_props\n )\n\n\n # return the handler\n return action_handler", "docstring": "This factory returns an action handler that responds to read requests\n by resolving the payload as a graphql query against the internal schema.\n\nArgs:\n Model (nautilus.BaseModel): The model to delete when the action\n received.\n\nReturns:\n function(type, payload): The action handler for this model", "source": "juraj_google_style"} -{"code": "def _CountClientStatisticByLabel(self, day_buckets, extract_statistic_fn):\n\n counts = collections.defaultdict(int)\n now = rdfvalue.RDFDatetime.Now()\n for info in self.IterateAllClientsFullInfo(batch_size=db.MAX_COUNT):\n if not info.metadata.ping:\n continue\n statistic_value = extract_statistic_fn(info)\n for client_label in info.GetLabelsNames(owner=\"GRR\"):\n for day_bucket in day_buckets:\n time_boundary = now - rdfvalue.Duration.FromDays(day_bucket)\n if info.metadata.ping > time_boundary:\n # Count the client if it has been active in the last 'day_bucket'\n # days.\n counts[(statistic_value, client_label, day_bucket)] += 1\n return dict(counts)", "docstring": "Returns client-activity metrics for a particular statistic.\n\nArgs:\n day_buckets: A set of n-day-active buckets.\n extract_statistic_fn: A function that extracts the statistic's value from\n a ClientFullInfo object.", "source": "juraj_google_style"} -{"code": "def _GetDayOfYear(self, year, month, day_of_month):\n\n if month not in range(1, 13):\n raise ValueError('Month value out of bounds.')\n\n days_per_month = self._GetDaysPerMonth(year, month)\n if day_of_month < 1 or day_of_month > days_per_month:\n raise ValueError('Day of month value out of bounds.')\n\n day_of_year = day_of_month\n for past_month in range(1, month):\n day_of_year += self._GetDaysPerMonth(year, past_month)\n\n return day_of_year", "docstring": "Retrieves the day of the year for a specific day of a month in a year.\n\nArgs:\n year (int): year e.g. 
1970.\n month (int): month, where 1 represents January.\n day_of_month (int): day of the month, where 1 represents the first day.\n\nReturns:\n int: day of year.\n\nRaises:\n ValueError: if the month or day of month value is out of bounds.", "source": "juraj_google_style"} -{"code": "def _set_unknown_flag(self, name, value):\n\n setter = self.__dict__['__set_unknown']\n if setter:\n try:\n setter(name, value)\n return value\n except (TypeError, ValueError): # Flag value is not valid.\n raise _exceptions.IllegalFlagValueError(\n '\"{1}\" is not valid for --{0}' .format(name, value))\n except NameError: # Flag name is not valid.\n pass\n raise _exceptions.UnrecognizedFlagError(name, value)", "docstring": "Returns value if setting flag |name| to |value| returned True.\n\nArgs:\n name: str, name of the flag to set.\n value: Value to set.\n\nReturns:\n Flag value on successful call.\n\nRaises:\n UnrecognizedFlagError\n IllegalFlagValueError", "source": "juraj_google_style"} -{"code": "def update_thread(cls, session, conversation, thread):\n\n data = thread.to_api()\n data['reload'] = True\n return cls(\n '/conversations/%s/threads/%d.json' % (\n conversation.id, thread.id,\n ),\n data=data,\n request_type=RequestPaginator.PUT,\n singleton=True,\n session=session,\n )", "docstring": "Update a thread.\n\nArgs:\n session (requests.sessions.Session): Authenticated session.\n conversation (helpscout.models.Conversation): The conversation\n that the thread belongs to.\n thread (helpscout.models.Thread): The thread to be updated.\n\nReturns:\n helpscout.models.Conversation: Conversation including freshly\n updated thread.", "source": "juraj_google_style"} -{"code": "def bind(self, event_name, callback, first = False):\n\n\t\tif event_name not in self.handlers:\n\t\t\tself.handlers[event_name] = []\n\n\t\tif first:\n\t\t\tself.handlers[event_name].insert(0, callback)\n\t\telse:\n\t\t\tself.handlers[event_name].append(callback)", "docstring": "Bind a callback to an event\n\n Params:\n event_name (string):\n Name of the event to bind to\n callback (callable):\n Callback that will be called when the event is triggered\n first (boolean):\n If True, this callback is placed before all the other events already\n registered for this event, otherwise it is placed at the end.", "source": "juraj_google_style"} -{"code": "def active_futures(ticker: str, dt) -> str:\n\n t_info = ticker.split()\n prefix, asset = ' '.join(t_info[:-1]), t_info[-1]\n info = const.market_info(f'{prefix[:-1]}1 {asset}')\n\n f1, f2 = f'{prefix[:-1]}1 {asset}', f'{prefix[:-1]}2 {asset}'\n fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['freq'])\n fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['freq'])\n\n fut_tk = bdp(tickers=[fut_1, fut_2], flds='Last_Tradeable_Dt', cache=True)\n\n if pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[0]).month: return fut_1\n\n d1 = bdib(ticker=f1, dt=dt)\n d2 = bdib(ticker=f2, dt=dt)\n\n return fut_1 if d1[f1].volume.sum() > d2[f2].volume.sum() else fut_2", "docstring": "Active futures contract\n\nArgs:\n ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc.\n dt: date\n\nReturns:\n str: ticker name", "source": "juraj_google_style"} -{"code": "def get_app_region_products(self, app_uri):\n\n apps, retInfo = self.list_apps()\n if apps is None:\n return None\n\n for app in apps:\n if (app.get('uri') == app_uri):\n return self.get_region_products(app.get('region'))\n\n return", "docstring": "获得指定应用所在区域的产品信息\n\nArgs:\n - app_uri: 应用的完整标识\n\nReturns:\n 返回产品信息列表,若失败则返回None", 
"source": "juraj_google_style"} -{"code": "def make_config_get(conf_path):\n\n project_root = _get_project_root_from_conf_path(conf_path)\n config = load_config_in_dir(project_root)\n return partial(config_get, config)", "docstring": "Return a function to get configuration options for a specific project\n\nArgs:\n conf_path (path-like): path to project's conf file (i.e. foo.conf\n module)", "source": "juraj_google_style"} -{"code": "def sendfrom(self, user_id, dest_address, amount, minconf=1):\n\n amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)\n txhash = self.rpc.call(\"sendfrom\",\n user_id, dest_address, float(str(amount)), minconf\n )\n self.logger.debug(\"Send %s %s from %s to %s\" % (str(amount), self.coin,\n str(user_id), dest_address))\n self.logger.debug(\"Transaction hash: %s\" % txhash)\n return txhash", "docstring": "Send coins from user's account.\n\nArgs:\n user_id (str): this user's unique identifier\n dest_address (str): address which is to receive coins\n amount (str or Decimal): amount to send (eight decimal points)\n minconf (int): ensure the account has a valid balance using this\n many confirmations (default=1)\n\nReturns:\n str: transaction ID", "source": "juraj_google_style"} -{"code": "def equal(mol, query, largest_only=True, ignore_hydrogen=True):\n\n m = molutil.clone(mol)\n q = molutil.clone(query)\n if largest_only:\n m = molutil.largest_graph(m)\n q = molutil.largest_graph(q)\n if ignore_hydrogen:\n m = molutil.make_Hs_implicit(m)\n q = molutil.make_Hs_implicit(q)\n if molutil.mw(m) == molutil.mw(q):\n gm = GraphMatcher(q.graph, m.graph, node_match=atom_match)\n return gm.is_isomorphic()\n return False", "docstring": "if mol is exactly same structure as the query, return True\n\nArgs:\n mol: Compound\n query: Compound", "source": "juraj_google_style"} -{"code": "def parse_rdf_payload(self, data, headers):\n\n\n\n\t\t# handle edge case for content-types not recognized by rdflib parser\n\t\tif headers['Content-Type'].startswith('text/plain'):\n\t\t\tlogger.debug('text/plain Content-Type detected, using application/n-triples for parser')\n\t\t\tparse_format = 'application/n-triples'\n\t\telse:\n\t\t\tparse_format = headers['Content-Type']\n\n\t\t# clean parse format for rdf parser (see: https://www.w3.org/2008/01/rdf-media-types)\n\t\tif ';charset' in parse_format:\n\t\t\tparse_format = parse_format.split(';')[0]\n\n\t\t# parse graph\n\t\tgraph = rdflib.Graph().parse(\n\t\t\tdata=data.decode('utf-8'),\n\t\t\tformat=parse_format)\n\n\t\t# return graph\n\t\treturn graph", "docstring": "small function to parse RDF payloads from various repository endpoints\n\nArgs:\n data (response.data): data from requests response\n headers (response.headers): headers from requests response\n\nReturns:\n (rdflib.Graph): parsed graph", "source": "juraj_google_style"} -{"code": "def setMinimum(self, minimum):\n\n if not isinstance(minimum, int):\n raise TypeError(\"Argument is not of type int or long\")\n self._minimum = minimum", "docstring": "setter to _minimum.\n\nArgs:\n minimum (int or long): new _minimum value.\n\nRaises:\n TypeError: If the given argument is not an integer.", "source": "juraj_google_style"} -{"code": "def __init__(self, input_file_name, reference_histogram):\n\n self.start_time_sec = 0.0\n self.observed_start_time = False\n self.base_time_sec = 0.0\n self.observed_base_time = False\n self.input_file = open(input_file_name, \"r\")\n self.reference_histogram = reference_histogram", "docstring": "Constructs a new HistogramLogReader that 
produces intervals read\n from the specified file name.\n Params:\n input_file_name The name of the file to read from\n reference_histogram a histogram instance used as a reference to create\n new instances for all subsequent decoded interval\n histograms", "source": "juraj_google_style"} -{"code": "def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):\n\n button_entry_comment = extract_element_internationalized_comment(button)\n if button_entry_comment is None:\n return\n\n for state in button.getElementsByTagName('state'):\n state_name = state.attributes['key'].value\n state_entry_comment = button_entry_comment + \" - \" + state_name + \" state of button\"\n if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment):\n try:\n button_entry_key = state.attributes['title'].value\n except KeyError:\n try:\n button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue\n except Exception:\n continue\n\n results.append((button_entry_key, state_entry_comment))\n\n warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix)", "docstring": "Adds strings pairs from a button xib element.\n\nArgs:\n xib_file (str): Path to the xib file.\n results (list): The list to add the results to.\n button(element): The button element from the xib, to extract the string pairs from.\n special_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)", "source": "juraj_google_style"} -{"code": "def _infer_mutants_handler(self, request):\n\n try:\n if request.method != 'GET':\n logger.error('%s requests are forbidden.', request.method)\n return http_util.Respond(request, {'error': 'invalid non-GET request'},\n 'application/json', code=405)\n\n example_index = int(request.args.get('example_index', '0'))\n feature_name = request.args.get('feature_name')\n examples = (self.examples if example_index == -1\n else [self.examples[example_index]])\n\n (inference_addresses, model_names, model_versions,\n model_signatures) = self._parse_request_arguments(request)\n\n serving_bundles = []\n for model_num in xrange(len(inference_addresses)):\n serving_bundles.append(inference_utils.ServingBundle(\n inference_addresses[model_num],\n model_names[model_num],\n request.args.get('model_type'),\n model_versions[model_num],\n model_signatures[model_num],\n request.args.get('use_predict') == 'true',\n request.args.get('predict_input_tensor'),\n request.args.get('predict_output_tensor')))\n\n viz_params = inference_utils.VizParams(\n request.args.get('x_min'), request.args.get('x_max'),\n self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,\n request.args.get('feature_index_pattern'))\n json_mapping = inference_utils.mutant_charts_for_feature(\n examples, feature_name, serving_bundles, viz_params)\n return http_util.Respond(request, json_mapping, 'application/json')\n except common_utils.InvalidUserInputError as e:\n return http_util.Respond(request, {'error': e.message},\n 'application/json', code=400)", "docstring": "Returns JSON for the `vz-line-chart`s for a feature.\n\nArgs:\n request: A request that should contain 'feature_name', 'example_index',\n 'inference_address', 'model_name', 'model_type', 'model_version', and\n 'model_signature'.\n\nReturns:\n A list of JSON objects, one for each chart.", "source": "juraj_google_style"} -{"code": "def QA_fetch_strategy(message={}, db=DATABASE):\n\n collection = DATABASE.strategy\n return [res for res in collection.find(message, 
{\"_id\": 0})]", "docstring": "get the account\n\nArgs:\n query_mes {[type]} -- [description]\n\n Keyword Arguments:\n collection {[type]} -- [description] (default: {DATABASE})\n\nReturns:\n [type] -- [description]", "source": "juraj_google_style"} -{"code": "def show(self, objtype, objid):\n\n url = self._object_url(objtype, int(objid))\n return self._make_request(url, method=\"get\")", "docstring": "Query for a specific resource by ID\n\nArgs:\n objtype (str): object type, e.g. 'device', 'interface'\n objid (int): object ID (DeviceID, etc.)\n\nReturns:\n A dict with that object\n\nRaises:\n requests.exceptions.HTTPError", "source": "juraj_google_style"} -{"code": "def save_as(self, filename: str) -> None:\n\n lib.TCOD_image_save(self.image_c, filename.encode(\"utf-8\"))", "docstring": "Save the Image to a 32-bit .bmp or .png file.\n\nArgs:\n filename (Text): File path to same this Image.", "source": "juraj_google_style"} -{"code": "def design_stat_extremes(self, value=\"Extremes\"):\n\n if value is not None:\n try:\n value = str(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type str '\n 'for field `design_stat_extremes`'.format(value))\n if ',' in value:\n raise ValueError('value should not contain a comma '\n 'for field `design_stat_extremes`')\n vals = set()\n vals.add(\"Extremes\")\n if value not in vals:\n raise ValueError('value {} is not an accepted value for '\n 'field `design_stat_extremes`'.format(value))\n\n self._design_stat_extremes = value", "docstring": "Corresponds to IDD Field `design_stat_extremes`\n\nArgs:\n value (str): value for IDD Field `design_stat_extremes`\n Accepted values are:\n - Extremes\n Default value: Extremes\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def _get_handled_methods(self, actions_map):\n\n methods = ('OPTIONS',)\n\n defined_actions = []\n for action_name in actions_map.keys():\n view_method = getattr(self, action_name, None)\n method_exists = view_method is not None\n method_defined = view_method != self.not_allowed_action\n if method_exists and method_defined:\n defined_actions.append(action_name)\n\n for action in defined_actions:\n methods += actions_map[action]\n\n return methods", "docstring": "Get names of HTTP methods that can be used at requested URI.\n\nArgs:\n :actions_map: Map of actions. 
Must have the same structure as\n self._item_actions and self._collection_actions", "source": "juraj_google_style"} -{"code": "def raster_to_gtiff(tif, geotif, change_nodata=False, change_gdal_type=False):\n\n rst_file = RasterUtilClass.read_raster(tif)\n nodata = rst_file.noDataValue\n if change_nodata:\n if not MathClass.floatequal(rst_file.noDataValue, DEFAULT_NODATA):\n nodata = DEFAULT_NODATA\n rst_file.data[rst_file.data == rst_file.noDataValue] = DEFAULT_NODATA\n gdal_type = rst_file.dataType\n if change_gdal_type:\n gdal_type = GDT_Float32\n RasterUtilClass.write_gtiff_file(geotif, rst_file.nRows, rst_file.nCols, rst_file.data,\n rst_file.geotrans, rst_file.srs, nodata,\n gdal_type)", "docstring": "Converting Raster format to GeoTIFF.\n\nArgs:\n tif: source raster file path.\n geotif: output raster file path.\n change_nodata: change NoDataValue to -9999 or not.\n gdal_type (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.\n change_gdal_type: If True, output the Float32 data type.", "source": "juraj_google_style"} -{"code": "def k_value_orifice(pipe_id, orifice_id, orifice_l, q,\n nu=con.WATER_NU):\n\n\n if orifice_id > pipe_id:\n raise ValueError('The orifice\\'s inner diameter cannot be larger than'\n 'that of the entrance pipe.')\n\n re = pc.re_pipe(q, pipe_id, nu) # Entrance pipe's Reynolds number.\n\n orifice_type = _get_orifice_type(orifice_l, orifice_id)\n\n if orifice_type == 'thin':\n return _k_value_thin_sharp_orifice(pipe_id, orifice_id, re)\n elif orifice_type == 'thick':\n return _k_value_thick_orifice(pipe_id, orifice_id, orifice_l, re)\n elif orifice_type == 'oversize':\n return k_value_reduction(pipe_id, orifice_id, q) \\\n + k_value_expansion(orifice_id, pipe_id, q)", "docstring": "Calculates the minor loss coefficient of an orifice plate in a\n pipe.\n\n Parameters:\n pipe_id: Entrance pipe's inner diameter from which fluid flows.\n orifice_id: Orifice's inner diameter.\n orifice_l: Orifice's length from start to end.\n q: Fluid's q rate.\n\n nu: Fluid's dynamic viscosity of the fluid. Default: room\n temperature water (1 * 10**-6 * m**2/s)\n\nReturns:\n k-value at the orifice.", "source": "juraj_google_style"} -{"code": "def parsed_stack(self, value):\n\n if value == self._defaults['parsedStack'] and 'parsedStack' in self._values:\n del self._values['parsedStack']\n else:\n self._values['parsedStack'] = value", "docstring": "The parsed_stack property.\n\nArgs:\n value (list). the property value.", "source": "juraj_google_style"} -{"code": "def decode_offset_commit_response(cls, data):\n\n ((correlation_id,), cur) = relative_unpack('>i', data, 0)\n ((num_topics,), cur) = relative_unpack('>i', data, cur)\n\n for _ in xrange(num_topics):\n (topic, cur) = read_short_string(data, cur)\n ((num_partitions,), cur) = relative_unpack('>i', data, cur)\n\n for _ in xrange(num_partitions):\n ((partition, error), cur) = relative_unpack('>ih', data, cur)\n yield OffsetCommitResponse(topic, partition, error)", "docstring": "Decode bytes to an OffsetCommitResponse\n\nArgs:\n data: bytes to decode", "source": "juraj_google_style"} -{"code": "def regex_check(equation_str):\n\n match1 = re.match(\n r'^(([xy+\\-*/()0-9. 
]+|sin\\(|cos\\(|exp\\(|log\\()?)+$',\n equation_str\n )\n match2 = re.match(r'^.*([xy]) *([xy]).*$', equation_str)\n if match1 and not match2:\n return True\n raise BadInputError('Cannot parse entered equation')", "docstring": "A quick regular expression check to see that the input is sane\n\nArgs:\n equation_str (str): String of equation to be parsed by sympify\n function. Expected to be valid Python.\n\nRaises:\n BadInputError: If input does not look safe to parse as an equation.", "source": "juraj_google_style"} -{"code": "def replace_urls(status):\n\n text = status.text\n\n if not has_url(status):\n return text\n\n urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]\n urls.sort(key=lambda x: x[0][0], reverse=True)\n\n for (start, end), url in urls:\n text = text[:start] + url + text[end:]\n\n return text", "docstring": "Replace shorturls in a status with expanded urls.\n\nArgs:\n status (tweepy.status): A tweepy status object\n\nReturns:\n str", "source": "juraj_google_style"} -{"code": "def save(self, wf_state):\n\n self.wf_state = wf_state\n self.wf_state['role_id'] = self.current.role_id\n self.set(self.wf_state)\n if self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS:\n self.publish(job='_zops_sync_wf_cache',\n token=self.db_key)", "docstring": "write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache\n\nArgs:\n wf_state dict: wf state", "source": "juraj_google_style"} -{"code": "def GetYearFromPosixTime(posix_time, timezone=pytz.UTC):\n\n datetime_object = datetime.datetime.fromtimestamp(posix_time, tz=timezone)\n return datetime_object.year", "docstring": "Gets the year from a POSIX timestamp\n\n The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC.\n\nArgs:\n posix_time: An integer containing the number of seconds since\n 1970-01-01 00:00:00 UTC.\n timezone: Optional timezone of the POSIX timestamp.\n\nReturns:\n The year of the POSIX timestamp.\n\nRaises:\n ValueError: If the posix timestamp is out of the range of supported values.", "source": "juraj_google_style"} -{"code": "def remove(self, layers):\n\n if not isinstance(layers, list):\n layers = [layers]\n for l in layers:\n if isinstance(l, string_types):\n if l not in self.layers:\n raise ValueError(\"There's no image/layer named '%s' in \"\n \"the masking stack!\" % l)\n self.stack.remove(l)\n else:\n l = self.stack.pop(l)\n del self.layers[l]\n\n self.set_mask()", "docstring": "Remove one or more layers from the stack of masking layers.\n\nArgs:\n layers: An int, string or list of strings and/or ints. Ints are\n interpreted as indices in the stack to remove; strings are\n interpreted as names of layers to remove. 
Negative ints will\n also work--i.e., remove(-1) will drop the last layer added.", "source": "juraj_google_style"} -{"code": "def kill_raylet_monitor(self, check_alive=True):\n\n self._kill_process_type(\n ray_constants.PROCESS_TYPE_RAYLET_MONITOR, check_alive=check_alive)", "docstring": "Kill the raylet monitor.\n\nArgs:\n check_alive (bool): Raise an exception if the process was already\n dead.", "source": "juraj_google_style"} -{"code": "def get_ieee_rotation(structure, refine_rotation=True):\n\n # Check conventional setting:\n sga = SpacegroupAnalyzer(structure)\n dataset = sga.get_symmetry_dataset()\n trans_mat = dataset['transformation_matrix']\n conv_latt = Lattice(np.transpose(np.dot(np.transpose(\n structure.lattice.matrix), np.linalg.inv(trans_mat))))\n xtal_sys = sga.get_crystal_system()\n\n vecs = conv_latt.matrix\n lengths = np.array(conv_latt.abc)\n angles = np.array(conv_latt.angles)\n rotation = np.zeros((3, 3))\n\n # IEEE rules: a,b,c || x1,x2,x3\n if xtal_sys == \"cubic\":\n rotation = [vecs[i] / lengths[i] for i in range(3)]\n\n # IEEE rules: a=b in length; c,a || x3, x1\n elif xtal_sys == \"tetragonal\":\n rotation = np.array([vec / mag for (mag, vec) in\n sorted(zip(lengths, vecs),\n key=lambda x: x[0])])\n if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]):\n rotation[0], rotation[2] = rotation[2], rotation[0].copy()\n rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))\n\n # IEEE rules: c ...\n\n def wrapper(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n six.reraise(error_class, error_class(e), sys.exc_info()[2])\n\n return wrapper", "docstring": "Wraps function fn in a try catch block that re-raises error_class.\n\nArgs:\n fn (function): function to wrapped\n error_class (Exception): Error class to be re-raised\n\nReturns:\n (object): fn wrapped in a try catch.", "source": "juraj_google_style"} -{"code": "def _format_field_name(self, field_name) -> str:\n\n\n field = self._get_model_field(field_name)\n return self.qn(field.column)", "docstring": "Formats a field's name for usage in SQL.\n\nArgs:\n field_name:\n The field name to format.\n\nReturns:\n The specified field name formatted for\n usage in SQL.", "source": "juraj_google_style"} -{"code": "def associate_blocks(blocks, layout_pairs, start_peb_num):\n\n seq_blocks = []\n for layout_pair in layout_pairs:\n seq_blocks = sort.by_image_seq(blocks, blocks[layout_pair[0]].ec_hdr.image_seq)\n\n layout_pair.append(seq_blocks)\n\n return layout_pairs", "docstring": "Group block indexes with appropriate layout pairs\n\nArgs:\n List:blocks -- List of block objects\n List:layout_pairs -- List of grouped layout blocks\n Int:start_peb_num -- Number of the PEB to start from.\n\nReturns:\n List -- Layout block pairs grouped with associated block ranges.", "source": "juraj_google_style"} -{"code": "def run(self, args):\n\n jlink = self.create_jlink(args)\n if args.product:\n print('Product: %s' % jlink.product_name)\n\n manufacturer = 'SEGGER' if jlink.oem is None else jlink.oem\n print('Manufacturer: %s' % manufacturer)\n\n print('Hardware Version: %s' % jlink.hardware_version)\n print('Firmware: %s' % jlink.firmware_version)\n print('DLL Version: %s' % jlink.version)\n print('Features: %s' % ', '.join(jlink.features))\n elif args.jtag:\n status = jlink.hardware_status\n print('TCK Pin Status: %d' % status.tck)\n print('TDI Pin Status: %d' % status.tdi)\n print('TDO Pin Status: %d' % status.tdo)\n print('TMS Pin Status: %d' % status.tms)\n print('TRES Pin Status: %d' % 
status.tres)\n print('TRST Pin Status: %d' % status.trst)", "docstring": "Runs the information command.\n\nArgs:\n self (InfoCommand): the ``InfoCommand`` instance\n args (Namespace): the arguments passed on the command-line\n\nReturns:\n ``None``", "source": "juraj_google_style"} -{"code": "def l1_loss(tensor, weight=1.0, scope=None):\n\n with tf.name_scope(scope, 'L1Loss', [tensor]):\n weight = tf.convert_to_tensor(weight,\n dtype=tensor.dtype.base_dtype,\n name='loss_weight')\n loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value')\n tf.add_to_collection(LOSSES_COLLECTION, loss)\n return loss", "docstring": "Define a L1Loss, useful for regularize, i.e. lasso.\n\nArgs:\n tensor: tensor to regularize.\n weight: scale the loss by this factor.\n scope: Optional scope for name_scope.\n\nReturns:\n the L1 loss op.", "source": "juraj_google_style"} -{"code": "def __new__(cls, obj=None, prop=None, func=None):\n\n new = super(SinonBase, cls).__new__(cls)\n if func:\n new.__init__(obj, prop, func)\n else:\n new.__init__(obj, prop)\n cls._queue.append(new)\n return weakref.proxy(new)", "docstring": "Constructor of SinonBase\n It will new true base but return a proxy of weakref and store it in _queue\n\nArgs:\n obj: None / function / instance method / module / class\n Inspected target\n prop: None / string\n Inspected target when obj contains callable things\n func: function / instance method\n ONLY used by stub, it will replace original target\n\nReturns:\n weakref", "source": "juraj_google_style"} -{"code": "def __init__(self, callback):\n\n super(Interface, self).__init__(callback)\n self._mac_address_table = brocade_mac_address_table(\n callback=pynos.utilities.return_xml\n )", "docstring": "Interface init function.\n\nArgs:\n callback: Callback function that will be called for each action.\n\nReturns:\n Interface Object\n\nRaises:\n None", "source": "juraj_google_style"} -{"code": "def podcasts_iter(self, *, device_id=None, page_size=250):\n\n\n\t\tif device_id is None:\n\t\t\tdevice_id = self.device_id\n\n\t\tstart_token = None\n\t\tprev_items = None\n\n\t\twhile True:\n\t\t\tresponse = self._call(\n\t\t\t\tmc_calls.PodcastSeries,\n\t\t\t\tdevice_id,\n\t\t\t\tmax_results=page_size,\n\t\t\t\tstart_token=start_token\n\t\t\t)\n\t\t\titems = response.body.get('data', {}).get('items', [])\n\n\t\t\t# Google does some weird shit.\n\t\t\tif items != prev_items:\n\t\t\t\tsubscribed_podcasts = [\n\t\t\t\t\titem\n\t\t\t\t\tfor item in items\n\t\t\t\t\tif item.get('userPreferences', {}).get('subscribed')\n\t\t\t\t]\n\n\t\t\t\tyield subscribed_podcasts\n\n\t\t\t\tprev_items = items\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t\tstart_token = response.body.get('nextPageToken')\n\t\t\tif start_token is None:\n\t\t\t\tbreak", "docstring": "Get a paged iterator of subscribed podcast series.\n\n Parameters:\n device_id (str, Optional): A mobile device ID.\n Default: Use ``device_id`` of the :class:`MobileClient` instance.\n page_size (int, Optional): The maximum number of results per returned page.\n Max allowed is ``49995``.\n Default: ``250``\n\nYields:\n list: Podcast series dicts.", "source": "juraj_google_style"} -{"code": "def PushTask(self, task):\n\n storage_file_size = getattr(task, 'storage_file_size', None)\n if not storage_file_size:\n raise ValueError('Task storage file size not set.')\n\n if task.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY:\n weight = 1\n else:\n weight = storage_file_size\n\n task.merge_priority = weight\n\n heap_values = (weight, task)\n 
heapq.heappush(self._heap, heap_values)\n self._task_identifiers.add(task.identifier)", "docstring": "Pushes a task onto the heap.\n\nArgs:\n task (Task): task.\n\nRaises:\n ValueError: if the size of the storage file is not set in the task.", "source": "juraj_google_style"} -{"code": "def findall_operations_with_gate_type(\n self,\n gate_type: Type[T_DESIRED_GATE_TYPE]\n ) -> Iterable[Tuple[int,\n ops.GateOperation,\n T_DESIRED_GATE_TYPE]]:\n\n result = self.findall_operations(lambda operation: bool(\n ops.op_gate_of_type(operation, gate_type)))\n for index, op in result:\n gate_op = cast(ops.GateOperation, op)\n yield index, gate_op, cast(T_DESIRED_GATE_TYPE, gate_op.gate)", "docstring": "Find the locations of all gate operations of a given type.\n\nArgs:\n gate_type: The type of gate to find, e.g. XPowGate or\n MeasurementGate.\n\nReturns:\n An iterator (index, operation, gate)'s for operations with the given\n gate type.", "source": "juraj_google_style"} -{"code": "def __init__(self, outputdir, os_name, os_bits):\n\n if type(self) == Basedriver:\n raise Exception('Basedriver cannot be instantiated')\n self.outputdir = outputdir\n self.os_name = os_name\n self.os_bits = os_bits", "docstring": "Instantiate a new webdriver class\n\nArgs:\n outputdir: The path to the directory to use.\n os_name: Valid options: ['windows', 'linux', 'mac']\n os_bits: Valid options: ['32', '64']", "source": "juraj_google_style"} -{"code": "def _WriteRow(self, output_writer, values):\n\n maximum_row_width = self._MAXIMUM_WIDTH - self._column_width - 3\n\n # The format string of the first line of the column value.\n primary_format_string = '{{0:>{0:d}s}} : {{1:s}}\\n'.format(\n self._column_width)\n\n # The format string of successive lines of the column value.\n secondary_format_string = '{{0:<{0:d}s}}{{1:s}}\\n'.format(\n self._column_width + 3)\n\n if isinstance(values[1], py2to3.STRING_TYPES):\n value_string = values[1]\n else:\n value_string = '{0!s}'.format(values[1])\n\n if len(value_string) < maximum_row_width:\n output_writer.Write(primary_format_string.format(\n values[0], value_string))\n return\n\n # Split the column value in words.\n words = value_string.split()\n\n current = 0\n\n lines = []\n word_buffer = []\n for word in words:\n current += len(word) + 1\n if current >= maximum_row_width:\n current = len(word)\n lines.append(' '.join(word_buffer))\n word_buffer = [word]\n else:\n word_buffer.append(word)\n lines.append(' '.join(word_buffer))\n\n # Split the column value across multiple lines.\n output_writer.Write(\n primary_format_string.format(values[0], lines[0]))\n for line in lines[1:]:\n output_writer.Write(secondary_format_string.format('', line))", "docstring": "Writes a row of values aligned to the column width.\n\nArgs:\n output_writer (OutputWriter): output writer.\n values (list[object]): values.", "source": "juraj_google_style"} -{"code": "def truncate(text, max_len=350, end='...'):\n\n if len(text) <= max_len:\n return text\n return text[:max_len].rsplit(' ', maxsplit=1)[0] + end", "docstring": "Truncate the supplied text for display.\n\nArgs:\n text (:py:class:`str`): The text to truncate.\n max_len (:py:class:`int`, optional): The maximum length of the\n text before truncation (defaults to 350 characters).\n end (:py:class:`str`, optional): The ending to use to show that\n the text was truncated (defaults to ``'...'``).\n\nReturns:\n :py:class:`str`: The truncated text.", "source": "juraj_google_style"} -{"code": "def get_class_members(self, cls_name, cls):\n\n for name, member 
in inspect.getmembers(cls):\n # Only show methods and properties presently. In Python 3,\n # methods register as isfunction.\n is_method = inspect.ismethod(member) or inspect.isfunction(member)\n if not (is_method or isinstance(member, property)):\n continue\n if ((is_method and member.__name__ == \"__init__\")\n or self._should_include_member(name, member)):\n yield name, (\"%s.%s\" % (cls_name, name), member)", "docstring": "Returns the list of class members to document in `cls`.\n\n This function filters the class member to ONLY return those\n defined by the class. It drops the inherited ones.\n\nArgs:\n cls_name: Qualified name of `cls`.\n cls: An inspect object of type 'class'.\n\nYields:\n name, member tuples.", "source": "juraj_google_style"} -{"code": "def default(self, value):\n\n if isinstance(value, messages.Enum):\n return str(value)\n\n if six.PY3 and isinstance(value, bytes):\n return value.decode('utf8')\n\n if isinstance(value, messages.Message):\n result = {}\n for field in value.all_fields():\n item = value.get_assigned_value(field.name)\n if item not in (None, [], ()):\n result[field.name] = (\n self.__protojson_protocol.encode_field(field, item))\n # Handle unrecognized fields, so they're included when a message is\n # decoded then encoded.\n for unknown_key in value.all_unrecognized_fields():\n unrecognized_field, _ = value.get_unrecognized_field_info(\n unknown_key)\n # Unknown fields are not encoded as they should have been\n # processed before we get to here.\n result[unknown_key] = unrecognized_field\n return result\n\n return super(MessageJSONEncoder, self).default(value)", "docstring": "Return dictionary instance from a message object.\n\nArgs:\n value: Value to get dictionary for. If not encodable, will\n call superclasses default method.", "source": "juraj_google_style"} -{"code": "def add_arguments(cls, parser):\n\n\n parser.add_argument(\n '-c', '--create-missing-tasks',\n action='store_true',\n dest='create_missing_tasks',\n help=\"[sync] create asana tasks for issues without tasks\"\n )\n\n parser.add_argument(\n '-l', '--sync-labels',\n action='store_true',\n dest='sync_labels',\n help=\"[sync] sync labels and milestones for each issue\"\n )", "docstring": "Add arguments to the parser for collection in app.args.\n\nArgs:\n parser:\n `argparse.ArgumentParser`. Parser.\n Arguments added here are server on\n self.args.", "source": "juraj_google_style"} -{"code": "def _identify_eds_ing(first, second):\n\n A = set([first.L, first.R])\n A.update(first.D)\n\n B = set([second.L, second.R])\n B.update(second.D)\n\n depend_set = A & B\n left, right = sorted(list(A ^ B))\n\n return left, right, depend_set", "docstring": "Find nodes connecting adjacent edges.\n\nArgs:\n first(Edge): Edge object representing the first edge.\n second(Edge): Edge object representing the second edge.\n\nReturns:\n tuple[int, int, set[int]]: The first two values represent left and right node\n indicies of the new edge. 
The third value is the new dependence set.", "source": "juraj_google_style"} -{"code": "def optimize_boolean_expression_comparisons(ir_blocks):\n\n operator_inverses = {\n u'=': u'!=',\n u'!=': u'=',\n }\n\n def visitor_fn(expression):\n\n if not isinstance(expression, BinaryComposition):\n return expression\n\n left_is_binary_composition = isinstance(expression.left, BinaryComposition)\n right_is_binary_composition = isinstance(expression.right, BinaryComposition)\n\n if not left_is_binary_composition and not right_is_binary_composition:\n # Nothing to rewrite, return the expression as-is.\n return expression\n\n identity_literal = None # The boolean literal for which we just use the inner expression.\n inverse_literal = None # The boolean literal for which we negate the inner expression.\n if expression.operator == u'=':\n identity_literal = TrueLiteral\n inverse_literal = FalseLiteral\n elif expression.operator == u'!=':\n identity_literal = FalseLiteral\n inverse_literal = TrueLiteral\n else:\n return expression\n\n expression_to_rewrite = None\n if expression.left == identity_literal and right_is_binary_composition:\n return expression.right\n elif expression.right == identity_literal and left_is_binary_composition:\n return expression.left\n elif expression.left == inverse_literal and right_is_binary_composition:\n expression_to_rewrite = expression.right\n elif expression.right == inverse_literal and left_is_binary_composition:\n expression_to_rewrite = expression.left\n\n if expression_to_rewrite is None:\n # We couldn't find anything to rewrite, return the expression as-is.\n return expression\n elif expression_to_rewrite.operator not in operator_inverses:\n # We can't rewrite the inner expression since we don't know its inverse operator.\n return expression\n else:\n return BinaryComposition(\n operator_inverses[expression_to_rewrite.operator],\n expression_to_rewrite.left,\n expression_to_rewrite.right)\n\n new_ir_blocks = []\n for block in ir_blocks:\n new_block = block.visit_and_update_expressions(visitor_fn)\n new_ir_blocks.append(new_block)\n\n return new_ir_blocks", "docstring": "Optimize comparisons of a boolean binary comparison expression against a boolean literal.\n\n Rewriting example:\n BinaryComposition(\n '=',\n BinaryComposition('!=', something, NullLiteral)\n False)\n\n The above is rewritten into:\n BinaryComposition('=', something, NullLiteral)\n\nArgs:\n ir_blocks: list of basic block objects\n\nReturns:\n a new list of basic block objects, with the optimization applied", "source": "juraj_google_style"} -{"code": "def setup_data_stream(\n self,\n connection_factory: Callable[[tuple], Connection],\n data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> \\\n DataStream:\n\n yield from self._control_stream.write_command(Command('TYPE', 'I'))\n reply = yield from self._control_stream.read_reply()\n\n self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)\n\n address = yield from self.passive_mode()\n\n connection = yield from connection_factory(address)\n\n # TODO: unit test for following line for connections that have\n # the same port over time but within pool cleaning intervals\n connection.reset()\n\n yield from connection.connect()\n\n data_stream = data_stream_factory(connection)\n\n return data_stream", "docstring": "Create and setup a data stream.\n\n This function will set up passive and binary mode and handle\n connecting to the data connection.\n\nArgs:\n connection_factory: A coroutine callback that returns a 
connection\n data_stream_factory: A callback that returns a data stream\n\n Coroutine.\n\nReturns:\n DataStream", "source": "juraj_google_style"} -{"code": "def __init__(self, tag, ID, actual, expected):\n\n self.tag = tag\n self.ID = ID\n self.actual = actual\n self.expected = expected\n super(ConsistencyError, self).__init__(\"INCONSISTENCY in %s ID '%s': text results '%s' != concatenated '%s'\" % (tag, ID, actual, expected))", "docstring": "Construct a new ConsistencyError.\n\nArgs:\n tag (string): Level of the inconsistent element\n ID (string): ``ID`` of the inconsistent element\n actual (string):\n expected (string):", "source": "juraj_google_style"} -{"code": "def export_warnings(self, export_file):\n\n warn_filepath = op.dirname(export_file)\n warn_filename = op.splitext(op.basename(export_file))[0]\n self._add_entry(templates.EXPORT_WARNINGS\n .format(warnings_export_path=warn_filepath,\n warnings_export_file=warn_filename))", "docstring": "Append an export warnings entry to the journal.\n\n This instructs Revit to export warnings from the opened model.\n Currently Revit will stop journal execution if the model does not\n have any warnings and the export warnings UI button is disabled.\n\nArgs:\n export_file (str): full path of the ouput html file", "source": "juraj_google_style"} -{"code": "def _send_json(self, method, path, data):\n\n headers = {'Content-type': 'application/json'}\n return self._make_request(method, path, data=data, headers=headers)", "docstring": "Make a application/json request.\n\nArgs:\n `method`: The method of the request (POST or PUT).\n `path`: The path to the resource.\n `data`: The JSON-encoded data.\n\nReturns:\n The content of the response.\n\nRaises:\n An exception depending on the HTTP status code of the response.", "source": "juraj_google_style"} -{"code": "def ne(self, other, axis=\"columns\", level=None):\n\n return self._binary_op(\"ne\", other, axis=axis, level=level)", "docstring": "Checks element-wise that this is not equal to other.\n\nArgs:\n other: A DataFrame or Series or scalar to compare to.\n axis: The axis to perform the ne over.\n level: The Multilevel index level to apply ne over.\n\nReturns:\n A new DataFrame filled with Booleans.", "source": "juraj_google_style"} -{"code": "def compute_samples_displays(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n param_resolver: 'study.ParamResolverOrSimilarType' = None,\n ) -> study.ComputeDisplaysResult:\n\n return self.compute_samples_displays_sweep(\n program,\n study.ParamResolver(param_resolver))[0]", "docstring": "Computes SamplesDisplays in the supplied Circuit or Schedule.\n\nArgs:\n program: The circuit or schedule to simulate.\n param_resolver: Parameters to run with the program.\n\nReturns:\n ComputeDisplaysResult for the simulation.", "source": "juraj_google_style"} -{"code": "def _ParseFileHeader(self, file_object):\n\n file_header_map = self._GetDataTypeMap(\n 'chrome_cache_data_block_file_header')\n\n try:\n file_header, _ = self._ReadStructureFromFileObject(\n file_object, 0, file_header_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n 'Unable to parse data block file header with error: {0!s}'.format(\n exception))\n\n if file_header.signature != self._FILE_SIGNATURE:\n raise errors.ParseError('Unsupported data block file signature')\n\n format_version = '{0:d}.{1:d}'.format(\n file_header.major_version, file_header.minor_version)\n if format_version not in ('2.0', '2.1'):\n raise errors.ParseError(\n 'Unsupported data 
block file format version: {0:s}'.format(\n format_version))\n\n if file_header.block_size not in (256, 1024, 4096):\n raise errors.ParseError(\n 'Unsupported data block file block size: {0:d}'.format(\n file_header.block_size))", "docstring": "Parses the file header.\n\nArgs:\n file_object (dfvfs.FileIO): a file-like object to parse.\n\nRaises:\n ParseError: if the file header cannot be read.", "source": "juraj_google_style"} -{"code": "def put(self, url, params=None, data=None, files=None, **kwargs):\n\n return self.call_api(\n \"PUT\",\n url,\n params=params,\n data=data,\n files=files,\n **kwargs\n )", "docstring": "Call the API with a PUT request.\n\nArgs:\n url (str): Resource location relative to the base URL.\n params (dict or None): Query-string parameters.\n data (dict or None): Request body contents.\n files (dict or None: Files to be passed to the request.\n\nReturns:\n An instance of ResultParser or ErrorParser.", "source": "juraj_google_style"} -{"code": "def substitute(expr, var_map):\n\n try:\n if isinstance(expr, SympyBasic):\n sympy_var_map = {\n k: v for (k, v) in var_map.items()\n if isinstance(k, SympyBasic)}\n return expr.subs(sympy_var_map)\n else:\n return expr.substitute(var_map)\n except AttributeError:\n if expr in var_map:\n return var_map[expr]\n return expr", "docstring": "Substitute symbols or (sub-)expressions with the given replacements and\n re-evalute the result\n\nArgs:\n expr: The expression in which to perform the substitution\n var_map (dict): The substitution dictionary.", "source": "juraj_google_style"} -{"code": "def ParseIfaddrs(ifaddrs):\n\n precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs))\n\n ifaces = {}\n\n for ifaddr in IterIfaddrs(ifaddrs):\n ifname = ctypes.string_at(ifaddr.ifa_name).decode(\"utf-8\")\n iface = ifaces.setdefault(ifname, rdf_client_network.Interface())\n iface.ifname = ifname\n\n if not ifaddr.ifa_addr:\n continue\n\n sockaddr = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddr))\n iffamily = sockaddr.contents.sa_family\n if iffamily == AF_INET:\n sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin))\n\n address = rdf_client_network.NetworkAddress()\n address.address_type = rdf_client_network.NetworkAddress.Family.INET\n address.packed_bytes = struct.pack(\"=L\", sockaddrin.contents.sin_addr)\n iface.addresses.append(address)\n elif iffamily == AF_INET6:\n sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin6))\n\n address = rdf_client_network.NetworkAddress()\n address.address_type = rdf_client_network.NetworkAddress.Family.INET6\n address.packed_bytes = bytes(list(sockaddrin.contents.sin6_addr))\n iface.addresses.append(address)\n elif iffamily == AF_LINK:\n sockaddrdl = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrdl))\n\n nlen = sockaddrdl.contents.sdl_nlen\n alen = sockaddrdl.contents.sdl_alen\n iface.mac_address = bytes(sockaddrdl.contents.sdl_data[nlen:nlen + alen])\n else:\n raise ValueError(\"Unexpected socket address family: %s\" % iffamily)\n\n return itervalues(ifaces)", "docstring": "Parses contents of the intrusive linked list of `ifaddrs`.\n\nArgs:\n ifaddrs: A pointer to the first node of `ifaddrs` linked list. 
Can be NULL.\n\nReturns:\n An iterator over instances of `rdf_client_network.Interface`.", "source": "juraj_google_style"} -{"code": "def parse(self, rrstr):\n # type: (bytes) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('SL record already initialized!')\n\n (su_len, su_entry_version_unused, self.flags) = struct.unpack_from('=BBB', rrstr[:5], 2)\n\n # We assume that the caller has already checked the su_entry_version,\n # so we don't bother.\n\n cr_offset = 5\n data_len = su_len - 5\n while data_len > 0:\n (cr_flags, len_cp) = struct.unpack_from('=BB', rrstr[:cr_offset + 2], cr_offset)\n\n data_len -= 2\n cr_offset += 2\n\n self.symlink_components.append(self.Component(cr_flags, len_cp,\n rrstr[cr_offset:cr_offset + len_cp]))\n\n # FIXME: if this is the last component in this SL record,\n # but the component continues on in the next SL record, we will\n # fail to record this bit. We should fix that.\n\n cr_offset += len_cp\n data_len -= len_cp\n\n self._initialized = True", "docstring": "Parse a Rock Ridge Symbolic Link record out of a string.\n\n Parameters:\n rrstr - The string to parse the record out of.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def is_list_of_matching_dicts(list_of_dicts, expected_keys=None):\n\n\n if isinstance(list_of_dicts, list) and len(list_of_dicts) == 0:\n return False\n\n is_not_list_msg = .format(list_of_dicts)\n assert isinstance(list_of_dicts, list), is_not_list_msg\n\n not_all_dicts_msg = .format(list_of_dicts)\n assert all([isinstance(d, dict) for d in list_of_dicts]), not_all_dicts_msg\n\n # Si no se pasan expected_keys, se las toma del primer diccionario\n expected_keys = expected_keys or set(list_of_dicts[0].keys())\n\n elements = [set(d.keys()) == expected_keys for d in list_of_dicts]\n\n return all(elements)", "docstring": "Comprueba que una lista esté compuesta únicamente por diccionarios,\n que comparten exactamente las mismas claves.\n\nArgs:\n list_of_dicts (list): Lista de diccionarios a comparar.\n expected_keys (set): Conjunto de las claves que cada diccionario debe\n tener. 
Si no se incluye, se asume que son las claves del primer\n diccionario de la lista.\n\nReturns:\n bool: True si todos los diccionarios comparten sus claves.", "source": "juraj_google_style"} -{"code": "def _piecewise_learning_rate(step, boundaries, values):\n\n values = [1.0] + values\n boundaries = [float(x) for x in boundaries]\n return tf.train.piecewise_constant(\n step, boundaries, values, name=\"piecewise_lr\")", "docstring": "Scale learning rate according to the given schedule.\n\n Multipliers are not cumulative.\n\nArgs:\n step: global step\n boundaries: List of steps to transition on.\n values: Multiplier to apply at each boundary transition.\n\nReturns:\n Scaled value for the learning rate.", "source": "juraj_google_style"} -{"code": "def remove_object_from_list(self, obj, list_element):\n\n list_element = self._handle_location(list_element)\n\n if isinstance(obj, JSSObject):\n results = [item for item in list_element.getchildren() if\n item.findtext(\"id\") == obj.id]\n elif isinstance(obj, (int, basestring)):\n results = [item for item in list_element.getchildren() if\n item.findtext(\"id\") == str(obj) or\n item.findtext(\"name\") == obj]\n\n if len(results) == 1:\n list_element.remove(results[0])\n elif len(results) > 1:\n raise ValueError(\"There is more than one matching object at that \"\n \"path!\")", "docstring": "Remove an object from a list element.\n\nArgs:\n obj: Accepts JSSObjects, id's, and names\n list_element: Accepts an Element or a string path to that\n element", "source": "juraj_google_style"} -{"code": "def clean_video_data(_data):\n\n\n data = _data.copy()\n\n # TODO: fix this ugliness\n title = data.get('title')\n if title:\n data['title'] = clean_title(title)\n\n return data", "docstring": "Clean video data:\n -> cleans title\n -> ...\n\nArgs:\n _data (dict): Information about the video.\n\nReturns:\n dict: Refined video data.", "source": "juraj_google_style"} -{"code": "def bool_input(message):\n\n\n while True:\n suffix = ' (true or false): '\n inp = input(message + suffix)\n if inp.lower() == 'true':\n return True\n elif inp.lower() == 'false':\n return False\n else:\n print(colored('Must be either true or false, try again!', 'red'))", "docstring": "Ask a user for a boolean input\n\nArgs:\n message (str): Prompt for user\n\nReturns:\n bool_in (boolean): Input boolean", "source": "juraj_google_style"} -{"code": "def Reference(uri, meaning=None):\n\n attrib = {'uri': uri}\n if meaning is not None:\n attrib['meaning'] = meaning\n return objectify.Element('Reference', attrib)", "docstring": "Represents external information, typically original obs data and metadata.\n\nArgs:\n uri(str): Uniform resource identifier for external data, e.g. FITS file.\n meaning(str): The nature of the document referenced, e.g. what\n instrument and filter was used to create the data?", "source": "juraj_google_style"} -{"code": "def GetValueLength(rd, pos):\n\n rd = bytearray(rd)\n key = rd[pos]\n if key == LONG_ITEM_ENCODING:\n # If the key is tagged as a long item (0xfe), then the format is\n # [key (1 byte)] [data len (1 byte)] [item tag (1 byte)] [data (n # bytes)].\n # Thus, the entire key record is 3 bytes long.\n if pos + 1 < len(rd):\n return (3, rd[pos + 1])\n else:\n raise errors.HidError('Malformed report descriptor')\n\n else:\n # If the key is tagged as a short item, then the item tag and data len are\n # packed into one byte. 
The format is thus:\n # [tag (high 4 bits)] [type (2 bits)] [size code (2 bits)] [data (n bytes)].\n # The size code specifies 1,2, or 4 bytes (0x03 means 4 bytes).\n code = key & 0x03\n if code <= 0x02:\n return (1, code)\n elif code == 0x03:\n return (1, 4)\n\n raise errors.HidError('Cannot happen')", "docstring": "Get value length for a key in rd.\n\n For a key at position pos in the Report Descriptor rd, return the length\n of the associated value. This supports both short and long format\n values.\n\nArgs:\n rd: Report Descriptor\n pos: The position of the key in rd.\n\nReturns:\n (key_size, data_len) where key_size is the number of bytes occupied by\n the key and data_len is the length of the value associated by the key.", "source": "juraj_google_style"} -{"code": "def __init__(self, resolver_context, file_object=None):\n\n if file_object:\n raise ValueError('File object value set.')\n\n super(EWFFile, self).__init__(resolver_context)\n self._file_objects = []", "docstring": "Initializes a file-like object.\n\nArgs:\n resolver_context (Context): resolver context.\n file_object (Optional[FileIO]): file-like object.\n\nRaises:\n ValueError: when file_object is set.", "source": "juraj_google_style"} -{"code": "def decode(obj, content_type):\n # type: (np.array or Iterable or int or float, str) -> np.array\n\n try:\n decoder = _decoders_map[content_type]\n return decoder(obj)\n except KeyError:\n raise _errors.UnsupportedFormatError(content_type)", "docstring": "Decode an object ton a one of the default content types to a numpy array.\n\nArgs:\n obj (object): to be decoded.\n content_type (str): content type to be used.\n\nReturns:\n np.array: decoded object.", "source": "juraj_google_style"} -{"code": "def resetThingStateForLogicalInterface(self, thingTypeId, thingId , logicalInterfaceId):\n\n req = ApiClient.thingStateUrl % (self.host, \"\", thingTypeId,thingId , logicalInterfaceId)\n body = {\"operation\" : \"reset-state\"}\n resp = requests.patch(req, auth=self.credentials, headers={\"Content-Type\":\"application/json\"}, data=json.dumps(body),\n verify=self.verify)\n if (resp.status_code == 200):\n self.logger.debug(\"Reset ThingState For LogicalInterface succeeded\")\n else:\n raise ibmiotf.APIException(resp.status_code, \" HTTP error on reset ThingState For LogicalInterface \", resp)\n return resp.json()", "docstring": "Perform an operation against the thing state for a logical interface\n Parameters:\n - thingTypeId (string)\n - thingId (string)\n - logicalInterfaceId (string)\n Throws APIException on failure.", "source": "juraj_google_style"} -{"code": "def __init__(self, ind_id, case_id=None, mother=None,\n father=None, sex=None, phenotype=None, ind_index=None,\n profile=None, similar_samples=None):\n\n super(Individual, self).__init__(\n ind_id=ind_id,\n name=ind_id,\n case_id=case_id,\n ind_index=ind_index,\n sex=sex,\n )\n\n if profile: self['profile'] = profile\n if similar_samples: self['similar_samples'] = similar_samples", "docstring": "Construct a individual object\n\nArgs:\n ind_id (str): The individual id\n case_id (str): What case it belongs to\n mother (str): The mother id\n father (str): The father id\n sex (str): Sex in ped format\n phenotype (str): Phenotype in ped format\n ind_index (int): Column in the vcf.", "source": "juraj_google_style"} -{"code": "def get_details(self, ids):\n\n if isinstance(ids, list):\n if len(ids) > 5:\n ids = ids[:5]\n id_param = ';'.join(ids) + '/'\n else:\n ids = str(ids)\n id_param = ids + '/'\n\n header, content = 
self._http_request(id_param)\n resp = json.loads(content)\n if not self._is_http_response_ok(header):\n error = resp.get('error_message', 'Unknown Error')\n raise HttpException(header.status, header.reason, error) \n return resp", "docstring": "Locu Venue Details API Call Wrapper\n\nArgs:\n list of ids : ids of a particular venues to get insights about. Can process up to 5 ids", "source": "juraj_google_style"} -{"code": "def __init__(self, latitude, longitude, name, units='km'):\n\n super(NumberedPoint, self).__init__(latitude, longitude, units)\n\n self.name = name", "docstring": "Initialise a new ``NumberedPoint`` object.\n\nArgs:\n latitude (float): Location's latitude\n longitude (float): Location's longitude\n name (str): Location's name or command line position\n units (str): Unit type to be used for distances", "source": "juraj_google_style"} -{"code": "def _serialize_scalar_from_string_representation_factory(type_name, types, str_func=str):\n\n def serialize(ion_event):\n value = ion_event.value\n validate_scalar_value(value, types)\n return six.b(str_func(value))\n serialize.__name__ = '_serialize_' + type_name\n return serialize", "docstring": "Builds functions that leverage Python ``str()`` or similar functionality.\n\nArgs:\n type_name (str): The name of the Ion type.\n types (Union[Sequence[type],type]): The Python types to validate for.\n str_func (Optional[Callable]): The function to convert the value with, defaults to ``str``.\n\nReturns:\n function: The function for serializing scalars of a given type to Ion text bytes.", "source": "juraj_google_style"} -{"code": "def indent_css(f, output):\n\n line_count = get_line_count(f)\n f = open(f, 'r+')\n output = open(output, 'r+')\n for line in range(line_count):\n string = f.readline().rstrip()\n if len(string) > 0:\n if string[-1] == \";\":\n output.write(\" \" + string + \"\\n\")\n else:\n output.write(string + \"\\n\")\n output.close()\n f.close()", "docstring": "Indentes css that has not been indented and saves it to a new file.\n A new file is created if the output destination does not already exist.\n\nArgs:\n f: string, path to file.\n\n output: string, path/name of the output file (e.g. 
/directory/output.css).\n print type(response.read())\n\nReturns:\n None.", "source": "juraj_google_style"} -{"code": "def GetFileEntryByPathSpec(self, path_spec):\n\n tsk_vs_part, partition_index = tsk_partition.GetTSKVsPartByPathSpec(\n self._tsk_volume, path_spec)\n\n location = getattr(path_spec, 'location', None)\n\n # The virtual root file has not corresponding TSK volume system part object\n # but should have a location.\n if tsk_vs_part is None:\n if location is None or location != self.LOCATION_ROOT:\n return None\n\n return tsk_partition_file_entry.TSKPartitionFileEntry(\n self._resolver_context, self, path_spec, is_root=True,\n is_virtual=True)\n\n if location is None and partition_index is not None:\n path_spec.location = '/p{0:d}'.format(partition_index)\n\n return tsk_partition_file_entry.TSKPartitionFileEntry(\n self._resolver_context, self, path_spec)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\n path_spec (PathSpec): a path specification.\n\nReturns:\n TSKPartitionFileEntry: a file entry or None of not available.", "source": "juraj_google_style"} -{"code": "def _load_config_include(self, include_directory):\n\n include_directory = os.path.join(self.app_path, include_directory)\n if not os.path.isdir(include_directory):\n msg = 'Provided include directory does not exist ({}).'.format(include_directory)\n sys.exit(msg)\n\n profiles = []\n for filename in sorted(os.listdir(include_directory)):\n if filename.endswith('.json'):\n self.log.info('Loading config: {}'.format(filename))\n print('Include File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, filename))\n config_file = os.path.join(include_directory, filename)\n with open(config_file) as data_file:\n try:\n profiles.extend(json.load(data_file))\n except ValueError as e:\n print('Invalid JSON file: {}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e))\n sys.exit(1)\n return profiles", "docstring": "Load included configuration files.\n\nArgs:\n include_directory (str): The name of the config include directory.\n\nReturns:\n list: A list of all profiles for the current App.", "source": "juraj_google_style"} -{"code": "def swo_supported_speeds(self, cpu_speed, num_speeds=3):\n\n buf_size = num_speeds\n buf = (ctypes.c_uint32 * buf_size)()\n res = self._dll.JLINKARM_SWO_GetCompatibleSpeeds(cpu_speed, 0, buf, buf_size)\n if res < 0:\n raise errors.JLinkException(res)\n\n return list(buf)[:res]", "docstring": "Retrives a list of SWO speeds supported by both the target and the\n connected J-Link.\n\n The supported speeds are returned in order from highest to lowest.\n\nArgs:\n self (JLink): the ``JLink`` instance\n cpu_speed (int): the target's CPU speed in Hz\n num_speeds (int): the number of compatible speeds to return\n\nReturns:\n A list of compatible SWO speeds in Hz in order from highest to lowest.", "source": "juraj_google_style"} -{"code": "def _get_lattices(self, target_lattice, s, supercell_size=1):\n\n lattices = s.lattice.find_all_mappings(\n target_lattice, ltol=self.ltol, atol=self.angle_tol,\n skip_rotation_matrix=True)\n for l, _, scale_m in lattices:\n if abs(abs(np.linalg.det(scale_m)) - supercell_size) < 0.5:\n yield l, scale_m", "docstring": "Yields lattices for s with lengths and angles close to the\n lattice of target_s. 
If supercell_size is specified, the\n returned lattice will have that number of primitive cells\n in it\n\nArgs:\n s, target_s: Structure objects", "source": "juraj_google_style"} -{"code": "def approve(self, sha=None, **kwargs):\n\n path = '%s/%s/approve' % (self.manager.path, self.get_id())\n data = {}\n if sha:\n data['sha'] = sha\n\n server_data = self.manager.gitlab.http_post(path, post_data=data,\n **kwargs)\n self._update_attrs(server_data)", "docstring": "Approve the merge request.\n\nArgs:\n sha (str): Head SHA of MR\n **kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabMRApprovalError: If the approval failed", "source": "juraj_google_style"} -{"code": "def get_predicted_structure(self, structure, ref_structure):\n\n new_structure = structure.copy()\n new_structure.scale_lattice(self.predict(structure, ref_structure))\n\n return new_structure", "docstring": "Given a structure, returns back the structure scaled to predicted\n volume.\n\nArgs:\n structure (Structure): structure w/unknown volume\n ref_structure (Structure): A reference structure with a similar\n structure but different species.\n\nReturns:\n a Structure object with predicted volume", "source": "juraj_google_style"} -{"code": "def _read_data_handler(length, whence, ctx, skip=False, stream_event=ION_STREAM_INCOMPLETE_EVENT):\n\n trans = None\n queue = ctx.queue\n\n if length > ctx.remaining:\n raise IonException('Length overrun: %d bytes, %d remaining' % (length, ctx.remaining))\n\n # Make sure to check the queue first.\n queue_len = len(queue)\n if queue_len > 0:\n # Any data available means we can only be incomplete.\n stream_event = ION_STREAM_INCOMPLETE_EVENT\n length -= queue_len\n\n if skip:\n # For skipping we need to consume any remnant in the buffer queue.\n if length >= 0:\n queue.skip(queue_len)\n else:\n queue.skip(queue_len + length)\n\n while True:\n data_event, self = (yield trans)\n if data_event is not None and data_event.data is not None:\n data = data_event.data\n data_len = len(data)\n if data_len > 0:\n # We got something so we can only be incomplete.\n stream_event = ION_STREAM_INCOMPLETE_EVENT\n length -= data_len\n if not skip:\n queue.extend(data)\n else:\n pos_adjustment = data_len\n if length < 0:\n pos_adjustment += length\n # More data than we need to skip, so make sure to accumulate that remnant.\n queue.extend(data[length:])\n queue.position += pos_adjustment\n if length <= 0:\n # We got all the data we need, go back immediately\n yield Transition(None, whence)\n\n trans = Transition(stream_event, self)", "docstring": "Creates a co-routine for retrieving data up to a requested size.\n\nArgs:\n length (int): The minimum length requested.\n whence (Coroutine): The co-routine to return to after the data is satisfied.\n ctx (_HandlerContext): The context for the read.\n skip (Optional[bool]): Whether the requested number of bytes should be skipped.\n stream_event (Optional[IonEvent]): The stream event to return if no bytes are read or\n available.", "source": "juraj_google_style"} -{"code": "def get_collection(self, lang=None, task=None):\n\n if lang: id = \"{}{}\".format(Downloader.LANG_PREFIX, lang)\n elif task: id = \"{}{}\".format(Downloader.TASK_PREFIX, task)\n else: raise ValueError(\"You should pass either the task or the lang\")\n try:\n return self.info(id)\n except ValueError as e:\n if lang: raise LanguageNotSupported(\"Language {} is not supported\".format(id))\n if task: raise 
TaskNotSupported(\"Task {} is not supported\".format(id))", "docstring": "Return the collection that represents a specific language or task.\n\nArgs:\n lang (string): Language code.\n task (string): Task name.", "source": "juraj_google_style"} -{"code": "def QA_SU_save_stock_info(engine, client=DATABASE):\n\n\n engine = select_save_engine(engine)\n engine.QA_SU_save_stock_info(client=client)", "docstring": "save stock info\n\nArgs:\n engine {[type]} -- [description]\n\n Keyword Arguments:\n client {[type]} -- [description] (default: {DATABASE})", "source": "juraj_google_style"} -{"code": "def _combineResults(result, *namedTuples):\n\n results = ClientJobsDAO.partitionAtIntervals(\n result, [len(nt._fields) for nt in namedTuples])\n return [nt._make(result) for nt, result in zip(namedTuples, results)]", "docstring": "Return a list of namedtuples from the result of a join query. A\n single database result is partitioned at intervals corresponding to the\n fields in namedTuples. The return value is the result of applying\n namedtuple._make() to each of the partitions, for each of the namedTuples.\n\n Parameters:\n ----------------------------------------------------------------\n result: Tuple representing a single result from a database query\n *namedTuples: List of named tuples.", "source": "juraj_google_style"} -{"code": "def restrict_bond_dict(self, bond_dict):\n\n return {j: bond_dict[j] & set(self.index) for j in self.index}", "docstring": "Restrict a bond dictionary to self.\n\nArgs:\n bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,\n to see examples for a bond_dict.\n\nReturns:\n bond dictionary", "source": "juraj_google_style"} -{"code": "def _FormatUsername(self, event):\n\n username = self._output_mediator.GetUsername(event)\n return self._SanitizeField(username)", "docstring": "Formats the username.\n\nArgs:\n event (EventObject): event.\n\nReturns:\n str: formatted username field.", "source": "juraj_google_style"} -{"code": "def minimum(self, vars_list: List[str]) -> 'TensorFluent':\n\n return self._aggregation_op(tf.reduce_min, self, vars_list)", "docstring": "Returns the TensorFluent for the minimum aggregation function.\n\nArgs:\n vars_list: The list of variables to be aggregated over.\n\nReturns:\n A TensorFluent wrapping the minimum aggregation function.", "source": "juraj_google_style"} -{"code": "def outline(self, level=logging.INFO, message=\"\"):\n\n steps = 1\n logger.log(level, \"Plan \\\"%s\\\":\", self.description)\n for step in self.steps:\n logger.log(\n level,\n \" - step: %s: target: \\\"%s\\\", action: \\\"%s\\\"\",\n steps,\n step.name,\n step.fn.__name__,\n )\n steps += 1\n\n if message:\n logger.log(level, message)", "docstring": "Print an outline of the actions the plan is going to take.\n The outline will represent the rough ordering of the steps that will be\n taken.\n\nArgs:\n level (int, optional): a valid log level that should be used to log\n the outline\n message (str, optional): a message that will be logged to\n the user after the outline has been logged.", "source": "juraj_google_style"} -{"code": "def get_suggestions(self, iteration_config=None):\n\n matrix = self.hptuning_config.matrix\n\n suggestions = []\n keys = list(matrix.keys())\n values = [v.to_numpy() for v in matrix.values()]\n for v in itertools.product(*values):\n suggestions.append(dict(zip(keys, v)))\n\n if self.hptuning_config.grid_search:\n n_suggestions = self.hptuning_config.grid_search.n_experiments\n if n_suggestions:\n return suggestions[:n_suggestions]\n 
return suggestions", "docstring": "Return a list of suggestions based on grid search.\n\n Params:\n matrix: `dict` representing the {hyperparam: hyperparam matrix config}.\n n_suggestions: number of suggestions to make.", "source": "juraj_google_style"} -{"code": "def _load_yaml_(file_name):\n\n if not os.path.exists(file_name): return dict()\n\n with open(file_name, 'r', encoding='utf-8') as fp:\n return YAML().load(stream=fp)", "docstring": "Load assets infomation from file\n\nArgs:\n file_name: file name\n\nReturns:\n dict", "source": "juraj_google_style"} -{"code": "def extract(self, file_path, is_drum=False):\n\n midi_data = pretty_midi.PrettyMIDI(file_path)\n note_tuple_list = []\n for instrument in midi_data.instruments:\n if (is_drum is False and instrument.is_drum is False) or (is_drum is True and instrument.is_drum is True):\n for note in instrument.notes:\n note_tuple_list.append((instrument.program, note.start, note.end, note.pitch, note.velocity))\n note_df = pd.DataFrame(note_tuple_list, columns=[\"program\", \"start\", \"end\", \"pitch\", \"velocity\"])\n note_df = note_df.sort_values(by=[\"program\", \"start\", \"end\"])\n note_df[\"duration\"] = note_df.end - note_df.start\n\n return note_df", "docstring": "Extract MIDI file.\n\nArgs:\n file_path: File path of MIDI.\n is_drum: Extract drum data or not.\n\nReturns:\n pd.DataFrame(columns=[\"program\", \"start\", \"end\", \"pitch\", \"velocity\", \"duration\"])", "source": "juraj_google_style"} -{"code": "def write_content(self, content, destination):\n\n directory = os.path.dirname(destination)\n\n if directory and not os.path.exists(directory):\n os.makedirs(directory)\n\n with io.open(destination, 'w', encoding='utf-8') as f:\n f.write(content)\n\n return destination", "docstring": "Write given content to destination path.\n\n It will create needed directory structure first if it contain some\n directories that does not allready exists.\n\nArgs:\n content (str): Content to write to target file.\n destination (str): Destination path for target file.\n\nReturns:\n str: Path where target file has been written.", "source": "juraj_google_style"} -{"code": "def get(self, resource):\n\n return self.service.get(\n resource, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "docstring": "Get attributes of the data model object named by the given resource.\n\nArgs:\n resource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed.\n\nReturns:\n (intern.resource.boss.BossResource): Returns resource of type requested on success.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def _pre_action(self, action):\n\n\n # clip actions into valid range\n assert len(action) == self.dof, \"environment got invalid action dimension\"\n low, high = self.action_spec\n action = np.clip(action, low, high)\n\n if self.has_gripper:\n arm_action = action[: self.mujoco_robot.dof]\n gripper_action_in = action[\n self.mujoco_robot.dof : self.mujoco_robot.dof + self.gripper.dof\n ]\n gripper_action_actual = self.gripper.format_action(gripper_action_in)\n action = np.concatenate([arm_action, gripper_action_actual])\n\n # rescale normalized action to control ranges\n ctrl_range = self.sim.model.actuator_ctrlrange\n bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])\n weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])\n applied_action = bias + weight * action\n self.sim.data.ctrl[:] = applied_action\n\n # gravity compensation\n 
self.sim.data.qfrc_applied[\n self._ref_joint_vel_indexes\n ] = self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]\n\n if self.use_indicator_object:\n self.sim.data.qfrc_applied[\n self._ref_indicator_vel_low : self._ref_indicator_vel_high\n ] = self.sim.data.qfrc_bias[\n self._ref_indicator_vel_low : self._ref_indicator_vel_high\n ]", "docstring": "Overrides the superclass method to actuate the robot with the\n passed joint velocities and gripper control.\n\nArgs:\n action (numpy array): The control to apply to the robot. The first\n @self.mujoco_robot.dof dimensions should be the desired\n normalized joint velocities and if the robot has\n a gripper, the next @self.gripper.dof dimensions should be\n actuation controls for the gripper.", "source": "juraj_google_style"} -{"code": "def create(self, body):\n\n\n return self.client.post(self._url(), data=body)", "docstring": "Creates a new connection.\n\nArgs:\n body (dict): Attributes used to create the connection. Mandatory\n attributes are: 'name' and 'strategy'.\n See: https://auth0.com/docs/api/management/v2#!/Connections/post_connections", "source": "juraj_google_style"} -{"code": "def download_file(bucket_name, path, target, sagemaker_session):\n\n path = path.lstrip('/')\n boto_session = sagemaker_session.boto_session\n\n s3 = boto_session.resource('s3')\n bucket = s3.Bucket(bucket_name)\n bucket.download_file(path, target)", "docstring": "Download a Single File from S3 into a local path\n\nArgs:\n bucket_name (str): S3 bucket name\n path (str): file path within the bucket\n target (str): destination directory for the downloaded file.\n sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3.", "source": "juraj_google_style"} -{"code": "def reset(self, indices=None):\n\n if self._store_rollouts and self.current_epoch is None:\n raise ValueError(\n \"No current epoch. 
start_new_epoch() should first be called.\"\n )\n\n if indices is None:\n indices = np.arange(self.batch_size)\n new_obs = self._reset(indices)\n if self._should_preprocess_on_reset:\n new_obs = self._preprocess_observations(new_obs)\n if self._store_rollouts:\n encoded_obs = self._encode_observations(new_obs)\n for (index, ob) in zip(indices, encoded_obs):\n frame = self._current_batch_frames[index]\n if frame is not None:\n rollout = self._current_batch_rollouts[index]\n rollout.append(frame._replace(action=0))\n self._current_epoch_rollouts.append(rollout)\n self._current_batch_rollouts[index] = []\n self._current_batch_frames[index] = Frame(\n observation=ob, reward=0, unclipped_reward=0, done=False,\n action=None\n )\n return new_obs", "docstring": "Resets environments at given indices.\n\n Does any preprocessing and adds rollouts to history.\n\nArgs:\n indices: Indices of environments to reset.\n\nReturns:\n Batch of initial observations of reset environments.\n\nRaises:\n ValueError: when there's no current epoch.", "source": "juraj_google_style"} -{"code": "def _GetSources(self, event_object):\n\n try:\n source_short, source_long = (\n formatters_manager.FormattersManager.GetSourceStrings(event_object))\n except KeyError as exception:\n logging.warning(\n 'Unable to correctly assemble event with error: {0!s}'.format(\n exception))\n\n return source_short, source_long", "docstring": "Returns properly formatted source strings.\n\nArgs:\n event_object: the event object (instance od EventObject).", "source": "juraj_google_style"} -{"code": "def load(self, context):\n\n try:\n # pylint: disable=g-import-not-at-top,unused-import\n import tensorflow\n except ImportError:\n return\n # pylint: disable=g-import-not-at-top\n from tensorboard.plugins.hparams.hparams_plugin import HParamsPlugin\n return HParamsPlugin(context)", "docstring": "Returns the plugin, if possible.\n\nArgs:\n context: The TBContext flags.\n\nReturns:\n A HParamsPlugin instance or None if it couldn't be loaded.", "source": "juraj_google_style"} -{"code": "def _CreateFeedItems(client, feed_details, label_name):\n\n # Get the FeedItemService.\n feed_item_service = client.GetService('FeedItemService', version='v201809')\n\n # For page feed URL recommendations and rules, see:\n # https://support.google.com/adwords/answer/7166527\n urls = ('http://www.example.com/discounts/rental-cars?id={feeditem}',\n 'http://www.example.com/discounts/hotel-deals?id={feeditem}',\n 'http://www.example.com/discounts/flight-deals?id={feeditem}')\n\n # Create the operation.\n operations = [{\n # Create the feed item.\n 'operand': {\n 'feedId': feed_details.feed_id,\n 'attributeValues': [\n {\n 'feedAttributeId': feed_details.url_attribute_id,\n 'stringValues': [url]\n },\n {\n 'feedAttributeId': feed_details.label_attribute_id,\n 'stringValues': [label_name]\n }\n ]\n },\n 'operator': 'ADD'\n } for url in urls]\n\n # Add the feed item.\n feed_item_service.mutate(operations)", "docstring": "Creates the page URLs in the DSA page feed.\n\nArgs:\n client: an AdWordsClient instance.\n feed_details: a _DSAFeedDetails instance.\n label_name: a str containing the page feed URL label.", "source": "juraj_google_style"} -{"code": "def constant(duration: int, amp: complex, name: str = None) -> SamplePulse:\n\n return _sampled_constant_pulse(duration, amp, name=name)", "docstring": "Generates constant-sampled `SamplePulse`.\n\n Applies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\n duration: Duration of pulse. 
Must be greater than zero.\n amp: Complex pulse amplitude.\n name: Name of pulse.", "source": "juraj_google_style"} -{"code": "def setup(app):\n\n event = 'html-page-context' if six.PY3 else b'html-page-context'\n app.connect(event, update_context)\n\n return {\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n 'version': __version__,\n }", "docstring": "Sphinx extension to update the rendering context with the feedback form URL.\n\nArgs:\n app (Sphinx): Application object for the Sphinx process\n\nReturns:\n a dictionary of metadata (http://www.sphinx-doc.org/en/stable/extdev/#extension-metadata)", "source": "juraj_google_style"} -{"code": "def __init__(self, filepath, eps=10, max_rows=None):\n\n\n # Compute EPS timer\n # Logic:\n # - Normal distribution centered around 1.0/eps\n # - Make sure never less than 0\n # - Precompute 1000 deltas and then just cycle around\n self.eps_timer = itertools.cycle([max(0, delta) for delta in np.random.normal(1.0/float(eps), .5/float(eps), size=1000)])\n\n # Initialize the Bro log reader\n self.log_reader = bro_log_reader.BroLogReader(filepath, tail=False)\n\n # Store max_rows\n self.max_rows = max_rows", "docstring": "Initialization for the LiveSimulator Class\n\nArgs:\n eps (int): Events Per Second that the simulator will emit events (default = 10)\n max_rows (int): The maximum number of rows to generate (default = None (go forever))", "source": "juraj_google_style"} -{"code": "def RegisterDecoder(cls, decoder):\n\n encoding_method = decoder.ENCODING_METHOD.lower()\n if encoding_method in cls._decoders:\n raise KeyError(\n 'Decoder for encoding method: {0:s} already set.'.format(\n decoder.ENCODING_METHOD))\n\n cls._decoders[encoding_method] = decoder", "docstring": "Registers a decoder for a specific encoding method.\n\nArgs:\n decoder (type): decoder class.\n\nRaises:\n KeyError: if the corresponding decoder is already set.", "source": "juraj_google_style"} -{"code": "def filter_by_months_per_hour(self, months_per_hour):\n\n _filt_values = []\n _filt_datetimes = []\n for i, d in enumerate(self.datetimes):\n if d in months_per_hour:\n _filt_datetimes.append(d)\n _filt_values.append(self._values[i])\n return MonthlyPerHourCollection(\n self.header.duplicate(), _filt_values, _filt_datetimes)", "docstring": "Filter the Data Collection based on a list of months per hour (as strings).\n\nArgs:\n months_per_hour: A list of tuples representing months per hour.\n Each tuple should possess two values: the first is the month\n and the second is the hour. (eg. (12, 23) = December at 11 PM)\n\nReturns:\n A new Data Collection with filtered data", "source": "juraj_google_style"} -{"code": "def teleport(self, location=None, rotation=None):\n\n val = 0\n if location is not None:\n val += 1\n np.copyto(self._teleport_buffer, location)\n if rotation is not None:\n np.copyto(self._rotation_buffer, rotation)\n val += 2\n self._teleport_bool_buffer[0] = val", "docstring": "Teleports the agent to a specific location, with a specific rotation.\n\nArgs:\n location (np.ndarray, optional): An array with three elements specifying the target world coordinate in meters.\n If None, keeps the current location. Defaults to None.\n rotation (np.ndarray, optional): An array with three elements specifying the target rotation of the agent.\n If None, keeps the current rotation. 
Defaults to None.\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def get_files(self, retrieve=False):\n\n\n\n\t\tif self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasFile'):\n\t\t\tfiles = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasFile ]\n\n\t\t\t# return\n\t\t\treturn files\n\n\t\telse:\n\t\t\treturn []", "docstring": "get pcdm:hasFile for this resource\n\nArgs:\n retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload", "source": "juraj_google_style"} -{"code": "def get_library_barcode_sequence_hash(self, inverse=False):\n\n action = os.path.join(self.record_url, \"get_library_barcode_sequence_hash\")\n res = requests.get(url=action, headers=HEADERS, verify=False)\n res.raise_for_status()\n res_json = res.json()\n # Convert library ID from string to int\n new_res = {}\n for lib_id in res_json:\n new_res[int(lib_id)] = res_json[lib_id]\n res_json = new_res\n\n if inverse:\n rev = {}\n for lib_id in res_json:\n rev[res_json[lib_id]] = lib_id\n res_json = rev\n return res_json", "docstring": "Calls the SequencingRequest's get_library_barcode_sequence_hash server-side endpoint to\n create a hash of the form {LibraryID -> barcode_sequence} for all Libraries on the\n SequencingRequest.\n\nArgs:\n inverse: `bool`. True means to inverse the key and value pairs such that the barcode\n sequence serves as the key.\n\n Returns: `dict`.", "source": "juraj_google_style"} -{"code": "def flatten(data):\n\n if not data:\n return data\n\n if type(data[0]) in (list, tuple):\n return list(flatten(data[0])) + list(flatten(data[1:]))\n\n return list(data[:1]) + list(flatten(data[1:]))", "docstring": "Returns a flattened version of a list.\n\n Courtesy of https://stackoverflow.com/a/12472564\n\nArgs:\n data (`tuple` or `list`): Input data\n\nReturns:\n `list`", "source": "juraj_google_style"} -{"code": "def send_notice(self, room_id, text_content, timestamp=None):\n\n body = {\n \"msgtype\": \"m.notice\",\n \"body\": text_content\n }\n return self.send_message_event(room_id, \"m.room.message\", body,\n timestamp=timestamp)", "docstring": "Perform PUT /rooms/$room_id/send/m.room.message with m.notice msgtype\n\nArgs:\n room_id (str): The room ID to send the event in.\n text_content (str): The m.notice body to send.\n timestamp (int): Set origin_server_ts (For application services only)", "source": "juraj_google_style"} -{"code": "def get_first(self, status):\n\n\n items = self.get_all(status)\n\n if items:\n return list(items.items())[0][1]\n\n return None", "docstring": "Get the first item in the queue that has the given status.\n\nArgs:\n status (str): return the first item with this status.\n\nReturns:\n :class:`nyawc.QueueItem`: The first queue item with the given status.", "source": "juraj_google_style"} -{"code": "def cancel(self, consumers):\n\n for consumer in consumers:\n del self._consumers[consumer.queue]\n protocol = yield self.when_connected()\n yield protocol.cancel(consumer)", "docstring": "Cancel a consumer that was previously started with consume.\n\nArgs:\n consumer (list of fedora_messaging.api.Consumer): The consumers to cancel.", "source": "juraj_google_style"} -{"code": "def find_pad_index(self, array):\n\n try:\n return list(array).index(self.pad_value)\n except ValueError:\n return len(array)", "docstring": "Find padding index.\n\nArgs:\n array (list): integer list.\n\nReturns:\n idx: padding index.\n\nExample:\n >>> array = [1, 2, 0]\n >>> 
self.find_pad_index(array)\n 2", "source": "juraj_google_style"} -{"code": "def write(self, message, cur_time=None):\n\n if cur_time is None:\n cur_time = time.time()\n lines = self._line_buffer.add_string(message)\n for line in lines:\n #print('ts line', repr(line))\n timestamp = ''\n if self._prepend_timestamp:\n timestamp = datetime.datetime.utcfromtimestamp(\n cur_time).isoformat() + ' '\n line = u'{}{}{}'.format(self._line_prepend, timestamp, line)\n self._fsapi.push(self._filename, line)", "docstring": "Write some text to the pusher.\n\nArgs:\n message: a string to push for this file.\n cur_time: used for unit testing. override line timestamp.", "source": "juraj_google_style"} -{"code": "def structure_path(self, path):\n\n if not path:\n self.structure_dir = None\n self.structure_file = None\n\n else:\n if not op.exists(path):\n raise OSError('{}: file does not exist!'.format(path))\n\n if not op.dirname(path):\n self.structure_dir = '.'\n else:\n self.structure_dir = op.dirname(path)\n self.structure_file = op.basename(path)", "docstring": "Provide pointers to the paths of the structure file\n\nArgs:\n path: Path to structure file", "source": "juraj_google_style"} -{"code": "def _prepare_for_training(self, job_name=None):\n\n if job_name is not None:\n self._current_job_name = job_name\n else:\n # honor supplied base_job_name or generate it\n if self.base_job_name:\n base_name = self.base_job_name\n elif isinstance(self, sagemaker.algorithm.AlgorithmEstimator):\n base_name = self.algorithm_arn.split('/')[-1] # pylint: disable=no-member\n else:\n base_name = base_name_from_image(self.train_image())\n\n self._current_job_name = name_from_base(base_name)\n\n # if output_path was specified we use it otherwise initialize here.\n # For Local Mode with local_code=True we don't need an explicit output_path\n if self.output_path is None:\n local_code = get_config_value('local.local_code', self.sagemaker_session.config)\n if self.sagemaker_session.local_mode and local_code:\n self.output_path = ''\n else:\n self.output_path = 's3://{}/'.format(self.sagemaker_session.default_bucket())", "docstring": "Set any values in the estimator that need to be set before training.\n\nArgs:\n * job_name (str): Name of the training job to be created. 
If not specified, one is generated,\n using the base name given to the constructor if applicable.", "source": "juraj_google_style"} -{"code": "def get_dimension_indices(self, query):\n\n ids = self['id'] if self.get('id') else self['dimension']['id']\n indices = []\n\n for idx, id in enumerate(ids):\n indices.append(self.get_dimension_index(id,\n [d.get(id) for d in query\n if id in d][0]))\n\n return indices", "docstring": "Converts a dimension/category list of dicts into a list of \\\n dimensions’ indices.\n\nArgs:\n query(list): dimension/category list of dicts.\n\nReturns:\n indices(list): list of dimensions' indices.", "source": "juraj_google_style"} -{"code": "def parse_delete_zone(prs, conn):\n\n prs_zone_delete = prs.add_parser('zone_delete', help='delete zone')\n prs_zone_delete.add_argument('--domain', action='store', required=True,\n help='specify zone')\n conn_options(prs_zone_delete, conn)\n prs_zone_delete.set_defaults(func=delete_zone)", "docstring": "Delete zone.\n\nArgs:\n prs: parser object of argparse\n conn: dictionary of connection information", "source": "juraj_google_style"} -{"code": "def tensor_layout(self, arg):\n\n if isinstance(arg, Tensor):\n arg = arg.shape\n return self.layout_rules.tensor_layout(arg, self.shape)", "docstring": "Compute TensorLayout for a Tensor or a Shape.\n\nArgs:\n arg: Tensor or Shape.\n\nReturns:\n TensorLayout.", "source": "juraj_google_style"} -{"code": "def set_servo_angle(self, goalangle, goaltime, led):\n\n if (self.servomodel==0x06) or (self.servomodel == 0x04):\n goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129)\n else:\n goalposition = scale(goalangle, -150, 150, 21, 1002)\n\n self.set_servo_position(goalposition, goaltime, led)", "docstring": "Sets the servo angle (in degrees)\n\n Enable torque using torque_on function before calling this\n\nArgs:\n goalangle (int): The desired angle in degrees, range -150 to 150\n goaltime (int): the time taken to move from present\n position to goalposition\n led (int): the LED color\n 0x00 LED off\n 0x04 GREEN\n 0x08 BLUE\n 0x10 RED", "source": "juraj_google_style"} -{"code": "def position(self, partition):\n\n if not isinstance(partition, TopicPartition):\n raise TypeError('partition must be a TopicPartition namedtuple')\n assert self._subscription.is_assigned(partition), 'Partition is not assigned'\n offset = self._subscription.assignment[partition].position\n if offset is None:\n self._update_fetch_positions([partition])\n offset = self._subscription.assignment[partition].position\n return offset", "docstring": "Get the offset of the next record that will be fetched\n\nArgs:\n partition (TopicPartition): Partition to check\n\nReturns:\n int: Offset", "source": "juraj_google_style"} -{"code": "def __init__(self, couplinglist=None):\n\n\n # the coupling map graph\n self.graph = nx.DiGraph()\n # a dict of dicts from node pairs to distances\n self._dist_matrix = None\n # a sorted list of physical qubits (integers) in this coupling map\n self._qubit_list = None\n\n if couplinglist is not None:\n for source, target in couplinglist:\n self.add_edge(source, target)", "docstring": "Create coupling graph. By default, the generated coupling has no nodes.\n\nArgs:\n couplinglist (list or None): An initial coupling graph, specified as\n an adjacency list containing couplings, e.g. 
[[0,1], [0,2], [1,2]].", "source": "juraj_google_style"} -{"code": "def get_recipe(self, recipe_name):\n\n if recipe_name.endswith('.yaml'):\n recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name)\n else:\n recipe = self._recipes.get(recipe_name)\n if recipe is None:\n raise RecipeNotFoundError(\"Could not find recipe\", recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()])\n\n return recipe", "docstring": "Get a recipe by name.\n\nArgs:\n recipe_name (str): The name of the recipe to fetch. Can be either the\n yaml file name or the name of the recipe.", "source": "juraj_google_style"} -{"code": "def __getitem__(self, index):\n\n getter = coordinates.Coordinates.from_string(index)\n return getter(self._values)", "docstring": "Return the value(s) of the given cell(s).\n\nArgs:\n index (str): cell/row/col index ('A1', '2', 'B') or slice ('A1':'C3')\n\nReturns:\n value (cell), list(col, row), or nested list (two-dimentional slice)\n\nRaises:\n TypeError: if ``index`` is not a string or slice of strings\n ValueError: if ``index`` canot be parsed\n IndexError: if ``index`` is out of range", "source": "juraj_google_style"} -{"code": "def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):\n\n generator = self.generate_samples(data_dir, tmp_dir, dataset_split)\n encoder = self.get_or_create_vocab(data_dir, tmp_dir)\n label_encoder = self.get_labels_encoder(data_dir)\n for sample in generator:\n inputs = encoder.encode(sample[\"inputs\"])\n inputs.append(text_encoder.EOS_ID)\n context = encoder.encode(sample[\"context\"])\n context.append(text_encoder.EOS_ID)\n targets = label_encoder.encode(sample[\"targets\"])\n sample[\"targets\"] = targets\n yield {\"inputs\": inputs, \"context\": context, \"targets\": targets}", "docstring": "A generator that generates samples that are encoded.\n\nArgs:\n data_dir: data directory\n tmp_dir: temp directory\n dataset_split: dataset split\n\nYields:\n A dict.", "source": "juraj_google_style"} -{"code": "def set_device_name(self, new_name):\n\n\n device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)\n if device_name is None:\n logger.warn('Failed to find handle for device name')\n return False\n\n if len(new_name) > MAX_DEVICE_NAME_LEN:\n logger.error('Device name exceeds maximum length ({} > {})'.format(len(new_name), MAX_DEVICE_NAME_LEN))\n return False\n\n if self.dongle._write_attribute(self.conn_handle, device_name, new_name.encode('ascii')):\n self.name = new_name\n return True\n\n return False", "docstring": "Sets a new BLE device name for this SK8.\n\nArgs:\n new_name (str): the new device name as an ASCII string, max 20 characters.\n\nReturns:\n True if the name was updated successfully, False otherwise.", "source": "juraj_google_style"} -{"code": "def Reboot(self, destination=b''):\n\n self.protocol_handler.Open(self._handle, b'reboot:%s' % destination)", "docstring": "Reboot the device.\n\nArgs:\n destination: Specify 'bootloader' for fastboot.", "source": "juraj_google_style"} -{"code": "def _StartProfiling(self, configuration):\n\n if not configuration:\n return\n\n if configuration.HaveProfileMemoryGuppy():\n self._guppy_memory_profiler = profilers.GuppyMemoryProfiler(\n self._name, configuration)\n self._guppy_memory_profiler.Start()\n\n if configuration.HaveProfileMemory():\n self._memory_profiler = profilers.MemoryProfiler(\n self._name, configuration)\n self._memory_profiler.Start()\n\n if configuration.HaveProfileProcessing():\n 
identifier = '{0:s}-processing'.format(self._name)\n self._processing_profiler = profilers.ProcessingProfiler(\n identifier, configuration)\n self._processing_profiler.Start()\n\n if configuration.HaveProfileSerializers():\n identifier = '{0:s}-serializers'.format(self._name)\n self._serializers_profiler = profilers.SerializersProfiler(\n identifier, configuration)\n self._serializers_profiler.Start()\n\n if configuration.HaveProfileStorage():\n self._storage_profiler = profilers.StorageProfiler(\n self._name, configuration)\n self._storage_profiler.Start()\n\n if configuration.HaveProfileTasks():\n self._tasks_profiler = profilers.TasksProfiler(self._name, configuration)\n self._tasks_profiler.Start()", "docstring": "Starts profiling.\n\nArgs:\n configuration (ProfilingConfiguration): profiling configuration.", "source": "juraj_google_style"} -{"code": "def get_cohp(self, spin=None, integrated=False):\n\n if not integrated:\n populations = self.cohp\n else:\n populations = self.icohp\n\n if populations is None:\n return None\n elif spin is None:\n return populations\n else:\n if isinstance(spin, int):\n spin = Spin(spin)\n elif isinstance(spin, str):\n s = {\"up\": 1, \"down\": -1}[spin.lower()]\n spin = Spin(s)\n return {spin: populations[spin]}", "docstring": "Returns the COHP or ICOHP for a particular spin.\n\nArgs:\n spin: Spin. Can be parsed as spin object, integer (-1/1)\n or str (\"up\"/\"down\")\n integrated: Return COHP (False) or ICOHP (True)\n\nReturns:\n Returns the CHOP or ICOHP for the input spin. If Spin is\n None and both spins are present, both spins will be returned\n as a dictionary.", "source": "juraj_google_style"} -{"code": "def Verify(self, mempool):\n\n\n for descriptor in self.Descriptors:\n if not descriptor.Verify():\n return False\n\n return super(StateTransaction, self).Verify(mempool)", "docstring": "Verify the transaction.\n\nArgs:\n mempool:\n\nReturns:\n bool: True if verified. 
False otherwise.", "source": "juraj_google_style"} -{"code": "def add_plugin(self, f):\n\n if f.endswith('.py'):\n\n # Just the basename without extension\n plugin_name = os.path.splitext(os.path.basename(f))[0]\n\n # It's possible the plugin has been modified and needs to be reloaded\n if plugin_name in sys.modules:\n try:\n handler = reload(sys.modules[plugin_name])\n print'\\t- %s %sRELOAD%s' % (plugin_name, color.Yellow, color.Normal)\n except ImportError, error:\n print 'Failed to import plugin: %s (%s)' % (plugin_name, error)\n return\n else:\n # Not already loaded so try to import it\n try:\n handler = __import__(plugin_name, globals(), locals(), [], -1)\n except ImportError, error:\n print 'Failed to import plugin: %s (%s)' % (plugin_name, error)\n return\n\n # Run the handler through plugin validation\n plugin = self.validate(handler)\n print '\\t- %s %sOK%s' % (plugin_name, color.Green, color.Normal)\n if plugin:\n\n # Okay must be successfully loaded so capture the plugin meta-data,\n # modification time and register the plugin through the callback\n plugin['name'] = plugin_name\n plugin['dependencies'] = plugin['class'].dependencies\n plugin['docstring'] = plugin['class'].__doc__\n plugin['mod_time'] = datetime.utcfromtimestamp(os.path.getmtime(f))\n\n # Plugin may accept sample_sets as input\n try:\n plugin['sample_set_input'] = getattr(plugin['class'], 'sample_set_input')\n except AttributeError:\n plugin['sample_set_input'] = False\n\n # Now pass the plugin back to workbench\n self.plugin_callback(plugin)", "docstring": "Adding and verifying plugin.\n\nArgs:\n f: the filepath for the plugin.", "source": "juraj_google_style"} -{"code": "def get_vmss_vm(access_token, subscription_id, resource_group, vmss_name, instance_id):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', resource_group,\n '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n '/virtualMachines/', str(instance_id),\n '?api-version=', COMP_API])\n return do_get(endpoint, access_token)", "docstring": "Get individual VMSS VM details.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n vmss_name (str): Name of the virtual machine scale set.\n instance_id (int): VM ID of the scale set VM.\n\nReturns:\n HTTP response. 
JSON body of VMSS VM model view.", "source": "juraj_google_style"} -{"code": "def add_filter_set(name):\n\n\n factory = FilterSetFactory(name)\n yield factory\n filter_sets[name] = factory.build_filter_set()", "docstring": "Builds and registers a global :class:`FilterSet`.\n\nArgs:\n name (str): The name of the set.\n\nYields:\n FilterSetFactory: A configurable factory for building a :class:`FilterSet`.", "source": "juraj_google_style"} -{"code": "def load_institute(adapter, internal_id, display_name, sanger_recipients=None):\n\n\n institute_obj = build_institute(\n internal_id=internal_id,\n display_name=display_name,\n sanger_recipients=sanger_recipients\n )\n log.info(\"Loading institute {0} with display name {1}\" \\\n \" into database\".format(internal_id, display_name))\n\n adapter.add_institute(institute_obj)", "docstring": "Load a institute into the database\n\nArgs:\n adapter(MongoAdapter)\n internal_id(str)\n display_name(str)\n sanger_recipients(list(email))", "source": "juraj_google_style"} -{"code": "def __init__(self, data_type_definition):\n\n super(UUIDMap, self).__init__(data_type_definition)\n self._byte_order = data_type_definition.byte_order", "docstring": "Initializes an UUID (or GUID) data type map.\n\nArgs:\n data_type_definition (DataTypeDefinition): data type definition.", "source": "juraj_google_style"} -{"code": "def eval_adiabatic_limit(YABFGN, Ytilde, P0):\n\n Y, A, B, F, G, N = YABFGN\n\n Klim = (P0 * (B - A * Ytilde * A) * P0).expand().simplify_scalar()\n Hlim = ((Klim - Klim.dag())/2/I).expand().simplify_scalar()\n\n Ldlim = (P0 * (G - A * Ytilde * F) * P0).expand().simplify_scalar()\n\n dN = identity_matrix(N.shape[0]) + F.H * Ytilde * F\n Nlim = (P0 * N * dN * P0).expand().simplify_scalar()\n\n return SLH(Nlim.dag(), Ldlim.dag(), Hlim.dag())", "docstring": "Compute the limiting SLH model for the adiabatic approximation\n\nArgs:\n YABFGN: The tuple (Y, A, B, F, G, N)\n as returned by prepare_adiabatic_limit.\n Ytilde: The pseudo-inverse of Y, satisfying Y * Ytilde = P0.\n P0: The projector onto the null-space of Y.\n\nReturns:\n SLH: Limiting SLH model", "source": "juraj_google_style"} -{"code": "def ParseFileEntry(self, parser_mediator, file_entry):\n\n index_file_parser = ChromeCacheIndexFileParser()\n\n file_object = file_entry.GetFileObject()\n try:\n index_file_parser.ParseFileObject(parser_mediator, file_object)\n except (IOError, errors.ParseError) as exception:\n file_object.close()\n\n display_name = parser_mediator.GetDisplayName()\n raise errors.UnableToParseFile(\n '[{0:s}] unable to parse index file {1:s} with error: {2!s}'.format(\n self.NAME, display_name, exception))\n\n # TODO: create event based on index file creation time.\n\n try:\n file_system = file_entry.GetFileSystem()\n self._ParseIndexTable(\n parser_mediator, file_system, file_entry,\n index_file_parser.index_table)\n finally:\n file_object.close()", "docstring": "Parses Chrome Cache files.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_entry (dfvfs.FileEntry): file entry.\n\nRaises:\n UnableToParseFile: when the file cannot be parsed.", "source": "juraj_google_style"} -{"code": "def set_files(self, files_downloaded, files_failed):\n\n self.files_downloaded = files_downloaded\n self.files_failed = files_failed\n self.__record_progress(Status.GET_FILE_DIFF)", "docstring": "set_files: records progress from downloading files\n\nArgs:\n files_downloaded ([str]): list of files that have been 
downloaded\n files_failed ([str]): list of files that failed to download\n Returns: None", "source": "juraj_google_style"} -{"code": "def validate_functions(ast: BELAst, bo):\n\n\n if isinstance(ast, Function):\n log.debug(f\"Validating: {ast.name}, {ast.function_type}, {ast.args}\")\n function_signatures = bo.spec[\"functions\"][\"signatures\"][ast.name][\"signatures\"]\n\n function_name = ast.name\n (valid_function, messages) = check_function_args(\n ast.args, function_signatures, function_name\n )\n if not valid_function:\n message = \", \".join(messages)\n bo.validation_messages.append(\n (\n \"ERROR\",\n \"Invalid BEL Statement function {} - problem with function signatures: {}\".format(\n ast.to_string(), message\n ),\n )\n )\n bo.parse_valid = False\n\n # Recursively process every NSArg by processing BELAst and Functions\n if hasattr(ast, \"args\"):\n for arg in ast.args:\n validate_functions(arg, bo)\n\n return bo", "docstring": "Recursively validate function signatures\n\n Determine if function matches one of the available signatures. Also,\n\n 1. Add entity types to AST NSArg, e.g. Abundance, ...\n 2. Add optional to AST Arg (optional means it is not a\n fixed, required argument and needs to be sorted for\n canonicalization, e.g. reactants(A, B, C) )\n\nArgs:\n bo: bel object\n\nReturns:\n bel object", "source": "juraj_google_style"} -{"code": "def wire_names(self, with_initial_value=True):\n\n qubit_labels = self._get_qubit_labels()\n clbit_labels = self._get_clbit_labels()\n\n if with_initial_value:\n qubit_labels = ['%s: |0>' % qubit for qubit in qubit_labels]\n clbit_labels = ['%s: 0 ' % clbit for clbit in clbit_labels]\n else:\n qubit_labels = ['%s: ' % qubit for qubit in qubit_labels]\n clbit_labels = ['%s: ' % clbit for clbit in clbit_labels]\n\n return qubit_labels + clbit_labels", "docstring": "Returns a list of names for each wire.\n\nArgs:\n with_initial_value (bool): Optional (Default: True). If true, adds the initial value to\n the name.\n\nReturns:\n List: The list of wire names.", "source": "juraj_google_style"} -{"code": "def update_power_state(self, id_or_uri, power_state):\n\n uri = self._client.build_uri(id_or_uri) + \"/powerState\"\n return self._client.update(power_state, uri)", "docstring": "Sets the power state of the specified power delivery device. 
The device must be an HP Intelligent Outlet.\n\nArgs:\n id_or_uri:\n Can be either the power device id or the uri\n power_state:\n {\"powerState\":\"On|Off\"}\n\nReturns:\n str: The power state", "source": "juraj_google_style"} -{"code": "def _get_ruuvitag_datas(macs=[], search_duratio_sec=None, run_flag=RunFlag(), bt_device=''):\n\n\n mac_blacklist = []\n start_time = time.time()\n data_iter = ble.get_datas(mac_blacklist, bt_device)\n\n for ble_data in data_iter:\n # Check duration\n if search_duratio_sec and time.time() - start_time > search_duratio_sec:\n data_iter.send(StopIteration)\n break\n # Check running flag\n if not run_flag.running:\n data_iter.send(StopIteration)\n break\n # Check MAC whitelist\n if macs and not ble_data[0] in macs:\n continue\n (data_format, data) = RuuviTagSensor.convert_data(ble_data[1])\n # Check that encoded data is valid RuuviTag data and it is sensor data\n # If data is not valid RuuviTag data add MAC to blacklist\n if data is not None:\n state = get_decoder(data_format).decode_data(data)\n if state is not None:\n yield (ble_data[0], state)\n else:\n mac_blacklist.append(ble_data[0])", "docstring": "Get data from BluetoothCommunication and handle data encoding.\n\nArgs:\n macs (list): MAC addresses. Default empty list\n search_duratio_sec (int): Search duration in seconds. Default None\n run_flag (object): RunFlag object. Function executes while run_flag.running. Default new RunFlag\n bt_device (string): Bluetooth device id\n\nYields:\n tuple: MAC and State of RuuviTag sensor data", "source": "juraj_google_style"} -{"code": "def __setattr__(cls, name, value):\n\n if cls.__initialized and name not in _POST_INIT_ATTRIBUTE_NAMES:\n raise AttributeError('May not change values: %s' % name)\n else:\n type.__setattr__(cls, name, value)", "docstring": "Overridden to avoid setting variables after init.\n\n Setting attributes on a class must work during the period of\n initialization to set the enumation value class variables and\n build the name/number maps. Once __init__ has set the\n __initialized flag to True prohibits setting any more values\n on the class. The class is in effect frozen.\n\nArgs:\n name: Name of value to set.\n value: Value to set.", "source": "juraj_google_style"} -{"code": "def _get_update(self, variant):\n\n update = {\n '$inc': {\n 'homozygote': variant.get('homozygote', 0),\n 'hemizygote': variant.get('hemizygote', 0),\n 'observations': 1\n },\n '$set': {\n 'chrom': variant.get('chrom'),\n 'start': variant.get('pos'),\n 'end': variant.get('end'),\n 'ref': variant.get('ref'),\n 'alt': variant.get('alt'),\n }\n }\n if variant.get('case_id'):\n update['$push'] = {\n 'families': {\n '$each': [variant.get('case_id')],\n '$slice': -50\n }\n }\n return update", "docstring": "Convert a variant to a proper update\n\nArgs:\n variant(dict)\n\nReturns:\n update(dict)", "source": "juraj_google_style"} -{"code": "def _find_image_bounding_boxes(filenames, image_to_bboxes):\n\n num_image_bbox = 0\n bboxes = []\n for f in filenames:\n basename = os.path.basename(f)\n if basename in image_to_bboxes:\n bboxes.append(image_to_bboxes[basename])\n num_image_bbox += 1\n else:\n bboxes.append([])\n print('Found %d images with bboxes out of %d images' % (\n num_image_bbox, len(filenames)))\n return bboxes", "docstring": "Find the bounding boxes for a given image file.\n\nArgs:\n filenames: list of strings; each string is a path to an image file.\n image_to_bboxes: dictionary mapping image file names to a list of\n bounding boxes. 
This list contains 0+ bounding boxes.\n\nReturns:\n List of bounding boxes for each image. Note that each entry in this\n list might contain from 0+ entries corresponding to the number of bounding\n box annotations for the image.", "source": "juraj_google_style"} -{"code": "def IsEquivalent(self, other):\n\n if self.name and other.name:\n return self.name == other.name\n\n if self.name:\n self_family, self_version_tuple = self._FAMILY_AND_VERSION_PER_NAME.get(\n self.name, self._DEFAULT_FAMILY_AND_VERSION)\n return (\n self_family == other.family and\n self_version_tuple == other.version_tuple)\n\n if self.family and self.version:\n if other.name:\n other_family, other_version_tuple = (\n self._FAMILY_AND_VERSION_PER_NAME.get(\n other.name, self._DEFAULT_FAMILY_AND_VERSION))\n else:\n other_family = other.family\n other_version_tuple = other.version_tuple\n\n return (\n self.family == other_family and\n self.version_tuple == other_version_tuple)\n\n if self.family:\n if other.name:\n other_family, _ = self._FAMILY_AND_VERSION_PER_NAME.get(\n other.name, self._DEFAULT_FAMILY_AND_VERSION)\n else:\n other_family = other.family\n\n return self.family == other_family\n\n return False", "docstring": "Determines if 2 operating system artifacts are equivalent.\n\n This function compares the operating systems based in order of:\n * name derived from product\n * family and version\n * family\n\nArgs:\n other (OperatingSystemArtifact): operating system artifact attribute\n container to compare with.\n\nReturns:\n bool: True if the operating systems are considered equivalent, False if\n the most specific criteria do no match, or no criteria are available.", "source": "juraj_google_style"} -{"code": "def CheckAltTokens(filename, clean_lines, linenum, error):\n\n line = clean_lines.elided[linenum]\n\n # Avoid preprocessor lines\n if Match(r'^\\s*#', line):\n return\n\n # Last ditch effort to avoid multi-line comments. This will not help\n # if the comment started before the current line or ended after the\n # current line, but it catches most of the false positives. 
At least,\n # it provides a way to workaround this warning for people who use\n # multi-line comments in preprocessor macros.\n #\n # TODO(unknown): remove this once cpplint has better support for\n # multi-line comments.\n if line.find('/*') >= 0 or line.find('*/') >= 0:\n return\n\n for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):\n error(filename, linenum, 'readability/alt_tokens', 2,\n 'Use operator %s instead of %s' % (\n _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))", "docstring": "Check alternative keywords being used in boolean expressions.\n\nArgs:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.", "source": "juraj_google_style"} -{"code": "def _pack_with_tf_ops(dataset, keys, length):\n\n empty_example = {}\n for k in keys:\n empty_example[k] = tf.zeros([0], dtype=tf.int32)\n empty_example[k + \"_position\"] = tf.zeros([0], dtype=tf.int32)\n keys_etc = empty_example.keys()\n\n def write_packed_example(partial, outputs):\n new_partial = empty_example.copy()\n new_outputs = {}\n for k in keys_etc:\n new_outputs[k] = outputs[k].write(\n outputs[k].size(),\n tf.pad(partial[k], [[0, length - tf.size(partial[k])]]))\n return new_partial, new_outputs\n\n def map_fn(x):\n\n partial = empty_example.copy()\n i = tf.zeros([], dtype=tf.int32)\n dynamic_batch_size = tf.shape(x[keys[0]])[0]\n outputs = {}\n for k in keys:\n outputs[k] = tf.TensorArray(\n tf.int32, size=0, dynamic_size=True, element_shape=[length])\n outputs[k + \"_position\"] = tf.TensorArray(\n tf.int32, size=0, dynamic_size=True, element_shape=[length])\n def cond_fn(i, partial, outputs):\n del partial, outputs\n return i < dynamic_batch_size\n def body_fn(i, partial, outputs):\n\n can_append = True\n one_example = {}\n for k in keys:\n val = tf.cast(x[k][i], tf.int32)\n val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]\n one_example[k] = val\n for k in keys:\n can_append = tf.logical_and(\n can_append,\n tf.less_equal(\n tf.size(partial[k]) + tf.size(one_example[k]), length))\n def false_fn():\n return write_packed_example(partial, outputs)\n def true_fn():\n return partial, outputs\n partial, outputs = tf.cond(can_append, true_fn, false_fn)\n new_partial = {}\n for k in keys:\n new_seq = one_example[k][:length]\n new_seq_len = tf.size(new_seq)\n new_partial[k] = tf.concat([partial[k], new_seq], 0)\n new_partial[k + \"_position\"] = tf.concat(\n [partial[k + \"_position\"],\n tf.range(new_seq_len, dtype=tf.int32)], 0)\n partial = new_partial\n return i+1, partial, outputs\n\n i, partial, outputs = tf.while_loop(\n cond_fn, body_fn, (i, partial, outputs),\n back_prop=False,\n shape_invariants=(\n tf.TensorShape([]),\n {k: tf.TensorShape([None]) for k in keys_etc},\n {k: tf.TensorShape(None) for k in keys_etc},\n ))\n partial, outputs = write_packed_example(partial, outputs)\n packed = {k: outputs[k].stack() for k in keys_etc}\n for k in keys:\n packed[k + \"_segmentation\"] = (\n tf.cumsum(\n tf.cast(tf.equal(packed[k + \"_position\"], 0), tf.int32), axis=1) *\n tf.cast(tf.not_equal(packed[k], 0), tf.int32))\n return packed\n dataset = dataset.map(map_fn,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset.flat_map(tf.data.Dataset.from_tensor_slices)", "docstring": "Helper-function for packing a dataset which has already been batched.\n\n See pack_dataset()\n\n Uses tf.while_loop. 
Slow.\n\nArgs:\n dataset: a dataset containing padded batches of examples.\n keys: a list of strings\n length: an integer\n\nReturns:\n a dataset.", "source": "juraj_google_style"} -{"code": "def generate_sjson_from_srt(srt_subs):\n\n sub_starts = []\n sub_ends = []\n sub_texts = []\n for sub in srt_subs:\n sub_starts.append(sub.start.ordinal)\n sub_ends.append(sub.end.ordinal)\n sub_texts.append(sub.text.replace('\\n', ' '))\n\n sjson_subs = {\n 'start': sub_starts,\n 'end': sub_ends,\n 'text': sub_texts\n }\n return sjson_subs", "docstring": "Generate transcripts from sjson to SubRip (*.srt).\n\nArgs:\n srt_subs(SubRip): \"SRT\" subs object\n\nReturns:\n Subs converted to \"SJSON\" format.", "source": "juraj_google_style"} -{"code": "def create_single_token(self, *, payer_id, name, identification_number, payment_method, number, expiration_date):\n\n payload = {\n \"language\": self.client.language.value,\n \"command\": PaymentCommand.CREATE_TOKEN.value,\n \"merchant\": {\n \"apiLogin\": self.client.api_login,\n \"apiKey\": self.client.api_key\n },\n \"creditCardToken\": {\n \"payerId\": payer_id,\n \"name\": name,\n \"identificationNumber\": identification_number,\n \"paymentMethod\": payment_method,\n \"number\": number,\n \"expirationDate\": expiration_date\n },\n \"test\": self.client.is_test\n }\n return self.client._post(self.url, json=payload)", "docstring": "Using this feature you can register a customer’s credit card data and get a token sequential number.\n\nArgs:\n payer_id:\n name:\n identification_number:\n payment_method:\n number:\n expiration_date:\n\n Returns:", "source": "juraj_google_style"} -{"code": "def wb004(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `wb004`'.format(value))\n\n self._wb004 = value", "docstring": "Corresponds to IDD Field `wb004`\n Wet-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\n value (float): value for IDD Field `wb004`\n Unit: C\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def get_raw_data(self, url, *args, **kwargs):\n\n res = self._conn.get(url, headers=self._prepare_headers(**kwargs))\n if res.status_code == 200:\n return res.content\n else:\n return None", "docstring": "Gets data from url as bytes\n\n Returns content under the provided url as bytes\n ie. for binary data\n\nArgs:\n **url**: address of the wanted data\n\n .. 
versionadded:: 0.3.2\n **additional_headers**: (optional) Additional headers\n to be used with request\n\nReturns:\n bytes", "source": "juraj_google_style"} -{"code": "def tonicdns_client(uri, method, token='', data='', keyword='',\n content='', raw_flag=False):\n\n res = request(uri, method, data, token)\n if token:\n if keyword == 'serial':\n args = {\"token\": token, \"keyword\": keyword, \"content\": content}\n cur_soa, new_soa = response(uri, method, res, **args)\n return cur_soa, new_soa\n\n else:\n if content is None:\n args = {\"token\": token, \"keyword\": keyword,\n \"content\": content.get('domain')}\n response(uri, method, res, **args)\n else:\n # get sub command\n args = {\"token\": token, \"keyword\": keyword,\n \"raw_flag\": raw_flag}\n data = response(uri, method, res, **args)\n return data\n\n else:\n args = {\"token\": token, \"keyword\": keyword}\n token = response(uri, method, res, **args)\n return token", "docstring": "TonicDNS API client\n\nArgs:\n uri: TonicDNS API URI\n method: TonicDNS API request method\n token: TonicDNS API authentication token\n data: Post data to TonicDNS API\n keyword: Processing keyword of response\n content: data exist flag\n raw_flag: True is return response data, False is pretty printing", "source": "juraj_google_style"} -{"code": "def _average_precision(self, rec, prec):\n\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap += p / 11.\n return ap", "docstring": "calculate average precision, override the default one,\n special 11-point metric\n\n Params:\n ----------\n rec : numpy.array\n cumulated recall\n prec : numpy.array\n cumulated precision\n\nReturns:\n ----------\n ap as float", "source": "juraj_google_style"} -{"code": "def delayedQuoteDF(symbol, token='', version=''):\n\n df = pd.io.json.json_normalize(delayedQuote(symbol, token, version))\n _toDatetime(df)\n _reindex(df, 'symbol')\n return df", "docstring": "This returns the 15 minute delayed market quote.\n\n https://iexcloud.io/docs/api/#delayed-quote\n 15min delayed\n 4:30am - 8pm ET M-F when market is open\n\nArgs:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\nReturns:\n DataFrame: result", "source": "juraj_google_style"} -{"code": "def major_complex(network, state):\n\n log.info('Calculating major complex...')\n\n result = complexes(network, state)\n if result:\n result = max(result)\n else:\n empty_subsystem = Subsystem(network, state, ())\n result = _null_sia(empty_subsystem)\n\n log.info(\"Finished calculating major complex.\")\n\n return result", "docstring": "Return the major complex of the network.\n\nArgs:\n network (Network): The |Network| of interest.\n state (tuple[int]): The state of the network (a binary tuple).\n\nReturns:\n SystemIrreducibilityAnalysis: The |SIA| for the |Subsystem| with\n maximal |big_phi|.", "source": "juraj_google_style"} -{"code": "def GetBlockHash(self, height):\n\n if self._current_block_height < height:\n return\n\n if len(self._header_index) <= height:\n return\n\n return self._header_index[height]", "docstring": "Get the block hash by its block height\n\nArgs:\n height(int): height of the block to retrieve hash from.\n\nReturns:\n bytes: a non-raw block hash (e.g. 
b'6dd83ed8a3fc02e322f91f30431bf3662a8c8e8ebe976c3565f0d21c70620991', but not b'\\x6d\\xd8...etc'", "source": "juraj_google_style"} -{"code": "def __set_mutation_type(self, hgvs_string):\n\n self.__set_lost_stop_status(hgvs_string)\n self.__set_lost_start_status(hgvs_string)\n self.__set_missense_status(hgvs_string) # missense mutations\n self.__set_indel_status() # indel mutations\n self.__set_frame_shift_status() # check for fs\n self.__set_premature_stop_codon_status(hgvs_string)", "docstring": "Interpret the mutation type (missense, etc.) and set appropriate flags.\n\nArgs:\n hgvs_string (str): hgvs syntax with \"p.\" removed", "source": "juraj_google_style"} -{"code": "def _ParseKey(self, knowledge_base, registry_key, value_name):\n\n try:\n registry_value = registry_key.GetValueByName(value_name)\n except IOError as exception:\n raise errors.PreProcessFail((\n 'Unable to retrieve Windows Registry key: {0:s} value: {1:s} '\n 'with error: {2!s}').format(\n registry_key.path, value_name, exception))\n\n if registry_value:\n value_object = registry_value.GetDataAsObject()\n if value_object:\n self._ParseValueData(knowledge_base, value_object)", "docstring": "Parses a Windows Registry key for a preprocessing attribute.\n\nArgs:\n knowledge_base (KnowledgeBase): to fill with preprocessing information.\n registry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n value_name (str): name of the Windows Registry value.\n\nRaises:\n PreProcessFail: if the preprocessing fails.", "source": "juraj_google_style"} -{"code": "def validate_tag(self, key, value):\n\n if key == 'owner':\n return validate_email(value, self.partial_owner_match)\n elif key == self.gdpr_tag:\n return value in self.gdpr_tag_values\n else:\n return True", "docstring": "Check whether a tag value is valid\n\nArgs:\n key: A tag key\n value: A tag value\n\nReturns:\n `(True or False)`\n A boolean indicating whether or not the value is valid", "source": "juraj_google_style"} -{"code": "def add_direct(self, target, var_id, var_type, data):\n\n\n data = struct.pack(\">> lst = [\"\\int x = y\", \"x + 6\"]\n >>> MathList(lst)\n ... 
see nicely formatted math.", "source": "juraj_google_style"} -{"code": "def _ParseIntegerValue(self, byte_stream, file_offset):\n\n data_type_map = self._GetDataTypeMap('int32be')\n\n try:\n return self._ReadStructureFromByteStream(\n byte_stream, file_offset, data_type_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n 'Unable to parse integer value with error: {0!s}'.format(exception))", "docstring": "Parses an integer value.\n\nArgs:\n byte_stream (bytes): byte stream.\n file_offset (int): offset of the attribute data relative to the start of\n the file-like object.\n\nReturns:\n int: integer value.\n\nRaises:\n ParseError: when the integer value cannot be parsed.", "source": "juraj_google_style"} -{"code": "def ldirectory(inpath, outpath, args, scope):\n\n yacctab = 'yacctab' if args.debug else None\n if not outpath:\n sys.exit(\"Compile directory option needs -o ...\")\n else:\n if not os.path.isdir(outpath):\n if args.verbose:\n print(\"Creating '%s'\" % outpath, file=sys.stderr)\n if not args.dry_run:\n os.mkdir(outpath)\n less = glob.glob(os.path.join(inpath, '*.less'))\n f = formatter.Formatter(args)\n for lf in less:\n outf = os.path.splitext(os.path.basename(lf))\n minx = '.min' if args.min_ending else ''\n outf = \"%s/%s%s.css\" % (outpath, outf[0], minx)\n if not args.force and os.path.exists(outf):\n recompile = os.path.getmtime(outf) < os.path.getmtime(lf)\n else:\n recompile = True\n if recompile:\n print('%s -> %s' % (lf, outf))\n p = parser.LessParser(\n yacc_debug=(args.debug),\n lex_optimize=True,\n yacc_optimize=(not args.debug),\n scope=scope,\n tabfile=yacctab,\n verbose=args.verbose)\n p.parse(filename=lf, debuglevel=0)\n css = f.format(p)\n if not args.dry_run:\n with open(outf, 'w') as outfile:\n outfile.write(css)\n elif args.verbose:\n print('skipping %s, not modified' % lf, file=sys.stderr)\n sys.stdout.flush()\n if args.recurse:\n [\n ldirectory(\n os.path.join(inpath, name), os.path.join(outpath, name), args,\n scope) for name in os.listdir(inpath)\n if os.path.isdir(os.path.join(inpath, name))\n and not name.startswith('.') and not name == outpath\n ]", "docstring": "Compile all *.less files in directory\n\nArgs:\n inpath (str): Path to compile\n outpath (str): Output directory\n args (object): Argparse Object\n scope (Scope): Scope object or None", "source": "juraj_google_style"} -{"code": "def _GetPropertyValue(self, parser_mediator, properties, property_name):\n\n property_value = properties.get(property_name, None)\n if isinstance(property_value, py2to3.BYTES_TYPE):\n try:\n # TODO: get encoding form XML metadata.\n property_value = property_value.decode('utf-8')\n except UnicodeDecodeError:\n parser_mediator.ProduceExtractionWarning(\n 'unable to decode property: {0:s}'.format(property_name))\n\n return property_value", "docstring": "Retrieves a property value.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n properties (dict[str, object]): properties.\n property_name (str): name of the property.\n\nReturns:\n str: property value.", "source": "juraj_google_style"} -{"code": "def reformat_to_pretty_xml(doc_xml):\n\n assert isinstance(doc_xml, str)\n dom_obj = xml.dom.minidom.parseString(doc_xml)\n pretty_xml = dom_obj.toprettyxml(indent=' ')\n # Remove empty lines in the result caused by a bug in toprettyxml()\n return re.sub(r'^\\s*$\\n', r'', pretty_xml, flags=re.MULTILINE)", "docstring": "Pretty print XML doc.\n\nArgs:\n doc_xml 
: str\n Well formed XML doc\n\nReturns:\n str: Pretty printed XML doc", "source": "juraj_google_style"} -{"code": "def parse_config(data: dict) -> dict:\n\n return {\n 'email': data.get('email'),\n 'family': data['family_id'],\n 'samples': [{\n 'id': sample_id,\n 'type': analysis_type,\n } for sample_id, analysis_type in data['analysis_type'].items()],\n 'config_path': data['config_file_analysis'],\n 'is_dryrun': True if 'dry_run_all' in data else False,\n 'log_path': data['log_file'],\n 'out_dir': data['outdata_dir'],\n 'priority': data['slurm_quality_of_service'],\n 'sampleinfo_path': data['sample_info_file'],\n }", "docstring": "Parse MIP config file.\n\nArgs:\n data (dict): raw YAML input from MIP analysis config file\n\nReturns:\n dict: parsed data", "source": "juraj_google_style"} -{"code": "def char_spacing(self, dots):\n\n if dots in range(0,127):\n self.send(chr(27)+chr(32)+chr(dots))\n else:\n raise RuntimeError('Invalid dot amount in function charSpacing')", "docstring": "Specifes character spacing in dots.\n\nArgs:\n dots: the character spacing you desire, in dots\n\nReturns:\n None\n\nRaises:\n RuntimeError: Invalid dot amount.", "source": "juraj_google_style"} -{"code": "def noise_set_type(n: tcod.noise.Noise, typ: int) -> None:\n\n n.algorithm = typ", "docstring": "Set a Noise objects default noise algorithm.\n\nArgs:\n typ (int): Any NOISE_* constant.", "source": "juraj_google_style"} -{"code": "def LoadFromStorage(cls, path=None):\n\n if path is None:\n path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')\n\n return cls(**googleads.common.LoadFromStorage(\n path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,\n cls._OPTIONAL_INIT_VALUES))", "docstring": "Creates an AdWordsClient with information stored in a yaml file.\n\nArgs:\n [optional]\n path: The path string to the file containing cached AdWords data.\n\nReturns:\n An AdWordsClient initialized with the values cached in the file.\n\nRaises:\n A GoogleAdsValueError if the given yaml file does not contain the\n information necessary to instantiate a client object - either a\n required key was missing or an OAuth2 key was missing.", "source": "juraj_google_style"} -{"code": "def compile_action_preconditions_checking(self,\n state: Sequence[tf.Tensor],\n action: Sequence[tf.Tensor]) -> tf.Tensor:\n\n with self.graph.as_default():\n with tf.name_scope('action_preconditions_checking'):\n preconds = self.compile_action_preconditions(state, action)\n all_preconds = tf.stack([p.tensor for p in preconds], axis=1)\n checking = tf.reduce_all(all_preconds, axis=1)\n return checking", "docstring": "Combines the action preconditions into an applicability checking op.\n\nArgs:\n state (Sequence[tf.Tensor]): The current state fluents.\n action (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\n A boolean tensor for checking if `action` is application in `state`.", "source": "juraj_google_style"} -{"code": "def inference(self, observed_arr):\n\n if observed_arr.ndim < 4:\n # Add rank for channel.\n observed_arr = np.expand_dims(observed_arr, axis=1)\n self.__add_channel_flag = True\n else:\n self.__add_channel_flag = False\n\n return super().inference(observed_arr)", "docstring": "Draws samples from the `true` distribution.\n\nArgs:\n observed_arr: `np.ndarray` of observed data points.\n\nReturns:\n `np.ndarray` of inferenced.", "source": "juraj_google_style"} -{"code": "def generate_supremacy_circuit_google_v2_bristlecone(n_rows: int,\n cz_depth: int, seed: int\n ) -> circuits.Circuit:\n\n def get_qubits(n_rows):\n def 
count_neighbors(qubits, qubit):\n\n possibles = [\n devices.GridQubit(qubit.row + 1, qubit.col),\n devices.GridQubit(qubit.row - 1, qubit.col),\n devices.GridQubit(qubit.row, qubit.col + 1),\n devices.GridQubit(qubit.row, qubit.col - 1),\n ]\n return len(list(e for e in possibles if e in qubits))\n\n assert 1 <= n_rows <= 11\n max_row = n_rows - 1\n dev = google.Bristlecone\n # we need a consistent order of qubits\n qubits = list(dev.qubits)\n qubits.sort()\n qubits = [q for q in qubits\n if q.row <= max_row and q.row + q.col < n_rows + 6\n and q.row - q.col < n_rows - 5]\n qubits = [q for q in qubits if count_neighbors(qubits, q) > 1]\n return qubits\n\n qubits = get_qubits(n_rows)\n return generate_supremacy_circuit_google_v2(qubits, cz_depth, seed)", "docstring": "Generates Google Random Circuits v2 in Bristlecone.\n See also https://arxiv.org/abs/1807.10749\n\nArgs:\n n_rows: number of rows in a Bristlecone lattice.\n Note that we do not include single qubit corners.\n cz_depth: number of layers with CZ gates.\n seed: seed for the random instance.\n\nReturns:\n A circuit with given size and seed.", "source": "juraj_google_style"} -{"code": "def get_updated(node):\n\n if isinstance(node, gast.Assign):\n return set.union(*(_get_target(target)\n for target in node.targets))\n elif isinstance(node, (gast.For, gast.AugAssign)):\n return _get_target(node.target)\n elif isinstance(node, gast.arguments):\n targets = set(arg.id for arg in node.args + node.kwonlyargs)\n if node.vararg:\n targets.add(node.vararg.id)\n if node.kwarg:\n targets.add(node.kwarg.id)\n return targets\n else:\n return set()", "docstring": "Return the variable names created or mutated by this statement.\n\n This function considers assign statements, augmented assign statements, and\n the targets of for loops, as well as function arguments.\n\n For example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and\n `y`, `for i in range(x)` will return `i`, etc.\n\nArgs:\n node: An AST node\n\nReturns:\n A set of variable names (strings) of all the variables created or mutated.", "source": "juraj_google_style"} -{"code": "def short(cls, path):\n\n if not path:\n return path\n\n path = str(path)\n if cls.paths:\n for p in cls.paths:\n if p:\n path = path.replace(p + \"/\", \"\")\n\n path = path.replace(cls.home, \"~\")\n return path", "docstring": "Example:\n short(\"examined /Users/joe/foo\") => \"examined ~/foo\"\n\nArgs:\n path: Path to represent in its short form\n\nReturns:\n (str): Short form, using '~' if applicable", "source": "juraj_google_style"} -{"code": "def __init__(self, gans_value_function=None):\n\n if gans_value_function is None:\n gans_value_function = MiniMax()\n\n if isinstance(gans_value_function, GANsValueFunction) is False:\n raise TypeError(\"The type of `gans_value_function` must be `GANsValueFunction`.\")\n self.__gans_value_function = gans_value_function\n self.__logger = getLogger(\"pygan\")\n\n super().__init__(gans_value_function)", "docstring": "Init.\n\nArgs:\n gans_value_function: is-a `GANsValueFunction`.", "source": "juraj_google_style"} -{"code": "def AddRow(self, values):\n\n if self._number_of_columns and len(values) != self._number_of_columns:\n raise ValueError('Number of values is out of bounds.')\n\n if not self._column_sizes and self._columns:\n self._column_sizes = [len(column) for column in self._columns]\n\n value_strings = []\n for value_index, value_string in enumerate(values):\n if not isinstance(value_string, py2to3.UNICODE_TYPE):\n value_string = 
'{0!s}'.format(value_string)\n value_strings.append(value_string)\n\n self._column_sizes[value_index] = max(\n self._column_sizes[value_index], len(value_string))\n\n self._rows.append(value_strings)\n\n if not self._number_of_columns:\n self._number_of_columns = len(value_strings)", "docstring": "Adds a row of values.\n\nArgs:\n values (list[object]): values.\n\nRaises:\n ValueError: if the number of values is out of bounds.", "source": "juraj_google_style"} -{"code": "def fetch(self, plan_id, data={}, **kwargs):\n\n return super(Plan, self).fetch(plan_id, data, **kwargs)", "docstring": "Fetch Plan for given Id\n\nArgs:\n plan_id : Id for which Plan object has to be retrieved\n\nReturns:\n Plan dict for given subscription Id", "source": "juraj_google_style"} -{"code": "def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False):\n\n fname = os.path.join(\n data_dir, GOLD_STANDARD_DIRNAME, fileroot + GOLD_STANDARD_EXT)\n encodings = (encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1')\n for encoding in encodings:\n try:\n with io.open(fname, mode='rt', encoding=encoding) as f:\n gold_standard = f.read()\n break\n except (UnicodeDecodeError, UnicodeError):\n gold_standard = None\n\n if not gold_standard:\n return [u'', u'']\n\n if not cetr:\n content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1)\n # if no comments delimiter found, append empty comments string\n if len(content_comments) == 1:\n content_comments = [content_comments[0], u'']\n else:\n tree = etree.fromstring(gold_standard, parser=etree.HTMLParser())\n content_comments = [u' '.join(text_from_subtree(tree)), u'']\n\n # fix text in case of mangled encodings\n content_comments = [ftfy.fix_encoding(content_comments[0]).strip(),\n ftfy.fix_encoding(content_comments[1]).strip()]\n\n return content_comments", "docstring": "Read the gold standard content file corresponding to identifier ``fileroot``\n in the gold standard directory below the root ``data_dir``.\n\nArgs:\n data_dir (str)\n fileroot (str)\n encoding (str)\n cetr (bool): if True, assume no comments and parse the gold standard\n to remove tags\n\nReturns:\n List[str, str]: contents string and comments string, respectively", "source": "juraj_google_style"} -{"code": "def GetArchiveTypeIndicators(cls, path_spec, resolver_context=None):\n\n if (cls._archive_remainder_list is None or\n cls._archive_store is None):\n specification_store, remainder_list = cls._GetSpecificationStore(\n definitions.FORMAT_CATEGORY_ARCHIVE)\n cls._archive_remainder_list = remainder_list\n cls._archive_store = specification_store\n\n if cls._archive_scanner is None:\n cls._archive_scanner = cls._GetSignatureScanner(cls._archive_store)\n\n return cls._GetTypeIndicators(\n cls._archive_scanner, cls._archive_store,\n cls._archive_remainder_list, path_spec,\n resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported archive types.\n\nArgs:\n path_spec (PathSpec): path specification.\n resolver_context (Optional[Context]): resolver context, where None\n represents the built-in context which is not multi process safe.\n\nReturns:\n list[str]: supported format type indicators.", "source": "juraj_google_style"} -{"code": "def parse_meta(meta):\n\n resources = {}\n for name in meta:\n if name.startswith(\"$\"):\n continue\n resources[name] = resource = {}\n for action in meta[name]:\n if action.startswith(\"$\"):\n continue\n url, httpmethod = res_to_url(name, action)\n resource[action] = {\n \"url\": url,\n \"method\": httpmethod\n 
}\n url_prefix = meta.get(\"$url_prefix\", \"\").rstrip(\"/\")\n return url_prefix, meta[\"$auth\"][\"header\"].lower(), resources", "docstring": "Parse metadata of API\n\nArgs:\n meta: metadata of API\n\nReturns:\n tuple(url_prefix, auth_header, resources)", "source": "juraj_google_style"} -{"code": "def _HashRow(cls, row):\n\n values = []\n for value in row:\n try:\n value = '{0!s}'.format(value)\n except UnicodeDecodeError:\n # In Python 2, blobs are \"read-write buffer\" and will cause a\n # UnicodeDecodeError exception if we try format it as a string.\n # Since Python 3 does not support the buffer type we cannot check\n # the type of value.\n value = repr(value)\n\n values.append(value)\n\n return hash(' '.join(values))", "docstring": "Hashes the given row.\n\nArgs:\n row (sqlite3.Row): row.\n\nReturns:\n int: hash value of the given row.", "source": "juraj_google_style"} -{"code": "def parse(self, ident_str):\n # type: (bytes) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('This File or Text identifier is already initialized')\n self.text = ident_str\n\n # FIXME: we do not support a file identifier here. In the future, we\n # might want to implement this.\n\n self._initialized = True", "docstring": "Parse a file or text identifier out of a string.\n\n Parameters:\n ident_str - The string to parse the file or text identifier from.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def wb010(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `wb010`'.format(value))\n\n self._wb010 = value", "docstring": "Corresponds to IDD Field `wb010`\n Wet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\n value (float): value for IDD Field `wb010`\n Unit: C\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def model(self, value):\n\n if value == self._defaults['ai.device.model'] and 'ai.device.model' in self._values:\n del self._values['ai.device.model']\n else:\n self._values['ai.device.model'] = value", "docstring": "The model property.\n\nArgs:\n value (string). 
the property value.", "source": "juraj_google_style"} -{"code": "def CaptureFrameLocals(self, frame):\n\n # Capture all local variables (including method arguments).\n variables = {n: self.CaptureNamedVariable(n, v, 1,\n self.default_capture_limits)\n for n, v in six.viewitems(frame.f_locals)}\n\n # Split between locals and arguments (keeping arguments in the right order).\n nargs = frame.f_code.co_argcount\n if frame.f_code.co_flags & inspect.CO_VARARGS: nargs += 1\n if frame.f_code.co_flags & inspect.CO_VARKEYWORDS: nargs += 1\n\n frame_arguments = []\n for argname in frame.f_code.co_varnames[:nargs]:\n if argname in variables: frame_arguments.append(variables.pop(argname))\n\n return (frame_arguments, list(six.viewvalues(variables)))", "docstring": "Captures local variables and arguments of the specified frame.\n\nArgs:\n frame: frame to capture locals and arguments.\n\nReturns:\n (arguments, locals) tuple.", "source": "juraj_google_style"} -{"code": "def __request_finish(self, queue_item, new_requests, request_failed=False):\n\n\n if self.__stopping:\n return\n\n del self.__threads[queue_item.get_hash()]\n\n if request_failed:\n new_queue_items = []\n self.queue.move(queue_item, QueueItem.STATUS_ERRORED)\n else:\n self.routing.increase_route_count(queue_item.request)\n new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests)\n self.queue.move(queue_item, QueueItem.STATUS_FINISHED)\n\n try:\n action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items)\n except Exception as e:\n action = None\n print(e)\n print(traceback.format_exc())\n\n queue_item.decompose()\n\n if action == CrawlerActions.DO_STOP_CRAWLING:\n self.__should_stop = True\n\n if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:\n self.__should_spawn_new_requests = True", "docstring": "Called when the crawler finished the given queue item.\n\nArgs:\n queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.\n new_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request.\n request_failed (bool): True if the request failed (if needs to be moved to errored).", "source": "juraj_google_style"} -{"code": "def list_folder(cls, session, mailbox, folder):\n\n return cls(\n '/mailboxes/%d/folders/%s/conversations.json' % (\n mailbox.id, folder.id,\n ),\n session=session,\n )", "docstring": "Return conversations in a specific folder of a mailbox.\n\nArgs:\n session (requests.sessions.Session): Authenticated session.\n mailbox (helpscout.models.Mailbox): Mailbox that folder is in.\n folder (helpscout.models.Folder): Folder to list.\n\nReturns:\n RequestPaginator(output_type=helpscout.models.Conversation):\n Conversations iterator.", "source": "juraj_google_style"} -{"code": "def cpfs(self,\n state: Sequence[tf.Tensor],\n action: Sequence[tf.Tensor],\n noise: Optional[Noise] = None) -> Tuple[List[TensorFluent], List[TensorFluent]]:\n\n scope = self.transition_scope(state, action)\n batch_size = int(state[0].shape[0])\n interm_fluents, next_state_fluents = self.compile_cpfs(scope, batch_size, noise)\n interms = [fluent for _, fluent in interm_fluents]\n next_state = [fluent for _, fluent in next_state_fluents]\n return interms, next_state", "docstring": "Compiles the intermediate and next state fluent CPFs given\n the current `state` and `action`.\n\nArgs:\n state (Sequence[tf.Tensor]): A tuple of state tensors.\n action (Sequence[tf.Tensor]): A tuple of action tensors.\n\nReturns:\n 
Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent\n representing the intermediate and state CPFs.", "source": "juraj_google_style"} -{"code": "def GetNTFSFileEntryByPathSpec(self, path_spec):\n\n # Opening a file by MFT entry is faster than opening a file by location.\n # However we need the index of the corresponding $FILE_NAME MFT attribute.\n location = getattr(path_spec, 'location', None)\n mft_attribute = getattr(path_spec, 'mft_attribute', None)\n mft_entry = getattr(path_spec, 'mft_entry', None)\n\n if mft_attribute is not None and mft_entry is not None:\n fsntfs_file_entry = self._fsntfs_volume.get_file_entry(mft_entry)\n elif location is not None:\n fsntfs_file_entry = self._fsntfs_volume.get_file_entry_by_path(location)\n else:\n raise errors.PathSpecError(\n 'Path specification missing location and MFT entry.')\n\n return fsntfs_file_entry", "docstring": "Retrieves the NTFS file entry for a path specification.\n\nArgs:\n path_spec (PathSpec): a path specification.\n\nReturns:\n pyfsntfs.file_entry: NTFS file entry.\n\nRaises:\n PathSpecError: if the path specification is missing location and\n MFT entry.", "source": "juraj_google_style"} -{"code": "def member_del(self, repl_id, member_id):\n\n repl = self[repl_id]\n result = repl.member_del(member_id)\n self[repl_id] = repl\n return result", "docstring": "remove member from replica set (reconfig replica)\n\nArgs:\n repl_id - replica set identity\n member_id - member index", "source": "juraj_google_style"} -{"code": "def post_async(self, path, params=None):\n\n request = Post(self._get_next_id(), path, params)\n request.set_callback(self._q.put)\n future = self._dispatch_request(request)\n return future", "docstring": "Asynchronously calls a function on a child block\n\nArgs:\n path (list): The path to post to\n params (dict): parameters for the call\n\nReturns:\n Future: as single Future that will resolve to the result", "source": "juraj_google_style"} -{"code": "def AsDict(self, dt=True):\n\n data = {}\n\n if self.body:\n data['body'] = self.body\n if self.posted_at:\n data['posted_at'] = self.posted_at\n if self.user:\n data['user'] = self.user.AsDict()\n\n return data", "docstring": "A dict representation of this Comment instance.\n\n The return value uses the same key names as the JSON representation.\n\nArgs:\n dt (bool): If True, return dates as python datetime objects. 
If\n False, return dates as ISO strings.\n\nReturns:\n A dict representing this Comment instance", "source": "juraj_google_style"} -{"code": "def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':\n\n return self if self._is_ok else cast(\n 'Result[T, F]',\n self._type.Err(op(cast(E, self._val)))\n )", "docstring": "Applies a function to the contained :meth:`Result.Err` value.\n\nArgs:\n op: The function to apply to the :meth:`Result.Err` value.\n\nReturns:\n A :class:`Result` with its error value as the function result\n if `self` is a :meth:`Result.Err` value, otherwise returns\n `self`.\n\nExample:\n >>> Ok(1).map_err(lambda x: x * 2)\n Ok(1)\n >>> Err(1).map_err(lambda x: x * 2)\n Err(2)", "source": "juraj_google_style"} -{"code": "def _strip_unnecessary_contents_from_stack(result, processed):\n\n # pylint: disable=protected-access\n if isinstance(result, (PrettyTensor, Loss)):\n if result.is_sequence():\n for tensor in result.sequence:\n _strip_unnecessary_contents_from_stack(tensor, processed)\n return\n else:\n result = result.tensor\n if hasattr(result, 'op'):\n result = result.op\n if result in processed:\n return\n else:\n processed.add(result)\n trace = []\n found = False\n for f, line_no, method, _ in result._traceback:\n if (method in ('_replace_deferred', '_construct') and\n f.endswith('pretty_tensor_class.py')):\n found = True\n continue\n trace.append((f, line_no, method, {}))\n result._traceback = trace\n\n # Assume that if we didn't find any PT deferred lines, then this node is\n # not part of the deferred construction.\n if not found:\n return\n for inp in result.inputs:\n _strip_unnecessary_contents_from_stack(inp, processed)", "docstring": "Remove the distracting lines from the stored tracebacks.\n\n This also reduces memory overhead by removing the frame contents. This is very\n important when doing long unrolls.\n\nArgs:\n result: The result to process.\n processed: A set of already processed nodes, used to stop early.", "source": "juraj_google_style"} -{"code": "def deep_update(d, u):\n\n\n for k, v in u.items():\n if isinstance(v, Mapping):\n d[k] = deep_update(d.get(k, {}), v)\n elif isinstance(v, list):\n existing_elements = d.get(k, [])\n d[k] = existing_elements + [ele for ele in v if ele not in existing_elements]\n else:\n d[k] = v\n\n return d", "docstring": "Deeply updates a dictionary. List values are concatenated.\n\nArgs:\n d (dict): First dictionary which will be updated\n u (dict): Second dictionary use to extend the first one\n\nReturns:\n dict: The merge dictionary", "source": "juraj_google_style"} -{"code": "def indexSearch(self, indexes):\n\n\n if not self._dataFrame.empty:\n filter0 = self._dataFrame.index == -9999\n for index in indexes:\n filter1 = self._dataFrame.index == index\n filter0 = np.logical_or(filter0, filter1)\n\n return filter0\n else:\n return []", "docstring": "Filters the data by a list of indexes.\n\nArgs:\n indexes (list of int): List of index numbers to return.\n\nReturns:\n list: A list containing all indexes with filtered data. Matches\n will be `True`, the remaining items will be `False`. 
If the\n dataFrame is empty, an empty list will be returned.", "source": "juraj_google_style"} -{"code": "def BatchConvert(self, metadata_value_pairs, token=None):\n\n if data_store.RelationalDBEnabled():\n result_generator = self._BatchConvertRelational(metadata_value_pairs)\n else:\n result_generator = self._BatchConvertLegacy(\n metadata_value_pairs, token=token)\n\n for r in result_generator:\n yield r", "docstring": "Converts a batch of StatEntry value to ExportedFile values at once.\n\nArgs:\n metadata_value_pairs: a list or a generator of tuples (metadata, value),\n where metadata is ExportedMetadata to be used for conversion and value\n is a StatEntry to be converted.\n token: Security token:\n\nYields:\n Resulting ExportedFile values. Empty list is a valid result and means that\n conversion wasn't possible.", "source": "juraj_google_style"} -{"code": "def _ParseTokenType(self, file_object, file_offset):\n\n token_type_map = self._GetDataTypeMap('uint8')\n\n token_type, _ = self._ReadStructureFromFileObject(\n file_object, file_offset, token_type_map)\n\n return token_type", "docstring": "Parses a token type.\n\nArgs:\n file_object (dfvfs.FileIO): file-like object.\n file_offset (int): offset of the token relative to the start of\n the file-like object.\n\nReturns:\n int: token type", "source": "juraj_google_style"} -{"code": "def save_aggregate_report_to_elasticsearch(aggregate_report,\n index_suffix=None,\n monthly_indexes=False):\n\n logger.debug(\"Saving aggregate report to Elasticsearch\")\n aggregate_report = aggregate_report.copy()\n metadata = aggregate_report[\"report_metadata\"]\n org_name = metadata[\"org_name\"]\n report_id = metadata[\"report_id\"]\n domain = aggregate_report[\"policy_published\"][\"domain\"]\n begin_date = human_timestamp_to_datetime(metadata[\"begin_date\"])\n end_date = human_timestamp_to_datetime(metadata[\"end_date\"])\n begin_date_human = begin_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n end_date_human = end_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n if monthly_indexes:\n index_date = begin_date.strftime(\"%Y-%m\")\n else:\n index_date = begin_date.strftime(\"%Y-%m-%d\")\n aggregate_report[\"begin_date\"] = begin_date\n aggregate_report[\"end_date\"] = end_date\n date_range = [aggregate_report[\"begin_date\"],\n aggregate_report[\"end_date\"]]\n\n org_name_query = Q(dict(match=dict(org_name=org_name)))\n report_id_query = Q(dict(match=dict(report_id=report_id)))\n domain_query = Q(dict(match={\"published_policy.domain\": domain}))\n begin_date_query = Q(dict(match=dict(date_range=begin_date)))\n end_date_query = Q(dict(match=dict(date_range=end_date)))\n\n search = Search(index=\"dmarc_aggregate*\")\n query = org_name_query & report_id_query & domain_query\n query = query & begin_date_query & end_date_query\n search.query = query\n\n existing = search.execute()\n if len(existing) > 0:\n raise AlreadySaved(\"An aggregate report ID {0} from {1} about {2} \"\n \"with a date range of {3} UTC to {4} UTC already \"\n \"exists in \"\n \"Elasticsearch\".format(report_id,\n org_name,\n domain,\n begin_date_human,\n end_date_human))\n published_policy = _PublishedPolicy(\n domain=aggregate_report[\"policy_published\"][\"domain\"],\n adkim=aggregate_report[\"policy_published\"][\"adkim\"],\n aspf=aggregate_report[\"policy_published\"][\"aspf\"],\n p=aggregate_report[\"policy_published\"][\"p\"],\n sp=aggregate_report[\"policy_published\"][\"sp\"],\n pct=aggregate_report[\"policy_published\"][\"pct\"],\n fo=aggregate_report[\"policy_published\"][\"fo\"]\n )\n\n for 
record in aggregate_report[\"records\"]:\n agg_doc = _AggregateReportDoc(\n xml_schemea=aggregate_report[\"xml_schema\"],\n org_name=metadata[\"org_name\"],\n org_email=metadata[\"org_email\"],\n org_extra_contact_info=metadata[\"org_extra_contact_info\"],\n report_id=metadata[\"report_id\"],\n date_range=date_range,\n errors=metadata[\"errors\"],\n published_policy=published_policy,\n source_ip_address=record[\"source\"][\"ip_address\"],\n source_country=record[\"source\"][\"country\"],\n source_reverse_dns=record[\"source\"][\"reverse_dns\"],\n source_base_domain=record[\"source\"][\"base_domain\"],\n message_count=record[\"count\"],\n disposition=record[\"policy_evaluated\"][\"disposition\"],\n dkim_aligned=record[\"policy_evaluated\"][\"dkim\"] == \"pass\",\n spf_aligned=record[\"policy_evaluated\"][\"spf\"] == \"pass\",\n header_from=record[\"identifiers\"][\"header_from\"],\n envelope_from=record[\"identifiers\"][\"envelope_from\"],\n envelope_to=record[\"identifiers\"][\"envelope_to\"]\n )\n\n for override in record[\"policy_evaluated\"][\"policy_override_reasons\"]:\n agg_doc.add_policy_override(type_=override[\"type\"],\n comment=override[\"comment\"])\n\n for dkim_result in record[\"auth_results\"][\"dkim\"]:\n agg_doc.add_dkim_result(domain=dkim_result[\"domain\"],\n selector=dkim_result[\"selector\"],\n result=dkim_result[\"result\"])\n\n for spf_result in record[\"auth_results\"][\"spf\"]:\n agg_doc.add_spf_result(domain=spf_result[\"domain\"],\n scope=spf_result[\"scope\"],\n result=spf_result[\"result\"])\n\n index = \"dmarc_aggregate\"\n if index_suffix:\n index = \"{0}_{1}\".format(index, index_suffix)\n index = \"{0}-{1}\".format(index, index_date)\n create_indexes([index])\n agg_doc.meta.index = index\n\n try:\n agg_doc.save()\n except Exception as e:\n raise ElasticsearchError(\n \"Elasticsearch error: {0}\".format(e.__str__()))", "docstring": "Saves a parsed DMARC aggregate report to ElasticSearch\n\nArgs:\n aggregate_report (OrderedDict): A parsed forensic report\n index_suffix (str): The suffix of the name of the index to save to\n monthly_indexes (bool): Use monthly indexes instead of daily indexes\n\nRaises:\n AlreadySaved", "source": "juraj_google_style"} -{"code": "def post_vote(self, post_id, score):\n\n if score <= 3 and score >= 0:\n params = {'id': post_id, 'score': score}\n return self._get('post/vote', params, 'POST')\n else:\n raise PybooruAPIError(\"Value of 'score' only can be 0, 1, 2 or 3.\")", "docstring": "Action lets you vote for a post (Requires login).\n\n Parameters:\n post_id (int): The post id.\n score (int):\n * 0: No voted or Remove vote.\n * 1: Good.\n * 2: Great.\n * 3: Favorite, add post to favorites.\n\nRaises:\n PybooruAPIError: When score is > 3.", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.GetVersion = channel.unary_unary(\n '/versionpb.API/GetVersion',\n request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n response_deserializer=client_dot_version_dot_versionpb_dot_version__pb2.Version.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def write_data(worksheet, data):\n\n if not data:\n return\n\n if isinstance(data, list):\n rows = data\n else:\n rows = [data]\n\n if isinstance(rows[0], dict):\n keys = get_keys(rows)\n worksheet.append([utilities.convert_snake_to_title_case(key) for key in keys])\n for row in rows:\n values = [get_value_from_row(row, key) for key in keys]\n 
worksheet.append(values)\n elif isinstance(rows[0], list):\n for row in rows:\n values = [utilities.normalize_cell_value(value) for value in row]\n worksheet.append(values)\n else:\n for row in rows:\n worksheet.append([utilities.normalize_cell_value(row)])", "docstring": "Writes data into worksheet.\n\nArgs:\n worksheet: worksheet to write into\n data: data to be written", "source": "juraj_google_style"} -{"code": "def merge_variables(variables, **kwargs):\n\n var_dict = OrderedDict()\n for v in variables:\n if v.name not in var_dict:\n var_dict[v.name] = []\n var_dict[v.name].append(v)\n return [merge_variables(vars_, **kwargs)\n for vars_ in list(var_dict.values())]", "docstring": "Concatenates Variables along row axis.\n\nArgs:\n variables (list): List of Variables to merge. Variables can have\n different names (and all Variables that share a name will be\n concatenated together).\n\nReturns:\n A list of Variables.", "source": "juraj_google_style"} -{"code": "def tag(self, name, formatter=None):\n\n tag = Tag(name, formatter)\n for tag_data in self._tags:\n if tag_data.name == name:\n tag = tag_data\n break\n else:\n self._tags.append(tag)\n return tag", "docstring": "Return instance of Tag.\n\nArgs:\n name (str): The value for this tag.\n formatter (method, optional): A method that take a tag value and returns a\n formatted tag.\n\nReturns:\n obj: An instance of Tag.", "source": "juraj_google_style"} -{"code": "def CheckVersion(problems, latest_version=None):\n\n if not latest_version:\n timeout = 20\n socket.setdefaulttimeout(timeout)\n request = urllib2.Request(LATEST_RELEASE_VERSION_URL)\n\n try:\n response = urllib2.urlopen(request)\n content = response.read()\n m = re.search(r'version=(\\d+\\.\\d+\\.\\d+)', content)\n if m:\n latest_version = m.group(1)\n\n except urllib2.HTTPError as e:\n description = ('During the new-version check, we failed to reach '\n 'transitfeed server: Reason: %s [%s].' %\n (e.reason, e.code))\n problems.OtherProblem(\n description=description, type=errors.TYPE_NOTICE)\n return\n except urllib2.URLError as e:\n description = ('During the new-version check, we failed to reach '\n 'transitfeed server. Reason: %s.' % e.reason)\n problems.OtherProblem(\n description=description, type=errors.TYPE_NOTICE)\n return\n\n if not latest_version:\n description = ('During the new-version check, we had trouble parsing the '\n 'contents of %s.' % LATEST_RELEASE_VERSION_URL)\n problems.OtherProblem(\n description=description, type=errors.TYPE_NOTICE)\n return\n\n newest_version = _MaxVersion([latest_version, __version__])\n if __version__ != newest_version:\n problems.NewVersionAvailable(newest_version)", "docstring": "Check if there is a newer version of transitfeed available.\n\nArgs:\n problems: if a new version is available, a NewVersionAvailable problem will\n be added\n latest_version: if specified, override the latest version read from the\n project page", "source": "juraj_google_style"} -{"code": "def __init__(self, values=None, **kwargs):\n\n self._values = type(self).tuples_to_coll(\n type(self).coll_to_tuples(values)\n )\n super(Collection, self).__init__(**kwargs)", "docstring": "Default collection constructor.\n\nArgs:\n ``values=``\\ *iterable*\n Specify the initial contents of the collection. 
It will be\n converted to the correct type using :py:meth:`coll_to_tuples`\n and :py:meth:`tuples_to_coll`\n\n ``attribute=``\\ *VALUE*\n It is possible to add extra properties to ``Collection``\n objects; this is how you specify them on construction.", "source": "juraj_google_style"} -{"code": "def __init__(self, feed_merger):\n\n self.feed_merger = feed_merger\n self._num_merged = 0\n self._num_not_merged_a = 0\n self._num_not_merged_b = 0", "docstring": "Initialise.\n\nArgs:\n feed_merger: The FeedMerger.", "source": "juraj_google_style"} -{"code": "def _get_radius(site):\n\n if hasattr(site.specie, 'oxi_state'):\n el = site.specie.element\n oxi = site.specie.oxi_state\n\n if oxi == 0:\n return CrystalNN._get_default_radius(site)\n\n elif oxi in el.ionic_radii:\n return el.ionic_radii[oxi]\n\n # e.g., oxi = 2.667, average together 2+ and 3+ radii\n elif int(math.floor(oxi)) in el.ionic_radii and \\\n int(math.ceil(oxi)) in el.ionic_radii:\n oxi_low = el.ionic_radii[int(math.floor(oxi))]\n oxi_high = el.ionic_radii[int(math.ceil(oxi))]\n x = oxi - int(math.floor(oxi))\n return (1 - x) * oxi_low + x * oxi_high\n\n elif oxi > 0 and el.average_cationic_radius > 0:\n return el.average_cationic_radius\n\n elif oxi < 0 and el.average_anionic_radius > 0:\n return el.average_anionic_radius\n\n else:\n warnings.warn(\"CrystalNN: distance cutoffs set but no oxidation \"\n \"states specified on sites! For better results, set \"\n \"the site oxidation states in the structure.\")\n return 0", "docstring": "An internal method to get the expected radius for a site with\n oxidation state.\n\nArgs:\n site: (Site)\n\nReturns:\n Oxidation-state dependent radius: ionic, covalent, or atomic.\n Returns 0 if no oxidation state or appropriate radius is found.", "source": "juraj_google_style"} -{"code": "def _validate_path(path):\n\n if not path:\n raise ValueError('Path is empty')\n if not isinstance(path, basestring):\n raise TypeError('Path should be a string but is %s (%s).' %\n (path.__class__, path))", "docstring": "Basic validation of Google Storage paths.\n\nArgs:\n path: a Google Storage path. It should have form '/bucket/filename'\n or '/bucket'.\n\nRaises:\n ValueError: if path is invalid.\n TypeError: if path is not of type basestring.", "source": "juraj_google_style"} -{"code": "def get_course_runs_from_program(program):\n\n course_runs = set()\n for course in program.get(\"courses\", []):\n for run in course.get(\"course_runs\", []):\n if \"key\" in run and run[\"key\"]:\n course_runs.add(run[\"key\"])\n\n return course_runs", "docstring": "Return course runs from program data.\n\nArgs:\n program(dict): Program data from Course Catalog API\n\nReturns:\n set: course runs in given program", "source": "juraj_google_style"} -{"code": "def __init__(self, row_class=Row):\n\n self.row_class = row_class\n self.separator = ', '\n self.Reset()", "docstring": "Initialises a new table.\n\nArgs:\n row_class: A class to use as the row object. 
This should be a\n subclass of this module's Row() class.", "source": "juraj_google_style"} -{"code": "def get_labels_encoder(self, data_dir):\n\n label_filepath = os.path.join(data_dir, self.vocab_filename)\n return text_encoder.TokenTextEncoder(label_filepath)", "docstring": "Builds encoder for the given class labels.\n\nArgs:\n data_dir: data directory\n\nReturns:\n An encoder for class labels.", "source": "juraj_google_style"} -{"code": "def flatten(dictionary, separator='.', prefix=''):\n\n new_dict = {}\n for key, value in dictionary.items():\n new_key = prefix + separator + key if prefix else key\n if isinstance(value, collections.MutableMapping):\n new_dict.update(flatten(value, separator, new_key))\n\n elif isinstance(value, list):\n new_value = []\n for item in value:\n if isinstance(item, collections.MutableMapping):\n new_value.append(flatten(item, separator, new_key))\n else:\n new_value.append(item)\n new_dict[new_key] = new_value\n\n else:\n new_dict[new_key] = value\n\n return new_dict", "docstring": "Flatten the dictionary keys are separated by separator\n\nArgs:\n dictionary {dict} -- The dictionary to be flattened.\n\n Keyword Arguments:\n separator {str} -- The separator to use (default is '.'). It will\n crush items with key conflicts.\n prefix {str} -- Used for recursive calls.\n\nReturns:\n dict -- The flattened dictionary.", "source": "juraj_google_style"} -{"code": "def db_create_table(self, table_name, columns):\n\n formatted_columns = ''\n for col in set(columns):\n formatted_columns += '\"{}\" text, '.format(col.strip('\"').strip('\\''))\n formatted_columns = formatted_columns.strip(', ')\n\n create_table_sql = 'CREATE TABLE IF NOT EXISTS {} ({});'.format(\n table_name, formatted_columns\n )\n try:\n cr = self.db_conn.cursor()\n cr.execute(create_table_sql)\n except sqlite3.Error as e:\n self.handle_error(e)", "docstring": "Create a temporary DB table.\n\nArgs:\n table_name (str): The name of the table.\n columns (list): List of columns to add to the DB.", "source": "juraj_google_style"} -{"code": "def downsample(data, percent):\n\n n_genes = data.shape[0]\n n_cells = data.shape[1]\n new_data = data.copy()\n total_count = float(data.sum())\n to_remove = total_count*percent\n # sum of read counts per cell\n cell_sums = data.sum(0).astype(float)\n # probability of selecting genes per cell\n cell_gene_probs = data/cell_sums\n # probability of selecting cells\n cell_probs = np.array(cell_sums/total_count).flatten()\n cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)\n for i, num_selected in enumerate(cells_selected):\n cell_gene = np.array(cell_gene_probs[:,i]).flatten()\n genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)\n if sparse.issparse(data):\n genes_selected = sparse.csc_matrix(genes_selected).T\n new_data[:,i] -= genes_selected\n new_data[new_data < 0] = 0\n return new_data", "docstring": "downsample the data by removing a given percentage of the reads.\n\nArgs:\n data: genes x cells array or sparse matrix\n percent: float between 0 and 1", "source": "juraj_google_style"} -{"code": "def _GetDeps(self, dependencies):\n\n\n for dependency in dependencies:\n dep_desc = self.FindFileByName(dependency)\n yield dep_desc\n for parent_dep in dep_desc.dependencies:\n yield parent_dep", "docstring": "Recursively finds dependencies for file protos.\n\nArgs:\n dependencies: The names of the files being depended on.\n\nYields:\n Each direct and indirect dependency.", "source": "juraj_google_style"} -{"code": "def 
addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None):\n\n\n assert self.P == 1, 'Incompatible number of traits'\n\n assert K!=None or is_noise, 'Specify covariance structure'\n\n if is_noise:\n assert self.noisPos==None, 'noise term already exists'\n K = SP.eye(self.Nt)\n self.noisPos = self.n_terms\n else:\n assert K.shape[0]==self.Nt, 'Incompatible shape'\n assert K.shape[1]==self.Nt, 'Incompatible shape'\n\n if Ks!=None:\n assert Ks.shape[0]==self.N, 'Incompatible shape'\n\n if normalize:\n Norm = 1/K.diagonal().mean()\n K *= Norm\n if Ks!=None: Ks *= Norm\n\n self.vd.addTerm(limix.CSingleTraitTerm(K))\n if Ks!=None: self.setKstar(self.n_terms,Ks)\n self.n_terms+=1\n\n self.gp = None\n self.init = False\n self.fast = False\n self.optimum = None\n\n self.cache['Sigma'] = None\n self.cache['Hessian'] = None\n self.cache['Lparams'] = None\n self.cache['paramsST']= None", "docstring": "add random effects term for single trait models (no trait-trait covariance matrix)\n\nArgs:\n K: NxN sample covariance matrix\n is_noise:\tbool labeling the noise term (noise term has K=eye)\n normalize:\tif True, K and Ks are scales such that K.diagonal().mean()==1\n Ks:\t\t\tNxN test cross covariance for predictions", "source": "juraj_google_style"} -{"code": "def cancelOrder(self, order: Order) -> Trade:\n\n self.client.cancelOrder(order.orderId)\n now = datetime.datetime.now(datetime.timezone.utc)\n key = self.wrapper.orderKey(\n order.clientId, order.orderId, order.permId)\n trade = self.wrapper.trades.get(key)\n if trade:\n if not trade.isDone():\n status = trade.orderStatus.status\n if (status == OrderStatus.PendingSubmit and not order.transmit\n or status == OrderStatus.Inactive):\n newStatus = OrderStatus.Cancelled\n else:\n newStatus = OrderStatus.PendingCancel\n logEntry = TradeLogEntry(now, newStatus, '')\n trade.log.append(logEntry)\n trade.orderStatus.status = newStatus\n self._logger.info(f'cancelOrder: {trade}')\n trade.cancelEvent.emit(trade)\n trade.statusEvent.emit(trade)\n self.cancelOrderEvent.emit(trade)\n self.orderStatusEvent.emit(trade)\n if newStatus == OrderStatus.Cancelled:\n trade.cancelledEvent.emit(trade)\n else:\n self._logger.error(f'cancelOrder: Unknown orderId {order.orderId}')\n return trade", "docstring": "Cancel the order and return the Trade it belongs to.\n\nArgs:\n order: The order to be canceled.", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.Activate = channel.unary_unary(\n '/enterprise.API/Activate',\n request_serializer=client_dot_enterprise_dot_enterprise__pb2.ActivateRequest.SerializeToString,\n response_deserializer=client_dot_enterprise_dot_enterprise__pb2.ActivateResponse.FromString,\n )\n self.GetState = channel.unary_unary(\n '/enterprise.API/GetState',\n request_serializer=client_dot_enterprise_dot_enterprise__pb2.GetStateRequest.SerializeToString,\n response_deserializer=client_dot_enterprise_dot_enterprise__pb2.GetStateResponse.FromString,\n )\n self.Deactivate = channel.unary_unary(\n '/enterprise.API/Deactivate',\n request_serializer=client_dot_enterprise_dot_enterprise__pb2.DeactivateRequest.SerializeToString,\n response_deserializer=client_dot_enterprise_dot_enterprise__pb2.DeactivateResponse.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def __init__(self, value, masks=None, name='X.509 Certificate'):\n\n super(X509Certificate, self).__init__(\n enums.CertificateType.X_509, value, masks, name)\n\n # All remaining 
attributes are not considered part of the public API\n # and are subject to change.\n\n # The following attributes are placeholders for attributes that are\n # unsupported by kmip.core\n self._x509_certificate_identifier = None\n self._x509_certificate_subject = None\n self._x509_certificate_issuer = None\n\n self.validate()", "docstring": "Create an X509Certificate.\n\nArgs:\n value(bytes): The bytes representing the certificate.\n masks(list): A list of CryptographicUsageMask enumerations\n defining how the certificate will be used.\n name(string): The string name of the certificate.", "source": "juraj_google_style"} -{"code": "def GetFormattedEventObject(cls, event):\n\n time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp)\n\n lines_of_text = [\n '+-' * 40,\n '[Timestamp]:',\n ' {0:s}'.format(time_string)]\n\n pathspec = getattr(event, 'pathspec', None)\n if pathspec:\n lines_of_text.append('[Pathspec]:')\n attribute_string = pathspec.comparable.replace('\\n', '\\n ')\n attribute_string = ' {0:s}\\n'.format(attribute_string)\n lines_of_text.append(attribute_string)\n\n # TODO: add support for event tag after event clean up.\n\n lines_of_text.append('[Reserved attributes]:')\n out_additional = ['[Additional attributes]:']\n\n for attribute_name, attribute_value in sorted(event.GetAttributes()):\n if attribute_name not in definitions.RESERVED_VARIABLE_NAMES:\n attribute_string = ' {{{0!s}}} {1!s}'.format(\n attribute_name, attribute_value)\n out_additional.append(attribute_string)\n\n elif attribute_name not in ('pathspec', 'tag'):\n attribute_string = ' {{{0!s}}} {1!s}'.format(\n attribute_name, attribute_value)\n lines_of_text.append(attribute_string)\n\n lines_of_text.append('')\n out_additional.append('')\n\n lines_of_text.extend(out_additional)\n return '\\n'.join(lines_of_text)", "docstring": "Retrieves a string representation of the event.\n\nArgs:\n event (EventObject): event.\n\nReturns:\n str: string representation of the event.", "source": "juraj_google_style"} -{"code": "def main(args):\n\n\n print(\"\\nNote it's very possible that this doesn't work correctly so take what it gives with a bucketload of salt\\n\")\n\n #########################\n # #\n # #\n # prompt #\n # #\n # #\n #########################\n\n if not len(sys.argv) > 1:\n initialAnswers = askInitial()\n\n inputPath = pathlib.Path(initialAnswers['inputPath'])\n year = int(initialAnswers['year'])\n # create a list from every row\n badFormat = badFormater(inputPath) # create a list from every row\n howManyCandidates = len(badFormat) - 1\n\n length = int(len(badFormat['Cand'])/2)\n finalReturn = []\n\n if \"Get your rank in the year\" in initialAnswers['whatToDo']:\n candidateNumber = askCandidateNumber()\n weightedAverage = myGrades(year, candidateNumber, badFormat, length)\n rank = myRank(weightedAverage, badFormat, year, length)\n\n if \"Get your weighted average\" in initialAnswers['whatToDo']:\n finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(\n weightedAverage))\n\n finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format(\n rank, howManyCandidates, (rank * 100) / howManyCandidates))\n elif \"Get your weighted average\" in initialAnswers['whatToDo']:\n candidateNumber = askCandidateNumber()\n weightedAverage = myGrades(year, candidateNumber, badFormat, length)\n finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(\n weightedAverage))\n\n if \"Reformat results by module and output to csv\" in initialAnswers['whatToDo']:\n\n 
formatOutputPath = pathlib.Path(askFormat())\n\n goodFormat = goodFormater(badFormat, formatOutputPath, year, length)\n\n if \"Plot the results by module\" in initialAnswers['whatToDo']:\n howPlotAsk(goodFormat)\n\n elif \"Plot the results by module\" in initialAnswers['whatToDo']:\n goodFormat = goodFormater(badFormat, None, year, length)\n howPlotAsk(goodFormat)\n\n [print('\\n', x) for x in finalReturn]\n\n #########################\n # #\n # end #\n # prompt #\n # #\n # #\n #########################\n\n #########################\n # #\n # #\n # run with #\n # cli args #\n # #\n #########################\n\n if len(sys.argv) > 1:\n if not args.input:\n inputPath = pathlib.Path(askInput())\n else:\n inputPath = pathlib.Path(args.input)\n if not args.year:\n year = int(askYear())\n else:\n year = int(args.year)\n\n # create a list from every row\n badFormat = badFormater(inputPath) # create a list from every row\n howManyCandidates = len(badFormat) - 1\n\n length = int(len(badFormat['Cand'])/2)\n finalReturn = []\n\n if args.rank:\n if not args.candidate:\n candidateNumber = askCandidateNumber()\n else:\n candidateNumber = args.candidate\n\n weightedAverage = myGrades(year, candidateNumber, badFormat, length)\n rank = myRank(weightedAverage, badFormat, year, length)\n\n if args.my:\n finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(\n weightedAverage))\n\n finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format(\n rank, howManyCandidates, (rank * 100) / howManyCandidates))\n\n elif args.my:\n if not args.candidate:\n candidateNumber = askCandidateNumber()\n else:\n candidateNumber = args.candidate\n\n weightedAverage = myGrades(year, candidateNumber, badFormat, length)\n finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(\n weightedAverage))\n\n if args.format is not None:\n formatOutputPath = pathlib.Path(args.format)\n goodFormat = goodFormater(badFormat, formatOutputPath, year, length)\n\n if args.plot:\n howPlotArgs(goodFormat)\n elif args.plot:\n goodFormat = goodFormater(badFormat, None, year, length)\n howPlotArgs(goodFormat)\n\n [print('\\n', x) for x in finalReturn]\n\n #########################\n # #\n # end #\n # run with #\n # cli args #\n # #\n #########################\n\n print('')", "docstring": "main entry point of app\n\nArgs:\n args {namespace} -- arguments provided in cli", "source": "juraj_google_style"} -{"code": "def getTraceSummariesByIds(self, trace_ids, adjust):\n\n self.send_getTraceSummariesByIds(trace_ids, adjust)\n return self.recv_getTraceSummariesByIds()", "docstring": "Fetch trace summaries for the given trace ids.\n\n Second argument is a list of methods of adjusting the trace\n data before returning it. 
Can be empty.\n\n Note that if one of the trace ids does not have any data associated with it, it will not be\n represented in the output list.\n\n Parameters:\n - trace_ids\n - adjust", "source": "juraj_google_style"} -{"code": "def token_request(self, authorization_code):\n\n if not self._client.token_endpoint:\n return None\n\n request = {\n 'grant_type': 'authorization_code',\n 'code': authorization_code,\n 'redirect_uri': self._redirect_uri\n }\n\n logger.debug('making token request: %s', request)\n client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic')\n auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method,\n request)\n resp = self._provider_configuration.requests_session \\\n .post(self._client.token_endpoint,\n data=request,\n headers=auth_header) \\\n .json()\n logger.debug('received token response: %s', json.dumps(resp))\n\n if 'error' in resp:\n token_resp = TokenErrorResponse(**resp)\n else:\n token_resp = AccessTokenResponse(**resp)\n token_resp.verify(keyjar=self._client.keyjar)\n if 'id_token' in resp:\n token_resp['id_token_jwt'] = resp['id_token']\n\n return token_resp", "docstring": "Makes a token request. If the 'token_endpoint' is not configured in the provider metadata, no request will\n be made.\n\nArgs:\n authorization_code (str): authorization code issued to client after user authorization\n\nReturns:\n Union[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token\n request was performed.", "source": "juraj_google_style"} -{"code": "def user_agent_detail(self, **kwargs):\n\n path = '%s/%s/user_agent_detail' % (self.manager.path, self.get_id())\n return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get the user agent detail.\n\nArgs:\n **kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabGetError: If the server cannot perform the request", "source": "juraj_google_style"} -{"code": "def lookup_value(self, api_name, key):\n\n if api_name in self._cache:\n return self._cache[api_name].get(key, None)\n return None", "docstring": "Add the value of an API call to the cache.\n\nArgs:\n api_name: a string name of the API. 
Keys and values are segmented by api_name.\n key: a string key for the specific call.", "source": "juraj_google_style"} -{"code": "def led(host, seq, anim, f, d):\n\n at(host, 'LED', seq, [anim, float(f), d])", "docstring": "Control the drones LED.\n\n Parameters:\n seq -- sequence number\n anim -- Integer: animation to play\n f -- Float: frequency in HZ of the animation\n d -- Integer: total duration in seconds of the animation", "source": "juraj_google_style"} -{"code": "def _parse_metadata(self, message):\n\n\n metadata = Metadata(source=self.actor_urn).__dict__\n if 'author' in message['d']:\n metadata['source_user'] = message['d']['author']['username']\n else:\n metadata['source_user'] = None\n if 'channel_id' in message['d']:\n metadata['source_channel'] = message['d']['channel_id']\n else:\n metadata['source_channel'] = None\n metadata['user_id'] = metadata['source_user']\n metadata['display_name'] = metadata['source_user']\n\n metadata['source_connector'] = 'discord'\n\n return metadata", "docstring": "Sets metadata in Legobot message\n\nArgs:\n message (dict): Full message from Discord websocket connection\"\n\nReturns:\n Legobot.Metadata", "source": "juraj_google_style"} -{"code": "def run(self, text):\n\n\n stack = ['root']\n pos = 0\n\n patterns = self.tokens[stack[-1]]\n\n while True:\n for pat, action, new_state in patterns:\n m = pat.match(text, pos)\n if m:\n if action:\n #print('## MATCH: {} -> {}'.format(m.group(), action))\n yield (pos, m.end()-1), action, m.groups()\n\n pos = m.end()\n\n if new_state:\n if isinstance(new_state, int): # Pop states\n del stack[new_state:]\n else:\n stack.append(new_state)\n\n #print('## CHANGE STATE:', pos, new_state, stack)\n patterns = self.tokens[stack[-1]]\n\n break\n\n else:\n try:\n if text[pos] == '\\n':\n pos += 1\n continue\n pos += 1\n except IndexError:\n break", "docstring": "Run lexer rules against a source text\n\nArgs:\n text (str): Text to apply lexer to\n\nYields:\n A sequence of lexer matches.", "source": "juraj_google_style"} -{"code": "def _is_in_targets(self, site, targets):\n\n elems = self._get_elements(site)\n for elem in elems:\n if elem not in targets:\n return False\n return True", "docstring": "Test whether a site contains elements in the target list\n\nArgs:\n site (Site): Site to assess\n targets ([Element]) List of elements\n\nReturns:\n (boolean) Whether this site contains a certain list of elements", "source": "juraj_google_style"} -{"code": "def color_is_disabled(**envars):\n\n result = None\n if 'NO_COLOR' in env:\n result = True\n elif env.CLICOLOR == '0':\n result = True\n\n log.debug('%r (NO_COLOR=%s, CLICOLOR=%s)', result,\n env.NO_COLOR or '',\n env.CLICOLOR or ''\n )\n for name, value in envars.items():\n envar = getattr(env, name)\n if envar.value == value:\n result = True\n log.debug('%s == %r: %r', name, value, result)\n\n return result", "docstring": "Look for clues in environment, e.g.:\n\n - https://bixense.com/clicolors/\n - http://no-color.org/\n\nArgs:\n envars: Additional environment variables to check for\n equality, i.e. 
``MYAPP_COLOR_DISABLED='1'``\n\nReturns:\n None, Bool: Disabled", "source": "juraj_google_style"} -{"code": "def add_layer(self, layer, input_node_id):\n\n if isinstance(input_node_id, Iterable):\n layer.input = list(map(lambda x: self.node_list[x], input_node_id))\n output_node_id = self._add_node(Node(layer.output_shape))\n for node_id in input_node_id:\n self._add_edge(layer, node_id, output_node_id)\n\n else:\n layer.input = self.node_list[input_node_id]\n output_node_id = self._add_node(Node(layer.output_shape))\n self._add_edge(layer, input_node_id, output_node_id)\n\n layer.output = self.node_list[output_node_id]\n return output_node_id", "docstring": "Add a layer to the Graph.\n\nArgs:\n layer: An instance of the subclasses of StubLayer in layers.py.\n input_node_id: An integer. The ID of the input node of the layer.\n\nReturns:\n output_node_id: An integer. The ID of the output node of the layer.", "source": "juraj_google_style"} -{"code": "def zip_file(self, app_path, app_name, tmp_path):\n\n # zip build directory\n zip_file = os.path.join(app_path, self.args.outdir, app_name)\n zip_file_zip = '{}.zip'.format(zip_file)\n zip_file_tcx = '{}.tcx'.format(zip_file)\n shutil.make_archive(zip_file, 'zip', tmp_path, app_name)\n shutil.move(zip_file_zip, zip_file_tcx)\n self._app_packages.append(zip_file_tcx)\n # update package data\n self.package_data['package'].append({'action': 'App Package:', 'output': zip_file_tcx})", "docstring": "Zip the App with tcex extension.\n\nArgs:\n app_path (str): The path of the current project.\n app_name (str): The name of the App.\n tmp_path (str): The temp output path for the zip.", "source": "juraj_google_style"} -{"code": "def index_in_block(self, channel_index: int) -> int:\n\n if channel_index < 0 or channel_index >= self.cdim:\n raise ValueError()\n\n struct = self.block_structure\n\n if len(struct) == 1:\n return channel_index, 0\n i = 1\n while sum(struct[:i]) <= channel_index and i < self.cdim:\n i += 1\n block_index = i - 1\n index_in_block = channel_index - sum(struct[:block_index])\n\n return index_in_block, block_index", "docstring": "Return the index a channel has within the subblock it belongs to\n\n I.e., only for reducible circuits, this gives a result different from\n the argument itself.\n\nArgs:\n channel_index (int): The index of the external channel\n\nRaises:\n ValueError: for an invalid `channel_index`", "source": "juraj_google_style"} -{"code": "def get_el(el):\n\n tag_name = el.elt.tagName.lower()\n if tag_name in {\"input\", \"textarea\", \"select\"}:\n return el.value\n else:\n raise ValueError(\n \"Getter for %s (%s) not implemented!\" % (tag_name, el.id)\n )", "docstring": "Get value of given `el` tag element.\n\n Automatically choose proper method to set the `value` based on the type\n of the `el`.\n\nArgs:\n el (obj): Element reference to the input you want to convert to\n typeahead.\n\nReturns:\n str: Value of the object.", "source": "juraj_google_style"} -{"code": "def register(self, name, option):\n\n if name in self._options:\n\n raise ValueError(\"Option {0} already exists.\".format(name))\n\n if not isinstance(option, opt.Option):\n\n raise TypeError(\"Options must be of type Option.\")\n\n self._options[name] = option", "docstring": "Register a new option with the namespace.\n\nArgs:\n name (str): The name to register the option under.\n option (option.Option): The option object to register.\n\nRaises:\n TypeError: If the option is not an option.Option object.\n ValueError: If the name is already registered.", 
"source": "juraj_google_style"} -{"code": "def get_user_display_name(self, userid):\n\n\n user_info = self.slack_client.api_call('users.info', user=userid)\n if user_info.get('ok'):\n user = user_info.get('user')\n if user.get('profile'):\n return user.get('profile').get('display_name')\n else:\n return user.get('name')\n else:\n return userid", "docstring": "Given a Slack userid, grabs user display_name from api.\n\nArgs:\n userid (string): the user id of the user being queried\n\nReturns:\n dict: a dictionary of the api response", "source": "juraj_google_style"} -{"code": "def DeserializeUnsignedWithoutType(self, reader):\n\n self.Version = reader.ReadByte()\n self.DeserializeExclusiveData(reader)\n self.Attributes = reader.ReadSerializableArray('neo.Core.TX.TransactionAttribute.TransactionAttribute',\n max=self.MAX_TX_ATTRIBUTES)\n self.inputs = reader.ReadSerializableArray('neo.Core.CoinReference.CoinReference')\n self.outputs = reader.ReadSerializableArray('neo.Core.TX.Transaction.TransactionOutput')", "docstring": "Deserialize object without reading transaction type data.\n\nArgs:\n reader (neo.IO.BinaryReader):", "source": "juraj_google_style"} -{"code": "def get_changeset(changeset):\n\n url = 'https://www.openstreetmap.org/api/0.6/changeset/{}/download'.format(\n changeset\n )\n return ET.fromstring(requests.get(url).content)", "docstring": "Get the changeset using the OSM API and return the content as a XML\n ElementTree.\n\nArgs:\n changeset: the id of the changeset.", "source": "juraj_google_style"} -{"code": "def store_object(file_name, save_key, file_location, object_to_store=None):\n\n file = __os.path.join(file_location, file_name)\n try:\n shelve_store = __shelve.open(file)\n except Exception as e:\n LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))\n print('Bad storage dB, rebuilding!!')\n __os.remove(file)\n shelve_store = __shelve.open(file)\n shelve_store[save_key] = object_to_store\n shelve_store.close()", "docstring": "Function to store objects in a shelve\n\nArgs:\n file_name: Shelve storage file name\n save_key: The name of the key to store the item to\n file_location: The location of the file, derive from the os module\n object_to_store: The object you want to store\n\n Returns:", "source": "juraj_google_style"} -{"code": "def load_rml(self, rml_name):\n\n conn = CFG.rml_tstore\n cache_path = os.path.join(CFG.CACHE_DATA_PATH, 'rml_files', rml_name)\n if not os.path.exists(cache_path):\n results = get_graph(NSM.uri(getattr(NSM.kdr, rml_name), False),\n conn)\n with open(cache_path, \"w\") as file_obj:\n file_obj.write(json.dumps(results, indent=4))\n else:\n results = json.loads(open(cache_path).read())\n self.rml[rml_name] = RdfDataset(results)\n return self.rml[rml_name]", "docstring": "loads an rml mapping into memory\n\nArgs:\n rml_name(str): the name of the rml file", "source": "juraj_google_style"} -{"code": "def popular(self, **kwargs):\n\n path = self._get_path('popular')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Get the list of popular movies on The Movie Database. This list\n refreshes every day.\n\nArgs:\n page: (optional) Minimum value of 1. 
Expected value is an integer.\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict representation of the JSON returned from the API.", "source": "juraj_google_style"} -{"code": "def label(self, main_type, sub_type, unique_id, label, action='ADD', owner=None, params=None):\n\n params = params or {}\n\n if owner:\n params['owner'] = owner\n\n action = action.upper()\n\n if not sub_type:\n url = '/v2/{}/{}/securityLabels/{}'.format(main_type, unique_id, quote(label))\n else:\n url = '/v2/{}/{}/{}/securityLabels/{}'.format(\n main_type, sub_type, unique_id, quote(label)\n )\n\n if action == 'ADD':\n return self.tcex.session.post(url, params=params)\n\n if action == 'DELETE':\n return self.tcex.session.delete(url, params=params)\n\n if action == 'GET':\n return self.tcex.session.get(url, params=params)\n\n return None", "docstring": "Args:\n owner:\n main_type:\n sub_type:\n unique_id:\n label:\n action:\n params:\n\n Return:", "source": "juraj_google_style"} -{"code": "def get_fba_flux(self, objective):\n\n flux_result = self.solve_fba(objective)\n fba_fluxes = {}\n\n # Place all the flux values in a dictionary\n for key in self._model.reactions:\n fba_fluxes[key] = flux_result.get_value(self._v_wt[key])\n return fba_fluxes", "docstring": "Return a dictionary of all the fluxes solved by FBA.\n\n Dictionary of fluxes is used in :meth:`.lin_moma` and :meth:`.moma`\n to minimize changes in the flux distributions following model\n perturbation.\n\nArgs:\n objective: The objective reaction that is maximized.\n\nReturns:\n Dictionary of fluxes for each reaction in the model.", "source": "juraj_google_style"} -{"code": "def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):\n # type: (Any, slice, Callable, str) -> str\n\n if keyfunc is not cls._default:\n return \"{}.{}[{}, {}, {}]\".format(\n cls.__module__,\n cls.__name__,\n cls._get_fullname(type_),\n cls._get_bound_repr(bound),\n keyfunc_name,\n )\n return \"{}.{}[{}, {}]\".format(\n cls.__module__,\n cls.__name__,\n cls._get_fullname(type_),\n cls._get_bound_repr(bound),\n )", "docstring": "Return a class representation using the slice parameters.\n\nArgs:\n type_: The type the class was sliced with.\n bound: The boundaries specified for the values of type_.\n keyfunc: The comparison function used to check the value\n boundaries.\n keyfunc_name: The name of keyfunc.\n\nReturns:\n A string representing the class.", "source": "juraj_google_style"} -{"code": "def count(cls, cur, table:str, where_keys: list=None):\n\n\n if where_keys:\n where_clause, values = cls._get_where_clause_with_values(where_keys)\n query = cls._count_query_where.format(table, where_clause)\n q, t = query, values\n else:\n query = cls._count_query.format(table)\n q, t = query, ()\n yield from cur.execute(q, t)\n result = yield from cur.fetchone()\n return int(result[0])", "docstring": "gives the number of records in the table\n\nArgs:\n table: a string indicating the name of the table\n\nReturns:\n an integer indicating the number of records in the table", "source": "juraj_google_style"} -{"code": "def _parameter_net(self, theta, kernel_shape=9):\n\n with argscope(FullyConnected, nl=tf.nn.leaky_relu):\n net = FullyConnected('fc1', theta, 64)\n net = FullyConnected('fc2', net, 128)\n\n pred_filter = FullyConnected('fc3', net, kernel_shape ** 2, nl=tf.identity)\n pred_filter = tf.reshape(pred_filter, [BATCH, kernel_shape, kernel_shape, 1], name=\"pred_filter\")\n logger.info('Parameter net output: {}'.format(pred_filter.get_shape().as_list()))\n return 
pred_filter", "docstring": "Estimate filters for convolution layers\n\nArgs:\n theta: angle of filter\n kernel_shape: size of each filter\n\nReturns:\n learned filter as [B, k, k, 1]", "source": "juraj_google_style"} -{"code": "def _from_to_as_term(self, frm, to):\n\n\n # The wackiness with the conversion to int and str, and adding ' ', is because there\n # can't be a space between the 'TO' and the brackets in the time range\n # when one end is open\n from_year = ''\n to_year = ''\n\n def year_or_empty(prefix, year, suffix):\n try:\n return prefix + str(int(year)) + suffix\n except (ValueError, TypeError):\n return ''\n\n if frm:\n from_year = year_or_empty('', frm, ' ')\n\n if to:\n to_year = year_or_empty(' ', to, '')\n\n if bool(from_year) or bool(to_year):\n return '[{}TO{}]'.format(from_year, to_year)\n else:\n return None", "docstring": "Turns from and to into the query format.\n\nArgs:\n frm (str): from year\n to (str): to year\n\nReturns:\n FTS query str with years range.", "source": "juraj_google_style"} -{"code": "def process_file(self, path):\n\n if self._config.verbose:\n self._logger.info('Processing file \"%s\"', path)\n\n output_path = '%s%s' % (path, BATCH_EXTENSION)\n\n with open(output_path, 'w') as file:\n for line in lines_generator(path):\n file.write('%s\\n' % self._cucco.normalize(\n line.encode().decode('utf-8')))\n\n self._logger.debug('Created file \"%s\"', output_path)", "docstring": "Process a file applying normalizations.\n\n Get a file as input and generate a new file with the\n result of applying normalizations to every single line\n in the original file. The extension for the new file\n will be the one defined in BATCH_EXTENSION.\n\nArgs:\n path: Path to the file.", "source": "juraj_google_style"} -{"code": "def cafferesnet101(num_classes=1000, pretrained='imagenet'):\n\n model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['cafferesnet101'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model", "docstring": "Constructs a ResNet-101 model.\n\nArgs:\n pretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj_google_style"} -{"code": "def SummaryMetadata(self, run, tag):\n\n accumulator = self.GetAccumulator(run)\n return accumulator.SummaryMetadata(tag)", "docstring": "Return the summary metadata for the given tag on the given run.\n\nArgs:\n run: A string name of the run for which summary metadata is to be\n retrieved.\n tag: A string name of the tag whose summary metadata is to be\n retrieved.\n\nRaises:\n KeyError: If the run is not found, or the tag is not available for\n the given run.\n\nReturns:\n A `SummaryMetadata` protobuf.", "source": "juraj_google_style"} -{"code": "def __init__(self, resolver_context):\n\n super(TSKFileSystem, self).__init__(resolver_context)\n self._file_object = None\n self._tsk_file_system = None\n self._tsk_fs_type = None", "docstring": "Initializes a file system.\n\nArgs:\n resolver_context (Context): resolver context.", "source": "juraj_google_style"} -{"code": "def unbroadcast_numpy_to(array, shape):\n\n axis = create_unbroadcast_axis(shape, 
numpy.shape(array))\n return numpy.reshape(numpy.sum(array, axis=axis), shape)", "docstring": "Reverse the broadcasting operation.\n\nArgs:\n array: An array.\n shape: A shape that could have been broadcasted to the shape of array.\n\nReturns:\n Array with dimensions summed to match `shape`.", "source": "juraj_google_style"} -{"code": "def write_event(self, event):\n\n self._lock.acquire()\n try:\n self._events_writer.WriteEvent(event)\n self._event_count += 1\n if self._always_flush:\n # We flush on every event within the integration test.\n self._events_writer.Flush()\n\n if self._event_count == self._check_this_often:\n # Every so often, we check whether the size of the file is too big.\n self._event_count = 0\n\n # Flush to get an accurate size check.\n self._events_writer.Flush()\n\n file_path = os.path.join(self._events_directory,\n self.get_current_file_name())\n if not tf.io.gfile.exists(file_path):\n # The events file does not exist. Perhaps the user had manually\n # deleted it after training began. Create a new one.\n self._events_writer.Close()\n self._events_writer = self._create_events_writer(\n self._events_directory)\n elif tf.io.gfile.stat(file_path).length > self._single_file_size_cap_bytes:\n # The current events file has gotten too big. Close the previous\n # events writer. Make a new one.\n self._events_writer.Close()\n self._events_writer = self._create_events_writer(\n self._events_directory)\n except IOError as err:\n logger.error(\n \"Writing to %s failed: %s\", self.get_current_file_name(), err)\n self._lock.release()", "docstring": "Writes an event proto to disk.\n\n This method is threadsafe with respect to invocations of itself.\n\nArgs:\n event: The event proto.\n\nRaises:\n IOError: If writing the event proto to disk fails.", "source": "juraj_google_style"} -{"code": "def _receive_signal(self, progress_subscript):\n\n\n self.progress = self._estimate_progress()\n self.updateProgress.emit(int(self.progress))", "docstring": "this function takes care of signals emitted by the subscripts\n\nArgs:\n progress_subscript: progress of subscript", "source": "juraj_google_style"} -{"code": "def comment_create(self, post_id, comment_body, anonymous=None):\n\n params = {\n 'comment[post_id]': post_id,\n 'comment[body]': comment_body,\n 'comment[anonymous]': anonymous\n }\n return self._get('comment/create', params, method='POST')", "docstring": "Action to lets you create a comment (Requires login).\n\n Parameters:\n post_id (int): The post id number to which you are responding.\n comment_body (str): The body of the comment.\n anonymous (int): Set to 1 if you want to post this comment\n anonymously.", "source": "juraj_google_style"} -{"code": "def __init__(self, options):\n\n\n self.__options = options\n self.count_total = 0\n self.items_queued = OrderedDict()\n self.items_in_progress = OrderedDict()\n self.items_finished = OrderedDict()\n self.items_cancelled = OrderedDict()\n self.items_errored = OrderedDict()", "docstring": "Constructs a Queue instance.\n\nArgs:\n options (:class:`nyawc.Options`): The options to use.", "source": "juraj_google_style"} -{"code": "def post_task(task_data, task_uri='/tasks'):\n\n url = '{}/{}'.format(API_URL, task_uri.lstrip('/'))\n\n if isinstance(task_data, str):\n task_json = task_data\n else:\n task_json = json.dumps(task_data)\n\n resp = requests.post(url, data=task_json, headers=HEADERS, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n resp_json = resp.json()\n\n LOG.debug(resp_json)\n\n assert resp.ok, 'Spinnaker communication error: 
{0}'.format(resp.text)\n\n return resp_json['ref']", "docstring": "Create Spinnaker Task.\n\nArgs:\n task_data (str): Task JSON definition.\n\nReturns:\n str: Spinnaker Task ID.\n\nRaises:\n AssertionError: Error response from Spinnaker.", "source": "juraj_google_style"} -{"code": "def connection_required(func):\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n\n if not self.target_connected():\n raise errors.JLinkException('Target is not connected.')\n return func(self, *args, **kwargs)\n return wrapper", "docstring": "Decorator to specify that a target connection is required in order\n for the given method to be used.\n\nArgs:\n func (function): function being decorated\n\nReturns:\n The wrapper function.", "source": "juraj_google_style"} -{"code": "def nlargest(self, n=None):\n\n\t\tif n is None:\n\t\t\treturn sorted(self.counts(), key=itemgetter(1), reverse=True)\n\t\telse:\n\t\t\treturn heapq.nlargest(n, self.counts(), key=itemgetter(1))", "docstring": "List the n most common elements and their counts.\n\n List is from the most\n common to the least. If n is None, the list all element counts.\n\n Run time should be O(m log m) where m is len(self)\n\nArgs:\n n (int): The number of elements to return", "source": "juraj_google_style"} -{"code": "def parse_timedelta(__delta: str) -> datetime.timedelta:\n\n match = re.fullmatch(r, __delta, re.IGNORECASE | re.VERBOSE)\n if not match:\n raise ValueError('Invalid ‘frequency’ value')\n value, units = match.groups()\n units_i = 'hdwmy'.index(units.lower())\n # hours per hour/day/week/month/year\n multiplier = (1, 24, 168, 672, 8760)\n return datetime.timedelta(hours=float(value) * multiplier[units_i])", "docstring": "Parse human readable frequency.\n\nArgs:\n __delta: Frequency to parse", "source": "juraj_google_style"} -{"code": "def _fdopen(self, *args, **kwargs):\n\n if not is_int_type(args[0]):\n raise TypeError('an integer is required')\n return FakeFileOpen(self.filesystem)(*args, **kwargs)", "docstring": "Redirector to open() builtin function.\n\nArgs:\n *args: Pass through args.\n **kwargs: Pass through kwargs.\n\nReturns:\n File object corresponding to file_des.\n\nRaises:\n TypeError: if file descriptor is not an integer.", "source": "juraj_google_style"} -{"code": "def set_config(self, key, value):\n\n\n keyname = \"config:\" + key\n\n self.kvstore.set(keyname, value)", "docstring": "Set a persistent config key to a value, stored in the registry\n\nArgs:\n key (string): The key name\n value (string): The key value", "source": "juraj_google_style"} -{"code": "def validate(self, read_tuple_name):\n\n if reg_lrn.match(read_tuple_name) is None:\n self.report_error(\n read_tuple_name=read_tuple_name,\n error_name=\"wrong_read_tuple_name_structure\",\n message=\"'{}' is not matched\".format(reg_lrn),\n )\n else:\n parts = read_tuple_name.split(\"__\")\n\n if reg_prefix_part.match(parts[0]) is None:\n self.report_error(\n read_tuple_name=read_tuple_name,\n error_name=\"wrong_prefix_part\",\n message=\"'{}' is not matched\".format(reg_prefix_part),\n )\n\n if reg_id_part.match(parts[1]) is None:\n self.report_error(\n read_tuple_name=read_tuple_name,\n error_name=\"wrong_id_part\",\n message=\"'{}' is not matched\".format(reg_id_part),\n )\n\n if reg_segmental_part.match(parts[2]) is None:\n self.report_error(\n read_tuple_name=read_tuple_name,\n error_name=\"wrong_segmental_part\",\n message=\"'{}' is not matched\".format(reg_segmental_part),\n )\n\n if reg_suffix_part.match(parts[3]) is None:\n self.report_error(\n 
read_tuple_name=read_tuple_name,\n error_name=\"wrong_suffix_part\",\n message=\"'{}' is not matched\".format(reg_suffix_part),\n )\n\n if not self.rnf_profile.check(read_tuple_name):\n self.report_error(\n read_tuple_name=read_tuple_name,\n error_name=\"wrong_profile\",\n message=\"Read has a wrong profile (wrong widths). It should be: {} but it is: {}.\".format(\n self.rnf_profile,\n rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name),\n ),\n warning=True,\n )", "docstring": "Check RNF validity of a read tuple.\n\nArgs:\n read_tuple_name (str): Read tuple name to be checked.s", "source": "juraj_google_style"} -{"code": "def mark_complex(self, name, serializer, deserializer):\n\n\n self._complex_properties[name] = (serializer, deserializer)", "docstring": "Mark a property as complex with serializer and deserializer functions.\n\nArgs:\n name (str): The name of the complex property.\n serializer (callable): The function to call to serialize the property's\n value to something that can be saved in a json.\n deserializer (callable): The function to call to unserialize the property\n from a dict loaded by a json back to the original value.", "source": "juraj_google_style"} -{"code": "def parse_args(self, arglist=None):\n\n args = self._parser.parse_args(args=arglist)\n sub_cmd = args.loam_sub_name\n if sub_cmd is None:\n for opt, sct in self._opt_bare.items():\n self._conf[sct][opt] = getattr(args, opt, None)\n else:\n for opt, sct in self._opt_cmds[sub_cmd].items():\n self._conf[sct][opt] = getattr(args, opt, None)\n return args", "docstring": "Parse arguments and update options accordingly.\n\nArgs:\n arglist (list of str): list of arguments to parse. If set to None,\n ``sys.argv[1:]`` is used.\n\nReturns:\n :class:`Namespace`: the argument namespace returned by the\n :class:`argparse.ArgumentParser`.", "source": "juraj_google_style"} -{"code": "def clean(self, force: bool=False):\n\n with (yield from self._lock):\n for connection in tuple(self.ready):\n if force or connection.closed():\n connection.close()\n self.ready.remove(connection)", "docstring": "Clean closed connections.\n\nArgs:\n force: Clean connected and idle connections too.\n\n Coroutine.", "source": "juraj_google_style"} -{"code": "def get_audience(self, audience_id):\n\n\n audience = self.audience_id_map.get(audience_id)\n if audience:\n return audience\n\n self.logger.error('Audience ID \"%s\" is not in datafile.' % audience_id)\n self.error_handler.handle_error(exceptions.InvalidAudienceException((enums.Errors.INVALID_AUDIENCE_ERROR)))", "docstring": "Get audience object for the provided audience ID.\n\nArgs:\n audience_id: ID of the audience.\n\nReturns:\n Dict representing the audience.", "source": "juraj_google_style"} -{"code": "def _xys(date):\n\n\n X, Y, s_xy2 = _xysxy2(date)\n\n # convert milli-arcsecond to arcsecond\n dX, dY = date.eop.dx / 1000., date.eop.dy / 1000.\n\n # Convert arcsecond to degrees then to radians\n X = np.radians((X + dX) / 3600.)\n Y = np.radians((Y + dY) / 3600.)\n s = np.radians(s_xy2 / 3600.) 
- (X * Y / 2)\n\n return X, Y, s", "docstring": "Get The X, Y and s coordinates\n\nArgs:\n date (Date):\n\nReturns:\n 3-tuple of float: Values of X, Y and s, in radians", "source": "juraj_google_style"} -{"code": "def parse(self, valstr):\n # type: (bytes) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('El Torito Section Header already initialized')\n\n (self.header_indicator, self.platform_id, self.num_section_entries,\n self.id_string) = struct.unpack_from(self.FMT, valstr, 0)\n\n self._initialized = True", "docstring": "Parse an El Torito section header from a string.\n\n Parameters:\n valstr - The string to parse.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def merge(self, other_rel):\n\n if other_rel.thresholds.size == self.thresholds.size and np.all(other_rel.thresholds == self.thresholds):\n self.frequencies += other_rel.frequencies\n else:\n print(\"Input table thresholds do not match.\")", "docstring": "Ingest another DistributedReliability and add its contents to the current object.\n\nArgs:\n other_rel: a Distributed reliability object.", "source": "juraj_google_style"} -{"code": "def to_dict(mapreduce_yaml):\n\n all_configs = []\n for config in mapreduce_yaml.mapreduce:\n out = {\n \"name\": config.name,\n \"mapper_input_reader\": config.mapper.input_reader,\n \"mapper_handler\": config.mapper.handler,\n }\n if config.mapper.params_validator:\n out[\"mapper_params_validator\"] = config.mapper.params_validator\n if config.mapper.params:\n param_defaults = {}\n for param in config.mapper.params:\n param_defaults[param.name] = param.default or param.value\n out[\"mapper_params\"] = param_defaults\n if config.params:\n param_defaults = {}\n for param in config.params:\n param_defaults[param.name] = param.default or param.value\n out[\"params\"] = param_defaults\n if config.mapper.output_writer:\n out[\"mapper_output_writer\"] = config.mapper.output_writer\n all_configs.append(out)\n\n return all_configs", "docstring": "Converts a MapReduceYaml file into a JSON-encodable dictionary.\n\n For use in user-visible UI and internal methods for interfacing with\n user code (like param validation). 
as a list\n\nArgs:\n mapreduce_yaml: The Pyton representation of the mapreduce.yaml document.\n\nReturns:\n A list of configuration dictionaries.", "source": "juraj_google_style"} -{"code": "def decorate(fn):\n\n if not isfunction(fn):\n raise TypeError('paco: fn must be a callable object')\n\n @functools.wraps(fn)\n def decorator(*args, **kw):\n # If coroutine object is passed\n for arg in args:\n if iscoro_or_corofunc(arg):\n return fn(*args, **kw)\n\n # Explicit argument must be at least a coroutine\n if len(args) and args[0] is None:\n raise TypeError('paco: first argument cannot be empty')\n\n def wrapper(coro, *_args, **_kw):\n # coro must be a valid type\n if not iscoro_or_corofunc(coro):\n raise TypeError('paco: first argument must be a '\n 'coroutine or coroutine function')\n\n # Merge call arguments\n _args = ((coro,) + (args + _args))\n kw.update(_kw)\n\n # Trigger original decorated function\n return fn(*_args, **kw)\n return wrapper\n return decorator", "docstring": "Generic decorator for coroutines helper functions allowing\n multiple variadic initialization arguments.\n\n This function is intended to be used internally.\n\nArgs:\n fn (function): target function to decorate.\n\nRaises:\n TypeError: if function or coroutine function is not provided.\n\nReturns:\n function: decorated function.", "source": "juraj_google_style"} -{"code": "def browse(self, path=None):\n\n params = None\n if path:\n assert isinstance(path, string_types)\n params = {'current': path}\n return self.get('browse', params=params)", "docstring": "Returns a list of directories matching the path given.\n\nArgs:\n path (str): glob pattern.\n\nReturns:\n List[str]", "source": "juraj_google_style"} -{"code": "def random_new(algo: int = RNG_CMWC) -> tcod.random.Random:\n\n return tcod.random.Random(algo)", "docstring": "Return a new Random instance. 
Using ``algo``.\n\nArgs:\n algo (int): The random number algorithm to use.\n\nReturns:\n Random: A new Random instance using the given algorithm.", "source": "juraj_google_style"} -{"code": "def _update_install_json(self, install_json):\n\n updated = False\n # Update features\n install_json.setdefault('features', [])\n for feature in self.features:\n if feature not in install_json.get('features'):\n install_json['features'].append(feature)\n updated = True\n # update package data\n self.package_data['updates'].append(\n {'action': 'Updated Feature:', 'output': feature}\n )\n\n return install_json, updated", "docstring": "Write install.json file.\n\nArgs:\n install_json (dict): The contents of the install.json file.\n\nReturns:\n dict, bool: The contents of the install.json file and boolean value that is True if\n an update was made.", "source": "juraj_google_style"} -{"code": "def on_enter(__msg: Optional[Union[Callable, str]] = None) -> Callable:\n\n # pylint: disable=missing-docstring\n def decorator(__func):\n @wraps(__func)\n def wrapper(*args, **kwargs):\n if __msg:\n print(__msg)\n else:\n print('Entering {!r}({!r})'.format(__func.__name__, __func))\n return __func(*args, **kwargs)\n return wrapper\n if callable(__msg):\n return on_enter()(__msg)\n return decorator", "docstring": "Decorator to display a message when entering a function.\n\nArgs:\n __msg: Message to display\n\nReturns:\n Wrapped function", "source": "juraj_google_style"} -{"code": "def merge_default_values(resource_list, default_values):\n\n\n def merge_item(resource):\n return merge_resources(default_values, resource)\n\n return lmap(merge_item, resource_list)", "docstring": "Generate a new list where each item of original resource_list will be merged with the default_values.\n\nArgs:\n resource_list: list with items to be merged\n default_values: properties to be merged with each item list. 
If the item already contains some property\n the original value will be maintained.\n\nReturns:\n list: list containing each item merged with default_values", "source": "juraj_google_style"} -{"code": "def update(self, puts, deletes):\n\n with self._lmdb.begin(write=True, buffers=True) as txn:\n cursor = txn.cursor(self._main_db)\n # Process deletes first, to handle the case of new items replacing\n # old index locations\n for key in deletes:\n if not cursor.set_key(key.encode()):\n # value doesn't exist\n continue\n\n value = self._deserializer(bytes(cursor.value()))\n cursor.delete()\n\n for (index_db, index_key_fn) in self._indexes.values():\n index_keys = index_key_fn(value)\n index_cursor = txn.cursor(index_db)\n for idx_key in index_keys:\n if index_cursor.set_key(idx_key):\n index_cursor.delete()\n\n # process all the inserts\n for key, value in puts:\n packed = self._serializer(value)\n\n cursor.put(key.encode(), packed, overwrite=True)\n\n for (index_db, index_key_fn) in self._indexes.values():\n index_keys = index_key_fn(value)\n index_cursor = txn.cursor(index_db)\n for idx_key in index_keys:\n index_cursor.put(idx_key, key.encode())\n\n self.sync()", "docstring": "Applies the given puts and deletes atomically.\n\nArgs:\n puts (:iterable:`tuple`): an iterable of key/value pairs to insert\n deletes (:iterable:str:) an iterable of keys to delete", "source": "juraj_google_style"} -{"code": "def valid(self, value, include_name=True):\n\n\n\t\t# Call the parent valid method and return the result\n\t\treturn super(Tree, self).valid(value, include_name and [self._name] or [])", "docstring": "Valid\n\n Checks if a value is valid based on the instance's values\n\nArgs:\n value {mixed} -- The value to validate\n include_name {bool} -- If true, Tree's name will be prepended to\n all error keys\n\nReturns:\n bool", "source": "juraj_google_style"} -{"code": "def reqHeadTimeStamp(\n self, contract: Contract, whatToShow: str,\n useRTH: bool, formatDate: int = 1) -> datetime.datetime:\n\n return self._run(\n self.reqHeadTimeStampAsync(\n contract, whatToShow, useRTH, formatDate))", "docstring": "Get the datetime of earliest available historical data\n for the contract.\n\nArgs:\n contract: Contract of interest.\n useRTH: If True then only show data from within Regular\n Trading Hours, if False then show all data.\n formatDate: If set to 2 then the result is returned as a\n timezone-aware datetime.datetime with UTC timezone.", "source": "juraj_google_style"} -{"code": "def to_dms(angle, style='dms'):\n\n sign = 1 if angle >= 0 else -1\n angle = abs(angle) * 3600\n minutes, seconds = divmod(angle, 60)\n degrees, minutes = divmod(minutes, 60)\n if style == 'dms':\n return tuple(sign * abs(i) for i in (int(degrees), int(minutes),\n seconds))\n elif style == 'dm':\n return tuple(sign * abs(i) for i in (int(degrees),\n (minutes + seconds / 60)))\n else:\n raise ValueError('Unknown style type %r' % style)", "docstring": "Convert decimal angle to degrees, minutes and possibly seconds.\n\nArgs:\n angle (float): Angle to convert\n style (str): Return fractional or whole minutes values\n\nReturns:\n tuple of int: Angle converted to degrees, minutes and possibly seconds\n\nRaises:\n ValueError: Unknown value for ``style``", "source": "juraj_google_style"} -{"code": "def layer_postprocess(layer_input, layer_output, hparams):\n\n return layer_prepostprocess(\n layer_input,\n layer_output,\n sequence=hparams.layer_postprocess_sequence,\n dropout_rate=hparams.layer_prepostprocess_dropout,\n 
norm_type=hparams.norm_type,\n depth=None,\n epsilon=hparams.norm_epsilon,\n dropout_broadcast_dims=comma_separated_string_to_integer_list(\n getattr(hparams, \"layer_prepostprocess_dropout_broadcast_dims\", \"\")),\n default_name=\"layer_postprocess\")", "docstring": "Apply layer postprocessing.\n\n See layer_prepostprocess() for details.\n\n A hyperparameters object is passed for convenience. The hyperparameters\n that may be used are:\n\n layer_postprocess_sequence\n layer_prepostprocess_dropout\n norm_type\n hidden_size\n norm_epsilon\n\nArgs:\n layer_input: a Tensor\n layer_output: a Tensor\n hparams: a hyperparameters object.\n\nReturns:\n a Tensor", "source": "juraj_google_style"} -{"code": "def get_all_disorder_predictions(self, iupred_path='/home/nathan/software/iupred/',\n iupred_exec='iupred', disembl_cmd='/home/nathan/software/DisEMBL-1.4/DisEMBL.py',\n representative_only=True):\n\n if representative_only:\n # Check if a representative sequence was set\n if not self.representative_sequence:\n log.warning('{}: no representative sequence set, cannot get disorder properties'.format(self.id))\n return\n\n # Also need to check if a sequence has been stored\n if not self.representative_sequence.seq:\n log.warning('{}: representative sequence {} set, but no sequence stored. '\n 'Cannot get disorder properties.'.format(self.id, self.representative_sequence.id))\n return\n\n # self.representative_sequence.store_iupred_disorder_predictions(iupred_path=iupred_path,\n # iupred_exec=iupred_exec,\n # prediction_type='long')\n # self.representative_sequence.store_iupred_disorder_predictions(iupred_path=iupred_path,\n # iupred_exec=iupred_exec,\n # prediction_type='short')\n self.representative_sequence.store_disembl_disorder_predictions(disembl_cmd=disembl_cmd)\n\n if not representative_only:\n for s in self.sequences:\n # Need to check if a sequence has been stored\n if not s.seq:\n log.warning('{}: no sequence stored. '\n 'Cannot get disorder properties.'.format(s.id))\n continue\n\n else:\n # s.store_iupred_disorder_predictions(iupred_path=iupred_path,\n # iupred_exec=iupred_exec,\n # prediction_type='long')\n # s.store_iupred_disorder_predictions(iupred_path=iupred_path,\n # iupred_exec=iupred_exec,\n # prediction_type='short')\n s.store_disembl_disorder_predictions(disembl_cmd=disembl_cmd)", "docstring": "Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of the protein sequences.\n Results are stored in the protein's respective SeqProp objects at ``.annotations``\n\nArgs:\n representative_only (bool): If analysis should only be run on the representative sequence", "source": "juraj_google_style"} -{"code": "def _get_non_defined(self, data, class_types):\n\n subj_set = set([item[self.smap] for item in class_types])\n non_def_set = set([item[self.smap] for item in data])\n return list(non_def_set - subj_set)", "docstring": "returns a list of URIs and blanknodes that are not defined within\n the dataset. For example: schema:Person has an associated rdf:type\n then it is considered defined.\n\nArgs:\n data: a list of triples\n class_types: list of subjects that are defined in the dataset", "source": "juraj_google_style"} -{"code": "def AppendFlagValues(self, flag_values):\n\n for flag_name, flag in six.iteritems(flag_values.FlagDict()):\n # Each flags with shortname appears here twice (once under its\n # normal name, and again with its short name). 
To prevent\n # problems (DuplicateFlagError) with double flag registration, we\n # perform a check to make sure that the entry we're looking at is\n # for its normal name.\n if flag_name == flag.name:\n try:\n self[flag_name] = flag\n except exceptions.DuplicateFlagError:\n raise exceptions.DuplicateFlagError.from_flag(\n flag_name, self, other_flag_values=flag_values)", "docstring": "Appends flags registered in another FlagValues instance.\n\nArgs:\n flag_values: registry to copy from", "source": "juraj_google_style"} -{"code": "def finalise(output=None, figsize=None, tight=True, **kwargs):\n\n import matplotlib.pyplot as plt\n\n # Set figure size.\n if figsize is not None:\n plt.gcf().set_size_inches(*figsize)\n\n # Set plot layout.\n if tight:\n plt.tight_layout()\n\n if output is None:\n plt.show()\n else:\n plt.savefig(output, **kwargs)\n io.info(\"Wrote\", output)\n plt.close()", "docstring": "Finalise a plot.\n\n Display or show the plot, then close it.\n\nArgs:\n output (str, optional): Path to save figure to. If not given,\n show plot.\n figsize ((float, float), optional): Figure size in inches.\n **kwargs: Any additional arguments to pass to\n plt.savefig(). Only required if output is not None.", "source": "juraj_google_style"} -{"code": "def __init__(self, pb_id):\n\n SchedulingObject.__init__(self, PB_KEY, pb_id)\n self._check_object_exists()", "docstring": "Create a PB object.\n\nArgs:\n pb_id (str): Processing Block Identifier\n\nRaises:\n KeyError, if the specified PB does not exist", "source": "juraj_google_style"} -{"code": "def __init__(self, run_object):\n\n run_obj_type = self.get_run_object_type(run_object)\n if run_obj_type == 'module':\n self.init_module(run_object)\n elif run_obj_type == 'package':\n self.init_package(run_object)\n else:\n self.init_function(run_object)", "docstring": "Initializes profiler.\n\nArgs:\n run_object: object to be profiled.", "source": "juraj_google_style"} -{"code": "def DeserializeFromBufer(buffer, offset=0):\n\n mstream = StreamManager.GetStream(buffer)\n reader = BinaryReader(mstream)\n tx = Transaction.DeserializeFrom(reader)\n\n StreamManager.ReleaseStream(mstream)\n return tx", "docstring": "Deserialize object instance from the specified buffer.\n\nArgs:\n buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\n offset: UNUSED\n\nReturns:\n Transaction:", "source": "juraj_google_style"} -{"code": "def make_action(self, fn, schema_parser, meta):\n\n validate_input = validate_output = None\n if \"$input\" in meta:\n with MarkKey(\"$input\"):\n validate_input = schema_parser.parse(meta[\"$input\"])\n if \"$output\" in meta:\n with MarkKey(\"$output\"):\n validate_output = schema_parser.parse(meta[\"$output\"])\n\n def action(data):\n if validate_input:\n try:\n data = validate_input(data)\n except Invalid as ex:\n return abort(400, \"InvalidData\", str(ex))\n if isinstance(data, dict):\n rv = fn(**data)\n else:\n rv = fn(data)\n else:\n rv = fn()\n rv, status, headers = unpack(rv)\n if validate_output:\n try:\n rv = validate_output(rv)\n except Invalid as ex:\n return abort(500, \"ServerError\", str(ex))\n return rv, status, headers\n return action", "docstring": "Make resource's method an action\n\n Validate input, output by schema in meta.\n If no input schema, call fn without params.\n If no output schema, will not validate return value.\n\nArgs:\n fn: resource's method\n schema_parser: for parsing schema in meta\n meta: meta data of the action", "source": "juraj_google_style"} -{"code": "def __init__(self, 
scope, parent, explicit=True):\n\n CodeStatement.__init__(self, scope, parent)\n self.body = []\n self.explicit = explicit", "docstring": "Constructor for code blocks.\n\nArgs:\n scope (CodeEntity): The program scope where this object belongs.\n parent (CodeEntity): This object's parent in the program tree.\n\n Kwargs:\n explicit (bool): Whether the block is explicit in the code.", "source": "juraj_google_style"} -{"code": "def _check(cls, name, val, can_be_zero=False, val_type=float):\n\n valid_types = [val_type]\n if val_type is float:\n valid_types.append(int)\n\n if type(val) not in valid_types:\n raise TypeError(\n 'Expect type %s for parameter %s' % (val_type.__name__, name))\n if val < 0:\n raise ValueError(\n 'Value for parameter %s has to be greater than 0' % name)\n if not can_be_zero and val == 0:\n raise ValueError(\n 'Value for parameter %s can not be 0' % name)\n return val", "docstring": "Check init arguments.\n\nArgs:\n name: name of the argument. For logging purpose.\n val: value. Value has to be non negative number.\n can_be_zero: whether value can be zero.\n val_type: Python type of the value.\n\nReturns:\n The value.\n\nRaises:\n ValueError: when invalid value is passed in.\n TypeError: when invalid value type is passed in.", "source": "juraj_google_style"} -{"code": "def _split_generators(self, dl_manager):\n\n path = dl_manager.download_and_extract(_DOWNLOAD_URL)\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n num_shards=1,\n gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})\n ]", "docstring": "Return the test split of Cifar10.\n\nArgs:\n dl_manager: download manager object.\n\nReturns:\n test split.", "source": "juraj_google_style"} -{"code": "def energies(self, samples_like, dtype=np.float):\n\n samples, labels = as_samples(samples_like)\n if labels:\n idx, label = zip(*enumerate(labels))\n labeldict = dict(zip(label, idx))\n else:\n labeldict = {}\n\n num_samples = samples.shape[0]\n\n energies = np.zeros(num_samples, dtype=dtype)\n for term, bias in self.items():\n if len(term) == 0:\n energies += bias\n else:\n energies += np.prod([samples[:, labeldict[v]] for v in term], axis=0) * bias\n\n return energies", "docstring": "The energies of the given samples.\n\nArgs:\n samples_like (samples_like):\n A collection of raw samples. `samples_like` is an extension of\n NumPy's array_like structure. See :func:`.as_samples`.\n\n dtype (:class:`numpy.dtype`, optional):\n The data type of the returned energies. 
Defaults to float.\n\nReturns:\n :obj:`numpy.ndarray`: The energies.", "source": "juraj_google_style"} -{"code": "def __init__(self, terms: Mapping[raw_types.Gate, value.Scalar]) -> None:\n\n super().__init__(terms, validator=self._is_compatible)", "docstring": "Initializes linear combination from a collection of terms.\n\nArgs:\n terms: Mapping of gates to coefficients in the linear combination\n being initialized.", "source": "juraj_google_style"} -{"code": "def reserveIdentifier(self, pid, vendorSpecific=None):\n\n response = self.reserveIdentifierResponse(pid, vendorSpecific)\n return self._read_dataone_type_response(response, 'Identifier', vendorSpecific)", "docstring": "See Also: reserveIdentifierResponse()\n\nArgs:\n pid:\n vendorSpecific:\n\n Returns:", "source": "juraj_google_style"} -{"code": "def __init__(self, name, type_var, impl_type, type_checker):\n\n assert isinstance(name, str), repr(name)\n assert isinstance(impl_type, type), repr(impl_type)\n assert not isinstance(impl_type, TypingMeta), repr(impl_type)\n assert isinstance(type_var, (type, _TypingBase)), repr(type_var)\n self.name = name\n self.type_var = type_var\n self.impl_type = impl_type\n self.type_checker = type_checker", "docstring": "Initializer.\n\nArgs:\n name: The name, e.g. 'Pattern'.\n type_var: The type parameter, e.g. AnyStr, or the\n specific type, e.g. str.\n impl_type: The implementation type.\n type_checker: Function that takes an impl_type instance.\n and returns a value that should be a type_var instance.", "source": "juraj_google_style"} -{"code": "def query(self, minhash, size):\n\n for i, index in enumerate(self.indexes):\n u = self.uppers[i]\n if u is None:\n continue\n b, r = self._get_optimal_param(u, size)\n for key in index[r]._query_b(minhash, b):\n yield key", "docstring": "Giving the MinHash and size of the query set, retrieve\n keys that references sets with containment with respect to\n the query set greater than the threshold.\n\nArgs:\n minhash (datasketch.MinHash): The MinHash of the query set.\n size (int): The size (number of unique items) of the query set.\n\nReturns:\n `iterator` of keys.", "source": "juraj_google_style"} -{"code": "def __init__(self,\n block_shape,\n block_rows,\n name='block_diagonal_matrix'):\n\n super(BlockDiagonalMatrix, self).__init__(\n block_shape=block_shape,\n block_rows=block_rows,\n include_diagonal=True,\n include_off_diagonal=False,\n name=name)", "docstring": "Constructs a new `BlockDiagonalMatrix` module.\n\nArgs:\n block_shape: tuple, 2-dimensional tuple indicating the shape of each\n individual block.\n block_rows: int, the number of blocks in each row (and column) of the\n output matrix.\n name: string, name of the module.", "source": "juraj_google_style"} -{"code": "def get_column(self, column_name, column_type, index, verbose=True):\n\n return LazyOpResult(\n grizzly_impl.get_column(\n self.expr,\n self.weld_type,\n index\n ),\n column_type,\n 1\n )", "docstring": "Summary\n\nArgs:\n column_name (TYPE): Description\n column_type (TYPE): Description\n index (TYPE): Description\n\nReturns:\n TYPE: Description", "source": "juraj_google_style"} -{"code": "def reply_all(self, reply_comment):\n\n payload = '{ \"Comment\": \"' + reply_comment + '\"}'\n endpoint = 'https://outlook.office.com/api/v2.0/me/messages/{}/replyall'.format(self.message_id)\n\n self._make_api_call('post', endpoint, data=payload)", "docstring": "Replies to everyone on the email, including those on the CC line.\n\n With great power, comes great responsibility.\n\nArgs:\n 
reply_comment: The string comment to send to everyone on the email.", "source": "juraj_google_style"} -{"code": "def _GetUsernameFromProfilePath(self, path):\n\n # Strip trailing key separators.\n while path and path[-1] == '\\\\':\n path = path[:-1]\n\n if path:\n _, _, path = path.rpartition('\\\\')\n return path", "docstring": "Retrieves the username from a Windows profile path.\n\n Trailing path path segment are ignored.\n\nArgs:\n path (str): a Windows path with '\\\\' as path segment separator.\n\nReturns:\n str: basename which is the last path segment.", "source": "juraj_google_style"} -{"code": "def _parse_octet(self, octet_str):\n\n # Whitelist the characters, since int() allows a lot of bizarre stuff.\n if not self._DECIMAL_DIGITS.issuperset(octet_str):\n raise ValueError\n octet_int = int(octet_str, 10)\n # Disallow leading zeroes, because no clear standard exists on\n # whether these should be interpreted as decimal or octal.\n if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):\n raise ValueError\n return octet_int", "docstring": "Convert a decimal octet into an integer.\n\nArgs:\n octet_str: A string, the number to parse.\n\nReturns:\n The octet as an integer.\n\nRaises:\n ValueError: if the octet isn't strictly a decimal from [0..255].", "source": "juraj_google_style"} -{"code": "def RemoveObject(self, identifier):\n\n if identifier not in self._values:\n raise KeyError('Missing cached object for identifier: {0:s}'.format(\n identifier))\n\n del self._values[identifier]", "docstring": "Removes a cached object based on the identifier.\n\n This method ignores the cache value reference count.\n\nArgs:\n identifier (str): VFS object identifier.\n\nRaises:\n KeyError: if the VFS object is not found in the cache.", "source": "juraj_google_style"} -{"code": "def CacheStorage_requestCachedResponse(self, cacheId, requestURL):\n\n\t\tassert isinstance(requestURL, (str,)\n\t\t ), \"Argument 'requestURL' must be of type '['str']'. 
Received type: '%s'\" % type(\n\t\t requestURL)\n\t\tsubdom_funcs = self.synchronous_command('CacheStorage.requestCachedResponse',\n\t\t cacheId=cacheId, requestURL=requestURL)\n\t\treturn subdom_funcs", "docstring": "Function path: CacheStorage.requestCachedResponse\n Domain: CacheStorage\n Method name: requestCachedResponse\n\n Parameters:\n Required arguments:\n 'cacheId' (type: CacheId) -> Id of cache that contains the enty.\n 'requestURL' (type: string) -> URL spec of the request.\n\nReturns:\n 'response' (type: CachedResponse) -> Response read from the cache.\n\n Description: Fetches cache entry.", "source": "juraj_google_style"} -{"code": "def sort_recursive(data):\n\n\n newdict = {}\n\n for i in data.items():\n if type(i[1]) is dict:\n newdict[i[0]] = sort_recursive(i[1])\n else:\n newdict[i[0]] = i[1]\n\n return OrderedDict(sorted(newdict.items(), key=lambda item: (compare_type(type(item[1])), item[0])))", "docstring": "Recursively sorts all elements in a dictionary\n\nArgs:\n data (dict): The dictionary to sort\n\nReturns:\n sorted_dict (OrderedDict): The sorted data dict", "source": "juraj_google_style"} -{"code": "def __init__(self, option=None, default=None, *args, **kwargs):\n\n super(ListOption, self).__init__(*args, **kwargs)\n if not isinstance(option, opt.Option):\n\n raise TypeError(\"Option must be an option type.\")\n\n self._option = option\n self._default = default\n\n if default is not None:\n\n self._value = self.coerce(default)", "docstring": "Initialize the option with an option type.\n\nArgs:\n option (option.Option): The option which is used to validate all\n list options.\n\nRaises:\n TypeError: If the given option is not an instance of option.Option.\n TypeError: If the default value is set but not an iterable.", "source": "juraj_google_style"} -{"code": "def _get_resource_from_obj(self, resource):\n # type: (Union[hdx.data.resource.Resource,Dict,str]) -> hdx.data.resource.Resource\n\n if isinstance(resource, str):\n if is_valid_uuid(resource) is False:\n raise HDXError('%s is not a valid resource id!' % resource)\n resource = hdx.data.resource.Resource.read_from_hdx(resource, configuration=self.configuration)\n elif isinstance(resource, dict):\n resource = hdx.data.resource.Resource(resource, configuration=self.configuration)\n if not isinstance(resource, hdx.data.resource.Resource):\n raise HDXError('Type %s cannot be added as a resource!' % type(resource).__name__)\n return resource", "docstring": "Add new or update existing resource in dataset with new metadata\n\nArgs:\n resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary\n\nReturns:\n hdx.data.resource.Resource: Resource object", "source": "juraj_google_style"} -{"code": "def _file_size(self, field):\n\n size = 0\n try:\n handle = open(self._files[field], \"r\")\n size = os.fstat(handle.fileno()).st_size\n handle.close()\n except:\n size = 0\n self._file_lengths[field] = size\n return self._file_lengths[field]", "docstring": "Returns the file size for given file field.\n\nArgs:\n field (str): File field\n\nReturns:\n int. 
File size", "source": "juraj_google_style"} -{"code": "def _destructively_move(self, dest_doc):\n\n\n if dest_doc is self:\n raise RuntimeError(\"Attempted to overwrite a document with itself\")\n\n dest_doc.clear()\n # we have to remove ALL roots before adding any\n # to the new doc or else models referenced from multiple\n # roots could be in both docs at once, which isn't allowed.\n roots = []\n self._push_all_models_freeze()\n try:\n while self.roots:\n r = next(iter(self.roots))\n self.remove_root(r)\n roots.append(r)\n finally:\n self._pop_all_models_freeze()\n for r in roots:\n if r.document is not None:\n raise RuntimeError(\"Somehow we didn't detach %r\" % (r))\n if len(self._all_models) != 0:\n raise RuntimeError(\"_all_models still had stuff in it: %r\" % (self._all_models))\n for r in roots:\n dest_doc.add_root(r)\n\n dest_doc.title = self.title", "docstring": "Move all data in this doc to the dest_doc, leaving this doc empty.\n\nArgs:\n dest_doc (Document) :\n The Bokeh document to populate with data from this one\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def __init__(self, location, field_type):\n\n super(GlobalContextField, self).__init__(location, field_type)\n self.location = location\n self.field_type = field_type\n self.validate()", "docstring": "Construct a new GlobalContextField object that references a field at a given location.\n\nArgs:\n location: Location, specifying where the field was declared.\n\nReturns:\n new GlobalContextField object", "source": "juraj_google_style"} -{"code": "def add_neighbor(self, edge: \"Edge\") -> None:\n\n if edge is None or (edge.source != self and edge.target != self):\n return\n\n if edge.source == self:\n other: Node = edge.target\n elif edge.target == self:\n other: Node = edge.source\n else:\n raise ValueError(\"Tried to add a neighbor with an invalid edge.\")\n\n edge_key: Tuple(int, int) = edge.key\n\n # The graph is considered undirected, check neighbor existence accordingly.\n if self._neighbors.get(edge_key) or self._neighbors.get((edge_key[1], edge_key[0])):\n return # The neighbor is already added.\n\n self._neighbors[edge_key] = edge\n self.dispatch_event(NeighborAddedEvent(other))", "docstring": "Adds a new neighbor to the node.\n\nArgs:\n edge (Edge): The edge that would connect this node with its neighbor.", "source": "juraj_google_style"} -{"code": "def ported_string(raw_data, encoding='utf-8', errors='ignore'):\n\n\n if not raw_data:\n return six.text_type()\n\n if isinstance(raw_data, six.text_type):\n return raw_data.strip()\n\n if six.PY2:\n try:\n return six.text_type(raw_data, encoding, errors).strip()\n except LookupError:\n return six.text_type(raw_data, \"utf-8\", errors).strip()\n\n if six.PY3:\n try:\n return six.text_type(raw_data, encoding).strip()\n except (LookupError, UnicodeDecodeError):\n return six.text_type(raw_data, \"utf-8\", errors).strip()", "docstring": "Give as input raw data and output a str in Python 3\n and unicode in Python 2.\n\nArgs:\n raw_data: Python 2 str, Python 3 bytes or str to porting\n encoding: string giving the name of an encoding\n errors: his specifies the treatment of characters\n which are invalid in the input encoding\n\nReturns:\n str (Python 3) or unicode (Python 2)", "source": "juraj_google_style"} -{"code": "def _apply(self, ctx: ExtensionContext) -> Any:\n\n _, external_path = ctx.node\n return ctx.mentor.load_yaml(self.locator(\n external_path,\n cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None\n ))", "docstring": 
"Loads a yaml fragment from an external file.\n\nArgs:\n ctx: The processing context.\n\nReturns:\n The external resource as a python dictionary. The fragment is already send through\n the processor as well.", "source": "juraj_google_style"} -{"code": "def git_branch_delete(branch_name):\n # type: (str) -> None\n\n if branch_name not in git.protected_branches():\n log.info(\"Deleting branch <33>{}\", branch_name)\n shell.run('git branch -d {}'.format(branch_name))", "docstring": "Delete the given branch.\n\nArgs:\n branch_name (str):\n Name of the branch to delete.", "source": "juraj_google_style"} -{"code": "def __init__(self, size, dropout=None, named_tensors=None, scope='lstm', summary_labels=(), return_final_state=True):\n\n self.size = size\n self.dropout = dropout\n self.return_final_state = return_final_state\n super(Lstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "LSTM layer.\n\nArgs:\n size: LSTM size.\n dropout: Dropout rate.", "source": "juraj_google_style"} -{"code": "def _to_enos_roles(roles):\n\n\n def to_host(h):\n extra = {}\n # create extra_vars for the nics\n # network_role = ethX\n for nic, roles in h[\"nics\"]:\n for role in roles:\n extra[role] = nic\n\n return Host(h[\"host\"], user=\"root\", extra=extra)\n\n enos_roles = {}\n for role, hosts in roles.items():\n enos_roles[role] = [to_host(h) for h in hosts]\n logger.debug(enos_roles)\n return enos_roles", "docstring": "Transform the roles to use enoslib.host.Host hosts.\n\nArgs:\n roles (dict): roles returned by\n :py:func:`enoslib.infra.provider.Provider.init`", "source": "juraj_google_style"} -{"code": "def login():\n\n try:\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n\n user = authenticate(username, password)\n if not user:\n raise Exception(\"User not found!\")\n\n resp = jsonify({\"message\": \"User authenticated\"})\n resp.status_code = 200\n\n access_token = jwt.jwt_encode_callback(user)\n\n # add token to response headers - so SwaggerUI can use it\n resp.headers.extend({'jwt-token': access_token})\n\n except Exception as e:\n resp = jsonify({\"message\": \"Bad username and/or password\"})\n resp.status_code = 401\n\n return resp", "docstring": "User authenticate method.\n ---\n description: Authenticate user with supplied credentials.\n parameters:\n - name: username\n in: formData\n type: string\n required: true\n - name: password\n in: formData\n type: string\n required: true\n responses:\n 200:\n description: User successfully logged in.\n 400:\n description: User login failed.", "source": "juraj_google_style"} -{"code": "def __init__(self, pos_filename, interval=2):\n\n if not pos_filename:\n pos_filename = os.path.join(os.getcwd(),\n 'mysqlbinlog2blinker.binlog.pos')\n self.pos_storage_filename = pos_filename\n assert self.pos_storage_filename\n self.interval = interval\n\n self._log_file = None\n self._log_pos = None\n self._pos_changed = False\n\n self.save_log_pos_thread_stop_flag = threading.Event()\n self.save_log_pos_thread = \\\n threading.Thread(target=self._save_log_pos_thread_runner)\n self.save_log_pos_thread.daemon = True", "docstring": "Create instance of FileBasedBinlogPosMemory\n\nArgs:\n pos_filename (str|None): position storage file. 
None will makes\n *mysqlbinlog2blinker.binlog.pos* at current working dir\n interval (float): the interval in second", "source": "juraj_google_style"} -{"code": "def joinpaths(self, *paths):\n\n if sys.version_info >= (3, 6):\n paths = [os.fspath(path) for path in paths]\n if len(paths) == 1:\n return paths[0]\n if self.is_windows_fs:\n return self._join_paths_with_drive_support(*paths)\n joined_path_segments = []\n sep = self._path_separator(paths[0])\n for path_segment in paths:\n if self._starts_with_root_path(path_segment):\n # An absolute path\n joined_path_segments = [path_segment]\n else:\n if (joined_path_segments and\n not joined_path_segments[-1].endswith(sep)):\n joined_path_segments.append(sep)\n if path_segment:\n joined_path_segments.append(path_segment)\n return self._matching_string(paths[0], '').join(joined_path_segments)", "docstring": "Mimic os.path.join using the specified path_separator.\n\nArgs:\n *paths: (str) Zero or more paths to join.\n\nReturns:\n (str) The paths joined by the path separator, starting with\n the last absolute path in paths.", "source": "juraj_google_style"} -{"code": "def get_central_coors(self, row, col):\n\n if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols:\n raise ValueError(\"The row (%d) or col (%d) must be >=0 and less than \"\n \"nRows (%d) or nCols (%d)!\" % (row, col, self.nRows, self.nCols))\n else:\n tmpx = self.xMin + (col + 0.5) * self.dx\n tmpy = self.yMax - (row + 0.5) * self.dx\n return tmpx, tmpy", "docstring": "Get the coordinates of central grid.\n\nArgs:\n row: row number, range from 0 to (nRows - 1).\n col: col number, range from 0 to (nCols - 1).\n\nReturns:\n XY coordinates. If the row or col are invalid, raise ValueError.", "source": "juraj_google_style"} -{"code": "def _remove(self, removeList, selfValue):\n\n for removeValue in removeList:\n print(removeValue, removeList)\n # if removeValue equal to selfValue, remove\n removeEverything(removeValue, selfValue)", "docstring": "Remove elements from a list by matching the elements in the other list.\n\n This method only looks inside current instance's value, not recursive.\n There is no need for a recursive one anyway.\n Match by == operation.\n\nArgs:\n removeList (list): The list of matching elements.\n selfValue (list): The list you remove value from. 
Usually ``self.value``", "source": "juraj_google_style"} -{"code": "def validate_context(context):\n\n # Service must exist in service registry\n if not context.service_registry.service_record(context.service_name):\n fail(\"service: {} not found in service registry: {}\".format(\n context.service_name, context.service_registry.filespec))\n service_type = context.service_registry.service_record(context.service_name)[\"type\"]\n\n # Key must be valid\n if context.key not in EFConfig.VERSION_KEYS:\n fail(\"invalid key: {}; see VERSION_KEYS in ef_config for supported keys\".format(context.key))\n\n # Lookup allowed key for service type\n if \"allowed_types\" in EFConfig.VERSION_KEYS[context.key] and \\\n service_type not in EFConfig.VERSION_KEYS[context.key][\"allowed_types\"]:\n fail(\"service_type: {} is not allowed for key {}; see VERSION_KEYS[KEY]['allowed_types']\"\n \"in ef_config and validate service registry entry\".format(service_type, context.key))\n\n return True", "docstring": "Set the key for the current context.\n\nArgs:\n context: a populated EFVersionContext object", "source": "juraj_google_style"} -{"code": "def getStreamNetworkAsGeoJson(self, session, withNodes=True):\n\n features_list = []\n\n # Assemble link features\n for link in self.streamLinks:\n link_geoJson = link.getAsGeoJson(session)\n\n if link_geoJson:\n link_geometry = json.loads(link.getAsGeoJson(session))\n\n link_properties = {\"link_number\": link.linkNumber,\n \"type\": link.type,\n \"num_elements\": link.numElements,\n \"dx\": link.dx,\n \"erode\": link.erode,\n \"subsurface\": link.subsurface}\n\n link_feature = {\"type\": \"Feature\",\n \"geometry\": link_geometry,\n \"properties\": link_properties,\n \"id\": link.id}\n\n features_list.append(link_feature)\n\n # Assemble node features\n if withNodes:\n for node in link.nodes:\n node_geoJson = node.getAsGeoJson(session)\n\n if node_geoJson:\n node_geometry = json.loads(node_geoJson)\n\n node_properties = {\"link_number\": link.linkNumber,\n \"node_number\": node.nodeNumber,\n \"elevation\": node.elevation}\n\n node_feature = {\"type\": \"Feature\",\n \"geometry\": node_geometry,\n \"properties\": node_properties,\n \"id\": node.id}\n\n features_list.append(node_feature)\n\n feature_collection = {\"type\": \"FeatureCollection\",\n \"features\": features_list}\n\n return json.dumps(feature_collection)", "docstring": "Retrieve the stream network geometry in GeoJSON format.\n\nArgs:\n session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\n withNodes (bool, optional): Include nodes. Defaults to False.\n\nReturns:\n str: GeoJSON string.", "source": "juraj_google_style"} -{"code": "def clean(exclude):\n # type: (bool, List[str]) -> None\n\n pretend = context.get('pretend', False)\n exclude = list(exclude) + conf.get('clean.exclude', [])\n clean_patterns = conf.get('clean.patterns', [\n '*__pycache__*',\n '*.py[cod]',\n '*.swp',\n ])\n\n num_files = 0\n with util.timed_block() as t:\n files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude)\n for path in files:\n try:\n num_files += 1\n\n if not isdir(path):\n log.info(' <91>[file] <90>{}', path)\n not pretend and os.remove(path)\n else:\n log.info(' <91>[dir] <90>{}', path)\n not pretend and rmtree(path)\n\n except OSError:\n log.info(\"<33>Failed to remove <90>{}\", path)\n\n if pretend:\n msg = \"Would delete <33>{}<32> files. 
Took <33>{}<32>s\"\n else:\n msg = \"Deleted <33>{}<32> files in <33>{}<32>s\"\n\n log.info(msg.format(num_files, t.elapsed_s))", "docstring": "Remove all unnecessary files.\n\nArgs:\n pretend (bool):\n If set to **True**, do not delete any files, just show what would be\n deleted.\n exclude (list[str]):\n A list of path patterns to exclude from deletion.", "source": "juraj_google_style"} -{"code": "def wtime_to_minutes(time_string):\n\n hours, mins, seconds = time_string.split(':')\n total_mins = int(hours) * 60 + int(mins)\n if total_mins < 1:\n logger.warning(\"Time string '{}' parsed to {} minutes, less than 1\".format(time_string, total_mins))\n return total_mins", "docstring": "wtime_to_minutes\n\n Convert standard wallclock time string to minutes.\n\nArgs:\n - Time_string in HH:MM:SS format\n\nReturns:\n (int) minutes", "source": "juraj_google_style"} -{"code": "def DOMDebugger_setInstrumentationBreakpoint(self, eventName):\n\n\t\tassert isinstance(eventName, (str,)\n\t\t ), \"Argument 'eventName' must be of type '['str']'. Received type: '%s'\" % type(\n\t\t eventName)\n\t\tsubdom_funcs = self.synchronous_command(\n\t\t 'DOMDebugger.setInstrumentationBreakpoint', eventName=eventName)\n\t\treturn subdom_funcs", "docstring": "Function path: DOMDebugger.setInstrumentationBreakpoint\n Domain: DOMDebugger\n Method name: setInstrumentationBreakpoint\n\n WARNING: This function is marked 'Experimental'!\n\n Parameters:\n Required arguments:\n 'eventName' (type: string) -> Instrumentation name to stop on.\n No return value.\n\n Description: Sets breakpoint on particular native event.", "source": "juraj_google_style"} -{"code": "def __init__(self, path):\n\n super(FilterFile, self).__init__()\n self._path = path", "docstring": "Initializes a filter file.\n\nArgs:\n path (str): path to a file that contains one or more path filters.", "source": "juraj_google_style"} -{"code": "def slice(filename, number_tiles=None, col=None, row=None, save=True):\n\n im = Image.open(filename)\n im_w, im_h = im.size\n\n columns = 0\n rows = 0\n if not number_tiles is None:\n validate_image(im, number_tiles)\n columns, rows = calc_columns_rows(number_tiles)\n extras = (columns * rows) - number_tiles\n else:\n validate_image_col_row(im, col, row)\n columns = col\n rows = row\n extras = (columns * rows) - number_tiles\n\n\n tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))\n\n tiles = []\n number = 1\n for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.\n for pos_x in range(0, im_w - columns, tile_w): # as above.\n area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)\n image = im.crop(area)\n position = (int(floor(pos_x / tile_w)) + 1,\n int(floor(pos_y / tile_h)) + 1)\n coords = (pos_x, pos_y)\n tile = Tile(image, number, position, coords)\n tiles.append(tile)\n number += 1\n if save:\n save_tiles(tiles,\n prefix=get_basename(filename),\n directory=os.path.dirname(filename))\n return tuple(tiles)", "docstring": "Split an image into a specified number of tiles.\n\nArgs:\n filename (str): The filename of the image to split.\n number_tiles (int): The number of tiles required.\n\n Kwargs:\n save (bool): Whether or not to save tiles to disk.\n\nReturns:\n Tuple of :class:`Tile` instances.", "source": "juraj_google_style"} -{"code": "def remove_extra_presentations(self, resource, timeout=-1):\n\n uri = self.URI + \"/repair\"\n custom_headers = {'Accept-Language': 'en_US'}\n return self._client.create(resource, uri=uri, timeout=timeout, custom_headers=custom_headers)", 
"docstring": "Removes extra presentations from a specified server profile.\n\nArgs:\n resource (dict):\n Object to create\n timeout:\n Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\n in OneView; it just stops waiting for its completion.\n\nReturns:\n dict: Associated storage attachment resource.", "source": "juraj_google_style"} -{"code": "def _cursor_pb(cursor_pair):\n\n if cursor_pair is not None:\n data, before = cursor_pair\n value_pbs = [_helpers.encode_value(value) for value in data]\n return query_pb2.Cursor(values=value_pbs, before=before)", "docstring": "Convert a cursor pair to a protobuf.\n\n If ``cursor_pair`` is :data:`None`, just returns :data:`None`.\n\nArgs:\n cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of\n\n * a list of field values.\n * a ``before`` flag\n\nReturns:\n Optional[google.cloud.firestore_v1beta1.types.Cursor]: A\n protobuf cursor corresponding to the values.", "source": "juraj_google_style"} -{"code": "def _lookup_model(cls, kind, default_model=None):\n\n modelclass = cls._kind_map.get(kind, default_model)\n if modelclass is None:\n raise KindError(\n \"No model class found for kind '%s'. Did you forget to import it?\" %\n kind)\n return modelclass", "docstring": "Get the model class for the kind.\n\nArgs:\n kind: A string representing the name of the kind to lookup.\n default_model: The model class to use if the kind can't be found.\n\nReturns:\n The model class for the requested kind.\n\nRaises:\n KindError: The kind was not found and no default_model was provided.", "source": "juraj_google_style"} -{"code": "def get_artifact_url(context, task_id, path):\n\n if path.startswith(\"public/\"):\n url = context.queue.buildUrl('getLatestArtifact', task_id, path)\n else:\n url = context.queue.buildSignedUrl(\n 'getLatestArtifact', task_id, path,\n # XXX Can set expiration kwarg in (int) seconds from now;\n # defaults to 15min.\n )\n\n return url", "docstring": "Get a TaskCluster artifact url.\n\nArgs:\n context (scriptworker.context.Context): the scriptworker context\n task_id (str): the task id of the task that published the artifact\n path (str): the relative path of the artifact\n\nReturns:\n str: the artifact url\n\nRaises:\n TaskClusterFailure: on failure.", "source": "juraj_google_style"} -{"code": "def roots_in_unit_interval(coeffs):\n r\n all_roots = polynomial.polyroots(coeffs)\n # Only keep roots inside or very near to the unit interval.\n all_roots = all_roots[\n (_UNIT_INTERVAL_WIGGLE_START < all_roots.real)\n & (all_roots.real < _UNIT_INTERVAL_WIGGLE_END)\n ]\n # Only keep roots with very small imaginary part. 
(Really only\n # keep the real parts.)\n real_inds = np.abs(all_roots.imag) < _IMAGINARY_WIGGLE\n return all_roots[real_inds].real", "docstring": "r\"\"\"Compute roots of a polynomial in the unit interval.\n\nArgs:\n coeffs (numpy.ndarray): A 1D array (size ``d + 1``) of coefficients in\n monomial / power basis.\n\nReturns:\n numpy.ndarray: ``N``-array of real values in :math:`\\left[0, 1\\right]`.", "source": "juraj_google_style"} -{"code": "def parse_conservations(variant):\n\n conservations = {}\n\n conservations['gerp'] = parse_conservation(\n variant,\n 'dbNSFP_GERP___RS'\n )\n conservations['phast'] = parse_conservation(\n variant,\n 'dbNSFP_phastCons100way_vertebrate'\n )\n conservations['phylop'] = parse_conservation(\n variant,\n 'dbNSFP_phyloP100way_vertebrate'\n )\n return conservations", "docstring": "Parse the conservation predictors\n\nArgs:\n variant(dict): A variant dictionary\n\nReturns:\n conservations(dict): A dictionary with the conservations", "source": "juraj_google_style"} -{"code": "def verified(context, collaborator, test, outpath=None):\n\n written_files = 0\n collaborator = collaborator or 'cust000'\n LOG.info('Exporting verified variants for cust {}'.format(collaborator))\n\n adapter = context.obj['adapter']\n verified_vars = adapter.verified(institute_id=collaborator)\n LOG.info('FOUND {} verified variants for institute {}'.format(len(verified_vars), collaborator))\n\n\n if not verified_vars:\n LOG.warning('There are no verified variants for institute {} in database!'.format(collaborator))\n return None\n\n document_lines = export_verified_variants(verified_vars)\n\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n document_name = '.'.join(['verified_variants', collaborator, today]) + '.xlsx'\n\n # If this was a test and lines are created return success\n if test and document_lines:\n written_files +=1\n LOG.info('Success. Verified variants file contains {} lines'.format(len(document_lines)))\n return written_files\n\n # create workbook and new sheet\n # set up outfolder\n if not outpath:\n outpath = str(os.getcwd())\n workbook = Workbook(os.path.join(outpath,document_name))\n Report_Sheet = workbook.add_worksheet()\n\n # Write the column header\n row = 0\n for col,field in enumerate(VERIFIED_VARIANTS_HEADER):\n Report_Sheet.write(row,col,field)\n\n # Write variant lines, after header (start at line 1)\n for row, line in enumerate(document_lines,1): # each line becomes a row in the document\n for col, field in enumerate(line): # each field in line becomes a cell\n Report_Sheet.write(row,col,field)\n workbook.close()\n\n if os.path.exists(os.path.join(outpath,document_name)):\n LOG.info('Success. Verified variants file of {} lines was written to disk'. 
format(len(document_lines)))\n written_files += 1\n\n return written_files", "docstring": "Export variants which have been verified for an institute\n and write them to an excel file.\n\nArgs:\n collaborator(str): institute id\n test(bool): True if the function is called for testing purposes\n outpath(str): path to output file\n\nReturns:\n written_files(int): number of written or simulated files", "source": "juraj_google_style"} -{"code": "def load_and_save_resfile(filename, outfile=None, outdir=None, mass=1.00):\n\n d = CellpyData()\n\n if not outdir:\n outdir = prms.Paths[\"cellpydatadir\"]\n\n if not outfile:\n outfile = os.path.basename(filename).split(\".\")[0] + \".h5\"\n outfile = os.path.join(outdir, outfile)\n\n print(\"filename:\", filename)\n print(\"outfile:\", outfile)\n print(\"outdir:\", outdir)\n print(\"mass:\", mass, \"mg\")\n\n d.from_raw(filename)\n d.set_mass(mass)\n d.make_step_table()\n d.make_summary()\n d.save(filename=outfile)\n d.to_csv(datadir=outdir, cycles=True, raw=True, summary=True)\n return outfile", "docstring": "Load a raw data file and save it as cellpy-file.\n\nArgs:\n mass (float): active material mass [mg].\n outdir (path): optional, path to directory for saving the hdf5-file.\n outfile (str): optional, name of hdf5-file.\n filename (str): name of the resfile.\n\nReturns:\n out_file_name (str): name of saved file.", "source": "juraj_google_style"} -{"code": "def is_valid(self, value):\n\n if not self.is_array:\n return self._valid(value)\n\n if isinstance(value, (list, set, tuple)):\n return all([self._valid(item) for item in value])\n\n return self._valid(value)", "docstring": "Validate value before actual instance setting based on type.\n\nArgs:\n value (object): The value object for validation.\n\nReturns:\n True if value validation succeeds else False.", "source": "juraj_google_style"} -{"code": "def GetFileEntryByPathSpec(self, path_spec):\n\n return fvde_file_entry.FVDEFileEntry(\n self._resolver_context, self, path_spec, is_root=True, is_virtual=True)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\n path_spec (PathSpec): path specification.\n\nReturns:\n FVDEFileEntry: file entry or None.", "source": "juraj_google_style"} -{"code": "def delete(self, key):\n\n key = self._service_key(key)\n self._service_ops['delete'](key)", "docstring": "Removes the object named by `key` in `service`.\n\nArgs:\n key: Key naming the object to remove.", "source": "juraj_google_style"} -{"code": "def load_obj(fn):\n\n position = [np.zeros(3, dtype=np.float32)]\n normal = [np.zeros(3, dtype=np.float32)]\n uv = [np.zeros(2, dtype=np.float32)]\n\n tuple2idx = OrderedDict()\n trinagle_indices = []\n\n input_file = open(fn) if isinstance(fn, str) else fn\n for line in input_file:\n line = line.strip()\n if not line or line[0] == '#':\n continue\n line = line.split(' ', 1)\n tag = line[0]\n if len(line) > 1:\n line = line[1]\n else:\n line = ''\n if tag == 'v':\n position.append(np.fromstring(line, sep=' '))\n elif tag == 'vt':\n uv.append(np.fromstring(line, sep=' '))\n elif tag == 'vn':\n normal.append(np.fromstring(line, sep=' '))\n elif tag == 'f':\n output_face_indices = []\n for chunk in line.split():\n # tuple order: pos_idx, uv_idx, normal_idx\n vt = _parse_vertex_tuple(chunk)\n if vt not in tuple2idx: # create a new output vertex?\n tuple2idx[vt] = len(tuple2idx)\n output_face_indices.append(tuple2idx[vt])\n # generate face triangles\n for i in range(1, len(output_face_indices)-1):\n for vi in [0, i, i+1]:\n 
trinagle_indices.append(output_face_indices[vi])\n\n outputs = {}\n outputs['face'] = np.int32(trinagle_indices)\n pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T\n if np.any(pos_idx):\n outputs['position'] = _unify_rows(position)[pos_idx]\n if np.any(uv_idx):\n outputs['uv'] = _unify_rows(uv)[uv_idx]\n if np.any(normal_idx):\n outputs['normal'] = _unify_rows(normal)[normal_idx]\n return outputs", "docstring": "Load 3d mesh form .obj' file.\n\nArgs:\n fn: Input file name or file-like object.\n\nReturns:\n dictionary with the following keys (some of which may be missing):\n position: np.float32, (n, 3) array, vertex positions\n uv: np.float32, (n, 2) array, vertex uv coordinates\n normal: np.float32, (n, 3) array, vertex uv normals\n face: np.int32, (k*3,) traingular face indices", "source": "juraj_google_style"} -{"code": "def isworkday(self, date):\n\n date = parsefun(date)\n return self.weekdaymap[date.weekday()].isworkday", "docstring": "Check if a given date is a work date, ignoring holidays.\n\nArgs:\n date (date, datetime or str): Date to be checked.\n\nReturns:\n bool: True if the date is a work date, False otherwise.", "source": "juraj_google_style"} -{"code": "def __init__(self, envs, blocking):\n\n self._envs = envs\n self._blocking = blocking\n observ_space = self._envs[0].observation_space\n if not all(env.observation_space == observ_space for env in self._envs):\n raise ValueError('All environments must use the same observation space.')\n action_space = self._envs[0].action_space\n if not all(env.action_space == action_space for env in self._envs):\n raise ValueError('All environments must use the same observation space.')", "docstring": "Combine multiple environments to step them in batch.\n\n To step environments in parallel, environments must support a\n `blocking=False` argument to their step and reset functions that makes them\n return callables instead to receive the result at a later time.\n\nArgs:\n envs: List of environments.\n blocking: Step environments after another rather than in parallel.\n\nRaises:\n ValueError: Environments have different observation or action spaces.", "source": "juraj_google_style"} -{"code": "def _resolve_path(obj, path=None):\n\n if path:\n for attr_name in path.split('__'):\n obj = getattr(obj, attr_name)\n return obj", "docstring": "Resolve django-like path eg. 
object2__object3 for object\n\nArgs:\n obj: The object the view is displaying.\n path (str, optional): Description\n\nReturns:\n A oject at end of resolved path", "source": "juraj_google_style"} -{"code": "def getDiskSpace(self, file_path, upload_path = '', overwrite = False):\n\n\n self.checkAccount()\n\n url = nurls['checkUpload']\n\n file_size = os.stat(file_path).st_size\n file_name = os.path.basename(file_path)\n\n now = datetime.datetime.now().isoformat()\n\n data = {'userid': self.user_id,\n 'useridx': self.useridx,\n 'getlastmodified': now,\n 'dstresource': upload_path + file_name,\n 'overwrite': overwrite,\n 'uploadsize': file_size,\n }\n r = self.session.post(nurls['getDiskSpace'], data = data)\n\n return resultManager(r.text)", "docstring": "getDiskSpace\n\nArgs:\n file_path: Full path for a file you want to checkUpload\n upload_path: Ndrive path where you want to upload file\n ex) /Picture/\n\nReturns:\n True: Possible to upload a file with a given file_size\n False: Impossible to upload a file with a given file_size", "source": "juraj_google_style"} -{"code": "def backend_monitor(backend):\n\n if not isinstance(backend, IBMQBackend):\n raise QiskitError('Input variable is not of type IBMQBackend.')\n config = backend.configuration().to_dict()\n status = backend.status().to_dict()\n config_dict = {**status, **config}\n if not config['simulator']:\n props = backend.properties().to_dict()\n\n print(backend.name())\n print('='*len(backend.name()))\n print('Configuration')\n print('-'*13)\n offset = ' '\n\n upper_list = ['n_qubits', 'operational',\n 'status_msg', 'pending_jobs',\n 'basis_gates', 'local', 'simulator']\n\n lower_list = list(set(config_dict.keys()).difference(upper_list))\n # Remove gates because they are in a different tab\n lower_list.remove('gates')\n for item in upper_list+lower_list:\n print(offset+item+':', config_dict[item])\n\n # Stop here if simulator\n if config['simulator']:\n return\n\n print()\n qubit_header = 'Qubits [Name / Freq / T1 / T2 / U1 err / U2 err / U3 err / Readout err]'\n print(qubit_header)\n print('-'*len(qubit_header))\n\n sep = ' / '\n for qub in range(len(props['qubits'])):\n name = 'Q%s' % qub\n qubit_data = props['qubits'][qub]\n gate_data = props['gates'][3*qub:3*qub+3]\n t1_info = qubit_data[0]\n t2_info = qubit_data[1]\n freq_info = qubit_data[2]\n readout_info = qubit_data[3]\n\n freq = str(round(freq_info['value'], 5))+' '+freq_info['unit']\n T1 = str(round(t1_info['value'], # pylint: disable=invalid-name\n 5))+' ' + t1_info['unit']\n T2 = str(round(t2_info['value'], # pylint: disable=invalid-name\n 5))+' ' + t2_info['unit']\n # pylint: disable=invalid-name\n U1 = str(round(gate_data[0]['parameters'][0]['value'], 5))\n # pylint: disable=invalid-name\n U2 = str(round(gate_data[1]['parameters'][0]['value'], 5))\n # pylint: disable=invalid-name\n U3 = str(round(gate_data[2]['parameters'][0]['value'], 5))\n\n readout_error = str(round(readout_info['value'], 5))\n\n qstr = sep.join([name, freq, T1, T2, U1, U2, U3, readout_error])\n print(offset+qstr)\n\n print()\n multi_qubit_gates = props['gates'][3*config['n_qubits']:]\n multi_header = 'Multi-Qubit Gates [Name / Type / Gate Error]'\n print(multi_header)\n print('-'*len(multi_header))\n\n for gate in multi_qubit_gates:\n name = gate['name']\n ttype = gate['gate']\n error = str(round(gate['parameters'][0]['value'], 5))\n mstr = sep.join([name, ttype, error])\n print(offset+mstr)", "docstring": "Monitor a single IBMQ backend.\n\nArgs:\n backend (IBMQBackend): Backend to 
monitor.\n\nRaises:\n QiskitError: Input is not a IBMQ backend.", "source": "juraj_google_style"} -{"code": "def calculate_expiration(self, token):\n\n if not token:\n return None\n now = datetime.utcnow()\n time_to_live = self.config[\"expiration\"]\n if \"exp\" not in token:\n return now + timedelta(seconds=time_to_live)\n elif self.config[\"refresh\"]:\n exp = datetime.utcfromtimestamp(token[\"exp\"])\n # 0.5: reduce refresh frequent\n if exp - now < timedelta(seconds=0.5 * time_to_live):\n return now + timedelta(seconds=time_to_live)\n return None", "docstring": "Calculate token expiration\n\n return expiration if the token need to set expiration or refresh,\n otherwise return None.\n\nArgs:\n token (dict): a decoded token", "source": "juraj_google_style"} -{"code": "def itersplit(s, sep=None):\n\n if not s:\n yield s\n return\n exp = re.compile(r'\\s+' if sep is None else re.escape(sep))\n pos = 0\n while True:\n m = exp.search(s, pos)\n if not m:\n if pos < len(s) or sep is not None:\n yield s[pos:]\n break\n if pos < m.start() or sep is not None:\n yield s[pos:m.start()]\n pos = m.end()", "docstring": "Split a string by ``sep`` and yield chunks\n\nArgs:\n s (str-type): string to split\n sep (str-type): delimiter to split by\n\nYields:\n generator of strings: chunks of string s", "source": "juraj_google_style"} -{"code": "def InitFromAff4Object(self, aff4_obj):\n\n attr_blacklist = [] # We use this to show attributes only once.\n\n self.types = []\n for aff4_cls in aff4_obj.__class__.__mro__:\n if not hasattr(aff4_cls, \"SchemaCls\"):\n continue\n\n type_repr = ApiAff4ObjectType().InitFromAff4Object(\n aff4_obj, aff4_cls, attr_blacklist)\n\n if type_repr.attributes:\n self.types.append(type_repr)\n\n # Add all attribute names from this type representation to the\n # blacklist to not add them to the result again.\n attr_blacklist.extend([attr.name for attr in type_repr.attributes])\n\n return self", "docstring": "Initializes the current instance from an Aff4Object.\n\n Iterates the inheritance hierarchy of the given Aff4Object and adds a\n ApiAff4ObjectType for each class found in the hierarchy.\n\nArgs:\n aff4_obj: An Aff4Object as source for the initialization.\n\nReturns:\n A reference to the current instance.", "source": "juraj_google_style"} -{"code": "def _PromptUserForAPFSVolumeIdentifiers(\n self, volume_system, volume_identifiers):\n\n print_header = True\n while True:\n if print_header:\n self._PrintAPFSVolumeIdentifiersOverview(\n volume_system, volume_identifiers)\n\n print_header = False\n\n lines = self._textwrapper.wrap(self._USER_PROMPT_APFS)\n self._output_writer.Write('\\n'.join(lines))\n self._output_writer.Write('\\n\\nVolume identifiers: ')\n\n try:\n selected_volumes = self._ReadSelectedVolumes(\n volume_system, prefix='apfs')\n if (not selected_volumes or\n not set(selected_volumes).difference(volume_identifiers)):\n break\n except ValueError:\n pass\n\n self._output_writer.Write('\\n')\n\n lines = self._textwrapper.wrap(\n 'Unsupported volume identifier(s), please try again or abort with '\n 'Ctrl^C.')\n self._output_writer.Write('\\n'.join(lines))\n self._output_writer.Write('\\n\\n')\n\n return selected_volumes", "docstring": "Prompts the user to provide APFS volume identifiers.\n\nArgs:\n volume_system (dfvfs.APFSVolumeSystem): volume system.\n volume_identifiers (list[str]): volume identifiers including prefix.\n\nReturns:\n list[str]: selected volume identifiers including prefix or None.", "source": "juraj_google_style"} -{"code": "def gosper(seqs, f=None, 
start=None, key=lambda x: x):\n\n\n tab = []\n for c, value in enumerate(seqs[0], start=1):\n yield value\n try:\n e = tab.index(key(value))\n raise CycleDetected(\n period=c - ((((c >> e) - 1) | 1) << e))\n except ValueError:\n try:\n tab[(c ^ (c - 1)).bit_length() - 1] = key(value)\n except IndexError:\n tab.append(value)", "docstring": "Gosper's cycle detector\n\n See help(cycle_detector) for more context.\n\nArgs:\n sequence: A sequence to detect cyles in.\n\n f, start: Function and starting state for finite state machine\n\nYields:\n Values yielded by sequence_a if it terminates, undefined if a\n cycle is found.\n\nRaises:\n CycleFound if exception is found. Unlike Floyd and Brent's,\n Gosper's can only detect period of a cycle. It cannot\n compute the first position", "source": "juraj_google_style"} -{"code": "def _select_mgmt_networks(self, conf):\n\n\n nets = conf['nets']\n mgmts = sorted(\n [\n name for name, net in nets.iteritems()\n if net.get('management') is True\n ]\n )\n\n if len(mgmts) == 0:\n mgmt_name = sorted((nets.keys()))[0]\n LOGGER.debug(\n 'No management network configured, selecting network %s',\n mgmt_name\n )\n nets[mgmt_name]['management'] = True\n mgmts.append(mgmt_name)\n\n for mgmt_name in mgmts:\n if nets[mgmt_name].get('dns_domain_name', None) is None:\n nets[mgmt_name]['dns_domain_name'] = 'lago.local'\n\n return mgmts", "docstring": "Select management networks. If no management network is found, it will\n mark the first network found by sorted the network lists. Also adding\n default DNS domain, if none is set.\n\nArgs:\n conf(spec): spec", "source": "juraj_google_style"} -{"code": "def forum_post_undelete(self, post_id):\n\n return self._get('forum_posts/{0}/undelete.json'.format(post_id),\n method='POST', auth=True)", "docstring": "Undelete a specific forum post (Requires login)(Moderator+)(UNTESTED).\n\n Parameters:\n post_id (int): Forum post id.", "source": "juraj_google_style"} -{"code": "def __init__(self, transport, maxdata, remote_banner):\n\n try:\n self.systemtype, self.serial, self.banner = remote_banner.split(':', 2)\n except ValueError:\n raise usb_exceptions.AdbProtocolError('Received malformed banner %s',\n remote_banner)\n self.transport = transport\n self.maxdata = maxdata\n self._last_id_used = 0\n self._reader_lock = threading.Lock()\n self._open_lock = threading.Lock()\n # Maps local_id: AdbStreamTransport object for the relevant stream.\n self._stream_transport_map = {}\n self._stream_transport_map_lock = threading.RLock()", "docstring": "Create an ADB connection to a device.\n\nArgs:\n transport: AdbTransportAdapter to use for reading/writing AdbMessages\n maxdata: Max data size the remote endpoint will accept.\n remote_banner: Banner received from the remote endpoint.", "source": "juraj_google_style"} -{"code": "def joinCommissioned(self, strPSKd='threadjpaketest', waitTime=20):\n\n print '%s call joinCommissioned' % self.port\n self.__sendCommand('ifconfig up')\n cmd = 'joiner start %s %s' %(strPSKd, self.provisioningUrl)\n print cmd\n if self.__sendCommand(cmd)[0] == \"Done\":\n maxDuration = 150 # seconds\n self.joinCommissionedStatus = self.joinStatus['ongoing']\n\n if self.logThreadStatus == self.logStatus['stop']:\n self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(maxDuration,))\n\n t_end = time.time() + maxDuration\n while time.time() < t_end:\n if self.joinCommissionedStatus == self.joinStatus['succeed']:\n break\n elif self.joinCommissionedStatus == self.joinStatus['failed']:\n return False\n\n 
time.sleep(1)\n\n self.__sendCommand('thread start')\n time.sleep(30)\n return True\n else:\n return False", "docstring": "start joiner\n\nArgs:\n strPSKd: Joiner's PSKd\n\nReturns:\n True: successful to start joiner\n False: fail to start joiner", "source": "juraj_google_style"} -{"code": "def add_curves_from_lasio(self, l, remap=None, funcs=None):\n\n params = {}\n for field, (sect, code) in LAS_FIELDS['data'].items():\n params[field] = utils.lasio_get(l,\n sect,\n code,\n remap=remap,\n funcs=funcs)\n\n curves = {c.mnemonic: Curve.from_lasio_curve(c, **params)\n for c in l.curves}\n\n # This will clobber anything with the same key!\n self.data.update(curves)\n\n return None", "docstring": "Given a LAS file, add curves from it to the current well instance.\n Essentially just wraps ``add_curves_from_lasio()``.\n\nArgs:\n fname (str): The path of the LAS file to read curves from.\n remap (dict): Optional. A dict of 'old': 'new' LAS field names.\n funcs (dict): Optional. A dict of 'las field': function() for\n implementing a transform before loading. Can be a lambda.\n\nReturns:\n None. Works in place.", "source": "juraj_google_style"} -{"code": "def get_version_string(version):\n\n version_len = len(version)\n if version_len == 3:\n version_string = '%d.%d.%d' % version\n elif version_len == 4:\n version_string = '%d.%d.%d-%s' % version\n else:\n raise Exception(\n 'Version tuple is non-semver-compliant {} length!'.format(version_len)\n )\n return version_string", "docstring": "Translate a version tuple into a string.\n\n Specify the __version__ as a tuple for more precise comparisons, and\n translate it to __version_string__ for when that's needed.\n\n This function exists primarily for easier unit testing.\n\nArgs:\n version (Tuple[int, int, int, str]): three ints and an optional string.\n\nReturns:\n version_string (str): the tuple translated into a string per semver.org", "source": "juraj_google_style"} -{"code": "def list_insights_components(access_token, subscription_id, resource_group):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', resource_group,\n '/providers/microsoft.insights/',\n '/components?api-version=', INSIGHTS_COMPONENTS_API])\n return do_get(endpoint, access_token)", "docstring": "List the Microsoft Insights components in a resource group.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n\nReturns:\n HTTP response. 
JSON body of components.", "source": "juraj_google_style"} -{"code": "def delete_group_maintainer(self, grp_name, user):\n\n self.service.delete_group_maintainer(\n grp_name, user, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "docstring": "Delete the given user to the named group.\n\n Both group and user must already exist for this to succeed.\n\nArgs:\n name (string): Name of group.\n user (string): User to add to group.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def wrap_inference_results(inference_result_proto):\n\n inference_proto = inference_pb2.InferenceResult()\n if isinstance(inference_result_proto,\n classification_pb2.ClassificationResponse):\n inference_proto.classification_result.CopyFrom(\n inference_result_proto.result)\n elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):\n inference_proto.regression_result.CopyFrom(inference_result_proto.result)\n return inference_proto", "docstring": "Returns packaged inference results from the provided proto.\n\nArgs:\n inference_result_proto: The classification or regression response proto.\n\nReturns:\n An InferenceResult proto with the result from the response.", "source": "juraj_google_style"} -{"code": "def __init__(self, message=None, orig_exc=None, context=None):\n\n self.orig_exc = orig_exc\n if message is not None:\n self.error_message = message\n elif orig_exc is not None:\n # derive the error message from the original exception\n self.error_message = \"%s\" % orig_exc\n\n self.context = context or StatikErrorContext()\n if not isinstance(self.context, StatikErrorContext):\n raise TypeError(\"Statik error context must be of type StatikErrorContext\")", "docstring": "Constructor.\n\nArgs:\n message: An optional message to override the predefined error message.\n orig_exc: The original exception from which this error was generated.\n context: An optional ErrorContext instance to provide additional information during\n error rendering.", "source": "juraj_google_style"} -{"code": "def build(cls, seqs: Iterable[int], uid: bool = False) -> 'SequenceSet':\n\n seqs_list = sorted(set(seqs))\n groups: List[Union[int, Tuple[int, int]]] = []\n group: Union[int, Tuple[int, int]] = seqs_list[0]\n for i in range(1, len(seqs_list)):\n group_i = seqs_list[i]\n if isinstance(group, int):\n if group_i == group + 1:\n group = (group, group_i)\n else:\n groups.append(group)\n group = group_i\n elif isinstance(group, tuple):\n if group_i == group[1] + 1:\n group = (group[0], group_i)\n else:\n groups.append(group)\n group = group_i\n groups.append(group)\n return SequenceSet(groups, uid)", "docstring": "Build a new sequence set that contains the given values using as\n few groups as possible.\n\nArgs:\n seqs: The sequence values to build.\n uid: True if the sequences refer to message UIDs.", "source": "juraj_google_style"} -{"code": "def dependency_of_fetches(fetches, op):\n\n try:\n from tensorflow.python.client.session import _FetchHandler as FetchHandler\n # use the graph of the op, so that this function can be called without being under a default graph\n handler = FetchHandler(op.graph, fetches, {})\n targets = tuple(handler.fetches() + handler.targets())\n except ImportError:\n if isinstance(fetches, list):\n targets = tuple(fetches)\n elif isinstance(fetches, dict):\n raise ValueError(\"Don't know how to parse dictionary to fetch list! 
\"\n \"This is a bug of tensorpack.\")\n else:\n targets = (fetches, )\n return dependency_of_targets(targets, op)", "docstring": "Check that op is in the subgraph induced by the dependencies of fetches.\n fetches may have more general structure.\n\nArgs:\n fetches: An argument to `sess.run`. Nested structure will affect performance.\n op (tf.Operation or tf.Tensor):\n\nReturns:\n bool: True if any of `fetches` depend on `op`.", "source": "juraj_google_style"} -{"code": "def get_user_roles(self, user):\n\n return self.service.get_user_roles(\n user, self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Get roles associated with the given user.\n\nArgs:\n user (string): User name.\n\nReturns:\n (list): List of roles that user has.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def add_gene_panel(self, panel_obj):\n\n panel_name = panel_obj['panel_name']\n panel_version = panel_obj['version']\n display_name = panel_obj.get('display_name', panel_name)\n\n if self.gene_panel(panel_name, panel_version):\n raise IntegrityError(\"Panel {0} with version {1} already\"\n \" exist in database\".format(panel_name, panel_version))\n LOG.info(\"loading panel {0}, version {1} to database\".format(\n display_name, panel_version\n ))\n result = self.panel_collection.insert_one(panel_obj)\n LOG.debug(\"Panel saved\")\n return result.inserted_id", "docstring": "Add a gene panel to the database\n\nArgs:\n panel_obj(dict)", "source": "juraj_google_style"} -{"code": "def rating(self, value):\n\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n request_data = {'rating': value}\n return self.tc_requests.update(\n self.api_type, self.api_sub_type, self.unique_id, request_data, owner=self.owner\n )", "docstring": "Updates the Indicators rating\n\nArgs:\n value:", "source": "juraj_google_style"} -{"code": "def write_config_file(self, parsed_namespace, output_file_paths, exit_after=False):\n\n for output_file_path in output_file_paths:\n # validate the output file path\n try:\n with open(output_file_path, \"w\") as output_file:\n pass\n except IOError as e:\n raise ValueError(\"Couldn't open %s for writing: %s\" % (\n output_file_path, e))\n if output_file_paths:\n # generate the config file contents\n config_items = self.get_items_for_config_file_output(\n self._source_to_settings, parsed_namespace)\n file_contents = self._config_file_parser.serialize(config_items)\n for output_file_path in output_file_paths:\n with open(output_file_path, \"w\") as output_file:\n output_file.write(file_contents)\n message = \"Wrote config file to \" + \", \".join(output_file_paths)\n if exit_after:\n self.exit(0, message)\n else:\n print(message)", "docstring": "Write the given settings to output files.\n\nArgs:\n parsed_namespace: namespace object created within parse_known_args()\n output_file_paths: any number of file paths to write the config to\n exit_after: whether to exit the program after writing the config files", "source": "juraj_google_style"} -{"code": "def exceptions(error_is_fatal=True, error_messages=None):\n\n\n def exception_decorator(func):\n nonlocal error_messages\n\n @functools.wraps(func)\n def exc_wrapper(*args, **kwargs):\n nonlocal error_messages\n try:\n result = func(*args, **kwargs)\n except sa.exc.SQLAlchemyError as err:\n result = None\n details = None\n err_type = err.__class__\n if error_messages and err_type in error_messages:\n details = error_messages[err_type]\n if details:\n LOG.error(details)\n 
LOG.error(\"For developers: (%s) %s\", err.__class__, str(err))\n if error_is_fatal:\n sys.exit(\"Abort, SQL operation failed.\")\n if not ui.ask(\n \"I can continue at your own risk, do you want that?\"):\n raise err\n return result\n\n return exc_wrapper\n\n return exception_decorator", "docstring": "Handle SQLAlchemy exceptions in a sane way.\n\nArgs:\n func: An arbitrary function to wrap.\n error_is_fatal: Should we exit the program on exception?\n reraise: Should we reraise the exception, after logging? Only makes sense\n if error_is_fatal is False.\n error_messages: A dictionary that assigns an exception class to a\n customized error message.", "source": "juraj_google_style"} -{"code": "def new(self, ext_id, ext_des, ext_src):\n # type: (bytes, bytes, bytes) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('ER record already initialized!')\n\n self.ext_id = ext_id\n self.ext_des = ext_des\n self.ext_src = ext_src\n self.ext_ver = 1\n\n self._initialized = True", "docstring": "Create a new Rock Ridge Extensions Reference record.\n\n Parameters:\n ext_id - The extension identifier to use.\n ext_des - The extension descriptor to use.\n ext_src - The extension specification source to use.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def find(self, username):\n\n filter = ['(uid={})'.format(username)]\n results = self.client.search(filter)\n\n if len(results) < 1:\n raise ldap_tools.exceptions.NoUserFound(\n 'User ({}) not found'.format(username))\n return # pragma: no cover\n elif len(results) > 1:\n raise ldap_tools.exceptions.TooManyResults(\n 'Multiple users found. Please narrow your search.')\n return # pragma: no cover\n else:\n return results", "docstring": "Find user with given username.\n\nArgs:\n username Username of the user to search for\n\nRaises:\n ldap_tools.exceptions.NoUserFound: No users returned by LDAP\n ldap_tools.exceptions.TooManyResults:\n Multiple users returned by LDAP", "source": "juraj_google_style"} -{"code": "def list_street_poi_parking(self, **kwargs):\n\n # Endpoint parameters\n url_args = {\n 'language': util.language_code(kwargs.get('lang')),\n 'address': kwargs.get('address', '')\n }\n\n # Request\n result = self.make_request('list_street_poi_parking', url_args)\n\n if not util.check_result(result):\n return False, result.get('message', 'UNKNOWN ERROR')\n\n # Parse\n values = util.response_list(result, 'Data')\n return True, [emtype.ParkingPoi(**a) for a in values]", "docstring": "Obtain a list of addresses and POIs.\n\n This endpoint uses an address to perform the search\n\nArgs:\n lang (str): Language code (*es* or *en*).\n address (str): Address in which to perform the search.\n\nReturns:\n Status boolean and parsed response (list[ParkingPoi]), or message\n string in case of error.", "source": "juraj_google_style"} -{"code": "def account_id(self, value):\n\n if type(value) is not str:\n raise TypeError(\"commit value must be string\")\n self._account_id = value", "docstring": "Sets the current account id\n\nArgs:\n value: current account id (string)\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def firmware_outdated(self):\n\n datefmt = ' %b %d %Y %H:%M:%S'\n\n compat_date = self.compatible_firmware_version.split('compiled')[1]\n compat_date = datetime.datetime.strptime(compat_date, datefmt)\n\n fw_date = self.firmware_version.split('compiled')[1]\n fw_date = datetime.datetime.strptime(fw_date, datefmt)\n return (compat_date > fw_date)", "docstring": "Returns whether the J-Link's 
firmware version is older than the one\n that the DLL is compatible with.\n\nNote:\n This is not the same as calling ``not jlink.firmware_newer()``.\n\nArgs:\n self (JLink): the ``JLink`` instance\n\nReturns:\n ``True`` if the J-Link's firmware is older than the one supported by\n the DLL, otherwise ``False``.", "source": "juraj_google_style"} -{"code": "def write_block(self, block_, body):\n\n self.write('for ; πF.State() >= 0; πF.PopCheckpoint() {')\n with self.indent_block():\n self.write('switch πF.State() {')\n self.write('case 0:')\n for checkpoint in block_.checkpoints:\n self.write_tmpl('case $state: goto Label$state', state=checkpoint)\n self.write('default: panic(\"unexpected function state\")')\n self.write('}')\n # Assume that body is aligned with goto labels.\n with self.indent_block(-1):\n self.write(body)\n self.write('}')", "docstring": "Outputs the boilerplate necessary for code blocks like functions.\n\nArgs:\n block_: The Block object representing the code block.\n body: String containing Go code making up the body of the code block.", "source": "juraj_google_style"} -{"code": "def select_action(self, next_action_arr, next_q_arr):\n\n key_arr = self.select_action_key(next_action_arr, next_q_arr)\n return next_action_arr[key_arr], next_q_arr[key_arr]", "docstring": "Select action by Q(state, action).\n\nArgs:\n next_action_arr: `np.ndarray` of actions.\n next_q_arr: `np.ndarray` of Q-Values.\n\n Retruns:\n Tuple(`np.ndarray` of action., Q-Value)", "source": "juraj_google_style"} -{"code": "def NewFromJSON(data):\n\n if data.get('shakes', None):\n shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')]\n else:\n shakes = None\n\n return User(\n id=data.get('id', None),\n name=data.get('name', None),\n profile_image_url=data.get('profile_image_url', None),\n about=data.get('about', None),\n website=data.get('website', None),\n shakes=shakes)", "docstring": "Create a new User instance from a JSON dict.\n\nArgs:\n data (dict): JSON dictionary representing a user.\n\nReturns:\n A User instance.", "source": "juraj_google_style"} -{"code": "def ClientCertFromCSR(cls, csr):\n\n builder = x509.CertificateBuilder()\n # Use the client CN for a cert serial_id. 
This will ensure we do\n # not have clashing cert id.\n common_name = csr.GetCN()\n serial = int(common_name.split(\".\")[1], 16)\n builder = builder.serial_number(serial)\n builder = builder.subject_name(\n x509.Name(\n [x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))\n\n now = rdfvalue.RDFDatetime.Now()\n now_plus_year = now + rdfvalue.Duration(\"52w\")\n builder = builder.not_valid_after(now_plus_year.AsDatetime())\n now_minus_ten = now - rdfvalue.Duration(\"10s\")\n builder = builder.not_valid_before(now_minus_ten.AsDatetime())\n # TODO(user): dependency loop with\n # grr/core/grr_response_core/config/client.py.\n # pylint: disable=protected-access\n ca_cert = config_lib._CONFIG[\"CA.certificate\"]\n # pylint: enable=protected-access\n builder = builder.issuer_name(ca_cert.GetIssuer())\n builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())\n\n # TODO(user): dependency loop with\n # grr/core/grr_response_core/config/client.py.\n # pylint: disable=protected-access\n ca_key = config_lib._CONFIG[\"PrivateKeys.ca_key\"]\n # pylint: enable=protected-access\n\n return RDFX509Cert(\n builder.sign(\n private_key=ca_key.GetRawPrivateKey(),\n algorithm=hashes.SHA256(),\n backend=openssl.backend))", "docstring": "Creates a new cert for the given common name.\n\nArgs:\n csr: A CertificateSigningRequest.\n\nReturns:\n The signed cert.", "source": "juraj_google_style"} -{"code": "def connect_socket(root_dir):\n\n # Get config directory where the daemon socket is located\n config_dir = os.path.join(root_dir, '.config/pueue')\n\n # Create Socket and exit with 1, if socket can't be created\n try:\n client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n socket_path = os.path.join(config_dir, 'pueue.sock')\n if os.path.exists(socket_path):\n client.connect(socket_path)\n else:\n print(\"Socket doesn't exist\")\n raise Exception\n except:\n print(\"Error connecting to socket. Make sure the daemon is running\")\n sys.exit(1)\n return client", "docstring": "Connect to a daemon's socket.\n\nArgs:\n root_dir (str): The directory that used as root by the daemon.\n\nReturns:\n socket.socket: A socket that is connected to the daemon.", "source": "juraj_google_style"} -{"code": "def load_filename(self, filename, index=None):\n\n filename = str(filename) # QString protection\n if index is None:\n index = self._get_tab_index()\n page = self.pages[index]\n\n # Maybe this is set on purpose before loading attempt to leave new load_dir set (?)\n self.load_dir, _ = os.path.split(filename)\n\n clss = page.clss_load\n if len(clss) == 1:\n # If there is only one class to handle the file, will load it in a way that eventual\n # load errors will raise\n f = clss[0]()\n f.load(filename)\n else:\n # At the moment, the multi-class alternative will not display particular error information\n # if the file does not load\n f = f311.load_with_classes(filename, page.clss_load)\n if f is None:\n raise RuntimeError(\"Could not load '{0!s}'\".format(filename))\n\n self.load(f, index)", "docstring": "Loads file given filename\n\nArgs:\n filename:\n index: tab index to load file into. If not passed, loads into current tab", "source": "juraj_google_style"} -{"code": "def keep_artifacts(self, **kwargs):\n\n path = '%s/%s/artifacts/keep' % (self.manager.path, self.get_id())\n self.manager.gitlab.http_post(path)", "docstring": "Prevent artifacts from being deleted when expiration is set.\n\nArgs:\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabCreateError: If the request could not be performed", "source": "juraj_google_style"} -{"code": "def __init__(self, reactants, products):\n\n self._input_reactants = reactants\n self._input_products = products\n self._all_comp = reactants + products\n\n els = set()\n for c in self.all_comp:\n els.update(c.elements)\n els = sorted(els)\n\n # Solving:\n # | 0 R |\n # [ x y ] | | = [ 1 .. 1 0 .. 0]\n # | C P |\n # x, y are the coefficients of the reactants and products\n # R, P the matrices of the element compositions of the reactants\n # and products\n # C is a constraint matrix that chooses which compositions to normalize to\n\n # try just normalizing to just the first product\n rp_mat = np.array([[c[el] for el in els] for c in self._all_comp])\n f_mat = np.concatenate([np.zeros((len(rp_mat), 1)), rp_mat], axis=1)\n f_mat[len(reactants), 0] = 1 # set normalization by the first product\n b = np.zeros(len(els) + 1)\n b[0] = 1\n coeffs, res, _, s = np.linalg.lstsq(f_mat.T, b, rcond=None)\n\n # for whatever reason the rank returned by lstsq isn't always correct\n # seems to be a problem with low-rank M but inconsistent system\n # M x = b.\n # the singular values seem ok, so checking based on those\n if sum(np.abs(s) > 1e-12) == len(f_mat):\n if res.size > 0 and res[0] > self.TOLERANCE ** 2:\n raise ReactionError(\"Reaction cannot be balanced.\")\n else:\n ok = True\n else:\n # underdetermined, add product constraints to make non-singular\n ok = False\n n_constr = len(rp_mat) - np.linalg.matrix_rank(rp_mat)\n f_mat = np.concatenate([np.zeros((len(rp_mat), n_constr)),\n rp_mat], axis=1)\n b = np.zeros(f_mat.shape[1])\n b[:n_constr] = 1\n\n # try setting C to all n_constr combinations of products\n for inds in itertools.combinations(range(len(reactants),\n len(f_mat)),\n n_constr):\n f_mat[:, :n_constr] = 0\n for j, i in enumerate(inds):\n f_mat[i, j] = 1\n # try a solution\n coeffs, res, _, s = np.linalg.lstsq(f_mat.T, b, rcond=None)\n if sum(np.abs(s) > 1e-12) == len(self._all_comp) and \\\n (res.size == 0 or res[0] < self.TOLERANCE ** 2):\n ok = True\n break\n\n if not ok:\n r_mat = np.array([[c[el] for el in els] for c in reactants])\n reactants_underdetermined = (\n np.linalg.lstsq(r_mat.T, np.zeros(len(els)), rcond=None)[2]\n != len(reactants))\n if reactants_underdetermined:\n raise ReactionError(\"Reaction cannot be balanced. \"\n \"Reactants are underdetermined.\")\n raise ReactionError(\"Reaction cannot be balanced. \"\n \"Unknown error, please report.\")\n\n self._els = els\n self._coeffs = coeffs", "docstring": "Reactants and products to be specified as list of\n pymatgen.core.structure.Composition. 
e.g., [comp1, comp2]\n\nArgs:\n reactants ([Composition]): List of reactants.\n products ([Composition]): List of products.", "source": "juraj_google_style"} -{"code": "def listen(self, orb, listeners):\n\n\n if isinstance(listeners, Listener):\n listeners = [listeners]\n\n orb.event = None\n results = []\n for listener in listeners:\n if listener.check(orb):\n results.append(self._bisect(listener.prev, orb, listener))\n\n # Saving of the current value for the next iteration\n listener.prev = orb\n return sorted(results, key=lambda x: x.date)", "docstring": "This method allows to loop over the listeners and trigger the :py:meth:`_bisect` method\n in case a watched parameter has its state changed.\n\nArgs:\n orb (Orbit): The current state of the orbit\n listeners (iterable): List of Listener objects\n\nReturns:\n list of Orbit: Orbits corresponding to events, sorted by dates", "source": "juraj_google_style"} -{"code": "def execute(self, action):\n\n\n self.logger.debug('Executing action: %s', action)\n\n reward = self.wrapped.execute(action)\n if reward:\n self.total_reward += reward\n self.steps += 1\n\n self.logger.debug('Reward received on this step: %.5f',\n reward or 0)\n self.logger.debug('Average reward per step: %.5f',\n self.total_reward / self.steps)\n\n return reward", "docstring": "Execute the indicated action within the environment and\n return the resulting immediate reward dictated by the reward\n program.\n\n Usage:\n immediate_reward = scenario.execute(selected_action)\n\nArgs:\n action: The action to be executed within the current situation.\n\nReturns:\n A float, the reward received for the action that was executed,\n or None if no reward is offered.", "source": "juraj_google_style"} -{"code": "def copy(source, destination):\n\n if os.path.isdir(source):\n return __copytree(source, destination)\n else:\n return __copyfile2(source, destination)", "docstring": "Copy file or directory.\n\nArgs:\n source (str): Source file or directory\n destination (str): Destination file or directory (where to copy).\n\nReturns:\n bool: True if the operation is successful, False otherwise.", "source": "juraj_google_style"} -{"code": "def place_line(self,\n device: 'cirq.google.XmonDevice',\n length: int) -> GridQubitLineTuple:\n\n seqs = AnnealSequenceSearch(device, self.seed).search(self.trace_func)\n return GridQubitLineTuple.best_of(seqs, length)", "docstring": "Runs line sequence search.\n\nArgs:\n device: Chip description.\n length: Required line length.\n\nReturns:\n List of linear sequences on the chip found by simulated annealing\n method.", "source": "juraj_google_style"} -{"code": "def require_attribute(self, attribute: str, typ: Type = _Any) -> None:\n\n attr_nodes = [\n value_node for key_node, value_node in self.yaml_node.value\n if key_node.value == attribute\n ]\n if len(attr_nodes) == 0:\n raise RecognitionError(\n ('{}{}Missing required attribute {}').format(\n self.yaml_node.start_mark, os.linesep, attribute))\n attr_node = attr_nodes[0]\n\n if typ != _Any:\n recognized_types, message = self.__recognizer.recognize(\n attr_node, cast(Type, typ))\n if len(recognized_types) == 0:\n raise RecognitionError(message)", "docstring": "Require an attribute on the node to exist.\n\n If `typ` is given, the attribute must have this type.\n\nArgs:\n attribute: The name of the attribute / mapping key.\n typ: The type the attribute must have.", "source": "juraj_google_style"} -{"code": "def __init__(self):\n\n\n self._local_id = str(uuid4())\n self._creation_date = None\n 
self._last_updated_date = None\n self._id = None\n self._owner = None\n self._parent_id = None\n self._parent_type = None\n self._parent = None\n self._is_dirty = False\n\n self._attribute_errors = dict()\n self._attributes = dict()\n\n self.expose_attribute(local_name='id', remote_name=BambouConfig.get_id_remote_name(), attribute_type=BambouConfig.get_id_type(), is_identifier=True)\n self.expose_attribute(local_name='parent_id', remote_name='parentID', attribute_type=str)\n self.expose_attribute(local_name='parent_type', remote_name='parentType', attribute_type=str)\n self.expose_attribute(local_name='creation_date', remote_name='creationDate', attribute_type=float, is_editable=False)\n self.expose_attribute(local_name='last_updated_date', remote_name='lastUpdatedDate', attribute_type=float, is_editable=False)\n self.expose_attribute(local_name='owner', attribute_type=str, is_readonly=True)\n\n self._fetchers_registry = dict()", "docstring": "Initializes the object with general information\n\nArgs:\n creation_date: float representing time since epoch\n id: identifier of the object\n local_id: internal identifier of the object\n owner: string representing the owner\n parent_id: identifier of the object's parent\n parent_type: type of the parent", "source": "juraj_google_style"} -{"code": "def _compute_nfps_uniform(cum_counts, sizes):\n\n nfps = np.zeros((len(sizes), len(sizes)))\n # All u an l are inclusive bounds for intervals.\n # Compute p = 1, the NFPs\n for l in range(len(sizes)):\n for u in range(l, len(sizes)):\n nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)\n return nfps", "docstring": "Computes the matrix of expected false positives for all possible\n sub-intervals of the complete domain of set sizes, assuming uniform\n distribution of set_sizes within each sub-intervals.\n\nArgs:\n cum_counts: the complete cummulative distribution of set sizes.\n sizes: the complete domain of set sizes.\n\n Return (np.array): the 2-D array of expected number of false positives\n for every pair of [l, u] interval, where l is axis-0 and u is\n axis-1.", "source": "juraj_google_style"} -{"code": "def copy_sizes(self, othervd):\n # type: (PrimaryOrSupplementaryVD) -> None\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')\n\n self.space_size = othervd.space_size\n self.path_tbl_size = othervd.path_tbl_size\n self.path_table_num_extents = othervd.path_table_num_extents", "docstring": "Copy the path_tbl_size, path_table_num_extents, and space_size from\n another volume descriptor.\n\n Parameters:\n othervd - The other volume descriptor to copy from.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def adb_cmd(self, command, **kwargs):\n\n kwargs['timeout'] = kwargs.get('timeout', self._adb_shell_timeout)\n if isinstance(command, list) or isinstance(command, tuple):\n return self.adb_device.run_cmd(*list(command), **kwargs)\n return self.adb_device.run_cmd(command, **kwargs)", "docstring": "Run adb command, for example: adb(['pull', '/data/local/tmp/a.png'])\n\nArgs:\n command: string or list of string\n\nReturns:\n command output", "source": "juraj_google_style"} -{"code": "def create(self, *args, **kwargs):\n\n data = self.get_data('floating_ips/',\n type=POST,\n params={'droplet_id': self.droplet_id})\n\n if data:\n self.ip = data['floating_ip']['ip']\n self.region = data['floating_ip']['region']\n\n return self", "docstring": "Creates a FloatingIP and assigns it to a Droplet.\n\n Note: Every 
argument and parameter given to this method will be\n assigned to the object.\n\nArgs:\n droplet_id: int - droplet id", "source": "juraj_google_style"} -{"code": "def run_board(args):\n\n init_config(args)\n\n # backend service, should import after django settings initialized\n from backend.collector import CollectorService\n\n service = CollectorService(\n args.logdir,\n args.reload_interval,\n standalone=False,\n log_level=args.log_level)\n service.run()\n\n # frontend service\n logger.info(\"Try to start automlboard on port %s\\n\" % args.port)\n command = [\n os.path.join(root_path, \"manage.py\"), \"runserver\",\n \"0.0.0.0:%s\" % args.port, \"--noreload\"\n ]\n execute_from_command_line(command)", "docstring": "Run main entry for AutoMLBoard.\n\nArgs:\n args: args parsed from command line", "source": "juraj_google_style"} -{"code": "def mark_all_as_done(self, **kwargs):\n\n result = self.gitlab.http_post('/todos/mark_as_done', **kwargs)\n try:\n return int(result)\n except ValueError:\n return 0", "docstring": "Mark all the todos as done.\n\nArgs:\n **kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabTodoError: If the server failed to perform the request\n\nReturns:\n int: The number of todos maked done", "source": "juraj_google_style"} -{"code": "def forward(self, input):\n\n sl,bs = input.size()\n if bs!=self.bs:\n self.bs=bs\n self.reset()\n with set_grad_enabled(self.training):\n emb = self.encoder_with_dropout(input, dropout=self.dropoute if self.training else 0)\n emb = self.dropouti(emb)\n raw_output = emb\n new_hidden,raw_outputs,outputs = [],[],[]\n for l, (rnn,drop) in enumerate(zip(self.rnns, self.dropouths)):\n current_input = raw_output\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n raw_output, new_h = rnn(raw_output, self.hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if l != self.n_layers - 1: raw_output = drop(raw_output)\n outputs.append(raw_output)\n\n self.hidden = repackage_var(new_hidden)\n return raw_outputs, outputs", "docstring": "Invoked during the forward propagation of the RNN_Encoder module.\n\nArgs:\n input (Tensor): input of shape (sentence length x batch_size)\n\nReturns:\n raw_outputs (tuple(list (Tensor), list(Tensor)): list of tensors evaluated from each RNN layer without using\n dropouth, list of tensors evaluated from each RNN layer using dropouth,", "source": "juraj_google_style"} -{"code": "def write_file(self, filepath, filename=None, directory=None):\n\n arcname = None\n if filename or directory:\n directory = directory.rstrip(\"/\") + \"/\" if directory else \"\"\n filename = filename or os.path.basename(filepath)\n arcname = \"{}{}\".format(directory, filename)\n self._copy_to_zipfile(filepath, arcname=arcname)\n return arcname or filepath", "docstring": "write_file: Write local file to zip\n\nArgs:\n filepath: (str) location to local file\n directory: (str) directory in zipfile to write file to (optional)\n Returns: path to file in zip\n\n Note: filepath must be a relative path", "source": "juraj_google_style"} -{"code": "def encode_offset_commit_request(cls, group, payloads):\n\n return kafka.protocol.commit.OffsetCommitRequest[0](\n consumer_group=group,\n topics=[(\n topic,\n [(\n partition,\n payload.offset,\n payload.metadata)\n for partition, payload in six.iteritems(topic_payloads)])\n for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])", "docstring": "Encode an 
OffsetCommitRequest struct\n\nArgs:\n group: string, the consumer group you are committing offsets for\n payloads: list of OffsetCommitRequestPayload", "source": "juraj_google_style"} -{"code": "def get_package_from_string(txt, paths=None):\n\n o = VersionedObject(txt)\n return get_package(o.name, o.version, paths=paths)", "docstring": "Get a package given a string.\n\nArgs:\n txt (str): String such as 'foo', 'bah-1.3'.\n paths (list of str, optional): paths to search for package, defaults\n to `config.packages_path`.\n\nReturns:\n `Package` instance, or None if no package was found.", "source": "juraj_google_style"} -{"code": "def get_sendback(self, uuid, key):\n\n def send_back_callback(data):\n self.sendResponse(\n serializers.serialize(data),\n uuid,\n key\n )\n\n return send_back_callback", "docstring": "Return function for sending progress messages back to original caller.\n\nArgs:\n uuid (str): UUID of the received message.\n key (str): Routing key.\n\nReturns:\n fn reference: Reference to function which takes only one data \\\n argument.", "source": "juraj_google_style"} -{"code": "def from_string(cls, text, comments=\"#\", error=\"warn\"):\n\n\n cache = []\n for line in text.splitlines():\n\n # If the line is empty or begins with a comment mark, we skip it.\n if not line.strip() or line.startswith(comments):\n continue\n\n # The startswith conditions include a blank space in order to not take into account\n # lines containing only a COSPAR ID, which happens when an object is detected but the\n # JSpOc doesn't know what is the source yet.\n if line.startswith('1 '):\n cache.append(line)\n elif line.startswith('2 '):\n cache.append(line)\n try:\n yield cls(\"\\n\".join(cache))\n except ValueError as e:\n if error in ('raise', 'warn'):\n if error == \"raise\":\n raise\n else:\n log.warning(str(e))\n\n cache = []\n else:\n # In the 3LE format, the first line (numbered 0, or unnumbered) contains the name\n # of the satellite\n # In the TLE format, this line doesn't exists.\n cache = [line]", "docstring": "Generator of TLEs from a string\n\nArgs:\n text (str): A text containing many TLEs\n comments (str): If a line starts with this character, it is ignored\n error (str): How to handle errors while parsing the text. 
Could be\n 'raise', 'warn' or 'ignore'.\n\nYields:\n Tle:", "source": "juraj_google_style"} -{"code": "def validate(cls, mapper_spec):\n\n if mapper_spec.input_reader_class() != cls:\n raise BadReaderParamsError(\"Input reader class mismatch\")\n params = _get_params(mapper_spec)\n if cls.ENTITY_KIND_PARAM not in params:\n raise BadReaderParamsError(\"Missing mapper parameter 'entity_kind'\")\n if cls.BATCH_SIZE_PARAM in params:\n try:\n batch_size = int(params[cls.BATCH_SIZE_PARAM])\n if batch_size < 1:\n raise BadReaderParamsError(\"Bad batch size: %s\" % batch_size)\n except ValueError, e:\n raise BadReaderParamsError(\"Bad batch size: %s\" % e)\n if cls.NAMESPACE_PARAM in params:\n if not isinstance(params[cls.NAMESPACE_PARAM],\n (str, unicode, type(None))):\n raise BadReaderParamsError(\n \"Expected a single namespace string\")\n if cls.NAMESPACES_PARAM in params:\n raise BadReaderParamsError(\"Multiple namespaces are no longer supported\")\n if cls.FILTERS_PARAM in params:\n filters = params[cls.FILTERS_PARAM]\n if not isinstance(filters, list):\n raise BadReaderParamsError(\"Expected list for filters parameter\")\n for f in filters:\n if not isinstance(f, (tuple, list)):\n raise BadReaderParamsError(\"Filter should be a tuple or list: %s\", f)\n if len(f) != 3:\n raise BadReaderParamsError(\"Filter should be a 3-tuple: %s\", f)\n if not isinstance(f[0], basestring):\n raise BadReaderParamsError(\"First element should be string: %s\", f)\n if f[1] != \"=\":\n raise BadReaderParamsError(\n \"Only equality filters are supported: %s\", f)", "docstring": "Validates mapper spec and all mapper parameters.\n\nArgs:\n mapper_spec: The MapperSpec for this InputReader.\n\nRaises:\n BadReaderParamsError: required parameters are missing or invalid.", "source": "juraj_google_style"} -{"code": "def stream_matching(self, address, name):\n\n\n matching = [x for x in self.entries if x.valid and x.target.matches(address, name)]\n\n rpc_list = []\n for var in matching:\n rpc_list.extend(var.generate_rpcs(address))\n\n return rpc_list", "docstring": "Return the RPCs needed to stream matching config variables to the given tile.\n\n This function will return a list of tuples suitable for passing to\n EmulatedDevice.deferred_rpc.\n\nArgs:\n address (int): The address of the tile that we wish to stream to\n name (str or bytes): The 6 character name of the target tile.\n\nReturns:\n list of tuple: The list of RPCs to send to stream these variables to a tile.", "source": "juraj_google_style"} -{"code": "def __init__(self, key_wrapping_data=None):\n\n super(Key, self).__init__()\n\n self.cryptographic_algorithm = None\n self.cryptographic_length = None\n self.key_format_type = None\n self.key_wrapping_data = key_wrapping_data\n\n # All remaining attributes are not considered part of the public API\n # and are subject to change.\n self._cryptographic_parameters = list()\n\n # The following attributes are placeholders for attributes that are\n # unsupported by kmip.core\n self._usage_limits = None", "docstring": "Create a Key object.\n\nArgs:\n key_wrapping_data(dict): A dictionary containing key wrapping data\n settings, describing how the key value has been wrapped.\n Optional, defaults to None.", "source": "juraj_google_style"} -{"code": "def Lookup(self, name):\n\n if name == '@':\n return self.stack[-1].context\n\n parts = name.split('.')\n value = self._LookUpStack(parts[0])\n\n # Now do simple lookups of the rest of the parts\n for part in parts[1:]:\n try:\n value = value[part]\n except (KeyError, 
TypeError): # TypeError for non-dictionaries\n return self._Undefined(part)\n\n return value", "docstring": "Get the value associated with a name in the current context.\n\n The current context could be an dictionary in a list, or a dictionary\n outside a list.\n\nArgs:\n name: name to lookup, e.g. 'foo' or 'foo.bar.baz'\n\nReturns:\n The value, or self.undefined_str\n\nRaises:\n UndefinedVariable if self.undefined_str is not set", "source": "juraj_google_style"} -{"code": "def bruteVersionStr(self, valu):\n\n try:\n valu, info = self.core.model.type('it:semver').norm(valu)\n subs = info.get('subs')\n return valu, subs\n except s_exc.BadTypeValu:\n # Try doing version part extraction by noming through the string\n subs = s_version.parseVersionParts(valu)\n if subs is None:\n raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr',\n mesg='Unable to brute force version parts out of the string')\n if subs:\n valu = s_version.packVersion(subs.get('major'),\n subs.get('minor', 0),\n subs.get('patch', 0))\n return valu, subs", "docstring": "Brute force the version out of a string.\n\nArgs:\n valu (str): String to attempt to get version information for.\n\nNote:\n This first attempts to parse strings using the it:semver normalization\n before attempting to extract version parts out of the string.\n\nReturns:\n int, dict: The system normalized version integer and a subs dictionary.", "source": "juraj_google_style"} -{"code": "def upload_dict(s3_conn, s3_prefix, data_to_sync):\n\n bucket_name, prefix = split_s3_path(s3_prefix)\n bucket = s3_conn.get_bucket(bucket_name)\n\n for key, value in data_to_sync.items():\n full_name = '{}/{}.json'.format(prefix, key)\n s3_key = boto.s3.key.Key(\n bucket=bucket,\n name=full_name\n )\n logging.info('uploading key %s', full_name)\n s3_key.set_contents_from_string(json.dumps(value))", "docstring": "Syncs a dictionary to an S3 bucket, serializing each value in the\n dictionary as a JSON file with the key as its name.\n\nArgs:\n s3_conn: (boto.s3.connection) an s3 connection\n s3_prefix: (str) the destination prefix\n data_to_sync: (dict)", "source": "juraj_google_style"} -{"code": "def jobs_get(self, job_id, project_id=None):\n\n if project_id is None:\n project_id = self._project_id\n url = Api._ENDPOINT + (Api._JOBS_PATH % (project_id, job_id))\n return datalab.utils.Http.request(url, credentials=self._credentials)", "docstring": "Issues a request to retrieve information about a job.\n\nArgs:\n job_id: the id of the job\n project_id: the project id to use to fetch the results; use None for the default project.\n\nReturns:\n A parsed result object.\n\nRaises:\n Exception if there is an error performing the operation.", "source": "juraj_google_style"} -{"code": "def __init__(self, name, min_val, max_val):\n\n\n self.name = name\n self.min_val = min_val\n self.max_val = max_val\n if type(min_val) != type(max_val):\n raise ValueError('Supplied min_val is not the same type as\\\n supplied max_val: {}, {}'.format(\n type(min_val),\n type(max_val))\n )\n self.dtype = type(min_val + max_val)\n if self.dtype not in SUPPORTED_DTYPES:\n raise ValueError('Unsupported data type: use {}'\n .format(SUPPORTED_DTYPES))", "docstring": "Parameter object\n\nArgs:\n name (str): name of the parameter\n min_val (int or float): minimum allowed value for the parameter\n max_val (int or float): maximum allowed value for the parameter", "source": "juraj_google_style"} -{"code": "def json_to_url(json, symbol):\n\n start = json[0]['date']\n end = json[-1]['date']\n diff = end - 
start\n\n # Get period by a ratio from calculated period to valid periods\n # Ratio closest to 1 is the period\n # Valid values: 300, 900, 1800, 7200, 14400, 86400\n periods = [300, 900, 1800, 7200, 14400, 86400]\n\n diffs = {}\n for p in periods:\n diffs[p] = abs(1 - (p / (diff / len(json)))) # Get ratio\n\n period = min(diffs, key=diffs.get) # Find closest period\n\n url = ('https://poloniex.com/public?command'\n '=returnChartData&currencyPair={0}&start={1}'\n '&end={2}&period={3}').format(symbol, start, end, period) \n return url", "docstring": "Converts a JSON to a URL by the Poloniex API\n\nArgs:\n json: JSON data as a list of dict dates, where the keys are\n the raw market statistics.\n symbol: String of currency pair, like a ticker symbol.\n\nReturns:\n String URL to Poloniex API representing the given JSON.", "source": "juraj_google_style"} -{"code": "def tf_step(self, x, iteration, conjugate, residual, squared_residual):\n\n x, next_iteration, conjugate, residual, squared_residual = super(ConjugateGradient, self).tf_step(\n x, iteration, conjugate, residual, squared_residual\n )\n\n # Ac := A * c_t\n A_conjugate = self.fn_x(conjugate)\n\n # TODO: reference?\n if self.damping > 0.0:\n A_conjugate = [A_conj + self.damping * conj for A_conj, conj in zip(A_conjugate, conjugate)]\n\n # cAc := c_t^T * Ac\n conjugate_A_conjugate = tf.add_n(\n inputs=[tf.reduce_sum(input_tensor=(conj * A_conj)) for conj, A_conj in zip(conjugate, A_conjugate)]\n )\n\n # \\alpha := r_t^2 / cAc\n alpha = squared_residual / tf.maximum(x=conjugate_A_conjugate, y=util.epsilon)\n\n # x_{t+1} := x_t + \\alpha * c_t\n next_x = [t + alpha * conj for t, conj in zip(x, conjugate)]\n\n # r_{t+1} := r_t - \\alpha * Ac\n next_residual = [res - alpha * A_conj for res, A_conj in zip(residual, A_conjugate)]\n\n # r_{t+1}^2 := r_{t+1}^T * r_{t+1}\n next_squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in next_residual])\n\n # \\beta = r_{t+1}^2 / r_t^2\n beta = next_squared_residual / tf.maximum(x=squared_residual, y=util.epsilon)\n\n # c_{t+1} := r_{t+1} + \\beta * c_t\n next_conjugate = [res + beta * conj for res, conj in zip(next_residual, conjugate)]\n\n return next_x, next_iteration, next_conjugate, next_residual, next_squared_residual", "docstring": "Iteration loop body of the conjugate gradient algorithm.\n\nArgs:\n x: Current solution estimate $x_t$.\n iteration: Current iteration counter $t$.\n conjugate: Current conjugate $c_t$.\n residual: Current residual $r_t$.\n squared_residual: Current squared residual $r_t^2$.\n\nReturns:\n Updated arguments for next iteration.", "source": "juraj_google_style"} -{"code": "def __init__(self, name, number, aliases=None, description=None):\n\n super(EnumerationValue, self).__init__()\n self.aliases = aliases or []\n self.description = description\n self.name = name\n self.number = number", "docstring": "Initializes an enumeration value.\n\nArgs:\n name (str): name.\n number (int): number.\n aliases (Optional[list[str]]): aliases.\n description (Optional[str]): description.", "source": "juraj_google_style"} -{"code": "def repl_member_add(self, params):\n\n repl_config = self.config\n member_id = max([member['_id'] for member in repl_config['members']]) + 1\n member_config = self.member_create(params, member_id)\n repl_config['members'].append(member_config)\n if not self.repl_update(repl_config):\n self.member_del(member_id, reconfig=True)\n raise ReplicaSetError(\"Could not add member to ReplicaSet.\")\n return member_id", "docstring": "create new 
mongod instances and add it to the replica set.\n\nArgs:\n params - mongod params\n return True if operation success otherwise False", "source": "juraj_google_style"} -{"code": "def load_pascal(image_set, year, devkit_path, shuffle=False):\n\n image_set = [y.strip() for y in image_set.split(',')]\n assert image_set, \"No image_set specified\"\n year = [y.strip() for y in year.split(',')]\n assert year, \"No year specified\"\n\n # make sure (# sets == # years)\n if len(image_set) > 1 and len(year) == 1:\n year = year * len(image_set)\n if len(image_set) == 1 and len(year) > 1:\n image_set = image_set * len(year)\n assert len(image_set) == len(year), \"Number of sets and year mismatch\"\n\n imdbs = []\n for s, y in zip(image_set, year):\n imdbs.append(PascalVoc(s, y, devkit_path, shuffle, is_train=True))\n if len(imdbs) > 1:\n return ConcatDB(imdbs, shuffle)\n else:\n return imdbs[0]", "docstring": "wrapper function for loading pascal voc dataset\n\n Parameters:\n ----------\n image_set : str\n train, trainval...\n year : str\n 2007, 2012 or combinations splitted by comma\n devkit_path : str\n root directory of dataset\n shuffle : bool\n whether to shuffle initial list\n\nReturns:\n ----------\n Imdb", "source": "juraj_google_style"} -{"code": "def Deserialize(self, reader):\n\n self.Magic = reader.ReadUInt32()\n self.Command = reader.ReadFixedString(12).decode('utf-8')\n self.Length = reader.ReadUInt32()\n\n if self.Length > self.PayloadMaxSizeInt:\n raise Exception(\"invalid format- payload too large\")\n\n self.Checksum = reader.ReadUInt32()\n self.Payload = reader.ReadBytes(self.Length)\n\n checksum = Message.GetChecksum(self.Payload)\n\n if checksum != self.Checksum:\n raise ChecksumException(\"checksum mismatch\")", "docstring": "Deserialize full object.\n\nArgs:\n reader (neo.IO.BinaryReader):", "source": "juraj_google_style"} -{"code": "def record_little_endian(self):\n # type: () -> bytes\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')\n\n return self._record(self.extent_location, self.parent_directory_num)", "docstring": "A method to generate a string representing the little endian version of\n this Path Table Record.\n\n Parameters:\n None.\n\nReturns:\n A string representing the little endian version of this Path Table Record.", "source": "juraj_google_style"} -{"code": "def _NormalizedVolumeIdentifiers(\n self, volume_system, volume_identifiers, prefix='v'):\n\n normalized_volume_identifiers = []\n for volume_identifier in volume_identifiers:\n if isinstance(volume_identifier, int):\n volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)\n\n elif not volume_identifier.startswith(prefix):\n try:\n volume_identifier = int(volume_identifier, 10)\n volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)\n except (TypeError, ValueError):\n pass\n\n try:\n volume = volume_system.GetVolumeByIdentifier(volume_identifier)\n except KeyError:\n volume = None\n\n if not volume:\n raise errors.ScannerError(\n 'Volume missing for identifier: {0:s}.'.format(volume_identifier))\n\n normalized_volume_identifiers.append(volume_identifier)\n\n return normalized_volume_identifiers", "docstring": "Normalizes volume identifiers.\n\nArgs:\n volume_system (VolumeSystem): volume system.\n volume_identifiers (list[int|str]): allowed volume identifiers, formatted\n as an integer or string with prefix.\n prefix (Optional[str]): volume identifier prefix.\n\nReturns:\n list[str]: volume identifiers with 
prefix.\n\nRaises:\n ScannerError: if the volume identifier is not supported or no volume\n could be found that corresponds with the identifier.", "source": "juraj_google_style"} -{"code": "def play(self, wav=None, data=None, rate=16000, channels=1, width=2, block=True, spectrum=None):\n\n if wav:\n f = wave.open(wav, 'rb')\n rate = f.getframerate()\n channels = f.getnchannels()\n width = f.getsampwidth()\n\n def gen(w):\n d = w.readframes(CHUNK_SIZE)\n while d:\n yield d\n d = w.readframes(CHUNK_SIZE)\n w.close()\n\n data = gen(f)\n\n self.stop_event.clear()\n if block:\n self._play(data, rate, channels, width, spectrum)\n else:\n thread = threading.Thread(target=self._play, args=(data, rate, channels, width, spectrum))\n thread.start()", "docstring": "play wav file or raw audio (string or generator)\n\nArgs:\n wav: wav file path\n data: raw audio data, str or iterator\n rate: sample rate, only for raw audio\n channels: channel number, only for raw data\n width: raw audio data width, 16 bit is 2, only for raw data\n block: if true, block until audio is played.\n spectrum: if true, use a spectrum analyzer thread to analyze data", "source": "juraj_google_style"} -{"code": "def read_from_hdx(identifier, configuration=None):\n # type: (str, Optional[Configuration]) -> Optional['ResourceView']\n\n\n resourceview = ResourceView(configuration=configuration)\n result = resourceview._load_from_hdx('resource view', identifier)\n if result:\n return resourceview\n return None", "docstring": "Reads the resource view given by identifier from HDX and returns ResourceView object\n\nArgs:\n identifier (str): Identifier of resource view\n configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\n Optional[ResourceView]: ResourceView object if successful read, None if not", "source": "juraj_google_style"} -{"code": "def create_group(self, name):\n\n self.service.create_group(\n name, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "docstring": "Create a new group.\n\nArgs:\n name (string): Name of the group to create.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def WriteValuesToJSONFile(self, state, values):\n\n value_counters = {}\n max_post_size = config.CONFIG[\"BigQuery.max_file_post_size\"]\n for value in values:\n class_name = value.__class__.__name__\n output_tracker, created = self._GetTempOutputFileHandles(class_name)\n\n # If our output stream is getting huge we should flush everything now and\n # set up new output files. Only start checking when we are getting within\n # range of the limit because we need to flush the stream to check the\n # size. 
Start counting at 0 so we check each file the first time.\n value_counters[class_name] = value_counters.get(class_name, -1) + 1\n if not value_counters[class_name] % max_post_size // 1000:\n\n # Flush our temp gzip handle so we can stat it to see how big it is.\n output_tracker.gzip_filehandle.flush()\n if os.path.getsize(output_tracker.gzip_filehandle.name) > max_post_size:\n # Flush what we have and get new temp output handles.\n self.Flush(state)\n value_counters[class_name] = 0\n output_tracker, created = self._GetTempOutputFileHandles(class_name)\n\n if not output_tracker.schema:\n output_tracker.schema = self.RDFValueToBigQuerySchema(value)\n\n if created:\n # Omit the leading newline for the first entry in the file.\n self._WriteJSONValue(output_tracker.gzip_filehandle, value)\n else:\n self._WriteJSONValue(\n output_tracker.gzip_filehandle, value, delimiter=\"\\n\")\n\n for output_tracker in itervalues(self.temp_output_trackers):\n output_tracker.gzip_filehandle.flush()", "docstring": "Write newline separated JSON dicts for each value.\n\n We write each dict separately so we don't have to hold all of the output\n streams in memory. We open and close the JSON array manually with [].\n\nArgs:\n state: rdf_protodict.AttributedDict with the plugin's state.\n values: RDF values to export.", "source": "juraj_google_style"} -{"code": "def remove_role(self, databaseName, roleName, collectionName=None):\n\n role = {\"databaseName\" : databaseName,\n \"roleName\" : roleName}\n\n if collectionName:\n role[\"collectionName\"] = collectionName\n\n if role in self.roles:\n self.roles.remove(role)", "docstring": "Remove one role\n\nArgs:\n databaseName (str): Database Name\n roleName (RoleSpecs): role\n\n Keyword Args:\n collectionName (str): Collection", "source": "juraj_google_style"} -{"code": "def process_alias_export_namespace(namespace):\n\n namespace.export_path = os.path.abspath(namespace.export_path)\n if os.path.isfile(namespace.export_path):\n raise CLIError(FILE_ALREADY_EXISTS_ERROR.format(namespace.export_path))\n\n export_path_dir = os.path.dirname(namespace.export_path)\n if not os.path.isdir(export_path_dir):\n os.makedirs(export_path_dir)\n\n if os.path.isdir(namespace.export_path):\n namespace.export_path = os.path.join(namespace.export_path, ALIAS_FILE_NAME)", "docstring": "Validate input arguments when the user invokes 'az alias export'.\n\nArgs:\n namespace: argparse namespace object.", "source": "juraj_google_style"} -{"code": "def _project_TH2(self, hist: Hist) -> Any:\n\n if len(self.projection_axes) != 1:\n raise ValueError(len(self.projection_axes), \"Invalid number of axes\")\n\n #logger.debug(f\"self.projection_axes[0].axis: {self.projection_axes[0].axis}, axis range name: {self.projection_axes[0].name}, axis_type: {self.projection_axes[0].axis_type}\")\n # NOTE: We cannot use TH3.ProjectionZ(...) because it has different semantics than ProjectionX\n # and ProjectionY. In particular, it doesn't respect the axis limits of axis onto which it\n # is projected. 
So we have to separate the projection by histogram type as opposed to axis\n # length.\n projection_func_map = {\n TH1AxisType.x_axis.value: hist.ProjectionX,\n TH1AxisType.y_axis.value: hist.ProjectionY\n }\n\n # Determine the axis_type value\n # Use try here instead of checking for a particular type to protect against type changes (say\n # in the enum)\n try:\n # Try to extract the value from an enum\n axis_type = self.projection_axes[0].axis_type.value\n except ValueError:\n # Seems that we received an int, so just use that value\n axis_type = self.axis_type # type: ignore\n\n projection_func = projection_func_map[axis_type]\n\n # Do the actual projection\n logger.info(f\"Projecting onto axis range {self.projection_axes[0].name} from hist {hist.GetName()}\")\n projected_hist = projection_func()\n\n return projected_hist", "docstring": "Perform the actual TH2 -> TH1 projection.\n\n This projection can only be to 1D.\n\nArgs:\n hist (ROOT.TH2): Histogram from which the projections should be performed.\n\nReturns:\n ROOT.TH1: The projected histogram.", "source": "juraj_google_style"} -{"code": "def cleandata(inputlist):\n\n output = []\n for e in inputlist:\n new = []\n for f in e:\n if f == \"--\":\n new.append(None)\n else:\n new.append(float(f))\n output.append(new)\n return output", "docstring": "Helper function for parse.getdata.\n Remove empty variables, convert strings to float\n\nArgs:\n inputlist: list\n List of Variables\n\nReturns:\n ouput:\n Cleaned list", "source": "juraj_google_style"} -{"code": "def Validate(self, value):\n\n if value is None:\n return None\n\n if not isinstance(value, self.rdfclass):\n # Try to coerce the type to the correct rdf_class.\n try:\n r = self.rdfclass()\n r.FromDict(value)\n return r\n except (AttributeError, TypeError, rdfvalue.InitializeError):\n # AttributeError is raised if value contains items that don't\n # belong to the given rdfstruct.\n # TypeError will be raised if value is not a dict-like object.\n raise TypeValueError(\"Value for arg %s should be an %s\" %\n (self.name, self.rdfclass.__name__))\n\n return value", "docstring": "Validate the value.\n\nArgs:\n value: Value is expected to be a dict-like object that a given RDFStruct\n can be initialized from.\n\nRaises:\n TypeValueError: If the value is not a valid dict-like object that a given\n RDFStruct can be initialized from.\n\nReturns:\n A valid instance of self.rdfclass or None.", "source": "juraj_google_style"} -{"code": "def _sync_content_metadata(self, serialized_data):\n\n url = self.enterprise_configuration.sapsf_base_url + self.global_sap_config.course_api_path\n try:\n status_code, response_body = self._call_post_with_session(url, serialized_data)\n except requests.exceptions.RequestException as exc:\n raise ClientError(\n 'SAPSuccessFactorsAPIClient request failed: {error} {message}'.format(\n error=exc.__class__.__name__,\n message=str(exc)\n )\n )\n\n if status_code >= 400:\n raise ClientError(\n 'SAPSuccessFactorsAPIClient request failed with status {status_code}: {message}'.format(\n status_code=status_code,\n message=response_body\n )\n )", "docstring": "Create/update/delete content metadata records using the SuccessFactors OCN Course Import API endpoint.\n\nArgs:\n serialized_data: Serialized JSON string representing a list of content metadata items.\n\nRaises:\n ClientError: If SuccessFactors API call fails.", "source": "juraj_google_style"} -{"code": "def bbox_clip(bboxes, img_shape):\n\n assert bboxes.shape[-1] % 4 == 0\n clipped_bboxes = np.empty_like(bboxes, 
dtype=bboxes.dtype)\n clipped_bboxes[..., 0::2] = np.maximum(\n np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0)\n clipped_bboxes[..., 1::2] = np.maximum(\n np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0)\n return clipped_bboxes", "docstring": "Clip bboxes to fit the image shape.\n\nArgs:\n bboxes (ndarray): Shape (..., 4*k)\n img_shape (tuple): (height, width) of the image.\n\nReturns:\n ndarray: Clipped bboxes.", "source": "juraj_google_style"} -{"code": "def read(self, vals):\n\n i = 0\n if len(vals[i]) == 0:\n self.comments_1 = None\n else:\n self.comments_1 = vals[i]\n i += 1", "docstring": "Read values.\n\nArgs:\n vals (list): list of strings representing values", "source": "juraj_google_style"} -{"code": "def context(name=None):\n\n\n def _context(cls):\n annotated(cls, name)\n cls.context = True\n\n return cls\n\n return _context", "docstring": "Declare that a class defines a context.\n\n Contexts are for use with HierarchicalShell for discovering\n and using functionality from the command line.\n\nArgs:\n name (str): Optional name for this context if you don't want\n to just use the class name.", "source": "juraj_google_style"} -{"code": "def get_poi_types(self, **kwargs):\n\n # Endpoint parameters\n params = {\n 'cultureInfo': util.language_code(kwargs.get('lang'))\n }\n\n # Request\n result = self.make_request('geo', 'get_poi_types', **params)\n\n # Parse\n values = result.get('types', [])\n return True, [emtype.PoiType(**a) for a in values]", "docstring": "Obtain POI types.\n\nArgs:\n lang (str): Language code (*es* or *en*).\n\nReturns:\n Status boolean and parsed response (list[PoiType]), or message string\n in case of error.", "source": "juraj_google_style"} -{"code": "def to_qasm(self,\n header: Optional[str] = None,\n precision: int = 10,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n ) -> str:\n\n return str(self._to_qasm_output(header, precision, qubit_order))", "docstring": "Returns QASM equivalent to the circuit.\n\nArgs:\n header: A multi-line string that is placed in a comment at the top\n of the QASM. Defaults to a cirq version specifier.\n precision: Number of digits to use when representing numbers.\n qubit_order: Determines how qubits are ordered in the QASM\n register.", "source": "juraj_google_style"} -{"code": "def cv_score_mean(self, X, y):\n\n\n X, y = self._format_inputs(X, y)\n\n if self.problem_type.binary_classification:\n kf = StratifiedKFold(\n shuffle=True, random_state=RANDOM_STATE + 3)\n elif self.problem_type.multi_classification:\n self.target_type_transformer.inverse_transform(y)\n transformer = self.target_type_transformer\n kf = StratifiedKFoldMultiClassIndicator(\n transformer, shuffle=True, n_splits=3,\n random_state=RANDOM_STATE + 3)\n elif self.problem_type.regression:\n kf = KFold(shuffle=True, n_splits=3, random_state=RANDOM_STATE + 4)\n else:\n raise NotImplementedError\n\n scoring = {\n scorer_info.name: scorer_info.scorer\n for scorer_info in self.scorers_info\n }\n cv_results = cross_validate(\n self.estimator, X, y,\n scoring=scoring, cv=kf, return_train_score=False)\n\n # post-processing\n results = self._process_cv_results(cv_results)\n return results", "docstring": "Compute mean score across cross validation folds.\n\n Split data and labels into cross validation folds and fit the model for\n each fold. Then, for each scoring type in scorings, compute the score.\n Finally, average the scores across folds. 
Returns a dictionary mapping\n scoring to score.\n\nArgs:\n X (np.array): data\n y (np.array): labels\n scorings (List[str]): scoring types", "source": "juraj_google_style"} -{"code": "def GetMessages(self, formatter_mediator, event):\n\n if self.DATA_TYPE != event.data_type:\n raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n event.data_type))\n\n event_values = event.CopyToDict()\n\n file_entry_type = event_values.get('file_entry_type', None)\n if file_entry_type is not None:\n event_values['file_entry_type'] = self._FILE_ENTRY_TYPES.get(\n file_entry_type, 'UNKNOWN')\n\n # The usage of allocated is deprecated in favor of is_allocated but\n # is kept here to be backwards compatible.\n if (not event_values.get('allocated', False) and\n not event_values.get('is_allocated', False)):\n event_values['unallocated'] = 'unallocated'\n\n return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\n formatter_mediator (FormatterMediator): mediates the interactions\n between formatters and other components, such as storage and Windows\n EventLog resources.\n event (EventObject): event.\n\nReturns:\n tuple(str, str): formatted message string and short message string.\n\nRaises:\n WrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj_google_style"} -{"code": "def _list_like_func(self, func, axis, *args, **kwargs):\n\n func_prepared = self._prepare_method(\n lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs))\n )\n new_data = self._map_across_full_axis(axis, func_prepared)\n # When the function is list-like, the function names become the index/columns\n new_index = (\n [f if isinstance(f, string_types) else f.__name__ for f in func]\n if axis == 0\n else self.index\n )\n new_columns = (\n [f if isinstance(f, string_types) else f.__name__ for f in func]\n if axis == 1\n else self.columns\n )\n return self.__constructor__(new_data, new_index, new_columns)", "docstring": "Apply list-like function across given axis.\n\nArgs:\n func: The function to apply.\n axis: Target axis to apply the function along.\n\nReturns:\n A new PandasQueryCompiler.", "source": "juraj_google_style"} -{"code": "def WriteVarString(self, value, encoding=\"utf-8\"):\n\n if type(value) is str:\n value = value.encode(encoding)\n\n length = len(value)\n ba = bytearray(value)\n byts = binascii.hexlify(ba)\n string = byts.decode(encoding)\n self.WriteVarInt(length)\n self.WriteBytes(string)", "docstring": "Write a string value to the stream.\n Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention\n\nArgs:\n value (string): value to write to the stream.\n encoding (str): string encoding format.", "source": "juraj_google_style"} -{"code": "def split_leading_dim(tensor, inputs, n_dims=2):\n\n input_shape_static = inputs.get_shape()\n input_shape_list = input_shape_static.as_list()\n tensor_shape_static = tensor.get_shape()\n tensor_shape_list = tensor_shape_static.as_list()\n if (input_shape_static.is_fully_defined()\n and tensor_shape_static.is_fully_defined()):\n new_shape = input_shape_list[:n_dims] + tensor_shape_list[1:]\n return tf.reshape(tensor, new_shape)\n\n # Shape can't be inferred statically.\n dims_after_first = tf.shape(tensor)[1:]\n split_sizes = tf.shape(inputs)[:n_dims]\n known_split_sizes = input_shape_list[:n_dims]\n known_dims_after_first = tensor_shape_list[1:]\n output_size = tf.concat([split_sizes, 
dims_after_first], 0)\n result = tf.reshape(tensor, output_size)\n result.set_shape(known_split_sizes + known_dims_after_first)\n return result", "docstring": "Split the first dimension of a tensor.\n\nArgs:\n tensor: Tensor to have its first dimension split.\n inputs: Original reference input to look the dimensions of.\n n_dims: Number of dimensions to split.\n\nReturns:\n The input tensor, with its first dimension split.", "source": "juraj_google_style"} -{"code": "def _decontextualise_connection(self, connection):\n\n\n ctx = stack.top\n if ctx is not None and connection in ctx.ldap3_manager_connections:\n ctx.ldap3_manager_connections.remove(connection)", "docstring": "Remove a connection from the appcontext.\n\nArgs:\n connection (ldap3.Connection): connection to remove from the\n appcontext", "source": "juraj_google_style"} -{"code": "def remove(self, email):\n\n if email in self._collaborators:\n if self._collaborators[email] == ShareRequestValue.Add:\n del self._collaborators[email]\n else:\n self._collaborators[email] = ShareRequestValue.Remove\n self._dirty = True", "docstring": "Remove a Collaborator.\n\nArgs:\n str : Collaborator email address.", "source": "juraj_google_style"} -{"code": "def clone_source_dir(source_dir, dest_dir):\n\n if os.path.isdir(dest_dir):\n print('removing', dest_dir)\n shutil.rmtree(dest_dir)\n shutil.copytree(source_dir, dest_dir)", "docstring": "Copies the source Protobuf files into a build directory.\n\nArgs:\n source_dir (str): source directory of the Protobuf files\n dest_dir (str): destination directory of the Protobuf files", "source": "juraj_google_style"} -{"code": "def setMaximum(self, maximum):\n\n if not isinstance(maximum, int):\n raise TypeError(\"Argument is not of type int or long\")\n self._maximum = maximum", "docstring": "setter to _maximum.\n\nArgs:\n maximum (int or long): new _maximum value", "source": "juraj_google_style"} -{"code": "def predict_proba(self, L):\n\n # First, get the estimated probability distribution over the feasible\n # set defined by the TaskGraph\n # This is an [n,k] array, where k = |(feasible set)|\n Y_pf = LabelModel.predict_proba(self, L)\n n, k = Y_pf.shape\n\n # Now get the per-task marginals\n # TODO: Make this optional, versus just returning the above\n Y_p = [np.zeros((n, k_t)) for k_t in self.task_graph.K]\n for yi, y in enumerate(self.task_graph.feasible_set()):\n for t in range(self.t):\n k_t = int(y[t])\n Y_p[t][:, k_t - 1] += Y_pf[:, yi]\n return Y_p", "docstring": "Returns the task marginals estimated by the model: a t-length list of\n [n,k_t] matrices where the (i,j) entry of the sth matrix represents the\n estimated P((Y_i)_s | \\lambda_j(x_i))\n\nArgs:\n L: A t-length list of [n,m] scipy.sparse label matrices with values\n in {0,1,...,k}", "source": "juraj_google_style"} -{"code": "def add_backend_policy(self, json_data):\n\n env = boto3.session.Session(profile_name=self.env, region_name=self.region)\n elbclient = env.client('elb')\n\n # Attach backend server policies to created ELB\n for job in json.loads(json_data)['job']:\n for listener in job['listeners']:\n instance_port = listener['internalPort']\n backend_policy_list = listener['backendPolicies']\n if backend_policy_list:\n LOG.info('Adding backend server policies: %s', backend_policy_list)\n elbclient.set_load_balancer_policies_for_backend_server(\n LoadBalancerName=self.app, InstancePort=instance_port, PolicyNames=backend_policy_list)", "docstring": "Attaches backend server policies to an ELB\n\nArgs:\n json_data (json): return data 
from ELB upsert", "source": "juraj_google_style"} -{"code": "def decompress_file(filepath):\n\n toks = filepath.split(\".\")\n file_ext = toks[-1].upper()\n from monty.io import zopen\n if file_ext in [\"BZ2\", \"GZ\", \"Z\"]:\n with open(\".\".join(toks[0:-1]), 'wb') as f_out, \\\n zopen(filepath, 'rb') as f_in:\n f_out.writelines(f_in)\n os.remove(filepath)", "docstring": "Decompresses a file with the correct extension. Automatically detects\n gz, bz2 or z extension.\n\nArgs:\n filepath (str): Path to file.\n compression (str): A compression mode. Valid options are \"gz\" or\n \"bz2\". Defaults to \"gz\".", "source": "juraj_google_style"} -{"code": "def title_of_design_condition(self, value=None):\n\n if value is not None:\n try:\n value = str(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type str '\n 'for field `title_of_design_condition`'.format(value))\n if ',' in value:\n raise ValueError('value should not contain a comma '\n 'for field `title_of_design_condition`')\n\n self._title_of_design_condition = value", "docstring": "Corresponds to IDD Field `title_of_design_condition`\n\nArgs:\n value (str): value for IDD Field `title_of_design_condition`\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def __init__(self, process: Process):\n\n self.process = process\n self.stopped_due_to_worker_shutdown = False", "docstring": "Constructor.\n\nArgs:\n process (Process): task process", "source": "juraj_google_style"} -{"code": "def complete(command_line,\n current_token,\n position,\n shell: arg(choices=('bash', 'fish'))):\n\n position = int(position)\n tokens = shlex.split(command_line[:position])\n\n all_argv, run_argv, command_argv = run.partition_argv(tokens[1:])\n run_args = run.parse_args(run_argv)\n\n module = run_args.get('commands_module')\n module = module or DEFAULT_COMMANDS_MODULE\n module = normalize_path(module)\n\n try:\n collection = Collection.load_from_module(module)\n except Exception:\n collection = {}\n\n found_command = find_command(collection, tokens) or run\n\n if current_token:\n # Completing either a command name, option name, or path.\n if current_token.startswith('-'):\n if current_token not in found_command.option_map:\n print_command_options(found_command, current_token)\n else:\n print_commands(collection, shell)\n path = os.path.expanduser(current_token)\n path = os.path.expandvars(path)\n paths = glob.glob('%s*' % path)\n if paths:\n for entry in paths:\n if os.path.isdir(entry):\n print('%s/' % entry)\n else:\n print(entry)\n else:\n # Completing option value. 
If a value isn't expected, show the\n # options for the current command and the list of commands\n # instead.\n option = found_command.option_map.get(tokens[-1])\n\n if option and option.takes_value:\n if option.choices:\n for choice in option.choices:\n print(choice)\n else:\n for entry in os.listdir():\n if os.path.isdir(entry):\n print('%s/' % entry)\n else:\n print(entry)\n else:\n print_command_options(found_command)\n print_commands(collection, shell)", "docstring": "Find completions for current command.\n\n This assumes that we'll handle all completion logic here and that\n the shell's automatic file name completion is disabled.\n\nArgs:\n command_line: Command line\n current_token: Token at cursor\n position: Current cursor position\n shell: Name of shell", "source": "juraj_google_style"} -{"code": "def fastcc_is_consistent(model, epsilon, solver):\n\n for reaction in fastcc(model, epsilon, solver):\n return False\n return True", "docstring": "Quickly check whether model is consistent\n\n Return true if the model is consistent. If it is only necessary to know\n whether a model is consistent, this function is fast as it will return\n the result as soon as it finds a single inconsistent reaction.\n\nArgs:\n model: :class:`MetabolicModel` to solve.\n epsilon: Flux threshold value.\n solver: LP solver instance to use.", "source": "juraj_google_style"} -{"code": "def EnablePlugins(self, plugin_includes):\n\n super(SyslogParser, self).EnablePlugins(plugin_includes)\n\n self._plugin_by_reporter = {}\n for plugin in self._plugins:\n self._plugin_by_reporter[plugin.REPORTER] = plugin", "docstring": "Enables parser plugins.\n\nArgs:\n plugin_includes (list[str]): names of the plugins to enable, where None\n or an empty list represents all plugins. Note that the default plugin\n is handled separately.", "source": "juraj_google_style"} -{"code": "def in_cache(self, objpath, metahash):\n\n try:\n self.path_in_cache(objpath, metahash)\n return True\n except CacheMiss:\n return False", "docstring": "Returns true if object is cached.\n\nArgs:\n objpath: Filename relative to buildroot.\n metahash: hash object", "source": "juraj_google_style"} -{"code": "def fit_size_distribution_models(self, model_names, model_objs, input_columns,\n output_columns=None, calibrate=False):\n\n if output_columns is None:\n output_columns = [\"Shape\", \"Location\", \"Scale\"]\n groups = np.unique(self.data[\"train\"][\"member\"][self.group_col])\n\n weights=None\n\n for group in groups:\n group_data = self.data[\"train\"][\"combo\"].loc[self.data[\"train\"][\"combo\"][self.group_col] == group]\n group_data = group_data.dropna()\n group_data = group_data[group_data[output_columns[-1]] > 0]\n if self.sector:\n\n lon_obj = group_data.loc[:,'Centroid_Lon']\n lat_obj = group_data.loc[:,'Centroid_Lat']\n\n conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel())\n center_lon, center_lat = self.proj_dict[\"lon_0\"],self.proj_dict[\"lat_0\"] \n\n distances = np.array([np.sqrt((x-center_lon)**2+\\\n (y-center_lat)**2) for (x, y) in conus_lat_lon_points])\n\n min_dist, max_minus_min = min(distances),max(distances)-min(distances)\n\n distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances]\n weights = np.array(distance_0_1)\n\n self.size_distribution_models[group] = {\"multi\": {}, \"lognorm\": {}}\n if calibrate:\n self.size_distribution_models[group][\"calshape\"] = {}\n self.size_distribution_models[group][\"calscale\"] = {}\n log_labels = np.log(group_data[output_columns].values)\n log_means 
= log_labels.mean(axis=0)\n log_sds = log_labels.std(axis=0)\n self.size_distribution_models[group]['lognorm']['mean'] = log_means\n self.size_distribution_models[group]['lognorm']['sd'] = log_sds\n for m, model_name in enumerate(model_names):\n print(group, model_name)\n self.size_distribution_models[group][\"multi\"][model_name] = deepcopy(model_objs[m])\n try:\n self.size_distribution_models[group][\"multi\"][model_name].fit(group_data[input_columns],\n (log_labels - log_means) / log_sds,\n sample_weight=weights)\n except:\n self.size_distribution_models[group][\"multi\"][model_name].fit(group_data[input_columns],\n (log_labels - log_means) / log_sds)\n if calibrate:\n training_predictions = self.size_distribution_models[\n group][\"multi\"][model_name].predict(group_data[input_columns])\n self.size_distribution_models[group][\"calshape\"][model_name] = LinearRegression()\n self.size_distribution_models[group][\"calshape\"][model_name].fit(training_predictions[:, 0:1],\n (log_labels[:, 0] - log_means[0]) /\n log_sds[\n 0],\n sample_weight=weights)\n self.size_distribution_models[group][\"calscale\"][model_name] = LinearRegression()\n self.size_distribution_models[group][\"calscale\"][model_name].fit(training_predictions[:, 1:],\n (log_labels[:, 1] - log_means[1]) /\n log_sds[\n 1],\n sample_weight=weights)", "docstring": "Fits multitask machine learning models to predict the parameters of a size distribution\n\nArgs:\n model_names: List of machine learning model names\n model_objs: scikit-learn style machine learning model objects\n input_columns: Training data columns used as input for ML model\n output_columns: Training data columns used for prediction\n calibrate: Whether or not to fit a log-linear regression to predictions from ML model", "source": "juraj_google_style"} -{"code": "def get_template(template):\n\n from cloud_inquisitor.database import db\n\n tmpl = db.Template.find_one(template_name=template)\n if not tmpl:\n raise InquisitorError('No such template found: {}'.format(template))\n\n tmplenv = Environment(loader=BaseLoader, autoescape=True)\n tmplenv.filters['json_loads'] = json.loads\n tmplenv.filters['slack_quote_join'] = lambda data: ', '.join('`{}`'.format(x) for x in data)\n\n return tmplenv.from_string(tmpl.template)", "docstring": "Return a Jinja2 template by filename\n\nArgs:\n template (str): Name of the template to return\n\nReturns:\n A Jinja2 Template object", "source": "juraj_google_style"} -{"code": "def get_response(response: Dict[str, Any]) -> JSONRPCResponse:\n\n if \"error\" in response:\n return ErrorResponse(**response)\n return SuccessResponse(**response)", "docstring": "Converts a deserialized response into a JSONRPCResponse object.\n\n The dictionary be either an error or success response, never a notification.\n\nArgs:\n response: Deserialized response dictionary. 
We can assume the response is valid\n JSON-RPC here, since it passed the jsonschema validation.", "source": "juraj_google_style"} -{"code": "def resize(self, container, height, width):\n\n params = {'h': height, 'w': width}\n url = self._url(\"/containers/{0}/resize\", container)\n res = self._post(url, params=params)\n self._raise_for_status(res)", "docstring": "Resize the tty session.\n\nArgs:\n container (str or dict): The container to resize\n height (int): Height of tty session\n width (int): Width of tty session\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "juraj_google_style"} -{"code": "def read(self, path):\n\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip()\n match_obj_name = re.search(r\"^([A-Z][A-Z/ \\d]+),\", line)\n if match_obj_name is not None:\n internal_name = match_obj_name.group(1)\n if internal_name in self._data:\n self._data[internal_name] = self._create_datadict(\n internal_name)\n data_line = line[len(internal_name) + 1:]\n vals = data_line.strip().split(',')\n self._data[internal_name].read(vals)\n else:\n wd = WeatherData()\n wd.read(line.strip().split(','))\n self.add_weatherdata(wd)", "docstring": "Read EPW weather data from path.\n\nArgs:\n path (str): path to read weather data from", "source": "juraj_google_style"} -{"code": "def _validate_instantiation_options(self, datafile, skip_json_validation):\n\n\n if not skip_json_validation and not validator.is_datafile_valid(datafile):\n raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('datafile'))\n\n if not validator.is_event_dispatcher_valid(self.event_dispatcher):\n raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('event_dispatcher'))\n\n if not validator.is_logger_valid(self.logger):\n raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('logger'))\n\n if not validator.is_error_handler_valid(self.error_handler):\n raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('error_handler'))", "docstring": "Helper method to validate all instantiation parameters.\n\nArgs:\n datafile: JSON string representing the project.\n skip_json_validation: Boolean representing whether JSON schema validation needs to be skipped or not.\n\nRaises:\n Exception if provided instantiation options are valid.", "source": "juraj_google_style"} -{"code": "def convert(self, vroot, entry_variables):\n\n self.graph_info = GraphInfo(vroot)\n self.entry_variables = entry_variables\n\n with nn.parameter_scope(self.name):\n # Function loop in the forward order\n for func in self.graph_info.funcs:\n o = self._identity_conversion(func)\n self.end_variable = o\n return self.end_variable", "docstring": "All functions are replaced with the same `new` function.\n\nArgs:\n vroot (:obj:`Variable`): NNabla Variable\n entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.", "source": "juraj_google_style"} -{"code": "def destroy_sg(app='', env='', region='', **_):\n\n vpc = get_vpc_id(account=env, region=region)\n\n url = '{api}/securityGroups/{env}/{region}/{app}'.format(api=API_URL, env=env, region=region, app=app)\n payload = {'vpcId': vpc}\n security_group = requests.get(url, params=payload, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n\n if not security_group:\n LOG.info('Nothing to delete.')\n else:\n LOG.info('Found Security Group in %(region)s: %(name)s', security_group)\n\n destroy_request = get_template('destroy/destroy_sg.json.j2', app=app, 
env=env, region=region, vpc=vpc)\n wait_for_task(destroy_request)\n\n return True", "docstring": "Destroy Security Group.\n\nArgs:\n app (str): Spinnaker Application name.\n env (str): Deployment environment.\n region (str): Region name, e.g. us-east-1.\n\nReturns:\n True upon successful completion.", "source": "juraj_google_style"} -{"code": "def ParseFileObject(self, parser_mediator, file_object):\n\n if not self.LINE_STRUCTURES:\n raise errors.UnableToParseFile('Missing line structures.')\n\n encoding = self._ENCODING or parser_mediator.codepage\n text_reader = EncodedTextReader(\n encoding, buffer_size=self.BUFFER_SIZE)\n\n text_reader.Reset()\n\n try:\n text_reader.ReadLines(file_object)\n except UnicodeDecodeError as exception:\n raise errors.UnableToParseFile(\n 'Not a text file, with error: {0!s}'.format(exception))\n\n if not self.VerifyStructure(parser_mediator, text_reader.lines):\n raise errors.UnableToParseFile('Wrong file structure.')\n\n # Using parseWithTabs() overrides Pyparsing's default replacement of tabs\n # with spaces to SkipAhead() the correct number of bytes after a match.\n for key, structure in self.LINE_STRUCTURES:\n structure.parseWithTabs()\n\n\n consecutive_line_failures = 0\n # Read every line in the text file.\n while text_reader.lines:\n if parser_mediator.abort:\n break\n\n # Initialize pyparsing objects.\n tokens = None\n start = 0\n end = 0\n\n key = None\n\n index = None\n\n # Try to parse the line using all the line structures.\n for index, (key, structure) in enumerate(self._line_structures):\n try:\n structure_generator = structure.scanString(\n text_reader.lines, maxMatches=1)\n parsed_structure = next(structure_generator, None)\n except pyparsing.ParseException:\n parsed_structure = None\n\n if not parsed_structure:\n continue\n\n tokens, start, end = parsed_structure\n\n # Only want to parse the structure if it starts\n # at the beginning of the buffer.\n if start == 0:\n break\n\n if tokens and start == 0:\n # Move matching key, structure pair to the front of the list, so that\n # structures that are more likely to match are tried first.\n if index is not None and index != 0:\n key_structure = self._line_structures.pop(index)\n self._line_structures.insert(0, key_structure)\n\n try:\n self.ParseRecord(parser_mediator, key, tokens)\n consecutive_line_failures = 0\n except (errors.ParseError, errors.TimestampError) as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to parse record: {0:s} with error: {1!s}'.format(\n key, exception))\n\n text_reader.SkipAhead(file_object, end)\n\n else:\n odd_line = text_reader.ReadLine(file_object)\n if odd_line:\n if len(odd_line) > 80:\n odd_line = '{0:s}...'.format(odd_line[:77])\n parser_mediator.ProduceExtractionWarning(\n 'unable to parse log line: {0:s}'.format(repr(odd_line)))\n consecutive_line_failures += 1\n if (consecutive_line_failures >\n self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):\n raise errors.UnableToParseFile(\n 'more than {0:d} consecutive failures to parse lines.'.format(\n self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))\n try:\n text_reader.ReadLines(file_object)\n except UnicodeDecodeError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to read lines with error: {0!s}'.format(exception))", "docstring": "Parses a text file-like object using a pyparsing definition.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_object (dfvfs.FileIO): file-like object.\n\nRaises:\n 
UnableToParseFile: when the file cannot be parsed.", "source": "juraj_google_style"} -{"code": "def __init__(self,\n entries: Iterable[Tuple[int, TItem]] = (),\n *,\n drop_duplicate_entries: bool=False):\n\n self._buckets = [] # type: List[List[TItem]]\n self._offset = 0\n self._len = 0\n self._drop_set = (set()\n if drop_duplicate_entries\n else None) # type: Optional[Set[Tuple[int, TItem]]]\n\n for p, e in entries:\n self.enqueue(p, e)", "docstring": "Initializes a new priority queue.\n\nArgs:\n entries: Initial contents of the priority queue.\n drop_duplicate_entries: If set, the priority queue will ignore\n operations that enqueue a (priority, item) pair that is already\n in the priority queue. Note that duplicates of an item may still\n be enqueued, as long as they have different priorities.", "source": "juraj_google_style"} -{"code": "def export(self, top=True):\n\n out = []\n if top:\n out.append(self._internal_name)\n out.append(self._to_str(self.number_of_records_per_hour))\n out.append(self._to_str(self.data_period_name_or_description))\n out.append(self._to_str(self.data_period_start_day_of_week))\n out.append(self._to_str(self.data_period_start_day))\n out.append(self._to_str(self.data_period_end_day))\n return \",\".join(out)", "docstring": "Exports object to its string representation.\n\nArgs:\n top (bool): if True appends `internal_name` before values.\n All non list objects should be exported with value top=True,\n all list objects, that are embedded in as fields inlist objects\n should be exported with `top`=False\n\nReturns:\n str: The objects string representation", "source": "juraj_google_style"} -{"code": "def tuplize(nested):\n\n if isinstance(nested, str):\n return nested\n try:\n return tuple(map(tuplize, nested))\n except TypeError:\n return nested", "docstring": "Recursively converts iterables into tuples.\n\nArgs:\n nested: A nested structure of items and iterables.\n\nReturns:\n A nested structure of items and tuples.", "source": "juraj_google_style"} -{"code": "def _build_projection_expression(clean_table_keys):\n\n projection_expression = ''\n for key in clean_table_keys[:-1]:\n projection_expression += ('{},').format(key)\n projection_expression += clean_table_keys[-1]\n return projection_expression", "docstring": "Given cleaned up keys, this will return a projection expression for\n the dynamodb lookup.\n\nArgs:\n clean_table_keys (dict): keys without the data types attached\n\nReturns:\n str: A projection expression for the dynamodb lookup.", "source": "juraj_google_style"} -{"code": "def append(self, text, afterline=None):\n\n if afterline:\n self._vim.current.buffer.append(text, afterline)\n else:\n self._vim.current.buffer.append(text)", "docstring": "Append text to the current buffer.\n\nArgs:\n text (str or Sequence[str]): One or many lines of text to append.\n afterline (Optional[int]):\n Line number to append after. If 0, text is prepended before the\n first line; if ``None``, at end of the buffer.", "source": "juraj_google_style"} -{"code": "def has_extana(self, cached=True):\n\n if cached and self.hardware != -1:\n return True if (self.hardware & EXT_HW_EXTANA) else False\n\n result = self._check_hardware()\n return True if (result & EXT_HW_EXTANA) != 0 else False", "docstring": "Can be used to check if an SK8-ExtAna device is currently connected.\n NOTE: do not attempt to call while data streaming is active!\n\nArgs:\n cached (bool): if True, use the cached value of the connected hardware\n state rather than querying the device. 
Set to False to force a query.\n\nReturns:\n bool. True if the SK8 currently has an SK8-ExtAna device attached, False otherwise.", "source": "juraj_google_style"} -{"code": "def add_implem(self, transition, attribute, function, **kwargs):\n\n implem = ImplementationProperty(\n field_name=self.state_field,\n transition=transition,\n workflow=self.workflow,\n implementation=function,\n **kwargs)\n self.implementations[transition.name] = implem\n self.transitions_at[transition.name] = attribute\n return implem", "docstring": "Add an implementation.\n\nArgs:\n transition (Transition): the transition for which the implementation\n is added\n attribute (str): the name of the attribute where the implementation\n will be available\n function (callable): the actual implementation function\n **kwargs: extra arguments for the related ImplementationProperty.", "source": "juraj_google_style"} -{"code": "def __call__(self, product):\n\n reviewers = self.reviewers(product)\n Nq = len(reviewers)\n\n if Nq == 1:\n return 0.5\n\n else:\n # Computing the unbiased variance of scores.\n var = np.var([self.review_score(r, product)\n for r in reviewers], ddof=1)\n return np.log(Nq) / (var + 1)", "docstring": "Compute credibility of a given product.\n\nArgs:\n product: An instance of :class:`bipartite.Product`.\n\nReturns:\n The credibility of the product. It is >= 0.5.", "source": "juraj_google_style"} -{"code": "def unique(self, token_list_x, token_list_y):\n\n x = set(list(token_list_x))\n y = set(list(token_list_y))\n return (x, y)", "docstring": "Remove duplicated elements.\n\nArgs:\n token_list_x: [token, token, token, ...]\n token_list_y: [token, token, token, ...]\n\nReturns:\n Tuple(token_list_x, token_list_y)", "source": "juraj_google_style"} -{"code": "def limitReal(x, max_denominator=1000000):\n\n f = Fraction(x).limit_denominator(max_denominator)\n return Real((f.numerator, f.denominator))", "docstring": "Creates an pysmt Real constant from x.\n\nArgs:\n x (number): A number to be cast to a pysmt constant.\n max_denominator (int, optional): The maximum size of the denominator.\n Default 1000000.\n\nReturns:\n A Real constant with the given value and the denominator limited.", "source": "juraj_google_style"} -{"code": "def _ParseShellItem(self, parser_mediator, shell_item):\n\n path_segment = self._ParseShellItemPathSegment(shell_item)\n self._path_segments.append(path_segment)\n\n event_data = shell_item_events.ShellItemFileEntryEventData()\n event_data.origin = self._origin\n event_data.shell_item_path = self.CopyToPath()\n\n if isinstance(shell_item, pyfwsi.file_entry):\n event_data.name = shell_item.name\n\n for extension_block in shell_item.extension_blocks:\n if isinstance(extension_block, pyfwsi.file_entry_extension):\n long_name = extension_block.long_name\n localized_name = extension_block.localized_name\n file_reference = extension_block.file_reference\n if file_reference:\n file_reference = '{0:d}-{1:d}'.format(\n file_reference & 0xffffffffffff, file_reference >> 48)\n\n event_data.file_reference = file_reference\n event_data.localized_name = localized_name\n event_data.long_name = long_name\n\n fat_date_time = extension_block.get_creation_time_as_integer()\n if fat_date_time != 0:\n date_time = dfdatetime_fat_date_time.FATDateTime(\n fat_date_time=fat_date_time)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n fat_date_time = extension_block.get_access_time_as_integer()\n if 
fat_date_time != 0:\n date_time = dfdatetime_fat_date_time.FATDateTime(\n fat_date_time=fat_date_time)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n fat_date_time = shell_item.get_modification_time_as_integer()\n if fat_date_time != 0:\n date_time = dfdatetime_fat_date_time.FATDateTime(\n fat_date_time=fat_date_time)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a shell item.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n shell_item (pyfwsi.item): shell item.", "source": "juraj_google_style"} -{"code": "def resolve_one_of(tags, at_least_one):\n\n if len(tags) < len(at_least_one):\n return None\n for possible_resolution in choose_1_from_each(at_least_one):\n resolution = {}\n pr = possible_resolution[:]\n for entity_type in pr:\n last_end_index = -1\n if entity_type in resolution:\n last_end_index = resolution.get[entity_type][-1].get('end_token')\n tag, value, c = find_first_tag(tags, entity_type, after_index=last_end_index)\n if not tag:\n break\n else:\n if entity_type not in resolution:\n resolution[entity_type] = []\n resolution[entity_type].append(tag)\n if len(resolution) == len(possible_resolution):\n return resolution\n\n return None", "docstring": "This searches tags for Entites in at_least_one and returns any match\n\nArgs:\n tags(list): List of tags with Entities to search for Entities\n at_least_one(list): List of Entities to find in tags\n\nReturns:\n object: returns None if no match is found but returns any match as an object", "source": "juraj_google_style"} -{"code": "def FoldValue(self, value):\n\n if value is False and self._data_type_definition.false_value is not None:\n return self._data_type_definition.false_value\n\n if value is True and self._data_type_definition.true_value is not None:\n return self._data_type_definition.true_value\n\n raise ValueError('No matching True and False values')", "docstring": "Folds the data type into a value.\n\nArgs:\n value (object): value.\n\nReturns:\n object: folded value.\n\nRaises:\n ValueError: if the data type definition cannot be folded into the value.", "source": "juraj_google_style"} -{"code": "def newPacker(mode=PackingMode.Offline, \n bin_algo=PackingBin.BBF, \n pack_algo=MaxRectsBssf,\n sort_algo=SORT_AREA, \n rotation=True):\n\n packer_class = None\n\n # Online Mode\n if mode == PackingMode.Online:\n sort_algo=None\n if bin_algo == PackingBin.BNF:\n packer_class = PackerOnlineBNF\n elif bin_algo == PackingBin.BFF:\n packer_class = PackerOnlineBFF\n elif bin_algo == PackingBin.BBF:\n packer_class = PackerOnlineBBF\n else:\n raise AttributeError(\"Unsupported bin selection heuristic\")\n\n # Offline Mode\n elif mode == PackingMode.Offline:\n if bin_algo == PackingBin.BNF:\n packer_class = PackerBNF\n elif bin_algo == PackingBin.BFF:\n packer_class = PackerBFF\n elif bin_algo == PackingBin.BBF:\n packer_class = PackerBBF\n elif bin_algo == PackingBin.Global:\n packer_class = PackerGlobal\n sort_algo=None\n else:\n raise AttributeError(\"Unsupported bin selection heuristic\")\n\n else:\n raise AttributeError(\"Unknown packing mode.\")\n\n if sort_algo:\n return packer_class(pack_algo=pack_algo, sort_algo=sort_algo, \n rotation=rotation)\n else:\n return 
packer_class(pack_algo=pack_algo, rotation=rotation)", "docstring": "Packer factory helper function\n\nArgs:\n mode (PackingMode): Packing mode\n Online: Rectangles are packed as soon are they are added\n Offline: Rectangles aren't packed untils pack() is called\n bin_algo (PackingBin): Bin selection heuristic\n pack_algo (PackingAlgorithm): Algorithm used\n rotation (boolean): Enable or disable rectangle rotation.\n\nReturns:\n Packer: Initialized packer instance.", "source": "juraj_google_style"} -{"code": "def rewards_to_go(rewards, mask, gamma=0.99):\n r\n B, T = rewards.shape # pylint: disable=invalid-name,unused-variable\n\n masked_rewards = rewards * mask # (B, T)\n\n # We use the following recurrence relation, derived from the equation above:\n #\n # r2g[t+1] = (r2g[t] - r[t]) / gamma\n #\n # This means we'll need to calculate r2g[0] first and then r2g[1] and so on ..\n #\n # **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0\n # and gamma < 1.0, so the division keeps increasing.\n #\n # So we just run the recurrence in reverse, i.e.\n #\n # r2g[t] = r[t] + (gamma*r2g[t+1])\n #\n # This is much better, but might have lost updates since the (small) rewards\n # at earlier time-steps may get added to a (very?) large sum.\n\n # Compute r2g_{T-1} at the start and then compute backwards in time.\n r2gs = [masked_rewards[:, -1]]\n\n # Go from T-2 down to 0.\n for t in reversed(range(T - 1)):\n r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))\n\n # The list should have length T.\n assert T == len(r2gs)\n\n # First we stack them in the correct way to make it (B, T), but these are\n # still from newest (T-1) to oldest (0), so then we flip it on time axis.\n return np.flip(np.stack(r2gs, axis=1), axis=1)", "docstring": "r\"\"\"Computes rewards to go.\n\n Reward to go is defined as follows, the discounted reward that we have to\n yet collect, going forward from this point, i.e.:\n\n r2g_t = \\sum_{l=0}^{\\infty} (\\gamma^{l} * reward_{t+l})\n\nArgs:\n rewards: np.ndarray of shape (B, T) of rewards.\n mask: np.ndarray of shape (B, T) of mask for the rewards.\n gamma: float, discount factor.\n\nReturns:\n rewards to go, np.ndarray of shape (B, T).", "source": "juraj_google_style"} -{"code": "def _QueryHash(self, digest):\n\n if not self._url:\n self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(\n self._protocol, self._host, self._port)\n\n request_data = {self.lookup_hash: digest}\n\n try:\n json_response = self.MakeRequestAndDecodeJSON(\n self._url, 'POST', data=request_data)\n\n except errors.ConnectionError as exception:\n json_response = None\n logger.error('Unable to query Viper with error: {0!s}.'.format(\n exception))\n\n return json_response", "docstring": "Queries the Viper Server for a specfic hash.\n\nArgs:\n digest (str): hash to look up.\n\nReturns:\n dict[str, object]: JSON response or None on error.", "source": "juraj_google_style"} -{"code": "def merge(self, workdir, pot_files, out_dvdb, delete_source=True):\n\n # We work with absolute paths.\n pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]\n if not os.path.isabs(out_dvdb):\n out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))\n\n if self.verbose:\n print(\"Will merge %d files into output DVDB %s\" % (len(pot_files), out_dvdb))\n for i, f in enumerate(pot_files):\n print(\" [%d] %s\" % (i, f))\n\n # Handle the case of a single file since mrgddb uses 1 to denote GS files!\n if len(pot_files) == 1:\n with open(pot_files[0], \"r\") as inh, open(out_dvdb, 
\"w\") as out:\n for line in inh:\n out.write(line)\n return out_dvdb\n\n self.stdin_fname, self.stdout_fname, self.stderr_fname = \\\n map(os.path.join, 3 * [os.path.abspath(workdir)], [\"mrgdvdb.stdin\", \"mrgdvdb.stdout\", \"mrgdvdb.stderr\"])\n\n inp = StringIO()\n inp.write(out_dvdb + \"\\n\") # Name of the output file.\n inp.write(str(len(pot_files)) + \"\\n\") # Number of input POT files.\n\n # Names of the POT files.\n for fname in pot_files:\n inp.write(fname + \"\\n\")\n\n self.stdin_data = [s for s in inp.getvalue()]\n\n with open(self.stdin_fname, \"wt\") as fh:\n fh.writelines(self.stdin_data)\n # Force OS to write data to disk.\n fh.flush()\n os.fsync(fh.fileno())\n\n retcode = self.execute(workdir)\n if retcode == 0 and delete_source:\n # Remove pot files.\n for f in pot_files:\n try:\n os.remove(f)\n except IOError:\n pass\n\n return out_dvdb", "docstring": "Merge POT files containing 1st order DFPT potential\n return the absolute path of the new database in workdir.\n\nArgs:\n delete_source: True if POT1 files should be removed after (successful) merge.", "source": "juraj_google_style"} -{"code": "def __init__(self,\n description=None,\n **kwargs):\n\n super(DisplayAlarm, self).__init__(**kwargs)\n self.description = description", "docstring": "Instantiates a new :class:`ics.alarm.DisplayAlarm`.\n\n Adheres to RFC5545 VALARM standard: http://icalendar.org/iCalendar-RFC-5545/3-6-6-alarm-component.html\n\nArgs:\n description (string) : RFC5545 DESCRIPTION property\n kwargs (dict) : Args to :func:`ics.alarm.Alarm.__init__`", "source": "juraj_google_style"} -{"code": "def parse_raw_fact(raw_fact):\n\n def at_split(string):\n\n result = string.split('@', 1)\n length = len(result)\n if length == 1:\n front, back = result[0].strip(), None\n else:\n front, back = result\n front, back = front.strip(), back.strip()\n return (front, back)\n\n def comma_split(string):\n\n\n result = string.split(',', 1)\n length = len(result)\n if length == 1:\n category, description = result[0].strip(), None\n else:\n category, description = tuple(result)\n category, description = category.strip(), description.strip()\n return (category.strip(), description)\n\n time_info, rest = time_helpers.extract_time_info(raw_fact)\n activity_name, back = at_split(rest)\n\n if back:\n category_name, description = comma_split(back)\n else:\n category_name, description = None, None\n\n return {\n 'timeinfo': time_info,\n 'category': category_name,\n 'activity': activity_name,\n 'description': description,\n }", "docstring": "Extract semantically meaningful sub-components from a ``raw fact`` text.\n\nArgs:\n raw_fact (text_type): ``raw fact`` text to be parsed.\n\nReturns:\n dict: dict with sub-components as values.", "source": "juraj_google_style"} -{"code": "def __init__(self, window, root):\n\n self.root = root\n self.selenium = window.selenium\n self.wait = window.wait\n self.window = window", "docstring": "Create a Region object.\n\nArgs:\n window (:py:class:`BaseWindow`): Window object this region appears\n in.\n root\n (:py:class:`~selenium.webdriver.remote.webelement.WebElement`):\n WebDriver element object that serves as the root for the\n region.", "source": "juraj_google_style"} -{"code": "def glyph_has_ink(font: TTFont, name: Text) -> bool:\n\n if 'glyf' in font:\n return ttf_glyph_has_ink(font, name)\n elif ('CFF ' in font) or ('CFF2' in font):\n return cff_glyph_has_ink(font, name)\n else:\n raise Exception(\"Could not find 'glyf', 'CFF ', or 'CFF2' table.\")", "docstring": "Checks if specified glyph 
has any ink.\n\n That is, that it has at least one defined contour associated.\n Composites are considered to have ink if any of their components have ink.\n\nArgs:\n font: the font\n glyph_name: The name of the glyph to check for ink.\n\nReturns:\n True if the font has at least one contour associated with it.", "source": "juraj_google_style"} -{"code": "def Relay(self, inventory):\n\n inventory = InvPayload(type=inventory.InventoryType, hashes=[inventory.Hash.ToBytes()])\n m = Message(\"inv\", inventory)\n self.SendSerializedMessage(m)\n\n return True", "docstring": "Wrap the inventory in a InvPayload object and send it over the write to the remote node.\n\nArgs:\n inventory:\n\nReturns:\n bool: True (fixed)", "source": "juraj_google_style"} -{"code": "def get(self, status_item):\n\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\n lg.setLevel(self.log_level)\n\n sparql = \n value = self.conn.query(sparql=sparql.format(self.group, status_item))\n if len(value) > 0 and \\\n cbool(value[0].get('loaded',{}).get(\"value\",False)):\n return True\n else:\n return False", "docstring": "queries the database and returns that status of the item.\n\nArgs:\n status_item: the name of the item to check", "source": "juraj_google_style"} -{"code": "def check_panels(adapter, panels, default_panels=None):\n\n default_panels = default_panels or []\n panels_exist = True\n for panel in default_panels:\n if panel not in panels:\n log.warning(\"Default panels have to be defined in panels\")\n panels_exist = False\n for panel in panels:\n if not adapter.gene_panel(panel):\n log.warning(\"Panel {} does not exist in database\".format(panel))\n panels_exist = False\n return panels_exist", "docstring": "Make sure that the gene panels exist in the database\n Also check if the default panels are defined in gene panels\n\nArgs:\n adapter(MongoAdapter)\n panels(list(str)): A list with panel names\n\nReturns:\n panels_exists(bool)", "source": "juraj_google_style"} -{"code": "def are_equal(self, sp1, sp2):\n\n comp1 = Composition(sp1)\n comp2 = Composition(sp2)\n return comp1.get_el_amt_dict() == comp2.get_el_amt_dict()", "docstring": "True if element:amounts are exactly the same, i.e.,\n oxidation state is not considered.\n\nArgs:\n sp1: First species. A dict of {specie/element: amt} as per the\n definition in Site and PeriodicSite.\n sp2: Second species. 
A dict of {specie/element: amt} as per the\n definition in Site and PeriodicSite.\n\nReturns:\n Boolean indicating whether species are the same based on element\n and amounts.", "source": "juraj_google_style"} -{"code": "def key_for_namespace(cls, namespace):\n\n if namespace:\n return model.Key(cls.KIND_NAME, namespace)\n else:\n return model.Key(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID)", "docstring": "Return the Key for a namespace.\n\nArgs:\n namespace: A string giving the namespace whose key is requested.\n\nReturns:\n The Key for the namespace.", "source": "juraj_google_style"} -{"code": "def _create_min_max_boundaries(\n max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE):\n\n # Create bucket boundaries list by scaling the previous boundary or adding 1\n # (to ensure increasing boundary sizes).\n bucket_boundaries = []\n x = min_boundary\n while x < max_length:\n bucket_boundaries.append(x)\n x = max(x + 1, int(x * boundary_scale))\n\n # Create min and max boundary lists from the initial list.\n buckets_min = [0] + bucket_boundaries\n buckets_max = bucket_boundaries + [max_length + 1]\n return buckets_min, buckets_max", "docstring": "Create min and max boundary lists up to max_length.\n\n For example, when max_length=24, min_boundary=4 and boundary_scale=2, the\n returned values will be:\n buckets_min = [0, 4, 8, 16, 24]\n buckets_max = [4, 8, 16, 24, 25]\n\nArgs:\n max_length: The maximum length of example in dataset.\n min_boundary: Minimum length in boundary.\n boundary_scale: Amount to scale consecutive boundaries in the list.\n\nReturns:\n min and max boundary lists", "source": "juraj_google_style"} -{"code": "def _ParsePropertiesXMLFile(self, xml_data):\n\n xml_root = ElementTree.fromstring(xml_data)\n\n properties = {}\n for xml_element in xml_root.iter():\n if not xml_element.text:\n continue\n\n # The property name is formatted as: {URL}name\n # For example: {http://purl.org/dc/terms/}modified\n _, _, name = xml_element.tag.partition('}')\n\n # Do not including the 'lpstr' attribute because it is very verbose.\n if name == 'lpstr':\n continue\n\n property_name = self._PROPERTY_NAMES.get(name, None)\n if not property_name:\n property_name = self._FormatPropertyName(name)\n\n properties[property_name] = xml_element.text\n\n return properties", "docstring": "Parses a properties XML file.\n\nArgs:\n xml_data (bytes): data of a _rels/.rels XML file.\n\nReturns:\n dict[str, object]: properties.\n\nRaises:\n zipfile.BadZipfile: if the properties XML file cannot be read.", "source": "juraj_google_style"} -{"code": "def download_uniprot_file(uniprot_id, filetype, outdir='', force_rerun=False):\n\n\n my_file = '{}.{}'.format(uniprot_id, filetype)\n url = 'http://www.uniprot.org/uniprot/{}'.format(my_file)\n outfile = op.join(outdir, my_file)\n\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n urlretrieve(url, outfile)\n\n return outfile", "docstring": "Download a UniProt file for a UniProt ID/ACC\n\nArgs:\n uniprot_id: Valid UniProt ID\n filetype: txt, fasta, xml, rdf, or gff\n outdir: Directory to download the file\n\nReturns:\n str: Absolute path to file", "source": "juraj_google_style"} -{"code": "def load(path):\n\n\n importpath = path.replace(\"/\", \".\").replace(\"\\\\\", \".\")\n if importpath[-3:] == \".py\":\n importpath = importpath[:-3]\n\n try:\n importlib.import_module(importpath)\n except (ModuleNotFoundError, TypeError):\n exec(open(path).read())", "docstring": "Helper function that tries to load a filepath (or python module 
notation)\n as a python module and on failure `exec` it.\n\nArgs:\n path (str): Path or module to load\n\n The function tries to import `example.module` when either `example.module`,\n `example/module` or `example/module.py` is given.", "source": "juraj_google_style"} -{"code": "def draw_layer(ax, layer):\n\n ax.set_aspect('equal', 'datalim')\n ax.plot(*layer)\n ax.axis('off')", "docstring": "Draws a layer on the given matplotlib axis.\n\nArgs:\n ax (axis): the matplotlib axis to draw on\n layer (layer): the layers to plot", "source": "juraj_google_style"} -{"code": "def Process(self, parser_mediator, root_item=None, **kwargs):\n\n # This will raise if unhandled keyword arguments are passed.\n super(DefaultOLECFPlugin, self).Process(parser_mediator, **kwargs)\n\n if not root_item:\n raise ValueError('Root item not set.')\n\n if not self._ParseItem(parser_mediator, root_item):\n event_data = OLECFItemEventData()\n event_data.name = root_item.name\n event_data.offset = 0\n event_data.size = root_item.size\n\n # If no event was produced, produce at least one for the root item.\n date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an OLECF file.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n root_item (Optional[pyolecf.item]): root item of the OLECF file.\n\nRaises:\n ValueError: If the root item is not set.", "source": "juraj_google_style"} -{"code": "def reset(self, blocking=True):\n\n promise = self.call('reset')\n if blocking:\n return promise()\n else:\n return promise", "docstring": "Reset the environment.\n\nArgs:\n blocking: Whether to wait for the result.\n\nReturns:\n New observation when blocking, otherwise callable that returns the new\n observation.", "source": "juraj_google_style"} -{"code": "def extract_keywords_from_text(index_page, no_items=5):\n\n index_page = MLStripper.strip_tags(index_page)\n tokenized_index = TextBlob(index_page).lower()\n\n def to_str(key):\n if isinstance(key, unicode):\n return key.encode(\"utf-8\")\n\n return key\n\n present_keywords = [\n KEYWORDS_LOWER[key]\n for key in KEYWORDS_LOWER.keys()\n if len(key) > 3 and key in tokenized_index\n ]\n\n def to_source_string(key):\n source = \"Keyword analysis\"\n try:\n return SourceString(key, source)\n except UnicodeEncodeError:\n return SourceString(key.encode(\"utf-8\"), source)\n\n multi_keywords = [\n to_source_string(key)\n for key in present_keywords\n if tokenized_index.words.count(key) >= 1\n ]\n\n multi_keywords = sorted(multi_keywords, key=lambda x: len(x), reverse=True)\n\n if len(multi_keywords) > no_items:\n return multi_keywords[:no_items]\n\n return multi_keywords", "docstring": "Try to process text on the `index_page` deduce the keywords and then try\n to match them on the Aleph's dataset.\n\n Function returns maximally `no_items` items, to prevent spamming the user.\n\nArgs:\n index_page (str): Content of the page as UTF-8 string\n no_items (int, default 5): Number of items to return.\n\nReturns:\n list: List of :class:`.SourceString` objects.", "source": "juraj_google_style"} -{"code": "def decode_body(headers: MutableMapping, body: bytes) -> dict:\n\n\n type_, encoding = parse_content_type(headers)\n decoded_body = body.decode(encoding)\n\n # There is one api that just returns `ok` instead of json. 
In order to have a consistent API we decided to modify the returned payload into a dict.\n if type_ == \"application/json\":\n payload = json.loads(decoded_body)\n else:\n if decoded_body == \"ok\":\n payload = {\"ok\": True}\n else:\n payload = {\"ok\": False, \"data\": decoded_body}\n\n return payload", "docstring": "Decode the response body\n\n For 'application/json' content-type load the body as a dictionary\n\nArgs:\n headers: Response headers\n body: Response body\n\nReturns:\n decoded body", "source": "juraj_google_style"} -{"code": "def from_hoy(cls, hoy, leap_year=False):\n\n return cls.from_moy(round(hoy * 60), leap_year)", "docstring": "Create Ladybug Datetime from an hour of the year.\n\nArgs:\n hoy: A float value 0 <= and < 8760", "source": "juraj_google_style"} -{"code": "def getHostCertPath(self, name):\n\n path = s_common.genpath(self.certdir, 'hosts', '%s.crt' % name)\n if not os.path.isfile(path):\n return None\n return path", "docstring": "Gets the path to a host certificate.\n\nArgs:\n name (str): The name of the host keypair.\n\nExample:\n Get the path to the host certificate for the host \"myhost\":\n\n mypath = cdir.getHostCertPath('myhost')\n\nReturns:\n str: The path if exists.", "source": "juraj_google_style"} -{"code": "def SetEnvironmentVariable(self, name, value):\n\n if isinstance(value, py2to3.STRING_TYPES):\n value = self._PathStripPrefix(value)\n\n if value is not None:\n self._environment_variables[name.upper()] = value", "docstring": "Sets an environment variable in the Windows path helper.\n\nArgs:\n name (str): name of the environment variable without enclosing\n %-characters, e.g. SystemRoot as in %SystemRoot%.\n value (str): value of the environment variable.", "source": "juraj_google_style"} -{"code": "def create_and_register_access2000_db(filename: str,\n dsn: str,\n description: str) -> bool:\n\n fullfilename = os.path.abspath(filename)\n create_string = fullfilename + \" General\"\n # ... 
filename, space, sort order (\"General\" for English)\n return (create_user_dsn(access_driver, CREATE_DB4=create_string) and\n register_access_db(filename, dsn, description))", "docstring": "(Windows only.)\n Creates a Microsoft Access 2000 database and registers it with ODBC.\n\nArgs:\n filename: filename of the database to create\n dsn: ODBC data source name to create\n description: description of the database\n\nReturns:\n bool: was the DSN created?", "source": "juraj_google_style"} -{"code": "def query_put_bounders(query, partition_column, start, end):\n\n where = \" WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}\".format(\n partition_column, start, end\n )\n query_with_bounders = \"SELECT * FROM ({0}) AS TMP_TABLE {1}\".format(query, where)\n return query_with_bounders", "docstring": "Put bounders in the query\n\nArgs:\n query: SQL query string\n partition_column: partition_column name\n start: lower_bound\n end: upper_bound\n\nReturns:\n Query with bounders", "source": "juraj_google_style"} -{"code": "def contains(self, key):\n\n try:\n self._api.objects_get(self._bucket, key)\n except datalab.utils.RequestException as e:\n if e.status == 404:\n return False\n raise e\n except Exception as e:\n raise e\n return True", "docstring": "Checks if the specified item exists.\n\nArgs:\n key: the key of the item to lookup.\n\nReturns:\n True if the item exists; False otherwise.\n\nRaises:\n Exception if there was an error requesting information about the item.", "source": "juraj_google_style"} -{"code": "def diff(self, container):\n\n return self._result(\n self._get(self._url(\"/containers/{0}/changes\", container)), True\n )", "docstring": "Inspect changes on a container's filesystem.\n\nArgs:\n container (str): The container to diff\n\nReturns:\n (str)\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "juraj_google_style"} -{"code": "def sg_lookup(tensor, opt):\n r\n assert opt.emb is not None, 'emb is mandatory.'\n return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)", "docstring": "r\"\"\"Looks up the `tensor`, which is the embedding matrix.\n\nArgs:\n tensor: A tensor ( automatically given by chain )\n opt:\n emb: A 2-D `Tensor`. 
An embedding matrix.\n name: If provided, replace current tensor's name.\n\nReturns:\n A `Tensor`.", "source": "juraj_google_style"} -{"code": "def __init__(self, args, assignment):\n\n self.args = args\n self.assignment = assignment", "docstring": "Constructor.\n\n PARAMETERS:\n args -- Namespace; parsed command line arguments by argparse.\n assignment -- dict; general information about the assignment.", "source": "juraj_google_style"} -{"code": "def setup_ui(uifile, base_instance=None):\n\n ui = QtCompat.loadUi(uifile) # Qt.py mapped function\n if not base_instance:\n return ui\n else:\n for member in dir(ui):\n if not member.startswith('__') and \\\n member is not 'staticMetaObject':\n setattr(base_instance, member, getattr(ui, member))\n return ui", "docstring": "Load a Qt Designer .ui file and returns an instance of the user interface\n\nArgs:\n uifile (str): Absolute path to .ui file\n base_instance (QWidget): The widget into which UI widgets are loaded\n\nReturns:\n QWidget: the base instance", "source": "juraj_google_style"} -{"code": "def tfidf_corpus(docs=CORPUS):\n\n vectorizer = TfidfVectorizer()\n vectorizer = vectorizer.fit(docs)\n return vectorizer, vectorizer.transform(docs)", "docstring": "Count the words in a corpus and return a TfidfVectorizer() as well as all the TFIDF vecgtors for the corpus\n\nArgs:\n docs (iterable of strs): a sequence of documents (strings)\n\nReturns:\n (TfidfVectorizer, tfidf_vectors)", "source": "juraj_google_style"} -{"code": "def get_realtime_urls(admin_view_func=lambda x: x):\n\n from .widgets import REALTIME_WIDGETS\n return [url(w.url_regex, admin_view_func(w.as_view()), name=w.url_name)\n for w in REALTIME_WIDGETS]", "docstring": "Get the URL for real-time widgets.\n\nArgs:\n admin_view_func (callable): an admin_view method from an AdminSite\n instance. 
By default: identity.\n\nReturns:\n list: the list of the real-time URLs as django's ``url()``.", "source": "juraj_google_style"} -{"code": "def _add_exac(self, variant_obj, info_dict):\n\n exac = None\n exac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']\n for key in exac_keys:\n if info_dict.get(key):\n exac = float(info_dict[key])\n #If not found in vcf search transcripts\n if not exac:\n for transcript in variant_obj.transcripts:\n exac_raw = transcript.ExAC_MAF\n if exac_raw:\n exac = float(exac_raw.split(':')[-1])\n\n if exac:\n variant_obj.add_frequency('ExAC', exac)", "docstring": "Add the gmaf frequency\n\nArgs:\n variant_obj (puzzle.models.Variant)\n info_dict (dict): A info dictionary", "source": "juraj_google_style"} -{"code": "def point_consensus(self, consensus_type):\n\n if \"mean\" in consensus_type:\n consensus_data = np.mean(self.data, axis=0)\n elif \"std\" in consensus_type:\n consensus_data = np.std(self.data, axis=0)\n elif \"median\" in consensus_type:\n consensus_data = np.median(self.data, axis=0)\n elif \"max\" in consensus_type:\n consensus_data = np.max(self.data, axis=0)\n elif \"percentile\" in consensus_type:\n percentile = int(consensus_type.split(\"_\")[1])\n consensus_data = np.percentile(self.data, percentile, axis=0)\n else:\n consensus_data = np.zeros(self.data.shape[1:])\n consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name,\n self.run_date, self.variable, self.start_date, self.end_date, self.units)\n return consensus", "docstring": "Calculate grid-point statistics across ensemble members.\n\nArgs:\n consensus_type: mean, std, median, max, or percentile_nn\n\nReturns:\n EnsembleConsensus containing point statistic", "source": "juraj_google_style"} -{"code": "def DocumentPackages(self, packages, index_base=None, showprivate=False,\n notify=True, showinh=False, intro_pages=None,\n append_material=None, extra=None):\n\n if index_base is None:\n gram = ''\n if isinstance(packages, list) and len(packages) > 1:\n gram = 's'\n if len(packages) < 3:\n names = ' and '.join(['``%s``' % p.__name__ for p in packages])\n else:\n names = ['``%s``' % p.__name__ for p in packages]\n names[-1] = ' and %s' % names[-1]\n names = ', '.join(names)\n else:\n names = '``%s``' % packages.__name__\n index = SAMPLE_INDEX.format(names, gram)\n else:\n index = self.OpenIndex(index_base)\n app = self._DocPackageFromTop(packages, showprivate=showprivate, showinh=showinh)\n index += self._GenerateStaticsTable()\n index += \n if intro_pages is not None:\n if isinstance(intro_pages, str):\n intro_pages = [intro_pages]\n for page in intro_pages:\n index += ' {}\\n'.format(page.strip())\n index += '\\n'\n if append_material is not None:\n index += append_material\n index += app\n\n if extra is not None:\n index += extra\n if notify:\n index += \n self.WriteIndex(index)\n return None", "docstring": "This is the high level API to use to generate documentation pages for any given package(s).\n\nArgs:\n packages (list(module)): A list of packages that contain submodules to document\n index_base (str): The index page file name. 
This content will be appended\n showprivate (bool): A flag for whether or not to display private members", "source": "juraj_google_style"} -{"code": "def trim_docstring(docstring):\n\n if not docstring:\n return ''\n\n # If you've got a line longer than this you have other problems...\n max_indent = 1 << 29\n\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = docstring.expandtabs().splitlines()\n\n # Determine minimum indentation (first line doesn't count):\n indent = max_indent\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < max_indent:\n for line in lines[1:]:\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n return '\\n'.join(trimmed)", "docstring": "Removes indentation from triple-quoted strings.\n\n This is the function specified in PEP 257 to handle docstrings:\n https://www.python.org/dev/peps/pep-0257/.\n\nArgs:\n docstring: str, a python docstring.\n\nReturns:\n str, docstring with indentation removed.", "source": "juraj_google_style"} -{"code": "def __init__(self, name, description, creator='', raw={}):\n\n\n self.name = name\n self.description = description\n self.creator = creator\n self.raw = raw", "docstring": "Constructor.\n\nArgs:\n name (string): Name of resource.\n description (string): Description of resource.\n creator (optional[string]): Resource creator.\n raw (optional[dictionary]): Holds JSON data returned by the Boss API on a POST (create) or GET operation.", "source": "juraj_google_style"} -{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n\n super(DataTypeDefinitionWithMembers, self).__init__(\n name, aliases=aliases, description=description, urls=urls)\n self._byte_size = None\n self.members = []\n self.sections = []", "docstring": "Initializes a data type definition.\n\nArgs:\n name (str): name.\n aliases (Optional[list[str]]): aliases.\n description (Optional[str]): description.\n urls (Optional[list[str]]): URLs.", "source": "juraj_google_style"} -{"code": "def easeInBack(n, s=1.70158):\n\n _checkRange(n)\n return n * n * ((s + 1) * n - s)", "docstring": "A tween function that backs up first at the start and then goes to the destination.\n\nArgs:\n n (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n (float) The line progress, starting at 0.0 and ending at 1.0. 
Suitable for passing to getPointOnLine().", "source": "juraj_google_style"} -{"code": "def get_arrive_stop(self, **kwargs):\n\n # Endpoint parameters\n params = {\n 'idStop': kwargs.get('stop_number'),\n 'cultureInfo': util.language_code(kwargs.get('lang'))\n }\n\n # Request\n result = self.make_request('geo', 'get_arrive_stop', **params)\n\n # Funny endpoint, no status code\n if not util.check_result(result, 'arrives'):\n return False, 'UNKNOWN ERROR'\n\n # Parse\n values = util.response_list(result, 'arrives')\n return True, [emtype.Arrival(**a) for a in values]", "docstring": "Obtain bus arrival info in target stop.\n\nArgs:\n stop_number (int): Stop number to query.\n lang (str): Language code (*es* or *en*).\n\nReturns:\n Status boolean and parsed response (list[Arrival]), or message string\n in case of error.", "source": "juraj_google_style"} -{"code": "def get_absl_log_prefix(record):\n\n created_tuple = time.localtime(record.created)\n created_microsecond = int(record.created % 1.0 * 1e6)\n\n critical_prefix = ''\n level = record.levelno\n if _is_non_absl_fatal_record(record):\n # When the level is FATAL, but not logged from absl, lower the level so\n # it's treated as ERROR.\n level = logging.ERROR\n critical_prefix = _CRITICAL_PREFIX\n severity = converter.get_initial_for_level(level)\n\n return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (\n severity,\n created_tuple.tm_mon,\n created_tuple.tm_mday,\n created_tuple.tm_hour,\n created_tuple.tm_min,\n created_tuple.tm_sec,\n created_microsecond,\n _get_thread_id(),\n record.filename,\n record.lineno,\n critical_prefix)", "docstring": "Returns the absl log prefix for the log record.\n\nArgs:\n record: logging.LogRecord, the record to get prefix for.", "source": "juraj_google_style"} -{"code": "def compress(a, b):\n\n from difflib import ndiff\n left = a.splitlines(1) if isinstance(a, string_types) else a\n right = b.splitlines(1) if isinstance(b, string_types) else b\n ldiff = list(ndiff(left, right))\n\n result = {}\n latest = None \n combo = None\n icombo = 0\n iorig = 0\n\n for i, line in enumerate(ldiff):\n cs = [l[0] for l in ldiff[i:min((i+4, len(ldiff)))]]\n if cs[0] != ' ':\n #Initialize a new entry in the diff list.\n if latest is None:\n latest = iorig\n result[latest] = []\n\n #We have to be careful. At a minimum, there may be a '-' or a '+' when the lines are \n #completely added or deleted. When they are *altered*, then we also expect one or\n #more '?' lines showing the differences.\n if combo is None:\n if cs[0] == '-':\n #Check whether the next lines have one of these combinations:\n if (len(cs) >=3 and cs[1] == '+' and cs[2] == '?'):\n combo = 3\n elif (len(cs) >= 4 and cs[1] == '?' 
and cs[2] == '+'\n and cs[3] == '?'):\n combo = 4\n else:\n #This is a stand-alone deletion.\n combo = 1\n elif cs[0] == '+':\n #This is for the stand-alone addition.\n combo = 1\n\n if icombo < combo:\n result[latest].append(line)\n icombo += 1\n\n if icombo == combo:\n if combo > 1:\n latest = None\n combo = None\n icombo = 0\n if cs[0] != '+':\n iorig += 1\n else:\n latest = None\n iorig += 1\n\n return result", "docstring": "Performs the *compressed* diff of `a` and `b` such that the original\n contents of the :func:`difflib.ndiff` call can be reconstructed using\n :func:`~acorn.logging.diff.restore`.\n\nArgs:\n a (str or list): *original* string or list of strings to diff.\n b (str or list): *edited* string or list of strings to diff.", "source": "juraj_google_style"} -{"code": "def fetch(self, payment_id, data={}, **kwargs):\n\n return super(Payment, self).fetch(payment_id, data, **kwargs)", "docstring": "Fetch Payment for given Id\n\nArgs:\n payment_id : Id for which payment object has to be retrieved\n\nReturns:\n Payment dict for given payment Id", "source": "juraj_google_style"} -{"code": "def run_bottleneck_on_image(sess, image_data, image_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n\n # First decode the JPEG image, resize it, and rescale the pixel values.\n resized_input_values = sess.run(decoded_image_tensor,\n {image_data_tensor: image_data})\n # Then run it through the recognition network.\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: resized_input_values})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values", "docstring": "Runs inference on an image to extract the 'bottleneck' summary layer.\n\nArgs:\n sess: Current active TensorFlow Session.\n image_data: String of raw JPEG data.\n image_data_tensor: Input data layer in the graph.\n decoded_image_tensor: Output of initial image resizing and preprocessing.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: Layer before the final softmax.\n\nReturns:\n Numpy array of bottleneck values.", "source": "juraj_google_style"} -{"code": "def piola_kirchoff_1(self, def_grad):\n\n if not self.is_symmetric:\n raise ValueError(\"The stress tensor is not symmetric, \\\n PK stress is based on a symmetric stress tensor.\")\n def_grad = SquareTensor(def_grad)\n return def_grad.det*np.dot(self, def_grad.inv.trans)", "docstring": "calculates the first Piola-Kirchoff stress\n\nArgs:\n def_grad (3x3 array-like): deformation gradient tensor", "source": "juraj_google_style"} -{"code": "def subprogram_signature(vo, fullname=None):\n\n\n if fullname is None:\n fullname = vo.name\n\n if isinstance(vo, VhdlFunction):\n plist = ','.join(p.data_type for p in vo.parameters)\n sig = '{}[{} return {}]'.format(fullname, plist, vo.return_type)\n else: # procedure\n plist = ','.join(p.data_type for p in vo.parameters)\n sig = '{}[{}]'.format(fullname, plist)\n\n return sig", "docstring": "Generate a signature string\n\nArgs:\n vo (VhdlFunction, VhdlProcedure): Subprogram object\n\nReturns:\n Signature string.", "source": "juraj_google_style"} -{"code": "def __init__(self, custID):\n\n self.custID = custID\n self.addr1 = None\n self.addr2 = None\n self.city = None\n self.postal = None\n self.province = None\n self.country = None\n self.name = None\n self.phone = None\n self.recordID = None", "docstring": "__init__\n\n Defines attributes for Personator Object.\n\nArgs:\n custID (str): ID for Melissa Data account", "source": 
"juraj_google_style"} -{"code": "def PilToRgb(pil):\n\n r = 0xff & pil\n g = 0xff & (pil >> 8)\n b = 0xff & (pil >> 16)\n return tuple((v / 255.0 for v in (r, g, b)))", "docstring": "Convert the color from a PIL-compatible integer to RGB.\n\n Parameters:\n pil: a PIL compatible color representation (0xBBGGRR)\n\nReturns:\n The color as an (r, g, b) tuple in the range:\n the range:\n r: [0...1]\n g: [0...1]\n b: [0...1]\n\n >>> '(%g, %g, %g)' % Color.PilToRgb(0x0080ff)\n '(1, 0.501961, 0)'", "source": "juraj_google_style"} -{"code": "def insert(self, part):\n\n params = {k: str(v) for k,v in part.params.items()}\n res=c.create_assembly_instance(self.uri.as_dict(), part.uri.as_dict(), params)\n return res", "docstring": "Insert a part into this assembly.\n\nArgs:\n - part (onshapepy.part.Part) A Part instance that will be inserted.\n\nReturns:\n - requests.Response: Onshape response data", "source": "juraj_google_style"} -{"code": "def Parse(self, filename, feed):\n\n dom = minidom.parse(filename)\n self.ParseDom(dom, feed)", "docstring": "Reads the kml file, parses it and updated the Google transit feed\n object with the extracted information.\n\nArgs:\n filename - kml file name\n feed - an instance of Schedule class to be updated", "source": "juraj_google_style"} -{"code": "def locate_resource(name, lang, filter=None):\n\n task_dir = resource_dir.get(name, name)\n package_id = u\"{}.{}\".format(task_dir, lang)\n p = path.join(polyglot_path, task_dir, lang)\n if not path.isdir(p):\n if downloader.status(package_id) != downloader.INSTALLED:\n raise ValueError(\"This resource is available in the index \"\n \"but not downloaded, yet. Try to run\\n\\n\"\n \"polyglot download {}\".format(package_id))\n return path.join(p, os.listdir(p)[0])", "docstring": "Return filename that contains specific language resource name.\n\nArgs:\n name (string): Name of the resource.\n lang (string): language code to be loaded.", "source": "juraj_google_style"} -{"code": "def register_flag_by_module_id(self, module_id, flag):\n\n flags_by_module_id = self.flags_by_module_id_dict()\n flags_by_module_id.setdefault(module_id, []).append(flag)", "docstring": "Records the module that defines a specific flag.\n\nArgs:\n module_id: int, the ID of the Python module.\n flag: Flag, the Flag instance that is key to the module.", "source": "juraj_google_style"} -{"code": "def get_servo_status(self):\n\n data = []\n data.append(0x09)\n data.append(self.servoid)\n data.append(RAM_READ_REQ)\n data.append(STATUS_ERROR_RAM)\n data.append(BYTE1)\n send_data(data)\n\n rxdata = []\n try:\n rxdata = SERPORT.read(12)\n return ord(rxdata[9])&0xFF\n except:\n raise HerkulexError(\"could not communicate with motors\")", "docstring": "Get the error status of servo\n\n This function gets the error status (if any) of the servo\n\nArgs:\n none\n\nReturns:\n int: an integer corresponding to the servo status\n * refer datasheet", "source": "juraj_google_style"} -{"code": "def generate(cls, strategy, **kwargs):\n\n assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)\n action = getattr(cls, strategy)\n return action(**kwargs)", "docstring": "Generate a new instance.\n\n The instance will be created with the given strategy (one of\n BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).\n\nArgs:\n strategy (str): the strategy to use for generating the instance.\n\nReturns:\n object: the generated instance", "source": "juraj_google_style"} -{"code": "def unpackVersion(ver):\n\n major = (ver >> 20 * 2) & mask20\n minor = 
(ver >> 20) & mask20\n patch = ver & mask20\n return major, minor, patch", "docstring": "Unpack a system normalized integer representing a softare version into its component parts.\n\nArgs:\n ver (int): System normalized integer value to unpack into a tuple.\n\nReturns:\n (int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.", "source": "juraj_google_style"} -{"code": "def exp(vector):\n\n weld_type = None\n if isinstance(vector, LazyOpResult):\n weld_type = vector.weld_type\n vector = vector.expr\n elif isinstance(vector, np.ndarray):\n weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[\n str(vector.dtype)]\n return NumpyArrayWeld(numpy_weld_impl.exp(vector, weld_type), WeldDouble())", "docstring": "Computes a per-element exponent of the passed-in vector.\n\nArgs:\n vector (TYPE): Description", "source": "juraj_google_style"} -{"code": "def multiply(self, other):\n\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n return Operator(other * self.data, self.input_dims(),\n self.output_dims())", "docstring": "Return the operator self + other.\n\nArgs:\n other (complex): a complex number.\n\nReturns:\n Operator: the operator other * self.\n\nRaises:\n QiskitError: if other is not a valid complex number.", "source": "juraj_google_style"} -{"code": "def json_dumps(self, data):\n\n return json.dumps(\n data, \n separators=(',', ':'), \n sort_keys=True, \n cls=self.json_encoder, \n ensure_ascii=False\n ).encode('utf8')", "docstring": "Standardized json.dumps function with separators and sorted keys set\n\nArgs:\n data (dict or list): data to be dumped\n\nReturns:\n string: json", "source": "juraj_google_style"} -{"code": "def remove_team_member(self, account_id=None, email_address=None):\n\n return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id)", "docstring": "Remove a user from your Team\n\nArgs:\n account_id (str): The id of the account of the user to remove from your team.\n\n email_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided.\n\nReturns:\n A Team object", "source": "juraj_google_style"} -{"code": "def invitation_backend(backend=None, namespace=None):\n # type: (Optional[Text], Optional[Text]) -> BaseBackend\n\n backend = backend or ORGS_INVITATION_BACKEND\n class_module, class_name = backend.rsplit(\".\", 1)\n mod = import_module(class_module)\n return getattr(mod, class_name)(namespace=namespace)", "docstring": "Returns a specified invitation backend\n\nArgs:\n backend: dotted path to the invitation backend class\n namespace: URL namespace to use\n\nReturns:\n an instance of an InvitationBackend", "source": "juraj_google_style"} -{"code": "def entropy(rho: Density, base: float = None) -> float:\n\n op = asarray(rho.asoperator())\n probs = np.linalg.eigvalsh(op)\n probs = np.maximum(probs, 0.0) # Compensate for floating point errors\n return scipy.stats.entropy(probs, base=base)", "docstring": "Returns the von-Neumann entropy of a mixed quantum state.\n\nArgs:\n rho: A density matrix\n base: Optional logarithm base. Default is base e, and entropy is\n measures in nats. 
For bits set base to 2.\n\nReturns:\n The von-Neumann entropy of rho", "source": "juraj_google_style"} -{"code": "def true_num_reactions(model, custom_spont_id=None):\n\n true_num = 0\n for rxn in model.reactions:\n if len(rxn.genes) == 0:\n continue\n if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id):\n continue\n else:\n true_num += 1\n return true_num", "docstring": "Return the number of reactions associated with a gene.\n\nArgs:\n model (Model):\n custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\nReturns:\n int: Number of reactions associated with a gene", "source": "juraj_google_style"} -{"code": "def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir':\n\n if case_sensitive:\n return PrettyDir(\n self.obj, [pattr for pattr in self.pattrs if term in pattr.name]\n )\n else:\n term = term.lower()\n return PrettyDir(\n self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()]\n )", "docstring": "Searches for names that match some pattern.\n\nArgs:\n term: String used to match names. A name is returned if it matches\n the whole search term.\n case_sensitive: Boolean to match case or not, default is False\n (case insensitive).\n\nReturns:\n A PrettyDir object with matched names.", "source": "juraj_google_style"} -{"code": "def get_column(self, X, column):\n\n if isinstance(X, pd.DataFrame):\n return X[column].values\n\n return X[:, column]", "docstring": "Return a column of the given matrix.\n\nArgs:\n X: `numpy.ndarray` or `pandas.DataFrame`.\n column: `int` or `str`.\n\nReturns:\n np.ndarray: Selected column.", "source": "juraj_google_style"} -{"code": "def replace_case(self, case_obj):\n\n # Todo: Figure out and describe when this method destroys a case if invoked instead of\n # update_case\n LOG.info(\"Saving case %s\", case_obj['_id'])\n # update updated_at of case to \"today\"\n\n case_obj['updated_at'] = datetime.datetime.now(),\n\n updated_case = self.case_collection.find_one_and_replace(\n {'_id': case_obj['_id']},\n case_obj,\n return_document=pymongo.ReturnDocument.AFTER\n )\n\n return updated_case", "docstring": "Replace a existing case with a new one\n\n Keeps the object id\n\nArgs:\n case_obj(dict)\n\nReturns:\n updated_case(dict)", "source": "juraj_google_style"} -{"code": "def __init__(self, **sections):\n\n self._sections = []\n for sct_name, sct_meta in sections.items():\n if _is_valid(sct_name):\n setattr(self, sct_name, Section(**sct_meta.def_))\n self._sections.append(sct_name)\n else:\n raise error.SectionError(sct_name)\n self._parser = None\n self._nosub_valid = False\n self._subcmds = {}\n self._config_files = ()", "docstring": "Initialization of instances.\n\nArgs:\n sections (:class:`~loam.manager.Section`): section metadata. The\n name of each *section* is the name of the keyword argument\n passed on to this function. 
Section names should be valid\n identifiers, otherwise a :class:`~loam.error.SectionError` is\n raised.", "source": "juraj_google_style"} -{"code": "def rotate(p, rad, o=(0, 0)):\n\n v = vector(o, p)\n fx = lambda x, y, d: x * cos(d) - y * sin(d)\n fy = lambda x, y, d: x * sin(d) + y * cos(d)\n rv = fx(v[0], v[1], rad), fy(v[0], v[1], rad)\n return translate(rv, o)", "docstring": "rotate vector\n\nArgs:\n p: point (x, y)\n rad: angle(radian)\n o: origin (x, y)", "source": "juraj_google_style"} -{"code": "def new(self, boot_system_id):\n # type: (bytes) -> None\n\n if self._initialized:\n raise Exception('Boot Record already initialized')\n\n self.boot_system_identifier = boot_system_id.ljust(32, b'\\x00')\n self.boot_identifier = b'\\x00' * 32\n self.boot_system_use = b'\\x00' * 197 # This will be set later\n\n self._initialized = True", "docstring": "A method to create a new Boot Record.\n\n Parameters:\n boot_system_id - The system identifier to associate with this Boot\n Record.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def log_error(cls, msg):\n\n cls.error_logger.error(msg)\n cls.debug_logger.debug(msg)", "docstring": "Logs the provided error message to both the error logger and the debug logger logging\n instances.\n\nArgs:\n msg: `str`. The error message to log.", "source": "juraj_google_style"} -{"code": "def _create(cls, model_class, *args, **kwargs):\n\n manager = cls._get_manager(model_class)\n\n return manager.create_user(*args, **kwargs)", "docstring": "Create a new user instance.\n\nArgs:\n model_class:\n The type of model to create an instance of.\n\nArgs:\n Positional arguments to create the instance with.\n kwargs:\n Keyword arguments to create the instance with.\n\nReturns:\n A new user instance of the type specified by\n ``model_class``.", "source": "juraj_google_style"} -{"code": "def from_function(cls, f, *args, **kwargs):\n\n\n return cls.from_code(six.get_function_code(f), *args, **kwargs)", "docstring": "Create a new instance from a function. 
Gets the code object from\n the function and passes it and any other specified parameters to\n :meth:`from_code`.\n\nArgs:\n f(function): The function to get the code object from.\n\nReturns:\n CodeObject: A new :class:`CodeObject` instance.", "source": "juraj_google_style"} -{"code": "def chi_squared(source_frequency, target_frequency):\n\n # Ignore any symbols from source that are not in target.\n # TODO: raise Error if source_len is 0?\n target_prob = frequency_to_probability(target_frequency)\n source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)\n\n result = 0\n for symbol, prob in target_prob.items():\n symbol_frequency = source_frequency.get(symbol, 0) # Frequecy is 0 if it doesnt appear in source\n result += _calculate_chi_squared(symbol_frequency, prob, source_len)\n\n return result", "docstring": "Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.\n\nExample:\n >>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})\n 0.1\n\nArgs:\n source_frequency (dict): Frequency map of the text you are analyzing\n target_frequency (dict): Frequency map of the target language to compare with\n\nReturns:\n Decimal value of the chi-squared statistic", "source": "juraj_google_style"} -{"code": "def mapping_ref(self, es_mappings):\n\n\n new_map = {}\n for key, value in es_mappings.items():\n for sub_key, sub_value in value.items():\n new_map[\"/\".join([key, sub_key])] = \\\n mapping_fields(sub_value['properties'])\n return new_map", "docstring": "Retruns a dictionary of mappings and the fiels names in dot notation\n\nArgs:\n mappings: es mapping defitions to parse", "source": "juraj_google_style"} -{"code": "def predict_features(self, df_features, df_target, idx=0, **kwargs):\n\n estimator = SVR(kernel='linear')\n selector = RFECV(estimator, step=1)\n selector = selector.fit(df_features.values, df_target.values[:, 0])\n\n return selector.grid_scores_", "docstring": "For one variable, predict its neighbouring nodes.\n\nArgs:\n df_features (pandas.DataFrame):\n df_target (pandas.Series):\n idx (int): (optional) for printing purposes\n kwargs (dict): additional options for algorithms\n\nReturns:\n list: scores of each feature relatively to the target", "source": "juraj_google_style"} -{"code": "def get_obj(self, objpath, metahash, dst_path):\n\n incachepath = self.path_in_cache(objpath, metahash)\n if not os.path.exists(incachepath):\n raise CacheMiss('%s not in cache.' % incachepath)\n else:\n log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n os.link(incachepath, dst_path)", "docstring": "Get object from cache, write it to dst_path.\n\nArgs:\n objpath: filename relative to buildroot\n (example: mini-boot/blahblah/somefile.bin)\n metahash: metahash. 
See targets/base.py\n dst_path: Absolute path where the file should be written.\n\nRaises:\n CacheMiss: if the item is not in the cache", "source": "juraj_google_style"} -{"code": "def send_recv(self, message, timeout=10.0):\n\n response_queue = self.send(message)\n response = self.recv(response_queue, timeout)\n return response", "docstring": "Send a message to a PandABox and wait for the response\n\nArgs:\n message (str): The message to send\n timeout (float): How long to wait before raising queue.Empty\n\nReturns:\n str: The response", "source": "juraj_google_style"} -{"code": "def _read_tags(self):\n\n tags = self._config.get('tags', {})\n logging.info('Tags:')\n for tag_name in tags.keys():\n tag = {}\n tag['Key'] = tag_name\n tag['Value'] = tags[tag_name]\n self._tags.append(tag)\n logging.info('{} = {}'.format(tag_name, tags[tag_name]))\n\n logging.debug(json.dumps(\n self._tags,\n indent=2,\n sort_keys=True\n ))\n return True", "docstring": "Fill in the _tags dict from the tags file.\n\nArgs:\n None\n\nReturns:\n True\n\n Todo:\n Figure what could go wrong and at least acknowledge the\n the fact that Murphy was an optimist.", "source": "juraj_google_style"} -{"code": "def update_site(self, client_secret_expires_at=None):\n\n params = {\n \"oxd_id\": self.oxd_id,\n \"authorization_redirect_uri\": self.authorization_redirect_uri\n }\n\n if client_secret_expires_at:\n params[\"client_secret_expires_at\"] = client_secret_expires_at\n\n for param in self.opt_params:\n if self.config.get(\"client\", param):\n value = self.config.get(\"client\", param)\n params[param] = value\n\n for param in self.opt_list_params:\n if self.config.get(\"client\", param):\n value = self.config.get(\"client\", param).split(\",\")\n params[param] = value\n\n logger.debug(\"Sending `update_site` with params %s\",\n params)\n response = self.msgr.request(\"update_site\", **params)\n logger.debug(\"Received response: %s\", response)\n\n if response['status'] == 'error':\n raise OxdServerError(response['data'])\n\n return True", "docstring": "Function to update the site's information with OpenID Provider.\n This should be called after changing the values in the cfg file.\n\n Parameters:\n * **client_secret_expires_at (long, OPTIONAL):** milliseconds since 1970, can be used to extends client lifetime\n\nReturns:\n **bool:** The status for update. True for success and False for failure\n\nRaises:\n **OxdServerError:** When the update fails and oxd server returns error", "source": "juraj_google_style"} -{"code": "def maybe_broadcast_structure(from_structure: Any, to_structure: Any) -> Any:\n\n flat_from = tf.nest.flatten(from_structure)\n flat_to = tf.nest.flatten(to_structure)\n if len(flat_from) == 1:\n flat_from *= len(flat_to)\n return tf.nest.pack_sequence_as(to_structure, flat_from)", "docstring": "Maybe broadcasts `from_structure` to `to_structure`.\n\n If `from_structure` is a singleton, it is tiled to match the structure of\n `to_structure`. 
Note that the elements in `from_structure` are not copied if\n this tiling occurs.\n\nArgs:\n from_structure: A structure.\n to_structure: A structure.\n\nReturns:\n new_from_structure: Same structure as `to_structure`.", "source": "juraj_google_style"} -{"code": "def zip(self, *others):\n\n args = [_unwrap(item) for item in (self,) + others]\n ct = self.count()\n if not all(len(arg) == ct for arg in args):\n raise ValueError(\"Arguments are not all the same length\")\n return Collection(map(Wrapper.wrap, zip(*args)))", "docstring": "Zip the items of this collection with one or more\n other sequences, and wrap the result.\n\n Unlike Python's zip, all sequences must be the same length.\n\n Parameters:\n\n others: One or more iterables or Collections\n\nReturns:\n A new collection.\n\nExample:\n >>> c1 = Collection([Scalar(1), Scalar(2)])\n >>> c2 = Collection([Scalar(3), Scalar(4)])\n >>> c1.zip(c2).val()\n [(1, 3), (2, 4)]", "source": "juraj_google_style"} -{"code": "def request(self, endpoint, method='GET', headers=None, params=None, data=None):\n\n if not headers:\n headers = {}\n if method in ['POST', 'PATCH', 'PUT']:\n headers['Content-Type'] = 'application/json'\n url = '/'.join([self.api_url, 'v' + self.api_version, endpoint])\n data = json.dumps(data) if data else None\n try:\n response = requests.request(method=method, url=url, params=params, headers=headers, data=data)\n # TODO Does recreating the exception classes 'requests' use suck? Yes, but it sucks more to expose the underlying library I use\n except requests.exceptions.Timeout as e:\n raise wp_exceptions.TimeoutError(e)\n except requests.exceptions.ConnectionError as e:\n raise wp_exceptions.ConnectionError(e)\n self._validate_response(method, response)\n return response", "docstring": "Send a request to the given Wunderlist API endpoint\n\n Params:\n endpoint -- API endpoint to send request to\n\n Keyword Args:\n headers -- headers to add to the request\n method -- GET, PUT, PATCH, DELETE, etc.\n params -- parameters to encode in the request\n data -- data to send with the request", "source": "juraj_google_style"} -{"code": "def __le__(self, other):\n\n other = self._cast_to_frameset(other)\n if other is NotImplemented:\n return NotImplemented\n return self.items <= other.items", "docstring": "Check if `self` <= `other` via a comparison of the contents.\n If `other` is not a :class:`FrameSet`, but is a set, frozenset, or\n is iterable, it will be cast to a :class:`FrameSet`.\n\nArgs:\n other (:class:`FrameSet`): Also accepts an object that can be cast to a :class:`FrameSet`\n\nReturns:\n bool:\n :class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "juraj_google_style"} -{"code": "def get_interpolated_value(self, energy, integrated=False):\n\n inter = {}\n for spin in self.cohp:\n if not integrated:\n inter[spin] = get_linear_interpolated_value(self.energies,\n self.cohp[spin],\n energy)\n elif self.icohp is not None:\n inter[spin] = get_linear_interpolated_value(self.energies,\n self.icohp[spin],\n energy)\n else:\n raise ValueError(\"ICOHP is empty.\")\n return inter", "docstring": "Returns the COHP for a particular energy.\n\nArgs:\n energy: Energy to return the COHP value for.", "source": "juraj_google_style"} -{"code": "def _get_anchor(module_to_name, fullname):\n\n if not _anchor_re.match(fullname):\n raise ValueError(\"'%s' is not a valid anchor\" % fullname)\n anchor = fullname\n for module_name in module_to_name.values():\n if fullname.startswith(module_name + \".\"):\n rest = 
fullname[len(module_name)+1:]\n # Use this prefix iff it is longer than any found before\n if len(anchor) > len(rest):\n anchor = rest\n return anchor", "docstring": "Turn a full member name into an anchor.\n\nArgs:\n module_to_name: Dictionary mapping modules to short names.\n fullname: Fully qualified name of symbol.\n\nReturns:\n HTML anchor string. The longest module name prefix of fullname is\n removed to make the anchor.\n\nRaises:\n ValueError: If fullname uses characters invalid in an anchor.", "source": "juraj_google_style"} -{"code": "def close_position(self, repay_only):\n\n params = {'repay_only': repay_only}\n return self._send_message('post', '/position/close',\n data=json.dumps(params))", "docstring": "Close position.\n\nArgs:\n repay_only (bool): Undocumented by cbpro.\n\nReturns:\n Undocumented", "source": "juraj_google_style"} -{"code": "def getElementDeclaration(cls, namespaceURI, name, isref=False, lazy=False):\n\n key = (namespaceURI, name)\n if isref:\n klass = cls.elements.get(key,None)\n if klass is not None and lazy is True:\n return _Mirage(klass)\n return klass\n\n typecode = cls.element_typecode_cache.get(key, None)\n if typecode is None:\n tcls = cls.elements.get(key,None)\n if tcls is not None:\n typecode = cls.element_typecode_cache[key] = tcls()\n typecode.typed = False\n\n return typecode", "docstring": "Grab an element declaration, returns a typecode instance\n representation or a typecode class definition. An element\n reference has its own facets, and is local so it will not be\n cached.\n\n Parameters:\n namespaceURI --\n name --\n isref -- if element reference, return class definition.", "source": "juraj_google_style"} -{"code": "def __init__(self, server, spider, key, exchange=None):\n\n self.server = server\n self.spider = spider\n self.key = key % {'spider': spider.name}", "docstring": "Initialize per-spider RabbitMQ queue.\n\n Parameters:\n server -- rabbitmq connection\n spider -- spider instance\n key -- key for this queue (e.g. 
\"%(spider)s:queue\")", "source": "juraj_google_style"} -{"code": "def _build_vocab(filename, vocab_dir, vocab_name):\n\n vocab_path = os.path.join(vocab_dir, vocab_name)\n if not tf.gfile.Exists(vocab_path):\n with tf.gfile.GFile(filename, \"r\") as f:\n data = f.read().split()\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*count_pairs))\n encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)\n encoder.store_to_file(vocab_path)\n else:\n encoder = text_encoder.TokenTextEncoder(vocab_path)\n return encoder", "docstring": "Reads a file to build a vocabulary.\n\nArgs:\n filename: file to read list of words from.\n vocab_dir: directory where to save the vocabulary.\n vocab_name: vocab file name.\n\nReturns:\n text encoder.", "source": "juraj_google_style"} -{"code": "def from_string(species_string: str):\n\n m = re.search(r\"([A-Z][a-z]*)([0-9.]*)([+\\-]*)(.*)\", species_string)\n if m:\n sym = m.group(1)\n if m.group(2) == \"\" and m.group(3) == \"\":\n oxi = 0\n else:\n oxi = 1 if m.group(2) == \"\" else float(m.group(2))\n oxi = -oxi if m.group(3) == \"-\" else oxi\n properties = None\n if m.group(4):\n toks = m.group(4).split(\"=\")\n properties = {toks[0]: float(toks[1])}\n return DummySpecie(sym, oxi, properties)\n raise ValueError(\"Invalid DummySpecies String\")", "docstring": "Returns a Dummy from a string representation.\n\nArgs:\n species_string (str): A string representation of a dummy\n species, e.g., \"X2+\", \"X3+\".\n\nReturns:\n A DummySpecie object.\n\nRaises:\n ValueError if species_string cannot be intepreted.", "source": "juraj_google_style"} -{"code": "def ip_network(address, strict=True):\n\n try:\n return IPv4Network(address, strict)\n except (AddressValueError, NetmaskValueError):\n pass\n\n try:\n return IPv6Network(address, strict)\n except (AddressValueError, NetmaskValueError):\n pass\n\n raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %\n address)", "docstring": "Take an IP string/int and return an object of the correct type.\n\nArgs:\n address: A string or integer, the IP network. Either IPv4 or\n IPv6 networks may be supplied; integers less than 2**32 will\n be considered to be IPv4 by default.\n\nReturns:\n An IPv4Network or IPv6Network object.\n\nRaises:\n ValueError: if the string passed isn't either a v4 or a v6\n address. 
Or if the network has host bits set.", "source": "juraj_google_style"} -{"code": "def ipv6_is_defined(address):\n\n\n # Initialize the IP address object.\n query_ip = IPv6Address(str(address))\n\n # Initialize the results named tuple\n results = namedtuple('ipv6_is_defined_results', 'is_defined, ietf_name, '\n 'ietf_rfc')\n # Multicast\n if query_ip.is_multicast:\n\n return results(True, 'Multicast', 'RFC 4291, Section 2.7')\n\n # Unspecified\n elif query_ip.is_unspecified:\n\n return results(True, 'Unspecified', 'RFC 4291, Section 2.5.2')\n\n # Loopback.\n elif query_ip.is_loopback:\n\n return results(True, 'Loopback', 'RFC 4291, Section 2.5.3')\n\n # Reserved\n elif query_ip.is_reserved:\n\n return results(True, 'Reserved', 'RFC 4291')\n\n # Link-Local\n elif query_ip.is_link_local:\n\n return results(True, 'Link-Local', 'RFC 4291, Section 2.5.6')\n\n # Site-Local\n elif query_ip.is_site_local:\n\n return results(True, 'Site-Local', 'RFC 4291, Section 2.5.7')\n\n # Unique Local Unicast\n elif query_ip.is_private:\n\n return results(True, 'Unique Local Unicast', 'RFC 4193')\n\n return results(False, '', '')", "docstring": "The function for checking if an IPv6 address is defined (does not need to\n be resolved).\n\nArgs:\n address (:obj:`str`): An IPv6 address.\n\nReturns:\n namedtuple:\n\n :is_defined (bool): True if given address is defined, otherwise\n False\n :ietf_name (str): IETF assignment name if given address is\n defined, otherwise ''\n :ietf_rfc (str): IETF assignment RFC if given address is defined,\n otherwise ''", "source": "juraj_google_style"} -{"code": "def external_ids(self, **kwargs):\n\n path = self._get_series_id_season_number_episode_number_path(\n 'external_ids')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Get the external ids for a TV episode by combination of a season and\n episode number.\n\nArgs:\n language: (optional) ISO 639 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "source": "juraj_google_style"} -{"code": "def _initialize_slots(self, seed, hashvalues):\n\n self.seed = seed\n self.hashvalues = self._parse_hashvalues(hashvalues)", "docstring": "Initialize the slots of the LeanMinHash.\n\nArgs:\n seed (int): The random seed controls the set of random\n permutation functions generated for this LeanMinHash.\n hashvalues: The hash values is the internal state of the LeanMinHash.", "source": "juraj_google_style"} -{"code": "def match(self, subject: Union[Expression, FlatTerm]) -> Iterator[Tuple[T, Substitution]]:\n\n for index in self._match(subject):\n pattern, label = self._patterns[index]\n subst = Substitution()\n if subst.extract_substitution(subject, pattern.expression):\n for constraint in pattern.constraints:\n if not constraint(subst):\n break\n else:\n yield label, subst", "docstring": "Match the given subject against all patterns in the net.\n\nArgs:\n subject:\n The subject that is matched. 
Must be constant.\n\nYields:\n A tuple :code:`(final label, substitution)`, where the first component is the final label associated with\n the pattern as given when using :meth:`add()` and the second one is the match substitution.", "source": "juraj_google_style"} -{"code": "def get_summary(result):\n\n summary = {\n \"success\": result.wasSuccessful(),\n \"stat\": {\n 'total': result.testsRun,\n 'failures': len(result.failures),\n 'errors': len(result.errors),\n 'skipped': len(result.skipped),\n 'expectedFailures': len(result.expectedFailures),\n 'unexpectedSuccesses': len(result.unexpectedSuccesses)\n }\n }\n summary[\"stat\"][\"successes\"] = summary[\"stat\"][\"total\"] \\\n - summary[\"stat\"][\"failures\"] \\\n - summary[\"stat\"][\"errors\"] \\\n - summary[\"stat\"][\"skipped\"] \\\n - summary[\"stat\"][\"expectedFailures\"] \\\n - summary[\"stat\"][\"unexpectedSuccesses\"]\n\n summary[\"time\"] = {\n 'start_at': result.start_at,\n 'duration': result.duration\n }\n summary[\"records\"] = result.records\n\n return summary", "docstring": "get summary from test result\n\nArgs:\n result (instance): HtmlTestResult() instance\n\nReturns:\n dict: summary extracted from result.\n\n {\n \"success\": True,\n \"stat\": {},\n \"time\": {},\n \"records\": []\n }", "source": "juraj_google_style"} -{"code": "def select_segments(self, jsonpath: str) -> List[Segment]:\n\n path = self.etk.parse_json_path(jsonpath)\n matches = path.find(self.cdr_document)\n\n segments = list()\n for a_match in matches:\n this_segment = Segment(str(a_match.full_path), a_match.value, self)\n segments.append(this_segment)\n\n return segments", "docstring": "Dereferences the json_path inside the document and returns the selected elements.\n This method should compile and cache the compiled json_path in case the same path\n is reused by multiple extractors.\n\nArgs:\n jsonpath (str): a valid JSON path.\n\n Returns: A list of Segments object that contains the elements selected by the json path.", "source": "juraj_google_style"} -{"code": "def mkpath(self, url, path, filename=None, gzip=False):\n\n if path:\n url = \"%s/%s\" % (url.rstrip('/'), path.strip('/'))\n\n if filename:\n url = \"%s/%s\" % (url, filename.lstrip('/'))\n\n content_type = mimetypes.guess_type(url)[0]\n if gzip and content_type in mediasync.TYPES_TO_COMPRESS:\n url = \"%s.gzt\" % url\n\n cb = msettings['CACHE_BUSTER']\n if cb:\n # Cache busters help tell the client to re-download the file after\n # a change. 
This can either be a callable or a constant defined\n # in settings.py.\n cb_val = cb(url) if callable(cb) else cb\n url = \"%s?%s\" % (url, cb_val)\n\n return msettings['URL_PROCESSOR'](url)", "docstring": "Assembles various components to form a complete resource URL.\n\nArgs:\n url: (str) A base media URL.\n path: (str) The path on the host (specified in 'url') leading up\n to the file.\n filename: (str) The file name to serve.\n gzip: (bool) True if client should receive *.gzt version of file.", "source": "juraj_google_style"} -{"code": "def _kl_normal_normal(n_a, n_b, name=None):\n\n with tf.name_scope(name or \"kl_normal_normal\"):\n one = tf.constant(1, dtype=n_a.dtype)\n two = tf.constant(2, dtype=n_a.dtype)\n half = tf.constant(0.5, dtype=n_a.dtype)\n s_a_squared = tf.square(n_a.scale)\n s_b_squared = tf.square(n_b.scale)\n ratio = s_a_squared / s_b_squared\n return (tf.square(n_a.loc - n_b.loc) / (two * s_b_squared) + half *\n (ratio - one - tf.math.log(ratio)))", "docstring": "Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.\n\nArgs:\n n_a: instance of a Normal distribution object.\n n_b: instance of a Normal distribution object.\n name: (optional) Name to use for created operations.\n default is \"kl_normal_normal\".\n\nReturns:\n Batchwise KL(n_a || n_b)", "source": "juraj_google_style"} -{"code": "def get_conflicting_tools(self, request_only=False):\n\n from collections import defaultdict\n\n tool_sets = defaultdict(set)\n tools_dict = self.get_tools(request_only=request_only)\n for variant, tools in tools_dict.itervalues():\n for tool in tools:\n tool_sets[tool].add(variant)\n\n conflicts = dict((k, v) for k, v in tool_sets.iteritems() if len(v) > 1)\n return conflicts", "docstring": "Returns tools of the same name provided by more than one package.\n\nArgs:\n request_only: If True, only return the key from resolved packages\n that were also present in the request.\n\nReturns:\n Dict of {tool-name: set([Variant])}.", "source": "juraj_google_style"} -{"code": "def __init__(self, table_id=Meter.OFPM_ALL):\n\n super().__init__(InstructionType.OFPIT_GOTO_TABLE)\n self.table_id = table_id", "docstring": "Create a InstructionGotoTable with the optional parameters below.\n\nArgs:\n length (int): Length of this struct in bytes.\n table_id (int): set next table in the lookup pipeline.", "source": "juraj_google_style"} -{"code": "def __init__(self, attention_logit_mod, name=\"attention\"):\n\n super(AttentiveRead, self).__init__(name=name)\n\n self._attention_logit_mod = attention_logit_mod", "docstring": "Initialize AttentiveRead module.\n\nArgs:\n attention_logit_mod: Module that produces logit corresponding to a memory\n slot's compatibility. Must map a [batch_size * memory_size,\n memory_word_size + query_word_size]-shaped Tensor to a\n [batch_size * memory_size, 1] shape Tensor.\n name: string. 
Name for module.", "source": "juraj_google_style"} -{"code": "def execute_no_wait(self, cmd, walltime, envs={}):\n\n current_env = copy.deepcopy(self._envs)\n current_env.update(envs)\n\n try:\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=self.userhome,\n env=current_env,\n shell=True,\n preexec_fn=os.setpgrp\n )\n pid = proc.pid\n\n except Exception as e:\n print(\"Caught exception : {0}\".format(e))\n logger.warn(\"Execution of command [%s] failed due to \\n %s \", (cmd, e))\n\n return pid, proc", "docstring": "Synchronously execute a commandline string on the shell.\n\nArgs:\n - cmd (string) : Commandline string to execute\n - walltime (int) : walltime in seconds, this is not really used now.\n\nReturns:\n - retcode : Return code from the execution, -1 on fail\n - stdout : stdout string\n - stderr : stderr string\n\nRaises:\n None.", "source": "juraj_google_style"} -{"code": "def fields_equal(self, instance, fields_to_ignore=(\"id\", \"change_date\", \"changed_by\")):\n\n for field in self._meta.get_fields():\n if not field.many_to_many and field.name not in fields_to_ignore:\n if getattr(instance, field.name) != getattr(self, field.name):\n return False\n\n return True", "docstring": "Compares this instance's fields to the supplied instance to test for equality.\n This will ignore any fields in `fields_to_ignore`.\n\n Note that this method ignores many-to-many fields.\n\nArgs:\n instance: the model instance to compare\n fields_to_ignore: List of fields that should not be compared for equality. By default\n includes `id`, `change_date`, and `changed_by`.\n\n Returns: True if the checked fields are all equivalent, else False", "source": "juraj_google_style"} -{"code": "def on(self, event_name, *args, **kwargs):\n\n def decorator(f):\n self.add_event_handler(event_name, f, *args, **kwargs)\n return f\n return decorator", "docstring": "Decorator shortcut for add_event_handler.\n\nArgs:\n event_name: An event to attach the handler to. 
Valid events are from :class:`~ignite.engine.Events` or\n any `event_name` added by :meth:`~ignite.engine.Engine.register_events`.\n *args: optional args to be passed to `handler`.\n **kwargs: optional keyword args to be passed to `handler`.", "source": "juraj_google_style"} -{"code": "def __init__(\n self,\n size,\n window=3,\n stride=1,\n padding='SAME',\n bias=True,\n activation='relu',\n l2_regularization=0.0,\n l1_regularization=0.0,\n named_tensors=None,\n scope='conv1d',\n summary_labels=()\n ):\n\n self.size = size\n self.window = window\n self.stride = stride\n self.padding = padding\n self.bias = bias\n self.l2_regularization = l2_regularization\n self.l1_regularization = l1_regularization\n self.nonlinearity = Nonlinearity(summary_labels=summary_labels, **util.prepare_kwargs(activation))\n super(Conv1d, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "1D convolutional layer.\n\nArgs:\n size: Number of filters\n window: Convolution window size\n stride: Convolution stride\n padding: Convolution padding, one of 'VALID' or 'SAME'\n bias: If true, a bias is added\n activation: Type of nonlinearity, or dict with name & arguments\n l2_regularization: L2 regularization weight\n l1_regularization: L1 regularization weight", "source": "juraj_google_style"} -{"code": "def __init__(self, host='localhost', port=12223):\n\n\n channel = grpc.insecure_channel('{}:{}'.format(host, port))\n self._stub = clearly_pb2_grpc.ClearlyServerStub(channel)", "docstring": "Constructs a client instance.\n\nArgs:\n host (str): the hostname of the server\n port (int): the port of the server", "source": "juraj_google_style"} -{"code": "def indicators_from_tag(self, indicator, tag_name, filters=None, params=None):\n\n params = params or {}\n\n for t in self.pivot_from_tag(indicator, tag_name, filters=filters, params=params):\n yield t", "docstring": "Args:\n indicator:\n tag_name:\n filters:\n params:\n\n Return:", "source": "juraj_google_style"} -{"code": "def reaction_formula(reaction, compound_formula):\n\n\n def multiply_formula(compound_list):\n for compound, count in compound_list:\n yield count * compound_formula[compound.name]\n\n for compound, _ in reaction.compounds:\n if compound.name not in compound_formula:\n return None\n else:\n left_form = reduce(\n operator.or_, multiply_formula(reaction.left), Formula())\n right_form = reduce(\n operator.or_, multiply_formula(reaction.right), Formula())\n return left_form, right_form", "docstring": "Calculate formula compositions for both sides of the specified reaction.\n\n If the compounds in the reaction all have formula, then calculate and\n return the chemical compositions for both sides, otherwise return `None`.\n\nArgs:\n reaction: :class:`psamm.reaction.Reaction`.\n compound_formula: a map from compound id to formula.", "source": "juraj_google_style"} -{"code": "def seek(self, offset, whence=os.SEEK_SET):\n\n self._check_open()\n\n self._buffer.reset()\n self._buffer_future = None\n\n if whence == os.SEEK_SET:\n self._offset = offset\n elif whence == os.SEEK_CUR:\n self._offset += offset\n elif whence == os.SEEK_END:\n self._offset = self._file_size + offset\n else:\n raise ValueError('Whence mode %s is invalid.' 
% str(whence))\n\n self._offset = min(self._offset, self._file_size)\n self._offset = max(self._offset, 0)\n if self._remaining():\n self._request_next_buffer()", "docstring": "Set the file's current offset.\n\n Note if the new offset is out of bound, it is adjusted to either 0 or EOF.\n\nArgs:\n offset: seek offset as number.\n whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),\n os.SEEK_CUR (seek relative to the current position), and os.SEEK_END\n (seek relative to the end, offset should be negative).\n\nRaises:\n IOError: When this buffer is closed.\n ValueError: When whence is invalid.", "source": "juraj_google_style"} -{"code": "def launch(self, workflow):\n\n\n # hit workflow api\n try:\n r = self.gbdx_connection.post(self.workflows_url, json=workflow)\n try:\n r.raise_for_status()\n except:\n print(\"GBDX API Status Code: %s\" % r.status_code)\n print(\"GBDX API Response: %s\" % r.text)\n r.raise_for_status()\n workflow_id = r.json()['id']\n return workflow_id\n except TypeError:\n self.logger.debug('Workflow not launched!')", "docstring": "Launches GBDX workflow.\n\nArgs:\n workflow (dict): Dictionary specifying workflow tasks.\n\nReturns:\n Workflow id (str).", "source": "juraj_google_style"} -{"code": "def EnrolFleetspeakClient(self, client_id):\n\n client_urn = rdf_client.ClientURN(client_id)\n\n # If already enrolled, return.\n if data_store.RelationalDBEnabled():\n try:\n data_store.REL_DB.ReadClientMetadata(client_id)\n return False\n except db.UnknownClientError:\n pass\n else:\n if aff4.FACTORY.ExistsWithType(\n client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):\n return False\n\n logging.info(\"Enrolling a new Fleetspeak client: %r\", client_id)\n\n if data_store.RelationalDBEnabled():\n now = rdfvalue.RDFDatetime.Now()\n data_store.REL_DB.WriteClientMetadata(\n client_id, first_seen=now, fleetspeak_enabled=True, last_ping=now)\n\n if data_store.AFF4Enabled():\n # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can\n # catch exceptions from it and forward them to Fleetspeak by failing its\n # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,\n # instance of the GRR frontend.\n with aff4.FACTORY.Create(\n client_urn,\n aff4_type=aff4_grr.VFSGRRClient,\n mode=\"rw\",\n token=self.token) as client:\n\n client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))\n\n index = client_index.CreateClientIndex(token=self.token)\n index.AddClient(client)\n if data_store.RelationalDBEnabled():\n client_obj = rdf_objects.ClientSnapshot(\n client_id=client_urn.Basename())\n index = client_index.ClientIndex()\n index.AddClient(client_obj)\n\n # Publish the client enrollment message.\n events.Events.PublishEvent(\"ClientEnrollment\", client_urn, token=self.token)\n return True", "docstring": "Enrols a Fleetspeak-enabled client for use with GRR.\n\nArgs:\n client_id: GRR client-id for the client.\n\nReturns:\n True if the client is new, and actually got enrolled. 
This method\n is a no-op if the client already exists (in which case False is returned).", "source": "juraj_google_style"} -{"code": "def site_name(self, site_name):\n\n # Set base class property site_name\n _Pybooru.site_name.fset(self, site_name)\n\n if ('api_version' and 'hashed_string') in SITE_LIST[site_name]:\n self.api_version = SITE_LIST[site_name]['api_version']\n self.hash_string = SITE_LIST[site_name]['hashed_string']", "docstring": "Sets api_version and hash_string.\n\n Parameters:\n site_name (str): The site name in 'SITE_LIST', default sites.\n\nRaises:\n PybooruError: When 'site_name' isn't valid.", "source": "juraj_google_style"} -{"code": "def get_bytes(obj):\n\n try:\n obj = obj.read(_NUM_SIGNATURE_BYTES)\n except AttributeError:\n # duck-typing as readable failed - we'll try the other options\n pass\n\n kind = type(obj)\n\n if kind is bytearray:\n return signature(obj)\n\n if kind is str:\n return get_signature_bytes(obj)\n\n if kind is bytes:\n return signature(obj)\n\n if kind is memoryview:\n return signature(obj).tolist()\n\n raise TypeError('Unsupported type as file input: %s' % kind)", "docstring": "Infers the input type and reads the first 262 bytes,\n returning a sliced bytearray.\n\nArgs:\n obj: path to readable, file, bytes or bytearray.\n\nReturns:\n First 262 bytes of the file content as bytearray type.\n\nRaises:\n TypeError: if obj is not a supported type.", "source": "juraj_google_style"} -{"code": "def ParseCloudEntryRow(\n self, parser_mediator, query, row, cache=None, database=None,\n **unused_kwargs):\n\n query_hash = hash(query)\n\n parent_resource_id = self._GetRowValue(\n query_hash, row, 'parent_resource_id')\n filename = self._GetRowValue(query_hash, row, 'filename')\n\n cloud_path = self.GetCloudPath(parent_resource_id, cache, database)\n cloud_filename = '{0:s}{1:s}'.format(cloud_path, filename)\n\n event_data = GoogleDriveSnapshotCloudEntryEventData()\n event_data.document_type = self._GetRowValue(query_hash, row, 'doc_type')\n event_data.path = cloud_filename\n event_data.query = query\n event_data.shared = bool(self._GetRowValue(query_hash, row, 'shared'))\n event_data.size = self._GetRowValue(query_hash, row, 'size')\n event_data.url = self._GetRowValue(query_hash, row, 'url')\n\n timestamp = self._GetRowValue(query_hash, row, 'modified')\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = self._GetRowValue(query_hash, row, 'created')\n if timestamp:\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a cloud entry row.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n query (str): query that created the row.\n row (sqlite3.Row): row.\n cache (SQLiteCache): cache.\n database (SQLiteDatabase): database.", "source": "juraj_google_style"} -{"code": "def swap(self, left, right):\n\n if type(left) is not type(right):\n raise LayoutError('The method swap only works with elements of the same type.')\n temp = self[left]\n self[left] = self[right]\n self[right] = temp", "docstring": "Swaps the map between left and right.\n\nArgs:\n left (tuple or int): Item to swap with right.\n right 
(tuple or int): Item to swap with left.\n\nRaises:\n LayoutError: If left and right have not the same type.", "source": "juraj_google_style"} -{"code": "def _ParseRecord(\n self, parser_mediator, record_index, evtx_record, recovered=False):\n\n event_data = self._GetEventData(\n parser_mediator, record_index, evtx_record, recovered=recovered)\n\n try:\n written_time = evtx_record.get_written_time_as_integer()\n except OverflowError as exception:\n parser_mediator.ProduceExtractionWarning((\n 'unable to read written time from event record: {0:d} '\n 'with error: {1!s}').format(record_index, exception))\n\n written_time = None\n\n if not written_time:\n date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n else:\n date_time = dfdatetime_filetime.Filetime(timestamp=written_time)\n\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract data from a Windows XML EventLog (EVTX) record.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n record_index (int): event record index.\n evtx_record (pyevtx.record): event record.\n recovered (Optional[bool]): True if the record was recovered.", "source": "juraj_google_style"} -{"code": "def list_attributes(self, name):\n\n result = self.client.service.getListAttributes(name, self.proxy_id)\n if isinstance(result, list) and len(result) == 1:\n return result[0]\n return result", "docstring": "Look up the attributes of a list.\n\nArgs:\n name (str): The name of the list\n\nReturns:\n dict: attributes of the list", "source": "juraj_google_style"} -{"code": "def _CheckStatusAnalysisProcess(self, pid):\n\n # TODO: Refactor this method, simplify and separate concerns (monitoring\n # vs management).\n self._RaiseIfNotRegistered(pid)\n\n if pid in self._completed_analysis_processes:\n status_indicator = definitions.STATUS_INDICATOR_COMPLETED\n process_status = {\n 'processing_status': status_indicator}\n used_memory = 0\n\n else:\n process = self._processes_per_pid[pid]\n\n process_status = self._QueryProcessStatus(process)\n if process_status is None:\n process_is_alive = False\n else:\n process_is_alive = True\n\n process_information = self._process_information_per_pid[pid]\n used_memory = process_information.GetUsedMemory() or 0\n\n if self._worker_memory_limit and used_memory > self._worker_memory_limit:\n logger.warning((\n 'Process: {0:s} (PID: {1:d}) killed because it exceeded the '\n 'memory limit: {2:d}.').format(\n process.name, pid, self._worker_memory_limit))\n self._KillProcess(pid)\n\n if isinstance(process_status, dict):\n self._rpc_errors_per_pid[pid] = 0\n status_indicator = process_status.get('processing_status', None)\n\n if status_indicator == definitions.STATUS_INDICATOR_COMPLETED:\n self._completed_analysis_processes.add(pid)\n\n else:\n rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1\n self._rpc_errors_per_pid[pid] = rpc_errors\n\n if rpc_errors > self._MAXIMUM_RPC_ERRORS:\n process_is_alive = False\n\n if process_is_alive:\n rpc_port = process.rpc_port.value\n logger.warning((\n 'Unable to retrieve process: {0:s} (PID: {1:d}) status via '\n 'RPC socket: http://localhost:{2:d}').format(\n process.name, pid, rpc_port))\n\n processing_status_string = 'RPC error'\n status_indicator = definitions.STATUS_INDICATOR_RUNNING\n else:\n processing_status_string = 'killed'\n status_indicator = definitions.STATUS_INDICATOR_KILLED\n\n 
process_status = {\n 'processing_status': processing_status_string}\n\n self._UpdateProcessingStatus(pid, process_status, used_memory)\n\n if status_indicator in definitions.ERROR_STATUS_INDICATORS:\n logger.error((\n 'Process {0:s} (PID: {1:d}) is not functioning correctly. '\n 'Status code: {2!s}.').format(\n process.name, pid, status_indicator))\n\n self._TerminateProcessByPid(pid)", "docstring": "Checks the status of an analysis process.\n\nArgs:\n pid (int): process ID (PID) of a registered analysis process.\n\nRaises:\n KeyError: if the process is not registered with the engine.", "source": "juraj_google_style"} -{"code": "def get_default_settings(sub_scripts, script_order, script_execution_freq, iterator_type):\n\n def populate_sweep_param(scripts, parameter_list, trace=''):\n\n\n def get_parameter_from_dict(trace, dic, parameter_list, valid_values=None):\n\n if valid_values is None and isinstance(dic, Parameter):\n valid_values = dic.valid_values\n\n for key, value in dic.items():\n if isinstance(value, dict): # for nested parameters ex {point: {'x': int, 'y': int}}\n parameter_list = get_parameter_from_dict(trace + '.' + key, value, parameter_list,\n dic.valid_values[key])\n elif (valid_values[key] in (float, int)) or \\\n (isinstance(valid_values[key], list) and valid_values[key][0] in (float, int)):\n parameter_list.append(trace + '.' + key)\n else: # once down to the form {key: value}\n # in all other cases ignore parameter\n print(('ignoring sweep parameter', key))\n\n return parameter_list\n\n for script_name in list(scripts.keys()):\n from pylabcontrol.core import ScriptIterator\n script_trace = trace\n if script_trace == '':\n script_trace = script_name\n else:\n script_trace = script_trace + '->' + script_name\n if issubclass(scripts[script_name], ScriptIterator): # gets subscripts of ScriptIterator objects\n populate_sweep_param(vars(scripts[script_name])['_SCRIPTS'], parameter_list=parameter_list,\n trace=script_trace)\n else:\n # use inspect instead of vars to get _DEFAULT_SETTINGS also for classes that inherit _DEFAULT_SETTINGS from a superclass\n for setting in \\\n [elem[1] for elem in inspect.getmembers(scripts[script_name]) if elem[0] == '_DEFAULT_SETTINGS'][0]:\n parameter_list = get_parameter_from_dict(script_trace, setting, parameter_list)\n\n return parameter_list\n\n if iterator_type == 'loop':\n script_default_settings = [\n Parameter('script_order', script_order),\n Parameter('script_execution_freq', script_execution_freq),\n Parameter('num_loops', 0, int, 'times the subscripts will be executed'),\n Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')\n ]\n\n elif iterator_type == 'sweep':\n\n sweep_params = populate_sweep_param(sub_scripts, [])\n\n script_default_settings = [\n Parameter('script_order', script_order),\n Parameter('script_execution_freq', script_execution_freq),\n Parameter('sweep_param', sweep_params[0], sweep_params, 'variable over which to sweep'),\n Parameter('sweep_range',\n [Parameter('min_value', 0, float, 'min parameter value'),\n Parameter('max_value', 0, float, 'max parameter value'),\n Parameter('N/value_step', 0, float,\n 'either number of steps or parameter value step, depending on mode')]),\n Parameter('stepping_mode', 'N', ['N', 'value_step'],\n 'Switch between number of steps and step amount'),\n Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')\n ]\n else:\n print(('unknown iterator type ' + iterator_type))\n raise TypeError('unknown 
iterator type ' + iterator_type)\n\n return script_default_settings", "docstring": "assigning the actual script settings depending on the iterator type\n\n this might be overwritten by classes that inherit form ScriptIterator\n\nArgs:\n sub_scripts: dictionary with the subscripts\n script_order: execution order of subscripts\n script_execution_freq: execution frequency of subscripts\n\nReturns:\n the default setting for the iterator", "source": "juraj_google_style"} -{"code": "def set_position_p(self, pvalue):\n\n pvalue_msb = int(pvalue) >> 8\n pvalue_lsb = int(pvalue) & 0xff\n data = []\n data.append(0x0B)\n data.append(self.servoid)\n data.append(RAM_WRITE_REQ)\n data.append(POSITION_KP_RAM)\n data.append(BYTE2)\n data.append( pvalue_lsb)\n data.append( pvalue_msb)\n send_data(data)", "docstring": "Set the P gain of the position PID\n\nArgs:\n pvalue (int): P value", "source": "juraj_google_style"} -{"code": "def ed25519_public_key_to_string(key):\n\n return base64.b64encode(key.public_bytes(\n encoding=serialization.Encoding.Raw,\n format=serialization.PublicFormat.Raw,\n ), None).decode('utf-8')", "docstring": "Convert an ed25519 public key to a base64-encoded string.\n\nArgs:\n key (Ed25519PublicKey): the key to write to the file.\n\nReturns:\n str: the key representation as a str", "source": "juraj_google_style"} -{"code": "def get_paths(self, key):\n\n final_paths = []\n\n if key in self.__cli:\n paths = self.__cli[key] or []\n from_conf = False\n else:\n paths = self.__config.get(key) or []\n from_conf = True\n\n for path in flatten_list(paths):\n final_path = self.__abspath(path, from_conf)\n if final_path:\n final_paths.append(final_path)\n\n return final_paths", "docstring": "Same as `ConfigParser.get_path` for a list of paths.\n\nArgs:\n key: str, the key to lookup the paths with\n\nReturns:\n list: The paths.", "source": "juraj_google_style"} -{"code": "def delete(filething):\n\n\n t = OggFLAC(filething)\n filething.fileobj.seek(0)\n t.delete(filething)", "docstring": "delete(filething)\n\nArgs:\n filething (filething)\n\nRaises:\n mutagen.MutagenError\n\n Remove tags from a file.", "source": "juraj_google_style"} -{"code": "def fib_iter(n):\n\n\n # precondition\n assert n >= 0, 'n must be positive integer'\n\n fib_1 = 0\n fib_2 = 1\n sum = 0\n if n <= 1:\n return n\n for _ in range(n-1):\n sum = fib_1 + fib_2\n fib_1 = fib_2\n fib_2 = sum\n return sum", "docstring": "[summary]\n Works iterative approximate O(n)\n\nArgs:\n n {[int]} -- [description]\n\nReturns:\n [int] -- [description]", "source": "juraj_google_style"} -{"code": "def session_ended(self, f):\n\n self._session_ended_view_func = f\n\n @wraps(f)\n def wrapper(*args, **kw):\n self._flask_view_func(*args, **kw)\n return f", "docstring": "Decorator routes Alexa SessionEndedRequest to the wrapped view function to end the skill.\n\n @ask.session_ended\n def session_ended():\n return \"{}\", 200\n\n The wrapped function is registered as the session_ended view function\n and renders the response for requests to the end of the session.\n\nArgs:\n f {function} -- session_ended view function", "source": "juraj_google_style"} -{"code": "def trace(state: State, fn: TransitionOperator, num_steps: IntTensor,\n trace_fn: Callable[[State, TensorNest], TensorNest]\n ) -> Tuple[State, TensorNest]:\n\n\n def fn_wrapper(args, _):\n return tf.nest.map_structure(tf.convert_to_tensor, call_fn(fn, args[0]))\n\n def trace_fn_wrapper(args):\n return tf.nest.map_structure(tf.convert_to_tensor, call_fn(trace_fn, args))\n\n state = 
call_fn(fn, state)\n first_trace = trace_fn_wrapper(state)\n\n state, full_trace = mcmc_util.trace_scan(\n fn_wrapper, state, tf.ones(num_steps - 1), trace_fn=trace_fn_wrapper)\n\n prepend = lambda x, y: tf.concat( # pylint: disable=g-long-lambda\n [tf.convert_to_tensor(value=x)[tf.newaxis], y], 0)\n\n return state, tf.nest.map_structure(prepend, first_trace, full_trace)", "docstring": "`TransitionOperator` that runs `fn` repeatedly and traces its outputs.\n\nArgs:\n state: A nest of `Tensor`s or None.\n fn: A `TransitionOperator`.\n num_steps: Number of steps to run the function for. Must be greater than 1.\n trace_fn: Callable that the unpacked outputs of `fn` and returns a nest of\n `Tensor`s. These will be stacked and returned.\n\nReturns:\n state: The final state returned by `fn`.\n traces: Stacked outputs of `trace_fn`.", "source": "juraj_google_style"} -{"code": "def copy_function(func, name=None):\n\n code = func.__code__\n newname = name or func.__name__\n newcode = CodeType(\n code.co_argcount,\n code.co_kwonlyargcount,\n code.co_nlocals,\n code.co_stacksize,\n code.co_flags,\n code.co_code,\n code.co_consts,\n code.co_names,\n code.co_varnames,\n code.co_filename,\n newname,\n code.co_firstlineno,\n code.co_lnotab,\n code.co_freevars,\n code.co_cellvars,\n )\n newfunc = FunctionType(\n newcode,\n func.__globals__,\n newname,\n func.__defaults__,\n func.__closure__,\n )\n newfunc.__dict__.update(func.__dict__)\n return newfunc", "docstring": "Copy a function object with different name.\n\nArgs:\n func (function): Function to be copied.\n name (string, optional): Name of the new function.\n If not spacified, the same name of `func` will be used.\n\nReturns:\n newfunc (function): New function with different name.", "source": "juraj_google_style"} -{"code": "def _DefaultValueConstructorForField(field):\n\n\n if _IsMapField(field):\n return _GetInitializeDefaultForMap(field)\n\n if field.label == _FieldDescriptor.LABEL_REPEATED:\n if field.has_default_value and field.default_value != []:\n raise ValueError('Repeated field default value not empty list: %s' % (\n field.default_value))\n if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:\n # We can't look at _concrete_class yet since it might not have\n # been set. (Depends on order in which we initialize the classes).\n message_type = field.message_type\n def MakeRepeatedMessageDefault(message):\n return containers.RepeatedCompositeFieldContainer(\n message._listener_for_children, field.message_type)\n return MakeRepeatedMessageDefault\n else:\n type_checker = type_checkers.GetTypeChecker(field)\n def MakeRepeatedScalarDefault(message):\n return containers.RepeatedScalarFieldContainer(\n message._listener_for_children, type_checker)\n return MakeRepeatedScalarDefault\n\n if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:\n # _concrete_class may not yet be initialized.\n message_type = field.message_type\n def MakeSubMessageDefault(message):\n result = message_type._concrete_class()\n result._SetListener(\n _OneofListener(message, field)\n if field.containing_oneof is not None\n else message._listener_for_children)\n return result\n return MakeSubMessageDefault\n\n def MakeScalarDefault(message):\n # TODO(protobuf-team): This may be broken since there may not be\n # default_value. 
Combine with has_default_value somehow.\n return field.default_value\n return MakeScalarDefault", "docstring": "Returns a function which returns a default value for a field.\n\nArgs:\n field: FieldDescriptor object for this field.\n\n The returned function has one argument:\n message: Message instance containing this field, or a weakref proxy\n of same.\n\n That function in turn returns a default value for this field. The default\n value may refer back to |message| via a weak reference.", "source": "juraj_google_style"} -{"code": "def write_env_vars(env_vars=None): # type: (dict) -> None\n\n env_vars = env_vars or {}\n env_vars['PYTHONPATH'] = ':'.join(sys.path)\n\n for name, value in env_vars.items():\n os.environ[name] = value", "docstring": "Write the dictionary env_vars in the system, as environment variables.\n\nArgs:\n env_vars ():\n\n Returns:", "source": "juraj_google_style"} -{"code": "def get_dialect_name(mixed: Union[SQLCompiler, Engine, Dialect]) -> str:\n\n dialect = get_dialect(mixed)\n # noinspection PyUnresolvedReferences\n return dialect.name", "docstring": "Finds the name of the SQLAlchemy dialect in use.\n\nArgs:\n mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or\n :class:`Dialect` object\n\n Returns: the SQLAlchemy dialect name being used", "source": "juraj_google_style"} -{"code": "def _get_class_frame_source(class_name):\n # type: (str) -> Optional[str]\n\n for frame_info in inspect.stack():\n try:\n with open(frame_info[1]) as fp:\n src = \"\".join(fp.readlines()[frame_info[2] - 1 :])\n except IOError:\n continue\n if re.search(r\"\\bclass\\b\\s+\\b{}\\b\".format(class_name), src):\n reader = six.StringIO(src).readline\n tokens = tokenize.generate_tokens(reader)\n source_tokens = []\n indent_level = 0\n base_indent_level = 0\n has_base_level = False\n for token, value, _, _, _ in tokens: # type: ignore\n source_tokens.append((token, value))\n if token == tokenize.INDENT:\n indent_level += 1\n elif token == tokenize.DEDENT:\n indent_level -= 1\n if has_base_level and indent_level <= base_indent_level:\n return (\n tokenize.untokenize(source_tokens),\n frame_info[0].f_globals,\n frame_info[0].f_locals,\n )\n elif not has_base_level:\n has_base_level = True\n base_indent_level = indent_level\n raise TypeError(\n 'Unable to retrieve source for class \"{}\"'.format(class_name)\n )", "docstring": "Return the source code for a class by checking the frame stack.\n\n This is necessary because it is not possible to get the source of a class\n being created by a metaclass directly.\n\nArgs:\n class_name: The class to look for on the stack.\n\nReturns:\n The source code for the requested class if the class was found and the\n source was accessible.", "source": "juraj_google_style"} -{"code": "def _ProcessSources(\n self, source_path_specs, storage_writer, filter_find_specs=None):\n\n if self._processing_profiler:\n self._processing_profiler.StartTiming('process_sources')\n\n self._status = definitions.STATUS_INDICATOR_COLLECTING\n self._number_of_consumed_event_tags = 0\n self._number_of_consumed_events = 0\n self._number_of_consumed_reports = 0\n self._number_of_consumed_sources = 0\n self._number_of_consumed_warnings = 0\n self._number_of_produced_event_tags = 0\n self._number_of_produced_events = 0\n self._number_of_produced_reports = 0\n self._number_of_produced_sources = 0\n self._number_of_produced_warnings = 0\n\n path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(\n source_path_specs, find_specs=filter_find_specs,\n recurse_file_system=False, 
resolver_context=self._resolver_context)\n\n for path_spec in path_spec_generator:\n if self._abort:\n break\n\n # TODO: determine if event sources should be DataStream or FileEntry\n # or both.\n event_source = event_sources.FileEntryEventSource(path_spec=path_spec)\n storage_writer.AddEventSource(event_source)\n\n self._number_of_produced_sources = storage_writer.number_of_event_sources\n\n # Update the foreman process status in case we are using a filter file.\n self._UpdateForemanProcessStatus()\n\n if self._status_update_callback:\n self._status_update_callback(self._processing_status)\n\n self._ScheduleTasks(storage_writer)\n\n if self._abort:\n self._status = definitions.STATUS_INDICATOR_ABORTED\n else:\n self._status = definitions.STATUS_INDICATOR_COMPLETED\n\n self._number_of_produced_events = storage_writer.number_of_events\n self._number_of_produced_sources = storage_writer.number_of_event_sources\n self._number_of_produced_warnings = storage_writer.number_of_warnings\n\n if self._processing_profiler:\n self._processing_profiler.StopTiming('process_sources')\n\n # Update the foreman process and task status in case we are using\n # a filter file.\n self._UpdateForemanProcessStatus()\n\n tasks_status = self._task_manager.GetStatusInformation()\n if self._task_queue_profiler:\n self._task_queue_profiler.Sample(tasks_status)\n\n self._processing_status.UpdateTasksStatus(tasks_status)\n\n if self._status_update_callback:\n self._status_update_callback(self._processing_status)", "docstring": "Processes the sources.\n\nArgs:\n source_path_specs (list[dfvfs.PathSpec]): path specifications of\n the sources to process.\n storage_writer (StorageWriter): storage writer for a session storage.\n filter_find_specs (Optional[list[dfvfs.FindSpec]]): find specifications\n used in path specification extraction. 
If set, path specifications\n that match the find specification will be processed.", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.ListInstances = channel.unary_unary(\n \"/google.cloud.redis.v1beta1.CloudRedis/ListInstances\",\n request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.ListInstancesRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.ListInstancesResponse.FromString,\n )\n self.GetInstance = channel.unary_unary(\n \"/google.cloud.redis.v1beta1.CloudRedis/GetInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.GetInstanceRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.Instance.FromString,\n )\n self.CreateInstance = channel.unary_unary(\n \"/google.cloud.redis.v1beta1.CloudRedis/CreateInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.CreateInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.UpdateInstance = channel.unary_unary(\n \"/google.cloud.redis.v1beta1.CloudRedis/UpdateInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.UpdateInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.DeleteInstance = channel.unary_unary(\n \"/google.cloud.redis.v1beta1.CloudRedis/DeleteInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.DeleteInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def execute_by_options(args):\n\n if args['subcommand'] == 'sphinx':\n s = Sphinx(proj_info)\n if args['quickstart']:\n s.quickstart()\n elif args['gen_code_api']:\n s.gen_code_api()\n elif args['rst2html']:\n s.rst2html()\n pass\n elif args['subcommand'] == 'offline_dist':\n pod = PyOfflineDist()\n if args['freeze_deps']:\n pod.freeze_deps()\n elif args['download_deps']:\n pod.download_deps()\n elif args['install_deps']:\n pod.install_deps()\n elif args['clean_deps']:\n pod.clean_deps()\n elif args['mkbinary']:\n pod.pyinstaller_mkbinary(args['mkbinary'])\n elif args['clean_binary']:\n pod.clean_binary()\n\n pass", "docstring": "execute by argument dictionary\n\nArgs:\n args (dict): command line argument dictionary", "source": "juraj_google_style"} -{"code": "def from_pil(pil, alpha=1.0, wref=_DEFAULT_WREF):\n\n return Color(pil_to_rgb(pil), 'rgb', alpha, wref)", "docstring": "Create a new instance based on the specifed PIL color.\n\n Parameters:\n :pil:\n A PIL compatible color representation (0xBBGGRR)\n :alpha:\n The color transparency [0...1], default is opaque\n :wref:\n The whitepoint reference, default is 2° D65.\n\nReturns:\n A grapefruit.Color instance.\n\n >>> Color.from_pil(0x0080ff)\n Color(1.0, 0.501961, 0.0, 1.0)\n >>> Color.from_pil(0x0080ff, 0.5)\n Color(1.0, 0.501961, 0.0, 0.5)", "source": "juraj_google_style"} -{"code": "def parse_response(service, response, search_type):\n\n _LOG.debug('Parse response \"%s\" from service \"%s\" of type \"%s\"', response,\n service, search_type)\n items = []\n # The result to be parsed is in either searchResult 
or getMetadataResult\n if 'searchResult' in response:\n response = response['searchResult']\n elif 'getMetadataResult' in response:\n response = response['getMetadataResult']\n else:\n raise ValueError('\"response\" should contain either the key '\n '\"searchResult\" or \"getMetadataResult\"')\n\n # Form the search metadata\n search_metadata = {\n 'number_returned': response['count'],\n 'total_matches': None,\n 'search_type': search_type,\n 'update_id': None,\n }\n\n for result_type in ('mediaCollection', 'mediaMetadata'):\n # Upper case the first letter (used for the class_key)\n result_type_proper = result_type[0].upper() + result_type[1:]\n raw_items = response.get(result_type, [])\n # If there is only 1 result, it is not put in an array\n if isinstance(raw_items, OrderedDict):\n raw_items = [raw_items]\n\n for raw_item in raw_items:\n # Form the class_key, which is a unique string for this type,\n # formed by concatenating the result type with the item type. Turns\n # into e.g: MediaMetadataTrack\n class_key = result_type_proper + raw_item['itemType'].title()\n cls = get_class(class_key)\n items.append(cls.from_music_service(service, raw_item))\n return SearchResult(items, **search_metadata)", "docstring": "Parse the response to a music service query and return a SearchResult\n\nArgs:\n service (MusicService): The music service that produced the response\n response (OrderedDict): The response from the soap client call\n search_type (str): A string that indicates the search type that the\n response is from\n\nReturns:\n SearchResult: A SearchResult object", "source": "juraj_google_style"} -{"code": "def __init__(self, mtf_graph, mesh_shape, mtf_outputs=()):\n\n self.mtf_graph = mtf_graph\n self.mesh_shape = mesh_shape\n self.mtf_outputs = mtf_outputs\n\n self._layout_validator = None # valid_layouts.LayoutValidator\n self._graph_interface = None", "docstring": "Initializer.\n\nArgs:\n mtf_graph: a mtf.Graph.\n mesh_shape: an mtf.Shape.\n mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs\n of the computation.", "source": "juraj_google_style"} -{"code": "def build_graph(steps):\n\n\n graph = Graph()\n\n for step in steps:\n graph.add_step(step)\n\n for step in steps:\n for dep in step.requires:\n graph.connect(step.name, dep)\n\n for parent in step.required_by:\n graph.connect(parent, step.name)\n\n return graph", "docstring": "Builds a graph of steps.\n\nArgs:\n steps (list): a list of :class:`Step` objects to execute.", "source": "juraj_google_style"} -{"code": "def _FormatSubjectExOrProcessExToken(self, token_data):\n\n if token_data.net_type == 4:\n ip_address = self._FormatPackedIPv4Address(token_data.ip_address)\n elif token_data.net_type == 16:\n ip_address = self._FormatPackedIPv6Address(token_data.ip_address)\n else:\n ip_address = 'unknown'\n\n return {\n 'aid': token_data.audit_user_identifier,\n 'euid': token_data.effective_user_identifier,\n 'egid': token_data.effective_group_identifier,\n 'uid': token_data.real_user_identifier,\n 'gid': token_data.real_group_identifier,\n 'pid': token_data.process_identifier,\n 'session_id': token_data.session_identifier,\n 'terminal_port': token_data.terminal_port,\n 'terminal_ip': ip_address}", "docstring": "Formats a subject or process token as a dictionary of values.\n\nArgs:\n token_data (bsm_token_data_subject32_ex|bsm_token_data_subject64_ex):\n AUT_SUBJECT32_EX, AUT_PROCESS32_EX, AUT_SUBJECT64_EX or\n AUT_PROCESS64_EX token data.\n\nReturns:\n dict[str, str]: token values.", "source": "juraj_google_style"} 
-{"code": "def parse_args(args=None):\n\n parser = argparse.ArgumentParser(description=\"Main script to run LIVVkit.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n fromfile_prefix_chars='@')\n\n parser.add_argument('-o', '--out-dir',\n default=os.path.join(os.getcwd(), \"vv_\" + time.strftime(\"%Y-%m-%d\")),\n help='Location to output the LIVVkit webpages.'\n )\n\n parser.add_argument('-v', '--verify',\n nargs=2,\n default=None,\n help=' '.join(['Specify the locations of the test and bench bundle to',\n 'compare (respectively).'\n ])\n )\n\n parser.add_argument('-V', '--validate',\n action='store',\n nargs='+',\n default=None,\n help=' '.join(['Specify the location of the configuration files for',\n 'validation tests.'\n ])\n )\n\n # FIXME: this just short-circuits to the validation option, and should become its own module\n parser.add_argument('-e', '--extension',\n action='store',\n nargs='+',\n default=None,\n dest='validate',\n metavar='EXTENSION',\n help=' '.join(['Specify the location of the configuration files for',\n 'LIVVkit extensions.'\n ])\n )\n\n parser.add_argument('-p', '--publish',\n action='store_true',\n help=' '.join(['Also produce a publication quality copy of the figure in',\n 'the output directory (eps, 600d pi).'\n ])\n )\n\n parser.add_argument('-s', '--serve',\n nargs='?', type=int, const=8000,\n help=' '.join(['Start a simple HTTP server for the output website specified',\n 'by OUT_DIR on port SERVE.'\n ])\n )\n\n parser.add_argument('--version',\n action='version',\n version='LIVVkit {}'.format(livvkit.__version__),\n help=\"Show LIVVkit's version number and exit\"\n )\n\n return init(parser.parse_args(args))", "docstring": "Handles the parsing of options for LIVVkit's command line interface\n\nArgs:\n args: The list of arguments, typically sys.argv[1:]", "source": "juraj_google_style"} -{"code": "def ng(self, wavelength):\n\n return self.n(wavelength) - (wavelength*1.e-9)*self.nDer1(wavelength)", "docstring": "The group index with respect to wavelength.\n\nArgs:\n wavelength (float, list, None): The wavelength(s) the group\n index will be evaluated at.\n\nReturns:\n float, list: The group index at the target wavelength(s).", "source": "juraj_google_style"} -{"code": "def depth_october_average_ground_temperature(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type float '\n 'for field `depth_october_average_ground_temperature`'.format(value))\n\n self._depth_october_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_october_average_ground_temperature`\n\nArgs:\n value (float): value for IDD Field `depth_october_average_ground_temperature`\n Unit: C\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def _spin_product(variables):\n\n multiplier, multiplicand, product, aux = variables\n\n return BinaryQuadraticModel({multiplier: -.5,\n multiplicand: -.5,\n product: -.5,\n aux: -1.},\n {(multiplier, multiplicand): .5,\n (multiplier, product): .5,\n (multiplier, aux): 1.,\n (multiplicand, product): .5,\n (multiplicand, aux): 1.,\n (product, aux): 1.},\n 2.,\n Vartype.SPIN)", "docstring": "Create a bqm with a gap of 2 that represents the product of two variables.\n\n Note that spin-product requires an auxiliary variable.\n\nArgs:\n variables (list):\n multiplier, 
multiplicand, product, aux\n\nReturns:\n :obj:`.BinaryQuadraticModel`", "source": "juraj_google_style"} -{"code": "def get_street_from_xy(self, **kwargs):\n\n # Endpoint parameters\n params = {\n 'coordinateX': kwargs.get('longitude'),\n 'coordinateY': kwargs.get('latitude'),\n 'Radius': kwargs.get('radius'),\n 'cultureInfo': util.language_code(kwargs.get('lang'))\n }\n\n # Request\n result = self.make_request('geo', 'get_street_from_xy', **params)\n\n # Funny endpoint, no status code\n if not util.check_result(result, 'site'):\n return False, 'UNKNOWN ERROR'\n\n # Parse\n values = util.response_list(result, 'site')\n return True, [emtype.Street(**a) for a in values]", "docstring": "Obtain a list of streets around the specified point.\n\nArgs:\n latitude (double): Latitude in decimal degrees.\n longitude (double): Longitude in decimal degrees.\n radius (int): Radius (in meters) of the search.\n lang (str): Language code (*es* or *en*).\n\nReturns:\n Status boolean and parsed response (list[Street]), or message string\n in case of error.", "source": "juraj_google_style"} -{"code": "def delete(self, wait_for_deletion=True):\n\n if self.exists():\n try:\n self._api.objects_delete(self._bucket, self._key)\n except Exception as e:\n raise e\n if wait_for_deletion:\n for _ in range(_MAX_POLL_ATTEMPTS):\n objects = Objects(self._bucket, prefix=self.key, delimiter='/',\n context=self._context)\n if any(o.key == self.key for o in objects):\n time.sleep(_POLLING_SLEEP)\n continue\n break\n else:\n logging.error('Failed to see object deletion after %d attempts.',\n _MAX_POLL_ATTEMPTS)", "docstring": "Deletes this object from its bucket.\n\nArgs:\n wait_for_deletion: If True, we poll until this object no longer appears in\n objects.list operations for this bucket before returning.\n\nRaises:\n Exception if there was an error deleting the object.", "source": "juraj_google_style"} -{"code": "def recursive_getitem(d: Mapping[str, Any], keys: Union[str, Sequence[str]]) -> Any:\n\n # If only a string, then just just return the item\n if isinstance(keys, str):\n return d[keys]\n else:\n return functools.reduce(operator.getitem, keys, d)", "docstring": "Recursively retrieve an item from a nested dict.\n\n Credit to: https://stackoverflow.com/a/52260663\n\nArgs:\n d: Mapping of strings to objects.\n keys: Names of the keys under which the object is stored. Can also just be a single string.\n\nReturns:\n The object stored under the keys.\n\nRaises:\n KeyError: If one of the keys isnt' found.", "source": "juraj_google_style"} -{"code": "def normalize(self, inplace=False):\n\n if inplace:\n nrm = self.norm()\n self.data /= nrm\n return None\n nrm = self.norm()\n data_copy = np.array(self.data, copy=True)\n data_copy /= nrm\n return Quaternion(data_copy)", "docstring": "Normalizes a Quaternion to unit length\n so that it represents a valid rotation.\n\nArgs:\n inplace (bool): Do an inplace normalization.\n\nReturns:\n Quaternion: Normalized quaternion.", "source": "juraj_google_style"} -{"code": "def generate_matches(self, nodes):\n\n if self.content is None:\n # Shortcut for special case (see __init__.__doc__)\n for count in xrange(self.min, 1 + min(len(nodes), self.max)):\n r = {}\n if self.name:\n r[self.name] = nodes[:count]\n yield count, r\n elif self.name == \"bare_name\":\n yield self._bare_name_matches(nodes)\n else:\n # The reason for this is that hitting the recursion limit usually\n # results in some ugly messages about how RuntimeErrors are being\n # ignored. 
We don't do this on non-CPython implementation because\n # they don't have this problem.\n if hasattr(sys, \"getrefcount\"):\n save_stderr = sys.stderr\n sys.stderr = StringIO()\n try:\n for count, r in self._recursive_matches(nodes, 0):\n if self.name:\n r[self.name] = nodes[:count]\n yield count, r\n except RuntimeError:\n # We fall back to the iterative pattern matching scheme if the recursive\n # scheme hits the recursion limit.\n for count, r in self._iterative_matches(nodes):\n if self.name:\n r[self.name] = nodes[:count]\n yield count, r\n finally:\n if hasattr(sys, \"getrefcount\"):\n sys.stderr = save_stderr", "docstring": "Generator yielding matches for a sequence of nodes.\n\nArgs:\n nodes: sequence of nodes\n\nYields:\n (count, results) tuples where:\n count: the match comprises nodes[:count];\n results: dict containing named submatches.", "source": "juraj_google_style"} -{"code": "def __init__(self, storage_writer, path):\n\n super(SQLiteStorageMergeReader, self).__init__(storage_writer)\n self._active_container_type = None\n self._active_cursor = None\n self._add_active_container_method = None\n self._add_container_type_methods = {}\n self._compression_format = definitions.COMPRESSION_FORMAT_NONE\n self._connection = None\n self._container_types = None\n self._cursor = None\n self._event_data_identifier_mappings = {}\n self._path = path\n\n # Create a runtime lookup table for the add container type method. This\n # prevents having to create a series of if-else checks for container types.\n # The table is generated at runtime as there are no forward function\n # declarations in Python.\n for container_type, method_name in self._ADD_CONTAINER_TYPE_METHODS.items():\n method = getattr(self, method_name, None)\n if not method:\n raise RuntimeError(\n 'Add method missing for container type: {0:s}'.format(\n container_type))\n\n self._add_container_type_methods[container_type] = method", "docstring": "Initializes a storage merge reader.\n\nArgs:\n storage_writer (StorageWriter): storage writer.\n path (str): path to the input file.\n\nRaises:\n IOError: if the input file cannot be opened.\n RuntimeError: if an add container method is missing.", "source": "juraj_google_style"} -{"code": "def process_tokens(self, tokens):\n\n for tok_type, token, (start_row, start_col), _, _ in tokens:\n if tok_type == tokenize.STRING:\n # 'token' is the whole un-parsed token; we can look at the start\n # of it to see whether it's a raw or unicode string etc.\n self._process_string_token(token, start_row, start_col)", "docstring": "Process the token stream.\n\n This is required to override the parent class' implementation.\n\nArgs:\n tokens: the tokens from the token stream to process.", "source": "juraj_google_style"} -{"code": "def __init__(self,\n html_id=None,\n name=None,\n content=None,\n template=None,\n classes=None,\n **kwargs):\n\n if html_id is not None:\n try:\n self.html_id = html_id\n except AttributeError:\n self._html_id = html_id\n if name is not None:\n try:\n self.name = name\n except AttributeError:\n self._name = name\n if content is not None:\n try:\n self.content = content\n except AttributeError:\n self._content = content\n if template is not None:\n try:\n self.template = template\n except AttributeError:\n self._template = template\n if classes is not None:\n try:\n self.classes = classes\n except AttributeError:\n self._classes = classes\n\n if not hasattr(self, 'template'):\n raise AttributeError('template is a required widget attribute')\n\n for kw, arg in 
kwargs.items():\n setattr(self, kw, arg)", "docstring": "Init method.\n\nArgs:\n html_id (str): an ID to set on the HTML item.\n name (str): the name of the item, displayed in HTML.\n content (): suitable content according to chosen display.\n template (str): the template responsible for display.\n classes (str): additional classes to pass to the HTML item.", "source": "juraj_google_style"} -{"code": "def __init__(self, component1=None, component2=None):\n\n if component1 is None and component2 is not None:\n component1 = component2\n component2 = None\n\n self._llhead = None\n self._lltail = None\n #self._length = 0\n #self._offset = 0\n\n if isinstance(component1, CompositeBitarray):\n self._llhead = component1._llhead\n self._lltail = component1._lltail\n self._offset = component1._offset\n self._tailbitsused = component1._tailbitsused\n self._length = len(component1)\n else:\n self._llhead = self._lltail = _DLLNode(component1)\n self._offset = 0\n self._tailbitsused = len(component1)\n self._length = self._tailbitsused\n\n\n if component2 is not None:\n oldtail = self._lltail\n if isinstance(component2, CompositeBitarray):\n if self._lltail is component2._llhead:\n if self._tail_end != component2._offset:\n raise ProteusDataJoinError()\n\n if component2._is_single_llnode:\n self._tailbitsused += component2._tailbitsused\n else:\n self._tailbitsused = component2._tailbitsused\n self._lltail = component2._lltail\n self._length += len(component2)\n elif self._lltail.next is component2._llhead and\\\n self._tailoffset == 0 and\\\n component2._offset == 0:\n self._lltail = component2._lltail\n self._tailbitsused = component2._tailbitsused\n self._length += len(component2)\n elif component2._llhead.prev is not None or\\\n self._lltail.next is not None or\\\n component2._offset or self._tailoffset or\\\n self._llhead is component2._lltail:\n #Will not catch everything. Good enough to\n #prevent most accidents. A 'perfect' version\n #would require walking the whole tree. No way.\n raise ProteusDataJoinError()\n else:\n self._length += len(component2)\n self._lltail.next = component2._llhead\n self._lltail = component2._lltail\n self._tailbitsused = component2._tailbitsused\n else:\n if self._tailoffset or self._lltail.next is not None:\n raise ProteusDataJoinError()\n self._tailbitsused = len(component2)\n self._length += self._tailbitsused\n node = _DLLNode(component2)\n node.prev = self._lltail\n self._lltail = node\n\n\n #WHEN IT IS OK TO MERGE\n #oldtail can merge right if (oldtail is not head or offset is 0) and (oldtail.next is not tail or tailbitsused is len of node) and data is combinable. Do it recursive?\n #Merging can happen right until can't. Move back node and merge until can't. Repeat till new left node is incompatible.\n #if merging with the tail node, the tail node is fully used\n #Merge will start at seam, or have nothing to do.\n if oldtail is not self._llhead or self._offset == 0:\n self._do_merge(oldtail)", "docstring": "Create a bitarray object that stores its components by reference).\n\nArgs:\n *components: Any number of bitarray instances to store in this composition.", "source": "juraj_google_style"} -{"code": "def system_add_keyspace(self, ks_def):\n\n self._seqid += 1\n d = self._reqs[self._seqid] = defer.Deferred()\n self.send_system_add_keyspace(ks_def)\n return d", "docstring": "adds a keyspace and any column families that are part of it. 
returns the new schema id.\n\n Parameters:\n - ks_def", "source": "juraj_google_style"} -{"code": "def prune_graph(graph_str, package_name):\n\n # find nodes of interest\n g = read_dot(graph_str)\n nodes = set()\n\n for node, attrs in g.node_attr.iteritems():\n attr = [x for x in attrs if x[0] == \"label\"]\n if attr:\n label = attr[0][1]\n try:\n req_str = _request_from_label(label)\n request = PackageRequest(req_str)\n except PackageRequestError:\n continue\n\n if request.name == package_name:\n nodes.add(node)\n\n if not nodes:\n raise ValueError(\"The package %r does not appear in the graph.\"\n % package_name)\n\n # find nodes upstream from these nodes\n g_rev = g.reverse()\n accessible_nodes = set()\n access = accessibility(g_rev)\n for node in nodes:\n nodes_ = access.get(node, [])\n accessible_nodes |= set(nodes_)\n\n # remove inaccessible nodes\n inaccessible_nodes = set(g.nodes()) - accessible_nodes\n for node in inaccessible_nodes:\n g.del_node(node)\n\n return write_dot(g)", "docstring": "Prune a package graph so it only contains nodes accessible from the\n given package.\n\nArgs:\n graph_str (str): Dot-language graph string.\n package_name (str): Name of package of interest.\n\nReturns:\n Pruned graph, as a string.", "source": "juraj_google_style"} -{"code": "def authenticated_request(self, endpoint, method='GET', params=None, data=None):\n\n headers = {\n 'X-Access-Token' : self.access_token,\n 'X-Client-ID' : self.client_id\n }\n return self.api.request(endpoint, method=method, headers=headers, params=params, data=data)", "docstring": "Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type\n\n Params:\n endpoint -- API endpoint to send request to\n\n Keyword Args:\n method -- GET, PUT, PATCH, DELETE, etc.\n params -- parameters to encode in the request\n data -- data to send with the request", "source": "juraj_google_style"} -{"code": "def __init__(self, elements: Sequence[Union[DataSource, DataSink]], transformers: Iterable[DataTransformer] = None) -> None:\n\n if not elements:\n raise ValueError(\"Elements must be a non-empty sequence of DataSources and DataSinks\")\n\n if transformers is None:\n transformers = set()\n\n sources = set() # type: Set[DataSource]\n sinks = set() # type: Set[DataSink]\n targets = [] # type: List[Tuple[DataSource, Set[DataSink]]]\n for element in elements:\n if isinstance(element, DataSource):\n sources.add(element)\n targets.append((element, set(sinks)))\n\n if isinstance(element, DataSink):\n sinks.add(element)\n\n LOGGER.info(\"Beginning construction of type graph\")\n # noinspection PyTypeChecker\n self._type_graph = _build_type_graph(sources, sinks, transformers)\n LOGGER.info(\"Completed construction of type graph\")\n self._sources = targets\n self._sinks = sinks\n self._get_types = {}\n self._put_types = {}", "docstring": "Initializes a data pipeline.\n\nArgs:\n elements: The data stores and data sinks for this pipeline.\n transformers: The data transformers for this pipeline.", "source": "juraj_google_style"} -{"code": "def reindex(self, ind):\n\n\n if isinstance(ind, pd.MultiIndex):\n try:\n return self.new(self.data.reindex(ind))\n except:\n raise RuntimeError('QADATASTRUCT ERROR: CANNOT REINDEX')\n else:\n raise RuntimeError(\n 'QADATASTRUCT ERROR: ONLY ACCEPT MULTI-INDEX FORMAT'\n )", "docstring": "reindex\n\nArgs:\n ind {[type]} -- [description]\n\nRaises:\n RuntimeError -- [description]\n RuntimeError -- 
[description]\n\nReturns:\n [type] -- [description]", "source": "juraj_google_style"} -{"code": "def generate_markdown_doc(app_name, spec):\n\n\n # Apply standard headers.\n sections = [\n HEADER.format(app_name=app_name),\n SOURCES_HEADER.format(app_name=app_name)\n ]\n\n # Generate the sources section of the documentation\n sorted_labels = sorted(list(spec.sources))\n for label in sorted_labels:\n sections.append(\n _generate_source_section(label, spec.sources[label], app_name)\n )\n\n # Generate the config section.\n sections.append(CONFIG_HEADER.format(app_name=app_name))\n table_rows, item_sections = _generate_item_sections(\n _sorted_dict_values(spec.items),\n app_name\n )\n\n headers = {\n 'name': 'Name',\n 'type': 'Type',\n 'default': 'Default',\n 'description': 'Description'\n }\n\n sections.append(\n build_markdown_table(\n headers,\n table_rows,\n ['name', 'type', 'default', 'description'],\n )\n )\n for item_section in item_sections:\n sections.append(item_section)\n\n return '\\n'.join([section for section in sections])", "docstring": "Generate Markdown Documentation for the given spec/app name.\n\nArgs:\n app_name (str): The name of the application.\n spec (YapconfSpec): A yapconf specification with sources loaded.\n\n Returns (str):\n A valid, markdown string representation of the documentation for\n the given specification.", "source": "juraj_google_style"} -{"code": "def getToC(doc, simple = True):\n\n\n def recurse(olItem, liste, lvl):\n\n while olItem:\n if olItem.title:\n title = olItem.title\n else:\n title = \" \"\n\n if not olItem.isExternal:\n if olItem.uri:\n page = olItem.page + 1\n else:\n page = -1\n else:\n page = -1\n\n if not simple:\n link = getLinkDict(olItem)\n liste.append([lvl, title, page, link])\n else:\n liste.append([lvl, title, page])\n\n if olItem.down:\n liste = recurse(olItem.down, liste, lvl+1)\n olItem = olItem.next\n return liste\n\n # check if document is open and not encrypted\n if doc.isClosed:\n raise ValueError(\"illegal operation on closed document\")\n\n olItem = doc.outline\n\n if not olItem: return []\n lvl = 1\n liste = []\n return recurse(olItem, liste, lvl)", "docstring": "Create a table of contents.\n\nArgs:\n simple: a bool to control output. Returns a list, where each entry consists of outline level, title, page number and link destination (if simple = False). For details see PyMuPDF's documentation.", "source": "juraj_google_style"} -{"code": "def _initialize_memory(self, policy_params):\n\n # We store observation, action, policy parameters, and reward.\n template = (\n self._batch_env.observ[0],\n self._batch_env.action[0],\n tools.nested.map(lambda x: x[0, 0], policy_params),\n self._batch_env.reward[0])\n with tf.variable_scope('ppo_temporary'):\n self._current_episodes = parts.EpisodeMemory(\n template, len(self._batch_env), self._config.max_length, 'episodes')\n self._finished_episodes = parts.EpisodeMemory(\n template, self._config.update_every, self._config.max_length, 'memory')\n self._num_finished_episodes = tf.Variable(0, False)", "docstring": "Initialize temporary and permanent memory.\n\nArgs:\n policy_params: Nested tuple of policy parameters with all dimensions set.\n\n Initializes the attributes `self._current_episodes`,\n `self._finished_episodes`, and `self._num_finished_episodes`. The episodes\n memory serves to collect multiple episodes in parallel. Finished episodes\n are copied into the next free slot of the second memory. 
The memory index\n points to the next free slot.", "source": "juraj_google_style"} -{"code": "def format_time(\n self,\n hour_expression,\n minute_expression,\n second_expression=''\n ):\n\n hour = int(hour_expression)\n\n period = ''\n if self._options.use_24hour_time_format is False:\n period = \" PM\" if (hour >= 12) else \" AM\"\n if hour > 12:\n hour -= 12\n\n minute = str(int(minute_expression)) # !FIXME WUT ???\n second = ''\n if second_expression is not None and second_expression:\n second = \"{}{}\".format(\":\", str(int(second_expression)).zfill(2))\n\n return \"{0}:{1}{2}{3}\".format(str(hour).zfill(2), minute.zfill(2), second, period)", "docstring": "Given time parts, will contruct a formatted time description\n\nArgs:\n hour_expression: Hours part\n minute_expression: Minutes part\n second_expression: Seconds part\n\nReturns:\n Formatted time description", "source": "juraj_google_style"} -{"code": "def rvs(self, size=1):\n\n return np.random.multivariate_normal(self.mean, self.cov, size)", "docstring": "Convenience method to sample from this distribution.\n\nArgs:\n size (int or tuple): Shape of return value. Each element is drawn\n independently from this distribution.", "source": "juraj_google_style"} -{"code": "def store(self, df, attribute_columns):\n\n\n # ID start values depend on currently stored entities/attributes!\n entity_id_start = models.Entity.get_max_id(self.session) + 1\n attribute_id_start = models.Attribute.get_max_id(self.session) + 1\n\n # append ID and type columns\n df['id'] = range(entity_id_start, entity_id_start + len(df))\n df['type'] = self.type\n\n # store entities\n df[['id', 'type']].to_sql(name=models.Entity.__tablename__,\n con=self.client.engine,\n if_exists='append',\n index=False)\n\n # store attributes\n for col in attribute_columns:\n # ID column of df is the entity ID of the attribute\n attr_df = df[[col, 'id']].rename(columns={'id': 'entity_id',\n col: 'value'})\n attr_df['name'] = col\n\n # add entity ID column, need to respect already existing entities\n attr_df['id'] = range(attribute_id_start, attribute_id_start + len(df))\n attribute_id_start += len(df)\n\n # store\n attr_df.to_sql(name=models.Attribute.__tablename__,\n con=self.client.engine,\n if_exists='append',\n index=False)", "docstring": "Store entities and their attributes\n\nArgs:\n df (pandas.DataFrame): data to store (storing appends 'id' and 'type' columns!)\n attribute_columns (list(str)): list of column labels that define attributes", "source": "juraj_google_style"} -{"code": "def from_node(cls, node):\n\n if not isinstance(node, aioxmpp.stanza.Message):\n raise AttributeError(\"node must be a aioxmpp.stanza.Message instance\")\n msg = cls()\n msg._to = node.to\n msg._sender = node.from_\n if None in node.body:\n msg.body = node.body[None]\n else:\n for key in node.body.keys():\n msg.body = node.body[key]\n break\n\n for data in node.xep0004_data:\n if data.title == SPADE_X_METADATA:\n for field in data.fields:\n if field.var != \"_thread_node\":\n msg.set_metadata(field.var, field.values[0])\n else:\n msg.thread = field.values[0]\n\n return msg", "docstring": "Creates a new spade.message.Message from an aixoxmpp.stanza.Message\n\nArgs:\n node (aioxmpp.stanza.Message): an aioxmpp Message\n\nReturns:\n spade.message.Message: a new spade Message", "source": "juraj_google_style"} -{"code": "def set_slats_level(self, slatsLevel=0.0, shutterLevel=None):\n\n if shutterLevel is None:\n shutterLevel = self.shutterLevel\n data = {\n \"channelIndex\": 1,\n \"deviceId\": 
self.id,\n \"slatsLevel\": slatsLevel,\n \"shutterLevel\": shutterLevel,\n }\n return self._restCall(\"device/control/setSlatsLevel\", json.dumps(data))", "docstring": "sets the slats and shutter level\n\nArgs:\n slatsLevel(float): the new level of the slats. 0.0 = open, 1.0 = closed,\n shutterLevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed, None = use the current value\n\nReturns:\n the result of the _restCall", "source": "juraj_google_style"} -{"code": "def update(self, span: typing.Tuple[int, int], line_type: LineType) -> None:\n\n first_block_line, last_block_line = span\n for i in range(first_block_line, last_block_line + 1):\n try:\n self.__setitem__(i, line_type)\n except ValueError as error:\n raise ValidationError(i + self.fn_offset, 1, 'AAA99 {}'.format(error))", "docstring": "Updates line types for a block's span.\n\nArgs:\n span: First and last relative line number of a Block.\n line_type: The type of line to update to.\n\nRaises:\n ValidationError: A special error on collision. This prevents Flake8\n from crashing because it is converted to a Flake8 error tuple,\n but it indicates to the user that something went wrong with\n processing the function.", "source": "juraj_google_style"} -{"code": "def DtypeToNumberConverter(self, dtype):\n\n if np.issubdtype(dtype, np.datetime64):\n\n def DatetimesToNumbers(dt_list):\n return np.array([pd.Timestamp(dt).value for dt in dt_list])\n\n return DatetimesToNumbers\n elif np.issubdtype(dtype, np.timedelta64):\n\n def TimedetlasToNumbers(td_list):\n return np.array([pd.Timedelta(td).value for td in td_list])\n\n return TimedetlasToNumbers\n else:\n return None", "docstring": "Converts a Numpy dtype to a converter method if applicable.\n The converter method takes in a numpy array of objects of the provided\n dtype\n and returns a numpy array of the numbers backing that object for\n statistical\n analysis. Returns None if no converter is necessary.\n\nArgs:\n dtype: The numpy dtype to make a converter for.\n\nReturns:\n The converter method or None.", "source": "juraj_google_style"} -{"code": "def p44(msg):\n\n d = hex2bin(data(msg))\n\n if d[34] == '0':\n return None\n\n p = bin2int(d[35:46]) # hPa\n\n return p", "docstring": "Static pressure.\n\nArgs:\n msg (String): 28 bytes hexadecimal message string\n\nReturns:\n int: static pressure in hPa", "source": "juraj_google_style"} -{"code": "def EqualTo(self, value):\n\n self._awql = self._CreateSingleValueCondition(value, '=')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"equal to\".\n\nArgs:\n value: The value to be used in the WHERE condition.\n\nReturns:\n The query builder that this WHERE builder links to.", "source": "juraj_google_style"} -{"code": "def ws45(msg):\n\n d = hex2bin(data(msg))\n if d[3] == '0':\n return None\n\n ws = bin2int(d[4:6])\n return ws", "docstring": "Wind shear.\n\nArgs:\n msg (String): 28 bytes hexadecimal message string\n\nReturns:\n int: Wind shear level. 
0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "juraj_google_style"} -{"code": "def extract_numerics_alert(event):\n\n value = event.summary.value[0]\n debugger_plugin_metadata_content = None\n if value.HasField(\"metadata\"):\n plugin_data = value.metadata.plugin_data\n if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:\n debugger_plugin_metadata_content = plugin_data.content\n\n if not debugger_plugin_metadata_content:\n raise ValueError(\"Event proto input lacks debugger plugin SummaryMetadata.\")\n\n debugger_plugin_metadata_content = tf.compat.as_text(\n debugger_plugin_metadata_content)\n try:\n content_object = json.loads(debugger_plugin_metadata_content)\n device_name = content_object[\"device\"]\n except (KeyError, ValueError) as e:\n raise ValueError(\"Could not determine device from JSON string %r, %r\" %\n (debugger_plugin_metadata_content, e))\n\n debug_op_suffix = \":DebugNumericSummary\"\n if not value.node_name.endswith(debug_op_suffix):\n raise ValueError(\n \"Event proto input does not have the expected debug op suffix %s\" %\n debug_op_suffix)\n tensor_name = value.node_name[:-len(debug_op_suffix)]\n\n elements = tf_debug.load_tensor_from_event(event)\n nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX]\n neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX]\n pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX]\n if nan_count > 0 or neg_inf_count > 0 or pos_inf_count > 0:\n return NumericsAlert(\n device_name, tensor_name, event.wall_time, nan_count, neg_inf_count,\n pos_inf_count)\n return None", "docstring": "Determines whether a health pill event contains bad values.\n\n A bad value is one of NaN, -Inf, or +Inf.\n\nArgs:\n event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary`\n ops.\n\nReturns:\n An instance of `NumericsAlert`, if bad values are found.\n `None`, if no bad values are found.\n\nRaises:\n ValueError: if the event does not have the expected tag prefix or the\n debug op name is not the expected debug op name suffix.", "source": "juraj_google_style"} -{"code": "def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):\n\n return self._simple_command('flash', arg=partition, info_cb=info_cb,\n timeout_ms=timeout_ms)", "docstring": "Flashes the last downloaded file to the given partition.\n\nArgs:\n partition: Partition to flash.\n timeout_ms: Optional timeout in milliseconds to wait for it to finish.\n info_cb: See Download. 
Usually no messages.\n\nReturns:\n Response to a download request, normally nothing.", "source": "juraj_google_style"} -{"code": "def initialize_particle(rng, domain, fitness_function):\n\n position = rng.uniform(domain.lower, domain.upper, domain.dimension)\n fitness = fitness_function(position)\n return Particle(position=position,\n velocity=np.zeros(domain.dimension),\n fitness=fitness,\n best_fitness=fitness,\n best_position=position)", "docstring": "Initializes a particle within a domain.\n\nArgs:\n rng: numpy.random.RandomState: The random number generator.\n domain: cipy.problems.core.Domain: The domain of the problem.\n\nReturns:\n cipy.algorithms.pso.Particle: A new, fully initialized particle.", "source": "juraj_google_style"} -{"code": "def AnalyzeClient(self, client):\n\n\n client_id = self._ClientIdFromURN(client.urn)\n\n # Start with both the client id itself, and a universal keyword, used to\n # find all clients.\n #\n # TODO(user): Remove the universal keyword once we have a better way\n # to do this, i.e., once we have a storage library which can list all\n # clients directly.\n\n keywords = [self._NormalizeKeyword(client_id), \".\"]\n\n def TryAppend(prefix, keyword):\n precondition.AssertType(prefix, Text)\n if keyword:\n keyword_string = self._NormalizeKeyword(Text(keyword))\n keywords.append(keyword_string)\n if prefix:\n keywords.append(prefix + \":\" + keyword_string)\n\n def TryAppendPrefixes(prefix, keyword, delimiter):\n if keyword is None:\n return 0\n TryAppend(prefix, keyword)\n segments = keyword.split(delimiter)\n for i in range(1, len(segments)):\n TryAppend(prefix, delimiter.join(segments[0:i]))\n return len(segments)\n\n def TryAppendIP(ip):\n TryAppend(\"ip\", ip)\n # IP4v?\n if TryAppendPrefixes(\"ip\", str(ip), \".\") == 4:\n return\n # IP6v?\n TryAppendPrefixes(\"ip\", str(ip), \":\")\n\n def TryAppendMac(mac):\n TryAppend(\"mac\", mac)\n if len(mac) == 12:\n # If looks like a mac address without \":\" symbols, also add the keyword\n # with them.\n TryAppend(\"mac\", \":\".join([mac[i:i + 2] for i in range(0, 12, 2)]))\n\n s = client.Schema\n TryAppend(\"host\", client.Get(s.HOSTNAME))\n TryAppendPrefixes(\"host\", client.Get(s.HOSTNAME), \"-\")\n TryAppend(\"host\", client.Get(s.FQDN))\n TryAppendPrefixes(\"host\", client.Get(s.FQDN), \".\")\n TryAppend(\"\", client.Get(s.SYSTEM))\n TryAppend(\"\", client.Get(s.UNAME))\n TryAppend(\"\", client.Get(s.OS_RELEASE))\n TryAppend(\"\", client.Get(s.OS_VERSION))\n TryAppend(\"\", client.Get(s.KERNEL))\n TryAppend(\"\", client.Get(s.ARCH))\n\n kb = client.Get(s.KNOWLEDGE_BASE)\n if kb:\n for user in kb.users:\n TryAppend(\"user\", user.username)\n TryAppend(\"\", user.full_name)\n if user.full_name:\n for name in user.full_name.split():\n # full_name often includes nicknames and similar, wrapped in\n # punctuation, e.g. \"Thomas 'TJ' Jones\". 
We remove the most common\n # wrapping characters.\n TryAppend(\"\", name.strip(\"\\\"'()\"))\n\n for username in client.Get(s.USERNAMES, []):\n TryAppend(\"user\", username)\n\n for interface in client.Get(s.INTERFACES, []):\n if interface.mac_address:\n TryAppendMac(interface.mac_address.human_readable_address)\n for ip in interface.GetIPAddresses():\n TryAppendIP(ip)\n\n # We should have all mac and ip addresses already, but some test data only\n # has it attached directly, so just in case we look there also.\n if client.Get(s.MAC_ADDRESS):\n for mac in str(client.Get(s.MAC_ADDRESS)).split(\"\\n\"):\n TryAppendMac(mac)\n ip_list = client.Get(s.HOST_IPS, \"\")\n for ip in str(ip_list).split(\"\\n\"):\n TryAppendIP(ip)\n\n client_info = client.Get(s.CLIENT_INFO)\n if client_info:\n TryAppend(\"client\", client_info.client_name)\n TryAppend(\"client\", client_info.client_version)\n if client_info.labels:\n for label in client_info.labels:\n TryAppend(\"label\", label)\n\n for label in client.GetLabelsNames():\n TryAppend(\"label\", label)\n\n return client_id, keywords", "docstring": "Finds the client_id and keywords for a client.\n\nArgs:\n client: A VFSGRRClient record to find keywords for.\n\nReturns:\n A tuple (client_id, keywords) where client_id is the client identifier and\n keywords is a list of keywords related to client.", "source": "juraj_google_style"} -{"code": "def write(self, output):\n\n view_str = output.encode('ascii', 'ignore')\n if (len(view_str) > 0):\n self.m_ser.write(view_str)\n self.m_ser.flush()\n self.m_ser.reset_input_buffer()\n time.sleep(self.m_force_wait)\n pass", "docstring": "Passthrough for pyserial Serial.write().\n\nArgs:\n output (str): Block to write to port", "source": "juraj_google_style"} -{"code": "def read(self, len=1024, buffer=None):\n\n\n try:\n return self._wrap_socket_library_call(\n lambda: SSL_read(self._ssl.value, len, buffer), ERR_READ_TIMEOUT)\n except openssl_error() as err:\n if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:\n raise_ssl_error(ERR_PORT_UNREACHABLE, err)\n raise", "docstring": "Read data from connection\n\n Read up to len bytes and return them.\n\nArgs:\n len -- maximum number of bytes to read\n\n Return value:\n string containing read bytes", "source": "juraj_google_style"} -{"code": "def parse_clnsig(acc, sig, revstat, transcripts):\n\n clnsig_accsessions = []\n\n if acc:\n # New format of clinvar allways have integers as accession numbers\n try:\n acc = int(acc)\n except ValueError:\n pass\n # There are sometimes different separators so we need to check which\n # one to use\n if isinstance(acc, int):\n revstat_groups = []\n if revstat:\n revstat_groups = [rev.lstrip('_') for rev in revstat.split(',')]\n\n sig_groups = []\n if sig:\n for significance in sig.split('/'):\n splitted_word = significance.split('_')\n sig_groups.append(' '.join(splitted_word[:2]))\n\n for sign_term in sig_groups:\n clnsig_accsessions.append({\n 'value': sign_term,\n 'accession': int(acc),\n 'revstat': ', '.join(revstat_groups),\n })\n else:\n # There are sometimes different separators so we need to check which\n # one to use\n acc_groups = acc.split('|')\n sig_groups = sig.split('|')\n revstat_groups = revstat.split('|')\n for acc_group, sig_group, revstat_group in zip(acc_groups, sig_groups, revstat_groups):\n accessions = acc_group.split(',')\n significances = sig_group.split(',')\n revstats = revstat_group.split(',')\n for accession, significance, revstat in zip(accessions, significances, revstats):\n 
clnsig_accsessions.append({\n 'value': int(significance),\n 'accession': accession,\n 'revstat': revstat,\n })\n\n elif transcripts:\n clnsig = set()\n for transcript in transcripts:\n for annotation in transcript.get('clinsig', []):\n clnsig.add(annotation)\n for annotation in clnsig:\n clnsig_accsessions.append({'value': annotation})\n\n return clnsig_accsessions", "docstring": "Get the clnsig information\n\nArgs:\n acc(str): The clnsig accession number, raw from vcf\n sig(str): The clnsig significance score, raw from vcf\n revstat(str): The clnsig revstat, raw from vcf\n transcripts(iterable(dict))\n\nReturns:\n clnsig_accsessions(list): A list with clnsig accessions", "source": "juraj_google_style"} -{"code": "def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator):\n\n for idx, func in enumerate(iterator):\n assert callable(func), 'Test generators must yield callables, got %r' % (\n func,)\n if getattr(func, '__x_use_name__', False):\n new_name = func.__name__\n else:\n new_name = '%s%s%d' % (name, _SEPARATOR, idx)\n assert new_name not in dct, (\n 'Name of parameterized test case \"%s\" not unique' % (new_name,))\n dct[new_name] = func\n id_suffix[new_name] = getattr(func, '__x_extra_id__', '')", "docstring": "Adds individual test cases to a dictionary.\n\nArgs:\n dct: The target dictionary.\n id_suffix: The dictionary for mapping names to test IDs.\n name: The original name of the test case.\n iterator: The iterator generating the individual test cases.", "source": "juraj_google_style"} -{"code": "def ParseFileDownloadedRow(\n self, parser_mediator, query, row, **unused_kwargs):\n\n query_hash = hash(query)\n\n event_data = ChromeHistoryFileDownloadedEventData()\n event_data.full_path = self._GetRowValue(query_hash, row, 'target_path')\n event_data.offset = self._GetRowValue(query_hash, row, 'id')\n event_data.query = query\n event_data.received_bytes = self._GetRowValue(\n query_hash, row, 'received_bytes')\n event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')\n event_data.url = self._GetRowValue(query_hash, row, 'url')\n\n timestamp = self._GetRowValue(query_hash, row, 'start_time')\n date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a file downloaded row.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n query (str): query that created the row.\n row (sqlite3.Row): row.", "source": "juraj_google_style"} -{"code": "def SetHeaders(self, soap_headers, http_headers):\n\n self.suds_client.set_options(soapheaders=soap_headers, headers=http_headers)", "docstring": "Set the headers for the underlying client.\n\nArgs:\n soap_headers: A SOAP element for the SOAP headers.\n http_headers: A dictionary for the http headers.", "source": "juraj_google_style"} -{"code": "def fill_treeview(self, tree, input_dict):\n\n\n tree.model().removeRows(0, tree.model().rowCount())\n\n def add_element(item, key, value):\n child_name = QtWidgets.QStandardItem(key)\n\n if isinstance(value, dict):\n for key_child, value_child in value.items():\n add_element(child_name, key_child, value_child)\n item.appendRow(child_name)\n else:\n child_value = QtWidgets.QStandardItem(str(value))\n\n item.appendRow([child_name, child_value])\n\n for index, (key, value) in enumerate(input_dict.items()):\n\n 
if isinstance(value, dict):\n item = QtWidgets.QStandardItem(key)\n for sub_key, sub_value in value.items():\n add_element(item, sub_key, sub_value)\n tree.model().appendRow(item)\n elif isinstance(value, str):\n item = QtGui.QStandardItem(key)\n item_value = QtGui.QStandardItem(value)\n item_value.setEditable(True)\n item_value.setSelectable(True)\n tree.model().appendRow([item, item_value])", "docstring": "fills a treeview with nested parameters\n\nArgs:\n tree: QtWidgets.QTreeView\n parameters: dictionary or Parameter object\n\n Returns:", "source": "juraj_google_style"} -{"code": "def parse_cgmlst_alleles(cgmlst_fasta):\n\n out = defaultdict(list)\n for header, seq in parse_fasta(cgmlst_fasta):\n if not '|' in header:\n raise Exception('Unexpected format for cgMLST fasta file header. No \"|\" (pipe) delimiter present! Header=\"{}\"'.format(header))\n marker_name, allele_name = header.split('|')\n out[marker_name].append(seq)\n return out", "docstring": "Parse cgMLST alleles from fasta file\n cgMLST FASTA file must have a header format of \">{marker name}|{allele name}\"\n\nArgs:\n cgmlst_fasta (str): cgMLST fasta file path\n\nReturns:\n dict of list: Marker name to list of allele sequences", "source": "juraj_google_style"} -{"code": "def __init__(self, maximum_number_of_items=50000):\n\n super(_EventSourceHeap, self).__init__()\n self._heap = []\n self._maximum_number_of_items = maximum_number_of_items", "docstring": "Initializes an event source heap.\n\nArgs:\n maximum_number_of_items (Optional[int]): maximum number of items\n in the heap.", "source": "juraj_google_style"} -{"code": "def sections_list(self, cmd=None):\n\n sections = list(self.common.sections)\n if not cmd:\n if self.bare is not None:\n sections.extend(self.bare.sections)\n return sections\n return []\n sections.extend(self.subcmds[cmd].sections)\n if cmd in self._conf:\n sections.append(cmd)\n return sections", "docstring": "List of config sections used by a command.\n\nArgs:\n cmd (str): command name, set to ``None`` or ``''`` for the bare\n command.\n\nReturns:\n list of str: list of configuration sections used by that command.", "source": "juraj_google_style"} -{"code": "def github_belspec_files(spec_dir, force: bool = False):\n\n\n if not force:\n dtnow = datetime.datetime.utcnow()\n delta = datetime.timedelta(1)\n yesterday = dtnow - delta\n\n for fn in glob.glob(f\"{spec_dir}/bel*yaml\"):\n if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday:\n log.info(\"Skipping BEL Specification update - specs less than 1 day old\")\n return\n\n repo_url = \"https://api.github.com/repos/belbio/bel_specifications/contents/specifications\"\n params = {}\n github_access_token = os.getenv(\"GITHUB_ACCESS_TOKEN\", \"\")\n if github_access_token:\n params = {\"access_token\": github_access_token}\n\n r = requests.get(repo_url, params=params)\n if r.status_code == 200:\n results = r.json()\n for f in results:\n url = f[\"download_url\"]\n fn = os.path.basename(url)\n\n if \"yaml\" not in fn and \"yml\" in fn:\n fn = fn.replace(\"yml\", \"yaml\")\n\n r = requests.get(url, params=params, allow_redirects=True)\n if r.status_code == 200:\n open(f\"{spec_dir}/{fn}\", \"wb\").write(r.content)\n else:\n sys.exit(\n f\"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}\"\n )\n else:\n sys.exit(\n f\"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}\"\n )", "docstring": "Get belspec files from Github repo\n\nArgs:\n spec_dir: directory to 
store the BEL Specification and derived files\n force: force update of BEL Specifications from Github - skipped if local files less than 1 day old", "source": "juraj_google_style"} -{"code": "def drop_account(self, account_cookie):\n\n\n if account_cookie in self.account_list:\n res = self.account_list.remove(account_cookie)\n self.cash.append(\n self.cash[-1] + self.get_account_by_cookie(res).init_cash)\n return True\n else:\n raise RuntimeError(\n 'account {} is not in the portfolio'.format(account_cookie)\n )", "docstring": "删除一个account\n\nArgs:\n account_cookie {[type]} -- [description]\n\nRaises:\n RuntimeError -- [description]", "source": "juraj_google_style"} -{"code": "def artist_commentary_revert(self, id_, version_id):\n\n params = {'version_id': version_id}\n return self._get('artist_commentaries/{0}/revert.json'.format(id_),\n params, method='PUT', auth=True)", "docstring": "Revert artist commentary (Requires login) (UNTESTED).\n\n Parameters:\n id_ (int): The artist commentary id.\n version_id (int): The artist commentary version id to\n revert to.", "source": "juraj_google_style"} -{"code": "def do_hook_actions(self, actions, hook_type):\n\n logger.log_debug(\"call {} hook actions.\".format(hook_type))\n for action in actions:\n\n if isinstance(action, dict) and len(action) == 1:\n # format 1\n # {\"var\": \"${func()}\"}\n var_name, hook_content = list(action.items())[0]\n hook_content_eval = self.session_context.eval_content(hook_content)\n logger.log_debug(\n \"assignment with hook: {} = {} => {}\".format(\n var_name, hook_content, hook_content_eval\n )\n )\n self.session_context.update_test_variables(\n var_name, hook_content_eval\n )\n else:\n # format 2\n logger.log_debug(\"call hook function: {}\".format(action))\n # TODO: check hook function if valid\n self.session_context.eval_content(action)", "docstring": "call hook actions.\n\nArgs:\n actions (list): each action in actions list maybe in two format.\n\n format1 (dict): assignment, the value returned by hook function will be assigned to variable.\n {\"var\": \"${func()}\"}\n format2 (str): only call hook functions.\n ${func()}\n\n hook_type (enum): setup/teardown", "source": "juraj_google_style"} -{"code": "def __init__(self, logger):\n\n self.logger = logger\n self.oslogin_installed = True\n self.update_time = 0", "docstring": "Constructor.\n\nArgs:\n logger: logger object, used to write to SysLog and serial port.", "source": "juraj_google_style"} -{"code": "def add_permission(self, perm):\n\n self.Permissions(permission=perm)\n PermissionCache.flush()\n self.save()", "docstring": "Soyut Role Permission nesnesi tanımlamayı sağlar.\n\nArgs:\n perm (object):", "source": "juraj_google_style"} -{"code": "def asset(self, asset_id, asset_type, action='GET'):\n\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n if asset_type == 'PHONE':\n return self.tc_requests.adversary_phone_asset(\n self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n )\n if asset_type == 'HANDLER':\n return self.tc_requests.adversary_handle_asset(\n self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n )\n if asset_type == 'URL':\n return self.tc_requests.adversary_url_asset(\n self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n )\n self._tcex.handle_error(\n 925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n )\n return None", "docstring": "Gets the asset with the provided id\n\nArgs:\n asset_id: The id of the asset to be retrieved\n 
asset_type: (str) Either PHONE, HANDLER, or URL\n action:\n\n Returns:", "source": "juraj_google_style"} -{"code": "def __init__(self, server):\n\n self.ready = False\n self.server = server\n\n self.requests_seen = 0\n self.bytes_read = 0\n self.bytes_written = 0\n self.start_time = None\n self.work_time = 0\n self.stats = {\n 'Requests': lambda s: self.requests_seen + (\n self.start_time is None\n and trueyzero\n or self.conn.requests_seen\n ),\n 'Bytes Read': lambda s: self.bytes_read + (\n self.start_time is None\n and trueyzero\n or self.conn.rfile.bytes_read\n ),\n 'Bytes Written': lambda s: self.bytes_written + (\n self.start_time is None\n and trueyzero\n or self.conn.wfile.bytes_written\n ),\n 'Work Time': lambda s: self.work_time + (\n self.start_time is None\n and trueyzero\n or time.time() - self.start_time\n ),\n 'Read Throughput': lambda s: s['Bytes Read'](s) / (\n s['Work Time'](s) or 1e-6\n ),\n 'Write Throughput': lambda s: s['Bytes Written'](s) / (\n s['Work Time'](s) or 1e-6\n ),\n }\n threading.Thread.__init__(self)", "docstring": "Initialize WorkerThread instance.\n\nArgs:\n server (cheroot.server.HTTPServer): web server object\n receiving this request", "source": "juraj_google_style"} -{"code": "def find_exe(name, filepath=None):\n\n if filepath:\n if not os.path.exists(filepath):\n open(filepath) # raise IOError\n elif not os.path.isfile(filepath):\n raise RezBindError(\"not a file: %s\" % filepath)\n else:\n filepath = which(name)\n if not filepath:\n raise RezBindError(\"could not find executable: %s\" % name)\n\n return filepath", "docstring": "Find an executable.\n\nArgs:\n name: Name of the program, eg 'python'.\n filepath: Path to executable, a search is performed if None.\n\nReturns:\n Path to the executable if found, otherwise an error is raised.", "source": "juraj_google_style"} -{"code": "def isfile(self, path, follow_symlinks=True):\n\n return self._is_of_type(path, S_IFREG, follow_symlinks)", "docstring": "Determine if path identifies a regular file.\n\nArgs:\n path: Path to filesystem object.\n\nReturns:\n `True` if path points to a regular file (following symlinks).\n\nRaises:\n TypeError: if path is None.", "source": "juraj_google_style"} -{"code": "def get_logging_metric_hook(benchmark_log_dir=None,\n tensors_to_log=None,\n every_n_secs=600,\n **kwargs): # pylint: disable=unused-argument\n\n if benchmark_log_dir is None:\n raise ValueError(\"metric_log_dir should be provided to use metric logger\")\n if tensors_to_log is None:\n tensors_to_log = _TENSORS_TO_LOG\n return metric_hook.LoggingMetricHook(\n tensors=tensors_to_log,\n log_dir=benchmark_log_dir,\n every_n_secs=every_n_secs)", "docstring": "Function to get LoggingMetricHook.\n\nArgs:\n benchmark_log_dir: `string`, directory path to save the metric log.\n tensors_to_log: List of tensor names or dictionary mapping labels to tensor\n names. If not set, log _TENSORS_TO_LOG by default.\n every_n_secs: `int`, the frequency for logging the metric. 
Default to every\n 10 mins.\n\nReturns:\n Returns a ProfilerHook that writes out timelines that can be loaded into\n profiling tools like chrome://tracing.", "source": "juraj_google_style"} -{"code": "def add_points(self, points, meth='s'):\n\n if not isinstance(points[0], (list, np.ndarray)):\n points = [[points[0]], [points[1]], [points[2]]]\n points = np.array(points)\n if meth == 's':\n if len(points[0]) == 1:\n pnts = np.array([[points[0][0]],\n [points[1][0]], [points[2][0]]])\n pnts = np.append(pnts, points, axis=1)\n else:\n pnts = points\n self.points.append(pnts)\n self.point_style.append('s')\n elif meth == 'l':\n self.points.append(points)\n self.point_style.append('l')\n else:\n self.points.append(points)\n self.point_style.append('m')", "docstring": "Add a list of data points to bloch sphere.\n\nArgs:\n points (array_like):\n Collection of data points.\n meth (str):\n Type of points to plot, use 'm' for multicolored, 'l' for points\n connected with a line.", "source": "juraj_google_style"} -{"code": "def transform_to_mods_mono(marc_xml, uuid, url):\n\n marc_xml = _read_content_or_path(marc_xml)\n\n transformed = xslt_transformation(\n marc_xml,\n _absolute_template_path(\"MARC21slim2MODS3-4-NDK.xsl\")\n )\n\n return _apply_postprocessing(\n marc_xml=marc_xml,\n xml=transformed,\n func=mods_postprocessor.postprocess_monograph,\n uuid=uuid,\n url=url,\n )", "docstring": "Convert `marc_xml` to MODS data format.\n\nArgs:\n marc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\n filename.\n uuid (str): UUID string giving the package ID.\n url (str): URL of the publication (public or not).\n\nReturns:\n list: Collection of transformed xml strings.", "source": "juraj_google_style"} -{"code": "def solve_tsp(V,c):\n\n\n def addcut(cut_edges):\n G = networkx.Graph()\n G.add_edges_from(cut_edges)\n Components = list(networkx.connected_components(G))\n if len(Components) == 1:\n return False\n model.freeTransform()\n for S in Components:\n model.addCons(quicksum(x[i,j] for i in S for j in S if j>i) <= len(S)-1)\n print(\"cut: len(%s) <= %s\" % (S,len(S)-1))\n return True\n\n\n def addcut2(cut_edges):\n G = networkx.Graph()\n G.add_edges_from(cut_edges)\n Components = list(networkx.connected_components(G))\n\n if len(Components) == 1:\n return False\n model.freeTransform()\n for S in Components:\n T = set(V) - set(S)\n print(\"S:\",S)\n print(\"T:\",T)\n model.addCons(quicksum(x[i,j] for i in S for j in T if j>i) +\n quicksum(x[i,j] for i in T for j in S if j>i) >= 2)\n print(\"cut: %s >= 2\" % \"+\".join([(\"x[%s,%s]\" % (i,j)) for i in S for j in T if j>i]))\n return True\n\n # main part of the solution process:\n model = Model(\"tsp\")\n\n model.hideOutput() # silent/verbose mode\n x = {}\n for i in V:\n for j in V:\n if j > i:\n x[i,j] = model.addVar(ub=1, name=\"x(%s,%s)\"%(i,j))\n\n for i in V:\n model.addCons(quicksum(x[j,i] for j in V if j < i) + \\\n quicksum(x[i,j] for j in V if j > i) == 2, \"Degree(%s)\"%i)\n\n model.setObjective(quicksum(c[i,j]*x[i,j] for i in V for j in V if j > i), \"minimize\")\n\n EPS = 1.e-6\n isMIP = False\n while True:\n model.optimize()\n edges = []\n for (i,j) in x:\n if model.getVal(x[i,j]) > EPS:\n edges.append( (i,j) )\n\n if addcut(edges) == False:\n if isMIP: # integer variables, components connected: solution found\n break\n model.freeTransform()\n for (i,j) in x: # all components connected, switch to integer model\n model.chgVarType(x[i,j], \"B\")\n isMIP = True\n\n return model.getObjVal(),edges", "docstring": "solve_tsp -- 
solve the traveling salesman problem\n - start with assignment model\n - add cuts until there are no sub-cycles\n Parameters:\n - V: set/list of nodes in the graph\n - c[i,j]: cost for traversing edge (i,j)\n Returns the optimum objective value and the list of edges used.", "source": "juraj_google_style"} -{"code": "def fill(self, config, section):\n\n if config.has_section(section):\n default_url = self.DEFAULT_REPOSITORIES.get(self.name, '')\n self.url = RepositoryURL(config_get(config, section, 'repository', default_url))\n self.username = config_get(config, section, 'username', '')\n self.password = config_get(config, section, 'password', '')", "docstring": "Fill data from a given configuration section.\n\nArgs:\n config (configparser): the configuration file\n section (str): the section to use", "source": "juraj_google_style"} -{"code": "def FindExtensionByName(self, full_name):\n\n full_name = _NormalizeFullyQualifiedName(full_name)\n message_name, _, extension_name = full_name.rpartition('.')\n try:\n # Most extensions are nested inside a message.\n scope = self.FindMessageTypeByName(message_name)\n except KeyError:\n # Some extensions are defined at file scope.\n scope = self.FindFileContainingSymbol(full_name)\n return scope.extensions_by_name[extension_name]", "docstring": "Loads the named extension descriptor from the pool.\n\nArgs:\n full_name: The full name of the extension descriptor to load.\n\nReturns:\n A FieldDescriptor, describing the named extension.", "source": "juraj_google_style"} -{"code": "def readTraining(self, training_file):\n\n\n logger.info('reading training from file')\n training_pairs = json.load(training_file,\n cls=serializer.dedupe_decoder)\n self.markPairs(training_pairs)", "docstring": "Read training from previously built training data file object\n\nArgs:\n training_file -- file object containing the training data", "source": "juraj_google_style"} -{"code": "def set_cn_energies( self, cn_energies ):\n\n for site in self.sites:\n site.set_cn_occupation_energies( cn_energies[ site.label ] )\n self.cn_energies = cn_energies", "docstring": "Set the coordination number dependent energies for this lattice.\n\nArgs:\n cn_energies (Dict(Str:Dict(Int:Float))): Dictionary of dictionaries specifying the coordination number dependent energies for each site type. e.g.::\n\n { 'A' : { 0 : 0.0, 1 : 1.0, 2 : 2.0 }, 'B' : { 0 : 0.0, 1 : 2.0 } }\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def get_numeric_feature_names(example):\n\n numeric_features = ('float_list', 'int64_list')\n features = get_example_features(example)\n return sorted([\n feature_name for feature_name in features\n if features[feature_name].WhichOneof('kind') in numeric_features\n ])", "docstring": "Returns a list of feature names for float and int64 type features.\n\nArgs:\n example: An example.\n\nReturns:\n A list of strings of the names of numeric features.", "source": "juraj_google_style"} -{"code": "def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu,\n name):\n\n layer_components = [\n conv.Conv2D(\n output_channels, [3, 3],\n initializers=self._initializers,\n regularizers=self._regularizers,\n rate=dilation_rate,\n name=\"dilated_conv_\" + name),\n ]\n if apply_relu:\n layer_components.append(lambda net: tf.nn.relu(net, name=\"relu_\" + name))\n return sequential.Sequential(layer_components, name=name)", "docstring": "Create a dilated convolution layer.\n\nArgs:\n output_channels: int. Number of output channels for each pixel.\n dilation_rate: int. 
Represents how many pixels each stride offset will\n move. A value of 1 indicates a standard convolution.\n apply_relu: bool. If True, a ReLU non-linearlity is added.\n name: string. Name for layer.\n\nReturns:\n a sonnet Module for a dilated convolution.", "source": "juraj_google_style"} -{"code": "def dbmax_stddev(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `dbmax_stddev`'.format(value))\n\n self._dbmax_stddev = value", "docstring": "Corresponds to IDD Field `dbmax_stddev`\n Standard deviation of extreme annual maximum dry-bulb temperature\n\nArgs:\n value (float): value for IDD Field `dbmax_stddev`\n Unit: C\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def get_best_blockhash(self, id=None, endpoint=None):\n\n return self._call_endpoint(GET_BEST_BLOCK_HASH, id=id, endpoint=endpoint)", "docstring": "Get the hash of the highest block\n\nArgs:\n id: (int, optional) id to use for response tracking\n endpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\n json object of the result or the error encountered in the RPC call", "source": "juraj_google_style"} -{"code": "def __init__(self, message_type, name=None,\n indexed_fields=None, protocol=None, **kwds):\n\n if not (isinstance(message_type, type) and\n issubclass(message_type, messages.Message)):\n raise TypeError('MessageProperty argument must be a Message subclass')\n self._message_type = message_type\n if indexed_fields is not None:\n # TODO: Check they are all strings naming fields.\n self._indexed_fields = tuple(indexed_fields)\n # NOTE: Otherwise the class default i.e. 
(), prevails.\n if protocol is None:\n protocol = _default_protocol\n self._protocol = protocol\n self._protocol_impl = _protocols_registry.lookup_by_name(protocol)\n blob_prop = model.BlobProperty('__%s__' % self._protocol)\n # TODO: Solve this without reserving 'blob_'.\n message_class = _make_model_class(message_type, self._indexed_fields,\n blob_=blob_prop)\n super(MessageProperty, self).__init__(message_class, name, **kwds)", "docstring": "Constructor.\n\nArgs:\n message_tyoe: A subclass of protorpc.messages.Message.\n name: Optional datastore name (defaults to the property name).\n indexed_fields: Optional list of dotted and undotted field names.\n protocol: Optional protocol name default 'protobuf'.\n\n Additional keywords arguments specify the same options as\n supported by StructuredProperty, except 'indexed'.", "source": "juraj_google_style"} -{"code": "def swapdim(P, dim1=1, dim2=0):\n\n if not isinstance(P, Poly):\n return numpy.swapaxes(P, dim1, dim2)\n\n dim = P.dim\n shape = P.shape\n dtype = P.dtype\n\n if dim1==dim2:\n return P\n\n m = max(dim1, dim2)\n if P.dim <= m:\n P = chaospy.poly.dimension.setdim(P, m+1)\n dim = m+1\n\n A = {}\n\n for key in P.keys:\n\n val = P.A[key]\n key = list(key)\n key[dim1], key[dim2] = key[dim2], key[dim1]\n A[tuple(key)] = val\n\n return Poly(A, dim, shape, dtype)", "docstring": "Swap the dim between two variables.\n\nArgs:\n P (Poly):\n Input polynomial.\n dim1 (int):\n First dim\n dim2 (int):\n Second dim.\n\nReturns:\n (Poly):\n Polynomial with swapped dimensions.\n\nExample:\n >>> x,y = variable(2)\n >>> P = x**4-y\n >>> print(P)\n q0^4-q1\n >>> print(swapdim(P))\n q1^4-q0", "source": "juraj_google_style"} -{"code": "def get_enterprise_user_id(self, obj):\n\n # An enterprise learner can not belong to multiple enterprise customer at the same time\n # but if such scenario occurs we will pick the first.\n enterprise_learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first()\n\n return enterprise_learner and enterprise_learner.id", "docstring": "Get enterprise user id from user object.\n\nArgs:\n obj (User): Django User object\n\nReturns:\n (int): Primary Key identifier for enterprise user object.", "source": "juraj_google_style"} -{"code": "def _split_generators(self, dl_manager):\n\n splits = super(Imagenet2012Corrupted, self)._split_generators(dl_manager)\n validation = splits[1]\n return [validation]", "docstring": "Return the validation split of ImageNet2012.\n\nArgs:\n dl_manager: download manager object.\n\nReturns:\n validation split.", "source": "juraj_google_style"} -{"code": "def scatter_matrix(df,theme=None,bins=10,color='grey',size=2):\n\n\tif not theme:\n\t\ttheme = auth.get_config_file()['theme']\n\n\tfigs=[]\n\tfor i in df.columns:\n\t\tfor j in df.columns:\n\t\t\tif i==j:\n\t\t\t\tfig=df.iplot(kind='histogram',keys=[i],asFigure=True,bins=bins)\n\t\t\t\tfigs.append(fig)\n\t\t\telse:\n\t\t\t\tfigs.append(df.iplot(kind='scatter',mode='markers',x=j,y=i,asFigure=True,size=size,colors=[color]))\n\tlayout=getLayout(theme)\n\tlayout['xaxis'].update(showgrid=False)\n\tlayout['yaxis'].update(showgrid=False)\n\tsm=subplots(figs,shape=(len(df.columns),len(df.columns)),shared_xaxes=False,shared_yaxes=False,\n\t\t\t\t\t horizontal_spacing=.05,vertical_spacing=.07,base_layout=layout)\n\tsm['layout'].update(bargap=.02,showlegend=False)\n\treturn sm", "docstring": "Displays a matrix with scatter plot for each pair of\n Series in the DataFrame.\n The diagonal shows a histogram for each of the Series\n\n Parameters:\n 
-----------\n df : DataFrame\n Pandas DataFrame\n theme : string\n Theme to be used (if not the default)\n bins : int\n Number of bins to use for histogram\n color : string\n Color to be used for each scatter plot\n size : int\n Size for each marker on the scatter plot", "source": "juraj_google_style"} -{"code": "def create(labels=None, **kw):\n\n if labels is not None:\n kw[u'labels'] = encoding.PyValueToMessage(MetricValue.LabelsValue,\n labels)\n return MetricValue(**kw)", "docstring": "Constructs a new metric value.\n\n This acts as an alternate to MetricValue constructor which\n simplifies specification of labels. Rather than having to create\n a MetricValue.Labels instance, all that's necessary to specify the\n required string.\n\nArgs:\n labels (dict([string, [string]]):\n **kw: any other valid keyword args valid in the MetricValue constructor\n\n Returns\n :class:`MetricValue`: the created instance", "source": "juraj_google_style"} -{"code": "def handle_api_explorer_request(self, request, start_response):\n\n redirect_url = self._get_explorer_redirect_url(\n request.server, request.port, request.base_path)\n return util.send_wsgi_redirect_response(redirect_url, start_response)", "docstring": "Handler for requests to {base_path}/explorer.\n\n This calls start_response and returns the response body.\n\nArgs:\n request: An ApiRequest, the request from the user.\n start_response: A function with semantics defined in PEP-333.\n\nReturns:\n A string containing the response body (which is empty, in this case).", "source": "juraj_google_style"} -{"code": "def parse(self, argument):\n\n if not isinstance(argument, six.string_types):\n raise TypeError('flag value must be a string, found \"{}\"'.format(\n type(argument)))\n return argument", "docstring": "Parses the string argument and returns the native value.\n\n By default it returns its argument unmodified.\n\nArgs:\n argument: string argument passed in the commandline.\n\nRaises:\n ValueError: Raised when it fails to parse the argument.\n TypeError: Raised when the argument has the wrong type.\n\nReturns:\n The parsed value in native type.", "source": "juraj_google_style"} -{"code": "def next_power_of_2(x):\n\n\n power_of_2 = 1 if x == 0 else 2 ** np.ceil(np.log2(x))\n return power_of_2", "docstring": "Finds the next power of 2 value\n\nArgs:\n x: Input value\n\nReturns:\n power_of_2: Next power of 2 value", "source": "juraj_google_style"} -{"code": "def from_json(cls, key, scopes, subject=None):\n\n credentials_type = key['type']\n if credentials_type != 'service_account':\n raise ValueError('key: expected type service_account '\n '(got %s)' % credentials_type)\n email = key['client_email']\n key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,\n key['private_key'])\n\n return cls(key=key, email=email, scopes=scopes, subject=subject)", "docstring": "Alternate constructor intended for using JSON format of private key.\n\nArgs:\n key (dict) - Parsed JSON with service account credentials.\n scopes (Union[str, collections.Iterable[str]]) -\n List of permissions that the application requests.\n subject (str) - The email address of the user for which\n the application is requesting delegated access.\n\nReturns:\n ServiceAccount", "source": "juraj_google_style"} -{"code": "def __init__(self, api_url='https://api.paystack.co/',\n headers=None):\n\n self.API_BASE_URL = '{api_url}'.format(**locals())\n self.headers = headers", "docstring": "Initialize Paystack Request object for browsing resource.\n\nArgs:\n api_url: str\n headers: 
dict", "source": "juraj_google_style"} -{"code": "def _to_proto_sparse_tensor(sparse_tensor, nested_proto,\n process_leafs, already_processed):\n\n already_processed.add(id(sparse_tensor))\n nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME\n for str_key in _SPARSE_TENSOR_FIELD:\n tensor = getattr(sparse_tensor, str_key)\n nested_proto.named_tuple.map[str_key].value = process_leafs(tensor)", "docstring": "Serializes a `tf.SparseTensor` into `nested_proto`.\n\nArgs:\n sparse_tensor: An instance of `tf.SparseTensor`.\n nested_proto: A `module_pb2.NestedData` instance to be filled from\n `sparse_tensor`.\n process_leafs: A function to be applied to the leaf valued of the nested\n structure.\n already_processed: Set of already processed objects (used to avoid\n infinite recursion).", "source": "juraj_google_style"} -{"code": "def run_the_target(G, target, settings):\n\n sprint = settings[\"sprint\"]\n sprint(\"Running target {}\".format(target))\n the_formula = get_the_node_dict(G, target)[\"formula\"]\n run_commands(the_formula, settings)", "docstring": "Wrapper function that sends to commands in a target's 'formula'\n to run_commands()\n\nArgs:\n The graph we are going to build\n The target to run\n The settings dictionary", "source": "juraj_google_style"} -{"code": "def hex_to_hsv(color):\n\n color = normalize(color)\n color = color[1:]\n # color=tuple(ord(c)/255.0 for c in color.decode('hex'))\n color = (int(color[0:2], base=16) / 255.0, int(color[2:4],\n base=16) / 255.0, int(color[4:6], base=16) / 255.0)\n return colorsys.rgb_to_hsv(*color)", "docstring": "Converts from hex to hsv\n\n Parameters:\n -----------\n color : string\n Color representation on color\n\nExample:\n hex_to_hsv('#ff9933')", "source": "juraj_google_style"} -{"code": "def from_json(cls, json_data):\n\n data = json.loads(_helpers._from_bytes(json_data))\n if (data.get('token_expiry') and\n not isinstance(data['token_expiry'], datetime.datetime)):\n try:\n data['token_expiry'] = datetime.datetime.strptime(\n data['token_expiry'], EXPIRY_FORMAT)\n except ValueError:\n data['token_expiry'] = None\n retval = cls(\n data['access_token'],\n data['client_id'],\n data['client_secret'],\n data['refresh_token'],\n data['token_expiry'],\n data['token_uri'],\n data['user_agent'],\n revoke_uri=data.get('revoke_uri', None),\n id_token=data.get('id_token', None),\n id_token_jwt=data.get('id_token_jwt', None),\n token_response=data.get('token_response', None),\n scopes=data.get('scopes', None),\n token_info_uri=data.get('token_info_uri', None))\n retval.invalid = data['invalid']\n return retval", "docstring": "Instantiate a Credentials object from a JSON description of it.\n\n The JSON should have been produced by calling .to_json() on the object.\n\nArgs:\n json_data: string or bytes, JSON to deserialize.\n\nReturns:\n An instance of a Credentials subclass.", "source": "juraj_google_style"} -{"code": "def generate_defect_structure(self, supercell=(1, 1, 1)):\n\n defect_structure = self.bulk_structure.copy()\n defect_structure.make_supercell(supercell)\n\n # consider modifying velocity property to make sure defect site is decorated\n # consistently with bulk structure for final defect_structure\n defect_properties = self.site.properties.copy()\n if ('velocities' in self.bulk_structure.site_properties) and \\\n 'velocities' not in defect_properties:\n if all( vel == self.bulk_structure.site_properties['velocities'][0]\n for vel in self.bulk_structure.site_properties['velocities']):\n defect_properties['velocities'] = 
self.bulk_structure.site_properties['velocities'][0]\n else:\n raise ValueError(\"No velocity property specified for defect site and \"\n \"bulk_structure velocities are not homogeneous. Please specify this \"\n \"property within the initialized defect_site object.\")\n\n #create a trivial defect structure to find where supercell transformation moves the defect site\n site_properties_for_fake_struct = {prop: [val] for prop,val in defect_properties.items()}\n struct_for_defect_site = Structure( self.bulk_structure.copy().lattice,\n [self.site.specie],\n [self.site.frac_coords],\n to_unit_cell=True,\n site_properties = site_properties_for_fake_struct)\n struct_for_defect_site.make_supercell(supercell)\n defect_site = struct_for_defect_site[0]\n\n defect_structure.append(self.site.specie.symbol, defect_site.coords, coords_are_cartesian=True,\n properties = defect_site.properties)\n defect_structure.set_charge(self.charge)\n return defect_structure", "docstring": "Returns Defective Interstitial structure, decorated with charge\n\nArgs:\n supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix", "source": "juraj_google_style"} -{"code": "def linear_interpolate_rank(tensor1, tensor2, coeffs, rank=1):\n\n # sum across space, max across channels.\n _, _, _, num_channels = common_layers.shape_list(tensor1)\n diff_sq_sum = tf.reduce_sum((tensor1 - tensor2)**2, axis=(0, 1, 2))\n _, feature_ranks = tf.math.top_k(diff_sq_sum, k=rank)\n feature_rank = feature_ranks[-1]\n channel_inds = tf.range(num_channels, dtype=tf.int32)\n channel_mask = tf.equal(channel_inds, feature_rank)\n ones_t = tf.ones(num_channels, dtype=tf.float32)\n zeros_t = tf.zeros(num_channels, dtype=tf.float32)\n\n interp_tensors = []\n for coeff in coeffs:\n curr_coeff = tf.where(channel_mask, coeff * ones_t, zeros_t)\n interp_tensor = tensor1 + curr_coeff * (tensor2 - tensor1)\n interp_tensors.append(interp_tensor)\n return tf.concat(interp_tensors, axis=0)", "docstring": "Linearly interpolate channel at \"rank\" between two tensors.\n\n The channels are ranked according to their L2 norm between tensor1[channel]\n and tensor2[channel].\n\nArgs:\n tensor1: 4-D Tensor, NHWC\n tensor2: 4-D Tensor, NHWC\n coeffs: list of floats.\n rank: integer.\n\nReturns:\n interp_latents: list of interpolated 4-D Tensors, shape=(NHWC)", "source": "juraj_google_style"} -{"code": "def get_type_from_api_entity(self, api_entity):\n\n merged = self.group_types_data.copy()\n merged.update(self.indicator_types_data)\n print(merged)\n for (key, value) in merged.items():\n if value.get('apiEntity') == api_entity:\n return key\n return None", "docstring": "Returns the object type as a string given a api entity.\n\nArgs:\n api_entity:\n\n Returns:", "source": "juraj_google_style"} -{"code": "def save(self, path=None, complevel=1, complib='zlib'):\n\n if path is None:\n path = self.hexuid + '.hdf5'\n elif os.path.isdir(path):\n path += os.sep + self.hexuid + '.hdf5'\n elif not (path.endswith('.hdf5') or path.endswith('.hdf')):\n raise ValueError('File path must have a \".hdf5\" or \".hdf\" extension.')\n with pd.HDFStore(path, 'w', complevel=complevel, complib=complib) as store:\n store['kwargs'] = pd.Series()\n store.get_storer('kwargs').attrs.metadata = self._rel()\n fc = 0 # Field counter (see special handling of fields below)\n for name, data in self._data().items():\n if hasattr(data, '_revert_categories'):\n data._revert_categories()\n name = name[1:] if name.startswith('_') else name\n if isinstance(data, Field): # Fields are 
handled separately\n fname = 'FIELD{}_'.format(fc) + name + '/'\n store[fname + 'data'] = pd.DataFrame(data)\n for i, field in enumerate(data.field_values):\n ffname = fname + 'values' + str(i)\n if isinstance(field, pd.Series):\n store[ffname] = pd.Series(field)\n else:\n store[ffname] = pd.DataFrame(field)\n fc += 1\n elif isinstance(data, Series):\n s = pd.Series(data)\n if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):\n s = s.astype('O')\n store[name] = s\n elif isinstance(data, DataFrame):\n store[name] = pd.DataFrame(data)\n elif isinstance(data, SparseSeries):\n s = pd.SparseSeries(data)\n if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):\n s = s.astype('O')\n store[name] = s\n elif isinstance(data, SparseDataFrame):\n store[name] = pd.SparseDataFrame(data)\n else:\n if hasattr(data, 'dtype') and isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):\n data = data.astype('O')\n else:\n for col in data:\n if isinstance(data[col].dtype, pd.types.dtypes.CategoricalDtype):\n data[col] = data[col].astype('O')\n store[name] = data\n if hasattr(data, '_set_categories'):\n data._set_categories()", "docstring": "Save the container as an HDF5 archive.\n\nArgs:\n path (str): Path where to save the container", "source": "juraj_google_style"} -{"code": "def _compile_output_step(outputs):\n\n if not outputs:\n raise GraphQLCompilationError(u'No fields were selected for output! Please mark at least '\n u'one field with the @output directive.')\n\n output_fields = {}\n for output_name, output_context in six.iteritems(outputs):\n location = output_context['location']\n optional = output_context['optional']\n graphql_type = output_context['type']\n\n expression = None\n existence_check = None\n # pylint: disable=redefined-variable-type\n if isinstance(location, FoldScopeLocation):\n if optional:\n raise AssertionError(u'Unreachable state reached, optional in fold: '\n u'{}'.format(output_context))\n\n if location.field == COUNT_META_FIELD_NAME:\n expression = expressions.FoldCountContextField(location)\n else:\n expression = expressions.FoldedContextField(location, graphql_type)\n else:\n expression = expressions.OutputContextField(location, graphql_type)\n\n if optional:\n existence_check = expressions.ContextFieldExistence(location.at_vertex())\n\n if existence_check:\n expression = expressions.TernaryConditional(\n existence_check, expression, expressions.NullLiteral)\n # pylint: enable=redefined-variable-type\n\n output_fields[output_name] = expression\n\n return blocks.ConstructResult(output_fields)", "docstring": "Construct the final ConstructResult basic block that defines the output format of the query.\n\nArgs:\n outputs: dict, output name (string) -> output data dict, specifying the location\n from where to get the data, and whether the data is optional (and therefore\n may be missing); missing optional data is replaced with 'null'\n\nReturns:\n a ConstructResult basic block that constructs appropriate outputs for the query", "source": "juraj_google_style"} -{"code": "def process_files(self, path, recursive=False):\n\n self._logger.info('Processing files in \"%s\"', path)\n\n for (path, file) in files_generator(path, recursive):\n if not file.endswith(BATCH_EXTENSION):\n self.process_file(os.path.join(path, file))", "docstring": "Apply normalizations over all files in the given directory.\n\n Iterate over all files in a given directory. 
Normalizations\n will be applied to each file, storing the result in a new file.\n The extension for the new file will be the one defined in\n BATCH_EXTENSION.\n\nArgs:\n path: Path to the directory.\n recursive: Whether to find files recursively or not.", "source": "juraj_google_style"} -{"code": "def get_config_file(basename):\n\n locations = [\n os.path.join(os.curdir, basename),\n os.path.join(\n os.path.expanduser(\"~\"),\n \".config\",\n \"scriptabit\",\n basename),\n resource_filename(\n Requirement.parse(\"scriptabit\"),\n os.path.join('scriptabit', basename))\n ]\n\n for location in locations:\n if os.path.isfile(location):\n return location", "docstring": "Looks for a configuration file in 3 locations:\n\n - the current directory\n - the user config directory (~/.config/scriptabit)\n - the version installed with the package (using setuptools resource API)\n\nArgs:\n basename (str): The base filename.\n\nReturns:\n str: The full path to the configuration file.", "source": "juraj_google_style"} -{"code": "def __init__(self, events_per_batch=25000):\n\n assert isinstance(events_per_batch, int), \\\n \"Events per batch must be integer.\"\n assert events_per_batch > 0, \"Events per batch must be positive\"\n self.events_per_batch = events_per_batch\n self.count = 0\n self.stores = []", "docstring": "Construct a persisted event store that is stored on disk.\n\n Parameters:\n events_per_batch -- number of events stored in a batch before rotating\n the files. Defaults to 25000. That number is\n arbitrary and should probably be configures so that\n files do not grow out of proportion.", "source": "juraj_google_style"} -{"code": "def permute(self, ordering: np.ndarray, *, axis: int) -> None:\n\n\t\tif axis == 0:\n\t\t\tself.values = self.values[ordering, :]\n\t\telif axis == 1:\n\t\t\tself.values = self.values[:, ordering]\n\t\telse:\n\t\t\traise ValueError(\"axis must be 0 or 1\")", "docstring": "Permute the layer along an axis\n\nArgs:\n axis: The axis to permute (0, permute the rows; 1, permute the columns)\n ordering: The permutation vector", "source": "juraj_google_style"} -{"code": "def parse(self, rrstr):\n # type: (bytes) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('ER record already initialized!')\n\n (su_len, su_entry_version_unused, len_id, len_des, len_src,\n self.ext_ver) = struct.unpack_from('=BBBBBB', rrstr[:8], 2)\n\n # We assume that the caller has already checked the su_entry_version,\n # so we don't bother.\n\n # Ensure that the length isn't crazy\n if su_len > len(rrstr):\n raise pycdlibexception.PyCdlibInvalidISO('Length of ER record much too long')\n\n # Also ensure that the combination of len_id, len_des, and len_src\n # doesn't overrun su_len; because of the check above, this means it\n # can't overrun len(rrstr) either\n total_length = len_id + len_des + len_src\n if total_length > su_len:\n raise pycdlibexception.PyCdlibInvalidISO('Combined length of ER ID, des, and src longer than record')\n\n fmtstr = '=%ds%ds%ds' % (len_id, len_des, len_src)\n (self.ext_id, self.ext_des, self.ext_src) = struct.unpack_from(fmtstr, rrstr, 8)\n\n self._initialized = True", "docstring": "Parse a Rock Ridge Extensions Reference record out of a string.\n\n Parameters:\n rrstr - The string to parse the record out of.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def ensure_exe(exe_name: str, *paths: str): # pragma: no cover\n\n if not elib_run.find_executable(exe_name, *paths):\n LOGGER.error('could not find \"%s.exe\" on this 
system', exe_name)\n sys.exit(-1)", "docstring": "Makes sure that an executable can be found on the system path.\n Will exit the program if the executable cannot be found\n\nArgs:\n exe_name: name of the executable\n paths: optional path(s) to be searched; if not specified, search the whole system", "source": "juraj_google_style"} -{"code": "def cos(duration: int, amp: complex, freq: float = None,\n phase: float = 0, name: str = None) -> SamplePulse:\n\n if freq is None:\n freq = 1/duration\n\n return _sampled_cos_pulse(duration, amp, freq, phase=phase, name=name)", "docstring": "Generates cosine wave `SamplePulse`.\n\n Applies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\n duration: Duration of pulse. Must be greater than zero.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.\n phase: Pulse phase.\n name: Name of pulse.", "source": "juraj_google_style"} -{"code": "def delete_all_pipelines(self):\n\n\t\tcode, data = self.get_pipeline()\n\t\tif code == requests.codes.ok:\n\t\t\tfor pl_data in data:\n\t\t\t\tc, d = self.delete_pipeline(pl_data['pipelineKey'])\n\t\t\t\tif c != requests.codes.ok:\n\t\t\t\t\tcode = c\n\t\t\t\t\tdata = d\n\t\treturn code, data", "docstring": "Deletes all pipelines\n\nArgs:\n returns\t\tOK for overall success or last error code, resp data.", "source": "juraj_google_style"} -{"code": "def deleted(self, deleted_since, filters=None, params=None):\n\n\n return self.tc_requests.deleted(\n self.api_type,\n self.api_sub_type,\n deleted_since,\n owner=self.owner,\n filters=filters,\n params=params,\n )", "docstring": "Gets the indicators deleted.\n\nArgs:\n params:\n filters:\n deleted_since: Date since its been deleted", "source": "juraj_google_style"} -{"code": "def FromString(cls, desc):\n\n if language.stream is None:\n language.get_language()\n\n parse_exp = Optional(time_interval('time') - Literal(':').suppress()) - language.stream('stream') - Literal('=').suppress() - number('value')\n\n try:\n data = parse_exp.parseString(desc)\n time = 0\n if 'time' in data:\n time = data['time'][0]\n\n return SimulationStimulus(time, data['stream'][0], data['value'])\n except (ParseException, ParseSyntaxException):\n raise ArgumentError(\"Could not parse stimulus descriptor\", descriptor=desc)", "docstring": "Create a new stimulus from a description string.\n\n The string must have the format:\n\n [time: ][system ]input X = Y\n where X and Y are integers. 
The time, if given must\n be a time_interval, which is an integer followed by a\n time unit such as second(s), minute(s), etc.\n\nArgs:\n desc (str): A string description of the stimulus.\n\nReturns:\n SimulationStimulus: The parsed stimulus object.", "source": "juraj_google_style"} -{"code": "def _GetBetweenQEqualsAndAmpersand(self, url):\n\n # Make sure we're analyzing the query part of the URL.\n _, _, url = url.partition('?')\n # Look for a key value pair named 'q'.\n _, _, url = url.partition('q=')\n if not url:\n return ''\n\n # Strip additional key value pairs.\n url, _, _ = url.partition('&')\n return url", "docstring": "Retrieves the substring between the substrings 'q=' and '&'.\n\nArgs:\n url (str): URL.\n\nReturns:\n str: search query, the value between 'q=' and '&' or None if no query\n was found.", "source": "juraj_google_style"} -{"code": "def set_row_gap(self, value):\n\n value = str(value) + 'px'\n value = value.replace('pxpx', 'px')\n self.style['grid-row-gap'] = value", "docstring": "Sets the gap value between rows\n\nArgs:\n value (int or str): gap value (i.e. 10 or \"10px\")", "source": "juraj_google_style"} -{"code": "def __init__(self, resolver_context):\n\n super(FakeFileSystem, self).__init__(resolver_context)\n self._paths = {}\n\n self.AddFileEntry(\n self.LOCATION_ROOT,\n file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)", "docstring": "Initializes a file system.\n\nArgs:\n resolver_context (Context): a resolver context.", "source": "juraj_google_style"} -{"code": "def write(self, data):\n\n\n self._process.poll()\n if self._process.returncode is not None:\n raise EOFError('Process ended')\n self._process.stdin.write(data)", "docstring": "Write *n* bytes to the subprocess' input channel.\n\nArgs:\n data(bytes): The data to write.\n\nRaises:\n EOFError: If the process exited.", "source": "juraj_google_style"} -{"code": "def verified_excel_file(store, institute_list, temp_excel_dir):\n\n document_lines = []\n written_files = 0\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n LOG.info('Creating verified variant document..')\n\n for cust in institute_list:\n verif_vars = store.verified(institute_id=cust)\n LOG.info('Found {} verified variants for customer {}'.format(len(verif_vars), cust))\n\n if not verif_vars:\n continue\n unique_callers = set()\n for var_type, var_callers in CALLERS.items():\n for caller in var_callers:\n unique_callers.add(caller.get('id'))\n cust_verified = export_verified_variants(verif_vars, unique_callers)\n\n document_name = '.'.join([cust, '_verified_variants', today]) + '.xlsx'\n workbook = Workbook(os.path.join(temp_excel_dir,document_name))\n Report_Sheet = workbook.add_worksheet()\n\n # Write the column header\n row = 0\n for col,field in enumerate(VERIFIED_VARIANTS_HEADER + list(unique_callers)):\n Report_Sheet.write(row,col,field)\n\n # Write variant lines, after header (start at line 1)\n for row, line in enumerate(cust_verified,1): # each line becomes a row in the document\n for col, field in enumerate(line): # each field in line becomes a cell\n Report_Sheet.write(row,col,field)\n workbook.close()\n\n if os.path.exists(os.path.join(temp_excel_dir,document_name)):\n written_files += 1\n\n return written_files", "docstring": "Collect all verified variants in a list on institutes and save them to file\n\nArgs:\n store(adapter.MongoAdapter)\n institute_list(list): a list of institute ids\n temp_excel_dir(os.Path): folder where the temp excel files are written to\n\nReturns:\n written_files(int): the number of files 
written to temp_excel_dir", "source": "juraj_google_style"} -{"code": "def load_data(path):\n\n\n\n # check that path exists\n if not os.path.exists(path):\n print(path)\n raise AttributeError('Path given does not exist!')\n\n # windows can't deal with long filenames (>260 chars) so we have to use the prefix '\\\\\\\\?\\\\'\n # if len(path.split('\\\\\\\\?\\\\')) == 1:\n # path = '\\\\\\\\?\\\\' + os.path.abspath(path)\n\n\n # if raw_data folder exists, get a list of directories from within it; otherwise, get names of all .csv files in\n # current directory\n data = {}\n # if self.RAW_DATA_DIR in os.listdir(path): #8/26/16 AK: self not defined in static context\n # data_files = os.listdir(os.path.join(path, self.RAW_DATA_DIR + '/'))\n # path = os.path.join(path, self.RAW_DATA_DIR + '/')\n #\n # else:\n if 'raw_data' in os.listdir(path): #temporarily hardcoded\n data_files = os.listdir(os.path.join(path, 'raw_data' + '/'))\n path = os.path.join(path, 'raw_data' + '/')\n\n else:\n data_files = glob.glob(os.path.join(path, '*.csv'))\n\n # If no data files were found, raise error\n if not data_files:\n raise AttributeError('Could not find data files in {:s}'.format(path))\n\n # import data from each csv\n for data_file in data_files:\n # get data name, read the data from the csv, and save it to dictionary\n data_name = data_file.split('-')[-1][0:-4] # JG: why do we strip of the date?\n imported_data_df = pd.read_csv(os.path.join(path, data_file))\n\n # check if there are real headers, if the headers are digits than we ignore them because then they are just indecies\n # real headers are strings (however, the digits are also of type str! that why we use the isdigit method)\n column_headers = list(imported_data_df.columns.values)\n if sum([int(x.isdigit()) for x in column_headers]) != len(column_headers):\n data[data_name] = {h: imported_data_df[h].as_matrix() for h in column_headers}\n else:\n # note, np.squeeze removes extraneous length-1 dimensions from the returned 'matrix' from the dataframe\n data[data_name] = np.squeeze(imported_data_df.as_matrix())\n\n return data", "docstring": "loads the data that has been save with Script.save.\n\nArgs:\n path: path to folder saved by Script.save or raw_data folder within\n\nReturns:\n a dictionary with the data of form\n data = {param_1_name: param_1_data, ...}", "source": "juraj_google_style"} -{"code": "def __init__(self, encoder=None, encoder_config=None):\n\n if encoder and encoder_config:\n raise ValueError(\"If encoder is provided, encoder_config must be None.\")\n if encoder:\n encoder_config = text_lib.TextEncoderConfig(\n encoder_cls=type(encoder),\n vocab_size=encoder.vocab_size)\n elif encoder_config:\n encoder = encoder_config.encoder\n\n self._encoder = encoder\n self._encoder_config = encoder_config", "docstring": "Constructs a Text FeatureConnector.\n\nArgs:\n encoder: `tfds.features.text.TextEncoder`, an encoder that can convert\n text to integers. 
If None, the text will be utf-8 byte-encoded.\n encoder_config: `tfds.features.text.TextEncoderConfig`, needed if\n restoring from a file with `load_metadata`.", "source": "juraj_google_style"} -{"code": "def edges(self, nodes=None):\n\n # If a Node has been directly updated (__not__ recommended)\n # then the Graph will not know the added nodes and therefore will\n # miss half of their edges.\n edges = set()\n for node in (nodes or self.iterkeys()):\n ends = self[node].nodes()\n edges.update([(node, end) for end in ends])\n return tuple(edges)", "docstring": "Returns a ``tuple`` of all edges in the ``DictGraph`` an edge is a pair\n of **node objects**.\n\nArgs:\n - nodes(iterable) [default: ``None``] iterable of **node objects** if\n specified the edges will be limited to those outgoing from one of\n the specified nodes.", "source": "juraj_google_style"} -{"code": "def default_memcache_timeout_policy(key):\n\n timeout = None\n if key is not None and isinstance(key, model.Key):\n modelclass = model.Model._kind_map.get(key.kind())\n if modelclass is not None:\n policy = getattr(modelclass, '_memcache_timeout', None)\n if policy is not None:\n if isinstance(policy, (int, long)):\n timeout = policy\n else:\n timeout = policy(key)\n return timeout", "docstring": "Default memcache timeout policy.\n\n This defers to _memcache_timeout on the Model class.\n\nArgs:\n key: Key instance.\n\nReturns:\n Memcache timeout to use (integer), or None.", "source": "juraj_google_style"} -{"code": "def _parse_line(cls, line):\n\n try:\n pkg, rest = line.split(None, 1)\n except ValueError:\n rpm = cls._parse_package(line.strip())\n return rpm\n rpm = cls._parse_package(pkg)\n rest = rest.split('\\t')\n for i, value in enumerate(rest):\n rpm[cls.SOSREPORT_KEYS[i]] = value\n return rpm", "docstring": "Helper method for parsing package line with or without SOS report information.\n\nArgs:\n line (str): package line with or without SOS report information\n\nReturns:\n dict: dictionary containing 'name', 'version', 'release' and 'arch' keys plus\n additionally 'installtime', 'buildtime', 'vendor', 'buildserver', 'pgpsig',\n 'pgpsig_short' if these are present.", "source": "juraj_google_style"} -{"code": "def assert_no_text(self, *args, **kwargs):\n\n\n query = TextQuery(*args, **kwargs)\n\n @self.synchronize(wait=query.wait)\n def assert_no_text():\n count = query.resolve_for(self)\n\n if matches_count(count, query.options) and (\n count > 0 or expects_none(query.options)):\n raise ExpectationNotMet(query.negative_failure_message)\n\n return True\n\n return assert_no_text()", "docstring": "Asserts that the page or current node doesn't have the given text content, ignoring any\n HTML tags.\n\nArgs:\n *args: Variable length argument list for :class:`TextQuery`.\n **kwargs: Arbitrary keyword arguments for :class:`TextQuery`.\n\nReturns:\n True\n\nRaises:\n ExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "juraj_google_style"} -{"code": "def __init__(\n self, script_type, default_shell=None, run_dir=None, debug=False):\n\n self.script_type = script_type\n self.default_shell = default_shell\n name = '%s-script' % self.script_type\n facility = logging.handlers.SysLogHandler.LOG_DAEMON\n self.logger = logger.Logger(name=name, debug=debug, facility=facility)\n self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)\n self.executor = script_executor.ScriptExecutor(\n self.logger, script_type, default_shell=default_shell)\n self._RunScripts(run_dir=run_dir)", 
"docstring": "Constructor.\n\nArgs:\n script_type: string, the metadata script type to run.\n default_shell: string, the default shell to execute the script.\n run_dir: string, the base directory location of the temporary directory.\n debug: bool, True if debug output should write to the console.", "source": "juraj_google_style"} -{"code": "def create_document(self, doc: Dict, mime_type: str = None, url: str = \"http://ex.com/123\",\n doc_id=None, type_=None) -> Document:\n\n return Document(self, doc, mime_type, url, doc_id=doc_id).with_type(type_)", "docstring": "Factory method to wrap input JSON docs in an ETK Document object.\n\nArgs:\n doc (object): a JSON object containing a document in CDR format.\n mime_type (str): if doc is a string, the mime_type tells what it is\n url (str): if the doc came from the web, specifies the URL for it\n doc_id\n type_\n\n Returns: wrapped Document", "source": "juraj_google_style"} -{"code": "def rhombohedral(a: float, alpha: float):\n\n return Lattice.from_parameters(a, a, a, alpha, alpha, alpha)", "docstring": "Convenience constructor for a rhombohedral lattice.\n\nArgs:\n a (float): *a* lattice parameter of the rhombohedral cell.\n alpha (float): Angle for the rhombohedral lattice in degrees.\n\nReturns:\n Rhombohedral lattice of dimensions a x a x a.", "source": "juraj_google_style"} -{"code": "def export_model(model, model_type, export_dir, model_column_fn):\n\n wide_columns, deep_columns = model_column_fn()\n if model_type == 'wide':\n columns = wide_columns\n elif model_type == 'deep':\n columns = deep_columns\n else:\n columns = wide_columns + deep_columns\n feature_spec = tf.feature_column.make_parse_example_spec(columns)\n example_input_fn = (\n tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))\n model.export_savedmodel(export_dir, example_input_fn,\n strip_default_attrs=True)", "docstring": "Export to SavedModel format.\n\nArgs:\n model: Estimator object\n model_type: string indicating model type. \"wide\", \"deep\" or \"wide_deep\"\n export_dir: directory to export the model.\n model_column_fn: Function to generate model feature columns.", "source": "juraj_google_style"} -{"code": "def extract(self, tokens: List[Token]) -> List[Extraction]:\n\n results = list()\n\n if len(tokens) > 0:\n if self._case_sensitive:\n new_tokens = [x.orth_ if isinstance(x, Token) else x for x in tokens]\n else:\n new_tokens = [x.lower_ if isinstance(x, Token) else x.lower() for x in tokens]\n else:\n return results\n\n try:\n ngrams_iter = self._generate_ngrams_with_context(new_tokens)\n results.extend(map(lambda term: self._wrap_value_with_context(tokens, term[1], term[2]),\n filter(lambda term: isinstance(term[0], str),\n map(lambda term: (self._glossary.get(term[0]), term[1], term[2]),\n map(lambda term: (\n self._combine_ngrams(term[0], self._joiner), term[1], term[2]),\n ngrams_iter)))))\n except Exception as e:\n raise ExtractorError('GlossaryExtractor: Failed to extract with ' + self.name + '. Catch ' + str(e) + '. 
')\n return results", "docstring": "Extracts information from a string(TEXT) with the GlossaryExtractor instance\n\nArgs:\n token (List[Token]): list of spaCy token to be processed.\n\nReturns:\n List[Extraction]: the list of extraction or the empty list if there are no matches.", "source": "juraj_google_style"} -{"code": "def _validate_isvalid_composition(self, isvalid_composition, field, value):\n\n sum_amount = 0.0\n if value['kind'] in ['mass fraction', 'mole fraction']:\n low_lim = 0.0\n up_lim = 1.0\n total_amount = 1.0\n elif value['kind'] in ['mole percent']:\n low_lim = 0.0\n up_lim = 100.0\n total_amount = 100.0\n else:\n self._error(field, 'composition kind must be \"mole percent\", \"mass fraction\", or '\n '\"mole fraction\"')\n return False\n\n for sp in value['species']:\n amount = sp['amount'][0]\n sum_amount += amount\n\n # Check that amount within bounds, based on kind specified\n if amount < low_lim:\n self._error(field, 'Species ' + sp['species-name'] + ' ' +\n value['kind'] + ' must be greater than {:.1f}'.format(low_lim)\n )\n elif amount > up_lim:\n self._error(field, 'Species ' + sp['species-name'] + ' ' +\n value['kind'] + ' must be less than {:.1f}'.format(up_lim)\n )\n\n # Make sure mole/mass fraction sum to 1\n if not np.isclose(total_amount, sum_amount):\n self._error(field, 'Species ' + value['kind'] +\n 's do not sum to {:.1f}: '.format(total_amount) +\n '{:f}'.format(sum_amount)\n )", "docstring": "Checks for valid specification of composition.\n\nArgs:\n isvalid_composition (bool): flag from schema indicating\n composition to be checked.\n field (str): 'composition'\n value (dict): dictionary of composition\n\n The rule's arguments are validated against this schema:\n {'isvalid_composition': {'type': 'bool'}, 'field': {'type': 'str'},\n 'value': {'type': 'dict'}}", "source": "juraj_google_style"} -{"code": "def refresh_role(self, role, file_hierarchy):\n\n if role not in self.cache:\n self.cache[role] = {}\n was_change = self._refresh_hierarchy_recursive(self.cache[role], file_hierarchy)\n if was_change:\n cf = open(self.cache_file, 'w')\n yaml.dump(self.cache, cf, Dumper=Dumper)\n cf.close()", "docstring": "Checks and refreshes (if needed) all assistants with given role.\n\nArgs:\n role: role of assistants to refresh\n file_hierarchy: hierarchy as returned by devassistant.yaml_assistant_loader.\\\n YamlAssistantLoader.get_assistants_file_hierarchy", "source": "juraj_google_style"} -{"code": "def add_method(self, m, **kwargs):\n\n if isinstance(m, types.FunctionType):\n self['function', id(m)] = m\n else:\n f, obj = get_method_vars(m)\n wrkey = (f, id(obj))\n self[wrkey] = obj", "docstring": "Add an instance method or function\n\nArgs:\n m: The instance method or function to store", "source": "juraj_google_style"} -{"code": "def select_char_code_table(self, table):\n\n tables = {'standard': 0,\n 'eastern european': 1,\n 'western european': 2,\n 'spare': 3\n }\n if table in tables:\n self.send(chr(27)+'t'+chr(tables[table]))\n else:\n raise RuntimeError('Invalid char table.')", "docstring": "Select character code table, from tree built in ones.\n\nArgs:\n table: The desired character code table. 
Choose from 'standard', 'eastern european', 'western european', and 'spare'\n\nReturns:\n None\n\nRaises:\n RuntimeError: Invalid chartable.", "source": "juraj_google_style"} -{"code": "def setOption(self, key, value):\n\n self.send_setOption(key, value)\n self.recv_setOption()", "docstring": "Sets an option\n\n Parameters:\n - key\n - value", "source": "juraj_google_style"} -{"code": "def __init__(self, **cfg):\n\n self.node_names = cfg[\"nodes\"]\n self.field_names = cfg.get(\"fields\", {})", "docstring": "Initialize speaker instance, for a set of AST nodes.\n\nArgs:\n nodes: dictionary of node names, and their human friendly names.\n Each entry for a node may also be a dictionary containing\n name: human friendly name, fields: a dictionary to override\n the field names for that node.\n fields: dictionary of human friendly field names, used as a default\n for each node.", "source": "juraj_google_style"} -{"code": "def __batch_update(self, train_events, test_events, n_epoch):\n\n for epoch in range(n_epoch):\n # SGD requires us to shuffle events in each iteration\n # * if n_epoch == 1\n # => shuffle is not required because it is a deterministic training (i.e. matrix sketching)\n if n_epoch != 1:\n np.random.shuffle(train_events)\n\n # train\n for e in train_events:\n self.rec.update(e, batch_train=True)\n\n # test\n MPR = self.__batch_evaluate(test_events)\n if self.debug:\n logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))", "docstring": "Batch update called by the fitting method.\n\nArgs:\n train_events (list of Event): Positive training events.\n test_events (list of Event): Test events.\n n_epoch (int): Number of epochs for the batch training.", "source": "juraj_google_style"} -{"code": "def set_start_location(self, new_location):\n # type: (int) -> None\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor not initialized')\n self.part_start_location = new_location", "docstring": "A method to set the location of the start of the partition.\n\n Parameters:\n new_location - The new extent the UDF partition should start at.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def custom_line_color_map(self, values):\n\n if not isinstance(values, list):\n raise TypeError(\"custom_line_color_map must be a list\")\n\n self.options[\"custom_line_color_map\"] = values", "docstring": "Set the custom line color map.\n\nArgs:\n values (list): list of colors.\n\nRaises:\n TypeError: Custom line color map must be a list.", "source": "juraj_google_style"} -{"code": "def populate_sites( self, number_of_atoms, selected_sites=None ):\n\n if number_of_atoms > self.number_of_sites:\n raise ValueError\n if selected_sites:\n atoms = [ atom.Atom( initial_site = site ) for site in random.sample( [ s for s in self.sites if s.label in selected_sites ], number_of_atoms ) ]\n else:\n atoms = [ atom.Atom( initial_site = site ) for site in random.sample( self.sites, number_of_atoms ) ]\n self.number_of_occupied_sites = number_of_atoms\n return atoms", "docstring": "Populate the lattice sites with a specific number of atoms.\n\nArgs:\n number_of_atoms (Int): The number of atoms to populate the lattice sites with.\n selected_sites (:obj:List, optional): List of site labels if only some sites are to be occupied. 
Defaults to None.\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def locked_put(self, credentials):\n\n entity = self._model.get_or_insert(self._key_name)\n setattr(entity, self._property_name, credentials)\n entity.put()\n if self._cache:\n self._cache.set(self._key_name, credentials.to_json())", "docstring": "Write a Credentials to the datastore.\n\nArgs:\n credentials: Credentials, the credentials to store.", "source": "juraj_google_style"} -{"code": "def en020(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `en020`'.format(value))\n\n self._en020 = value", "docstring": "Corresponds to IDD Field `en020`\n mean coincident dry-bulb temperature to\n Enthalpy corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\n value (float): value for IDD Field `en020`\n Unit: kJ/kg\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def eval_stats(values, mode):\n\n if mode == 'raw':\n return values.tolist()\n if mode == 'total':\n mode = 'sum'\n\n try:\n return getattr(np, mode)(values, axis=0)\n except ValueError:\n pass\n\n return None", "docstring": "Extract a summary statistic from an array of list of values\n\n Parameters:\n values: numpy array of values\n mode: summary stat to extract. One of ['min', 'max', 'median', 'mean', 'std', 'raw']\n\n Note: fails silently if values is empty, and None is returned", "source": "juraj_google_style"} -{"code": "def _get_bradcrack_data(bravais):\n r\n json_file = pkg_resources.resource_filename(__name__, 'bradcrack.json')\n with open(json_file, 'r') as f:\n bradcrack_data = load_json(f)\n return bradcrack_data[bravais]", "docstring": "r\"\"\"Read Bradley--Cracknell k-points path from data file\n\nArgs:\n bravais (str): Lattice code including orientation e.g. 
'trig_p_c'\n\nReturns:\n dict: kpoint path and special point locations, formatted as e.g.::\n\n {'kpoints': {'\\Gamma': [0., 0., 0.], 'X': [0., 0.5, 0.], ...},\n 'path': [['\\Gamma', 'X', ..., 'P'], ['H', 'N', ...]]}", "source": "juraj_google_style"} -{"code": "def wait_for_contract(self, contract_address_hex, timeout=None):\n\n contract_address = decode_hex(contract_address_hex)\n start_time = time.time()\n result = self._raiden.chain.client.web3.eth.getCode(\n to_checksum_address(contract_address),\n )\n\n current_time = time.time()\n while not result:\n if timeout and start_time + timeout > current_time:\n return False\n\n result = self._raiden.chain.client.web3.eth.getCode(\n to_checksum_address(contract_address),\n )\n gevent.sleep(0.5)\n\n current_time = time.time()\n\n return len(result) > 0", "docstring": "Wait until a contract is mined\n\nArgs:\n contract_address_hex (string): hex encoded address of the contract\n timeout (int): time to wait for the contract to get mined\n\nReturns:\n True if the contract got mined, false otherwise", "source": "juraj_google_style"} -{"code": "def save_to_text_file(monsoon_data, file_path):\n\n if not monsoon_data:\n raise MonsoonError(\"Attempting to write empty Monsoon data to \"\n \"file, abort\")\n utils.create_dir(os.path.dirname(file_path))\n with io.open(file_path, 'w', encoding='utf-8') as f:\n for md in monsoon_data:\n f.write(str(md))\n f.write(MonsoonData.delimiter)", "docstring": "Save multiple MonsoonData objects to a text file.\n\nArgs:\n monsoon_data: A list of MonsoonData objects to write to a text\n file.\n file_path: The full path of the file to save to, including the file\n name.", "source": "juraj_google_style"} -{"code": "def file_crc32(filePath):\n\n crc = 0\n with open(filePath, 'rb') as f:\n for block in _file_iter(f, _BLOCK_SIZE):\n crc = binascii.crc32(block, crc) & 0xFFFFFFFF\n return crc", "docstring": "计算文件的crc32检验码:\n\nArgs:\n filePath: 待计算校验码的文件路径\n\nReturns:\n 文件内容的crc32校验码。", "source": "juraj_google_style"} -{"code": "def find_all(self, kw: YangIdentifier,\n pref: YangIdentifier = None) -> List[\"Statement\"]:\n\n return [c for c in self.substatements\n if c.keyword == kw and c.prefix == pref]", "docstring": "Return the list all substatements with the given keyword and prefix.\n\nArgs:\n kw: Statement keyword (local part for extensions).\n pref: Keyword prefix (``None`` for built-in statements).", "source": "juraj_google_style"} -{"code": "def __init__(self, optimizer, scope='meta-optimizer', summary_labels=(), **kwargs):\n\n self.optimizer = Optimizer.from_spec(spec=optimizer, kwargs=kwargs)\n\n super(MetaOptimizer, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new meta optimizer instance.\n\nArgs:\n optimizer: The optimizer which is modified by this meta optimizer.", "source": "juraj_google_style"} -{"code": "def archs(self, as_list=False):\n\n\n archs = self.arch_list().split('/')\n\n if as_list:\n return archs\n\n return set(archs)", "docstring": "Return all of the architectures for this target.\n\nArgs:\n as_list (bool): Return a list instead of the default set object.\n\nReturns:\n set or list: All of the architectures used in this TargetSettings object.", "source": "juraj_google_style"} -{"code": "def backup(filenames, prefix=\"error\"):\n\n num = max([0] + [int(f.split(\".\")[1])\n for f in glob(\"{}.*.tar.gz\".format(prefix))])\n filename = \"{}.{}.tar.gz\".format(prefix, num + 1)\n logging.info(\"Backing up run to {}.\".format(filename))\n with tarfile.open(filename, 
\"w:gz\") as tar:\n for fname in filenames:\n for f in glob(fname):\n tar.add(f)", "docstring": "Backup files to a tar.gz file. Used, for example, in backing up the\n files of an errored run before performing corrections.\n\nArgs:\n filenames ([str]): List of files to backup. Supports wildcards, e.g.,\n *.*.\n prefix (str): prefix to the files. Defaults to error, which means a\n series of error.1.tar.gz, error.2.tar.gz, ... will be generated.", "source": "juraj_google_style"} -{"code": "def col_isdtz(df,col_name = None):\n\n col_list = df.select_dtypes(include = 'datetimetz').columns\n if col_name is None:\n return col_list\n else:\n return col_name in col_list", "docstring": "Returns a list of columns that are of type 'datetimetz'. If col_name is specified, returns\n whether the column in the DataFrame is of type 'datetimetz' instead.\n Parameters:\n df - DataFrame\n DataFrame to check\n col_name - string, default None\n If specified, this function will True if df[col_name] is of type 'datetimetz'", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.ListInstances = channel.unary_unary(\n \"/google.cloud.redis.v1.CloudRedis/ListInstances\",\n request_serializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.ListInstancesRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.ListInstancesResponse.FromString,\n )\n self.GetInstance = channel.unary_unary(\n \"/google.cloud.redis.v1.CloudRedis/GetInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.GetInstanceRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.Instance.FromString,\n )\n self.CreateInstance = channel.unary_unary(\n \"/google.cloud.redis.v1.CloudRedis/CreateInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.CreateInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.UpdateInstance = channel.unary_unary(\n \"/google.cloud.redis.v1.CloudRedis/UpdateInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.UpdateInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.ImportInstance = channel.unary_unary(\n \"/google.cloud.redis.v1.CloudRedis/ImportInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.ImportInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.ExportInstance = channel.unary_unary(\n \"/google.cloud.redis.v1.CloudRedis/ExportInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.ExportInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.FailoverInstance = channel.unary_unary(\n \"/google.cloud.redis.v1.CloudRedis/FailoverInstance\",\n request_serializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.FailoverInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.DeleteInstance = channel.unary_unary(\n \"/google.cloud.redis.v1.CloudRedis/DeleteInstance\",\n 
request_serializer=google_dot_cloud_dot_redis__v1_dot_proto_dot_cloud__redis__pb2.DeleteInstanceRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def Parse(text):\n\n precondition.AssertType(text, Text)\n\n if compatibility.PY2:\n text = text.encode(\"utf-8\")\n\n return yaml.safe_load(text)", "docstring": "Parses a YAML source into a Python object.\n\nArgs:\n text: A YAML source to parse.\n\nReturns:\n A Python data structure corresponding to the YAML source.", "source": "juraj_google_style"} -{"code": "def set_status(self, name: str = None):\n\n game = None\n if name:\n game = {\n 'name': name\n }\n payload = {\n 'op': WebSocketEvent.STATUS_UPDATE.value,\n 'd': {\n 'game': game,\n 'status': 'online',\n 'afk': False,\n 'since': 0.0\n }\n }\n data = json.dumps(payload, indent=2)\n self.logger.debug(f'Sending status update payload: {data}')\n self._ws.send(data)", "docstring": "Updates the bot's status\n\n This is used to get the game that the bot is \"playing\" or to clear it.\n If you want to set a game, pass a name; if you want to clear it, either\n call this method without the optional ``name`` parameter or explicitly\n pass ``None``.\n\nArgs:\n name: the game's name, or None", "source": "juraj_google_style"} -{"code": "def get_nested_streams(dmap):\n\n return list({s for dmap in get_nested_dmaps(dmap) for s in dmap.streams})", "docstring": "Recurses supplied DynamicMap to find all streams\n\nArgs:\n dmap: DynamicMap to recurse to look for streams\n\nReturns:\n List of streams that were found", "source": "juraj_google_style"} -{"code": "def send(self, *args, **kwargs):\n\n return self.send_with_options(args=args, kwargs=kwargs)", "docstring": "Asynchronously send a message to this actor.\n\n Parameters:\n *args(tuple): Positional arguments to send to the actor.\n **kwargs(dict): Keyword arguments to send to the actor.\n\nReturns:\n Message: The enqueued message.", "source": "juraj_google_style"} -{"code": "def _serialize_container_factory(suffix, container_map):\n\n def serialize(ion_event):\n if not ion_event.ion_type.is_container:\n raise TypeError('Expected container type')\n return container_map[ion_event.ion_type]\n serialize.__name__ = '_serialize_container_' + suffix\n return serialize", "docstring": "Returns a function that serializes container start/end.\n\nArgs:\n suffix (str): The suffix to name the function with.\n container_map (Dictionary[core.IonType, bytes]): The\n\nReturns:\n function: The closure for serialization.", "source": "juraj_google_style"} -{"code": "def add_user(self, user_obj):\n\n LOG.info(\"Adding user %s to the database\", user_obj['email'])\n if not '_id' in user_obj:\n user_obj['_id'] = user_obj['email']\n\n try:\n self.user_collection.insert_one(user_obj)\n LOG.debug(\"User inserted\")\n except DuplicateKeyError as err:\n raise IntegrityError(\"User {} already exists in database\".format(user_obj['email']))\n\n return user_obj", "docstring": "Add a user object to the database\n\nArgs:\n user_obj(scout.models.User): A dictionary with user information\n\nReturns:\n user_info(dict): a copy of what was inserted", "source": "juraj_google_style"} -{"code": "def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:\n\n path = self._insert_vars(path, path_data or {})\n path = self.BASE_URL + path\n self._try_refresh_access_token()\n return 
self.session.post(path, json=post_data).json()", "docstring": "Modifies the ESI by an endpoint URL.\n\n This method is not marked \"private\" as it _can_ be used\n by consuming code, but it's probably easier to call the\n `get_op` method instead.\n\nArgs:\n path: raw ESI URL path\n path_data: data to format the path with (can be None)\n post_data: data to send to ESI\n\nReturns:\n ESI data", "source": "juraj_google_style"} -{"code": "def In(self, *values):\n\n self._awql = self._CreateMultipleValuesCondition(values, 'IN')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"in\".\n\nArgs:\n *values: The values to be used in the WHERE condition.\n\nReturns:\n The query builder that this WHERE builder links to.", "source": "juraj_google_style"} -{"code": "def _get_module_commands(module):\n # type: (ast.Module) -> typing.Generator[_EntryPoint, None, None]\n\n cls = next((n for n in module.body\n if isinstance(n, ast.ClassDef) and n.name == 'Command'), None)\n if not cls:\n return\n methods = (n.name for n in cls.body if isinstance(n, ast.FunctionDef))\n if '__call__' not in methods:\n return\n docstring = ast.get_docstring(module)\n for commands, _ in usage.parse_commands(docstring):\n yield _EntryPoint(commands[0], next(iter(commands[1:]), None), None)", "docstring": "Yield all Command objects represented by the python module.\n\n Module commands consist of a docopt-style module docstring and a callable\n Command class.\n\nArgs:\n module: An ast.Module object used to retrieve docopt-style commands.\n\nYields:\n Command objects that represent entry points to append to setup.py.", "source": "juraj_google_style"} -{"code": "def _extractClipData(self, audioClipSpec, showLogs=False):\n\n command = [self._ffmpegPath]\n\n if not showLogs:\n command += ['-nostats', '-loglevel', '0']\n\n command += [\n '-i', self._audioFilePath,\n '-ss', '%.3f' % audioClipSpec.start,\n '-t', '%.3f' % audioClipSpec.duration(),\n '-c', 'copy',\n '-map', '0',\n '-acodec', 'libmp3lame',\n '-ab', '128k',\n '-f', 'mp3'\n ]\n\n # Add clip TEXT as metadata and set a few more to default\n metadata = { self._textMetadataName: audioClipSpec.text }\n\n for k, v in metadata.items():\n command.append('-metadata')\n command.append(\"{}='{}'\".format(k, v))\n\n command.append('pipe:1')\n\n return subprocess.check_output(command)", "docstring": "Extracts a single clip according to audioClipSpec.\n\nArgs:\n audioClipSpec (AudioClipSpec): Clip specification\n showLogs (bool): Show ffmpeg output", "source": "juraj_google_style"} -{"code": "def take_shas_of_all_files(G, settings):\n\n global ERROR_FN\n sprint = settings[\"sprint\"]\n error = settings[\"error\"]\n ERROR_FN = error\n sha_dict = {}\n all_files = []\n for target in G.nodes(data=True):\n sprint(\"About to take shas of files in target '{}'\".format(target[0]),\n level=\"verbose\")\n if 'dependencies' in target[1]:\n sprint(\"It has dependencies\", level=\"verbose\")\n deplist = []\n for dep in target[1]['dependencies']:\n glist = glob.glob(dep)\n if glist:\n for oneglob in glist:\n deplist.append(oneglob)\n else:\n deplist.append(dep)\n target[1]['dependencies'] = list(deplist)\n for dep in target[1]['dependencies']:\n sprint(\" - {}\".format(dep), level=\"verbose\")\n all_files.append(dep)\n if 'output' in target[1]:\n sprint(\"It has outputs\", level=\"verbose\")\n for out in acts.get_all_outputs(target[1]):\n sprint(\" - {}\".format(out), level=\"verbose\")\n all_files.append(out)\n if len(all_files):\n sha_dict['files'] = {}\n # check if 
files exist and de-dupe\n extant_files = []\n for item in all_files:\n if item not in extant_files and os.path.isfile(item):\n extant_files.append(item)\n pool = Pool()\n results = pool.map(get_sha, extant_files)\n pool.close()\n pool.join()\n for fn, sha in zip(extant_files, results):\n sha_dict['files'][fn] = {'sha': sha}\n return sha_dict\n sprint(\"No dependencies\", level=\"verbose\")", "docstring": "Takes sha1 hash of all dependencies and outputs of all targets\n\nArgs:\n The graph we are going to build\n The settings dictionary\n\nReturns:\n A dictionary where the keys are the filenames and the\n value is the sha1 hash", "source": "juraj_google_style"} -{"code": "def update_caseid(self, case_obj, family_id):\n\n new_case = deepcopy(case_obj)\n new_case['_id'] = family_id\n\n # update suspects and causatives\n for case_variants in ['suspects', 'causatives']:\n new_variantids = []\n for variant_id in case_obj.get(case_variants, []):\n case_variant = self.variant(variant_id)\n if not case_variant:\n continue\n new_variantid = get_variantid(case_variant, family_id)\n new_variantids.append(new_variantid)\n new_case[case_variants] = new_variantids\n\n # update ACMG\n for acmg_obj in self.acmg_collection.find({'case_id': case_obj['_id']}):\n LOG.info(\"update ACMG classification: %s\", acmg_obj['classification'])\n acmg_variant = self.variant(acmg_obj['variant_specific'])\n new_specific_id = get_variantid(acmg_variant, family_id)\n self.acmg_collection.find_one_and_update(\n {'_id': acmg_obj['_id']},\n {'$set': {'case_id': family_id, 'variant_specific': new_specific_id}},\n )\n\n # update events\n institute_obj = self.institute(case_obj['owner'])\n for event_obj in self.events(institute_obj, case=case_obj):\n LOG.info(\"update event: %s\", event_obj['verb'])\n self.event_collection.find_one_and_update(\n {'_id': event_obj['_id']},\n {'$set': {'case': family_id}},\n )\n\n # insert the updated case\n self.case_collection.insert_one(new_case)\n # delete the old case\n self.case_collection.find_one_and_delete({'_id': case_obj['_id']})\n return new_case", "docstring": "Update case id for a case across the database.\n\n This function is used when a case is a rerun or updated for another reason.\n\nArgs:\n case_obj(dict)\n family_id(str): The new family id\n\nReturns:\n new_case(dict): The updated case object", "source": "juraj_google_style"} -{"code": "def __init__(self, parser):\n\n self.parser = parser\n args = getattr(self, 'args', ())\n for arg in args:\n flags = arg[0]\n if not isinstance(flags, tuple):\n flags = (flags,)\n self.parser.add_argument(*flags, **arg[1])", "docstring": "Initialize the subcommand with its parser\n\nArgs:\n parser (Parser) : an Argparse ``Parser`` instance to configure\n with the args for this subcommand.\n\n This method will automatically add all the arguments described in\n ``self.args``. 
Subclasses can perform any additional customizations\n on ``self.parser``.", "source": "juraj_google_style"} -{"code": "def start_trial(self, trial, checkpoint=None):\n\n\n self._commit_resources(trial.resources)\n try:\n self._start_trial(trial, checkpoint)\n except Exception as e:\n logger.exception(\"Error starting runner for Trial %s\", str(trial))\n error_msg = traceback.format_exc()\n time.sleep(2)\n self._stop_trial(trial, error=True, error_msg=error_msg)\n if isinstance(e, AbortTrialExecution):\n return # don't retry fatal Tune errors\n try:\n # This forces the trial to not start from checkpoint.\n trial.clear_checkpoint()\n logger.info(\n \"Trying to start runner for Trial %s without checkpoint.\",\n str(trial))\n self._start_trial(trial)\n except Exception:\n logger.exception(\n \"Error starting runner for Trial %s, aborting!\",\n str(trial))\n error_msg = traceback.format_exc()\n self._stop_trial(trial, error=True, error_msg=error_msg)", "docstring": "Starts the trial.\n\n Will not return resources if trial repeatedly fails on start.\n\nArgs:\n trial (Trial): Trial to be started.\n checkpoint (Checkpoint): A Python object or path storing the state\n of trial.", "source": "juraj_google_style"} -{"code": "def from_json(data):\n\n memfiles = InMemoryFiles()\n memfiles.files = json.loads(data)\n return memfiles", "docstring": "Convert JSON into a in memory file storage.\n\nArgs:\n data (str): valid JSON with path and filenames and\n the base64 encoding of the file content.\n\nReturns:\n InMemoryFiles: in memory file storage", "source": "juraj_google_style"} -{"code": "def apply_filter(self, structure_filter):\n\n\n def test_transformed_structure(ts):\n return structure_filter.test(ts.final_structure)\n\n self.transformed_structures = list(filter(test_transformed_structure,\n self.transformed_structures))\n for ts in self.transformed_structures:\n ts.append_filter(structure_filter)", "docstring": "Applies a structure_filter to the list of TransformedStructures\n in the transmuter.\n\nArgs:\n structure_filter: StructureFilter to apply.", "source": "juraj_google_style"} -{"code": "def _GetOutputValues(self, event):\n\n if not hasattr(event, 'timestamp'):\n logger.error('Unable to output event without timestamp.')\n return None\n\n # TODO: add function to pass event_values to GetFormattedMessages.\n message, message_short = self._output_mediator.GetFormattedMessages(event)\n if message is None or message_short is None:\n data_type = getattr(event, 'data_type', 'UNKNOWN')\n raise errors.NoFormatterFound(\n 'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n # TODO: add function to pass event_values to GetFormattedSources.\n source_short, source = self._output_mediator.GetFormattedSources(event)\n if source is None or source_short is None:\n data_type = getattr(event, 'data_type', 'UNKNOWN')\n raise errors.NoFormatterFound(\n 'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n # TODO: preserve dfdatetime as an object.\n # TODO: add support for self._output_mediator.timezone\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=event.timestamp)\n\n format_variables = self._output_mediator.GetFormatStringAttributeNames(\n event)\n if format_variables is None:\n data_type = getattr(event, 'data_type', 'UNKNOWN')\n raise errors.NoFormatterFound(\n 'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n extra_attributes = []\n for attribute_name, attribute_value in sorted(event.GetAttributes()):\n if (attribute_name in 
definitions.RESERVED_VARIABLE_NAMES or\n attribute_name in format_variables):\n continue\n\n # With ! in {1!s} we force a string conversion since some of\n # the extra attributes values can be integer, float point or\n # boolean values.\n extra_attributes.append(\n '{0:s}: {1!s}'.format(attribute_name, attribute_value))\n\n extra_attributes = '; '.join(extra_attributes)\n extra_attributes = extra_attributes.replace('\\n', '-').replace('\\r', '')\n\n inode = getattr(event, 'inode', None)\n if inode is None:\n if hasattr(event, 'pathspec') and hasattr(\n event.pathspec, 'image_inode'):\n inode = event.pathspec.image_inode\n if inode is None:\n inode = '-'\n\n hostname = self._FormatHostname(event)\n username = self._FormatUsername(event)\n\n notes = []\n note_string = getattr(event, 'notes', None)\n if note_string:\n notes.append(note_string)\n\n tag = getattr(event, 'tag', None)\n if tag:\n notes.extend(tag.labels)\n\n if not notes:\n notes.append('-')\n\n year, month, day_of_month = date_time.GetDate()\n hours, minutes, seconds = date_time.GetTimeOfDay()\n try:\n date_string = '{0:02d}/{1:02d}/{2:04d}'.format(month, day_of_month, year)\n time_string = '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)\n except (TypeError, ValueError):\n self._ReportEventError(event, (\n 'unable to copy timestamp: {0!s} to a human readable date and time. '\n 'Defaulting to: \"00/00/0000\" \"--:--:--\"').format(event.timestamp))\n\n date_string = '00/00/0000'\n time_string = '--:--:--'\n\n output_values = [\n date_string,\n time_string,\n '{0!s}'.format(self._output_mediator.timezone),\n '....',\n source_short,\n source,\n '-',\n username,\n hostname,\n message_short,\n message,\n '2',\n getattr(event, 'display_name', '-'),\n '{0!s}'.format(inode),\n ' '.join(notes),\n getattr(event, 'parser', '-'),\n extra_attributes]\n\n return output_values", "docstring": "Retrieves output values.\n\nArgs:\n event (EventObject): event.\n\nReturns:\n list[str]: output values or None if no timestamp was present in the event.\n\nRaises:\n NoFormatterFound: If no event formatter can be found to match the data\n type in the event.", "source": "juraj_google_style"} -{"code": "def _get_params(mcs, bases, namespace):\n\n params = [\n (name, namespace.pop(name))\n for name, attribute\n in list(namespace.items())\n if isinstance(attribute, BaseParam)\n ]\n\n for base in reversed(bases):\n if hasattr(base, mcs._params_storage_key):\n params = list(\n getattr(base, mcs._params_storage_key).items()\n ) + params\n\n return OrderedDict(params)", "docstring": "Create params dictionary to be used in resource class namespace.\n\n Pop all parameter objects from attributes dict (namespace)\n and store them under _params_storage_key atrribute.\n Also collect all params from base classes in order that ensures\n params can be overriden.\n\nArgs:\n bases: all base classes of created resource class\n namespace (dict): namespace as dictionary of attributes", "source": "juraj_google_style"} -{"code": "def db_dp020(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `db_dp020`'.format(value))\n\n self._db_dp020 = value", "docstring": "Corresponds to IDD Field `db_dp020`\n mean coincident dry-bulb temperature to\n Dew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\n value (float): value for IDD Field `db_dp020`\n Unit: C\n if `value` is None it will not be checked against the\n 
specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def _ParseTriggerEndTime(self, parser_mediator, trigger):\n\n time_elements_tuple = (\n trigger.end_date.year, trigger.end_date.month,\n trigger.end_date.day_of_month, 0, 0, 0)\n\n date_time = None\n if time_elements_tuple != (0, 0, 0, 0, 0, 0):\n try:\n date_time = dfdatetime_time_elements.TimeElements(\n time_elements_tuple=time_elements_tuple)\n date_time.is_local_time = True\n # TODO: add functionality to dfdatetime to control precision.\n date_time._precision = dfdatetime_definitions.PRECISION_1_DAY # pylint: disable=protected-access\n except ValueError:\n parser_mediator.ProduceExtractionWarning(\n 'invalid trigger end time: {0!s}'.format(time_elements_tuple))\n\n return date_time", "docstring": "Parses the end time from a trigger.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n trigger (job_trigger): a trigger.\n\nReturns:\n dfdatetime.DateTimeValues: last run date and time or None if not\n available.", "source": "juraj_google_style"} -{"code": "def set_day_time(self, hour):\n\n self._should_write_to_command_buffer = True\n command_to_send = DayTimeCommand(hour % 24)\n self._commands.add_command(command_to_send)", "docstring": "Queue up a change day time command. It will be applied when `tick` or `step` is called next.\n By the next tick, the lighting and the skysphere will be updated with the new hour. If there is no skysphere\n or directional light in the world, the command will not function properly but will not cause a crash.\n\nArgs:\n hour (int): The hour in military time, between 0 and 23 inclusive.", "source": "juraj_google_style"} -{"code": "def lines_from_file(path, as_interned=False, encoding=None):\n\n lines = None\n with io.open(path, encoding=encoding) as f:\n if as_interned:\n lines = [sys.intern(line) for line in f.read().splitlines()]\n else:\n lines = f.read().splitlines()\n return lines", "docstring": "Create a list of file lines from a given filepath.\n\nArgs:\n path (str): File path\n as_interned (bool): List of \"interned\" strings (default False)\n\nReturns:\n strings (list): File line list", "source": "juraj_google_style"} -{"code": "def add(self, command, *args):\n\n\n cmd = Command(command, args)\n self.commands.append(cmd)", "docstring": "Add a command to this command file.\n\nArgs:\n command (str): The command to add\n *args (str): The parameters to call the command with", "source": "juraj_google_style"} -{"code": "def plugin_privileges(self, name):\n\n params = {\n 'remote': name,\n }\n\n headers = {}\n registry, repo_name = auth.resolve_repository_name(name)\n header = auth.get_config_header(self, registry)\n if header:\n headers['X-Registry-Auth'] = header\n\n url = self._url('/plugins/privileges')\n return self._result(\n self._get(url, params=params, headers=headers), True\n )", "docstring": "Retrieve list of privileges to be granted to a plugin.\n\nArgs:\n name (string): Name of the remote plugin to examine. 
The\n ``:latest`` tag is optional, and is the default if omitted.\n\nReturns:\n A list of dictionaries representing the plugin's\n permissions", "source": "juraj_google_style"} -{"code": "def fit(\n self,\n df,\n similarity_type=\"jaccard\",\n time_decay_coefficient=30,\n time_now=None,\n timedecay_formula=False,\n threshold=1,\n ):\n\n\n # threshold - items below this number get set to zero in coocurrence counts\n assert threshold > 0\n\n df.createOrReplaceTempView(\"{prefix}df_train_input\".format(**self.header))\n\n if timedecay_formula:\n # WARNING: previously we would take the last value in training dataframe and set it\n # as a matrix U element\n # for each user-item pair. Now with time decay, we compute a sum over ratings given\n # by a user in the case\n # when T=np.inf, so user gets a cumulative sum of ratings for a particular item and\n # not the last rating.\n # Time Decay\n # do a group by on user item pairs and apply the formula for time decay there\n # Time T parameter is in days and input time is in seconds\n # so we do dt/60/(T*24*60)=dt/(T*24*3600)\n # the folling is the query which we want to run\n\n query = self.f(\n ,\n time_now=time_now,\n time_decay_coefficient=time_decay_coefficient,\n )\n\n # replace with timedecayed version\n df = self.spark.sql(query)\n else:\n # since SQL is case insensitive, this check needs to be performed similar\n if self.header['col_timestamp'].lower() in [s.name.lower() for s in df.schema]:\n # we need to de-duplicate items by using the latest item\n query = self.f(\n\n )\n\n df = self.spark.sql(query)\n\n df.createOrReplaceTempView(self.f(\"{prefix}df_train\"))\n\n log.info(\"sarplus.fit 1/2: compute item cooccurences...\")\n\n # compute cooccurrence above minimum threshold\n query = self.f(\n ,\n threshold=threshold,\n )\n\n item_cooccurrence = self.spark.sql(query)\n item_cooccurrence.write.mode(\"overwrite\").saveAsTable(\n self.f(\"{prefix}item_cooccurrence\")\n )\n\n # compute the diagonal used later for Jaccard and Lift\n if similarity_type == SIM_LIFT or similarity_type == SIM_JACCARD:\n item_marginal = self.spark.sql(\n self.f(\n \"SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2\"\n )\n )\n item_marginal.createOrReplaceTempView(self.f(\"{prefix}item_marginal\"))\n\n if similarity_type == SIM_COOCCUR:\n self.item_similarity = item_cooccurrence\n elif similarity_type == SIM_JACCARD:\n query = self.f(\n\n )\n self.item_similarity = self.spark.sql(query)\n elif similarity_type == SIM_LIFT:\n query = self.f(\n\n )\n self.item_similarity = self.spark.sql(query)\n else:\n raise ValueError(\"Unknown similarity type: {0}\".format(similarity_type))\n\n\n # store upper triangular\n log.info(\"sarplus.fit 2/2: compute similiarity metric %s...\" % similarity_type)\n self.item_similarity.write.mode(\"overwrite\").saveAsTable(\n self.f(\"{prefix}item_similarity_upper\")\n )\n\n # expand upper triangular to full matrix\n\n query = self.f(\n\n )\n\n self.item_similarity = self.spark.sql(query)\n self.item_similarity.write.mode(\"overwrite\").saveAsTable(\n self.f(\"{prefix}item_similarity\")\n )\n\n # free space\n self.spark.sql(self.f(\"DROP TABLE {prefix}item_cooccurrence\"))\n self.spark.sql(self.f(\"DROP TABLE {prefix}item_similarity_upper\"))\n\n self.item_similarity = self.spark.table(self.f(\"{prefix}item_similarity\"))", "docstring": "Main fit method for SAR. Expects the dataframes to have row_id, col_id columns which are indexes,\n i.e. 
contain the sequential integer index of the original alphanumeric user and item IDs.\n Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default.\n\nArgs:\n df (pySpark.DataFrame): input dataframe which contains the index of users and items.", "source": "juraj_google_style"} -{"code": "def _parse_trunk_allowed_vlans(self, config):\n\n match = re.search(r'switchport trunk allowed vlan (.+)$', config, re.M)\n return dict(trunk_allowed_vlans=match.group(1))", "docstring": "Scans the specified config and parse the trunk allowed vlans value\n\nArgs:\n config (str): The interface configuration block to scan\n\nReturns:\n dict: A Python dict object with the value of switchport trunk\n allowed vlans value. The dict returned is intended to be\n merged into the resource dict", "source": "juraj_google_style"} -{"code": "def lresolve(self, path):\n\n path = make_string_path(path)\n if path == self.root.name:\n # The root directory will never be a link\n return self.root\n\n # remove trailing separator\n path = self._path_without_trailing_separators(path)\n path = self._original_path(path)\n\n parent_directory, child_name = self.splitpath(path)\n if not parent_directory:\n parent_directory = self.cwd\n try:\n parent_obj = self.resolve(parent_directory)\n assert parent_obj\n if not isinstance(parent_obj, FakeDirectory):\n if not self.is_windows_fs and isinstance(parent_obj, FakeFile):\n self.raise_io_error(errno.ENOTDIR, path)\n self.raise_io_error(errno.ENOENT, path)\n return parent_obj.get_entry(child_name)\n except KeyError:\n self.raise_io_error(errno.ENOENT, path)", "docstring": "Search for the specified object, resolving only parent links.\n\n This is analogous to the stat/lstat difference. This resolves links\n *to* the object but not of the final object itself.\n\nArgs:\n path: Specifies target FakeFile object to retrieve.\n\nReturns:\n The FakeFile object corresponding to path.\n\nRaises:\n IOError: if the object is not found.", "source": "juraj_google_style"} -{"code": "def fit(self, dpp_vector):\n\n\n # decompose X and generate the rankings of the elements in the\n # decomposed matrix\n dpp_vector_decomposed = self.mf_model.transform(dpp_vector)\n dpp_vector_ranked = stats.rankdata(\n dpp_vector_decomposed,\n method='dense',\n )\n\n max_agreement_index = None\n max_agreement = -1 # min value of Kendall Tau agremment\n for i in range(self.dpp_ranked.shape[0]):\n # calculate agreement between current row and X\n agreement, _ = stats.kendalltau(\n dpp_vector_ranked,\n self.dpp_ranked[i, :],\n )\n if agreement > max_agreement:\n max_agreement_index = i\n max_agreement = agreement\n\n if max_agreement_index is None:\n max_agreement_index = np.random.randint(self.dpp_matrix.shape[0])\n # store the row with the highest agreement for prediction\n self.matching_dataset = self.dpp_matrix[max_agreement_index, :]", "docstring": "Finds row of self.dpp_matrix most closely corresponds to X by means\n of Kendall tau distance.\n https://en.wikipedia.org/wiki/Kendall_tau_distance\n\nArgs:\n dpp_vector (np.array): Array with shape (n_components, )", "source": "juraj_google_style"} -{"code": "def download(path='.', url=None, unpack=False):\n\n\n if url is None:\n url = 'https://github.com/neurosynth/neurosynth-data/blob/master/current_data.tar.gz?raw=true'\n if os.path.exists(path) and os.path.isdir(path):\n basename = os.path.basename(url).split('?')[0]\n filename = os.path.join(path, basename)\n else:\n filename = path\n\n f = open(filename, 'wb')\n\n u = 
urlopen(url)\n file_size = int(u.headers[\"Content-Length\"][0])\n print(\"Downloading the latest Neurosynth files: {0} bytes: {1}\".format(\n url, file_size))\n\n bytes_dl = 0\n block_size = 8192\n while True:\n buffer = u.read(block_size)\n if not buffer:\n break\n bytes_dl += len(buffer)\n f.write(buffer)\n p = float(bytes_dl) / file_size\n status = r\"{0} [{1:.2%}]\".format(bytes_dl, p)\n status = status + chr(8) * (len(status) + 1)\n sys.stdout.write(status)\n\n f.close()\n\n if unpack:\n import tarfile\n tarfile.open(filename, 'r:gz').extractall(os.path.dirname(filename))", "docstring": "Download the latest data files.\n\nArgs:\n path (str): Location to save the retrieved data files. Defaults to\n current directory.\n unpack (bool): If True, unzips the data file post-download.", "source": "juraj_google_style"} -{"code": "def set_config(config):\n\n # Deep copy the default config into bigchaindb.config\n bigchaindb.config = copy.deepcopy(bigchaindb._config)\n # Update the default config with whatever is in the passed config\n update(bigchaindb.config, update_types(config, bigchaindb.config))\n bigchaindb.config['CONFIGURED'] = True", "docstring": "Set bigchaindb.config equal to the default config dict,\n then update that with whatever is in the provided config dict,\n and then set bigchaindb.config['CONFIGURED'] = True\n\nArgs:\n config (dict): the config dict to read for changes\n to the default config\n\nNote:\n Any previous changes made to ``bigchaindb.config`` will be lost.", "source": "juraj_google_style"} -{"code": "def network_connect(self, value):\n\n if value == self._defaults['networkConnect'] and 'networkConnect' in self._values:\n del self._values['networkConnect']\n else:\n self._values['networkConnect'] = value", "docstring": "The network_connect property.\n\nArgs:\n value (string). the property value.", "source": "juraj_google_style"} -{"code": "def get_next_iteration(self, iteration, iteration_kwargs={}):\n\n\n\t\tmin_budget = max( self.min_budget, self.config_generator.largest_budget_with_model())\n\t\tmax_budget = self.max_budget\n\t\teta = self.eta\n\n\t\t# precompute some HB stuff\n\t\tmax_SH_iter = -int(np.log(min_budget/max_budget)/np.log(eta)) + 1\n\t\tbudgets = max_budget * np.power(eta, -np.linspace(max_SH_iter-1, 0, max_SH_iter))\n\n\n\t\t# number of 'SH rungs'\n\t\ts = max_SH_iter - 1\n\t\t# number of configurations in that bracket\n\t\tn0 = int(np.floor((self.max_SH_iter)/(s+1)) * eta**s)\n\t\tns = np.array([max(int(n0*(eta**(-i))), 1) for i in range(s+1)])\n\n\t\twhile (ns * budgets[-s-1:]).sum() <= self.budget_per_iteration:\n\t\t n0 += 1\n\t\t ns = np.array([max(int(n0*(eta**(-i))), 1) for i in range(s+1)])\n\n\t\tn0 -= 1\n\t\tns = np.array([max(int(n0*(eta**(-i))), 1) for i in range(s+1)])\n\n\t\tassert (ns * budgets[-s-1:]).sum() <= self.budget_per_iteration, 'Sampled iteration exceeds the budget per iteration!'\n\n\t\treturn(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=budgets, config_sampler=self.config_generator.get_config, **iteration_kwargs))", "docstring": "BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration.\n See Li et al. 
(2016) for reference.\n\n Parameters:\n -----------\n iteration: int\n the index of the iteration to be instantiated\n\nReturns:\n --------\n SuccessiveHalving: the SuccessiveHalving iteration with the\n corresponding number of configurations", "source": "juraj_google_style"} -{"code": "def GetMessages(self, formatter_mediator, event):\n\n if self.DATA_TYPE != event.data_type:\n raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n event.data_type))\n\n event_values = event.CopyToDict()\n\n page_transition_type = event_values.get('page_transition_type', None)\n if page_transition_type is not None:\n page_transition, page_transition_long = self._PAGE_TRANSITIONS.get(\n page_transition_type, self._UNKNOWN_PAGE_TRANSITION)\n\n if page_transition_long:\n event_values['page_transition'] = '{0:s} - {1:s}'.format(\n page_transition, page_transition_long)\n else:\n event_values['page_transition'] = page_transition\n\n visit_source = event_values.get('visit_source', None)\n if visit_source is not None:\n event_values['visit_source'] = self._VISIT_SOURCE.get(\n visit_source, 'UNKNOWN')\n\n extras = []\n\n url_hidden = event_values.get('url_hidden', False)\n if url_hidden:\n extras.append('(url hidden)')\n\n typed_count = event_values.get('typed_count', 0)\n if typed_count == 0:\n extras.append('(URL not typed directly - no typed count)')\n elif typed_count == 1:\n extras.append('(type count {0:d} time)'.format(typed_count))\n else:\n extras.append('(type count {0:d} times)'.format(typed_count))\n\n event_values['extra'] = ' '.join(extras)\n\n return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\n formatter_mediator (FormatterMediator): mediates the interactions between\n formatters and other components, such as storage and Windows EventLog\n resources.\n event (EventObject): event.\n\nReturns:\n tuple(str, str): formatted message string and short message string.\n\nRaises:\n WrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj_google_style"} -{"code": "def AddForwardedIp(self, address, interface):\n\n address = address if IP_ALIAS_REGEX.match(address) else '%s/32' % address\n args = ['add', 'to', 'local', address]\n options = self._CreateRouteOptions(dev=interface)\n self._RunIpRoute(args=args, options=options)", "docstring": "Configure a new IP address on the network interface.\n\nArgs:\n address: string, the IP address to configure.\n interface: string, the output device to use.", "source": "juraj_google_style"} -{"code": "def index_sample(self, md5, index_name):\n\n generator = self.stream_sample(md5)\n for row in generator:\n self.indexer.index_data(row, index_name)", "docstring": "Index a stored sample with the Indexer.\n\nArgs:\n md5: the md5 of the sample\n index_name: the name of the index\n\nReturns:\n Nothing", "source": "juraj_google_style"} -{"code": "def delete(self, entity):\n\n key = _normalize_key(entity)\n if key is None:\n return self.ndb_delete(entity)\n self.deletes.append(key)", "docstring": "Registers entity to delete from datastore.\n\nArgs:\n entity: an entity, model instance, or key to delete.", "source": "juraj_google_style"} -{"code": "def find_file_in_load_dirs(relpath):\n\n if relpath.startswith(os.path.sep):\n relpath = relpath.lstrip(os.path.sep)\n\n for ld in settings.DATA_DIRECTORIES:\n possible_path = os.path.join(ld, relpath)\n if os.path.exists(possible_path):\n return possible_path", "docstring": "If given relative path 
exists in one of DevAssistant load paths,\n return its full path.\n\nArgs:\n relpath: a relative path, e.g. \"assitants/crt/test.yaml\"\n\nReturns:\n absolute path of the file, e.g. \"/home/x/.devassistant/assistanta/crt/test.yaml\n or None if file is not found", "source": "juraj_google_style"} -{"code": "def yesterdayDF(symbol, token='', version=''):\n\n y = yesterday(symbol, token, version)\n if y:\n df = pd.io.json.json_normalize(y)\n _toDatetime(df)\n _reindex(df, 'symbol')\n else:\n df = pd.DataFrame()\n return df", "docstring": "This returns previous day adjusted price data for one or more stocks\n\n https://iexcloud.io/docs/api/#previous-day-prices\n Available after 4am ET Tue-Sat\n\nArgs:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\nReturns:\n DataFrame: result", "source": "juraj_google_style"} -{"code": "def CER(prediction, true_labels):\n\n\n errors = (prediction != true_labels).sum()\n return float(errors)/len(prediction)", "docstring": "Calculates the classification error rate for an N-class classification problem\n\n\n Parameters:\n\n prediction (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing your\n prediction\n\n true_labels (numpy.ndarray): A 1D :py:class:`numpy.ndarray`\n containing the ground truth labels for the input array, organized in the\n same order.", "source": "juraj_google_style"} -{"code": "def __deleteOutputCache(self, modelID):\n\n\n # If this is our output, we should close the connection\n if modelID == self._modelID and self._predictionLogger is not None:\n self._predictionLogger.close()\n del self.__predictionCache\n self._predictionLogger = None\n self.__predictionCache = None", "docstring": "Delete's the output cache associated with the given modelID. 
This actually\n clears up the resources associated with the cache, rather than deleting al\n the records in the cache\n\n Parameters:\n -----------------------------------------------------------------------\n modelID: The id of the model whose output cache is being deleted", "source": "juraj_google_style"} -{"code": "def get_tag(self, key, *, case_sensitive=True):\n\n key = key if case_sensitive else key.lower()\n for tag in self.resource.tags:\n if not case_sensitive:\n if tag.key.lower() == key:\n return tag\n elif key == tag.key:\n return tag\n\n return None", "docstring": "Return a tag by key, if found\n\nArgs:\n key (str): Name/key of the tag to locate\n case_sensitive (bool): Should tag keys be treated case-sensitive (default: true)\n\nReturns:\n `Tag`,`None`", "source": "juraj_google_style"} -{"code": "def AddMonths(start_date, months):\n\n current_date = start_date\n i = 0\n while i < months:\n month_days = calendar.monthrange(current_date.year, current_date.month)[1]\n current_date += timedelta(days=month_days)\n i += 1\n return current_date", "docstring": "A simple convenience utility for adding months to a given start date.\n\n This increments the months by adding the number of days in the current month\n to the current month, for each month.\n\nArgs:\n start_date: date The date months are being added to.\n months: int The number of months to add.\n\nReturns:\n A date equal to the start date incremented by the given number of months.", "source": "juraj_google_style"} -{"code": "def __init__(self, app, env, region, prop_path, artifact_path, artifact_version, primary_region='us-east-1'):\n\n self.app_name = app\n self.env = env\n self.region = region\n self.artifact_path = artifact_path\n self.version = artifact_version\n self.properties = get_properties(prop_path, env=self.env, region=self.region)\n self.s3props = self.properties['s3']\n generated = get_details(app=app, env=env, region=region)\n\n include_region = True\n if self.region == primary_region:\n include_region = False\n if self.s3props.get('shared_bucket_master'):\n self.bucket = generated.shared_s3_app_bucket(include_region=include_region)\n self.s3path = app\n elif self.s3props.get('shared_bucket_target'):\n shared_app = self.s3props['shared_bucket_target']\n newgenerated = get_details(app=shared_app, env=env, region=region)\n self.bucket = newgenerated.shared_s3_app_bucket(include_region=include_region)\n self.s3path = app\n else:\n self.bucket = generated.s3_app_bucket(include_region=include_region)\n self.s3path = self.s3props['path'].lstrip('/')\n\n self.s3_version_uri = ''\n self.s3_latest_uri = ''\n self.setup_pathing()", "docstring": "S3 deployment object.\n\nArgs:\n app (str): Application name\n env (str): Environment/Account\n region (str): AWS Region\n prop_path (str): Path of environment property file\n artifact_path (str): Path to tar.gz artifact\n primary_region (str): The primary region for the application.", "source": "juraj_google_style"} -{"code": "def _prefix_from_prefix_string(self, prefixlen_str):\n\n try:\n if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):\n raise ValueError\n prefixlen = int(prefixlen_str)\n if not (0 <= prefixlen <= self._max_prefixlen):\n raise ValueError\n except ValueError:\n raise NetmaskValueError('%s is not a valid prefix length' %\n prefixlen_str)\n return prefixlen", "docstring": "Turn a prefix length string into an integer.\n\nArgs:\n prefixlen_str: A decimal string containing the prefix length.\n\nReturns:\n The prefix length as an integer.\n\nRaises:\n 
NetmaskValueError: If the input is malformed or out of range.", "source": "juraj_google_style"} -{"code": "def recordsDF(token='', version=''):\n\n df = pd.DataFrame(records(token, version))\n _toDatetime(df)\n return df", "docstring": "https://iexcloud.io/docs/api/#stats-records\n\nArgs:\n token (string); Access token\n version (string); API version\n\nReturns:\n DataFrame: result", "source": "juraj_google_style"} -{"code": "def delete_records(server, token, data):\n\n method = 'DELETE'\n uri = 'https://' + server + '/zone'\n for i in data:\n connect.tonicdns_client(uri, method, token, i)", "docstring": "Delete records of specific domain.\n\nArgs:\n server: TonicDNS API server\n token: TonicDNS API authentication token\n data: Delete records\n\n ContentType: application/json\n x-authentication-token: token", "source": "juraj_google_style"} -{"code": "def to_concat_skip_model(self, start_id, end_id):\n\n self.operation_history.append((\"to_concat_skip_model\", start_id, end_id))\n filters_end = self.layer_list[end_id].output.shape[-1]\n filters_start = self.layer_list[start_id].output.shape[-1]\n start_node_id = self.layer_id_to_output_node_ids[start_id][0]\n\n pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]\n end_node_id = self.layer_id_to_output_node_ids[end_id][0]\n\n skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id)\n\n concat_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))\n self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id)\n\n concat_layer = StubConcatenate()\n concat_layer.input = [\n self.node_list[concat_input_node_id],\n self.node_list[skip_output_id],\n ]\n concat_output_node_id = self._add_node(Node(concat_layer.output_shape))\n self._add_edge(concat_layer, concat_input_node_id, concat_output_node_id)\n self._add_edge(concat_layer, skip_output_id, concat_output_node_id)\n concat_layer.output = self.node_list[concat_output_node_id]\n self.node_list[concat_output_node_id].shape = concat_layer.output_shape\n\n # Add the concatenate layer.\n new_conv_layer = get_conv_class(self.n_dim)(\n filters_start + filters_end, filters_end, 1\n )\n self._add_edge(new_conv_layer, concat_output_node_id, end_node_id)\n new_conv_layer.input = self.node_list[concat_output_node_id]\n new_conv_layer.output = self.node_list[end_node_id]\n self.node_list[end_node_id].shape = new_conv_layer.output_shape\n\n if self.weighted:\n filter_shape = (1,) * self.n_dim\n weights = np.zeros((filters_end, filters_end) + filter_shape)\n for i in range(filters_end):\n filter_weight = np.zeros((filters_end,) + filter_shape)\n center_index = (i,) + (0,) * self.n_dim\n filter_weight[center_index] = 1\n weights[i, ...] 
= filter_weight\n weights = np.concatenate(\n (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1\n )\n bias = np.zeros(filters_end)\n new_conv_layer.set_weights(\n (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1])))\n )", "docstring": "Add a weighted add concatenate connection from after start node to end node.\n\nArgs:\n start_id: The convolutional layer ID, after which to start the skip-connection.\n end_id: The convolutional layer ID, after which to end the skip-connection.", "source": "juraj_google_style"} -{"code": "def _create_image_url(self, file_path, type_, target_size):\n\n if self.image_config is None:\n logger.warning('no image configuration available')\n return\n return ''.join([\n self.image_config['secure_base_url'],\n self._image_size(self.image_config, type_, target_size),\n file_path,\n ])", "docstring": "The the closest available size for specified image type.\n\nArgs:\n file_path (:py:class:`str`): The image file path.\n type_ (:py:class:`str`): The type of image to create a URL\n for, (``'poster'`` or ``'profile'``).\n target_size (:py:class:`int`): The size of image to aim for (used\n as either width or height).", "source": "juraj_google_style"} -{"code": "def send_course_completion_statement(lrs_configuration, user, course_overview, course_grade):\n\n user_details = LearnerInfoSerializer(user)\n course_details = CourseInfoSerializer(course_overview)\n\n statement = LearnerCourseCompletionStatement(\n user,\n course_overview,\n user_details.data,\n course_details.data,\n course_grade,\n )\n EnterpriseXAPIClient(lrs_configuration).save_statement(statement)", "docstring": "Send xAPI statement for course completion.\n\nArgs:\n lrs_configuration (XAPILRSConfiguration): XAPILRSConfiguration instance where to send statements.\n user (User): Django User object.\n course_overview (CourseOverview): Course over view object containing course details.\n course_grade (CourseGrade): course grade object.", "source": "juraj_google_style"} -{"code": "def _QueryHashes(self, digests):\n\n url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}\n\n try:\n json_response = self.MakeRequestAndDecodeJSON(\n self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)\n except errors.ConnectionError as exception:\n json_response = None\n logger.error('Unable to query VirusTotal with error: {0!s}.'.format(\n exception))\n\n return json_response", "docstring": "Queries VirusTotal for a specfic hashes.\n\nArgs:\n digests (list[str]): hashes to look up.\n\nReturns:\n dict[str, object]: JSON response or None on error.", "source": "juraj_google_style"} -{"code": "def repository_compare(self, from_, to, **kwargs):\n\n path = '/projects/%s/repository/compare' % self.get_id()\n query_data = {'from': from_, 'to': to}\n return self.manager.gitlab.http_get(path, query_data=query_data,\n **kwargs)", "docstring": "Return a diff between two branches/commits.\n\nArgs:\n from_(str): Source branch/SHA\n to(str): Destination branch/SHA\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabGetError: If the server failed to perform the request\n\nReturns:\n str: The diff", "source": "juraj_google_style"} -{"code": "def get_box_reminders(self, box_key):\n\n\t\t#required sanity check\n\t\tif box_key:\n\t\t\treturn requests.codes.bad_request, None\n\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.boxes_suffix, \n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.reminders_suffix\n\t\t\t\t\t\t])\n\n\t\treturn self._req('get', uri)", "docstring": "Gets all reminders for a box\n\nArgs:\n reminder\t\tupdated reminder of StreakReminder type\n return\t\t\t(status code, reminder dict)", "source": "juraj_google_style"} -{"code": "def execute(self, sensor_graph, scope_stack):\n\n\n parent = scope_stack[-1]\n\n try:\n slot = parent.resolve_identifier('current_slot', SlotIdentifier)\n except UnresolvedIdentifierError:\n raise SensorGraphSemanticError(\"set config statement used outside of config block\")\n\n if self.explicit_type is None or not isinstance(self.identifier, int):\n raise SensorGraphSemanticError(\"Config variable type definitions are not yet supported\")\n\n if isinstance(self.value, (bytes, bytearray)) and not self.explicit_type == 'binary':\n raise SensorGraphSemanticError(\"You must pass the binary variable type when using encoded binary data\")\n\n if not isinstance(self.value, (bytes, bytearray)) and self.explicit_type == 'binary':\n raise SensorGraphSemanticError(\"You must pass an encoded binary value with binary type config variables\")\n\n sensor_graph.add_config(slot, self.identifier, self.explicit_type, self.value)", "docstring": "Execute this statement on the sensor_graph given the current scope tree.\n\n This adds a single config variable assignment to the current sensor graph\n\nArgs:\n sensor_graph (SensorGraph): The sensor graph that we are building or\n modifying\n scope_stack (list(Scope)): A stack of nested scopes that may influence\n how this statement allocates clocks or other stream resources.", "source": "juraj_google_style"} -{"code": "def decode_schedule(string):\n\n splits = string.split()\n steps = [int(x[1:]) for x in splits[1:] if x[0] == '@']\n pmfs = np.reshape(\n [float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1])\n return splits[0], tuplize(steps), tuplize(pmfs)", "docstring": "Decodes a string into a schedule tuple.\n\nArgs:\n string: The string encoding of a schedule tuple.\n\nReturns:\n A schedule tuple, see encode_schedule for details.", "source": "juraj_google_style"} -{"code": "def AddArguments(cls, argument_group):\n\n argument_group.add_argument(\n '--server', dest='server', type=str, action='store',\n default=cls._DEFAULT_SERVER, metavar='HOSTNAME',\n help='The hostname or server IP address of the server.')\n argument_group.add_argument(\n '--port', dest='port', type=int, action='store',\n default=cls._DEFAULT_PORT, metavar='PORT',\n help='The port number of the server.')", "docstring": "Adds command line arguments the helper supports to an argument group.\n\n This function takes an argument parser or an argument group object and adds\n to it all the command line arguments this helper supports.\n\nArgs:\n argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\n argparse group.", "source": "juraj_google_style"} -{"code": "def generate_full_symmops(symmops, tol):\n\n # Uses an algorithm described in:\n # Gregory Butler. Fundamental Algorithms for Permutation Groups.\n # Lecture Notes in Computer Science (Book 559). Springer, 1991. 
page 15\n UNIT = np.eye(4)\n generators = [op.affine_matrix for op in symmops\n if not np.allclose(op.affine_matrix, UNIT)]\n if not generators:\n # C1 symmetry breaks assumptions in the algorithm afterwards\n return symmops\n else:\n full = list(generators)\n\n for g in full:\n for s in generators:\n op = np.dot(g, s)\n d = np.abs(full - op) < tol\n if not np.any(np.all(np.all(d, axis=2), axis=1)):\n full.append(op)\n\n d = np.abs(full - UNIT) < tol\n if not np.any(np.all(np.all(d, axis=2), axis=1)):\n full.append(UNIT)\n return [SymmOp(op) for op in full]", "docstring": "Recursive algorithm to permute through all possible combinations of the\n initially supplied symmetry operations to arrive at a complete set of\n operations mapping a single atom to all other equivalent atoms in the\n point group. This assumes that the initial number already uniquely\n identifies all operations.\n\nArgs:\n symmops ([SymmOp]): Initial set of symmetry operations.\n\nReturns:\n Full set of symmetry operations.", "source": "juraj_google_style"} -{"code": "def vstack(tup):\n\n # Follow numpy.vstack behavior for 1D arrays:\n arrays = list(tup)\n for i in range(len(arrays)):\n if arrays[i].ndim is 1:\n arrays[i] = arrays[i][np.newaxis, :]\n return concatenate(tup, axis=0)", "docstring": "Stack arrays in sequence vertically (row wise),\n handling ``RemoteArray`` and ``DistArray`` without moving data.\n\nArgs:\n tup (sequence of array_like)\n\nReturns:\n res: `ndarray`, if inputs were all local\n `RemoteArray`, if inputs were all on the same remote engine\n `DistArray`, if inputs were already scattered on different engines", "source": "juraj_google_style"} -{"code": "def set_tick(self, index, interval):\n\n\n name = self.tick_name(index)\n if name is None:\n return pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY)\n\n self.ticks[name] = interval\n return Error.NO_ERROR", "docstring": "Update the a tick's interval.\n\nArgs:\n index (int): The index of the tick that you want to fetch.\n interval (int): The number of seconds between ticks.\n Setting this to 0 will disable the tick.\n\nReturns:\n int: An error code.", "source": "juraj_google_style"} -{"code": "def add_perfdata(self, *args, **kwargs):\n\n self._perfdata.append(Perfdata(*args, **kwargs))", "docstring": "add a perfdata to the internal perfdata list\n\nArgs:\n the same arguments as for Perfdata()", "source": "juraj_google_style"} -{"code": "def load(self,cache_genotype=False,cache_phenotype=True):\n\n self.f = h5py.File(self.file_name,'r')\n self.pheno = self.f['phenotype']\n self.geno = self.f['genotype']\n #TODO: load all row and column headers for genotype and phenotype\n\n #parse out thse we alwasy need for convenience\n self.genoM = self.geno['matrix']\n\n self.phenoM = self.pheno['matrix']\n self.sample_ID = self.geno['row_header']['sample_ID'][:]\n self.genoChrom = self.geno['col_header']['chrom'][:]\n self.genoPos = self.geno['col_header']['pos'][:]\n if 'pos_cum' in list(self.geno['col_header'].keys()):\n self.genoPos_cum = self.geno['col_header']['pos_cum'][:]\n else:\n self.genoPos_cum = None\n self.phenotype_ID = self.pheno['col_header']['phenotype_ID'][:]\n\n\n #cache?\n if cache_genotype:\n self.genoM = self.genoM[:]\n if cache_phenotype:\n self.phenoM = self.phenoM[:]\n\n # Additional pheno col header\n headers = list(self.pheno['col_header'].keys())\n if 'gene_ID' in headers:\n self.eqtl = True\n self.geneID = self.pheno['col_header']['gene_ID'][:]\n self.gene_pos = 
SP.array([self.pheno['col_header']['gene_chrom'][:],self.pheno['col_header']['gene_start'][:],self.pheno['col_header']['gene_end']],dtype='int').T\n self.geneIDs= list(set(self.geneID))\n else:\n self.eqtl = False\n if 'environment' in headers:\n self.E = self.pheno['col_header/environment'][:]\n self.Es = list(set(self.E))\n else:\n self.E = None\n\n #dimensions\n self.N = self.genoM.shape[0]\n self.S = self.genoM.shape[1]\n self.P = self.phenoM.shape[1]\n assert (self.genoM.shape[0]==self.phenoM.shape[0]), 'dimension missmatch'", "docstring": "load data file\n\nArgs:\n cache_genotype: load genotypes fully into memory (default: False)\n cache_phenotype: load phentopyes fully intro memry (default: True)", "source": "juraj_google_style"} -{"code": "def proc_ovrds(**kwargs):\n\n return [\n (k, v) for k, v in kwargs.items()\n if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS\n ]", "docstring": "Bloomberg overrides\n\nArgs:\n **kwargs: overrides\n\nReturns:\n list of tuples\n\nExample:\n >>> proc_ovrds(DVD_Start_Dt='20180101')\n [('DVD_Start_Dt', '20180101')]\n >>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)\n [('DVD_Start_Dt', '20180101')]", "source": "juraj_google_style"} -{"code": "def execute_cql_query(self, query, compression):\n\n self._seqid += 1\n d = self._reqs[self._seqid] = defer.Deferred()\n self.send_execute_cql_query(query, compression)\n return d", "docstring": "Executes a CQL (Cassandra Query Language) statement and returns a\n CqlResult containing the results.\n\n Parameters:\n - query\n - compression", "source": "juraj_google_style"} -{"code": "def from_json(cls, data):\n\n # Check required and optional keys\n optional_keys = {'wind_direction': 0, 'rain': False, 'snow_on_ground': False}\n assert 'wind_speed' in data, 'Required key \"wind_speed\" is missing!'\n for key, val in optional_keys.items():\n if key not in data:\n data[key] = val\n\n return cls(data['wind_speed'], data['wind_direction'], data['rain'],\n data['snow_on_ground'])", "docstring": "Create a Wind Condition from a dictionary.\n\nArgs:\n data = {\n \"wind_speed\": float,\n \"wind_direction\": float,\n \"rain\": bool,\n \"snow_on_ground\": bool}", "source": "juraj_google_style"} -{"code": "def kmip_version(self, value):\n\n if isinstance(value, enums.KMIPVersion):\n self._kmip_version = value\n else:\n raise ValueError(\"KMIP version must be a KMIPVersion enumeration\")", "docstring": "Set the KMIP version for the client.\n\nArgs:\n value (KMIPVersion): A KMIPVersion enumeration\n\nReturns:\n None\n\nRaises:\n ValueError: if value is not a KMIPVersion enumeration\n\nExample:\n >>> client.kmip_version = enums.KMIPVersion.KMIP_1_1\n >>>", "source": "juraj_google_style"} -{"code": "def discretize(self, data):\n\n ret = data.copy()\n for feature in self.lambdas:\n if len(data.shape) == 1:\n ret[feature] = int(self.lambdas[feature](ret[feature]))\n else:\n ret[:, feature] = self.lambdas[feature](\n ret[:, feature]).astype(int)\n return ret", "docstring": "Discretizes the data.\n\nArgs:\n data: numpy 2d or 1d array\n\nReturns:\n numpy array of same dimension, discretized.", "source": "juraj_google_style"} -{"code": "def _create_w_objective(m, X, Z=None):\n\n genes, clusters = m.shape\n cells = X.shape[1]\n nonzeros = (X!=0)\n def objective(w):\n # convert w into a matrix first... 
because it's a vector for\n # optimization purposes\n w = w.reshape((m.shape[1], X.shape[1]))\n d = m.dot(w)+eps\n # derivative of objective wrt all elements of w\n # for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus \n # x_ij\n temp = X/d\n m_sum = m.T.dot(nonzeros)\n m2 = m.T.dot(temp)\n deriv = m_sum - m2\n return np.sum(nonzeros*(d - X*np.log(d)))/genes, deriv.flatten()/genes\n return objective", "docstring": "Creates an objective function and its derivative for W, given M and X (data)\n\nArgs:\n m (array): genes x clusters\n X (array): genes x cells\n Z (array): zero-inflation parameters - genes x 1", "source": "juraj_google_style"} -{"code": "def execute(api):\n\n try:\n return api.execute()\n except Exception as exception:\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n _print_error('%s: Exception %s: %s' % (now, type(exception).__name__,\n str(exception)))\n # Re-raise exception to be handled by retry logic\n raise exception", "docstring": "Executes operation.\n\nArgs:\n api: The base API object\n\nReturns:\n A response body object", "source": "juraj_google_style"} -{"code": "def execute_code(self, code, filename=None, isolate=False):\n\n def _apply():\n self.compile_code(code=code,\n filename=filename,\n exec_namespace=self.globals)\n\n # we want to execute the code using self.globals - if for no other\n # reason that self.formatter is pointing at self.globals, so if we\n # passed in a copy, we would also need to make self.formatter \"look\" at\n # the same copy - but we don't want to \"pollute\" our namespace, because\n # the same executor may be used to run multiple packages. Therefore,\n # we save a copy of self.globals before execution, and restore it after\n #\n if isolate:\n saved_globals = dict(self.globals)\n\n try:\n _apply()\n finally:\n self.globals.clear()\n self.globals.update(saved_globals)\n else:\n _apply()", "docstring": "Execute code within the execution context.\n\nArgs:\n code (str or SourceCode): Rex code to execute.\n filename (str): Filename to report if there are syntax errors.\n isolate (bool): If True, do not affect `self.globals` by executing\n this code.", "source": "juraj_google_style"} -{"code": "def GetPathSegmentAndSuffix(self, base_path, path):\n\n if path is None or base_path is None or not path.startswith(base_path):\n return None, None\n\n path_index = len(base_path)\n if base_path and not base_path.endswith(self.PATH_SEPARATOR):\n path_index += 1\n\n if path_index == len(path):\n return '', ''\n\n path_segment, _, suffix = path[path_index:].partition(self.PATH_SEPARATOR)\n return path_segment, suffix", "docstring": "Determines the path segment and suffix of the path.\n\n None is returned if the path does not start with the base path and\n an empty string if the path exactly matches the base path.\n\nArgs:\n base_path (str): base path.\n path (str): path.\n\nReturns:\n tuple[str, str]: path segment and suffix string.", "source": "juraj_google_style"} -{"code": "def Match(self, artifact=None, os_name=None, cpe=None, label=None):\n\n return [\n c for c in self.conditions if c.Match(artifact, os_name, cpe, label)\n ]", "docstring": "Test if host data should trigger a check.\n\nArgs:\n artifact: An artifact name.\n os_name: An OS string.\n cpe: A CPE string.\n label: A label string.\n\nReturns:\n A list of conditions that match.", "source": "juraj_google_style"} -{"code": "def HsvToRgb(h, s, v):\n\n if s==0: return (v, v, v) # achromatic (gray)\n\n h /= 60.0\n h = h % 6.0\n\n i = int(h)\n f = h - i\n if not(i&1): f = 1-f 
# if i is even\n\n m = v * (1.0 - s)\n n = v * (1.0 - (s * f))\n\n if i==0: return (v, n, m)\n if i==1: return (n, v, m)\n if i==2: return (m, v, n)\n if i==3: return (m, n, v)\n if i==4: return (n, m, v)\n return (v, m, n)", "docstring": "Convert the color from RGB coordinates to HSV.\n\n Parameters:\n :h:\n The Hus component value [0...1]\n :s:\n The Saturation component value [0...1]\n :v:\n The Value component [0...1]\n\nReturns:\n The color as an (r, g, b) tuple in the range:\n r[0...1],\n g[0...1],\n b[0...1]\n\n >>> Color.HslToRgb(30.0, 1.0, 0.5)\n (1.0, 0.5, 0.0)", "source": "juraj_google_style"} -{"code": "def learn(self, grad_arr):\n\n if grad_arr.ndim > 3:\n grad_arr = grad_arr.reshape((\n grad_arr.shape[0],\n grad_arr.shape[1],\n -1\n ))\n grad_arr = grad_arr[:, -1]\n elif grad_arr.ndim == 3:\n grad_arr = grad_arr[:, -1]\n\n delta_arr, _, grads_list = self.__lstm_model.hidden_back_propagate(grad_arr)\n grads_list.insert(0, None)\n grads_list.insert(0, None)\n\n self.__lstm_model.optimize(\n grads_list,\n self.__learning_rate,\n 1\n )\n\n return delta_arr", "docstring": "Update this Discriminator by ascending its stochastic gradient.\n\nArgs:\n grad_arr: `np.ndarray` of gradients.\n\nReturns:\n `np.ndarray` of delta or gradients.", "source": "juraj_google_style"} -{"code": "def index_filename_rel_other_index(self, other: str) -> str:\n\n return relpath(self.index_filename, start=dirname(other))", "docstring": "Returns the filename of this index, relative to the director of another\n index. (For inserting a reference to this index into ``other``.)\n\nArgs:\n other: the other index\n\nReturns:\n relative filename of our index", "source": "juraj_google_style"} -{"code": "def GetVolumeByIndex(self, volume_index):\n\n if not self._is_parsed:\n self._Parse()\n self._is_parsed = True\n\n if volume_index < 0 or volume_index >= len(self._volume_identifiers):\n return None\n\n volume_identifier = self._volume_identifiers[volume_index]\n return self._volumes[volume_identifier]", "docstring": "Retrieves a specific volume based on the index.\n\nArgs:\n volume_index (int): index of the volume.\n\nReturns:\n Volume: a volume or None if not available.", "source": "juraj_google_style"} -{"code": "def _delete(self, identifier=None):\n\n query = text()\n self.backend.library.database.connection.execute(query, identifier=identifier)", "docstring": "Deletes given identifier from index.\n\nArgs:\n identifier (str): identifier of the document to delete.", "source": "juraj_google_style"} -{"code": "def _prepare_babi_data(tmp_dir, data_dir):\n\n if not tf.gfile.Exists(data_dir):\n tf.gfile.MakeDirs(data_dir)\n\n file_path = os.path.join(tmp_dir, _TAR)\n headers = {\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/63.0.3239.132 Safari/537.36\"}\n resp = requests.get(_URL, headers=headers)\n with open(file_path, \"wb\") as f:\n f.write(resp.content)\n\n tar = tarfile.open(file_path)\n tar.extractall(tmp_dir)\n tar.close()\n\n return tmp_dir", "docstring": "Downloads and extracts the dataset.\n\nArgs:\n tmp_dir: temp directory to download and extract the dataset\n data_dir: The base directory where data and vocab files are stored.\n\nReturns:\n tmp_dir: temp directory containing the raw data.", "source": "juraj_google_style"} -{"code": "def recordbatch(self, auth, resource, entries, defer=False):\n\n return self._call('recordbatch', auth, [resource, entries], defer)", "docstring": "Records a list of historical entries to the 
resource specified.\n\n Calls a function that bulids a request that writes a list of historical entries to the\n specified resource.\n\nArgs:\n auth: Takes the device cik\n resource: Takes the dataport alias or rid.\n entries: A list of entries to write to the resource.", "source": "juraj_google_style"} -{"code": "def _setup_selection_range(self, f_start=None, f_stop=None, t_start=None, t_stop=None, init=False):\n\n\n # This avoids resetting values\n if init is True:\n if t_start is None:\n t_start = self.t_begin\n if t_stop is None:\n t_stop = self.t_end\n if f_start is None:\n f_start = self.f_begin\n if f_stop is None:\n f_stop = self.f_end\n else:\n if f_start is None:\n f_start = self.f_start\n if f_stop is None:\n f_stop = self.f_stop\n if t_start is None:\n t_start = self.t_start\n if t_stop is None:\n t_stop = self.t_stop\n\n # By now, all values start/stop are populated.\n\n if t_stop >= 0 and t_start >= 0 and t_stop < t_start:\n t_stop, t_start = t_start,t_stop\n logger.warning('Given t_stop < t_start, assuming reversed values.')\n if f_stop and f_start and f_stop < f_start:\n f_stop, f_start = f_start,f_stop\n logger.warning('Given f_stop < f_start, assuming reversed values.')\n\n if t_start >= self.t_begin and t_start < self.t_end:\n self.t_start = int(t_start)\n else:\n if init is False or t_start != None:\n logger.warning('Setting t_start = %f, since t_start not given or not valid.'%self.t_begin)\n self.t_start = self.t_begin\n\n if t_stop <= self.t_end and t_stop > self.t_begin:\n self.t_stop = int(t_stop)\n else:\n if init is False or t_stop:\n logger.warning('Setting t_stop = %f, since t_stop not given or not valid.'%self.t_end)\n self.t_stop = self.t_end\n\n if f_start >= self.f_begin and f_start < self.f_end:\n self.f_start = f_start\n else:\n if init is False or f_start:\n logger.warning('Setting f_start = %f, since f_start not given or not valid.'%self.f_begin)\n self.f_start = self.f_begin\n\n if f_stop <= self.f_end and f_stop > self.f_begin:\n self.f_stop = f_stop\n else:\n if init is False or f_stop:\n logger.warning('Setting f_stop = %f, since f_stop not given or not valid.'%self.f_end)\n self.f_stop = self.f_end\n\n # Now we have setup bounds, we can calculate shape of selection\n self.selection_shape = self._calc_selection_shape()", "docstring": "Making sure the selection if time and frequency are within the file limits.\n\nArgs:\n init (bool): If call during __init__", "source": "juraj_google_style"} -{"code": "def dumps(o, encoder=None):\n\n\n retval = \"\"\n if encoder is None:\n encoder = TomlEncoder(o.__class__)\n addtoretval, sections = encoder.dump_sections(o, \"\")\n retval += addtoretval\n while sections:\n newsections = encoder.get_empty_table()\n for section in sections:\n addtoretval, addtosections = encoder.dump_sections(\n sections[section], section)\n\n if addtoretval or (not addtoretval and not addtosections):\n if retval and retval[-2:] != \"\\n\\n\":\n retval += \"\\n\"\n retval += \"[\" + section + \"]\\n\"\n if addtoretval:\n retval += addtoretval\n for s in addtosections:\n newsections[section + \".\" + s] = addtosections[s]\n sections = newsections\n return retval", "docstring": "Stringifies input dict as toml\n\nArgs:\n o: Object to dump into toml\n\n preserve: Boolean parameter. 
If true, preserve inline tables.\n\nReturns:\n String containing the toml corresponding to dict", "source": "juraj_google_style"} -{"code": "def _dirint_bins(ktp, alt, w, dktp):\n\n it = range(len(ktp))\n\n # Create kt_prime bins\n ktp_bin = [-1] * len(ktp)\n ktp_bin = [0 if ktp[i] >= 0 and ktp[i] < 0.24 else ktp_bin[i] for i in it]\n ktp_bin = [1 if ktp[i] >= 0.24 and ktp[i] < 0.4 else ktp_bin[i] for i in it]\n ktp_bin = [2 if ktp[i] >= 0.4 and ktp[i] < 0.56 else ktp_bin[i] for i in it]\n ktp_bin = [3 if ktp[i] >= 0.56 and ktp[i] < 0.7 else ktp_bin[i] for i in it]\n ktp_bin = [4 if ktp[i] >= 0.7 and ktp[i] < 0.8 else ktp_bin[i] for i in it]\n ktp_bin = [5 if ktp[i] >= 0.8 and ktp[i] <= 1 else ktp_bin[i] for i in it]\n\n # Create altitude angle bins\n alt_bin = [-1] * len(alt)\n alt_bin = [0 if alt[i] <= 90 and alt[i] > 65 else alt_bin[i] for i in it]\n alt_bin = [1 if alt[i] <= 65 and alt[i] > 50 else alt_bin[i] for i in it]\n alt_bin = [2 if alt[i] <= 50 and alt[i] > 35 else alt_bin[i] for i in it]\n alt_bin = [3 if alt[i] <= 35 and alt[i] > 20 else alt_bin[i] for i in it]\n alt_bin = [4 if alt[i] <= 20 and alt[i] > 10 else alt_bin[i] for i in it]\n alt_bin = [5 if alt[i] <= 10 else alt_bin[i] for i in it]\n\n # Create the bins for w based on dew point temperature\n w_bin = [-1] * len(w)\n w_bin = [0 if w[i] >= 0 and w[i] < 1 else w_bin[i] for i in it]\n w_bin = [1 if w[i] >= 1 and w[i] < 2 else w_bin[i] for i in it]\n w_bin = [2 if w[i] >= 2 and w[i] < 3 else w_bin[i] for i in it]\n w_bin = [3 if w[i] >= 3 else w_bin[i] for i in it]\n w_bin = [4 if w[i] == -1 else w_bin[i] for i in it]\n\n # Create delta_kt_prime binning.\n dktp_bin = [-1] * len(dktp)\n dktp_bin = [0 if dktp[i] >= 0 and dktp[i] < 0.015 else dktp_bin[i] for i in it]\n dktp_bin = [1 if dktp[i] >= 0.015 and dktp[i] < 0.035 else dktp_bin[i] for i in it]\n dktp_bin = [2 if dktp[i] >= 0.035 and dktp[i] < 0.07 else dktp_bin[i] for i in it]\n dktp_bin = [3 if dktp[i] >= 0.07 and dktp[i] < 0.15 else dktp_bin[i] for i in it]\n dktp_bin = [4 if dktp[i] >= 0.15 and dktp[i] < 0.3 else dktp_bin[i] for i in it]\n dktp_bin = [5 if dktp[i] >= 0.3 and dktp[i] <= 1 else dktp_bin[i] for i in it]\n dktp_bin = [6 if dktp[i] == -1 else dktp_bin[i] for i in it]\n\n return ktp_bin, alt_bin, w_bin, dktp_bin", "docstring": "Determine the bins for the DIRINT coefficients.\n\nArgs:\n ktp : Altitude-independent clearness index\n alt : Solar altitude angle\n w : precipitable water estimated from surface dew-point temperature\n dktp : stability index\n\nReturns:\n tuple of ktp_bin, alt_bin, w_bin, dktp_bin", "source": "juraj_google_style"} -{"code": "def _get_resource_hash(zone_name, record):\n\n record_data = defaultdict(int, record)\n if type(record_data['GeoLocation']) == dict:\n record_data['GeoLocation'] = \":\".join([\"{}={}\".format(k, v) for k, v in record_data['GeoLocation'].items()])\n\n args = [\n zone_name,\n record_data['Name'],\n record_data['Type'],\n record_data['Weight'],\n record_data['Region'],\n record_data['GeoLocation'],\n record_data['Failover'],\n record_data['HealthCheckId'],\n record_data['TrafficPolicyInstanceId']\n ]\n\n return get_resource_id('r53r', args)", "docstring": "Returns the last ten digits of the sha256 hash of the combined arguments. 
Useful for generating unique\n resource IDs\n\nArgs:\n zone_name (`str`): The name of the DNS Zone the record belongs to\n record (`dict`): A record dict to generate the hash from\n\nReturns:\n `str`", "source": "juraj_google_style"} -{"code": "def _OpenFile(self, path):\n\n if not self._registry_file_reader:\n return None\n\n return self._registry_file_reader.Open(\n path, ascii_codepage=self._ascii_codepage)", "docstring": "Opens a Windows Registry file.\n\nArgs:\n path (str): path of the Windows Registry file.\n\nReturns:\n WinRegistryFile: Windows Registry file or None if not available.", "source": "juraj_google_style"} -{"code": "def mcf(n,c):\n\n model = Model(\"mcf\")\n\n x,f = {},{}\n for i in range(1,n+1):\n for j in range(1,n+1):\n if i != j:\n x[i,j] = model.addVar(vtype=\"B\", name=\"x(%s,%s)\"%(i,j))\n if i != j and j != 1:\n for k in range(2,n+1):\n if i != k:\n f[i,j,k] = model.addVar(ub=1, vtype=\"C\", name=\"f(%s,%s,%s)\"%(i,j,k))\n\n for i in range(1,n+1):\n model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, \"Out(%s)\"%i)\n model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, \"In(%s)\"%i)\n\n for k in range(2,n+1):\n model.addCons(quicksum(f[1,i,k] for i in range(2,n+1) if (1,i,k) in f) == 1, \"FlowOut(%s)\"%k)\n model.addCons(quicksum(f[i,k,k] for i in range(1,n+1) if (i,k,k) in f) == 1, \"FlowIn(%s)\"%k)\n\n for i in range(2,n+1):\n if i != k:\n model.addCons(quicksum(f[j,i,k] for j in range(1,n+1) if (j,i,k) in f) == \\\n quicksum(f[i,j,k] for j in range(1,n+1) if (i,j,k) in f),\n \"FlowCons(%s,%s)\"%(i,k))\n\n for (i,j,k) in f:\n model.addCons(f[i,j,k] <= x[i,j], \"FlowUB(%s,%s,%s)\"%(i,j,k))\n\n model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), \"minimize\")\n\n model.data = x,f\n return model", "docstring": "mcf: multi-commodity flow formulation for the (asymmetric) traveling salesman problem\n Parameters:\n - n: number of nodes\n - c[i,j]: cost for traversing arc (i,j)\n Returns a model, ready to be solved.", "source": "juraj_google_style"} -{"code": "def StatResultFromStatEntry(\n stat_entry):\n\n values = []\n for attr in _STAT_ATTRS[:10]:\n values.append(stat_entry.Get(attr))\n return os.stat_result(values)", "docstring": "Returns a `os.stat_result` with most information from `StatEntry`.\n\n This is a lossy conversion, only the 10 first stat_result fields are\n populated, because the os.stat_result constructor is inflexible.\n\nArgs:\n stat_entry: An instance of rdf_client_fs.StatEntry.\n\nReturns:\n An instance of `os.stat_result` with basic fields populated.", "source": "juraj_google_style"} -{"code": "def to_dict(self, rw = False):\n\n\t\treturn {k:v for (k,v) in self.attributes.items() \n\t\t\t\tif (v is not None and (not rw or (k in self.rw_attr_keys)))}", "docstring": "Returns relevant attributes as a dict.\n\nArgs:\n rw \t\t\tif True only returns the read/write enabled object attributes", "source": "juraj_google_style"} -{"code": "def value_to_string(self, obj):\n\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)", "docstring": "Convert the field value from the provided model to a string.\n\n Used during model serialization.\n\nArgs:\n obj: db.Model, model object\n\nReturns:\n string, the serialized field value", "source": "juraj_google_style"} -{"code": "def push_file(self, local_source, remote_dir):\n\n remote_dest = remote_dir + '/' + os.path.basename(local_source)\n\n try:\n self.makedirs(remote_dir, exist_ok=True)\n except IOError as e:\n logger.exception(\"Pushing {0} to {1} 
failed\".format(local_source, remote_dir))\n if e.errno == 2:\n raise BadScriptPath(e, self.hostname)\n elif e.errno == 13:\n raise BadPermsScriptPath(e, self.hostname)\n else:\n logger.exception(\"File push failed due to SFTP client failure\")\n raise FileCopyException(e, self.hostname)\n try:\n self.sftp_client.put(local_source, remote_dest, confirm=True)\n # Set perm because some systems require the script to be executable\n self.sftp_client.chmod(remote_dest, 0o777)\n except Exception as e:\n logger.exception(\"File push from local source {} to remote destination {} failed\".format(\n local_source, remote_dest))\n raise FileCopyException(e, self.hostname)\n\n return remote_dest", "docstring": "Transport a local file to a directory on a remote machine\n\nArgs:\n - local_source (string): Path\n - remote_dir (string): Remote path\n\nReturns:\n - str: Path to copied file on remote machine\n\nRaises:\n - BadScriptPath : if script path on the remote side is bad\n - BadPermsScriptPath : You do not have perms to make the channel script dir\n - FileCopyException : FileCopy failed.", "source": "juraj_google_style"} -{"code": "def record(self):\n # type: () -> bytes\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('TF record not yet initialized!')\n\n outlist = [b'TF', struct.pack('=BBB', RRTFRecord.length(self.time_flags), SU_ENTRY_VERSION, self.time_flags)]\n for fieldname in self.FIELDNAMES:\n field = getattr(self, fieldname)\n if field is not None:\n outlist.append(field.record())\n\n return b''.join(outlist)", "docstring": "Generate a string representing the Rock Ridge Time Stamp record.\n\n Parameters:\n None.\n\nReturns:\n String containing the Rock Ridge record.", "source": "juraj_google_style"} -{"code": "def accuracy_score(gold, pred, ignore_in_gold=[], ignore_in_pred=[]):\n\n gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)\n\n if len(gold) and len(pred):\n acc = np.sum(gold == pred) / len(gold)\n else:\n acc = 0\n\n return acc", "docstring": "Calculate (micro) accuracy.\n\nArgs:\n gold: A 1d array-like of gold labels\n pred: A 1d array-like of predicted labels (assuming abstain = 0)\n ignore_in_gold: A list of labels for which elements having that gold\n label will be ignored.\n ignore_in_pred: A list of labels for which elements having that pred\n label will be ignored.\n\nReturns:\n A float, the (micro) accuracy score", "source": "juraj_google_style"} -{"code": "def mahalanobis_distances(df, axis=0):\n\n df = df.transpose() if axis == 1 else df\n means = df.mean()\n try:\n inv_cov = np.linalg.inv(df.cov())\n except LinAlgError:\n return pd.Series([np.NAN] * len(df.index), df.index,\n name='Mahalanobis')\n dists = []\n for i, sample in df.iterrows():\n dists.append(mahalanobis(sample, means, inv_cov))\n\n return pd.Series(dists, df.index, name='Mahalanobis')", "docstring": "Returns a pandas Series with Mahalanobis distances for each sample on the\n axis.\n\n Note: does not work well when # of observations < # of dimensions\n Will either return NaN in answer\n or (in the extreme case) fail with a Singular Matrix LinAlgError\n\nArgs:\n df: pandas DataFrame with columns to run diagnostics on\n axis: 0 to find outlier rows, 1 to find outlier columns", "source": "juraj_google_style"} -{"code": "def to_file(self, fp, format_, fps=None, **kwargs):\n\n impl = get_format_class(format_)\n impl.to_file(self, fp, format_, fps=fps, **kwargs)", "docstring": "Write subtitle file to file object.\n\n See :meth:`SSAFile.save()` for full 
description.\n\nNote:\n This is a low-level method. Usually, one of :meth:`SSAFile.save()`\n or :meth:`SSAFile.to_string()` is preferable.\n\nArgs:\n fp (file object): A file object, ie. :class:`io.TextIOBase` instance.\n Note that the file must be opened in text mode (as opposed to binary).", "source": "juraj_google_style"} -{"code": "def qualNormGaussian(data, qualitative):\n\n genes, cells = data.shape\n clusters = qualitative.shape[1]\n output = np.zeros((genes, clusters))\n missing_indices = []\n qual_indices = []\n for i in range(genes):\n if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:\n missing_indices.append(i)\n continue\n qual_indices.append(i)\n threshold = (qualitative[i,:].max() - qualitative[i,:].min())/2.0\n kmeans = KMeans(n_clusters = 2).fit(data[i,:].reshape((1, cells)))\n assignments = kmeans.labels_\n means = kmeans.cluster_centers_\n high_mean = means.max()\n low_mean = means.min()\n for k in range(clusters):\n if qualitative[i,k]>threshold:\n output[i,k] = high_mean\n else:\n output[i,k] = low_mean\n if missing_indices:\n #generating centers for missing indices \n M_init = output[qual_indices, :]\n kmeans = KMeans(n_clusters = 2, init = M_init, max_iter = 1).fit(data[qual_indices, :])\n assignments = kmeans.labels_\n #assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)\n for ind in missing_indices:\n for k in range(clusters):\n output[ind, k] = np.mean(data[ind, assignments==k])\n # TODO: assign to closest\n return output", "docstring": "Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.\n\nArgs:\n data (array): 2d array of genes x cells\n qualitative (array): 2d array of numerical data - genes x clusters\n\nReturns:\n Array of starting positions for state estimation or\n clustering, with shape genes x clusters", "source": "juraj_google_style"} -{"code": "def set_target(self, target: EventDispatcherBase) -> None:\n\n if self._target is not None:\n raise PermissionError(\"The target property already has a valid value.\")\n\n if not isinstance(target, EventDispatcherBase):\n raise TypeError(\"Invalid target type: {}\".format(target))\n\n self._target = target", "docstring": "This method should be called by the event dispatcher that dispatches this event\n to set its target property.\n\nArgs:\n target (EventDispatcherBase): The event dispatcher that will dispatch this event.\n\nRaises:\n PermissionError: If the target property of the event has already been set.\n TypeError: If `target` is not an `EventDispatcherBase` instance.", "source": "juraj_google_style"} -{"code": "def remove_duplicate_sg(security_groups):\n\n for each_sg, duplicate_sg_name in SECURITYGROUP_REPLACEMENTS.items():\n if each_sg in security_groups and duplicate_sg_name in security_groups:\n LOG.info('Duplicate SG found. 
Removing %s in favor of %s.', duplicate_sg_name, each_sg)\n security_groups.remove(duplicate_sg_name)\n\n return security_groups", "docstring": "Removes duplicate Security Groups that share a same name alias\n\nArgs:\n security_groups (list): A list of security group id to compare against SECURITYGROUP_REPLACEMENTS\n\nReturns:\n security_groups (list): A list of security groups with duplicate aliases removed", "source": "juraj_google_style"} -{"code": "def _parse_batch_lastlog(last_log):\n\n regexp = re.compile('(-?[0-9]\\d*):\\W+(.*)')\n\n wrong_commands = list()\n for line in last_log:\n result = regexp.match(line)\n if result is not None:\n status_code = result.group(1)\n command = result.group(2)\n if int(status_code) < 0:\n wrong_commands.append((status_code, command))\n\n return wrong_commands", "docstring": "This static method will help reading the result of the commit, command by command.\n\nArgs:\n last_log(list): A list containing, line by line, the result of committing the changes.\n\nReturns:\n A list of tuples that went wrong. The tuple will contain (*status_code*, *command*)", "source": "juraj_google_style"} -{"code": "def run(self, timeout=-1):\n\n def target():\n self.process = subprocess.Popen(self.cmd,\n stdout=self.stdout_dest,\n stderr=self.stderr_dest,\n shell=self.shell)\n stdout, stderr = self.process.communicate()\n\n # Decode output if the user wants, and if there is any.\n if self.decode_out:\n if stdout:\n self.stdout = stdout.decode(\"utf-8\")\n if stderr:\n self.stderr = stderr.decode(\"utf-8\")\n\n thread = threading.Thread(target=target)\n thread.start()\n\n if timeout > 0:\n thread.join(timeout)\n if thread.is_alive():\n self.process.terminate()\n thread.join()\n raise SubprocessError((\"Reached timeout after {t} seconds\"\n .format(t=timeout)))\n else:\n thread.join()\n\n return self.process.returncode, self.stdout, self.stderr", "docstring": "Run the subprocess.\n\nArgs:\n timeout (optional) If a positive real value, then timout after\n the given number of seconds.\n\nRaises:\n SubprocessError If subprocess has not completed after \"timeout\"\n seconds.", "source": "juraj_google_style"} -{"code": "def search_users(self, user):\n\n user_url = \"%s/%s/%s\" % (self.url, \"user\", user)\n response = self.jss.get(user_url)\n return LDAPUsersResults(self.jss, response)", "docstring": "Search for LDAP users.\n\nArgs:\n user: User to search for. It is not entirely clear how the\n JSS determines the results- are regexes allowed, or\n globbing?\n\nReturns:\n LDAPUsersResult object.\n\nRaises:\n Will raise a JSSGetError if no results are found.", "source": "juraj_google_style"} -{"code": "def set_hash_value(self, key, field, value, pipeline=False):\n\n # FIXME(BMo): new name for this function -> save_dict_value ?\n if pipeline:\n self._pipeline.hset(key, field, str(value))\n else:\n self._db.hset(key, field, str(value))", "docstring": "Set the value of field in a hash stored at key.\n\nArgs:\n key (str): key (name) of the hash\n field (str): Field within the hash to set\n value: Value to set\n pipeline (bool): True, start a transaction block. 
Default false.", "source": "juraj_google_style"} -{"code": "def to_wea(self, file_path, hoys=None):\n\n hoys = hoys or xrange(len(self.direct_normal_radiation.datetimes))\n if not file_path.lower().endswith('.wea'):\n file_path += '.wea'\n\n originally_ip = False\n if self.is_ip is True:\n self.convert_to_si()\n originally_ip = True\n\n # write header\n lines = [self._get_wea_header()]\n # write values\n datetimes = self.direct_normal_radiation.datetimes\n for hoy in hoys:\n dir_rad = self.direct_normal_radiation[hoy]\n dif_rad = self.diffuse_horizontal_radiation[hoy]\n line = \"%d %d %.3f %d %d\\n\" \\\n % (datetimes[hoy].month,\n datetimes[hoy].day,\n datetimes[hoy].hour + 0.5,\n dir_rad, dif_rad)\n lines.append(line)\n\n file_data = ''.join(lines)\n write_to_file(file_path, file_data, True)\n\n if originally_ip is True:\n self.convert_to_ip()\n\n return file_path", "docstring": "Write an wea file from the epw file.\n\n WEA carries radiation values from epw. Gendaymtx uses these values to\n generate the sky. For an annual analysis it is identical to using epw2wea.\n\nArgs:\n file_path: Full file path for output file.\n hoys: List of hours of the year. Default is 0-8759.", "source": "juraj_google_style"} -{"code": "def _get_and_write_fp(self, iso_path, outfp, blocksize):\n # type: (bytes, BinaryIO, int) -> None\n\n try:\n return self._get_file_from_iso_fp(outfp, blocksize, None, None, iso_path)\n except pycdlibexception.PyCdlibException:\n pass\n\n try:\n return self._get_file_from_iso_fp(outfp, blocksize, iso_path, None, None)\n except pycdlibexception.PyCdlibException:\n pass\n\n self._get_file_from_iso_fp(outfp, blocksize, None, iso_path, None)", "docstring": "An internal method to fetch a single file from the ISO and write it out\n to the file object.\n\n Parameters:\n iso_path - The absolute path to the file to get data from.\n outfp - The file object to write data to.\n blocksize - The blocksize to use when copying data.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def _ParseMRUListValue(self, registry_key):\n\n mrulist_value = registry_key.GetValueByName('MRUList')\n\n # The key exists but does not contain a value named \"MRUList\".\n if not mrulist_value:\n return None\n\n mrulist_entries_map = self._GetDataTypeMap('mrulist_entries')\n\n context = dtfabric_data_maps.DataTypeMapContext(values={\n 'data_size': len(mrulist_value.data)})\n\n return self._ReadStructureFromByteStream(\n mrulist_value.data, 0, mrulist_entries_map, context=context)", "docstring": "Parses the MRUList value in a given Registry key.\n\nArgs:\n registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains\n the MRUList value.\n\nReturns:\n mrulist_entries: MRUList entries or None if not available.", "source": "juraj_google_style"} -{"code": "def get(self, position=0):\n\n n = len(self)\n if n == 1:\n return self[0]\n\n pos = position\n\n if self.length and self.autoscale:\n pos *= len(self)\n pos /= self.length\n\n pos *= self.scale\n pos += self.offset\n\n if not self.continuous:\n if not self.serpentine:\n return self[int(pos % n)]\n\n # We want a color sequence of length 2n-2\n # e.g. 
for n=5: a b c d | e d c b | a b c d ...\n m = (2 * n) - 2\n pos %= m\n if pos < n:\n return self[int(pos)]\n else:\n return self[int(m - pos)]\n\n if self.serpentine:\n pos %= (2 * n)\n if pos > n:\n pos = (2 * n) - pos\n else:\n pos %= n\n\n # p is a number in [0, n): scale it to be in [0, n-1)\n pos *= n - 1\n pos /= n\n\n index = int(pos)\n fade = pos - index\n if not fade:\n return self[index]\n\n r1, g1, b1 = self[index]\n r2, g2, b2 = self[(index + 1) % len(self)]\n dr, dg, db = r2 - r1, g2 - g1, b2 - b1\n\n return r1 + fade * dr, g1 + fade * dg, b1 + fade * db", "docstring": "Return a color interpolated from the Palette.\n\n In the case where continuous=False, serpentine=False, scale=1,\n autoscale=False, and offset=0, this is exactly the same as plain old []\n indexing, but with a wrap-around.\n\n The constructor parameters affect this result as documented in the\n constructor.\n\nArgs:\n ``position``:\n May be any integer or floating point number", "source": "juraj_google_style"} -{"code": "def watchlist_movies(self, **kwargs):\n\n path = self._get_id_path('watchlist_movies')\n kwargs.update({'session_id': self.session_id})\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Get the list of movies on an account watchlist.\n\nArgs:\n page: (optional) Minimum 1, maximum 1000.\n sort_by: (optional) 'created_at.asc' | 'created_at.desc'\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "source": "juraj_google_style"} -{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n\n # All checks for correct path spec is done in SQLiteBlobFile.\n # Therefore, attempt to open the path specification and\n # check if errors occurred.\n try:\n file_object = resolver.Resolver.OpenFileObject(\n path_spec, resolver_context=self._resolver_context)\n except (IOError, ValueError, errors.AccessError, errors.PathSpecError):\n return False\n\n file_object.close()\n return True", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\n path_spec (PathSpec): path specification.\n\nReturns:\n bool: True if the file entry exists.", "source": "juraj_google_style"} -{"code": "def make_noise_surface(dims=DEFAULT_DIMS, blur=10, seed=None):\n\n if seed is not None:\n np.random.seed(seed)\n\n return gaussian_filter(np.random.normal(size=dims), blur)", "docstring": "Makes a surface by generating random noise and blurring it.\n\nArgs:\n dims (pair): the dimensions of the surface to create\n blur (float): the amount of Gaussian blur to apply\n seed (int): a random seed to use (optional)\n\nReturns:\n surface: A surface.", "source": "juraj_google_style"} -{"code": "def learn_sfa(self, mma=None):\n\n logging.info('Initializing learning procedure.')\n if mma:\n self._init_table_from_dfa(mma)\n else:\n self._init_table()\n\n logging.info('Generating a closed and consistent observation table.')\n while True:\n\n closed = False\n # Make sure that the table is closed\n while not closed:\n logging.debug('Checking if table is closed.')\n closed, s = self.observation_table.is_closed()\n if not closed:\n logging.debug('Closing table.')\n self._ot_make_closed(s)\n else:\n logging.debug('Table closed.')\n\n # Create conjecture\n sfa = self.get_sfa_conjecture()\n\n logging.info('Generated conjecture machine with %d states.',\n len(list(sfa.states)))\n\n # _check correctness\n logging.debug('Running equivalence query.')\n found, counter_example = self._equivalence_query(sfa)\n\n # 
Are we done?\n if found:\n logging.info('No counterexample found. Hypothesis is correct!')\n break\n\n # Add the new experiments into the table to reiterate the\n # learning loop\n logging.info(\n 'Processing counterexample %s with length %d.',\n counter_example,\n len(counter_example))\n self._process_counter_example(sfa, counter_example)\n\n logging.info('Learning complete.')\n return '', sfa", "docstring": "Implements the high level loop of the algorithm for learning a\n Mealy machine.\n\nArgs:\n mma:\n\nReturns:\n MealyMachine: A model for the Mealy machine to be learned.", "source": "juraj_google_style"} -{"code": "def get_placeholders(arg, check_duplicates=False):\n\n placeholders = []\n last_match = None\n arg = normalize_placeholders(arg)\n for cur_match in re.finditer(r'\\s*{{|}}\\s*', arg):\n matched_text = cur_match.group().strip()\n if not last_match and matched_text == '{{':\n last_match = cur_match\n continue\n\n last_matched_text = '' if not last_match else last_match.group().strip()\n # Check if the positional argument is enclosed with {{ }} properly\n if (not last_matched_text and matched_text == '}}') or (last_matched_text == '{{' and matched_text != '}}'):\n raise CLIError(PLACEHOLDER_BRACKETS_ERROR.format(arg))\n elif last_matched_text == '{{' and matched_text == '}}':\n # Extract start and end index of the placeholder name\n start_index, end_index = last_match.span()[1], cur_match.span()[0]\n placeholders.append(arg[start_index: end_index].strip())\n last_match = None\n\n # last_match did not reset - that means brackets are not enclosed properly\n if last_match:\n raise CLIError(PLACEHOLDER_BRACKETS_ERROR.format(arg))\n\n # Make sure there is no duplicated placeholder names\n if check_duplicates and len(placeholders) != len(set(placeholders)):\n raise CLIError(DUPLICATED_PLACEHOLDER_ERROR.format(arg))\n\n return placeholders", "docstring": "Get all the placeholders' names in order.\n Use the regex below to locate all the opening ({{) and closing brackets (}}).\n After that, extract \"stuff\" inside the brackets.\n\nArgs:\n arg: The word which this function performs searching on.\n check_duplicates: True if we want to check for duplicated positional arguments.\n\nReturns:\n A list of positional arguments in order.", "source": "juraj_google_style"} -{"code": "def search(self,limit,start_date=None,end_date=None,clipper=None):\n\n\n search_string = self._query_builder(start_date,\n end_date,\n clipper\n )\n\n # Have to manually build the URI to bypass requests URI encoding\n # The api server doesn't accept encoded URIs\n #r = requests.get('%s?%s&&maxRecords=%s' % (self.api_url,\n # search_string,\n # limit))\n\n try:\n r = requests.get('%s?%s&&maxRecords=%s' % (self.api_url,\n search_string,\n limit)) \n r.raise_for_status()\n except requests.HTTPError, e:\n exit (\"site is not available\")\n\n r_dict = json.loads(r.text)\n\n result={}\n\n if (r_dict['features'] == 0):\n result['status'] = u'error'\n result['message'] = \"error while loading datas\"\n\n else:\n result['status'] = u'SUCCESS'\n result['total'] = len(r_dict['features'])\n result['limit'] = limit\n result['ID']=[i['id'] for i in r_dict['features']]\n result['downloads']=[{\"download\" : i['properties']['services']['download']['url'],\n \"id\" : i['id']}\n for i in r_dict['features']]\n result['results'] = {\n \"features\": [{\n 'properties':{'sceneID': i['id'],\n 'sat_type': i['properties']['platform'],\n 'thumbnail': i['properties']['thumbnail'],\n 'date': i['properties']['completionDate'],\n 'download': 
i['properties']['services']['download']['url']}\n ,\n 'geometry': i['geometry'],\n \"type\": \"Feature\"}\n for i in r_dict['features']],\n \"type\": \"FeatureCollection\"\n }\n\n\n\n return result", "docstring": "The main method of Search class. It searches tTheia Landsat API\n Returns python dictionary\n\nArgs:\n start_date -- date string. format: YYYY-MM-DD\n end_date -- date string. format: YYYY-MM-DD\n limit -- integer specigying the maximum results return.\n clipper -- clipper object : clipper.bbox / clipper.town", "source": "juraj_google_style"} -{"code": "def __init__(self, executable_path: _PATH = 'default', port: Union[int, str] = 5037, env: Dict = None, service_args: Union[list, tuple] = None) -> None:\n\n\n self._service_args = service_args or []\n\n super(Service, self).__init__(executable_path, port=port, env=env)", "docstring": "Creates a new instance of the Service.\n\nArgs:\n executable_path: Path to the AndroidDriver.\n port: Port the service is running on.\n env: Environment variables.\n service_args: List of args to pass to the androiddriver service.", "source": "juraj_google_style"} -{"code": "def mapIdentityResponse(\n self, primarySubject, secondarySubject, vendorSpecific=None\n ):\n\n mmp_dict = {\n 'primarySubject': primarySubject.toxml('utf-8'),\n 'secondarySubject': secondarySubject.toxml('utf-8'),\n }\n return self.POST(['accounts', 'map'], fields=mmp_dict, headers=vendorSpecific)", "docstring": "CNIdentity.mapIdentity(session, subject) → boolean\n https://releases.dataone.org/online/api-\n documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.mapIdentity.\n\nArgs:\n primarySubject:\n secondarySubject:\n vendorSpecific:\n\n Returns:", "source": "juraj_google_style"} -{"code": "def activate(self, experiment_key, user_id, attributes=None):\n\n\n if not self.is_valid:\n self.logger.error(enums.Errors.INVALID_DATAFILE.format('activate'))\n return None\n\n if not validator.is_non_empty_string(experiment_key):\n self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n return None\n\n if not isinstance(user_id, string_types):\n self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n return None\n\n variation_key = self.get_variation(experiment_key, user_id, attributes)\n\n if not variation_key:\n self.logger.info('Not activating user \"%s\".' % user_id)\n return None\n\n experiment = self.config.get_experiment_from_key(experiment_key)\n variation = self.config.get_variation_from_key(experiment_key, variation_key)\n\n # Create and dispatch impression event\n self.logger.info('Activating user \"%s\" in experiment \"%s\".' 
% (user_id, experiment.key))\n self._send_impression_event(experiment, variation, user_id, attributes)\n\n return variation.key", "docstring": "Buckets visitor and sends impression event to Optimizely.\n\nArgs:\n experiment_key: Experiment which needs to be activated.\n user_id: ID for user.\n attributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\n Variation key representing the variation the user will be bucketed in.\n None if user is not in experiment or if experiment is not Running.", "source": "juraj_google_style"} -{"code": "def is_module_function(obj, prop):\n\n python_version = sys.version_info[0]\n if python_version == 3:\n unicode = str\n\n if prop and (isinstance(prop, str) or isinstance(prop, unicode)): #property\n if prop in dir(obj):\n if (\n isinstance(getattr(obj, prop), FunctionType)\n or isinstance(getattr(obj, prop), BuiltinFunctionType)\n or inspect.ismethod(getattr(obj, prop))\n ):\n #inspect.ismethod for python2.7\n #isinstance(...) for python3.x\n return True\n else:\n ErrorHandler.prop_is_func_error(obj, prop)\n else:\n ErrorHandler.prop_in_obj_error(obj, prop)\n elif prop:\n ErrorHandler.prop_type_error(prop)\n return False", "docstring": "Checking and setting type to MODULE_FUNCTION\n\nArgs:\n obj: ModuleType\n prop: FunctionType\n\nReturns:\n Boolean\n\nRaises:\n prop_type_error: When the type of prop is not valid\n prop_in_obj_error: When prop is not in the obj(module/class)\n prop_is_func_error: When prop is not a callable stuff", "source": "juraj_google_style"} -{"code": "def __init__(self, resolver_context):\n\n super(OSFile, self).__init__(resolver_context)\n self._file_object = None\n self._size = 0", "docstring": "Initializes a file-like object.\n\nArgs:\n resolver_context (Context): resolver context.", "source": "juraj_google_style"} -{"code": "def reconstruct_text(tokens: List[Token]) -> str:\n\n return \"\".join([x.text_with_ws for x in tokens])", "docstring": "Given a list of tokens, reconstruct the original text with as much fidelity as possible.\n\nArgs:\n [tokens]:\n\n Returns: a string.", "source": "juraj_google_style"} -{"code": "def BalanceFor(self, assetId):\n\n for key, fixed8 in self.Balances.items():\n if key == assetId:\n return fixed8\n return Fixed8(0)", "docstring": "Get the balance for a given asset id.\n\nArgs:\n assetId (UInt256):\n\nReturns:\n Fixed8: balance value.", "source": "juraj_google_style"} -{"code": "def __live_receivers(signal):\n\n with __lock:\n __purge()\n receivers = [funcref() for funcref in __receivers[signal]]\n\n return receivers", "docstring": "Return all signal handlers that are currently still alive for the\n input `signal`.\n\nArgs:\n signal: A signal name.\n\nReturns:\n A list of callable receivers for the input signal.", "source": "juraj_google_style"} -{"code": "def get_service_name(self, service_id: str) -> str:\n\n # Raise an exception if we are not a manager\n if not self._manager:\n raise RuntimeError('Only the Swarm manager node can retrieve all'\n ' the services details.')\n\n service = self._client.services.get(service_id)\n return service.name", "docstring": "Get the name of the docker service.\n\n Only the manager nodes can retrieve service name\n\nArgs:\n service_id (string): List of service ID\n\nReturns:\n string, name of the docker service", "source": "juraj_google_style"} -{"code": "def check_cmms_gender(self, ind_object):\n\n ind_id = ind_object.individual_id.split('-')\n sex = ind_object.sex\n sex_code = int(ind_id[-1][:-1])# Males allways have odd 
numbers and womans even\n if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):\n raise WrongGender(ind_object.individual_id, sex, sex_code)\n\n return True", "docstring": "Check if the phenotype is correct.\n\nArgs:\n ind_object : An Individuals object\n\nYields:\n bool : True if phenotype status is correct\n False otherwise", "source": "juraj_google_style"} -{"code": "def data(self, rows=None):\n\n rows = tf.range(self._capacity) if rows is None else rows\n assert rows.shape.ndims == 1\n episode = tools.nested.map(lambda var: tf.gather(var, rows), self._buffers)\n length = tf.gather(self._length, rows)\n return episode, length", "docstring": "Access a batch of episodes from the memory.\n\n Padding elements after the length of each episode are unspecified and might\n contain old data.\n\nArgs:\n rows: Episodes to select, defaults to all.\n\nReturns:\n Tuple containing a tuple of transition quantities with batch and time\n dimensions, and a batch of sequence lengths.", "source": "juraj_google_style"} -{"code": "def yesno(question, default=None):\n\n\n if default is not None:\n if isinstance(default, bool):\n pass\n else:\n default_ = default.upper()\n if default_ not in ('Y', 'YES', 'N', 'NO'):\n raise RuntimeError(\"Invalid default value: '{}'\".format(default))\n default = default_ in ('Y', 'YES')\n\n while True:\n ans = input(\"{} ({}/{})? \".format(question, \"Y\" if default == True else \"y\",\n \"N\" if default == False else \"n\")).upper()\n if ans == \"\" and default is not None:\n ret = default\n break\n elif ans in (\"N\", \"NO\"):\n ret = False\n break\n elif ans in (\"Y\", \"YES\"):\n ret = True\n break\n return ret", "docstring": "Asks a yes/no question\n\nArgs:\n question: string **without** the question mark and without the options.\n Example: 'Create links'\n default: default option. 
Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of\n these valus (this argument is case-insensitive)\n\nReturns:\n bool: True if user answered Yes, False otherwise", "source": "juraj_google_style"} -{"code": "def add_filter(self, name, operator, value):\n\n self._filters.append({'name': name, 'operator': operator, 'value': value})", "docstring": "Add ThreatConnect API Filter for this resource request.\n\n External Reference:\n https://docs.threatconnect.com\n\nArgs:\n name (string): The filter field name.\n operator (string): The filter comparison operator.\n value (string): The filter value.", "source": "juraj_google_style"} -{"code": "def from_json(cls, data):\n\n assert 'name' in data, 'Required keyword \"name\" is missing!'\n assert 'data_type' in data, 'Required keyword \"data_type\" is missing!'\n if cls._type_enumeration is None:\n cls._type_enumeration = _DataTypeEnumeration(import_modules=False)\n\n if data['data_type'] == 'GenericType':\n assert 'base_unit' in data, \\\n 'Keyword \"base_unit\" is missing and is required for GenericType.'\n return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])\n elif data['data_type'] in cls._type_enumeration._TYPES:\n clss = cls._type_enumeration._TYPES[data['data_type']]\n if data['data_type'] == data['name'].title().replace(' ', ''):\n return clss()\n else:\n instance = clss()\n instance._name = data['name']\n return instance\n else:\n raise ValueError(\n 'Data Type {} could not be recognized'.format(data['data_type']))", "docstring": "Create a data type from a dictionary.\n\nArgs:\n data: Data as a dictionary.\n {\n \"name\": data type name of the data type as a string\n \"data_type\": the class name of the data type as a string\n \"base_unit\": the base unit of the data type\n }", "source": "juraj_google_style"} -{"code": "def _handle_error_response(response_body):\n\n try:\n error_data = json.loads(response_body)\n error_details = '{}: {}'.format(\n error_data['error'],\n error_data.get('error_description'))\n # If no details could be extracted, use the response data.\n except (KeyError, ValueError):\n error_details = response_body\n\n raise exceptions.RefreshError(\n error_details, response_body)", "docstring": "Translates an error response into an exception.\n\nArgs:\n response_body (str): The decoded response data.\n\nRaises:\n google.auth.exceptions.RefreshError", "source": "juraj_google_style"} -{"code": "def convert_to_uri(self, value, strip_iri=True):\n\n parsed = self.parse_uri(str(value))\n\n try:\n new_uri = \"%s%s\" % (self.ns_dict[parsed[0]], parsed[1])\n if not strip_iri:\n return self.iri(new_uri)\n return new_uri\n except KeyError:\n return self.rpyhttp(value)", "docstring": "converts a prefixed rdf ns equivalent value to its uri form.\n If not found returns the value as is\n\nArgs:\n value: the URI/IRI to convert\n strip_iri: removes the < and > signs\n rdflib_uri: returns an rdflib URIRef", "source": "juraj_google_style"} -{"code": "def wait(self, timeout=None):\n\n poll = 30\n while not self._is_complete:\n try:\n query_result = self._api.jobs_query_results(self._job_id,\n project_id=self._context.project_id,\n page_size=0,\n timeout=poll * 1000)\n except Exception as e:\n raise e\n if query_result['jobComplete']:\n if 'totalBytesProcessed' in query_result:\n self._bytes_processed = int(query_result['totalBytesProcessed'])\n self._cache_hit = query_result.get('cacheHit', None)\n if 'totalRows' in query_result:\n self._total_rows = int(query_result['totalRows'])\n break\n\n if timeout is 
not None:\n timeout -= poll\n if timeout <= 0:\n break\n\n self._refresh_state()\n return self", "docstring": "Wait for the job to complete, or a timeout to happen.\n\n This is more efficient than the version in the base Job class, in that we can\n use a call that blocks for the poll duration rather than a sleep. That means we\n shouldn't block unnecessarily long and can also poll less.\n\nArgs:\n timeout: how long to wait (in seconds) before giving up; default None which means no timeout.\n\nReturns:\n The QueryJob", "source": "juraj_google_style"} -{"code": "def QA_fetch_user(user_cookie, db=DATABASE):\n\n collection = DATABASE.account\n\n return [res for res in collection.find({'user_cookie': user_cookie}, {\"_id\": 0})]", "docstring": "get the user\n\nArgs:\n user_cookie : str the unique cookie_id for a user\n Keyword Arguments:\n db: database for query\n\nReturns:\n list --- [ACCOUNT]", "source": "juraj_google_style"} -{"code": "def execute(mp):\n\n # Reading and writing data works like this:\n with mp.open(\"file1\", resampling=\"bilinear\") as raster_file:\n if raster_file.is_empty():\n return \"empty\"\n # This assures a transparent tile instead of a pink error tile\n # is returned when using mapchete serve.\n dem = raster_file.read()\n return dem", "docstring": "Example process for testing.\n\n Inputs:\n -------\n file1\n raster file\n\n Parameters:\n -----------\n\n Output:\n -------\n np.ndarray", "source": "juraj_google_style"} -{"code": "def _lookup_global(self, symbol):\n\n assert symbol.parts\n namespace = self.namespaces\n if len(symbol.parts) == 1:\n # If there is only one part, look in globals.\n namespace = self.namespaces[None]\n try:\n # Try to do a normal, global namespace lookup.\n return self._lookup_namespace(symbol, namespace)\n except Error as orig_exc:\n try:\n # The normal lookup can fail if all of the parts aren't\n # namespaces. This happens with OuterClass::Inner.\n namespace = self.namespaces[None]\n return self._lookup_namespace(symbol, namespace)\n except Error:\n raise orig_exc", "docstring": "Helper for lookup_symbol that only looks up global variables.\n\nArgs:\n symbol: Symbol", "source": "juraj_google_style"} -{"code": "def get_clinvar_submission(store, institute_id, case_name, variant_id, submission_id):\n\n\n institute_obj, case_obj = institute_and_case(store, institute_id, case_name)\n pinned = [store.variant(variant_id) or variant_id for variant_id in\n case_obj.get('suspects', [])]\n variant_obj = store.variant(variant_id)\n clinvar_submission_objs = store.clinvars(submission_id=submission_id)\n return dict(\n today = str(date.today()),\n institute=institute_obj,\n case=case_obj,\n variant=variant_obj,\n pinned_vars=pinned,\n clinvars = clinvar_submission_objs\n )", "docstring": "Collects all variants from the clinvar submission collection with a specific submission_id\n\nArgs:\n store(scout.adapter.MongoAdapter)\n institute_id(str): Institute ID\n case_name(str): case ID\n variant_id(str): variant._id\n submission_id(str): clinvar submission id, i.e. 
SUB76578\n\nReturns:\n A dictionary with all the data to display the clinvar_update.html template page", "source": "juraj_google_style"} -{"code": "def add_answer(self, vote, rationale):\n\n self.raw_answers.append({\n VOTE_KEY: vote,\n RATIONALE_KEY: rationale,\n })", "docstring": "Add an answer\n\nArgs:\n vote (int): the option that student voted for\n rationale (str): the reason why the student vote for the option", "source": "juraj_google_style"} -{"code": "def dict_from_items_with_values(*dictionaries, **items):\n\n dict_list = list(dictionaries)\n dict_list.append(items)\n result = {}\n for d in dict_list:\n for key, value in d.items():\n if value is not None:\n result[key] = value\n return result", "docstring": "Creates a dict with the inputted items; pruning any that are `None`.\n\nArgs:\n *dictionaries(dict): Dictionaries of items to be pruned and included.\n **items: Items to be pruned and included.\n\nReturns:\n dict: A dictionary containing all of the items with a 'non-None' value.", "source": "juraj_google_style"} -{"code": "def get_access_token(self, code):\n\n\n payload = {'redirect_uri': self.redirect_uri,\n 'code': code,\n 'grant_type': 'authorization_code'}\n\n headers = self._make_authorization_headers()\n\n response = requests.post(self.OAUTH_TOKEN_URL, data=payload,\n headers=headers, verify=LOGIN_VERIFY_SSL_CERT)\n if response.status_code is not 200:\n raise MercedesMeAuthError(response.reason)\n token_info = response.json()\n token_info = self._add_custom_values_to_token_info(token_info)\n self._save_token_info(token_info)\n return token_info", "docstring": "Gets the access token for the app given the code\n\n Parameters:\n - code - the response code", "source": "juraj_google_style"} -{"code": "def ExpandGroups(path):\n\n precondition.AssertType(path, Text)\n\n chunks = []\n offset = 0\n\n for match in PATH_GROUP_REGEX.finditer(path):\n chunks.append([path[offset:match.start()]])\n chunks.append(match.group(\"alts\").split(\",\"))\n offset = match.end()\n\n chunks.append([path[offset:]])\n\n for prod in itertools.product(*chunks):\n yield \"\".join(prod)", "docstring": "Performs group expansion on a given path.\n\n For example, given path `foo/{bar,baz}/{quux,norf}` this method will yield\n `foo/bar/quux`, `foo/bar/norf`, `foo/baz/quux`, `foo/baz/norf`.\n\nArgs:\n path: A path to expand.\n\nYields:\n Paths that can be obtained from given path by expanding groups.", "source": "juraj_google_style"} -{"code": "def ResultCollectionForFID(cls, flow_id):\n\n # TODO: Disallow/remove URNs after migration.\n if not isinstance(flow_id, rdfvalue.RDFURN):\n flow_id = rdfvalue.RDFURN(flow_id)\n\n return sequential_collection.GeneralIndexedCollection(\n flow_id.Add(RESULTS_SUFFIX))", "docstring": "Returns the ResultCollection for the flow with a given flow_id.\n\nArgs:\n flow_id: The id of the flow, a RDFURN of the form aff4:/flows/F:123456.\n\nReturns:\n The collection containing the results for the flow identified by the id.", "source": "juraj_google_style"} -{"code": "def _ParseComment(self, structure):\n\n if structure[1] == 'Date:':\n self._year, self._month, self._day_of_month, _, _, _ = structure.date_time\n elif structure[1] == 'Fields:':\n self._ParseFieldsMetadata(structure)", "docstring": "Parses a comment.\n\nArgs:\n structure (pyparsing.ParseResults): structure parsed from the log file.", "source": "juraj_google_style"} -{"code": "def lazy(function, *args, **kwargs):\n\n\n NOT_EVALUATED = object()\n retval = [NOT_EVALUATED]\n\n def evaluate():\n if retval[0] is 
NOT_EVALUATED:\n retval[0] = function(*args, **kwargs)\n return retval[0]\n\n return evaluate", "docstring": "Produces a callable so that functions can be lazily evaluated in\n templates.\n\nArgs:\n function (callable): The function to call at evaluation time.\n\n args: Positional arguments, passed directly to ``function``.\n\n kwargs: Keyword arguments, passed directly to ``function``.\n\nReturns:\n callable: A callable that will evaluate a call to ``function`` with\n the specified arguments.", "source": "juraj_google_style"} -{"code": "def __init__(self, examples):\n\n self.config = {}\n self.set_examples(examples)\n self.set_model_type('classification')\n self.set_label_vocab([])", "docstring": "Constructs the WitConfigBuilder object.\n\nArgs:\n examples: A list of tf.Example or tf.SequenceExample proto objects.\n These are the examples that will be displayed in WIT. If not model to\n infer these examples with is specified through the methods on this class,\n then WIT will display the examples for exploration, but no model inference\n will be performed by the tool.", "source": "juraj_google_style"} -{"code": "def init_logger(level, printout=True):\n\n root_logger = logging.getLogger(\"boussole\")\n root_logger.setLevel(level)\n\n # Redirect outputs to the void space, mostly for usage within unittests\n if not printout:\n from io import StringIO\n dummystream = StringIO()\n handler = logging.StreamHandler(dummystream)\n # Standard output with colored messages\n else:\n handler = logging.StreamHandler()\n handler.setFormatter(\n colorlog.ColoredFormatter(\n '%(asctime)s - %(log_color)s%(message)s',\n datefmt=\"%H:%M:%S\"\n )\n )\n\n root_logger.addHandler(handler)\n\n return root_logger", "docstring": "Initialize app logger to configure its level/handler/formatter/etc..\n\n Todo:\n * A mean to raise click.Abort or sys.exit when CRITICAL is used;\n\nArgs:\n level (str): Level name (``debug``, ``info``, etc..).\n\n Keyword Arguments:\n printout (bool): If False, logs will never be outputed.\n\nReturns:\n logging.Logger: Application logger.", "source": "juraj_google_style"} -{"code": "def call(self, inputs):\n\n del inputs # unused\n with tf.compat.v1.name_scope(self._name):\n return tfd.MultivariateNormalDiag(self.loc, self.scale_diag)", "docstring": "Runs the model to generate multivariate normal distribution.\n\nArgs:\n inputs: Unused.\n\nReturns:\n A MultivariateNormalDiag distribution with event shape\n [dimensions], batch shape [], and sample shape [sample_shape,\n dimensions].", "source": "juraj_google_style"} -{"code": "def SetTimelineOwner(self, username):\n\n self._timeline_owner = username\n logger.info('Owner of the timeline: {0!s}'.format(self._timeline_owner))", "docstring": "Sets the username of the user that should own the timeline.\n\nArgs:\n username (str): username.", "source": "juraj_google_style"} -{"code": "def RegisterHasher(cls, hasher_class):\n\n hasher_name = hasher_class.NAME.lower()\n if hasher_name in cls._hasher_classes:\n raise KeyError((\n 'hasher class already set for name: {0:s}.').format(\n hasher_class.NAME))\n\n cls._hasher_classes[hasher_name] = hasher_class", "docstring": "Registers a hasher class.\n\n The hasher classes are identified based on their lower case name.\n\nArgs:\n hasher_class (type): class object of the hasher.\n\nRaises:\n KeyError: if hasher class is already set for the corresponding name.", "source": "juraj_google_style"} -{"code": "def SetFlushInterval(self, flush_interval):\n\n self._flush_interval = flush_interval\n 
logger.debug('Elasticsearch flush interval: {0:d}'.format(flush_interval))", "docstring": "Set the flush interval.\n\nArgs:\n flush_interval (int): number of events to buffer before doing a bulk\n insert.", "source": "juraj_google_style"} -{"code": "def load_scout(adapter, config, ped=None, update=False):\n\n log.info(\"Check that the panels exists\")\n if not check_panels(adapter, config.get('gene_panels', []),\n config.get('default_gene_panels')):\n raise ConfigError(\"Some panel(s) does not exist in the database\")\n case_obj = adapter.load_case(config, update=update)\n return case_obj", "docstring": "Load a new case from a Scout config.\n\nArgs:\n adapter(MongoAdapter)\n config(dict): loading info\n ped(Iterable(str)): Pedigree ingformation\n update(bool): If existing case should be updated", "source": "juraj_google_style"} -{"code": "def zopen(filename, *args, **kwargs):\n\n if Path is not None and isinstance(filename, Path):\n filename = str(filename)\n\n name, ext = os.path.splitext(filename)\n ext = ext.upper()\n if ext == \".BZ2\":\n if PY_VERSION[0] >= 3:\n return bz2.open(filename, *args, **kwargs)\n else:\n args = list(args)\n if len(args) > 0:\n args[0] = \"\".join([c for c in args[0] if c != \"t\"])\n if \"mode\" in kwargs:\n kwargs[\"mode\"] = \"\".join([c for c in kwargs[\"mode\"]\n if c != \"t\"])\n return bz2.BZ2File(filename, *args, **kwargs)\n elif ext in (\".GZ\", \".Z\"):\n return gzip.open(filename, *args, **kwargs)\n else:\n return io.open(filename, *args, **kwargs)", "docstring": "This function wraps around the bz2, gzip and standard python's open\n function to deal intelligently with bzipped, gzipped or standard text\n files.\n\nArgs:\n filename (str/Path): filename or pathlib.Path.\n \\*args: Standard args for python open(..). E.g., 'r' for read, 'w' for\n write.\n \\*\\*kwargs: Standard kwargs for python open(..).\n\nReturns:\n File-like object. Supports with context.", "source": "juraj_google_style"} -{"code": "def attach_socket(self, **kwargs):\n\n return self.client.api.attach_socket(self.id, **kwargs)", "docstring": "Like :py:meth:`attach`, but returns the underlying socket-like object\n for the HTTP request.\n\nArgs:\n params (dict): Dictionary of request parameters (e.g. 
``stdout``,\n ``stderr``, ``stream``).\n ws (bool): Use websockets instead of raw HTTP.\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.", "source": "juraj_google_style"} -{"code": "def _compute_args(self, data=dict(), **kwargs):\n\n\n for name, remote_attribute in self._attributes.items():\n default_value = BambouConfig.get_default_attribute_value(self.__class__, name, remote_attribute.attribute_type)\n setattr(self, name, default_value)\n\n if len(data) > 0:\n self.from_dict(data)\n\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)", "docstring": "Compute the arguments\n\n Try to import attributes from data.\n Otherwise compute kwargs arguments.\n\nArgs:\n data: a dict()\n kwargs: a list of arguments", "source": "juraj_google_style"} -{"code": "def batch_insert(self, records, typecast=False):\n\n return self._batch_request(self.insert, records)", "docstring": "Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)\n To change the rate limit use ``airtable.API_LIMIT = 0.2``\n (5 per second)\n\n >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]\n >>> airtable.batch_insert(records)\n\nArgs:\n records(``list``): Records to insert\n typecast(``boolean``): Automatic data conversion from string values.\n\nReturns:\n records (``list``): list of added records", "source": "juraj_google_style"} -{"code": "def apply(self, func, **kwargs):\n\n import dask\n\n # applies the func lazily\n delayed_call = self.delayed_call\n self.delayed_call = self.dask_obj\n return self.__class__(dask.delayed(func)(delayed_call, **kwargs))", "docstring": "Apply some callable function to the data in this partition.\n\n Note: It is up to the implementation how kwargs are handled. They are\n an important part of many implementations. 
As of right now, they\n are not serialized.\n\nArgs:\n func: The lambda to apply (may already be correctly formatted)\n\nReturns:\n A new `BaseFramePartition` containing the object that has had `func`\n applied to it.", "source": "juraj_google_style"} -{"code": "def _iterator(self, plugins, context):\n\n\n test = pyblish.logic.registered_test()\n\n for plug, instance in pyblish.logic.Iterator(plugins, context):\n if not plug.active:\n continue\n\n if instance is not None and instance.data.get(\"publish\") is False:\n continue\n\n self.processing[\"nextOrder\"] = plug.order\n\n if not self.is_running:\n raise StopIteration(\"Stopped\")\n\n if test(**self.processing):\n raise StopIteration(\"Stopped due to %s\" % test(\n **self.processing))\n\n yield plug, instance", "docstring": "Yield next plug-in and instance to process.\n\nArgs:\n plugins (list): Plug-ins to process\n context (pyblish.api.Context): Context to process", "source": "juraj_google_style"} -{"code": "def CaptureNamedVariable(self, name, value, depth, limits):\n\n if not hasattr(name, '__dict__'):\n name = str(name)\n else: # TODO(vlif): call str(name) with immutability verifier here.\n name = str(id(name))\n self._total_size += len(name)\n\n v = (self.CheckDataVisiblity(value) or\n self.CaptureVariable(value, depth, limits))\n v['name'] = name\n return v", "docstring": "Appends name to the product of CaptureVariable.\n\nArgs:\n name: name of the variable.\n value: data to capture\n depth: nested depth of dictionaries and vectors so far.\n limits: Per-object limits for capturing variable data.\n\nReturns:\n Formatted captured data as per Variable proto with name.", "source": "juraj_google_style"} -{"code": "def install_requirements(self, path, index=None):\n\n cmd = 'install -r {0}'.format(path)\n if index:\n\n cmd = 'install --index-url {0} -r {1}'.format(index, path)\n\n self.pip(cmd)", "docstring": "Install packages from a requirements.txt file.\n\nArgs:\n path (str): The path to the requirements file.\n index (str): The URL for a pypi index to use.", "source": "juraj_google_style"} -{"code": "def _get_register_specs(bit_labels):\n\n it = itertools.groupby(bit_labels, operator.itemgetter(0))\n for register_name, sub_it in it:\n yield register_name, max(ind[1] for ind in sub_it) + 1", "docstring": "Get the number and size of unique registers from bit_labels list.\n\nArgs:\n bit_labels (list): this list is of the form::\n\n [['reg1', 0], ['reg1', 1], ['reg2', 0]]\n\n which indicates a register named \"reg1\" of size 2\n and a register named \"reg2\" of size 1. 
This is the\n format of classic and quantum bit labels in qobj\n header.\n\nYields:\n tuple: iterator of register_name:size pairs.", "source": "juraj_google_style"} -{"code": "def VerifyRow(self, parser_mediator, row):\n\n if len(row) < self.MIN_COLUMNS:\n return False\n\n # Check the date format!\n # If it doesn't parse, then this isn't a Trend Micro AV log.\n try:\n timestamp = self._ConvertToTimestamp(row['date'], row['time'])\n except (ValueError, TypeError):\n return False\n\n if timestamp is None:\n return False\n\n # Check that the action value is plausible.\n try:\n action = int(row['action'], 10)\n except (ValueError, TypeError):\n return False\n\n if action not in formatter.SCAN_RESULTS:\n return False\n return True", "docstring": "Verifies if a line of the file is in the expected format.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n row (dict[str, str]): fields of a single row, as specified in COLUMNS.\n\nReturns:\n bool: True if this is the correct parser, False otherwise.", "source": "juraj_google_style"} -{"code": "def GetAPFSFileEntryByPathSpec(self, path_spec):\n\n # Opening a file by identifier is faster than opening a file by location.\n location = getattr(path_spec, 'location', None)\n identifier = getattr(path_spec, 'identifier', None)\n\n if identifier is not None:\n fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(\n identifier)\n elif location is not None:\n fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location)\n else:\n raise errors.PathSpecError(\n 'Path specification missing location and identifier.')\n\n return fsapfs_file_entry", "docstring": "Retrieves the APFS file entry for a path specification.\n\nArgs:\n path_spec (PathSpec): a path specification.\n\nReturns:\n pyfsapfs.file_entry: file entry.\n\nRaises:\n PathSpecError: if the path specification is missing location and\n identifier.", "source": "juraj_google_style"} -{"code": "def register_hooked(self,\n hooks, # type: Union[Type[Hook], Sequence[Type[Hook]]]\n func, # type: Hooked\n args_gen=None # type: Optional[ArgsGen]\n ):\n # type: (Type[Hook], Callable, Optional[Callable]) -> None\n\n if self.hooked is None:\n self.hooked = {}\n if args_gen is None:\n args_gen = getattr(func, \"call_types\", {}).keys\n if not isinstance(hooks, Sequence):\n hooks = [hooks]\n for hook_cls in hooks:\n self.hooked[hook_cls] = (func, args_gen)", "docstring": "Register func to be run when any of the hooks are run by parent\n\nArgs:\n hooks: A Hook class or list of Hook classes of interest\n func: The callable that should be run on that Hook\n args_gen: Optionally specify the argument names that should be\n passed to func. 
If not given then use func.call_types.keys", "source": "juraj_google_style"} -{"code": "def to_string(cls, error_code):\n\n if error_code == cls.COMPARE_ERROR:\n return 'Error comparing flash content to programming data.'\n elif error_code == cls.PROGRAM_ERASE_ERROR:\n return 'Error during program/erase phase.'\n elif error_code == cls.VERIFICATION_ERROR:\n return 'Error verifying programmed data.'\n return super(JLinkFlashErrors, cls).to_string(error_code)", "docstring": "Returns the string message for the given ``error_code``.\n\nArgs:\n cls (JLinkFlashErrors): the ``JLinkFlashErrors`` class\n error_code (int): error code to convert\n\nReturns:\n An error string corresponding to the error code.\n\nRaises:\n ValueError: if the error code is invalid.", "source": "juraj_google_style"} -{"code": "def ParseOptions(self, options):\n\n # The extraction options are dependent on the data location.\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, self, names=['data_location'])\n\n self._ReadParserPresetsFromFile()\n\n # Check the list options first otherwise required options will raise.\n argument_helper_names = ['hashers', 'parsers', 'profiling']\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, self, names=argument_helper_names)\n\n self._ParseTimezoneOption(options)\n\n self.list_hashers = self._hasher_names_string == 'list'\n self.list_parsers_and_plugins = self._parser_filter_expression == 'list'\n self.list_profilers = self._profilers == 'list'\n\n self.show_info = getattr(options, 'show_info', False)\n self.show_troubleshooting = getattr(options, 'show_troubleshooting', False)\n\n if getattr(options, 'use_markdown', False):\n self._views_format_type = views.ViewsFactory.FORMAT_TYPE_MARKDOWN\n\n self.dependencies_check = getattr(options, 'dependencies_check', True)\n\n if (self.list_hashers or self.list_parsers_and_plugins or\n self.list_profilers or self.list_timezones or self.show_info or\n self.show_troubleshooting):\n return\n\n self._ParseInformationalOptions(options)\n\n argument_helper_names = [\n 'artifact_definitions', 'artifact_filters', 'extraction',\n 'filter_file', 'status_view', 'storage_file', 'storage_format',\n 'text_prepend', 'yara_rules']\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, self, names=argument_helper_names)\n\n self._ParseLogFileOptions(options)\n\n self._ParseStorageMediaOptions(options)\n\n self._ParsePerformanceOptions(options)\n self._ParseProcessingOptions(options)\n\n if not self._storage_file_path:\n raise errors.BadConfigOption('Missing storage file option.')\n\n serializer_format = getattr(\n options, 'serializer_format', definitions.SERIALIZER_FORMAT_JSON)\n if serializer_format not in definitions.SERIALIZER_FORMATS:\n raise errors.BadConfigOption(\n 'Unsupported storage serializer format: {0:s}.'.format(\n serializer_format))\n self._storage_serializer_format = serializer_format\n\n # TODO: where is this defined?\n self._operating_system = getattr(options, 'os', None)\n\n if self._operating_system:\n self._mount_path = getattr(options, 'filename', None)\n\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, self, names=['status_view'])\n\n self._enable_sigsegv_handler = getattr(options, 'sigsegv_handler', False)\n\n self._EnforceProcessMemoryLimit(self._process_memory_limit)", "docstring": "Parses the options.\n\nArgs:\n options (argparse.Namespace): command line arguments.\n\nRaises:\n BadConfigOption: if the options are invalid.", "source": "juraj_google_style"} -{"code": "def 
_request(self, resource, action, data=None, headers=None):\n\n url, httpmethod = res_to_url(resource, action)\n return self.ajax(url, httpmethod, data, headers)", "docstring": "Send request\n\nArgs:\n resource: resource\n action: action\n data: string or object which can be json.dumps\n headers: http headers", "source": "juraj_google_style"} -{"code": "def __init__(self,\n message: str,\n status_code: int=None):\n\n super().__init__(message)\n if status_code is not None:\n self.status_code = status_code", "docstring": "初始化异常.\n\n Parameters:\n\n message (str): - 异常信息\n status_code (int): - 状态码", "source": "juraj_google_style"} -{"code": "def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):\n\n _, ext = os.path.splitext(filename)\n if ext != '.png':\n filename += '.png'\n\n gnuplot_cmds = \\\n\n scr = _GnuplotScriptTemp(gnuplot_cmds)\n data = _GnuplotDataTemp(x, y)\n\n args_dict = {\n 'filename': filename,\n 'filename_data': data.name,\n 'title': title,\n 'x_label': x_label,\n 'y_label': y_label\n }\n gnuplot(scr.name, args_dict)", "docstring": "Function to produce a general 2D plot.\n\nArgs:\n x (list): x points.\n y (list): y points.\n filename (str): Filename of the output image.\n title (str): Title of the plot. Default is '' (no title).\n x_label (str): x-axis label.\n y_label (str): y-axis label.", "source": "juraj_google_style"} -{"code": "def find(self, title):\n\n if title not in self._titles:\n raise KeyError(title)\n return self._titles[title][0]", "docstring": "Return the first worksheet with the given title.\n\nArgs:\n title(str): title/name of the worksheet to return\n\nReturns:\n WorkSheet: contained worksheet object\n\nRaises:\n KeyError: if the spreadsheet has no no worksheet with the given ``title``", "source": "juraj_google_style"} -{"code": "def _transform(transformer_chain: Sequence[Tuple[DataTransformer, Type]], data: S, context: PipelineContext = None) -> T:\n\n for transformer, target_type in transformer_chain:\n # noinspection PyTypeChecker\n data = transformer.transform(target_type, data, context)\n return data", "docstring": "Transform data to a new type.\n\nArgs:\n transformer_chain: A sequence of (transformer, type) pairs to convert the data.\n data: The data to be transformed.\n context: The context of the transformations (mutable).\n\nReturns:\n The transformed data.", "source": "juraj_google_style"} -{"code": "def hrs_84_and_db12_8_or_20_6(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type float '\n 'for field `hrs_84_and_db12_8_or_20_6`'.format(value))\n\n self._hrs_84_and_db12_8_or_20_6 = value", "docstring": "Corresponds to IDD Field `hrs_84_and_db12_8_or_20_6`\n Number of hours between 8 AM and 4 PM (inclusive) with dry-bulb temperature between 12.8 and 20.6 C\n\nArgs:\n value (float): value for IDD Field `hrs_84_and_db12_8_or_20_6`\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def LateBind(self, target=None):\n\n if not issubclass(target, RDFProtoStruct):\n raise TypeError(\"Field %s expects a protobuf, but target is %s\" %\n (self, target))\n\n self.late_bound = False\n\n # The target type is now resolved.\n self.type = target\n\n # Register us in our owner.\n self.owner.AddDescriptor(self)", "docstring": "Late binding callback.\n\n This method is called on this field descriptor when 
the target RDFValue\n class is finally defined. It gives the field descriptor an opportunity to\n initialize after the point of definition.\n\nArgs:\n target: The target nested class.\n\nRaises:\n TypeError: If the target class is not of the expected type.", "source": "juraj_google_style"} -{"code": "def children(self, as_resources=False):\n\n\n\n\t\tchildren = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]\n\n\t\t# if as_resources, issue GET requests for children and return\n\t\tif as_resources:\n\t\t\tlogger.debug('retrieving children as resources')\n\t\t\tchildren = [ self.repo.get_resource(child) for child in children ]\n\n\t\treturn children", "docstring": "method to return hierarchical children of this resource\n\nArgs:\n as_resources (bool): if True, opens each as appropriate resource type instead of return URI only\n\nReturns:\n (list): list of resources", "source": "juraj_google_style"} -{"code": "def collect_hunt_results(self, hunt):\n\n if not os.path.isdir(self.output_path):\n os.makedirs(self.output_path)\n\n output_file_path = os.path.join(\n self.output_path, '.'.join((self.hunt_id, 'zip')))\n\n if os.path.exists(output_file_path):\n print('{0:s} already exists: Skipping'.format(output_file_path))\n return None\n\n self._check_approval_wrapper(\n hunt, self._get_and_write_archive, hunt, output_file_path)\n\n results = self._extract_hunt_results(output_file_path)\n print('Wrote results of {0:s} to {1:s}'.format(\n hunt.hunt_id, output_file_path))\n return results", "docstring": "Download current set of files in results.\n\nArgs:\n hunt: The GRR hunt object to download files from.\n\nReturns:\n list: tuples containing:\n str: human-readable description of the source of the collection. For\n example, the name of the source host.\n str: path to the collected data.\n\nRaises:\n ValueError: if approval is needed and approvers were not specified.", "source": "juraj_google_style"} -{"code": "def complement(self, alphabet):\n\n states = sorted(self.states, key=attrgetter('initial'), reverse=True)\n for state in states:\n if state.final:\n state.final = False\n else:\n state.final = True", "docstring": "Returns the complement of DFA\n\nArgs:\n alphabet (list): The input alphabet\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def _CreateDictReader(self, line_reader):\n\n delimiter = self.DELIMITER\n quotechar = self.QUOTE_CHAR\n magic_test_string = self._MAGIC_TEST_STRING\n # Python 3 csv module requires arguments to constructor to be of type str.\n if py2to3.PY_3:\n delimiter = delimiter.decode(self._encoding)\n quotechar = quotechar.decode(self._encoding)\n magic_test_string = magic_test_string.decode(self._encoding)\n\n return csv.DictReader(\n line_reader, delimiter=delimiter, fieldnames=self.COLUMNS,\n quotechar=quotechar, restkey=magic_test_string,\n restval=magic_test_string)", "docstring": "Returns a reader that processes each row and yields dictionaries.\n\n csv.DictReader does this job well for single-character delimiters; parsers\n that need multi-character delimiters need to override this method.\n\nArgs:\n line_reader (iter): yields lines from a file-like object.\n\nReturns:\n iter: a reader of dictionaries, as returned by csv.DictReader().", "source": "juraj_google_style"} -{"code": "def yuv_to_rgb(y, u=None, v=None):\n\n if type(y) in [list,tuple]:\n y, u, v = y\n r = y + (v * 1.13983)\n g = y - (u * 0.39465) - (v * 0.58060)\n b = y + (u * 2.03211)\n return (r, g, b)", "docstring": "Convert the color from YUV 
coordinates to RGB.\n\n Parameters:\n :y:\n The Y component value [0...1]\n :u:\n The U component value [-0.436...0.436]\n :v:\n The V component value [-0.615...0.615]\n\nReturns:\n The color as an (r, g, b) tuple in the range:\n r[0...1],\n g[0...1],\n b[0...1]\n\n >>> '(%g, %g, %g)' % yuv_to_rgb(0.5925, -0.2916, 0.3575)\n '(0.999989, 0.500015, -6.3276e-05)'", "source": "juraj_google_style"} -{"code": "def define_grid(self, matrix):\n\n self.style['grid-template-areas'] = ''.join(\"'%s'\"%(' '.join(x)) for x in matrix)", "docstring": "Populates the Table with a list of tuples of strings.\n\nArgs:\n matrix (list): list of iterables of strings (lists or something else).\n Items in the matrix have to correspond to a key for the children.", "source": "juraj_google_style"} -{"code": "def capture_widget(widget, path=None):\n\n if use_qt5:\n pixmap = widget.grab()\n else:\n pixmap = QtGui.QPixmap.grabWidget(widget)\n\n if path:\n pixmap.save(path)\n\n else:\n image_buffer = QtCore.QBuffer()\n image_buffer.open(QtCore.QIODevice.ReadWrite)\n\n pixmap.save(image_buffer, \"PNG\")\n\n return image_buffer.data().data()", "docstring": "Grab an image of a Qt widget\n\nArgs:\n widget: The Qt Widget to capture\n path (optional): The path to save to. If not provided - will return image data.\n\nReturns:\n If a path is provided, the image will be saved to it.\n If not, the PNG buffer will be returned.", "source": "juraj_google_style"} -{"code": "def gcopy(pattern, destination):\n\n for item in glob.glob(pattern):\n if not copy(item, destination):\n return False\n return True", "docstring": "Copy all file found by glob.glob(pattern) to destination directory.\n\nArgs:\n pattern (str): Glob pattern\n destination (str): Path to the destination directory.\n\nReturns:\n bool: True if the operation is successful, False otherwise.", "source": "juraj_google_style"} -{"code": "def create_switch(type, settings, pin):\n\n\n\tswitch = None\n\tif type == \"A\":\n\t\tgroup, device = settings.split(\",\")\n\t\tswitch = pi_switch.RCSwitchA(group, device)\n\n\telif type == \"B\":\n\t\taddr, channel = settings.split(\",\")\n\t\taddr = int(addr)\n\t\tchannel = int(channel)\n\t\tswitch = pi_switch.RCSwitchB(addr, channel)\n\n\telif type == \"C\":\n\t\tfamily, group, device = settings.split(\",\")\n\t\tgroup = int(group)\n\t\tdevice = int(device)\n\t\tswitch = pi_switch.RCSwitchC(family, group, device)\n\n\telif type == \"D\":\n\t\tgroup, device = settings.split(\",\")\n\t\tdevice = int(device)\n\t\tswitch = pi_switch.RCSwitchD(group, device)\n\n\telse:\n\t\tprint \"Type %s is not supported!\" % type\n\t\tsys.exit()\n\n\tswitch.enableTransmit(pin)\n\treturn switch", "docstring": "Create a switch.\n\nArgs:\n type: (str): type of the switch [A,B,C,D]\n settings (str): a comma separted list\n pin (int): wiringPi pin\n\nReturns:\n switch", "source": "juraj_google_style"} -{"code": "def set_charge_and_spin(self, charge, spin_multiplicity=None):\n\n self._charge = charge\n nelectrons = 0\n for site in self._sites:\n for sp, amt in site.species.items():\n if not isinstance(sp, DummySpecie):\n nelectrons += sp.Z * amt\n nelectrons -= charge\n self._nelectrons = nelectrons\n if spin_multiplicity:\n if (nelectrons + spin_multiplicity) % 2 != 1:\n raise ValueError(\n \"Charge of {} and spin multiplicity of {} is\"\n \" not possible for this molecule\".format(\n self._charge, spin_multiplicity))\n self._spin_multiplicity = spin_multiplicity\n else:\n self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2", "docstring": "Set the charge 
and spin multiplicity.\n\nArgs:\n charge (int): Charge for the molecule. Defaults to 0.\n spin_multiplicity (int): Spin multiplicity for molecule.\n Defaults to None, which means that the spin multiplicity is\n set to 1 if the molecule has no unpaired electrons and to 2\n if there are unpaired electrons.", "source": "juraj_google_style"} -{"code": "def _get_object_from_version(cls, operations, ident):\n\n version, objname = ident.split(\".\")\n\n module_ = operations.get_context().script.get_revision(version).module\n obj = getattr(module_, objname)\n return obj", "docstring": "Returns a Python object from an Alembic migration module (script).\n\nArgs:\n operations: instance of ``alembic.operations.base.Operations``\n ident: string of the format ``version.objname``\n\nReturns:\n the object whose name is ``objname`` within the Alembic migration\n script identified by ``version``", "source": "juraj_google_style"} -{"code": "def add_info_field(self, field):\n\n if field in self.info_dict:\n msg = \"New info field [{}] already exists.\".format(field)\n raise KeyError(msg)\n\n if \"=\" in field:\n key, value = field.split(\"=\")\n self.info_dict[key] = value\n else:\n self.info_dict[field] = field\n\n self._join_info_fields()", "docstring": "Adds new info field (flag or key=value pair).\n\nArgs:\n field: String flag (e.g. \"SOMATIC\") or key-value (\"NEW_DP=42\")\n\nRaises:\n KeyError: if info field already exists", "source": "juraj_google_style"} -{"code": "def _bash_comp_command(self, cmd, add_help=True):\n\n out = ['-h', '--help'] if add_help else []\n cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare\n for opt, sct in cmd_dict:\n out.extend(_names(self._conf[sct], opt))\n return out", "docstring": "Build a list of all options for a given command.\n\nArgs:\n cmd (str): command name, set to None or '' for bare command.\n add_help (bool): add an help option.\n\nReturns:\n list of str: list of CLI options strings.", "source": "juraj_google_style"} -{"code": "def validate(self, corpus):\n\n invalid_utterances = {}\n\n for utterance in corpus.utterances.values():\n if self.label_list_idx in utterance.label_lists.keys():\n ll = utterance.label_lists[self.label_list_idx]\n\n if len(ll) < self.min_number_of_labels:\n invalid_utterances[utterance.idx] = 'Only {} labels'.format(len(ll))\n else:\n invalid_utterances[utterance.idx] = 'No label-list {}'.format(self.label_list_idx)\n\n passed = len(invalid_utterances) <= 0\n info = {\n 'Min. 
number of labels': str(self.min_number_of_labels),\n 'Label-List ID': self.label_list_idx\n }\n\n return base.InvalidUtterancesResult(passed, invalid_utterances, name=self.name(), info=info)", "docstring": "Perform the validation on the given corpus.\n\nArgs:\n corpus (Corpus): The corpus to test/validate.\n\nReturns:\n InvalidUtterancesResult: Validation result.", "source": "juraj_google_style"} -{"code": "def get_boards(board_name_list, *args, **kwargs):\n\n if isinstance(board_name_list, basestring):\n board_name_list = board_name_list.split()\n return [Board(name, *args, **kwargs) for name in board_name_list]", "docstring": "Given a list of boards, return :class:`basc_py4chan.Board` objects.\n\nArgs:\n board_name_list (list): List of board names to get, eg: ['b', 'tg']\n\nReturns:\n dict of :class:`basc_py4chan.Board`: Requested boards.", "source": "juraj_google_style"} -{"code": "def parse_simple_id(chrom, pos, ref, alt):\n\n return '_'.join([chrom, pos, ref, alt])", "docstring": "Parse the simple id for a variant\n\n Simple id is used as a human readable reference for a position, it is\n in no way unique.\n\nArgs:\n chrom(str)\n pos(str)\n ref(str)\n alt(str)\n\nReturns:\n simple_id(str): The simple human readable variant id", "source": "juraj_google_style"} -{"code": "def __squid_to_guid(self, squid):\n\n if not squid:\n return ''\n squid_match = self.__squid_pattern.match(squid)\n guid = ''\n if squid_match is not None:\n guid = '{' +\\\n squid_match.group(1)[::-1]+'-' +\\\n squid_match.group(2)[::-1]+'-' +\\\n squid_match.group(3)[::-1]+'-' +\\\n squid_match.group(4)[::-1]+squid_match.group(5)[::-1] + '-'\n for index in range(6, 12):\n guid += squid_match.group(index)[::-1]\n guid += '}'\n return guid", "docstring": "Squished GUID (SQUID) to GUID.\n\n A SQUID is a Squished/Compressed version of a GUID to use up less space\n in the registry.\n\nArgs:\n squid (str): Squished GUID.\n\nReturns:\n str: the GUID if a valid SQUID provided.", "source": "juraj_google_style"} -{"code": "def Overlay_setShowFPSCounter(self, show):\n\n\t\tassert isinstance(show, (bool,)\n\t\t ), \"Argument 'show' must be of type '['bool']'. 
Received type: '%s'\" % type(\n\t\t show)\n\t\tsubdom_funcs = self.synchronous_command('Overlay.setShowFPSCounter', show\n\t\t =show)\n\t\treturn subdom_funcs", "docstring": "Function path: Overlay.setShowFPSCounter\n Domain: Overlay\n Method name: setShowFPSCounter\n\n Parameters:\n Required arguments:\n 'show' (type: boolean) -> True for showing the FPS counter\n No return value.\n\n Description: Requests that backend shows the FPS counter", "source": "juraj_google_style"} -{"code": "def __init__(self, length=None, experimenter=None):\n\n super().__init__(action_type=ActionType.OFPAT_EXPERIMENTER)\n self.length = length\n self.experimenter = experimenter", "docstring": "Create ActionExperimenterHeader with the optional parameters below.\n\nArgs:\n experimenter (int): The experimenter field is the Experimenter ID,\n which takes the same form as in struct ofp_experimenter.", "source": "juraj_google_style"} -{"code": "def __strip_tags(self, node: yaml.Node) -> None:\n\n if isinstance(node, yaml.SequenceNode):\n for subnode in node.value:\n self.__strip_tags(subnode)\n elif isinstance(node, yaml.MappingNode):\n node.tag = 'tag:yaml.org,2002:map'\n for key_node, value_node in node.value:\n self.__strip_tags(key_node)\n self.__strip_tags(value_node)", "docstring": "Strips tags from mappings in the tree headed by node.\n\n This keeps yaml from constructing any objects in this tree.\n\nArgs:\n node: Head of the tree to strip", "source": "juraj_google_style"} -{"code": "def parseEquation(self, inp):\n\n inp = MathService._preprocess(inp)\n split = inp.split(' ')\n\n # Recursive call on unary operators\n for i, w in enumerate(split):\n if w in self.__unaryOperators__:\n op = self.__unaryOperators__[w]\n\n # Split equation into halves\n eq1 = ' '.join(split[:i])\n eq2 = ' '.join(split[i + 1:])\n\n # Calculate second half\n result = MathService._applyUnary(self.parseEquation(eq2), op)\n\n return self.parseEquation(eq1 + \" \" + str(result))\n\n def extractNumbersAndSymbols(inp):\n numbers = []\n symbols = []\n\n # Divide into values (numbers), operators (symbols)\n next_number = \"\"\n for w in inp.split(' '):\n if w in self.__binaryOperators__:\n symbols.append(self.__binaryOperators__[w])\n\n if next_number:\n numbers.append(next_number)\n next_number = \"\"\n\n else:\n if next_number:\n next_number += \" \"\n next_number += w\n\n if next_number:\n numbers.append(next_number)\n\n # Cast numbers from words to integers\n def convert(n):\n if n in self.__constants__:\n return self.__constants__[n]\n\n converter = NumberService()\n return converter.parse(n)\n\n numbers = [convert(n) for n in numbers]\n\n return numbers, symbols\n\n numbers, symbols = extractNumbersAndSymbols(inp)\n\n return MathService._calculate(numbers, symbols)", "docstring": "Solves the equation specified by the input string.\n\nArgs:\n inp (str): An equation, specified in words, containing some\n combination of numbers, binary, and unary operations.\n\nReturns:\n The floating-point result of carrying out the computation.", "source": "juraj_google_style"} -{"code": "def hint_for_accuracy(self, accuracy=\"normal\"):\n\n if not self.has_dojo_report:\n return Hint(ecut=0., pawecutdg=0.)\n\n # Get hints from dojoreport. 
Try first in hints then in ppgen_hints.\n if \"hints\" in self.dojo_report:\n return Hint.from_dict(self.dojo_report[\"hints\"][accuracy])\n elif \"ppgen_hints\" in self.dojo_report:\n return Hint.from_dict(self.dojo_report[\"ppgen_hints\"][accuracy])\n return Hint(ecut=0., pawecutdg=0.)", "docstring": "Returns a :class:`Hint` object with the suggested value of ecut [Ha] and\n pawecutdg [Ha] for the given accuracy.\n ecut and pawecutdg are set to zero if no hint is available.\n\nArgs:\n accuracy: [\"low\", \"normal\", \"high\"]", "source": "juraj_google_style"} -{"code": "def from_dict(cls, content):\n\n # Do we really need this constructor? Could use DidlObject(**content)\n # instead. -- We do now\n if 'resources' in content:\n content['resources'] = [DidlResource.from_dict(x)\n for x in content['resources']]\n return cls(**content)", "docstring": "Create an instance from a dict.\n\n An alternative constructor. Equivalent to ``DidlObject(**content)``.\n\nArgs:\n content (dict): a dict containing metadata information. Required.\n Valid keys are the same as the parameters for `DidlObject`.", "source": "juraj_google_style"} -{"code": "def ReadFrom(self, byte_stream):\n\n try:\n return self._struct.unpack_from(byte_stream)\n except (TypeError, struct.error) as exception:\n raise IOError('Unable to read byte stream with error: {0!s}'.format(\n exception))", "docstring": "Read values from a byte stream.\n\nArgs:\n byte_stream (bytes): byte stream.\n\nReturns:\n tuple[object, ...]: values copies from the byte stream.\n\nRaises:\n IOError: if byte stream cannot be read.\n OSError: if byte stream cannot be read.", "source": "juraj_google_style"} -{"code": "def verify_oauth2_token(id_token, request, audience=None):\n\n return verify_token(\n id_token, request, audience=audience,\n certs_url=_GOOGLE_OAUTH2_CERTS_URL)", "docstring": "Verifies an ID Token issued by Google's OAuth 2.0 authorization server.\n\nArgs:\n id_token (Union[str, bytes]): The encoded token.\n request (google.auth.transport.Request): The object used to make\n HTTP requests.\n audience (str): The audience that this token is intended for. This is\n typically your application's OAuth 2.0 client ID. 
If None then the\n audience is not verified.\n\nReturns:\n Mapping[str, Any]: The decoded token.", "source": "juraj_google_style"} -{"code": "def __init__(self,\n unique_identifier=None):\n\n super(RevokeResponsePayload, self).__init__(\n tag=enums.Tags.RESPONSE_PAYLOAD)\n if unique_identifier is None:\n self.unique_identifier = attributes.UniqueIdentifier()\n else:\n self.unique_identifier = unique_identifier\n self.validate()", "docstring": "Construct a RevokeResponsePayload object.\n\nArgs:\n unique_identifier (UniqueIdentifier): The UUID of a managed\n cryptographic object.", "source": "juraj_google_style"} -{"code": "def show_bokehjs(bokehjs_action, develop=False):\n\n print()\n if develop:\n print(\"Installed Bokeh for DEVELOPMENT:\")\n else:\n print(\"Installed Bokeh:\")\n if bokehjs_action in ['built', 'installed']:\n print(\" - using %s built BokehJS from bokehjs/build\\n\" % (bright(yellow(\"NEWLY\")) if bokehjs_action=='built' else bright(yellow(\"PREVIOUSLY\"))))\n else:\n print(\" - using %s BokehJS, located in 'bokeh.server.static'\\n\" % bright(yellow(\"PACKAGED\")))\n print()", "docstring": "Print a useful report after setuptools output describing where and how\n BokehJS is installed.\n\nArgs:\n bokehjs_action (str) : one of 'built', 'installed', or 'packaged'\n how (or if) BokehJS was installed into the python source tree\n\n develop (bool, optional) :\n whether the command was for \"develop\" mode (default: False)\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def field_to_markdown(field):\n\n if \"title\" in field:\n field_title = \"**{}**\".format(field[\"title\"])\n else:\n raise Exception(\"Es necesario un `title` para describir un campo.\")\n\n field_type = \" ({})\".format(field[\"type\"]) if \"type\" in field else \"\"\n field_desc = \": {}\".format(\n field[\"description\"]) if \"description\" in field else \"\"\n\n text_template = \"{title}{type}{description}\"\n text = text_template.format(title=field_title, type=field_type,\n description=field_desc)\n\n return text", "docstring": "Genera texto en markdown a partir de los metadatos de un `field`.\n\nArgs:\n field (dict): Diccionario con metadatos de un `field`.\n\nReturns:\n str: Texto que describe un `field`.", "source": "juraj_google_style"} -{"code": "def _CopyFromDateTimeValues(self, date_time_values):\n\n year = date_time_values.get('year', 0)\n month = date_time_values.get('month', 0)\n day_of_month = date_time_values.get('day_of_month', 0)\n hours = date_time_values.get('hours', 0)\n minutes = date_time_values.get('minutes', 0)\n seconds = date_time_values.get('seconds', 0)\n\n self._normalized_timestamp = None\n self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n year, month, day_of_month, hours, minutes, seconds)\n self._time_elements_tuple = (\n year, month, day_of_month, hours, minutes, seconds)\n\n self.is_local_time = False", "docstring": "Copies time elements from date and time values.\n\nArgs:\n date_time_values (dict[str, int]): date and time values, such as year,\n month, day of month, hours, minutes, seconds, microseconds.", "source": "juraj_google_style"} -{"code": "def GetSubkeyByName(self, name):\n\n pyregf_key = self._pyregf_key.get_sub_key_by_name(name)\n if not pyregf_key:\n return None\n\n key_path = key_paths.JoinKeyPath([self._key_path, pyregf_key.name])\n return REGFWinRegistryKey(pyregf_key, key_path=key_path)", "docstring": "Retrieves a subkey by name.\n\nArgs:\n name (str): name of the subkey.\n\nReturns:\n WinRegistryKey: Windows Registry subkey or 
None if not found.", "source": "juraj_google_style"} -{"code": "def tracer_diffusion_coefficient( self ):\n\n if self.has_run:\n return self.atoms.sum_dr_squared() / ( 6.0 * float( self.number_of_atoms ) * self.lattice.time )\n else:\n return None", "docstring": "Tracer diffusion coefficient, D*.\n\nArgs:\n None\n\nReturns:\n (Float): The tracer diffusion coefficient, D*.", "source": "juraj_google_style"} -{"code": "def time_stats(self, **kwargs):\n\n # Use the existing time_stats attribute if it exist, otherwise make an\n # API call\n if 'time_stats' in self.attributes:\n return self.attributes['time_stats']\n\n path = '%s/%s/time_stats' % (self.manager.path, self.get_id())\n return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get time stats for the object.\n\nArgs:\n **kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabTimeTrackingError: If the time tracking update cannot be done", "source": "juraj_google_style"} -{"code": "def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):\n\n if unique_matches:\n pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)\n else:\n pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)\n return pairings", "docstring": "Match forecast and observed tracks.\n\nArgs:\n model_tracks:\n obs_tracks:\n unique_matches:\n closest_matches:\n\n Returns:", "source": "juraj_google_style"} -{"code": "def potential_purviews(self, direction, mechanism):\n\n all_purviews = utils.powerset(self._node_indices)\n return irreducible_purviews(self.cm, direction, mechanism,\n all_purviews)", "docstring": "All purviews which are not clearly reducible for mechanism.\n\nArgs:\n direction (Direction): |CAUSE| or |EFFECT|.\n mechanism (tuple[int]): The mechanism which all purviews are\n checked for reducibility over.\n\nReturns:\n list[tuple[int]]: All purviews which are irreducible over\n ``mechanism``.", "source": "juraj_google_style"} -{"code": "def fetch_or_load(spec_path):\n\n\n headers = {}\n\n try:\n modified = datetime.utcfromtimestamp(os.path.getmtime(spec_path))\n date = modified.strftime(\"%a, %d %b %Y %I:%M:%S UTC\")\n headers[\"If-Modified-Since\"] = date\n except OSError as error:\n if error.errno != errno.ENOENT:\n raise\n\n request = urllib.Request(VALIDATION_SPEC, headers=headers)\n response = urllib.urlopen(request, cafile=certifi.where())\n\n if response.code == 200:\n with open(spec_path, \"w+b\") as spec:\n spec.writelines(response)\n spec.seek(0)\n return html.parse(spec)\n\n with open(spec_path) as spec:\n return html.parse(spec)", "docstring": "Fetch a new specification or use the cache if it's current.\n\nArgs:\n cache_path:\n\n the path to a cached specification", "source": "juraj_google_style"} -{"code": "def convert_elementwise_sub(\n params, w_name, scope_name, inputs, layers, weights, names\n):\n\n print('Converting elementwise_sub ...')\n model0 = layers[inputs[0]]\n model1 = layers[inputs[1]]\n\n if names == 'short':\n tf_name = 'S' + random_string(7)\n elif names == 'keep':\n tf_name = w_name\n else:\n tf_name = w_name + str(random.random())\n\n sub = keras.layers.Subtract(name=tf_name)\n layers[scope_name] = sub([model0, model1])", "docstring": "Convert elementwise subtraction.\n\nArgs:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary 
with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers", "source": "juraj_google_style"} -{"code": "def make_qs(n, m=None):\n\n try:\n import sympy\n except ImportError:\n raise ImportError(\"This function requires sympy. Please install it.\")\n if m is None:\n syms = sympy.symbols(\" \".join(f\"q{i}\" for i in range(n)))\n if isinstance(syms, tuple):\n return syms\n else:\n return (syms,)\n syms = sympy.symbols(\" \".join(f\"q{i}\" for i in range(n, m)))\n if isinstance(syms, tuple):\n return syms\n else:\n return (syms,)", "docstring": "Make sympy symbols q0, q1, ...\n\nArgs:\n n(int), m(int, optional):\n If specified both n and m, returns [qn, q(n+1), ..., qm],\n Only n is specified, returns[q0, q1, ..., qn].\n\nReturns:\n tuple(Symbol): Tuple of sympy symbols.", "source": "juraj_google_style"} -{"code": "def to_representation(self, instance):\n\n if self.id_only():\n return instance.pk\n\n pk = getattr(instance, 'pk', None)\n if not settings.ENABLE_SERIALIZER_OBJECT_CACHE or pk is None:\n return self._to_representation(instance)\n else:\n if pk not in self.obj_cache:\n self.obj_cache[pk] = self._to_representation(instance)\n return self.obj_cache[pk]", "docstring": "Modified to_representation method. Optionally may cache objects.\n\nArgs:\n instance: A model instance or data object.\n\nReturns:\n Instance ID if the serializer is meant to represent its ID.\n Otherwise, a tagged data dict representation.", "source": "juraj_google_style"} -{"code": "def _MakeCacheInvariant(self, urn, age):\n\n precondition.AssertType(urn, Text)\n return \"%s:%s\" % (urn, self.ParseAgeSpecification(age))", "docstring": "Returns an invariant key for an AFF4 object.\n\n The object will be cached based on this key. This function is specifically\n extracted to ensure that we encapsulate all security critical aspects of the\n AFF4 object so that objects do not leak across security boundaries.\n\nArgs:\n urn: The urn of the object.\n age: The age policy used to build this object. Should be one of\n ALL_TIMES, NEWEST_TIME or a range.\n\nReturns:\n A key into the cache.", "source": "juraj_google_style"} -{"code": "def chat_meMessage(self, *, channel: str, text: str, **kwargs) -> SlackResponse:\n\n kwargs.update({\"channel\": channel, \"text\": text})\n return self.api_call(\"chat.meMessage\", json=kwargs)", "docstring": "Share a me message into a channel.\n\nArgs:\n channel (str): The channel id. e.g. 'C1234567890'\n text (str): The message you'd like to share. e.g. 
'Hello world'", "source": "juraj_google_style"} -{"code": "def new(self, file_type):\n # type: (str) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('UDF ICB Tag already initialized')\n\n self.prior_num_direct_entries = 0 # FIXME: let the user set this\n self.strategy_type = 4\n self.strategy_param = 0 # FIXME: let the user set this\n self.max_num_entries = 1\n if file_type == 'dir':\n self.file_type = 4\n elif file_type == 'file':\n self.file_type = 5\n elif file_type == 'symlink':\n self.file_type = 12\n else:\n raise pycdlibexception.PyCdlibInternalError(\"Invalid file type for ICB; must be one of 'dir', 'file', or 'symlink'\")\n\n self.parent_icb_log_block_num = 0 # FIXME: let the user set this\n self.parent_icb_part_ref_num = 0 # FIXME: let the user set this\n self.flags = 560 # hex 0x230 == binary 0010 0011 0000\n\n self._initialized = True", "docstring": "A method to create a new UDF ICB Tag.\n\n Parameters:\n file_type - What file type this represents, one of 'dir', 'file', or 'symlink'.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def run(self, dag):\n\n if self.layout is None:\n if self.property_set[\"layout\"]:\n self.layout = self.property_set[\"layout\"]\n else:\n self.layout = Layout.generate_trivial_layout(*dag.qregs.values())\n\n self.property_set['is_swap_mapped'] = True\n\n for gate in dag.twoQ_gates():\n physical_q0 = self.layout[gate.qargs[0]]\n physical_q1 = self.layout[gate.qargs[1]]\n\n if self.coupling_map.distance(physical_q0, physical_q1) != 1:\n self.property_set['is_swap_mapped'] = False\n return", "docstring": "If `dag` is mapped to `coupling_map`, the property\n `is_swap_mapped` is set to True (or to False otherwise).\n\nArgs:\n dag (DAGCircuit): DAG to map.", "source": "juraj_google_style"} -{"code": "def create_function(self, vpc_config):\n\n zip_file = 'lambda-holder.zip'\n with zipfile.ZipFile(zip_file, mode='w') as zipped:\n zipped.writestr('index.py', 'print \"Hello world\"')\n\n contents = ''\n with open('lambda-holder.zip', 'rb') as openfile:\n contents = openfile.read()\n\n LOG.info('Creating lambda function: %s', self.app_name)\n\n try:\n self.lambda_client.create_function(\n Environment=self.lambda_environment,\n FunctionName=self.app_name,\n Runtime=self.runtime,\n Role=self.role_arn,\n Handler=self.handler,\n Code={'ZipFile': contents},\n Description=self.description,\n Timeout=int(self.timeout),\n MemorySize=int(self.memory),\n Publish=False,\n VpcConfig=vpc_config,\n Tags={'app_group': self.group,\n 'app_name': self.app_name})\n except boto3.exceptions.botocore.exceptions.ClientError as error:\n if 'CreateNetworkInterface' in error.response['Error']['Message']:\n message = '{0} is missing \"ec2:CreateNetworkInterface\"'.format(self.role_arn)\n LOG.critical(message)\n raise SystemExit(message)\n\n raise\n\n LOG.info(\"Successfully created Lambda function and alias\")", "docstring": "Create lambda function, configures lambda parameters.\n\n We need to upload non-zero zip when creating function. 
Uploading\n hello_world python lambda function since AWS doesn't care which\n executable is in ZIP.\n\nArgs:\n vpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using\n a VPC in lambda", "source": "juraj_google_style"} -{"code": "def _take_screenshot(self, screenshot=False, name_prefix='unknown'):\n\n if isinstance(screenshot, bool):\n if not screenshot:\n return\n return self._save_screenshot(name_prefix=name_prefix)\n if isinstance(screenshot, Image.Image):\n return self._save_screenshot(screen=screenshot, name_prefix=name_prefix)\n\n raise TypeError(\"invalid type for func _take_screenshot: \"+ type(screenshot))", "docstring": "This is different from _save_screenshot.\n The return value maybe None or the screenshot path\n\nArgs:\n screenshot: bool or PIL image", "source": "juraj_google_style"} -{"code": "def _tensor_product(self, other, reverse=False):\n\n # Convert other to Stinespring\n if not isinstance(other, Stinespring):\n other = Stinespring(other)\n\n # Tensor stinespring ops\n sa_l, sa_r = self._data\n sb_l, sb_r = other._data\n\n # Reshuffle tensor dimensions\n din_a, dout_a = self.dim\n din_b, dout_b = other.dim\n dtr_a = sa_l.shape[0] // dout_a\n dtr_b = sb_l.shape[0] // dout_b\n if reverse:\n shape_in = (dout_b, dtr_b, dout_a, dtr_a, din_b * din_a)\n shape_out = (dout_b * dtr_b * dout_a * dtr_a, din_b * din_a)\n else:\n shape_in = (dout_a, dtr_a, dout_b, dtr_b, din_a * din_b)\n shape_out = (dout_a * dtr_a * dout_b * dtr_b, din_a * din_b)\n\n # Compute left stinepsring op\n if reverse:\n input_dims = self.input_dims() + other.input_dims()\n output_dims = self.output_dims() + other.output_dims()\n sab_l = np.kron(sb_l, sa_l)\n else:\n input_dims = other.input_dims() + self.input_dims()\n output_dims = other.output_dims() + self.output_dims()\n sab_l = np.kron(sa_l, sb_l)\n # Reravel indicies\n sab_l = np.reshape(\n np.transpose(np.reshape(sab_l, shape_in), (0, 2, 1, 3, 4)),\n shape_out)\n\n # Compute right stinespring op\n if sa_r is None and sb_r is None:\n sab_r = None\n else:\n if sa_r is None:\n sa_r = sa_l\n elif sb_r is None:\n sb_r = sb_l\n if reverse:\n sab_r = np.kron(sb_r, sa_r)\n else:\n sab_r = np.kron(sa_r, sb_r)\n # Reravel indicies\n sab_r = np.reshape(\n np.transpose(np.reshape(sab_r, shape_in), (0, 2, 1, 3, 4)),\n shape_out)\n return Stinespring((sab_l, sab_r), input_dims, output_dims)", "docstring": "Return the tensor product channel.\n\nArgs:\n other (QuantumChannel): a quantum channel subclass.\n reverse (bool): If False return self ⊗ other, if True return\n if True return (other ⊗ self) [Default: False]\n\nReturns:\n Stinespring: the tensor product channel as a Stinespring object.\n\nRaises:\n QiskitError: if other cannot be converted to a channel.", "source": "juraj_google_style"} -{"code": "def expand_benchmark_name(bm_name, bench_groups):\n\n expansion = bench_groups.get(bm_name)\n if expansion:\n for name in expansion:\n for name in expand_benchmark_name(name, bench_groups):\n yield name\n else:\n yield bm_name", "docstring": "Recursively expand name benchmark names.\n\nArgs:\n bm_name: string naming a benchmark or benchmark group.\n\nYields:\n Names of actual benchmarks, with all group names fully expanded.", "source": "juraj_google_style"} -{"code": "def run(self, sensor_graph, model):\n\n\n # This check can be done if there is 1 input and it is count == 1\n # and the stream type is input or unbuffered\n\n did_downgrade = False\n\n for node, inputs, _outputs in sensor_graph.iterate_bfs():\n can_downgrade = False\n\n if 
node.func_name != u'copy_all_a':\n continue\n\n input_a, trigger_a = node.inputs[0]\n\n # We can always downgrade unbuffered non-counter\n if input_a.selector.match_type in (DataStream.InputType, DataStream.UnbufferedType):\n can_downgrade = True\n elif isinstance(trigger_a, InputTrigger) and trigger_a.comp_string == u'==' and trigger_a.use_count and trigger_a.reference == 1:\n can_downgrade = True\n elif isinstance(trigger_a, TrueTrigger) and not input_a.selector.buffered:\n # We can only downgrade an always trigger if we know the node before this will not\n # generate more than one reading at a time. Since in that case, we get readings one at a\n # time and we don't accumulate them so there will be at most one reading to copy.\n # So, we need to check the node that produces input A to this node and see if we can\n # prove that it will produce at most one reading at a time.\n can_downgrade = True\n for in_node in inputs:\n if input_a.matches(in_node.stream) and in_node.func_name == u'copy_all_a' and in_node.input_a.match_type not in (DataStream.InputType, DataStream.UnbufferedType):\n can_downgrade = False\n break\n\n if can_downgrade:\n did_downgrade = True\n node.set_func(u'copy_latest_a', sensor_graph.find_processing_function(u'copy_latest_a'))\n\n return did_downgrade", "docstring": "Run this optimization pass on the sensor graph\n\n If necessary, information on the device model being targeted\n can be found in the associated model argument.\n\nArgs:\n sensor_graph (SensorGraph): The sensor graph to optimize\n model (DeviceModel): The device model we're using", "source": "juraj_google_style"} -{"code": "def autocorr(x):\n\n\n y = x - x.mean()\n n = len(y)\n result = fftconvolve(y, y[::-1])\n acorr = result[len(result) // 2:]\n acorr /= np.arange(n, 0, -1)\n acorr /= acorr[0]\n return acorr", "docstring": "Compute autocorrelation using FFT for every lag for the input array\n https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation.\n\nArgs:\n x (array-like): An array containing MCMC samples.\n\nReturns:\n np.ndarray: An array of the same size as the input array.", "source": "juraj_google_style"} -{"code": "def to_query(self, fields=None):\n\n # Do import here to avoid top-level circular dependencies.\n from . import _query\n if fields is None:\n fields = '*'\n elif isinstance(fields, list):\n fields = ','.join(fields)\n return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()), context=self._context)", "docstring": "Return a Query for this Table.\n\nArgs:\n fields: the fields to return. If None, all fields will be returned. 
This can be a string\n which will be injected into the Query after SELECT, or a list of field names.\n\nReturns:\n A Query object that will return the specified fields from the records in the Table.", "source": "juraj_google_style"} -{"code": "def parse_relations(\n belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors\n) -> Tuple[Parsed, Errors]:\n\n quotes = char_locs[\"quotes\"]\n quoted_range = set([i for start, end in quotes.items() for i in range(start, end)])\n\n for match in relations_pattern_middle.finditer(belstr):\n (start, end) = match.span(1)\n # log.debug(f'Relation-middle {match}')\n end = end - 1 # adjust end to match actual end character index\n if start != end:\n test_range = set(range(start, end))\n else:\n test_range = set(start)\n\n # Skip if relation overlaps with quoted string\n if test_range.intersection(quoted_range):\n continue\n\n span_key = (start, end)\n parsed[span_key] = {\n \"type\": \"Relation\",\n \"name\": match.group(1),\n \"span\": (start, end),\n }\n\n for match in relations_pattern_end.finditer(belstr):\n (start, end) = match.span(1)\n log.debug(f\"Relation-end {match}\")\n end = end - 1 # adjust end to match actual end character index\n if start != end:\n test_range = set(range(start, end))\n else:\n test_range = set(start)\n\n # Skip if relation overlaps with quoted string\n if test_range.intersection(quoted_range):\n continue\n\n span_key = (start, end)\n parsed[span_key] = {\n \"type\": \"Relation\",\n \"name\": match.group(1),\n \"span\": (start, end),\n }\n\n return parsed, errors", "docstring": "Parse relations from BEL string\n\nArgs:\n belstr: BEL string as one single string (not list of chars)\n char_locs: paren, comma and quote char locations\n parsed: data structure for parsed functions, relations, nested\n errors: error messages\n\nReturns:\n (parsed, errors):", "source": "juraj_google_style"} -{"code": "def fast_roc(actuals, controls):\n\n assert(type(actuals) is np.ndarray)\n assert(type(controls) is np.ndarray)\n\n actuals = np.ravel(actuals)\n controls = np.ravel(controls)\n if np.isnan(actuals).any():\n raise RuntimeError('NaN found in actuals')\n if np.isnan(controls).any():\n raise RuntimeError('NaN found in controls')\n\n thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])[::-1]\n true_pos_rate = np.empty(thresholds.size)\n false_pos_rate = np.empty(thresholds.size)\n num_act = float(len(actuals))\n num_ctr = float(len(controls))\n\n for i, value in enumerate(thresholds):\n true_pos_rate[i] = (actuals >= value).sum() / num_act\n false_pos_rate[i] = (controls >= value).sum() / num_ctr\n auc = np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1])\n # treat cases where TPR of one is not reached before FPR of one\n # by using trapezoidal integration for the last segment\n # (add the missing triangle)\n if false_pos_rate[-2] == 1:\n auc += ((1-true_pos_rate[-3])*.5*(1-false_pos_rate[-3]))\n return (auc, true_pos_rate, false_pos_rate)", "docstring": "approximates the area under the roc curve for sets of actuals and controls.\n Uses all values appearing in actuals as thresholds and lower sum\n interpolation. 
Also returns arrays of the true positive rate and the false\n positive rate that can be used for plotting the roc curve.\n\n Parameters:\n actuals : list\n A list of numeric values for positive observations.\n controls : list\n A list of numeric values for negative observations.", "source": "juraj_google_style"} -{"code": "def update_factor_providers(self, factor_name, name, body):\n\n url = self._url('factors/{}/providers/{}'.format(factor_name, name))\n return self.client.put(url, data=body)", "docstring": "Get Guardian factor providers.\n\n Returns provider configuration\n\nArgs:\n factor_name (str): Either push-notification or sms\n name (str): Name of the provider\n\n body (dict):\n See: https://auth0.com/docs/api/management/v2#!/Guardian/put_twilio", "source": "juraj_google_style"} -{"code": "def sample(self, features):\n\n logits, losses = self(features) # pylint: disable=not-callable\n if self._target_modality_is_real:\n return logits, logits, losses # Raw numbers returned from real modality.\n if self.hparams.sampling_method == \"argmax\":\n samples = tf.argmax(logits, axis=-1)\n else:\n assert self.hparams.sampling_method == \"random\"\n\n def multinomial_squeeze(logits, temperature=1.0):\n logits_shape = common_layers.shape_list(logits)\n reshaped_logits = (\n tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)\n choices = tf.multinomial(reshaped_logits, 1)\n choices = tf.reshape(choices, logits_shape[:-1])\n return choices\n\n samples = multinomial_squeeze(logits, self.hparams.sampling_temp)\n\n return samples, logits, losses", "docstring": "Run the model and extract samples.\n\nArgs:\n features: an map of string to `Tensor`.\n\nReturns:\n samples: an integer `Tensor`.\n logits: a list of `Tensor`s, one per datashard.\n losses: a dictionary: {loss-name (string): floating point `Scalar`}.", "source": "juraj_google_style"} -{"code": "def GetSubkeyByIndex(self, index):\n\n if not self._registry_key and self._registry:\n self._GetKeyFromRegistry()\n\n subkeys = list(self._subkeys.values())\n\n if index < 0 or index >= len(subkeys):\n raise IndexError('Index out of bounds.')\n\n return subkeys[index]", "docstring": "Retrieves a subkey by index.\n\nArgs:\n index (int): index of the subkey.\n\nReturns:\n WinRegistryKey: Windows Registry subkey or None if not found.\n\nRaises:\n IndexError: if the index is out of bounds.", "source": "juraj_google_style"} -{"code": "def cancel_id(cls, id):\n\n conn = Qubole.agent()\n data = {\"status\": \"kill\"}\n return conn.put(cls.element_path(id), data)", "docstring": "Cancels command denoted by this id\n\nArgs:\n `id`: command id", "source": "juraj_google_style"} -{"code": "def delete_user(self, user):\n\n self.project_service.set_auth(self._token_project)\n self.project_service.delete_user(user)", "docstring": "Delete the given user.\n\nArgs:\n user (string): User name.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def BuildArtifactsRegistry(\n cls, artifact_definitions_path, custom_artifacts_path):\n\n if artifact_definitions_path and not os.path.isdir(\n artifact_definitions_path):\n raise errors.BadConfigOption(\n 'No such artifacts filter file: {0:s}.'.format(\n artifact_definitions_path))\n\n if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):\n raise errors.BadConfigOption(\n 'No such artifacts filter file: {0:s}.'.format(custom_artifacts_path))\n\n registry = artifacts_registry.ArtifactDefinitionsRegistry()\n reader = artifacts_reader.YamlArtifactsReader()\n\n try:\n 
registry.ReadFromDirectory(reader, artifact_definitions_path)\n\n except (KeyError, artifacts_errors.FormatError) as exception:\n raise errors.BadConfigOption((\n 'Unable to read artifact definitions from: {0:s} with error: '\n '{1!s}').format(artifact_definitions_path, exception))\n\n if custom_artifacts_path:\n try:\n registry.ReadFromFile(reader, custom_artifacts_path)\n\n except (KeyError, artifacts_errors.FormatError) as exception:\n raise errors.BadConfigOption((\n 'Unable to read artifact definitions from: {0:s} with error: '\n '{1!s}').format(custom_artifacts_path, exception))\n\n return registry", "docstring": "Build Find Specs from artifacts or filter file if available.\n\nArgs:\n artifact_definitions_path (str): path to artifact definitions file.\n custom_artifacts_path (str): path to custom artifact definitions file.\n\nReturns:\n artifacts.ArtifactDefinitionsRegistry: artifact definitions registry.\n\nRaises:\n RuntimeError: if no valid FindSpecs are built.", "source": "juraj_google_style"} -{"code": "def _ProduceSingleContent(self, mod, showprivate=False, showinh=False):\n\n try:\n all = mod[1].__all__\n except AttributeError:\n raise RuntimeError('Module (%s) MUST have `__all__` defined.' % mod[1].__name__)\n try:\n name = mod[1].__displayname__\n except AttributeError:\n name = mod[0]\n try:\n category = mod[1].__category__\n self.__categories.setdefault(category, 0)\n self.__categories[category] += 1\n except AttributeError:\n pass\n feats = inspect.getmembers(mod[1])\n fname = 'content/' + mod[1].__name__.replace('.', '/').replace(' ', '-')+'.rst'\n feats = [f for f in feats if f[0] in all and (showprivate or not f[0][0:1] == '_')]\n with open(fname, 'w') as fid:\n fid.write(Classifier.GetModuleText(name, mod[1].__name__, showprivate=showprivate))\n\n for f in feats:\n # Check for a __displayname__\n if inspect.isclass(f[1]) or inspect.isfunction(f[1]):\n try:\n featname = f[1].__displayname__\n except AttributeError:\n featname = f[1].__name__\n try:\n category = f[1].__category__\n self.__categories.setdefault(category, 0)\n self.__categories[category] += 1\n except AttributeError:\n pass\n # Make the auto doc rst\n if inspect.isclass(f[1]):\n fid.write(Classifier.GetClassText(featname, '%s.%s' % (mod[1].__name__, f[1].__name__), showprivate=showprivate, showinh=showinh))\n elif inspect.isfunction(f[1]):\n fid.write(Classifier.GetFunctionText(featname, '%s.%s' % (mod[1].__name__, f[1].__name__)))\n\n fid.close()\n return '\\n %s' % (fname.split('/')[-1])", "docstring": "An internal helper to create a page for a single module. 
This will\n automatically generate the needed RSF to document the module\n and save the module to its own page in its appropriate location.\n\nArgs:\n mod (module): The single module to document as its own page\n showprivate (bool): A flag for whether or not to display private members\n\nReturns:\n str: The file name ready to be appended to a toctree", "source": "juraj_google_style"} -{"code": "def __init__(self, predicate, if_true, if_false):\n\n super(TernaryConditional, self).__init__(predicate, if_true, if_false)\n self.predicate = predicate\n self.if_true = if_true\n self.if_false = if_false\n self.validate()", "docstring": "Construct an expression that evaluates a predicate and returns one of two results.\n\nArgs:\n predicate: Expression to evaluate, and based on which to choose the returned value\n if_true: Expression to return if the predicate was true\n if_false: Expression to return if the predicate was false\n\nReturns:\n new TernaryConditional object", "source": "juraj_google_style"} -{"code": "def labels2onehot(labels: [List[str], List[List[str]], np.ndarray], classes: [list, np.ndarray]) -> np.ndarray:\n\n n_classes = len(classes)\n y = []\n for sample in labels:\n curr = np.zeros(n_classes)\n if isinstance(sample, list):\n for intent in sample:\n if intent not in classes:\n log.warning('Unknown intent {} detected. Assigning no class'.format(intent))\n else:\n curr[np.where(np.array(classes) == intent)[0]] = 1\n else:\n curr[np.where(np.array(classes) == sample)[0]] = 1\n y.append(curr)\n y = np.asarray(y)\n return y", "docstring": "Convert labels to one-hot vectors for multi-class multi-label classification\n\nArgs:\n labels: list of samples where each sample is a class or a list of classes which sample belongs with\n classes: array of classes' names\n\nReturns:\n 2d array with one-hot representation of given samples", "source": "juraj_google_style"} -{"code": "def calculate(self, token_list_x, token_list_y):\n\n match_list = [tanimoto_value for tanimoto_value in token_list_x if tanimoto_value in token_list_y]\n return float(len(match_list) / (len(token_list_x) + len(token_list_y) - len(match_list)))", "docstring": "Calculate similarity with the Tanimoto coefficient.\n\n Concrete method.\n\nArgs:\n token_list_x: [token, token, token, ...]\n token_list_y: [token, token, token, ...]\n\nReturns:\n Similarity.", "source": "juraj_google_style"} -{"code": "def test(verbosity=1): # pragma: no cover\n\n import unittest\n from .tests import test_suite\n\n # Testing\n unittest.TextTestRunner(verbosity=verbosity).run(test_suite)", "docstring": "Executes all the tests for pyplink.\n\nArgs:\n verbosity (int): The verbosity level for :py:mod:`unittest`.\n\n Just set ``verbosity`` to an integer higher than ``1`` to have more\n information about the tests.", "source": "juraj_google_style"} -{"code": "def signature_cert_chain_url(url):\n\n r = urlparse(url)\n if not r.scheme.lower() == 'https':\n warnings.warn('Certificate URL scheme is invalid.')\n return False\n if not r.hostname.lower() == 's3.amazonaws.com':\n warnings.warn('Certificate URL hostname is invalid.')\n return False\n if not os.path.normpath(r.path).startswith('/echo.api/'):\n warnings.warn('Certificate URL path is invalid.')\n return False\n if r.port and not r.port == 443:\n warnings.warn('Certificate URL port is invalid.')\n return False\n return True", "docstring": "Validate URL specified by SignatureCertChainUrl.\n\n See `validate.request` for additional info.\n\nArgs:\n url: str. 
SignatureCertChainUrl header value sent by request.\n\nReturns:\n bool: True if valid, False otherwise.", "source": "juraj_google_style"} -{"code": "def validateLogicalInterfaceConfiguration(self, logicalInterfaceId):\n\n req = ApiClient.oneLogicalInterfaceUrl % (self.host, \"/draft\", logicalInterfaceId)\n body = {\"operation\" : \"validate-configuration\"}\n resp = requests.patch(req, auth=self.credentials, headers={\"Content-Type\":\"application/json\"}, data=json.dumps(body),\n verify=self.verify)\n if resp.status_code == 200:\n self.logger.debug(\"Validation for logical interface configuration succeeded\")\n else:\n raise ibmiotf.APIException(resp.status_code, \"Validation for logical interface configuration failed\", resp)\n return resp.json()", "docstring": "Validate the logical interface configuration.\n Parameters:\n - logicalInterfaceId (string)\n Throws APIException on failure.", "source": "juraj_google_style"} -{"code": "def copy_file_links(self, src):\n # type: (RockRidge) -> None\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')\n\n # First, get the src data\n if src.dr_entries.px_record is None:\n if src.ce_entries.px_record is None:\n raise pycdlibexception.PyCdlibInvalidInput('No Rock Ridge file links')\n num_links = src.ce_entries.px_record.posix_file_links\n else:\n num_links = src.dr_entries.px_record.posix_file_links\n\n # Now apply it to this record.\n if self.dr_entries.px_record is None:\n if self.ce_entries.px_record is None:\n raise pycdlibexception.PyCdlibInvalidInput('No Rock Ridge file links')\n self.ce_entries.px_record.posix_file_links = num_links\n else:\n self.dr_entries.px_record.posix_file_links = num_links", "docstring": "Copy the number of file links from the source Rock Ridge entry into\n this Rock Ridge entry.\n\n Parameters:\n src - The source Rock Ridge entry to copy from.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def reconstruct_hostname(self, port_override=None):\n\n url = self.server\n port = port_override or self.port\n if port and ((self.url_scheme == 'https' and str(port) != '443') or\n (self.url_scheme != 'https' and str(port) != '80')):\n url += ':{0}'.format(port)\n\n return url", "docstring": "Reconstruct the hostname of a request.\n\n This is based on the URL reconstruction code in Python PEP 333:\n http://www.python.org/dev/peps/pep-0333/#url-reconstruction. 
Rebuild the\n hostname from the pieces available in the environment.\n\nArgs:\n port_override: str, An override for the port on the returned hostname.\n\nReturns:\n The hostname portion of the URL from the request, not including the\n URL scheme.", "source": "juraj_google_style"} -{"code": "def write_summaries(self, tagged_data, experiment_name, run_name):\n\n logger.debug('Writing summaries for %s tags', len(tagged_data))\n # Connection used as context manager for auto commit/rollback on exit.\n # We still need an explicit BEGIN, because it doesn't do one on enter,\n # it waits until the first DML command - which is totally broken.\n # See: https://stackoverflow.com/a/44448465/1179226\n with self._db:\n self._db.execute('BEGIN TRANSACTION')\n run_id = self._maybe_init_run(experiment_name, run_name)\n tag_to_metadata = {\n tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data)\n }\n tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata)\n tensor_values = []\n for tag, tagdata in six.iteritems(tagged_data):\n tag_id = tag_to_id[tag]\n for step, wall_time, tensor_proto in tagdata.values:\n dtype = tensor_proto.dtype\n shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim)\n # Use tensor_proto.tensor_content if it's set, to skip relatively\n # expensive extraction into intermediate ndarray.\n data = self._make_blob(\n tensor_proto.tensor_content or\n tensor_util.make_ndarray(tensor_proto).tobytes())\n tensor_values.append((tag_id, step, wall_time, dtype, shape, data))\n self._db.executemany(\n ,\n tensor_values)", "docstring": "Transactionally writes the given tagged summary data to the DB.\n\nArgs:\n tagged_data: map from tag to TagData instances.\n experiment_name: name of experiment.\n run_name: name of run.", "source": "juraj_google_style"} -{"code": "def raise_for_status(\n status: int, headers: MutableMapping, data: MutableMapping\n) -> None:\n\n if status != 200:\n if status == 429:\n\n if isinstance(data, str):\n error = data\n else:\n error = data.get(\"error\", \"ratelimited\")\n\n try:\n retry_after = int(headers.get(\"Retry-After\", 1))\n except ValueError:\n retry_after = 1\n raise exceptions.RateLimited(retry_after, error, status, headers, data)\n else:\n raise exceptions.HTTPException(status, headers, data)", "docstring": "Check request response status\n\nArgs:\n status: Response status\n headers: Response headers\n data: Response data\n\nRaises:\n :class:`slack.exceptions.RateLimited`: For 429 status code\n :class:`slack.exceptions:HTTPException`:", "source": "juraj_google_style"} -{"code": "def _get_populate_values(self, instance) -> Tuple[str, str]:\n\n\n return [\n (\n lang_code,\n self._get_populate_from_value(\n instance,\n self.populate_from,\n lang_code\n ),\n )\n for lang_code, _ in settings.LANGUAGES\n ]", "docstring": "Gets all values (for each language) from the\n specified's instance's `populate_from` field.\n\nArgs:\n instance:\n The instance to get the values from.\n\nReturns:\n A list of (lang_code, value) tuples.", "source": "juraj_google_style"} -{"code": "def __init__(self, filename, filename_info, filetype_info):\n\n super(VIIRSActiveFiresTextFileHandler, self).__init__(filename, filename_info, filetype_info)\n\n if not os.path.isfile(filename):\n return\n\n self.file_content = dd.read_csv(filename, skiprows=15, header=None,\n names=[\"latitude\", \"longitude\",\n \"T13\", \"Along-scan\", \"Along-track\", \"detection_confidence\",\n \"power\"])", "docstring": "Makes sure filepath is valid and then reads data into a Dask 
DataFrame\n\nArgs:\n filename: Filename\n filename_info: Filename information\n filetype_info: Filetype information", "source": "juraj_google_style"} -{"code": "def generate_jwt_token(user, authsys, **kwargs):\n\n # Local import to prevent app startup failures\n from cloud_inquisitor.config import dbconfig\n\n token = {\n 'auth_system': authsys,\n 'exp': time.time() + dbconfig.get('session_expire_time'),\n 'roles': [role.name for role in user.roles]\n }\n\n if kwargs:\n token.update(**kwargs)\n\n enc = jwt.encode(token, get_jwt_key_data(), algorithm='HS512')\n return enc.decode()", "docstring": "Generate a new JWT token, with optional extra information. Any data provided in `**kwargs`\n will be added into the token object for auth specific usage\n\nArgs:\n user (:obj:`User`): User object to generate token for\n authsys (str): The auth system for which the token was generated\n **kwargs (dict): Any optional items to add to the token\n\nReturns:\n Encoded JWT token", "source": "juraj_google_style"} -{"code": "def read_tracers_h5(xdmf_file, infoname, snapshot, position):\n\n xdmf_root = xmlET.parse(str(xdmf_file)).getroot()\n tra = {}\n tra[infoname] = [{}, {}] # two blocks, ordered by cores\n if position:\n for axis in 'xyz':\n tra[axis] = [{}, {}]\n for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):\n ibk = int(elt_subdomain.get('Name').startswith('meshYang'))\n if position:\n for data_attr in elt_subdomain.findall('Geometry'):\n for data_item, axis in zip(data_attr.findall('DataItem'),\n 'xyz'):\n icore, data = _get_field(xdmf_file, data_item)\n tra[axis][ibk][icore] = data\n for data_attr in elt_subdomain.findall('Attribute'):\n if data_attr.get('Name') != infoname:\n continue\n icore, data = _get_field(xdmf_file, data_attr.find('DataItem'))\n tra[infoname][ibk][icore] = data\n for info in tra:\n tra[info] = [trab for trab in tra[info] if trab] # remove empty blocks\n for iblk, trab in enumerate(tra[info]):\n tra[info][iblk] = np.concatenate([trab[icore]\n for icore in range(len(trab))])\n return tra", "docstring": "Extract tracers data from hdf5 files.\n\nArgs:\n xdmf_file (:class:`pathlib.Path`): path of the xdmf file.\n infoname (str): name of information to extract.\n snapshot (int): snapshot number.\n position (bool): whether to extract position of tracers.\n\nReturns:\n dict of list of numpy.array:\n Tracers data organized by attribute and block.", "source": "juraj_google_style"} -{"code": "def pyrdf(value, class_type=None, datatype=None, **kwargs):\n\n if isinstance(value, BaseRdfDataType):\n return value\n if isinstance(value, dict):\n value = value.copy()\n class_type = value.pop('type')\n try:\n datatype = value.pop('datatype')\n except KeyError:\n datatype = __TYPE_MATCH__[class_type]\n kwargs = value\n value = kwargs.pop('value')\n if not class_type:\n class_type = 'literal'\n if not datatype:\n datatype = type(value)\n try:\n # print(\"pyrdf: \", value, \" class_type: \", class_type, \" datatype: \", datatype)\n return __DT_LOOKUP__[class_type][datatype](value, **kwargs)\n except KeyError:\n rtn_val = BaseRdfDataType(value)\n rtn_val.datatype = Uri(datatype)\n return rtn_val", "docstring": "Coverts an input to one of the rdfdatatypes classes\n\nArgs:\n value: any rdfdatatype, json dict or vlaue\n class_type: \"literal\", \"uri\" or \"blanknode\"\n datatype: \"xsd:string\", \"xsd:int\" , etc\n kwargs:\n lang: language tag", "source": "juraj_google_style"} -{"code": "def commit_offsets_async(self, offsets, callback=None):\n\n 
self._invoke_completed_offset_commit_callbacks()\n if not self.coordinator_unknown():\n future = self._do_commit_offsets_async(offsets, callback)\n else:\n # we don't know the current coordinator, so try to find it and then\n # send the commit or fail (we don't want recursive retries which can\n # cause offset commits to arrive out of order). Note that there may\n # be multiple offset commits chained to the same coordinator lookup\n # request. This is fine because the listeners will be invoked in the\n # same order that they were added. Note also that BaseCoordinator\n # prevents multiple concurrent coordinator lookup requests.\n future = self.lookup_coordinator()\n future.add_callback(lambda r: functools.partial(self._do_commit_offsets_async, offsets, callback)())\n if callback:\n future.add_errback(lambda e: self.completed_offset_commits.appendleft((callback, offsets, e)))\n\n # ensure the commit has a chance to be transmitted (without blocking on\n # its completion). Note that commits are treated as heartbeats by the\n # coordinator, so there is no need to explicitly allow heartbeats\n # through delayed task execution.\n self._client.poll(timeout_ms=0) # no wakeup if we add that feature\n\n return future", "docstring": "Commit specific offsets asynchronously.\n\nArgs:\n offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit\n callback (callable, optional): called as callback(offsets, response)\n response will be either an Exception or a OffsetCommitResponse\n struct. This callback can be used to trigger custom actions when\n a commit request completes.\n\nReturns:\n kafka.future.Future", "source": "juraj_google_style"} -{"code": "def _get_type(points, soma_class):\n\n assert soma_class in (SOMA_CONTOUR, SOMA_CYLINDER)\n\n npoints = len(points)\n if soma_class == SOMA_CONTOUR:\n return {0: None,\n 1: SomaSinglePoint,\n 2: None}.get(npoints, SomaSimpleContour)\n\n if(npoints == 3 and\n points[0][COLS.P] == -1 and\n points[1][COLS.P] == 1 and\n points[2][COLS.P] == 1):\n L.warning('Using neuromorpho 3-Point soma')\n # NeuroMorpho is the main provider of morphologies, but they\n # with SWC as their default file format: they convert all\n # uploads to SWC. 
In the process of conversion, they turn all\n # somas into their custom 'Three-point soma representation':\n # http://neuromorpho.org/SomaFormat.html\n\n return SomaNeuromorphoThreePointCylinders\n\n return {0: None,\n 1: SomaSinglePoint}.get(npoints, SomaCylinders)", "docstring": "get the type of the soma\n\nArgs:\n points: Soma points\n soma_class(str): one of 'contour' or 'cylinder' to specify the type", "source": "juraj_google_style"} -{"code": "def has_all_nonzero_neurite_radii(neuron, threshold=0.0):\n\n bad_ids = []\n seen_ids = set()\n for s in _nf.iter_sections(neuron):\n for i, p in enumerate(s.points):\n info = (s.id, i)\n if p[COLS.R] <= threshold and info not in seen_ids:\n seen_ids.add(info)\n bad_ids.append(info)\n\n return CheckResult(len(bad_ids) == 0, bad_ids)", "docstring": "Check presence of neurite points with radius not above threshold\n\nArgs:\n neuron(Neuron): The neuron object to test\n threshold: value above which a radius is considered to be non-zero\n\nReturns:\n CheckResult with result including list of (section ID, point ID) pairs\n of zero-radius points", "source": "juraj_google_style"} -{"code": "def scroll(self, x, y):\n\n assert isinstance(x, _INTTYPES), \"x must be an integer, got %s\" % repr(x)\n assert isinstance(y, _INTTYPES), \"y must be an integer, got %s\" % repr(x)\n def getSlide(x, length):\n\n if x > 0:\n srcx = 0\n length -= x\n elif x < 0:\n srcx = abs(x)\n x = 0\n length -= srcx\n else:\n srcx = 0\n return x, length, srcx\n def getCover(x, length):\n\n cover = (0, length) # everything covered\n uncover = None # nothing uncovered\n if x > 0: # left side uncovered\n cover = (x, length - x)\n uncover = (0, x)\n elif x < 0: # right side uncovered\n x = abs(x)\n cover = (0, length - x)\n uncover = (length - x, x)\n return cover, uncover\n\n width, height = self.get_size()\n if abs(x) >= width or abs(y) >= height:\n return self.clear() # just clear the console normally\n\n # get the ranges of the areas that will be uncovered\n coverX, uncoverX = getCover(x, width)\n coverY, uncoverY = getCover(y, height)\n # so at this point we know that coverX and coverY makes a rect that\n # encases the area that we end up blitting to. uncoverX/Y makes a\n # rect in the corner of the uncovered area. So we need to combine\n # the uncoverX/Y with coverY/X to make what's left of the uncovered\n # area. Explaining it makes it mush easier to do now.\n\n # But first we need to blit.\n x, width, srcx = getSlide(x, width)\n y, height, srcy = getSlide(y, height)\n self.blit(self, x, y, width, height, srcx, srcy)\n if uncoverX: # clear sides (0x20 is space)\n self.draw_rect(uncoverX[0], coverY[0], uncoverX[1], coverY[1],\n 0x20, self._fg, self._bg)\n if uncoverY: # clear top/bottom\n self.draw_rect(coverX[0], uncoverY[0], coverX[1], uncoverY[1],\n 0x20, self._fg, self._bg)\n if uncoverX and uncoverY: # clear corner\n self.draw_rect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1],\n 0x20, self._fg, self._bg)", "docstring": "Scroll the contents of the console in the direction of x,y.\n\n Uncovered areas will be cleared to the default background color.\n Does not move the virutal cursor.\n\nArgs:\n x (int): Distance to scroll along the x-axis.\n y (int): Distance to scroll along the y-axis.\n\nReturns:\n Iterator[Tuple[int, int]]: An iterator over the (x, y) coordinates\n of any tile uncovered after scrolling.\n\n .. 
seealso:: :any:`set_colors`", "source": "juraj_google_style"} -{"code": "def __init__(self, port=None, queue_id=None):\n\n super().__init__(action_type=ActionType.OFPAT_ENQUEUE, length=16)\n self.port = port\n self.queue_id = queue_id", "docstring": "Create an ActionEnqueue with the optional parameters below.\n\nArgs:\n port (physical port or :attr:`.Port.OFPP_IN_PORT`): Queue's port.\n queue_id (int): Where to enqueue the packets.", "source": "juraj_google_style"} -{"code": "def get_welcome(self, connId='default'):\n\n thisConn = self.__getConnection(connId)\n outputMsg = \"\"\n try:\n outputMsg += thisConn.getwelcome()\n except ftplib.all_errors as e:\n raise FtpLibraryError(str(e))\n if self.printOutput:\n logger.info(outputMsg)\n return outputMsg", "docstring": "Returns wlecome message of FTP server.\n Parameters:\n - connId(optional) - connection identifier. By default equals 'default'", "source": "juraj_google_style"} -{"code": "def AssertType(value, expected_type):\n\n if not isinstance(value, expected_type):\n message = \"Expected type `%r`, but got value `%r` of type `%s`\"\n message %= (expected_type, value, type(value))\n raise TypeError(message)", "docstring": "Ensures that given value has certain type.\n\nArgs:\n value: A value to assert the type for.\n expected_type: An expected type for the given value.\n\nRaises:\n TypeError: If given value does not have the expected type.", "source": "juraj_google_style"} -{"code": "def find_module_id_defining_flag(self, flagname, default=None):\n\n registered_flag = self._flags().get(flagname)\n if registered_flag is None:\n return default\n for module_id, flags in six.iteritems(self.flags_by_module_id_dict()):\n for flag in flags:\n # It must compare the flag with the one in _flags. This is because a\n # flag might be overridden only for its long name (or short name),\n # and only its short name (or long name) is considered registered.\n if (flag.name == registered_flag.name and\n flag.short_name == registered_flag.short_name):\n return module_id\n return default", "docstring": "Return the ID of the module defining this flag, or default.\n\nArgs:\n flagname: str, name of the flag to lookup.\n default: Value to return if flagname is not defined. Defaults\n to None.\n\nReturns:\n The ID of the module which registered the flag with this name.\n If no such module exists (i.e. no flag with this name exists),\n we return default.", "source": "juraj_google_style"} -{"code": "def bsp_new_with_size(x: int, y: int, w: int, h: int) -> tcod.bsp.BSP:\n\n return Bsp(x, y, w, h)", "docstring": "Create a new BSP instance with the given rectangle.\n\nArgs:\n x (int): Rectangle left coordinate.\n y (int): Rectangle top coordinate.\n w (int): Rectangle width.\n h (int): Rectangle height.\n\nReturns:\n BSP: A new BSP instance.\n\n .. 
deprecated:: 2.0\n Call the :any:`BSP` class instead.", "source": "juraj_google_style"} -{"code": "def _bfd_rx(self, **kwargs):\n\n method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \\\n 'bfd_interval_min_rx'\n bfd_rx = getattr(self._rbridge, method_name)\n config = bfd_rx(**kwargs)\n if kwargs['delete']:\n tag = 'min-rx'\n config.find('.//*%s' % tag).set('operation', 'delete')\n pass\n return config", "docstring": "Return the BFD minimum receive interval XML.\n\n You should not use this method.\n You probably want `BGP.bfd`.\n\nArgs:\n min_rx (str): BFD receive interval in milliseconds (300, 500, etc)\n delete (bool): Remove the configuration if ``True``.\n\nReturns:\n XML to be passed to the switch.\n\nRaises:\n None", "source": "juraj_google_style"} -{"code": "def _verify_docker_image_size(self, image_name):\n\n shell_call(['docker', 'pull', image_name])\n try:\n image_size = subprocess.check_output(\n ['docker', 'inspect', '--format={{.Size}}', image_name]).strip()\n image_size = int(image_size)\n except (ValueError, subprocess.CalledProcessError) as e:\n logging.error('Failed to determine docker image size: %s', e)\n return False\n logging.info('Size of docker image %s is %d', image_name, image_size)\n if image_size > MAX_DOCKER_IMAGE_SIZE:\n logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE)\n return image_size <= MAX_DOCKER_IMAGE_SIZE", "docstring": "Verifies size of Docker image.\n\nArgs:\n image_name: name of the Docker image.\n\nReturns:\n True if image size is within the limits, False otherwise.", "source": "juraj_google_style"} -{"code": "def get_pattern_link_topattern(self, patternnumber):\n\n _checkPatternNumber(patternnumber)\n\n address = _calculateRegisterAddress('linkpattern', patternnumber)\n return self.read_register(address)", "docstring": "Get the 'linked pattern' value for a given pattern.\n\nArgs:\n patternnumber (integer): From 0-7\n\nReturns:\n The 'linked pattern' value (int).", "source": "juraj_google_style"} -{"code": "def get_lanczos_eig(self, compute_m=True, feed_dict=None):\n\n if compute_m:\n min_eig, min_vec = self.sess.run([self.m_min_eig, self.m_min_vec], feed_dict=feed_dict)\n\n else:\n min_eig, min_vec = self.sess.run([self.h_min_eig, self.h_min_vec], feed_dict=feed_dict)\n\n return min_vec, min_eig", "docstring": "Computes the min eigen value and corresponding vector of matrix M or H\n using the Lanczos algorithm.\n\nArgs:\n compute_m: boolean to determine whether we should compute eig val/vec\n for M or for H. 
True for M; False for H.\n feed_dict: dictionary mapping from TF placeholders to values (optional)\n\nReturns:\n min_eig_vec: Corresponding eigen vector to min eig val\n eig_val: Minimum eigen value", "source": "juraj_google_style"} -{"code": "def query(self, query):\n\n\n # run the query on the child datastore\n cursor = self.child_datastore.query(query)\n\n # chain the deserializing generator to the cursor's result set iterable\n cursor._iterable = deserialized_gen(self.serializer, cursor._iterable)\n\n return cursor", "docstring": "Returns an iterable of objects matching criteria expressed in `query`\n De-serializes values on the way out, using a :ref:`deserialized_gen` to\n avoid incurring the cost of de-serializing all data at once, or ever, if\n iteration over results does not finish (subject to order generator\n constraint).\n\nArgs:\n query: Query object describing the objects to return.\n\n Raturns:\n iterable cursor with all objects matching criteria", "source": "juraj_google_style"} -{"code": "def asm_module(exprs, dst_reg, sym_to_reg, triple_or_target=None):\n\n\n if not llvmlite_available:\n raise RuntimeError(\"llvmlite module unavailable! can't assemble...\")\n\n target = llvm_get_target(triple_or_target)\n\n M = ll.Module()\n fntype = ll.FunctionType(ll.VoidType(), [])\n func = ll.Function(M, fntype, name='__arybo')\n func.attributes.add(\"naked\")\n func.attributes.add(\"nounwind\")\n BB = func.append_basic_block()\n\n IRB = ll.IRBuilder()\n IRB.position_at_end(BB)\n\n sym_to_value = {sym: IRB.load_reg(IntType(reg[1]), reg[0], reg[0]) for sym,reg in six.iteritems(sym_to_reg)}\n\n ret = to_llvm_ir(exprs, sym_to_value, IRB)\n IRB.store_reg(ret, IntType(dst_reg[1]), dst_reg[0])\n # See https://llvm.org/bugs/show_bug.cgi?id=15806\n IRB.unreachable()\n\n return M", "docstring": "Generate an LLVM module for a list of expressions\n\nArgs:\n * See :meth:`arybo.lib.exprs_asm.asm_binary` for a description of the list of arguments\n\n Output:\n * An LLVM module with one function named \"__arybo\", containing the\n translated expression.\n\n See :meth:`arybo.lib.exprs_asm.asm_binary` for an usage example.", "source": "juraj_google_style"} -{"code": "def get_token(self,\n token_name,\n project_name,\n dataset_name):\n\n return self.resources.get_token(token_name,\n project_name,\n dataset_name)", "docstring": "Get a token with the given parameters.\n\nArgs:\n project_name (str): Project name\n dataset_name (str): Dataset name project is based on\n token_name (str): Token name\n\nReturns:\n dict: Token info", "source": "juraj_google_style"} -{"code": "def remove(self,\n entity_id,\n property_uri,\n value):\n\n if not entity_id.startswith(\"http\"):\n entity_uri = urllib.parse.urljoin(self.base_url, entity_id)\n else:\n entity_uri = entity_id\n sparql_template = Template()\n sparql = sparql_template.substitute(\n prefix=build_prefixes(self.namespaces),\n entity=entity_uri,\n prop_name=property_uri,\n value_str=self.__value_format__(value))\n delete_property_request = urllib.request.Request(\n entity_uri,\n data=sparql.encode(),\n method='PATCH',\n headers={'Content-Type': 'application/sparql-update'})\n response = urllib.request.urlopen(delete_property_request)\n if response.code < 400:\n return True\n return False", "docstring": "Method removes a triple for the given/subject.\n\nArgs:\n entity_id(string): Fedora Object ID, ideally URI of the subject\n property_uri(string):\n value(string):\n\nReturns:\n boolean: True if triple was removed from the object", "source": 
"juraj_google_style"} -{"code": "def last_revision(self, mod: YangIdentifier) -> ModuleId:\n\n revs = [mn for mn in self.modules if mn[0] == mod]\n if not revs:\n raise ModuleNotRegistered(mod)\n return sorted(revs, key=lambda x: x[1])[-1]", "docstring": "Return the last revision of a module that's part of the data model.\n\nArgs:\n mod: Name of a module or submodule.\n\nRaises:\n ModuleNotRegistered: If the module `mod` is not present in the\n data model.", "source": "juraj_google_style"} -{"code": "def list_files(base_path, ext=None):\n\n if not os.path.isdir(base_path):\n raise ValueError(\"Path does not exist: %s\" % base_path)\n\n files = []\n for entry in os.listdir(base_path):\n if os.path.isfile(os.path.join(base_path, entry)):\n _, entry_ext = os.path.splitext(entry)\n entry_ext = entry_ext.lstrip('.')\n\n if (ext is None) or \\\n (isinstance(ext, str) and entry_ext == ext) or \\\n (isinstance(ext, list) and entry_ext in ext):\n files.append(entry)\n\n return files", "docstring": "Lists all of the files in the given base directory, optionally only\n including whose extension(s) match the ext string/list of strings.\n This is non-recursive.\n\nArgs:\n base_path: The directory in which to search.\n ext: The extension(s) to match in the given directory. If None, this\n matches all file extensions.\n\nReturns:\n A list of filenames relative to the given base path.", "source": "juraj_google_style"} -{"code": "def get_catalog_courses(self, catalog_id):\n\n return self._load_data(\n self.CATALOGS_COURSES_ENDPOINT.format(catalog_id),\n default=[]\n )", "docstring": "Return the courses included in a single course catalog by ID.\n\nArgs:\n catalog_id (int): The catalog ID we want to retrieve.\n\nReturns:\n list: Courses of the catalog in question", "source": "juraj_google_style"} -{"code": "def push_plugin(self, name):\n\n url = self._url('/plugins/{0}/pull', name)\n\n headers = {}\n registry, repo_name = auth.resolve_repository_name(name)\n header = auth.get_config_header(self, registry)\n if header:\n headers['X-Registry-Auth'] = header\n res = self._post(url, headers=headers)\n self._raise_for_status(res)\n return self._stream_helper(res, decode=True)", "docstring": "Push a plugin to the registry.\n\nArgs:\n name (string): Name of the plugin to upload. 
The ``:latest``\n tag is optional, and is the default if omitted.\n\nReturns:\n ``True`` if successful", "source": "juraj_google_style"} -{"code": "def setdocument(self, doc):\n\n assert isinstance(doc, Document)\n\n if not self.doc:\n self.doc = doc\n if self.id:\n if self.id in doc:\n raise DuplicateIDError(self.id)\n else:\n self.doc.index[id] = self\n\n for e in self: #recursive for all children\n if isinstance(e,AbstractElement): e.setdocument(doc)", "docstring": "Associate a document with this element.\n\nArgs:\n doc (:class:`Document`): A document\n\n Each element must be associated with a FoLiA document.", "source": "juraj_google_style"} -{"code": "def decompress_decoder_1d(x, hparams, name=None):\n\n x = tf.expand_dims(x, axis=2)\n output = decompress_decoder(x, hparams,\n strides=(2, 1),\n kernel=(hparams.kernel_size, 1),\n name=name)\n return tf.squeeze(output, axis=2)", "docstring": "Decoder that decompresses 1-D inputs by 2**num_compress_steps.\n\nArgs:\n x: Tensor of shape [batch, compress_length, channels].\n hparams: HParams.\n name: string, variable scope.\n\nReturns:\n Tensor of shape [batch, length, hparams.hidden_size].", "source": "juraj_google_style"} -{"code": "def unpack(self, gpsd_socket_response):\n\n try:\n fresh_data = json.loads(gpsd_socket_response) # The reserved word 'class' is popped from JSON object class\n package_name = fresh_data.pop('class', 'ERROR') # gpsd data package errors are also 'ERROR'.\n package = getattr(self, package_name, package_name) # packages are named for JSON object class\n for key in package.keys():\n package[key] = fresh_data.get(key, 'n/a') # Restores 'n/a' if key is absent in the socket response\n\n except AttributeError: # 'str' object has no attribute 'keys'\n sys.stderr.write('There is an unexpected exception in DataStream.unpack')\n return\n\n except (ValueError, KeyError) as error:\n sys.stderr.write(str(error)) # Extra data or aberrant data in stream.\n return", "docstring": "Sets new socket data as DataStream attributes in those initialised dictionaries\n\nArgs:\n gpsd_socket_response (json object):\n Provides:\n self attribute dictionaries, e.g., self.TPV['lat'], self.SKY['gdop']\n\nRaises:\n AttributeError: 'str' object has no attribute 'keys' when the device falls out of the system\n ValueError, KeyError: most likely extra, or mangled JSON data, should not happen, but that\n applies to a lot of things.", "source": "juraj_google_style"} -{"code": "def update(self, resource, timeout=-1):\n\n self.__set_default_values(resource)\n uri = self._client.build_uri(resource['logicalSwitch']['uri'])\n return self._client.update(resource, uri=uri, timeout=timeout)", "docstring": "Updates a Logical Switch.\n\nArgs:\n resource (dict): Object to update.\n timeout:\n Timeout in seconds. Wait for task completion by default. 
The timeout does not abort the operation\n in OneView, just stop waiting for its completion.\n\nReturns:\n dict: Updated resource.", "source": "juraj_google_style"} -{"code": "def __init__(self, options={}):\n\n settings = {\n 'currency': {\n 'symbol': \"$\",\n 'format': \"%s%v\",\n 'decimal': \".\",\n 'thousand': \",\",\n 'precision': 2,\n 'grouping': 3\n },\n 'number': {\n 'precision': 0,\n 'grouping': 3,\n 'thousand': \",\",\n 'decimal': \".\"\n }\n }\n if options:\n settings.update(options)\n\n self.settings = settings", "docstring": "Summary.\n\nArgs:\n options (dict, optional): settings configuration object.", "source": "juraj_google_style"} -{"code": "def memory_write32(self, addr, data, zone=None):\n\n return self.memory_write(addr, data, zone, 32)", "docstring": "Writes words to memory of a target system.\n\nArgs:\n self (JLink): the ``JLink`` instance\n addr (int): start address to write to\n data (list): list of words to write\n zone (str): optional memory zone to access\n\nReturns:\n Number of words written to target.\n\nRaises:\n JLinkException: on memory access error.", "source": "juraj_google_style"} -{"code": "def AddKeywordsForName(self, name, keywords):\n\n data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords)", "docstring": "Associates keywords with name.\n\n Records that keywords are associated with name.\n\nArgs:\n name: A name which should be associated with some keywords.\n keywords: A collection of keywords to associate with name.", "source": "juraj_google_style"} -{"code": "def listtransactions(self, user_id=\"\", count=10, start_at=0):\n\n txlist = self.rpc.call(\"listtransactions\", user_id, count, start_at)\n self.logger.debug(\"Got transaction list for \" + str(user_id))\n return txlist", "docstring": "List all transactions associated with this account.\n\nArgs:\n user_id (str): this user's unique identifier\n count (int): number of transactions to return (default=10)\n start_at (int): start the list at this transaction (default=0)\n\nReturns:\n list [dict]: transactions associated with this user's account", "source": "juraj_google_style"} -{"code": "def __init__(self, item_id, desc, # pylint: disable=too-many-arguments\n resources, uri, metadata_dict, music_service=None):\n\n _LOG.debug('%s.__init__ with item_id=%s, desc=%s, resources=%s, '\n 'uri=%s, metadata_dict=..., music_service=%s',\n self.__class__.__name__, item_id, desc, resources, uri,\n music_service)\n super(MusicServiceItem, self).__init__(metadata_dict)\n self.item_id = item_id\n self.desc = desc\n self.resources = resources\n self.uri = uri\n self.music_service = music_service", "docstring": "Init music service item\n\nArgs:\n item_id (str): This is the Didl compatible id NOT the music item id\n desc (str): A DIDL descriptor, default ``'RINCON_AssociatedZPUDN'\n resources (list): List of DidlResource\n uri (str): The uri for the location of the item\n metdata_dict (dict): Mapping of metadata\n music_service (MusicService): The MusicService instance the item\n originates from", "source": "juraj_google_style"} -{"code": "def GetFieldValuesTuple(self, trip_id):\n\n result = []\n for fn in self._FIELD_NAMES:\n if fn == 'trip_id':\n result.append(trip_id)\n else:\n # Since we'll be writting to an output file, we want empty values to be\n # outputted as an empty string\n result.append(getattr(self, fn) or '' )\n return tuple(result)", "docstring": "Return a tuple that outputs a row of _FIELD_NAMES to be written to a\n GTFS file.\n\nArgs:\n trip_id: The trip_id of the trip to which this 
StopTime corresponds.\n It must be provided, as it is not stored in StopTime.", "source": "juraj_google_style"} -{"code": "def lines_from_string(string, as_interned=False):\n\n if as_interned:\n return [sys.intern(line) for line in string.splitlines()]\n return string.splitlines()", "docstring": "Create a list of file lines from a given string.\n\nArgs:\n string (str): File string\n as_interned (bool): List of \"interned\" strings (default False)\n\nReturns:\n strings (list): File line list", "source": "juraj_google_style"} -{"code": "def delete_existing_cname(env, zone_id, dns_name):\n\n client = boto3.Session(profile_name=env).client('route53')\n startrecord = None\n newrecord_name = dns_name\n startrecord = find_existing_record(env, zone_id, newrecord_name, check_key='Type', check_value='CNAME')\n if startrecord:\n LOG.info(\"Deleting old record: %s\", newrecord_name)\n _response = client.change_resource_record_sets(\n HostedZoneId=zone_id, ChangeBatch={'Changes': [{\n 'Action': 'DELETE',\n 'ResourceRecordSet': startrecord\n }]})\n LOG.debug('Response from deleting %s: %s', dns_name, _response)", "docstring": "Delete an existing CNAME record.\n\n This is used when updating to multi-region for deleting old records. The\n record can not just be upserted since it changes types.\n\nArgs:\n env (str): Deployment environment.\n zone_id (str): Route53 zone id.\n dns_name (str): FQDN of application's dns entry to add/update.", "source": "juraj_google_style"} -{"code": "def persist_time(run, session, timings):\n\n from benchbuild.utils import schema as s\n\n for timing in timings:\n session.add(\n s.Metric(name=\"time.user_s\", value=timing[0], run_id=run.id))\n session.add(\n s.Metric(name=\"time.system_s\", value=timing[1], run_id=run.id))\n session.add(\n s.Metric(name=\"time.real_s\", value=timing[2], run_id=run.id))", "docstring": "Persist the run results in the database.\n\nArgs:\n run: The run we attach this timing results to.\n session: The db transaction we belong to.\n timings: The timing measurements we want to store.", "source": "juraj_google_style"} -{"code": "def _convert_to_eval_metric(metric_fn):\n\n def problem_metric_fn(*args):\n\n (scores, weights) = metric_fn(*args)\n\n # The tf.metrics.mean function assures correct aggregation.\n return tf.metrics.mean(scores, weights)\n return problem_metric_fn", "docstring": "Wrap a metric fn that returns scores and weights as an eval metric fn.\n\n The input metric_fn returns values for the current batch. 
The wrapper\n aggregates the return values collected over all of the batches evaluated.\n\nArgs:\n metric_fn: function that returns scores and weights for the current batch's\n logits and predicted labels.\n\nReturns:\n function that aggregates the scores and weights from metric_fn.", "source": "juraj_google_style"} -{"code": "def at(self, instant):\n\n\n for event in self:\n if event.begin <= instant <= event.end:\n yield event", "docstring": "Iterates (in chronological order) over all events that are occuring during `instant`.\n\nArgs:\n instant (Arrow object)", "source": "juraj_google_style"} -{"code": "def get_minimizer_options(method):\n\n if method == 'Powell':\n return {'patience': 2,\n 'patience_line_search': None,\n 'reset_method': 'EXTRAPOLATED_POINT'}\n\n elif method == 'Nelder-Mead':\n return {'patience': 200,\n 'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5, 'scale': 0.1,\n 'adaptive_scales': True}\n\n elif method == 'Levenberg-Marquardt':\n return {'patience': 250, 'step_bound': 100.0, 'scale_diag': 1, 'usertol_mult': 30}\n\n elif method == 'Subplex':\n return {'patience': 10,\n 'patience_nmsimplex': 100,\n 'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5, 'scale': 1.0, 'psi': 0.0001, 'omega': 0.01,\n 'adaptive_scales': True,\n 'min_subspace_length': 'auto',\n 'max_subspace_length': 'auto'}\n\n raise ValueError('Could not find the specified method \"{}\".'.format(method))", "docstring": "Return a dictionary with the default options for the given minimization method.\n\nArgs:\n method (str): the name of the method we want the options off\n\nReturns:\n dict: a dictionary with the default options", "source": "juraj_google_style"} -{"code": "def expect_exitstatus(self, exit_status):\n\n self.expect_end()\n logger.debug(\"Checking exit status of '{0}', output so far: {1}\".format(\n self.name, self.get_output()))\n if self._spawn.exitstatus is None:\n raise WrongExitStatusException(\n instance=self, expected=exit_status, output=self.get_output())\n\n if self._spawn.exitstatus is not exit_status:\n raise WrongExitStatusException(\n instance=self,\n expected=exit_status,\n got=self._spawn.exitstatus,\n output=self.get_output())", "docstring": "Wait for the running program to finish and expect some exit status.\n\nArgs:\n exit_status (int): The expected exit status.\n\nRaises:\n WrongExitStatusException: The produced exit status is not the expected one.", "source": "juraj_google_style"} -{"code": "def ParseFileObject(self, parser_mediator, file_object):\n\n msiecf_file = pymsiecf.file()\n msiecf_file.set_ascii_codepage(parser_mediator.codepage)\n\n try:\n msiecf_file.open_file_object(file_object)\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to open file with error: {0!s}'.format(exception))\n return\n\n try:\n self._ParseItems(parser_mediator, msiecf_file)\n finally:\n msiecf_file.close()", "docstring": "Parses a MSIE Cache File (MSIECF) file-like object.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_object (dfvfs.FileIO): file-like object.", "source": "juraj_google_style"} -{"code": "def collections(tag, collectionName, token='', version=''):\n\n if tag not in _COLLECTION_TAGS:\n raise PyEXception('Tag must be in %s' % str(_COLLECTION_TAGS))\n return _getJson('stock/market/collection/' + tag + '?collectionName=' + collectionName, token, version)", "docstring": "Returns an array of quote objects for a given collection type. 
Currently supported collection types are sector, tag, and list\n\n\n https://iexcloud.io/docs/api/#collections\n\nArgs:\n tag (string); Sector, Tag, or List\n collectionName (string); Associated name for tag\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: result", "source": "juraj_google_style"} -{"code": "def _post_process_apply(self, result_data, axis, try_scale=True):\n\n if try_scale:\n try:\n internal_index = self.compute_index(0, result_data, True)\n except IndexError:\n internal_index = self.compute_index(0, result_data, False)\n try:\n internal_columns = self.compute_index(1, result_data, True)\n except IndexError:\n internal_columns = self.compute_index(1, result_data, False)\n else:\n internal_index = self.compute_index(0, result_data, False)\n internal_columns = self.compute_index(1, result_data, False)\n if not axis:\n index = internal_index\n # We check if the two columns are the same length because if\n # they are the same length, `self.columns` is the correct index.\n # However, if the operation resulted in a different number of columns,\n # we must use the derived columns from `self.compute_index()`.\n if len(internal_columns) != len(self.columns):\n columns = internal_columns\n else:\n columns = self.columns\n else:\n columns = internal_columns\n # See above explanation for checking the lengths of columns\n if len(internal_index) != len(self.index):\n index = internal_index\n else:\n index = self.index\n return self.__constructor__(result_data, index, columns)", "docstring": "Recompute the index after applying function.\n\nArgs:\n result_data: a BaseFrameManager object.\n axis: Target axis along which function was applied.\n\nReturns:\n A new PandasQueryCompiler.", "source": "juraj_google_style"} -{"code": "def download(branch=None, build=True, installdir=\"MalmoPlatform\"):\n\n\n if branch is None:\n branch = malmo_version\n\n subprocess.check_call([\"git\", \"clone\", \"-b\", branch, \"https://github.com/Microsoft/malmo.git\", installdir])\n\n return setup(build=build, installdir=installdir)", "docstring": "Download Malmo from github and build (by default) the Minecraft Mod.\n Example usage: import malmoenv.bootstrap; malmoenv.bootstrap.download()\n\nArgs:\n branch: optional branch to clone. TODO Default is release version.\n build: build the Mod unless build arg is given as False.\n installdir: the install dir name. Defaults to MalmoPlatform.\n\nReturns:\n The path for the Malmo Minecraft mod.", "source": "juraj_google_style"} -{"code": "def __init__(self, encoding='utf-8'):\n\n super(StdoutOutputWriter, self).__init__(sys.stdout, encoding=encoding)", "docstring": "Initializes a stdout output writer.\n\nArgs:\n encoding (Optional[str]): output encoding.", "source": "juraj_google_style"} -{"code": "def user(self, email):\n\n LOG.info(\"Fetching user %s\", email)\n user_obj = self.user_collection.find_one({'_id': email})\n\n return user_obj", "docstring": "Fetch a user from the database.\n\nArgs:\n email(str)\n\nReturns:\n user_obj(dict)", "source": "juraj_google_style"} -{"code": "def _create_plugin(self, config):\n\n if config is None:\n raise ValueError('No plugin config to create plugin from.')\n\n name = config.pop('name', None)\n if name is None:\n raise(cfg.AitConfigMissing('plugin name'))\n\n # TODO I don't think we actually care about this being unique? 
Left over from\n # previous conversations about stuff?\n module_name = name.rsplit('.', 1)[0]\n class_name = name.rsplit('.', 1)[-1]\n if class_name in [x.name for x in (self.outbound_streams +\n self.inbound_streams +\n self.servers +\n self.plugins)]:\n raise ValueError(\n 'Plugin \"{}\" already loaded. Only one plugin of a given name is allowed'.\n format(class_name)\n )\n\n plugin_inputs = config.pop('inputs', None)\n if plugin_inputs is None:\n log.warn('No plugin inputs specified for {}'.format(name))\n plugin_inputs = [ ]\n\n subscribers = config.pop('outputs', None)\n if subscribers is None:\n log.warn('No plugin outputs specified for {}'.format(name))\n subscribers = [ ]\n\n # try to create plugin\n module = import_module(module_name)\n plugin_class = getattr(module, class_name)\n instance = plugin_class(plugin_inputs,\n subscribers,\n zmq_args={'zmq_context': self.broker.context,\n 'zmq_proxy_xsub_url': self.broker.XSUB_URL,\n 'zmq_proxy_xpub_url': self.broker.XPUB_URL},\n **config\n )\n\n return instance", "docstring": "Creates a plugin from its config.\n\n Params:\n config: plugin configuration as read by ait.config\n\nReturns:\n plugin: a Plugin\n\nRaises:\n ValueError: if any of the required config values are missing", "source": "juraj_google_style"} -{"code": "def select_if(df, fun):\n\n\n def _filter_f(col):\n try:\n return fun(df[col])\n except:\n return False\n\n cols = list(filter(_filter_f, df.columns))\n return df[cols]", "docstring": "Selects columns where fun(ction) is true\n\nArgs:\n fun: a function that will be applied to columns", "source": "juraj_google_style"} -{"code": "def Remove(self, row):\n\n if row == 0 or row > self.size:\n raise TableError(\"Attempt to remove header row\")\n new_table = []\n # pylint: disable=E1103\n for t_row in self._table:\n if t_row.row != row:\n new_table.append(t_row)\n if t_row.row > row:\n t_row.row -= 1\n self._table = new_table", "docstring": "Removes a row from the table.\n\nArgs:\n row: int, the row number to delete. 
Must be >= 1, as the header\n cannot be removed.\n\nRaises:\n TableError: Attempt to remove nonexistent or header row.", "source": "juraj_google_style"} -{"code": "def _parse_arg(self, var, arg, scope):\n\n if isinstance(var, Variable):\n # kwarg\n if arg:\n if utility.is_variable(arg[0]):\n tmp = scope.variables(arg[0])\n if not tmp:\n return None\n val = tmp.value\n else:\n val = arg\n var = Variable(var.tokens[:-1] + [val])\n else:\n # arg\n if utility.is_variable(var):\n if arg is None:\n raise SyntaxError('Missing argument to mixin')\n elif utility.is_variable(arg[0]):\n tmp = scope.variables(arg[0])\n if not tmp:\n return None\n val = tmp.value\n else:\n val = arg\n var = Variable([var, None, val])\n else:\n return None\n return var", "docstring": "Parse a single argument to mixin.\n\nArgs:\n var (Variable object): variable\n arg (mixed): argument\n scope (Scope object): current scope\n\nReturns:\n Variable object or None", "source": "juraj_google_style"} -{"code": "def _PopulateQuantilesHistogram(self, hist, nums):\n\n if not nums:\n return\n num_quantile_buckets = 10\n quantiles_to_get = [\n x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)\n ]\n quantiles = np.percentile(nums, quantiles_to_get)\n hist.type = self.histogram_proto.QUANTILES\n quantiles_sample_count = float(len(nums)) / num_quantile_buckets\n for low, high in zip(quantiles, quantiles[1:]):\n hist.buckets.add(\n low_value=low, high_value=high, sample_count=quantiles_sample_count)", "docstring": "Fills in the histogram with quantile information from the provided array.\n\nArgs:\n hist: A Histogram proto message to fill in.\n nums: A list of numbers to create a quantiles histogram from.", "source": "juraj_google_style"} -{"code": "def get_heading_encoding(response):\n\n encoding = wpull.protocol.http.util.parse_charset(\n response.fields.get('content-type', ''))\n\n if encoding:\n return wpull.string.normalize_codec_name(encoding)\n else:\n return None", "docstring": "Return the document encoding from a HTTP header.\n\nArgs:\n response (Response): An instance of :class:`.http.Response`.\n\nReturns:\n ``str``, ``None``: The codec name.", "source": "juraj_google_style"} -{"code": "def group_associations_types(\n self,\n main_type,\n sub_type,\n unique_id,\n target,\n api_branch=None,\n api_entity=None,\n owner=None,\n params=None,\n ):\n\n params = params or {}\n if owner:\n params['owner'] = owner\n\n api_branch = api_branch or target.api_sub_type\n api_entity = api_entity or target.api_entity\n\n if not sub_type:\n url = '/v2/{}/{}/groups/{}'.format(main_type, unique_id, api_branch)\n else:\n url = '/v2/{}/{}/{}/groups/{}'.format(main_type, sub_type, unique_id, api_branch)\n\n for gat in self._iterate(url, params, api_entity):\n yield gat", "docstring": "Args:\n owner:\n main_type:\n sub_type:\n unique_id:\n target:\n api_branch:\n api_entity:\n params:\n\n Return:", "source": "juraj_google_style"} -{"code": "def resetAndRejoin(self, timeout):\n\n print '%s call resetAndRejoin' % self.port\n print timeout\n try:\n self._sendline('reset')\n self.isPowerDown = True\n time.sleep(timeout)\n\n if self.deviceRole == Thread_Device_Role.SED:\n self.setPollingRate(self.sedPollingRate)\n\n self.__startOpenThread()\n time.sleep(3)\n\n if self.__sendCommand('state')[0] == 'disabled':\n print '[FAIL] reset and rejoin'\n return False\n return True\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger(\"resetAndRejoin() Error: \" + str(e))", "docstring": "reset and join back Thread Network with a given 
timeout delay\n\nArgs:\n timeout: a timeout interval before rejoin Thread Network\n\nReturns:\n True: successful to reset and rejoin Thread Network\n False: fail to reset and rejoin the Thread Network", "source": "juraj_google_style"} -{"code": "def swap_gain(mapping, node_id1, mapping_id1, node_id2, mapping_id2, weight_dict, match_num):\n\n new_mapping_list = mapping[:]\n # Before swapping, node_id1 maps to mapping_id1, and node_id2 maps to mapping_id2\n # After swapping, node_id1 maps to mapping_id2 and node_id2 maps to mapping_id1\n new_mapping_list[node_id1] = mapping_id2\n new_mapping_list[node_id2] = mapping_id1\n if tuple(new_mapping_list) in match_triple_dict:\n return match_triple_dict[tuple(new_mapping_list)] - match_num\n gain = 0\n new_mapping1 = (node_id1, mapping_id2)\n new_mapping2 = (node_id2, mapping_id1)\n old_mapping1 = (node_id1, mapping_id1)\n old_mapping2 = (node_id2, mapping_id2)\n if node_id1 > node_id2:\n new_mapping2 = (node_id1, mapping_id2)\n new_mapping1 = (node_id2, mapping_id1)\n old_mapping1 = (node_id2, mapping_id2)\n old_mapping2 = (node_id1, mapping_id1)\n if new_mapping1 in weight_dict:\n for key in weight_dict[new_mapping1]:\n if key == -1:\n gain += weight_dict[new_mapping1][-1]\n elif new_mapping_list[key[0]] == key[1]:\n gain += weight_dict[new_mapping1][key]\n if new_mapping2 in weight_dict:\n for key in weight_dict[new_mapping2]:\n if key == -1:\n gain += weight_dict[new_mapping2][-1]\n # to avoid duplicate\n elif key[0] == node_id1:\n continue\n elif new_mapping_list[key[0]] == key[1]:\n gain += weight_dict[new_mapping2][key]\n if old_mapping1 in weight_dict:\n for key in weight_dict[old_mapping1]:\n if key == -1:\n gain -= weight_dict[old_mapping1][-1]\n elif mapping[key[0]] == key[1]:\n gain -= weight_dict[old_mapping1][key]\n if old_mapping2 in weight_dict:\n for key in weight_dict[old_mapping2]:\n if key == -1:\n gain -= weight_dict[old_mapping2][-1]\n # to avoid duplicate\n elif key[0] == node_id1:\n continue\n elif mapping[key[0]] == key[1]:\n gain -= weight_dict[old_mapping2][key]\n match_triple_dict[tuple(new_mapping_list)] = match_num + gain\n return gain", "docstring": "Compute the triple match number gain from the swapping\n\nArgs:\n mapping: current node mapping list\n node_id1: node 1 index in AMR 1\n mapping_id1: the node index in AMR 2 node 1 maps to (in the current mapping)\n node_id2: node 2 index in AMR 1\n mapping_id2: the node index in AMR 2 node 2 maps to (in the current mapping)\n weight_dict: weight dictionary\n match_num: the original matching triple number\n\nReturns:\n the gain number (might be negative)", "source": "juraj_google_style"} -{"code": "def _add_consequences(self, variant_obj, raw_variant_line):\n\n consequences = []\n for consequence in SO_TERMS:\n if consequence in raw_variant_line:\n consequences.append(consequence)\n\n variant_obj.consequences = consequences", "docstring": "Add the consequences found for a variant\n\nArgs:\n variant_obj (puzzle.models.Variant)\n raw_variant_line (str): A raw vcf variant line", "source": "juraj_google_style"} -{"code": "def __init__(self, schema, uid):\n\n message = 'UID \"{}\" is not of the schema \"{}\".'.format(uid, schema)\n super(SchemaUIDConflict, self).__init__(message)", "docstring": "Exception raised when a UID is not matching provided schema.\n\nArgs:\n schema (string): given schema\n uid (string): UID which conflicts the schema", "source": "juraj_google_style"} -{"code": "def result_code(self, value):\n\n if value == self._defaults['resultCode'] and 'resultCode' 
in self._values:\n del self._values['resultCode']\n else:\n self._values['resultCode'] = value", "docstring": "The result_code property.\n\nArgs:\n value (string). the property value.", "source": "juraj_google_style"} -{"code": "def _GetFieldByName(message_descriptor, field_name):\n\n try:\n return message_descriptor.fields_by_name[field_name]\n except KeyError:\n raise ValueError('Protocol message %s has no \"%s\" field.' %\n (message_descriptor.name, field_name))", "docstring": "Returns a field descriptor by field name.\n\nArgs:\n message_descriptor: A Descriptor describing all fields in message.\n field_name: The name of the field to retrieve.\n\nReturns:\n The field descriptor associated with the field name.", "source": "juraj_google_style"} -{"code": "def __init__(self, password, testnet=False):\n\n netcode = 'XTN' if testnet else 'BTC'\n if isinstance(password, str):\n password = password.encode()\n self.wallet = BIP32Node.from_master_secret(password, netcode=netcode)\n self.root_address = ('', self.wallet.address())", "docstring": "Initializes a BIP32 wallet.\n\n Addresses returned by the wallet are of the form ``(path, address)``.\n\nArgs:\n password (bytes): Master secret for the wallet. The password can\n also be passed as a string (``str``).\n testnet (bool): Wwether to use the bitcoin testnet or mainnet.\n Defaults to ``False``.", "source": "juraj_google_style"} -{"code": "def GenerateId(self, entity_id=None):\n\n self._idnum += 1\n if entity_id:\n return '%s_merged_%d' % (entity_id, self._idnum)\n else:\n return 'merged_%d' % self._idnum", "docstring": "Generate a unique id based on the given id.\n\n This is done by appending a counter which is then incremented. The\n counter is initialised at the maximum number used as an ending for\n any id in the old and new schedules.\n\nArgs:\n entity_id: The base id string. 
This is allowed to be None.\n\nReturns:\n The generated id.", "source": "juraj_google_style"} -{"code": "def change_password(self, username, newpassword, raise_on_error=False):\n\n\n response = self._put(self.rest_url + \"/user/password\",\n data=json.dumps({\"value\": newpassword}),\n params={\"username\": username})\n\n if response.ok:\n return True\n\n if raise_on_error:\n raise RuntimeError(response.json()['message'])\n\n return False", "docstring": "Change new password for a user\n\nArgs:\n username: The account username.\n\n newpassword: The account new password.\n\n raise_on_error: optional (default: False)\n\nReturns:\n True: Succeeded\n False: If unsuccessful", "source": "juraj_google_style"} -{"code": "def front(self, n):\n\n new_dtypes = (\n self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n]\n )\n # See head for an explanation of the transposed behavior\n if self._is_transposed:\n result = self.__constructor__(\n self.data.transpose().take(0, n).transpose(),\n self.index,\n self.columns[:n],\n new_dtypes,\n )\n result._is_transposed = True\n else:\n result = self.__constructor__(\n self.data.take(1, n), self.index, self.columns[:n], new_dtypes\n )\n return result", "docstring": "Returns the first n columns.\n\nArgs:\n n: Integer containing the number of columns to return.\n\nReturns:\n DataManager containing the first n columns of the original DataManager.", "source": "juraj_google_style"} -{"code": "def open_window(self, private=False):\n\n handles_before = self.selenium.window_handles\n self.switch_to()\n\n with self.selenium.context(self.selenium.CONTEXT_CHROME):\n # Opens private or non-private window\n self.selenium.find_element(*self._file_menu_button_locator).click()\n if private:\n self.selenium.find_element(\n *self._file_menu_private_window_locator\n ).click()\n else:\n self.selenium.find_element(\n *self._file_menu_new_window_button_locator\n ).click()\n\n return self.wait.until(\n expected.new_browser_window_is_opened(self.selenium, handles_before),\n message=\"No new browser window opened\",\n )", "docstring": "Open a new browser window.\n\nArgs:\n private (bool): Optional parameter to open a private browsing\n window. 
Defaults to False.\n\nReturns:\n :py:class:`BrowserWindow`: Opened window.", "source": "juraj_google_style"} -{"code": "def _force_edge_active(self, seqs: List[List[GridQubit]], edge: EDGE,\n sample_bool: Callable[[], bool]\n ) -> List[List[GridQubit]]:\n\n\n n0, n1 = edge\n\n # Make a copy of original sequences.\n seqs = list(seqs)\n\n # Localize edge nodes within current solution.\n i0, j0 = index_2d(seqs, n0)\n i1, j1 = index_2d(seqs, n1)\n s0 = seqs[i0]\n s1 = seqs[i1]\n\n # Handle case when nodes belong to different linear sequences,\n # separately from the case where they belong to a single linear\n # sequence.\n if i0 != i1:\n\n # Split s0 and s1 in two parts: s0 in parts before n0, and after n0\n # (without n0); s1 in parts before n1, and after n1 (without n1).\n part = [s0[:j0], s0[j0 + 1:]], [s1[:j1], s1[j1 + 1:]]\n\n # Remove both sequences from original list.\n del seqs[max(i0, i1)]\n del seqs[min(i0, i1)]\n\n # Choose part of s0 which will be attached to n0, and make sure it\n # can be attached in the end.\n c0 = 0 if not part[0][1] else 1 if not part[0][\n 0] else sample_bool()\n if c0:\n part[0][c0].reverse()\n\n # Choose part of s1 which will be attached to n1, and make sure it\n # can be attached in the beginning.\n c1 = 0 if not part[1][1] else 1 if not part[1][\n 0] else sample_bool()\n if not c1:\n part[1][c1].reverse()\n\n # Append newly formed sequence from the chosen parts and new edge.\n seqs.append(part[0][c0] + [n0, n1] + part[1][c1])\n\n # Append the left-overs to the solution, if they exist.\n other = [1, 0]\n seqs.append(part[0][other[c0]])\n seqs.append(part[1][other[c1]])\n else:\n # Swap nodes so that n0 always preceeds n1 on sequence.\n if j0 > j1:\n j0, j1 = j1, j0\n n0, n1 = n1, n0\n\n # Split sequence in three parts, without nodes n0 an n1 present:\n # head might end with n0, inner might begin with n0 and end with\n # n1, tail might begin with n1.\n head = s0[:j0]\n inner = s0[j0 + 1:j1]\n tail = s0[j1 + 1:]\n\n # Remove original sequence from sequences list.\n del seqs[i0]\n\n # Either append edge to inner section, or attach it between head\n # and tail.\n if sample_bool():\n # Append edge either before or after inner section.\n if sample_bool():\n seqs.append(inner + [n1, n0] + head[::-1])\n seqs.append(tail)\n else:\n seqs.append(tail[::-1] + [n1, n0] + inner)\n seqs.append(head)\n else:\n # Form a new sequence from head, tail, and new edge.\n seqs.append(head + [n0, n1] + tail)\n seqs.append(inner)\n\n return [e for e in seqs if e]", "docstring": "Move which forces given edge to appear on some sequence.\n\nArgs:\n seqs: List of linear sequences covering chip.\n edge: Edge to be activated.\n sample_bool: Callable returning random bool.\n\nReturns:\n New list of linear sequences with given edge on some of the\n sequences.", "source": "juraj_google_style"} -{"code": "def triangle(duration: int, amp: complex, period: float = None,\n phase: float = 0, name: str = None) -> SamplePulse:\n\n if period is None:\n period = duration\n\n return _sampled_triangle_pulse(duration, amp, period, phase=phase, name=name)", "docstring": "Generates triangle wave `SamplePulse`.\n\n Applies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\n duration: Duration of pulse. Must be greater than zero.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n period: Pulse period, units of dt. 
If `None` defaults to single cycle.\n phase: Pulse phase.\n name: Name of pulse.", "source": "juraj_google_style"} -{"code": "def GetMessages(self, formatter_mediator, event):\n\n if self.DATA_TYPE != event.data_type:\n raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n event.data_type))\n\n event_values = event.CopyToDict()\n\n visit_type = event_values.get('visit_type', 0)\n transition = self._URL_TRANSITIONS.get(visit_type, None)\n if transition:\n transition_str = 'Transition: {0!s}'.format(transition)\n\n extra = event_values.get('extra', None)\n if extra:\n if transition:\n extra.append(transition_str)\n event_values['extra_string'] = ' '.join(extra)\n\n elif transition:\n event_values['extra_string'] = transition_str\n\n return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\n formatter_mediator (FormatterMediator): mediates the interactions\n between formatters and other components, such as storage and Windows\n EventLog resources.\n event (EventObject): event.\n\nReturns:\n tuple(str, str): formatted message string and short message string.\n\nRaises:\n WrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj_google_style"} -{"code": "def test_config_to_dict(test_config_string):\n\n\n test_config = {}\n if test_config_string:\n for config in test_config_string.split(','):\n key, value = config.split('=')\n test_config[key] = value\n\n return test_config", "docstring": "Parse the test config to a dictionary\n\nArgs:\n test_config_string (str) this string come from the --test-config\n flag of the bro executable run command", "source": "juraj_google_style"} -{"code": "def __mul__(self, other: 'TensorFluent') -> 'TensorFluent':\n\n return self._binary_op(self, other, tf.multiply, tf.float32)", "docstring": "Returns a TensorFluent for the multiplication arithmetic operator.\n\nArgs:\n self: The first operand.\n other: The second operand.\n\nReturns:\n A TensorFluent wrapping the operator's output.", "source": "juraj_google_style"} -{"code": "def expression(self, rbp=0):\n\n prev_token = self.consume()\n\n # Retrieve the value from the previous token situated at the\n # leftmost point in the expression\n left = prev_token.nud(context=self)\n\n while rbp < self.current_token.lbp:\n # Read incoming tokens with a higher 'left binding power'.\n # Those are tokens that prefer binding to the left of an expression\n # than to the right of an expression.\n prev_token = self.consume()\n left = prev_token.led(left, context=self)\n\n return left", "docstring": "Extract an expression from the flow of tokens.\n\nArgs:\n rbp (int): the \"right binding power\" of the previous token.\n This represents the (right) precedence of the previous token,\n and will be compared to the (left) precedence of next tokens.\n\nReturns:\n Whatever the led/nud functions of tokens returned.", "source": "juraj_google_style"} -{"code": "def load(fp, **kwargs) -> BioCCollection:\n\n obj = json.load(fp, **kwargs)\n return parse_collection(obj)", "docstring": "Deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to\n a BioCCollection object\n\nArgs:\n fp: a file containing a JSON document\n **kwargs:\n\nReturns:\n BioCCollection: a collection", "source": "juraj_google_style"} -{"code": "def discover(package, cls_match_func):\n\n matched_classes = set()\n\n for _, module_name, _ in pkgutil.walk_packages(\n package.__path__,\n prefix=package.__name__ + 
'.',\n ):\n module = __import__(module_name, fromlist=[str('__trash')], level=0)\n\n # Check all the classes in that module\n for _, imported_class in inspect.getmembers(module, inspect.isclass):\n # Don't include things that are only there due to a side-effect of\n # importing\n if imported_class.__module__ != module.__name__:\n continue\n\n if cls_match_func(imported_class):\n matched_classes.add(imported_class)\n\n return matched_classes", "docstring": "Returns a set of classes in the directory matched by cls_match_func\n\nArgs:\n path - A Python package\n cls_match_func - Function taking a class and returning true if the\n class is to be included in the output.", "source": "juraj_google_style"} -{"code": "def _bind_length_scalar_handlers(tids, scalar_factory, lns=_NON_ZERO_LENGTH_LNS):\n\n handler = partial(_length_scalar_handler, scalar_factory)\n return _bind_length_handlers(tids, handler, lns)", "docstring": "Binds a set of scalar handlers for an inclusive range of low-nibble values.\n\nArgs:\n tids (Sequence[int]): The Type IDs to bind to.\n scalar_factory (Callable): The factory for the scalar parsing function.\n This function can itself return a function representing a thunk to defer the\n scalar parsing or a direct value.\n lns (Sequence[int]): The low-nibble lengths to bind to.", "source": "juraj_google_style"} -{"code": "def fts_match_any(self, fts, inv):\n\n return any([self.fts_match(fts, s) for s in inv])", "docstring": "Return `True` if any segment in `inv` matches the features in `fts`\n\nArgs:\n fts (list): a collection of (value, feature) tuples\n inv (list): a collection of IPA segments represented as Unicode\n strings\n\nReturns:\n bool: `True` if any segment in `inv` matches the features in `fts`", "source": "juraj_google_style"} -{"code": "def do_check(func, files, status):\n\n\n for file_name in files:\n with open(file_name, 'r') as f:\n output = func.parse(f.read(), file_name)\n\n if output:\n status.append(\"{0}: {1}\".format(file_name, output))\n\n return status", "docstring": "Generic do_check helper method\n\nArgs:\n func (function): Specific function to call\n files (list): list of files to run against\n status (list): list of pre-receive check failures to eventually print\n to the user\n\nReturns:\n status list of current pre-redeive check failures. Might be an empty\n list.", "source": "juraj_google_style"} -{"code": "def __field_to_parameter_type(self, field):\n\n # We use lowercase values for types (e.g. 'string' instead of 'STRING').\n variant = field.variant\n if variant == messages.Variant.MESSAGE:\n raise TypeError('A message variant can\\'t be used in a parameter.')\n\n custom_variant_map = {\n messages.Variant.SINT32: 'int32',\n messages.Variant.SINT64: 'int64',\n messages.Variant.BOOL: 'boolean',\n messages.Variant.ENUM: 'string',\n }\n return custom_variant_map.get(variant) or variant.name.lower()", "docstring": "Converts the field variant type into a string describing the parameter.\n\nArgs:\n field: An instance of a subclass of messages.Field.\n\nReturns:\n A string corresponding to the variant enum of the field, with a few\n exceptions. 
In the case of signed ints, the 's' is dropped; for the BOOL\n variant, 'boolean' is used; and for the ENUM variant, 'string' is used.\n\nRaises:\n TypeError: if the field variant is a message variant.", "source": "juraj_google_style"} -{"code": "def checkname(self, elt, ps):\n\n\n parselist,errorlist = self.get_parse_and_errorlist()\n ns, name = _get_element_nsuri_name(elt)\n if ns == SOAP.ENC:\n # Element is in SOAP namespace, so the name is a type.\n if parselist and \\\n (None, name) not in parselist and (ns, name) not in parselist:\n raise EvaluateException(\n 'Element mismatch (got %s wanted %s) (SOAP encoding namespace)' % \\\n (name, errorlist), ps.Backtrace(elt))\n return (ns, name)\n\n # Not a type, check name matches.\n if self.nspname and ns != self.nspname:\n raise EvaluateException('Element NS mismatch (got %s wanted %s)' % \\\n (ns, self.nspname), ps.Backtrace(elt))\n\n if self.pname and name != self.pname:\n raise EvaluateException('Element Name mismatch (got %s wanted %s)' % \\\n (name, self.pname), ps.Backtrace(elt))\n return self.checktype(elt, ps)", "docstring": "See if the name and type of the \"elt\" element is what we're\n looking for. Return the element's type.\n Parameters:\n elt -- the DOM element being parsed\n ps -- the ParsedSoap object.", "source": "juraj_google_style"} -{"code": "def poisson_ll(data, means):\n\n if sparse.issparse(data):\n return sparse_poisson_ll(data, means)\n genes, cells = data.shape\n clusters = means.shape[1]\n ll = np.zeros((cells, clusters))\n for i in range(clusters):\n means_i = np.tile(means[:,i], (cells, 1))\n means_i = means_i.transpose() + eps\n #ll[:,i] = np.sum(xlogy(data, means_i) - gammaln(data+1) - means_i, 0)\n ll[:,i] = np.sum(xlogy(data, means_i) - means_i, 0)\n return ll", "docstring": "Calculates the Poisson log-likelihood.\n\nArgs:\n data (array): 2d numpy array of genes x cells\n means (array): 2d numpy array of genes x k\n\nReturns:\n cells x k array of log-likelihood for each cell/cluster pair", "source": "juraj_google_style"} -{"code": "def log_warning(self, msg):\n\n if self.__logger:\n self.__logger.warning(msg)\n\n if self.__raise_exception_on_warning:\n raise RuntimeError(msg)", "docstring": "Log a warning if ``logger`` exists.\n\nArgs:\n msg: Warning to log.\n\n Warning:\n Can raise a ``RuntimeError`` if this was asked in the constructor.", "source": "juraj_google_style"} -{"code": "def toList(value, itemcast=None, delim=','):\n\n value = value or ''\n itemcast = itemcast or str\n return [itemcast(item) for item in value.split(delim) if item != '']", "docstring": "Returns a list of strings from the specified value.\n\n Parameters:\n value (str): comma delimited string to convert to list.\n itemcast (func): Function to cast each list item to (default str).\n delim (str): string delimiter (optional; default ',').", "source": "juraj_google_style"} -{"code": "def tag(self, repository, tag=None, **kwargs):\n\n return self.client.api.tag(self.id, repository, tag=tag, **kwargs)", "docstring": "Tag this image into a repository. 
Similar to the ``docker tag``\n command.\n\nArgs:\n repository (str): The repository to set for the tag\n tag (str): The tag name\n force (bool): Force\n\nRaises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\nReturns:\n (bool): ``True`` if successful", "source": "juraj_google_style"} -{"code": "def execute_command(self, command, **kwargs):\n\n\n self.info_log(\"executing command: %s\" % command)\n\n try:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n username = kwargs.get(\n 'username',\n self.browser_config.get('username')\n )\n password = self.browser_config.get('password')\n\n ssh.connect(self.get_ip(), username=username, password=password)\n\n stdin, stdout, stderr = ssh.exec_command(command)\n\n ssh.close()\n\n return (stdout, stderr)\n\n except Exception as e:\n msg = \"Execute_command exception: %s\" % str(e)\n self.error_log(msg)\n raise Exception(msg)", "docstring": "Execute a command on the node\n\nArgs:\n command (str)\n\n Kwargs:\n username (str)", "source": "juraj_google_style"} -{"code": "def pdf_to_text(pdf_filepath='', **kwargs):\n\n\n result = []\n try:\n if not os.path.exists(pdf_filepath):\n raise ValueError(\"No valid pdf filepath introduced..\")\n\n # TODO: REVIEW THIS PARAMS\n # update params if not defined\n kwargs['outfp'] = kwargs.get('outfp', StringIO())\n kwargs['laparams'] = kwargs.get('laparams', pdfminer.layout.LAParams())\n kwargs['imagewriter'] = kwargs.get('imagewriter', None)\n kwargs['output_type'] = kwargs.get('output_type', \"text\")\n kwargs['codec'] = kwargs.get('codec', 'utf-8')\n kwargs['disable_caching'] = kwargs.get('disable_caching', False)\n\n with open(pdf_filepath, \"rb\") as f_pdf:\n pdfminer.high_level.extract_text_to_fp(f_pdf, **kwargs)\n\n result = kwargs.get('outfp').getvalue()\n\n except Exception:\n logger.error('fail pdf to text parsing')\n\n return result", "docstring": "Parse pdf to a list of strings using the pdfminer lib.\n\nArgs:\n no_laparams=False,\n all_texts=None,\n detect_vertical=None, word_margin=None, char_margin=None,\n line_margin=None, boxes_flow=None, codec='utf-8',\n strip_control=False, maxpages=0, page_numbers=None, password=\"\",\n scale=1.0, rotation=0, layoutmode='normal', debug=False,\n disable_caching=False,", "source": "juraj_google_style"} -{"code": "def speed_heading(msg):\n\n spd, trk_or_hdg, rocd, tag = velocity(msg)\n return spd, trk_or_hdg", "docstring": "Get speed and ground track (or heading) from the velocity message\n (handles both airborne or surface message)\n\nArgs:\n msg (string): 28 bytes hexadecimal message string\n\nReturns:\n (int, float): speed (kt), ground track or heading (degree)", "source": "juraj_google_style"} -{"code": "def sys_save_screenshot(name: Optional[str] = None) -> None:\n\n lib.TCOD_sys_save_screenshot(\n _bytes(name) if name is not None else ffi.NULL\n )", "docstring": "Save a screenshot to a file.\n\n By default this will automatically save screenshots in the working\n directory.\n\n The automatic names are formatted as screenshotNNN.png. For example:\n screenshot000.png, screenshot001.png, etc. 
Whichever is available first.\n\nArgs:\n file Optional[AnyStr]: File path to save screenshot.", "source": "juraj_google_style"} -{"code": "def take_snapshot(self, snapshot_name, return_dict=True, power_off=False):\n\n if power_off is True and self.status != \"off\":\n action = self.power_off(return_dict=False)\n action.wait()\n self.load()\n\n return self._perform_action(\n {\"type\": \"snapshot\", \"name\": snapshot_name},\n return_dict\n )", "docstring": "Take a snapshot!\n\nArgs:\n snapshot_name (str): name of snapshot\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n power_off (bool): Before taking the snapshot the droplet will be\n turned off with another API call. It will wait until the\n droplet will be powered off.\n\n Returns dict or Action", "source": "juraj_google_style"} -{"code": "def __init__(self, ea):\n\n self._ea = ea\n\n results = self._calc_cases()\n\n self._map = self._build_map(results)\n\n self._reverse_map = self._build_reverse(self._map)", "docstring": "Initialize a switch parser.\n\nArgs:\n ea: An address of a switch jump instruction.", "source": "juraj_google_style"} -{"code": "def find_parents(root, path, names):\n\n if not root:\n return []\n\n if not os.path.commonprefix((root, path)):\n log.warning(\"Path %s not in %s\", path, root)\n return []\n\n # Split the relative by directory, generate all the parent directories, then check each of them.\n # This avoids running a loop that has different base-cases for unix/windows\n # e.g. /a/b and /a/b/c/d/e.py -> ['/a/b', 'c', 'd']\n dirs = [root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep)\n\n # Search each of /a/b/c, /a/b, /a\n while dirs:\n search_dir = os.path.join(*dirs)\n existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names]))\n if existing:\n return existing\n dirs.pop()\n\n # Otherwise nothing\n return []", "docstring": "Find files matching the given names relative to the given path.\n\nArgs:\n path (str): The file path to start searching up from.\n names (List[str]): The file/directory names to look for.\n root (str): The directory at which to stop recursing upwards.\n\nNote:\n The path MUST be within the root.", "source": "juraj_google_style"} -{"code": "def _Open(self, path_spec, mode='rb'):\n\n if not path_spec.HasParent():\n raise errors.PathSpecError(\n 'Unsupported path specification without parent.')\n\n file_object = resolver.Resolver.OpenFileObject(\n path_spec.parent, resolver_context=self._resolver_context)\n\n self._file_object = file_object", "docstring": "Opens the file system object defined by path specification.\n\nArgs:\n path_spec (PathSpec): path specification.\n mode (Optional[str]): file access mode. The default is 'rb' which\n represents read-only binary.\n\nRaises:\n AccessError: if the access to open the file was denied.\n IOError: if the file system object could not be opened.\n PathSpecError: if the path specification is incorrect.\n ValueError: if the path specification is invalid.", "source": "juraj_google_style"} -{"code": "def refresh_state(self, id_or_uri, configuration, timeout=-1):\n\n uri = self._client.build_uri(id_or_uri) + self.REFRESH_STATE_PATH\n return self._client.update(resource=configuration, uri=uri, timeout=timeout)", "docstring": "Refreshes a drive enclosure.\n\nArgs:\n id_or_uri: Can be either the resource ID or the resource URI.\n configuration: Configuration\n timeout: Timeout in seconds. Wait for task completion by default. 
The timeout does not abort the operation\n in OneView; it just stops waiting for its completion.\n\nReturns:\n dict: Drive Enclosure", "source": "juraj_google_style"} -{"code": "def tf_retrieve_indices(self, indices):\n\n states = dict()\n for name in sorted(self.states_memory):\n states[name] = tf.gather(params=self.states_memory[name], indices=indices)\n\n internals = dict()\n for name in sorted(self.internals_memory):\n internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)\n\n actions = dict()\n for name in sorted(self.actions_memory):\n actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)\n\n terminal = tf.gather(params=self.terminal_memory, indices=indices)\n reward = tf.gather(params=self.reward_memory, indices=indices)\n\n if self.include_next_states:\n assert util.rank(indices) == 1\n next_indices = (indices + 1) % self.capacity\n\n next_states = dict()\n for name in sorted(self.states_memory):\n next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)\n\n next_internals = dict()\n for name in sorted(self.internals_memory):\n next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)\n\n return dict(\n states=states,\n internals=internals,\n actions=actions,\n terminal=terminal,\n reward=reward,\n next_states=next_states,\n next_internals=next_internals\n )\n else:\n return dict(\n states=states,\n internals=internals,\n actions=actions,\n terminal=terminal,\n reward=reward\n )", "docstring": "Fetches experiences for given indices.\n\nArgs:\n indices: Index tensor\n\n Returns: Batch of experiences", "source": "juraj_google_style"} -{"code": "def fluent(fn):\n\n @functools.wraps(fn)\n def wrapper(self, *args, **kw):\n # Trigger method proxy\n result = fn(self, *args, **kw)\n # Return self instance or method result\n return self if result is None else result\n return wrapper", "docstring": "Simple function decorator allowing easy method chaining.\n\nArgs:\n fn (function): target function to decorate.", "source": "juraj_google_style"} -{"code": "def __init__(self, *timeslots: List[Timeslot]):\n\n self._table = defaultdict(list)\n\n for slot in timeslots:\n for interval in self._table[slot.channel]:\n if slot.interval.has_overlap(interval):\n raise PulseError(\"Cannot create TimeslotCollection from overlapped timeslots\")\n self._table[slot.channel].append(slot.interval)\n\n self._timeslots = tuple(timeslots)", "docstring": "Create a new time-slot collection.\n\nArgs:\n *timeslots: list of time slots\n\nRaises:\n PulseError: when overlapped time slots are specified", "source": "juraj_google_style"} -{"code": "def save_session(fname=None, session=None, pickleProto=-1):\n\n from scapy import utils\n if fname is None:\n fname = conf.session\n if not fname:\n conf.session = fname = utils.get_temp_file(keep=True)\n log_interactive.info(\"Use [%s] as session file\" % fname)\n\n if session is None:\n try:\n session = get_ipython().user_ns\n except Exception:\n session = six.moves.builtins.__dict__[\"scapy_session\"]\n\n to_be_saved = session.copy()\n if \"__builtins__\" in to_be_saved:\n del(to_be_saved[\"__builtins__\"])\n\n for k in list(to_be_saved):\n i = to_be_saved[k]\n if hasattr(i, \"__module__\") and (k[0] == \"_\" or i.__module__.startswith(\"IPython\")): # noqa: E501\n del(to_be_saved[k])\n if isinstance(i, ConfClass):\n del(to_be_saved[k])\n elif isinstance(i, (type, type, types.ModuleType)):\n if k[0] != \"_\":\n log_interactive.error(\"[%s] (%s) can't be saved.\", k, 
type(to_be_saved[k])) # noqa: E501\n del(to_be_saved[k])\n\n try:\n os.rename(fname, fname + \".bak\")\n except OSError:\n pass\n\n f = gzip.open(fname, \"wb\")\n six.moves.cPickle.dump(to_be_saved, f, pickleProto)\n f.close()\n del f", "docstring": "Save current Scapy session to the file specified in the fname arg.\n\n params:\n - fname: file to save the scapy session in\n - session: scapy session to use. If None, the console one will be used\n - pickleProto: pickle proto version (default: -1 = latest)", "source": "juraj_google_style"} -{"code": "def _CreateTempDir(prefix, run_dir=None):\n\n temp_dir = tempfile.mkdtemp(prefix=prefix + '-', dir=run_dir)\n try:\n yield temp_dir\n finally:\n shutil.rmtree(temp_dir)", "docstring": "Context manager for creating a temporary directory.\n\nArgs:\n prefix: string, the prefix for the temporary directory.\n run_dir: string, the base directory location of the temporary directory.\n\nYields:\n string, the temporary directory created.", "source": "juraj_google_style"} -{"code": "def _findlinestarts(code):\n\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n yield (addr, lineno)\n lastlineno = lineno\n addr += byte_incr\n lineno += line_incr\n if lineno != lastlineno:\n yield (addr, lineno)", "docstring": "Find the offsets in a byte code which are start of lines in the source.\n\n Generate pairs (offset, lineno) as described in Python/compile.c.\n\nArgs:\n code: code object.\n\nYields:\n Address and line number pairs.", "source": "juraj_google_style"} -{"code": "def parse_table_name(bigquery_table):\n\n\n id_name = bigquery_table.split(':')\n if len(id_name) != 2:\n raise ValueError('Bigquery table name should be in the form '\n 'project_id:dataset.table_name. 
Got %s' % bigquery_table)\n return id_name[1]", "docstring": "Giving a string a:b.c, returns b.c.\n\nArgs:\n bigquery_table: full table name project_id:dataset:table\n\nReturns:\n dataset:table\n\nRaises:\n ValueError: if a, b, or c contain the character ':'.", "source": "juraj_google_style"} -{"code": "def _CalculateNTFSTimeHash(self, file_entry):\n\n date_time_values = []\n\n access_time = getattr(file_entry, 'access_time', None)\n if access_time:\n date_time_string = access_time.CopyToDateTimeString()\n date_time_values.append('atime:{0:s}'.format(date_time_string))\n\n creation_time = getattr(file_entry, 'creation_time', None)\n if creation_time:\n date_time_string = creation_time.CopyToDateTimeString()\n date_time_values.append('crtime:{0:s}'.format(date_time_string))\n\n modification_time = getattr(file_entry, 'modification_time', None)\n if modification_time:\n date_time_string = modification_time.CopyToDateTimeString()\n date_time_values.append('mtime:{0:s}'.format(date_time_string))\n\n # file_entry.change_time is an alias of file_entry.entry_modification_time.\n change_time = getattr(file_entry, 'change_time', None)\n if change_time:\n date_time_string = change_time.CopyToDateTimeString()\n date_time_values.append('ctime:{0:s}'.format(date_time_string))\n\n date_time_values = ''.join(date_time_values)\n date_time_values = date_time_values.encode('ascii')\n\n hash_value = hashlib.md5()\n hash_value.update(date_time_values)\n return hash_value.hexdigest()", "docstring": "Calculates an MD5 from the date and time value of a NTFS file entry.\n\nArgs:\n file_entry (dfvfs.FileEntry): file entry.\n\nReturns:\n str: hexadecimal representation of the MD5 hash value of the date and\n time values of the file entry.", "source": "juraj_google_style"} -{"code": "def smear(self, sigma):\n\n diff = [self.x[i + 1] - self.x[i] for i in range(len(self.x) - 1)]\n avg_x_per_step = np.sum(diff) / len(diff)\n if len(self.ydim) == 1:\n self.y = gaussian_filter1d(self.y, sigma / avg_x_per_step)\n else:\n self.y = np.array([\n gaussian_filter1d(self.y[:, k], sigma / avg_x_per_step)\n for k in range(self.ydim[1])]).T", "docstring": "Apply Gaussian smearing to spectrum y value.\n\nArgs:\n sigma: Std dev for Gaussian smear function", "source": "juraj_google_style"} -{"code": "def page_length(self, length):\n\n mH = length/256\n mL = length%256\n if length < 12000:\n self.send(chr(27)+'('+'C'+chr(2)+chr(0)+chr(mL)+chr(mH))\n else:\n raise RuntimeError('Length must be less than 12000.')", "docstring": "Specifies page length. This command is only valid with continuous length labels.\n\nArgs:\n length: The length of the page, in dots. 
Can't exceed 12000.\n\nReturns:\n None\n\nRaises:\n RuntimeError: Length must be less than 12000.", "source": "juraj_google_style"} -{"code": "def outer(vector1, vector2=None):\n\n if vector2 is None:\n vector2 = np.array(vector1).conj()\n else:\n vector2 = np.array(vector2).conj()\n return np.outer(vector1, vector2)", "docstring": "Construct the outer product of two vectors.\n\n The second vector argument is optional, if absent the projector\n of the first vector will be returned.\n\nArgs:\n vector1 (ndarray): the first vector.\n vector2 (ndarray): the (optional) second vector.\n\nReturns:\n np.array: The matrix |v1> 'Timeslot':\n\n return Timeslot(self.interval.shift(time), self.channel)", "docstring": "Return a new Timeslot shifted by `time`.\n\nArgs:\n time: time to be shifted", "source": "juraj_google_style"} -{"code": "def format_level_1_memory(memory):\n\n formatted_memory = _list_to_complex_array(memory)\n # infer meas_return from shape of returned data.\n if not 1 <= len(formatted_memory.shape) <= 2:\n raise QiskitError('Level one memory is not of correct shape.')\n return formatted_memory", "docstring": "Format an experiment result memory object for measurement level 1.\n\nArgs:\n memory (list): Memory from experiment with `meas_level==1`. `avg` or\n `single` will be inferred from shape of result memory.\n\nReturns:\n np.ndarray: Measurement level 1 complex numpy array\n\nRaises:\n QiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)\n indicies.", "source": "juraj_google_style"} -{"code": "def _GetIntegerValue(self, row, value_name):\n\n value = row.get(value_name, None)\n try:\n return int(value, 10)\n except (TypeError, ValueError):\n return None", "docstring": "Converts a specific value of the row to an integer.\n\nArgs:\n row (dict[str, str]): fields of a single row, as specified in COLUMNS.\n value_name (str): name of the value within the row.\n\nReturns:\n int: value or None if the value cannot be converted.", "source": "juraj_google_style"} -{"code": "def ask_for_approval(full_changeset=None, params_diff=None,\n include_verbose=False):\n\n approval_options = ['y', 'n']\n if include_verbose:\n approval_options.append('v')\n\n approve = ui.ask(\"Execute the above changes? [{}] \".format(\n '/'.join(approval_options))).lower()\n\n if include_verbose and approve == \"v\":\n if params_diff:\n logger.info(\n \"Full changeset:\\n\\n%s\\n%s\",\n format_params_diff(params_diff),\n yaml.safe_dump(full_changeset),\n )\n else:\n logger.info(\n \"Full changeset:\\n%s\",\n yaml.safe_dump(full_changeset),\n )\n return ask_for_approval()\n elif approve != \"y\":\n raise exceptions.CancelExecution", "docstring": "Prompt the user for approval to execute a change set.\n\nArgs:\n full_changeset (list, optional): A list of the full changeset that will\n be output if the user specifies verbose.\n params_diff (list, optional): A list of DictValue detailing the\n differences between two parameters returned by\n :func:`stacker.actions.diff.diff_dictionaries`\n include_verbose (bool, optional): Boolean for whether or not to include\n the verbose option", "source": "juraj_google_style"} -{"code": "def getTracesByIds(self, trace_ids, adjust):\n\n self.send_getTracesByIds(trace_ids, adjust)\n return self.recv_getTracesByIds()", "docstring": "Get the full traces associated with the given trace ids.\n\n Second argument is a list of methods of adjusting the trace\n data before returning it. 
Can be empty.\n\n Parameters:\n - trace_ids\n - adjust", "source": "juraj_google_style"} -{"code": "def find(self, package, **kwargs):\n\n spec = find_spec(package)\n if spec is None:\n return None\n limit = []\n if '.' in package:\n package, limit = package.split('.', 1)\n limit = [limit]\n spec = find_spec(package)\n if spec is not None:\n if spec.submodule_search_locations:\n path = spec.submodule_search_locations[0]\n elif spec.origin and spec.origin != 'built-in':\n path = spec.origin\n else:\n return None\n return PackageSpec(spec.name, path, limit)\n return None", "docstring": "Find method.\n\nArgs:\n package (str): package to find.\n **kwargs (): additional keyword arguments.\n\nReturns:\n PackageSpec: the PackageSpec corresponding to the package, or None.", "source": "juraj_google_style"} -{"code": "def append(self, node, dirty=True):\n\n self._children[node.id] = node\n node.parent = self\n if dirty:\n self.touch()\n\n return node", "docstring": "Add a new child node.\n\nArgs:\n node (gkeepapi.Node): Node to add.\n dirty (bool): Whether this node should be marked dirty.", "source": "juraj_google_style"} -{"code": "def _keyDown(key):\n\n if key not in keyboardMapping or keyboardMapping[key] is None:\n return\n\n if type(key) == int:\n fake_input(_display, X.KeyPress, key)\n _display.sync()\n return\n\n needsShift = pyautogui.isShiftCharacter(key)\n if needsShift:\n fake_input(_display, X.KeyPress, keyboardMapping['shift'])\n\n fake_input(_display, X.KeyPress, keyboardMapping[key])\n\n if needsShift:\n fake_input(_display, X.KeyRelease, keyboardMapping['shift'])\n _display.sync()", "docstring": "Performs a keyboard key press without the release. This will put that\n key in a held down state.\n\n NOTE: For some reason, this does not seem to cause key repeats like would\n happen if a keyboard key was held down on a text field.\n\nArgs:\n key (str): The key to be pressed down. The valid names are listed in\n pyautogui.KEY_NAMES.\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def load(cls, config: Optional[Config] = None):\n\n if cls._dfk is not None:\n raise RuntimeError('Config has already been loaded')\n\n if config is None:\n cls._dfk = DataFlowKernel(Config())\n else:\n cls._dfk = DataFlowKernel(config)\n\n return cls._dfk", "docstring": "Load a DataFlowKernel.\n\nArgs:\n - config (Config) : Configuration to load. 
This config will be passed to a\n new DataFlowKernel instantiation which will be set as the active DataFlowKernel.\n\nReturns:\n - DataFlowKernel : The loaded DataFlowKernel object.", "source": "juraj_google_style"} -{"code": "def element(self, using, value):\n\n return self._execute(Command.FIND_ELEMENT, {\n 'using': using,\n 'value': value\n })", "docstring": "Find an element in the current context.\n\n Support:\n Android iOS Web(WebView)\n\nArgs:\n using(str): The element location strategy.\n value(str): The value of the location strategy.\n\nReturns:\n WebElement Object.\n\nRaises:\n WebDriverException.", "source": "juraj_google_style"} -{"code": "def _maybe_init_run(self, experiment_name, run_name):\n\n experiment_id = self._maybe_init_experiment(experiment_name)\n cursor = self._db.cursor()\n cursor.execute(\n ,\n (experiment_id, run_name))\n row = cursor.fetchone()\n if row:\n return row[0]\n run_id = self._create_id()\n # TODO: track actual run start times\n started_time = 0\n cursor.execute(\n ,\n (experiment_id, run_id, run_name, time.time(), started_time))\n return run_id", "docstring": "Returns the ID for the given run, creating the row if needed.\n\nArgs:\n experiment_name: name of experiment containing this run.\n run_name: name of run.", "source": "juraj_google_style"} -{"code": "def start(self, iterations):\n\n self.touched = True\n self.iter = int(iterations)\n self.t_start = time.time()", "docstring": "Start the progress bar.\n\n Parameters:\n iterations (int): Number of iterations.", "source": "juraj_google_style"} -{"code": "def char_style(self, style):\n\n styleset = {'normal': 0,\n 'outline': 1,\n 'shadow': 2,\n 'outlineshadow': 3\n }\n if style in styleset:\n self.send(chr(27) + 'q' + chr(styleset[style]))\n else:\n raise RuntimeError('Invalid character style in function charStyle')", "docstring": "Sets the character style.\n\nArgs:\n style: The desired character style. 
Choose from 'normal', 'outline', 'shadow', and 'outlineshadow'\n\nReturns:\n None\n\nRaises:\n RuntimeError: Invalid character style", "source": "juraj_google_style"} -{"code": "def sendto(self, transport, addr):\n\n msg = bytes(self) + b'\\r\\n'\n logger.debug(\"%s:%s < %s\", *(addr + (self,)))\n transport.sendto(msg, addr)", "docstring": "Send request to a given address via given transport.\n\nArgs:\n transport (asyncio.DatagramTransport):\n Write transport to send the message on.\n addr (Tuple[str, int]):\n IP address and port pair to send the message to.", "source": "juraj_google_style"} -{"code": "def radar_xsect(scatterer, h_pol=True):\n\n Z = scatterer.get_Z()\n if h_pol:\n return 2 * np.pi * \\\n (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n else:\n return 2 * np.pi * \\\n (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])", "docstring": "Radar cross section for the current setup.\n\nArgs:\n scatterer: a Scatterer instance.\n h_pol: If True (default), use horizontal polarization.\n If False, use vertical polarization.\n\nReturns:\n The radar cross section.", "source": "juraj_google_style"} -{"code": "def getRetinas(self, retina_name=None):\n\n\n resourcePath = '/retinas'\n method = 'GET'\n\n queryParams = {}\n headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}\n postData = None\n\n queryParams['retina_name'] = retina_name\n response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)\n return [retina.Retina(**r) for r in response.json()]", "docstring": "Information about retinas\n\nArgs:\n retina_name, str: The retina name (optional) (optional)\n Returns: Array[Retina]", "source": "juraj_google_style"} -{"code": "def single_gate_params(gate, params=None):\n\n if gate in ('U', 'u3'):\n return params[0], params[1], params[2]\n elif gate == 'u2':\n return np.pi / 2, params[0], params[1]\n elif gate == 'u1':\n return 0, 0, params[0]\n elif gate == 'id':\n return 0, 0, 0\n raise QiskitError('Gate is not among the valid types: %s' % gate)", "docstring": "Apply a single qubit gate to the qubit.\n\nArgs:\n gate(str): the single qubit gate name\n params(list): the operation parameters op['params']\n\nReturns:\n tuple: a tuple of U gate parameters (theta, phi, lam)\n\nRaises:\n QiskitError: if the gate name is not valid", "source": "juraj_google_style"} -{"code": "def call_remoteckan(self, *args, **kwargs):\n # type: (Any, Any) -> Dict\n\n requests_kwargs = kwargs.get('requests_kwargs', dict())\n credentials = self._get_credentials()\n if credentials:\n requests_kwargs['auth'] = credentials\n kwargs['requests_kwargs'] = requests_kwargs\n apikey = kwargs.get('apikey', self.get_api_key())\n kwargs['apikey'] = apikey\n return self.remoteckan().call_action(*args, **kwargs)", "docstring": "Calls the remote CKAN\n\nArgs:\n *args: Arguments to pass to remote CKAN call_action method\n **kwargs: Keyword arguments to pass to remote CKAN call_action method\n\nReturns:\n Dict: The response from the remote CKAN call_action method", "source": "juraj_google_style"} -{"code": "def convert_predictions_to_image_summaries(hook_args):\n\n decode_hparams = hook_args.decode_hparams\n if not decode_hparams.display_decoded_images:\n return []\n predictions = hook_args.predictions[0]\n\n # Display ten random inputs and outputs so that tensorboard does not hang.\n all_summaries = []\n rand_predictions = np.random.choice(predictions, size=10)\n for ind, prediction in enumerate(rand_predictions):\n output_summary = image_to_tf_summary_value(\n prediction[\"outputs\"], 
tag=\"%d_output\" % ind)\n input_summary = image_to_tf_summary_value(\n prediction[\"inputs\"], tag=\"%d_input\" % ind)\n all_summaries.append(input_summary)\n all_summaries.append(output_summary)\n return all_summaries", "docstring": "Optionally converts images from hooks_args to image summaries.\n\nArgs:\n hook_args: DecodeHookArgs namedtuple\n\nReturns:\n summaries: list of tf.Summary values if hook_args.decode_hpara", "source": "juraj_google_style"} -{"code": "def ensure_crossplat_path(path, winroot='C:'):\n r\n cplat_path = path.replace('\\\\', '/')\n if cplat_path == winroot:\n cplat_path += '/'\n return cplat_path", "docstring": "r\"\"\"\n ensure_crossplat_path\n\nArgs:\n path (str):\n\nReturns:\n str: crossplat_path\n\n Example(DOCTEST):\n >>> # ENABLE_DOCTEST\n >>> from utool.util_path import * # NOQA\n >>> path = r'C:\\somedir'\n >>> cplat_path = ensure_crossplat_path(path)\n >>> result = cplat_path\n >>> print(result)\n C:/somedir", "source": "juraj_google_style"} -{"code": "def GetMetadata(\n self, metadata_key='', recursive=True, timeout=None, retry=True):\n\n return self._HandleMetadataUpdate(\n metadata_key=metadata_key, recursive=recursive, wait=False,\n timeout=timeout, retry=retry)", "docstring": "Retrieve the contents of metadata server for a metadata key.\n\nArgs:\n metadata_key: string, the metadata key to watch for changes.\n recursive: bool, True if we should recursively watch for metadata changes.\n timeout: int, timeout in seconds for returning metadata output.\n retry: bool, True if we should retry on failure.\n\nReturns:\n json, the deserialized contents of the metadata server or None if error.", "source": "juraj_google_style"} -{"code": "def hash_blocks(text, hashes):\n\n def sub(match):\n block = match.group(1)\n hashed = hash_text(block, 'block')\n hashes[hashed] = block\n return '\\n\\n' + hashed + '\\n\\n'\n return re_block.sub(sub, text)", "docstring": "Hashes HTML block tags.\n\n PARAMETERS:\n text -- str; Markdown text\n hashes -- dict; a dictionary of all hashes, where keys are hashes\n and values are their unhashed versions.\n\n When HTML block tags are used, all content inside the tags is\n preserved as-is, without any Markdown processing. 
See block_tags\n for a list of block tags.", "source": "juraj_google_style"} -{"code": "def _save_env(env):\n\n env_path = os.path.join(env[\"resultdir\"], \"env\")\n\n if os.path.isdir(env[\"resultdir\"]):\n with open(env_path, \"w\") as f:\n yaml.dump(env, f)", "docstring": "Saves one environment.\n\nArgs:\n env (dict): the env dict to save.", "source": "juraj_google_style"} -{"code": "def xldate_as_datetime(xldate, datemode=0, option=\"to_datetime\"):\n\n\n # This does not work for numpy-arrays\n\n if option == \"to_float\":\n d = (xldate - 25589) * 86400.0\n else:\n try:\n d = datetime.datetime(1899, 12, 30) + \\\n datetime.timedelta(days=xldate + 1462 * datemode)\n # date_format = \"%Y-%m-%d %H:%M:%S:%f\" # with microseconds,\n # excel cannot cope with this!\n if option == \"to_string\":\n date_format = \"%Y-%m-%d %H:%M:%S\" # without microseconds\n d = d.strftime(date_format)\n except TypeError:\n logging.info(f'The date is not of correct type [{xldate}]')\n d = xldate\n return d", "docstring": "Converts a xls date stamp to a more sensible format.\n\nArgs:\n xldate (str): date stamp in Excel format.\n datemode (int): 0 for 1900-based, 1 for 1904-based.\n option (str): option in (\"to_datetime\", \"to_float\", \"to_string\"),\n return value\n\nReturns:\n datetime (datetime object, float, or string).", "source": "juraj_google_style"} -{"code": "def importGurobiSolution(self, grbmodel):\n\n self.eval(''.join(\n 'let {} := {};'.format(var.VarName, var.X)\n for var in grbmodel.getVars()\n if '$' not in var.VarName\n ))", "docstring": "Import the solution from a gurobipy.Model object.\n\nArgs:\n grbmodel: A :class:`gurobipy.Model` object with the model solved.", "source": "juraj_google_style"} -{"code": "def download(self, uri, file_path):\n\n with open(file_path, 'wb') as file:\n return self._connection.download_to_stream(file, uri)", "docstring": "Downloads the contents of the requested URI to a stream.\n\nArgs:\n uri: URI\n file_path: File path destination\n\nReturns:\n bool: Indicates if the file was successfully downloaded.", "source": "juraj_google_style"} -{"code": "def decompose(miz_file: Path, output_folder: Path):\n\n mission_folder, assets_folder = NewMiz._get_subfolders(output_folder)\n NewMiz._wipe_folders(mission_folder, assets_folder)\n LOGGER.info('unzipping mission file')\n with Miz(miz_file) as miz:\n version = miz.mission.d['version']\n LOGGER.debug(f'mission version: \"%s\"', version)\n\n LOGGER.info('copying assets to: \"%s\"', assets_folder)\n ignore = shutil.ignore_patterns('mission')\n shutil.copytree(str(miz.temp_dir), str(assets_folder), ignore=ignore)\n\n NewMiz._reorder_warehouses(assets_folder)\n\n LOGGER.info('decomposing mission table into: \"%s\" (this will take a while)', mission_folder)\n NewMiz._decompose_dict(miz.mission.d, 'base_info', mission_folder, version, miz)", "docstring": "Decompose this Miz into json\n\nArgs:\n output_folder: folder to output the json structure as a Path\n miz_file: MIZ file path as a Path", "source": "juraj_google_style"} -{"code": "def calculate_weighted_avg(bonds):\n\n minimum_bond = min(bonds)\n weighted_sum = 0.0\n total_sum = 0.0\n for entry in bonds:\n weighted_sum += entry * exp(1 - (entry / minimum_bond) ** 6)\n total_sum += exp(1 - (entry / minimum_bond) ** 6)\n return weighted_sum / total_sum", "docstring": "Returns the weighted average bond length given by\n Hoppe's effective coordination number formula.\n\nArgs:\n bonds (list): list of floats that are the\n bond distances between a cation and its\n peripheral 
ions", "source": "juraj_google_style"} -{"code": "def grow(self, times=1):\n\n self.nodes.append([])\n\n for n, node in enumerate(self.nodes[self.age]):\n if self.age == 0:\n p_node = Node(self.pos[:2])\n else:\n p_node = self._get_node_parent(self.age-1, n)\n angle = node.get_node_angle(p_node)\n for i in range(self.comp):\n tot_angle = self.__get_total_angle(angle, i)\n length = self.__get_total_length(self.age+1, i)\n self.nodes[self.age+1].append(node.make_new_node(length, tot_angle))\n\n self.age += 1\n\n if times > 1:\n self.grow(times-1)", "docstring": "Let the tree grow.\n\nArgs:\n times (integer): Indicate how many times the tree will grow.", "source": "juraj_google_style"} -{"code": "def predict(self, X_feat, X_seq):\n\n\n # insert one dimension - backcompatiblity\n X_seq = np.expand_dims(X_seq, axis=1)\n\n return self._get_other_var(X_feat, X_seq, variable=\"y_pred\")", "docstring": "Predict the response variable :py:attr:`y` for new input data (:py:attr:`X_feat`, :py:attr:`X_seq`).\n\nArgs:\n X_feat: Feature design matrix. Same format as :py:attr:`X_feat` in :py:meth:`train`\n X_seq: Sequenc design matrix. Same format as :py:attr:`X_seq` in :py:meth:`train`", "source": "juraj_google_style"} -{"code": "def remove(text, exclude):\n\n exclude = ''.join(str(symbol) for symbol in exclude)\n return text.translate(str.maketrans('', '', exclude))", "docstring": "Remove ``exclude`` symbols from ``text``.\n\nExample:\n >>> remove(\"example text\", string.whitespace)\n 'exampletext'\n\nArgs:\n text (str): The text to modify\n exclude (iterable): The symbols to exclude\n\nReturns:\n ``text`` with ``exclude`` symbols removed", "source": "juraj_google_style"} -{"code": "def iplot_state_qsphere(rho, figsize=None):\n\n\n # HTML\n html_template = Template()\n\n # JavaScript\n javascript_template = Template()\n rho = _validate_input_state(rho)\n if figsize is None:\n options = {}\n else:\n options = {'width': figsize[0], 'height': figsize[1]}\n\n qspheres_data = []\n # Process data and execute\n num = int(np.log2(len(rho)))\n\n # get the eigenvectors and eigenvalues\n weig, stateall = linalg.eigh(rho)\n\n for _ in range(2**num):\n # start with the max\n probmix = weig.max()\n prob_location = weig.argmax()\n if probmix > 0.001:\n # print(\"The \" + str(k) + \"th eigenvalue = \" + str(probmix))\n # get the max eigenvalue\n state = stateall[:, prob_location]\n loc = np.absolute(state).argmax()\n # get the element location closes to lowest bin representation.\n for j in range(2**num):\n test = np.absolute(np.absolute(state[j]) -\n np.absolute(state[loc]))\n if test < 0.001:\n loc = j\n break\n # remove the global phase\n angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)\n angleset = np.exp(-1j*angles)\n state = angleset*state\n state.flatten()\n\n spherepoints = []\n for i in range(2**num):\n # get x,y,z points\n\n element = bin(i)[2:].zfill(num)\n weight = element.count(\"1\")\n\n number_of_divisions = n_choose_k(num, weight)\n weight_order = bit_string_index(element)\n\n angle = weight_order * 2 * np.pi / number_of_divisions\n\n zvalue = -2 * weight / num + 1\n xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)\n yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)\n\n # get prob and angle - prob will be shade and angle color\n prob = np.real(np.dot(state[i], state[i].conj()))\n angles = (np.angle(state[i]) + 2 * np.pi) % (2 * np.pi)\n qpoint = {\n 'x': xvalue,\n 'y': yvalue,\n 'z': zvalue,\n 'prob': prob,\n 'phase': angles\n }\n spherepoints.append(qpoint)\n\n # Associate all points to one 
sphere\n sphere = {\n 'points': spherepoints,\n 'eigenvalue': probmix\n }\n\n # Add sphere to the spheres array\n qspheres_data.append(sphere)\n weig[prob_location] = 0\n\n div_number = str(time.time())\n div_number = re.sub('[.]', '', div_number)\n\n html = html_template.substitute({\n 'divNumber': div_number\n })\n\n javascript = javascript_template.substitute({\n 'data': qspheres_data,\n 'divNumber': div_number,\n 'options': options\n })\n\n display(HTML(html + javascript))", "docstring": "Create a Q sphere representation.\n\n Graphical representation of the input array, using a Q sphere for each\n eigenvalue.\n\nArgs:\n rho (array): State vector or density matrix.\n figsize (tuple): Figure size in pixels.", "source": "juraj_google_style"} -{"code": "def call(func, args):\n\n assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(\n func.__name__)\n raw_func = (\n func if isinstance(func, FunctionType) else func.__class__.__call__)\n hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))\n argspec = _getargspec(raw_func)\n named_args = {}\n varargs = ()\n for k, nk, v in _normalize(args):\n if nk == argspec.varargs:\n hints[nk] = Tuple[hints[nk], ...]\n elif nk not in argspec.args and argspec.varkw in hints:\n hints[nk] = hints[argspec.varkw]\n try:\n value = cast(hints[nk], v)\n except TypeError as e:\n _LOGGER.exception(e)\n six.raise_from(exc.InvalidCliValueError(k, v), e)\n if nk == argspec.varargs:\n varargs = value\n elif (nk in argspec.args or argspec.varkw) and (\n nk not in named_args or named_args[nk] is None):\n named_args[nk] = value\n return func(*varargs, **named_args)", "docstring": "Call the function with args normalized and cast to the correct types.\n\nArgs:\n func: The function to call.\n args: The arguments parsed by docopt.\n\nReturns:\n The return value of func.", "source": "juraj_google_style"} -{"code": "def getServerSSLContext(self, hostname=None):\n\n sslctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n if hostname is None:\n hostname = socket.gethostname()\n certfile = self.getHostCertPath(hostname)\n if certfile is None:\n raise s_exc.NoCertKey('Missing .crt for %s' % hostname)\n keyfile = self.getHostKeyPath(hostname)\n if keyfile is None:\n raise s_exc.NoCertKey('Missing .key for %s' % hostname)\n\n sslctx.load_cert_chain(certfile, keyfile)\n\n return sslctx", "docstring": "Returns an ssl.SSLContext appropriate to listen on a socket\n\nArgs:\n hostname: if None, the value from socket.gethostname is used to find the key in the servers directory.\n This name should match the not-suffixed part of two files ending in .key and .crt in the hosts subdirectory", "source": "juraj_google_style"} -{"code": "def _add_node(self, node):\n\n node_id = len(self.node_list)\n self.node_to_id[node] = node_id\n self.node_list.append(node)\n self.adj_list[node_id] = []\n self.reverse_adj_list[node_id] = []\n return node_id", "docstring": "Add a new node to node_list and give the node an ID.\n\nArgs:\n node: An instance of Node.\n\nReturns:\n node_id: An integer.", "source": "juraj_google_style"} -{"code": "def _revoke(self, http):\n\n self._do_revoke(http, self.refresh_token or self.access_token)", "docstring": "Revokes this credential and deletes the stored copy (if it exists).\n\nArgs:\n http: an object to be used to make HTTP requests.", "source": "juraj_google_style"} -{"code": "def _times(t0, hours):\n\n\t\tif not isinstance(hours, Iterable):\n\t\t\treturn Tide._times(t0, [hours])[0]\n\t\telif not isinstance(hours[0], 
datetime):\n\t\t\treturn np.array([t0 + timedelta(hours=h) for h in hours])\n\t\telse:\n\t\t\treturn np.array(hours)", "docstring": "Return a (list of) datetime(s) given an initial time and an (list of) hourly offset(s).\n\nArgs:\n t0 -- initial time\n hours -- hourly offsets from t0", "source": "juraj_google_style"} -{"code": "def _do_refresh_request(self, http):\n\n body = self._generate_refresh_request_body()\n headers = self._generate_refresh_request_headers()\n\n logger.info('Refreshing access_token')\n resp, content = transport.request(\n http, self.token_uri, method='POST',\n body=body, headers=headers)\n content = _helpers._from_bytes(content)\n if resp.status == http_client.OK:\n d = json.loads(content)\n self.token_response = d\n self.access_token = d['access_token']\n self.refresh_token = d.get('refresh_token', self.refresh_token)\n if 'expires_in' in d:\n delta = datetime.timedelta(seconds=int(d['expires_in']))\n self.token_expiry = delta + _UTCNOW()\n else:\n self.token_expiry = None\n if 'id_token' in d:\n self.id_token = _extract_id_token(d['id_token'])\n self.id_token_jwt = d['id_token']\n else:\n self.id_token = None\n self.id_token_jwt = None\n # On temporary refresh errors, the user does not actually have to\n # re-authorize, so we unflag here.\n self.invalid = False\n if self.store:\n self.store.locked_put(self)\n else:\n # An {'error':...} response body means the token is expired or\n # revoked, so we flag the credentials as such.\n logger.info('Failed to retrieve access token: %s', content)\n error_msg = 'Invalid response {0}.'.format(resp.status)\n try:\n d = json.loads(content)\n if 'error' in d:\n error_msg = d['error']\n if 'error_description' in d:\n error_msg += ': ' + d['error_description']\n self.invalid = True\n if self.store is not None:\n self.store.locked_put(self)\n except (TypeError, ValueError):\n pass\n raise HttpAccessTokenRefreshError(error_msg, status=resp.status)", "docstring": "Refresh the access_token using the refresh_token.\n\nArgs:\n http: an object to be used to make HTTP requests.\n\nRaises:\n HttpAccessTokenRefreshError: When the refresh fails.", "source": "juraj_google_style"} -{"code": "def checkAndRaise(pageNum, itemsPerPage):\n\n if pageNum < 1:\n raise ErrPaginationLimits(ErrPaginationLimits.ERR_PAGE_NUM)\n\n if itemsPerPage < Settings.itemsPerPageMin or itemsPerPage > Settings.itemsPerPageMax:\n raise ErrPaginationLimits(ErrPaginationLimits.ERR_ITEMS_PER_PAGE)", "docstring": "Check and Raise an Exception if needed\n\nArgs:\n pageNum (int): Page number\n itemsPerPage (int): Number of items per Page\n\nRaises:\n ErrPaginationLimits: If we are out of limits", "source": "juraj_google_style"} -{"code": "def _CheckAttribute(self, attribute, value):\n\n if not isinstance(attribute, Attribute):\n raise AttributeError(\"Attribute %s must be of type aff4.Attribute()\" %\n attribute)\n\n if not isinstance(value, attribute.attribute_type):\n raise ValueError(\"Value for attribute %s must be of type %s()\" %\n (attribute, attribute.attribute_type.__name__))", "docstring": "Check that the value is of the expected type.\n\nArgs:\n attribute: An instance of Attribute().\n value: An instance of RDFValue.\n\nRaises:\n ValueError: when the value is not of the expected type.\n AttributeError: When the attribute is not of type Attribute().", "source": "juraj_google_style"} -{"code": "def FinalizeTaskStorage(self, task):\n\n if task.identifier not in self._task_storage_writers:\n raise IOError('Storage writer for task: {0:s} does not exist.'.format(\n 
task.identifier))", "docstring": "Finalizes a processed task storage.\n\nArgs:\n task (Task): task.\n\nRaises:\n IOError: if the task storage does not exist.\n OSError: if the task storage does not exist.", "source": "juraj_google_style"} -{"code": "def splits(cls, datasets, batch_sizes=None, **kwargs):\n\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)", "docstring": "Create Iterator objects for multiple splits of a dataset.\n\nArgs:\n datasets: Tuple of Dataset objects corresponding to the splits. The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.", "source": "juraj_google_style"} -{"code": "def _all_number_groups_are_exactly_present(numobj, normalized_candidate, formatted_number_groups):\n\n candidate_groups = re.split(NON_DIGITS_PATTERN, normalized_candidate)\n # Set this to the last group, skipping it if the number has an extension.\n if numobj.extension is not None:\n candidate_number_group_index = len(candidate_groups) - 2\n else:\n candidate_number_group_index = len(candidate_groups) - 1\n # First we check if the national significant number is formatted as a\n # block. We use contains and not equals, since the national significant\n # number may be present with a prefix such as a national number prefix, or\n # the country code itself.\n if (len(candidate_groups) == 1 or\n candidate_groups[candidate_number_group_index].find(national_significant_number(numobj)) != -1):\n return True\n # Starting from the end, go through in reverse, excluding the first group,\n # and check the candidate and number groups are the same.\n formatted_number_group_index = len(formatted_number_groups) - 1\n while (formatted_number_group_index > 0 and candidate_number_group_index >= 0):\n if (candidate_groups[candidate_number_group_index] !=\n formatted_number_groups[formatted_number_group_index]):\n return False\n formatted_number_group_index -= 1\n candidate_number_group_index -= 1\n # Now check the first group. 
There may be a national prefix at the start, so we only check\n # that the candidate group ends with the formatted number group.\n return (candidate_number_group_index >= 0 and\n candidate_groups[candidate_number_group_index].endswith(formatted_number_groups[0]))", "docstring": "Returns True if the groups of digits found in our candidate phone number match our\n expectations.\n\nArgs:\n numobj -- the original number we found when parsing\n normalized_candidate -- the candidate number, normalized to only contain ASCII digits,\n but with non-digits (spaces etc) retained\n expected_number_groups -- the groups of digits that we would expect to see if we\n formatted this number\n Returns True if expectations matched.", "source": "juraj_google_style"} -{"code": "def set_password(self, raw_password):\n\n self.password = pbkdf2_sha512.encrypt(raw_password, rounds=10000,\n salt_size=10)", "docstring": "Kullanıcı şifresini encrypt ederek set eder.\n\nArgs:\n raw_password (str)", "source": "juraj_google_style"} -{"code": "def localize_file(path_or_buffer):\n\n\n path_or_buffer = _stringify_path(path_or_buffer)\n\n if _is_url(path_or_buffer):\n req = urlopen(path_or_buffer)\n filename = os.path.basename(req.geturl())\n if os.path.splitext(filename)[-1] is not \".pdf\":\n pid = os.getpid()\n filename = \"{0}.pdf\".format(pid)\n\n with open(filename, 'wb') as f:\n shutil.copyfileobj(req, f)\n\n return filename, True\n\n elif is_file_like(path_or_buffer):\n pid = os.getpid()\n filename = \"{0}.pdf\".format(pid)\n\n with open(filename, 'wb') as f:\n shutil.copyfileobj(path_or_buffer, f)\n\n return filename, True\n\n # File path case\n else:\n return os.path.expanduser(path_or_buffer), False", "docstring": "Ensure localize target file.\n\n If the target file is remote, this function fetches into local storage.\n\nArgs:\n path (str):\n File path or file like object or URL of target file.\n\nReturns:\n filename (str): file name in local storage\n temporary_file_flag (bool): temporary file flag", "source": "juraj_google_style"} -{"code": "def __init__(self, key_path):\n\n super(WindowsRegistryKeyPathFilter, self).__init__()\n\n key_path.rstrip('\\\\')\n self._key_path = key_path\n\n key_path = key_path.upper()\n self._key_path_upper = key_path\n\n self._wow64_key_path = None\n self._wow64_key_path_upper = None\n\n if key_path.startswith(self._CONTROL_SET_PREFIX.upper()):\n self._key_path_prefix, _, self._key_path_suffix = key_path.partition(\n 'CurrentControlSet'.upper())\n\n else:\n self._key_path_prefix = None\n self._key_path_suffix = None\n\n # Handle WoW64 Windows Registry key redirection.\n # Also see:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/\n # ms724072%28v=vs.85%29.aspx\n # https://msdn.microsoft.com/en-us/library/windows/desktop/\n # aa384253(v=vs.85).aspx\n wow64_prefix = None\n for key_path_prefix in self._WOW64_PREFIXES:\n if key_path.startswith(key_path_prefix.upper()):\n wow64_prefix = key_path_prefix\n break\n\n if wow64_prefix:\n key_path_suffix = self._key_path[len(wow64_prefix):]\n if key_path_suffix.startswith('\\\\'):\n key_path_suffix = key_path_suffix[1:]\n self._wow64_key_path = '\\\\'.join([\n wow64_prefix, 'Wow6432Node', key_path_suffix])\n self._wow64_key_path_upper = self._wow64_key_path.upper()", "docstring": "Initializes a Windows Registry key filter.\n\nArgs:\n key_path (str): key path.", "source": "juraj_google_style"} -{"code": "def float_value_convert(dictin, dropfailedvalues=False):\n # type: (DictUpperBound, bool) -> Dict\n\n return 
key_value_convert(dictin, valuefn=float, dropfailedvalues=dropfailedvalues)", "docstring": "Convert values of dictionary to floats\n\nArgs:\n dictin (DictUpperBound): Input dictionary\n dropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.\n\nReturns:\n Dict: Dictionary with values converted to floats", "source": "juraj_google_style"} -{"code": "def shift(schedule: ScheduleComponent, time: int, name: str = None) -> Schedule:\n\n if name is None:\n name = schedule.name\n return union((time, schedule), name=name)", "docstring": "Return schedule shifted by `time`.\n\nArgs:\n schedule: The schedule to shift\n time: The time to shift by\n name: Name of shifted schedule. Defaults to name of `schedule`", "source": "juraj_google_style"} -{"code": "def _update_general_statistics(a_float, dist):\n\n if not dist.count:\n dist.count = 1\n dist.maximum = a_float\n dist.minimum = a_float\n dist.mean = a_float\n dist.sumOfSquaredDeviation = 0\n else:\n old_count = dist.count\n old_mean = dist.mean\n new_mean = ((old_count * old_mean) + a_float) / (old_count + 1)\n delta_sum_squares = (a_float - old_mean) * (a_float - new_mean)\n dist.count += 1\n dist.mean = new_mean\n dist.maximum = max(a_float, dist.maximum)\n dist.minimum = min(a_float, dist.minimum)\n dist.sumOfSquaredDeviation += delta_sum_squares", "docstring": "Adds a_float to distribution, updating the statistics fields.\n\nArgs:\n a_float (float): a new value\n dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\n the Distribution being updated", "source": "juraj_google_style"} -{"code": "def to_csv(self, sep=',', path=None):\n\n data = []\n first_row = ['Stat']\n first_row.extend(self._names)\n data.append(sep.join(first_row))\n\n stats = self._stats()\n\n for stat in stats:\n k, n, f = stat\n # blank row\n if k is None:\n row = [''] * len(data[0])\n data.append(sep.join(row))\n continue\n\n row = [n]\n for key in self._names:\n raw = getattr(self[key], k)\n if f is None:\n row.append(raw)\n elif f == 'p':\n row.append(fmtp(raw))\n elif f == 'n':\n row.append(fmtn(raw))\n elif f == 'dt':\n row.append(raw.strftime('%Y-%m-%d'))\n else:\n raise NotImplementedError('unsupported format %s' % f)\n data.append(sep.join(row))\n\n res = '\\n'.join(data)\n\n if path is not None:\n with open(path, 'w') as fl:\n fl.write(res)\n else:\n return res", "docstring": "Returns a CSV string with appropriate formatting.\n If path is not None, the string will be saved to file\n at path.\n\nArgs:\n * sep (char): Separator\n * path (str): If None, CSV string returned. 
Else file\n written to specified path.", "source": "juraj_google_style"} -{"code": "def replace_in_list(stringlist: Iterable[str],\n replacedict: Dict[str, str]) -> List[str]:\n\n newlist = []\n for fromstring in stringlist:\n newlist.append(multiple_replace(fromstring, replacedict))\n return newlist", "docstring": "Returns a list produced by applying :func:`multiple_replace` to every\n string in ``stringlist``.\n\nArgs:\n stringlist: list of source strings\n replacedict: dictionary mapping \"original\" to \"replacement\" strings\n\nReturns:\n list of final strings", "source": "juraj_google_style"} -{"code": "def set_lacp_mode(self, name, mode):\n\n if mode not in ['on', 'passive', 'active']:\n return False\n\n grpid = re.search(r'(\\d+)', name).group()\n\n remove_commands = list()\n add_commands = list()\n\n for member in self.get_members(name):\n remove_commands.append('interface %s' % member)\n remove_commands.append('no channel-group %s' % grpid)\n add_commands.append('interface %s' % member)\n add_commands.append('channel-group %s mode %s' % (grpid, mode))\n\n return self.configure(remove_commands + add_commands)", "docstring": "Configures the LACP mode of the member interfaces\n\nArgs:\n name(str): The Port-Channel interface name to configure the\n LACP mode\n\n mode(str): The LACP mode to configure the member interfaces to.\n Valid values are 'on, 'passive', 'active'\n\nReturns:\n True if the operation succeeds otherwise False", "source": "juraj_google_style"} -{"code": "def print_overwrite(*args, **kwargs):\n\n kwargs.setdefault('file', sys.stdout)\n kwargs.setdefault('end', '')\n delay = None\n with suppress(KeyError):\n delay = kwargs.pop('delay')\n erase_line()\n # Move to the beginning of the line.\n move_column(1, file=kwargs['file'])\n if delay is None:\n print(*args, **kwargs)\n else:\n for c in kwargs.get('sep', ' ').join(str(a) for a in args):\n kwargs['file'].write(c)\n kwargs['file'].flush()\n sleep(delay)\n if kwargs['end']:\n kwargs['file'].write(kwargs['end'])\n kwargs['file'].flush()", "docstring": "Move to the beginning of the current line, and print some text.\n\nArgs:\n Same as `print()`.\n\n Keyword Arguments:\n Same as `print()`, except `end` defaults to '' (empty str),\n and these:\n delay : Time in seconds between character writes.", "source": "juraj_google_style"} -{"code": "def reverse_transform(self, column):\n\n self.check_data_type()\n\n return pd.DataFrame({self.col_name: np.log(column[self.col_name])})", "docstring": "Applies the natural logarithm function to turn positive values into real ranged values.\n\nArgs:\n column (pandas.DataFrame): Data to transform.\n\nReturns:\n pd.DataFrame", "source": "juraj_google_style"} -{"code": "def set_spacing(self, space):\n\n self.figure.spacing = space\n if 'subplots_adjust_kwargs' not in self.figure.__dict__:\n self.figure.subplots_adjust_kwargs = {}\n if space == 'wide':\n self.figure.subplots_adjust_kwargs['hspace'] = 0.3\n self.figure.subplots_adjust_kwargs['wspace'] = 0.3\n else:\n self.figure.subplots_adjust_kwargs['hspace'] = 0.0\n self.figure.subplots_adjust_kwargs['wspace'] = 0.0\n\n return", "docstring": "Set the figure spacing.\n\n Sets whether in general there is space between subplots.\n If all axes are shared, this can be `tight`. Default in code is `wide`.\n\n The main difference is the tick labels extend to the ends if space==`wide`.\n If space==`tight`, the edge tick labels are cut off for clearity.\n\nArgs:\n space (str): Sets spacing for subplots. 
Either `wide` or `tight`.", "source": "juraj_google_style"} -{"code": "def _normalize_mlengine_job_id(job_id):\n\n\n # Add a prefix when a job_id starts with a digit or a template\n match = re.search(r'\\d|\\{{2}', job_id)\n if match and match.start() == 0:\n job = 'z_{}'.format(job_id)\n else:\n job = job_id\n\n # Clean up 'bad' characters except templates\n tracker = 0\n cleansed_job_id = ''\n for m in re.finditer(r'\\{{2}.+?\\}{2}', job):\n cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_',\n job[tracker:m.start()])\n cleansed_job_id += job[m.start():m.end()]\n tracker = m.end()\n\n # Clean up last substring or the full string if no templates\n cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])\n\n return cleansed_job_id", "docstring": "Replaces invalid MLEngine job_id characters with '_'.\n\n This also adds a leading 'z' in case job_id starts with an invalid\n character.\n\nArgs:\n job_id: A job_id str that may have invalid characters.\n\nReturns:\n A valid job_id representation.", "source": "juraj_google_style"} -{"code": "def buffer(self, data=None, *, reserve=0, dynamic=False) -> Buffer:\n\n\n if type(reserve) is str:\n reserve = mgl.strsize(reserve)\n\n res = Buffer.__new__(Buffer)\n res.mglo, res._size, res._glo = self.mglo.buffer(data, reserve, dynamic)\n res._dynamic = dynamic\n res.ctx = self\n res.extra = None\n return res", "docstring": "Create a :py:class:`Buffer` object.\n\nArgs:\n data (bytes): Content of the new buffer.\n\n Keyword Args:\n reserve (int): The number of bytes to reserve.\n dynamic (bool): Treat buffer as dynamic.\n\nReturns:\n :py:class:`Buffer` object", "source": "juraj_google_style"} -{"code": "def construct_channel(self, **kwargs):\n\n if self.compatibility_mode:\n # Constuct channel (using function from imported chef script)\n config.LOGGER.info(\"Populating channel... \")\n channel = self.chef_module.construct_channel(**kwargs)\n return channel\n else:\n raise NotImplementedError('Your chef class must overrride the construct_channel method')", "docstring": "Calls chef script's construct_channel method. 
Used only in compatibility mode.\n\nArgs:\n kwargs (dict): additional keyword arguments that `uploadchannel` received\n Returns: channel populated from construct_channel method", "source": "juraj_google_style"} -{"code": "def enqueue_mod(self, dn, mod):\n\n # mark for update\n if dn not in self.__pending_mod_dn__:\n self.__pending_mod_dn__.append(dn)\n self.__mod_queue__[dn] = []\n\n self.__mod_queue__[dn].append(mod)", "docstring": "Enqueue a LDAP modification.\n\nArgs:\n dn -- the distinguished name of the object to modify\n mod -- an ldap modfication entry to enqueue", "source": "juraj_google_style"} -{"code": "def iterate_references(config, to=None):\n\n for value in _iterate_flattened_values(config):\n if isinstance(value, ConfigurableReference):\n if to is None or value.configurable.fn_or_cls == to:\n yield value", "docstring": "Provides an iterator over references in the given config.\n\nArgs:\n config: A dictionary mapping scoped configurable names to argument bindings.\n to: If supplied, only yield references whose `configurable_fn` matches `to`.\n\nYields:\n `ConfigurableReference` instances within `config`, maybe restricted to those\n matching the `to` parameter if it is supplied.", "source": "juraj_google_style"} -{"code": "def wait_for_vacancy(self, processor_type):\n\n\n with self._condition:\n self._condition.wait_for(lambda: (\n self._processor_available(processor_type)\n or self._cancelled_event.is_set()))\n if self._cancelled_event.is_set():\n raise WaitCancelledException()\n processor = self[processor_type].next_processor()\n return processor", "docstring": "Waits for a particular processor type to have the capacity to\n handle additional transactions or until is_cancelled is True.\n\nArgs:\n processor_type (ProcessorType): The family, and version of\n the transaction processor.\n\nReturns:\n Processor", "source": "juraj_google_style"} -{"code": "def run_attack_work(self, work_id):\n\n adv_batch_id = (\n self.attack_work.work[work_id]['output_adversarial_batch_id'])\n adv_batch = self.adv_batches[adv_batch_id]\n dataset_batch_id = adv_batch['dataset_batch_id']\n submission_id = adv_batch['submission_id']\n epsilon = self.dataset_batches[dataset_batch_id]['epsilon']\n logging.info('Attack work piece: '\n 'dataset_batch_id=\"%s\" submission_id=\"%s\" '\n 'epsilon=%d', dataset_batch_id, submission_id, epsilon)\n if submission_id in self.blacklisted_submissions:\n raise WorkerError('Blacklisted submission')\n # get attack\n attack = AttackSubmission(submission_id, self.submissions,\n self.storage_bucket)\n attack.download()\n # prepare input\n input_dir = os.path.join(LOCAL_DATASET_DIR, dataset_batch_id)\n if attack.type == TYPE_TARGETED:\n # prepare file with target classes\n target_class_filename = os.path.join(input_dir, 'target_class.csv')\n self.dataset_meta.save_target_classes_for_batch(target_class_filename,\n self.dataset_batches,\n dataset_batch_id)\n # prepare output directory\n if os.path.exists(LOCAL_OUTPUT_DIR):\n sudo_remove_dirtree(LOCAL_OUTPUT_DIR)\n os.mkdir(LOCAL_OUTPUT_DIR)\n if os.path.exists(LOCAL_PROCESSED_OUTPUT_DIR):\n shutil.rmtree(LOCAL_PROCESSED_OUTPUT_DIR)\n os.mkdir(LOCAL_PROCESSED_OUTPUT_DIR)\n if os.path.exists(LOCAL_ZIPPED_OUTPUT_DIR):\n shutil.rmtree(LOCAL_ZIPPED_OUTPUT_DIR)\n os.mkdir(LOCAL_ZIPPED_OUTPUT_DIR)\n # run attack\n elapsed_time_sec = attack.run(input_dir, LOCAL_OUTPUT_DIR, epsilon)\n if attack.type == TYPE_TARGETED:\n # remove target class file\n os.remove(target_class_filename)\n # enforce epsilon and compute hashes\n 
image_hashes = eval_lib.enforce_epsilon_and_compute_hash(\n input_dir, LOCAL_OUTPUT_DIR, LOCAL_PROCESSED_OUTPUT_DIR, epsilon)\n if not image_hashes:\n logging.warning('No images saved by the attack.')\n return elapsed_time_sec, submission_id\n # write images back to datastore\n # rename images and add information to adversarial batch\n for clean_image_id, hash_val in iteritems(image_hashes):\n # we will use concatenation of batch_id and image_id\n # as adversarial image id and as a filename of adversarial images\n adv_img_id = adv_batch_id + '_' + clean_image_id\n # rename the image\n os.rename(\n os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, clean_image_id + '.png'),\n os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, adv_img_id + '.png'))\n # populate values which will be written to datastore\n image_path = '{0}/adversarial_images/{1}/{1}.zip/{2}.png'.format(\n self.round_name, adv_batch_id, adv_img_id)\n # u'' + foo is a a python 2/3 compatible way of casting foo to unicode\n adv_batch['images'][adv_img_id] = {\n 'clean_image_id': u'' + str(clean_image_id),\n 'image_path': u'' + str(image_path),\n 'image_hash': u'' + str(hash_val),\n }\n # archive all images and copy to storage\n zipped_images_filename = os.path.join(LOCAL_ZIPPED_OUTPUT_DIR,\n adv_batch_id + '.zip')\n try:\n logging.debug('Compressing adversarial images to %s',\n zipped_images_filename)\n shell_call([\n 'zip', '-j', '-r', zipped_images_filename,\n LOCAL_PROCESSED_OUTPUT_DIR])\n except subprocess.CalledProcessError as e:\n raise WorkerError('Can''t make archive from adversarial iamges', e)\n # upload archive to storage\n dst_filename = '{0}/adversarial_images/{1}/{1}.zip'.format(\n self.round_name, adv_batch_id)\n logging.debug(\n 'Copying archive with adversarial images to %s', dst_filename)\n self.storage_client.new_blob(dst_filename).upload_from_filename(\n zipped_images_filename)\n # writing adv batch to datastore\n logging.debug('Writing adversarial batch to datastore')\n self.adv_batches.write_single_batch_images_to_datastore(adv_batch_id)\n return elapsed_time_sec, submission_id", "docstring": "Runs one attack work.\n\nArgs:\n work_id: ID of the piece of work to run\n\nReturns:\n elapsed_time_sec, submission_id - elapsed time and id of the submission\n\nRaises:\n WorkerError: if error occurred during execution.", "source": "juraj_google_style"} -{"code": "def normalize_to(self, comp, factor=1):\n\n scale_factor = abs(1 / self._coeffs[self._all_comp.index(comp)]\n * factor)\n self._coeffs = [c * scale_factor for c in self._coeffs]", "docstring": "Normalizes the reaction to one of the compositions.\n By default, normalizes such that the composition given has a\n coefficient of 1. Another factor can be specified.\n\nArgs:\n comp (Composition): Composition to normalize to\n factor (float): Factor to normalize to. 
Defaults to 1.", "source": "juraj_google_style"} -{"code": "def db020(self, value=None):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `db020`'.format(value))\n\n self._db020 = value", "docstring": "Corresponds to IDD Field `db020`\n mean coincident wet-bulb temperature to\n Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)\n\nArgs:\n value (float): value for IDD Field `db020`\n Unit: C\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def encodeAllRecords(self, records=None, toBeAdded=True):\n\n if records is None:\n records = self.getAllRecords()\n if self.verbosity>0: print 'Encoding', len(records), 'records.'\n encodings = [self.encodeRecord(record, toBeAdded) for record in records]\n\n return encodings", "docstring": "Encodes a list of records.\n Parameters:\n --------------------------------------------------------------------\n records: One or more records. (i,j)th element of this 2D array\n specifies the value at field j of record i.\n If unspecified, records previously generated and stored are\n used.\n toBeAdded: Whether the encodings corresponding to the record are added to\n the corresponding fields", "source": "juraj_google_style"} -{"code": "def setup(self, paths=None): # pylint: disable=arguments-differ\n\n if not paths:\n self.state.add_error(\n 'No `paths` argument provided in recipe, bailing', critical=True)\n else:\n self._paths = [path.strip() for path in paths.strip().split(',')]", "docstring": "Sets up the _paths attribute.\n\nArgs:\n paths: Comma-separated list of strings representing the paths to collect.", "source": "juraj_google_style"} -{"code": "def SetDecodedStreamSize(self, decoded_stream_size):\n\n if self._is_open:\n raise IOError('Already open.')\n\n if decoded_stream_size < 0:\n raise ValueError((\n 'Invalid decoded stream size: {0:d} value out of '\n 'bounds.').format(decoded_stream_size))\n\n self._decoded_stream_size = decoded_stream_size", "docstring": "Sets the decoded stream size.\n\n This function is used to set the decoded stream size if it can be\n determined separately.\n\nArgs:\n decoded_stream_size (int): size of the decoded stream in bytes.\n\nRaises:\n IOError: if the file-like object is already open.\n OSError: if the file-like object is already open.\n ValueError: if the decoded stream size is invalid.", "source": "juraj_google_style"} -{"code": "def send(self, **req_kwargs):\n\n i = 0\n while True:\n response = self._send(**req_kwargs).json()\n if 'error' not in response:\n break\n\n error = response['error']\n if error['code'] != 401:\n raise exception.APIException(error['code'], error)\n\n if i >= self.RETRY_CNT:\n raise exception.APIException(error['code'], error)\n\n logger.info('Refreshing access token')\n self._auth.refresh()\n i += 1\n\n return response", "docstring": "Send an authenticated request to a Google API.\n Automatically retries if the access token has expired.\n\nArgs:\n **req_kwargs: Arbitrary keyword arguments to pass to Requests.\n\nReturns:\n dict: The parsed JSON response.\n\nRaises:\n APIException: If the server returns an error.\n LoginException: If :py:meth:`login` has not been called.", "source": "juraj_google_style"} -{"code": "def supported_cache_type(types):\n\n if isinstance(types, str):\n types = 
[typ.strip() for typ in types.split(\",\")]\n for typ in types:\n if typ not in [\"reflink\", \"hardlink\", \"symlink\", \"copy\"]:\n return False\n return True", "docstring": "Checks if link type config option has a valid value.\n\nArgs:\n types (list/string): type(s) of links that dvc should try out.", "source": "juraj_google_style"} -{"code": "def gradients(ys, xs, grad_ys=None):\n\n graph = ys[0].graph\n if not grad_ys:\n grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys]\n # figure out what Tensors are downstream of xs\n downstream = set(xs)\n for op in graph.operations:\n if op.has_gradient:\n if set(op.inputs) & downstream:\n downstream |= set(op.outputs)\n tensor_to_gradient = dict(zip(ys, grad_ys))\n for op in graph.operations[::-1]:\n grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs]\n if op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream):\n with tf.variable_scope(op.name + \"/gradients\"):\n input_grads = op.gradient(grad_outputs)\n for inp, grad in zip(op.inputs, input_grads):\n if inp in downstream and grad is not None:\n if inp in tensor_to_gradient:\n tensor_to_gradient[inp] += grad\n else:\n tensor_to_gradient[inp] = grad\n return [tensor_to_gradient.get(x, None) for x in xs]", "docstring": "Compute gradients in dtf.\n\nArgs:\n ys: a list of Tensors\n xs: a list of Tensors\n grad_ys: an optional list of Tensors\n\nReturns:\n grad_xs: a list of Tensors", "source": "juraj_google_style"} -{"code": "def download_mmcif_header(pdb_id, outdir='', force_rerun=False):\n\n # TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this\n # method in biopython. extra file types have not been added to biopython download yet\n\n pdb_id = pdb_id.lower()\n file_type = 'cif'\n folder = 'header'\n outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))\n\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)\n urlretrieve(download_link, outfile)\n log.debug('{}: saved header file'.format(outfile))\n else:\n log.debug('{}: header file already saved'.format(outfile))\n\n return outfile", "docstring": "Download a mmCIF header file from the RCSB PDB by ID.\n\nArgs:\n pdb_id: PDB ID\n outdir: Optional output directory, default is current working directory\n force_rerun: If the file should be downloaded again even if it exists\n\nReturns:\n str: Path to outfile", "source": "juraj_google_style"} -{"code": "def resolve_json_id(self, json_id, allow_no_match=False):\n\n if not json_id:\n return None\n\n if json_id.startswith('~'):\n # keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import\n if json_id not in self.pseudo_id_cache:\n spec = get_pseudo_id(json_id)\n spec = self.limit_spec(spec)\n\n if isinstance(spec, Q):\n objects = self.model_class.objects.filter(spec)\n else:\n objects = self.model_class.objects.filter(**spec)\n ids = {each.id for each in objects}\n if len(ids) == 1:\n self.pseudo_id_cache[json_id] = ids.pop()\n errmsg = None\n elif not ids:\n errmsg = 'cannot resolve pseudo id to {}: {}'.format(\n self.model_class.__name__, json_id)\n else:\n errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(\n self.model_class.__name__, json_id, ids)\n\n # either raise or log error\n if errmsg:\n if not allow_no_match:\n raise UnresolvedIdError(errmsg)\n else:\n self.error(errmsg)\n self.pseudo_id_cache[json_id] = None\n\n # return the 
cached object\n return self.pseudo_id_cache[json_id]\n\n # get the id that the duplicate points to, or use self\n json_id = self.duplicates.get(json_id, json_id)\n\n try:\n return self.json_to_db_id[json_id]\n except KeyError:\n raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))", "docstring": "Given an id found in scraped JSON, return a DB id for the object.\n\n params:\n json_id: id from json\n allow_no_match: just return None if id can't be resolved\n\nReturns:\n database id\n\nRaises:\n ValueError if id couldn't be resolved", "source": "juraj_google_style"} -{"code": "def ColorWithHue(self, hue):\n\n h, s, l = self.__hsl\n return Color((hue, s, l), 'hsl', self.__a, self.__wref)", "docstring": "Create a new instance based on this one with a new hue.\n\n Parameters:\n :hue:\n The hue of the new color [0...360].\n\nReturns:\n A grapefruit.Color instance.\n\n >>> Color.NewFromHsl(30, 1, 0.5).ColorWithHue(60)\n (1.0, 1.0, 0.0, 1.0)\n >>> Color.NewFromHsl(30, 1, 0.5).ColorWithHue(60).hsl\n (60, 1, 0.5)", "source": "juraj_google_style"} -{"code": "def connect(self, component):\n\n if not isinstance(component, ThreadPool):\n raise TypeError('\"component\" must be a ThreadPool object')\n component.in_queue = self.out_queue\n return component", "docstring": "Connect two ThreadPools.\n\n The ``in_queue`` of the second pool will be set as the ``out_queue`` of\n the current pool, thus all the output will be input to the second pool.\n\nArgs:\n component (ThreadPool): the ThreadPool to be connected.\n\nReturns:\n ThreadPool: the modified second ThreadPool.", "source": "juraj_google_style"} -{"code": "def lighten(self, color, diff, *args):\n\n if color and diff:\n return self._ophsl(color, diff, 1, operator.add)\n raise ValueError('Illegal color values')", "docstring": "Lighten a color\n\nArgs:\n color (str): color\n diff (str): percentage\n\nReturns:\n str", "source": "juraj_google_style"} -{"code": "def load_ipython_extension(shell):\n\n\n # Inject our user agent on all requests by monkey-patching a wrapper around httplib2.Http.request.\n\n def _request(self, uri, method=\"GET\", body=None, headers=None,\n redirections=_httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):\n if headers is None:\n headers = {}\n headers['user-agent'] = 'GoogleCloudDataLab/1.0'\n return _orig_request(self, uri, method=method, body=body, headers=headers,\n redirections=redirections, connection_type=connection_type)\n\n _httplib2.Http.request = _request\n\n # Similarly for the requests library.\n\n def _init_session(self):\n _orig_init(self)\n self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'\n\n _requests.Session.__init__ = _init_session\n\n # Be more tolerant with magics. If the user specified a cell magic that doesn't\n # exist and an empty cell body but a line magic with that name exists, run that\n # instead. 
Conversely, if the user specified a line magic that doesn't exist but\n # a cell magic exists with that name, run the cell magic with an empty body.\n\n def _run_line_magic(self, magic_name, line):\n fn = self.find_line_magic(magic_name)\n if fn is None:\n cm = self.find_cell_magic(magic_name)\n if cm:\n return _run_cell_magic(self, magic_name, line, None)\n return _orig_run_line_magic(self, magic_name, line)\n\n def _run_cell_magic(self, magic_name, line, cell):\n if cell is None or len(cell) == 0 or cell.isspace():\n fn = self.find_line_magic(magic_name)\n if fn:\n return _orig_run_line_magic(self, magic_name, line)\n # IPython will complain if cell is empty string but not if it is None\n cell = None\n return _orig_run_cell_magic(self, magic_name, line, cell)\n\n _shell.InteractiveShell.run_cell_magic = _run_cell_magic\n _shell.InteractiveShell.run_line_magic = _run_line_magic\n\n # Define global 'project_id' and 'set_project_id' functions to manage the default project ID. We\n # do this conditionally in a try/catch # to avoid the call to Context.default() when running tests\n # which mock IPython.get_ipython().\n\n def _get_project_id():\n try:\n return google.datalab.Context.default().project_id\n except Exception:\n return None\n\n def _set_project_id(project_id):\n context = google.datalab.Context.default()\n context.set_project_id(project_id)\n try:\n from datalab.context import Context as _old_context\n _old_context.default().set_project_id(project_id)\n except ImportError:\n # If the old library is not loaded, then we don't have to do anything\n pass\n\n try:\n if 'datalab_project_id' not in _IPython.get_ipython().user_ns:\n _IPython.get_ipython().user_ns['datalab_project_id'] = _get_project_id\n _IPython.get_ipython().user_ns['set_datalab_project_id'] = _set_project_id\n except TypeError:\n pass", "docstring": "Called when the extension is loaded.\n\nArgs:\n shell - (NotebookWebApplication): handle to the Notebook interactive shell instance.", "source": "juraj_google_style"} -{"code": "def bold(self, action):\n\n if action =='on':\n action = 'E'\n elif action == 'off':\n action = 'F'\n else:\n raise RuntimeError('Invalid action for function bold. Options are on and off')\n self.send(chr(27)+action)", "docstring": "Enable/cancel bold printing\n\nArgs:\n action: Enable or disable bold printing. 
Options are 'on' and 'off'\n\nReturns:\n None\n\nRaises:\n RuntimeError: Invalid action.", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.BatchWriteSpans = channel.unary_unary(\n \"/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans\",\n request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_tracing__pb2.BatchWriteSpansRequest.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.CreateSpan = channel.unary_unary(\n \"/google.devtools.cloudtrace.v2.TraceService/CreateSpan\",\n request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.SerializeToString,\n response_deserializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def _check_consistent_returns(self, node):\n\n # explicit return statements are those with a not None value\n explicit_returns = [\n _node for _node in self._return_nodes[node.name] if _node.value is not None\n ]\n if not explicit_returns:\n return\n if len(explicit_returns) == len(\n self._return_nodes[node.name]\n ) and self._is_node_return_ended(node):\n return\n self.add_message(\"inconsistent-return-statements\", node=node)", "docstring": "Check that all return statements inside a function are consistent.\n\n Return statements are consistent if:\n - all returns are explicit and if there is no implicit return;\n - all returns are empty and if there is, possibly, an implicit return.\n\nArgs:\n node (astroid.FunctionDef): the function holding the return statements.", "source": "juraj_google_style"} -{"code": "def __init__(self, rr, table='services'):\n\n self.rr = rr\n self.table = table\n self._ensure_table()", "docstring": "Initialize the service registry.\n\n Creates the database table if it does not exist.\n\nArgs:\n rr (doublethink.Rethinker): a doublethink.Rethinker, which must\n have `dbname` set", "source": "juraj_google_style"} -{"code": "def _is_propertyable(\n names, # type: List[str]\n attrs, # type: Dict[str, Any]\n annotations, # type: Dict[str, type]\n attr, # Dict[str, Any]\n):\n # type: (...) 
-> bool\n\n return (\n attr in annotations\n and not attr.startswith(\"_\")\n and not attr.isupper()\n and \"__{}\".format(attr) not in names\n and not isinstance(getattr(attrs, attr, None), types.MethodType)\n )", "docstring": "Determine if an attribute can be replaced with a property.\n\nArgs:\n names: The complete list of all attribute names for the class.\n attrs: The attribute dict returned by __prepare__.\n annotations: A mapping of all defined annotations for the class.\n attr: The attribute to test.\n\nReturns:\n True if the attribute can be replaced with a property; else False.", "source": "juraj_google_style"} -{"code": "def ping(token_or_hostname=None):\n\n if not token_or_hostname:\n return \"Pong!\"\n node = nago.core.get_node(token_or_hostname)\n if not node and token_or_hostname in ('master', 'server'):\n token_or_hostname = nago.settings.get_option('server')\n node = nago.core.get_node(token_or_hostname)\n if not node:\n try:\n address = socket.gethostbyname(token_or_hostname)\n node = nago.core.Node()\n node['host_name'] = token_or_hostname\n node['address'] = address\n node['access'] = 'node'\n if token_or_hostname == nago.settings.get_option('server'):\n node['access'] = 'master'\n node.save()\n except Exception:\n raise Exception(\"'%s' was not found in list of known hosts, and does not resolve to a valid address\" % token_or_hostname)\n return node.send_command('nodes', 'ping')", "docstring": "Send an echo request to a nago host.\n\nArgs:\n token_or_host_name -- The remote node to ping\n If node is not provided, simply return pong\n You can use the special nodenames \"server\" or \"master\"", "source": "juraj_google_style"} -{"code": "def uniprot_sites(uniprot_id):\n\n\n r = requests.post('http://www.uniprot.org/uniprot/%s.gff' % uniprot_id)\n gff = StringIO(r.content.decode('utf-8'))\n\n feats = list(GFF.parse(gff))\n if len(feats) > 1:\n log.warning('Too many sequences in GFF')\n else:\n return feats[0].features", "docstring": "Retrieve a list of UniProt sites parsed from the feature file\n\n Sites are defined here: http://www.uniprot.org/help/site and here: http://www.uniprot.org/help/function_section\n\nArgs:\n uniprot_id: Valid UniProt ID\n\n Returns:", "source": "juraj_google_style"} -{"code": "def Analyze(self, hashes):\n\n hash_analyses = []\n for digest in hashes:\n json_response = self._QueryHash(digest)\n hash_analysis = interface.HashAnalysis(digest, json_response)\n hash_analyses.append(hash_analysis)\n\n return hash_analyses", "docstring": "Looks up hashes in Viper using the Viper HTTP API.\n\nArgs:\n hashes (list[str]): hashes to look up.\n\nReturns:\n list[HashAnalysis]: hash analysis.\n\nRaises:\n RuntimeError: If no host has been set for Viper.", "source": "juraj_google_style"} -{"code": "def _Open(self, path_spec=None, mode='rb'):\n\n if not path_spec:\n raise ValueError('Missing path specification.')\n\n if not path_spec.HasParent():\n raise errors.PathSpecError(\n 'Unsupported path specification without parent.')\n\n self._gzip_file_object = resolver.Resolver.OpenFileObject(\n path_spec.parent, resolver_context=self._resolver_context)\n file_size = self._gzip_file_object.get_size()\n\n self._gzip_file_object.seek(0, os.SEEK_SET)\n\n uncompressed_data_offset = 0\n next_member_offset = 0\n\n while next_member_offset < file_size:\n member = gzipfile.GzipMember(\n self._gzip_file_object, next_member_offset, uncompressed_data_offset)\n uncompressed_data_offset = (\n uncompressed_data_offset + member.uncompressed_data_size)\n 
self._members_by_end_offset[uncompressed_data_offset] = member\n self.uncompressed_data_size += member.uncompressed_data_size\n next_member_offset = member.member_end_offset", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\n path_spec (Optional[PathSpec]): path specification.\n mode (Optional[str]): file access mode.\n\nRaises:\n AccessError: if the access to open the file was denied.\n IOError: if the file-like object could not be opened.\n OSError: if the file-like object could not be opened.\n PathSpecError: if the path specification is incorrect.\n ValueError: if the path specification is invalid.", "source": "juraj_google_style"} -{"code": "def compute_classification_results(self, adv_batches, dataset_batches,\n dataset_meta, defense_work=None):\n\n class_batch_to_work = {}\n if defense_work:\n for v in itervalues(defense_work.work):\n class_batch_to_work[v['output_classification_batch_id']] = v\n\n # accuracy_matrix[defense_id, attack_id] = num correctly classified\n accuracy_matrix = ResultMatrix()\n # error_matrix[defense_id, attack_id] = num misclassfied\n error_matrix = ResultMatrix()\n # hit_target_class_matrix[defense_id, attack_id] = num hit target class\n hit_target_class_matrix = ResultMatrix()\n # processed_images_count[defense_id] = num processed images by defense\n processed_images_count = {}\n\n total_count = len(self.data)\n processed_count = 0\n logging.info('Processing %d files with classification results',\n len(self.data))\n for k, v in iteritems(self.data):\n if processed_count % 100 == 0:\n logging.info('Processed %d out of %d classification results',\n processed_count, total_count)\n processed_count += 1\n defense_id = v['submission_id']\n adv_batch = adv_batches.data[v['adversarial_batch_id']]\n attack_id = adv_batch['submission_id']\n\n work_item = class_batch_to_work.get(k)\n required_work_stats = ['stat_correct', 'stat_error', 'stat_target_class',\n 'stat_num_images']\n if work_item and work_item['error']:\n # ignore batches with error\n continue\n if work_item and all(work_item.get(i) is not None\n for i in required_work_stats):\n count_correctly_classified = work_item['stat_correct']\n count_errors = work_item['stat_error']\n count_hit_target_class = work_item['stat_target_class']\n num_images = work_item['stat_num_images']\n else:\n logging.warning('Recomputing accuracy for classification batch %s', k)\n (count_correctly_classified, count_errors, count_hit_target_class,\n num_images) = analyze_one_classification_result(\n self._storage_client, v['result_path'], adv_batch, dataset_batches,\n dataset_meta)\n\n # update accuracy and hit target class\n accuracy_matrix[defense_id, attack_id] += count_correctly_classified\n error_matrix[defense_id, attack_id] += count_errors\n hit_target_class_matrix[defense_id, attack_id] += count_hit_target_class\n # update number of processed images\n processed_images_count[defense_id] = (\n processed_images_count.get(defense_id, 0) + num_images)\n return (accuracy_matrix, error_matrix, hit_target_class_matrix,\n processed_images_count)", "docstring": "Computes classification results.\n\nArgs:\n adv_batches: instance of AversarialBatches\n dataset_batches: instance of DatasetBatches\n dataset_meta: instance of DatasetMetadata\n defense_work: instance of DefenseWorkPieces\n\nReturns:\n accuracy_matrix, error_matrix, hit_target_class_matrix,\n processed_images_count", "source": "juraj_google_style"} -{"code": "def replaceFA(self, faDataType: int, xml: str):\n\n self.client.replaceFA(faDataType, 
xml)", "docstring": "Replaces Financial Advisor's settings.\n\nArgs:\n faDataType: See :meth:`.requestFA`.\n xml: The XML-formatted configuration string.", "source": "juraj_google_style"} -{"code": "def substitute(dict_, source):\n\n d_esc = (re.escape(k) for k in dict_.keys())\n pattern = re.compile('|'.join(d_esc))\n return pattern.sub(lambda x: dict_[x.group()], source)", "docstring": "Perform re.sub with the patterns in the given dict\n\nArgs:\n dict_: {pattern: repl}\n source: str", "source": "juraj_google_style"} -{"code": "def is_unknown(input, model_file=None, model_proto=None, name=None):\n\n\n return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(\n input, model_file=model_file, model_proto=model_proto, name=name,\n piece_type=0)", "docstring": "Returns true if input id is unknown piece.\n\nArgs:\n input: An arbitrary tensor of int32.\n model_file: The sentencepiece model file path.\n model_proto: The sentencepiece model serialized proto.\n Either `model_file` or `model_proto` must be set.\n name: The name argument that is passed to the op function.\n\nReturns:\n A tensor of bool with the same shape as input.", "source": "juraj_google_style"} -{"code": "def _create_test(self, testcase, function_name, sdkobject, attribute=None):\n\n func = getattr(testcase, function_name)\n object_name = sdkobject.rest_name\n\n # Name that will be displayed\n test_name = \"\"\n rep = dict()\n rep[\"object\"] = object_name\n\n if attribute:\n rep[\"attribute\"] = attribute.local_name\n\n rep = dict((re.escape(k), v) for k, v in rep.items())\n pattern = re.compile(\"|\".join(list(rep.keys())))\n\n if function_name.startswith(\"_\"):\n function_name = function_name[1:]\n\n test_name = pattern.sub(lambda m: rep[re.escape(m.group(0))], function_name)\n\n # Prepare and add test method to test suite\n test_func = None\n\n if attribute:\n test_func = lambda self, attribute=attribute: func(self, attribute)\n else:\n test_func = lambda self: func(self)\n\n test_func.__name__ = str(test_name)\n\n return (test_name, test_func)", "docstring": "Create a test method for the sdkoject\n\nArgs:\n testcase: the testcase to that should manage the method\n function_name: the name of the method in the testcase\n sdkobject: the object that should be tested\n attribute: the attribute information if necessary\n\nReturns:\n It returns a tuple (name, method) that represents the test method", "source": "juraj_google_style"} -{"code": "def pil_image(self, fill_value=None, compute=True):\n\n channels, mode = self.finalize(fill_value)\n res = channels.transpose('y', 'x', 'bands')\n img = dask.delayed(PILImage.fromarray)(np.squeeze(res.data), mode)\n if compute:\n img = img.compute()\n return img", "docstring": "Return a PIL image from the current image.\n\nArgs:\n fill_value (int or float): Value to use for NaN null values.\n See :meth:`~trollimage.xrimage.XRImage.finalize` for more\n info.\n compute (bool): Whether to return a fully computed PIL.Image\n object (True) or return a dask Delayed object representing\n the Image (False). 
This is True by default.", "source": "juraj_google_style"} -{"code": "def filter_by_dates(files_or_folders: list, date_fmt=DATE_FMT) -> list:\n\n r = re.compile(f'.*{date_fmt}.*')\n return list(filter(\n lambda vv: r.match(vv.replace('\\\\', '/').split('/')[-1]) is not None,\n files_or_folders,\n ))", "docstring": "Filter files or dates by date patterns\n\nArgs:\n files_or_folders: list of files or folders\n date_fmt: date format\n\nReturns:\n list", "source": "juraj_google_style"} -{"code": "def __init__(self, optimizer, scope=None, summary_labels=(), **kwargs):\n\n self.tf_optimizer_type = optimizer\n self.tf_optimizer = TFOptimizer.tf_optimizers[optimizer](**kwargs)\n\n super(TFOptimizer, self).__init__(scope=(scope or optimizer), summary_labels=summary_labels)", "docstring": "Creates a new optimizer instance of a TensorFlow optimizer.\n\nArgs:\n optimizer: The name of the optimizer. Must be one of the keys of the tf_optimizers dict.\n **kwargs: Arguments passed on to the TensorFlow optimizer constructor as **kwargs.", "source": "juraj_google_style"} -{"code": "def send_data(data):\n\n datalength = len(data)\n csm1 = checksum1(data, datalength)\n csm2 = checksum2(csm1)\n data.insert(0, 0xFF)\n data.insert(1, 0xFF)\n data.insert(5, csm1)\n data.insert(6, csm2)\n stringtosend = \"\"\n for i in range(len(data)):\n byteformat = '%02X' % data[i]\n stringtosend = stringtosend + \"\\\\x\" + byteformat\n\n try:\n\n SERPORT.write(stringtosend.decode('string-escape'))\n #print stringtosend\n\n except:\n raise HerkulexError(\"could not communicate with motors\")", "docstring": "Send data to herkulex\n\n Paketize & write the packet to serial port\n\nArgs:\n data (list): the data to be sent\n\nRaises:\n SerialException: Error occured while opening serial port", "source": "juraj_google_style"} -{"code": "def draw_pl_vote(m, gamma):\n\n\n localgamma = np.copy(gamma) # work on a copy of gamma\n localalts = np.arange(m) # enumeration of the candidates\n vote = []\n for j in range(m): # generate position in vote for every alternative\n\n # transform local gamma into intervals up to 1.0\n localgammaintervals = np.copy(localgamma)\n prev = 0.0\n for k in range(len(localgammaintervals)):\n localgammaintervals[k] += prev\n prev = localgammaintervals[k]\n\n selection = np.random.random() # pick random number\n\n # selection will fall into a gamma interval\n for l in range(len(localgammaintervals)): # determine position\n if selection <= localgammaintervals[l]:\n vote.append(localalts[l])\n localgamma = np.delete(localgamma, l) # remove that gamma\n localalts = np.delete(localalts, l) # remove the alternative\n localgamma /= np.sum(localgamma) # renormalize\n break\n return vote", "docstring": "Description:\n Generate a Plackett-Luce vote given the model parameters.\n Parameters:\n m: number of alternatives\n gamma: parameters of the Plackett-Luce model", "source": "juraj_google_style"} -{"code": "def write(data, file_name, worksheet_names=None):\n\n if re.search(XML_EXT_REGEX, file_name):\n return write_xml(data, file_name, worksheet_names=worksheet_names)\n elif re.search(XLSX_EXT_REGEX, file_name):\n return write_xlsx(data, file_name, worksheet_names=worksheet_names)\n elif re.search(XLS_EXT_REGEX, file_name):\n return write_xls(data, file_name, worksheet_names=worksheet_names)\n elif re.search(CSV_EXT_REGEX, file_name):\n return write_csv(data, file_name)\n else:\n return write_csv(data, file_name)", "docstring": "Writes 2D tables to file.\n\nArgs:\n data: 2D list of tables/worksheets.\n file_name: Name of 
the output file (determines type).\n worksheet_names: A list of worksheet names (optional).", "source": "juraj_google_style"} -{"code": "def adversary_assets(self, main_type, sub_type, unique_id, params=None):\n\n params = params or {}\n\n url = '/v2/{}/{}/{}/adversaryAssets'.format(main_type, sub_type, unique_id)\n for aa in self._iterate(url, params, 'adversaryAsset'):\n yield aa", "docstring": "Args:\n main_type:\n sub_type:\n unique_id:\n params:\n\n Return:", "source": "juraj_google_style"} -{"code": "def load_b26_file(file_name):\n\n # file_name = \"Z:\\Lab\\Cantilever\\Measurements\\\\tmp_\\\\a\"\n\n assert os.path.exists(file_name)\n\n with open(file_name, 'r') as infile:\n data = yaml.safe_load(infile)\n return data", "docstring": "loads a .b26 file into a dictionary\n\nArgs:\n file_name:\n\n Returns: dictionary with keys instrument, scripts, probes", "source": "juraj_google_style"} -{"code": "def submit_files(self, halt_on_error=True):\n\n # check global setting for override\n if self.halt_on_file_error is not None:\n halt_on_error = self.halt_on_file_error\n\n upload_status = []\n for xid, content_data in self._files.items():\n del self._files[xid] # win or loose remove the entry\n status = True\n\n # used for debug/testing to prevent upload of previously uploaded file\n if self.debug and xid in self.saved_xids:\n self.tcex.log.debug('skipping previously saved file {}.'.format(xid))\n continue\n\n # process the file content\n content = content_data.get('fileContent')\n if callable(content):\n content = content_data.get('fileContent')(xid)\n if content is None:\n upload_status.append({'uploaded': False, 'xid': xid})\n self.tcex.log.warning('File content was null for xid {}.'.format(xid))\n continue\n if content_data.get('type') == 'Document':\n api_branch = 'documents'\n elif content_data.get('type') == 'Report':\n api_branch = 'reports'\n\n # Post File\n url = '/v2/groups/{}/{}/upload'.format(api_branch, xid)\n headers = {'Content-Type': 'application/octet-stream'}\n params = {'owner': self._owner}\n r = self.submit_file_content('POST', url, content, headers, params, halt_on_error)\n if r.status_code == 401:\n # use PUT method if file already exists\n self.tcex.log.info('Received 401 status code using POST. 
Trying PUT to update.')\n r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error)\n self.tcex.log.debug('{} Upload URL: {}.'.format(content_data.get('type'), r.url))\n if not r.ok:\n status = False\n self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error)\n elif self.debug:\n self.saved_xids.append(xid)\n self.tcex.log.info('Status {} for file upload with xid {}.'.format(r.status_code, xid))\n upload_status.append({'uploaded': status, 'xid': xid})\n return upload_status", "docstring": "Submit Files for Documents and Reports to ThreatConnect API.\n\n Critical Errors\n\n * There is insufficient document storage allocated to this account.\n\nArgs:\n halt_on_error (bool, default:True): If True any exception will raise an error.\n\nReturns:\n dict: The upload status for each xid.", "source": "juraj_google_style"} -{"code": "def decrypt(key, ciphertext):\n\n index = 0\n decrypted = \"\"\n for char in ciphertext:\n if char in string.punctuation + string.whitespace + string.digits:\n decrypted += char\n continue # Not part of the decryption\n\n # Rotate character by the alphabet position of the letter in the key\n alphabet = string.ascii_uppercase if key[index].isupper() else string.ascii_lowercase\n decrypted += ''.join(shift.decrypt(int(alphabet.index(key[index])), char))\n index = (index + 1) % len(key)\n\n return decrypted", "docstring": "Decrypt Vigenere encrypted ``ciphertext`` using ``key``.\n\nExample:\n >>> decrypt(\"KEY\", \"RIJVS\")\n HELLO\n\nArgs:\n key (iterable): The key to use\n ciphertext (str): The text to decrypt\n\nReturns:\n Decrypted ciphertext", "source": "juraj_google_style"} -{"code": "def set_config_variables(repo, variables):\n\n with repo.config_writer() as writer:\n for k, value in variables.items():\n section, option = k.split('.')\n writer.set_value(section, option, value)\n writer.release()", "docstring": "Set config variables\n\nArgs:\n repo (git.Repo): repo\n variables (dict): entries of the form 'user.email': 'you@example.com'", "source": "juraj_google_style"} -{"code": "def showAddColumnDialog(self, triggered):\n\n if triggered:\n dialog = AddAttributesDialog(self)\n dialog.accepted.connect(self.addColumn)\n dialog.rejected.connect(self.uncheckButton)\n dialog.show()", "docstring": "Display the dialog to add a column to the model.\n\n This method is also a slot.\n\nArgs:\n triggered (bool): If the corresponding button was\n activated, the dialog will be created and shown.", "source": "juraj_google_style"} -{"code": "def deserialize_subject_info(subject_info_xml):\n\n try:\n return d1_common.xml.deserialize(subject_info_xml)\n except ValueError as e:\n raise d1_common.types.exceptions.InvalidToken(\n 0,\n 'Could not deserialize SubjectInfo. 
subject_info=\"{}\", error=\"{}\"'.format(\n subject_info_xml, str(e)\n ),\n )", "docstring": "Deserialize SubjectInfo XML doc to native object.\n\nArgs:\n subject_info_xml: str\n SubjectInfo XML doc\n\nReturns:\n SubjectInfo PyXB object", "source": "juraj_google_style"} -{"code": "def update_ledger(self, ledger_id, description=None):\n\n arguments = {'description': description}\n return self.do_req('PUT',\n self.merchant_api_base_url + '/ledger/' +\n ledger_id + '/', arguments)", "docstring": "Update ledger info\n\nArgs:\n ledger_id:\n Ledger id assigned by mCASH\n description:\n Description of the Ledger and it's usage", "source": "juraj_google_style"} -{"code": "def ValidateDependencies(rdf_artifact):\n\n for dependency in GetArtifactDependencies(rdf_artifact):\n try:\n dependency_obj = REGISTRY.GetArtifact(dependency)\n except rdf_artifacts.ArtifactNotRegisteredError as e:\n raise rdf_artifacts.ArtifactDependencyError(\n rdf_artifact, \"missing dependency\", cause=e)\n\n message = dependency_obj.error_message\n if message:\n raise rdf_artifacts.ArtifactDependencyError(\n rdf_artifact, \"dependency error\", cause=message)", "docstring": "Validates artifact dependencies.\n\n This method checks whether all dependencies of the artifact are present\n and contain no errors.\n\n This method can be called only after all other artifacts have been loaded.\n\nArgs:\n rdf_artifact: RDF object artifact.\n\nRaises:\n ArtifactDependencyError: If a dependency is missing or contains errors.", "source": "juraj_google_style"} -{"code": "def snapshot(self, filename=\"tmp.png\"):\n\n if not filename:\n filename = \"tmp.png\"\n if self.handle:\n try:\n screenshot(filename, self.handle)\n except win32gui.error:\n self.handle = None\n screenshot(filename)\n else:\n screenshot(filename)\n\n img = aircv.imread(filename)\n os.remove(filename)\n\n return img", "docstring": "Take a screenshot and save it to `tmp.png` filename by default\n\nArgs:\n filename: name of file where to store the screenshot\n\nReturns:\n display the screenshot", "source": "juraj_google_style"} -{"code": "def MakeCdfFromItems(items, name=''):\n\n runsum = 0\n xs = []\n cs = []\n\n for value, count in sorted(items):\n runsum += count\n xs.append(value)\n cs.append(runsum)\n\n total = float(runsum)\n ps = [c / total for c in cs]\n\n cdf = Cdf(xs, ps, name)\n return cdf", "docstring": "Makes a cdf from an unsorted sequence of (value, frequency) pairs.\n\nArgs:\n items: unsorted sequence of (value, frequency) pairs\n name: string name for this CDF\n\nReturns:\n cdf: list of (value, fraction) pairs", "source": "juraj_google_style"} -{"code": "def __init__(self, resolver_context):\n\n super(VShadowFileSystem, self).__init__(resolver_context)\n self._file_object = None\n self._vshadow_volume = None", "docstring": "Initializes a file system.\n\nArgs:\n resolver_context (Context): resolver context.", "source": "juraj_google_style"} -{"code": "def filter_by_pattern(self, pattern):\n\n _filt_values, _filt_datetimes = self._filter_by_pattern(pattern)\n if self._enumeration is None:\n self._get_mutable_enumeration()\n col_obj = self._enumeration['mutable'][self._collection_type]\n collection = col_obj(self.header.duplicate(), _filt_values, _filt_datetimes)\n collection._validated_a_period = self._validated_a_period\n return collection", "docstring": "Filter the Data Collection based on a list of booleans.\n\nArgs:\n pattern: A list of True/False values. 
Typically, this is a list\n with a length matching the length of the Data Collections values\n but it can also be a pattern to be repeated over the Data Collection.\n\nReturns:\n A new Data Collection with filtered data", "source": "juraj_google_style"} -{"code": "def __init__(self, filename,f_start=None, f_stop=None,t_start=None, t_stop=None, load_data=True, max_load=1.):\n\n super(FilReader, self).__init__()\n\n self.header_keywords_types = sigproc.header_keyword_types\n\n if filename and os.path.isfile(filename):\n self.filename = filename\n self.load_data = load_data\n self.header = self.read_header()\n self.file_size_bytes = os.path.getsize(self.filename)\n self.idx_data = sigproc.len_header(self.filename)\n self.n_channels_in_file = self.header[b'nchans']\n self.n_beams_in_file = self.header[b'nifs'] #Placeholder for future development.\n self.n_pols_in_file = 1 #Placeholder for future development.\n self._n_bytes = int(self.header[b'nbits'] / 8) #number of bytes per digit.\n self._d_type = self._setup_dtype()\n self._setup_n_ints_in_file()\n self.file_shape = (self.n_ints_in_file,self.n_beams_in_file,self.n_channels_in_file)\n\n if self.header[b'foff'] < 0:\n self.f_end = self.header[b'fch1']\n self.f_begin = self.f_end + self.n_channels_in_file*self.header[b'foff']\n else:\n self.f_begin = self.header[b'fch1']\n self.f_end = self.f_begin + self.n_channels_in_file*self.header[b'foff']\n\n self.t_begin = 0\n self.t_end = self.n_ints_in_file\n\n #Taking care all the frequencies are assigned correctly.\n self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop, init=True)\n #Convert input frequencies into what their corresponding channel number would be.\n self._setup_chans()\n #Update frequencies ranges from channel number.\n self._setup_freqs()\n\n self.freq_axis = 2\n self.time_axis = 0\n self.beam_axis = 1 # Place holder\n\n#EE ie.\n# spec = np.squeeze(fil_file.data)\n # set start of data, at real length of header (future development.)\n# self.datastart=self.hdrraw.find('HEADER_END')+len('HEADER_END')+self.startsample*self.channels\n\n #Applying data size limit to load.\n if max_load is not None:\n if max_load > 1.0:\n logger.warning('Setting data limit != 1GB, please handle with care!')\n self.MAX_DATA_ARRAY_SIZE = max_load * MAX_DATA_ARRAY_SIZE_UNIT\n else:\n self.MAX_DATA_ARRAY_SIZE = MAX_DATA_ARRAY_SIZE_UNIT\n\n if self.file_size_bytes > self.MAX_DATA_ARRAY_SIZE:\n self.large_file = True\n else:\n self.large_file = False\n\n if self.load_data:\n if self.large_file:\n if self.f_start or self.f_stop or self.t_start or self.t_stop:\n if self.isheavy():\n logger.warning(\"Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection.\" % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))\n self._init_empty_selection()\n else:\n self.read_data()\n else:\n logger.warning(\"The file is of size %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded. 
You could try another (t,v) selection.\"%(self.file_size_bytes/(1024.**3), self.MAX_DATA_ARRAY_SIZE/(1024.**3)))\n self._init_empty_selection()\n else:\n self.read_data()\n else:\n logger.info(\"Skipping loading data ...\")\n self._init_empty_selection()\n else:\n raise IOError(\"Need a file to open, please give me one!\")", "docstring": "Constructor.\n\nArgs:\n filename (str): filename of blimpy file.\n f_start (float): start frequency, in MHz\n f_stop (float): stop frequency, in MHz\n t_start (int): start time bin\n t_stop (int): stop time bin", "source": "juraj_google_style"} -{"code": "def sign(self, byts):\n\n chosen_hash = c_hashes.SHA256()\n hasher = c_hashes.Hash(chosen_hash, default_backend())\n hasher.update(byts)\n digest = hasher.finalize()\n return self.priv.sign(digest,\n c_ec.ECDSA(c_utils.Prehashed(chosen_hash))\n )", "docstring": "Compute the ECC signature for the given bytestream.\n\nArgs:\n byts (bytes): The bytes to sign.\n\nReturns:\n bytes: The RSA Signature bytes.", "source": "juraj_google_style"} -{"code": "def _parse_udf_file_entry(self, abs_file_entry_extent, icb, parent):\n # type: (int, udfmod.UDFLongAD, Optional[udfmod.UDFFileEntry]) -> Optional[udfmod.UDFFileEntry]\n\n self._seek_to_extent(abs_file_entry_extent)\n icbdata = self._cdfp.read(icb.extent_length)\n\n if all(v == 0 for v in bytearray(icbdata)):\n # We have seen ISOs in the wild (Windows 2008 Datacenter Enterprise\n # Standard SP2 x86 DVD) where the UDF File Identifier points to a\n # UDF File Entry of all zeros. In those cases, we just keep the\n # File Identifier, and keep the UDF File Entry blank.\n return None\n\n desc_tag = udfmod.UDFTag()\n desc_tag.parse(icbdata, icb.log_block_num)\n if desc_tag.tag_ident != 261:\n raise pycdlibexception.PyCdlibInvalidISO('UDF File Entry Tag identifier not 261')\n\n file_entry = udfmod.UDFFileEntry()\n file_entry.parse(icbdata, abs_file_entry_extent, parent, desc_tag)\n\n return file_entry", "docstring": "An internal method to parse a single UDF File Entry and return the\n corresponding object.\n\n Parameters:\n part_start - The extent number the partition starts at.\n icb - The ICB object for the data.\n parent - The parent of the UDF File Entry.\n\nReturns:\n A UDF File Entry object corresponding to the on-disk File Entry.", "source": "juraj_google_style"} -{"code": "def load(self, languages=[]):\n\n duckling_load = self.clojure.var(\"duckling.core\", \"load!\")\n clojure_hashmap = self.clojure.var(\"clojure.core\", \"hash-map\")\n clojure_list = self.clojure.var(\"clojure.core\", \"list\")\n\n if languages:\n # Duckling's load function expects ISO 639-1 Language Codes (e.g. \"en\")\n iso_languages = [Language.convert_to_iso(lang) for lang in languages]\n\n duckling_load.invoke(\n clojure_hashmap.invoke(\n self.clojure.read(':languages'),\n clojure_list.invoke(*iso_languages)\n )\n )\n else:\n duckling_load.invoke()\n\n self._is_loaded = True", "docstring": "Loads the Duckling corpus.\n\n Languages can be specified, defaults to all.\n\nArgs:\n languages: Optional parameter to specify languages,\n e.g. [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1 Codes (e.g. 
[\"en\", \"fr\"])", "source": "juraj_google_style"} -{"code": "def __init__(self, unit_def):\n\n\n if isinstance(unit_def, str):\n unit = collections.defaultdict(int)\n import re\n for m in re.finditer(r\"([A-Za-z]+)\\s*\\^*\\s*([\\-0-9]*)\", unit_def):\n p = m.group(2)\n p = 1 if not p else int(p)\n k = m.group(1)\n unit[k] += p\n else:\n unit = {k: v for k, v in dict(unit_def).items() if v != 0}\n self._unit = check_mappings(unit)", "docstring": "Constructs a unit.\n\nArgs:\n unit_def: A definition for the unit. Either a mapping of unit to\n powers, e.g., {\"m\": 2, \"s\": -1} represents \"m^2 s^-1\",\n or simply as a string \"kg m^2 s^-1\". Note that the supported\n format uses \"^\" as the power operator and all units must be\n space-separated.", "source": "juraj_google_style"} -{"code": "def create(self, callback_url):\n\n resource = self.resource.create({'subscribed_to': 'address',\n 'callback_url': callback_url})\n subscription = self.wrap(resource)\n self.add(subscription)\n return subscription", "docstring": "Register a new Subscription on this collection's parent object.\n\nArgs:\n callback_url (str): URI of an active endpoint which can receive\n notifications.\n\nReturns:\n A round.Subscription object if successful.", "source": "juraj_google_style"} -{"code": "def new(self):\n # type: () -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('UDF File Set Descriptor already initialized')\n\n self.desc_tag = UDFTag()\n self.desc_tag.new(256) # FIXME: we should let the user set serial_number\n\n self.recording_date = UDFTimestamp()\n self.recording_date.new()\n\n self.domain_ident = UDFEntityID()\n self.domain_ident.new(0, b'*OSTA UDF Compliant', b'\\x02\\x01\\x03')\n\n self.root_dir_icb = UDFLongAD()\n self.root_dir_icb.new(2048, 2)\n\n self.file_set_num = 0\n self.log_vol_char_set = _unicodecharset()\n self.log_vol_ident = _ostaunicode_zero_pad('CDROM', 128)\n self.file_set_char_set = _unicodecharset()\n self.file_set_ident = _ostaunicode_zero_pad('CDROM', 32)\n self.copyright_file_ident = b'\\x00' * 32 # FIXME: let the user set this\n self.abstract_file_ident = b'\\x00' * 32 # FIXME: let the user set this\n\n self._initialized = True", "docstring": "A method to create a new UDF File Set Descriptor.\n\n Parameters:\n None.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def get_pourbaix_plot(self, limits=None, title=\"\",\n label_domains=True, plt=None):\n\n if limits is None:\n limits = [[-2, 16], [-3, 3]]\n\n plt = plt or pretty_plot(16)\n\n xlim = limits[0]\n ylim = limits[1]\n\n h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],\n [xlim[1], -xlim[1] * PREFAC]])\n o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],\n [xlim[1], -xlim[1] * PREFAC + 1.23]])\n neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])\n V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])\n\n ax = plt.gca()\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n lw = 3\n plt.plot(h_line[0], h_line[1], \"r--\", linewidth=lw)\n plt.plot(o_line[0], o_line[1], \"r--\", linewidth=lw)\n plt.plot(neutral_line[0], neutral_line[1], \"k-.\", linewidth=lw)\n plt.plot(V0_line[0], V0_line[1], \"k-.\", linewidth=lw)\n\n for entry, vertices in self._pd._stable_domain_vertices.items():\n center = np.average(vertices, axis=0)\n x, y = np.transpose(np.vstack([vertices, vertices[0]]))\n plt.plot(x, y, 'k-', linewidth=lw)\n if label_domains:\n plt.annotate(generate_entry_label(entry), center, ha='center',\n va='center', fontsize=20, color=\"b\")\n\n plt.xlabel(\"pH\")\n 
plt.ylabel(\"E (V)\")\n plt.title(title, fontsize=20, fontweight='bold')\n return plt", "docstring": "Plot Pourbaix diagram.\n\nArgs:\n limits: 2D list containing limits of the Pourbaix diagram\n of the form [[xlo, xhi], [ylo, yhi]]\n title (str): Title to display on plot\n label_domains (bool): whether to label pourbaix domains\n plt (pyplot): Pyplot instance for plotting\n\nReturns:\n plt (pyplot) - matplotlib plot object with pourbaix diagram", "source": "juraj_google_style"} -{"code": "def __init__(self, path, script, optimized=True):\n\n\n # Save member variables\n self.path = path\n self.script = script\n\n if optimized:\n # For old ns-3 installations, the library is in build, while for\n # recent ns-3 installations it's in build/lib. Both paths are\n # thus required to support all versions of ns-3.\n library_path = \"%s:%s\" % (\n os.path.join(path, 'build/optimized'),\n os.path.join(path, 'build/optimized/lib'))\n\n # We use both LD_ and DYLD_ to support Linux and Mac OS.\n self.environment = {\n 'LD_LIBRARY_PATH': library_path,\n 'DYLD_LIBRARY_PATH': library_path}\n else:\n library_path = \"%s:%s\" % (os.path.join(path, 'build'),\n os.path.join(path, 'build/lib'))\n self.environment = {\n 'LD_LIBRARY_PATH': os.path.join(path, 'build'),\n 'DYLD_LIBRARY_PATH': os.path.join(path, 'build')}\n\n # Configure and build ns-3\n self.configure_and_build(path, optimized=optimized)\n\n # ns-3's build status output is used to get the executable path for the\n # specified script.\n if optimized:\n build_status_path = os.path.join(path,\n 'build/optimized/build-status.py')\n else:\n build_status_path = os.path.join(path,\n 'build/build-status.py')\n\n # By importing the file, we can naturally get the dictionary\n try: # This only works on Python >= 3.5\n spec = importlib.util.spec_from_file_location('build_status',\n build_status_path)\n build_status = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(build_status)\n except (AttributeError): # This happens in Python <= 3.4\n import imp\n build_status = imp.load_source('build_status', build_status_path)\n\n # Search is simple: we look for the script name in the program field.\n # Note that this could yield multiple matches, in case the script name\n # string is contained in another script's name.\n # matches contains [program, path] for each program matching the script\n matches = [{'name': program,\n 'path': os.path.abspath(os.path.join(path, program))} for\n program in build_status.ns3_runnable_programs if self.script\n in program]\n\n if not matches:\n raise ValueError(\"Cannot find %s script\" % self.script)\n\n # To handle multiple matches, we take the 'better matching' option,\n # i.e., the one with length closest to the original script name.\n match_percentages = map(lambda x: {'name': x['name'],\n 'path': x['path'],\n 'percentage':\n len(self.script)/len(x['name'])},\n matches)\n\n self.script_executable = max(match_percentages,\n key=lambda x: x['percentage'])['path']\n\n if optimized and \"scratch\" in self.script_executable:\n self.script_executable = os.path.abspath(\n os.path.join(path, \"build/optimized/scratch\", self.script))", "docstring": "Initialization function.\n\nArgs:\n path (str): absolute path to the ns-3 installation this Runner\n should lock on.\n script (str): ns-3 script that will be used by this Runner.\n optimized (bool): whether this Runner should build ns-3 with the\n optimized profile.", "source": "juraj_google_style"} -{"code": "def get_shape_str(tensors):\n\n if isinstance(tensors, (list, 
tuple)):\n for v in tensors:\n assert isinstance(v, (tf.Tensor, tf.Variable)), \"Not a tensor: {}\".format(type(v))\n shape_str = \",\".join(\n map(lambda x: str(x.get_shape().as_list()), tensors))\n else:\n assert isinstance(tensors, (tf.Tensor, tf.Variable)), \"Not a tensor: {}\".format(type(tensors))\n shape_str = str(tensors.get_shape().as_list())\n return shape_str", "docstring": "Internally used by layer registry, to print shapes of inputs/outputs of layers.\n\nArgs:\n tensors (list or tf.Tensor): a tensor or a list of tensors\n\nReturns:\n str: a string to describe the shape", "source": "juraj_google_style"} -{"code": "def save_link(self, path_info):\n\n assert path_info[\"scheme\"] == \"local\"\n path = path_info[\"path\"]\n\n if not os.path.exists(path):\n return\n\n mtime, _ = get_mtime_and_size(path)\n inode = get_inode(path)\n relpath = os.path.relpath(path, self.root_dir)\n\n cmd = (\n \"REPLACE INTO {}(path, inode, mtime) \"\n 'VALUES (\"{}\", {}, \"{}\")'.format(\n self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime\n )\n )\n self._execute(cmd)", "docstring": "Adds the specified path to the list of links created by dvc. This\n list is later used on `dvc checkout` to cleanup old links.\n\nArgs:\n path_info (dict): path info to add to the list of links.", "source": "juraj_google_style"} -{"code": "def scheduled_sample_prob(ground_truth_x,\n generated_x,\n batch_size,\n scheduled_sample_var):\n\n probability_threshold = scheduled_sample_var\n probability_of_generated = tf.random_uniform([batch_size])\n return tf.where(probability_of_generated > probability_threshold,\n generated_x, ground_truth_x)", "docstring": "Probability based scheduled sampling.\n\nArgs:\n ground_truth_x: tensor of ground-truth data points.\n generated_x: tensor of generated data points.\n batch_size: batch size\n scheduled_sample_var: probability of choosing from ground_truth.\n\nReturns:\n New batch with randomly selected data points.", "source": "juraj_google_style"} -{"code": "def _process_counter_example(self, mma, w_string):\n\n diff = len(w_string)\n same = 0\n membership_answer = self._membership_query(w_string)\n while True:\n i = (same + diff) / 2\n access_string = self._run_in_hypothesis(mma, w_string, i)\n if membership_answer != self._membership_query(access_string + w_string[i:]):\n diff = i\n else:\n same = i\n if diff - same == 1:\n break\n exp = w_string[diff:]\n self.observation_table.em_vector.append(exp)\n for row in self.observation_table.sm_vector + self.observation_table.smi_vector:\n self._fill_table_entry(row, exp)\n return 0", "docstring": "Process a counterexample in the Rivest-Schapire way.\n\nArgs:\n mma (DFA): The hypothesis automaton\n w_string (str): The examined string to be consumed\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def _generate_entry(self, vm):\n\n return \\\n '{name} ' \\\n 'ansible_host={ip} ' \\\n 'ansible_ssh_private_key_file={key}'.format(\n name=vm.name(),\n ip=vm.ip(),\n key=self.prefix.paths.ssh_id_rsa()\n )", "docstring": "Generate host entry for the given VM\n\nArgs:\n vm (lago.plugins.vm.VMPlugin): The VM for which the entry\n should be created for.\n\nReturns:\n str: An entry for vm", "source": "juraj_google_style"} -{"code": "def get_inputs_outputs(namespace, method, snapshot_id):\n\n\n body = {\n \"methodNamespace\" : namespace,\n \"methodName\" : method,\n \"methodVersion\" : snapshot_id\n }\n return __post(\"inputsOutputs\", json=body)", "docstring": "Get a description of the inputs and outputs for a method.\n\n The 
method should exist in the methods repository.\n\nArgs:\n namespace (str): Methods namespace\n method (str): method name\n snapshot_id (int): snapshot_id of the method\n\n Swagger:\n https://api.firecloud.org/#!/Method_Repository/getMethodIO", "source": "juraj_google_style"} -{"code": "def get_compute_usage(access_token, subscription_id, location):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/providers/Microsoft.compute/locations/', location,\n '/usages?api-version=', COMP_API])\n return do_get(endpoint, access_token)", "docstring": "List compute usage and limits for a location.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n location (str): Azure data center location. E.g. westus.\n\nReturns:\n HTTP response. JSON body of Compute usage and limits data.", "source": "juraj_google_style"} -{"code": "def AddBudget(self, client_customer_id, micro_amount):\n\n self.client.SetClientCustomerId(client_customer_id)\n\n budget_service = self.client.GetService('BudgetService')\n\n operations = [{\n 'operator': 'ADD',\n 'operand': {\n 'name': 'Budget #%s' % time.time(),\n 'amount': {\n 'microAmount': micro_amount\n },\n 'deliveryMethod': 'STANDARD'\n }\n }]\n\n return budget_service.mutate(operations)['value'][0]['budgetId']", "docstring": "Create a new Budget with the given microAmount.\n\nArgs:\n client_customer_id: str Client Customer Id used to create Budget.\n micro_amount: str The budget represented in micros.\n\nReturns:\n str BudgetId of the newly created Budget.", "source": "juraj_google_style"} -{"code": "def _gsa_force(grav, mass_i, mass_j, position_i, position_j):\n\n\n position_diff = numpy.subtract(position_j, position_i)\n distance = numpy.linalg.norm(position_diff)\n\n # The first 3 terms give the magnitude of the force\n # The last term is a vector that provides the direction\n # Epsilon prevents divide by zero errors\n return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff", "docstring": "Gives the force of solution j on solution i.\n\n Variable name in GSA paper given in ()\n\nArgs:\n grav: The gravitational constant. (G)\n mass_i: The mass of solution i (derived from fitness). (M_i)\n mass_j: The mass of solution j (derived from fitness). (M_j)\n position_i: The position of solution i. (x_i)\n position_j: The position of solution j. (x_j)\n\nReturns:\n numpy.array; The force vector of solution j on solution i.", "source": "juraj_google_style"} -{"code": "def __call__(self, text, toLang, fromLang=None):\n\n return self.skype.conn(\"GET\", \"{0}/skype/translate\".format(SkypeConnection.API_TRANSLATE),\n params={\"from\": fromLang or \"\", \"to\": toLang, \"text\": text},\n auth=SkypeConnection.Auth.SkypeToken).json()", "docstring": "Attempt translation of a string. 
Supports automatic language detection if ``fromLang`` is not specified.\n\nArgs:\n text (str): input text to be translated\n toLang (str): country code of output language\n fromLang (str): country code of input language", "source": "juraj_google_style"} -{"code": "def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):\n\n h, w = dst_img.shape[:2]\n return imresize(img, (w, h), return_scale, interpolation)", "docstring": "Resize image to the same size of a given image.\n\nArgs:\n img (ndarray): The input image.\n dst_img (ndarray): The target image.\n return_scale (bool): Whether to return `w_scale` and `h_scale`.\n interpolation (str): Same as :func:`resize`.\n\nReturns:\n tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or\n `resized_img`.", "source": "juraj_google_style"} -{"code": "def simple_layer_stack(include_encdec_attention,\n num_layers=6,\n d_ff=2048,\n num_heads=8,\n d_kv=128,\n dropout_rate=0.1):\n\n ret = []\n for _ in xrange(num_layers):\n ret.append(\n transformer_layers.SelfAttention(\n num_heads=num_heads,\n key_value_size=d_kv,\n attention_kwargs={\"dropout_rate\": dropout_rate}))\n if include_encdec_attention:\n ret.append(\n transformer_layers.EncDecAttention(\n num_heads=num_heads,\n key_value_size=d_kv,\n attention_kwargs={\"dropout_rate\": dropout_rate}))\n ret.append(\n transformer_layers.DenseReluDense(\n hidden_size=d_ff,\n dropout_rate=dropout_rate))\n return transformer.LayerStack(ret)", "docstring": "Create a layer stack.\n\nArgs:\n include_encdec_attention: a boolean\n num_layers: an integer\n d_ff: an integer\n num_heads: an integer\n d_kv: an integer\n dropout_rate: a float\n\nReturns:\n a LayerStack", "source": "juraj_google_style"} -{"code": "def relative_humidity(self, value=999):\n\n if value is not None:\n try:\n value = int(value)\n except ValueError:\n raise ValueError('value {} need to be of type int '\n 'for field `relative_humidity`'.format(value))\n if value < 0:\n raise ValueError('value need to be greater or equal 0 '\n 'for field `relative_humidity`')\n if value > 110:\n raise ValueError('value need to be smaller 110 '\n 'for field `relative_humidity`')\n\n self._relative_humidity = value", "docstring": "Corresponds to IDD Field `relative_humidity`\n\nArgs:\n value (int): value for IDD Field `relative_humidity`\n value >= 0\n value <= 110\n Missing value: 999\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def consume_input(self, mystr, stack=[], state=1, curchar=0, depth=0):\n\n mystrsplit = mystr.split(' ')\n if self.s[state].type == 1:\n stack.append(self.s[state].sym)\n if len(self.s[state].trans) > 0:\n state = self.s[state].trans[0]\n if self.parse(\n mystr,\n stack=stack,\n state=state,\n curchar=curchar,\n depth=depth + 1) == 1:\n return True\n return False\n if self.s[state].type == 2:\n if len(stack) == 0:\n return False\n sym = stack.pop()\n for key in self.s[state].trans:\n if sym in self.s[state].trans[key]:\n if self.parse(\n mystr,\n stack=stack,\n state=key,\n curchar=curchar,\n depth=depth + 1) == 1:\n return True\n return False\n if self.s[state].type == 3:\n for key in self.s[state].trans:\n if mystrsplit[curchar] in self.s[state].trans[key]:\n # print 'found '\n if curchar + 1 == len(mystrsplit) \\\n and 'closing' in self.s[key].trans:\n return True\n elif curchar + 1 == len(mystrsplit):\n return False\n\n # print 'lets try as next 
state the state ' + repr(key)\n if self.parse(\n mystr,\n stack=stack,\n state=key,\n curchar=curchar + 1,\n depth=depth + 1) == 1:\n return True\n return False", "docstring": "Consumes an input and validates if it is accepted\n\nArgs:\n mystr (str): the input string to be consumes\n stack (list): the stack of symbols\n state (int): the current state of the PDA\n curchar (int): the index of the consumed character\n depth (int): the depth of the function call in the stack\n\nReturns:\n bool: A value indicating the correct or erroneous execution", "source": "juraj_google_style"} -{"code": "def preprocess(self, style):\n\n if self.property == 'font':\n style = [\n ''.join(u.expression()) if hasattr(u, 'expression') else u\n for u in style\n ]\n else:\n style = [(u, ' ') if hasattr(u, 'expression') else u\n for u in style]\n return style", "docstring": "Hackish preprocessing from font shorthand tags.\n Skips expression parse on certain tags.\n\nArgs:\n style (list): .\n\nReturns:\n list", "source": "juraj_google_style"} -{"code": "def sample_many(self, num_samples = 2000):\n\n x = []\n y = []\n fix = []\n sample = []\n\n # XXX: Delete ProgressBar\n pbar = ProgressBar(widgets=[Percentage(),Bar()], maxval=num_samples).start()\n\n for s in range(0, num_samples):\n for i, (xs, ys) in enumerate(self.sample()):\n x.append(xs)\n y.append(ys)\n fix.append(i+1)\n sample.append(s)\n pbar.update(s+1)\n\n fields = {'fix':np.array(fix), 'y':np.array(y), 'x':np.array(x)}\n param = {'pixels_per_degree':self.fm.pixels_per_degree}\n out = fixmat.VectorFixmatFactory(fields, param)\n return out", "docstring": "Generates a given number of trajectories, using the method sample().\n Returns a fixmat with the generated data.\n\n Parameters:\n num_samples : int, optional\n The number of trajectories that shall be generated.", "source": "juraj_google_style"} -{"code": "def proc_val(key, val):\n\n list_keys = (\"LDAUU\", \"LDAUL\", \"LDAUJ\", \"MAGMOM\", \"DIPOL\",\n \"LANGEVIN_GAMMA\", \"QUAD_EFG\", \"EINT\")\n bool_keys = (\"LDAU\", \"LWAVE\", \"LSCALU\", \"LCHARG\", \"LPLANE\", \"LUSE_VDW\",\n \"LHFCALC\", \"ADDGRID\", \"LSORBIT\", \"LNONCOLLINEAR\")\n float_keys = (\"EDIFF\", \"SIGMA\", \"TIME\", \"ENCUTFOCK\", \"HFSCREEN\",\n \"POTIM\", \"EDIFFG\", \"AGGAC\", \"PARAM1\", \"PARAM2\")\n int_keys = (\"NSW\", \"NBANDS\", \"NELMIN\", \"ISIF\", \"IBRION\", \"ISPIN\",\n \"ICHARG\", \"NELM\", \"ISMEAR\", \"NPAR\", \"LDAUPRINT\", \"LMAXMIX\",\n \"ENCUT\", \"NSIM\", \"NKRED\", \"NUPDOWN\", \"ISPIND\", \"LDAUTYPE\",\n \"IVDW\")\n\n def smart_int_or_float(numstr):\n if numstr.find(\".\") != -1 or numstr.lower().find(\"e\") != -1:\n return float(numstr)\n else:\n return int(numstr)\n\n try:\n if key in list_keys:\n output = []\n toks = re.findall(\n r\"(-?\\d+\\.?\\d*)\\*?(-?\\d+\\.?\\d*)?\\*?(-?\\d+\\.?\\d*)?\", val)\n for tok in toks:\n if tok[2] and \"3\" in tok[0]:\n output.extend(\n [smart_int_or_float(tok[2])] * int(tok[0])\n * int(tok[1]))\n elif tok[1]:\n output.extend([smart_int_or_float(tok[1])] *\n int(tok[0]))\n else:\n output.append(smart_int_or_float(tok[0]))\n return output\n if key in bool_keys:\n m = re.match(r\"^\\.?([T|F|t|f])[A-Za-z]*\\.?\", val)\n if m:\n if m.group(1) == \"T\" or m.group(1) == \"t\":\n return True\n else:\n return False\n raise ValueError(key + \" should be a boolean type!\")\n\n if key in float_keys:\n return float(re.search(r\"^-?\\d*\\.?\\d*[e|E]?-?\\d*\", val).group(0))\n\n if key in int_keys:\n return int(re.match(r\"^-?[0-9]+\", val).group(0))\n\n except ValueError:\n pass\n\n # Not 
in standard keys. We will try a hierarchy of conversions.\n try:\n val = int(val)\n return val\n except ValueError:\n pass\n\n try:\n val = float(val)\n return val\n except ValueError:\n pass\n\n if \"true\" in val.lower():\n return True\n\n if \"false\" in val.lower():\n return False\n\n return val.strip().capitalize()", "docstring": "Static helper method to convert INCAR parameters to proper types, e.g.,\n integers, floats, lists, etc.\n\nArgs:\n key: INCAR parameter key\n val: Actual value of INCAR parameter.", "source": "juraj_google_style"} -{"code": "def get_generated_cols(X_original, X_transformed, to_transform):\n\n original_cols = list(X_original.columns)\n\n if len(to_transform) > 0:\n [original_cols.remove(c) for c in to_transform]\n\n current_cols = list(X_transformed.columns)\n if len(original_cols) > 0:\n [current_cols.remove(c) for c in original_cols]\n\n return current_cols", "docstring": "Returns a list of the generated/transformed columns.\n\nArgs:\n X_original: df\n the original (input) DataFrame.\n X_transformed: df\n the transformed (current) DataFrame.\n to_transform: [str]\n a list of columns that were transformed (as in the original DataFrame), commonly self.cols.\n\n Output:\n a list of columns that were transformed (as in the current DataFrame).", "source": "juraj_google_style"} -{"code": "def askstring(title, prompt, **kw):\n\n return psidialogs.ask_string(title=title, message=prompt)", "docstring": "Original doc: get a string from the user\n\nArgs:\n title -- the dialog title\n prompt -- the label text\n **kw -- see SimpleDialog class\n\n Return value is a string", "source": "juraj_google_style"} -{"code": "def add_parents(self, parents):\n\n\n self._parents += [p for p in parents if p not in self._parents]", "docstring": "Adds new parent nodes after filtering for duplicates\n\nArgs:\n parents (list): list of OmniTree nodes to add as parents", "source": "juraj_google_style"} -{"code": "def _expression_to_sql(expression, node, context):\n\n _expression_transformers = {\n expressions.LocalField: _transform_local_field_to_expression,\n expressions.Variable: _transform_variable_to_expression,\n expressions.Literal: _transform_literal_to_expression,\n expressions.BinaryComposition: _transform_binary_composition_to_expression,\n }\n expression_type = type(expression)\n if expression_type not in _expression_transformers:\n raise NotImplementedError(\n u'Unsupported compiler expression \"{}\" of type \"{}\" cannot be converted to SQL '\n u'expression.'.format(expression, type(expression)))\n return _expression_transformers[expression_type](expression, node, context)", "docstring": "Recursively transform a Filter block predicate to its SQLAlchemy expression representation.\n\nArgs:\n expression: expression, the compiler expression to transform.\n node: SqlNode, the SqlNode the expression applies to.\n context: CompilationContext, global compilation state and metadata.\n\nReturns:\n Expression, SQLAlchemy Expression equivalent to the passed compiler expression.", "source": "juraj_google_style"} -{"code": "def get_structure_from_name(self, structure_name):\n\n return next((st for st in self.structures if st.name == structure_name), None)", "docstring": "Return a structure from a name\n\nArgs:\n structure_name (str): name of the structure\n\nReturns:\n Structure", "source": "juraj_google_style"} -{"code": "def parse_isoformat(timestamp):\n\n if len(timestamp) == 20:\n zone = TzOffset('+00:00')\n timestamp = timestamp[:-1]\n elif len(timestamp) == 24:\n zone = 
TzOffset('%s:%s' % (timestamp[-5:-2], timestamp[-2:]))\n timestamp = timestamp[:-5]\n elif len(timestamp) == 25:\n zone = TzOffset(timestamp[-6:])\n timestamp = timestamp[:-6]\n timestamp = Timestamp.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')\n timestamp = timestamp.replace(tzinfo=zone)\n return timestamp", "docstring": "Parse an ISO 8601 formatted time stamp.\n\nArgs:\n timestamp (str): Timestamp to parse\n\nReturns:\n Timestamp: Parsed timestamp", "source": "juraj_google_style"} -{"code": "def load(nifti_filename):\n\n # Expand filename to be absolute\n nifti_filename = os.path.expanduser(nifti_filename)\n\n try:\n data = nib.load(nifti_filename)\n img = data.get_data()\n\n except Exception as e:\n raise ValueError(\"Could not load file {0} for conversion.\"\n .format(nifti_filename))\n raise\n\n return img", "docstring": "Import a nifti file into a numpy array. TODO: Currently only\n transfers raw data for compatibility with annotation and ND formats\n\nArgs:\n nifti_filename (str): A string filename of a nifti datafile\n\nReturns:\n A numpy array with data from the nifti file", "source": "juraj_google_style"} -{"code": "def options(self, options=None):\n\n\n\t\t# If opts aren't set, this is a getter\n\t\tif options is None:\n\t\t\treturn self._options\n\n\t\t# If the options are not a list\n\t\tif not isinstance(options, (list, tuple)):\n\t\t\traise ValueError('__options__')\n\n\t\t# If the type is not one that can have options\n\t\tif self._type not in ['base64', 'date', 'datetime', 'decimal', 'float', \\\n\t\t\t\t\t\t\t\t'int', 'ip', 'md5', 'price', 'string', 'time', \\\n\t\t\t\t\t\t\t\t'timestamp', 'uint', 'uuid']:\n\t\t\traise TypeError('can not set __options__ for ' + self._type)\n\n\t\t# Init the list of options to be saved\n\t\tlOpts = []\n\n\t\t# Go through each item and make sure it's unique and valid\n\t\tfor i in range(len(options)):\n\n\t\t\t# Convert the value based on the type\n\t\t\t# If the type is a string one that we can validate\n\t\t\tif self._type in ['base64', 'date', 'datetime', 'ip', 'md5', 'time', 'uuid']:\n\n\t\t\t\t# If the value is not a string or doesn't match its regex, raise\n\t\t\t\t# \tan error\n\t\t\t\tif not isinstance(options[i], basestring) \\\n\t\t\t\t\tor not _typeToRegex[self._type].match(options[i]):\n\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t# Else if it's decimal\n\t\t\telif self._type == 'decimal':\n\n\t\t\t\t# If it's a Decimal\n\t\t\t\tif isinstance(options[i], Decimal):\n\t\t\t\t\tpass\n\n\t\t\t\t# Else if we can't conver it\n\t\t\t\telse:\n\t\t\t\t\ttry: options[i] = Decimal(options[i])\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t# Else if it's a float\n\t\t\telif self._type == 'float':\n\n\t\t\t\ttry:\n\t\t\t\t\toptions[i] = float(options[i])\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t# Else if it's an integer\n\t\t\telif self._type in ['int', 'timestamp', 'uint']:\n\n\t\t\t\t# If we don't already have an int\n\t\t\t\tif not isinstance(options[i], (int, long)):\n\n\t\t\t\t\t# And we don't have a string\n\t\t\t\t\tif not isinstance(options[i], basestring):\n\t\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\toptions[i] = int(options[i], 0)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\t# If the type is unsigned and negative, raise an error\n\t\t\t\tif self._type in ['timestamp', 'uint'] and options[i] < 0:\n\t\t\t\t\traise ValueError('__options__[' + 
str(i) + ']')\n\n\t\t\t# Else if it's a price\n\t\t\telif self._type == 'price':\n\n\t\t\t\t# If it's a Decimal\n\t\t\t\tif isinstance(options[i], Decimal):\n\t\t\t\t\tpass\n\n\t\t\t\t# Else if it's not a valid price representation\n\t\t\t\telif not isinstance(options[i], basestring) or not _typeToRegex['price'].match(options[i]):\n\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\t# Store it as a Decimal\n\t\t\t\toptions[i] = Decimal(options[i])\n\n\t\t\t# Else if the type is a string\n\t\t\telif self._type == 'string':\n\n\t\t\t\t# If the value is not a string\n\t\t\t\tif not isinstance(options[i], basestring):\n\n\t\t\t\t\t# If the value can't be turned into a string\n\t\t\t\t\ttry:\n\t\t\t\t\t\toptions[i] = str(options[i])\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t# Else we have no validation for the type\n\t\t\telse:\n\t\t\t\traise TypeError('can not set __options__ for ' + self._type)\n\n\t\t\t# If it's already in the list, raise an error\n\t\t\tif options[i] in lOpts:\n\t\t\t\tsys.stderr.write('__options__[' + str(i) + '] is a duplicate')\n\n\t\t\t# Store the option\n\t\t\telse:\n\t\t\t\tlOpts.append(options[i])\n\n\t\t# Store the list of options\n\t\tself._options = lOpts", "docstring": "Options\n\n Sets or gets the list of acceptable values for the Node\n\nArgs:\n options {list} -- A list of valid values\n\nRaises:\n TypeError, ValueError\n\nReturns:\n None | list", "source": "juraj_google_style"} -{"code": "def __init__(self, name):\n\n self.name = name\n self.edges_in = set()\n self.edges_out = set()", "docstring": "Initialization method.\n\nArgs:\n name (str): name of the vertex.", "source": "juraj_google_style"} -{"code": "def AddArguments(cls, argument_group):\n\n argument_group.add_argument(\n '--analysis', metavar='PLUGIN_LIST', dest='analysis_plugins',\n default='', action='store', type=str, help=(\n 'A comma separated list of analysis plugin names to be loaded '\n 'or \"--analysis list\" to see a list of available plugins.'))\n\n arguments = sys.argv[1:]\n argument_index = 0\n\n if '--analysis' in arguments:\n argument_index = arguments.index('--analysis') + 1\n\n if 0 < argument_index < len(arguments):\n names = [name.strip() for name in arguments[argument_index].split(',')]\n else:\n names = None\n\n if names and names != ['list']:\n manager.ArgumentHelperManager.AddCommandLineArguments(\n argument_group, category='analysis', names=names)", "docstring": "Adds command line arguments to an argument group.\n\n This function takes an argument parser or an argument group object and adds\n to it all the command line arguments this helper supports.\n\nArgs:\n argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\n argparse group.", "source": "juraj_google_style"} -{"code": "def set(self, status_item, status):\n\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\n lg.setLevel(self.log_level)\n sparql = \n return self.conn.query(sparql=sparql.format(self.group,\n status_item,\n str(status).lower()),\n mode='update')", "docstring": "sets the status item to the passed in paramaters\n\nArgs:\n status_item: the name if the item to set\n status: boolean value to set the item", "source": "juraj_google_style"} -{"code": "def _publish_internal(self, push_messages):\n\n # Delayed import because this file is immediately read on install, and\n # the requests library may not be installed yet.\n import requests\n\n response = requests.post(\n self.host + self.api_url + '/push/send',\n 
data=json.dumps([pm.get_payload() for pm in push_messages]),\n headers={\n 'accept': 'application/json',\n 'accept-encoding': 'gzip, deflate',\n 'content-type': 'application/json',\n }\n )\n\n # Let's validate the response format first.\n try:\n response_data = response.json()\n except ValueError:\n # The response isn't json. First, let's attempt to raise a normal\n # http error. If it's a 200, then we'll raise our own error.\n response.raise_for_status()\n\n raise PushServerError('Invalid server response', response)\n\n # If there are errors with the entire request, raise an error now.\n if 'errors' in response_data:\n raise PushServerError(\n 'Request failed',\n response,\n response_data=response_data,\n errors=response_data['errors'])\n\n # We expect the response to have a 'data' field with the responses.\n if 'data' not in response_data:\n raise PushServerError(\n 'Invalid server response',\n response,\n response_data=response_data)\n\n # Use the requests library's built-in exceptions for any remaining 4xx\n # and 5xx errors.\n response.raise_for_status()\n\n # Sanity check the response\n if len(push_messages) != len(response_data['data']):\n raise PushServerError(\n ('Mismatched response length. Expected %d %s but only '\n 'received %d' % (\n len(push_messages),\n 'receipt' if len(push_messages) == 1 else 'receipts',\n len(response_data['data']))),\n response,\n response_data=response_data)\n\n # At this point, we know it's a 200 and the response format is correct.\n # Now let's parse the responses per push notification.\n receipts = []\n for i, receipt in enumerate(response_data['data']):\n receipts.append(PushResponse(\n push_message=push_messages[i],\n # If there is no status, assume error.\n status=receipt.get('status', PushResponse.ERROR_STATUS),\n message=receipt.get('message', ''),\n details=receipt.get('details', None)))\n\n return receipts", "docstring": "Send push notifications\n\n The server will validate any type of syntax errors and the client will\n raise the proper exceptions for the user to handle.\n\n Each notification is of the form:\n {\n 'to': 'ExponentPushToken[xxx]',\n 'body': 'This text gets display in the notification',\n 'badge': 1,\n 'data': {'any': 'json object'},\n }\n\nArgs:\n push_messages: An array of PushMessage objects.", "source": "juraj_google_style"} -{"code": "def _marginalize_factor(self, nodes, factor):\n\n marginalizing_nodes = list(set(factor.scope()).difference(nodes))\n return factor.marginalize(marginalizing_nodes, inplace=False)", "docstring": "Marginalizing the factor selectively for a set of variables.\n\n Parameters:\n ----------\n nodes: list, array-like\n A container of nodes (list, dict, set, etc.).\n\n factor: factor\n factor which is to be marginalized.", "source": "juraj_google_style"} -{"code": "def fit_arrhenius(temps, diffusivities):\n\n t_1 = 1 / np.array(temps)\n logd = np.log(diffusivities)\n # Do a least squares regression of log(D) vs 1/T\n a = np.array([t_1, np.ones(len(temps))]).T\n w, res, _, _ = np.linalg.lstsq(a, logd, rcond=None)\n w = np.array(w)\n n = len(temps)\n if n > 2:\n std_Ea = (res[0] / (n - 2) / (\n n * np.var(t_1))) ** 0.5 * const.k / const.e\n else:\n std_Ea = None\n return -w[0] * const.k / const.e, np.exp(w[1]), std_Ea", "docstring": "Returns Ea, c, standard error of Ea from the Arrhenius fit:\n D = c * exp(-Ea/kT)\n\nArgs:\n temps ([float]): A sequence of temperatures. units: K\n diffusivities ([float]): A sequence of diffusivities (e.g.,\n from DiffusionAnalyzer.diffusivity). 
units: cm^2/s", "source": "juraj_google_style"} -{"code": "def unexpected_disconnect(self, conn_or_internal_id):\n\n\n data = {\n 'id': conn_or_internal_id\n }\n\n action = ConnectionAction('force_disconnect', data, sync=False)\n self._actions.put(action)", "docstring": "Notify that there was an unexpected disconnection of the device.\n\n Any in progress operations are canceled cleanly and the device is transitioned\n to a disconnected state.\n\nArgs:\n conn_or_internal_id (string, int): Either an integer connection id or a string\n internal_id", "source": "juraj_google_style"} -{"code": "def load_model(itos_filename, classifier_filename, num_classes):\n\n\n # load the int to string mapping file\n itos = pickle.load(Path(itos_filename).open('rb'))\n # turn it into a string to int mapping (which is what we need)\n stoi = collections.defaultdict(lambda:0, {str(v):int(k) for k,v in enumerate(itos)})\n\n # these parameters aren't used, but this is the easiest way to get a model\n bptt,em_sz,nh,nl = 70,400,1150,3\n dps = np.array([0.4,0.5,0.05,0.3,0.4])*0.5\n vs = len(itos)\n\n model = get_rnn_classifer(bptt, 20*70, num_classes, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,\n layers=[em_sz*3, 50, num_classes], drops=[dps[4], 0.1],\n dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])\n\n # load the trained classifier\n model.load_state_dict(torch.load(classifier_filename, map_location=lambda storage, loc: storage))\n\n # put the classifier into evaluation mode\n model.reset()\n model.eval()\n\n return stoi, model", "docstring": "Load the classifier and int to string mapping\n\nArgs:\n itos_filename (str): The filename of the int to string mapping file (usually called itos.pkl)\n classifier_filename (str): The filename of the trained classifier\n\nReturns:\n string to int mapping, trained classifer model", "source": "juraj_google_style"} -{"code": "def clone_rtcpath_update_rt_as(path, new_rt_as):\n\n assert path and new_rt_as\n if not path or path.route_family != RF_RTC_UC:\n raise ValueError('Expected RT_NLRI path')\n old_nlri = path.nlri\n new_rt_nlri = RouteTargetMembershipNLRI(new_rt_as, old_nlri.route_target)\n return RtcPath(path.source, new_rt_nlri, path.source_version_num,\n pattrs=path.pathattr_map, nexthop=path.nexthop,\n is_withdraw=path.is_withdraw)", "docstring": "Clones given RT NLRI `path`, and updates it with new RT_NLRI AS.\n\n Parameters:\n - `path`: (Path) RT_NLRI path\n - `new_rt_as`: AS value of cloned paths' RT_NLRI", "source": "juraj_google_style"} -{"code": "def build_profile_variant(variant):\n\n\n chrom = variant.CHROM\n if chrom.startswith(('chr', 'CHR', 'Chr')):\n chrom = chrom[3:]\n\n pos = int(variant.POS)\n\n variant_id = get_variant_id(variant)\n\n ref = variant.REF\n alt = variant.ALT[0]\n\n maf = get_maf(variant)\n\n profile_variant = ProfileVariant(\n variant_id=variant_id,\n chrom=chrom,\n pos=pos,\n ref=ref,\n alt=alt,\n maf=maf,\n id_column = variant.ID\n )\n\n return profile_variant", "docstring": "Returns a ProfileVariant object\n\nArgs:\n variant (cyvcf2.Variant)\n\nReturns:\n variant (models.ProfileVariant)", "source": "juraj_google_style"} -{"code": "def assignSchedule(self, schedule, period, hour, minute, tariff):\n\n if ((schedule not in range(Extents.Schedules)) or\n (period not in range(Extents.Tariffs)) or\n (hour < 0) or (hour > 23) or (minute < 0) or\n (minute > 59) or (tariff < 0)):\n ekm_log(\"Out of bounds in Schedule_\" + str(schedule + 1))\n return False\n\n period += 1\n idx_min = \"Min_\" + str(period)\n idx_hour = 
\"Hour_\" + str(period)\n idx_rate = \"Tariff_\" + str(period)\n if idx_min not in self.m_schedule_params:\n ekm_log(\"Incorrect index: \" + idx_min)\n return False\n if idx_hour not in self.m_schedule_params:\n ekm_log(\"Incorrect index: \" + idx_hour)\n return False\n if idx_rate not in self.m_schedule_params:\n ekm_log(\"Incorrect index: \" + idx_rate)\n return False\n\n self.m_schedule_params[idx_rate] = tariff\n self.m_schedule_params[idx_hour] = hour\n self.m_schedule_params[idx_min] = minute\n self.m_schedule_params['Schedule'] = schedule\n return True", "docstring": "Assign one schedule tariff period to meter bufffer.\n\nArgs:\n schedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extents.Schedules).\n tariff (int): :class:`~ekmmeters.Tariffs` value or in range(Extents.Tariffs).\n hour (int): Hour from 0-23.\n minute (int): Minute from 0-59.\n tariff (int): Rate value.\n\nReturns:\n bool: True on completed assignment.", "source": "juraj_google_style"} -{"code": "def assert_shape_match(shape1, shape2):\n\n shape1 = tf.TensorShape(shape1)\n shape2 = tf.TensorShape(shape2)\n if shape1.ndims is None or shape2.ndims is None:\n raise ValueError('Shapes must have known rank. Got %s and %s.' %\n (shape1.ndims, shape2.ndims))\n shape1.assert_same_rank(shape2)\n shape1.assert_is_compatible_with(shape2)", "docstring": "Ensure the shape1 match the pattern given by shape2.\n\n Ex:\n assert_shape_match((64, 64, 3), (None, None, 3))\n\nArgs:\n shape1 (tuple): Static shape\n shape2 (tuple): Dynamic shape (can contain None)", "source": "juraj_google_style"} -{"code": "def get_group_by_id(self, group_id: str) -> typing.Optional['Group']:\n\n VALID_POSITIVE_INT.validate(group_id, 'get_group_by_id', exc=ValueError)\n for group in self.groups:\n\n if group.group_id == group_id:\n return group\n return None", "docstring": "Gets a group by id\n\nArgs:\n group_id: group id\n\n Returns: Group", "source": "juraj_google_style"} -{"code": "def run_local(self, commands):\n\n process = subprocess.Popen(\n commands.get('cli_command'),\n shell=self.shell,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n out, err = process.communicate()\n\n # display app output\n self.run_display_app_output(out)\n self.run_display_app_errors(err)\n\n # Exit Code\n return self.run_exit_code(process.returncode)", "docstring": "Run the App on local system.\n\nArgs:\n commands (dict): A dictionary of the CLI commands.\n\nReturns:\n int: The exit code of the subprocess command.", "source": "juraj_google_style"} -{"code": "def convert_flatten(params, w_name, scope_name, inputs, layers, weights, names):\n\n print('Converting flatten ...')\n\n if names == 'short':\n tf_name = 'R' + random_string(7)\n elif names == 'keep':\n tf_name = w_name\n else:\n tf_name = w_name + str(random.random())\n\n reshape = keras.layers.Reshape([-1], name=tf_name)\n layers[scope_name] = reshape(layers[inputs[0]])", "docstring": "Convert reshape(view).\n\nArgs:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers", "source": "juraj_google_style"} -{"code": "def DisableInterfaces(interface):\n\n set_tested_versions = ['vista', '2008']\n set_args = ['/c', 'netsh', 'set', 'interface', interface, 'DISABLED']\n host_version = platform.platform().lower()\n for version in set_tested_versions:\n if 
host_version.find(version) != -1:\n # pylint: disable=undefined-variable\n res = client_utils_common.Execute(\n 'cmd', set_args, time_limit=-1, bypass_whitelist=True)\n return res\n return ('', 'Command not available for this version.', 99, '')", "docstring": "Tries to disable an interface. Only works on Vista and 7.\n\nArgs:\n interface: Name of the interface to disable.\n\nReturns:\n res which is a tuple of (stdout, stderr, exit_status, time_taken).", "source": "juraj_google_style"} -{"code": "def _x_flow_ok(self, active):\n\n args = AMQPWriter()\n args.write_bit(active)\n self._send_method((20, 21), args)", "docstring": "Confirm a flow method\n\n Confirms to the peer that a flow command was received and\n processed.\n\n PARAMETERS:\n active: boolean\n\n current flow setting\n\n Confirms the setting of the processed flow method:\n True means the peer will start sending or continue\n to send content frames; False means it will not.", "source": "juraj_google_style"} -{"code": "def list_storage_accounts_rg(access_token, subscription_id, rgname):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourcegroups/', rgname,\n '/providers/Microsoft.Storage/storageAccounts',\n '?api-version=', STORAGE_API])\n return do_get(endpoint, access_token)", "docstring": "List the storage accounts in the specified resource group.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n rgname (str): Azure resource group name.\n\nReturns:\n HTTP response. JSON body list of storage accounts.", "source": "juraj_google_style"} -{"code": "def dump_tests(self):\n\n log.info('Dumping tests')\n for test in self.test_map.values():\n try:\n test.dump()\n except ex.SerializeException as e:\n log.warning('Unable to dump {}: {}'.format(test.name, str(e)))\n else:\n log.info('Dumped {}'.format(test.name))", "docstring": "Dumps all tests, as determined by their .dump() method.\n\n PARAMETERS:\n tests -- dict; file -> Test. 
Each Test object has a .dump method\n that takes a filename and serializes the test object.", "source": "juraj_google_style"} -{"code": "def interceptable(func):\n\n @functools.wraps(func)\n def func_wrapped(*args, **kwargs):\n with get_next_interceptor() as interceptor:\n return interceptor(func, *args, **kwargs)\n\n return func_wrapped", "docstring": "Decorator that wraps `func` so that its execution is intercepted.\n\n The wrapper passes `func` to the interceptor for the current thread.\n\n If there is no next interceptor, we perform an \"immediate\" call to `func`.\n That is, `func` terminates without forwarding its execution to another\n interceptor.\n\nArgs:\n func: Function to wrap.\n\nReturns:\n The decorated function.", "source": "juraj_google_style"} -{"code": "def use(self, middleware, path=None):\n\n self.log.info(\" Using middleware {}\", middleware)\n if path is None:\n path = MiddlewareChain.ROOT_PATTERN\n self.add(HTTPMethod.ALL, path, middleware)\n return self", "docstring": "Call the provided middleware upon requests matching the path.\n If path is not provided or None, all requests will match.\n\nArgs:\n middleware (callable): Callable with the signature\n ``(res, req) -> None``\n path (Optional[str or regex]): a specific path the\n request must match for the middleware to be called.\n\nReturns:\n This router", "source": "juraj_google_style"} -{"code": "def supported_language(lang):\n\n try:\n self.get_collection(lang=lang)\n return True\n except LanguageNotSupported as e:\n return False", "docstring": "Return True if polyglot supports the language.\n\nArgs:\n lang (string): Language code.", "source": "juraj_google_style"} -{"code": "def mapTypeToSql(fld_type=FieldType.NoType, fld_len=0):\n\n if fld_type == FieldType.Float:\n return \"FLOAT\"\n elif fld_type == FieldType.String:\n return \"VARCHAR(\" + str(fld_len) + \")\"\n elif fld_type == FieldType.Int:\n return \"INT\"\n elif fld_type == FieldType.Hex:\n return \"VARCHAR(\" + str(fld_len * 2) + \")\"\n elif fld_type == FieldType.PowerFactor:\n return \"VARCHAR(\" + str(fld_len) + \")\"\n else:\n ekm_log(\"Type \" + str(type) + \" not handled by mapTypeToSql, returned VARCHAR(255)\")\n return \"VARCHAR(255)\"", "docstring": "Translate FieldType to portable SQL Type. 
Override if needful.\n\nArgs:\n fld_type (int): :class:`~ekmmeters.FieldType` in serial block.\n fld_len (int): Binary length in serial block\n\nReturns:\n string: Portable SQL type and length where appropriate.", "source": "juraj_google_style"} -{"code": "def bottom(self, features):\n\n if not self._problem_hparams:\n log_warn(\"Without a Problem, T2TModel.bottom is a passthrough.\")\n return features\n\n transformed_features = collections.OrderedDict()\n all_previous_modalities = []\n target_modality = _create_target_modality(self._problem_hparams.modality)\n\n # Transform features via its corresponding modality.\n for feature_name, modality in sorted(\n six.iteritems(self._problem_hparams.modality)):\n if feature_name not in features:\n tf.logging.warning(\"Missing feature %s - ignoring.\" % feature_name)\n continue\n vocab_size = self._problem_hparams.vocab_size[feature_name]\n if vocab_size is not None and hasattr(self._hparams, \"vocab_divisor\"):\n vocab_size += (-vocab_size) % self._hparams.vocab_divisor\n modality_name = self._hparams.name.get(\n feature_name,\n modalities.get_name(modality))(self._hparams, vocab_size)\n # Use if-else clauses to preserve behavior of previous changes: namely,\n # the variable scope name for the targets feature if there is only one\n # target modality; and to reuse variable scopes for only input modalities.\n if feature_name in target_modality:\n if len(target_modality) > 1:\n variable_scope_name = \"%s/%s\" % (modality_name, feature_name)\n else:\n variable_scope_name = modality_name\n bottom = self._hparams.bottom.get(\n feature_name,\n modalities.get_targets_bottom(modality))\n # TODO(aidangomez): share variables?\n with tf.variable_scope(variable_scope_name) as vs:\n self._add_variable_scope(variable_scope_name, vs)\n log_info(\"Transforming feature '%s' with %s.targets_bottom\",\n feature_name,\n modality_name)\n transformed_features[feature_name] = bottom(features[feature_name],\n self._hparams,\n vocab_size)\n else:\n bottom = self._hparams.bottom.get(feature_name,\n modalities.get_bottom(modality))\n do_reuse = modality_name in all_previous_modalities\n with tf.variable_scope(modality_name, reuse=do_reuse) as vs:\n self._add_variable_scope(modality_name, vs)\n log_info(\"Transforming feature '%s' with %s.bottom\",\n feature_name,\n modality_name)\n transformed_features[feature_name] = bottom(features[feature_name],\n self._hparams,\n vocab_size)\n all_previous_modalities.append(modality_name)\n\n for key in features:\n if key not in transformed_features:\n # For features without a modality, we pass them along as is\n transformed_features[key] = features[key]\n else:\n # Other features get passed along with the \"raw\" suffix\n transformed_features[key + \"_raw\"] = features[key]\n\n return transformed_features", "docstring": "Transforms features to feed into body.\n\nArgs:\n features: dict of str to Tensor. Typically it is the preprocessed data\n batch after Problem's preprocess_example().\n\nReturns:\n transformed_features: dict of same key-value pairs as features. 
The value\n Tensors are newly transformed.", "source": "juraj_google_style"} -{"code": "def timestamp(method='iso8601'):\n\n if method == 'iso8601':\n # ISO 8601\n # datetime.datetime.utcnow().isoformat()\n # datetime.datetime.now().isoformat()\n # utcnow\n tz_hour = time.timezone // 3600\n utc_offset = str(tz_hour) if tz_hour < 0 else '+' + str(tz_hour)\n stamp = time.strftime('%Y-%m-%dT%H%M%S') + utc_offset\n return stamp\n else:\n raise ValueError('only iso8601 is accepted for now')", "docstring": "make an iso8601 timestamp\n\nArgs:\n method (str): type of timestamp\n\nExample:\n >>> stamp = timestamp()\n >>> print('stamp = {!r}'.format(stamp))\n stamp = ...-...-...T...", "source": "juraj_google_style"} -{"code": "def _sendtpl(self, sender, to, subject, cc=None, bcc=None, attach=None, replyto=None, **kwargs):\n\n plain, html = self.body(**kwargs)\n self.mailer.send(sender, to, subject, plain=plain, html=html, cc=cc, bcc=bcc,\n replyto=replyto, attach=attach)\n return", "docstring": "Send a Letter from SENDER to TO, with the subject SUBJECT.\n Use the current template, with KWARGS as the context.\n\nArgs:\n - `sender`: unicode\n - `to`: unicode\n - `subject`: unicode\n - `cc`: str or [str]\n - `bcc`: str or [str]\n - `replyto`: str\n - `**kwargs`: objects\n\n Return: None\n Exceptions: None", "source": "juraj_google_style"} -{"code": "def get_compatible_systems(self, id_or_uri):\n\n uri = self._client.build_uri(id_or_uri) + \"/compatible-systems\"\n return self._client.get(uri)", "docstring": "Retrieves a collection of all storage systems that is applicable to this storage volume template.\n\nArgs:\n id_or_uri:\n Can be either the power device id or the uri\n\nReturns:\n list: Storage systems.", "source": "juraj_google_style"} -{"code": "def _ProcessAMCacheFileKey(self, am_entry, parser_mediator):\n\n amcache_datetime = am_entry.get_value_by_name(\n self._AMCACHE_DATETIME).get_data_as_integer()\n event_data = AmcacheEventData()\n\n event_data.full_path = am_entry.get_value_by_name(\n self._AMCACHE_FULL_PATH).get_data_as_string()\n # Strip off the 4 leading zero's from the sha1 hash.\n event_data.sha1 = am_entry.get_value_by_name(\n self._AMCACHE_SHA1).get_data_as_string()[4:]\n\n productname = am_entry.get_value_by_name(self._AMCACHE_PRODUCTNAME)\n if productname:\n event_data.productname = productname.get_data_as_string()\n\n companyname = am_entry.get_value_by_name(self._AMCACHE_COMPANYNAME)\n if companyname:\n event_data.companyname = companyname.get_data_as_string()\n\n fileversion = am_entry.get_value_by_name(self._AMCACHE_FILEVERSION)\n if fileversion:\n event_data.fileversion = fileversion.get_data_as_string()\n\n languagecode = am_entry.get_value_by_name(self._AMCACHE_LANGUAGECODE)\n if languagecode:\n event_data.languagecode = languagecode.get_data_as_integer()\n\n filesize = am_entry.get_value_by_name(self._AMCACHE_FILESIZE)\n if filesize:\n event_data.filesize = filesize.get_data_as_integer()\n\n filedescription = am_entry.get_value_by_name(self._AMCACHE_FILEDESCRIPTION)\n if filedescription:\n event_data.filedescription = filedescription.get_data_as_string()\n\n linkerts = am_entry.get_value_by_name(self._AMCACHE_LINKERTS)\n if linkerts:\n event_data.linkerts = linkerts.get_data_as_integer()\n\n lastmodifiedts = am_entry.get_value_by_name(self._AMCACHE_LASTMODIFIEDTS)\n if lastmodifiedts:\n event_data.lastmodifiedts = lastmodifiedts.get_data_as_integer()\n\n createdts = am_entry.get_value_by_name(self._AMCACHE_CREATEDTS)\n if createdts:\n event_data.createdts = 
createdts.get_data_as_integer()\n\n programid = am_entry.get_value_by_name(self._AMCACHE_PROGRAMID)\n if programid:\n event_data.programid = programid.get_data_as_string()\n\n event = time_events.DateTimeValuesEvent(\n filetime.Filetime(amcache_datetime),\n definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n if event_data.createdts:\n event = time_events.DateTimeValuesEvent(\n filetime.Filetime(event_data.createdts),\n definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n if event_data.lastmodifiedts:\n event = time_events.DateTimeValuesEvent(\n filetime.Filetime(event_data.lastmodifiedts),\n definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n if event_data.linkerts:\n event = time_events.DateTimeValuesEvent(\n posix_time.PosixTime(event_data.linkerts),\n definitions.TIME_DESCRIPTION_CHANGE)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an Amcache Root/File key for events.\n\nArgs:\n am_entry (pyregf.key): amcache File key.\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.", "source": "juraj_google_style"} -{"code": "def __init__(self, input_reader=None, output_writer=None):\n\n super(StorageMediaTool, self).__init__(\n input_reader=input_reader, output_writer=output_writer)\n self._custom_artifacts_path = None\n self._artifact_definitions_path = None\n self._artifact_filters = None\n self._credentials = []\n self._credential_configurations = []\n self._filter_file = None\n self._partitions = None\n self._process_vss = False\n self._source_scanner = source_scanner.SourceScanner()\n self._source_path = None\n self._source_path_specs = []\n self._textwrapper = textwrap.TextWrapper()\n self._user_selected_vss_stores = False\n self._volumes = None\n self._vss_only = False\n self._vss_stores = None", "docstring": "Initializes the CLI tool object.\n\nArgs:\n input_reader (Optional[InputReader]): input reader, where None indicates\n that the stdin input reader should be used.\n output_writer (Optional[OutputWriter]): output writer, where None\n indicates that the stdout output writer should be used.", "source": "juraj_google_style"} -{"code": "def _construct_w(self, inputs):\n\n weight_shape = self._kernel_shape + (1, 1)\n\n if \"w\" not in self._initializers:\n self._initializers[\"w\"] = create_weight_initializer(weight_shape[:2],\n dtype=inputs.dtype)\n\n w = tf.get_variable(\"w\",\n shape=weight_shape,\n dtype=inputs.dtype,\n initializer=self._initializers[\"w\"],\n partitioner=self._partitioners.get(\"w\", None),\n regularizer=self._regularizers.get(\"w\", None))\n return w", "docstring": "Construct the convolution weight matrix.\n\n Figures out the shape of the weight matrix, initialize it, and return it.\n\nArgs:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16` or `tf.float32`.\n\nReturns:\n w: A weight matrix of the same type as `inputs` and of shape\n [kernel_shape, 1, 1].", "source": "juraj_google_style"} -{"code": "def heightmap_get_normal(\n hm: np.ndarray, x: float, y: float, waterLevel: float\n) -> Tuple[float, float, float]:\n\n cn = ffi.new(\"float[3]\")\n lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, cn, waterLevel)\n return tuple(cn)", "docstring": "Return the map normal at given coordinates.\n\nArgs:\n hm (numpy.ndarray): A numpy.ndarray formatted for 
heightmap functions.\n x (float): The x coordinate.\n y (float): The y coordinate.\n waterLevel (float): The heightmap is considered flat below this value.\n\nReturns:\n Tuple[float, float, float]: An (x, y, z) vector normal.", "source": "juraj_google_style"} -{"code": "def load_library(lib, name=None, lib_cls=None):\n\n try:\n if lib_cls:\n return lib_cls(lib)\n else:\n return ctypes.CDLL(lib)\n except Exception:\n if name:\n lib_msg = '%s (%s)' % (name, lib)\n else:\n lib_msg = lib\n\n lib_msg += ' could not be loaded'\n\n if sys.platform == 'cygwin':\n lib_msg += ' in cygwin'\n _LOGGER.error(lib_msg, exc_info=True)\n return None", "docstring": "Loads a library. Catches and logs exceptions.\n\n Returns: the loaded library or None\n\nArgs:\n * lib -- path to/name of the library to be loaded\n * name -- the library's identifier (for logging)\n Defaults to None.\n * lib_cls -- library class. Defaults to None (-> ctypes.CDLL).", "source": "juraj_google_style"} -{"code": "def setup(self,\n host, flow_id,\n reason, grr_server_url, grr_username, grr_password, approvers=None,\n verify=True):\n\n super(GRRFlowCollector, self).setup(\n reason, grr_server_url, grr_username, grr_password,\n approvers=approvers, verify=verify)\n self.flow_id = flow_id\n self.host = host", "docstring": "Initializes a GRR flow collector.\n\nArgs:\n host: hostname of machine.\n flow_id: ID of GRR flow to retrieve.\n reason: justification for GRR access.\n grr_server_url: GRR server URL.\n grr_username: GRR username.\n grr_password: GRR password.\n approvers: list of GRR approval recipients.\n verify: boolean, whether to verify the GRR server's x509 certificate.", "source": "juraj_google_style"} -{"code": "def serialize(obj):\n\n LOGGER.debug('serialize(%s)', obj)\n\n if isinstance(obj, datetime.date):\n return simplejson.dumps(obj, default=encoders.as_date)\n\n elif hasattr(obj, '__dict__'):\n return simplejson.dumps(obj, default=encoders.as_object)\n\n return simplejson.dumps(obj)", "docstring": "Serialize the given object into JSON.\n\nArgs:\n obj: the object to be serialized.\n\nReturns:\n (str): JSON representation of the given object.", "source": "juraj_google_style"} -{"code": "def make_mixture_prior(latent_size, mixture_components):\n\n if mixture_components == 1:\n # See the module docstring for why we don't learn the parameters here.\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros([latent_size]),\n scale_identity_multiplier=1.0)\n\n loc = tf.compat.v1.get_variable(\n name=\"loc\", shape=[mixture_components, latent_size])\n raw_scale_diag = tf.compat.v1.get_variable(\n name=\"raw_scale_diag\", shape=[mixture_components, latent_size])\n mixture_logits = tf.compat.v1.get_variable(\n name=\"mixture_logits\", shape=[mixture_components])\n\n return tfd.MixtureSameFamily(\n components_distribution=tfd.MultivariateNormalDiag(\n loc=loc,\n scale_diag=tf.nn.softplus(raw_scale_diag)),\n mixture_distribution=tfd.Categorical(logits=mixture_logits),\n name=\"prior\")", "docstring": "Creates the mixture of Gaussians prior distribution.\n\nArgs:\n latent_size: The dimensionality of the latent representation.\n mixture_components: Number of elements of the mixture.\n\nReturns:\n random_prior: A `tfd.Distribution` instance representing the distribution\n over encodings in the absence of any evidence.", "source": "juraj_google_style"} -{"code": "def highlight(__text: str, *, lexer: str = 'diff',\n formatter: str = 'terminal') -> str:\n\n if sys.stdout.isatty():\n lexer = get_lexer_by_name(lexer)\n formatter = 
get_formatter_by_name(formatter)\n __text = pyg_highlight(__text, lexer, formatter)\n return __text", "docstring": "Highlight text highlighted using ``pygments``.\n\n Returns text untouched if colour output is not enabled.\n\n See also: :pypi:`Pygments`\n\nArgs:\n __text: Text to highlight\n lexer: Jinja lexer to use\n formatter: Jinja formatter to use\n\nReturns:\n Syntax highlighted output, when possible", "source": "juraj_google_style"} -{"code": "def get_variants(self, chromosome=None, start=None, end=None):\n\n query = {}\n if chromosome:\n query['chrom'] = chromosome\n if start:\n query['start'] = {'$lte': end}\n query['end'] = {'$gte': start}\n LOG.info(\"Find all variants {}\".format(query))\n return self.db.variant.find(query).sort([('start', ASCENDING)])", "docstring": "Return all variants in the database\n If no region is specified all variants will be returned.\n\nArgs:\n chromosome(str)\n start(int)\n end(int)\n\nReturns:\n variants(Iterable(Variant))", "source": "juraj_google_style"} -{"code": "def _local_var_name(splittable_dimensions, assignment):\n\n assignment_string = []\n for splittable in sorted(splittable_dimensions):\n if splittable in assignment:\n assignment_string.append(\"{}:{}\".format(splittable,\n assignment[splittable]))\n else:\n assignment_string.append(\"{}\".format(splittable))\n return \"y_(\" + \",\".join(assignment_string) + \")\"", "docstring": "Name for a local variable.\n\nArgs:\n splittable_dimensions: frozenset of names of splittable dimensions.\n assignment: dict from names of splittable dimensions to names of mesh\n dimensions.\n\nReturns:\n A string, the variable name.", "source": "juraj_google_style"} -{"code": "def AddFilesWithUnknownHashes(\n client_path_blob_refs,\n use_external_stores = True\n):\n\n hash_id_blob_refs = dict()\n client_path_hash_id = dict()\n metadatas = dict()\n\n all_client_path_blob_refs = list()\n for client_path, blob_refs in iteritems(client_path_blob_refs):\n # In the special case where there is only one blob, we don't need to go to\n # the data store to read said blob and rehash it, we have all that\n # information already available. 
For empty files without blobs, we can just\n # hash the empty string instead.\n if len(blob_refs) <= 1:\n if blob_refs:\n hash_id = rdf_objects.SHA256HashID.FromBytes(\n blob_refs[0].blob_id.AsBytes())\n else:\n hash_id = rdf_objects.SHA256HashID.FromData(b\"\")\n\n client_path_hash_id[client_path] = hash_id\n hash_id_blob_refs[hash_id] = blob_refs\n metadatas[hash_id] = FileMetadata(\n client_path=client_path, blob_refs=blob_refs)\n else:\n for blob_ref in blob_refs:\n all_client_path_blob_refs.append((client_path, blob_ref))\n\n client_path_offset = collections.defaultdict(lambda: 0)\n client_path_sha256 = collections.defaultdict(hashlib.sha256)\n verified_client_path_blob_refs = collections.defaultdict(list)\n\n client_path_blob_ref_batches = collection.Batch(\n items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE)\n\n for client_path_blob_ref_batch in client_path_blob_ref_batches:\n blob_id_batch = set(\n blob_ref.blob_id for _, blob_ref in client_path_blob_ref_batch)\n blobs = data_store.BLOBS.ReadBlobs(blob_id_batch)\n\n for client_path, blob_ref in client_path_blob_ref_batch:\n blob = blobs[blob_ref.blob_id]\n if blob is None:\n message = \"Could not find one of referenced blobs: {}\".format(\n blob_ref.blob_id)\n raise BlobNotFoundError(message)\n\n offset = client_path_offset[client_path]\n if blob_ref.size != len(blob):\n raise ValueError(\n \"Got conflicting size information for blob %s: %d vs %d.\" %\n (blob_ref.blob_id, blob_ref.size, len(blob)))\n if blob_ref.offset != offset:\n raise ValueError(\n \"Got conflicting offset information for blob %s: %d vs %d.\" %\n (blob_ref.blob_id, blob_ref.offset, offset))\n\n verified_client_path_blob_refs[client_path].append(blob_ref)\n client_path_offset[client_path] = offset + len(blob)\n client_path_sha256[client_path].update(blob)\n\n for client_path in iterkeys(client_path_sha256):\n sha256 = client_path_sha256[client_path].digest()\n hash_id = rdf_objects.SHA256HashID.FromBytes(sha256)\n\n client_path_hash_id[client_path] = hash_id\n hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]\n\n data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)\n\n if use_external_stores:\n for client_path in iterkeys(verified_client_path_blob_refs):\n metadatas[client_path_hash_id[client_path]] = FileMetadata(\n client_path=client_path,\n blob_refs=verified_client_path_blob_refs[client_path])\n\n EXTERNAL_FILE_STORE.AddFiles(metadatas)\n\n return client_path_hash_id", "docstring": "Adds new files consisting of given blob references.\n\nArgs:\n client_path_blob_refs: A dictionary mapping `db.ClientPath` instances to\n lists of blob references.\n use_external_stores: A flag indicating if the files should also be added to\n external file stores.\n\nReturns:\n A dictionary mapping `db.ClientPath` to hash ids of the file.\n\nRaises:\n BlobNotFoundError: If one of the referenced blobs cannot be found.", "source": "juraj_google_style"} -{"code": "def get_variant_slice(self, package_name, range_):\n\n variant_list = self.variant_lists.get(package_name)\n\n if variant_list is None:\n variant_list = _PackageVariantList(package_name, self.solver)\n self.variant_lists[package_name] = variant_list\n\n entries = variant_list.get_intersection(range_)\n if not entries:\n return None\n\n slice_ = _PackageVariantSlice(package_name,\n entries=entries,\n solver=self.solver)\n return slice_", "docstring": "Get a list of variants from the cache.\n\nArgs:\n package_name (str): Name of package.\n range_ (`VersionRange`): Package version 
range.\n\nReturns:\n `_PackageVariantSlice` object.", "source": "juraj_google_style"} -{"code": "def get_build_output(self, process):\n\n\n while True:\n output = process.stdout.readline()\n if output == b'' and process.poll() is not None:\n if process.returncode > 0:\n raise Exception(\"Compilation ended with an error\"\n \".\\nSTDERR\\n%s\\nSTDOUT\\n%s\" %\n (process.stderr.read(),\n process.stdout.read()))\n return\n if output:\n # Parse the output to get current and total tasks\n # This assumes the progress displayed by waf is in the form\n # [current/total]\n matches = re.search(r'\\[\\s*(\\d+?)/(\\d+)\\].*',\n output.strip().decode('utf-8'))\n if matches is not None:\n yield [int(matches.group(1)), int(matches.group(2))]", "docstring": "Parse the output of the ns-3 build process to extract the information\n that is needed to draw the progress bar.\n\nArgs:\n process: the subprocess instance to listen to.", "source": "juraj_google_style"} -{"code": "def __init__(self, message):\n\n super(IllegalOperation, self).__init__(\n reason=enums.ResultReason.ILLEGAL_OPERATION,\n message=message\n )", "docstring": "Create an IllegalOperation exception.\n\nArgs:\n message (string): A string containing information about the error.", "source": "juraj_google_style"} -{"code": "def _close_rpc_interface(self, connection_id, callback):\n\n\n try:\n context = self.connections.get_context(connection_id)\n except ArgumentError:\n callback(connection_id, self.id, False, \"Could not find connection information\")\n return\n\n self.connections.begin_operation(connection_id, 'close_interface', callback, self.get_config('default_timeout'))\n\n try:\n service = context['services'][TileBusService]\n header_characteristic = service[ReceiveHeaderChar]\n payload_characteristic = service[ReceivePayloadChar]\n except KeyError:\n self.connections.finish_operation(connection_id, False, \"Can't find characteristics to open rpc interface\")\n return\n\n self.bable.set_notification(\n enabled=False,\n connection_handle=context['connection_handle'],\n characteristic=header_characteristic,\n on_notification_set=[self._on_interface_closed, context, payload_characteristic],\n timeout=1.0\n )", "docstring": "Disable RPC interface for this IOTile device\n\nArgs:\n connection_id (int): The unique identifier for the connection\n callback (callback): Callback to be called when this command finishes\n callback(conn_id, adapter_id, success, failure_reason)", "source": "juraj_google_style"} -{"code": "def memoize(max_cache_size=1000):\n\n def wrapper(f):\n @wraps(f)\n def fn(*args, **kwargs):\n if kwargs:\n key = (args, tuple(kwargs.items()))\n else:\n key = args\n try:\n return fn.cache[key]\n except KeyError:\n if fn.count >= max_cache_size:\n fn.cache = {}\n fn.count = 0\n result = f(*args, **kwargs)\n fn.cache[key] = result\n fn.count += 1\n return result\n except TypeError:\n return f(*args, **kwargs)\n fn.cache = {}\n fn.count = 0\n return fn\n return wrapper", "docstring": "Python 2.4 compatible memoize decorator.\n It creates a cache that has a maximum size. If the cache exceeds the max,\n it is thrown out and a new one made. 
With such behavior, it is wise to set\n the cache just a little larger that the maximum expected need.\n\n Parameters:\n max_cache_size - the size to which a cache can grow", "source": "juraj_google_style"} -{"code": "def validate(self, scope: ValidationScope = ValidationScope.all,\n ctype: ContentType = ContentType.config) -> None:\n\n self.schema_node._validate(self, scope, ctype)", "docstring": "Validate the receiver's value.\n\nArgs:\n scope: Scope of the validation (syntax, semantics or all).\n ctype: Receiver's content type.\n\nRaises:\n SchemaError: If the value doesn't conform to the schema.\n SemanticError: If the value violates a semantic constraint.\n YangTypeError: If the value is a scalar of incorrect type.", "source": "juraj_google_style"} -{"code": "def _get_event_id(object_type: str) -> str:\n\n key = _keys.event_counter(object_type)\n DB.watch(key, pipeline=True)\n count = DB.get_value(key)\n DB.increment(key)\n DB.execute()\n if count is None:\n count = 0\n return '{}_event_{:08d}'.format(object_type, int(count))", "docstring": "Return an event key for the event on the object type.\n\n This must be a unique event id for the object.\n\nArgs:\n object_type (str): Type of object\n\nReturns:\n str, event id", "source": "juraj_google_style"} -{"code": "def _ip_unnumbered_name(self, **kwargs):\n\n\n method_name = 'interface_%s_ip_ip_config_unnumbered_ip_donor_'\\\n 'interface_name' % kwargs['int_type']\n ip_unnumbered_name = getattr(self._interface, method_name)\n config = ip_unnumbered_name(**kwargs)\n if kwargs['delete']:\n tag = 'ip-donor-interface-name'\n config.find('.//*%s' % tag).set('operation', 'delete')\n return config", "docstring": "Return the `ip unnumbered` donor name XML.\n\n You should not use this method.\n You probably want `Interface.ip_unnumbered`.\n\nArgs:\n int_type (str): Type of interface. (gigabitethernet,\n tengigabitethernet etc).\n delete (bool): Remove the configuration if ``True``.\n ip_donor_interface_name (str): The donor interface name (1, 2, etc)\n\nReturns:\n XML to be passed to the switch.\n\nRaises:\n None", "source": "juraj_google_style"} -{"code": "def __init__(self, resolver_context, file_system, path_spec, is_root=False):\n\n location = getattr(path_spec, 'location', None)\n\n # Windows does not support running os.stat on device files so we use\n # libsmdev to do an initial check.\n is_windows_device = False\n if platform.system() == 'Windows' and location:\n try:\n # pylint: disable=no-member\n is_windows_device = pysmdev.check_device(location)\n except IOError:\n pass\n\n stat_info = None\n if not is_windows_device and location:\n # We are only catching OSError. However on the Windows platform\n # a WindowsError can be raised as well. We are not catching that since\n # that error does not exist on non-Windows platforms.\n try:\n stat_info = os.lstat(location)\n except OSError as exception:\n raise errors.BackEndError(\n 'Unable to retrieve stat object with error: {0!s}'.format(\n exception))\n\n super(OSFileEntry, self).__init__(\n resolver_context, file_system, path_spec, is_root=is_root,\n is_virtual=False)\n self._is_windows_device = is_windows_device\n self._name = None\n self._stat_info = stat_info\n\n if is_windows_device:\n self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE\n\n elif stat_info:\n # If location contains a trailing segment separator and points to\n # a symbolic link to a directory stat info will not indicate\n # the file entry as a symbolic link. 
The following check ensures\n # that the LINK type is correctly detected.\n is_link = os.path.islink(location)\n\n # The stat info member st_mode can have multiple types e.g.\n # LINK and DIRECTORY in case of a symbolic link to a directory\n # dfVFS currently only supports one type so we need to check\n # for LINK first.\n if stat.S_ISLNK(stat_info.st_mode) or is_link:\n self.entry_type = definitions.FILE_ENTRY_TYPE_LINK\n elif stat.S_ISREG(stat_info.st_mode):\n self.entry_type = definitions.FILE_ENTRY_TYPE_FILE\n elif stat.S_ISDIR(stat_info.st_mode):\n self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY\n elif (stat.S_ISCHR(stat_info.st_mode) or\n stat.S_ISBLK(stat_info.st_mode)):\n self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE\n elif stat.S_ISFIFO(stat_info.st_mode):\n self.entry_type = definitions.FILE_ENTRY_TYPE_PIPE\n elif stat.S_ISSOCK(stat_info.st_mode):\n self.entry_type = definitions.FILE_ENTRY_TYPE_SOCKET", "docstring": "Initializes a file entry.\n\nArgs:\n resolver_context (Context): resolver context.\n file_system (FileSystem): file system.\n path_spec (PathSpec): path specification.\n is_root (Optional[bool]): True if the file entry is the root file entry\n of the corresponding file system.\n\nRaises:\n BackEndError: If an OSError comes up it is caught and an\n BackEndError error is raised instead.", "source": "juraj_google_style"} -{"code": "def parse(self, arguments):\n\n if not isinstance(arguments, list):\n # Default value may be a list of values. Most other arguments\n # will not be, so convert them into a single-item list to make\n # processing simpler below.\n arguments = [arguments]\n\n if self.present:\n # keep a backup reference to list of previously supplied option values\n values = self.value\n else:\n # \"erase\" the defaults with an empty list\n values = []\n\n for item in arguments:\n # have Flag superclass parse argument, overwriting self.value reference\n Flag.Parse(self, item) # also increments self.present\n values.append(self.value)\n\n # put list of option values back in the 'value' attribute\n self.value = values", "docstring": "Parses one or more arguments with the installed parser.\n\nArgs:\n arguments: a single argument or a list of arguments (typically a\n list of default values); a single argument is converted\n internally into a list containing one item.", "source": "juraj_google_style"} -{"code": "def Glob2Regex(glob_pattern):\n\n if not glob_pattern:\n raise ValueError('Missing glob pattern.')\n\n regex_pattern = []\n\n glob_pattern_index = 0\n glob_pattern_length = len(glob_pattern)\n while glob_pattern_index < glob_pattern_length:\n character = glob_pattern[glob_pattern_index]\n glob_pattern_index += 1\n\n if character == '*':\n regex_pattern.append('.*')\n\n elif character == '?':\n regex_pattern.append('.')\n\n elif character != '[':\n regex_character = re.escape(character)\n regex_pattern.append(regex_character)\n\n else:\n glob_group_index = glob_pattern_index\n\n if (glob_group_index < glob_pattern_length and\n glob_pattern[glob_group_index] == '!'):\n glob_group_index += 1\n\n if (glob_group_index < glob_pattern_length and\n glob_pattern[glob_group_index] == ']'):\n glob_group_index += 1\n\n while (glob_group_index < glob_pattern_length and\n glob_pattern[glob_group_index] != ']'):\n glob_group_index += 1\n\n if glob_group_index >= glob_pattern_length:\n regex_pattern.append('\\\\[')\n continue\n\n glob_group = glob_pattern[glob_pattern_index:glob_group_index]\n glob_pattern_index = glob_group_index + 1\n\n glob_group = 
glob_group.replace('\\\\', '\\\\\\\\')\n if py2to3.PY_3_7_AND_LATER:\n glob_group = glob_group.replace('|', '\\\\|')\n\n regex_pattern.append('[')\n\n if glob_group[0] == '!':\n regex_pattern.append('^')\n glob_group = glob_group[1:]\n\n elif glob_group[0] == '^':\n regex_pattern.append('\\\\')\n\n regex_pattern.append(glob_group)\n regex_pattern.append(']')\n\n return ''.join(regex_pattern)", "docstring": "Converts a glob pattern to a regular expression.\n\n This function supports basic glob patterns that consist of:\n * matches everything\n ? matches any single character\n [seq] matches any character in sequence\n [!seq] matches any character not in sequence\n\nArgs:\n glob_pattern (str): glob pattern.\n\nReturns:\n str: regular expression pattern.\n\nRaises:\n ValueError: if the glob pattern cannot be converted.", "source": "juraj_google_style"} -{"code": "def get_common_properties(root):\n\n properties = {}\n\n for elem in root.iterfind('commonProperties/property'):\n name = elem.attrib['name']\n\n if name == 'initial composition':\n properties['composition'] = {'species': [], 'kind': None}\n\n for child in elem.iter('component'):\n spec = {}\n spec['species-name'] = child.find('speciesLink').attrib['preferredKey']\n units = child.find('amount').attrib['units']\n\n # use InChI for unique species identifier (if present)\n try:\n spec['InChI'] = child.find('speciesLink').attrib['InChI']\n except KeyError:\n # TODO: add InChI validator/search\n warn('Missing InChI for species ' + spec['species-name'])\n pass\n\n # If mole or mass fraction, just set value\n if units in ['mole fraction', 'mass fraction', 'mole percent']:\n spec['amount'] = [float(child.find('amount').text)]\n elif units == 'percent':\n # assume this means mole percent\n warn('Assuming percent in composition means mole percent')\n spec['amount'] = [float(child.find('amount').text)]\n units = 'mole percent'\n elif units == 'ppm':\n # assume molar ppm, convert to mole fraction\n warn('Assuming molar ppm in composition and converting to mole fraction')\n spec['amount'] = [float(child.find('amount').text) * 1.e-6]\n units = 'mole fraction'\n elif units == 'ppb':\n # assume molar ppb, convert to mole fraction\n warn('Assuming molar ppb in composition and converting to mole fraction')\n spec['amount'] = [float(child.find('amount').text) * 1.e-9]\n units = 'mole fraction'\n else:\n raise KeywordError('Composition units need to be one of: mole fraction, '\n 'mass fraction, mole percent, percent, ppm, or ppb.'\n )\n\n properties['composition']['species'].append(spec)\n\n # check consistency of composition type\n if properties['composition']['kind'] is None:\n properties['composition']['kind'] = units\n elif properties['composition']['kind'] != units:\n raise KeywordError('composition units ' + units +\n ' not consistent with ' +\n properties['composition']['kind']\n )\n\n elif name in datagroup_properties:\n field = name.replace(' ', '-')\n units = elem.attrib['units']\n if units == 'Torr':\n units = 'torr'\n quantity = 1.0 * unit_registry(units)\n try:\n quantity.to(property_units[field])\n except pint.DimensionalityError:\n raise KeywordError('units incompatible for property ' + name)\n\n properties[field] = [' '.join([elem.find('value').text, units])]\n\n else:\n raise KeywordError('Property ' + name + ' not supported as common property')\n\n return properties", "docstring": "Read common properties from root of ReSpecTh XML file.\n\nArgs:\n root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\n properties 
(`dict`): Dictionary with common properties", "source": "juraj_google_style"} -{"code": "def _FormatMessage(template, parameters):\n\n def GetParameter(m):\n try:\n return parameters[int(m.group(0)[1:])]\n except IndexError:\n return INVALID_EXPRESSION_INDEX\n\n parts = template.split('$$')\n return '$'.join(re.sub(r'\\$\\d+', GetParameter, part) for part in parts)", "docstring": "Formats the message. Unescapes '$$' with '$'.\n\nArgs:\n template: message template (e.g. 'a = $0, b = $1').\n parameters: substitution parameters for the format.\n\nReturns:\n Formatted message with parameters embedded in template placeholders.", "source": "juraj_google_style"} -{"code": "def create_surrogate_run_config(hp):\n\n save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)\n save_ckpt_secs = FLAGS.save_checkpoints_secs or None\n if save_ckpt_secs:\n save_ckpt_steps = None\n assert FLAGS.surrogate_output_dir\n # the various custom getters we have written do not play well together yet.\n # TODO(noam): ask rsepassi for help here.\n daisy_chain_variables = (\n hp.daisy_chain_variables and hp.activation_dtype == \"float32\" and\n hp.weight_dtype == \"float32\")\n return trainer_lib.create_run_config(\n model_name=FLAGS.model,\n model_dir=os.path.expanduser(FLAGS.surrogate_output_dir),\n master=FLAGS.master,\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.tpu_num_shards,\n log_device_placement=FLAGS.log_device_placement,\n save_checkpoints_steps=save_ckpt_steps,\n save_checkpoints_secs=save_ckpt_secs,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_gpus=FLAGS.worker_gpu,\n gpu_order=FLAGS.gpu_order,\n num_async_replicas=FLAGS.worker_replicas,\n gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,\n enable_graph_rewriter=FLAGS.enable_graph_rewriter,\n use_tpu=FLAGS.use_tpu,\n schedule=FLAGS.schedule,\n no_data_parallelism=hp.no_data_parallelism,\n daisy_chain_variables=daisy_chain_variables,\n ps_replicas=FLAGS.ps_replicas,\n ps_job=FLAGS.ps_job,\n ps_gpu=FLAGS.ps_gpu,\n sync=FLAGS.sync,\n worker_id=FLAGS.worker_id,\n worker_job=FLAGS.worker_job,\n random_seed=FLAGS.random_seed,\n tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n log_step_count_steps=FLAGS.log_step_count_steps,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)", "docstring": "Create a run config.\n\nArgs:\n hp: model hyperparameters\n\nReturns:\n a run config", "source": "juraj_google_style"} -{"code": "def report_hit_filename(zipfilename: str, contentsfilename: str,\n show_inner_file: bool) -> None:\n\n if show_inner_file:\n print(\"{} [{}]\".format(zipfilename, contentsfilename))\n else:\n print(zipfilename)", "docstring": "For \"hits\": prints either the ``.zip`` filename, or the ``.zip`` filename\n and the inner filename.\n\nArgs:\n zipfilename: filename of the ``.zip`` file\n contentsfilename: filename of the inner file\n show_inner_file: if ``True``, show both; if ``False``, show just the\n ``.zip`` filename\n\n Returns:", "source": "juraj_google_style"} -{"code": "def stop(self, **kwargs):\n\n path = '%s/%s/stop' % (self.manager.path, self.get_id())\n self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Stop the environment.\n\nArgs:\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabStopError: If the operation failed", "source": "juraj_google_style"} -{"code": "def _ParseEventData(self, variable_length_section):\n\n event_data = WinJobEventData()\n event_data.application = (\n variable_length_section.application_name.rstrip('\\x00'))\n event_data.comment = variable_length_section.comment.rstrip('\\x00')\n event_data.parameters = (\n variable_length_section.parameters.rstrip('\\x00'))\n event_data.username = variable_length_section.author.rstrip('\\x00')\n event_data.working_directory = (\n variable_length_section.working_directory.rstrip('\\x00'))\n\n return event_data", "docstring": "Parses the event data form a variable-length data section.\n\nArgs:\n variable_length_section (job_variable_length_data_section): a\n Windows Scheduled Task job variable-length data section.\n\nReturns:\n WinJobEventData: event data of the job file.", "source": "juraj_google_style"} -{"code": "def destination(self, bearing, distance):\n\n return (segment.destination(bearing, distance) for segment in self)", "docstring": "Calculate destination locations for given distance and bearings.\n\nArgs:\n bearing (float): Bearing to move on in degrees\n distance (float): Distance in kilometres\n\nReturns:\n list of list of Point: Groups of points shifted by ``distance``\n and ``bearing``", "source": "juraj_google_style"} -{"code": "def get(self, report_id):\n\n return Report(\n self._app,\n self._swimlane.request('get', \"reports/{0}\".format(report_id)).json()\n )", "docstring": "Retrieve report by ID\n\nArgs:\n report_id (str): Full report ID\n\nReturns:\n Report: Corresponding Report instance", "source": "juraj_google_style"} -{"code": "def _interpretPayload(functioncode, payload):\n r\n raise NotImplementedError()\n output = ''\n output += 'Modbus payload decoder\\n'\n output += 'Input payload (length {} characters): {!r} \\n'.format(len(payload), payload)\n output += 'Function code: {} (dec).\\n'.format(functioncode)\n\n if len(payload) == 4:\n FourbyteMessageFirstHalfValue = _twoByteStringToNum(payload[0:2])\n FourbyteMessageSecondHalfValue = _twoByteStringToNum(payload[2:4])\n\n\n return output", "docstring": "r\"\"\"Generate a human readable description of a Modbus payload.\n\nArgs:\n * functioncode (int): Function code\n * payload (str): The payload that should be interpreted. 
It should be a byte string.\n\nReturns:\n A descriptive string.\n\n For example, the payload ``'\\x10\\x01\\x00\\x01'`` for functioncode 3 should give something like::\n\n TODO: Update", "source": "juraj_google_style"} -{"code": "def create_module_graph(module_spec):\n\n height, width = hub.get_expected_image_size(module_spec)\n with tf.Graph().as_default() as graph:\n resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])\n m = hub.Module(module_spec)\n bottleneck_tensor = m(resized_input_tensor)\n wants_quantization = any(node.op in FAKE_QUANT_OPS\n for node in graph.as_graph_def().node)\n return graph, bottleneck_tensor, resized_input_tensor, wants_quantization", "docstring": "Creates a graph and loads Hub Module into it.\n\nArgs:\n module_spec: the hub.ModuleSpec for the image module being used.\n\nReturns:\n graph: the tf.Graph that was created.\n bottleneck_tensor: the bottleneck values output by the module.\n resized_input_tensor: the input images, resized as expected by the module.\n wants_quantization: a boolean, whether the module has been instrumented\n with fake quantization ops.", "source": "juraj_google_style"} -{"code": "def delete_unspent_outputs(self, *unspent_outputs):\n\n if unspent_outputs:\n return backend.query.delete_unspent_outputs(\n self.connection, *unspent_outputs)", "docstring": "Deletes the given ``unspent_outputs`` (utxos).\n\nArgs:\n *unspent_outputs (:obj:`tuple` of :obj:`dict`): Variable\n length tuple or list of unspent outputs.", "source": "juraj_google_style"} -{"code": "def get_speaker_info(self, refresh=False, timeout=None):\n\n if self.speaker_info and refresh is False:\n return self.speaker_info\n else:\n response = requests.get('http://' + self.ip_address +\n ':1400/xml/device_description.xml',\n timeout=timeout)\n dom = XML.fromstring(response.content)\n\n device = dom.find('{urn:schemas-upnp-org:device-1-0}device')\n if device is not None:\n self.speaker_info['zone_name'] = device.findtext(\n '{urn:schemas-upnp-org:device-1-0}roomName')\n\n # no zone icon in device_description.xml -> player icon\n self.speaker_info['player_icon'] = device.findtext(\n '{urn:schemas-upnp-org:device-1-0}iconList/'\n '{urn:schemas-upnp-org:device-1-0}icon/'\n '{urn:schemas-upnp-org:device-1-0}url'\n )\n\n self.speaker_info['uid'] = self.uid\n self.speaker_info['serial_number'] = device.findtext(\n '{urn:schemas-upnp-org:device-1-0}serialNum')\n self.speaker_info['software_version'] = device.findtext(\n '{urn:schemas-upnp-org:device-1-0}softwareVersion')\n self.speaker_info['hardware_version'] = device.findtext(\n '{urn:schemas-upnp-org:device-1-0}hardwareVersion')\n self.speaker_info['model_number'] = device.findtext(\n '{urn:schemas-upnp-org:device-1-0}modelNumber')\n self.speaker_info['model_name'] = device.findtext(\n '{urn:schemas-upnp-org:device-1-0}modelName')\n self.speaker_info['display_version'] = device.findtext(\n '{urn:schemas-upnp-org:device-1-0}displayVersion')\n\n # no mac address - extract from serial number\n mac = self.speaker_info['serial_number'].split(':')[0]\n self.speaker_info['mac_address'] = mac\n\n return self.speaker_info\n return None", "docstring": "Get information about the Sonos speaker.\n\nArgs:\n refresh(bool): Refresh the speaker info cache.\n timeout: How long to wait for the server to send\n data before giving up, as a float, or a\n `(connect timeout, read timeout)` tuple\n e.g. (3, 5). 
Default is no timeout.\n\nReturns:\n dict: Information about the Sonos speaker, such as the UID,\n MAC Address, and Zone Name.", "source": "juraj_google_style"} -{"code": "def decompress_decoder(inputs,\n hparams,\n strides=(2, 2),\n kernel=(3, 3),\n name=None):\n\n with tf.variable_scope(name, default_name=\"decompress\"):\n x = inputs\n x = tf.layers.dense(x, hparams.hidden_size, name=name + \"_dense\")\n x = residual_block_layer(x, hparams)\n for i in range(hparams.num_compress_steps // 2):\n j = hparams.num_compress_steps // 2 - i - 1\n with tf.variable_scope(name + \"_%d\" % j):\n if hparams.do_decompress_attend:\n y = compress_self_attention_layer(\n x, hparams, name=\"decompress_selfatt\")\n x += y\n y = tf.layers.conv2d_transpose(\n x,\n hparams.hidden_size,\n kernel,\n strides=strides,\n padding=\"SAME\",\n activation=tf.nn.relu if i > 0 else None,\n name=\"decompress_conv\")\n x = y\n return x", "docstring": "Decoder that decompresses 2-D inputs by 2**num_compress_steps.\n\nArgs:\n inputs: Tensor of shape [batch, compress_height, compress_width, channels].\n hparams: HParams.\n strides: Tuple, strides for conv block.\n kernel: Tuple, kernel window size for conv block.\n name: string, variable scope.\n\nReturns:\n Tensor of shape [batch, height, width, hparams.hidden_size].", "source": "juraj_google_style"} -{"code": "def _ExtractContentFromDataStream(\n self, mediator, file_entry, data_stream_name):\n\n self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING\n\n if self._processing_profiler:\n self._processing_profiler.StartTiming('extracting')\n\n self._event_extractor.ParseDataStream(\n mediator, file_entry, data_stream_name)\n\n if self._processing_profiler:\n self._processing_profiler.StopTiming('extracting')\n\n self.processing_status = definitions.STATUS_INDICATOR_RUNNING\n\n self.last_activity_timestamp = time.time()", "docstring": "Extracts content from a data stream.\n\nArgs:\n mediator (ParserMediator): mediates the interactions between\n parsers and other components, such as storage and abort signals.\n file_entry (dfvfs.FileEntry): file entry to extract its content.\n data_stream_name (str): name of the data stream whose content is to be\n extracted.", "source": "juraj_google_style"} -{"code": "def geosearch(latitude, longitude, title=None, results=10, radius=1000):\n\n\n search_params = {\n 'list': 'geosearch',\n 'gsradius': radius,\n 'gscoord': '{0}|{1}'.format(latitude, longitude),\n 'gslimit': results\n }\n if title:\n search_params['titles'] = title\n\n raw_results = _wiki_request(search_params)\n\n if 'error' in raw_results:\n if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):\n raise HTTPTimeoutError('{0}|{1}'.format(latitude, longitude))\n else:\n raise WikipediaException(raw_results['error']['info'])\n\n search_pages = raw_results['query'].get('pages', None)\n if search_pages:\n search_results = (v['title'] for k, v in search_pages.items() if k != '-1')\n else:\n search_results = (d['title'] for d in raw_results['query']['geosearch'])\n\n return list(search_results)", "docstring": "Do a wikipedia geo search for `latitude` and `longitude`\n using HTTP API described in http://www.mediawiki.org/wiki/Extension:GeoData\n\nArgs:\n * latitude (float or decimal.Decimal)\n * longitude (float or decimal.Decimal)\n\n Keyword arguments:\n\n * title - The title of an article to search for\n * results - the maximum number of results returned\n * radius - Search radius in meters. 
The value must be between 10 and 10000", "source": "juraj_google_style"} -{"code": "def __init__(self, type, data):\n\n # Check type is type int\n if not isinstance(type, int):\n raise TypeError(\"ext type is not type integer\")\n # Check data is type bytes\n elif sys.version_info[0] == 3 and not isinstance(data, bytes):\n raise TypeError(\"ext data is not type \\'bytes\\'\")\n elif sys.version_info[0] == 2 and not isinstance(data, str):\n raise TypeError(\"ext data is not type \\'str\\'\")\n self.type = type\n self.data = data", "docstring": "Construct a new Ext object.\n\nArgs:\n type: application-defined type integer\n data: application-defined data byte array\n\nExample:\n >>> foo = umsgpack.Ext(0x05, b\"\\x01\\x02\\x03\")\n >>> umsgpack.packb({u\"special stuff\": foo, u\"awesome\": True})\n '\\x82\\xa7awesome\\xc3\\xadspecial stuff\\xc7\\x03\\x05\\x01\\x02\\x03'\n >>> bar = umsgpack.unpackb(_)\n >>> print(bar[\"special stuff\"])\n Ext Object (Type: 0x05, Data: 01 02 03)\n >>>", "source": "juraj_google_style"} -{"code": "def os_version(self, value):\n\n if value == self._defaults['ai.device.osVersion'] and 'ai.device.osVersion' in self._values:\n del self._values['ai.device.osVersion']\n else:\n self._values['ai.device.osVersion'] = value", "docstring": "The os_version property.\n\nArgs:\n value (string). the property value.", "source": "juraj_google_style"} -{"code": "def authenticate(self, request, username=None, password=None):\n\n if not hasattr(settings, 'MASTER_PASSWORD'):\n logging.debug(\"Master password not set.\")\n return None\n if check_password(password, settings.MASTER_PASSWORD):\n try:\n user = User.objects.get(username__iexact=username)\n except User.DoesNotExist:\n if settings.MASTER_NOTIFY:\n logger.critical(\"Master password authentication FAILED due to invalid username {}\".format(username))\n logger.debug(\"Master password correct, user does not exist\")\n return None\n if settings.MASTER_NOTIFY:\n logger.critical(\"Master password authentication SUCCEEDED with username {}\".format(username))\n logger.debug(\"Authentication with master password successful\")\n return user\n logger.debug(\"Master password authentication failed\")\n return None", "docstring": "Authenticate a username-password pair.\n\n Creates a new user if one is not already in the database.\n\nArgs:\n username\n The username of the `User` to authenticate.\n password\n The master password.\n\nReturns:\n `User`", "source": "juraj_google_style"} -{"code": "def __init__(self, nmr_items, ctype):\n\n self._ctype = ctype\n self._mot_float_dtype = None\n self._nmr_items = nmr_items", "docstring": "Adds a private memory array of the indicated size to the kernel data elements.\n\n This is useful if you want to have private memory arrays in kernel data structs.\n\nArgs:\n nmr_items (int): the size of the private memory array\n ctype (str): the desired c-type for this local memory object, like ``int``, ``float`` or ``mot_float_type``.", "source": "juraj_google_style"} -{"code": "def CSS_setStyleSheetText(self, styleSheetId, text):\n\n\t\tassert isinstance(text, (str,)\n\t\t ), \"Argument 'text' must be of type '['str']'. 
Received type: '%s'\" % type(\n\t\t text)\n\t\tsubdom_funcs = self.synchronous_command('CSS.setStyleSheetText',\n\t\t styleSheetId=styleSheetId, text=text)\n\t\treturn subdom_funcs", "docstring": "Function path: CSS.setStyleSheetText\n Domain: CSS\n Method name: setStyleSheetText\n\n Parameters:\n Required arguments:\n 'styleSheetId' (type: StyleSheetId) -> No description\n 'text' (type: string) -> No description\n\nReturns:\n 'sourceMapURL' (type: string) -> URL of source map associated with script (if any).\n\n Description: Sets the new stylesheet text.", "source": "juraj_google_style"} -{"code": "def evaluate(inputs_stream, predict_fun, metric_funs, rng):\n\n metrics = collections.defaultdict(float)\n count = 0\n for inp in inputs_stream:\n count += 1\n rng, subrng = jax_random.split(rng)\n preds = predict_fun(inp[0], rng=subrng)\n for m, f in six.iteritems(metric_funs):\n metrics[m] += f(inp, preds)\n return {m: v / count for (m, v) in six.iteritems(metrics)}", "docstring": "Evaluate.\n\nArgs:\n inputs_stream: iterable of inputs to evaluate on.\n predict_fun: function from inputs to predictions. params should already be\n partially applied.\n metric_funs: dict from metric name to metric function, which takes inputs\n and predictions and returns a scalar metric value.\n rng: random number generator.\n\nReturns:\n metrics: dict from metric name to metric value averaged over the number of\n inputs.", "source": "juraj_google_style"} -{"code": "def _GetPathSegmentSeparator(self, path):\n\n if path.startswith('\\\\') or path[1:].startswith(':\\\\'):\n return '\\\\'\n\n if path.startswith('/'):\n return '/'\n\n if '/' and '\\\\' in path:\n # Let's count slashes and guess which one is the right one.\n forward_count = len(path.split('/'))\n backward_count = len(path.split('\\\\'))\n\n if forward_count > backward_count:\n return '/'\n\n return '\\\\'\n\n # Now we are sure there is only one type of separators yet\n # the path does not start with one.\n if '/' in path:\n return '/'\n\n return '\\\\'", "docstring": "Given a path give back the path separator as a best guess.\n\nArgs:\n path (str): path.\n\nReturns:\n str: path segment separator.", "source": "juraj_google_style"} -{"code": "def derive_annotations(self, annotations):\n\n cls = type(self)\n # We use ordinals to avoid thunk materialization.\n return cls(\n self[0],\n self[1],\n self[2],\n self[3],\n annotations,\n self[5]\n )", "docstring": "Derives a new event from this one setting the ``annotations`` attribute.\n\nArgs:\n annotations: (Sequence[Union[amazon.ion.symbols.SymbolToken, unicode]]):\n The annotations associated with the derived event.\n\nReturns:\n IonEvent: The newly generated event.", "source": "juraj_google_style"} -{"code": "def transpose(self, name=None):\n\n if name is None:\n name = self.module_name + \"_transpose\"\n\n if self._data_format == DATA_FORMAT_NHWC:\n stride = self._stride[1:-1]\n else: # self._data_format == DATA_FORMAT_NCHW\n stride = self._stride[2:]\n\n return Conv2D(output_channels=lambda: self.input_channels,\n kernel_shape=self._kernel_shape,\n stride=stride,\n padding=self._padding,\n use_bias=self._use_bias,\n initializers=self._initializers,\n partitioners=self._partitioners,\n regularizers=self._regularizers,\n data_format=self._data_format,\n custom_getter=self._custom_getter,\n name=name)", "docstring": "Returns matching `Conv2D` module.\n\nArgs:\n name: Optional string assigning name of transpose module. 
The default name\n is constructed by appending \"_transpose\" to `self.name`.\n\nReturns:\n `Conv2D` module.", "source": "juraj_google_style"} -{"code": "def outgoing_edges(self, node):\n\n #TODO: pls make outgoig_edges less insane\n edges = self.edges()\n out_edges = []\n for out_node, in_node in edges:\n if node is out_node:\n out_edges.append((out_node, in_node))\n return tuple(out_edges)", "docstring": "Returns a ``tuple`` of outgoing edges for a **node object**.\n\nArgs:\n - node(``object``) **node object** present in the graph to be queried\n for outgoing edges.", "source": "juraj_google_style"} -{"code": "def get_subport_statistics(self, id_or_uri, port_name, subport_number):\n\n uri = self._client.build_uri(id_or_uri) + \"/statistics/{0}/subport/{1}\".format(port_name, subport_number)\n return self._client.get(uri)", "docstring": "Gets the subport statistics on an interconnect.\n\nArgs:\n id_or_uri: Can be either the interconnect id or the interconnect uri.\n port_name (str): A specific port name of an interconnect.\n subport_number (int): The subport.\n\nReturns:\n dict: The statistics for the interconnect that matches id, port_name, and subport_number.", "source": "juraj_google_style"} -{"code": "def create_workshift_profile(sender, instance, created, **kwargs):\n\n if instance.user.username == ANONYMOUS_USERNAME or \\\n instance.status != UserProfile.RESIDENT:\n return\n\n try:\n semester = Semester.objects.get(current=True)\n except (Semester.DoesNotExist, Semester.MultipleObjectsReturned):\n pass\n else:\n profile, created = WorkshiftProfile.objects.get_or_create(\n user=instance.user,\n semester=semester,\n )\n if created:\n utils.make_workshift_pool_hours(\n semester=semester,\n profiles=[profile],\n )", "docstring": "Function to add a workshift profile for every User that is created.\n Parameters:\n instance is an of UserProfile that was just saved.", "source": "juraj_google_style"} -{"code": "def get_counters(counter_list):\n\n if not isinstance(counter_list, list):\n raise CommandExecutionError('counter_list must be a list of tuples')\n\n try:\n # Start a Query instances\n query = win32pdh.OpenQuery()\n\n # Build the counters\n counters = build_counter_list(counter_list)\n\n # Add counters to the Query\n for counter in counters:\n counter.add_to_query(query)\n\n # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/collecting-performance-data\n win32pdh.CollectQueryData(query)\n # The sleep here is required for counters that require more than 1\n # reading\n time.sleep(1)\n win32pdh.CollectQueryData(query)\n ret = {}\n\n for counter in counters:\n try:\n ret.update({counter.path: counter.value()})\n except pywintypes.error as exc:\n if exc.strerror == 'No data to return.':\n # Some counters are not active and will throw an error if\n # there is no data to return\n continue\n else:\n raise\n\n finally:\n win32pdh.CloseQuery(query)\n\n return ret", "docstring": "Get the values for the passes list of counters\n\nArgs:\n counter_list (list):\n A list of counters to lookup\n\nReturns:\n dict: A dictionary of counters and their values", "source": "juraj_google_style"} -{"code": "def titles(self, unique=False):\n\n if unique:\n return tools.uniqued(title for _, title in self.iterfiles())\n return [title for _, title in self.iterfiles()]", "docstring": "Return a list of all available spreadsheet titles.\n\nArgs:\n unique (bool): drop duplicates\n\nReturns:\n list: list of title/name strings", "source": "juraj_google_style"} -{"code": "def on_core_metadata_event(self, 
event):\n\n core_metadata = json.loads(event.log_message.message)\n input_names = ','.join(core_metadata['input_names'])\n output_names = ','.join(core_metadata['output_names'])\n target_nodes = ','.join(core_metadata['target_nodes'])\n\n self._run_key = RunKey(input_names, output_names, target_nodes)\n if not self._graph_defs:\n self._graph_defs_arrive_first = False\n else:\n for device_name in self._graph_defs:\n self._add_graph_def(device_name, self._graph_defs[device_name])\n\n self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))\n\n # Wait for acknowledgement from client. Blocks until an item is got.\n logger.info('on_core_metadata_event() waiting for client ack (meta)...')\n self._incoming_channel.get()\n logger.info('on_core_metadata_event() client ack received (meta).')", "docstring": "Implementation of the core metadata-carrying Event proto callback.\n\nArgs:\n event: An Event proto that contains core metadata about the debugged\n Session::Run() in its log_message.message field, as a JSON string.\n See the doc string of debug_data.DebugDumpDir.core_metadata for details.", "source": "juraj_google_style"} -{"code": "def match_pattern(expr_or_pattern: object, expr: object) -> MatchDict:\n\n try: # first try expr_or_pattern as a Pattern\n return expr_or_pattern.match(expr)\n except AttributeError: # expr_or_pattern is an expr, not a Pattern\n if expr_or_pattern == expr:\n return MatchDict() # success\n else:\n res = MatchDict()\n res.success = False\n res.reason = \"Expressions '%s' and '%s' are not the same\" % (\n repr(expr_or_pattern), repr(expr))\n return res", "docstring": "Recursively match `expr` with the given `expr_or_pattern`\n\nArgs:\n expr_or_pattern: either a direct expression (equal to `expr` for a\n successful match), or an instance of :class:`Pattern`.\n expr: the expression to be matched", "source": "juraj_google_style"} -{"code": "def slice_hidden(self, x):\n\n x_sliced = tf.reshape(\n x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim])\n return x_sliced", "docstring": "Slice encoder hidden state into block_dim.\n\nArgs:\n x: Encoder hidden state of shape [-1, hidden_size].\n\nReturns:\n Sliced states of shape [-1, num_blocks, block_dim].", "source": "juraj_google_style"} -{"code": "def _get_array(self, handle: int) -> np.ndarray:\n\n tup = self._arrays[handle]\n assert tup is not None\n c_arr, shape = tup\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n result = np.ctypeslib.as_array(c_arr)\n result.shape = shape\n return result", "docstring": "Returns the array with the given handle.\n\nArgs:\n handle: The handle of the array whose memory should be freed. 
This\n handle must come from the _create_array method.\n\nReturns:\n The numpy ndarray with the handle given from _create_array.", "source": "juraj_google_style"} -{"code": "def _get_original_composition_ratio(self, reaction):\n\n if self.c1_original == self.c2_original:\n return 1\n c1_coeff = reaction.get_coeff(self.c1_original) \\\n if self.c1_original in reaction.reactants else 0\n c2_coeff = reaction.get_coeff(self.c2_original) \\\n if self.c2_original in reaction.reactants else 0\n return c1_coeff * 1.0 / (c1_coeff + c2_coeff)", "docstring": "Returns the molar mixing ratio between the reactants with ORIGINAL (\n instead of processed) compositions for a reaction.\n\nArgs:\n reaction (Reaction): Reaction object that contains the original\n reactant compositions.\n\nReturns:\n The molar mixing ratio between the original reactant\n compositions for a reaction.", "source": "juraj_google_style"} -{"code": "def __init__(self, location=None, parent=None, **kwargs):\n\n if not parent:\n raise ValueError('Missing parent value.')\n\n super(ZipPathSpec, self).__init__(\n location=location, parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\n Note that the zip file path specification must have a parent.\n\nArgs:\n location (Optional[str]): ZIP file internal location string prefixed\n with a path separator character.\n parent (Optional[PathSpec]): parent path specification.\n\nRaises:\n ValueError: when parent is not set.", "source": "juraj_google_style"} -{"code": "def get(self, *, txid, headers=None):\n\n block_list = self.transport.forward_request(\n method='GET',\n path=self.path,\n params={'transaction_id': txid},\n headers=headers,\n )\n return block_list[0] if len(block_list) else None", "docstring": "Get the block that contains the given transaction id (``txid``)\n else return ``None``\n\nArgs:\n txid (str): Transaction id.\n headers (dict): Optional headers to pass to the request.\n\nReturns:\n :obj:`list` of :obj:`int`: List of block heights.", "source": "juraj_google_style"} -{"code": "def AddRow(self, values):\n\n super(CLITableView, self).AddRow(values)\n\n value_length = len(values[0])\n if value_length > self._column_width:\n self._column_width = value_length", "docstring": "Adds a row of values.\n\nArgs:\n values (list[object]): values.\n\nRaises:\n ValueError: if the number of values is out of bounds.", "source": "juraj_google_style"} -{"code": "def ParseChat(self, parser_mediator, query, row, **unused_kwargs):\n\n query_hash = hash(query)\n\n participants = self._GetRowValue(query_hash, row, 'participants')\n author = self._GetRowValue(query_hash, row, 'author')\n dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner')\n from_displayname = self._GetRowValue(query_hash, row, 'from_displayname')\n\n accounts = []\n participants = participants.split(' ')\n for participant in participants:\n if participant != author:\n accounts.append(participant)\n\n to_account = ', '.join(accounts)\n if not to_account:\n to_account = dialog_partner or 'Unknown User'\n\n from_account = '{0:s} <{1:s}>'.format(from_displayname, author)\n\n event_data = SkypeChatEventData()\n event_data.from_account = from_account\n event_data.query = query\n event_data.text = self._GetRowValue(query_hash, row, 'body_xml')\n event_data.title = self._GetRowValue(query_hash, row, 'title')\n event_data.to_account = to_account\n\n timestamp = self._GetRowValue(query_hash, row, 'timestamp')\n if timestamp:\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = 
time_events.DateTimeValuesEvent(date_time, 'Chat from Skype')\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a chat message.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n query (str): query that created the row.\n row (sqlite3.Row): row resulting from query.", "source": "juraj_google_style"} -{"code": "def resolve_lookups(variable, context, provider):\n\n resolved_lookups = {}\n for lookup in variable.lookups:\n try:\n handler = LOOKUP_HANDLERS[lookup.type]\n except KeyError:\n raise UnknownLookupType(lookup)\n try:\n resolved_lookups[lookup] = handler(\n value=lookup.input,\n context=context,\n provider=provider,\n )\n except Exception as e:\n raise FailedVariableLookup(variable.name, lookup, e)\n return resolved_lookups", "docstring": "Resolve a set of lookups.\n\nArgs:\n variable (:class:`stacker.variables.Variable`): The variable resolving\n it's lookups.\n context (:class:`stacker.context.Context`): stacker context\n provider (:class:`stacker.provider.base.BaseProvider`): subclass of the\n base provider\n\nReturns:\n dict: dict of Lookup -> resolved value", "source": "juraj_google_style"} -{"code": "def buy(self, index):\n\n item = self.items[index]\n us = UserShopFront(self.usr, item.owner, item.id, str(item.price))\n us.load()\n\n if not item.name in us.inventory:\n return False\n\n if not us.inventory[item.name].buy():\n return False\n\n return True", "docstring": "Attempts to buy indexed item, returns result\n\n Parameters:\n index (int) -- The item index\n\n Returns\n bool - True if item was bought, false otherwise", "source": "juraj_google_style"} -{"code": "def get_option(option_name, section_name=\"main\", default=_sentinel, cfg_file=cfg_file):\n\n defaults = get_defaults()\n\n # As a quality issue, we strictly disallow looking up an option that does not have a default\n # value specified in the code\n #if option_name not in defaults.get(section_name, {}) and default == _sentinel:\n # raise ValueError(\"There is no default value for Option %s in section %s\" % (option_name, section_name))\n\n # If default argument was provided, we set variable my_defaults to that\n # otherwise use the global nago defaults\n if default != _sentinel:\n my_defaults = {option_name: default}\n else:\n my_defaults = defaults.get('section_name', {})\n\n # Lets parse our configuration file and see what we get\n parser = get_parser(cfg_file)\n return parser.get(section_name, option_name, vars=my_defaults)", "docstring": "Returns a specific option specific in a config file\n\nArgs:\n option_name -- Name of the option (example host_name)\n section_name -- Which section of the config (default: name)\n\nExample:\n >>> get_option(\"some option\", default=\"default result\")\n 'default result'", "source": "juraj_google_style"} -{"code": "def remove_pardir_symbols(path, sep=os.sep, pardir=os.pardir):\n\n bits = path.split(sep)\n bits = (x for x in bits if x != pardir)\n return sep.join(bits)", "docstring": "Remove relative path symobls such as '..'\n\nArgs:\n path (str): A target path string\n sep (str): A strint to refer path delimiter (Default: `os.sep`)\n pardir (str): A string to refer parent directory (Default: `os.pardir`)\n\nReturns:\n str", "source": "juraj_google_style"} -{"code": "def exit_hook(callable, once=True):\n r\n if once and callable in ExitHooks:\n return\n\n ExitHooks.append(callable)", "docstring": "r\"\"\"A decorator that makes the decorated function to run 
while ec exits.\n\nArgs:\n callable (callable): The target callable.\n once (bool): Avoids adding a func to the hooks, if it has been added already. Defaults to True.\n\nNote:\n Hooks are processedd in a LIFO order.", "source": "juraj_google_style"} -{"code": "def _alephResultToDict(dom):\n\n result = {}\n for i in dom.childs:\n if not i.isOpeningTag():\n continue\n\n keyword = i.getTagName().strip()\n value = _tryConvertToInt(i.getContent().strip())\n\n # if there are multiple tags with same keyword, add values into\n # array, instead of rewriting existing value at given keyword\n if keyword in result: # if it is already there ..\n if isinstance(result[keyword], list): # and it is list ..\n result[keyword].append(value) # add it to list\n else: # or make it array\n result[keyword] = [result[keyword], value]\n else: # if it is not in result, add it\n result[keyword] = value\n\n return result", "docstring": "Convert part of non-nested XML to :py:class:`dict`.\n\nArgs:\n dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).\n\nReturns:\n dict: with python data", "source": "juraj_google_style"} -{"code": "def add_group_coordinator(self, group, response):\n\n log.debug(\"Updating coordinator for %s: %s\", group, response)\n error_type = Errors.for_code(response.error_code)\n if error_type is not Errors.NoError:\n log.error(\"GroupCoordinatorResponse error: %s\", error_type)\n self._groups[group] = -1\n return False\n\n node_id = response.coordinator_id\n coordinator = BrokerMetadata(\n response.coordinator_id,\n response.host,\n response.port,\n None)\n\n # Assume that group coordinators are just brokers\n # (this is true now, but could diverge in future)\n if node_id not in self._brokers:\n self._brokers[node_id] = coordinator\n\n # If this happens, either brokers have moved without\n # changing IDs, or our assumption above is wrong\n else:\n node = self._brokers[node_id]\n if coordinator.host != node.host or coordinator.port != node.port:\n log.error(\"GroupCoordinator metadata conflicts with existing\"\n \" broker metadata. Coordinator: %s, Broker: %s\",\n coordinator, node)\n self._groups[group] = node_id\n return False\n\n log.info(\"Group coordinator for %s is %s\", group, coordinator)\n self._groups[group] = node_id\n return True", "docstring": "Update with metadata for a group coordinator\n\nArgs:\n group (str): name of group from GroupCoordinatorRequest\n response (GroupCoordinatorResponse): broker response\n\nReturns:\n bool: True if metadata is updated, False on error", "source": "juraj_google_style"} -{"code": "def add_handler(self, handler):\n\n handler['logger'] = self._get_logger(handler)\n handler['reads'] = 0\n handler['data_read'] = 0\n\n self.capture_handlers.append(handler)", "docstring": "Add an additional handler\n\nArgs:\n handler:\n A dictionary of handler configuration for the handler\n that should be added. 
See :func:`__init__` for details\n on valid parameters.", "source": "juraj_google_style"} -{"code": "def get(self, page=0, size=10):\n\n dash_list = r_db.zrevrange(config.DASH_ID_KEY, 0, -1, True)\n id_list = dash_list[page * size : page * size + size]\n dash_meta = []\n data = []\n if id_list:\n dash_meta = r_db.hmget(config.DASH_META_KEY, [i[0] for i in id_list])\n data = [json.loads(i) for i in dash_meta]\n\n return build_response(dict(data=data, code=200))", "docstring": "Get dashboard meta info from in page `page` and page size is `size`.\n\nArgs:\n page: page number.\n size: size number.\n\nReturns:\n list of dict containing the dash_id and accordingly meta info.\n maybe empty list [] when page * size > total dashes in db. that's reasonable.", "source": "juraj_google_style"} -{"code": "def np2str(value):\n\n if hasattr(value, 'dtype') and \\\n issubclass(value.dtype.type, (np.string_, np.object_)) and value.size == 1:\n value = np.asscalar(value)\n if not isinstance(value, str):\n # python 3 - was scalar numpy array of bytes\n # otherwise python 2 - scalar numpy array of 'str'\n value = value.decode()\n return value\n else:\n raise ValueError(\"Array is not a string type or is larger than 1\")", "docstring": "Convert an `numpy.string_` to str.\n\nArgs:\n value (ndarray): scalar or 1-element numpy array to convert\n\nRaises:\n ValueError: if value is array larger than 1-element or it is not of\n type `numpy.string_` or it is not a numpy array", "source": "juraj_google_style"} -{"code": "def __init__(self, label_name, value_type, kind, update_label_func):\n\n self.label_name = label_name\n self.kind = kind\n self.update_label_func = update_label_func\n self.value_type = value_type", "docstring": "Constructor.\n\n update_label_func is used when updating a label in an `Operation` from a\n `ReportRequestInfo`.\n\nArgs:\n label_name (str): the name of the label descriptor\n value_type (:class:`ValueType`): the `value type` of the described metric\n kind (:class:`Kind`): the ``kind`` of the described metric\n update_op_func (function): the func to update an operation", "source": "juraj_google_style"} -{"code": "def unparse_headers(hdrs):\n\n return \"\".join([unparse_header(n, v) for n, v in hdrs.items()]) + \"\\r\\n\"", "docstring": "Parse a dictionary of headers to a string.\n\nArgs:\n hdrs: A dictionary of headers.\n\nReturns:\n The headers as a string that can be used in an NNTP POST.", "source": "juraj_google_style"} -{"code": "def from_string(string):\n\n lines = list(clean_lines(string.splitlines()))\n\n def input_mode(line):\n if line[0] == \"&\":\n return (\"sections\", line[1:].lower())\n elif \"ATOMIC_SPECIES\" in line:\n return (\"pseudo\", )\n elif \"K_POINTS\" in line:\n return (\"kpoints\", line.split(\"{\")[1][:-1])\n elif \"CELL_PARAMETERS\" in line or \"ATOMIC_POSITIONS\" in line:\n return (\"structure\", line.split(\"{\")[1][:-1])\n elif line == \"/\":\n return None\n else:\n return mode\n\n sections = {\"control\": {}, \"system\": {}, \"electrons\": {}, \n \"ions\": {}, \"cell\":{}}\n pseudo = {}\n pseudo_index = 0\n lattice = []\n species = []\n coords = []\n structure = None\n site_properties = {\"pseudo\":[]}\n mode = None\n for line in lines:\n mode = input_mode(line)\n if mode == None:\n pass\n elif mode[0] == \"sections\":\n section = mode[1]\n m = re.match(r'(\\w+)\\(?(\\d*?)\\)?\\s*=\\s*(.*)', line)\n if m:\n key = m.group(1).strip()\n key_ = m.group(2).strip()\n val = m.group(3).strip()\n if key_ != \"\":\n if sections[section].get(key, None) == None:\n val_ = 
[0.0]*20 # MAX NTYP DEFINITION\n val_[int(key_)-1] = PWInput.proc_val(key, val)\n sections[section][key] = val_\n\n site_properties[key] = []\n else:\n sections[section][key][int(key_)-1] = PWInput.proc_val(key, val) \n else:\n sections[section][key] = PWInput.proc_val(key, val)\n\n elif mode[0] == \"pseudo\":\n m = re.match(r'(\\w+)\\s+(\\d*.\\d*)\\s+(.*)', line)\n if m:\n pseudo[m.group(1).strip()] = {}\n pseudo[m.group(1).strip()][\"index\"] = pseudo_index\n pseudo[m.group(1).strip()][\"pseudopot\"] = m.group(3).strip()\n pseudo_index += 1\n elif mode[0] == \"kpoints\":\n m = re.match(r'(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)', line)\n if m:\n kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3)))\n kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6)))\n else:\n kpoints_mode = mode[1]\n elif mode[0] == \"structure\":\n m_l = re.match(r'(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)', line)\n m_p = re.match(r'(\\w+)\\s+(-?\\d+\\.\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)', line)\n if m_l:\n lattice += [ float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3)) ]\n elif m_p:\n site_properties[\"pseudo\"].append(pseudo[m_p.group(1)][\"pseudopot\"])\n species += [pseudo[m_p.group(1)][\"pseudopot\"].split(\".\")[0]]\n coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]]\n\n for k, v in site_properties.items():\n if k != \"pseudo\":\n site_properties[k].append(sections['system'][k][pseudo[m_p.group(1)][\"index\"]])\n if mode[1] == \"angstrom\":\n coords_are_cartesian = True\n elif mode[1] == \"crystal\":\n coords_are_cartesian = False\n\n structure = Structure(Lattice(lattice), species, coords, \n coords_are_cartesian=coords_are_cartesian,\n site_properties=site_properties)\n return PWInput(structure=structure, control=sections[\"control\"],\n system=sections[\"system\"], electrons=sections[\"electrons\"], \n ions=sections[\"ions\"], cell=sections[\"cell\"], kpoints_mode=kpoints_mode,\n kpoints_grid=kpoints_grid, kpoints_shift=kpoints_shift)", "docstring": "Reads an PWInput object from a string.\n\nArgs:\n string (str): PWInput string\n\nReturns:\n PWInput object", "source": "juraj_google_style"} -{"code": "def set_property(self, name, value, update_session=True):\n\n if type(value) == datetime:\n value = value.isoformat()\n else:\n value = value\n\n try:\n prop = self.get_property(name)\n if prop.value == value:\n return False\n\n prop.value = value\n\n except AttributeError:\n prop = ResourceProperty()\n prop.resource_id = self.id\n prop.name = name\n prop.value = value\n\n if update_session:\n db.session.add(prop)\n\n return True", "docstring": "Create or set the value of a property. Returns `True` if the property was created or updated, or `False` if\n there were no changes to the value of the property.\n\nArgs:\n name (str): Name of the property to create or update\n value (any): Value of the property. This can be any type of JSON serializable data\n update_session (bool): Automatically add the change to the SQLAlchemy session. 
Default: True\n\nReturns:\n `bool`", "source": "juraj_google_style"} -{"code": "def _normalize_field_name(self, field_name) -> str:\n\n\n if isinstance(field_name, tuple):\n field_name, _ = field_name\n\n return field_name", "docstring": "Normalizes a field name into a string by\n extracting the field name if it was specified\n as a reference to a HStore key (as a tuple).\n\nArgs:\n field_name:\n The field name to normalize.\n\nReturns:\n The normalized field name.", "source": "juraj_google_style"} -{"code": "def getRuleForLogicalInterface(self, logicalInterfaceId, ruleId, draft=False):\n\n if draft:\n req = ApiClient.oneRuleForLogicalInterfaceUrl % (self.host, \"/draft\", logicalInterfaceId, ruleId)\n else:\n req = ApiClient.oneRuleForLogicalInterfaceUrl % (self.host, \"\", logicalInterfaceId, ruleId)\n resp = requests.get(req, auth=self.credentials, verify=self.verify)\n if resp.status_code == 200:\n self.logger.debug(\"logical interface rule retrieved\")\n else:\n raise ibmiotf.APIException(resp.status_code, \"HTTP error getting logical interface rule\", resp)\n return resp.json()", "docstring": "Gets a rule for a logical interface.\n Parameters:\n - logicalInterfaceId (string)\n - ruleId (string)\n - draft (boolean)\n Throws APIException on failure.", "source": "juraj_google_style"} -{"code": "def pbs_for_set_no_merge(document_path, document_data):\n\n extractor = DocumentExtractor(document_data)\n\n if extractor.deleted_fields:\n raise ValueError(\n \"Cannot apply DELETE_FIELD in a set request without \"\n \"specifying 'merge=True' or 'merge=[field_paths]'.\"\n )\n\n # Conformance tests require send the 'update_pb' even if the document\n # contains only transforms.\n write_pbs = [extractor.get_update_pb(document_path)]\n\n if extractor.has_transforms:\n transform_pb = extractor.get_transform_pb(document_path)\n write_pbs.append(transform_pb)\n\n return write_pbs", "docstring": "Make ``Write`` protobufs for ``set()`` methods.\n\nArgs:\n document_path (str): A fully-qualified document path.\n document_data (dict): Property names and values to use for\n replacing a document.\n\nReturns:\n List[google.cloud.firestore_v1beta1.types.Write]: One\n or two ``Write`` protobuf instances for ``set()``.", "source": "juraj_google_style"} -{"code": "def flatten(self, d=None):\n\n if d is None:\n d = {}\n if self.name is not None:\n d[self.name] = self\n for child in self.children:\n child.flatten(d=d)\n return d", "docstring": "Flatten tree structure to a one level dictionary.\n\nArgs:\n d (dict, optional): output dictionary to update\n\nReturns:\n dict: Node.name -> Node. 
The returned dictionary includes the\n current Node and all its children.", "source": "juraj_google_style"} -{"code": "def setViewModel(self, model):\n\n if isinstance(model, DataFrameModel):\n self.enableEditing(False)\n self.uncheckButton()\n\n selectionModel = self.tableView.selectionModel()\n self.tableView.setModel(model)\n model.dtypeChanged.connect(self.updateDelegate)\n model.dataChanged.connect(self.updateDelegates)\n del selectionModel", "docstring": "Sets the model for the enclosed TableView in this widget.\n\nArgs:\n model (DataFrameModel): The model to be displayed by\n the Table View.", "source": "juraj_google_style"} -{"code": "def _somethingFound(self, data, mode=\"phonefy\"):\n\n if data:\n try:\n for text in self.notFoundText[mode]:\n if text in data:\n return False\n return True\n except AttributeError as e:\n # Update to version 2 of the wrappers.\n verifier = self.modes.get(mode)\n if verifier:\n if verifier.get(\"not_found_text\", \"\") in data:\n return False\n else:\n return True\n return False", "docstring": "Verifying if something was found.\n\nArgs:\n -----\n data: Data where the self.notFoundText will be searched.\n mode: Mode to be executed.\n\nReturns:\n -------\n True if exists.", "source": "juraj_google_style"} -{"code": "def configure_ospf(self, cmd):\n\n config = self.get()\n cmds = ['router ospf {}'.format(config['ospf_process_id'])]\n cmds.extend(make_iterable(cmd))\n return super(Ospf, self).configure(cmds)", "docstring": "Allows for a list of OSPF subcommands to be configured\"\n\nArgs:\n cmd: (list or str): Subcommand to be entered\n\nReturns:\n bool: True if all the commands completed successfully", "source": "juraj_google_style"} -{"code": "def import_tracks(self, import_tracks):\n\n\n if isinstance(import_tracks, tracks.Track):\n import_tracks = [import_tracks]\n\n idx_mapping = {}\n\n for track in import_tracks:\n idx_mapping[track.idx] = track\n\n # Add index to idx if already existing\n if track.idx in self._tracks.keys():\n track.idx = naming.index_name_if_in_list(track.idx, self._tracks.keys())\n\n self._tracks[track.idx] = track\n\n return idx_mapping", "docstring": "Add the given tracks/track to the corpus.\n If any of the given track-ids already exists, a suffix is appended so it is unique.\n\nArgs:\n import_tracks (list): Either a list of or a single :py:class:`audiomate.tracks.Track`.\n\nReturns:\n dict: A dictionary containing track-idx mappings (old-track-idx/track-instance).\n If a track is imported, whose idx already exists this mapping can be used to check\n the new id.", "source": "juraj_google_style"} -{"code": "def noisy_moment(self, moment: 'cirq.Moment',\n system_qubits: Sequence['cirq.Qid']) -> 'cirq.OP_TREE':\n\n if not hasattr(self.noisy_moments, '_not_overridden'):\n return self.noisy_moments([moment], system_qubits)\n\n if not hasattr(self.noisy_operation, '_not_overridden'):\n return [self.noisy_operation(op) for op in moment]\n\n assert False, 'Should be unreachable.'", "docstring": "Adds noise to the operations from a moment.\n\nArgs:\n moment: The moment to add noise to.\n system_qubits: A list of all qubits in the system.\n\nReturns:\n An OP_TREE corresponding to the noisy operations for the moment.", "source": "juraj_google_style"} -{"code": "def pad_to_multiple_2d(x, block_shape):\n\n old_shape = x.get_shape().dims\n last = old_shape[-1]\n if len(old_shape) == 4:\n height_padding = -common_layers.shape_list(x)[1] % block_shape[0]\n width_padding = -common_layers.shape_list(x)[2] % block_shape[1]\n paddings = [[0, 0], 
[0, height_padding], [0, width_padding], [0, 0]]\n elif len(old_shape) == 5:\n height_padding = -common_layers.shape_list(x)[2] % block_shape[0]\n width_padding = -common_layers.shape_list(x)[3] % block_shape[1]\n paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]]\n\n padded_x = tf.pad(x, paddings)\n padded_shape = padded_x.get_shape().as_list()\n padded_shape = padded_shape[:-1] + [last]\n padded_x.set_shape(padded_shape)\n return padded_x", "docstring": "Making sure x is a multiple of shape.\n\nArgs:\n x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor\n block_shape: a 2-d list of integer shapes\n\nReturns:\n padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor", "source": "juraj_google_style"} -{"code": "def get_cookie_header(queue_item):\n\n\n header = []\n path = URLHelper.get_path(queue_item.request.url)\n\n for cookie in queue_item.request.cookies:\n root_path = cookie.path == \"\" or cookie.path == \"/\"\n if path.startswith(cookie.path) or root_path:\n header.append(cookie.name + \"=\" + cookie.value)\n\n return \"&\".join(header)", "docstring": "Convert a requests cookie jar to a HTTP request cookie header value.\n\nArgs:\n queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.\n\nReturns:\n str: The HTTP cookie header value.", "source": "juraj_google_style"} -{"code": "def read_folder(directory):\n\n res = []\n for filename in os.listdir(directory):\n with io.open(os.path.join(directory, filename), encoding=\"utf-8\") as f:\n content = f.read()\n res.append(content)\n return res", "docstring": "read text files in directory and returns them as array\n\nArgs:\n directory: where the text files are\n\nReturns:\n Array of text", "source": "juraj_google_style"} -{"code": "def _CreatePlacemark(self, parent, name, style_id=None, visible=True,\n description=None):\n\n placemark = ET.SubElement(parent, 'Placemark')\n placemark_name = ET.SubElement(placemark, 'name')\n placemark_name.text = name\n if description is not None:\n desc_tag = ET.SubElement(placemark, 'description')\n desc_tag.text = description\n if style_id is not None:\n styleurl = ET.SubElement(placemark, 'styleUrl')\n styleurl.text = '#%s' % style_id\n if not visible:\n visibility = ET.SubElement(placemark, 'visibility')\n visibility.text = '0'\n return placemark", "docstring": "Create a KML Placemark element.\n\nArgs:\n parent: The parent ElementTree.Element instance.\n name: The placemark name as a string.\n style_id: If not None, the id of a style to use for the placemark.\n visible: Whether the placemark is initially visible or not.\n description: A description string or None.\n\nReturns:\n The placemark ElementTree.Element instance.", "source": "juraj_google_style"} -{"code": "def __init__(self, encoding='utf-8'):\n\n super(CLIInputReader, self).__init__()\n self._encoding = encoding", "docstring": "Initializes an input reader.\n\nArgs:\n encoding (Optional[str]): input encoding.", "source": "juraj_google_style"} -{"code": "def words_and_tags_from_wsj_tree(tree_string):\n\n stack, tags, words = [], [], []\n for tok in tree_string.strip().split():\n if tok[0] == \"(\":\n symbol = tok[1:]\n tags.append(symbol)\n stack.append(symbol)\n else:\n assert tok[-1] == \")\"\n stack.pop() # Pop the POS-tag.\n while tok[-2] == \")\":\n tags.append(\"/\" + stack.pop())\n tok = tok[:-1]\n words.append(tok[:-1])\n return str.join(\" \", words), str.join(\" \", tags[1:-1])", "docstring": "Generates linearized trees and tokens from the wsj tree 
format.\n\n It uses the linearized algorithm described in https://arxiv.org/abs/1412.7449.\n\nArgs:\n tree_string: tree in wsj format\n\nReturns:\n tuple: (words, linearized tree)", "source": "juraj_google_style"} -{"code": "def _SerializeRequest(self, request):\n\n # Construct status line\n parsed = urllib_parse.urlsplit(request.url)\n request_line = urllib_parse.urlunsplit(\n ('', '', parsed.path, parsed.query, ''))\n if not isinstance(request_line, six.text_type):\n request_line = request_line.decode('utf-8')\n status_line = u' '.join((\n request.http_method,\n request_line,\n u'HTTP/1.1\\n'\n ))\n major, minor = request.headers.get(\n 'content-type', 'application/json').split('/')\n msg = mime_nonmultipart.MIMENonMultipart(major, minor)\n\n # MIMENonMultipart adds its own Content-Type header.\n # Keep all of the other headers in `request.headers`.\n for key, value in request.headers.items():\n if key == 'content-type':\n continue\n msg[key] = value\n\n msg['Host'] = parsed.netloc\n msg.set_unixfrom(None)\n\n if request.body is not None:\n msg.set_payload(request.body)\n\n # Serialize the mime message.\n str_io = six.StringIO()\n # maxheaderlen=0 means don't line wrap headers.\n gen = generator.Generator(str_io, maxheaderlen=0)\n gen.flatten(msg, unixfrom=False)\n body = str_io.getvalue()\n\n return status_line + body", "docstring": "Convert a http_wrapper.Request object into a string.\n\nArgs:\n request: A http_wrapper.Request to serialize.\n\nReturns:\n The request as a string in application/http format.", "source": "juraj_google_style"} -{"code": "def _Open(self, path_spec=None, mode='rb'):\n\n if not self._file_object_set_in_init and not path_spec:\n raise ValueError('Missing path specification.')\n\n if self._file_object_set_in_init:\n return\n\n self._file_object = self._OpenFileObject(path_spec)\n if not self._file_object:\n raise IOError('Unable to open missing file-like object.')", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\n path_spec (Optional[PathSpec]): path specification.\n mode (Optional[str]): file access mode.\n\nRaises:\n AccessError: if the access to open the file was denied.\n IOError: if the file-like object could not be opened.\n OSError: if the file-like object could not be opened.\n PathSpecError: if the path specification is incorrect.\n ValueError: if the path specification is invalid.", "source": "juraj_google_style"} -{"code": "def resample(self, seed=None):\n\n if seed is not None:\n gen = torch.manual_seed(seed)\n else:\n gen = torch.default_generator\n\n if self.replacement:\n self.perm = torch.LongTensor(len(self)).random_(\n len(self.dataset), generator=gen)\n else:\n self.perm = torch.randperm(\n len(self.dataset), generator=gen).narrow(0, 0, len(self))", "docstring": "Resample the dataset.\n\nArgs:\n seed (int, optional): Seed for resampling. 
By default no seed is\n used.", "source": "juraj_google_style"} -{"code": "def copy_to(self, new_key, bucket=None):\n\n if bucket is None:\n bucket = self._bucket\n try:\n new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)\n except Exception as e:\n raise e\n return Item(bucket, new_key, new_info, context=self._context)", "docstring": "Copies this item to the specified new key.\n\nArgs:\n new_key: the new key to copy this item to.\n bucket: the bucket of the new item; if None (the default) use the same bucket.\n\nReturns:\n An Item corresponding to new key.\n\nRaises:\n Exception if there was an error copying the item.", "source": "juraj_google_style"} -{"code": "def create_projection(self, fov: float = 75.0, near: float = 1.0, far: float = 100.0, aspect_ratio: float = None):\n\n return matrix44.create_perspective_projection_matrix(\n fov,\n aspect_ratio or self.window.aspect_ratio,\n near,\n far,\n dtype='f4',\n )", "docstring": "Create a projection matrix with the following parameters.\n When ``aspect_ratio`` is not provided the configured aspect\n ratio for the window will be used.\n\nArgs:\n fov (float): Field of view (float)\n near (float): Camera near value\n far (float): Camrea far value\n\n Keyword Args:\n aspect_ratio (float): Aspect ratio of the viewport\n\nReturns:\n The projection matrix as a float32 :py:class:`numpy.array`", "source": "juraj_google_style"} -{"code": "def _validate_netconfig(self, conf):\n\n\n nets = conf.get('nets', {})\n if len(nets) == 0:\n # TO-DO: add default networking if no network is configured\n raise LagoInitException('No networks configured.')\n\n no_mgmt_dns = [\n name for name, net in nets.iteritems()\n if net.get('management', None) is None and\n (net.get('main_dns') or net.get('dns_domain_name'))\n ]\n if len(no_mgmt_dns) > 0 and len(nets.keys()) > 1:\n raise LagoInitException(\n (\n 'Networks: {0}, misconfigured, they '\n 'are not marked as management, but have '\n 'DNS attributes. DNS is supported '\n 'only in management networks.'\n ).format(','.join(no_mgmt_dns))\n )\n\n for dom_name, dom_spec in conf['domains'].items():\n mgmts = []\n for nic in dom_spec['nics']:\n net = self._get_net(conf, dom_name, nic)\n if net.get('management', False) is True:\n mgmts.append(nic['net'])\n if len(mgmts) == 0:\n raise LagoInitException(\n (\n 'VM {0} has no management network, '\n 'please connect it to '\n 'one.'\n ).format(dom_name)\n )\n\n if len(mgmts) > 1:\n raise LagoInitException(\n (\n 'VM {0} has more than one management '\n 'network: {1}. 
It should have exactly '\n 'one.'\n ).format(dom_name, ','.join(mgmts))\n )", "docstring": "Validate network configuration\n\nArgs:\n conf(dict): spec\n\nReturns:\n None\n\nRaises:\n :exc:`~lago.utils.LagoInitException`: If a VM has more than\n one management network configured, or a network which is not\n management has DNS attributes, or a VM is configured with a\n none-existence NIC, or a VM has no management network.", "source": "juraj_google_style"} -{"code": "def download(self, chunk_size=1024):\n\n stream = BytesIO()\n\n response = self._swimlane.request(\n 'get',\n 'attachment/download/{}'.format(self.file_id),\n stream=True\n )\n\n for chunk in response.iter_content(chunk_size):\n stream.write(chunk)\n\n stream.seek(0)\n\n return stream", "docstring": "Download attachment\n\nArgs:\n chunk_size (int): Byte-size of chunked download request stream\n\nReturns:\n BytesIO: Stream ready for reading containing the attachment file contents", "source": "juraj_google_style"} -{"code": "def add_delta_deltas(filterbanks, name=None):\n\n delta_filter = np.array([2, 1, 0, -1, -2])\n delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, \"full\")\n\n delta_filter_stack = np.array(\n [[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2,\n list(delta_delta_filter)],\n dtype=np.float32).T[:, None, None, :]\n\n delta_filter_stack /= np.sqrt(\n np.sum(delta_filter_stack**2, axis=0, keepdims=True))\n\n filterbanks = tf.nn.conv2d(\n filterbanks, delta_filter_stack, [1, 1, 1, 1], \"SAME\", data_format=\"NHWC\",\n name=name)\n return filterbanks", "docstring": "Compute time first and second-order derivative channels.\n\nArgs:\n filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]\n name: scope name\n\nReturns:\n float32 tensor with shape [batch_size, len, num_bins, 3]", "source": "juraj_google_style"} -{"code": "def _initialize_global_state(self,\n redis_address,\n redis_password=None,\n timeout=20):\n\n self.redis_client = services.create_redis_client(\n redis_address, redis_password)\n start_time = time.time()\n\n num_redis_shards = None\n redis_shard_addresses = []\n\n while time.time() - start_time < timeout:\n # Attempt to get the number of Redis shards.\n num_redis_shards = self.redis_client.get(\"NumRedisShards\")\n if num_redis_shards is None:\n print(\"Waiting longer for NumRedisShards to be populated.\")\n time.sleep(1)\n continue\n num_redis_shards = int(num_redis_shards)\n if num_redis_shards < 1:\n raise Exception(\"Expected at least one Redis shard, found \"\n \"{}.\".format(num_redis_shards))\n\n # Attempt to get all of the Redis shards.\n redis_shard_addresses = self.redis_client.lrange(\n \"RedisShards\", start=0, end=-1)\n if len(redis_shard_addresses) != num_redis_shards:\n print(\"Waiting longer for RedisShards to be populated.\")\n time.sleep(1)\n continue\n\n # If we got here then we successfully got all of the information.\n break\n\n # Check to see if we timed out.\n if time.time() - start_time >= timeout:\n raise Exception(\"Timed out while attempting to initialize the \"\n \"global state. 
num_redis_shards = {}, \"\n \"redis_shard_addresses = {}\".format(\n num_redis_shards, redis_shard_addresses))\n\n # Get the rest of the information.\n self.redis_clients = []\n for shard_address in redis_shard_addresses:\n self.redis_clients.append(\n services.create_redis_client(shard_address.decode(),\n redis_password))", "docstring": "Initialize the GlobalState object by connecting to Redis.\n\n It's possible that certain keys in Redis may not have been fully\n populated yet. In this case, we will retry this method until they have\n been populated or we exceed a timeout.\n\nArgs:\n redis_address: The Redis address to connect.\n redis_password: The password of the redis server.", "source": "juraj_google_style"} -{"code": "def VerifyStructure(self, parser_mediator, lines):\n\n # Identify the token to which we attempt a match.\n match = self._PARSING_COMPONENTS['msg_left_delimiter'].match\n\n # Because logs files can lead with a partial event,\n # we can't assume that the first character (post-BOM)\n # in the file is the beginning of our match - so we\n # look for match anywhere in lines.\n return match in lines", "docstring": "Verifies whether content corresponds to an SCCM log file.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n lines (str): one or more lines from the text file.\n\nReturns:\n bool: True if this is the correct parser, False otherwise.", "source": "juraj_google_style"} -{"code": "def format(sql, args=None):\n\n resolved_vars = {}\n code = []\n SqlStatement._find_recursive_dependencies(sql, args, code=code,\n resolved_vars=resolved_vars)\n\n # Rebuild the SQL string, substituting just '$' for escaped $ occurrences,\n # variable references substituted with their values, or literal text copied\n # over as-is.\n parts = []\n for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):\n if escape:\n parts.append('$')\n elif placeholder:\n variable = placeholder[1:]\n try:\n value = resolved_vars[variable]\n except KeyError as e:\n raise Exception('Invalid sql. Unable to substitute $%s.' 
% e.args[0])\n\n if isinstance(value, types.ModuleType):\n value = _utils.get_default_query_from_module(value)\n\n if isinstance(value, SqlStatement):\n sql = value.format(value._sql, resolved_vars)\n value = '(%s)' % sql\n elif '_repr_sql_' in dir(value):\n # pylint: disable=protected-access\n value = value._repr_sql_()\n elif isinstance(value, basestring):\n value = SqlStatement._escape_string(value)\n elif isinstance(value, list) or isinstance(value, tuple):\n if isinstance(value, tuple):\n value = list(value)\n expansion = '('\n for v in value:\n if len(expansion) > 1:\n expansion += ', '\n if isinstance(v, basestring):\n expansion += SqlStatement._escape_string(v)\n else:\n expansion += str(v)\n expansion += ')'\n value = expansion\n else:\n value = str(value)\n parts.append(value)\n elif literal:\n parts.append(literal)\n\n expanded = ''.join(parts)\n return expanded", "docstring": "Resolve variable references in a query within an environment.\n\n This computes and resolves the transitive dependencies in the query and raises an\n exception if that fails due to either undefined or circular references.\n\nArgs:\n sql: query to format.\n args: a dictionary of values to use in variable expansion.\n\nReturns:\n The resolved SQL text with variables expanded.\n\nRaises:\n Exception on failure.", "source": "juraj_google_style"} -{"code": "def apply(self, data, path=None):\n\n\n if path is None:\n path = []\n\n if not isinstance(data, dict):\n return data\n\n def _enumerate(value):\n if isinstance(value, list):\n for k, v in enumerate(value):\n yield k, v\n elif isinstance(value, dict):\n for k, v in value.items():\n yield k, v\n\n def _set(container, key, value):\n if isinstance(container, list):\n container.append(value)\n else:\n container[key] = value\n\n def _apply(ramap, value, status=False, wc=False, path=[]):\n\n\n if not isinstance(value, dict) and not isinstance(value, list):\n if status:\n return value\n else:\n return None\n\n if not wc:\n status = ramap.get(\"__\", status)\n\n handler = None\n key_handler = None\n if path and self.handlers:\n namespace = Namespace(path)\n for _handler in self.handlers.values():\n if namespace.match(_handler.get(\"namespace\").keys, partial=False):\n handler = _handler\n key_handler = handler.get(\"key\")\n break\n\n if isinstance(value, list):\n if not key_handler:\n key_handler = list_key_handler\n rv = []\n else:\n rv = {}\n\n for k, v in _enumerate(value):\n if key_handler:\n k = key_handler(v, k)\n k = str(k)\n if isinstance(v, dict) or isinstance(v, list):\n if k in ramap:\n r = _apply(ramap[k], v, status=status, path=path+[k])\n if r:\n _set(rv, k, r)\n elif \"*\" in ramap:\n r = _apply(ramap[\"*\"], v, status=status, wc=True, path=path+[k])\n if r:\n _set(rv, k, r)\n elif status:\n _set(rv, k, v)\n else:\n if k in ramap:\n if ramap[k].get(\"__\", True):\n _set(rv, k, v)\n elif \"*\" in ramap and ramap[\"*\"].get(\"__\", True):\n _set(rv, k, v)\n elif status:\n _set(rv, k, v)\n\n return rv\n\n # loop through all the handlers that specify the `explicit` arguments\n # and temprorarily add deny rules for those to the targeted permissionset\n tmpns = {}\n for ns, handler in self.handlers.items():\n if handler.get(\"explicit\"):\n p = self.pset.get_permissions(ns)\n if p & const.PERM_READ:\n exists = False\n for _ns in self.pset.namespaces:\n if Namespace(_ns).match(Namespace(ns).keys, partial=False):\n exists = True\n break\n if exists:\n continue\n tmpns[ns] = p\n self.pset[ns] = const.PERM_DENY\n\n\n # apply permissions\n rv = 
_apply(self.pset.read_access_map, data)\n\n # remove temporarily added deny rules\n for ns, p in tmpns.items():\n if p is None:\n del self.pset[ns]\n else:\n self.pset[ns] = p\n\n return rv", "docstring": "Apply permissions in this set to the provided data, effectively\n removing all keys from it are not permissioned to be viewed\n\nArgs:\n data -- dict of data\n\nReturns:\n Cleaned data", "source": "juraj_google_style"} -{"code": "def retrieve_token(self, token):\n\n headers = self.client._get_private_headers()\n endpoint = '/tokens/{}'.format(token)\n return self.client._get(self.client.URL_BASE + endpoint, headers=headers)", "docstring": "Retrieve Token details for a specific Token.\n\nArgs:\n token: The identifier of the token.\n\n\n Returns:", "source": "juraj_google_style"} -{"code": "def check_the_end_flag(self, state_arr):\n\n if self.__check_goal_flag(state_arr) is True or self.__check_crash_flag(state_arr):\n return True\n else:\n return False", "docstring": "Check the end flag.\n\n If this return value is `True`, the learning is end.\n\n As a rule, the learning can not be stopped.\n This method should be overrided for concreate usecases.\n\nArgs:\n state_arr: `np.ndarray` of state in `self.t`.\n\nReturns:\n bool", "source": "juraj_google_style"} -{"code": "def encrypt_encoded(self, encoding, r_value):\n\n # If r_value is None, obfuscate in a call to .obfuscate() (below)\n obfuscator = r_value or 1\n ciphertext = self.raw_encrypt(encoding.encoding, r_value=obfuscator)\n encrypted_number = EncryptedNumber(self, ciphertext, encoding.exponent)\n if r_value is None:\n encrypted_number.obfuscate()\n return encrypted_number", "docstring": "Paillier encrypt an encoded value.\n\nArgs:\n encoding: The EncodedNumber instance.\n r_value (int): obfuscator for the ciphertext; by default (i.e.\n if *r_value* is None), a random value is used.\n\nReturns:\n EncryptedNumber: An encryption of *value*.", "source": "juraj_google_style"} -{"code": "def get_server(self, name):\n\n mech = self.get(name)\n return mech if isinstance(mech, ServerMechanism) else None", "docstring": "Like :meth:`.get`, but only mechanisms inheriting\n :class:`ServerMechanism` will be returned.\n\nArgs:\n name: The SASL mechanism name.\n\nReturns:\n The mechanism object or ``None``", "source": "juraj_google_style"} -{"code": "def request_and_check(self, url, method='get',\n expected_content_type=None, **kwargs):\n\n assert method in ['get', 'post']\n result = self.driver.request(method, url, **kwargs)\n if result.status_code != requests.codes.ok:\n raise RuntimeError('Error requesting %r, status = %d' %\n (url, result.status_code))\n if expected_content_type is not None:\n content_type = result.headers.get('content-type', '')\n if not re.match(expected_content_type, content_type):\n raise RuntimeError(\n 'Error requesting %r, content type %r does not match %r' %\n (url, content_type, expected_content_type))\n return result", "docstring": "Performs a request, and checks that the status is OK, and that the\n content-type matches expectations.\n\nArgs:\n url: URL to request\n method: either 'get' or 'post'\n expected_content_type: prefix to match response content-type against\n **kwargs: passed to the request method directly.\n\nRaises:\n RuntimeError if status_code does not match.", "source": "juraj_google_style"} -{"code": "def set_user(self, user):\n\n self.session['user_id'] = user.key\n self.session['user_data'] = user.clean_value()\n role = self.get_role()\n # TODO: this should be remembered from previous login\n # 
self.session['role_data'] = default_role.clean_value()\n self.session['role_id'] = role.key\n self.current.role_id = role.key\n self.current.user_id = user.key\n # self.perm_cache = PermissionCache(role.key)\n self.session['permissions'] = role.get_permissions()", "docstring": "Writes user data to session.\n\nArgs:\n user: User object", "source": "juraj_google_style"} -{"code": "def AddBasicOptions(self, argument_group):\n\n version_string = self.GetVersionInformation()\n\n # We want a custom help message and not the default argparse one.\n argument_group.add_argument(\n '-h', '--help', action='help',\n help='Show this help message and exit.')\n\n argument_group.add_argument(\n '--troubles', dest='show_troubleshooting', action='store_true',\n default=False, help='Show troubleshooting information.')\n\n argument_group.add_argument(\n '-V', '--version', dest='version', action='version',\n version=version_string, help='Show the version information.')", "docstring": "Adds the basic options to the argument group.\n\nArgs:\n argument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj_google_style"} -{"code": "def to_string(self, other_separation_char=None):\n\n separation_char = self._separation_char\n if other_separation_char is not None:\n separation_char = other_separation_char\n return separation_char.join(self._locations_list)", "docstring": "String representation of :class:`LocationDescriptor` object.\n\nArgs:\n other_separation_char: If needed, another separator character can be used.\n\n Returns:", "source": "juraj_google_style"} -{"code": "def __init__(self, code, error, content, message=None):\n\n super(AdWordsReportError, self).__init__(\n message if message else ('AdWords report download failed with HTTP '\n 'status code: %s' % code))\n self.code = code\n self.error = error\n self.content = content", "docstring": "Initializes an AdWordsReportError.\n\nArgs:\n code: The HTTP status code number that was returned.\n error: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError\n (Python 3) describing the failure.\n content: The HTTP response body as a string.\n [optional]\n message: A user-friendly error message string. 
If one is not provided, a\n default message will be used.", "source": "juraj_google_style"} -{"code": "def validate(data):\n\n try:\n return Schema(Validator.SCHEMA).validate(data)\n except SchemaError as exception:\n logging.getLogger(__name__).error(exception)\n return None", "docstring": "Validate data against the schema.\n\nArgs:\n data(dict): data structure to validate.\n\nReturns:\n dict: data as provided and defaults where defined in schema.", "source": "juraj_google_style"} -{"code": "def dict_factory(self, cursor, row):\n\n d = {}\n for idx, col in enumerate(cursor.description):\n val = row[idx]\n name = col[0]\n if name == Field.Time_Stamp:\n d[col[0]] = str(val)\n continue\n if name == \"Raw_A\" or name == \"Raw_B\": # or name == Field.Meter_Time:\n continue\n if name not in self.m_all_fields:\n continue\n if (str(val) != \"None\") and ((val > 0) or (val < 0)):\n d[name] = str(val)\n return d", "docstring": "Sqlite callback accepting the cursor and the original row as a tuple.\n\n Simple return of JSON safe types.\n\nArgs:\n cursor (sqlite cursor): Original cursory\n row (sqlite row tuple): Original row.\n\nReturns:\n dict: modified row.", "source": "juraj_google_style"} -{"code": "def read_classification_results(storage_client, file_path):\n\n if storage_client:\n # file on Cloud\n success = False\n retry_count = 0\n while retry_count < 4:\n try:\n blob = storage_client.get_blob(file_path)\n if not blob:\n return {}\n if blob.size > MAX_ALLOWED_CLASSIFICATION_RESULT_SIZE:\n logging.warning('Skipping classification result because it''s too '\n 'big: %d bytes for %s', blob.size, file_path)\n return None\n buf = BytesIO()\n blob.download_to_file(buf)\n buf.seek(0)\n success = True\n break\n except Exception:\n retry_count += 1\n time.sleep(5)\n if not success:\n return None\n else:\n # local file\n try:\n with open(file_path, 'rb') as f:\n buf = BytesIO(f.read())\n except IOError:\n return None\n result = {}\n if PY3:\n buf = StringIO(buf.read().decode('UTF-8'))\n for row in csv.reader(buf):\n try:\n image_filename = row[0]\n if image_filename.endswith('.png') or image_filename.endswith('.jpg'):\n image_filename = image_filename[:image_filename.rfind('.')]\n label = int(row[1])\n except (IndexError, ValueError):\n continue\n result[image_filename] = label\n return result", "docstring": "Reads classification results from the file in Cloud Storage.\n\n This method reads file with classification results produced by running\n defense on singe batch of adversarial images.\n\nArgs:\n storage_client: instance of CompetitionStorageClient or None for local file\n file_path: path of the file with results\n\nReturns:\n dictionary where keys are image names or IDs and values are classification\n labels", "source": "juraj_google_style"} -{"code": "def relaxng(filename=None):\n\n E = ElementMaker(namespace=\"http://relaxng.org/ns/structure/1.0\",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': NSFOLIA, 'xml' : \"http://www.w3.org/XML/1998/namespace\"})\n grammar = E.grammar( E.start( E.element( #FoLiA\n E.attribute(name='id',ns=\"http://www.w3.org/XML/1998/namespace\"),\n E.optional( E.attribute(name='version') ),\n E.optional( E.attribute(name='generator') ),\n E.element( #metadata\n E.optional(E.attribute(name='type')),\n E.optional(E.attribute(name='src')),\n E.element( E.zeroOrMore( E.choice( *relaxng_declarations() ) ) ,name='annotations'),\n E.zeroOrMore(\n E.element(E.attribute(name='id'), E.text(), name='meta'),\n ),\n E.zeroOrMore(\n E.ref(name=\"foreign-data\"),\n ),\n 
E.zeroOrMore(\n E.element( #submetadata\n E.attribute(name='id',ns=\"http://www.w3.org/XML/1998/namespace\"),\n E.optional(E.attribute(name='type')),\n E.optional(E.attribute(name='src')),\n E.zeroOrMore(\n E.element(E.attribute(name='id'), E.text(), name='meta'),\n ),\n E.zeroOrMore(\n E.ref(name=\"foreign-data\"),\n ),\n name=\"submetadata\"\n )\n ),\n #E.optional(\n # E.ref(name='METATRANSCRIPT')\n #),\n name='metadata',\n #ns=NSFOLIA,\n ),\n E.interleave(\n E.zeroOrMore(\n E.ref(name='text'),\n ),\n E.zeroOrMore(\n E.ref(name='speech'),\n ),\n ),\n name='FoLiA',\n ns = NSFOLIA\n ) ),\n #definitions needed for ForeignData (allow any content) - see http://www.microhowto.info/howto/match_arbitrary_content_using_relax_ng.html\n E.define( E.interleave(E.zeroOrMore(E.ref(name=\"any_element\")),E.text()), name=\"any_content\"),\n E.define( E.element(E.anyName(), E.zeroOrMore(E.ref(name=\"any_attribute\")), E.zeroOrMore(E.ref(name=\"any_content\"))), name=\"any_element\"),\n E.define( E.attribute(E.anyName()), name=\"any_attribute\"),\n #Definition for allowing alien-namespace attributes on any element\n E.define( E.zeroOrMore(E.attribute(E.anyName(getattr(E,'except')(E.nsName(),E.nsName(ns=\"\"),E.nsName(ns=\"http://www.w3.org/XML/1998/namespace\"),E.nsName(ns=\"http://www.w3.org/1999/xlink\"))))), name=\"allow_foreign_attributes\"),\n datatypeLibrary=\"http://www.w3.org/2001/XMLSchema-datatypes\",\n )\n\n done = {}\n for c in globals().values():\n if 'relaxng' in dir(c):\n if c.relaxng and c.XMLTAG and not c.XMLTAG in done:\n done[c.XMLTAG] = True\n definition = c.relaxng()\n grammar.append( definition )\n if c.XMLTAG == 'item': #nasty backward-compatibility hack to allow deprecated listitem element (actually called item)\n definition_alias = c.relaxng()\n definition_alias.set('name','listitem')\n definition_alias[0].set('name','listitem')\n grammar.append( definition_alias )\n\n #for e in relaxng_imdi():\n # grammar.append(e)\n if filename:\n if sys.version < '3':\n f = io.open(filename,'w',encoding='utf-8')\n else:\n f = io.open(filename,'wb')\n if LXE:\n if sys.version < '3':\n f.write( ElementTree.tostring(relaxng(),pretty_print=True).replace(\"\",\"\\n\\n\") )\n else:\n f.write( ElementTree.tostring(relaxng(),pretty_print=True).replace(b\"\",b\"\\n\\n\") )\n else:\n f.write( ElementTree.tostring(relaxng()).replace(\"\",\"\\n\\n\") )\n f.close()\n\n return grammar", "docstring": "Generates a RelaxNG Schema for FoLiA. 
Optionally saves it to file.\n\nArgs:\n filename (str): Save the schema to the following filename\n\nReturns:\n lxml.ElementTree: The schema", "source": "juraj_google_style"} -{"code": "def protein_only_and_noH(self, keep_ligands=None, force_rerun=False):\n\n log.debug('{}: running protein receptor isolation...'.format(self.id))\n\n if not self.dockprep_path:\n return ValueError('Please run dockprep')\n\n receptor_mol2 = op.join(self.dock_dir, '{}_receptor.mol2'.format(self.id))\n receptor_noh = op.join(self.dock_dir, '{}_receptor_noH.pdb'.format(self.id))\n\n prly_com = op.join(self.dock_dir, \"prly.com\")\n\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=receptor_noh):\n with open(prly_com, \"w\") as f:\n f.write('open {}\\n'.format(self.dockprep_path))\n\n keep_str = 'delete ~protein'\n if keep_ligands:\n keep_ligands = ssbio.utils.force_list(keep_ligands)\n for res in keep_ligands:\n keep_str += ' & ~:{} '.format(res)\n keep_str = keep_str.strip() + '\\n'\n f.write(keep_str)\n\n f.write('write format mol2 0 {}\\n'.format(receptor_mol2))\n f.write('delete element.H\\n')\n f.write('write format pdb 0 {}\\n'.format(receptor_noh))\n\n cmd = 'chimera --nogui {}'.format(prly_com)\n os.system(cmd)\n os.remove(prly_com)\n\n if ssbio.utils.is_non_zero_file(receptor_mol2) and ssbio.utils.is_non_zero_file(receptor_noh):\n self.receptormol2_path = receptor_mol2\n self.receptorpdb_path = receptor_noh\n log.debug('{}: successful receptor isolation (mol2)'.format(self.receptormol2_path))\n log.debug('{}: successful receptor isolation (pdb)'.format(self.receptorpdb_path))\n else:\n log.critical('{}: protein_only_and_noH failed to run on dockprep file'.format(self.dockprep_path))", "docstring": "Isolate the receptor by stripping everything except protein and specified ligands.\n\nArgs:\n keep_ligands (str, list): Ligand(s) to keep in PDB file\n force_rerun (bool): If method should be rerun even if output file exists", "source": "juraj_google_style"} -{"code": "def AddNewSignature(self, pattern, offset=None):\n\n self.signatures.append(Signature(pattern, offset=offset))", "docstring": "Adds a signature.\n\nArgs:\n pattern (bytes): pattern of the signature.\n offset (int): offset of the signature. None is used to indicate\n the signature has no offset. 
A positive offset is relative from\n the start of the data a negative offset is relative from the end\n of the data.", "source": "juraj_google_style"} -{"code": "def setPulseInputRatio(self, line_in, new_cnst, password=\"00000000\"):\n\n result = False\n self.setContext(\"setPulseInputRatio\")\n\n try:\n if not self.requestA():\n self.writeCmdMsg(\"Bad read CRC on setting\")\n else:\n if not self.serialCmdPwdAuth(password):\n self.writeCmdMsg(\"Password failure\")\n else:\n req_const = binascii.hexlify(str(new_cnst).zfill(4))\n line_const = binascii.hexlify(str(line_in - 1))\n req_str = \"01573102303041\" + line_const + \"28\" + req_const + \"2903\"\n req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n self.m_serial_port.write(req_str.decode(\"hex\"))\n if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n self.writeCmdMsg(\"Success: 06 returned.\")\n result = True\n\n self.serialPostEnd()\n except:\n ekm_log(traceback.format_exc(sys.exc_info()))\n\n self.setContext(\"\")\n return result", "docstring": "Serial call to set pulse input ratio on a line.\n\nArgs:\n line_in (int): Member of :class:`~ekmmeters.Pulse`\n new_cnst (int): New pulse input ratio\n password (str): Optional password\n\n Returns:", "source": "juraj_google_style"} -{"code": "def _check_job_status(self, job, desc, status_key_name):\n\n status = desc[status_key_name]\n # If the status is capital case, then convert it to Camel case\n status = _STATUS_CODE_TABLE.get(status, status)\n\n if status != 'Completed' and status != 'Stopped':\n reason = desc.get('FailureReason', '(No reason provided)')\n job_type = status_key_name.replace('JobStatus', ' job')\n raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason))", "docstring": "Check to see if the job completed successfully and, if not, construct and\n raise a ValueError.\n\nArgs:\n job (str): The name of the job to check.\n desc (dict[str, str]): The result of ``describe_training_job()``.\n status_key_name (str): Status key name to check for.\n\nRaises:\n ValueError: If the training job fails.", "source": "juraj_google_style"} -{"code": "def subscribe_registration_ids_to_topic(self, registration_ids, topic_name):\n\n url = 'https://iid.googleapis.com/iid/v1:batchAdd'\n payload = {\n 'to': '/topics/' + topic_name,\n 'registration_tokens': registration_ids,\n }\n response = self.requests_session.post(url, json=payload)\n if response.status_code == 200:\n return True\n elif response.status_code == 400:\n error = response.json()\n raise InvalidDataError(error['error'])\n else:\n raise FCMError()", "docstring": "Subscribes a list of registration ids to a topic\n\nArgs:\n registration_ids (list): ids to be subscribed\n topic_name (str): name of topic\n\nReturns:\n True: if operation succeeded\n\nRaises:\n InvalidDataError: data sent to server was incorrectly formatted\n FCMError: an error occured on the server", "source": "juraj_google_style"} -{"code": "def clean_df(df, fill_nan=True, drop_empty_columns=True):\n\n if fill_nan:\n df = df.fillna(value=np.nan)\n if drop_empty_columns:\n df = df.dropna(axis=1, how='all')\n return df.sort_index()", "docstring": "Clean a pandas dataframe by:\n 1. Filling empty values with Nan\n 2. 
Dropping columns with all empty values\n\nArgs:\n df: Pandas DataFrame\n fill_nan (bool): If any empty values (strings, None, etc) should be replaced with NaN\n drop_empty_columns (bool): If columns whose values are all empty should be dropped\n\nReturns:\n DataFrame: cleaned DataFrame", "source": "juraj_google_style"} -{"code": "def _add_dispatcher(self, path_regex, dispatch_function):\n\n self._dispatchers.append((re.compile(path_regex), dispatch_function))", "docstring": "Add a request path and dispatch handler.\n\nArgs:\n path_regex: A string regex, the path to match against incoming requests.\n dispatch_function: The function to call for these requests. The function\n should take (request, start_response) as arguments and\n return the contents of the response body.", "source": "juraj_google_style"} -{"code": "def calculate_covariance_matrix(X):\n\n\n n_features = X.shape[1] \n S = np.zeros((n_features, n_features))\n m = np.mean(X, axis=0).reshape(n_features, 1)\n\n for x in X:\n v = x.reshape(n_features, 1) - m\n S += v @ v.T\n\n return 1/(X.shape[0]-1) * S", "docstring": "Calculates the Variance-Covariance matrix\n\n Parameters:\n -----------\n X : array-like, shape (m, n) - the data\n\nReturns:\n --------\n variance_covariance_matrix : array-like, shape(n, n)", "source": "juraj_google_style"} -{"code": "def destroy_elb(app='', env='dev', region='us-east-1', **_):\n\n task_json = get_template(\n template_file='destroy/destroy_elb.json.j2',\n app=app,\n env=env,\n region=region,\n vpc=get_vpc_id(account=env, region=region))\n\n wait_for_task(task_json)\n\n return True", "docstring": "Destroy ELB Resources.\n\nArgs:\n app (str): Spinnaker Application name.\n env (str): Deployment environment.\n region (str): AWS region.\n\nReturns:\n True upon successful completion.", "source": "juraj_google_style"} -{"code": "def draw_proposal_recall(img, proposals, proposal_scores, gt_boxes):\n\n box_ious = np_iou(gt_boxes, proposals) # ng x np\n box_ious_argsort = np.argsort(-box_ious, axis=1)\n good_proposals_ind = box_ious_argsort[:, :3] # for each gt, find 3 best proposals\n good_proposals_ind = np.unique(good_proposals_ind.ravel())\n\n proposals = proposals[good_proposals_ind, :]\n tags = list(map(str, proposal_scores[good_proposals_ind]))\n img = viz.draw_boxes(img, proposals, tags)\n return img, good_proposals_ind", "docstring": "Draw top3 proposals for each gt.\n\nArgs:\n proposals: NPx4\n proposal_scores: NP\n gt_boxes: NG", "source": "juraj_google_style"} -{"code": "def __convertIp6PrefixStringToIp6Address(self, strIp6Prefix):\n\n prefix1 = strIp6Prefix.rstrip('L')\n prefix2 = prefix1.lstrip(\"0x\")\n hexPrefix = str(prefix2).ljust(16,'0')\n hexIter = iter(hexPrefix)\n finalMac = ':'.join(a + b + c + d for a,b,c,d in zip(hexIter, hexIter,hexIter,hexIter))\n prefix = str(finalMac)\n strIp6Prefix = prefix[:20]\n return strIp6Prefix +':'", "docstring": "convert IPv6 prefix string to IPv6 dotted-quad format\n for example:\n 2001000000000000 -> 2001::\n\nArgs:\n strIp6Prefix: IPv6 address string\n\nReturns:\n IPv6 address dotted-quad format", "source": "juraj_google_style"} -{"code": "def add_metric(self, labels, value, created=None, timestamp=None):\n\n self.samples.append(Sample(self.name + '_total', dict(zip(self._labelnames, labels)), value, timestamp))\n if created is not None:\n self.samples.append(Sample(self.name + '_created', dict(zip(self._labelnames, labels)), created, timestamp))", "docstring": "Add a metric to the metric family.\n\nArgs:\n labels: A list of label values\n value: 
The value of the metric\n created: Optional unix timestamp the child was created at.", "source": "juraj_google_style"} -{"code": "def split_input(cls, mapper_spec):\n\n batch_size = int(_get_params(mapper_spec).get(\n cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))\n shard_count = mapper_spec.shard_count\n namespace_ranges = namespace_range.NamespaceRange.split(shard_count,\n contiguous=True)\n return [NamespaceInputReader(ns_range, batch_size)\n for ns_range in namespace_ranges]", "docstring": "Returns a list of input readers for the input spec.\n\nArgs:\n mapper_spec: The MapperSpec for this InputReader.\n\nReturns:\n A list of InputReaders.", "source": "juraj_google_style"} -{"code": "def copy_data(data_length, blocksize, infp, outfp):\n # type: (int, int, BinaryIO, BinaryIO) -> None\n\n use_sendfile = False\n if have_sendfile:\n # Python 3 implements the fileno method for all file-like objects, so\n # we can't just use the existence of the method to tell whether it is\n # available. Instead, we try to assign it, and if we fail, then we\n # assume it is not available.\n try:\n x_unused = infp.fileno() # NOQA\n y_unused = outfp.fileno() # NOQA\n use_sendfile = True\n except (AttributeError, io.UnsupportedOperation):\n pass\n\n if use_sendfile:\n # This is one of those instances where using the file object and the\n # file descriptor causes problems. The sendfile() call actually updates\n # the underlying file descriptor, but the file object does not know\n # about it. To get around this, we instead get the offset, allow\n # sendfile() to update the offset, then manually seek the file object\n # to the right location. This ensures that the file object gets updated\n # properly.\n in_offset = infp.tell()\n out_offset = outfp.tell()\n sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)\n infp.seek(in_offset + data_length)\n outfp.seek(out_offset + data_length)\n else:\n left = data_length\n readsize = blocksize\n while left > 0:\n if left < readsize:\n readsize = left\n data = infp.read(readsize)\n # We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that\n # lie about the size of their files, causing reads to fail (since\n # we hit EOF before the supposed end of the file). If we are using\n # sendfile above, sendfile just silently returns as much data as it\n # can, with no additional checking. We should do the same here, so\n # if we got less data than we asked for, abort the loop silently.\n data_len = len(data)\n if data_len != readsize:\n data_len = left\n outfp.write(data)\n left -= data_len", "docstring": "A utility function to copy data from the input file object to the output\n file object. 
This function will use the most efficient copy method available,\n which is often sendfile.\n\n Parameters:\n data_length - The amount of data to copy.\n blocksize - How much data to copy per iteration.\n infp - The file object to copy data from.\n outfp - The file object to copy data to.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def __getitem__(self, id):\n\n if id == slice(None, None):\n return list(self)\n response = backend.spreadsheet(self._sheets, id)\n result = models.SpreadSheet._from_response(response, self._sheets)\n result._api = self\n return result", "docstring": "Fetch and return the spreadsheet with the given id.\n\nArgs:\n id (str): unique alphanumeric id of the spreadsheet\n\nReturns:\n SpreadSheet: new SpreadSheet instance\n\nRaises:\n KeyError: if no spreadsheet with the given ``id`` is found", "source": "juraj_google_style"} -{"code": "def __init__(self,\n line: int, prefix_length: int,\n option_suffix: Text = '') -> None:\n\n super().__init__(prefix_length, option_suffix)\n self._line = line", "docstring": "Create a new instance.\n\nArgs:\n line:\n the line to extract, counting from zero\n prefix_length:\n Amount of characters to skip at the beginning of the line\n option_suffix:\n Suffix for each configuration option", "source": "juraj_google_style"} -{"code": "def xray_driver_removed_handler(self, unused_channel, data):\n\n gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(\n data, 0)\n driver_data = gcs_entries.Entries(0)\n message = ray.gcs_utils.DriverTableData.GetRootAsDriverTableData(\n driver_data, 0)\n driver_id = message.DriverId()\n logger.info(\"Monitor: \"\n \"XRay Driver {} has been removed.\".format(\n binary_to_hex(driver_id)))\n self._xray_clean_up_entries_for_driver(driver_id)", "docstring": "Handle a notification that a driver has been removed.\n\nArgs:\n unused_channel: The message channel.\n data: The message data.", "source": "juraj_google_style"} -{"code": "def _generate_moment_matrix(self, n_vars, block_index, processed_entries,\n monomialsA, monomialsB, ppt=False):\n\n row_offset = 0\n if block_index > 0:\n for block_size in self.block_struct[0:block_index]:\n row_offset += block_size ** 2\n N = len(monomialsA)*len(monomialsB)\n func = partial(assemble_monomial_and_do_substitutions,\n monomialsA=monomialsA, monomialsB=monomialsB, ppt=ppt,\n substitutions=self.substitutions,\n pure_substitution_rules=self.pure_substitution_rules)\n if self._parallel:\n pool = Pool()\n # This is just a guess and can be optimized\n chunksize = int(max(int(np.sqrt(len(monomialsA) * len(monomialsB) *\n len(monomialsA) / 2) / cpu_count()),\n 1))\n for rowA in range(len(monomialsA)):\n if self._parallel:\n iter_ = pool.map(func, [(rowA, columnA, rowB, columnB)\n for rowB in range(len(monomialsB))\n for columnA in range(rowA,\n len(monomialsA))\n for columnB in range((rowA == columnA)*rowB,\n len(monomialsB))],\n chunksize)\n print_criterion = processed_entries + len(iter_)\n else:\n iter_ = imap(func, [(rowA, columnA, rowB, columnB)\n for columnA in range(rowA, len(monomialsA))\n for rowB in range(len(monomialsB))\n for columnB in range((rowA == columnA)*rowB,\n len(monomialsB))])\n for columnA, rowB, columnB, monomial in iter_:\n processed_entries += 1\n n_vars = self._push_monomial(monomial, n_vars,\n row_offset, rowA,\n columnA, N, rowB,\n columnB, len(monomialsB),\n prevent_substitutions=True)\n if self.verbose > 0 and (not self._parallel or\n processed_entries == self.n_vars or\n processed_entries == print_criterion):\n 
percentage = processed_entries / self.n_vars\n time_used = time.time()-self._time0\n eta = (1.0 / percentage) * time_used - time_used\n hours = int(eta/3600)\n minutes = int((eta-3600*hours)/60)\n seconds = eta-3600*hours-minutes*60\n\n msg = \"\"\n if self.verbose > 1 and self._parallel:\n msg = \", working on block {:0} with {:0} processes with a chunksize of {:0d}\"\\\n .format(block_index, cpu_count(),\n chunksize)\n msg = \"{:0} (done: {:.2%}, ETA {:02d}:{:02d}:{:03.1f}\"\\\n .format(n_vars, percentage, hours, minutes, seconds) + \\\n msg\n msg = \"\\r\\x1b[KCurrent number of SDP variables: \" + msg + \")\"\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n if self._parallel:\n pool.close()\n pool.join()\n if self.verbose > 0:\n sys.stdout.write(\"\\r\")\n return n_vars, block_index + 1, processed_entries", "docstring": "Generate the moment matrix of monomials.\n\nArgs:\n n_vars -- current number of variables\n block_index -- current block index in the SDP matrix\n monomials -- |W_d| set of words of length up to the relaxation level", "source": "juraj_google_style"} -{"code": "def cmd_ssh(options):\n\n import os\n import subprocess\n from os.path import expanduser\n options.inst_state = \"running\"\n (i_info, param_str) = gather_data(options)\n (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)\n home_dir = expanduser(\"~\")\n if options.user is None:\n tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami'])\n options.user = cmd_ssh_user(tar_aminame,\n i_info[tar_idx]['tag']['Name'])\n else:\n debg.dprint(\"LoginUser set by user: \", options.user)\n os_spec = {\"nt\": [\"powershell plink\", \"\\\\\", \"ppk\"]}\n c_itm = os_spec.get(os.name, [\"ssh\", \"/\", \"pem\"])\n cmd_ssh_run = c_itm[0]\n if not options.nopem:\n cmd_ssh_run += (\" -i {0}{1}.aws{1}{2}.{3}\".\n format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'],\n c_itm[2]))\n else:\n debg.dprint(\"Connect string: \", \"ssh {}@{}\".\n format(options.user, i_info[tar_idx]['pub_dns_name']))\n cmd_ssh_run += \" {0}@{1}\".format(options.user,\n i_info[tar_idx]['pub_dns_name'])\n print(cmd_ssh_run)\n subprocess.call(cmd_ssh_run, shell=True)", "docstring": "Connect to the specified instance via ssh.\n\n Finds instances that match the user specified args that are also\n in the 'running' state. 
The target instance is determined, the\n required connection information is retreived (IP, key and ssh\n user-name), then an 'ssh' connection is made to the instance.\n\nArgs:\n options (object): contains args and data from parser", "source": "juraj_google_style"} -{"code": "def red_home(request, message=None):\n\n if message:\n messages.add_message(request, messages.ERROR, message)\n return HttpResponseRedirect(reverse('homepage'))", "docstring": "Convenience function for redirecting users who don't have access to a page to the home page.\n Parameters:\n request - the request in the calling function\n message - a message from the caller function", "source": "juraj_google_style"} -{"code": "def wait_for(port_num, timeout):\n\n logger.debug(\"wait for {port_num}\".format(**locals()))\n t_start = time.time()\n sleeps = 0.1\n while time.time() - t_start < timeout:\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((_host(), port_num))\n return True\n except (IOError, socket.error):\n time.sleep(sleeps)\n finally:\n s.close()\n return False", "docstring": "waits while process starts.\n\nArgs:\n port_num - port number\n timeout - specify how long, in seconds, a command can take before times out.\n return True if process started, return False if not", "source": "juraj_google_style"} -{"code": "def send(self, commands):\n\n try:\n self.streamSock.send(bytes(commands, encoding='utf-8'))\n except TypeError:\n self.streamSock.send(commands) # 2.7 chokes on 'bytes' and 'encoding='\n except (OSError, IOError) as error: # HEY MOE, LEAVE THIS ALONE FOR NOW!\n sys.stderr.write(f'\\nAGPS3 send command fail with {error}\\n')", "docstring": "Ship commands to the daemon\n\nArgs:\n commands: e.g., '?WATCH={{'enable':true,'json':true}}'|'?VERSION;'|'?DEVICES;'|'?DEVICE;'|'?POLL;'", "source": "juraj_google_style"} -{"code": "def lines(start=None, end=None, reverse=False, selection=False):\n\n if selection:\n start, end = get_selection()\n\n else:\n start, end = fix_addresses(start, end)\n\n if not reverse:\n item = idaapi.get_item_head(start)\n while item < end:\n yield Line(item)\n item += idaapi.get_item_size(item)\n\n else: # if reverse:\n item = idaapi.get_item_head(end - 1)\n while item >= start:\n yield Line(item)\n item = idaapi.get_item_head(item - 1)", "docstring": "Iterate lines in range.\n\nArgs:\n start: Starting address, start of IDB if `None`.\n end: End address, end of IDB if `None`.\n reverse: Set to true to iterate in reverse order.\n selection: If set to True, replaces start and end with current selection.\n\nReturns:\n iterator of `Line` objects.", "source": "juraj_google_style"} -{"code": "def add_before(self, pipeline):\n\n if not isinstance(pipeline, Pipeline):\n pipeline = Pipeline(pipeline)\n self.pipes = pipeline.pipes[:] + self.pipes[:]\n return self", "docstring": "Add a Pipeline to be applied before this processing pipeline.\n\nArgs:\n pipeline: The Pipeline or callable to apply before this\n Pipeline.", "source": "juraj_google_style"} -{"code": "def scalars_impl(self, run, tag_regex_string):\n\n if not tag_regex_string:\n # The user provided no regex.\n return {\n _REGEX_VALID_PROPERTY: False,\n _TAG_TO_EVENTS_PROPERTY: {},\n }\n\n # Construct the regex.\n try:\n regex = re.compile(tag_regex_string)\n except re.error:\n return {\n _REGEX_VALID_PROPERTY: False,\n _TAG_TO_EVENTS_PROPERTY: {},\n }\n\n # Fetch the tags for the run. 
Filter for tags that match the regex.\n run_to_data = self._multiplexer.PluginRunToTagToContent(\n scalars_metadata.PLUGIN_NAME)\n\n tag_to_data = None\n try:\n tag_to_data = run_to_data[run]\n except KeyError:\n # The run could not be found. Perhaps a configuration specified a run that\n # TensorBoard has not read from disk yet.\n payload = {}\n\n if tag_to_data:\n scalars_plugin_instance = self._get_scalars_plugin()\n if not scalars_plugin_instance:\n raise ValueError(('Failed to respond to request for /scalars. '\n 'The scalars plugin is oddly not registered.'))\n\n form = scalars_plugin.OutputFormat.JSON\n payload = {\n tag: scalars_plugin_instance.scalars_impl(tag, run, None, form)[0]\n for tag in tag_to_data.keys()\n if regex.match(tag)\n }\n\n return {\n _REGEX_VALID_PROPERTY: True,\n _TAG_TO_EVENTS_PROPERTY: payload,\n }", "docstring": "Given a tag regex and single run, return ScalarEvents.\n\nArgs:\n run: A run string.\n tag_regex_string: A regular expression that captures portions of tags.\n\nRaises:\n ValueError: if the scalars plugin is not registered.\n\nReturns:\n A dictionary that is the JSON-able response.", "source": "juraj_google_style"} -{"code": "def value_derived_from_wavefunction(self,\n state: np.ndarray,\n qubit_map: Dict[raw_types.Qid, int]\n ) -> Any:\n", "docstring": "The value of the display, derived from the full wavefunction.\n\nArgs:\n state: The wavefunction.\n qubit_map: A dictionary from qubit to qubit index in the\n ordering used to define the wavefunction.", "source": "juraj_google_style"} -{"code": "def initial_sql(self, value):\n\n\n self._initial_sql = value\n # If initial_sql is None we remove the element and don't write it to XML\n if value is None:\n try:\n del self._connectionXML.attrib['one-time-sql']\n except KeyError:\n pass\n else:\n self._connectionXML.set('one-time-sql', value)", "docstring": "Set the connection's initial_sql property.\n\nArgs:\n value: New initial_sql value. String.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def __init__(self, stream):\n\n super(BinaryWriter, self).__init__()\n self.stream = stream", "docstring": "Create an instance.\n\nArgs:\n stream (BytesIO): a stream to operate on. i.e. 
a neo.IO.MemoryStream or raw BytesIO.", "source": "juraj_google_style"} -{"code": "def __init__(self, getter, verbose=False):\n\n self._count = 0\n self._getter = getter\n self._verbose = verbose", "docstring": "Initializes a contextual switch for a custom getter.\n\nArgs:\n getter: The custom getter which we may want to switch on.\n verbose: Log out every time a variable is fetched, and whether or not\n `getter` is used.\n\nReturns:\n A custom getter which can also be used as a context manager.\n Entering the context enables the custom getter.", "source": "juraj_google_style"} -{"code": "def FlagCxx14Features(filename, clean_lines, linenum, error):\n\n line = clean_lines.elided[linenum]\n\n include = Match(r'\\s*#\\s*include\\s+[<\"]([^<\"]+)[\">]', line)\n\n # Flag unapproved C++14 headers.\n if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):\n error(filename, linenum, 'build/c++14', 5,\n ('<%s> is an unapproved C++14 header.') % include.group(1))", "docstring": "Flag those C++14 features that we restrict.\n\nArgs:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.", "source": "juraj_google_style"} -{"code": "def __init__(self, encoding_method=None, parent=None, **kwargs):\n\n if not encoding_method or not parent:\n raise ValueError('Missing encoding method or parent value.')\n\n super(EncodedStreamPathSpec, self).__init__(parent=parent, **kwargs)\n self.encoding_method = encoding_method", "docstring": "Initializes a path specification.\n\n Note that the encoded stream path specification must have a parent.\n\nArgs:\n encoding_method (Optional[str]): method used to the encode the data.\n parent (Optional[PathSpec]): parent path specification.\n\nRaises:\n ValueError: when encoding method or parent are not set.", "source": "juraj_google_style"} -{"code": "def list_group_maintainers(self, name):\n\n self.project_service.set_auth(self._token_project)\n return self.project_service.list_group_maintainers(name)", "docstring": "Get the maintainers of a group.\n\nArgs:\n name (string): Name of group to query.\n\nReturns:\n (list[string]): List of maintainer names.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def load_data(self,\n data,\n datatype=\"ttl\",\n namespace=None,\n graph=None,\n is_file=False,\n **kwargs):\n\n if kwargs.get('debug'):\n log.setLevel(logging.DEBUG)\n time_start = datetime.datetime.now()\n datatype_map = {\n 'ttl': 'turtle',\n 'xml': 'xml',\n 'rdf': 'xml',\n 'nt': 'nt',\n 'n3': 'n3',\n 'nquads': 'nquads',\n 'hturtle': 'hturtle'\n }\n if is_file:\n datatype = data.split(os.path.extsep)[-1]\n file_name = data\n log.debug('starting data load of %s', file_name)\n data = open(data, 'rb').read()\n try:\n content_type = datatype_map[datatype]\n except KeyError:\n raise NotImplementedError(\"'%s' is not an implemented data fromat\",\n datatype)\n conn = self.conn\n if namespace:\n conn = self.tstore.get_namespace(namespace)\n else:\n namespace = self.namespace\n graph = pick(graph, self.graph)\n start = datetime.datetime.now()\n try:\n result = conn.parse(data=data, publicID=graph, format=content_type)\n except:\n if is_file:\n print(\"Datafile \", file_name)\n raise\n if is_file:\n log.info (\" loaded %s into rdflib namespace '%s'\",\n file_name,\n namespace)\n else:\n log.info(\" loaded data into rdflib namespace '%s' in time: %s\",\n namespace,\n 
(datetime.datetime.now() - start))\n return result", "docstring": "loads data via file stream from python to triplestore\n\nArgs:\n data: The data or filepath to load\n datatype(['ttl', 'xml', 'rdf']): the type of data to load\n namespace: the namespace to use\n graph: the graph to load the data to.\n is_file(False): If true python will read the data argument as a\n filepath, determine the datatype from the file extension,\n read the file and send it to blazegraph as a datastream", "source": "juraj_google_style"} -{"code": "def publish_state(self, state):\n\n message = json.dumps({'state': {'reported': state}})\n self.client.publish(self.topic, message)\n self._state = state", "docstring": "Publish thing state to AWS IoT.\n\nArgs:\n state (dict): object state. Must be JSON serializable (i.e., not\n have circular references).", "source": "juraj_google_style"} -{"code": "def ParseMetadataFile(\n self, parser_mediator, file_entry, data_stream_name):\n\n parent_path_spec = getattr(file_entry.path_spec, 'parent', None)\n filename_upper = file_entry.name.upper()\n if (self._mft_parser and parent_path_spec and\n filename_upper in ('$MFT', '$MFTMIRR') and not data_stream_name):\n self._ParseDataStreamWithParser(\n parser_mediator, self._mft_parser, file_entry, '')\n\n elif (self._usnjrnl_parser and parent_path_spec and\n filename_upper == '$USNJRNL' and data_stream_name == '$J'):\n # To be able to ignore the sparse data ranges the UsnJrnl parser\n # needs to read directly from the volume.\n volume_file_object = path_spec_resolver.Resolver.OpenFileObject(\n parent_path_spec, resolver_context=parser_mediator.resolver_context)\n\n try:\n self._ParseFileEntryWithParser(\n parser_mediator, self._usnjrnl_parser, file_entry,\n file_object=volume_file_object)\n finally:\n volume_file_object.close()", "docstring": "Parses a metadata file.\n\nArgs:\n parser_mediator (ParserMediator): parser mediator.\n file_entry (dfvfs.FileEntry): file entry.\n data_stream_name (str): data stream name.", "source": "juraj_google_style"} -{"code": "def __get_vtt_angles(self, pvals, nvals):\n\n\t\t# https://www.khanacademy.org/math/trigonometry/unit-circle-trig-func/inverse_trig_functions/v/inverse-trig-functions--arctan\n\t\tangles = np.arctan2(pvals, nvals)-np.pi/4\n\t\tnorm = np.maximum(np.minimum(angles, np.pi-angles), -1*np.pi-angles)\n\t\tnorm = csr_matrix(norm)\n\t\t# Remove any weight from the NER features. These will be added later.\n\t\tfor key, value in self.B.items():\n\t\t\tnorm[0, key] = 0.\n\t\treturn norm", "docstring": "Fit the angles to the model\n\nArgs:\n pvals (array-like) : positive values\n nvals (array-like) : negative values\n\n Returns: normalized coef_ values", "source": "juraj_google_style"} -{"code": "def __init__(self, name, *value):\n\n self.name = name\n self.key = name\n self.value = name if len(value) != 1 else value[0]\n self.description = \"Matches {!r} and maps it to {!r}\".format(name, self.value)", "docstring": "Initialize Keywords\n\nArgs:\n name -- keyword name\n value -- Optional value, otherwise name is used\n\n value is setup as *value to detect if the parameter is supplied, while\n still supporting None. 
If no value is supplied then name should be used.\n If any value is supplied (even None), then that value is used instead", "source": "juraj_google_style"} -{"code": "def getattr(self, c, attr, default=None, match_only=None):\n\n matching_decor = self.get_decor(c, match_only=match_only)\n\n try:\n return getattr(matching_decor, attr)\n except AttributeError:\n return default", "docstring": "Get the attribute of a component.\n\nArgs:\n c (component): The component to look up.\n attr (str): The attribute to get.\n default (str): What to return in the event of no match.\n match_only (list of str): The component attributes to include in the\n comparison. Default: All of them.\n\nReturns:\n obj. The specified attribute of the matching Decor in the Legend.", "source": "juraj_google_style"} -{"code": "def supports(cls, *functionalities):\n\n def _decorator(view):\n\n # pylint: disable=protected-access\n if not hasattr(view, \"_supports\"):\n view._supports = set()\n for functionality in functionalities:\n view._supports.add(functionality)\n return view\n return _decorator", "docstring": "A view decorator to indicate that an xBlock view has support for the\n given functionalities.\n\nArgs:\n functionalities: String identifiers for the functionalities of the view.\n For example: \"multi_device\".", "source": "juraj_google_style"} -{"code": "def get_value_for_datastore(self, model_instance):\n\n value = super(JsonProperty, self).get_value_for_datastore(model_instance)\n if not value:\n return None\n json_value = value\n if not isinstance(value, dict):\n json_value = value.to_json()\n if not json_value:\n return None\n return datastore_types.Text(json.dumps(\n json_value, sort_keys=True, cls=JsonEncoder))", "docstring": "Gets value for datastore.\n\nArgs:\n model_instance: instance of the model class.\n\nReturns:\n datastore-compatible value.", "source": "juraj_google_style"} -{"code": "def get_samples(self, n_samples, log_p_function, burn_in_steps=50):\n\n restarts = initial_design('random', self.space, n_samples)\n sampler = emcee.EnsembleSampler(n_samples, self.space.input_dim(), log_p_function)\n samples, samples_log, _ = sampler.run_mcmc(restarts, burn_in_steps)\n\n # make sure we have an array of shape (n samples, space input dim)\n if len(samples.shape) == 1:\n samples = samples.reshape(-1, 1)\n samples_log = samples_log.reshape(-1, 1)\n\n return samples, samples_log", "docstring": "Generates samples.\n\n Parameters:\n n_samples - number of samples to generate\n log_p_function - a function that returns log density for a specific sample\n burn_in_steps - number of burn-in steps for sampling\n\n Returns a tuple of two array: (samples, log_p_function values for samples)", "source": "juraj_google_style"} -{"code": "def dictfetchall(cursor: Cursor) -> List[Dict[str, Any]]:\n\n columns = get_fieldnames_from_cursor(cursor)\n return [\n OrderedDict(zip(columns, row))\n for row in cursor.fetchall()\n ]", "docstring": "Return all rows from a cursor as a list of :class:`OrderedDict` objects.\n\nArgs:\n cursor: the cursor\n\nReturns:\n a list (one item per row) of :class:`OrderedDict` objects whose key are\n column names and whose values are the row values", "source": "juraj_google_style"} -{"code": "def apply_handler_to_all_logs(handler: logging.Handler,\n remove_existing: bool = False) -> None:\n\n # noinspection PyUnresolvedReferences\n for name, obj in logging.Logger.manager.loggerDict.items():\n if remove_existing:\n obj.handlers = [] # http://stackoverflow.com/questions/7484454\n 
obj.addHandler(handler)", "docstring": "Applies a handler to all logs, optionally removing existing handlers.\n\n Should ONLY be called from the ``if __name__ == 'main'`` script;\n see https://docs.python.org/3.4/howto/logging.html#library-config.\n\n Generally MORE SENSIBLE just to apply a handler to the root logger.\n\nArgs:\n handler: the handler to apply\n remove_existing: remove existing handlers from logger first?", "source": "juraj_google_style"} -{"code": "def __init__(self, file_system, path_spec):\n\n super(SQLiteBlobDirectory, self).__init__(file_system, path_spec)\n self._number_of_entries = None", "docstring": "Initializes a directory.\n\nArgs:\n file_system (SQLiteBlobFileSystem): file system.\n path_spec (SQLiteBlobPathSpec): path specification.", "source": "juraj_google_style"} -{"code": "def CopyFromDateTimeString(self, time_string):\n\n date_time_values = self._CopyDateTimeFromString(time_string)\n\n year = date_time_values.get('year', 0)\n month = date_time_values.get('month', 0)\n day_of_month = date_time_values.get('day_of_month', 0)\n hours = date_time_values.get('hours', 0)\n minutes = date_time_values.get('minutes', 0)\n seconds = date_time_values.get('seconds', 0)\n\n self._normalized_timestamp = None\n self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n year, month, day_of_month, hours, minutes, seconds)\n self._microseconds = date_time_values.get('microseconds', None)\n\n self.is_local_time = False", "docstring": "Copies a fake timestamp from a date and time string.\n\nArgs:\n time_string (str): date and time value formatted as:\n YYYY-MM-DD hh:mm:ss.######[+-]##:##\n\n Where # are numeric digits ranging from 0 to 9 and the seconds\n fraction can be either 3 or 6 digits. The time of day, seconds\n fraction and time zone offset are optional. The default time zone\n is UTC.", "source": "juraj_google_style"} -{"code": "def __init__(self, column_names=None, column_sizes=None, title=None):\n\n super(CLITabularTableView, self).__init__(\n column_names=column_names, title=title)\n self._column_sizes = column_sizes or []", "docstring": "Initializes a command line table view.\n\nArgs:\n column_names (Optional[list[str]]): column names.\n column_sizes (Optional[list[int]]): minimum column sizes, in number of\n characters. If a column name or row value is larger than the\n minimum column size the column will be enlarged. Note that the\n minimum columns size will be rounded up to the number of spaces\n of the next tab.\n title (Optional[str]): title.", "source": "juraj_google_style"} -{"code": "def __init__(\n self,\n learning_rate,\n cg_max_iterations=20,\n cg_damping=1e-3,\n cg_unroll_loop=False,\n scope='natural-gradient',\n summary_labels=()\n ):\n\n assert learning_rate > 0.0\n self.learning_rate = learning_rate\n\n self.solver = ConjugateGradient(\n max_iterations=cg_max_iterations,\n damping=cg_damping,\n unroll_loop=cg_unroll_loop\n )\n\n super(NaturalGradient, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new natural gradient optimizer instance.\n\nArgs:\n learning_rate: Learning rate, i.e. 
KL-divergence of distributions between optimization steps.\n cg_max_iterations: Conjugate gradient solver max iterations.\n cg_damping: Conjugate gradient solver damping factor.\n cg_unroll_loop: Unroll conjugate gradient loop if true.", "source": "juraj_google_style"} -{"code": "def read_config_string_options(obj: Any,\n parser: ConfigParser,\n section: str,\n options: Iterable[str],\n default: str = None) -> None:\n\n # enforce_str removed; ConfigParser always returns strings unless asked\n # specifically\n for o in options:\n setattr(obj, o, get_config_string_option(parser, section, o,\n default=default))", "docstring": "Reads config options and writes them as attributes of ``obj``, with\n attribute names as per ``options``.\n\nArgs:\n obj: the object to modify\n parser: instance of :class:`ConfigParser`\n section: section name within config file\n options: option (variable) names within that section\n default: value to use for any missing options\n\n Returns:", "source": "juraj_google_style"} -{"code": "def serve(self, server=None):\n\n if server is None:\n from wsgiref.simple_server import make_server\n server = lambda app: make_server('', 8000, app).serve_forever()\n print('Listening on 0.0.0.0:8000')\n try:\n server(self)\n finally:\n server.socket.close()", "docstring": "Serve app using wsgiref or provided server.\n\nArgs:\n - server (callable): An callable", "source": "juraj_google_style"} -{"code": "def parsed_forensic_reports_to_csv(reports):\n\n fields = [\"feedback_type\", \"user_agent\", \"version\", \"original_envelope_id\",\n \"original_mail_from\", \"original_rcpt_to\", \"arrival_date\",\n \"arrival_date_utc\", \"subject\", \"message_id\",\n \"authentication_results\", \"dkim_domain\", \"source_ip_address\",\n \"source_country\", \"source_reverse_dns\", \"source_base_domain\",\n \"delivery_result\", \"auth_failure\", \"reported_domain\",\n \"authentication_mechanisms\", \"sample_headers_only\"]\n\n if type(reports) == OrderedDict:\n reports = [reports]\n csv_file = StringIO()\n csv_writer = DictWriter(csv_file, fieldnames=fields)\n csv_writer.writeheader()\n for report in reports:\n row = report.copy()\n row[\"source_ip_address\"] = report[\"source\"][\"ip_address\"]\n row[\"source_reverse_dns\"] = report[\"source\"][\"reverse_dns\"]\n row[\"source_base_domain\"] = report[\"source\"][\"base_domain\"]\n row[\"source_country\"] = report[\"source\"][\"country\"]\n del row[\"source\"]\n row[\"subject\"] = report[\"parsed_sample\"][\"subject\"]\n row[\"auth_failure\"] = \",\".join(report[\"auth_failure\"])\n authentication_mechanisms = report[\"authentication_mechanisms\"]\n row[\"authentication_mechanisms\"] = \",\".join(\n authentication_mechanisms)\n del row[\"sample\"]\n del row[\"parsed_sample\"]\n csv_writer.writerow(row)\n\n return csv_file.getvalue()", "docstring": "Converts one or more parsed forensic reports to flat CSV format, including\n headers\n\nArgs:\n reports: A parsed forensic report or list of parsed forensic reports\n\nReturns:\n str: Parsed forensic report data in flat CSV format, including headers", "source": "juraj_google_style"} -{"code": "def search(cls, five9, filters):\n\n return cls._name_search(five9.configuration.getDispositions, filters)", "docstring": "Search for a record on the remote and return the results.\n\nArgs:\n five9 (five9.Five9): The authenticated Five9 remote.\n filters (dict): A dictionary of search parameters, keyed by the\n name of the field to search. 
This should conform to the\n schema defined in :func:`five9.Five9.create_criteria`.\n\nReturns:\n list[BaseModel]: A list of records representing the result.", "source": "juraj_google_style"} -{"code": "def chosen_angle_to_half_turns(\n half_turns: Optional[Union[sympy.Basic, float]] = None,\n rads: Optional[float] = None,\n degs: Optional[float] = None,\n default: float = 1.0,\n) -> Union[sympy.Basic, float]:\n\n\n if len([1 for e in [half_turns, rads, degs] if e is not None]) > 1:\n raise ValueError('Redundant angle specification. '\n 'Use ONE of half_turns, rads, or degs.')\n\n if rads is not None:\n return rads / np.pi\n\n if degs is not None:\n return degs / 180\n\n if half_turns is not None:\n return half_turns\n\n return default", "docstring": "Returns a half_turns value based on the given arguments.\n\n At most one of half_turns, rads, degs must be specified. If none are\n specified, the output defaults to half_turns=1.\n\nArgs:\n half_turns: The number of half turns to rotate by.\n rads: The number of radians to rotate by.\n degs: The number of degrees to rotate by\n default: The half turns angle to use if nothing else is specified.\n\nReturns:\n A number of half turns.", "source": "juraj_google_style"} -{"code": "def upload_metric(self, dataset_name, table_name, run_id):\n\n expected_file = os.path.join(\n self._logging_dir, logger.METRIC_LOG_FILE_NAME)\n with tf.gfile.GFile(expected_file) as f:\n lines = f.readlines()\n metrics = []\n for line in filter(lambda l: l.strip(), lines):\n metric = json.loads(line)\n metric[\"run_id\"] = run_id\n metrics.append(metric)\n table_ref = self._bq_client.dataset(dataset_name).table(table_name)\n errors = self._bq_client.insert_rows_json(table_ref, metrics)\n if errors:\n tf.logging.error(\n \"Failed to upload benchmark info to bigquery: {}\".format(errors))", "docstring": "Upload metric information to Bigquery.\n\nArgs:\n dataset_name: string, the name of bigquery dataset where the data will be\n uploaded.\n table_name: string, the name of bigquery table under the dataset where\n the metric data will be uploaded. This is different from the\n benchmark_run table.\n run_id: string, a unique ID that will be attached to the data, usually\n this is a UUID4 format. 
This should be the same as the benchmark run_id.", "source": "juraj_google_style"} -{"code": "def _import_templates(force=False):\n\n tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates')\n disk_templates = {f: os.path.join(root, f) for root, directory, files in os.walk(tmplpath) for f in files}\n db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()}\n\n for name, template_file in disk_templates.items():\n with open(template_file, 'r') as f:\n body = f.read()\n disk_hash = get_hash(body)\n\n if name not in db_templates:\n template = Template()\n template.template_name = name\n template.template = body\n\n db.session.add(template)\n auditlog(\n event='template.import',\n actor='init',\n data={\n 'template_name': name,\n 'template': body\n }\n )\n logger.info('Imported template {}'.format(name))\n else:\n template = db_templates[name]\n db_hash = get_hash(template.template)\n\n if db_hash != disk_hash:\n if force or not db_templates[name].is_modified:\n template.template = body\n\n db.session.add(template)\n auditlog(\n event='template.update',\n actor='init',\n data={\n 'template_name': name,\n 'template_diff': diff(template.template, body)\n }\n )\n logger.info('Updated template {}'.format(name))\n else:\n logger.warning(\n 'Updated template available for {}. Will not import as it would'\n ' overwrite user edited content and force is not enabled'.format(name)\n )", "docstring": "Import templates from disk into database\n\n Reads all templates from disk and adds them to the database. By default, any template that has been modified by\n the user will not be updated. This can however be changed by setting `force` to `True`, which causes all templates\n to be imported regardless of status\n\nArgs:\n force (`bool`): Force overwrite any templates with local changes made. Default: `False`\n\nReturns:\n `None`", "source": "juraj_google_style"} -{"code": "def list_parking(self, **kwargs):\n\n # Endpoint parameters\n url_args = {'lang': util.language_code(kwargs.get('lang'))}\n\n # Request\n result = self.make_request('list_parking', url_args)\n\n if not util.check_result(result):\n return False, result.get('message', 'UNKNOWN ERROR')\n\n # Parse\n values = util.response_list(result, 'Data')\n return True, [emtype.Parking(**a) for a in values]", "docstring": "Obtain a list of parkings.\n\nArgs:\n lang (str): Language code (*es* or *en*).\n\nReturns:\n Status boolean and parsed response (list[Parking]), or message\n string in case of error.", "source": "juraj_google_style"} -{"code": "def add(self, *dic):\n\n dicList = list(flatten(dic))\n # for every dict in the list passed in\n for d in dicList:\n # make a dict single (list of pairs)\n di = []\n for k in d:\n # checkKey(k, self.keyWord)\n di.append(Pair(k, IntegerSingle(d[k])))\n dictSingle = DictSingle(di)\n # append dict single to array single's value\n self._add([dictSingle], self.l)", "docstring": "add a config to StartCalendarInterval.\n\nArgs:\n *dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Avaliable keys are Month, Day, Weekday, Hour, Minute. 
*Note the uppercase.* You can use gen(), genMix() to generate complex config dictionary.", "source": "juraj_google_style"} -{"code": "def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:\n\n if isinstance(variables, dict):\n if variables.get(\"times\"):\n times = int(variables[\"times\"])\n del variables[\"times\"]\n\n yield list(variable_matrix(variables, parent, \"product\")) * times\n\n else:\n raise ValueError(f\"times is a required keyword for the repeat iterator.\")\n else:\n raise ValueError(\n f\"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}\"\n )", "docstring": "Cycle through a list of values a specified number of times\n\nArgs:\n variables: The input variables for the creation of the range\n parent: The variable for which the values are being generated.\n\n Returns: A list of dictionaries mapping the parent to each value.", "source": "juraj_google_style"} -{"code": "def execute(self):\n\n # if not self.tasks:\n # raise WorkflowError('Workflow contains no tasks, and cannot be executed.')\n\n # for task in self.tasks:\n # self.definition['tasks'].append( task.generate_task_workflow_json() )\n\n self.generate_workflow_description()\n\n # hit batch workflow endpoint if batch values\n if self.batch_values:\n self.id = self.workflow.launch_batch_workflow(self.definition)\n\n # use regular workflow endpoint if no batch values\n else:\n self.id = self.workflow.launch(self.definition)\n\n return self.id", "docstring": "Execute the workflow.\n\nArgs:\n None\n\nReturns:\n Workflow_id", "source": "juraj_google_style"} -{"code": "def _build_update_ops(self, mean, variance, is_training):\n\n\n def build_update_ops():\n\n\n update_mean_op = moving_averages.assign_moving_average(\n variable=self._moving_mean,\n value=tf.reshape(mean, (self._num_channels,)),\n decay=self._decay_rate,\n zero_debias=False,\n name=\"update_moving_mean\").op\n\n update_variance_op = moving_averages.assign_moving_average(\n variable=self._moving_variance,\n value=tf.reshape(variance, (self._num_channels,)),\n decay=self._decay_rate,\n zero_debias=False,\n name=\"update_moving_variance\").op\n\n return update_mean_op, update_variance_op\n\n def build_no_ops():\n return (tf.no_op(), tf.no_op())\n\n # Only make the ops if we know that `is_training=True`, or the value of\n # `is_training` is unknown.\n is_training_const = utils.constant_value(is_training)\n if is_training_const is None or is_training_const:\n update_mean_op, update_variance_op = utils.smart_cond(\n is_training,\n build_update_ops,\n build_no_ops,\n )\n return (update_mean_op, update_variance_op)\n else:\n return None", "docstring": "Builds the moving average update ops when using moving variance.\n\nArgs:\n mean: The mean value to update with.\n variance: The variance value to update with.\n is_training: Boolean Tensor to indicate if we're currently in\n training mode.\n\nReturns:\n Tuple of `(update_mean_op, update_variance_op)` when `is_training` is or\n could be `True`. 
Returns `None` when `is_training=False`.", "source": "juraj_google_style"} -{"code": "def update_parent_directory_number(self, parent_dir_num):\n # type: (int) -> None\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')\n self.parent_directory_num = parent_dir_num", "docstring": "A method to update the parent directory number for this Path Table\n Record from the directory record.\n\n Parameters:\n parent_dir_num - The new parent directory number to assign to this PTR.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def cd(new_directory, clean_up=lambda: True): # pylint: disable=invalid-name\n\n previous_directory = os.getcwd()\n os.chdir(os.path.expanduser(new_directory))\n try:\n yield\n finally:\n os.chdir(previous_directory)\n clean_up()", "docstring": "Changes into a given directory and cleans up after it is done\n\nArgs:\n new_directory: The directory to change to\n clean_up: A method to clean up the working directory once done", "source": "juraj_google_style"} -{"code": "def clear_extra_selections(self, key):\n\n for decoration in self.extra_selections_dict.get(key, []):\n self.decorations.remove(decoration)\n self.extra_selections_dict[key] = []", "docstring": "Remove decorations added through set_extra_selections.\n\nArgs:\n key (str) name of the extra selections group.", "source": "juraj_google_style"} -{"code": "def _extract_and_handle_mpbgp_withdraws(self, mp_unreach_attr):\n\n msg_rf = mp_unreach_attr.route_family\n # Check if this route family is among supported route families.\n if msg_rf not in SUPPORTED_GLOBAL_RF:\n LOG.info(\n 'Received route family %s is not supported. '\n 'Ignoring withdraw routes on this UPDATE message.',\n msg_rf)\n return\n\n w_nlris = mp_unreach_attr.withdrawn_routes\n if not w_nlris:\n # If this is EOR of some kind, handle it\n self._handle_eor(msg_rf)\n\n for w_nlri in w_nlris:\n w_path = bgp_utils.create_path(\n self,\n w_nlri,\n is_withdraw=True\n )\n block, blocked_cause = self._apply_in_filter(w_path)\n\n received_route = ReceivedRoute(w_path, self, block)\n nlri_str = w_nlri.formatted_nlri_str\n\n if nlri_str in self._adj_rib_in:\n del self._adj_rib_in[nlri_str]\n self._signal_bus.adj_rib_in_changed(self, received_route)\n\n if not block:\n # Update appropriate table with withdraws.\n tm = self._core_service.table_manager\n tm.learn_path(w_path)\n else:\n LOG.debug('prefix : %s is blocked by in-bound filter: %s',\n w_nlri, blocked_cause)", "docstring": "Extracts withdraws advertised in the given update message's\n *MpUnReachNlri* attribute.\n\n Assumes MPBGP capability is enabled.\n Parameters:\n - update_msg: (Update) is assumed to be checked for all bgp\n message errors.\n\n Extracted withdraws are added to appropriate *Destination* for further\n processing.", "source": "juraj_google_style"} -{"code": "def make_absolute(base, relative):\n\n\n # Python 3.4 and lower do not remove folder traversal strings.\n # This was fixed in 3.5 (https://docs.python.org/3/whatsnew/3.5.html#urllib)\n while relative.startswith('/../') or relative.startswith('../'):\n relative = relative[3:]\n\n base_parsed = urlparse(base)\n new_path = base_parsed.path.rsplit('/', 1)[0]\n base_parsed = base_parsed._replace(path=new_path)\n base = base_parsed.geturl()\n\n return urljoin(base, relative)", "docstring": "Make the given (relative) URL absolute.\n\nArgs:\n base (str): The absolute URL the relative url was found on.\n relative (str): The (possibly relative) url to make 
absolute.\n\nReturns:\n str: The absolute URL.", "source": "juraj_google_style"} -{"code": "def _flatten_obs(self, obs_dict, verbose=False):\n\n ob_lst = []\n for key in obs_dict:\n if key in self.keys:\n if verbose:\n print(\"adding key: {}\".format(key))\n ob_lst.append(obs_dict[key])\n return np.concatenate(ob_lst)", "docstring": "Filters keys of interest out and concatenate the information.\n\nArgs:\n obs_dict: ordered dictionary of observations", "source": "juraj_google_style"} -{"code": "def _get_ngram_counter(ids, n):\n\n # Remove zero IDs used to pad the sequence.\n ids = [token_id for token_id in ids if token_id != 0]\n ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)]\n ngrams = set(ngram_list)\n counts = collections.Counter()\n for ngram in ngrams:\n counts[ngram] = 1\n return counts", "docstring": "Get a Counter with the ngrams of the given ID list.\n\nArgs:\n ids: np.array or a list corresponding to a single sentence\n n: n-gram size\n\nReturns:\n collections.Counter with ID tuples as keys and 1s as values.", "source": "juraj_google_style"} -{"code": "def _json_clean(d):\n\n result = {}\n compkeys = {}\n for k, v in d.items():\n if not isinstance(k, tuple):\n result[k] = v\n else:\n #v is a list of entries for instance methods/constructors on the\n #UUID of the key. Instead of using the composite tuple keys, we\n #switch them for a string using the \n key = \"c.{}\".format(id(k))\n result[key] = v\n compkeys[key] = k\n\n return (result, compkeys)", "docstring": "Cleans the specified python `dict` by converting any tuple keys to\n strings so that they can be serialized by JSON.\n\nArgs:\n d (dict): python dictionary to clean up.\n\nReturns:\n dict: cleaned-up dictionary.", "source": "juraj_google_style"} -{"code": "def success(channel, title, datapacks):\n\n\n # Create embed UI object\n gui = ui_embed.UI(\n channel,\n title,\n \"\",\n modulename=modulename,\n datapacks=datapacks\n )\n\n return gui", "docstring": "Creates an embed UI containing the help message\n\nArgs:\n channel (discord.Channel): The Discord channel to bind the embed to\n title (str): The title of the embed\n datapacks (list): The hex value\n\nReturns:\n ui (ui_embed.UI): The embed UI object", "source": "juraj_google_style"} -{"code": "def _update(self, axes_list):\n\n plot_type = self.settings['plot_style']\n if plot_type == '2D':\n # we expect exactely one image in the axes object (see ScriptDummy.plot)\n implot = axes_list[1].get_images()[0]\n # now update the data\n implot.set_data(self.data['random data'])\n\n colorbar = implot.colorbar\n\n if not colorbar is None:\n colorbar.update_bruteforce(implot)\n\n else:\n # fall back to default behaviour\n Script._update(self, axes_list)", "docstring": "updates the data in already existing plots. 
the axes objects are provided in axes_list\n\nArgs:\n axes_list: a list of axes objects, this should be implemented in each subscript\n\n Returns: None", "source": "juraj_google_style"} -{"code": "def step(self, thumb=False):\n\n method = self._dll.JLINKARM_Step\n if thumb:\n method = self._dll.JLINKARM_StepComposite\n\n res = method()\n if res != 0:\n raise errors.JLinkException(\"Failed to step over instruction.\")\n\n return None", "docstring": "Executes a single step.\n\n Steps even if there is a breakpoint.\n\nArgs:\n self (JLink): the ``JLink`` instance\n thumb (bool): boolean indicating if to step in thumb mode\n\nReturns:\n ``None``\n\nRaises:\n JLinkException: on error", "source": "juraj_google_style"} -{"code": "def _collect_data(directory):\n\n # Returns:\n data_files = []\n transcripts = [\n filename for filename in os.listdir(directory)\n if filename.endswith(\".csv\")\n ]\n for transcript in transcripts:\n transcript_path = os.path.join(directory, transcript)\n with open(transcript_path, \"r\") as transcript_file:\n transcript_reader = csv.reader(transcript_file)\n # skip header\n _ = next(transcript_reader)\n for transcript_line in transcript_reader:\n media_name, label = transcript_line[0:2]\n filename = os.path.join(directory, media_name)\n data_files.append((media_name, filename, label))\n return data_files", "docstring": "Traverses directory collecting input and target files.\n\nArgs:\n directory: base path to extracted audio and transcripts.\n\nReturns:\n list of (media_base, media_filepath, label) tuples", "source": "juraj_google_style"} -{"code": "def isloaded(self, name):\n\n if name is None:\n return True\n\n if isinstance(name, str):\n return (name in [x.__module__ for x in self])\n\n if isinstance(name, Iterable):\n return set(name).issubset([x.__module__ for x in self])\n\n return False", "docstring": "Checks if given hook module has been loaded\n\nArgs:\n name (str): The name of the module to check\n\nReturns:\n bool. The return code::\n\n True -- Loaded\n False -- Not Loaded", "source": "juraj_google_style"} -{"code": "def observe(self, success, failure):\n\n if isinstance(success, int) is False:\n if isinstance(success, float) is False:\n raise TypeError()\n if isinstance(failure, int) is False:\n if isinstance(failure, float) is False:\n raise TypeError()\n\n if success <= 0:\n raise ValueError()\n if failure <= 0:\n raise ValueError()\n\n self.__success += success\n self.__failure += failure", "docstring": "Observation data.\n\nArgs:\n success: The number of success.\n failure: The number of failure.", "source": "juraj_google_style"} -{"code": "def check(self, instance, format):\n\n\n if format not in self.checkers:\n return\n\n func, raises = self.checkers[format]\n result, cause = None, None\n try:\n result = func(instance)\n except raises as e:\n cause = e\n if not result:\n raise FormatError(\n \"%r is not a %r\" % (instance, format), cause=cause,\n )", "docstring": "Check whether the instance conforms to the given format.\n\nArgs:\n instance (*any primitive type*, i.e. 
str, number, bool):\n\n The instance to check\n\n format (str):\n\n The format that instance should conform to\n\nRaises:\n FormatError: if the instance does not conform to ``format``", "source": "juraj_google_style"} -{"code": "def do_command_line(infile: typing.IO[str]) -> int:\n\n lines = infile.readlines()\n tree = ast.parse(''.join(lines))\n checker = Checker(tree, lines, infile.name)\n checker.load()\n errors = [] # type: typing.List[AAAError]\n for func in checker.all_funcs(skip_noqa=True):\n try:\n errors = list(func.check_all())\n except ValidationError as error:\n errors = [error.to_aaa()]\n print(func.__str__(errors), end='')\n return len(errors)", "docstring": "Currently a small stub to create an instance of Checker for the passed\n ``infile`` and run its test functions through linting.\n\nArgs:\n infile\n\nReturns:\n int: Number of flake8 errors raised.", "source": "juraj_google_style"} -{"code": "def get_ssm_parameter(parameter_name):\n\n try:\n response = boto3.client('ssm').get_parameters(\n Names=[parameter_name],\n WithDecryption=True\n )\n\n return response.get('Parameters', None)[0].get('Value', '')\n except Exception:\n pass\n\n return ''", "docstring": "Get the decrypted value of an SSM parameter\n\nArgs:\n parameter_name - the name of the stored parameter of interest\n\nReturns:\n Value if allowed and present else None", "source": "juraj_google_style"} -{"code": "def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16):\n\n\n if dest < 0 or src < 0 or count < 0:\n raise ValueError\n\n fobj.seek(0, 2)\n filesize = fobj.tell()\n\n if max(dest, src) + count > filesize:\n raise ValueError(\"area outside of file\")\n\n if src > dest:\n moved = 0\n while count - moved:\n this_move = min(BUFFER_SIZE, count - moved)\n fobj.seek(src + moved)\n buf = fobj.read(this_move)\n fobj.seek(dest + moved)\n fobj.write(buf)\n moved += this_move\n fobj.flush()\n else:\n while count:\n this_move = min(BUFFER_SIZE, count)\n fobj.seek(src + count - this_move)\n buf = fobj.read(this_move)\n fobj.seek(count + dest - this_move)\n fobj.write(buf)\n count -= this_move\n fobj.flush()", "docstring": "Moves data around using read()/write().\n\nArgs:\n fileobj (fileobj)\n dest (int): The destination offset\n src (int): The source offset\n count (int) The amount of data to move\n\nRaises:\n IOError: In case an operation on the fileobj fails\n ValueError: In case invalid parameters were given", "source": "juraj_google_style"} -{"code": "def get_cache_index_key(resource):\n\n if isinstance(resource, APIResource):\n attr, attr_value = list(resource.get_cache_index_keys().items())[0]\n key = (type(resource), attr, attr_value)\n else:\n key = tuple(resource)\n\n if len(key) != 3:\n raise TypeError('Cache key must be tuple of (class, key, value), got `{!r}` instead'.format(key))\n\n if not issubclass(key[0], APIResource):\n raise TypeError('First value of cache key must be a subclass of APIResource, got `{!r}` instead'.format(key[0]))\n\n return key", "docstring": "Return a usable cache lookup key for an already initialized resource\n\nArgs:\n resource (APIResource|tuple): APIResource instance or 3-length tuple key returned from this function\n\nRaises:\n TypeError: If resource is not an APIResource instance or acceptable 3-length tuple cache key", "source": "juraj_google_style"} -{"code": "def add_papyrus_routes(self, route_name_prefix, base_url):\n\n route_name = route_name_prefix + '_read_many'\n self.add_route(route_name, base_url, request_method='GET')\n route_name = route_name_prefix + 
'_read_one'\n self.add_route(route_name, base_url + '/{id}', request_method='GET')\n route_name = route_name_prefix + '_count'\n self.add_route(route_name, base_url + '/count', request_method='GET')\n route_name = route_name_prefix + '_create'\n self.add_route(route_name, base_url, request_method='POST')\n route_name = route_name_prefix + '_update'\n self.add_route(route_name, base_url + '/{id}', request_method='PUT')\n route_name = route_name_prefix + '_delete'\n self.add_route(route_name, base_url + '/{id}', request_method='DELETE')", "docstring": "A helper method that adds routes to view callables that, together,\n implement the MapFish HTTP interface.\n\n Example::\n\n import papyrus\n config.include(papyrus)\n config.add_papyrus_routes('spots', '/spots')\n config.scan()\n\nArgs:\n ``route_name_prefix' The prefix used for the route names\n passed to ``config.add_route``.\n\n ``base_url`` The web service's base URL, e.g. ``/spots``. No\n trailing slash!", "source": "juraj_google_style"} -{"code": "def _get_manager(cluster_info, host, executor_id):\n\n for node in cluster_info:\n if node['host'] == host and node['executor_id'] == executor_id:\n addr = node['addr']\n authkey = node['authkey']\n TFSparkNode.mgr = TFManager.connect(addr, authkey)\n break\n\n if TFSparkNode.mgr is None:\n msg = \"No TFManager found on this node, please ensure that:\\n\" + \\\n \"1. Spark num_executors matches TensorFlow cluster_size\\n\" + \\\n \"2. Spark cores/tasks per executor is 1.\\n\" + \\\n \"3. Spark dynamic allocation is disabled.\"\n raise Exception(msg)\n\n logging.info(\"Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}\".format(host, executor_id, str(TFSparkNode.mgr.get('state'))))\n return TFSparkNode.mgr", "docstring": "Returns this executor's \"singleton\" instance of the multiprocessing.Manager, reconnecting per python-worker if needed.\n\nArgs:\n :cluster_info: cluster node reservations\n :host: host IP address\n :executor_id: unique id per executor (created during initial call to run())\n\nReturns:\n TFManager instance for this executor/python-worker", "source": "juraj_google_style"} -{"code": "def after_run(self, run_context, run_values): # pylint: disable=unused-argument\n\n global_step = run_values.results\n\n if self._timer.should_trigger_for_step(\n global_step) and global_step > self._warm_steps:\n elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(\n global_step)\n if elapsed_time is not None:\n self._step_train_time += elapsed_time\n self._total_steps += elapsed_steps\n\n # average examples per second is based on the total (accumulative)\n # training steps and training time so far\n average_examples_per_sec = self._batch_size * (\n self._total_steps / self._step_train_time)\n # current examples per second is based on the elapsed training steps\n # and training time per batch\n current_examples_per_sec = self._batch_size * (\n elapsed_steps / elapsed_time)\n # Current examples/sec followed by average examples/sec\n tf.logging.info('Batch [%g]: current exp/sec = %g, average exp/sec = '\n '%g', self._total_steps, current_examples_per_sec,\n average_examples_per_sec)", "docstring": "Called after each call to run().\n\nArgs:\n run_context: A SessionRunContext object.\n run_values: A SessionRunValues object.", "source": "juraj_google_style"} -{"code": "def with_contest_type(self, contest_type):\n\n self._validate_contest_type(contest_type)\n if contest_type.lower() in ('by', 'by election', 'by-election'):\n self.contest_type = 'by'\n return self", 
"docstring": "Add a contest_type segment\n\nArgs:\n contest_type (str): Invoke with ``contest_type='by'`` or\n ``contest_type='by-election'`` to add a 'by' segment to the\n ballot_id. Invoking with ``contest_type='election'`` is valid\n syntax but has no effect.\n\nReturns:\n IdBuilder\n\nRaises:\n ValueError", "source": "juraj_google_style"} -{"code": "def remove_delegate(self, callback):\n\n\n if callback not in self._delegate_methods:\n return\n\n self._delegate_methods.remove(callback)", "docstring": "Unregisters a registered delegate function or a method.\n\nArgs:\n callback(function): method to trigger when push center receives events", "source": "juraj_google_style"} -{"code": "def ParseFileObject(self, parser_mediator, file_object):\n\n page_header_map = self._GetDataTypeMap('dls_page_header')\n\n try:\n page_header, file_offset = self._ReadStructureFromFileObject(\n file_object, 0, page_header_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.UnableToParseFile(\n 'Unable to parse page header with error: {0!s}'.format(\n exception))\n\n if page_header.signature not in self._DLS_SIGNATURES:\n raise errors.UnableToParseFile('Invalid file signature')\n\n current_page_end = page_header.page_size\n\n file_entry = parser_mediator.GetFileEntry()\n date_time = self._GetParentModificationTime(file_entry)\n # TODO: Change this to use a more representative time definition (time span)\n # when https://github.com/log2timeline/dfdatetime/issues/65 is resolved.\n if date_time:\n timestamp_description = definitions.TIME_DESCRIPTION_RECORDED\n else:\n date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME\n event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n\n file_size = file_object.get_size()\n while file_offset < file_size:\n if file_offset >= current_page_end:\n try:\n page_header, header_size = self._ParseDLSPageHeader(\n file_object, file_offset)\n except errors.ParseError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to parse page header with error: {0!s}'.format(\n exception))\n break\n\n current_page_end += page_header.page_size\n file_offset += header_size\n continue\n\n if page_header.signature == self._DLS_V1_SIGNATURE:\n record_map = self._GetDataTypeMap('dls_record_v1')\n else:\n record_map = self._GetDataTypeMap('dls_record_v2')\n\n try:\n record, record_length = self._ReadStructureFromFileObject(\n file_object, file_offset, record_map)\n file_offset += record_length\n except (ValueError, errors.ParseError) as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to parse page record with error: {0!s}'.format(\n exception))\n break\n\n event_data = self._BuildEventData(record)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an fseventsd file.\n\nArgs:\n parser_mediator (ParserMediator): parser mediator.\n file_object (dfvfs.FileIO): a file-like object.\n\nRaises:\n UnableToParseFile: when the header cannot be parsed.", "source": "juraj_google_style"} -{"code": "def QA_fetch_financial_report(code, report_date, ltype='EN', db=DATABASE):\n\n\n if isinstance(code, str):\n code = [code]\n if isinstance(report_date, str):\n report_date = [QA_util_date_str2int(report_date)]\n elif isinstance(report_date, int):\n report_date = [report_date]\n elif isinstance(report_date, list):\n report_date = [QA_util_date_str2int(item) for item in report_date]\n\n collection = db.financial\n num_columns = [item[:3] 
for item in list(financial_dict.keys())]\n CH_columns = [item[3:] for item in list(financial_dict.keys())]\n EN_columns = list(financial_dict.values())\n #num_columns.extend(['283', '_id', 'code', 'report_date'])\n # CH_columns.extend(['283', '_id', 'code', 'report_date'])\n #CH_columns = pd.Index(CH_columns)\n #EN_columns = list(financial_dict.values())\n #EN_columns.extend(['283', '_id', 'code', 'report_date'])\n #EN_columns = pd.Index(EN_columns)\n\n try:\n if code is not None and report_date is not None:\n data = [item for item in collection.find(\n {'code': {'$in': code}, 'report_date': {'$in': report_date}}, {\"_id\": 0}, batch_size=10000)]\n elif code is None and report_date is not None:\n data = [item for item in collection.find(\n {'report_date': {'$in': report_date}}, {\"_id\": 0}, batch_size=10000)]\n elif code is not None and report_date is None:\n data = [item for item in collection.find(\n {'code': {'$in': code}}, {\"_id\": 0}, batch_size=10000)]\n else:\n data = [item for item in collection.find({}, {\"_id\": 0})]\n if len(data) > 0:\n res_pd = pd.DataFrame(data)\n\n if ltype in ['CH', 'CN']:\n\n cndict = dict(zip(num_columns, CH_columns))\n\n cndict['283'] = '283'\n try:\n cndict['284'] = '284'\n cndict['285'] = '285'\n cndict['286'] = '286'\n except:\n pass\n\n cndict['code'] = 'code'\n cndict['report_date'] = 'report_date'\n res_pd.columns = res_pd.columns.map(lambda x: cndict[x])\n elif ltype is 'EN':\n endict = dict(zip(num_columns, EN_columns))\n endict['283'] = '283'\n try:\n endict['284'] = '284'\n endict['285'] = '285'\n endict['286'] = '286'\n except:\n pass\n\n endict['code'] = 'code'\n endict['report_date'] = 'report_date'\n res_pd.columns = res_pd.columns.map(lambda x: endict[x])\n\n if res_pd.report_date.dtype == numpy.int64:\n res_pd.report_date = pd.to_datetime(\n res_pd.report_date.apply(QA_util_date_int2str))\n else:\n res_pd.report_date = pd.to_datetime(res_pd.report_date)\n\n return res_pd.replace(-4.039810335e+34, numpy.nan).set_index(['report_date', 'code'], drop=False)\n else:\n return None\n except Exception as e:\n raise e", "docstring": "获取专业财务报表\n\nArgs:\n code {[type]} -- [description]\n report_date {[type]} -- [description]\n Keyword Arguments:\n ltype {str} -- [description] (default: {'EN'})\n db {[type]} -- [description] (default: {DATABASE})\n\nRaises:\n e -- [description]\n\nReturns:\n pd.DataFrame -- [description]", "source": "juraj_google_style"} -{"code": "def read_single_knmi_file(filename):\n\n hourly_data_obs_raw = pd.read_csv(\n filename,\n parse_dates=[['YYYYMMDD', 'HH']],\n date_parser=lambda yyyymmdd, hh: pd.datetime(int(str(yyyymmdd)[0:4]),\n int(str(yyyymmdd)[4:6]),\n int(str(yyyymmdd)[6:8]),\n int(hh) - 1),\n skiprows=31,\n skipinitialspace=True,\n na_values='',\n keep_date_col=True,\n )\n\n hourly_data_obs_raw.index = hourly_data_obs_raw['YYYYMMDD_HH']\n hourly_data_obs_raw.index = hourly_data_obs_raw.index + pd.Timedelta(hours=1)\n\n columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']\n\n hourly_data_obs = pd.DataFrame(\n index=hourly_data_obs_raw.index,\n columns=columns_hourly,\n data=dict(\n temp=hourly_data_obs_raw['T'] / 10 + 273.15,\n precip=hourly_data_obs_raw['RH'] / 10,\n glob=hourly_data_obs_raw['Q'] * 10000 / 3600.,\n hum=hourly_data_obs_raw['U'],\n wind=hourly_data_obs_raw['FH'] / 10,\n ssd=hourly_data_obs_raw['SQ'] * 6,\n ),\n )\n # remove negative values\n negative_values = hourly_data_obs['precip'] < 0.0\n hourly_data_obs.loc[negative_values, 'precip'] = 0.0\n return hourly_data_obs", "docstring": 
"reads a single file of KNMI's meteorological time series\n\n data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens\n\nArgs:\n filename: the file to be opened\n\nReturns:\n pandas data frame including time series", "source": "juraj_google_style"} -{"code": "def delete(self, messageId):\n\n check_type(messageId, basestring, may_be_none=False)\n\n # API request\n self._session.delete(API_ENDPOINT + '/' + messageId)", "docstring": "Delete a message.\n\nArgs:\n messageId(basestring): The ID of the message to be deleted.\n\nRaises:\n TypeError: If the parameter types are incorrect.\n ApiError: If the Webex Teams cloud returns an error.", "source": "juraj_google_style"} -{"code": "def create_from_json(cls, json_data):\n\n zipcode = ZipCode()\n zipcode.zipcode = json_data[\"zipcode_info\"][\"zipcode\"]\n zipcode.meta = json_data[\"meta\"] if \"meta\" in json_data else None\n\n zipcode.component_results = _create_component_results(json_data, \"zipcode_info\")\n\n return zipcode", "docstring": "Deserialize zipcode json data into a ZipCode object\n\nArgs:\n json_data (dict): The json data for this zipcode\n\nReturns:\n Zip object", "source": "juraj_google_style"} -{"code": "def send_event(self, action, properties, event_severity=EVENT_SEVERITY):\n\n # verify properties\n event_properties = dict() if (properties is None) else properties\n if type(event_properties) is not dict:\n raise TypeError('properties is not dict')\n\n # prepare event\n event_bunch = Bunch(\n Product=self.product_name,\n Version=self.product_version,\n Server=self.server_name,\n Platform=self.platform,\n Action=action,\n Properties=event_properties)\n event_description = self._get_description_prefix() + \\\n json.dumps(event_bunch)\n\n use_custom_event = True\n if CSS_PRODUCT_EVENT in dir(self.xcli.cmd):\n try:\n # send css product event\n log.debug(\"sending css_product_event \"\n \"description=%s severity=%s\",\n event_description, event_severity)\n self.xcli.cmd.css_product_event(severity=event_severity,\n product=self.product_name,\n version=self.product_version,\n server=self.server_name,\n platform=self.platform,\n action=action,\n properties=event_properties)\n use_custom_event = False\n except (UnrecognizedCommandError,\n OperationForbiddenForUserCategoryError):\n log.warning(\"failed css_product_event \"\n \"description=%s severity=%s\",\n event_description, event_severity)\n if use_custom_event:\n # send custom event\n log.debug(\"sending custom_event description=%s severity=%s\",\n event_description, event_severity)\n self.xcli.cmd.custom_event(\n description=event_description, severity=event_severity)", "docstring": "send css_event and if fails send custom_event instead\n\nArgs:\n action (ACTIONS): the action causing the event\n properties (dict): the action additional properties\n event_severity (string): the event severity\n\nRaises:\n XCLIError: if the xcli.cmd.custom_event failed\n KeyError: if action wasn't predefined\n TypeError: if properties is not None or dict", "source": "juraj_google_style"} -{"code": "def __init__(self, permissive=True):\n\n self._journal_contents = ''\n self._init_journal(permissive=permissive)", "docstring": "Inititalize the journal maker object.\n\n Appends the first lines in the journal (JrnObj variable and timestamp)\n to the _journal_contents.\n\nArgs:\n permissive (bool): if True most errors in journal will not\n cause Revit to stop journal execution.\n Some still do.", "source": "juraj_google_style"} -{"code": "def merge_results(inputs, arguments=None):\n\n if 
arguments is None:\n arguments = Arguments()\n\n args = arguments.args\n kwargs = arguments.kwargs\n\n for i in inputs:\n # without a mapping we handle two cases\n # when the result is a dict merge it with a global dict\n if isinstance(i.result, dict):\n # but do not override\n kwargs.update({k: v for k, v in i.result.items() if k not in kwargs})\n elif isinstance(i.result, list):\n args.extend(i.result)\n elif isinstance(i.result, Arguments):\n args.extend(i.result.args)\n kwargs.update({k: v for k, v in i.result.kwargs.items() if k not in kwargs})\n # otherwise use it as a positional argument\n else:\n args.append(i.result)\n\n return arguments", "docstring": "Merges results to form arguments to run(). There are two cases for each result:\n - dictionary: dictionaries get merged and passed as keyword arguments\n - list: lists get concatenated to positional arguments\n - Arguments: kwargs gets merged and args gets appended\n - else: concatenated and passed as postitional arguments\n\nArgs:\n inputs: the inputs whose results to merge\n arguments: an optional existing Arguments object to merge into", "source": "juraj_google_style"} -{"code": "def _parse_title_url(html_chunk):\n\n url = None\n title_tags = html_chunk.match(\n [\"div\", {\"class\": \"polozka_nazev\"}],\n [\"a\", None, has_param(\"href\")]\n )\n\n if not title_tags:\n return _parse_alt_title(html_chunk), _parse_alt_url(html_chunk)\n\n title = title_tags[0]\n\n url = normalize_url(BASE_URL, title.params[\"href\"])\n title = title.getContent()\n\n if not title:\n title = _parse_alt_title(html_chunk)\n\n return title, url", "docstring": "Parse title/name of the book and URL of the book.\n\nArgs:\n html_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\n tuple: (title, url), both as strings.", "source": "juraj_google_style"} -{"code": "def placeOrder(self, contract: Contract, order: Order) -> Trade:\n\n orderId = order.orderId or self.client.getReqId()\n self.client.placeOrder(orderId, contract, order)\n now = datetime.datetime.now(datetime.timezone.utc)\n key = self.wrapper.orderKey(\n self.wrapper.clientId, orderId, order.permId)\n trade = self.wrapper.trades.get(key)\n if trade:\n # this is a modification of an existing order\n assert trade.orderStatus.status not in OrderStatus.DoneStates\n logEntry = TradeLogEntry(now, trade.orderStatus.status, 'Modify')\n trade.log.append(logEntry)\n self._logger.info(f'placeOrder: Modify order {trade}')\n trade.modifyEvent.emit(trade)\n self.orderModifyEvent.emit(trade)\n else:\n # this is a new order\n order.clientId = self.wrapper.clientId\n order.orderId = orderId\n orderStatus = OrderStatus(status=OrderStatus.PendingSubmit)\n logEntry = TradeLogEntry(now, orderStatus.status, '')\n trade = Trade(\n contract, order, orderStatus, [], [logEntry])\n self.wrapper.trades[key] = trade\n self._logger.info(f'placeOrder: New order {trade}')\n self.newOrderEvent.emit(trade)\n return trade", "docstring": "Place a new order or modify an existing order.\n Returns a Trade that is kept live updated with\n status changes, fills, etc.\n\nArgs:\n contract: Contract to use for order.\n order: The order to be placed.", "source": "juraj_google_style"} -{"code": "def Value(self, p):\n\n if p < 0 or p > 1:\n raise ValueError('Probability p must be in range [0, 1]')\n\n if p == 0: return self.xs[0]\n if p == 1: return self.xs[-1]\n index = bisect.bisect(self.ps, p)\n if p == self.ps[index - 1]:\n return self.xs[index - 1]\n else:\n return self.xs[index]", "docstring": "Returns 
InverseCDF(p), the value that corresponds to probability p.\n\nArgs:\n p: number in the range [0, 1]\n\nReturns:\n number value", "source": "juraj_google_style"} -{"code": "def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None, \n pos=None, end=None):\n\n query = {}\n\n if chromosome:\n query['chrom'] = chromosome\n if end_chromosome:\n query['end_chrom'] = end_chromosome\n if sv_type:\n query['sv_type'] = sv_type\n if pos:\n if not '$and' in query:\n query['$and'] = []\n query['$and'].append({'pos_left': {'$lte': pos}})\n query['$and'].append({'pos_right': {'$gte': pos}})\n\n if end:\n if not '$and' in query:\n query['$and'] = []\n query['$and'].append({'end_left': {'$lte': end}})\n query['$and'].append({'end_right': {'$gte': end}})\n\n LOG.info(\"Find all sv variants {}\".format(query))\n\n return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)])", "docstring": "Return all structural variants in the database\n\nArgs:\n chromosome (str)\n end_chromosome (str)\n sv_type (str)\n pos (int): Left position of SV\n end (int): Right position of SV\n\nReturns:\n variants (Iterable(Variant))", "source": "juraj_google_style"} -{"code": "def save(self, resource):\n\n resource_type = None\n xid = None\n if isinstance(resource, dict):\n resource_type = resource.get('type')\n xid = resource.get('xid')\n else:\n resource_type = resource.type\n xid = resource.xid\n\n if resource_type is not None and xid is not None:\n saved = True\n if resource_type in self.tcex.group_types:\n try:\n # groups\n self.groups_shelf[xid] = resource\n except Exception:\n saved = False\n\n if saved:\n try:\n del self._groups[xid]\n except KeyError:\n # if group was saved twice it would already be delete\n pass\n elif resource_type in self.tcex.indicator_types_data.keys():\n try:\n # indicators\n self.indicators_shelf[xid] = resource\n except Exception:\n saved = False\n\n if saved:\n try:\n del self._indicators[xid]\n except KeyError:\n # if indicator was saved twice it would already be delete\n pass", "docstring": "Save group|indicator dict or object to shelve.\n\n Best effort to save group/indicator data to disk. 
If for any reason the save fails\n the data will still be accessible from list in memory.\n\nArgs:\n resource (dict|obj): The Group or Indicator dict or object.", "source": "juraj_google_style"} -{"code": "def shortest_undirected_path(self, physical_qubit1, physical_qubit2):\n\n try:\n return nx.shortest_path(self.graph.to_undirected(as_view=True), source=physical_qubit1,\n target=physical_qubit2)\n except nx.exception.NetworkXNoPath:\n raise CouplingError(\n \"Nodes %s and %s are not connected\" % (str(physical_qubit1), str(physical_qubit2)))", "docstring": "Returns the shortest undirected path between physical_qubit1 and physical_qubit2.\n\nArgs:\n physical_qubit1 (int): A physical qubit\n physical_qubit2 (int): Another physical qubit\n\nReturns:\n List: The shortest undirected path\n\nRaises:\n CouplingError: When there is no path between physical_qubit1, physical_qubit2.", "source": "juraj_google_style"} -{"code": "def combine(path1, path2):\n # type: (Text, Text) -> Text\n\n if not path1:\n return path2.lstrip()\n return \"{}/{}\".format(path1.rstrip(\"/\"), path2.lstrip(\"/\"))", "docstring": "Join two paths together.\n\n This is faster than :func:`~fs.path.join`, but only works when the\n second path is relative, and there are no back references in either\n path.\n\nArgs:\n path1 (str): A PyFilesytem path.\n path2 (str): A PyFilesytem path.\n\nReturns:\n str: The joint path.\n\nExample:\n >>> combine(\"foo/bar\", \"baz\")\n 'foo/bar/baz'", "source": "juraj_google_style"} -{"code": "def _max_retries_for_error(self, error):\n\n status = error.get(\"status\")\n if status == \"ABORTED\" and get_transactions() > 0:\n # Avoids retrying Conflicts when inside a transaction.\n return None\n return self._MAX_RETRIES.get(status)", "docstring": "Handles Datastore response errors according to their documentation.\n\n Parameters:\n error(dict)\n\nReturns:\n int or None: The max number of times this error should be\n retried or None if it shouldn't.\n\n See also:\n https://cloud.google.com/datastore/docs/concepts/errors", "source": "juraj_google_style"} -{"code": "def __init__(self, access_token, access_token_type, refresh_token=None, expires_in=None, state=None):\n\n\n self.access_token = access_token\n self.access_token_type = access_token_type\n self.refresh_token = refresh_token\n self.expires_in = expires_in\n self.state = state", "docstring": "Initialziation of the object\n\nArgs:\n access_token (str): Access token\n access_token_type (str): Access token type\n refresh_token (str):\n expires_in (int): Seconds after which the token will expire\n state (str):", "source": "juraj_google_style"} -{"code": "def get_icohp_dict_by_bondlengths(self, minbondlength=0.0, maxbondlength=8.0):\n\n newicohp_dict = {}\n for value in self._icohplist.values():\n if value._length >= minbondlength and value._length <= maxbondlength:\n newicohp_dict[value._label] = value\n return newicohp_dict", "docstring": "get a dict of IcohpValues corresponding to certaind bond lengths\n\nArgs:\n minbondlength: defines the minimum of the bond lengths of the bonds\n maxbondlength: defines the maximum of the bond lengths of the bonds\n\nReturns:\n dict of IcohpValues, the keys correspond to the values from the initial list_labels", "source": "juraj_google_style"} -{"code": "def set(self, key, data):\n\n self.raise_error_if_not_open()\n\n if key in self._file:\n del self._file[key]\n\n self._file.create_dataset(key, data=data)", "docstring": "Set the given data to the container with the given key.\n Any existing data for the 
given key is discarded/overwritten.\n\nArgs:\n key (str): A key to store the data for.\n data (numpy.ndarray): Array-like data.\n\nNote:\n The container has to be opened in advance.", "source": "juraj_google_style"} -{"code": "def set_wallpaper(image):\n\n\n\tdesktop_env = system.get_name()\n\n\tif desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']:\n\t\turi = 'file://%s' % image\n\n\t\tSCHEMA = 'org.gnome.desktop.background'\n\t\tKEY = 'picture-uri'\n\n\t\tif desktop_env == 'mate':\n\t\t\turi = image\n\n\t\t\tSCHEMA = 'org.mate.background'\n\t\t\tKEY = 'picture-filename'\n\n\t\ttry:\n\t\t\tfrom gi.repository import Gio\n\n\t\t\tgsettings = Gio.Settings.new(SCHEMA)\n\t\t\tgsettings.set_string(KEY, uri)\n\t\texcept ImportError:\n\t\t\ttry:\n\t\t\t\tgsettings_proc = sp.Popen(\n\t\t\t\t\t['gsettings', 'set', SCHEMA, KEY, uri])\n\t\t\texcept: # MATE < 1.6\n\t\t\t\tsp.Popen(['mateconftool-2',\n\t\t\t\t\t\t '-t',\n\t\t\t\t\t\t 'string',\n\t\t\t\t\t\t '--set',\n\t\t\t\t\t\t '/desktop/mate/background/picture_filename',\n\t\t\t\t\t\t '%s' % image],\n\t\t\t\t\t\t stdout=sp.PIPE)\n\t\t\tfinally:\n\t\t\t\tgsettings_proc.communicate()\n\n\t\t\t\tif gsettings_proc.returncode != 0:\n\t\t\t\t\tsp.Popen(['mateconftool-2',\n\t\t\t\t\t\t\t '-t',\n\t\t\t\t\t\t\t 'string',\n\t\t\t\t\t\t\t '--set',\n\t\t\t\t\t\t\t '/desktop/mate/background/picture_filename',\n\t\t\t\t\t\t\t '%s' % image])\n\n\telif desktop_env == 'gnome2':\n\t\tsp.Popen(\n\t\t\t['gconftool-2',\n\t\t\t '-t',\n\t\t\t 'string',\n\t\t\t '--set',\n\t\t\t '/desktop/gnome/background/picture_filename',\n\t\t\t image]\n\t\t)\n\n\telif desktop_env == 'kde':\n\t\t# This probably only works in Plasma 5+\n\n\t\tkde_script = dedent(\n\t\t).format(image)\n\n\t\tsp.Popen(\n\t\t\t\t['dbus-send',\n\t\t\t\t '--session',\n\t\t\t\t '--dest=org.kde.plasmashell',\n\t\t\t\t '--type=method_call',\n\t\t\t\t '/PlasmaShell',\n\t\t\t\t 'org.kde.PlasmaShell.evaluateScript',\n\t\t\t\t 'string:{}'.format(kde_script)]\n\t\t)\n\n\telif desktop_env in ['kde3', 'trinity']:\n\t\targs = 'dcop kdesktop KBackgroundIface setWallpaper 0 \"%s\" 6' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'xfce4':\n\t\t# XFCE4's image property is not image-path but last-image (What?)\n\n\t\tlist_of_properties = system.get_cmd_out(\n\t\t\t\t['xfconf-query',\n\t\t\t\t '-R',\n\t\t\t\t '-l',\n\t\t\t\t '-c',\n\t\t\t\t 'xfce4-desktop',\n\t\t\t\t '-p',\n\t\t\t\t '/backdrop']\n\t\t)\n\n\t\tfor i in list_of_properties.split('\\n'):\n\t\t\tif i.endswith('last-image'):\n\t\t\t\t# The property given is a background property\n\t\t\t\tsp.Popen(\n\t\t\t\t\t['xfconf-query -c xfce4-desktop -p %s -s \"%s\"' %\n\t\t\t\t\t\t(i, image)],\n\t\t\t\t\tshell=True)\n\n\t\t\t\tsp.Popen(['xfdesktop --reload'], shell=True)\n\n\telif desktop_env == 'razor-qt':\n\t\tdesktop_conf = configparser.ConfigParser()\n\t\t# Development version\n\n\t\tdesktop_conf_file = os.path.join(\n\t\t\tget_config_dir('razor')[0], 'desktop.conf')\n\n\t\tif os.path.isfile(desktop_conf_file):\n\t\t\tconfig_option = r'screens\\1\\desktops\\1\\wallpaper'\n\n\t\telse:\n\t\t\tdesktop_conf_file = os.path.join(\n\t\t\t\tos.path.expanduser('~'), '.razor/desktop.conf')\n\t\t\tconfig_option = r'desktops\\1\\wallpaper'\n\n\t\tdesktop_conf.read(os.path.join(desktop_conf_file))\n\t\ttry:\n\t\t\tif desktop_conf.has_option('razor', config_option):\n\t\t\t\tdesktop_conf.set('razor', config_option, image)\n\t\t\t\twith codecs.open(desktop_conf_file, 'w', encoding='utf-8', errors='replace') as 
f:\n\t\t\t\t\tdesktop_conf.write(f)\n\t\texcept:\n\t\t\tpass\n\n\telif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']:\n\t\ttry:\n\t\t\targs = ['feh', '--bg-scale', image]\n\t\t\tsp.Popen(args)\n\t\texcept:\n\t\t\tsys.stderr.write('Error: Failed to set wallpaper with feh!')\n\t\t\tsys.stderr.write('Please make sre that You have feh installed.')\n\n\telif desktop_env == 'icewm':\n\t\targs = ['icewmbg', image]\n\t\tsp.Popen(args)\n\n\telif desktop_env == 'blackbox':\n\t\targs = ['bsetbg', '-full', image]\n\t\tsp.Popen(args)\n\n\telif desktop_env == 'lxde':\n\t\targs = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'lxqt':\n\t\targs = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'windowmaker':\n\t\targs = 'wmsetbg -s -u %s' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'enlightenment':\n\t\targs = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'awesome':\n\t\twith sp.Popen(\"awesome-client\", stdin=sp.PIPE) as awesome_client:\n\t\t\tcommand = ('local gears = require(\"gears\"); for s = 1,'\n\t\t\t\t\t\t' screen.count() do gears.wallpaper.maximized'\n\t\t\t\t\t\t'(\"%s\", s, true); end;') % image\n\t\t\tawesome_client.communicate(input=bytes(command, 'UTF-8'))\n\n\telif desktop_env == 'windows':\n\t\tWINDOWS_SCRIPT = dedent() % image\n\n\t\twindows_script_file = os.path.join(\n\t\t\ttempfile.gettempdir(), 'wallscript.bat')\n\n\t\twith open(windows_script_file, 'w') as f:\n\t\t\tf.write(WINDOWS_SCRIPT)\n\n\t\tsp.Popen([windows_script_file], shell=True)\n\n\t\t# Sometimes the method above works\n\t\t# and sometimes the one below\n\n\t\tSPI_SETDESKWALLPAPER = 20\n\t\tctypes.windll.user32.SystemParametersInfoA(\n\t\t\tSPI_SETDESKWALLPAPER, 0, image, 0)\n\n\telif desktop_env == 'mac':\n\t\ttry:\n\t\t\tfrom appscript import app, mactypes\n\t\t\tapp('Finder').desktop_picture.set(mactypes.File(image))\n\t\texcept ImportError:\n\t\t\tOSX_SCRIPT = dedent(\n\t\t\t\t) % image\n\n\t\t\tsp.Popen(['osascript', OSX_SCRIPT])\n\telse:\n\t\ttry:\n\t\t\tsp.Popen(['feh', '--bg-scale', image])\n\t\t\t# feh is nearly a catch-all for Linux WMs\n\t\texcept:\n\t\t\tpass", "docstring": "Set the desktop wallpaper.\n\n Sets the desktop wallpaper to an image.\n\nArgs:\n image (str): The path to the image to be set as wallpaper.", "source": "juraj_google_style"} -{"code": "def voronoi(script, hole_num=50, target_layer=None, sample_layer=None, thickness=0.5, backward=True):\n\n\n if target_layer is None:\n target_layer = script.current_layer()\n if sample_layer is None:\n # Current layer is currently not changed after poisson_disk is run\n sampling.poisson_disk(script, sample_num=hole_num)\n sample_layer = script.last_layer()\n\n vert_color.voronoi(script, target_layer=target_layer, source_layer=sample_layer, backward=backward)\n select.vert_quality(script, min_quality=0.0, max_quality=thickness)\n if backward:\n select.invert(script)\n delete.selected(script)\n smooth.laplacian(script, iterations=3)\n\n return None", "docstring": "Turn a model into a surface with Voronoi style holes in it\n\n References:\n http://meshlabstuff.blogspot.com/2009/03/creating-voronoi-sphere.html\n http://meshlabstuff.blogspot.com/2009/04/creating-voronoi-sphere-2.html\n\n Requires FilterScript object\n\nArgs:\n script: the FilterScript object to write the filter to. 
Does not\n work with a script filename.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "source": "juraj_google_style"} -{"code": "def audio(self, audio, sample_rate, name=None, subdir=''):\n\n\n from chainerui.report.audio_report import check_available\n if not check_available():\n return\n from chainerui.report.audio_report import report as _audio\n\n col_name = self.get_col_name(name, 'audio')\n out_dir, rel_out_dir = self.get_subdir(subdir)\n filename, _ = _audio(audio, sample_rate, out_dir, col_name)\n self.audios[col_name] = os.path.join(rel_out_dir, filename)\n\n self.count += 1", "docstring": "Summary audio to listen on web browser.\n\nArgs:\n audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \\\n :class:`chainer.Variable`): sampled wave array.\n sample_rate (int): sampling rate.\n name (str): name of image. set as column name. when not setting,\n assigned ``'audio'`` + sequential number.\n subdir (str): sub-directory path of output.", "source": "juraj_google_style"} -{"code": "def get_op_traceback(self, op_name):\n\n if not self._graph_traceback:\n raise ValueError('No graph traceback has been received yet.')\n for op_log_entry in self._graph_traceback.log_entries:\n if op_log_entry.name == op_name:\n return self._code_def_to_traceback_list(op_log_entry.code_def)\n raise ValueError(\n 'No op named \"%s\" can be found in the graph of the latest version '\n ' (%d).' % (op_name, self._graph_version))", "docstring": "Get the traceback of an op in the latest version of the TF graph.\n\nArgs:\n op_name: Name of the op.\n\nReturns:\n Creation traceback of the op, in the form of a list of 2-tuples:\n (file_path, lineno)\n\nRaises:\n ValueError: If the op with the given name cannot be found in the latest\n version of the graph that this SourceManager instance has received, or\n if this SourceManager instance has not received any graph traceback yet.", "source": "juraj_google_style"} -{"code": "def DeregisterMountPoint(cls, mount_point):\n\n if mount_point not in cls._mount_points:\n raise KeyError('Mount point: {0:s} not set.'.format(mount_point))\n\n del cls._mount_points[mount_point]", "docstring": "Deregisters a path specification mount point.\n\nArgs:\n mount_point (str): mount point identifier.\n\nRaises:\n KeyError: if the corresponding mount point is not set.", "source": "juraj_google_style"} -{"code": "def create_view(operations, operation):\n\n operations.execute(\"CREATE VIEW %s AS %s\" % (\n operation.target.name,\n operation.target.sqltext\n ))", "docstring": "Implements ``CREATE VIEW``.\n\nArgs:\n operations: instance of ``alembic.operations.base.Operations``\n operation: instance of :class:`.ReversibleOp`\n\nReturns:\n ``None``", "source": "juraj_google_style"} -{"code": "def __init__(self, desired_capabilities, url='http://127.0.0.1:3456/wd/hub'):\n\n self.session_id = None\n self.capabilities = None\n self.desired_capabilities = desired_capabilities\n self.remote_invoker = RemoteInvoker(url)", "docstring": "Initialize the WebDriver\n\nArgs:\n desired_capabilities(dict): The desired capabilities requested by\n the local end.\n url(str): The url of remote server, default: localhost:3456/wd/hub.", "source": "juraj_google_style"} -{"code": "def get_graph(graph, conn, **kwargs):\n\n sparql = render_without_request(\"sparqlGraphDataTemplate.rq\",\n prefix=NSM.prefix(),\n graph=graph)\n return conn.query(sparql, **kwargs)", "docstring": "Returns all the triples for a specific are graph\n\nArgs:\n graph: the URI of the graph to retreive\n conn: 
the rdfframework triplestore connection", "source": "juraj_google_style"} -{"code": "def percent_point(self, U):\n\n self.check_fit()\n\n return scipy.optimize.brentq(self._brentq_cdf(U), -1000.0, 1000.0)", "docstring": "Given a cdf value, returns a value in original space.\n\nArgs:\n U(numpy.array): cdf values in [0,1]\n\nReturns:\n numpy.array: value in original space", "source": "juraj_google_style"} -{"code": "def setDocuments(self, documenting_pid, documented_pid):\n\n self._check_initialized()\n documenting_id = self.getObjectByPid(documenting_pid)\n documented_id = self.getObjectByPid(documented_pid)\n self.add((documenting_id, CITO.documents, documented_id))", "docstring": "Add a CiTO, the Citation Typing Ontology, triple asserting that\n ``documenting_pid`` documents ``documented_pid``.\n\n Adds assertion: ``documenting_pid cito:documents documented_pid``\n\nArgs:\n documenting_pid: str\n PID of a Science Object that documents ``documented_pid``.\n\n documented_pid: str\n PID of a Science Object that is documented by ``documenting_pid``.", "source": "juraj_google_style"} -{"code": "def export_dae(filename, cutout, level=0):\n\n if \".dae\" not in filename:\n filename = filename + \".dae\"\n\n vs, fs = mcubes.marching_cubes(cutout, level)\n mcubes.export_mesh(vs, fs, filename, \"ndioexport\")", "docstring": "Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes).\n\nArgs:\n filename (str): The filename to write out to\n cutout (numpy.ndarray): The dense annotation\n level (int): The level at which to run mcubes\n\nReturns:\n boolean success", "source": "juraj_google_style"} -{"code": "def _add_genotype_calls(self, variant_obj, variant_line, case_obj):\n\n variant_line = variant_line.split('\\t')\n #if there is gt calls we have no individuals to add\n if len(variant_line) > 8:\n gt_format = variant_line[8].split(':')\n for individual in case_obj.individuals:\n sample_id = individual.ind_id\n index = individual.ind_index\n\n gt_call = variant_line[9+index].split(':')\n\n raw_call = dict(zip(gt_format, gt_call))\n\n genotype = Genotype(**raw_call)\n\n variant_obj.add_individual(puzzle_genotype(\n sample_id = sample_id,\n genotype = genotype.genotype,\n case_id = case_obj.name,\n phenotype = individual.phenotype,\n ref_depth = genotype.ref_depth,\n alt_depth = genotype.alt_depth,\n genotype_quality = genotype.genotype_quality,\n depth = genotype.depth_of_coverage,\n supporting_evidence = genotype.supporting_evidence,\n pe_support = genotype.pe_support,\n sr_support = genotype.sr_support,\n ))", "docstring": "Add the genotype calls for the variant\n\nArgs:\n variant_obj (puzzle.models.Variant)\n variant_dict (dict): A variant dictionary\n case_obj (puzzle.models.Case)", "source": "juraj_google_style"} -{"code": "def _CopyFromDateTimeValues(self, date_time_values):\n\n year = date_time_values.get('year', 0)\n month = date_time_values.get('month', 0)\n day_of_month = date_time_values.get('day_of_month', 0)\n hours = date_time_values.get('hours', 0)\n minutes = date_time_values.get('minutes', 0)\n seconds = date_time_values.get('seconds', 0)\n microseconds = date_time_values.get('microseconds', 0)\n\n precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper(\n self._precision)\n\n fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond(\n microseconds)\n\n self._normalized_timestamp = None\n self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n year, month, day_of_month, hours, minutes, seconds)\n self._time_elements_tuple = (\n 
year, month, day_of_month, hours, minutes, seconds)\n self.fraction_of_second = fraction_of_second\n self.is_local_time = False", "docstring": "Copies time elements from date and time values.\n\nArgs:\n date_time_values (dict[str, int]): date and time values, such as year,\n month, day of month, hours, minutes, seconds, microseconds.\n\nRaises:\n ValueError: if no helper can be created for the current precision.", "source": "juraj_google_style"} -{"code": "def parse_elements(elements):\n\n if not len(elements) in (14, 15):\n raise ValueError('Invalid GGA fix data')\n time = datetime.time(*[int(elements[0][i:i + 2])\n for i in range(0, 6, 2)])\n # Latitude and longitude are checked for validity during Fix\n # instantiation\n latitude = parse_latitude(elements[1], elements[2])\n longitude = parse_longitude(elements[3], elements[4])\n quality = int(elements[5])\n if not 0 <= quality <= 9:\n raise ValueError('Invalid quality value %r' % quality)\n satellites = int(elements[6])\n if not 0 <= satellites <= 12:\n raise ValueError('Invalid number of satellites %r'\n % satellites)\n dilution = float(elements[7])\n altitude = float(elements[8])\n if elements[9] == 'F':\n altitude = altitude * 3.2808399\n elif not elements[9] == 'M':\n raise ValueError('Unknown altitude unit %r' % elements[9])\n if elements[10] in ('-', ''):\n geoid_delta = False\n logging.warning('Altitude data could be incorrect, as the geoid '\n 'difference has not been provided')\n else:\n geoid_delta = float(elements[10])\n if elements[11] == 'F':\n geoid_delta = geoid_delta * 3.2808399\n elif geoid_delta and not elements[11] == 'M':\n raise ValueError('Unknown geoid delta unit %r' % elements[11])\n dgps_delta = float(elements[12]) if elements[12] else None\n dgps_station = int(elements[13]) if elements[13] else None\n mode = elements[14] if len(elements) == 15 else None\n return Fix(time, latitude, longitude, quality, satellites, dilution,\n altitude, geoid_delta, dgps_delta, dgps_station, mode)", "docstring": "Parse essential fix's data elements.\n\nArgs:\n elements (list): Data values for fix\n\nReturns:\n Fix: Fix object representing data", "source": "juraj_google_style"} -{"code": "def on_conflict(self, fields: List[Union[str, Tuple[str]]], action, index_predicate: str=None):\n\n\n self.conflict_target = fields\n self.conflict_action = action\n self.index_predicate = index_predicate\n\n return self", "docstring": "Sets the action to take when conflicts arise when attempting\n to insert/create a new row.\n\nArgs:\n fields:\n The fields the conflicts can occur in.\n\n action:\n The action to take when the conflict occurs.\n\n index_predicate:\n The index predicate to satisfy an arbiter partial index (i.e. 
what partial index to use for checking\n conflicts)", "source": "juraj_google_style"} -{"code": "def read_hdf(cls,path,key=None):\n\n df = pd.read_hdf(path,key)\n df['scored_calls'] = df['scored_calls'].apply(lambda x: json.loads(x))\n df['channel_values'] = df['channel_values'].apply(lambda x: json.loads(x))\n df['regions'] = df['regions'].apply(lambda x: json.loads(x))\n df['phenotype_calls'] = df['phenotype_calls'].apply(lambda x: json.loads(x))\n df['neighbors'] = df['neighbors'].apply(lambda x: json.loads(x))\n df['neighbors'] = df['neighbors'].apply(lambda x:\n np.nan if not isinstance(x,dict) else dict(zip([int(y) for y in x.keys()],x.values()))\n )\n df['frame_shape'] = df['frame_shape'].apply(lambda x: tuple(json.loads(x)))\n df = cls(df)\n f = h5py.File(path,'r')\n mpp = f[key].attrs[\"microns_per_pixel\"]\n if not np.isnan(mpp): df.microns_per_pixel = mpp\n f.close()\n return df", "docstring": "Read a CellDataFrame from an hdf5 file.\n\nArgs:\n path (str): the path to read from\n key (str): the name of the location to read from\n\nReturns:\n CellDataFrame", "source": "juraj_google_style"} -{"code": "def _determine_api_url(self, platform, service, action):\n\n base_uri = settings.BASE_PAL_URL.format(platform)\n if service == \"Recurring\":\n api_version = settings.API_RECURRING_VERSION\n elif service == \"Payout\":\n api_version = settings.API_PAYOUT_VERSION\n else:\n api_version = settings.API_PAYMENT_VERSION\n return '/'.join([base_uri, service, api_version, action])", "docstring": "This returns the Adyen API endpoint based on the provided platform,\n service and action.\n\nArgs:\n platform (str): Adyen platform, ie 'live' or 'test'.\n service (str): API service to place request through.\n action (str): the API action to perform.", "source": "juraj_google_style"} -{"code": "def Decrypt(self, encrypted_data):\n\n index_split = -(len(encrypted_data) % Blowfish.block_size)\n if index_split:\n remaining_encrypted_data = encrypted_data[index_split:]\n encrypted_data = encrypted_data[:index_split]\n else:\n remaining_encrypted_data = b''\n\n decrypted_data = self._blowfish_cipher.decrypt(encrypted_data)\n\n return decrypted_data, remaining_encrypted_data", "docstring": "Decrypts the encrypted data.\n\nArgs:\n encrypted_data (bytes): encrypted data.\n\nReturns:\n tuple[bytes,bytes]: decrypted data and remaining encrypted data.", "source": "juraj_google_style"} -{"code": "def from_file(filename, file_format=\"xyz\"):\n\n mols = list(pb.readfile(str(file_format), str(filename)))\n return BabelMolAdaptor(mols[0].OBMol)", "docstring": "Uses OpenBabel to read a molecule from a file in all supported formats.\n\nArgs:\n filename: Filename of input file\n file_format: String specifying any OpenBabel supported formats.\n\nReturns:\n BabelMolAdaptor object", "source": "juraj_google_style"} -{"code": "def read(self, size=None):\n\n data = self.rfile.read(size)\n self.bytes_read += len(data)\n self._check_length()\n return data", "docstring": "Read a chunk from rfile buffer and return it.\n\nArgs:\n size (int): amount of data to read\n\nReturns:\n bytes: Chunk from rfile, limited by size if specified.", "source": "juraj_google_style"} -{"code": "def connection_made(self, transport: asyncio.transports.Transport):\n\n self._transport = transport\n self._remote_host = self._transport.get_extra_info('peername')\n self._extra = {\"client\": str(self._remote_host)}\n self.connections.add(self)\n\n self._stream_reader = asyncio.StreamReader(loop=self._loop)\n self._stream_writer = 
asyncio.StreamWriter(transport, self,\n self._stream_reader,\n self._loop)\n super().connection_made(transport)\n if self.timeout:\n self._timeout_handler = self._loop.call_soon(\n self.timeout_callback)\n self._handlertask = asyncio.ensure_future(self.query_handler())\n if self.debug:\n access_logger.info(\"connected\", extra=self._extra)", "docstring": "连接建立起来触发的回调函数.\n\n 用于设定一些参数,并将监听任务放入事件循环,如果设置了timeout,也会将timeout_callback放入事件循环\n\n Parameters:\n transport (asyncio.Transports): - 连接的传输对象", "source": "juraj_google_style"} -{"code": "def getWindow(title, exact=False):\n\n titles = getWindows()\n hwnd = titles.get(title, None)\n if not hwnd and not exact:\n for k, v in titles.items():\n if title in k:\n hwnd = v\n break\n if hwnd:\n return Window(hwnd)\n else:\n return None", "docstring": "Return Window object if 'title' or its part found in visible windows titles, else return None\n\n Return only 1 window found first\n\nArgs:\n title: unicode string\n exact (bool): True if search only exact match", "source": "juraj_google_style"} -{"code": "def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date):\n\n payload = {\n \"language\": self.client.language.value,\n \"command\": PaymentCommand.GET_TOKENS.value,\n \"merchant\": {\n \"apiLogin\": self.client.api_login,\n \"apiKey\": self.client.api_key\n },\n \"creditCardTokenInformation\": {\n \"payerId\": payer_id,\n \"creditCardTokenId\": credit_card_token_id,\n \"startDate\": start_date.strftime('%Y-%m-%dT%H:%M:%S'),\n \"endDate\": end_date.strftime('%Y-%m-%dT%H:%M:%S')\n },\n \"test\": self.client.is_test\n }\n return self.client._post(self.url, json=payload)", "docstring": "With this functionality you can query previously the Credit Cards Token.\n\nArgs:\n payer_id:\n credit_card_token_id:\n start_date:\n end_date:\n\n Returns:", "source": "juraj_google_style"} -{"code": "def forward_feed(self, amount):\n\n if amount <= 255 and amount >=0:\n self.send(chr(27)+'J'+chr(amount))\n else:\n raise RuntimeError('Invalid foward feed, must be less than 255 and >= 0')", "docstring": "Calling this function finishes input of the current line, then moves the vertical\n print position forward by x/300 inch.\n\nArgs:\n amount: how far foward you want the position moved. 
Actual movement is calculated as\n amount/300 inches.\n\nReturns:\n None\n\nRaises:\n RuntimeError: Invalid foward feed.", "source": "juraj_google_style"} -{"code": "def add_to_buffer(self, content, read_position):\n\n self.read_position = read_position\n if self.read_buffer is None:\n self.read_buffer = content\n else:\n self.read_buffer = content + self.read_buffer", "docstring": "Add additional bytes content as read from the read_position.\n\nArgs:\n content (bytes): data to be added to buffer working BufferWorkSpac.\n read_position (int): where in the file pointer the data was read from.", "source": "juraj_google_style"} -{"code": "def SendCommand(self, command, arg=None):\n\n if arg is not None:\n if not isinstance(arg, bytes):\n arg = arg.encode('utf8')\n command = b'%s:%s' % (command, arg)\n\n self._Write(io.BytesIO(command), len(command))", "docstring": "Sends a command to the device.\n\nArgs:\n command: The command to send.\n arg: Optional argument to the command.", "source": "juraj_google_style"} -{"code": "def padding_to_length(padding):\n\n non_padding = 1.0 - padding\n return tf.to_int32(tf.reduce_sum(non_padding, axis=-1))", "docstring": "Calculate the length of mask based on padding.\n\nArgs:\n padding: a Tensor with shape [..., length].\n\nReturns:\n a Tensor with shape [...].", "source": "juraj_google_style"} -{"code": "def model(self, inputs, mode='train'):\n\n # Extract features\n training = (mode == 'train')\n with tf.variable_scope('conv1') as scope:\n conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding='SAME')\n bn = tf.layers.batch_normalization(inputs=conv, training=training)\n bn = tf.nn.relu(bn)\n conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding='SAME')\n bn = tf.layers.batch_normalization(inputs=conv, training=training)\n bn = tf.nn.relu(bn)\n pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)\n\n with tf.variable_scope('conv2') as scope:\n conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')\n bn = tf.layers.batch_normalization(inputs=conv, training=training)\n bn = tf.nn.relu(bn)\n conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')\n bn = tf.layers.batch_normalization(inputs=conv, training=training)\n bn = tf.nn.relu(bn)\n pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)\n\n with tf.variable_scope('conv3') as scope:\n conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')\n bn = tf.layers.batch_normalization(inputs=conv, training=training)\n bn = tf.nn.relu(bn)\n conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')\n bn = tf.layers.batch_normalization(inputs=conv, training=training)\n bn = tf.nn.relu(bn)\n pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)\n\n # Classify\n with tf.variable_scope('fc') as scope:\n flat = tf.layers.flatten(pool)\n fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu)\n softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax)\n\n return softmax", "docstring": "Build a simple convnet (BN before ReLU).\n\nArgs:\n inputs: a tensor of size [batch_size, height, width, channels]\n mode: string in ['train', 'test']\n\nReturns:\n the last op containing the predictions\n\nNote:\n Best score\n Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656\n Worst score\n Step: 7523 - 
Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874", "source": "juraj_google_style"} -{"code": "def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):\n\n return image_transform(\n X,\n cv2.GaussianBlur,\n ksize=(ksize_width, ksize_height),\n sigmaX=sigma_x,\n sigmaY=sigma_y\n )", "docstring": "Apply Gaussian blur to the given data.\n\nArgs:\n X: data to blur\n kernel_size: Gaussian kernel size\n stddev: Gaussian kernel standard deviation (in both X and Y directions)", "source": "juraj_google_style"} -{"code": "def delete_assets(self, service_id, url=None, all=False):\n\n self._services_manager.delete_assets(service_id, url, all)", "docstring": "Delete CDN assets\n\nArgs:\n service_id: The ID of the service to delete from.\n url: The URL at which to delete assets\n all: When True, delete all assets associated with the service_id.\n\n You cannot specifiy both url and all.", "source": "juraj_google_style"} -{"code": "def stepBy(self, steps):\n\n self.setValue(self.value() + steps*self.singleStep())", "docstring": "steps value up/down by a single step. Single step is defined in singleStep().\n\nArgs:\n steps (int): positiv int steps up, negativ steps down", "source": "juraj_google_style"} -{"code": "def write_temp_bird_conf(dummy_ip_prefix,\n config_file,\n variable_name,\n prefixes):\n\n log = logging.getLogger(PROGRAM_NAME)\n comment = (\"# {i} is a dummy IP Prefix. It should NOT be used and \"\n \"REMOVED from the constant.\".format(i=dummy_ip_prefix))\n\n # the temporary file must be on the same filesystem as the bird config\n # as we use os.rename to perform an atomic update on the bird config.\n # Thus, we create it in the same directory that bird config is stored.\n tm_file = os.path.join(os.path.dirname(config_file), str(time.time()))\n log.debug(\"going to write to %s\", tm_file)\n\n try:\n with open(tm_file, 'w') as tmpf:\n tmpf.write(\"# Generated {t} by {n} (pid={p})\\n\"\n .format(t=datetime.datetime.now(),\n n=PROGRAM_NAME,\n p=os.getpid()))\n tmpf.write(\"{c}\\n\".format(c=comment))\n tmpf.write(\"define {n} =\\n\".format(n=variable_name))\n tmpf.write(\"{s}[\\n\".format(s=4 * ' '))\n # all entries of the array need a trailing comma except the last\n # one. A single element array doesn't need a trailing comma.\n tmpf.write(',\\n'.join([' '*8 + n for n in prefixes]))\n tmpf.write(\"\\n{s}];\\n\".format(s=4 * ' '))\n except OSError as error:\n log.critical(\"failed to write temporary file %s: %s. 
This is a FATAL \"\n \"error, this exiting main program\", tm_file, error)\n sys.exit(1)\n else:\n return tm_file", "docstring": "Write in a temporary file the list of IP-Prefixes.\n\n A failure to create and write the temporary file will exit main program.\n\nArgs:\n dummy_ip_prefix (str): The dummy IP prefix, which must be always\n config_file (str): The file name of bird configuration\n variable_name (str): The name of the variable set in bird configuration\n prefixes (list): The list of IP-Prefixes to write\n\nReturns:\n The filename of the temporary file", "source": "juraj_google_style"} -{"code": "def b_fit_score(self, x, y):\n\n x = np.reshape(minmax_scale(x), (-1, 1))\n y = np.reshape(minmax_scale(y), (-1, 1))\n poly = PolynomialFeatures(degree=self.degree)\n poly_x = poly.fit_transform(x)\n\n poly_x[:,1] = 0\n poly_x[:,2] = 0\n\n regressor = LinearRegression()\n regressor.fit(poly_x, y)\n\n y_predict = regressor.predict(poly_x)\n error = mean_squared_error(y_predict, y)\n\n return error", "docstring": "Compute the RECI fit score\n\nArgs:\n x (numpy.ndarray): Variable 1\n y (numpy.ndarray): Variable 2\n\nReturns:\n float: RECI fit score", "source": "juraj_google_style"} -{"code": "def _convert_pandas_csv_options(pandas_options, columns):\n\n\n _columns = pandas_options.pop('names', columns)\n header = pandas_options.pop('header', None)\n pandas_options.pop('encoding', None)\n\n if header == 'infer':\n header_line_number = 0 if not bool(_columns) else None\n else:\n header_line_number = header\n\n return _columns, header_line_number", "docstring": "Translate `pd.read_csv()` options into `pd.DataFrame()` especially for header.\n\nArgs:\n pandas_option (dict):\n pandas options like {'header': None}.\n columns (list):\n list of column name.", "source": "juraj_google_style"} -{"code": "def impersonate(self, user, enterprise):\n\n\n if not user or not enterprise:\n raise ValueError('You must set a user name and an enterprise name to begin impersonification')\n\n self._is_impersonating = True\n self._impersonation = \"%s@%s\" % (user, enterprise)", "docstring": "Impersonate a user in a enterprise\n\nArgs:\n user: the name of the user to impersonate\n enterprise: the name of the enterprise where to use impersonation", "source": "juraj_google_style"} -{"code": "def __init__(self, source_urn=None, token=None):\n\n super(InstantOutputPlugin, self).__init__()\n\n if not source_urn:\n raise ValueError(\"source_urn can't be empty.\")\n\n if not token:\n raise ValueError(\"token can't be empty.\")\n\n self.source_urn = source_urn\n self.token = token", "docstring": "OutputPlugin constructor.\n\nArgs:\n source_urn: URN identifying source of the data (hunt or flow).\n token: Security token.\n\nRaises:\n ValueError: If one of the keyword arguments is empty.", "source": "juraj_google_style"} -{"code": "def enable_store_parameters_in_results(kernel):\n\n kernel_stack = []\n while hasattr(kernel, 'parameters') and 'inner_kernel' in kernel.parameters:\n kernel_stack.append(kernel)\n kernel = kernel.parameters['inner_kernel']\n\n def _recreate_kernel(kernel, parameters):\n new_parameters = kernel.parameters.copy()\n new_parameters.update(parameters)\n if 'store_parameters_in_results' in new_parameters:\n new_parameters['store_parameters_in_results'] = True\n with deprecation.silence():\n return type(kernel)(**new_parameters)\n\n if hasattr(kernel, 'parameters'):\n kernel = _recreate_kernel(kernel, {})\n\n for outer_kernel in reversed(kernel_stack):\n outer_kernel = _recreate_kernel(outer_kernel, 
{'inner_kernel': kernel})\n kernel = outer_kernel\n\n return kernel", "docstring": "Enables the `store_parameters_in_results` parameter in a chain of kernels.\n\n This is a temporary utility for use during the transition period of the\n parameter storage methods.\n\nArgs:\n kernel: A TransitionKernel.\n\nReturns:\n kernel: The same kernel, but recreated with `store_parameters_in_results`\n recursively set to `True` in its parameters and its inner kernels (as\n appropriate).", "source": "juraj_google_style"} -{"code": "def result_to_dict(raw_result):\n\n\n result = {}\n\n for channel_index, channel in enumerate(raw_result):\n channel_id, channel_name = channel[0], channel[1]\n channel_result = {\n 'id': channel_id,\n 'name': channel_name,\n 'movies': []\n }\n for movie in channel[2]:\n channel_result['movies'].append({\n 'title': movie[1],\n 'start_time': datetime.fromtimestamp(movie[2]),\n 'end_time': datetime.fromtimestamp(movie[2] + movie[3]),\n 'inf': True if movie[3] else False,\n })\n result[channel_id] = channel_result\n\n return result", "docstring": "Parse raw result from fetcher into readable dictionary\n\nArgs:\n raw_result (dict) - raw data from `fetcher`\n\nReturns:\n dict - readable dictionary", "source": "juraj_google_style"} -{"code": "def add_document(self, key, url, **kwargs):\n\n document = self._check_metadata_for_file(key=key, url=url, **kwargs)\n\n for dict_key in (\n 'description',\n 'fulltext',\n 'hidden',\n 'material',\n 'original_url',\n 'url',\n 'filename',\n ):\n if kwargs.get(dict_key):\n document[dict_key] = kwargs[dict_key]\n\n if key_already_there(document, self.record.get('documents', ())):\n raise ValueError(\n 'There\\'s already a document with the key %s.'\n % document['key']\n )\n\n self._append_to('documents', document)", "docstring": "Adds document to record\n\nArgs:\n key (string): document key\n url (string): document url\n Keyword Args:\n description (string): simple description\n fulltext (bool): mark if this is a full text\n hidden (bool): is document should be hidden\n material (string):\n original_url (string): original url\n filename (string): current url\n\n\n Returns: None", "source": "juraj_google_style"} -{"code": "def ListAttrs(cls):\n\n precondition.AssertType(cls, type)\n\n if PY2:\n # TODO(user): once https://github.com/google/pytype/issues/127 is fixed,\n # pytype should be able to tell that this line is unreachable in py3.\n return [item.decode(\"ascii\") for item in dir(cls)] # pytype: disable=attribute-error\n else:\n return dir(cls)", "docstring": "A compatibility wrapper for listing class attributes.\n\n This method solves similar Python 2 compatibility issues for `dir` function as\n `GetName` does for `__name__` invocations. 
See documentation for `GetName` for\n more details.\n\n Once support for Python 2 is dropped all invocations of this function should\n be replaced with ordinary `dir` calls.\n\nArgs:\n cls: A class object to list the attributes for.\n\nReturns:\n A list of attribute names as unicode strings.", "source": "juraj_google_style"} -{"code": "def changeTo(self, path):\n\n dictionary = DictSingle(Pair('PATH', StringSingle(path)))\n self.value = [dictionary]", "docstring": "change value\n\nArgs:\n path (str): the new environment path", "source": "juraj_google_style"} -{"code": "def _is_autocomplete_valid(cur_commands, alias_command):\n\n parent_command = ' '.join(cur_commands[1:])\n with open(GLOBAL_ALIAS_TAB_COMP_TABLE_PATH, 'r') as tab_completion_table_file:\n try:\n tab_completion_table = json.loads(tab_completion_table_file.read())\n return alias_command in tab_completion_table and parent_command in tab_completion_table[alias_command]\n except Exception: # pylint: disable=broad-except\n return False", "docstring": "Determine whether autocomplete can be performed at the current state.\n\nArgs:\n parser: The current CLI parser.\n cur_commands: The current commands typed in the console.\n alias_command: The alias command.\n\nReturns:\n True if autocomplete can be performed.", "source": "juraj_google_style"} -{"code": "def __init__(self, entities=[], metadata={}):\n\n self.entities = entities\n self.metadata = metadata", "docstring": "Initialize ContextManagerFrame\n\nArgs:\n entities(list): List of Entities...\n metadata(object): metadata to describe context?", "source": "juraj_google_style"} -{"code": "def _PrintAnalysisReportsDetails(self, storage_reader):\n\n if not storage_reader.HasAnalysisReports():\n self._output_writer.Write('No analysis reports stored.\\n\\n')\n return\n\n for index, analysis_report in enumerate(\n storage_reader.GetAnalysisReports()):\n title = 'Analysis report: {0:d}'.format(index)\n table_view = views.ViewsFactory.GetTableView(\n self._views_format_type, title=title)\n\n table_view.AddRow(['String', analysis_report.GetString()])\n\n table_view.Write(self._output_writer)", "docstring": "Prints the details of the analysis reports.\n\nArgs:\n storage_reader (StorageReader): storage reader.", "source": "juraj_google_style"} -{"code": "def get_destination(self, filepath, targetdir=None):\n\n dst = self.change_extension(filepath, 'css')\n if targetdir:\n dst = os.path.join(targetdir, dst)\n return dst", "docstring": "Return destination path from given source file path.\n\n Destination is allways a file with extension ``.css``.\n\nArgs:\n filepath (str): A file path. The path is allways relative to\n sources directory. If not relative, ``targetdir`` won't be\n joined.\n absolute (bool): If given will be added at beginning of file\n path.\n\nReturns:\n str: Destination filepath.", "source": "juraj_google_style"} -{"code": "def schedule_saved(sender, instance, **kwargs):\n\n from contentstore.tasks import sync_schedule\n\n sync_schedule.delay(str(instance.id))", "docstring": "Fires off the celery task to ensure that this schedule is in the scheduler\n\nArgs:\n sender {class} -- The model class, always Schedule\n instance {Schedule} --\n The instance of the Schedule that we want to sync", "source": "juraj_google_style"} -{"code": "def reverse_transform(self, col):\n\n output = pd.DataFrame()\n new_name = '?' 
+ self.col_name\n\n col.loc[col[new_name] == 0, self.col_name] = np.nan\n output[self.col_name] = col[self.col_name]\n return output", "docstring": "Converts data back into original format.\n\nArgs:\n col(pandas.DataFrame): Data to transform.\n\nReturns:\n pandas.DataFrame", "source": "juraj_google_style"} -{"code": "def dump_in_memory_result(self, result, output_path):\n\n file_count = 0\n logger.debug(\"Dumping in-memory processing results to output folder: %s\", output_path)\n for k, v in iteritems(result):\n cur_output_path = os.path.join(output_path, k)\n\n if isinstance(v, dict):\n file_count += self.dump_in_memory_result(v, cur_output_path)\n else:\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n\n filename = os.path.join(output_path, k)\n logger.debug(\"Writing output file: %s\", filename)\n # dump the contents of the file\n with open(filename, 'wt', encoding=self.config.encoding) as f:\n f.write(v)\n\n file_count += 1\n\n return file_count", "docstring": "Recursively dumps the result of our processing into files within the\n given output path.\n\nArgs:\n result: The in-memory result of our processing.\n output_path: Full path to the folder into which to dump the files.\n\nReturns:\n The number of files generated (integer).", "source": "juraj_google_style"} -{"code": "def defocus_blur(x, severity=1):\n\n c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]\n x = np.array(x) / 255.\n kernel = disk(radius=c[0], alias_blur=c[1])\n channels = []\n for d in range(3):\n channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))\n channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3\n x_clip = np.clip(channels, 0, 1) * 255\n return around_and_astype(x_clip)", "docstring": "Defocus blurring to images.\n\n Apply defocus blurring to images using Gaussian kernel.\n\nArgs:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\nReturns:\n numpy array, image with uint8 pixels in [0,255]. 
Applied defocus blur.", "source": "juraj_google_style"} -{"code": "def switch_to_window(self, window_name):\n\n data = {\n 'name': window_name\n }\n self._execute(Command.SWITCH_TO_WINDOW, data)", "docstring": "Switch to the given window.\n\n Support:\n Web(WebView)\n\nArgs:\n window_name(str): The window to change focus to.\n\nReturns:\n WebDriver Object.", "source": "juraj_google_style"} -{"code": "def __setitem__(self, complete_selector, value):\n\n if not SELECTOR_RE.match(complete_selector):\n raise ValueError(\"Invalid selector '{}'.\".format(complete_selector))\n\n selector_components = complete_selector.split('.')\n node = self._selector_tree\n\n # Iterate backwards over the components of the selector.\n for component in selector_components[::-1]:\n node = node.setdefault(component, {})\n node[_TERMINAL_KEY] = complete_selector\n self._selector_map[complete_selector] = value", "docstring": "Associates a value with `complete_selector`.\n\n This function also performs some additional bookkeeping to facilitate\n partial matching of selectors.\n\nArgs:\n complete_selector: The (complete) selector to associate a value with.\n value: The value to associate.\n\nRaises:\n ValueError: If `complete_selector` isn't a string consisting of valid\n Python identifiers separated by periods.", "source": "juraj_google_style"} -{"code": "def sils(T,f,c,d,h):\n\n model = Model(\"single item lotsizing\")\n Ts = range(1,T+1)\n M = sum(d[t] for t in Ts)\n y,x,I = {},{},{}\n for t in Ts:\n y[t] = model.addVar(vtype=\"I\", ub=1, name=\"y(%s)\"%t)\n x[t] = model.addVar(vtype=\"C\", ub=M, name=\"x(%s)\"%t)\n I[t] = model.addVar(vtype=\"C\", name=\"I(%s)\"%t)\n I[0] = 0\n\n for t in Ts:\n model.addCons(x[t] <= M*y[t], \"ConstrUB(%s)\"%t)\n model.addCons(I[t-1] + x[t] == I[t] + d[t], \"FlowCons(%s)\"%t)\n\n model.setObjective(\\\n quicksum(f[t]*y[t] + c[t]*x[t] + h[t]*I[t] for t in Ts),\\\n \"minimize\")\n\n model.data = y,x,I\n return model", "docstring": "sils -- LP lotsizing for the single item lot sizing problem\n Parameters:\n - T: number of periods\n - P: set of products\n - f[t]: set-up costs (on period t)\n - c[t]: variable costs\n - d[t]: demand values\n - h[t]: holding costs\n Returns a model, ready to be solved.", "source": "juraj_google_style"} -{"code": "def retry(exceptions, tries=5, delay=1, backoff=2, logger=None):\n\n\n def deco_retry(func):\n @wraps(func)\n async def f_retry(self, *args, **kwargs):\n if not iscoroutine(func):\n f = coroutine(func)\n else:\n f = func\n\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return await f(self, *args, **kwargs)\n except exceptions:\n if logger:\n logger.info('Retrying %s after %s seconds', f.__name__, mdelay)\n sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return await f(self, *args, **kwargs)\n\n return f_retry\n\n return deco_retry", "docstring": "Retry calling the decorated function using an exponential backoff.\n\nArgs:\n exceptions: The exception to check. may be a tuple of\n exceptions to check.\n tries: Number of times to try (not retry) before giving up.\n delay: Initial delay between retries in seconds.\n backoff: Backoff multiplier (e.g. value of 2 will double the delay\n each retry).\n logger: Logger to use. 
If None, print.", "source": "juraj_google_style"} -{"code": "def _extract_dir(self, dir_not_exists, output):\n\n if not dir_not_exists:\n lst = output.dir_cache\n return {i[\"relpath\"]: i[\"md5\"] for i in lst}\n return {}", "docstring": "Extract the content of dvc tree file\n\nArgs:\n self(object) - Repo class instance\n dir_not_exists(bool) - flag for directory existence\n output(object) - OutputLOCAL class instance\n\nReturns:\n dict - dictionary with keys - paths to file in .dvc/cache\n values -checksums for that files", "source": "juraj_google_style"} -{"code": "def __openAndInitCSVFile(self, modelInfo):\n\n # Get the base path and figure out the path of the report file.\n basePath = self.__outputDirAbsPath\n\n # Form the name of the output csv file that will contain all the results\n reportCSVName = \"%s_Report.csv\" % (self.__outputLabel,)\n reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)\n\n # If a report CSV file already exists, back it up\n backupCSVPath = None\n if os.path.exists(reportCSVPath):\n backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)\n\n\n # Open report file\n if self.__replaceReport:\n mode = \"w\"\n else:\n mode = \"a\"\n csv = self.__csvFileObj = open(reportCSVPath, mode)\n\n # If we are appending, add some blank line separators\n if not self.__replaceReport and backupCSVPath:\n print >> csv\n print >> csv\n\n # Print the column names\n print >> csv, \"jobID, \",\n print >> csv, \"modelID, \",\n print >> csv, \"status, \" ,\n print >> csv, \"completionReason, \",\n print >> csv, \"startTime, \",\n print >> csv, \"endTime, \",\n print >> csv, \"runtime(s), \" ,\n print >> csv, \"expDesc, \",\n print >> csv, \"numRecords, \",\n\n for key in self.__sortedVariableNames:\n print >> csv, \"%s, \" % key,\n for key in self.__sortedMetricsKeys:\n print >> csv, \"%s, \" % key,\n print >> csv", "docstring": "- Backs up old report csv file;\n - opens the report csv file in append or overwrite mode (per\n self.__replaceReport);\n - emits column fields;\n - sets up self.__sortedVariableNames, self.__csvFileObj,\n self.__backupCSVPath, and self.__reportCSVPath\n\n Parameters:\n ----------------------------------------------------------------------\n modelInfo: First _NupicModelInfo instance passed to emit()\n retval: nothing", "source": "juraj_google_style"} -{"code": "def save_config(config, logdir=None):\n\n if logdir:\n with config.unlocked:\n config.logdir = logdir\n message = 'Start a new run and write summaries and checkpoints to {}.'\n tf.logging.info(message.format(config.logdir))\n tf.gfile.MakeDirs(config.logdir)\n config_path = os.path.join(config.logdir, 'config.yaml')\n with tf.gfile.FastGFile(config_path, 'w') as file_:\n yaml.dump(config, file_, default_flow_style=False)\n else:\n message = (\n 'Start a new run without storing summaries and checkpoints since no '\n 'logging directory was specified.')\n tf.logging.info(message)\n return config", "docstring": "Save a new configuration by name.\n\n If a logging directory is specified, is will be created and the configuration\n will be stored there. 
Otherwise, a log message will be printed.\n\nArgs:\n config: Configuration object.\n logdir: Location for writing summaries and checkpoints if specified.\n\nReturns:\n Configuration object.", "source": "juraj_google_style"} -{"code": "def filter_prop_defs(prop_defs, hierarchy, cls_names):\n\n\n def _is_valid(test_list, valid_list):\n\n\n for test in test_list:\n if test in valid_list:\n return True\n return False\n\n new_dict = {}\n valid_classes = [Uri('kdr_AllClasses')] + cls_names + hierarchy\n for def_name, value in prop_defs.items():\n new_dict[def_name] = []\n empty_def = []\n try:\n for item in value:\n if item.get('kds_appliesToClass'):\n if _is_valid(item['kds_appliesToClass'], valid_classes):\n new_dict[def_name].append(item)\n else:\n empty_def.append(item)\n if not new_dict[def_name]:\n new_dict[def_name] = empty_def\n except AttributeError:\n new_dict[def_name] = value\n return new_dict", "docstring": "Reads through the prop_defs and returns a dictionary filtered by the\n current class\n\nArgs:\n prop_defs: the defintions from the rdf vocabulary defintion\n cls_object: the class object to tie the property\n cls_names: the name of the classes", "source": "juraj_google_style"} -{"code": "def itersplit_to_fields(str_,\n fsep=DEFAULT_FSEP,\n revtuple=None,\n fields=[],\n preparse=None):\n\n if preparse:\n str_ = preparse(str_)\n _fields = itersplit(str_, fsep)\n\n if revtuple is not None:\n try:\n values = (t[1] for t in izip_longest(revtuple._fields, _fields))\n return revtuple(*values)\n except Exception as e:\n log.error(revtuple)\n log.error(_fields)\n log.exception(e)\n raise\n\n return tuple(izip_longest(fields, _fields, fillvalue=None))", "docstring": "Itersplit a string into a (named, if specified) tuple.\n\nArgs:\n str_ (str): string to split\n fsep (str): field separator (delimiter to split by)\n revtuple (object): namedtuple (or class with a ``._fields`` attr)\n (optional)\n fields (list of str): field names (if revtuple is not specified)\n preparse (callable): function to parse str with before itersplitting\n\nReturns:\n tuple or revtuple: fields as a tuple or revtuple, if specified", "source": "juraj_google_style"} -{"code": "def create_rank_dicts(self, alt_scores):\n\n self.alts_to_ranks = dict()\n cur_score = max(alt_scores.values())\n cur_rank = 0\n self.ranks_to_alts = {cur_rank:[]}\n for i in sorted(alt_scores.keys(), key=lambda x: -alt_scores[x]):\n if alt_scores[i] == cur_score:\n self.ranks_to_alts[cur_rank].append(i)\n elif alt_scores[i] < cur_score:\n cur_rank += 1\n cur_score = alt_scores[i]\n self.ranks_to_alts[cur_rank] = [i]\n self.alts_to_ranks[i] = cur_rank", "docstring": "Description:\n Takes in the scores of the alternatives in the form alt:score and\n generates the dictionaries mapping alternatives to rankings and\n rankings to alternatives.\n Parameters:\n alt_scores: dictionary of the scores of every alternative", "source": "juraj_google_style"} -{"code": "def update_model_in_repo_based_on_filename(self, model):\n\n if model._tx_filename is None:\n for fn in self.all_models.filename_to_model:\n if self.all_models.filename_to_model[fn] == model:\n return fn\n i = 0\n while self.all_models.has_model(\"anonymous{}\".format(i)):\n i += 1\n myfilename = \"anonymous{}\".format(i)\n self.all_models.filename_to_model[myfilename] = model\n else:\n myfilename = model._tx_filename\n if (not self.all_models.has_model(myfilename)):\n self.all_models.filename_to_model[myfilename] = model\n return myfilename", "docstring": "Adds a model to the repo (not 
initially visible)\n\nArgs:\n model: the model to be added. If the model\n has no filename, a name is invented\n Returns: the filename of the model added to the repo", "source": "juraj_google_style"} -{"code": "def __validate(self, value, validate_element):\n\n if not self.repeated:\n return validate_element(value)\n else:\n # Must be a list or tuple, may not be a string.\n if isinstance(value, (list, tuple)):\n result = []\n for element in value:\n if element is None:\n try:\n name = self.name\n except AttributeError:\n raise ValidationError(\n 'Repeated values for %s '\n 'may not be None' % self.__class__.__name__)\n else:\n raise ValidationError(\n 'Repeated values for field %s '\n 'may not be None' % name)\n result.append(validate_element(element))\n return result\n elif value is not None:\n try:\n name = self.name\n except AttributeError:\n raise ValidationError('%s is repeated. Found: %s' % (\n self.__class__.__name__, value))\n else:\n raise ValidationError(\n 'Field %s is repeated. Found: %s' % (name, value))\n return value", "docstring": "Internal validation function.\n\n Validate an internal value using a function to validate\n individual elements.\n\nArgs:\n value: Value to validate.\n validate_element: Function to use to validate individual elements.\n\nRaises:\n ValidationError if value is not expected type.", "source": "juraj_google_style"} -{"code": "def layout(self, dimensions=None, **kwargs):\n\n return self.groupby(dimensions, container_type=NdLayout, **kwargs)", "docstring": "Groups data by supplied dimension(s) laying the groups along\n the dimension(s) out in a NdLayout.\n\nArgs:\n dimensions: Dimension/str or list\n Dimension or list of dimensions to group by\n\nReturns:\n layout: NdLayout\n NdLayout with supplied dimensions", "source": "juraj_google_style"} -{"code": "def try_to_create_directory(directory_path):\n\n logger = logging.getLogger(\"ray\")\n directory_path = os.path.expanduser(directory_path)\n if not os.path.exists(directory_path):\n try:\n os.makedirs(directory_path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e\n logger.warning(\n \"Attempted to create '{}', but the directory already \"\n \"exists.\".format(directory_path))\n # Change the log directory permissions so others can use it. This is\n # important when multiple people are using the same machine.\n try:\n os.chmod(directory_path, 0o0777)\n except OSError as e:\n # Silently suppress the PermissionError that is thrown by the chmod.\n # This is done because the user attempting to change the permissions\n # on a directory may not own it. 
The chmod is attempted whether the\n # directory is new or not to avoid race conditions.\n # ray-project/ray/#3591\n if e.errno in [errno.EACCES, errno.EPERM]:\n pass\n else:\n raise", "docstring": "Attempt to create a directory that is globally readable/writable.\n\nArgs:\n directory_path: The path of the directory to create.", "source": "juraj_google_style"} -{"code": "def _add_consequences(self, variant_obj):\n\n\n consequences = set()\n for transcript in variant_obj.transcripts:\n for consequence in transcript.consequence.split('&'):\n consequences.add(consequence)\n\n variant_obj.consequences = list(consequences)", "docstring": "Add the consequences found in all transcripts\n\nArgs:\n variant_obj (puzzle.models.Variant)", "source": "juraj_google_style"} -{"code": "def put_many(self, type: Type[T], items: Iterable[T]) -> None:\n\n LOGGER.info(\"Getting SinkHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n try:\n handlers = self._put_types[type]\n except KeyError:\n try:\n LOGGER.info(\"Building new SinkHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n handlers = self._put_handlers(type)\n except NoConversionError:\n handlers = None\n self._get_types[type] = handlers\n\n LOGGER.info(\"Creating new PipelineContext\")\n context = self._new_context()\n\n LOGGER.info(\"Sending items \\\"{items}\\\" to SourceHandlers\".format(items=items))\n if handlers is not None:\n items = list(items)\n for handler in handlers:\n handler.put_many(items, context)", "docstring": "Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.\n\nArgs:\n items: An iterable (e.g. list) of objects to be inserted into the data pipeline.", "source": "juraj_google_style"} -{"code": "def __init__(self, chgcar):\n\n self.chgcar = chgcar\n self.structure = chgcar.structure\n self.extrema_coords = [] # list of frac_coords of local extrema\n self.extrema_type = None # \"local maxima\" or \"local minima\"\n self._extrema_df = None # extrema frac_coords - chg density table\n self._charge_distribution_df = None", "docstring": "Initialization.\n\nArgs:\n chgcar (pmg.Chgcar): input Chgcar object.", "source": "juraj_google_style"} -{"code": "def query_band(self, value):\n\n\n self._query_band = value\n # If query band is None we remove the element and don't write it to XML\n if value is None:\n try:\n del self._connectionXML.attrib['query-band-spec']\n except KeyError:\n pass\n else:\n self._connectionXML.set('query-band-spec', value)", "docstring": "Set the connection's query_band property.\n\nArgs:\n value: New query_band value. 
String.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def new_result(self, loss, budget, parameters, update_model=True):\n\n if loss is None:\n # One could skip crashed results, but we decided\n # assign a +inf loss and count them as bad configurations\n loss = np.inf\n\n if budget not in self.configs.keys():\n self.configs[budget] = []\n self.losses[budget] = []\n\n # skip model building if we already have a bigger model\n if max(list(self.kde_models.keys()) + [-np.inf]) > budget:\n return\n\n # We want to get a numerical representation of the configuration in the original space\n conf = ConfigSpace.Configuration(self.configspace, parameters)\n self.configs[budget].append(conf.get_array())\n self.losses[budget].append(loss)\n\n # skip model building:\n # a) if not enough points are available\n if len(self.configs[budget]) <= self.min_points_in_model - 1:\n logger.debug(\"Only %i run(s) for budget %f available, need more than %s \\\n -> can't build model!\"%(len(self.configs[budget]), budget, self.min_points_in_model+1))\n return\n # b) during warnm starting when we feed previous results in and only update once\n if not update_model:\n return\n\n train_configs = np.array(self.configs[budget])\n train_losses = np.array(self.losses[budget])\n\n n_good = max(self.min_points_in_model, (self.top_n_percent * train_configs.shape[0])//100)\n n_bad = max(self.min_points_in_model, ((100-self.top_n_percent)*train_configs.shape[0])//100)\n\n # Refit KDE for the current budget\n idx = np.argsort(train_losses)\n\n train_data_good = self.impute_conditional_data(train_configs[idx[:n_good]])\n train_data_bad = self.impute_conditional_data(train_configs[idx[n_good:n_good+n_bad]])\n\n if train_data_good.shape[0] <= train_data_good.shape[1]:\n return\n if train_data_bad.shape[0] <= train_data_bad.shape[1]:\n return\n\n #more expensive crossvalidation method\n #bw_estimation = 'cv_ls'\n # quick rule of thumb\n bw_estimation = 'normal_reference'\n\n bad_kde = sm.nonparametric.KDEMultivariate(data=train_data_bad, var_type=self.kde_vartypes, bw=bw_estimation)\n good_kde = sm.nonparametric.KDEMultivariate(data=train_data_good, var_type=self.kde_vartypes, bw=bw_estimation)\n\n bad_kde.bw = np.clip(bad_kde.bw, self.min_bandwidth, None)\n good_kde.bw = np.clip(good_kde.bw, self.min_bandwidth, None)\n\n self.kde_models[budget] = {\n 'good': good_kde,\n 'bad' : bad_kde\n }\n\n # update probs for the categorical parameters for later sampling\n logger.debug('done building a new model for budget %f based on %i/%i split\\nBest loss for this budget:%f\\n'\n %(budget, n_good, n_bad, np.min(train_losses)))", "docstring": "Function to register finished runs. 
Every time a run has finished, this function should be called\n to register it with the loss.\n\n Parameters:\n -----------\n loss: float\n the loss of the parameters\n budget: float\n the budget of the parameters\n parameters: dict\n the parameters of this trial\n update_model: bool\n whether use this parameter to update BP model\n\n Returns\n -------\n None", "source": "juraj_google_style"} -{"code": "def _set_unique_id(self, json_response):\n\n self.unique_id = (\n json_response.get('md5')\n or json_response.get('sha1')\n or json_response.get('sha256')\n or ''\n )", "docstring": "Sets the unique_id provided a json response.\n\nArgs:\n json_response:", "source": "juraj_google_style"} -{"code": "def from_string(cls, string_input):\n\n\n correlation_grid = {}\n Exc_DFT_option = {}\n COHSEX_options = {}\n GW_options = {}\n BSE_TDDFT_options = {}\n\n lines = string_input.strip().split(\"\\n\")\n\n # number of atoms and species\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n nat = toks[0]\n nsp = toks[1]\n # number of valence bands\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n nvbands = toks[0]\n\n # correlation_grid\n # number of points and spacing in eV for correlation grid\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n correlation_grid['n_grid'] = toks[0]\n correlation_grid['dE_grid'] = toks[1]\n\n # Exc DFT\n # relire=1 ou recalculer=0 Exc DFT\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n Exc_DFT_option['rdVxcpsi'] = toks[0]\n\n # COHSEX\n # number of COHSEX corrected occp and unoccp bands: C=COHSEX H=HF\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n COHSEX_options['nv_cohsex'] = toks[0]\n COHSEX_options['nc_cohsex'] = toks[1]\n COHSEX_options['eigMethod'] = toks[2]\n # number of COHSEX iter, scf on wfns, mixing coeff; V=RI-V I=RI-D\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n COHSEX_options['nit_cohsex'] = toks[0]\n COHSEX_options['resMethod'] = toks[1]\n COHSEX_options['scf_cohsex_wf'] = toks[2]\n COHSEX_options['mix_cohsex'] = toks[3]\n\n # GW\n # number of GW corrected occp and unoccp bands\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n GW_options['nv_corr'] = toks[0]\n GW_options['nc_corr'] = toks[1]\n # number of GW iterations\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n GW_options['nit_gw'] = toks[0]\n\n # BSE\n # dumping for BSE and TDDFT\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n BSE_TDDFT_options['do_bse'] = toks[0]\n BSE_TDDFT_options['do_tddft'] = toks[1]\n # number of occp. and virtual bands fo BSE: nocore and up to 40 eVs\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n BSE_TDDFT_options['nv_bse'] = toks[0]\n BSE_TDDFT_options['nc_bse'] = toks[1]\n # number of excitations needed and number of iterations\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n BSE_TDDFT_options['npsi_bse'] = toks[0]\n BSE_TDDFT_options['nit_bse'] = toks[1]\n\n # Molecule\n # list of symbols in order\n lines.pop(0)\n atname = []\n i = int(nsp)\n while i != 0:\n l = lines.pop(0).strip()\n toks = l.split()\n atname.append(toks[0])\n i -= 1\n\n # scaling factor\n lines.pop(0)\n l = lines.pop(0).strip()\n toks = l.split()\n scale = toks[0]\n # atoms x,y,z cartesian .. 
will be multiplied by scale\n lines.pop(0)\n # Parse geometry\n species = []\n coords = []\n i = int(nat)\n while i != 0:\n l = lines.pop(0).strip()\n toks = l.split()\n coords.append([float(j) for j in toks[0:3]])\n species.append(atname[int(toks[3]) - 1])\n i -= 1\n\n mol = Molecule(species, coords)\n\n return FiestaInput(mol=mol, correlation_grid=correlation_grid,\n Exc_DFT_option=Exc_DFT_option,\n COHSEX_options=COHSEX_options,\n GW_options=GW_options,\n BSE_TDDFT_options=BSE_TDDFT_options)", "docstring": "Read an FiestaInput from a string. Currently tested to work with\n files generated from this class itself.\n\nArgs:\n string_input: string_input to parse.\n\nReturns:\n FiestaInput object", "source": "juraj_google_style"} -{"code": "def __init__(self, connect_func, max_size=10):\n\n self.connect_func = connect_func\n self.limiter = threading.BoundedSemaphore(max_size)\n self.idle_conns = [] # Atomic access only!!\n self.closed = False", "docstring": "Creates a ConnectionPool.\n\nArgs:\n connect_func: A closure which returns a new connection to the underlying\n database, i.e. a MySQLdb.Connection. Should raise or block if the\n database is unavailable.\n max_size: The maximum number of simultaneous connections.", "source": "juraj_google_style"} -{"code": "def __add__(self, other):\n\n if not all(np.equal(self.energies, other.energies)):\n raise ValueError(\"Energies of both COHP are not compatible.\")\n populations = {spin: self.populations[spin] + other.populations[spin]\n for spin in self.cohp}\n if self.icohp is not None and other.icohp is not None:\n int_pop = {spin: self.icohp[spin] + other.icohp[spin]\n for spin in self.icohp}\n else:\n if self.icohp is not None or other.icohp is not None:\n warnings.warn(\"One of the COHP objects does not contain \"\n \"ICOHPs. Setting ICOHP to None.\")\n int_pop = None\n return Cohp(self.efermi, self.energies, populations, icohp=int_pop)", "docstring": "Adds two COHP together. Checks that energy scales are the same.\n Otherwise, it raises a ValueError. 
It also adds ICOHP if present.\n If ICOHP is only present in one object, it displays a warning and\n will not add ICOHP.\n\nArgs:\n other: Another COHP object.\n\nReturns:\n Sum of the two COHPs as a COHP object.", "source": "juraj_google_style"} -{"code": "def GetAllPluginInformation(cls, show_all=True):\n\n results = []\n for plugin_class in iter(cls._plugin_classes.values()):\n plugin_object = plugin_class()\n if not show_all and not plugin_class.ENABLE_IN_EXTRACTION:\n continue\n\n # TODO: Use a specific description variable, not the docstring.\n doc_string, _, _ = plugin_class.__doc__.partition('\\n')\n type_string = cls._PLUGIN_TYPE_STRINGS.get(plugin_object.plugin_type)\n information_tuple = (plugin_object.plugin_name, doc_string, type_string)\n results.append(information_tuple)\n\n return sorted(results)", "docstring": "Retrieves a list of the registered analysis plugins.\n\nArgs:\n show_all (Optional[bool]): True if all analysis plugin names should\n be listed.\n\nReturns:\n list[tuple[str, str, str]]: the name, docstring and type string of each\n analysis plugin in alphabetical order.", "source": "juraj_google_style"} -{"code": "def select(self, condition):\n\n return self.__class__([p for p in self if condition(p)])", "docstring": "Select only those pseudopotentials for which condition is True.\n Return new class:`PseudoTable` object.\n\nArgs:\n condition:\n Function that accepts a :class:`Pseudo` object and returns True or False.", "source": "juraj_google_style"} -{"code": "def contains(self, value, equality_comparer=operator.eq):\n\n if self.closed():\n raise ValueError(\"Attempt to call contains() on a \"\n \"closed Queryable.\")\n\n if not is_callable(equality_comparer):\n raise TypeError(\"contains() parameter equality_comparer={0} is \"\n \"not callable\".format(repr(equality_comparer)))\n\n if equality_comparer is operator.eq:\n return value in self._iterable\n\n for item in self:\n if equality_comparer(value, item):\n return True\n return False", "docstring": "Determines whether the sequence contains a particular value.\n\n Execution is immediate. 
Depending on the type of the sequence, all or\n none of the sequence may be consumed by this operation.\n\n Note: This method uses immediate execution.\n\nArgs:\n value: The value to test for membership of the sequence\n\nReturns:\n True if value is in the sequence, otherwise False.\n\nRaises:\n ValueError: If the Queryable has been closed.", "source": "juraj_google_style"} -{"code": "def match(self, set_a, set_b):\n\n track_step_matches = [[] * len(set_a)]\n\n costs = self.cost_matrix(set_a, set_b)\n valid_costs = np.all(costs < 1, axis=2)\n set_a_matches, set_b_matches = np.where(valid_costs)\n s = 0\n track_pairings = pd.DataFrame(index=np.arange(costs.shape[0]),\n columns=[\"Track\", \"Step\", \"Time\", \"Matched\", \"Pairings\"], dtype=object)\n set_b_info = []\n for trb, track_b in enumerate(set_b):\n for t, time in enumerate(track_b.times):\n set_b_info.append((trb, t))\n set_b_info_arr = np.array(set_b_info, dtype=int)\n for tr, track_a in enumerate(set_a):\n for t, time in enumerate(track_a.times):\n track_pairings.loc[s, [\"Track\", \"Step\", \"Time\"]] = [tr, t, time]\n track_pairings.loc[s, \"Matched\"] = 1 if np.count_nonzero(set_a_matches == s) > 0 else 0\n if track_pairings.loc[s, \"Matched\"] == 1:\n track_pairings.loc[s, \"Pairings\"] = set_b_info_arr[set_b_matches[set_a_matches == s]]\n else:\n track_pairings.loc[s, \"Pairings\"] = np.array([])\n s += 1\n return track_pairings", "docstring": "For each step in each track from set_a, identify all steps in all tracks from set_b that meet all\n cost function criteria\n\nArgs:\n set_a: List of STObjects\n set_b: List of STObjects\n\nReturns:\n track_pairings: pandas.DataFrame", "source": "juraj_google_style"} -{"code": "def QA_SU_save_future_day(engine, client=DATABASE):\n\n\n engine = select_save_engine(engine)\n engine.QA_SU_save_future_day(client=client)", "docstring": "save future_day\n\nArgs:\n engine {[type]} -- [description]\n\n Keyword Arguments:\n client {[type]} -- [description] (default: {DATABASE})", "source": "juraj_google_style"} -{"code": "def compute_distance(a, b):\n\n\n # check simple cases first\n if not a:\n return len(b)\n if not b:\n return len(a)\n if a == b or str.lower(a) == str.lower(b):\n return 0\n\n # lowercase each string\n a = str.lower(a)\n b = str.lower(b)\n\n # create empty vectors to store costs\n vector_1 = [-1] * (len(b) + 1)\n vector_2 = [-1] * (len(b) + 1)\n\n # set default values\n for i in range(len(vector_1)):\n vector_1[i] = i\n\n # compute distance\n for i in range(len(a)):\n vector_2[0] = i + 1\n\n for j in range(len(b)):\n penalty = 0 if a[i] == b[j] else compute_qwerty_distance(a[i], b[j])\n vector_2[j + 1] = min(vector_2[j] + 1, vector_1[j + 1] + 1, vector_1[j] + penalty)\n\n for j in range(len(vector_1)):\n vector_1[j] = vector_2[j]\n\n return vector_2[len(b)]", "docstring": "Computes a modified Levenshtein distance between two strings, comparing the\n lowercase versions of each string and accounting for QWERTY distance.\n\nArgs:\n - a (str) String to compare to 'b'\n - b (str) String to compare to 'a'\n\nReturns:\n - (int) Number representing closeness of 'a' and 'b' (lower is better)", "source": "juraj_google_style"} -{"code": "def ismount(self, path):\n\n path = make_string_path(path)\n if not path:\n return False\n normed_path = self.filesystem.absnormpath(path)\n sep = self.filesystem._path_separator(path)\n if self.filesystem.is_windows_fs:\n if self.filesystem.alternative_path_separator is not None:\n path_seps = (\n sep, 
self.filesystem._alternative_path_separator(path)\n )\n else:\n path_seps = (sep, )\n drive, rest = self.filesystem.splitdrive(normed_path)\n if drive and drive[:1] in path_seps:\n return (not rest) or (rest in path_seps)\n if rest in path_seps:\n return True\n for mount_point in self.filesystem.mount_points:\n if normed_path.rstrip(sep) == mount_point.rstrip(sep):\n return True\n return False", "docstring": "Return true if the given path is a mount point.\n\nArgs:\n path: Path to filesystem object to be checked\n\nReturns:\n `True` if path is a mount point added to the fake file system.\n Under Windows also returns True for drive and UNC roots\n (independent of their existence).", "source": "juraj_google_style"} -{"code": "def get_customer(self, customer_id):\n\n return self.client._get(self.url + 'customers/{}'.format(customer_id), headers=self.get_headers())", "docstring": "Queries the information related to the customer.\n\nArgs:\n customer_id: Identifier of the client from which you want to find the associated information.\n\n Returns:", "source": "juraj_google_style"} -{"code": "def _compute_equations(self, x, verbose=False):\n\n # compute the error and the normals.\n normals = []\n values = []\n signs = []\n error = 0.0\n if verbose:\n print()\n print(' '.join('% 10.3e' % val for val in x), end=' ')\n active_str = ''\n for i, (sign, equation) in enumerate(self.equations):\n value, normal = equation(x)\n if (i < len(self.lock) and self.lock[i]) or \\\n (sign==-1 and value > -self.threshold) or \\\n (sign==0) or (sign==1 and value < self.threshold):\n values.append(value)\n normals.append(normal)\n signs.append(sign)\n error += value**2\n if verbose:\n active_str += 'X'\n if i < len(self.lock):\n self.lock[i] = True\n elif verbose:\n active_str += '-'\n error = np.sqrt(error)\n normals = np.array(normals, float)\n values = np.array(values, float)\n signs = np.array(signs, int)\n if verbose:\n print('[%s]' % active_str, end=' ')\n if error < self.threshold:\n print('OK')\n else:\n print('%.5e' % error)\n return normals, values, error, signs", "docstring": "Compute the values and the normals (gradients) of active constraints.\n\nArgs:\n | ``x`` -- The unknowns.", "source": "juraj_google_style"} -{"code": "def merge_corpus(self, corpus):\n\n\n # Create a copy, so objects aren't changed in the original merging corpus\n merging_corpus = Corpus.from_corpus(corpus)\n\n self.import_tracks(corpus.tracks.values())\n self.import_issuers(corpus.issuers.values())\n utterance_idx_mapping = self.import_utterances(corpus.utterances.values())\n\n for subview_idx, subview in merging_corpus.subviews.items():\n for filter in subview.filter_criteria:\n if isinstance(filter, subset.MatchingUtteranceIdxFilter):\n new_filtered_utt_ids = set()\n for utt_idx in filter.utterance_idxs:\n new_filtered_utt_ids.add(utterance_idx_mapping[utt_idx].idx)\n filter.utterance_idxs = new_filtered_utt_ids\n\n new_idx = naming.index_name_if_in_list(subview_idx, self.subviews.keys())\n self.import_subview(new_idx, subview)\n\n for feat_container_idx, feat_container in merging_corpus.feature_containers.items():\n self.new_feature_container(feat_container_idx, feat_container.path)", "docstring": "Merge the given corpus into this corpus. All assets (tracks, utterances, issuers, ...) are copied into\n this corpus. If any ids (utt-idx, track-idx, issuer-idx, subview-idx, ...) 
are occurring in both corpora,\n the ids from the merging corpus are suffixed by a number (starting from 1 until no other is matching).\n\nArgs:\n corpus (CorpusView): The corpus to merge.", "source": "juraj_google_style"} -{"code": "def __call__(self, data):\n\n if isinstance(data, dict):\n # convert each value in dict from a numpy array to a list if necessary, so they can be json serialized\n return json.dumps({k: _ndarray_to_list(v) for k, v in six.iteritems(data)})\n\n # files and buffers\n if hasattr(data, 'read'):\n return _json_serialize_from_buffer(data)\n\n return json.dumps(_ndarray_to_list(data))", "docstring": "Take data of various formats and serialize them into the expected request body.\n This uses information about supported input formats for the deployed model.\n\nArgs:\n data (object): Data to be serialized.\n\nReturns:\n object: Serialized data used for the request.", "source": "juraj_google_style"} -{"code": "def clean_output_files(self, follow_parents=True):\n\n paths = []\n if self.status != self.S_OK:\n logger.warning(\"Calling task.clean_output_files on a task whose status != S_OK\")\n\n # Remove all files in tmpdir.\n self.tmpdir.clean()\n\n # Find the file extensions that should be preserved since these files are still\n # needed by the children who haven't reached S_OK\n except_exts = set()\n for child in self.get_children():\n if child.status == self.S_OK: continue\n # Find the position of self in child.deps and add the extensions.\n i = [dep.node for dep in child.deps].index(self)\n except_exts.update(child.deps[i].exts)\n\n # Remove the files in the outdir of the task but keep except_exts.\n exts = self.gc.exts.difference(except_exts)\n #print(\"Will remove its extensions: \", exts)\n paths += self.outdir.remove_exts(exts)\n if not follow_parents: return paths\n\n # Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.\n for parent in self.get_parents():\n\n # Here we build a dictionary file extension --> list of child nodes requiring this file from parent\n # e.g {\"WFK\": [node1, node2]}\n ext2nodes = collections.defaultdict(list)\n for child in parent.get_children():\n if child.status == child.S_OK: continue\n i = [d.node for d in child.deps].index(parent)\n for ext in child.deps[i].exts:\n ext2nodes[ext].append(child)\n\n # Remove extension only if no node depends on it!\n except_exts = [k for k, lst in ext2nodes.items() if lst]\n exts = self.gc.exts.difference(except_exts)\n #print(\"%s removes extensions %s from parent node %s\" % (self, exts, parent))\n paths += parent.outdir.remove_exts(exts)\n\n self.history.info(\"Removed files: %s\" % paths)\n return paths", "docstring": "This method is called when the task reaches S_OK. 
It removes all the output files\n produced by the task that are not needed by its children as well as the output files\n produced by its parents if no other node needs them.\n\nArgs:\n follow_parents: If true, the output files of the parents nodes will be removed if possible.\n\nReturns:\n list with the absolute paths of the files that have been removed.", "source": "juraj_google_style"} -{"code": "def BreachDepressions(\n dem,\n in_place = False,\n topology = 'D8'\n):\n\n if type(dem) is not rdarray:\n raise Exception(\"A richdem.rdarray or numpy.ndarray is required!\")\n\n if topology not in ['D8','D4']:\n raise Exception(\"Unknown topology!\")\n\n if not in_place:\n dem = dem.copy()\n\n _AddAnalysis(dem, \"BreachDepressions(dem)\")\n\n demw = dem.wrap()\n\n if topology=='D8':\n _richdem.rdBreachDepressionsD8(demw)\n elif topology=='D4':\n _richdem.rdBreachDepressionsD4(demw)\n\n dem.copyFromWrapped(demw)\n\n if not in_place:\n return dem", "docstring": "Breaches all depressions in a DEM.\n\nArgs:\n dem (rdarray): An elevation model\n in_place (bool): If True, the DEM is modified in place and there is\n no return; otherwise, a new, altered DEM is returned.\n topology (string): A topology indicator\n\nReturns:\n DEM without depressions.", "source": "juraj_google_style"} -{"code": "def pad_blank_lines(indent_texts):\n\n current_indent = 0\n result = []\n for indent, text in indent_texts:\n if len(text) > 0:\n current_indent = indent\n result.append((current_indent, text))\n return result", "docstring": "Give blank (empty) lines the same indent level as the preceding line.\n\nArgs:\n indent_texts: An iterable of tuples each containing an integer in the\n first element and a string in the second element.\n\nReturns:\n A list of tuples each containing an integer in the first element and a\n string in the second element.", "source": "juraj_google_style"} -{"code": "def parse_cl_function(cl_code, dependencies=()):\n\n from mot.lib.cl_function import SimpleCLFunction\n\n def separate_cl_functions(input_str):\n\n class Semantics:\n\n def __init__(self):\n self._functions = []\n\n def result(self, ast):\n return self._functions\n\n def arglist(self, ast):\n return '({})'.format(', '.join(ast))\n\n def function(self, ast):\n def join(items):\n result = ''\n for item in items:\n if isinstance(item, str):\n result += item\n else:\n result += join(item)\n return result\n\n self._functions.append(join(ast).strip())\n return ast\n\n return _extract_cl_functions_parser.parse(input_str, semantics=Semantics())\n\n functions = separate_cl_functions(cl_code)\n return SimpleCLFunction.from_string(functions[-1], dependencies=list(dependencies or []) + [\n SimpleCLFunction.from_string(s) for s in functions[:-1]])", "docstring": "Parse the given OpenCL string to a single SimpleCLFunction.\n\n If the string contains more than one function, we will return only the last, with all the other added as a\n dependency.\n\nArgs:\n cl_code (str): the input string containing one or more functions.\n dependencies (Iterable[CLCodeObject]): The list of CL libraries this function depends on\n\nReturns:\n mot.lib.cl_function.SimpleCLFunction: the CL function for the last function in the given strings.", "source": "juraj_google_style"} -{"code": "def __init__(self, context):\n\n self._event_multiplexer = context.multiplexer\n self._logdir = context.logdir\n self._debugger_data_server = None\n self._grpc_port = None", "docstring": "Constructs a debugger plugin for TensorBoard.\n\n This plugin adds handlers for retrieving 
debugger-related data. The plugin\n also starts a debugger data server once the log directory is passed to the\n plugin via the call to get_plugin_apps.\n\nArgs:\n context: A base_plugin.TBContext instance.", "source": "juraj_google_style"} -{"code": "def generate_poisson_states(n_states, n_cells, n_genes):\n\n W = np.random.dirichlet([1]*n_states, size=(n_cells,))\n W = W.T\n M = np.random.random((n_genes, n_states))*100\n return M, W", "docstring": "Generates means and weights for the Poisson Convex Mixture Model.\n Weights are distributed Dirichlet(1,1,...), means are rand(0, 100).\n Returned values can be passed to generate_state_data(M, W).\n\nArgs:\n n_states (int): number of states or clusters\n n_cells (int): number of cells\n n_genes (int): number of genes\n\nReturns:\n M - genes x clusters\n W - clusters x cells", "source": "juraj_google_style"} -{"code": "def _load_from_file_object(self, f):\n\n subtoken_strings = []\n for line in f:\n s = line.strip()\n # Some vocab files wrap words in single quotes, but others don't\n if ((s.startswith(\"'\") and s.endswith(\"'\")) or\n (s.startswith(\"\\\"\") and s.endswith(\"\\\"\"))):\n s = s[1:-1]\n subtoken_strings.append(native_to_unicode(s))\n self._init_subtokens_from_list(subtoken_strings)\n self._init_alphabet_from_tokens(subtoken_strings)", "docstring": "Load from a file object.\n\nArgs:\n f: File object to load vocabulary from", "source": "juraj_google_style"} -{"code": "def make_fixture(model_class, **kwargs):\n\n all_fields = get_fields(model_class)\n\n fields_for_random_generation = map(\n lambda x: getattr(model_class, x), all_fields\n )\n\n overrides = {}\n\n for kwarg, value in kwargs.items():\n if kwarg in all_fields:\n kwarg_field = getattr(model_class, kwarg)\n fields_for_random_generation.remove(kwarg_field)\n overrides.update({kwarg_field: value})\n\n random_values = get_random_values(fields_for_random_generation)\n\n values = dict(overrides, **random_values)\n\n assert len(all_fields) == len(values), (\n \"Mismatch in values, {} != {}\".format(\n len(all_fields), len(values)\n )\n )\n data = {k.name: v for k, v in values.items()}\n return model_class(**data)", "docstring": "Take the model_klass and generate a fixure for it\n\nArgs:\n model_class (MongoEngine Document): model for which a fixture\n is needed\n kwargs (dict): any overrides instead of random values\n\nReturns:\n dict for now, other fixture types are not implemented yet", "source": "juraj_google_style"} -{"code": "def remove_indirect_links(g, alg=\"aracne\", **kwargs):\n\n alg = {\"aracne\": aracne,\n \"nd\": network_deconvolution,\n \"clr\": clr}[alg]\n mat = np.array(nx.adjacency_matrix(g).todense())\n return nx.relabel_nodes(nx.DiGraph(alg(mat, **kwargs)),\n {idx: i for idx, i in enumerate(list(g.nodes()))})", "docstring": "Apply deconvolution to a networkx graph.\n\nArgs:\n g (networkx.Graph): Graph to apply deconvolution to\n alg (str): Algorithm to use ('aracne', 'clr', 'nd')\n kwargs (dict): extra options for algorithms\n\nReturns:\n networkx.Graph: graph with undirected links removed.", "source": "juraj_google_style"} -{"code": "def publish(self, channel, message, pipeline=False):\n\n if pipeline:\n self._pipeline.publish(channel, message)\n else:\n self._db.publish(channel, message)", "docstring": "Post a message to a given channel.\n\nArgs:\n channel (str): Channel where the message will be published\n message (str): Message to publish\n pipeline (bool): True, start a transaction block. 
Default false.", "source": "juraj_google_style"} -{"code": "def delete_repo(self, repo_name=None, force=False, all=False):\n\n if not all:\n if repo_name:\n req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)\n self.stub.DeleteRepo(req, metadata=self.metadata)\n else:\n raise ValueError(\"Either a repo_name or all=True needs to be provided\")\n else:\n if not repo_name:\n req = proto.DeleteRepoRequest(force=force, all=all)\n self.stub.DeleteRepo(req, metadata=self.metadata)\n else:\n raise ValueError(\"Cannot specify a repo_name if all=True\")", "docstring": "Deletes a repo and reclaims the storage space it was using.\n\n Params:\n * repo_name: The name of the repo.\n * force: If set to true, the repo will be removed regardless of\n errors. This argument should be used with care.\n * all: Delete all repos.", "source": "juraj_google_style"} -{"code": "def intersect(df, other, index=False, keep='first'):\n\n\n validate_set_ops(df, other)\n if index:\n df_reset_index = df.reset_index()\n other_reset_index = other.reset_index()\n index_cols = [col for col in df_reset_index.columns if col not in df.columns]\n df_index_names = df.index.names\n return_df = (pd.merge(df_reset_index, other_reset_index,\n how='inner',\n left_on=df_reset_index.columns.values.tolist(),\n right_on=df_reset_index.columns.values.tolist())\n .set_index(index_cols))\n return_df.index.names = df_index_names\n return_df = return_df.drop_duplicates(keep=keep)\n return return_df\n else:\n return_df = pd.merge(df, other,\n how='inner',\n left_on=df.columns.values.tolist(),\n right_on=df.columns.values.tolist())\n return_df = return_df.drop_duplicates(keep=keep)\n return return_df", "docstring": "Returns rows that appear in both DataFrames.\n\nArgs:\n df (pandas.DataFrame): data passed in through the pipe.\n other (pandas.DataFrame): other DataFrame to use for set operation with\n the first.\n\n Kwargs:\n index (bool): Boolean indicating whether to consider the pandas index\n as part of the set operation (default `False`).\n keep (str): Indicates which duplicate should be kept. 
Options are `'first'`\n and `'last'`.", "source": "juraj_google_style"} -{"code": "def __init__(self, broker, queue_output, backend=None,\n max_tasks_in_memory=None, max_workers_in_memory=None):\n\n self._app = Celery(broker=broker, backend=backend)\n self._queue_output = queue_output\n\n from celery.backends.base import DisabledBackend\n self._use_result_backend = not isinstance(self._app.backend, DisabledBackend)\n\n logger.info('Creating %s: max_tasks=%d; max_workers=%d',\n EventListener.__name__, max_tasks_in_memory, max_workers_in_memory)\n logger.info('Celery broker=%s; backend=%s; using_result_backend=%s',\n broker, backend, self._use_result_backend)\n\n # events handling: storage and filling missing states.\n self.memory = State(\n max_tasks_in_memory=max_tasks_in_memory,\n max_workers_in_memory=max_workers_in_memory,\n ) # type: State\n\n # running engine (should be asyncio in the future)\n self._listener_thread = None # type:threading.Thread\n self._celery_receiver = None # type:EventReceiver\n\n # concurrency control\n self._wait_event = threading.Event()\n\n # detect shutdown.\n def sigterm_handler(_signo, _stack_frame): # pragma: no cover\n self.__stop()\n\n signal.signal(signal.SIGTERM, sigterm_handler)\n self.__start()", "docstring": "Constructs an event listener instance.\n\nArgs:\n broker (str): the broker being used by the celery system.\n queue_output (Queue): to send to streaming dispatcher.\n backend (str): the result backend being used by the celery system.\n max_tasks_in_memory (int): max tasks stored\n max_workers_in_memory (int): max workers stored", "source": "juraj_google_style"} -{"code": "def __call__(self, obj, **cfg):\n\n depth = self.config.depth or 0\n seen_on_path = self.config.seen_on_path or frozenset()\n cfg.setdefault('depth', depth + 1)\n cfg['seen_on_path'] = seen_on_path | {id(obj)}\n h = self.with_config(cfg)\n max_depth = h.config.max_depth\n\n if h.config.preprocess:\n obj = h.config.preprocess(obj, hrepr)\n\n if id(obj) in seen_on_path:\n # This object is a child of itself, so we display a neat\n # little loop to avoid busting the stack.\n result = self.H.span['hrepr-circular']('⥁')\n elif max_depth is not None and depth >= max_depth:\n result = h._hrepr(obj, self.type_handlers_short,\n ['__hrepr_short__'], self.stdrepr_short)\n else:\n result = h._hrepr(obj, self.type_handlers,\n ['__hrepr__', '__hrepr_short__'],\n self.stdrepr)\n\n if h.config.postprocess:\n return h.config.postprocess(obj, result, h.H, h) or result\n else:\n return result", "docstring": "Return the HTML representation of an object.\n\nArgs:\n obj: The object to represent\n cfg: Configuration to add to the current\n configuration for this operation.\n\nReturns:\n The representation of the object.", "source": "juraj_google_style"} -{"code": "def ApprovalRevokeRaw(aff4_path, token):\n\n try:\n urn = rdf_client.ClientURN(aff4_path)\n except type_info.TypeValueError:\n urn = rdfvalue.RDFURN(aff4_path)\n\n approval_urn = aff4.ROOT_URN.Add(\"ACL\").Add(urn.Path()).Add(\n token.username).Add(utils.EncodeReasonString(token.reason))\n\n super_token = access_control.ACLToken(username=\"raw-approval-superuser\")\n super_token.supervisor = True\n\n approval_request = aff4.FACTORY.Open(\n approval_urn, mode=\"rw\", token=super_token)\n approval_request.DeleteAttribute(approval_request.Schema.APPROVER)\n approval_request.Close()", "docstring": "Revokes an approval for a given token.\n\n This method requires raw datastore access to manipulate approvals directly.\n\nArgs:\n aff4_path: The 
aff4_path or client id the approval should be created for.\n token: The token that should be revoked.", "source": "juraj_google_style"} -{"code": "def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png'):\n\n# Causes problems in CLI script.\n# if not os.path.exists(directory):\n# os.makedirs(directory)\n for tile in tiles:\n tile.save(filename=tile.generate_filename(prefix=prefix,\n directory=directory,\n format=format), \n format=format)\n return tuple(tiles)", "docstring": "Write image files to disk. Create specified folder(s) if they\n don't exist. Return list of :class:`Tile` instance.\n\nArgs:\n tiles (list): List, tuple or set of :class:`Tile` objects to save.\n prefix (str): Filename prefix of saved tiles.\n\n Kwargs:\n directory (str): Directory to save tiles. Created if non-existant.\n\nReturns:\n Tuple of :class:`Tile` instances.", "source": "juraj_google_style"} -{"code": "def __init__(\n self, location=None, parent=None, volume_index=None, **kwargs):\n\n if not parent:\n raise ValueError('Missing parent value.')\n\n super(APFSContainerPathSpec, self).__init__(parent=parent, **kwargs)\n self.location = location\n self.volume_index = volume_index", "docstring": "Initializes a path specification.\n\n Note that an APFS container path specification must have a parent.\n\nArgs:\n location (Optional[str]): location.\n parent (Optional[PathSpec]): parent path specification.\n volume_index (Optional[int]): index of the volume within the container.\n\nRaises:\n ValueError: when parent is not set.", "source": "juraj_google_style"} -{"code": "def read_structs(fstream):\n\n struct = read_struct(fstream)\n while struct is not None:\n yield struct\n struct = read_struct(fstream)", "docstring": "Read all structs from likwid's file stream.\n\nArgs:\n fstream: Likwid's output file stream.\n\nReturns:\n A generator that can be used to iterate over all structs in the\n fstream.", "source": "juraj_google_style"} -{"code": "def Parse(text, message):\n\n if not isinstance(text, six.text_type): text = text.decode('utf-8')\n try:\n if sys.version_info < (2, 7):\n # object_pair_hook is not supported before python2.7\n js = json.loads(text)\n else:\n js = json.loads(text, object_pairs_hook=_DuplicateChecker)\n except ValueError as e:\n raise ParseError('Failed to load JSON: {0}.'.format(str(e)))\n _ConvertMessage(js, message)\n return message", "docstring": "Parses a JSON representation of a protocol message into a message.\n\nArgs:\n text: Message JSON representation.\n message: A protocol beffer message to merge into.\n\nReturns:\n The same message passed as argument.\n\n Raises::\n ParseError: On JSON parsing problems.", "source": "juraj_google_style"} -{"code": "def getRowByIndex(self, index):\n\n assert isinstance(index, int)\n return Row(self._impl.getRowByIndex(index))", "docstring": "Get row by numeric index.\n\nArgs:\n index: Zero-based index of the row to get.\n\nReturns:\n The corresponding row.", "source": "juraj_google_style"} -{"code": "def resolve_type(arg):\n # type: (object) -> InternalType\n\n arg_type = type(arg)\n if arg_type == list:\n assert isinstance(arg, list) # this line helps mypy figure out types\n sample = arg[:min(4, len(arg))]\n tentative_type = TentativeType()\n for sample_item in sample:\n tentative_type.add(resolve_type(sample_item))\n return ListType(tentative_type)\n elif arg_type == set:\n assert isinstance(arg, set) # this line helps mypy figure out types\n sample = []\n iterator = iter(arg)\n for i in range(0, min(4, len(arg))):\n 
sample.append(next(iterator))\n tentative_type = TentativeType()\n for sample_item in sample:\n tentative_type.add(resolve_type(sample_item))\n return SetType(tentative_type)\n elif arg_type == FakeIterator:\n assert isinstance(arg, FakeIterator) # this line helps mypy figure out types\n sample = []\n iterator = iter(arg)\n for i in range(0, min(4, len(arg))):\n sample.append(next(iterator))\n tentative_type = TentativeType()\n for sample_item in sample:\n tentative_type.add(resolve_type(sample_item))\n return IteratorType(tentative_type)\n elif arg_type == tuple:\n assert isinstance(arg, tuple) # this line helps mypy figure out types\n sample = list(arg[:min(10, len(arg))])\n return TupleType([resolve_type(sample_item) for sample_item in sample])\n elif arg_type == dict:\n assert isinstance(arg, dict) # this line helps mypy figure out types\n key_tt = TentativeType()\n val_tt = TentativeType()\n for i, (k, v) in enumerate(iteritems(arg)):\n if i > 4:\n break\n key_tt.add(resolve_type(k))\n val_tt.add(resolve_type(v))\n return DictType(key_tt, val_tt)\n else:\n return type(arg)", "docstring": "Resolve object to one of our internal collection types or generic built-in type.\n\nArgs:\n arg: object to resolve", "source": "juraj_google_style"} -{"code": "def reqScannerData(\n self, subscription: ScannerSubscription,\n scannerSubscriptionOptions: List[TagValue] = None,\n scannerSubscriptionFilterOptions:\n List[TagValue] = None) -> ScanDataList:\n\n return self._run(\n self.reqScannerDataAsync(\n subscription, scannerSubscriptionOptions,\n scannerSubscriptionFilterOptions))", "docstring": "Do a blocking market scan by starting a subscription and canceling it\n after the initial list of results are in.\n\n This method is blocking.\n\n https://interactivebrokers.github.io/tws-api/market_scanners.html\n\nArgs:\n subscription: Basic filters.\n scannerSubscriptionOptions: Unknown.\n scannerSubscriptionFilterOptions: Advanced generic filters.", "source": "juraj_google_style"} -{"code": "def __generate_reference__(self, triple_map, **kwargs):\n\n raw_value = self.source.get(str(triple_map.reference))\n if raw_value is None or len(raw_value) < 1:\n return\n if hasattr(triple_map, \"datatype\"):\n if triple_map.datatype == NS_MGR.xsd.anyURI.rdflib:\n output = rdflib.URIRef(raw_value)\n else:\n output = rdflib.Literal(\n raw_value,\n datatype=triple_map.datatype)\n else:\n output = rdflib.Literal(raw_value)\n return output", "docstring": "Generates a RDF entity based on triple map\n\nArgs:\n triple_map(SimpleNamespace): Triple Map", "source": "juraj_google_style"} -{"code": "def save(self, clean=True):\n\n ret = {}\n if clean:\n self._dirty = False\n else:\n ret['_dirty'] = self._dirty\n return ret", "docstring": "Serialize into raw representation. Clears the dirty bit by default.\n\nArgs:\n clean (bool): Whether to clear the dirty bit.\n\nReturns:\n dict: Raw.", "source": "juraj_google_style"} -{"code": "def by_name(name):\n\n devices = discover(all_households=True)\n for device in (devices or []):\n if device.player_name == name:\n return device\n return None", "docstring": "Return a device by name.\n\nArgs:\n name (str): The name of the device to return.\n\nReturns:\n :class:`~.SoCo`: The first device encountered among all zone with the\n given player name. 
If none are found `None` is returned.", "source": "juraj_google_style"} -{"code": "def add_recipe_actions(self, recipe_actions):\n\n for action_name, action in recipe_actions:\n self._recipe_actions[action_name] = action", "docstring": "Add additional valid recipe actions to RecipeManager\n\nArgs:\n recipe_actions (list): List of tuples. First value of tuple is the classname,\n second value of tuple is RecipeAction Object", "source": "juraj_google_style"} -{"code": "def __set_cfg_reqs__(self, requirements=None, **kwargs):\n\n if requirements:\n self.__cfg_reqs__.update(requirements)\n for attr in kwargs.get('remove_reqs', []):\n try:\n del self.__cfg_reqs__[attr]\n except KeyError:\n pass", "docstring": "Applies any new requirements\n\nArgs:\n requirements: dictionary of attribute requirements\n\n kwargs:\n remove_reqs: list of requirement names to remove", "source": "juraj_google_style"} -{"code": "def __init__(self, latitude, longitude, altitude, name=None,\n identity=None):\n\n super(Trigpoint, self).__init__(latitude, longitude)\n self.altitude = altitude\n self.name = name\n self.identity = identity", "docstring": "Initialise a new ``Trigpoint`` object.\n\nArgs:\n latitude (float): Location's latitude\n longitude (float): Location's longitude\n altitude (float): Location's altitude\n name (str): Name for location\n identity (int): Database identifier, if known", "source": "juraj_google_style"} -{"code": "def _Open(self, path_spec=None, mode='rb'):\n\n if not path_spec:\n raise ValueError('Missing path specification.')\n\n if path_spec.HasParent():\n raise errors.PathSpecError('Unsupported path specification with parent.')\n\n location = getattr(path_spec, 'location', None)\n\n if location is None:\n raise errors.PathSpecError('Path specification missing location.')\n\n # Windows does not support running os.stat on device files so we use\n # libsmdev to do an initial check.\n try:\n is_device = pysmdev.check_device(location)\n except IOError as exception:\n # Since os.stat() will not recognize Windows device file names and\n # will return '[Error 87] The parameter is incorrect' we check here\n # if pysmdev exception message contains ' access denied ' and raise\n # AccessError instead.\n\n # Note that exception.message no longer works in Python 3.\n exception_string = str(exception)\n if not isinstance(exception_string, py2to3.UNICODE_TYPE):\n exception_string = py2to3.UNICODE_TYPE(\n exception_string, errors='replace')\n\n if ' access denied ' in exception_string:\n raise errors.AccessError(\n 'Access denied to file: {0:s} with error: {1!s}'.format(\n location, exception_string))\n is_device = False\n\n if not is_device:\n try:\n stat_info = os.stat(location)\n except OSError as exception:\n raise IOError('Unable to open file with error: {0!s}.'.format(\n exception))\n\n # In case the libsmdev check is not able to detect the device also use\n # the stat information.\n if stat.S_ISCHR(stat_info.st_mode) or stat.S_ISBLK(stat_info.st_mode):\n is_device = True\n\n if is_device:\n self._file_object = pysmdev.handle()\n self._file_object.open(location, mode=mode)\n self._size = self._file_object.media_size\n\n else:\n self._file_object = open(location, mode=mode)\n self._size = stat_info.st_size", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\n path_spec (PathSpec): path specification.\n mode (Optional[str]): file access mode.\n\nRaises:\n AccessError: if the access to open the file was denied.\n IOError: if the file-like object could not be opened.\n 
OSError: if the file-like object could not be opened.\n PathSpecError: if the path specification is incorrect.\n ValueError: if the path specification is invalid.", "source": "juraj_google_style"} -{"code": "def global_step(device=''):\n\n global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)\n if global_step_ref:\n return global_step_ref[0]\n else:\n collections = [\n VARIABLES_TO_RESTORE,\n tf.GraphKeys.GLOBAL_VARIABLES,\n tf.GraphKeys.GLOBAL_STEP,\n ]\n # Get the device for the variable.\n with tf.device(variable_device(device, 'global_step')):\n return tf.get_variable('global_step', shape=[], dtype=tf.int64,\n initializer=tf.zeros_initializer(),\n trainable=False, collections=collections)", "docstring": "Returns the global step variable.\n\nArgs:\n device: Optional device to place the variable. It can be an string or a\n function that is called to get the device for the variable.\n\nReturns:\n the tensor representing the global step variable.", "source": "juraj_google_style"} -{"code": "def _method_url(self, method_name):\n\n return \"{base_url}/api/{api}/{method}\".format(\n base_url=self._base_url(),\n api=self.api_version,\n method=method_name\n )", "docstring": "Generate the URL for the requested method\n\nArgs:\n method_name (str): Name of the method\n\nReturns:\n A string containing the URL of the method", "source": "juraj_google_style"} -{"code": "def block_start(self, previous_block):\n\n\n previous_header_bytes = previous_block.header\n previous_header = BlockHeader()\n previous_header.ParseFromString(previous_header_bytes)\n\n block_info = BlockInfo(\n block_num=previous_header.block_num,\n previous_block_id=previous_header.previous_block_id,\n signer_public_key=previous_header.signer_public_key,\n header_signature=previous_block.header_signature,\n timestamp=int(time.time()))\n\n return [self.create_batch(block_info)]", "docstring": "Returns an ordered list of batches to inject at the beginning of the\n block. 
Can also return None if no batches should be injected.\n\nArgs:\n previous_block (Block): The previous block.\n\nReturns:\n A list of batches to inject.", "source": "juraj_google_style"} -{"code": "def dict_strip(d):\n\n _d = deepcopy(d)\n for k, v in iteritems(d):\n if isinstance(v, str):\n _d[k] = v.strip()\n elif isinstance(v, dict):\n _d[k] = dict_strip(v)\n\n return _d", "docstring": "Strips whitespace from the string values of the given dictionary (recursively).\n\nArgs:\n d: A dictionary object.\n\nReturns:\n A new dictionary object, whose string values' whitespace has been stripped out.", "source": "juraj_google_style"} -{"code": "def download(self, file: Optional[IO[bytes]]=None,\n duration_timeout: Optional[float]=None):\n\n yield from \\\n self._current_session.download(file, duration_timeout=duration_timeout)", "docstring": "Download content.\n\nArgs:\n file: An optional file object for the document contents.\n duration_timeout: Maximum time in seconds of which the\n entire file must be read.\n\nReturns:\n Response: An instance of :class:`.http.request.Response`.\n\n See :meth:`WebClient.session` for proper usage of this function.\n\n Coroutine.", "source": "juraj_google_style"} -{"code": "def run(self, command, block=True, cwd=None, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE):\n\n if cwd is None:\n cwd = self.cwd\n\n return ShellCommand(command=command, logger=self.logger, block=block, cwd=cwd, stdin=stdin, stdout=stdout, stderr=stderr).run()", "docstring": "Create an instance of :class:`~ShellCommand` and run it\n\nArgs:\n command (str): :class:`~ShellCommand`\n block (bool): See :class:`~ShellCommand`\n cwd (str): Override the runner cwd. Useb by the :class:`~ShellCommand` instance", "source": "juraj_google_style"} -{"code": "def check_updates(transformers, datastore=None, stimuli=None):\n\n # Find datastore file\n datastore = datastore or expanduser('~/.pliers_updates')\n prior_data = pd.read_csv(datastore) if exists(datastore) else None\n\n # Load stimuli\n stimuli = stimuli or glob.glob(\n join(dirname(realpath(__file__)), '../tests/data/image/CC0/*'))\n stimuli = load_stims(stimuli)\n\n # Get transformers\n loaded_transformers = {get_transformer(name, **params): (name, params)\n for name, params in transformers}\n\n # Transform stimuli\n results = pd.DataFrame({'time_extracted': [datetime.datetime.now()]})\n for trans in loaded_transformers.keys():\n for stim in stimuli:\n if trans._stim_matches_input_types(stim):\n res = trans.transform(stim)\n\n try: # Add iterable\n res = [getattr(res, '_data', res.data) for r in res]\n except TypeError:\n res = getattr(res, '_data', res.data)\n\n res = hash_data(res)\n\n results[\"{}.{}\".format(trans.__hash__(), stim.name)] = [res]\n\n # Check for mismatches\n mismatches = []\n if prior_data is not None:\n last = prior_data[\n prior_data.time_extracted == prior_data.time_extracted.max()]. 
\\\n iloc[0].drop('time_extracted')\n\n for label, value in results.iteritems():\n old = last.get(label)\n new = value.values[0]\n\n if old is not None:\n if isinstance(new, str):\n if new != old:\n mismatches.append(label)\n elif not np.isclose(old, new):\n mismatches.append(label)\n\n results = prior_data.append(results)\n\n results.to_csv(datastore, index=False)\n\n # Get corresponding transformer name and parameters\n def get_trans(hash_tr):\n for obj, attr in loaded_transformers.items():\n if str(obj.__hash__()) == hash_tr:\n return attr\n\n delta_t = set([m.split('.')[0] for m in mismatches])\n delta_t = [get_trans(dt) for dt in delta_t]\n\n return {'transformers': delta_t, 'mismatches': mismatches}", "docstring": "Run transformers through a battery of stimuli, and check if output has\n changed. Store results in csv file for comparison.\n\nArgs:\n transformers (list): A list of tuples of transformer names and\n dictionary of parameters to instantiate with (or empty dict).\n datastore (str): Filepath of CSV file with results. Stored in home dir\n by default.\n stimuli (list): List of stimuli file paths to extract from. If None,\n use test data.", "source": "juraj_google_style"} -{"code": "def Print(self, output_writer):\n\n if self._filters:\n output_writer.Write('Filters:\\n')\n for file_entry_filter in self._filters:\n file_entry_filter.Print(output_writer)", "docstring": "Prints a human readable version of the filter.\n\nArgs:\n output_writer (CLIOutputWriter): output writer.", "source": "juraj_google_style"} -{"code": "def create_per_test_excerpt(self, current_test_info):\n\n self.pause()\n dest_path = current_test_info.output_path\n utils.create_dir(dest_path)\n self._ad.log.debug('AdbLog excerpt location: %s', dest_path)\n shutil.move(self.adb_logcat_file_path, dest_path)\n self.resume()", "docstring": "Convenient method for creating excerpts of adb logcat.\n\n To use this feature, call this method at the end of: `setup_class`,\n `teardown_test`, and `teardown_class`.\n\n This moves the current content of `self.adb_logcat_file_path` to the\n log directory specific to the current test.\n\nArgs:\n current_test_info: `self.current_test_info` in a Mobly test.", "source": "juraj_google_style"} -{"code": "def update_file(filename, result, content, indent):\n\n # Split the file into frontmatter and content\n parts = re.split('---+', content, 2)\n\n # Load the frontmatter into an object\n frontmatter = yaml.safe_load(parts[1])\n\n # Add the counts entry in the results object to the frontmatter\n frontmatter['counts'] = result['counts']\n\n # Set the frontmatter part backed to the stringified version of the\n # frontmatter object\n parts[1] = '\\n{}'.format(\n yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent))\n result = '---'.join(parts)\n\n # Write everything back to the file\n with open(filename, 'wb') as f:\n f.write(result.encode('utf-8'))\n print('{} updated.'.format(filename))", "docstring": "Updates a Jekyll file to contain the counts form an object\n\n This just converts the results to YAML and adds to the Jekyll frontmatter.\n\nArgs:\n filename: the Jekyll file to update\n result: the results object from `wc`\n content: the contents of the original file\n indent: the indentation level for dumping YAML", "source": "juraj_google_style"} -{"code": "def patch_splitMax(self, patches):\n\n patch_size = self.Match_MaxBits\n if patch_size == 0:\n # Python has the option of not splitting strings due to its ability\n # to handle integers of arbitrary precision.\n 
return\n for x in range(len(patches)):\n if patches[x].length1 <= patch_size:\n continue\n bigpatch = patches[x]\n # Remove the big old patch.\n del patches[x]\n x -= 1\n start1 = bigpatch.start1\n start2 = bigpatch.start2\n precontext = ''\n while len(bigpatch.diffs) != 0:\n # Create one of several smaller patches.\n patch = patch_obj()\n empty = True\n patch.start1 = start1 - len(precontext)\n patch.start2 = start2 - len(precontext)\n if precontext:\n patch.length1 = patch.length2 = len(precontext)\n patch.diffs.append((self.DIFF_EQUAL, precontext))\n\n while (len(bigpatch.diffs) != 0 and\n patch.length1 < patch_size - self.Patch_Margin):\n (diff_type, diff_text) = bigpatch.diffs[0]\n if diff_type == self.DIFF_INSERT:\n # Insertions are harmless.\n patch.length2 += len(diff_text)\n start2 += len(diff_text)\n patch.diffs.append(bigpatch.diffs.pop(0))\n empty = False\n elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and\n patch.diffs[0][0] == self.DIFF_EQUAL and\n len(diff_text) > 2 * patch_size):\n # This is a large deletion. Let it pass in one chunk.\n patch.length1 += len(diff_text)\n start1 += len(diff_text)\n empty = False\n patch.diffs.append((diff_type, diff_text))\n del bigpatch.diffs[0]\n else:\n # Deletion or equality. Only take as much as we can stomach.\n diff_text = diff_text[:patch_size - patch.length1 -\n self.Patch_Margin]\n patch.length1 += len(diff_text)\n start1 += len(diff_text)\n if diff_type == self.DIFF_EQUAL:\n patch.length2 += len(diff_text)\n start2 += len(diff_text)\n else:\n empty = False\n\n patch.diffs.append((diff_type, diff_text))\n if diff_text == bigpatch.diffs[0][1]:\n del bigpatch.diffs[0]\n else:\n bigpatch.diffs[0] = (bigpatch.diffs[0][0],\n bigpatch.diffs[0][1][len(diff_text):])\n\n # Compute the head context for the next patch.\n precontext = self.diff_text2(patch.diffs)\n precontext = precontext[-self.Patch_Margin:]\n # Append the end context for this patch.\n postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]\n if postcontext:\n patch.length1 += len(postcontext)\n patch.length2 += len(postcontext)\n if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:\n patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +\n postcontext)\n else:\n patch.diffs.append((self.DIFF_EQUAL, postcontext))\n\n if not empty:\n x += 1\n patches.insert(x, patch)", "docstring": "Look through the patches and break up any which are longer than the\n maximum limit of the match algorithm.\n Intended to be called only from within patch_apply.\n\nArgs:\n patches: Array of Patch objects.", "source": "juraj_google_style"} -{"code": "def from_json(cls, json):\n\n if json[cls.KEY_RANGE_PARAM] is None:\n # pylint: disable=redefined-outer-name\n key_ranges = None\n else:\n key_ranges = []\n for k in json[cls.KEY_RANGE_PARAM]:\n if k:\n key_ranges.append(key_range.KeyRange.from_json(k))\n else:\n key_ranges.append(None)\n\n if json[cls.NAMESPACE_RANGE_PARAM] is None:\n ns_range = None\n else:\n ns_range = namespace_range.NamespaceRange.from_json_object(\n json[cls.NAMESPACE_RANGE_PARAM])\n\n if json[cls.CURRENT_KEY_RANGE_PARAM] is None:\n current_key_range = None\n else:\n current_key_range = key_range.KeyRange.from_json(\n json[cls.CURRENT_KEY_RANGE_PARAM])\n\n return cls(\n json[cls.ENTITY_KIND_PARAM],\n key_ranges,\n ns_range,\n json[cls.BATCH_SIZE_PARAM],\n current_key_range,\n filters=json.get(cls.FILTERS_PARAM))", "docstring": "Create new DatastoreInputReader from the json, encoded by to_json.\n\nArgs:\n json: json map representation of 
DatastoreInputReader.\n\nReturns:\n an instance of DatastoreInputReader with all data deserialized from json.", "source": "juraj_google_style"} -{"code": "def __init__(self, logger=logging, instance_config_metadata=None):\n\n self.logger = logger\n self.instance_config_metadata = instance_config_metadata\n self.instance_config_header %= (\n self.instance_config_script, self.instance_config_template)\n # User provided instance configs should always take precedence.\n super(InstanceConfig, self).__init__(\n config_file=self.instance_config_template,\n config_header=self.instance_config_header)\n\n # Use the instance config settings from metadata if specified. Then use\n # settings in an instance config file if one exists. If a config\n # file does not already exist, try to use the distro provided defaults. If\n # no file exists, use the default configuration settings.\n config_files = [self.instance_config, self.instance_config_distro]\n config_defaults = []\n if self.instance_config_metadata:\n config = parser.Parser()\n try:\n config.read_file(stringio.StringIO(self.instance_config_metadata))\n except parser.Error as e:\n self.logger.error('Error parsing metadata configs: %s', str(e))\n else:\n config_defaults.append(\n dict((s, dict(config.items(s))) for s in config.sections()))\n for config_file in config_files:\n if os.path.exists(config_file):\n config = parser.Parser()\n try:\n config.read(config_file)\n except parser.Error as e:\n self.logger.error('Error parsing config file: %s', str(e))\n else:\n config_defaults.append(\n dict((s, dict(config.items(s))) for s in config.sections()))\n config_defaults.append(self.instance_config_options)\n\n for defaults in config_defaults:\n for section, options in sorted(defaults.items()):\n for option, value in sorted(options.items()):\n super(InstanceConfig, self).SetOption(\n section, option, value, overwrite=False)", "docstring": "Constructor.\n\n Inherit from the ConfigManager class. Read the template for instance\n defaults and write new sections and options. 
This prevents package\n updates from overriding user set defaults.\n\nArgs:\n logger: logger object, used to write to SysLog and serial port.\n instance_config_metadata: string, a config file specified in metadata.", "source": "juraj_google_style"} -{"code": "def long_id(self, sample):\n\n if self.grid == 'WAC':\n lon = self.CENTER_LONGITUDE + (sample - self.SAMPLE_PROJECTION_OFFSET - 1)\\\n * self.MAP_SCALE * 1e-3 / (self.A_AXIS_RADIUS * np.cos(self.CENTER_LATITUDE * np.pi / 180.0))\n return lon * 180 / np.pi\n else:\n lon = float(self.CENTER_LONGITUDE) + \\\n (sample - float(self.SAMPLE_PROJECTION_OFFSET) - 1)\\\n / float(self.MAP_RESOLUTION)\n return lon", "docstring": "Return the corresponding longitude\n\nArgs:\n sample (int): sample number on a line\n\nReturns:\n Correponding longidude in degree", "source": "juraj_google_style"} -{"code": "def playlists(self, *, include_songs=False):\n\n\n\t\tplaylist_list = []\n\t\tfor chunk in self.playlists_iter(page_size=49995):\n\t\t\tfor playlist in chunk:\n\t\t\t\tif include_songs:\n\t\t\t\t\tplaylist['tracks'] = self.playlist_songs(playlist)\n\n\t\t\t\tplaylist_list.append(playlist)\n\n\t\treturn playlist_list", "docstring": "Get a listing of library playlists.\n\n Parameters:\n include_songs (bool, Optional): Include songs in the returned playlist dicts.\n Default: ``False``.\n\nReturns:\n list: A list of playlist dicts.", "source": "juraj_google_style"} -{"code": "def etherscan_verify_contract(\n chain_id: int,\n apikey: str,\n source_module: DeploymentModule,\n contract_name: str,\n):\n\n etherscan_api = api_of_chain_id[chain_id]\n deployment_info = get_contracts_deployment_info(\n chain_id=chain_id,\n module=source_module,\n )\n if deployment_info is None:\n raise FileNotFoundError(\n f'Deployment file not found for chain_id={chain_id} and module={source_module}',\n )\n contract_manager = ContractManager(contracts_precompiled_path())\n\n data = post_data_for_etherscan_verification(\n apikey=apikey,\n deployment_info=deployment_info['contracts'][contract_name],\n source=join_sources(source_module=source_module, contract_name=contract_name),\n contract_name=contract_name,\n metadata=json.loads(contract_manager.contracts[contract_name]['metadata']),\n constructor_args=get_constructor_args(\n deployment_info=deployment_info,\n contract_name=contract_name,\n contract_manager=contract_manager,\n ),\n )\n response = requests.post(etherscan_api, data=data)\n content = json.loads(response.content.decode())\n print(content)\n print(f'Status: {content[\"status\"]}; {content[\"message\"]} ; GUID = {content[\"result\"]}')\n\n etherscan_url = etherscan_api.replace('api-', '').replace('api', '')\n etherscan_url += '/verifyContract2?a=' + data['contractaddress']\n manual_submission_guide = f\n\n if content['status'] != '1':\n if content['result'] == 'Contract source code already verified':\n return\n else:\n raise ValueError(\n 'Etherscan submission failed for an unknown reason\\n' +\n manual_submission_guide,\n )\n\n # submission succeeded, obtained GUID\n guid = content['result']\n status = '0'\n retries = 10\n while status == '0' and retries > 0:\n retries -= 1\n r = guid_status(etherscan_api=etherscan_api, guid=guid)\n status = r['status']\n if r['result'] == 'Fail - Unable to verify':\n raise ValueError(manual_submission_guide)\n if r['result'] == 'Pass - Verified':\n return\n print('Retrying...')\n sleep(5)\n raise TimeoutError(manual_submission_guide)", "docstring": "Calls Etherscan API for verifying the Solidity source of a contract.\n\nArgs:\n 
chain_id: EIP-155 chain id of the Ethereum chain\n apikey: key for calling Etherscan API\n source_module: a module name to look up contracts_source_path()\n contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.", "source": "juraj_google_style"} -{"code": "def CompileReport(self, mediator):\n\n report_text = [\n 'Sessionize plugin identified {0:d} sessions and '\n 'applied {1:d} tags.'.format(\n len(self._events_per_session), self._number_of_event_tags)]\n for session, event_count in enumerate(self._events_per_session):\n report_text.append('\\tSession {0:d}: {1:d} events'.format(\n session, event_count))\n report_text = '\\n'.join(report_text)\n return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)", "docstring": "Compiles an analysis report.\n\nArgs:\n mediator (AnalysisMediator): mediates interactions between\n analysis plugins and other components, such as storage and dfvfs.\n\nReturns:\n AnalysisReport: analysis report.", "source": "juraj_google_style"} -{"code": "def set_level(level):\n\n Logger.level = level\n for logger in Logger.loggers.values():\n logger.setLevel(level)", "docstring": "Set level of logging for all loggers.\n\nArgs:\n level (int): level of logging.", "source": "juraj_google_style"} -{"code": "def ParseRow(self, parser_mediator, row_offset, row):\n\n filename = row.get('name', None)\n md5_hash = row.get('md5', None)\n mode = row.get('mode_as_string', None)\n\n inode_number = row.get('inode', None)\n if '-' in inode_number:\n inode_number, _, _ = inode_number.partition('-')\n\n try:\n inode_number = int(inode_number, 10)\n except (TypeError, ValueError):\n inode_number = None\n\n data_size = self._GetIntegerValue(row, 'size')\n user_uid = self._GetIntegerValue(row, 'uid')\n user_gid = self._GetIntegerValue(row, 'gid')\n\n event_data = MactimeEventData()\n event_data.filename = filename\n event_data.inode = inode_number\n event_data.md5 = md5_hash\n event_data.mode_as_string = mode\n event_data.offset = row_offset\n event_data.size = data_size\n event_data.user_gid = user_gid\n\n if user_uid is None:\n event_data.user_sid = None\n else:\n # Note that the user_sid value is expected to be a string.\n event_data.user_sid = '{0:d}'.format(user_uid)\n\n for value_name, timestamp_description in iter(\n self._TIMESTAMP_DESC_MAP.items()):\n posix_time = self._GetIntegerValue(row, value_name)\n # mactime will return 0 if the timestamp is not set.\n if not posix_time:\n continue\n\n date_time = dfdatetime_posix_time.PosixTime(timestamp=posix_time)\n event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a line of the log file and produces events.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n row_offset (int): number of the corresponding line.\n row (dict[str, str]): fields of a single row, as specified in COLUMNS.", "source": "juraj_google_style"} -{"code": "def split_input(cls, job_config):\n\n reader_params = job_config.input_reader_params\n bucket = reader_params[cls.BUCKET_NAME_PARAM]\n filenames = reader_params[cls.OBJECT_NAMES_PARAM]\n delimiter = reader_params.get(cls.DELIMITER_PARAM)\n account_id = reader_params.get(cls._ACCOUNT_ID_PARAM)\n buffer_size = reader_params.get(cls.BUFFER_SIZE_PARAM)\n path_filter = reader_params.get(cls.PATH_FILTER_PARAM)\n\n # Gather the complete list of files (expanding wildcards)\n all_filenames = []\n for filename in 
filenames:\n if filename.endswith(\"*\"):\n all_filenames.extend(\n [file_stat.filename for file_stat in cloudstorage.listbucket(\n \"/\" + bucket + \"/\" + filename[:-1], delimiter=delimiter,\n _account_id=account_id)])\n else:\n all_filenames.append(\"/%s/%s\" % (bucket, filename))\n\n # Split into shards\n readers = []\n for shard in range(0, job_config.shard_count):\n shard_filenames = all_filenames[shard::job_config.shard_count]\n if shard_filenames:\n readers.append(cls(\n shard_filenames, buffer_size=buffer_size, _account_id=account_id,\n delimiter=delimiter, path_filter=path_filter))\n return readers", "docstring": "Returns a list of input readers.\n\n An equal number of input files are assigned to each shard (+/- 1). If there\n are fewer files than shards, fewer than the requested number of shards will\n be used. Input files are currently never split (although for some formats\n could be and may be split in a future implementation).\n\nArgs:\n job_config: map_job.JobConfig\n\nReturns:\n A list of InputReaders. None when no input data can be found.", "source": "juraj_google_style"} -{"code": "def addDataDocuments(self, scidata_pid_list, scimeta_pid=None):\n\n mpids = self.getAggregatedScienceMetadataPids()\n if scimeta_pid is None:\n if len(mpids) > 1:\n raise ValueError(\n \"No metadata PID specified and more than one choice available.\"\n )\n scimeta_pid = mpids[0]\n else:\n if scimeta_pid not in mpids:\n self.addMetadataDocument(scimeta_pid)\n\n for dpid in scidata_pid_list:\n self.addResource(dpid)\n self.setDocumentedBy(dpid, scimeta_pid)\n self.setDocuments(scimeta_pid, dpid)", "docstring": "Add Science Data object(s)\n\nArgs:\n scidata_pid_list : list of str\n List of one or more PIDs of Science Data objects\n\n scimeta_pid: str\n PID of a Science Metadata object that documents the Science Data objects.", "source": "juraj_google_style"} -{"code": "def validate(self, definition, version=None, strict=False):\n\n if not HAS_KUBERNETES_VALIDATE:\n raise KubernetesValidateMissing()\n\n errors = list()\n warnings = list()\n try:\n if version is None:\n try:\n version = self.version['kubernetes']['gitVersion']\n except KeyError:\n version = kubernetes_validate.latest_version()\n kubernetes_validate.validate(definition, version, strict)\n except kubernetes_validate.utils.ValidationError as e:\n errors.append(\"resource definition validation error at %s: %s\" % ('.'.join([str(item) for item in e.path]), e.message)) # noqa: B306\n except VersionNotSupportedError as e:\n errors.append(\"Kubernetes version %s is not supported by kubernetes-validate\" % version)\n except kubernetes_validate.utils.SchemaNotFoundError as e:\n warnings.append(\"Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)\" %\n (e.kind, e.api_version, e.version))\n return warnings, errors", "docstring": "validate checks a kubernetes resource definition\n\nArgs:\n definition (dict): resource definition\n version (str): version of kubernetes to validate against\n strict (bool): whether unexpected additional properties should be considered errors\n\nReturns:\n warnings (list), errors (list): warnings are missing validations, errors are validation failures", "source": "juraj_google_style"} -{"code": "def restore(self, state):\n\n\n own_properties = set(self.get_properties())\n state_properties = set(state)\n\n to_restore = own_properties.intersection(state_properties)\n\n for name in to_restore:\n value = state.get(name)\n\n if name in 
self._complex_properties:\n value = self._complex_properties[name][1](value)\n\n setattr(self, name, value)", "docstring": "Restore this state from the output of a previous call to dump().\n\n Only those properties in this object and listed in state will be\n updated. Other properties will not be modified and state may contain\n keys that do not correspond with properties in this object.\n\nArgs:\n state (dict): A serialized representation of this object.", "source": "juraj_google_style"} -{"code": "def parse(self, extent, length, fp, log_block_size):\n # type: (int, int, BinaryIO, int) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Inode is already initialized')\n\n self.orig_extent_loc = extent\n\n self.data_length = length\n\n self.data_fp = fp\n self.manage_fp = False\n self.fp_offset = extent * log_block_size\n self.original_data_location = self.DATA_ON_ORIGINAL_ISO\n\n self._initialized = True", "docstring": "Parse an existing Inode. This just saves off the extent for later use.\n\n Parameters:\n extent - The original extent that the data lives at.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def add_moc_from_dict(self, moc_dict, moc_options = {}):\n\n self.moc_dict = moc_dict\n self.moc_options = moc_options\n self.moc_from_dict_flag = not self.moc_from_dict_flag", "docstring": "load a MOC from a dict object and display it in Aladin Lite widget\n\nArgs:\n moc_dict: the dict containing the MOC cells. Key are the HEALPix orders,\n values are the pixel indexes, eg: {\"1\":[1,2,4], \"2\":[12,13,14,21,23,25]}\n moc_options: dictionary object", "source": "juraj_google_style"} -{"code": "def set_prod_state(prod_state, device=None):\n\n\n if not device:\n device = __salt__['grains.get']('fqdn')\n\n device_object = find_device(device)\n\n if not device_object:\n return \"Unable to find a device in Zenoss for {0}\".format(device)\n\n log.info('Setting prodState to %d on %s device', prod_state, device)\n data = dict(uids=[device_object['uid']], prodState=prod_state, hashcheck=device_object['hash'])\n return _router_request('DeviceRouter', 'setProductionState', [data])", "docstring": "A function to set the prod_state in zenoss.\n\n Parameters:\n prod_state: (Required) Integer value of the state\n device: (Optional) Will use the grain 'fqdn' by default.\n\n CLI Example:\n salt zenoss.set_prod_state 1000 hostname", "source": "juraj_google_style"} -{"code": "def parse(self, valstr):\n # type: (bytes) -> bool\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog already initialized')\n\n if self.state == self.EXPECTING_VALIDATION_ENTRY:\n # The first entry in an El Torito boot catalog is the Validation\n # Entry. A Validation entry consists of 32 bytes (described in\n # detail in the parse_eltorito_validation_entry() method).\n self.validation_entry.parse(valstr)\n self.state = self.EXPECTING_INITIAL_ENTRY\n elif self.state == self.EXPECTING_INITIAL_ENTRY:\n # The next entry is the Initial/Default entry. An Initial/Default\n # entry consists of 32 bytes (described in detail in the\n # parse_eltorito_initial_entry() method).\n self.initial_entry.parse(valstr)\n self.state = self.EXPECTING_SECTION_HEADER_OR_DONE\n else:\n val = bytes(bytearray([valstr[0]]))\n if val == b'\\x00':\n # An empty entry tells us we are done parsing El Torito. 
Do\n # some sanity checks.\n last_section_index = len(self.sections) - 1\n for index, sec in enumerate(self.sections):\n if sec.num_section_entries != len(sec.section_entries):\n raise pycdlibexception.PyCdlibInvalidISO('El Torito section header specified %d entries, only saw %d' % (sec.num_section_entries, len(sec.section_entries)))\n if index != last_section_index:\n if sec.header_indicator != 0x90:\n raise pycdlibexception.PyCdlibInvalidISO('Intermediate El Torito section header not properly specified')\n # In theory, we should also make sure that the very last\n # section has a header_indicator of 0x91. However, we\n # have seen ISOs in the wild (FreeBSD 11.0 amd64) in which\n # this is not the case, so we skip that check.\n self._initialized = True\n elif val in (b'\\x90', b'\\x91'):\n # A Section Header Entry\n section_header = EltoritoSectionHeader()\n section_header.parse(valstr)\n self.sections.append(section_header)\n elif val in (b'\\x88', b'\\x00'):\n # A Section Entry. According to El Torito 2.4, a Section Entry\n # must follow a Section Header, but we have seen ISOs in the\n # wild that do not follow this (Mageia 4 ISOs, for instance).\n # To deal with this, we get a little complicated here. If there\n # is a previous section header, and the length of the entries\n # attached to it is less than the number of entries it should\n # have, then we attach this entry to that header. If there is\n # no previous section header, or if the previous section header\n # is already 'full', then we make this a standalone entry.\n secentry = EltoritoEntry()\n secentry.parse(valstr)\n if self.sections and len(self.sections[-1].section_entries) < self.sections[-1].num_section_entries:\n self.sections[-1].add_parsed_entry(secentry)\n else:\n self.standalone_entries.append(secentry)\n elif val == b'\\x44':\n # A Section Entry Extension\n self.sections[-1].section_entries[-1].selection_criteria += valstr[2:]\n else:\n raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito Boot Catalog entry')\n\n return self._initialized", "docstring": "A method to parse an El Torito Boot Catalog out of a string.\n\n Parameters:\n valstr - The string to parse the El Torito Boot Catalog out of.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def sub_map(self, counters_map):\n\n for counter_name in counters_map.counters:\n self.increment(counter_name, -counters_map.counters[counter_name])", "docstring": "Subtracts all counters from the map.\n\n For each counter in the passed map, subtracts its value to the counter in\n this map.\n\nArgs:\n counters_map: CounterMap instance to subtract.", "source": "juraj_google_style"} -{"code": "def VerifyScripts(verifiable):\n\n try:\n hashes = verifiable.GetScriptHashesForVerifying()\n except Exception as e:\n logger.debug(\"couldn't get script hashes %s \" % e)\n return False\n\n if len(hashes) != len(verifiable.Scripts):\n logger.debug(f\"hash - verification script length mismatch ({len(hashes)}/{len(verifiable.Scripts)})\")\n return False\n\n blockchain = GetBlockchain()\n\n for i in range(0, len(hashes)):\n verification = verifiable.Scripts[i].VerificationScript\n\n if len(verification) == 0:\n sb = ScriptBuilder()\n sb.EmitAppCall(hashes[i].Data)\n verification = sb.ms.getvalue()\n else:\n verification_hash = Crypto.ToScriptHash(verification, unhex=False)\n if hashes[i] != verification_hash:\n logger.debug(f\"hash {hashes[i]} does not match verification hash {verification_hash}\")\n return False\n\n state_reader = GetStateReader()\n script_table = 
CachedScriptTable(DBCollection(blockchain._db, DBPrefix.ST_Contract, ContractState))\n\n engine = ApplicationEngine(TriggerType.Verification, verifiable, script_table, state_reader, Fixed8.Zero())\n engine.LoadScript(verification)\n invocation = verifiable.Scripts[i].InvocationScript\n engine.LoadScript(invocation)\n\n try:\n success = engine.Execute()\n state_reader.ExecutionCompleted(engine, success)\n except Exception as e:\n state_reader.ExecutionCompleted(engine, False, e)\n\n if engine.ResultStack.Count != 1 or not engine.ResultStack.Pop().GetBoolean():\n Helper.EmitServiceEvents(state_reader)\n if engine.ResultStack.Count > 0:\n logger.debug(f\"Result stack failure! Count: {engine.ResultStack.Count} bool value: {engine.ResultStack.Pop().GetBoolean()}\")\n else:\n logger.debug(f\"Result stack failure! Count: {engine.ResultStack.Count}\")\n return False\n\n Helper.EmitServiceEvents(state_reader)\n\n return True", "docstring": "Verify the scripts of the provided `verifiable` object.\n\nArgs:\n verifiable (neo.IO.Mixins.VerifiableMixin):\n\nReturns:\n bool: True if verification is successful. False otherwise.", "source": "juraj_google_style"} -{"code": "def _data_to_tensor(data_list, batch_size, name=None):\n r\n # convert to constant tensor\n const_list = [tf.constant(data) for data in data_list]\n\n # create queue from constant tensor\n queue_list = tf.train.slice_input_producer(const_list, capacity=batch_size*128, name=name)\n\n # create batch queue\n return tf.train.shuffle_batch(queue_list, batch_size, capacity=batch_size*128,\n min_after_dequeue=batch_size*32, name=name)", "docstring": "r\"\"\"Returns batch queues from the whole data.\n\nArgs:\n data_list: A list of ndarrays. Every array must have the same size in the first dimension.\n batch_size: An integer.\n name: A name for the operations (optional).\n\nReturns:\n A list of tensors of `batch_size`.", "source": "juraj_google_style"} -{"code": "def commit_offsets_sync(self, offsets):\n\n assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'\n assert all(map(lambda k: isinstance(k, TopicPartition), offsets))\n assert all(map(lambda v: isinstance(v, OffsetAndMetadata),\n offsets.values()))\n self._invoke_completed_offset_commit_callbacks()\n if not offsets:\n return\n\n while True:\n self.ensure_coordinator_ready()\n\n future = self._send_offset_commit_request(offsets)\n self._client.poll(future=future)\n\n if future.succeeded():\n return future.value\n\n if not future.retriable():\n raise future.exception # pylint: disable-msg=raising-bad-type\n\n time.sleep(self.config['retry_backoff_ms'] / 1000)", "docstring": "Commit specific offsets synchronously.\n\n This method will retry until the commit completes successfully or an\n unrecoverable error is encountered.\n\nArgs:\n offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit\n\n Raises error on failure", "source": "juraj_google_style"} -{"code": "def case_mme_update(self, case_obj, user_obj, mme_subm_obj):\n\n created = None\n patient_ids = []\n updated = datetime.now()\n if 'mme_submission' in case_obj and case_obj['mme_submission']:\n created = case_obj['mme_submission']['created_at']\n else:\n created = updated\n patients = [ resp['patient'] for resp in mme_subm_obj.get('server_responses')]\n\n subm_obj = {\n 'created_at' : created,\n 'updated_at' : updated,\n 'patients' : patients, # list of submitted patient data\n 'subm_user' : user_obj['_id'], # submitting user\n 'sex' : mme_subm_obj['sex'],\n 'features' : mme_subm_obj['features'],\n 
'disorders' : mme_subm_obj['disorders'],\n 'genes_only' : mme_subm_obj['genes_only']\n }\n case_obj['mme_submission'] = subm_obj\n updated_case = self.update_case(case_obj)\n\n # create events for subjects add in MatchMaker for this case\n institute_obj = self.institute(case_obj['owner'])\n for individual in case_obj['individuals']:\n if individual['phenotype'] == 2: # affected\n # create event for patient\n self.create_event(institute=institute_obj, case=case_obj, user=user_obj,\n link='', category='case', verb='mme_add', subject=individual['display_name'],\n level='specific')\n\n return updated_case", "docstring": "Updates a case after a submission to MatchMaker Exchange\n\nArgs:\n case_obj(dict): a scout case object\n user_obj(dict): a scout user object\n mme_subm_obj(dict): contains MME submission params and server response\n\nReturns:\n updated_case(dict): the updated scout case", "source": "juraj_google_style"} -{"code": "def get_environmental_configuration(self, id_or_uri):\n\n uri = self._client.build_uri(id_or_uri) + \"/environmentalConfiguration\"\n return self._client.get(uri)", "docstring": "Returns a description of the environmental configuration (supported feature set, calibrated minimum & maximum\n power, location & dimensions, ...) of the resource.\n\nArgs:\n id_or_uri:\n Can be either the Unmanaged Device id or the uri\n\nReturns:\n dict:\n EnvironmentalConfiguration", "source": "juraj_google_style"} -{"code": "def renew(self, requested_timeout=None):\n\n # NB This code is sometimes called from a separate thread (when\n # subscriptions are auto-renewed. Be careful to ensure thread-safety\n\n if self._has_been_unsubscribed:\n raise SoCoException(\n 'Cannot renew subscription once unsubscribed')\n if not self.is_subscribed:\n raise SoCoException(\n 'Cannot renew subscription before subscribing')\n if self.time_left == 0:\n raise SoCoException(\n 'Cannot renew subscription after expiry')\n\n # SUBSCRIBE publisher path HTTP/1.1\n # HOST: publisher host:publisher port\n # SID: uuid:subscription UUID\n # TIMEOUT: Second-requested subscription duration (optional)\n headers = {\n 'SID': self.sid\n }\n if requested_timeout is None:\n requested_timeout = self.requested_timeout\n if requested_timeout is not None:\n headers[\"TIMEOUT\"] = \"Second-{}\".format(requested_timeout)\n response = requests.request(\n 'SUBSCRIBE',\n self.service.base_url + self.service.event_subscription_url,\n headers=headers)\n response.raise_for_status()\n timeout = response.headers['timeout']\n # According to the spec, timeout can be \"infinite\" or \"second-123\"\n # where 123 is a number of seconds. Sonos uses \"Second-123\" (with a\n # a capital letter)\n if timeout.lower() == 'infinite':\n self.timeout = None\n else:\n self.timeout = int(timeout.lstrip('Second-'))\n self._timestamp = time.time()\n self.is_subscribed = True\n log.info(\n \"Renewed subscription to %s, sid: %s\",\n self.service.base_url + self.service.event_subscription_url,\n self.sid)", "docstring": "Renew the event subscription.\n\n You should not try to renew a subscription which has been\n unsubscribed, or once it has expired.\n\nArgs:\n requested_timeout (int, optional): The period for which a renewal\n request should be made. 
If None (the default), use the timeout\n requested on subscription.", "source": "juraj_google_style"} -{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n\n location = getattr(path_spec, 'location', None)\n if location is None or not location.startswith(self.LOCATION_ROOT):\n return False\n\n if len(location) == 1:\n return True\n\n return self._cpio_archive_file.FileEntryExistsByPath(location[1:])", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\n path_spec (PathSpec): a path specification.\n\nReturns:\n bool: True if the file entry exists.", "source": "juraj_google_style"} -{"code": "def GetFormatStringAttributeNames(self, event):\n\n event_formatter = self.GetEventFormatter(event)\n if not event_formatter:\n return None\n\n return event_formatter.GetFormatStringAttributeNames()", "docstring": "Retrieves the attribute names in the format string.\n\nArgs:\n event (EventObject): event.\n\nReturns:\n list[str]: list containing the attribute names. If no event formatter to\n match the event can be found the function returns None.", "source": "juraj_google_style"} -{"code": "def accept(self, context):\n\n _next = 0\n for _s in context:\n _data = [self.data[j] for j in self.trn[_next]]\n if _s in _data:\n _next = self.trn[_next][_data.index(_s)]\n else:\n return 0, _next\n return 1, _next", "docstring": "Check if the context could be accepted by the oracle\n\nArgs:\n context: s sequence same type as the oracle data\n\nReturns:\n bAccepted: whether the sequence is accepted or not\n _next: the state where the sequence is accepted", "source": "juraj_google_style"} -{"code": "def email_has_role(self, email, role_name, uuid=None):\n\n mbr_data = self.get_membership(uuid=uuid)\n docs = []\n try:\n docs = mbr_data['response']['docs']\n except KeyError:\n failure_message = ('KeyError in membership data - '\n 'got {0}'.format(mbr_data))\n log.exception(failure_message)\n raise PyLmodUnexpectedData(failure_message)\n if len(docs) == 0:\n return False\n has_role = any(\n (x.get('email') == email and x.get('roleType') == role_name)\n for x in docs\n )\n if has_role:\n return True\n return False", "docstring": "Determine if an email is associated with a role.\n\nArgs:\n email (str): user email\n role_name (str): user role\n uuid (str): optional uuid. 
defaults to self.cuuid\n\nRaises:\n PyLmodUnexpectedData: Unexpected data was returned.\n requests.RequestException: Exception connection error\n\nReturns:\n bool: True or False if email has role_name", "source": "juraj_google_style"} -{"code": "def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names):\n\n print('Converting pooling ...')\n\n if names == 'short':\n tf_name = 'P' + random_string(7)\n elif names == 'keep':\n tf_name = w_name\n else:\n tf_name = w_name + str(random.random())\n\n if 'kernel_shape' in params:\n height, width = params['kernel_shape']\n else:\n height, width = params['kernel_size']\n\n if 'strides' in params:\n stride_height, stride_width = params['strides']\n else:\n stride_height, stride_width = params['stride']\n\n if 'pads' in params:\n padding_h, padding_w, _, _ = params['pads']\n else:\n padding_h, padding_w = params['padding']\n\n input_name = inputs[0]\n pad = 'valid' \n\n if height % 2 == 1 and width % 2 == 1 and \\\n height // 2 == padding_h and width // 2 == padding_w and \\\n stride_height == 1 and stride_width == 1:\n pad = 'same'\n else:\n padding_name = tf_name + '_pad'\n padding_layer = keras.layers.ZeroPadding2D(\n padding=(padding_h, padding_w),\n name=padding_name\n )\n layers[padding_name] = padding_layer(layers[inputs[0]])\n input_name = padding_name\n\n # Pooling type AveragePooling2D\n pooling = keras.layers.AveragePooling2D(\n pool_size=(height, width),\n strides=(stride_height, stride_width),\n padding=pad,\n name=tf_name,\n data_format='channels_first'\n )\n\n layers[scope_name] = pooling(layers[input_name])", "docstring": "Convert Average pooling.\n\nArgs:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers", "source": "juraj_google_style"} -{"code": "def shutdown(pidfile, signalnb=None, frame=None):\n\n log = logging.getLogger(PROGRAM_NAME)\n log.info(\"received %s at %s\", signalnb, frame)\n log.info(\"going to remove pidfile %s\", pidfile)\n # no point to catch possible errors when we delete the pid file\n os.unlink(pidfile)\n log.info('shutdown is complete')\n sys.exit(0)", "docstring": "Clean up pidfile upon shutdown.\n\n Notice:\n We should register this function as signal handler for the following\n termination signals:\n SIGHUP\n SIGTERM\n SIGABRT\n SIGINT\n\nArgs:\n pidfile (str): pidfile to remove\n signalnb (int): The ID of signal\n frame (obj): Frame object at the time of receiving the signal", "source": "juraj_google_style"} -{"code": "def __init__(self, index, columns=tuple(), **kwargs):\n\n if index is not None:\n if isinstance(index, basestring):\n index = (index,)\n if isinstance(columns, basestring):\n columns = (columns,)\n index_names = [\n col[0] if isinstance(col, tuple) else col\n for col in index\n ]\n column_names = [\n col[0] if isinstance(col, tuple) else col\n for col in columns\n ]\n self._impl = amplpython.DataFrame.factory(\n len(index_names),\n list(index_names) + list(column_names),\n len(index_names) + len(column_names)\n )\n for col in index:\n if isinstance(col, tuple):\n self.setColumn(col[0], col[1])\n for col in columns:\n if isinstance(col, tuple):\n self.setColumn(col[0], col[1])\n else:\n self._impl = kwargs.get('_impl', None)", "docstring": "Create a new DataFrame with specifed index and column headers.\n\nArgs:\n index: Index column;\n\n columns: Column headers.", "source": 
"juraj_google_style"} -{"code": "def classes_file(flag_leaf=False):\n\n if __flag_first:\n __setup()\n\n if not flag_leaf:\n return _classes_file\n\n return [cls for cls in _classes_file if cls not in _classes_file_superclass]", "docstring": "All known File* classes\n\nArgs:\n flag_leaf: returns only classes that do not have subclasses\n (\"leaf\" nodes as in a class tree graph)", "source": "juraj_google_style"} -{"code": "def __init__(self, max_workers=None):\n\n _remove_dead_thread_references()\n\n if max_workers is None:\n self._max_workers = multiprocessing.cpu_count()\n else:\n self._max_workers = max_workers\n\n # Make the call queue slightly larger than the number of processes to\n # prevent the worker processes from idling. But don't make it too big\n # because futures in the call queue cannot be cancelled.\n self._call_queue = multiprocessing.Queue(self._max_workers +\n EXTRA_QUEUED_CALLS)\n self._result_queue = multiprocessing.Queue()\n self._work_ids = queue.Queue()\n self._queue_management_thread = None\n self._processes = set()\n\n # Shutdown is a two-step process.\n self._shutdown_thread = False\n self._shutdown_process_event = multiprocessing.Event()\n self._shutdown_lock = threading.Lock()\n self._queue_count = 0\n self._pending_work_items = {}", "docstring": "Initializes a new ProcessPoolExecutor instance.\n\nArgs:\n max_workers: The maximum number of processes that can be used to\n execute the given calls. If None or not given then as many\n worker processes will be created as the machine has processors.", "source": "juraj_google_style"} -{"code": "def _get_upload_session_status(res):\n\n response = json.loads(res.body.decode())\n if 'sessionStatus' not in response:\n try:\n info = (\n response['errorMessage']['additionalInfo']\n ['uploader_service.GoogleRupioAdditionalInfo']\n ['completionInfo']['customerSpecificInfo']\n )\n reason = '{} : {}'.format(info['status'], info['message'])\n except KeyError:\n reason = 'unknown reason'\n raise exceptions.NetworkError('image upload failed: {}'.format(\n reason\n ))\n return response['sessionStatus']", "docstring": "Parse the image upload response to obtain status.\n\nArgs:\n res: http_utils.FetchResponse instance, the upload response\n\nReturns:\n dict, sessionStatus of the response\n\nRaises:\n hangups.NetworkError: If the upload request failed.", "source": "juraj_google_style"} -{"code": "def safe_url(self, url, errors='strict'):\n\n if url is not None:\n url = quote(self.s(url, errors=errors), safe='~')\n return url", "docstring": "URL encode value for safe HTTP request.\n\nArgs:\n url (string): The string to URL Encode.\n\nReturns:\n (string): The urlencoded string.", "source": "juraj_google_style"} -{"code": "def __init__(self,\n no_decomp: Callable[[ops.Operation], bool]=(lambda _: False)\n ) -> None:\n\n super().__init__()\n self.no_decomp = no_decomp", "docstring": "Construct the optimization pass.\n\nArgs:\n no_decomp: A predicate that determines whether an operation should\n be decomposed or not. 
Defaults to decomposing everything.", "source": "juraj_google_style"} -{"code": "def bulk_upsert(self, conflict_target: List, rows: List[Dict], index_predicate: str=None):\n\n\n if not rows or len(rows) <= 0:\n return\n\n self.on_conflict(conflict_target, ConflictAction.UPDATE, index_predicate)\n return self.bulk_insert(rows)", "docstring": "Creates a set of new records or updates the existing\n ones with the specified data.\n\nArgs:\n conflict_target:\n Fields to pass into the ON CONFLICT clause.\n\n rows:\n Rows to upsert.\n\n index_predicate:\n The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking\n conflicts)", "source": "juraj_google_style"} -{"code": "def getModelSummaryAsGeoJson(self, session, withStreamNetwork=True, withNodes=False):\n\n # Get mask map\n watershedMaskCard = self.getCard('WATERSHED_MASK')\n maskFilename = watershedMaskCard.value\n maskExtension = maskFilename.strip('\"').split('.')[1]\n\n maskMap = session.query(RasterMapFile).\\\n filter(RasterMapFile.projectFile == self).\\\n filter(RasterMapFile.fileExtension == maskExtension).\\\n one()\n\n # Get mask map as a KML polygon\n statement = .format('raster', maskMap.tableName, maskMap.id)\n\n result = session.execute(statement)\n\n maskMapJsonPolygon = ''\n for row in result:\n maskMapJsonPolygon = row.polygon\n\n jsonString = maskMapJsonPolygon\n\n if withStreamNetwork:\n # Get the channel input file for the stream network\n channelInputFile = self.channelInputFile\n\n if channelInputFile is not None:\n # Use the existing method on the channel input file to generate the stream network GeoJson\n jsonStreamNetwork = channelInputFile.getStreamNetworkAsGeoJson(session=session, withNodes=withNodes)\n\n # Convert to json Python objects\n featureCollection = json.loads(jsonStreamNetwork)\n jsonMaskMapObjects = json.loads(maskMapJsonPolygon)\n\n # Create a mask feature\n maskFeature = {\"type\": \"Feature\",\n \"geometry\": jsonMaskMapObjects,\n \"properties\": {},\n \"id\": maskMap.id}\n\n # Add mask map to feature collection\n tempFeatures = featureCollection['features']\n tempFeatures.append(maskFeature)\n featureCollection['features'] = tempFeatures\n\n # Dump to string\n jsonString = json.dumps(featureCollection)\n\n return jsonString", "docstring": "Retrieve a GeoJSON representation of the model. Includes vectorized mask map and stream network.\n\nArgs:\n session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\n withStreamNetwork (bool, optional): Include stream network. Defaults to True.\n withNodes (bool, optional): Include nodes. 
Defaults to False.\n\nReturns:\n str: GeoJSON string", "source": "juraj_google_style"} -{"code": "def GetDisplayNameForPathSpec(self, path_spec):\n\n return path_helper.PathHelper.GetDisplayNameForPathSpec(\n path_spec, mount_path=self._mount_path, text_prepend=self._text_prepend)", "docstring": "Retrieves the display name for a path specification.\n\nArgs:\n path_spec (dfvfs.PathSpec): path specification.\n\nReturns:\n str: human readable version of the path specification.", "source": "juraj_google_style"} -{"code": "def createRoles(self, configFiles, dateTimeFormat=None):\n\n if dateTimeFormat is None:\n dateTimeFormat = '%Y-%m-%d %H:%M'\n\n scriptStartTime = datetime.datetime.now()\n try:\n\n print (\"********************Create Roles********************\")\n\n print (\"Script started at %s\" % scriptStartTime.strftime(dateTimeFormat))\n\n if self.securityhandler.valid == False:\n print (\"Login required\")\n else:\n orgTools = orgtools.orgtools(securityinfo=self)\n\n if orgTools is None:\n print (\"Error creating orgtools\")\n else:\n\n for configFile in configFiles:\n\n config = common.init_config_json(config_file=configFile)\n if config is not None:\n\n startTime = datetime.datetime.now()\n print (\"Processing config %s, starting at: %s\" % (configFile,startTime.strftime(dateTimeFormat)))\n\n roleInfos = config['Roles']\n for roleInfo in roleInfos:\n createRoleResults = orgTools.createRole(roleInfo['Name'],roleInfo['Description'],roleInfo['Privileges'])\n\n else:\n print (\"Config %s not found\" % configFile)\n\n\n except(TypeError,ValueError,AttributeError) as e:\n print (e)\n except (common.ArcRestHelperError) as e:\n print (\"error in function: %s\" % e[0]['function'])\n print (\"error on line: %s\" % e[0]['line'])\n print (\"error in file name: %s\" % e[0]['filename'])\n print (\"with error message: %s\" % e[0]['synerror'])\n if 'arcpyError' in e[0]:\n print (\"with arcpy message: %s\" % e[0]['arcpyError'])\n\n except Exception as e:\n if (reportToolsInstalled):\n if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):\n print (\"error in function: %s\" % e[0]['function'])\n print (\"error on line: %s\" % e[0]['line'])\n print (\"error in file name: %s\" % e[0]['filename'])\n print (\"with error message: %s\" % e[0]['synerror'])\n if 'arcpyError' in e[0]:\n print (\"with arcpy message: %s\" % e[0]['arcpyError'])\n else:\n line, filename, synerror = trace()\n print (\"error on line: %s\" % line)\n print (\"error in file name: %s\" % filename)\n print (\"with error message: %s\" % synerror)\n else:\n line, filename, synerror = trace()\n print (\"error on line: %s\" % line)\n print (\"error in file name: %s\" % filename)\n print (\"with error message: %s\" % synerror)\n finally:\n print (\"Script complete, time to complete: %s\" % str(datetime.datetime.now() - scriptStartTime))\n print (\"###############Create Groups Completed#################\")\n print (\"\")\n\n #if orgTools is not None:\n #orgTools.dispose()\n groupInfo = None\n groupFile = None\n iconPath = None\n startTime = None\n thumbnail = None\n result = None\n config = None\n sciptPath = None\n orgTools = None\n del groupInfo\n del groupFile\n del iconPath\n del startTime\n del thumbnail\n del result\n del config\n del sciptPath\n del orgTools\n\n gc.collect()", "docstring": "Parses a JSON configuration file to create roles.\n\nArgs:\n configFiles (list): A list of JSON files on disk containing\n configuration data for creating roles.\n dateTimeFormat (str): A valid date formatting directive, as 
understood\n by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e.,\n ``'%Y-%m-%d %H:%M'``.", "source": "juraj_google_style"} -{"code": "def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':\n\n t = tf.reshape(self.tensor, shape)\n scope = self.scope.as_list()\n batch = self.batch\n return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the reshape operation with given `shape`.\n\nArgs:\n shape: The output's shape.\n\nReturns:\n A TensorFluent wrapping the reshape operation.", "source": "juraj_google_style"} -{"code": "def fit_size_distribution_component_models(self, model_names, model_objs, input_columns, output_columns):\n\n groups = np.unique(self.data[\"train\"][\"member\"][self.group_col])\n\n weights=None\n\n for group in groups:\n print(group)\n group_data = self.data[\"train\"][\"combo\"].loc[self.data[\"train\"][\"combo\"][self.group_col] == group]\n group_data = group_data.dropna()\n group_data = group_data.loc[group_data[output_columns[-1]] > 0]\n\n if self.sector:\n\n lon_obj = group_data.loc[:,'Centroid_Lon']\n lat_obj = group_data.loc[:,'Centroid_Lat']\n\n conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel())\n center_lon, center_lat = self.proj_dict[\"lon_0\"],self.proj_dict[\"lat_0\"] \n\n distances = np.array([np.sqrt((x-center_lon)**2+\\\n (y-center_lat)**2) for (x, y) in conus_lat_lon_points])\n\n min_dist, max_minus_min = min(distances),max(distances)-min(distances)\n\n distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances]\n weights = np.array(distance_0_1)\n\n\n self.size_distribution_models[group] = {\"lognorm\": {}}\n self.size_distribution_models[group][\"lognorm\"][\"pca\"] = PCA(n_components=len(output_columns))\n log_labels = np.log(group_data[output_columns].values)\n log_labels[:, np.where(output_columns == \"Shape\")[0]] *= -1\n log_means = log_labels.mean(axis=0)\n log_sds = log_labels.std(axis=0)\n log_norm_labels = (log_labels - log_means) / log_sds\n out_pc_labels = self.size_distribution_models[group][\"lognorm\"][\"pca\"].fit_transform(log_norm_labels)\n self.size_distribution_models[group]['lognorm']['mean'] = log_means\n self.size_distribution_models[group]['lognorm']['sd'] = log_sds\n for comp in range(len(output_columns)):\n self.size_distribution_models[group][\"pc_{0:d}\".format(comp)] = dict()\n for m, model_name in enumerate(model_names):\n print(model_name, comp)\n self.size_distribution_models[group][\n \"pc_{0:d}\".format(comp)][model_name] = deepcopy(model_objs[m])\n try:\n self.size_distribution_models[group][\n \"pc_{0:d}\".format(comp)][model_name].fit(group_data[input_columns],\n out_pc_labels[:, comp],\n sample_weight=weights)\n except:\n self.size_distribution_models[group][\n \"pc_{0:d}\".format(comp)][model_name].fit(group_data[input_columns],\n out_pc_labels[:, comp])\n return", "docstring": "This calculates 2 principal components for the hail size distribution between the shape and scale parameters.\n Separate machine learning models are fit to predict each component.\n\nArgs:\n model_names: List of machine learning model names\n model_objs: List of machine learning model objects.\n input_columns: List of input variables\n output_columns: Output columns, should contain Shape and Scale.\n\n Returns:", "source": "juraj_google_style"} -{"code": "def helper(*commands):\n\n def decorated_func(f):\n f.__help_targets__ = list(commands)\n return f\n return decorated_func", "docstring": "Decorate a function to be the helper function of 
commands.\n\nArgs:\n commands: Names of command that should trigger this function object.\n\n ---------------------------\n Interface of helper methods:\n\n @helper('some-command')\n def help_foo(self, args):\n '''\n\nArgs:\n args: A list of arguments.\n\nReturns:\n A string that is the help message.\n '''\n pass", "source": "juraj_google_style"} -{"code": "def query(self, s):\n\n s1 = np.sort([self.order[token] for token in s if token in self.order])\n logging.debug(\"{} original tokens and {} tokens after applying \"\n \"frequency order.\".format(len(s), len(s1)))\n prefix = self._get_prefix(s1)\n candidates = set([i for p1, token in enumerate(prefix)\n for i, p2 in self.index[token]\n if self.position_filter_func(s1, self.sets[i], p1, p2,\n self.similarity_threshold)])\n logging.debug(\"{} candidates found.\".format(len(candidates)))\n results = deque([])\n for i in candidates:\n s2 = self.sets[i]\n sim = self.similarity_func(s1, s2)\n if sim < self.similarity_threshold:\n continue\n results.append((i, sim))\n logging.debug(\"{} verified sets found.\".format(len(results)))\n return list(results)", "docstring": "Query the search index for sets similar to the query set.\n\nArgs:\n s (Iterable): the query set.\n\n Returns (list): a list of tuples `(index, similarity)` where the index\n is the index of the matching sets in the original list of sets.", "source": "juraj_google_style"} -{"code": "def set_verbosity(v):\n\n try:\n new_level = int(v)\n except ValueError:\n new_level = converter.ABSL_NAMES[v.upper()]\n FLAGS.verbosity = new_level", "docstring": "Sets the logging verbosity.\n\n Causes all messages of level <= v to be logged,\n and all messages of level > v to be silently discarded.\n\nArgs:\n v: int|str, the verbosity level as an integer or string. 
Legal string values\n are those that can be coerced to an integer as well as case-insensitive\n 'debug', 'info', 'warning', 'error', and 'fatal'.", "source": "juraj_google_style"} -{"code": "def _attend_over_memory(self, memory):\n\n attention_mlp = basic.BatchApply(\n mlp.MLP([self._mem_size] * self._attention_mlp_layers))\n for _ in range(self._num_blocks):\n attended_memory = self._multihead_attention(memory)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = basic.BatchApply(layer_norm.LayerNorm())(\n memory + attended_memory)\n\n # Add a skip connection to the attention_mlp's input.\n memory = basic.BatchApply(layer_norm.LayerNorm())(\n attention_mlp(memory) + memory)\n\n return memory", "docstring": "Perform multiheaded attention over `memory`.\n\nArgs:\n memory: Current relational memory.\n\nReturns:\n The attended-over memory.", "source": "juraj_google_style"} -{"code": "def getdata(inputfile, argnum=None, close=False):\n\n # get data and converts them to list\n # outputtype - list, dict, all\n output = []\n add_data = {}\n line_num = 0\n for line in inputfile:\n line_num += 1\n if (\"#\" not in line) and (line != \"\"):\n linesplit = line.split()\n if argnum is not None and len(linesplit) != int(argnum):\n raise ValueError(\n \"Line {:d} has {:d} arguments (need {:d})\".format(\n line_num, len(linesplit), argnum))\n output.append(linesplit)\n # additional float variable\n if \"#f\" in line:\n data = line.split()[1].split(\"=\")\n add_data[data[0]] = float(data[1])\n # additional list float variable\n if \"#l\" in line:\n data = line.split()[1].split(\"=\")\n add_data[data[0]] = [float(e) for e in data[1].split(\",\")]\n if close:\n inputfile.close()\n output = cleandata(output)\n return {\n \"data\": np.array(output),\n \"variables\": add_data,\n }", "docstring": "Get data from the .dat files\n\nArgs:\n inputfile: file\n Input File\n close: bool, default=False\n Closes inputfile if True\n inputfile (File): Input file\n close (boolean): Closes inputfile if True (default: False)\n\nReturns:\n dictionary:\n data: list of parsed data\n variables: dictionary of errors and other additional variables", "source": "juraj_google_style"} -{"code": "def prepend_block(self, node, reverse=False):\n\n if not isinstance(node, grammar.STATEMENTS):\n raise ValueError\n if reverse:\n self.to_prepend_block[-1].appendleft(node)\n else:\n self.to_prepend_block[-1].append(node)", "docstring": "Prepend a statement to the current block.\n\nArgs:\n node: The statement to prepend.\n reverse: When called multiple times, this flag determines whether the\n statement should be prepended or appended to the already inserted\n statements.\n\nRaises:\n ValueError: If the given node is not a statement.", "source": "juraj_google_style"} -{"code": "def _process_tensor_event(self, event, thresholds):\n\n return self._make_pr_entry(\n event.step,\n event.wall_time,\n tensor_util.make_ndarray(event.tensor_proto),\n thresholds)", "docstring": "Converts a TensorEvent into a dict that encapsulates information on it.\n\nArgs:\n event: The TensorEvent to convert.\n thresholds: An array of floats that ranges from 0 to 1 (in that\n direction and inclusive of 0 and 1).\n\nReturns:\n A JSON-able dictionary of PR curve data for 1 step.", "source": "juraj_google_style"} -{"code": "def run(self,\n env: env_tools.PreparedEnv,\n verbose: bool,\n previous_failures: Set['Check']) -> CheckResult:\n\n\n # Skip if a dependency failed.\n if previous_failures.intersection(self.dependencies):\n 
print(shell_tools.highlight(\n 'Skipped ' + self.command_line_switch(),\n shell_tools.YELLOW))\n return CheckResult(\n self, False, 'Skipped due to dependency failing.', None)\n\n print(shell_tools.highlight(\n 'Running ' + self.command_line_switch(),\n shell_tools.GREEN))\n try:\n success, message = self.perform_check(env, verbose=verbose)\n result = CheckResult(self, success, message, None)\n except Exception as ex:\n result = CheckResult(self, False, 'Unexpected error.', ex)\n\n print(shell_tools.highlight(\n 'Finished ' + self.command_line_switch(),\n shell_tools.GREEN if result.success else shell_tools.RED))\n if verbose:\n print(result)\n\n return result", "docstring": "Evaluates this check.\n\nArgs:\n env: The prepared python environment to run the check in.\n verbose: When set, more progress output is produced.\n previous_failures: Checks that have already run and failed.\n\nReturns:\n A CheckResult instance.", "source": "juraj_google_style"} -{"code": "def bfs(graph, start):\n\n # maintain a queue of paths\n queue = []\n visited = []\n # maintain a queue of nodes\n # push the first path into the queue\n queue.append([['', start]])\n while queue:\n # get the first path from the queue\n path = queue.pop(0)\n # get the last node from the path\n node = path[-1][1]\n if node.stateid not in visited:\n visited.append(node.stateid)\n # path found\n if node.final != TropicalWeight(float('inf')):\n return \"\".join([mnode[0] for mnode in path])\n # enumerate all adjacent nodes, construct a new path and push\n # it into the queue\n for arc in node.arcs:\n char = graph.isyms.find(arc.ilabel)\n next_state = graph[arc.nextstate]\n # print next_state.stateid\n if next_state.stateid not in visited:\n new_path = list(path)\n new_path.append([char, next_state])\n queue.append(new_path)", "docstring": "Finds the shortest string using BFS\n\nArgs:\n graph (DFA): The DFA states\n start (DFA state): The DFA initial state\n\nReturns:\n str: The shortest string", "source": "juraj_google_style"} -{"code": "def _parse_mtu(self, config):\n\n match = re.search(r'mtu (\\d+)', config)\n return dict(mtu=int(match.group(1)))", "docstring": "Parses the config block and returns the configured IP MTU value\n\n The provided configuration block is scanned and the configured value\n for the IP MTU is returned as a dict object. 
The IP MTU value is\n expected to always be present in the provided config block\n\nArgs:\n config (str): The interface configuration block to parse\n\nReturns:\n dict: A dict object intended to be merged into the resource dict", "source": "juraj_google_style"} -{"code": "def _get_source(link):\n\n if link.startswith(\"http://\") or link.startswith(\"https://\"):\n down = httpkie.Downloader()\n return down.download(link)\n\n if os.path.exists(link):\n with open(link) as f:\n return f.read()\n\n raise UserWarning(\"html: '%s' is neither URL or data!\" % link)", "docstring": "Return source of the `link` whether it is filename or url.\n\nArgs:\n link (str): Filename or URL.\n\nReturns:\n str: Content.\n\nRaises:\n UserWarning: When the `link` couldn't be resolved.", "source": "juraj_google_style"} -{"code": "def _set_textarea(el, value):\n\n if isinstance(value, dict):\n el.text = value[\"val\"]\n elif type(value) in [list, tuple]:\n el.text = \"\\n\\n\".join(\n \"-- %s --\\n%s\" % (item[\"source\"], item[\"val\"])\n for item in value\n )\n else:\n el.text = value", "docstring": "Set content of given textarea element `el` to `value`.\n\nArgs:\n el (obj): Reference to textarea element you wish to set.\n value (obj/list): Value to which the `el` will be set.", "source": "juraj_google_style"} -{"code": "def __init__(self, cbFun, cbCtx=None):\n\n self._cbFun = cbFun\n self._cbCtx = cbCtx", "docstring": "Create an instance of *CallbackReader* bound to specific URL.\n\nArgs:\n cbFun (callable): user callable accepting *MIB name* and *cbCtx* objects\n\n Keyword Args:\n cbCtx (object): user object that can be used to communicate state information\n between user-scope code and the *cbFun* callable scope", "source": "juraj_google_style"} -{"code": "def set_tif(self, interface):\n\n if not ((1 << interface) & self.supported_tifs()):\n raise errors.JLinkException('Unsupported target interface: %s' % interface)\n\n # The return code here is actually *NOT* the previous set interface, it\n # is ``0`` on success, otherwise ``1``.\n res = self._dll.JLINKARM_TIF_Select(interface)\n if res != 0:\n return False\n\n self._tif = interface\n return True", "docstring": "Selects the specified target interface.\n\n Note that a restart must be triggered for this to take effect.\n\nArgs:\n self (Jlink): the ``JLink`` instance\n interface (int): integer identifier of the interface\n\nReturns:\n ``True`` if target was updated, otherwise ``False``.\n\nRaises:\n JLinkException: if the given interface is invalid or unsupported.", "source": "juraj_google_style"} -{"code": "def make_trace_api(client):\n\n generated = trace_service_client.TraceServiceClient(\n credentials=client._credentials, client_info=_CLIENT_INFO\n )\n return _TraceAPI(generated, client)", "docstring": "Create an instance of the gapic Trace API.\n\nArgs:\n client (~google.cloud.trace.client.Client): The client that holds\n configuration details.\n\nReturns:\n A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the\n proper configurations.", "source": "juraj_google_style"} -{"code": "def remove_spines(ax, sides):\n\n for side in sides:\n ax.spines[side].set_visible(False)\n return ax", "docstring": "Remove spines of axis.\n\n Parameters:\n ax: axes to operate on\n sides: list of sides: top, left, bottom, right\n\nExample:\n removespines(ax, ['top'])\n removespines(ax, ['top', 'bottom', 'right', 'left'])", "source": "juraj_google_style"} -{"code": "def create_heart(self, git_repo_url, max_commits=10, weeks_from_now=1):\n\n self.weeks_from_now = 
weeks_from_now\n self.end_date = self.get_end_date()\n try:\n self.repository_name = git_repo_url.split('/')[-1][:-4]\n self.git_repo_url = git_repo_url\n self.max_commits = max_commits\n self.do_commits()\n self.do_commit_amends()\n except IndexError as ie:\n raise ErrorMessage(\n \"Please provide the correct URL for the Repository\")\n except Exception as e:\n raise ErrorMessage(str(e))", "docstring": "Creates heart on the Summary.\n\nArgs:\n git_repo_url: The url (ssh or https) of the Repository, used for cloning\n max_commits: Maximum number of commits in a day\n weeks_from_now: The number of week from this week the Heart's Right center boundary will be.", "source": "juraj_google_style"} -{"code": "def add_parameter(self, name, min_val, max_val):\n\n\n self.__parameters.append(Parameter(name, min_val, max_val))", "docstring": "Adds a paramber to the Population\n\nArgs:\n name (str): name of the parameter\n min_val (int or float): minimum value for the parameter\n max_val (int or float): maximum value for the parameter", "source": "juraj_google_style"} -{"code": "def wrap_callable(cls, uri, methods, callable_obj):\n\n if isinstance(callable_obj, HandlerMeta):\n callable_obj.base_endpoint = uri\n callable_obj.is_valid = True\n return callable_obj\n\n if isinstance(callable_obj, types.FunctionType):\n return cls(uri=uri, methods=methods, callable_obj=callable_obj)\n\n raise RouteError(\"Invalid handler type.\")", "docstring": "Wraps function-based callable_obj into a `Route` instance, else\n proxies a `bottle_neck.handlers.BaseHandler` subclass instance.\n\nArgs:\n uri (str): The uri relative path.\n methods (tuple): A tuple of valid method strings.\n callable_obj (instance): The callable object.\n\nReturns:\n A route instance.\n\nRaises:\n RouteError for invalid callable object type.", "source": "juraj_google_style"} -{"code": "def Scan(self, fd,\n matcher):\n\n streamer = streaming.Streamer(\n chunk_size=self.CHUNK_SIZE, overlap_size=self.OVERLAP_SIZE)\n\n offset = self.params.start_offset\n amount = self.params.length\n for chunk in streamer.StreamFile(fd, offset=offset, amount=amount):\n for span in chunk.Scan(matcher):\n ctx_begin = max(span.begin - self.params.bytes_before, 0)\n ctx_end = min(span.end + self.params.bytes_after, len(chunk.data))\n ctx_data = chunk.data[ctx_begin:ctx_end]\n\n yield rdf_client.BufferReference(\n offset=chunk.offset + ctx_begin,\n length=len(ctx_data),\n data=ctx_data)\n\n if self.params.mode == self.params.Mode.FIRST_HIT:\n return", "docstring": "Scans given file searching for occurrences of given pattern.\n\nArgs:\n fd: A file descriptor of the file that needs to be searched.\n matcher: A matcher object specifying a pattern to search for.\n\nYields:\n `BufferReference` objects pointing to file parts with matching content.", "source": "juraj_google_style"} -{"code": "def find_slots(self, wanted, slots=None):\n\n if slots is None:\n slots = self.inv_slots_preferred + self.window.window_slots\n wanted = make_slot_check(wanted)\n\n for slot in slots:\n if wanted(slot):\n yield slot", "docstring": "Yields all slots containing the item.\n Searches the given slots or, if not given,\n active hotbar slot, hotbar, inventory, open window in this order.\n\nArgs:\n wanted: function(Slot) or Slot or itemID or (itemID, metadata)", "source": "juraj_google_style"} -{"code": "def split(self, grouper):\n\n data = self.to_df(condition=True, entities=True)\n data = data.drop('condition', axis=1)\n\n subsets = []\n for i, (name, g) in enumerate(data.groupby(grouper)):\n 
name = '%s.%s' % (self.name, name)\n col = self.__class__(name=name, data=g, source=self.source,\n run_info=getattr(self, 'run_info', None))\n subsets.append(col)\n return subsets", "docstring": "Split the current SparseRunVariable into multiple columns.\n\nArgs:\n grouper (iterable): list to groupby, where each unique value will\n be taken as the name of the resulting column.\n\nReturns:\n A list of SparseRunVariables, one per unique value in the\n grouper.", "source": "juraj_google_style"} -{"code": "def datestr2date(date_str):\n\n if any(c not in '0123456789-/' for c in date_str):\n raise ValueError('Illegal character in date string')\n if '/' in date_str:\n try:\n m, d, y = date_str.split('/')\n except:\n raise ValueError('Date {} must have no or exactly 2 slashes. {}'.\n format(date_str, VALID_DATE_FORMATS_TEXT))\n elif '-' in date_str:\n try:\n d, m, y = date_str.split('-')\n except:\n raise ValueError('Date {} must have no or exactly 2 dashes. {}'.\n format(date_str, VALID_DATE_FORMATS_TEXT))\n elif len(date_str) == 8 or len(date_str) == 6:\n d = date_str[-2:]\n m = date_str[-4:-2]\n y = date_str[:-4]\n else:\n raise ValueError('Date format not recognised. {}'.format(\n VALID_DATE_FORMATS_TEXT))\n if len(y) == 2:\n year = 2000 + int(y)\n elif len(y) == 4:\n year = int(y)\n else:\n raise ValueError('year must be 2 or 4 digits')\n for s in (m, d):\n if 1 <= len(s) <= 2:\n month, day = int(m), int(d)\n else:\n raise ValueError('m and d must be 1 or 2 digits')\n try:\n return datetime.date(year, month, day)\n except ValueError:\n raise ValueError('Invalid date {}. {}'.format(date_str, \n VALID_DATE_FORMATS_TEXT))", "docstring": "Turns a string into a datetime.date object. This will only work if the\n format can be \"guessed\", so the string must have one of the formats from\n VALID_DATE_FORMATS_TEXT.\n\nArgs:\n date_str (str) a string that represents a date\n\nReturns:\n datetime.date object\n\nRaises:\n ValueError if the input string does not have a valid format.", "source": "juraj_google_style"} -{"code": "def DEFINE_integer(flag_name, default_value, docstring, required=False): # pylint: disable=invalid-name\n\n _define_helper(flag_name, default_value, docstring, int, required)", "docstring": "Defines a flag of type 'int'.\n\nArgs:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as an int.\n docstring: A helpful message explaining the use of the flag.", "source": "juraj_google_style"} -{"code": "def search_groups(self, group):\n\n group_url = \"%s/%s/%s\" % (self.url, \"group\", group)\n response = self.jss.get(group_url)\n return LDAPGroupsResults(self.jss, response)", "docstring": "Search for LDAP groups.\n\nArgs:\n group: Group to search for. 
It is not entirely clear how the\n JSS determines the results- are regexes allowed, or\n globbing?\n\nReturns:\n LDAPGroupsResult object.\n\nRaises:\n JSSGetError if no results are found.", "source": "juraj_google_style"} -{"code": "def _get_backend(filename):\n\n filename = os.path.abspath(filename)\n\n with _backends_lock:\n if filename not in _backends:\n _backends[filename] = _MultiprocessStorageBackend(filename)\n return _backends[filename]", "docstring": "A helper method to get or create a backend with thread locking.\n\n This ensures that only one backend is used per-file per-process, so that\n thread and process locks are appropriately shared.\n\nArgs:\n filename: The full path to the credential storage file.\n\nReturns:\n An instance of :class:`_MultiprocessStorageBackend`.", "source": "juraj_google_style"} -{"code": "def init_grad(obj, allow_lazy_initializer=False):\n\n if obj is None:\n # TODO: fixes.py appears to pass None value and expect 0.0 back. Bug?\n return 0.0\n\n initializer, supports_lazy_initializer = grad_initializers[type(obj)]\n if supports_lazy_initializer:\n if isinstance(obj, ZeroGradient):\n if allow_lazy_initializer:\n return ZeroGradient(obj.like)\n else:\n # TODO: Not sure this should normally be hit. In forward-over-reverse?\n return obj.instantiate()\n else:\n if allow_lazy_initializer:\n return ZeroGradient(obj)\n else:\n assert not isinstance(obj, ZeroGradient)\n return initializer(obj)", "docstring": "Initialize the gradient for an object.\n\nArgs:\n obj: The object to initialize the gradient for, can be either a number,\n array, tuple, list, or dictionary.\n allow_lazy_initializer: Whether to allow using the ZeroGradient wrapper,\n for efficiency.\n\nReturns:\n An object of the same type, shape, etc. but with all numeric values set to\n zero. If the type is unknown, a zero is returned.", "source": "juraj_google_style"} -{"code": "def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]\n\n countriesdata = cls.countriesdata(use_live=use_live)\n country = countriesdata['countries'].get(iso3.upper())\n if country is not None:\n return country\n\n if exception is not None:\n raise exception\n return None", "docstring": "Get country information from ISO3 code\n\nArgs:\n iso3 (str): ISO3 code for which to get country information\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. 
Defaults to None.\n\nReturns:\n Optional[Dict[str]]: country information", "source": "juraj_google_style"} -{"code": "def __init__(self, qobj_model, default_qubit_los, default_meas_los, **run_config):\n\n self.qobj_model = qobj_model\n self.default_qubit_los = default_qubit_los\n self.default_meas_los = default_meas_los\n self.run_config = run_config", "docstring": "Create new converter.\n\nArgs:\n qobj_model (PulseQobjExperimentConfig): qobj model for experiment config.\n default_qubit_los (list): List of default qubit lo frequencies.\n default_meas_los (list): List of default meas lo frequencies.\n run_config (dict): experimental configuration.", "source": "juraj_google_style"} -{"code": "def createVideo(videoQuality):\n\n if videoQuality == VIDEO_QUALITY_ORIGINAL:\n return MediaSettings('', '', '')\n elif videoQuality < len(VIDEO_QUALITIES['bitrate']):\n return MediaSettings(VIDEO_QUALITIES['bitrate'][videoQuality],\n VIDEO_QUALITIES['videoQuality'][videoQuality],\n VIDEO_QUALITIES['videoResolution'][videoQuality])\n else:\n raise BadRequest('Unexpected video quality')", "docstring": "Returns a :class:`~plexapi.sync.MediaSettings` object, based on provided video quality value.\n\n Parameters:\n videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in this module.\n\nRaises:\n :class:`plexapi.exceptions.BadRequest`: when provided unknown video quality.", "source": "juraj_google_style"} -{"code": "def _restore_resources(resources):\n\n resources = deepcopy(resources)\n for resource in resources:\n schema = resource['schema']\n for fk in schema.get('foreignKeys', []):\n _, name = _restore_path(fk['reference']['resource'])\n fk['reference']['resource'] = name\n return resources", "docstring": "Restore schemas from being compatible with storage schemas.\n\n Foreign keys related operations.\n\nArgs:\n list: resources from storage\n\nReturns:\n list: restored resources", "source": "juraj_google_style"} -{"code": "def copy(self, dest):\n\n if os.path.isfile(self.path):\n shutil.copy2(self.path, dest)\n else:\n shutil.copytree(self.path, dest, symlinks=False, ignore=None)", "docstring": "Copy item to the given `dest` path.\n\nArgs:\n * dest: destination path to copy.", "source": "juraj_google_style"} -{"code": "def get_repository_config_acl(namespace, config, snapshot_id):\n\n uri = \"configurations/{0}/{1}/{2}/permissions\".format(namespace,\n config, snapshot_id)\n return __get(uri)", "docstring": "Get configuration permissions.\n\n The configuration should exist in the methods repository.\n\nArgs:\n namespace (str): Configuration namespace\n config (str): Configuration name\n snapshot_id (int): snapshot_id of the method\n\n Swagger:\n https://api.firecloud.org/#!/Method_Repository/getConfigACL", "source": "juraj_google_style"} -{"code": "def download_to_file(url, file_path, **kwargs):\n\n content = download(url, **kwargs)\n with open(file_path, \"wb\") as f:\n f.write(content)", "docstring": "Descarga un archivo a través del protocolo HTTP, en uno o más intentos, y\n escribe el contenido descargado el el path especificado.\n\nArgs:\n url (str): URL (schema HTTP) del archivo a descargar.\n file_path (str): Path del archivo a escribir. 
Si un archivo ya existe\n en el path especificado, se sobrescribirá con nuevos contenidos.\n kwargs: Parámetros para download().", "source": "juraj_google_style"} -{"code": "def _create_m_objective(w, X):\n\n clusters, cells = w.shape\n genes = X.shape[0]\n w_sum = w.sum(1)\n def objective(m):\n m = m.reshape((X.shape[0], w.shape[0]))\n d = m.dot(w)+eps\n temp = X/d\n w2 = w.dot(temp.T)\n deriv = w_sum - w2.T\n return np.sum(d - X*np.log(d))/genes, deriv.flatten()/genes\n return objective", "docstring": "Creates an objective function and its derivative for M, given W and X\n\nArgs:\n w (array): clusters x cells\n X (array): genes x cells", "source": "juraj_google_style"} -{"code": "def __init__(self, input_dim=0, model=None, input_tensor=None, monitors=None,\n cost=None, output=None, outputs=None, blocks=None, input_vars=None, target_vars=None):\n\n from deepy.core.neural_var import NeuralVariable\n from deepy.core.tensor_conversion import convert_to_theano_var\n from theano.sandbox.cuda import CudaNdarraySharedVariable\n super(ComputationalGraph, self).__init__(input_dim, input_tensor=input_tensor)\n if model:\n self.stack(model)\n if cost:\n self.stack(cost)\n if output:\n if cost:\n self._test_output = output.tensor\n else:\n self.stack(output)\n if blocks:\n self.register(*blocks)\n if input_vars:\n self.input_variables = [t.tensor for t in input_vars]\n if target_vars:\n self.target_variables = [t.tensor for t in target_vars]\n if outputs:\n if not output and not cost:\n self._test_output = None\n self._test_outputs, _, _ = convert_to_theano_var(outputs)\n\n\n if monitors:\n if type(monitors) == dict:\n monitors = monitors.items()\n for monitor in monitors:\n if type(monitor) != tuple:\n raise Exception(\"monitors shall be tuples of (name, var).\")\n name, var = monitor\n if isinstance(var, NeuralVariable):\n var = var.tensor\n if isinstance(var, CudaNdarraySharedVariable):\n var *= 1.0 # Avoid CudaNdarray\n self.training_monitors.append((name, var))\n self.testing_monitors.append((name, var))", "docstring": "Create a basic network.\n\n Parameters:\n input_dim - dimension of input variable\n model - a short hand to specify the model\n config - network configuration\n input_tensor - specify the tensor of input if it's special", "source": "juraj_google_style"} -{"code": "def reminder_date(self, reminder_date):\n\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n reminder_date = self._utils.format_datetime(reminder_date, date_format='%Y-%m-%dT%H:%M:%SZ')\n self._data['reminderDate'] = reminder_date\n request = {'reminderDate': reminder_date}\n return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Sets the task reminder_date\n\nArgs:\n reminder_date: Converted to %Y-%m-%dT%H:%M:%SZ date format", "source": "juraj_google_style"} -{"code": "def list_asgs(access_token, subscription_id, resource_group):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', resource_group,\n '/providers/Microsoft.Network/virtualNetworks/',\n '?api-version=', NETWORK_API])\n return do_get(endpoint, access_token)", "docstring": "Get details about the application security groups for a resource group.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n\nReturns:\n HTTP response. 
ASG JSON body.", "source": "juraj_google_style"} -{"code": "def get_user_groups(self, user):\n\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_user_groups(user)", "docstring": "Get user's group memberships.\n\nArgs:\n user (string): User name.\n\nReturns:\n (list): User's groups.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def change_password(username, new_password):\n\n assert username in passwd_reader.load_users(),\\\n \"Username '%s' not found!\" % username\n\n sh.ftpasswd(\n \"--change-password\",\n passwd=True, # passwd file, not group file\n name=username,\n stdin=True, # tell ftpasswd to read password from stdin\n file=settings.LOGIN_FILE,\n _in=new_password\n )\n\n reload_configuration()", "docstring": "Change password for given `username`.\n\nArgs:\n username (str): User's name.\n new_password (str): User's new password.", "source": "juraj_google_style"} -{"code": "def clean_all(G, settings):\n\n quiet = settings[\"quiet\"]\n recon = settings[\"recon\"]\n sprint = settings[\"sprint\"]\n error = settings[\"error\"]\n all_outputs = []\n for node in G.nodes(data=True):\n if \"output\" in node[1]:\n for item in get_all_outputs(node[1]):\n all_outputs.append(item)\n all_outputs.append(\".shastore\")\n retcode = 0\n for item in sorted(all_outputs):\n if os.path.isfile(item):\n if recon:\n sprint(\"Would remove file: {}\".format(item))\n continue\n sprint(\"Attempting to remove file '{}'\", level=\"verbose\")\n try:\n os.remove(item)\n sprint(\"Removed file\", level=\"verbose\")\n except:\n errmes = \"Error: file '{}' failed to be removed\"\n error(errmes.format(item))\n retcode = 1\n if not retcode and not recon:\n sprint(\"All clean\", color=True)\n return retcode", "docstring": "Removes all the output files from all targets. Takes\n the graph as the only argument\n\nArgs:\n The networkx graph object\n The settings dictionary\n\nReturns:\n 0 if successful\n 1 if removing even one file failed", "source": "juraj_google_style"} -{"code": "def GetMessages(self, formatter_mediator, event):\n\n if self.DATA_TYPE != event.data_type:\n raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n event.data_type))\n\n event_values = event.CopyToDict()\n\n login_type = event_values.get('type', None)\n if login_type is None:\n status = 'N/A'\n else:\n status = self._STATUS_TYPES.get(login_type, 'UNKNOWN')\n\n event_values['status'] = status\n\n return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\n formatter_mediator (FormatterMediator): mediates the interactions\n between formatters and other components, such as storage and Windows\n EventLog resources.\n event (EventObject): event.\n\nReturns:\n tuple(str, str): formatted message string and short message string.\n\nRaises:\n WrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj_google_style"} -{"code": "def percentile(self, percent):\n\n # Sanity check: Any value over 100 should become 100.\n if percent >= 100:\n percent = 100\n\n # Determine the actual target number.\n target = len(self) - len(self) * (percent / 100)\n\n # Iterate over the values in reverse, dropping the target by the\n # number of times each value has been seen. 
When the target passes\n # 0, return the value we are currently viewing.\n for k in reversed(sorted(self._data.keys())):\n target -= self._data[k]\n if target < 0:\n return k\n\n # The only way to get here is if there was no data.\n # In this case, just return 10 seconds.\n return 10", "docstring": "Return the value that is the Nth precentile in the histogram.\n\nArgs:\n percent (Union[int, float]): The precentile being sought. The\n default consumer implementations use consistently use ``99``.\n\nReturns:\n int: The value corresponding to the requested percentile.", "source": "juraj_google_style"} -{"code": "def _get_args(cls, args):\n # type: (tuple) -> Tuple[type, slice, Callable]\n\n if isinstance(args, tuple):\n if not len(args) == 2:\n raise TypeError(\n \"{}[...] takes one or two argument.\".format(cls.__name__)\n )\n return super(_ValidationBoundedMeta, cls)._get_args(\n (args[0], None, args[1])\n )\n return super(_ValidationBoundedMeta, cls)._get_args((Any, None, args))", "docstring": "Return the parameters necessary to check type boundaries.\n\nArgs:\n args: A tuple with one or two parameters: A type to cast the\n value passed, and a predicate function to use for bounds\n checking.\n\nReturns:\n A tuple with three parameters: a type, a slice, and the predicate\n function. If no type was passed in args, the type defaults to Any.", "source": "juraj_google_style"} -{"code": "def __init__(self, protojson_protocol=None, **kwargs):\n\n super(MessageJSONEncoder, self).__init__(**kwargs)\n self.__protojson_protocol = (\n protojson_protocol or ProtoJson.get_default())", "docstring": "Constructor.\n\nArgs:\n protojson_protocol: ProtoJson instance.", "source": "juraj_google_style"} -{"code": "def dump(self,format='ttl'):\n\n\n\n\t\treturn self.rdf.graph.serialize(format=format).decode('utf-8')", "docstring": "Convenience method to return RDF data for resource,\n optionally selecting serialization format.\n Inspired by .dump from Samvera.\n\nArgs:\n format (str): expecting serialization formats accepted by rdflib.serialization(format=)", "source": "juraj_google_style"} -{"code": "def copy_submission_to_destination(self, src_filename, dst_subdir,\n submission_id):\n\n\n extension = [e for e in ALLOWED_EXTENSIONS if src_filename.endswith(e)]\n if len(extension) != 1:\n logging.error('Invalid submission extension: %s', src_filename)\n return\n dst_filename = os.path.join(self.target_dir, dst_subdir,\n submission_id + extension[0])\n cmd = ['gsutil', 'cp', src_filename, dst_filename]\n if subprocess.call(cmd) != 0:\n logging.error('Can\\'t copy submission to destination')\n else:\n logging.info('Submission copied to: %s', dst_filename)", "docstring": "Copies submission to target directory.\n\nArgs:\n src_filename: source filename of the submission\n dst_subdir: subdirectory of the target directory where submission should\n be copied to\n submission_id: ID of the submission, will be used as a new\n submission filename (before extension)", "source": "juraj_google_style"} -{"code": "def check_errors(self, is_global=False):\n\n errors = self.global_errors if is_global else self.errors\n if errors:\n print('dfTimewolf encountered one or more errors:')\n for error, critical in errors:\n print('{0:s} {1:s}'.format('CRITICAL: ' if critical else '', error))\n if critical:\n print('Critical error found. Aborting.')\n sys.exit(-1)", "docstring": "Checks for errors and exits if any of them are critical.\n\nArgs:\n is_global: If True, check the global_errors attribute. 
If false, check the\n error attribute.", "source": "juraj_google_style"} -{"code": "def genome_name_from_fasta_path(fasta_path):\n\n filename = os.path.basename(fasta_path)\n return re.sub(r'(\\.fa$)|(\\.fas$)|(\\.fasta$)|(\\.fna$)|(\\.\\w{1,}$)', '', filename)", "docstring": "Extract genome name from fasta filename\n\n Get the filename without directory and remove the file extension.\n\nExample:\n With fasta file path ``/path/to/genome_1.fasta``::\n\n fasta_path = '/path/to/genome_1.fasta'\n genome_name = genome_name_from_fasta_path(fasta_path)\n print(genome_name)\n # => \"genome_1\"\n\nArgs:\n fasta_path (str): fasta file path\n\nReturns:\n str: genome name", "source": "juraj_google_style"} -{"code": "def enable_network(self, *hostnames):\n\n def hostname_filter(hostname, req):\n if isregex(hostname):\n return hostname.match(req.url.hostname)\n return req.url.hostname == hostname\n\n for hostname in hostnames:\n self.use_network_filter(partial(hostname_filter, hostname))\n\n self.networking = True", "docstring": "Enables real networking mode, optionally passing one or multiple\n hostnames that would be used as filter.\n\n If at least one hostname matches with the outgoing traffic, the\n request will be executed via the real network.\n\nArgs:\n *hostnames: optional list of host names to enable real network\n against them. hostname value can be a regular expression.", "source": "juraj_google_style"} -{"code": "def _FilterOutPathInfoDuplicates(path_infos):\n\n pi_dict = {}\n\n for pi in path_infos:\n path_key = (pi.path_type, pi.GetPathID())\n pi_dict.setdefault(path_key, []).append(pi)\n\n def _SortKey(pi):\n return (\n pi.stat_entry.st_ctime,\n pi.stat_entry.st_mtime,\n pi.stat_entry.st_atime,\n pi.stat_entry.st_ino,\n )\n\n for pi_values in pi_dict.values():\n if len(pi_values) > 1:\n pi_values.sort(key=_SortKey, reverse=True)\n\n return [v[0] for v in pi_dict.values()]", "docstring": "Filters out duplicates from passed PathInfo objects.\n\nArgs:\n path_infos: An iterable with PathInfo objects.\n\nReturns:\n A list of PathInfo objects with duplicates removed. 
Duplicates are\n removed following this logic: they're sorted by (ctime, mtime, atime,\n inode number) in the descending order and then the first one is taken\n and the others are dropped.", "source": "juraj_google_style"} -{"code": "def __ge__(self, other):\n\n if not isinstance(other, DateTimeValues):\n raise ValueError('Other not an instance of DateTimeValues')\n\n normalized_timestamp = self._GetNormalizedTimestamp()\n other_normalized_timestamp = other._GetNormalizedTimestamp() # pylint: disable=protected-access\n\n if normalized_timestamp is None:\n return other_normalized_timestamp is None\n\n if other_normalized_timestamp is None:\n return True\n\n return normalized_timestamp >= other_normalized_timestamp", "docstring": "Determines if the date time values are greater than or equal to other.\n\nArgs:\n other (DateTimeValues): date time values to compare against.\n\nReturns:\n bool: True if the date time values are greater than or equal to other.\n\nRaises:\n ValueError: if other is not an instance of DateTimeValues.", "source": "juraj_google_style"} -{"code": "def get_gdns_publisher(config, metrics, **kwargs):\n\n builder = gdns_publisher.GDNSPublisherBuilder(\n config, metrics, **kwargs)\n return builder.build_publisher()", "docstring": "Get a GDNSPublisher client.\n\n A factory function that validates configuration and returns a\n publisher client (:interface:`gordon.interfaces.IMessageHandler`)\n provider.\n\nArgs:\n config (dict): Google Cloud DNS API related configuration.\n metrics (obj): :interface:`IMetricRelay` implementation.\n kwargs (dict): Additional keyword arguments to pass to the\n publisher.\n\nReturns:\n A :class:`GDNSPublisher` instance.", "source": "juraj_google_style"} -{"code": "def convert_selu(params, w_name, scope_name, inputs, layers, weights, names):\n\n print('Converting selu ...')\n\n if names == 'short':\n tf_name = 'SELU' + random_string(4)\n elif names == 'keep':\n tf_name = w_name\n else:\n tf_name = w_name + str(random.random())\n\n selu = keras.layers.Activation('selu', name=tf_name)\n layers[scope_name] = selu(layers[inputs[0]])", "docstring": "Convert selu layer.\n\nArgs:\n params: dictionary with layer parameters\n w_name: name prefix in state_dict\n scope_name: pytorch scope name\n inputs: pytorch node inputs\n layers: dictionary with keras tensors\n weights: pytorch state_dict\n names: use short names for keras layers", "source": "juraj_google_style"} -{"code": "def is_installed(self, name: str) -> bool:\n\n assert name is not None\n try:\n self.__docker.images.get(name)\n return True\n except docker.errors.ImageNotFound:\n return False", "docstring": "Indicates a given Docker image is installed on this server.\n\n Parameters:\n name: the name of the Docker image.\n\nReturns:\n `True` if installed; `False` if not.", "source": "juraj_google_style"} -{"code": "def d_step(self, true_frames, gen_frames):\n\n hparam_to_disc_loss = {\n \"least_squares\": gan_losses.least_squares_discriminator_loss,\n \"cross_entropy\": gan_losses.modified_discriminator_loss,\n \"wasserstein\": gan_losses.wasserstein_discriminator_loss}\n\n # Concat across batch-axis.\n _, batch_size, _, _, _ = common_layers.shape_list(true_frames)\n all_frames = tf.concat(\n [true_frames, tf.stop_gradient(gen_frames)], axis=1)\n\n all_logits = self.discriminator(all_frames)\n true_logits, fake_logits_stop = \\\n all_logits[:batch_size], all_logits[batch_size:]\n mean_true_logits = tf.reduce_mean(true_logits)\n tf.summary.scalar(\"mean_true_logits\", mean_true_logits)\n\n 
mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)\n tf.summary.scalar(\"mean_fake_logits_stop\", mean_fake_logits_stop)\n\n discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]\n gan_d_loss = discriminator_loss_func(\n discriminator_real_outputs=true_logits,\n discriminator_gen_outputs=fake_logits_stop,\n add_summaries=True)\n return gan_d_loss, true_logits, fake_logits_stop", "docstring": "Performs the discriminator step in computing the GAN loss.\n\n Applies stop-gradient to the generated frames while computing the\n discriminator loss to make sure that the gradients are not back-propagated\n to the generator. This makes sure that only the discriminator is updated.\n\nArgs:\n true_frames: True outputs\n gen_frames: Generated frames.\n\nReturns:\n d_loss: Loss component due to the discriminator.", "source": "juraj_google_style"} -{"code": "def howPlotArgs(goodFormat):\n\n if args.exportplots is not None:\n exportPlotsPath = pathlib.Path(args.exportplots)\n\n if args.showplots:\n plotter(exportPlotsPath, True, goodFormat)\n else:\n plotter(exportPlotsPath, False, goodFormat)\n elif args.showplots:\n plotter(None, True, goodFormat)\n else:\n howPlotAsk(goodFormat)", "docstring": "plots using argparse if can, if not uses howPlotask()\n\nArgs:\n goodFormat {dict} -- module : [results for module]", "source": "juraj_google_style"} -{"code": "def find_rule(condition):\n\n final_condition = re.sub('{{.*}}', '42', condition)\n ast_tokens = Condition.get_tokens(final_condition)\n ast_compressed_tokens = Condition.compress_tokens(ast_tokens)\n\n name = 'undefined'\n function = lambda tokens: False\n\n if len(ast_compressed_tokens) > 0:\n for rule in Condition.RULES:\n if Condition.match_tokens(ast_compressed_tokens, rule['types']):\n name = rule['name']\n function = rule['evaluate']\n break\n return name, ast_tokens, function", "docstring": "Find rule for given condition.\n\nArgs:\n condition (str): Python condition as string.\n\nReturns:\n str, list, function: found rule name, list of AST tokens for condition\n and verification function.", "source": "juraj_google_style"} -{"code": "def setEditable(self, editable):\n\n if not isinstance(editable, bool):\n raise TypeError('Argument is not of type bool')\n self._editable = editable", "docstring": "setter to _editable. 
apply changes while changing dtype.\n\nRaises:\n TypeError: if editable is not of type bool.\n\nArgs:\n editable (bool): apply changes while changing dtype.", "source": "juraj_google_style"} -{"code": "def check_par(chrom, pos):\n\n par = False\n\n for interval in PAR.get(chrom,[]):\n if (pos >= interval[0] and pos <= interval[1]):\n par = True\n\n return par", "docstring": "Check if a coordinate is in the PAR region\n\nArgs:\n chrom(str)\n pos(int)\n\nReturns:\n par(bool)", "source": "juraj_google_style"} -{"code": "def new(self, dev_t_high, dev_t_low):\n # type: (int, int) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('PN record already initialized!')\n\n self.dev_t_high = dev_t_high\n self.dev_t_low = dev_t_low\n\n self._initialized = True", "docstring": "Create a new Rock Ridge POSIX device number record.\n\n Parameters:\n dev_t_high - The high-order 32-bits of the device number.\n dev_t_low - The low-order 32-bits of the device number.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def index_2d(seqs: List[List[Any]], target: Any) -> Tuple[int, int]:\n\n for i in range(len(seqs)):\n for j in range(len(seqs[i])):\n if seqs[i][j] == target:\n return i, j\n raise ValueError('Item not present.')", "docstring": "Finds the first index of a target item within a list of lists.\n\nArgs:\n seqs: The list of lists to search.\n target: The item to find.\n\nRaises:\n ValueError: Item is not present.", "source": "juraj_google_style"} -{"code": "def validate_redis(self, db_data, user_data, oper):\n\n passed = True\n # convert any int to string since playbooks don't support int values\n if isinstance(db_data, int):\n db_data = str(db_data)\n if isinstance(user_data, int):\n user_data = str(user_data)\n\n # try to sort list of strings for simple comparisons\n # if list has a more complex data structure the sort will fail\n if isinstance(db_data, (list)):\n try:\n db_data = sorted(db_data)\n except TypeError:\n # self.log.debug('[validate] could not sort list')\n pass\n if isinstance(user_data, (list)):\n try:\n user_data = sorted(user_data)\n except TypeError:\n # self.log.debug('[validate] could not sort list')\n pass\n\n if oper not in self.operators:\n self.log.error('Invalid operator provided ({})'.format(oper))\n return False\n\n # compare the data\n if self.operators.get(oper)(db_data, user_data):\n self.reports.profile_validation(True)\n else:\n self.reports.profile_validation(False)\n passed = False\n\n # log validation data in a readable format\n self.validate_log_output(passed, db_data, user_data, oper)\n\n return passed", "docstring": "Validate data in Redis.\n\nArgs:\n db_data (str): The data store in Redis.\n user_data (str): The user provided data.\n oper (str): The comparison operator.\n\nReturns:\n bool: True if the data passed validation.", "source": "juraj_google_style"} -{"code": "def PluginAssets(self, plugin_name):\n\n with self._accumulators_mutex:\n # To avoid nested locks, we construct a copy of the run-accumulator map\n items = list(six.iteritems(self._accumulators))\n\n return {run: accum.PluginAssets(plugin_name) for run, accum in items}", "docstring": "Get index of runs and assets for a given plugin.\n\nArgs:\n plugin_name: Name of the plugin we are checking for.\n\nReturns:\n A dictionary that maps from run_name to a list of plugin\n assets for that run.", "source": "juraj_google_style"} -{"code": "def update_endpoint(self, endpoint_name, endpoint_config_name):\n\n if not _deployment_entity_exists(lambda: 
self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)):\n raise ValueError('Endpoint with name \"{}\" does not exist; please use an existing endpoint name'\n .format(endpoint_name))\n\n self.sagemaker_client.update_endpoint(EndpointName=endpoint_name,\n EndpointConfigName=endpoint_config_name)\n return endpoint_name", "docstring": "Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request\n\n Raise an error if endpoint with endpoint_name does not exist.\n\nArgs:\n endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update.\n endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.\n\nReturns:\n str: Name of the Amazon SageMaker ``Endpoint`` being updated.", "source": "juraj_google_style"} -{"code": "def content_ratings(self, **kwargs):\n\n path = self._get_id_path('content_ratings')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Get the content ratings for a TV Series.\n\nArgs:\n language: (optional) ISO 639 code.\n append_to_response: (optional) Comma separated, any collection\n method.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "source": "juraj_google_style"} -{"code": "def create(self, data, **kwargs):\n\n self._check_missing_create_attrs(data)\n server_data = self.gitlab.http_post(self.path, post_data=data,\n **kwargs)\n source_issue = ProjectIssue(self._parent.manager,\n server_data['source_issue'])\n target_issue = ProjectIssue(self._parent.manager,\n server_data['target_issue'])\n return source_issue, target_issue", "docstring": "Create a new object.\n\nArgs:\n data (dict): parameters to send to the server to create the\n resource\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\nReturns:\n RESTObject, RESTObject: The source and target issues\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabCreateError: If the server cannot perform the request", "source": "juraj_google_style"} -{"code": "def refresh(self, only_closed=False):\n\n if only_closed:\n opened = filter(self.__check_port, self.__closed)\n self.__closed = self.__closed.difference(opened)\n self.__ports = self.__ports.union(opened)\n else:\n ports = self.__closed.union(self.__ports)\n self.__ports = set(filter(self.__check_port, ports))\n self.__closed = ports.difference(self.__ports)", "docstring": "refresh ports status\n\nArgs:\n only_closed - check status only for closed ports", "source": "juraj_google_style"} -{"code": "def percentOutputsStableOverNTimeSteps(vectors, numSamples=None):\n\n\n # ----------------------------------------------------------------------\n # Calculate the stability\n totalSamples = len(vectors)\n windowSize = numSamples\n\n # Process each window\n numWindows = 0\n pctStable = 0\n\n for wStart in range(0, totalSamples-windowSize+1):\n # Count how many elements are active for the entire time\n data = vectors[wStart:wStart+windowSize]\n outputSums = data.sum(axis=0)\n stableOutputs = (outputSums == windowSize).sum()\n\n # Accumulated\n samplePctStable = float(stableOutputs) / data[0].sum()\n print samplePctStable\n pctStable += samplePctStable\n numWindows += 1\n\n # Return percent average over all possible windows\n return float(pctStable) / numWindows", "docstring": "Returns the percent of the outputs that remain completely stable over\n N time steps.\n\n Parameters:\n -----------------------------------------------\n vectors: the vectors for which the stability is calculated\n numSamples: the number of time steps where stability is counted\n\n For each window of numSamples, count how many outputs are active during\n the entire window.", "source": "juraj_google_style"} -{"code": "def put_headers_in_environ(headers, environ):\n\n for key, value in headers:\n environ['HTTP_%s' % key.upper().replace('-', '_')] = value", "docstring": "Given a list of headers, put them into environ based on PEP-333.\n\n This converts headers to uppercase, prefixes them with 'HTTP_', and\n converts dashes to underscores before adding them to the environ dict.\n\nArgs:\n headers: A list of (header, value) tuples. The HTTP headers to add to the\n environment.\n environ: An environ dict for the request as defined in PEP-333.", "source": "juraj_google_style"} -{"code": "def _ConditionalFormatMessages(self, event_values):\n\n # Using getattr here to make sure the attribute is not set to None.\n # if A.b = None, hasattr(A, b) is True but getattr(A, b, None) is False.\n string_pieces = []\n for map_index, attribute_name in enumerate(self._format_string_pieces_map):\n if not attribute_name or attribute_name in event_values:\n if attribute_name:\n attribute = event_values.get(attribute_name, None)\n # If an attribute is an int, yet has zero value we want to include\n # that in the format string, since that is still potentially valid\n # information. 
Otherwise we would like to skip it.\n # pylint: disable=unidiomatic-typecheck\n if (not isinstance(attribute, (bool, float)) and\n not isinstance(attribute, py2to3.INTEGER_TYPES) and\n not attribute):\n continue\n\n string_pieces.append(self.FORMAT_STRING_PIECES[map_index])\n\n format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)\n\n string_pieces = []\n for map_index, attribute_name in enumerate(\n self._format_string_short_pieces_map):\n if not attribute_name or event_values.get(attribute_name, None):\n string_pieces.append(self.FORMAT_STRING_SHORT_PIECES[map_index])\n short_format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)\n\n return self._FormatMessages(\n format_string, short_format_string, event_values)", "docstring": "Determines the conditional formatted message strings.\n\nArgs:\n event_values (dict[str, object]): event values.\n\nReturns:\n tuple(str, str): formatted message string and short message string.", "source": "juraj_google_style"} -{"code": "def scalar_stats(data, functions=('min', 'max', 'mean', 'std')):\n\n stats = {}\n for func in functions:\n\n stats[func] = getattr(np, func)(data)\n\n return stats", "docstring": "Calculate the stats from the given numpy functions\n\n Parameters:\n data: array of data points to be used for the stats\n\n Options:\n functions: tuple of numpy stat functions to apply on data\n\nReturns:\n Dictionary with the name of the function as key and the result\n as the respective value", "source": "juraj_google_style"} -{"code": "def flux_minimization(model, fixed, solver, weights={}):\n\n\n fba = FluxBalanceProblem(model, solver)\n\n for reaction_id, value in iteritems(fixed):\n flux = fba.get_flux_var(reaction_id)\n fba.prob.add_linear_constraints(flux >= value)\n\n fba.minimize_l1()\n\n return ((reaction_id, fba.get_flux(reaction_id))\n for reaction_id in model.reactions)", "docstring": "Minimize flux of all reactions while keeping certain fluxes fixed.\n\n The fixed reactions are given in a dictionary as reaction id\n to value mapping. 
The weighted L1-norm of the fluxes is minimized.\n\nArgs:\n model: MetabolicModel to solve.\n fixed: dict of additional lower bounds on reaction fluxes.\n solver: LP solver instance to use.\n weights: dict of weights on the L1-norm terms.\n\nReturns:\n An iterator of reaction ID and reaction flux pairs.", "source": "juraj_google_style"} -{"code": "def __init__(self, min_value, max_value, scaling_type='Auto'):\n\n self.min_value = min_value\n self.max_value = max_value\n self.scaling_type = scaling_type", "docstring": "Initialize a parameter range.\n\nArgs:\n min_value (float or int): The minimum value for the range.\n max_value (float or int): The maximum value for the range.\n scaling_type (str): The scale used for searching the range during tuning (default: 'Auto').\n Valid values: 'Auto', 'Linear', 'Logarithmic' and 'ReverseLogarithmic'.", "source": "juraj_google_style"} -{"code": "def get_content(self, url):\n\n cache_path = self._url_to_path(url)\n try:\n with open(cache_path, 'rb') as f:\n return f.read()\n except IOError:\n return None", "docstring": "Returns the content of a cached resource.\n\nArgs:\n url: The url of the resource\n\nReturns:\n The content of the cached resource or None if not in the cache", "source": "juraj_google_style"} -{"code": "def ParseHeader(table):\n\n precondition.AssertIterableType(table, dict)\n\n prototype = None # type: List[Text]\n\n for row in table:\n columns = list(iterkeys(row))\n if prototype is None:\n prototype = columns\n elif prototype != columns:\n message = \"Expected columns '{expected}', got '{actual}' for table {json}\"\n message = message.format(expected=prototype, actual=columns, json=table)\n raise ValueError(message)\n\n result = rdf_osquery.OsqueryHeader()\n for name in prototype or []:\n result.columns.append(rdf_osquery.OsqueryColumn(name=name))\n return result", "docstring": "Parses header of osquery output.\n\nArgs:\n table: A table in a \"parsed JSON\" representation.\n\nReturns:\n A parsed `rdf_osquery.OsqueryHeader` instance.", "source": "juraj_google_style"} -{"code": "def build_vep_string(vep_info, vep_columns):\n\n logger = getLogger(__name__)\n logger.debug(\"Building vep string from {0}\".format(vep_info))\n logger.debug(\"Found vep headers {0}\".format(vep_columns))\n vep_strings = []\n for vep_annotation in vep_info:\n try:\n vep_info_list = [\n vep_annotation[vep_key] for vep_key in vep_columns\n ]\n except KeyError:\n raise SyntaxError(\"Vep entry does not correspond to vep headers\")\n\n vep_strings.append('|'.join(vep_info_list))\n return ','.join(vep_strings)", "docstring": "Build a vep string formatted string.\n\n Take a list with vep annotations and build a new vep string\n\nArgs:\n vep_info (list): A list with vep annotation dictionaries\n vep_columns (list): A list with the vep column names found in the\n header of the vcf\n\nReturns:\n string: A string with the proper vep annotations", "source": "juraj_google_style"} -{"code": "def __init__(self, order_dict, default_order=None):\n\n self.order_dict = order_dict.copy()\n self.default_order = default_order", "docstring": "Create a reorderer.\n\nArgs:\n order_dict (dict of (str, `PackageOrder`): Orderers to apply to\n each package family.\n default_order (`PackageOrder`): Orderer to apply to any packages\n not specified in `order_dict`.", "source": "juraj_google_style"} -{"code": "def register_entity(self, entity_value, entity_type, alias_of=None):\n\n if alias_of:\n self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))\n else:\n 
self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))\n self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept'))", "docstring": "Register an entity to be tagged in potential parse results\n\nArgs:\n entity_value(str): the value/proper name of an entity instance (Ex: \"The Big Bang Theory\")\n entity_type(str): the type/tag of an entity instance (Ex: \"Television Show\")", "source": "juraj_google_style"} -{"code": "def StartsWith(this, that):\n\n this_iter = iter(this)\n that_iter = iter(that)\n\n while True:\n try:\n this_value = next(that_iter)\n except StopIteration:\n return True\n\n try:\n that_value = next(this_iter)\n except StopIteration:\n return False\n\n if this_value != that_value:\n return False", "docstring": "Checks whether an items of one iterable are a prefix of another.\n\nArgs:\n this: An iterable that needs to be checked.\n that: An iterable of which items must match the prefix of `this`.\n\nReturns:\n `True` if `that` is a prefix of `this`, `False` otherwise.", "source": "juraj_google_style"} -{"code": "def coldestmonth(self, value=None):\n\n if value is not None:\n try:\n value = int(value)\n except ValueError:\n raise ValueError('value {} need to be of type int '\n 'for field `coldestmonth`'.format(value))\n if value < 1:\n raise ValueError('value need to be greater or equal 1 '\n 'for field `coldestmonth`')\n if value > 12:\n raise ValueError('value need to be smaller 12 '\n 'for field `coldestmonth`')\n\n self._coldestmonth = value", "docstring": "Corresponds to IDD Field `coldestmonth`\n\nArgs:\n value (int): value for IDD Field `coldestmonth`\n value >= 1\n value <= 12\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def plot_power_factor_mu(self, temp=600, output='eig',\n relaxation_time=1e-14, xlim=None):\n\n import matplotlib.pyplot as plt\n plt.figure(figsize=(9, 7))\n pf = self._bz.get_power_factor(relaxation_time=relaxation_time,\n output=output, doping_levels=False)[\n temp]\n plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)\n self._plot_bg_limits()\n self._plot_doping(temp)\n if output == 'eig':\n plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])\n if xlim is None:\n plt.xlim(-0.5, self._bz.gap + 0.5)\n else:\n plt.xlim(xlim)\n plt.ylabel(\"Power factor, ($\\\\mu$W/(mK$^2$))\", fontsize=30.0)\n plt.xlabel(\"E-E$_f$ (eV)\", fontsize=30.0)\n plt.xticks(fontsize=25)\n plt.yticks(fontsize=25)\n plt.tight_layout()\n return plt", "docstring": "Plot the power factor in function of Fermi level. Semi-log plot\n\nArgs:\n temp: the temperature\n xlim: a list of min and max fermi energy by default (0, and band\n gap)\n tau: A relaxation time in s. 
By default none and the plot is by\n units of relaxation time\n\nReturns:\n a matplotlib object", "source": "juraj_google_style"} -{"code": "def __init__(self, tcex, main_type, api_type, sub_type, api_entity, owner):\n\n self._tcex = tcex\n self._data = {}\n\n self._owner = owner\n self._type = main_type\n self._api_sub_type = sub_type\n self._api_type = api_type\n self._unique_id = None\n self._api_entity = api_entity\n\n self._utils = TcExUtils()\n self._tc_requests = TiTcRequest(self._tcex)", "docstring": "Initialize Class Properties.\n\nArgs:\n tcex:\n main_type:\n api_type:\n sub_type:\n api_entity:", "source": "juraj_google_style"} -{"code": "def mtztw(n,c,e,l):\n\n model = Model(\"tsptw - mtz\")\n\n x,u = {},{}\n for i in range(1,n+1):\n u[i] = model.addVar(lb=e[i], ub=l[i], vtype=\"C\", name=\"u(%s)\"%i)\n for j in range(1,n+1):\n if i != j:\n x[i,j] = model.addVar(vtype=\"B\", name=\"x(%s,%s)\"%(i,j))\n\n for i in range(1,n+1):\n model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, \"Out(%s)\"%i)\n model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, \"In(%s)\"%i)\n\n for i in range(1,n+1):\n for j in range(2,n+1):\n if i != j:\n M = max(l[i] + c[i,j] - e[j], 0)\n model.addCons(u[i] - u[j] + M*x[i,j] <= M-c[i,j], \"MTZ(%s,%s)\"%(i,j))\n\n model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), \"minimize\")\n\n model.data = x,u\n return model", "docstring": "mtzts: model for the traveling salesman problem with time windows\n (based on Miller-Tucker-Zemlin's one-index potential formulation)\n Parameters:\n - n: number of nodes\n - c[i,j]: cost for traversing arc (i,j)\n - e[i]: earliest date for visiting node i\n - l[i]: latest date for visiting node i\n Returns a model, ready to be solved.", "source": "juraj_google_style"} -{"code": "def maybe_get_common_dtype(arg_list):\n\n # Note that `all` defaults to `True` if `arg_list` is empty.\n if all(a is None for a in arg_list):\n return None\n return dtype_util.common_dtype(arg_list, tf.float32)", "docstring": "Return common dtype of arg_list, or None.\n\nArgs:\n arg_list: an iterable of items which are either `None` or have a `dtype`\n property.\n\nReturns:\n dtype: The common dtype of items in `arg_list`, or `None` if the list is\n empty or all items are `None`.", "source": "juraj_google_style"} -{"code": "def resize(self, targ_sz, new_path='tmp', resume=True, fn=None):\n\n new_ds = []\n dls = [self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl]\n if self.test_dl: dls += [self.test_dl, self.test_aug_dl]\n else: dls += [None,None]\n t = tqdm_notebook(dls)\n for dl in t: new_ds.append(self.resized(dl, targ_sz, new_path, resume, fn))\n t.close()\n return self.__class__(new_ds[0].path, new_ds, self.bs, self.num_workers, self.classes)", "docstring": "Resizes all the images in the train, valid, test folders to a given size.\n\nArgs:\n targ_sz (int): the target size\n new_path (str): the path to save the resized images (default tmp)\n resume (bool): if True, check for images in the DataSet that haven't been resized yet (useful if a previous resize\n operation was aborted)\n fn (function): optional custom resizing function", "source": "juraj_google_style"} -{"code": "def mark_locations(h,section,locs,markspec='or',**kwargs):\n\n\n # get list of cartesian coordinates specifying section path\n xyz = get_section_path(h,section)\n (r,theta,phi) = sequential_spherical(xyz)\n rcum = np.append(0,np.cumsum(r))\n\n # convert locs into lengths from the beginning of the path\n if type(locs) is float or type(locs) is 
np.float64:\n locs = np.array([locs])\n if type(locs) is list:\n locs = np.array(locs)\n lengths = locs*rcum[-1]\n\n # find cartesian coordinates for markers\n xyz_marks = []\n for targ_length in lengths:\n xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi))\n xyz_marks = np.array(xyz_marks)\n\n # plot markers\n line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \\\n xyz_marks[:,2], markspec, **kwargs)\n return line", "docstring": "Marks one or more locations on along a section. Could be used to\n mark the location of a recording or electrical stimulation.\n\nArgs:\n h = hocObject to interface with neuron\n section = reference to section\n locs = float between 0 and 1, or array of floats\n optional arguments specify details of marker\n\nReturns:\n line = reference to plotted markers", "source": "juraj_google_style"} -{"code": "def fragment_search(self, fragement:str) -> List[dict]:\n\n fragement = self.extract_fragment(fragement)\n ilx_rows = self.fragment2rows.get(fragement)\n if not ilx_rows:\n return None\n else:\n return ilx_rows", "docstring": "Returns the rows in InterLex associated with the fragment\n\nNote:\n Pressumed to have duplicate fragements in InterLex\n\nArgs:\n fragment: The fragment_id of the curie pertaining to the ontology\n\nReturns:\n None or List[dict]", "source": "juraj_google_style"} -{"code": "def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):\n\n # NOTE: To eliminate the dangers of accidentally signing a condition by\n # reference, we remove the reference of input_ here\n # intentionally. If the user of this class knows how to use it,\n # this should never happen, but then again, never say never.\n input_ = deepcopy(input_)\n public_key = input_.owners_before[0]\n message = sha3_256(message.encode())\n if input_.fulfills:\n message.update('{}{}'.format(\n input_.fulfills.txid, input_.fulfills.output).encode())\n\n try:\n # cryptoconditions makes no assumptions of the encoding of the\n # message to sign or verify. 
It only accepts bytestrings\n input_.fulfillment.sign(\n message.digest(), base58.b58decode(key_pairs[public_key].encode()))\n except KeyError:\n raise KeypairMismatchException('Public key {} is not a pair to '\n 'any of the private keys'\n .format(public_key))\n return input_", "docstring": "Signs a Ed25519Fulfillment.\n\nArgs:\n input_ (:class:`~bigchaindb.common.transaction.\n Input`) The input to be signed.\n message (str): The message to be signed\n key_pairs (dict): The keys to sign the Transaction with.", "source": "juraj_google_style"} -{"code": "def __init__(\n self,\n name,\n named_tensors=None,\n scope='output',\n summary_labels=()\n ):\n\n self.name = name\n super(Output, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Output layer.\n\nArgs:\n output: A string that names the tensor, will be added to available inputs", "source": "juraj_google_style"} -{"code": "def __init__(self, path, exts, fields, **kwargs):\n\n if not isinstance(fields[0], (tuple, list)):\n fields = [('src', fields[0]), ('trg', fields[1])]\n\n src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)\n\n examples = []\n with io.open(src_path, mode='r', encoding='utf-8') as src_file, \\\n io.open(trg_path, mode='r', encoding='utf-8') as trg_file:\n for src_line, trg_line in zip(src_file, trg_file):\n src_line, trg_line = src_line.strip(), trg_line.strip()\n if src_line != '' and trg_line != '':\n examples.append(data.Example.fromlist(\n [src_line, trg_line], fields))\n\n super(TranslationDataset, self).__init__(examples, fields, **kwargs)", "docstring": "Create a TranslationDataset given paths and fields.\n\nArgs:\n path: Common prefix of paths to the data files for both languages.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.", "source": "juraj_google_style"} -{"code": "def read(cls, data):\n\n if isinstance(data, pd.DataFrame):\n return cls((json.loads(\n to_json_stat(data, output='dict', version='2.0'),\n object_pairs_hook=OrderedDict)))\n elif isinstance(data, OrderedDict):\n return cls(data)\n elif (isinstance(data, basestring)\n and data.startswith((\"http://\", \"https://\",\n \"ftp://\", \"ftps://\"))):\n # requests will do the rest...\n return cls(request(data))\n elif isinstance(data, basestring):\n try:\n json_dict = json.loads(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise\n else:\n try:\n json_dict = json.load(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise", "docstring": "Reads data from URL, Dataframe, JSON string, JSON file or\n OrderedDict.\n\nArgs:\n data: can be a Pandas Dataframe, a JSON file, a JSON string,\n an OrderedDict or a URL pointing to a JSONstat file.\n\nReturns:\n An object of class Dataset populated with data.", "source": "juraj_google_style"} -{"code": "def flatten(l):\n\n for el in l:\n # I don;t want dict to be flattened\n if isinstance(el, Iterable) and not isinstance(\n el, (str, bytes)) and not isinstance(el, dict):\n yield from flatten(el)\n else:\n yield el", "docstring": "Flatten a multi-deminision list and return a iterable\n\n Note that dict and str will not be expanded, instead, they will be kept as a single element.\n\nArgs:\n l (list): The list needs to be flattened\n\nReturns:\n A iterable of flattened list. 
To have a list instead use ``list(flatten(l))``", "source": "juraj_google_style"} -{"code": "def frame(self, locator=None, *args, **kwargs):\n\n\n self.switch_to_frame(self._find_frame(locator, *args, **kwargs))\n try:\n yield\n finally:\n self.switch_to_frame(\"parent\")", "docstring": "Execute the wrapped code within the given iframe using the given frame or frame name/id.\n May not be supported by all drivers.\n\nArgs:\n locator (str | Element, optional): The name/id of the frame or the frame's element.\n Defaults to the only frame in the document.", "source": "juraj_google_style"} -{"code": "def _get_events_list(object_key: str) -> List[str]:\n\n return DB.get_list(_keys.events_list(object_key))", "docstring": "Get list of event ids for the object with the specified key.\n\nArgs:\n object_key (str): Key of an object in the database.", "source": "juraj_google_style"} -{"code": "def match(self, message: Message) -> bool:\n\n if self.template:\n return self.template.match(message)\n return True", "docstring": "Matches a message with the behaviour's template\n\nArgs:\n message(spade.message.Message): the message to match with\n\nReturns:\n bool: wheter the messaged matches or not", "source": "juraj_google_style"} -{"code": "def add_metric(self, labels, value, timestamp=None):\n\n self.samples.append(Sample(\n self.name + '_info',\n dict(dict(zip(self._labelnames, labels)), **value),\n 1,\n timestamp,\n ))", "docstring": "Add a metric to the metric family.\n\nArgs:\n labels: A list of label values\n value: A dict of labels", "source": "juraj_google_style"} -{"code": "def log_estimator_evaluation_result(self, eval_results):\n\n if not isinstance(eval_results, dict):\n tf.logging.warning(\"eval_results should be directory for logging. Got %s\",\n type(eval_results))\n return\n global_step = eval_results[tf.GraphKeys.GLOBAL_STEP]\n for key in sorted(eval_results):\n if key != tf.GraphKeys.GLOBAL_STEP:\n self.log_metric(key, eval_results[key], global_step=global_step)", "docstring": "Log the evaluation result for a estimator.\n\n The evaluate result is a directory that contains metrics defined in\n model_fn. 
It also contains a entry for global_step which contains the value\n of the global step when evaluation was performed.\n\nArgs:\n eval_results: dict, the result of evaluate() from a estimator.", "source": "juraj_google_style"} -{"code": "def download_patric_genomes(self, ids, force_rerun=False):\n\n ids = ssbio.utils.force_list(ids)\n\n counter = 0\n log.info('Downloading sequences from PATRIC...')\n for patric_id in tqdm(ids):\n f = ssbio.databases.patric.download_coding_sequences(patric_id=patric_id, seqtype='protein',\n outdir=self.sequences_by_organism_dir,\n force_rerun=force_rerun)\n if f:\n self.load_strain(patric_id, f)\n counter += 1\n log.debug('{}: downloaded sequence'.format(patric_id))\n else:\n log.warning('{}: unable to download sequence'.format(patric_id))\n\n log.info('Created {} new strain GEM-PROs, accessible at \"strains\" attribute'.format(counter))", "docstring": "Download genome files from PATRIC given a list of PATRIC genome IDs and load them as strains.\n\nArgs:\n ids (str, list): PATRIC ID or list of PATRIC IDs\n force_rerun (bool): If genome files should be downloaded again even if they exist", "source": "juraj_google_style"} -{"code": "def _step(self, actions):\n\n\n # Pre-conditions: common_preconditions, see `assert_common_preconditions`.\n # : len(actions) == len(self._envs)\n self.assert_common_preconditions()\n assert len(actions) == len(self._envs)\n\n observations = []\n rewards = []\n dones = []\n infos = []\n\n # Take steps in all environments.\n for env, action in zip(self._envs, actions):\n observation, reward, done, info = env.step(action)\n\n observations.append(observation)\n rewards.append(reward)\n dones.append(done)\n infos.append(info)\n\n # Convert each list (observations, rewards, ...) into np.array and return a\n # tuple.\n return tuple(map(np.stack, [observations, rewards, dones, infos]))", "docstring": "Takes a step in all environments, shouldn't pre-process or record.\n\n Subclasses should override this to do the actual step if something other\n than the default implementation is desired.\n\nArgs:\n actions: (np.ndarray) with first dimension equal to the batch size.\n\nReturns:\n a tuple of stacked raw observations, raw rewards, dones and infos.", "source": "juraj_google_style"} -{"code": "def get_environment_details(zone, environment):\n\n default_context = google.datalab.Context.default()\n url = (Api._ENDPOINT + (Api._ENVIRONMENTS_PATH_FORMAT % (default_context.project_id, zone,\n environment)))\n\n return google.datalab.utils.Http.request(url, credentials=default_context.credentials)", "docstring": "Issues a request to Composer to get the environment details.\n\nArgs:\n zone: GCP zone of the composer environment\n environment: name of the Composer environment\n\nReturns:\n A parsed result object.\n\nRaises:\n Exception if there is an error performing the operation.", "source": "juraj_google_style"} -{"code": "def songs_delete(self, songs):\n\n\n\t\tmutations = [mc_calls.TrackBatch.delete(song['id']) for song in songs]\n\t\tresponse = self._call(\n\t\t\tmc_calls.TrackBatch,\n\t\t\tmutations\n\t\t)\n\n\t\tsuccess_ids = [\n\t\t\tres['id']\n\t\t\tfor res in response.body['mutate_response']\n\t\t\tif res['response_code'] == 'OK'\n\t\t]\n\n\t\t# TODO: Report failures.\n\t\t# failure_ids = [\n\t\t# \tres['id']\n\t\t# \tfor res in response.body['mutate_response']\n\t\t# \tif res['response_code'] != 'OK'\n\t\t# ]\n\n\t\treturn success_ids", "docstring": "Delete songs from library.\n\n Parameters:\n song (list): A list of song 
dicts.\n\nReturns:\n list: Successfully deleted song IDs.", "source": "juraj_google_style"} -{"code": "def __init__(self, env, past_indices, flatten):\n\n if 0 not in past_indices:\n raise KeyError('Past indices should include 0 for the current frame.')\n self._env = env\n self._past_indices = past_indices\n self._step = 0\n self._buffer = None\n self._capacity = max(past_indices) + 1\n self._flatten = flatten", "docstring": "Augment the observation with past observations.\n\n Implemented as a Numpy ring buffer holding the necessary past observations.\n\nArgs:\n env: OpenAI Gym environment to wrap.\n past_indices: List of non-negative integers indicating the time offsets\n from the current time step of observations to include.\n flatten: Concatenate the past observations rather than stacking them.\n\nRaises:\n KeyError: The current observation is not included in the indices.", "source": "juraj_google_style"} -{"code": "def are_all_matches_terminal(self,\n predicate: Callable[[ops.Operation], bool]):\n\n return all(\n self.next_moment_operating_on(op.qubits, i + 1) is None for\n (i, op) in self.findall_operations(predicate)\n )", "docstring": "Check whether all of the ops that satisfy a predicate are terminal.\n\nArgs:\n predicate: A predicate on ops.Operations which is being checked.\n\nReturns:\n Whether or not all `Operation` s in a circuit that satisfy the\n given predicate are terminal.", "source": "juraj_google_style"} -{"code": "def on_moved(self, event):\n\n if not self._event_error:\n # We are only interested for final file, not transitional file\n # from editors (like *.part)\n pathtools_options = {\n 'included_patterns': self.patterns,\n 'excluded_patterns': self.ignore_patterns,\n 'case_sensitive': self.case_sensitive,\n }\n # Apply pathtool matching on destination since Watchdog only\n # automatically apply it on source\n if match_path(event.dest_path, **pathtools_options):\n self.logger.info(u\"Change detected from a move on: %s\",\n event.dest_path)\n self.compile_dependencies(event.dest_path)", "docstring": "Called when a file or a directory is moved or renamed.\n\n Many editors don't directly change a file, instead they make a\n transitional file like ``*.part`` then move it to the final filename.\n\nArgs:\n event: Watchdog event, either ``watchdog.events.DirMovedEvent`` or\n ``watchdog.events.FileModifiedEvent``.", "source": "juraj_google_style"} -{"code": "def set_approvers(self, approver_ids=[], approver_group_ids=[], **kwargs):\n\n path = '%s/%s/approvers' % (self._parent.manager.path,\n self._parent.get_id())\n data = {'approver_ids': approver_ids,\n 'approver_group_ids': approver_group_ids}\n self.gitlab.http_put(path, post_data=data, **kwargs)", "docstring": "Change MR-level allowed approvers and approver groups.\n\nArgs:\n approver_ids (list): User IDs that can approve MRs\n approver_group_ids (list): Group IDs whose members can approve MRs\n\nRaises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabUpdateError: If the server failed to perform the request", "source": "juraj_google_style"} -{"code": "def get_review(review_struct):\n\n review_fn = _resource_context(\"review.rst\")\n\n # read review template\n with open(review_fn) as f:\n review = f.read()\n\n # generate qr code\n with NamedTemporaryFile(suffix=\".png\") as qr_file:\n url = pyqrcode.create(review_struct.internal_url)\n url.png(qr_file.name, scale=5)\n\n # save the file\n qr_file.flush()\n qr_file.seek(0)\n\n # generate template\n review = Template(review).substitute(\n 
content=review_struct.get_rst(),\n datum=time.strftime(\"%d.%m.%Y\", time.localtime()),\n cas=time.strftime(\"%H:%M\", time.localtime()),\n resources_path=RES_PATH,\n qr_path=qr_file.name,\n )\n\n return gen_pdf(\n review,\n open(_resource_context(\"review_style.json\")).read(),\n )", "docstring": "Generate review from `review_struct`.\n\nArgs:\n review_struct (obj): :class:`.GenerateReview` instance.\n\nReturns:\n obj: StringIO file instance containing PDF file.", "source": "juraj_google_style"} -{"code": "def Analyze(self, hashes):\n\n logger.debug(\n 'Opening connection to {0:s}:{1:d}'.format(self._host, self._port))\n\n nsrl_socket = self._GetSocket()\n if not nsrl_socket:\n self.SignalAbort()\n return []\n\n hash_analyses = []\n for digest in hashes:\n response = self._QueryHash(nsrl_socket, digest)\n if response is None:\n continue\n\n hash_analysis = interface.HashAnalysis(digest, response)\n hash_analyses.append(hash_analysis)\n\n nsrl_socket.close()\n\n logger.debug(\n 'Closed connection to {0:s}:{1:d}'.format(self._host, self._port))\n\n return hash_analyses", "docstring": "Looks up hashes in nsrlsvr.\n\nArgs:\n hashes (list[str]): hash values to look up.\n\nReturns:\n list[HashAnalysis]: analysis results, or an empty list on error.", "source": "juraj_google_style"} -{"code": "def _set_notification(self, conn, char, enabled, timeout=1.0):\n\n\n if 'client_configuration' not in char:\n return False, {'reason': 'Cannot enable notification without a client configuration attribute for characteristic'}\n\n props = char['properties']\n if not props.notify:\n return False, {'reason': 'Cannot enable notification on a characteristic that does not support it'}\n\n value = char['client_configuration']['value']\n\n #Check if we don't have to do anything\n current_state = bool(value & (1 << 0))\n if current_state == enabled:\n return\n\n if enabled:\n value |= 1 << 0\n else:\n value &= ~(1 << 0)\n\n char['client_configuration']['value'] = value\n\n valarray = struct.pack(\"b and -1 if b->a)", "source": "juraj_google_style"} -{"code": "def does_attribute_meet_condition(self, attribute, conditions):\n\n if conditions is None or len(conditions) == 0:\n return True\n\n for attribute_name, attribute_value in conditions.items():\n value = getattr(attribute, attribute_name, False)\n if value != attribute_value and bool(value) != attribute_value:\n return False\n\n return True", "docstring": "Check if the attribute meet all the given conditions\n\nArgs:\n attribute: the attribute information\n conditions: a dictionary of condition to match\n\nReturns:\n True if the attribute match all conditions. 
False otherwise", "source": "juraj_google_style"} -{"code": "def execute(self, action):\n\n\n assert action in self.possible_actions\n\n self.remaining_cycles -= 1\n index = int(bitstrings.BitString(\n self.current_situation[:self.address_size]\n ))\n bit = self.current_situation[self.address_size + index]\n return action == bit", "docstring": "Execute the indicated action within the environment and\n return the resulting immediate reward dictated by the reward\n program.\n\n Usage:\n immediate_reward = scenario.execute(selected_action)\n\nArgs:\n action: The action to be executed within the current situation.\n\nReturns:\n A float, the reward received for the action that was executed,\n or None if no reward is offered.", "source": "juraj_google_style"} -{"code": "def do_ams_auth(endpoint, body):\n\n headers = {\"content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": json_acceptformat}\n return requests.post(endpoint, data=body, headers=headers)", "docstring": "Acquire Media Services Authentication Token.\n\nArgs:\n endpoint (str): Azure Media Services Initial Endpoint.\n body (str): A Content Body.\n\nReturns:\n HTTP response. JSON body.", "source": "juraj_google_style"} -{"code": "def intersects(self, rect, edges=False):\n\n # Not even touching\n if (self.bottom > rect.top or \\\n self.top < rect.bottom or \\\n self.left > rect.right or \\\n self.right < rect.left):\n return False\n\n # Discard edge intersects\n if not edges:\n if (self.bottom == rect.top or \\\n self.top == rect.bottom or \\\n self.left == rect.right or \\\n self.right == rect.left):\n return False\n\n # Discard corner intersects \n if (self.left == rect.right and self.bottom == rect.top or \\\n self.left == rect.right and rect.bottom == self.top or \\\n rect.left == self.right and self.bottom == rect.top or \\\n rect.left == self.right and rect.bottom == self.top):\n return False\n\n return True", "docstring": "Detect intersections between this rectangle and rect.\n\nArgs:\n rect (Rectangle): Rectangle to test for intersections.\n edges (bool): Accept edge touching rectangles as intersects or not\n\nReturns:\n bool: True if the rectangles intersect, False otherwise", "source": "juraj_google_style"} -{"code": "def Stream(self, reader, amount=None):\n\n if amount is None:\n amount = float(\"inf\")\n\n data = reader.Read(min(self.chunk_size, amount))\n if not data:\n return\n\n amount -= len(data)\n offset = reader.offset - len(data)\n yield Chunk(offset=offset, data=data)\n\n while amount > 0:\n # We need `len(data)` here because overlap size can be 0.\n overlap = data[len(data) - self.overlap_size:]\n\n new = reader.Read(min(self.chunk_size - self.overlap_size, amount))\n if not new:\n return\n\n data = overlap + new\n\n amount -= len(new)\n offset = reader.offset - len(data)\n yield Chunk(offset=offset, data=data, overlap=len(overlap))", "docstring": "Streams chunks of a given file starting at given offset.\n\nArgs:\n reader: A `Reader` instance.\n amount: An upper bound on number of bytes to read.\n\nYields:\n `Chunk` instances.", "source": "juraj_google_style"} -{"code": "def set_cookie(name, value):\n\n\n r = app.make_response(redirect(url_for(\"view_cookies\")))\n r.set_cookie(key=name, value=value, secure=secure_cookie())\n\n return r", "docstring": "Sets a cookie and redirects to cookie list.\n ---\n tags:\n - Cookies\n parameters:\n - in: path\n name: name\n type: string\n - in: path\n name: value\n type: string\n produces:\n - text/plain\n responses:\n 200:\n description: Set cookies and redirects 
to cookie list.", "source": "juraj_google_style"} -{"code": "def bit_to_int(x_bit, num_bits, base=2):\n\n x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))\n x_labels = [\n x_l[:, i] * tf.to_int32(base)**tf.to_int32(i) for i in range(num_bits)]\n res = sum(x_labels)\n return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1]))", "docstring": "Turn x_bit representing numbers bitwise (lower-endian) to int tensor.\n\nArgs:\n x_bit: Tensor containing numbers in a particular base to be converted to\n int.\n num_bits: Number of bits in the representation.\n base: Base of the representation.\n\nReturns:\n Integer representation of this number.", "source": "juraj_google_style"} -{"code": "def __init__(self, email=None, *args, **kwargs):\n\n if 'scopes' in kwargs:\n warnings.warn(_SCOPES_WARNING)\n kwargs['scopes'] = None\n\n # Assertion type is no longer used, but still in the\n # parent class signature.\n super(AppAssertionCredentials, self).__init__(None, *args, **kwargs)\n\n self.service_account_email = email\n self.scopes = None\n self.invalid = True", "docstring": "Constructor for AppAssertionCredentials\n\nArgs:\n email: an email that specifies the service account to use.\n Only necessary if using custom service accounts\n (see https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#createdefaultserviceaccount).", "source": "juraj_google_style"} -{"code": "def writeline(self, line, line_number):\n\n tmp_file = tempfile.TemporaryFile('w+')\n if not line.endswith(os.linesep):\n\n line += os.linesep\n try:\n\n with open(self.path, 'r') as file_handle:\n\n for count, new_line in enumerate(file_handle):\n\n if count == line_number:\n\n new_line = line\n\n tmp_file.write(new_line)\n\n tmp_file.seek(0)\n with open(self.path, 'w') as file_handle:\n\n for new_line in tmp_file:\n\n file_handle.write(new_line)\n finally:\n\n tmp_file.close()", "docstring": "Rewrite a single line in the file.\n\nArgs:\n line (str): The new text to write to the file.\n line_number (int): The line of the file to rewrite. 
Numbering\n starts at 0.", "source": "juraj_google_style"} -{"code": "def _DeserializeResponse(self, payload):\n\n # Strip off the status line.\n status_line, payload = payload.split('\\n', 1)\n _, status, _ = status_line.split(' ', 2)\n\n # Parse the rest of the response.\n parser = email_parser.Parser()\n msg = parser.parsestr(payload)\n\n # Get the headers.\n info = dict(msg)\n info['status'] = status\n\n # Create Response from the parsed headers.\n content = msg.get_payload()\n\n return http_wrapper.Response(info, content, self.__batch_url)", "docstring": "Convert string into Response and content.\n\nArgs:\n payload: Header and body string to be deserialized.\n\nReturns:\n A Response object", "source": "juraj_google_style"} -{"code": "def __add__(self, other: 'TensorFluent') -> 'TensorFluent':\n\n return self._binary_op(self, other, tf.add, tf.float32)", "docstring": "Returns a TensorFluent for the addition arithmetic operator.\n\nArgs:\n self: The first operand.\n other: The second operand.\n\nReturns:\n A TensorFluent wrapping the operator's output.", "source": "juraj_google_style"} -{"code": "def transition(self, state, message=\"\"):\n\n with self.changes_squashed:\n initial_state = self.state.value\n if self.state_set.transition_allowed(\n initial_state=initial_state, target_state=state):\n self.log.debug(\n \"%s: Transitioning from %s to %s\",\n self.mri, initial_state, state)\n if state == ss.DISABLED:\n alarm = Alarm.invalid(\"Disabled\")\n elif state == ss.FAULT:\n alarm = Alarm.major(message)\n else:\n alarm = Alarm()\n self.update_health(self, HealthInfo(alarm))\n self.state.set_value(state)\n self.state.set_alarm(alarm)\n for child, writeable in self._children_writeable[state].items():\n if isinstance(child, AttributeModel):\n child.meta.set_writeable(writeable)\n elif isinstance(child, MethodModel):\n child.set_writeable(writeable)\n else:\n raise TypeError(\"Cannot transition from %s to %s\" %\n (initial_state, state))", "docstring": "Change to a new state if the transition is allowed\n\nArgs:\n state (str): State to transition to\n message (str): Message if the transition is to a fault state", "source": "juraj_google_style"} -{"code": "def read_into(self, buffer, size=-1, *, offset=0, write_offset=0) -> None:\n\n\n return self.mglo.read_into(buffer, size, offset, write_offset)", "docstring": "Read the content into a buffer.\n\nArgs:\n buffer (bytarray): The buffer that will receive the content.\n size (int): The size. 
Value ``-1`` means all.\n\n Keyword Args:\n offset (int): The read offset.\n write_offset (int): The write offset.", "source": "juraj_google_style"} -{"code": "def df_to_array(df, netshape, nettype):\n\n if len(df) > 0:\n idx = np.array(list(map(list, df.values)))\n G = np.zeros([netshape[0], netshape[0], netshape[1]])\n if idx.shape[1] == 3:\n if nettype[-1] == 'u':\n idx = np.vstack([idx, idx[:, [1, 0, 2]]])\n idx = idx.astype(int)\n G[idx[:, 0], idx[:, 1], idx[:, 2]] = 1\n elif idx.shape[1] == 4:\n if nettype[-1] == 'u':\n idx = np.vstack([idx, idx[:, [1, 0, 2, 3]]])\n weights = idx[:, 3]\n idx = np.array(idx[:, :3], dtype=int)\n G[idx[:, 0], idx[:, 1], idx[:, 2]] = weights\n else:\n G = np.zeros([netshape[0], netshape[0], netshape[1]])\n return G", "docstring": "Returns a numpy array (snapshot representation) from thedataframe contact list\n\n Parameters:\n df : pandas df\n pandas df with columns, i,j,t.\n netshape : tuple\n network shape, format: (node, time)\n nettype : str\n 'wu', 'wd', 'bu', 'bd'\n\nReturns:\n --------\n G : array\n (node,node,time) array for the network", "source": "juraj_google_style"} -{"code": "def tsp(V,c):\n\n model = Model(\"TSP_lazy\")\n conshdlr = TSPconshdlr()\n\n x = {}\n for i in V:\n for j in V:\n if j > i:\n x[i,j] = model.addVar(vtype = \"B\",name = \"x(%s,%s)\" % (i,j))\n\n for i in V:\n model.addCons(quicksum(x[j, i] for j in V if j < i) +\n quicksum(x[i, j] for j in V if j > i) == 2, \"Degree(%s)\" % i)\n\n model.setObjective(quicksum(c[i, j] * x[i, j] for i in V for j in V if j > i), \"minimize\")\n\n model.data = x\n return model, conshdlr", "docstring": "tsp -- model for solving the traveling salesman problem with callbacks\n - start with assignment model\n - add cuts until there are no sub-cycles\n Parameters:\n - V: set/list of nodes in the graph\n - c[i,j]: cost for traversing edge (i,j)\n Returns the optimum objective value and the list of edges used.", "source": "juraj_google_style"} -{"code": "def get_victim_phone_asset(self, main_type, sub_type, unique_id, asset_id, params=None):\n\n params = params or {}\n\n return self.victim_phone_asset(main_type, sub_type, unique_id, asset_id, params=params)", "docstring": "Args:\n main_type:\n sub_type:\n unique_id:\n asset_id:\n params:\n\n Return:", "source": "juraj_google_style"} -{"code": "def get_access_token(self, http=None, additional_claims=None):\n\n if additional_claims is None:\n if self.access_token is None or self.access_token_expired:\n self.refresh(None)\n return client.AccessTokenInfo(\n access_token=self.access_token, expires_in=self._expires_in())\n else:\n # Create a 1 time token\n token, unused_expiry = self._create_token(additional_claims)\n return client.AccessTokenInfo(\n access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)", "docstring": "Create a signed jwt.\n\nArgs:\n http: unused\n additional_claims: dict, additional claims to add to\n the payload of the JWT.\n\nReturns:\n An AccessTokenInfo with the signed jwt", "source": "juraj_google_style"} -{"code": "def __init__(self, pidfile, logger, port = 64042, host = 'localhost'):\n\n super(RemoteControllerDeamon, self).__init__(pidfile, logger)\n self.__port = port\n self.__host = host\n for name in dir(self):\n method = getattr(self, name)\n if hasattr(method, 'registered_for_rpc'):\n self.register_method(method, method.registered_for_rpc.__name__)", "docstring": "Create a daemon which is controllable via jsonrpc with decorator\n\nArgs:\n pidfile (str): path to create pid file\n logger (logging.Logger): logger for the 
daemon\n port (int):\n host (str):", "source": "juraj_google_style"} -{"code": "def isHostCert(self, name):\n\n crtpath = self._getPathJoin('hosts', '%s.crt' % name)\n return os.path.isfile(crtpath)", "docstring": "Checks if a host certificate exists.\n\nArgs:\n name (str): The name of the host keypair.\n\nExample:\n Check if the host cert \"myhost\" exists:\n\n exists = cdir.isUserCert('myhost')\n\nReturns:\n bool: True if the certificate is present, False otherwise.", "source": "juraj_google_style"} -{"code": "def split(x, split_dim, num_or_size_splits, name=None):\n\n return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs", "docstring": "Like tf.split.\n\nArgs:\n x: a Tensor\n split_dim: a Dimension in x.shape.dims\n num_or_size_splits: either an integer dividing split_dim.size\n or a list of integers adding up to split_dim.size\n name: an optional string\n\nReturns:\n a list of Tensors.", "source": "juraj_google_style"} -{"code": "def __init__(self, feature_set='spe+'):\n\n filename = filenames[feature_set]\n self.segments, self.seg_dict, self.names = self._read_table(filename)\n self.seg_seq = {seg[0]: i for (i, seg) in enumerate(self.segments)}\n self.weights = self._read_weights()\n self.seg_regex = self._build_seg_regex()\n self.longest_seg = max([len(x) for x in self.seg_dict.keys()])\n self.xsampa = xsampa.XSampa()", "docstring": "Construct a FeatureTable object\n\nArgs:\n feature_set (str): the feature set that the FeatureTable will use;\n currently, there is only one of these (\"spe+\")", "source": "juraj_google_style"} -{"code": "def getCompletingSwarms(self):\n\n swarmIds = []\n for swarmId, info in self._state['swarms'].iteritems():\n if info['status'] == 'completing':\n swarmIds.append(swarmId)\n\n return swarmIds", "docstring": "Return the list of all completing swarms.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: list of active swarm Ids", "source": "juraj_google_style"} -{"code": "def edges(self, tail_head_iter):\n\n edge = self._edge_plain\n quote = self._quote_edge\n lines = (edge % (quote(t), quote(h)) for t, h in tail_head_iter)\n self.body.extend(lines)", "docstring": "Create a bunch of edges.\n\nArgs:\n tail_head_iter: Iterable of ``(tail_name, head_name)`` pairs.", "source": "juraj_google_style"} -{"code": "def combs(a, r):\n\n # Special-case for 0-length combinations\n if r == 0:\n return np.asarray([])\n\n a = np.asarray(a)\n data_type = a.dtype if r == 0 else np.dtype([('', a.dtype)] * r)\n b = np.fromiter(combinations(a, r), data_type)\n return b.view(a.dtype).reshape(-1, r)", "docstring": "NumPy implementation of ``itertools.combinations``.\n\n Return successive ``r``-length combinations of elements in the array ``a``.\n\nArgs:\n a (np.ndarray): The array from which to get combinations.\n r (int): The length of the combinations.\n\nReturns:\n np.ndarray: An array of combinations.", "source": "juraj_google_style"} -{"code": "def seek(self, offset, whence=os.SEEK_SET):\n\n if not self._is_open:\n raise IOError('Not opened.')\n\n if self._current_offset < 0:\n raise IOError(\n 'Invalid current offset: {0:d} value less than zero.'.format(\n self._current_offset))\n\n if whence == os.SEEK_CUR:\n offset += self._current_offset\n\n elif whence == os.SEEK_END:\n if self._decrypted_stream_size is None:\n self._decrypted_stream_size = self._GetDecryptedStreamSize()\n if self._decrypted_stream_size is None:\n raise IOError('Invalid decrypted stream size.')\n\n offset += 
self._decrypted_stream_size\n\n elif whence != os.SEEK_SET:\n raise IOError('Unsupported whence.')\n\n if offset < 0:\n raise IOError('Invalid offset value less than zero.')\n\n if offset != self._current_offset:\n self._current_offset = offset\n self._realign_offset = True", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\n offset (int): offset to seek.\n whence (Optional[int]): value that indicates whether offset is an\n absolute or relative position within the file.\n\nRaises:\n IOError: if the seek failed.\n OSError: if the seek failed.", "source": "juraj_google_style"} -{"code": "def get_ranking(self, alt):\n\n if self.alts_to_ranks is None:\n raise ValueError(\"Aggregate ranking must be created first\")\n try:\n rank = self.alts_to_ranks[alt]\n return rank\n except KeyError:\n raise KeyError(\"No alternative \\\"{}\\\" found in \".format(str(alt)) +\n \"the aggregate ranking\")", "docstring": "Description:\n Returns the ranking of a given alternative in the\n computed aggregate ranking. An error is thrown if\n the alternative does not exist. The ranking is the\n index in the aggregate ranking, which is 0-indexed.\n Parameters:\n alt: the key that represents an alternative", "source": "juraj_google_style"} -{"code": "def ParseFileObject(self, parser_mediator, file_object):\n\n # TODO: self._line_structures is a work-around and this needs\n # a structural fix.\n if not self._line_structures:\n raise errors.UnableToParseFile(\n 'Line structure undeclared, unable to proceed.')\n\n encoding = self._ENCODING or parser_mediator.codepage\n text_file_object = text_file.TextFile(file_object, encoding=encoding)\n\n try:\n line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)\n except UnicodeDecodeError:\n raise errors.UnableToParseFile(\n 'Not a text file or encoding not supported.')\n\n if not line:\n raise errors.UnableToParseFile('Not a text file.')\n\n if len(line) == self.MAX_LINE_LENGTH or len(\n line) == self.MAX_LINE_LENGTH - 1:\n logger.debug((\n 'Trying to read a line and reached the maximum allowed length of '\n '{0:d}. 
The last few bytes of the line are: {1:s} [parser '\n '{2:s}]').format(\n self.MAX_LINE_LENGTH, repr(line[-10:]), self.NAME))\n\n if not self._IsText(line):\n raise errors.UnableToParseFile('Not a text file, unable to proceed.')\n\n if not self.VerifyStructure(parser_mediator, line):\n raise errors.UnableToParseFile('Wrong file structure.')\n\n consecutive_line_failures = 0\n index = None\n # Set the offset to the beginning of the file.\n self._current_offset = 0\n # Read every line in the text file.\n while line:\n if parser_mediator.abort:\n break\n parsed_structure = None\n use_key = None\n # Try to parse the line using all the line structures.\n for index, (key, structure) in enumerate(self._line_structures):\n try:\n parsed_structure = structure.parseString(line)\n except pyparsing.ParseException:\n pass\n if parsed_structure:\n use_key = key\n break\n\n if parsed_structure:\n self.ParseRecord(parser_mediator, use_key, parsed_structure)\n consecutive_line_failures = 0\n if index is not None and index != 0:\n key_structure = self._line_structures.pop(index)\n self._line_structures.insert(0, key_structure)\n else:\n if len(line) > 80:\n line = '{0:s}...'.format(line[:77])\n parser_mediator.ProduceExtractionWarning(\n 'unable to parse log line: {0:s} at offset: {1:d}'.format(\n repr(line), self._current_offset))\n consecutive_line_failures += 1\n if (consecutive_line_failures >\n self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):\n raise errors.UnableToParseFile(\n 'more than {0:d} consecutive failures to parse lines.'.format(\n self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))\n\n self._current_offset = text_file_object.get_offset()\n\n try:\n line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)\n except UnicodeDecodeError:\n parser_mediator.ProduceExtractionWarning(\n 'unable to read and decode log line at offset {0:d}'.format(\n self._current_offset))\n break", "docstring": "Parses a text file-like object using a pyparsing definition.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_object (dfvfs.FileIO): file-like object.\n\nRaises:\n UnableToParseFile: when the file cannot be parsed.", "source": "juraj_google_style"} -{"code": "def find_by_field(self, table, field, field_value):\n\n sql = \"select * from {} where {} = '{}'\".format(\n table, field, field_value)\n res = self.query(sql)\n return res", "docstring": "从数据库里查询指定条件的记录\n\nArgs:\n table: 表名字 str\n field: 字段名\n field_value: 字段值\n\nReturns:\n 成功: [dict] 保存的记录\n 失败: -1 并打印返回报错信息", "source": "juraj_google_style"} -{"code": "def response_data_to_model_instance(self, response_data):\n\n # Coerce datetime strings into datetime objects\n response_data[\"datetime_created\"] = dateutil.parser.parse(\n response_data[\"datetime_created\"]\n )\n\n if response_data[\"datetime_finished\"]:\n response_data[\"datetime_finished\"] = dateutil.parser.parse(\n response_data[\"datetime_finished\"]\n )\n\n # Instantiate a model for the task instance\n return super(\n BaseTaskInstanceManager, self\n ).response_data_to_model_instance(response_data)", "docstring": "Convert response data to a task instance model.\n\nArgs:\n response_data (dict): The data from the request's response.\n\nReturns:\n :class:`saltant.models.base_task_instance.BaseTaskInstance`:\n A task instance model instance representing the task\n instance from the reponse data.", "source": "juraj_google_style"} -{"code": "def warn_once(self, msg, msg_name=None):\n\n assert isinstance(msg, str)\n 
msg_name = msg_name if msg_name else msg\n if msg_name not in warnings_given:\n warnings.warn(msg)\n warnings_given.add(msg_name)", "docstring": "Prints a warning statement just once\n\nArgs:\n msg: The warning message\n msg_name: [optional] The name of the warning. If None, the msg_name\n will be the msg itself.", "source": "juraj_google_style"} -{"code": "def reqMatchingSymbols(self, pattern: str) -> List[ContractDescription]:\n\n return self._run(self.reqMatchingSymbolsAsync(pattern))", "docstring": "Request contract descriptions of contracts that match a pattern.\n\n This method is blocking.\n\n https://interactivebrokers.github.io/tws-api/matching_symbols.html\n\nArgs:\n pattern: The first few letters of the ticker symbol, or for\n longer strings a character sequence matching a word in\n the security name.", "source": "juraj_google_style"} -{"code": "def transform(self, X):\n\n assert np.shape(X)[0] == len(self._weights), (\n 'BlendingOptimizer: Number of models to blend its predictions and weights does not match: '\n 'n_models={}, weights_len={}'.format(np.shape(X)[0], len(self._weights)))\n blended_predictions = np.average(np.power(X, self._power),\n weights=self._weights,\n axis=0) ** (1.0 / self._power)\n\n return {'y_pred': blended_predictions}", "docstring": "Performs predictions blending using the trained weights.\n\nArgs:\n X (array-like): Predictions of different models.\n Returns: dict with blended predictions (key is 'y_pred').", "source": "juraj_google_style"} -{"code": "def preprocess_data(data: List[Tuple[List[str], List[str]]], to_lower: bool = True,\n append_case: str = \"first\") -> List[Tuple[List[Tuple[str]], List[str]]]:\n\n new_data = []\n for words, tags in data:\n new_words = [process_word(word, to_lower=to_lower, append_case=append_case)\n for word in words]\n # tags could also be processed in future\n new_tags = tags\n new_data.append((new_words, new_tags))\n return new_data", "docstring": "Processes all words in data using\n :func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`.\n\nArgs:\n data: a list of pairs (words, tags), each pair corresponds to a single sentence\n to_lower: whether to lowercase\n append_case: whether to add case mark\n\nReturns:\n a list of preprocessed sentences", "source": "juraj_google_style"} -{"code": "def do_add_all_link(self, args):\n\n linkcode = 1\n group = 0\n addr = None\n params = args.split()\n if params:\n try:\n linkcode = int(params[0])\n except IndexError:\n linkcode = 1\n except ValueError:\n linkcode = None\n\n try:\n group = int(params[1])\n except IndexError:\n group = 0\n except ValueError:\n group = None\n\n try:\n addr = params[2]\n except IndexError:\n addr = None\n\n if linkcode in [0, 1, 3] and 255 >= group >= 0:\n self.loop.create_task(\n self.tools.start_all_linking(linkcode, group, addr))\n else:\n _LOGGING.error('Link code %d or group number %d not valid',\n linkcode, group)\n self.do_help('add_all_link')", "docstring": "Add an All-Link record to the IM and a device.\n\n Usage:\n add_all_link [linkcode] [group] [address]\n\nArgs:\n linkcode: 0 - PLM is responder\n 1 - PLM is controller\n 3 - PLM is controller or responder\n Default 1\n group: All-Link group number (0 - 255). 
Default 0.\n address: INSTEON device to link with (not supported by all devices)", "source": "juraj_google_style"} -{"code": "def _render_batch(self,\n non_fluents: NonFluents,\n states: Fluents, actions: Fluents, interms: Fluents,\n rewards: np.array,\n horizon: Optional[int] = None) -> None:\n\n if horizon is None:\n horizon = len(states[0][1])\n self._render_round_init(horizon, non_fluents)\n for t in range(horizon):\n s = [(s[0], s[1][t]) for s in states]\n f = [(f[0], f[1][t]) for f in interms]\n a = [(a[0], a[1][t]) for a in actions]\n r = rewards[t]\n self._render_timestep(t, s, a, f, r)\n self._render_round_end(rewards)", "docstring": "Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`\n for given `horizon`.\n\nArgs:\n states (Sequence[Tuple[str, np.array]]): A state trajectory.\n actions (Sequence[Tuple[str, np.array]]): An action trajectory.\n interms (Sequence[Tuple[str, np.array]]): An interm state trajectory.\n rewards (np.array): Sequence of rewards (1-dimensional array).\n horizon (Optional[int]): Number of timesteps.", "source": "juraj_google_style"} -{"code": "def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['b1'], group['b2']\n\n state['step'] += 1\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n denom = exp_avg_sq.sqrt().add_(group['e'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n lr_scheduled = group['lr']\n lr_scheduled *= group['schedule'].get_lr(state['step'])\n\n step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n # Add weight decay at the end (fixed version)\n if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:\n p.data.add_(-lr_scheduled * group['weight_decay'], p.data)\n\n return loss", "docstring": "Performs a single optimization step.\n\nArgs:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.", "source": "juraj_google_style"} -{"code": "def ephemeris(self, **kwargs):\n\n\n for orb in self.iter(inclusive=True, **kwargs):\n yield orb", "docstring": "Generator giving the propagation of the orbit at different dates\n\nArgs:\n start (Date)\n stop (Date or timedelta)\n step (timedelta)\n\nYields:\n Orbit", "source": "juraj_google_style"} -{"code": "def get_content_metadata(self, enterprise_customer):\n\n content_metadata = OrderedDict()\n\n # TODO: This if block can be removed when we get rid of discovery service-based catalogs.\n if enterprise_customer.catalog:\n response = self._load_data(\n self.ENTERPRISE_CUSTOMER_ENDPOINT,\n detail_resource='courses',\n 
resource_id=str(enterprise_customer.uuid),\n traverse_pagination=True,\n )\n for course in response['results']:\n for course_run in course['course_runs']:\n course_run['content_type'] = 'courserun' # Make this look like a search endpoint result.\n content_metadata[course_run['key']] = course_run\n\n for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all():\n response = self._load_data(\n self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT,\n resource_id=str(enterprise_customer_catalog.uuid),\n traverse_pagination=True,\n querystring={'page_size': 1000},\n )\n\n for item in response['results']:\n content_id = utils.get_content_metadata_item_id(item)\n content_metadata[content_id] = item\n\n return content_metadata.values()", "docstring": "Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.\n\nArgs:\n enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.\n\nReturns:\n list: List of dicts containing content metadata.", "source": "juraj_google_style"} -{"code": "def use_color(setting):\n\n if setting not in COLOR_CHOICES:\n raise InvalidColorSetting(setting)\n\n return (\n setting == 'always' or\n (setting == 'auto' and sys.stdout.isatty() and terminal_supports_color)\n )", "docstring": "Choose whether to use color based on the command argument.\n\nArgs:\n setting - Either `auto`, `always`, or `never`", "source": "juraj_google_style"} -{"code": "def supported_tasks(self, lang=None):\n\n if lang:\n collection = self.get_collection(lang=lang)\n return [x.id.split('.')[0] for x in collection.packages]\n else:\n return [x.name.split()[0] for x in self.collections() if Downloader.TASK_PREFIX in x.id]", "docstring": "Languages that are covered by a specific task.\n\nArgs:\n lang (string): Language code name.", "source": "juraj_google_style"} -{"code": "def __init__(self, parent):\n\n logger.debug(\"Initialising status bar\")\n\n super(StatusBar, self).__init__(parent)\n\n self.status = tk.StringVar()\n # Status bar\n self.statusbar = ttk.Label(self, textvariable=self.status, padding=2, anchor=\"center\")\n self.statusbar.grid(column=0, row=0, sticky=\"W E\")\n\n # Configure stretch ratios\n self.columnconfigure(0, weight=1)\n\n # Set default status\n self.set_status(False)", "docstring": "Create a new status bar.\n\nArgs:\n parent: A tk or ttk object", "source": "juraj_google_style"} -{"code": "def encipher_vigenere(plaintext, plain_vocab, key):\n\n ciphertext = []\n # generate Vigenere table\n layers = [\n ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))\n ]\n\n for i, sentence in enumerate(plaintext):\n cipher_sentence = []\n for j, character in enumerate(sentence):\n key_idx = key[j % len(key)]\n encrypted_char = layers[key_idx].encrypt_character(character)\n cipher_sentence.append(encrypted_char)\n ciphertext.append(cipher_sentence)\n\n return ciphertext", "docstring": "Encrypt plain text with given key.\n\nArgs:\n plaintext (list of list of Strings): a list of plain text to encrypt.\n plain_vocab (list of Integer): unique vocabularies being used.\n key (list of Integer): key to encrypt cipher using Vigenere table.\n\nReturns:\n ciphertext (list of Strings): encrypted plain text.", "source": "juraj_google_style"} -{"code": "def copy(self, name=None):\n\n cpy = copy.copy(self)\n if name:\n cpy.name = name\n return cpy", "docstring": "shallow copy of the instruction.\n\nArgs:\n name (str): name to be given to the copied circuit,\n if None then the name stays the 
same\n\nReturns:\n Instruction: a shallow copy of the current instruction, with the name\n updated if it was provided", "source": "juraj_google_style"} -{"code": "def match(self, subject: Expression) -> Iterator[Tuple[Pattern, Substitution]]:\n\n if not isinstance(subject, self.operation):\n return\n\n subjects = list(op_iter(subject))\n flatterms = [FlatTerm(o) for o in subjects]\n\n for i in range(len(flatterms)):\n flatterm = FlatTerm.merged(*flatterms[i:])\n\n for index in self._net._match(flatterm, collect=True):\n match_index = self._net._patterns[index][1]\n pattern, first_name, last_name = self._patterns[match_index]\n operand_count = op_len(pattern.expression) - 2\n expr_operands = subjects[i:i + operand_count]\n patt_operands = list(op_iter(pattern.expression))[1:-1]\n\n substitution = Substitution()\n if not all(itertools.starmap(substitution.extract_substitution, zip(expr_operands, patt_operands))):\n continue\n\n try:\n if first_name is not None:\n substitution.try_add_variable(first_name, tuple(subjects[:i]))\n if last_name is not None:\n substitution.try_add_variable(last_name, tuple(subjects[i + operand_count:]))\n except ValueError:\n continue\n\n for constraint in pattern.constraints:\n if not constraint(substitution):\n break\n else:\n yield pattern, substitution", "docstring": "Match the given subject against all patterns in the sequence matcher.\n\nArgs:\n subject:\n The subject that is matched. Must be constant.\n\nYields:\n A tuple :code:`(pattern, substitution)` for every matching pattern.", "source": "juraj_google_style"} -{"code": "def _convert_to_dict(data):\n\n if isinstance(data, dict):\n return data\n\n if isinstance(data, list) or isinstance(data, tuple):\n if _all_correct_list(data):\n return dict(data)\n else:\n data = zip(data[::2], data[1::2])\n return dict(data)\n else:\n raise MetaParsingException(\n \"Can't decode provided metadata - unknown structure.\"\n )", "docstring": "Convert `data` to dictionary.\n\n Tries to get sense in multidimensional arrays.\n\nArgs:\n data: List/dict/tuple of variable dimension.\n\nReturns:\n dict: If the data can be converted to dictionary.\n\nRaises:\n MetaParsingException: When the data are unconvertible to dict.", "source": "juraj_google_style"} -{"code": "def create_dir(path):\n\n full_path = abs_path(path)\n if not os.path.exists(full_path):\n try:\n os.makedirs(full_path)\n except OSError as e:\n # ignore the error for dir already exist.\n if e.errno != os.errno.EEXIST:\n raise", "docstring": "Creates a directory if it does not exist already.\n\nArgs:\n path: The path of the directory to create.", "source": "juraj_google_style"} -{"code": "def _create_gates(self, inputs, memory):\n\n # We'll create the input and forget gates at once. 
Hence, calculate double\n # the gate size.\n num_gates = 2 * self._calculate_gate_size()\n\n memory = tf.tanh(memory)\n inputs = basic.BatchFlatten()(inputs)\n gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs)\n gate_inputs = tf.expand_dims(gate_inputs, axis=1)\n gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory)\n gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2)\n input_gate, forget_gate = gates\n\n input_gate = tf.sigmoid(input_gate + self._input_bias)\n forget_gate = tf.sigmoid(forget_gate + self._forget_bias)\n\n return input_gate, forget_gate", "docstring": "Create input and forget gates for this step using `inputs` and `memory`.\n\nArgs:\n inputs: Tensor input.\n memory: The current state of memory.\n\nReturns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.", "source": "juraj_google_style"} -{"code": "def __init__(self, scope, parent, name, result, args=None, paren=False):\n\n CodeExpression.__init__(self, scope, parent, name, result, paren)\n self.arguments = args or ()", "docstring": "Constructor for operators.\n\nArgs:\n scope (CodeEntity): The program scope where this object belongs.\n parent (CodeEntity): This object's parent in the program tree.\n name (str): The name of the operator in the program.\n result (str): The return type of the operator in the program.\n\n Kwargs:\n args (tuple): Initial tuple of arguments.\n paren (bool): Whether the expression is enclosed in parentheses.", "source": "juraj_google_style"} -{"code": "def wait_any(jobs, timeout=None):\n\n return Job._wait(jobs, timeout, concurrent.futures.FIRST_COMPLETED)", "docstring": "Return when at least one of the specified jobs has completed or timeout expires.\n\nArgs:\n jobs: a Job or list of Jobs to wait on.\n timeout: a timeout in seconds to wait for. 
None (the default) means no timeout.\n\nReturns:\n A list of the jobs that have now completed or None if there were no jobs.", "source": "juraj_google_style"} -{"code": "def compose_path(pub, uuid_url=False):\n\n if uuid_url:\n return join(\n \"/\",\n UUID_DOWNLOAD_KEY,\n str(pub.uuid)\n )\n\n return join(\n \"/\",\n DOWNLOAD_KEY,\n basename(pub.file_pointer),\n basename(pub.filename)\n )", "docstring": "Compose absolute path for given `pub`.\n\nArgs:\n pub (obj): :class:`.DBPublication` instance.\n uuid_url (bool, default False): Compose URL using UUID.\n\nReturns:\n str: Absolute url-path of the publication, without server's address \\\n and protocol.\n\nRaises:\n PrivatePublicationError: When the `pub` is private publication.", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.Classify = channel.unary_unary(\n '/tensorflow.serving.PredictionService/Classify',\n request_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString,\n response_deserializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.FromString,\n )\n self.Regress = channel.unary_unary(\n '/tensorflow.serving.PredictionService/Regress',\n request_serializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.SerializeToString,\n response_deserializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.FromString,\n )\n self.Predict = channel.unary_unary(\n '/tensorflow.serving.PredictionService/Predict',\n request_serializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.SerializeToString,\n response_deserializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.FromString,\n )\n self.GetModelMetadata = channel.unary_unary(\n '/tensorflow.serving.PredictionService/GetModelMetadata',\n request_serializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.SerializeToString,\n response_deserializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def quality(self, tests, alias=None):\n\n # This is hacky... striplog should probably merge with welly...\n\n # Ignore aliases\n alias = alias or {}\n alias = alias.get('striplog', alias.get('Striplog', []))\n\n # Gather the tests.\n # First, anything called 'all', 'All', or 'ALL'.\n # Second, anything with the name of the curve we're in now.\n # Third, anything that the alias list has for this curve.\n # (This requires a reverse look-up so it's a bit messy.)\n this_tests =\\\n tests.get('all', [])+tests.get('All', [])+tests.get('ALL', [])\\\n + tests.get('striplog', tests.get('Striplog', []))\\\n + utils.flatten_list([tests.get(a) for a in alias])\n this_tests = filter(None, this_tests)\n\n # If we explicitly set zero tests for a particular key, then this\n # overrides the 'all' tests.\n if not tests.get('striplog', tests.get('Striplog', 1)):\n this_tests = []\n\n return {test.__name__: test(self) for test in this_tests}", "docstring": "Run a series of tests and return the corresponding results.\n\n Based on curve testing for ``welly``.\n\nArgs:\n tests (list): a list of functions.\n\nReturns:\n list. The results. 
Stick to booleans (True = pass) or ints.", "source": "juraj_google_style"} -{"code": "def apply_grad_processors(opt, gradprocs):\n\n assert isinstance(gradprocs, (list, tuple)), gradprocs\n for gp in gradprocs:\n assert isinstance(gp, GradientProcessor), gp\n\n class _ApplyGradientProcessor(ProxyOptimizer):\n def __init__(self, opt, gradprocs):\n self._gradprocs = gradprocs[:]\n super(_ApplyGradientProcessor, self).__init__(opt)\n\n def apply_gradients(self, grads_and_vars,\n global_step=None, name=None):\n g = self._apply(grads_and_vars)\n return self._opt.apply_gradients(g, global_step, name)\n\n def _apply(self, g):\n for proc in self._gradprocs:\n g = proc.process(g)\n return g\n\n return _ApplyGradientProcessor(opt, gradprocs)", "docstring": "Wrapper around optimizers to apply gradient processors.\n\nArgs:\n opt (tf.train.Optimizer):\n gradprocs (list[GradientProcessor]): gradient processors to add to the\n optimizer.\n\nReturns:\n a :class:`tf.train.Optimizer` instance which runs the gradient\n processors before updating the variables.", "source": "juraj_google_style"} -{"code": "def get_corrections_dict(self, entry):\n\n corrections = {}\n for c in self.corrections:\n val = c.get_correction(entry)\n if val != 0:\n corrections[str(c)] = val\n return corrections", "docstring": "Returns the corrections applied to a particular entry.\n\nArgs:\n entry: A ComputedEntry object.\n\nReturns:\n ({correction_name: value})", "source": "juraj_google_style"} -{"code": "def semantic_eq(node1, node2):\n\n # For barriers, qarg order is not significant so compare as sets\n if 'barrier' == node1.name == node2.name:\n return set(node1.qargs) == set(node2.qargs)\n return node1.data_dict == node2.data_dict", "docstring": "Check if DAG nodes are considered equivalent, e.g. 
as a node_match for nx.is_isomorphic.\n\nArgs:\n node1 (DAGNode): A node to compare.\n node2 (DAGNode): The other node to compare.\n\nReturns:\n Bool: If node1 == node2", "source": "juraj_google_style"} -{"code": "def _convert_oauth2_credentials(credentials):\n\n new_credentials = google.oauth2.credentials.Credentials(\n token=credentials.access_token,\n refresh_token=credentials.refresh_token,\n token_uri=credentials.token_uri,\n client_id=credentials.client_id,\n client_secret=credentials.client_secret,\n scopes=credentials.scopes)\n\n new_credentials._expires = credentials.token_expiry\n\n return new_credentials", "docstring": "Converts to :class:`google.oauth2.credentials.Credentials`.\n\nArgs:\n credentials (Union[oauth2client.client.OAuth2Credentials,\n oauth2client.client.GoogleCredentials]): The credentials to\n convert.\n\nReturns:\n google.oauth2.credentials.Credentials: The converted credentials.", "source": "juraj_google_style"} -{"code": "def post_build(self, p, pay):\n\n p += pay\n if self.type in [0, 0x31, 0x32, 0x22]: # for these, field is reserved (0)\n p = p[:1]+chr(0)+p[2:]\n if self.chksum is None:\n ck = checksum(p[:2]+p[4:])\n p = p[:2]+ck.to_bytes(2, 'big')+p[4:]\n return p", "docstring": "Called implicitly before a packet is sent to compute and place IGMPv3 checksum.\n\n Parameters:\n self The instantiation of an IGMPv3 class\n p The IGMPv3 message in hex in network byte order\n pay Additional payload for the IGMPv3 message", "source": "juraj_google_style"} -{"code": "def bulk_get_or_create(self, data_list):\n\n items_to_create = dict()\n for record_key, record_config in data_list.items():\n if record_key not in items_to_create:\n record = self.get_instance(record_key)\n if not record:\n items_to_create[record_key] = self.model_cls(**record_config)\n if items_to_create:\n\n\n self.model_cls.objects.bulk_create(items_to_create.values())\n self.set_record_lookup(True)\n return self.record_lookup", "docstring": "data_list is the data to get or create\n We generate the query and set all the record keys based on passed in queryset\n Then we loop over each item in the data_list, which has the keys already! No need to generate them. 
Should save a lot of time\n Use values instead of the whole object, much faster\n\nArgs:\n data_list:\n\n Returns:", "source": "juraj_google_style"} -{"code": "def __mutate_parameter(value, param, mut_rate, max_mut_amt):\n\n\n if uniform(0, 1) < mut_rate:\n mut_amt = uniform(0, max_mut_amt)\n op = choice((add, sub))\n new_val = op(value, param.dtype(\n (param.max_val - param.min_val) * mut_amt\n ))\n if new_val > param.max_val:\n return param.max_val\n elif new_val < param.min_val:\n return param.min_val\n else:\n return new_val\n else:\n return value", "docstring": "Private, static method: mutates parameter\n\nArgs:\n value (int or float): current value for Member's parameter\n param (Parameter): parameter object\n mut_rate (float): mutation rate of the value\n max_mut_amt (float): maximum mutation amount of the value\n\nReturns:\n int or float: mutated value", "source": "juraj_google_style"} -{"code": "def largestTradesDF(symbol, token='', version=''):\n\n df = pd.DataFrame(largestTrades(symbol, token, version))\n _toDatetime(df)\n _reindex(df, 'time')\n return df", "docstring": "This returns 15 minute delayed, last sale eligible trades.\n\n https://iexcloud.io/docs/api/#largest-trades\n 9:30-4pm ET M-F during regular market hours\n\nArgs:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\nReturns:\n DataFrame: result", "source": "juraj_google_style"} -{"code": "def apply_pairwise(self, function, symmetric=True, diagonal=False, block=None, **kwargs):\n\n steps = self.index\n r = pd.DataFrame(index=steps, columns=steps)\n for i, s1 in enumerate(steps):\n j = range(i+1 if symmetric else len(steps))\n if not diagonal:\n j.remove(i)\n other = set(steps[j])\n if block is not None:\n df = self.reset_index()\n df = df.merge(df, on=block)\n other &= set(df[df.index_x == s1].index_y)\n\n for s2 in other:\n r.ix[s1, s2] = function(s1, s2, **kwargs)\n return r", "docstring": "Helper function for pairwise apply.\n\nArgs:\n steps: an ordered collection of steps\n function: function to apply, first two positional arguments are steps\n symmetric: whether function is symmetric in the two steps\n diagonal: whether to apply on the diagonal\n block: apply only when the given columns match\n kwargs: keyword arguments to pass to the function\n\nReturns:\n DataFrame with index and columns equal to the steps argument", "source": "juraj_google_style"} -{"code": "def __call__(self, utterances_batch: list, utterances_ids: Optional[list] = None) -> list:\n\n responses_batch = self._call(utterances_batch, utterances_ids)\n\n batch_size = len(utterances_batch)\n ids = utterances_ids or list(range(batch_size))\n\n for utt_batch_idx, utt_id in enumerate(ids):\n self.history[utt_id].append(str(utterances_batch[utt_batch_idx]))\n self.dialog_logger.log_in(utterances_batch[utt_batch_idx], utt_id)\n\n self.history[utt_id].append(str(responses_batch[utt_batch_idx]))\n self.dialog_logger.log_out(responses_batch[utt_batch_idx], utt_id)\n\n return responses_batch", "docstring": "Wraps _call method and updates utterances history.\n\nArgs:\n utterances_batch: Batch of incoming utterances.\n utterances_ids: Batch of dialog IDs corresponding to incoming utterances.\n\nReturns:\n responses: A batch of responses corresponding to the\n utterance batch received by agent.", "source": "juraj_google_style"} -{"code": "def add_segment(self, address, data, overwrite=False):\n\n\n seg_type = self._classify_segment(address, len(data))\n if not isinstance(seg_type, DisjointSegment):\n raise 
ArgumentError(\"Unsupported segment type\")\n\n segment = MemorySegment(address, address+len(data)-1, len(data), bytearray(data))\n self._segments.append(segment)", "docstring": "Add a contiguous segment of data to this memory map\n\n If the segment overlaps with a segment already added , an\n ArgumentError is raised unless the overwrite flag is True.\n\n Params:\n address (int): The starting address for this segment\n data (bytearray): The data to add\n overwrite (bool): Overwrite data if this segment overlaps\n with one previously added.", "source": "juraj_google_style"} -{"code": "def open(self, **params):\n\n logger.info('opening telnet')\n self.port = params['port']\n self.ip = params['ip']\n self.tn = None\n self._init()", "docstring": "Open telnet connection\n\nArgs:\n params (dict), must contain two parameters \"ip\" - ip address or hostname and \"port\" - port number\n\nExample:\n params = {'port': 23, 'ip': 'localhost'}", "source": "juraj_google_style"} -{"code": "def create_checksum_object_from_iterator(\n itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM\n):\n\n checksum_str = calculate_checksum_on_iterator(itr, algorithm)\n checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str)\n checksum_pyxb.algorithm = algorithm\n return checksum_pyxb", "docstring": "Calculate the checksum of an iterator.\n\nArgs:\n itr: iterable\n Object which supports the iterator protocol.\n\n algorithm: str\n Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.\n\nReturns:\n Populated Checksum PyXB object.", "source": "juraj_google_style"} -{"code": "def __init__(self, action_type=None, nw_addr=None):\n\n super().__init__(action_type, length=8)\n self.nw_addr = nw_addr", "docstring": "Create an ActionNWAddr with the optional parameters below.\n\nArgs:\n action_type (:class:`~pyof.v0x01.common.action.ActionType`):\n :attr:`~ActionType.OFPAT_SET_NW_SRC` or\n :attr:`~ActionType.OFPAT_SET_NW_DST`.\n nw_addr (int): IP Address.", "source": "juraj_google_style"} -{"code": "def _Open(self, path_spec, mode='rb'):\n\n if not path_spec.HasParent():\n raise errors.PathSpecError(\n 'Unsupported path specification without parent.')\n\n range_offset = getattr(path_spec, 'range_offset', None)\n if range_offset is None:\n raise errors.PathSpecError(\n 'Unsupported path specification without encoding method.')\n\n range_size = getattr(path_spec, 'range_size', None)\n if range_size is None:\n raise errors.PathSpecError(\n 'Unsupported path specification without encoding method.')\n\n self._range_offset = range_offset\n self._range_size = range_size", "docstring": "Opens the file system defined by path specification.\n\nArgs:\n path_spec (PathSpec): a path specification.\n mode (Optional[str]): file access mode. 
The default is 'rb' which\n represents read-only binary.\n\nRaises:\n AccessError: if the access to open the file was denied.\n IOError: if the file system could not be opened.\n PathSpecError: if the path specification is incorrect.\n ValueError: if the path specification is invalid.", "source": "juraj_google_style"} -{"code": "def split_folder_and_path(filepath):\n\n dirname = op.dirname(filepath)\n filename = op.basename(filepath)\n splitext = op.splitext(filename)\n filename_without_extension = splitext[0]\n extension = splitext[1]\n\n return dirname, filename_without_extension, extension", "docstring": "Split a file path into its folder, filename, and extension\n\nArgs:\n path (str): Path to a file\n\nReturns:\n tuple: of (folder, filename (without extension), extension)", "source": "juraj_google_style"} -{"code": "def empty(shape, dtype=None, **kwargs):\n\n data = np.empty(shape, dtype)\n return dc.array(data, **kwargs)", "docstring": "Create an array of given shape and type, without initializing entries.\n\nArgs:\n shape (sequence of ints): 2D shape of the array.\n dtype (data-type, optional): Desired data-type for the array.\n kwargs (optional): Other arguments of the array (*coords, attrs, and name).\n\nReturns:\n array (decode.array): Decode array without initializing entries.", "source": "juraj_google_style"} -{"code": "def start(self, on_done):\n\n genesis_file = os.path.join(self._data_dir, 'genesis.batch')\n try:\n with open(genesis_file, 'rb') as batch_file:\n genesis_data = genesis_pb2.GenesisData()\n genesis_data.ParseFromString(batch_file.read())\n LOGGER.info('Producing genesis block from %s', genesis_file)\n except IOError:\n raise InvalidGenesisStateError(\n \"Genesis File {} specified, but unreadable\".format(\n genesis_file))\n\n initial_state_root = self._context_manager.get_first_root()\n\n genesis_batches = [batch for batch in genesis_data.batches]\n if genesis_batches:\n scheduler = SerialScheduler(\n self._context_manager.get_squash_handler(),\n initial_state_root,\n always_persist=True)\n\n LOGGER.debug('Adding %s batches', len(genesis_data.batches))\n for batch in genesis_data.batches:\n scheduler.add_batch(batch)\n\n self._transaction_executor.execute(scheduler)\n\n scheduler.finalize()\n scheduler.complete(block=True)\n\n txn_receipts = []\n state_hash = initial_state_root\n for batch in genesis_batches:\n result = scheduler.get_batch_execution_result(\n batch.header_signature)\n if result is None or not result.is_valid:\n raise InvalidGenesisStateError(\n 'Unable to create genesis block, due to batch {}'\n .format(batch.header_signature))\n if result.state_hash is not None:\n state_hash = result.state_hash\n\n txn_results = scheduler.get_transaction_execution_results(\n batch.header_signature)\n txn_receipts += self._make_receipts(txn_results)\n\n settings_view = SettingsView(\n self._state_view_factory.create_view(state_hash))\n name = settings_view.get_setting('sawtooth.consensus.algorithm.name')\n version = settings_view.get_setting(\n 'sawtooth.consensus.algorithm.version')\n if name is None or version is None:\n raise LocalConfigurationError(\n 'Unable to start validator; sawtooth.consensus.algorithm.name '\n 'and sawtooth.consensus.algorithm.version must be set in the '\n 'genesis block.')\n\n LOGGER.debug('Produced state hash %s for genesis block.', state_hash)\n\n block_builder = self._generate_genesis_block()\n block_builder.add_batches(genesis_batches)\n block_builder.set_state_hash(state_hash)\n\n block_publisher = 
self._get_block_publisher(initial_state_root)\n if not block_publisher.initialize_block(block_builder.block_header):\n LOGGER.error('Consensus refused to initialize consensus block.')\n raise InvalidGenesisConsensusError(\n 'Consensus refused to initialize genesis block.')\n\n if not block_publisher.finalize_block(block_builder.block_header):\n LOGGER.error('Consensus refused to finalize genesis block.')\n raise InvalidGenesisConsensusError(\n 'Consensus refused to finalize genesis block.')\n\n self._sign_block(block_builder)\n\n block = block_builder.build_block()\n\n blkw = BlockWrapper(block=block)\n\n LOGGER.info('Genesis block created: %s', blkw)\n\n self._block_manager.put([blkw.block])\n self._block_manager.persist(blkw.identifier, \"commit_store\")\n\n self._txn_receipt_store.chain_update(block, txn_receipts)\n self._chain_id_manager.save_block_chain_id(block.header_signature)\n\n LOGGER.debug('Deleting genesis data.')\n os.remove(genesis_file)\n\n if on_done is not None:\n on_done()", "docstring": "Starts the genesis block creation process. Will call the given\n `on_done` callback on successful completion.\n\nArgs:\n on_done (function): a function called on completion\n\nRaises:\n InvalidGenesisStateError: raises this error if a genesis block is\n unable to be produced, or the resulting block-chain-id saved.", "source": "juraj_google_style"} -{"code": "def scope(self, framebuffer, enable_only=None, *, textures=(), uniform_buffers=(), storage_buffers=()) -> 'Scope':\n\n\n textures = tuple((tex.mglo, idx) for tex, idx in textures)\n uniform_buffers = tuple((buf.mglo, idx) for buf, idx in uniform_buffers)\n storage_buffers = tuple((buf.mglo, idx) for buf, idx in storage_buffers)\n\n res = Scope.__new__(Scope)\n res.mglo = self.mglo.scope(framebuffer.mglo, enable_only, textures, uniform_buffers, storage_buffers)\n res.ctx = self\n res.extra = None\n return res", "docstring": "Create a :py:class:`Scope` object.\n\nArgs:\n framebuffer (Framebuffer): The framebuffer to use when entering.\n enable_only (int): The enable_only flags to set when entering.\n\n Keyword Args:\n textures (list): List of (texture, binding) tuples.\n uniform_buffers (list): List of (buffer, binding) tuples.\n storage_buffers (list): List of (buffer, binding) tuples.", "source": "juraj_google_style"} -{"code": "def _clean_isbn(isbn):\n\n if isinstance(isbn, basestring):\n isbn = list(isbn.lower())\n\n # filter digits and \"x\"\n isbn = filter(lambda x: x.isdigit() or x == \"x\", isbn)\n\n # convert ISBN to numbers\n return map(lambda x: 10 if x == \"x\" else int(x), isbn)", "docstring": "Remove all non-digit and non \"x\" characters from given string.\n\nArgs:\n isbn (str): isbn string, which will be cleaned.\n\nReturns:\n list: array of numbers (if \"x\" is found, it is converted to 10).", "source": "juraj_google_style"} -{"code": "def tradesDF(symbol=None, token='', version=''):\n\n x = trades(symbol, token, version)\n data = []\n for key in x:\n dat = x[key]\n for d in dat:\n d['symbol'] = key\n data.append(d)\n df = pd.DataFrame(data)\n _toDatetime(df)\n return df", "docstring": "Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. 
DEEP sends a Trade report message for every individual fill.\n\n https://iexcloud.io/docs/api/#deep-trades\n\nArgs:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\nReturns:\n DataFrame: result", "source": "juraj_google_style"} -{"code": "def _assign_udf_desc_extents(descs, start_extent):\n # type: (PyCdlib._UDFDescriptors, int) -> None\n\n current_extent = start_extent\n\n descs.pvd.set_extent_location(current_extent)\n current_extent += 1\n\n descs.impl_use.set_extent_location(current_extent)\n current_extent += 1\n\n descs.partition.set_extent_location(current_extent)\n current_extent += 1\n\n descs.logical_volume.set_extent_location(current_extent)\n current_extent += 1\n\n descs.unallocated_space.set_extent_location(current_extent)\n current_extent += 1\n\n descs.terminator.set_extent_location(current_extent)\n current_extent += 1", "docstring": "An internal function to assign a consecutive sequence of extents for the\n given set of UDF Descriptors, starting at the given extent.\n\n Parameters:\n descs - The PyCdlib._UDFDescriptors object to assign extents for.\n start_extent - The starting extent to assign from.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def remove_send_message(self, connection):\n\n if connection in self._send_message:\n del self._send_message[connection]\n LOGGER.debug(\"Removed send_message function \"\n \"for connection %s\", connection)\n else:\n LOGGER.warning(\"Attempted to remove send_message \"\n \"function for connection %s, but no \"\n \"send_message function was registered\",\n connection)", "docstring": "Removes a send_message function previously registered\n with the Dispatcher.\n\nArgs:\n connection (str): A locally unique identifier provided\n by the receiver of messages.", "source": "juraj_google_style"} -{"code": "def find_replace(obj, find, replace):\n\n try:\n if isinstance(obj, dict):\n return {find_replace(key,find,replace): find_replace(value,find,replace) for key, value in obj.items()}\n elif isinstance(obj, list):\n return [find_replace(element,find,replace) for element in obj]\n elif obj == find:\n return unicode_convert(replace)\n else:\n try:\n return unicode_convert(find_replace_string(obj, find, replace))\n #obj = unicode_convert(json.loads(obj))\n #return find_replace(obj,find,replace)\n except:\n return unicode_convert(obj)\n except:\n line, filename, synerror = trace()\n raise ArcRestHelperError({\n \"function\": \"find_replace\",\n \"line\": line,\n \"filename\": filename,\n \"synerror\": synerror,\n }\n )\n finally:\n pass", "docstring": "Searches an object and performs a find and replace.\n\nArgs:\n obj (object): The object to iterate and find/replace.\n find (str): The string to search for.\n replace (str): The string to replace with.\n\nReturns:\n object: The object with replaced strings.", "source": "juraj_google_style"} -{"code": "def downsampled_mesh(self, step):\n\n from lace.mesh import Mesh\n\n if self.f is not None:\n raise ValueError(\n 'Function `downsampled_mesh` does not support faces.')\n\n low = Mesh()\n if self.v is not None:\n low.v = self.v[::step]\n if self.vc is not None:\n low.vc = self.vc[::step]\n return low", "docstring": "Returns a downsampled copy of this mesh.\n\nArgs:\n step: the step size for the sampling\n\nReturns:\n a new, downsampled Mesh object.\n\nRaises:\n ValueError if this Mesh has faces.", "source": "juraj_google_style"} -{"code": "def find_elements_by_name(self, name, update=False) -> Elements:\n\n return 
self.find_elements(by=By.NAME, value=name, update=update)", "docstring": "Finds multiple elements by name.\n\nArgs:\n name: The name of the elements to be found.\n update: If the interface has changed, this option should be True.\n\nReturns:\n A list with elements if any was found. An empty list if not.\n\nRaises:\n NoSuchElementException - If the element wasn't found.\n\n Usage:\n elements = driver.find_elements_by_name('foo')", "source": "juraj_google_style"} -{"code": "def operate(self, left, right, operation):\n\n operation = {\n '+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n '/': operator.truediv\n }.get(operation)\n return operation(left, right)", "docstring": "Do operation on colors\n\nArgs:\n left (str): left side\n right (str): right side\n operation (str): Operation\n\nReturns:\n str", "source": "juraj_google_style"} -{"code": "def __init__(self, shape, num_actions, probabilities=None, scope='categorical', summary_labels=()):\n\n self.num_actions = num_actions\n\n action_size = util.prod(shape) * self.num_actions\n if probabilities is None:\n logits = 0.0\n else:\n logits = [log(prob) for _ in range(util.prod(shape)) for prob in probabilities]\n self.logits = Linear(size=action_size, bias=logits, scope='logits', summary_labels=summary_labels)\n\n super(Categorical, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Categorical distribution.\n\nArgs:\n shape: Action shape.\n num_actions: Number of discrete action alternatives.\n probabilities: Optional distribution bias.", "source": "juraj_google_style"} -{"code": "def locked_put(self, credentials):\n\n self._create_file_if_needed()\n _helpers.validate_file(self._filename)\n f = open(self._filename, 'w')\n f.write(credentials.to_json())\n f.close()", "docstring": "Write Credentials to file.\n\nArgs:\n credentials: Credentials, the credentials to store.\n\nRaises:\n IOError if the file is a symbolic link.", "source": "juraj_google_style"} -{"code": "def __init__(self, df, grouping_column_names):\n\n\n self.df = df\n self.grouping_columns = []\n self.grouping_column_types = []\n\n if isinstance(grouping_column_names, str):\n grouping_column_names = [grouping_column_names]\n for column_name in grouping_column_names:\n column = df[column_name]\n if isinstance(column, LazyOpResult):\n self.grouping_column_types.append(column.weld_type)\n self.grouping_columns.append(column.expr)\n elif isinstance(column, np.ndarray):\n column_type = numpyImpl.numpy_to_weld_type_mapping[\n str(column.dtype)]\n self.grouping_column_types.append(column_type)\n self.grouping_columns.append(column)\n\n self.grouping_column_names = grouping_column_names\n self.column_names = []\n for x in df._get_column_names():\n if x not in self.grouping_column_names:\n self.column_names.append(x)\n\n self.columns = []\n self.column_types = []\n for column_name in self.column_names:\n column = df[column_name]\n column_type = None\n if isinstance(column, LazyOpResult):\n column_type = column.weld_type\n column = column.expr\n elif isinstance(column, np.ndarray):\n column_type = numpyImpl.numpy_to_weld_type_mapping[\n str(column.dtype)]\n\n self.columns.append(column)\n self.column_types.append(column_type)", "docstring": "Summary\n\nArgs:\n df (TYPE): Description\n grouping_column_name (TYPE): Description", "source": "juraj_google_style"} -{"code": "def __init__(self, data):\n\n\n self._coord = data['NODE_COORD_SECTION']\n self._nodes = {i: Node(i, data['DEMAND'][i]) for i in data['MATRIX']}\n self._matrix = {}\n 
self._depot = None\n self._branch_kind = data['BRANCH_KIND']\n self._branch_type = data['BRANCH_TYPE']\n self._v_level = data['V_LEVEL']\n self._is_aggregated = data['IS_AGGREGATED']\n\n for i in data['MATRIX']:\n\n x = self._nodes[i]\n self._matrix[x] = {}\n\n if i == data['DEPOT']:\n self._depot = x # x, not i!!\n\n for j in data['MATRIX']:\n y = self._nodes[j]\n\n self._matrix[x][y] = data['MATRIX'][i][j]\n\n if self._depot is None:\n raise Exception('Depot not found')", "docstring": "Class constructor\n\n Initialize all nodes, edges and depot\n\n Parameters:\n data: TSPLIB parsed data", "source": "juraj_google_style"} -{"code": "def angle(self, deg=False):\n\n if self.dtype.str[1] != 'c':\n warnings.warn('angle() is intended for complex-valued timeseries',\n RuntimeWarning, 1)\n return Timeseries(np.angle(self, deg=deg), self.tspan, self.labels)", "docstring": "Return the angle of the complex argument.\n\nArgs:\n deg (bool, optional):\n Return angle in degrees if True, radians if False (default).\n\nReturns:\n angle (Timeseries):\n The counterclockwise angle from the positive real axis on\n the complex plane, with dtype as numpy.float64.", "source": "juraj_google_style"} -{"code": "def add_showcases(self, showcases, showcases_to_check=None):\n # type: (List[Union[hdx.data.showcase.Showcase,Dict,str]], List[hdx.data.showcase.Showcase]) -> bool\n\n if showcases_to_check is None:\n showcases_to_check = self.get_showcases()\n allshowcasesadded = True\n for showcase in showcases:\n if not self.add_showcase(showcase, showcases_to_check=showcases_to_check):\n allshowcasesadded = False\n return allshowcasesadded", "docstring": "Add dataset to multiple showcases\n\nArgs:\n showcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries\n showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. 
Defaults to showcases containing dataset.\n\nReturns:\n bool: True if all showcases added or False if any already present", "source": "juraj_google_style"} -{"code": "def playlist(self, playlist_id, *, include_songs=False):\n\n\n\t\tplaylist_info = next(\n\t\t\t(\n\t\t\t\tplaylist\n\t\t\t\tfor playlist in self.playlists(include_songs=include_songs)\n\t\t\t\tif playlist['id'] == playlist_id\n\t\t\t),\n\t\t\tNone\n\t\t)\n\n\t\treturn playlist_info", "docstring": "Get information about a playlist.\n\n Parameters:\n playlist_id (str): A playlist ID.\n include_songs (bool, Optional): Include songs from\n the playlist in the returned dict.\n Default: ``False``\n\nReturns:\n dict: Playlist information.", "source": "juraj_google_style"} -{"code": "def add_deps(self, deps):\n\n if isinstance(deps, collections.Mapping):\n # Convert dictionary into list of dependencies.\n deps = [Dependency(node, exts) for node, exts in deps.items()]\n\n # We want a list\n if not isinstance(deps, (list, tuple)):\n deps = [deps]\n\n assert all(isinstance(d, Dependency) for d in deps)\n\n # Add the dependencies to the node\n self._deps.extend(deps)\n\n if self.is_work:\n # The task in the work should inherit the same dependency.\n for task in self:\n task.add_deps(deps)\n\n # If we have a FileNode as dependency, add self to its children\n # Node.get_parents will use this list if node.is_isfile.\n for dep in (d for d in deps if d.node.is_file):\n dep.node.add_filechild(self)", "docstring": "Add a list of dependencies to the :class:`Node`.\n\nArgs:\n deps: List of :class:`Dependency` objects specifying the dependencies of the node.\n or dictionary mapping nodes to file extensions e.g. {task: \"DEN\"}", "source": "juraj_google_style"} -{"code": "def __setDeviceMode(self, mode):\n\n print 'call __setDeviceMode'\n\n try:\n cmd = WPANCTL_CMD + 'setprop Thread:DeviceMode %d' % mode\n return self.__sendCommand(cmd)[0] != 'Fail'\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger('setDeviceMode() Error: ' + str(e))", "docstring": "set thread device mode:\n\nArgs:\n mode: thread device mode. 
15=rsdn, 13=rsn, 4=s\n r: rx-on-when-idle\n s: secure IEEE 802.15.4 data request\n d: full thread device\n n: full network data\n\nReturns:\n True: successful to set the device mode\n False: fail to set the device mode", "source": "juraj_google_style"} -{"code": "def get_developer_package(path, format=None):\n\n from rez.developer_package import DeveloperPackage\n return DeveloperPackage.from_path(path, format=format)", "docstring": "Create a developer package.\n\nArgs:\n path (str): Path to dir containing package definition file.\n format (str): Package definition file format, detected if None.\n\nReturns:\n `DeveloperPackage`.", "source": "juraj_google_style"} -{"code": "def grid_destroy_from_ids(oargrid_jobids):\n\n jobs = grid_reload_from_ids(oargrid_jobids)\n for job in jobs:\n job.delete()\n logger.info(\"Killing the jobs %s\" % oargrid_jobids)", "docstring": "Destroy all the jobs with corresponding ids\n\nArgs:\n oargrid_jobids (list): the ``(site, oar_job_id)`` list of tuple\n identifying the jobs for each site.", "source": "juraj_google_style"} -{"code": "def __init__(self, app=None, env=None, region=None, prop_path=None):\n\n self.app_name = app\n self.env = env\n self.region = region\n self.prop_path = prop_path\n self.properties = get_properties(properties_file=prop_path, env=env, region=region)", "docstring": "Lambda event object.\n\nArgs:\n app (str): Application name\n env (str): Environment/Account\n region (str): AWS Region\n prop_path (str): Path of environment property file", "source": "juraj_google_style"} -{"code": "def __init__(self, client, user_list, conversation, events=[],\n event_cont_token=None):\n # pylint: disable=dangerous-default-value\n self._client = client # Client\n self._user_list = user_list # UserList\n self._conversation = conversation # hangouts_pb2.Conversation\n self._events = [] # [hangouts_pb2.Event]\n self._events_dict = {} # {event_id: ConversationEvent}\n self._send_message_lock = asyncio.Lock()\n self._watermarks = {} # {UserID: datetime.datetime}\n self._event_cont_token = event_cont_token\n for event_ in events:\n # Workaround to ignore observed events returned from\n # syncrecentconversations.\n if event_.event_type != hangouts_pb2.EVENT_TYPE_OBSERVED_EVENT:\n self.add_event(event_)\n\n self.on_event = event.Event('Conversation.on_event')\n\n\n self.on_typing = event.Event('Conversation.on_typing')\n\n\n self.on_watermark_notification = event.Event(\n 'Conversation.on_watermark_notification'\n )\n\n\n self.on_watermark_notification.add_observer(\n self._on_watermark_notification\n )", "docstring": ":class:`.Event` fired when an event occurs in this conversation.\n\nArgs:\n conv_event: :class:`.ConversationEvent` that occurred.", "source": "juraj_google_style"} -{"code": "def get_cmd_handler(self, cmd):\n\n cmd = cmd.replace('-', '_')\n handler = getattr(self, cmd, None)\n if not handler:\n raise BuildException(\n 'Command {} is not supported as a '\n 'build command'.format(cmd)\n )\n return handler", "docstring": "Return an handler for cmd.\n The handler and the command should have the same name.\n See class description for more info about handlers.\n\nArgs:\n cmd (str): The name of the command\n\nReturns:\n callable: which handles cmd\n\nRaises:\n lago.build.BuildException: If an handler for cmd doesn't exist", "source": "juraj_google_style"} -{"code": "def add_status_parser(subparsers, parent_parser):\n\n parser = subparsers.add_parser(\n 'status',\n help='Displays information about validator status',\n description=\"Provides a 
subcommand to show a validator\\'s status\")\n\n grand_parsers = parser.add_subparsers(title='subcommands',\n dest='subcommand')\n grand_parsers.required = True\n add_status_show_parser(grand_parsers, parent_parser)", "docstring": "Adds argument parser for the status command\n\nArgs:\n subparsers: Add parsers to this subparser object\n parent_parser: The parent argparse.ArgumentParser object", "source": "juraj_google_style"} -{"code": "def mset(self, values):\n\n for key, value in values.items():\n self.set(key, value)", "docstring": "Set the value of several keys at once.\n\nArgs:\n values (dict): maps a key to its value.", "source": "juraj_google_style"} -{"code": "def _read_from_hdx(self, object_type, value, fieldname='id',\n action=None, **kwargs):\n # type: (str, str, str, Optional[str], Any) -> Tuple[bool, Union[Dict, str]]\n\n if not fieldname:\n raise HDXError('Empty %s field name!' % object_type)\n if action is None:\n action = self.actions()['show']\n data = {fieldname: value}\n data.update(kwargs)\n try:\n result = self.configuration.call_remoteckan(action, data)\n return True, result\n except NotFound:\n return False, '%s=%s: not found!' % (fieldname, value)\n except Exception as e:\n raisefrom(HDXError, 'Failed when trying to read: %s=%s! (POST)' % (fieldname, value), e)", "docstring": "Makes a read call to HDX passing in given parameter.\n\nArgs:\n object_type (str): Description of HDX object type (for messages)\n value (str): Value of HDX field\n fieldname (str): HDX field name. Defaults to id.\n action (Optional[str]): Replacement CKAN action url to use. Defaults to None.\n **kwargs: Other fields to pass to CKAN.\n\nReturns:\n Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)", "source": "juraj_google_style"} -{"code": "def _get_subset_matches(self, query):\n\n for field_name, db_index in self._get_db_fields(query):\n attr = getattr(query, field_name)\n\n if attr is None: # don't use unset attributes\n continue\n\n results = db_index.get(attr, OOTreeSet())\n\n if results:\n yield results", "docstring": "Yield publications, at indexes defined by `query` property values.\n\nArgs:\n query (obj): Object implementing proper interface.\n\nYields:\n list: List of matching publications.", "source": "juraj_google_style"} -{"code": "def check_index(self, key, *, index):\n\n self.append({\n \"Verb\": \"check-index\",\n \"Key\": key,\n \"Index\": extract_attr(index, keys=[\"ModifyIndex\", \"Index\"])\n })\n return self", "docstring": "Fails the transaction if Key does not have a modify index equal to\n Index\n\n Parameters:\n key (str): Key to check\n index (ObjectIndex): Index ID", "source": "juraj_google_style"} -{"code": "def _rapply(input_layer, operation, *op_args, **op_kwargs):\n\n op_args = list(op_args)\n op_args.append(input_layer.tensor)\n return input_layer.with_tensor(operation(*op_args, **op_kwargs))", "docstring": "Applies the given operation to this after expanding op_args.\n\nArgs:\n input_layer: The input layer for this op.\n operation: An operation that takes a tensor and the supplied args.\n *op_args: Extra arguments for operation.\n **op_kwargs: Keyword arguments for the operation.\n\nReturns:\n A new layer with operation applied.", "source": "juraj_google_style"} -{"code": "def __init__(self, **kwds):\n\n self.code_objs = dict()\n self._codes = []\n self._functions = []\n self._executables = []\n self.dry_run = None\n self.encoding = 'utf-8'\n self.newline = None\n if 'module' in kwds:\n self.import_module(kwds['module'])\n if 'code' in kwds:\n 
self.append_code_expr(kwds['code'])\n if 'function' in kwds:\n self.append_function(kwds['function'])\n if 'executable' in kwds:\n self.append_executable(kwds['executable'])\n if 'dry_run' in kwds:\n self.dry_run = kwds['dry_run']\n if 'encoding' in kwds:\n self.encoding = kwds['encoding']\n if 'newline' in kwds:\n self.newline = kwds['newline']", "docstring": "Initialize MassEdit object.\n\nArgs:\n - code (byte code object): code to execute on input file.\n - function (str or callable): function to call on input file.\n - module (str): module name where to find the function.\n - executable (str): executable file name to execute on input file.\n - dry_run (bool): skip actual modification of input file if True.", "source": "juraj_google_style"} -{"code": "def i4_sobol_generate_std_normal(dim_num, n, skip=1):\n\n\n sobols = i4_sobol_generate(dim_num, n, skip)\n\n normals = norm.ppf(sobols)\n\n return normals", "docstring": "Generates multivariate standard normal quasi-random variables.\n\n Parameters:\n Input, integer dim_num, the spatial dimension.\n Input, integer n, the number of points to generate.\n Input, integer SKIP, the number of initial points to skip.\n\n Output, real np array of shape (n, dim_num).", "source": "juraj_google_style"} -{"code": "def min_max_normalize(img):\n\n\n min_img = img.min()\n max_img = img.max()\n\n return (img - min_img) / (max_img - min_img)", "docstring": "Centre and normalize a given array.\n\n Parameters:\n ----------\n img: np.ndarray", "source": "juraj_google_style"} -{"code": "def files_info(self, *, id: str, **kwargs) -> SlackResponse:\n\n kwargs.update({\"id\": id})\n return self.api_call(\"files.info\", http_verb=\"GET\", params=kwargs)", "docstring": "Gets information about a team file.\n\nArgs:\n id (str): The file id. e.g. 'F1234467890'", "source": "juraj_google_style"} -{"code": "def empty(cls, labels=None):\n\n warnings.warn(\"Table.empty(labels) is deprecated. Use Table(labels)\", FutureWarning)\n if labels is None:\n return cls()\n values = [[] for label in labels]\n return cls(values, labels)", "docstring": "Creates an empty table. Column labels are optional. 
[Deprecated]\n\nArgs:\n ``labels`` (None or list): If ``None``, a table with 0\n columns is created.\n If a list, each element is a column label in a table with\n 0 rows.\n\nReturns:\n A new instance of ``Table``.", "source": "juraj_google_style"} -{"code": "def __init__(self, xid=None, body_type=None, flags=0, body=b''):\n\n super().__init__(xid)\n self.body_type = body_type\n self.flags = flags\n self.body = body", "docstring": "Create a StatsRequest with the optional parameters below.\n\nArgs:\n xid (int): xid to be used on the message header.\n body_type (StatsType): One of the OFPST_* constants.\n flags (int): OFPSF_REQ_* flags (none yet defined).\n body (BinaryData): Body of the request.", "source": "juraj_google_style"} -{"code": "def __init__(\n self, resolver_context, file_system, path_spec, file_entry_type=None,\n is_root=False):\n\n super(FakeFileEntry, self).__init__(\n resolver_context, file_system, path_spec, is_root=is_root,\n is_virtual=True)\n self._date_time = dfdatetime_fake_time.FakeTime()\n self._name = None\n self.entry_type = file_entry_type", "docstring": "Initializes a file entry.\n\nArgs:\n resolver_context (Context): resolver context.\n file_system (FileSystem): file system.\n path_spec (PathSpec): path specification.\n file_entry_type (Optional[str]): file entry type.\n is_root (Optional[bool]): True if the file entry is the root file entry\n of the corresponding file system.", "source": "juraj_google_style"} -{"code": "def bin_hash160Bytes(bts):\n\n intermed = hashlib.sha256(bts).digest()\n return hashlib.new('ripemd160', intermed).digest()", "docstring": "Get a hash of the provided message using the ripemd160 algorithm.\n\nArgs:\n bts (str): message to hash.\n\nReturns:\n bytes: hash.", "source": "juraj_google_style"} -{"code": "def add_url_rule(self, route, endpoint, handler):\n\n self.app.add_url_rule(route, endpoint, handler)", "docstring": "Add a new url route.\n\nArgs:\n See flask.Flask.add_url_route().", "source": "juraj_google_style"} -{"code": "def chip_as_adjacency_list(device: 'cirq.google.XmonDevice',\n ) -> Dict[GridQubit, List[GridQubit]]:\n\n c_set = set(device.qubits)\n c_adj = {} # type: Dict[GridQubit, List[GridQubit]]\n for n in device.qubits:\n c_adj[n] = []\n for m in [above(n), left_of(n), below(n), right_of(n)]:\n if m in c_set:\n c_adj[n].append(m)\n return c_adj", "docstring": "Gives adjacency list representation of a chip.\n\n The adjacency list is constructed in order of above, left_of, below and\n right_of consecutively.\n\nArgs:\n device: Chip to be converted.\n\nReturns:\n Map from nodes to list of qubits which represent all the neighbours of\n given qubit.", "source": "juraj_google_style"} -{"code": "def send_batches(self, batch_list):\n\n if isinstance(batch_list, BaseMessage):\n batch_list = batch_list.SerializeToString()\n\n return self._post('/batches', batch_list)", "docstring": "Sends a list of batches to the validator.\n\nArgs:\n batch_list (:obj:`BatchList`): the list of batches\n\nReturns:\n dict: the json result data, as a dict", "source": "juraj_google_style"} -{"code": "def get_pending_users_queryset(self, search_keyword, customer_uuid):\n\n queryset = PendingEnterpriseCustomerUser.objects.filter(\n enterprise_customer__uuid=customer_uuid\n )\n\n if search_keyword is not None:\n queryset = queryset.filter(user_email__icontains=search_keyword)\n\n return queryset", "docstring": "Get the list of PendingEnterpriseCustomerUsers we want to render.\n\nArgs:\n search_keyword (str): The keyword to search for in pending users' 
email addresses.\n customer_uuid (str): A unique identifier to filter down to only pending users\n linked to a particular EnterpriseCustomer.", "source": "juraj_google_style"} -{"code": "def set_aws_clients(self, clients):\n\n if type(clients) is not dict:\n raise TypeError(\"clients must be a dict\")\n self._aws_clients = clients", "docstring": "Stash a dictionary of AWS clients in the context object\n\nArgs:\n clients: dictionary of clients", "source": "juraj_google_style"} -{"code": "def convert(isbn, code='978'):\n\n isbn = _isbn_cleanse(isbn)\n if len(isbn) == 10:\n isbn = code + isbn[:-1]\n return isbn + calculate_checksum(isbn)\n else:\n if isbn.startswith('978'):\n return isbn[3:-1] + calculate_checksum(isbn[3:-1])\n else:\n raise IsbnError('Only ISBN-13s with 978 Bookland code can be '\n 'converted to ISBN-10.')", "docstring": "Convert ISBNs between ISBN-10 and ISBN-13.\n\nNote:\n No attempt to hyphenate converted ISBNs is made, because the\n specification requires that *any* hyphenation must be correct but\n allows ISBNs without hyphenation.\n\nArgs:\n isbn (str): SBN, ISBN-10 or ISBN-13\n code (str): EAN Bookland code\n\nReturns:\n ``str``: Converted ISBN-10 or ISBN-13\n\nRaises:\n IsbnError: When ISBN-13 isn't convertible to an ISBN-10", "source": "juraj_google_style"} -{"code": "def mtr_tr_dense(sz):\n\n n = 2 ** sz\n hparams = mtf_bitransformer_base()\n hparams.d_model = 1024\n hparams.max_length = 256\n hparams.batch_size = 128\n hparams.d_ff = int(4096 * n)\n hparams.d_kv = 128\n hparams.encoder_num_heads = int(8 * n)\n hparams.decoder_num_heads = int(8 * n)\n # one epoch for translate_enfr_wmt32k_packed = 51400 steps\n hparams.learning_rate_decay_steps = 51400\n hparams.layout = \"batch:batch;vocab:model;d_ff:model;heads:model\"\n hparams.mesh_shape = \"batch:32\"\n hparams.label_smoothing = 0.1\n hparams.layer_prepostprocess_dropout = 0.1\n hparams.attention_dropout = 0.1\n hparams.relu_dropout = 0.1\n return hparams", "docstring": "Series of machine translation models.\n\n All models are trained on sequences of 256 tokens.\n\n You can use the dataset translate_enfr_wmt32k_packed.\n 154000 steps = 3 epochs.\n\nArgs:\n sz: an integer\n\nReturns:\n a hparams", "source": "juraj_google_style"} -{"code": "def wwpn_free_if_allocated(self, wwpn):\n\n\n wwpn_int = int(wwpn[-4:], 16)\n self._wwpn_pool.free_if_allocated(wwpn_int)", "docstring": "Free a WWPN allocated with :meth:`wwpn_alloc`.\n\n If the WWPN is not currently allocated or not in the pool\n range, nothing happens.\n\n Parameters:\n WWPN (string): The WWPN as 16 hexadecimal digits.", "source": "juraj_google_style"} -{"code": "def ExpandPath(path, opts=None):\n\n precondition.AssertType(path, Text)\n\n for grouped_path in ExpandGroups(path):\n for globbed_path in ExpandGlobs(grouped_path, opts):\n yield globbed_path", "docstring": "Applies all expansion mechanisms to the given path.\n\nArgs:\n path: A path to expand.\n opts: A `PathOpts` object.\n\nYields:\n All paths possible to obtain from a given path by performing expansions.", "source": "juraj_google_style"} -{"code": "def get_results(self, job_id):\n\n url = self._url('%s/results' % job_id)\n return self.client.get(url)", "docstring": "Get results of a job\n\nArgs:\n job_id (str): The ID of the job.\n\n See: https://auth0.com/docs/api/management/v2#!/Jobs/get_results", "source": "juraj_google_style"} -{"code": "def expand_by_device(original_parallelism, device_parallelism, data):\n\n device_to_datum = {\n device_parallelism.devices[i]: data[i]\n for i in 
range(device_parallelism.n)}\n return [device_to_datum[d] for d in original_parallelism.devices]", "docstring": "Opposite of reduce_by_device().\n\nArgs:\n original_parallelism: a expert_utils.Parallelism object.\n device_parallelism: a expert_utils.Parallelism object.\n data: a list of tensors with length device_parallelism.n\n\nReturns:\n a list of Tensors with length original_parallelism.n", "source": "juraj_google_style"} -{"code": "def candidates(self, word):\n\n if self.known([word]): # short-cut if word is correct already\n return {word}\n # get edit distance 1...\n res = [x for x in self.edit_distance_1(word)]\n tmp = self.known(res)\n if tmp:\n return tmp\n # if still not found, use the edit distance 1 to calc edit distance 2\n if self._distance == 2:\n tmp = self.known([x for x in self.__edit_distance_alt(res)])\n if tmp:\n return tmp\n return {word}", "docstring": "Generate possible spelling corrections for the provided word up to\n an edit distance of two, if and only when needed\n\nArgs:\n word (str): The word for which to calculate candidate spellings\n\nReturns:\n set: The set of words that are possible candidates", "source": "juraj_google_style"} -{"code": "def onepara(R):\n\n import numpy as np\n import warnings\n\n d = R.shape[0]\n\n if d < 2:\n raise Exception((\n \"More than one variable is required.\"\n \"Supply at least a 2x2 matrix.\"))\n\n # the explicit solution\n x = (np.sum(R) + np.trace(R)) / (d**2 - d)\n\n if x < (-1. / (d - 1)) or x > 1:\n warnings.warn(\"No analytic solution found x={:.8f}\".format(x))\n return None\n else:\n C = np.eye(d)\n C[np.logical_not(C)] = x\n return C", "docstring": "Converts an ill-conditioned correlation matrix\n into well-conditioned matrix with one common\n correlation coefficient\n\n Parameters:\n -----------\n R : ndarray\n an illconditioned correlation matrix,\n e.g. oxyba.illcond_corrmat\n\nReturns:\n -------\n cmat : ndarray\n DxD matrix with +1 as diagonal elements\n and 1 common coefficient for all other\n relations.", "source": "juraj_google_style"} -{"code": "def set_energy(self, spins, target_energy):\n\n spin_energy = self.energy(spins)\n self.assertions.add(Equals(spin_energy, limitReal(target_energy)))", "docstring": "Set the energy of Theta with spins fixed to target_energy.\n\nArgs:\n spins (dict): Spin values for a subset of the variables in Theta.\n target_energy (float): The desired energy for Theta with spins fixed.\n\nNote:\n Add equality constraint to assertions.", "source": "juraj_google_style"} -{"code": "def populate(self, filename):\n\n\n if os.path.isfile(filename):\n fid_st = os.stat(filename)\n self.name = os.path.abspath(filename)\n self.full_name = filename\n self.size = fid_st.st_size\n self.last_modified = fid_st.st_mtime\n self.last_accessed = fid_st.st_atime\n self.last_info_changed = fid_st.st_ctime\n self.location = os.path.dirname(filename)", "docstring": "Finds the file-stats and populates the class with stat values.\n\nArgs:\n filename (str): name of the file.", "source": "juraj_google_style"} -{"code": "def list_load_balancers(access_token, subscription_id):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/providers/Microsoft.Network/',\n '/loadBalancers?api-version=', NETWORK_API])\n return do_get(endpoint, access_token)", "docstring": "List the load balancers in a subscription.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n\nReturns:\n HTTP response. 
JSON body of load balancer list with properties.", "source": "juraj_google_style"} -{"code": "def parse_document(text, options=0):\n\n encoded_text = text.encode('utf-8')\n return _cmark.lib.cmark_parse_document(\n encoded_text, len(encoded_text), options)", "docstring": "Parse a document and return the root node.\n\nArgs:\n text (str): The text to parse.\n options (int): The cmark options.\n\nReturns:\n Any: Opaque reference to the root node of the parsed syntax tree.", "source": "juraj_google_style"} -{"code": "def join(self, *data: Iterable[MaybeBytes]) -> bytes:\n\n return self.how.join([bytes(item) for item in chain(*data)])", "docstring": "Iterable join on a delimiter.\n\nArgs:\n data: Iterable of items to join.\n\nExample:\n ::\n\n BytesFormat(b' ').join([b'one', b'two', b'three'])", "source": "juraj_google_style"} -{"code": "def latest_malicious(self, ips):\n\n api_name = 'opendns-latest_malicious'\n fmt_url_path = u'ips/{0}/latest_domains'\n return self._multi_get(api_name, fmt_url_path, ips)", "docstring": "Get the a list of malicious domains related to input ips.\n\nArgs:\n ips: an enumerable of strings as ips\n\nReturns:\n An enumerable of strings for the malicious domains", "source": "juraj_google_style"} -{"code": "def set_xlim(self, xlims, dx, xscale, reverse=False):\n\n self._set_axis_limits('x', xlims, dx, xscale, reverse)\n return", "docstring": "Set x limits for plot.\n\n This will set the limits for the x axis\n for the specific plot.\n\nArgs:\n xlims (len-2 list of floats): The limits for the axis.\n dx (float): Amount to increment by between the limits.\n xscale (str): Scale of the axis. Either `log` or `lin`.\n reverse (bool, optional): If True, reverse the axis tick marks. Default is False.", "source": "juraj_google_style"} -{"code": "def get_kwdefaults(func, parse_source=False):\n r\n #import utool as ut\n #with ut.embed_on_exception_context:\n argspec = inspect.getargspec(func)\n kwdefaults = {}\n if argspec.args is None or argspec.defaults is None:\n pass\n else:\n args = argspec.args\n defaults = argspec.defaults\n #kwdefaults = OrderedDict(zip(argspec.args[::-1], argspec.defaults[::-1]))\n kwpos = len(args) - len(defaults)\n kwdefaults = OrderedDict(zip(args[kwpos:], defaults))\n if parse_source and argspec.keywords:\n # TODO parse for kwargs.get/pop\n keyword_defaults = parse_func_kwarg_keys(func, with_vals=True)\n for key, val in keyword_defaults:\n assert key not in kwdefaults, 'parsing error'\n kwdefaults[key] = val\n return kwdefaults", "docstring": "r\"\"\"\n\nArgs:\n func (func):\n\nReturns:\n dict:\n\n CommandLine:\n python -m utool.util_inspect get_kwdefaults\n\nExample:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_inspect import * # NOQA\n >>> import utool as ut\n >>> func = dummy_func\n >>> parse_source = True\n >>> kwdefaults = get_kwdefaults(func, parse_source)\n >>> print('kwdefaults = %s' % (ut.repr4(kwdefaults),))", "source": "juraj_google_style"} -{"code": "def child_link_extent(self):\n # type: () -> int\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')\n\n if self.dr_entries.cl_record is not None:\n return self.dr_entries.cl_record.child_log_block_num\n if self.ce_entries.cl_record is not None:\n return self.ce_entries.cl_record.child_log_block_num\n\n raise pycdlibexception.PyCdlibInternalError('Asked for child extent for non-existent parent record')", "docstring": "Get the extent of the child of this entry if it has one.\n\n Parameters:\n None.\n\nReturns:\n The logical 
block number of the child if it exists.", "source": "juraj_google_style"} -{"code": "def prep_gate(self, circuit, qreg, op):\n\n if self.prep_fun is None:\n pass\n else:\n self.prep_fun(circuit, qreg, op)", "docstring": "Add state preparation gates to a circuit.\n\nArgs:\n circuit (QuantumCircuit): circuit to add a preparation to.\n qreg (tuple(QuantumRegister,int)): quantum register to apply\n preparation to.\n op (tuple(str, int)): the basis label and index for the\n preparation op.", "source": "juraj_google_style"} -{"code": "def AnalyzeClient(self, client):\n\n\n # Start with a universal keyword, used to find all clients.\n #\n # TODO(user): Remove the universal keyword once we have a better way\n # to do this, i.e., once we have a storage library which can list all\n # clients directly.\n\n keywords = set([\".\"])\n\n def TryAppend(prefix, keyword):\n precondition.AssertType(prefix, Text)\n precondition.AssertType(keyword, Text)\n if keyword:\n keyword_string = self._NormalizeKeyword(keyword)\n keywords.add(keyword_string)\n if prefix:\n keywords.add(prefix + \":\" + keyword_string)\n\n def TryAppendPrefixes(prefix, keyword, delimiter):\n TryAppend(prefix, keyword)\n segments = keyword.split(delimiter)\n for i in range(1, len(segments)):\n TryAppend(prefix, delimiter.join(segments[0:i]))\n return len(segments)\n\n def TryAppendIP(ip):\n TryAppend(\"ip\", ip)\n # IP4v?\n if TryAppendPrefixes(\"ip\", Text(ip), \".\") == 4:\n return\n # IP6v?\n TryAppendPrefixes(\"ip\", Text(ip), \":\")\n\n def TryAppendMac(mac):\n TryAppend(\"mac\", mac)\n if len(mac) == 12:\n # If looks like a mac address without \":\" symbols, also add the keyword\n # with them.\n TryAppend(\"mac\", \":\".join([mac[i:i + 2] for i in range(0, 12, 2)]))\n\n TryAppend(\"host\", client.knowledge_base.fqdn)\n host = client.knowledge_base.fqdn.split(\".\", 1)[0]\n TryAppendPrefixes(\"host\", host, \"-\")\n TryAppendPrefixes(\"host\", client.knowledge_base.fqdn, \".\")\n TryAppend(\"\", client.knowledge_base.os)\n TryAppend(\"\", client.Uname())\n TryAppend(\"\", client.os_release)\n TryAppend(\"\", client.os_version)\n TryAppend(\"\", client.kernel)\n TryAppend(\"\", client.arch)\n\n kb = client.knowledge_base\n if kb:\n for user in kb.users:\n TryAppend(\"user\", user.username)\n TryAppend(\"\", user.full_name)\n if user.full_name:\n for name in user.full_name.split():\n # full_name often includes nicknames and similar, wrapped in\n # punctuation, e.g. \"Thomas 'TJ' Jones\". 
We remove the most common\n # wrapping characters.\n TryAppend(\"\", name.strip(\"\\\"'()\"))\n\n for ip in client.GetIPAddresses():\n TryAppendIP(ip)\n for mac in client.GetMacAddresses():\n TryAppendMac(mac)\n\n client_info = client.startup_info.client_info\n if client_info:\n TryAppend(\"client\", client_info.client_name)\n TryAppend(\"client\", Text(client_info.client_version))\n if client_info.labels:\n for label in client_info.labels:\n TryAppend(\"label\", label)\n\n return keywords", "docstring": "Finds the client_id and keywords for a client.\n\nArgs:\n client: A Client object record to find keywords for.\n\nReturns:\n A list of keywords related to client.", "source": "juraj_google_style"} -{"code": "def obtain(self, dest):\n # type: (str) -> None\n\n url, rev_options = self.get_url_rev_options(self.url)\n\n if not os.path.exists(dest):\n self.fetch_new(dest, url, rev_options)\n return\n\n rev_display = rev_options.to_display()\n if self.is_repository_directory(dest):\n existing_url = self.get_remote_url(dest)\n if self.compare_urls(existing_url, url):\n logger.debug(\n '%s in %s exists, and has correct URL (%s)',\n self.repo_name.title(),\n display_path(dest),\n url,\n )\n if not self.is_commit_id_equal(dest, rev_options.rev):\n logger.info(\n 'Updating %s %s%s',\n display_path(dest),\n self.repo_name,\n rev_display,\n )\n self.update(dest, url, rev_options)\n else:\n logger.info('Skipping because already up-to-date.')\n return\n\n logger.warning(\n '%s %s in %s exists with URL %s',\n self.name,\n self.repo_name,\n display_path(dest),\n existing_url,\n )\n prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',\n ('s', 'i', 'w', 'b'))\n else:\n logger.warning(\n 'Directory %s already exists, and is not a %s %s.',\n dest,\n self.name,\n self.repo_name,\n )\n # https://github.com/python/mypy/issues/1174\n prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore\n ('i', 'w', 'b'))\n\n logger.warning(\n 'The plan is to install the %s repository %s',\n self.name,\n url,\n )\n response = ask_path_exists('What to do? 
%s' % prompt[0], prompt[1])\n\n if response == 'a':\n sys.exit(-1)\n\n if response == 'w':\n logger.warning('Deleting %s', display_path(dest))\n rmtree(dest)\n self.fetch_new(dest, url, rev_options)\n return\n\n if response == 'b':\n dest_dir = backup_dir(dest)\n logger.warning(\n 'Backing up %s to %s', display_path(dest), dest_dir,\n )\n shutil.move(dest, dest_dir)\n self.fetch_new(dest, url, rev_options)\n return\n\n # Do nothing if the response is \"i\".\n if response == 's':\n logger.info(\n 'Switching %s %s to %s%s',\n self.repo_name,\n display_path(dest),\n url,\n rev_display,\n )\n self.switch(dest, url, rev_options)", "docstring": "Install or update in editable mode the package represented by this\n VersionControl object.\n\nArgs:\n dest: the repository directory in which to install or update.", "source": "juraj_google_style"} -{"code": "def remove_flow_controller(cls, name):\n\n if name not in cls.registered_controllers:\n raise KeyError(\"Flow controller not found: %s\" % name)\n del cls.registered_controllers[name]", "docstring": "Removes a flow controller.\n\nArgs:\n name (string): Name of the controller to remove.\n\nRaises:\n KeyError: If the controller to remove was not registered.", "source": "juraj_google_style"} -{"code": "def _ReadRecordSchemaIndexes(self, tables, file_object, record_offset):\n\n _ = self._ReadRecordHeader(file_object, record_offset)\n\n attribute_value_offsets = self._ReadRecordAttributeValueOffset(\n file_object, record_offset + 24, 5)\n\n if attribute_value_offsets != (0x2d, 0x31, 0x35, 0x39, 0x3d):\n raise errors.ParseError('Unsupported record attribute value offsets')\n\n file_offset = file_object.tell()\n data_type_map = self._GetDataTypeMap('keychain_record_schema_indexes')\n\n record_values, _ = self._ReadStructureFromFileObject(\n file_object, file_offset, data_type_map)\n\n if record_values.relation_identifier not in tables:\n raise errors.ParseError(\n 'CSSM_DL_DB_SCHEMA_INDEXES defines relation identifier not defined '\n 'in CSSM_DL_DB_SCHEMA_INFO.')\n\n table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES, None)\n if not table:\n raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_INDEXES table.')\n\n record = collections.OrderedDict({\n 'RelationID': record_values.relation_identifier,\n 'IndexID': record_values.index_identifier,\n 'AttributeID': record_values.attribute_identifier,\n 'IndexType': record_values.index_type,\n 'IndexedDataLocation': record_values.index_data_location})\n\n table.records.append(record)", "docstring": "Reads a schema indexes (CSSM_DL_DB_SCHEMA_INDEXES) record.\n\nArgs:\n tables (dict[int, KeychainDatabaseTable]): tables per identifier.\n file_object (file): file-like object.\n record_offset (int): offset of the record relative to the start of\n the file.\n\nRaises:\n ParseError: if the record cannot be read.", "source": "juraj_google_style"} -{"code": "def script_dir_plus_file(filename, pyobject, follow_symlinks=True):\n\n return join(script_dir(pyobject, follow_symlinks), filename)", "docstring": "Get current script's directory and then append a filename\n\nArgs:\n filename (str): Filename to append to directory path\n pyobject (Any): Any Python object in the script\n follow_symlinks (Optional[bool]): Follow symlinks or not. 
Defaults to True.\n\nReturns:\n str: Current script's directory and with filename appended", "source": "juraj_google_style"} -{"code": "def raster_statistics(raster_file):\n\n ds = gdal_Open(raster_file)\n band = ds.GetRasterBand(1)\n minv, maxv, meanv, std = band.ComputeStatistics(False)\n return minv, maxv, meanv, std", "docstring": "Get basic statistics of raster data.\n\nArgs:\n raster_file: raster file path.\n\nReturns:\n min, max, mean, std.", "source": "juraj_google_style"} -{"code": "def __init__(self, data=None, **kwargs):\n\n if data not in _DATA_OPTIONS:\n raise ValueError(\"data must be one of %s\" % _DATA_OPTIONS)\n\n super(CycleGANConfig, self).__init__(**kwargs)\n self.data = data", "docstring": "Constructs a CycleGANConfig.\n\nArgs:\n data: `str`, one of `_DATA_OPTIONS`.\n **kwargs: keyword arguments forwarded to super.", "source": "juraj_google_style"} -{"code": "def check_schema_transforms_match(schema, inverted_features):\n\n num_target_transforms = 0\n\n for col_schema in schema:\n col_name = col_schema['name']\n col_type = col_schema['type'].lower()\n\n # Check each transform and schema are compatible\n if col_name in inverted_features:\n for transform in inverted_features[col_name]:\n transform_name = transform['transform']\n if transform_name == constant.TARGET_TRANSFORM:\n num_target_transforms += 1\n continue\n\n elif col_type in constant.NUMERIC_SCHEMA:\n if transform_name not in constant.NUMERIC_TRANSFORMS:\n raise ValueError(\n 'Transform %s not supported by schema %s' % (transform_name, col_type))\n elif col_type == constant.STRING_SCHEMA:\n if (transform_name not in constant.CATEGORICAL_TRANSFORMS + constant.TEXT_TRANSFORMS and\n transform_name != constant.IMAGE_TRANSFORM):\n raise ValueError(\n 'Transform %s not supported by schema %s' % (transform_name, col_type))\n else:\n raise ValueError('Unsupported schema type %s' % col_type)\n\n # Check each transform is compatible for the same source column.\n # inverted_features[col_name] should belong to exactly 1 of the 5 groups.\n if col_name in inverted_features:\n transform_set = {x['transform'] for x in inverted_features[col_name]}\n if 1 != sum([transform_set.issubset(set(constant.NUMERIC_TRANSFORMS)),\n transform_set.issubset(set(constant.CATEGORICAL_TRANSFORMS)),\n transform_set.issubset(set(constant.TEXT_TRANSFORMS)),\n transform_set.issubset(set([constant.IMAGE_TRANSFORM])),\n transform_set.issubset(set([constant.TARGET_TRANSFORM]))]):\n message = % (str(constant.TEXT_TRANSFORMS),\n str(constant.CATEGORICAL_TRANSFORMS),\n str(constant.NUMERIC_TRANSFORMS),\n constant.IMAGE_TRANSFORM,\n constant.TARGET_TRANSFORM,\n col_name,\n str(transform_set))\n raise ValueError(message)\n\n if num_target_transforms != 1:\n raise ValueError('Must have exactly one target transform')", "docstring": "Checks that the transform and schema do not conflict.\n\nArgs:\n schema: schema list\n inverted_features: inverted_features dict\n\nRaises:\n ValueError if transform cannot be applied given schema type.", "source": "juraj_google_style"} -{"code": "def __mod__(self, other: Union[_FormatArg, Iterable[_FormatArg]]) -> bytes:\n\n if isinstance(other, bytes):\n return self.format([other])\n elif hasattr(other, '__bytes__'):\n supports_bytes = cast(SupportsBytes, other)\n return self.format([bytes(supports_bytes)])\n elif hasattr(other, '__iter__'):\n items = cast(Iterable[_FormatArg], other)\n return self.format(items)\n return NotImplemented", "docstring": "String interpolation, shortcut for :meth:`.format`.\n\nArgs:\n other: The 
data interpolated into the format string.", "source": "juraj_google_style"} -{"code": "def DNN(input_shape,\n dense_layers,\n output_layer=[1, 'sigmoid'],\n optimizer='adam',\n loss='binary_crossentropy'):\n\n\n inputs = Input(shape=input_shape)\n\n dense = inputs\n\n for i, d in enumerate(dense_layers):\n dense = Dense(d, activation='relu')(dense)\n dense = BatchNormalization()(dense)\n dense = Dropout(0.3)(dense)\n\n output = Dense(output_layer[0], activation=output_layer[1])(dense)\n\n model = Model(inputs=inputs, outputs=output)\n model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])\n\n return model", "docstring": "Summary\n\nArgs:\n input_shape (list): The shape of the input layer\n targets (int): Number of targets\n dense_layers (list): Dense layer descriptor [fully_connected]\n optimizer (str or object optional): Keras optimizer as string or keras optimizer\n\nReturns:\n TYPE: model, build_arguments", "source": "juraj_google_style"} -{"code": "def TerminateAFF4Flow(cls, flow_id, reason=None, status=None, token=None):\n\n flow_obj = aff4.FACTORY.Open(\n flow_id, aff4_type=GRRFlow, mode=\"rw\", token=token)\n\n if not flow_obj:\n raise FlowError(\"Could not terminate flow %s\" % flow_id)\n\n with flow_obj:\n runner = flow_obj.GetRunner()\n if not runner.IsRunning():\n return\n\n if token is None:\n token = access_control.ACLToken()\n\n if reason is None:\n reason = \"Manual termination by console.\"\n\n # This calls runner.Terminate to kill the flow\n runner.Error(reason, status_code=status)\n\n flow_obj.Log(\"Terminated by user {0}. Reason: {1}\".format(\n token.username, reason))\n\n # From now on we run with supervisor access\n super_token = token.SetUID()\n\n # Also terminate its children\n children_to_kill = aff4.FACTORY.MultiOpen(\n flow_obj.ListChildren(), token=super_token, aff4_type=GRRFlow)\n\n for child_obj in children_to_kill:\n cls.TerminateAFF4Flow(\n child_obj.urn, reason=\"Parent flow terminated.\", token=super_token)", "docstring": "Terminate a flow.\n\nArgs:\n flow_id: The flow session_id to terminate.\n reason: A reason to log.\n status: Status code used in the generated status message.\n token: The access token to be used for this request.\n\nRaises:\n FlowError: If the flow can not be found.", "source": "juraj_google_style"} -{"code": "def call_rpc(self, rpc_id, payload=bytes()):\n\n\n # If we define the RPC locally, call that one. We use this for reporting\n # our status\n if super(ServiceDelegateTile, self).has_rpc(rpc_id):\n return super(ServiceDelegateTile, self).call_rpc(rpc_id, payload)\n\n async def _awaitable_wrapper():\n\n # FIXME: We set the timeout here to a very large number since we don't\n # know what an appropriate timeout is and don't want to restrict the\n # run time of RPCs that could be long running. 
The caller of the RPC\n # through the tile will know what an appropriate timeout is for the\n # RPC that they are trying to call.\n resp = await self._client.send_rpc(self._service, rpc_id, payload, timeout=120.0)\n result = resp['result']\n\n if result == 'success':\n return resp['response']\n elif result == 'service_not_found':\n raise TileNotFoundError(\"Could not find service by name\", name=self._service)\n elif result == 'rpc_not_found':\n raise RPCNotFoundError(\"Could not find RPC on service\", name=self._service, rpc_id=rpc_id)\n elif result == 'invalid_arguments':\n raise RPCInvalidArgumentsError(\"Invalid arguments to RPC\", name=self._service, rpc_id=rpc_id)\n elif result == 'invalid_response':\n raise RPCInvalidReturnValueError(\"Invalid response from RPC\", name=self._service, rpc_id=rpc_id)\n elif result == 'execution_exception':\n raise InternalError(\"Exception raised during processing RPC\", name=self._service, rpc_id=rpc_id)\n else:\n raise InternalError(\"Unknown response received from delegated RPC\", name=self._service, rpc_id=rpc_id, result=result)\n\n return _awaitable_wrapper()", "docstring": "Call an RPC by its ID.\n\nArgs:\n rpc_id (int): The number of the RPC\n payload (bytes): A byte string of payload parameters up to 20 bytes\n\nReturns:\n str: The response payload from the RPC", "source": "juraj_google_style"} -{"code": "def save(self, revision=True, *args, **kwargs):\n\n\n if revision:\n # If this is a revision, set it to be the head of the list and increment the revision id\n self.head = True\n self.revision_id += 1\n\n previous_revision = self.get_previous_revision()\n\n if not self.is_parent():\n # If this is a revision, delete the old head of the list.\n type(self).objects \\\n .filter(parent=self.parent, head=True) \\\n .update(head=None)\n\n # Clear the instance id to force Django to save a new instance.\n # Both fields (pk, id) required for this to work -- something to do with model inheritance\n self.pk = None\n self.id = None\n\n # New version is unpublished by default\n self.is_published = None\n\n # Set created_at to current time, but only for first version\n if not self.created_at:\n self.created_at = timezone.now()\n self.updated_at = timezone.now()\n\n if revision:\n self.updated_at = timezone.now()\n\n super(Publishable, self).save(*args, **kwargs)\n\n # Update the parent foreign key\n if not self.parent:\n self.parent = self\n super(Publishable, self).save(update_fields=['parent'])\n\n if revision:\n # Set latest version for all articles\n type(self).objects \\\n .filter(parent=self.parent) \\\n .update(latest_version=self.revision_id)\n\n self.latest_version = self.revision_id\n\n return self", "docstring": "Handles the saving/updating of a Publishable instance.\n\nArgs:\n revision - if True, a new version of this Publishable will be created.", "source": "juraj_google_style"} -{"code": "def remove_son(self, son):\n\n self._sons = [x for x in self._sons if x.node_id != son.node_id]", "docstring": "Remove the son node. 
Do nothing if the node is not a son\n\nArgs:\n fathers: list of fathers to add", "source": "juraj_google_style"} -{"code": "def record_padding(self, iso_size):\n # type: (int) -> bytes\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('This IsoHybrid object is not yet initialized')\n\n return b'\\x00' * self._calc_cc(iso_size)[1]", "docstring": "A method to record padding for the ISO hybridization.\n\n Parameters:\n iso_size - The size of the ISO, excluding the hybridization.\n\nReturns:\n A string of zeros the right size to pad the ISO.", "source": "juraj_google_style"} -{"code": "def _normalize_angle(angle, range, step):\n\n while angle <= range[0]:\n angle += step\n while angle >= range[1]:\n angle -= step\n return angle", "docstring": "Finds an angle that matches the given one modulo step.\n\n Increments and decrements the given value with a given step.\n\nArgs:\n range: a 2-tuple of min and max target values.\n step: tuning step.\n\nReturns:\n Normalized value within a given range.", "source": "juraj_google_style"} -{"code": "def from_ddy_file(cls, file_path):\n\n # check that the file is there\n if not os.path.isfile(file_path):\n raise ValueError(\n 'Cannot find a .ddy file at {}'.format(file_path))\n if not file_path.lower().endswith('.ddy'):\n raise ValueError(\n 'DDY file does not have a .ddy extension.')\n\n # check the python version and open the file\n try:\n iron_python = True if platform.python_implementation() == 'IronPython' \\\n else False\n except Exception:\n iron_python = True\n\n if iron_python:\n ddywin = codecs.open(file_path, 'r')\n else:\n ddywin = codecs.open(file_path, 'r', encoding='utf-8', errors='ignore')\n\n try:\n ddytxt = ddywin.read()\n location_format = re.compile(\n r\"(Site:Location,(.|\\n)*?((;\\s*!)|(;\\s*\\n)|(;\\n)))\")\n design_day_format = re.compile(\n r\"(SizingPeriod:DesignDay,(.|\\n)*?((;\\s*!)|(;\\s*\\n)|(;\\n)))\")\n location_matches = location_format.findall(ddytxt)\n des_day_matches = design_day_format.findall(ddytxt)\n except Exception as e:\n import traceback\n raise Exception('{}\\n{}'.format(e, traceback.format_exc()))\n else:\n # check to be sure location was found\n assert len(location_matches) > 0, 'No location objects found ' \\\n 'in .ddy file.'\n\n # build design day and location objects\n location = Location.from_location(location_matches[0][0])\n design_days = [DesignDay.from_ep_string(\n match[0], location) for match in des_day_matches]\n finally:\n ddywin.close()\n\n cls_ = cls(location, design_days)\n cls_._file_path = os.path.normpath(file_path)\n return cls_", "docstring": "Initalize from a ddy file object from an existing ddy file.\n\nArgs:\n file_path: A string representing a complete path to the .ddy file.", "source": "juraj_google_style"} -{"code": "def _kl_bernoulli_bernoulli(a, b, name=None):\n\n with tf.name_scope(name or \"kl_bernoulli_bernoulli\"):\n delta_probs0 = tf.nn.softplus(-b.logits) - tf.nn.softplus(-a.logits)\n delta_probs1 = tf.nn.softplus(b.logits) - tf.nn.softplus(a.logits)\n return (tf.sigmoid(a.logits) * delta_probs0\n + tf.sigmoid(-a.logits) * delta_probs1)", "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.\n\nArgs:\n a: instance of a Bernoulli distribution object.\n b: instance of a Bernoulli distribution object.\n name: (optional) Name to use for created operations.\n default is \"kl_bernoulli_bernoulli\".\n\nReturns:\n Batchwise KL(a || b)", "source": "juraj_google_style"} -{"code": "def __is_noncopyable_single(class_, 
already_visited_cls_vars=None):\n\n # It is not enough to check base classes, we should also to check\n # member variables.\n logger = utils.loggers.cxx_parser\n\n if has_copy_constructor(class_) \\\n and has_public_constructor(class_) \\\n and has_public_assign(class_) \\\n and has_public_destructor(class_):\n msg = os.linesep.join([\n \"__is_noncopyable_single - %s - COPYABLE:\" % class_.decl_string,\n \" trivial copy constructor: yes\",\n \" public constructor: yes\",\n \" public assign: yes\",\n \" public destructor: yes\"])\n logger.debug(msg)\n return False\n\n if already_visited_cls_vars is None:\n already_visited_cls_vars = []\n\n if find_noncopyable_vars(class_, already_visited_cls_vars):\n logger.debug(\n (\"__is_noncopyable_single(TRUE) - %s - contains noncopyable \" +\n \"members\"), class_.decl_string)\n return True\n\n logger.debug((\n \"__is_noncopyable_single(FALSE) - %s - COPYABLE, because is \" +\n \"doesn't contains noncopyable members\"), class_.decl_string)\n return False", "docstring": "Implementation detail.\n\n Checks if the class is non copyable, without considering the base classes.\n\nArgs:\n class_ (declarations.class_t): the class to be checked\n already_visited_cls_vars (list): optional list of vars that should not\n be checked a second time, to prevent infinite recursions.\n\nReturns:\n bool: if the class is non copyable", "source": "juraj_google_style"} -{"code": "def to_text_diagram(\n self,\n *,\n use_unicode_characters: bool = True,\n transpose: bool = False,\n precision: Optional[int] = 3,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str:\n\n diagram = self.to_text_diagram_drawer(\n use_unicode_characters=use_unicode_characters,\n precision=precision,\n qubit_order=qubit_order,\n transpose=transpose)\n\n return diagram.render(\n crossing_char=(None\n if use_unicode_characters\n else ('-' if transpose else '|')),\n horizontal_spacing=1 if transpose else 3,\n use_unicode_characters=use_unicode_characters)", "docstring": "Returns text containing a diagram describing the circuit.\n\nArgs:\n use_unicode_characters: Determines if unicode characters are\n allowed (as opposed to ascii-only diagrams).\n transpose: Arranges qubit wires vertically instead of horizontally.\n precision: Number of digits to display in text diagram\n qubit_order: Determines how qubits are ordered in the diagram.\n\nReturns:\n The text diagram.", "source": "juraj_google_style"} -{"code": "def kill_proc_tree(pid: int,\n including_parent: bool = True,\n timeout_s: float = 5) \\\n -> Tuple[Set[psutil.Process], Set[psutil.Process]]:\n # noqa\n parent = psutil.Process(pid)\n to_kill = parent.children(recursive=True) # type: List[psutil.Process]\n if including_parent:\n to_kill.append(parent)\n for proc in to_kill:\n proc.kill() # SIGKILL\n gone, still_alive = psutil.wait_procs(to_kill, timeout=timeout_s)\n return gone, still_alive", "docstring": "Kills a tree of processes, starting with the parent. 
Slightly modified from\n https://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows.\n\nArgs:\n pid: process ID of the parent\n including_parent: kill the parent too?\n timeout_s: timeout to wait for processes to close\n\nReturns:\n tuple: ``(gone, still_alive)``, where both are sets of\n :class:`psutil.Process` objects", "source": "juraj_google_style"} -{"code": "def add_rect(self, width, height, rid=None):\n\n assert(width > 0 and height >0)\n\n # Search best position and orientation\n rect, _ = self._select_position(width, height)\n if not rect:\n return None\n\n # Subdivide all the max rectangles intersecting with the selected \n # rectangle.\n self._split(rect)\n\n # Remove any max_rect contained by another \n self._remove_duplicates()\n\n # Store and return rectangle position.\n rect.rid = rid\n self.rectangles.append(rect)\n return rect", "docstring": "Add rectangle of widthxheight dimensions.\n\nArgs:\n width (int, float): Rectangle width\n height (int, float): Rectangle height\n rid: Optional rectangle user id\n\nReturns:\n Rectangle: Rectangle with placemente coordinates\n None: If the rectangle couldn be placed.", "source": "juraj_google_style"} -{"code": "def has_current_path(self, path, **kwargs):\n\n\n try:\n return self.assert_current_path(path, **kwargs)\n except ExpectationNotMet:\n return False", "docstring": "Checks if the page has the given path.\n\nArgs:\n path (str | RegexObject): The string or regex that the current \"path\" should match.\n **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.\n\nReturns:\n bool: Whether it matches.", "source": "juraj_google_style"} -{"code": "def global_horizontal_radiation(self, value=9999.0):\n\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type float '\n 'for field `global_horizontal_radiation`'.format(value))\n if value < 0.0:\n raise ValueError('value need to be greater or equal 0.0 '\n 'for field `global_horizontal_radiation`')\n\n self._global_horizontal_radiation = value", "docstring": "Corresponds to IDD Field `global_horizontal_radiation`\n\nArgs:\n value (float): value for IDD Field `global_horizontal_radiation`\n Unit: Wh/m2\n value >= 0.0\n Missing value: 9999.0\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\nRaises:\n ValueError: if `value` is not a valid value", "source": "juraj_google_style"} -{"code": "def sub_escapes(sval):\n\n sval = sval.replace('\\\\a', '\\a')\n sval = sval.replace('\\\\b', '\\x00')\n sval = sval.replace('\\\\f', '\\f')\n sval = sval.replace('\\\\n', '\\n')\n sval = sval.replace('\\\\r', '\\r')\n sval = sval.replace('\\\\t', '\\t')\n sval = sval.replace('\\\\v', '\\v')\n sval = sval.replace('\\\\\\\\', '\\\\')\n return sval", "docstring": "Process escaped characters in ``sval``.\n\nArgs:\n - `sval`:", "source": "juraj_google_style"} -{"code": "def parse_compounds(compound_info, case_id, variant_type):\n\n # We need the case to construct the correct id\n compounds = []\n if compound_info:\n for family_info in compound_info.split(','):\n splitted_entry = family_info.split(':')\n # This is the family id\n if splitted_entry[0] == case_id:\n for compound in splitted_entry[1].split('|'):\n splitted_compound = compound.split('>')\n compound_obj = {}\n compound_name = splitted_compound[0]\n compound_obj['variant'] = generate_md5_key(compound_name.split('_') +\n [variant_type, case_id])\n\n try:\n compound_score = 
float(splitted_compound[1])\n except (TypeError, IndexError):\n compound_score = 0.0\n\n compound_obj['score'] = compound_score\n compound_obj['display_name'] = compound_name\n\n compounds.append(compound_obj)\n\n return compounds", "docstring": "Get a list with compounds objects for this variant.\n\nArgs:\n compound_info(str): A Variant dictionary\n case_id (str): unique family id\n variant_type(str): 'research' or 'clinical'\n\nReturns:\n compounds(list(dict)): A list of compounds", "source": "juraj_google_style"} -{"code": "def transform(self, input_df):\n\n\n # Shallow copy the dataframe (we'll be making changes to some columns)\n _df = input_df.copy(deep=False)\n\n # Convert all columns that are/should be categorical\n for column in self.cat_columns:\n\n # Sanity check\n if column not in _df:\n raise RuntimeError('Required column {:s} not found'.format(column))\n\n # If the column isn't already a category then change it\n if _df[column].dtype == 'object':\n print('Changing column {:s} to category'.format(column))\n _df[column] = pd.Categorical(_df[column])\n\n # Remove any columns that aren't bool/int/float/category\n _df = _df.select_dtypes(include=['bool', 'int', 'float', 'category'])\n\n # Normalize any numeric columns if normalize specified\n if self.normalize:\n for column in list(_df.select_dtypes(include=[np.number]).columns.values):\n print('Normalizing column {:s}...'.format(column))\n smin, smax = self.norm_map[column]\n _df[column] = (_df[column] - smin) / (smax - smin)\n\n # Now that categorical columns are setup call the dummy_encoder\n return self.dummy_encoder.transform(_df)", "docstring": "Convert the dataframe to a matrix (numpy ndarray)\n\nArgs:\n input_df (dataframe): The dataframe to convert", "source": "juraj_google_style"} -{"code": "def search_messages(self, *, query: str, **kwargs) -> SlackResponse:\n\n self._validate_xoxp_token()\n kwargs.update({\"query\": query})\n return self.api_call(\"search.messages\", http_verb=\"GET\", params=kwargs)", "docstring": "Searches for messages matching a query.\n\nArgs:\n query (str): Search query. May contains booleans, etc.\n e.g. 'pickleface'", "source": "juraj_google_style"} -{"code": "def get_params_and_defaults(param_list, db):\n\n return [[p, d] for p, d in db.get_all_values_of_all_params().items()]", "docstring": "Deduce [parameter, default] pairs from simulations available in the db.\n\nArgs:\n param_list (list): List of parameters to query for.\n db (DatabaseManager): Database where to query for defaults.", "source": "juraj_google_style"} -{"code": "def show_plot(plot, width=PREVIEW_WIDTH, height=PREVIEW_HEIGHT):\n\n return SVG(data=plot_to_svg(plot, width, height))", "docstring": "Preview a plot in a jupyter notebook.\n\nArgs:\n plot (list): the plot to display (list of layers)\n width (int): the width of the preview\n height (int): the height of the preview\n\nReturns:\n An object that renders in Jupyter as the provided plot", "source": "juraj_google_style"} -{"code": "def do_patch(endpoint, body, access_token):\n\n headers = {\"content-type\": \"application/json\", \"Authorization\": 'Bearer ' + access_token}\n headers['User-Agent'] = get_user_agent()\n return requests.patch(endpoint, data=body, headers=headers)", "docstring": "Do an HTTP PATCH request and return JSON.\n\nArgs:\n endpoint (str): Azure Resource Manager management endpoint.\n body (str): JSON body of information to patch.\n access_token (str): A valid Azure authentication token.\n\nReturns:\n HTTP response. 
JSON body.", "source": "juraj_google_style"} -{"code": "def add_dicts(one_dict, other_dict):\n\n result = other_dict.copy()\n for k, v in one_dict.items():\n if v is None:\n v = 0\n\n if isinstance(v, dict):\n result[k] = add_dicts(v, other_dict.get(k, {}))\n else:\n other_value = result.get(k, 0)\n if other_value is None:\n other_value = 0\n result[k] = other_value + v\n\n return result", "docstring": "Suma clave a clave los dos diccionarios. Si algún valor es un\n diccionario, llama recursivamente a la función. Ambos diccionarios deben\n tener exactamente las mismas claves, y los valores asociados deben ser\n sumables, o diccionarios.\n\nArgs:\n one_dict (dict)\n other_dict (dict)\n\nReturns:\n dict: resultado de la suma", "source": "juraj_google_style"} -{"code": "def create_reset_score(cls, student_item):\n\n # By setting the \"reset\" flag, we ensure that the \"highest\"\n # score in the score summary will point to this score.\n # By setting points earned and points possible to 0,\n # we ensure that this score will be hidden from the user.\n return cls.objects.create(\n student_item=student_item,\n submission=None,\n points_earned=0,\n points_possible=0,\n reset=True,\n )", "docstring": "Create a \"reset\" score (a score with a null submission).\n\n Only scores created after the most recent \"reset\" score\n should be used to determine a student's effective score.\n\nArgs:\n student_item (StudentItem): The student item model.\n\nReturns:\n Score: The newly created \"reset\" score.\n\nRaises:\n DatabaseError: An error occurred while creating the score", "source": "juraj_google_style"} -{"code": "def new(self):\n # type: () -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Header Descriptor already initialized')\n\n self.unique_id = 261\n\n self._initialized = True", "docstring": "A method to create a new UDF Logical Volume Header Descriptor.\n\n Parameters:\n None.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def loads(string, triples=False, cls=PENMANCodec, **kwargs):\n\n codec = cls(**kwargs)\n return list(codec.iterdecode(string, triples=triples))", "docstring": "Deserialize a list of PENMAN-encoded graphs from *string*.\n\nArgs:\n string: a string containing graph data\n triples: if True, read graphs as triples instead of as PENMAN\n cls: serialization codec class\n kwargs: keyword arguments passed to the constructor of *cls*\n\nReturns:\n a list of Graph objects", "source": "juraj_google_style"} -{"code": "def _PrintTasksInformation(self, storage_reader):\n\n table_view = views.ViewsFactory.GetTableView(\n self._views_format_type, title='Tasks')\n\n for task_start, _ in storage_reader.GetSessions():\n start_time = timelib.Timestamp.CopyToIsoFormat(\n task_start.timestamp)\n task_identifier = uuid.UUID(hex=task_start.identifier)\n task_identifier = '{0!s}'.format(task_identifier)\n table_view.AddRow([task_identifier, start_time])\n\n table_view.Write(self._output_writer)", "docstring": "Prints information about the tasks.\n\nArgs:\n storage_reader (StorageReader): storage reader.", "source": "juraj_google_style"} -{"code": "def load_panel_app(adapter, panel_id=None, institute='cust000'):\n\n base_url = 'https://panelapp.genomicsengland.co.uk/WebServices/{0}/'\n\n hgnc_map = adapter.genes_by_alias()\n\n if panel_id:\n panel_ids = [panel_id]\n\n if not panel_id:\n\n LOG.info(\"Fetching all panel app panels\")\n data = get_request(base_url.format('list_panels'))\n\n json_lines = json.loads(data)\n\n panel_ids = 
[panel_info['Panel_Id'] for panel_info in json_lines['result']]\n\n for panel_id in panel_ids:\n panel_data = get_request(base_url.format('get_panel') + panel_id)\n\n parsed_panel = parse_panel_app_panel(\n panel_info = json.loads(panel_data)['result'], \n hgnc_map=hgnc_map,\n institute=institute\n )\n parsed_panel['panel_id'] = panel_id\n\n if len(parsed_panel['genes']) == 0:\n LOG.warning(\"Panel {} is missing genes. Skipping.\".format(parsed_panel['display_name']))\n continue\n\n try:\n adapter.load_panel(parsed_panel=parsed_panel)\n except Exception as err:\n raise err", "docstring": "Load PanelApp panels into scout database\n\n If no panel_id load all PanelApp panels\n\nArgs:\n adapter(scout.adapter.MongoAdapter)\n panel_id(str): The panel app panel id", "source": "juraj_google_style"} -{"code": "def mimic_adam_with_adafactor(hparams):\n\n assert \"adam\" in hparams.optimizer\n hparams.optimizer = \"adafactor\"\n hparams.optimizer_adafactor_beta1 = hparams.optimizer_adam_beta1\n hparams.optimizer_adafactor_beta2 = hparams.optimizer_adam_beta2\n hparams.optimizer_adafactor_multiply_by_parameter_scale = False\n hparams.optimizer_adafactor_factored = False\n hparams.optimizer_adafactor_clipping_threshold = None\n hparams.optimizer_adafactor_decay_type = \"adam\"", "docstring": "Switch from Adam to Adafactor, approximating the behavior of Adam.\n\n Some minor things may be different, like epsilon and beta1 correction.\n\nArgs:\n hparams: model hyperparameters where \"adam\" in hparams.optimizer", "source": "juraj_google_style"} -{"code": "def get_data(self, label: str) -> Any:\n\n return self._get_resource(label, self._data, \"data\")", "docstring": "Get a data resource by label\n\nArgs:\n label (str): The labvel for the data resource to fetch\n\nReturns:\n The requeted data object", "source": "juraj_google_style"} -{"code": "def pmap(f, iterable, n=None, dummy=False, p=None):\n\n\n # make it easier to debug.\n if n == 1:\n for r in it.starmap(f, iterable):\n yield r\n raise StopIteration\n\n\n if p is None:\n po = pool(n, dummy)\n else:\n po = p\n assert hasattr(po, 'imap')\n f = _func_star(f)\n\n try:\n for r in po.imap(f, iterable):\n yield r\n\n # explicitly clean up created pool\n finally:\n if p is None:\n try:\n po.close()\n po.join()\n except:\n pass", "docstring": "parallel map of a function to an iterable\n if each item in iterable is itself an iterable, then\n automatically call f(*item) instead of f(item)\n\nArgs:\n f: function\n iterable: any iterable where each item is sent to f\n n: number of cpus (default is number on machine)\n dummy: use dummy pool.\n p: existing pool to re-use", "source": "juraj_google_style"} -{"code": "def evaluate_stacked_ensemble(path, ensemble_id):\n\n with functions.DBContextManager(path) as session:\n stacked_ensemble = session.query(models.StackedEnsemble).filter_by(\n id=ensemble_id).first()\n if not stacked_ensemble:\n raise exceptions.UserError('Stacked ensemble {} '\n 'does not exist'.format(ensemble_id))\n\n stacked_ensemble.job_id = get_current_job().id\n stacked_ensemble.job_status = 'started'\n\n session.add(stacked_ensemble)\n session.commit()\n\n try:\n meta_features_list = []\n for base_learner in stacked_ensemble.base_learners:\n mf = np.load(base_learner.meta_features_path(path))\n if len(mf.shape) == 1:\n mf = mf.reshape(-1, 1)\n meta_features_list.append(mf)\n\n secondary_features = np.concatenate(meta_features_list, axis=1)\n\n # Get data\n extraction = session.query(models.Extraction).first()\n return_splits_iterable = 
functions.import_object_from_string_code(\n extraction.meta_feature_generation['source'],\n 'return_splits_iterable'\n )\n X, y = extraction.return_train_dataset()\n\n # We need to retrieve original order of meta-features\n indices_list = [test_index for train_index, test_index in return_splits_iterable(X, y)]\n indices = np.concatenate(indices_list)\n X, y = X[indices], y[indices]\n\n est = stacked_ensemble.return_secondary_learner()\n\n return_splits_iterable_stacked_ensemble = functions.import_object_from_string_code(\n extraction.stacked_ensemble_cv['source'],\n 'return_splits_iterable'\n )\n preds = []\n trues_list = []\n for train_index, test_index in return_splits_iterable_stacked_ensemble(secondary_features, y):\n X_train, X_test = secondary_features[train_index], secondary_features[test_index]\n y_train, y_test = y[train_index], y[test_index]\n est = est.fit(X_train, y_train)\n preds.append(\n getattr(est, stacked_ensemble.base_learner_origin.\n meta_feature_generator)(X_test)\n )\n trues_list.append(y_test)\n preds = np.concatenate(preds, axis=0)\n y_true = np.concatenate(trues_list)\n\n for key in stacked_ensemble.base_learner_origin.metric_generators:\n metric_generator = functions.import_object_from_string_code(\n stacked_ensemble.base_learner_origin.metric_generators[key],\n 'metric_generator'\n )\n stacked_ensemble.individual_score[key] = metric_generator(y_true, preds)\n\n stacked_ensemble.job_status = 'finished'\n session.add(stacked_ensemble)\n session.commit()\n\n except:\n session.rollback()\n stacked_ensemble.job_status = 'errored'\n stacked_ensemble.description['error_type'] = repr(sys.exc_info()[0])\n stacked_ensemble.description['error_value'] = repr(sys.exc_info()[1])\n stacked_ensemble.description['error_traceback'] = \\\n traceback.format_exception(*sys.exc_info())\n session.add(stacked_ensemble)\n session.commit()\n raise", "docstring": "Evaluates the ensemble and updates the database when finished/\n\nArgs:\n path (str): Path to Xcessiv notebook\n\n ensemble_id (str): Ensemble ID", "source": "juraj_google_style"} -{"code": "def median(series):\n\n\n if np.issubdtype(series.dtype, np.number):\n return series.median()\n else:\n return np.nan", "docstring": "Returns the median value of a series.\n\nArgs:\n series (pandas.Series): column to summarize.", "source": "juraj_google_style"} -{"code": "def sg_parallel(func):\n r\n @wraps(func)\n def wrapper(**kwargs):\n r\n # parse option\n opt = tf.sg_opt(kwargs)\n\n # loop for all available GPUs\n res = []\n for i in range(sg_gpus()):\n # specify device\n with tf.device('/gpu:%d' % i):\n # give new scope only to operation\n with tf.name_scope('gpu_%d' % i):\n # save reuse flag\n with sg_context(reuse=(True if i > 0 else False)):\n # call function\n res.append(func(opt * tf.sg_opt(gpu_index=i)))\n\n return res\n\n return wrapper", "docstring": "r\"\"\"Decorates function as multiple gpu support towers.\n\nArgs:\n func: function to decorate", "source": "juraj_google_style"} -{"code": "def populate_native_libraries(version):\n\n with open(BINARY_EXT_TEMPLATE, \"r\") as file_obj:\n template = file_obj.read()\n contents = template.format(revision=version)\n with open(BINARY_EXT_FILE, \"w\") as file_obj:\n file_obj.write(contents)", "docstring": "Populates ``binary-extension.rst`` with release-specific data.\n\nArgs:\n version (str): The current version.", "source": "juraj_google_style"} -{"code": "def get_service_health(service_id: str) -> str:\n\n # Check if the current and actual replica levels are the same\n if 
DC.get_replicas(service_id) != DC.get_actual_replica(service_id):\n health_status = \"Unhealthy\"\n else:\n health_status = \"Healthy\"\n\n return health_status", "docstring": "Get the health of a service using service_id.\n\nArgs:\n service_id\n\nReturns:\n str, health status", "source": "juraj_google_style"} -{"code": "def get_user(self, user):\n\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_user(user)", "docstring": "Get user's data (first and last name, email, etc).\n\nArgs:\n user (string): User name.\n\nReturns:\n (dictionary): User's data encoded in a dictionary.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def fixminimized(self, alphabet):\n\n endstate = len(list(self.states))\n for state in self.states:\n for char in alphabet:\n found = 0\n for arc in state.arcs:\n if self.isyms.find(arc.ilabel) == char:\n found = 1\n break\n if found == 0:\n self.add_arc(state.stateid, endstate, char)\n\n self[endstate].final = TropicalWeight(float('inf'))\n\n for char in alphabet:\n self.add_arc(endstate, endstate, char)", "docstring": "After pyfst minimization,\n all unused arcs are removed,\n and all sink states are removed.\n However this may break compatibility.\n\nArgs:\n alphabet (list): The input alphabet\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def parse_ensembl_gene_request(result):\n\n LOG.info(\"Parsing genes from request\")\n\n for index, row in result.iterrows():\n # print(index, row)\n ensembl_info = {}\n\n # Pandas represents missing data with nan which is a float\n if type(row['hgnc_symbol']) is float:\n # Skip genes without hgnc information\n continue\n\n ensembl_info['chrom'] = row['chromosome_name']\n ensembl_info['gene_start'] = int(row['start_position'])\n ensembl_info['gene_end'] = int(row['end_position'])\n ensembl_info['ensembl_gene_id'] = row['ensembl_gene_id']\n ensembl_info['hgnc_symbol'] = row['hgnc_symbol']\n\n hgnc_id = row['hgnc_id']\n\n if type(hgnc_id) is float:\n hgnc_id = int(hgnc_id)\n else:\n hgnc_id = int(hgnc_id.split(':')[-1])\n\n ensembl_info['hgnc_id'] = hgnc_id\n\n yield ensembl_info", "docstring": "Parse a dataframe with ensembl gene information\n\nArgs:\n res(pandas.DataFrame)\n\nYields:\n gene_info(dict)", "source": "juraj_google_style"} -{"code": "def _table_viewer(table, rows_per_page=25, fields=None):\n\n\n # TODO(gram): rework this to use google.datalab.utils.commands.chart_html\n\n if not table.exists():\n raise Exception('Table %s does not exist' % table.full_name)\n\n if not table.is_listable():\n return \"Done\"\n\n _HTML_TEMPLATE = u\n\n if fields is None:\n fields = google.datalab.utils.commands.get_field_list(fields, table.schema)\n div_id = google.datalab.utils.commands.Html.next_id()\n meta_count = ('rows: %d' % table.length) if table.length >= 0 else ''\n meta_name = table.full_name if table.job is None else ('job: %s' % table.job.id)\n if table.job:\n if table.job.cache_hit:\n meta_cost = 'cached'\n else:\n bytes = bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed)\n meta_cost = '%s processed' % bytes\n meta_time = 'time: %.1fs' % table.job.total_time\n else:\n meta_cost = ''\n meta_time = ''\n\n data, total_count = google.datalab.utils.commands.get_data(table, fields, first_row=0,\n count=rows_per_page)\n\n if total_count < 0:\n # The table doesn't have a length metadata property but may still be small if we fetched less\n # rows than we asked for.\n fetched_count = len(data['rows'])\n if fetched_count < 
rows_per_page:\n total_count = fetched_count\n\n chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'\n meta_entries = [meta_count, meta_time, meta_cost, meta_name]\n meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)]))\n\n return _HTML_TEMPLATE.format(div_id=div_id,\n static_table=google.datalab.utils.commands.HtmlBuilder\n .render_chart_data(data),\n meta_data=meta_data,\n chart_style=chart,\n source_index=google.datalab.utils.commands\n .get_data_source_index(table.full_name),\n fields=','.join(fields),\n total_rows=total_count,\n rows_per_page=rows_per_page,\n data=json.dumps(data, cls=google.datalab.utils.JSONEncoder))", "docstring": "Return a table viewer.\n\n This includes a static rendering of the first page of the table, that gets replaced\n by the charting code in environments where Javascript is executable and BQ is available.\n\nArgs:\n table: the table to view.\n rows_per_page: how many rows to display at one time.\n fields: an array of field names to display; default is None which uses the full schema.\n\nReturns:\n A string containing the HTML for the table viewer.", "source": "juraj_google_style"} -{"code": "def broadcast(self, gossip_message, message_type, exclude=None):\n\n with self._lock:\n if exclude is None:\n exclude = []\n for connection_id in self._peers.copy():\n if connection_id not in exclude and \\\n self._network.is_connection_handshake_complete(\n connection_id):\n self.send(\n message_type,\n gossip_message.SerializeToString(),\n connection_id,\n one_way=True)", "docstring": "Broadcast gossip messages.\n\n Broadcast the message to all peers unless they are in the excluded\n list.\n\nArgs:\n gossip_message: The message to be broadcast.\n message_type: Type of the message.\n exclude: A list of connection_ids that should be excluded from this\n broadcast.", "source": "juraj_google_style"} -{"code": "def from_statements(\n cls, sts: List[Influence], assign_default_polarities: bool = True\n ):\n\n\n _dict = {}\n for s in sts:\n if assign_default_polarities:\n for delta in deltas(s):\n if delta[\"polarity\"] is None:\n delta[\"polarity\"] = 1\n concepts = nameTuple(s)\n\n # Excluding self-loops for now:\n if concepts[0] != concepts[1]:\n if all(\n map(exists, (delta[\"polarity\"] for delta in deltas(s)))\n ):\n if concepts in _dict:\n _dict[concepts].append(s)\n else:\n _dict[concepts] = [s]\n\n edges = [\n (*concepts, {\"InfluenceStatements\": statements})\n for concepts, statements in _dict.items()\n ]\n return cls(edges)", "docstring": "Construct an AnalysisGraph object from a list of INDRA statements.\n Unknown polarities are set to positive by default.\n\nArgs:\n sts: A list of INDRA Statements\n\nReturns:\n An AnalysisGraph instance constructed from a list of INDRA\n statements.", "source": "juraj_google_style"} -{"code": "def get_num_days_required(offset, period='d', perc_required=0.90):\n\n x = pd.to_datetime('2010-01-01')\n delta = x - (x - offset)\n # convert to 'trading days' - rough guestimate\n days = delta.days * 0.69\n\n if period == 'd':\n req = days * perc_required\n elif period == 'm':\n req = (days / 20) * perc_required\n elif period == 'y':\n req = (days / 252) * perc_required\n else:\n raise NotImplementedError(\n 'period not supported. 
Supported periods are d, m, y')\n\n return req", "docstring": "Estimates the number of days required to assume that data is OK.\n\n Helper function used to determine if there are enough \"good\" data\n days over a given period.\n\nArgs:\n * offset (DateOffset): Offset (lookback) period.\n * period (str): Period string.\n * perc_required (float): percentage of number of days\n expected required.", "source": "juraj_google_style"} -{"code": "def erase(self):\n\n try:\n # This has to be in a try-catch, as the device may not be in a\n # state where it can halt, but we still want to try and erase.\n if not self.halted():\n self.halt()\n except errors.JLinkException:\n # Can't halt, so just continue to erasing.\n pass\n\n res = self._dll.JLINK_EraseChip()\n if res < 0:\n raise errors.JLinkEraseException(res)\n\n return res", "docstring": "Erases the flash contents of the device.\n\n This erases the flash memory of the target device. If this method\n fails, the device may be left in an inoperable state.\n\nArgs:\n self (JLink): the ``JLink`` instance\n\nReturns:\n Number of bytes erased.", "source": "juraj_google_style"} -{"code": "def write(self, data):\n\n start_time = time.time()\n self._get_write_buffer().write(data)\n ctx = context.get()\n operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)\n operation.counters.Increment(\n COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx)", "docstring": "Write data to the GoogleCloudStorage file.\n\nArgs:\n data: string containing the data to be written.", "source": "juraj_google_style"} -{"code": "def get_ilo_sso_url(self, ip=None):\n\n uri = \"{}/iloSsoUrl\".format(self.data[\"uri\"])\n\n if ip:\n uri = \"{}?ip={}\".format(uri, ip)\n\n return self._helper.do_get(uri)", "docstring": "Retrieves the URL to launch a Single Sign-On (SSO) session for the iLO web interface. If the server hardware is\n unsupported, the resulting URL will not use SSO and the iLO web interface will prompt for credentials.\n This is not supported on G7/iLO3 or earlier servers.\n\nArgs:\n ip: IP address or host name of the server's iLO management processor\n\nReturns:\n URL", "source": "juraj_google_style"} -{"code": "def BuildServiceStub(self, cls):\n\n\n def _ServiceStubInit(stub, rpc_channel):\n stub.rpc_channel = rpc_channel\n self.cls = cls\n cls.__init__ = _ServiceStubInit\n for method in self.descriptor.methods:\n setattr(cls, method.name, self._GenerateStubMethod(method))", "docstring": "Constructs the stub class.\n\nArgs:\n cls: The class that will be constructed.", "source": "juraj_google_style"} -{"code": "def _UpdateUserGroups(self, user, groups):\n\n groups = ','.join(groups)\n self.logger.debug('Updating user %s with groups %s.', user, groups)\n command = self.usermod_cmd.format(user=user, groups=groups)\n try:\n subprocess.check_call(command.split(' '))\n except subprocess.CalledProcessError as e:\n self.logger.warning('Could not update user %s. 
%s.', user, str(e))\n return False\n else:\n self.logger.debug('Updated user account %s.', user)\n return True", "docstring": "Update group membership for a Linux user.\n\nArgs:\n user: string, the name of the Linux user account.\n groups: list, the group names to add the user as a member.\n\nReturns:\n bool, True if user update succeeded.", "source": "juraj_google_style"} -{"code": "def __init__(self, wrapper):\n\n self._wrapper = wrapper\n self.make_request = self._wrapper.request_parking", "docstring": "Initialization of the API module.\n\nArgs:\n wrapper (Wrapper): Object that performs the requests to endpoints.", "source": "juraj_google_style"} -{"code": "def is_transcript_available(video_id, language_code=None):\n\n filter_attrs = {'video__edx_video_id': video_id}\n if language_code:\n filter_attrs['language_code'] = language_code\n\n transcript_set = VideoTranscript.objects.filter(**filter_attrs)\n return transcript_set.exists()", "docstring": "Returns whether the transcripts are available for a video.\n\nArgs:\n video_id: it can be an edx_video_id or an external_id extracted from external sources in a video component.\n language_code: it will the language code of the requested transcript.", "source": "juraj_google_style"} -{"code": "def vgg13(pretrained=False, **kwargs):\n\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['B']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))\n return model", "docstring": "VGG 13-layer model (configuration \"B\")\n\nArgs:\n pretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj_google_style"} -{"code": "def to_representation(self, instance):\n\n updated_program = copy.deepcopy(instance)\n enterprise_customer_catalog = self.context['enterprise_customer_catalog']\n updated_program['enrollment_url'] = enterprise_customer_catalog.get_program_enrollment_url(\n updated_program['uuid']\n )\n for course in updated_program['courses']:\n course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(course['key'])\n for course_run in course['course_runs']:\n course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(\n course_run['key']\n )\n return updated_program", "docstring": "Return the updated program data dictionary.\n\nArgs:\n instance (dict): The program data.\n\nReturns:\n dict: The updated program data.", "source": "juraj_google_style"} -{"code": "def combine(self, x):\n\n depth = tf.shape(x)[-1]\n x *= tf.expand_dims(self._nonpadding, -1)\n ret = tf.unsorted_segment_sum(\n x, self._flat_indices, num_segments=self._batch * self._length)\n ret = tf.reshape(ret, [self._batch, self._length, depth])\n return ret", "docstring": "Return the output from the experts.\n\n When one example goes to multiple experts, the outputs are summed.\n\nArgs:\n x: a Tensor with shape [batch, num_experts, expert_capacity, depth]\n\nReturns:\n a `Tensor` with shape `[batch, length, depth]", "source": "juraj_google_style"} -{"code": "def main_only_quicksetup_rootlogger(level: int = logging.DEBUG,\n with_process_id: bool = False,\n with_thread_id: bool = False) -> None:\n\n # Nasty. 
Only call from \"if __name__ == '__main__'\" clauses!\n rootlogger = logging.getLogger()\n configure_logger_for_colour(rootlogger, level, remove_existing=True,\n with_process_id=with_process_id,\n with_thread_id=with_thread_id)", "docstring": "Quick function to set up the root logger for colour.\n\n Should ONLY be called from the ``if __name__ == 'main'`` script;\n see https://docs.python.org/3.4/howto/logging.html#library-config.\n\nArgs:\n level: log level to set\n with_process_id: include the process ID in the logger's name?\n with_thread_id: include the thread ID in the logger's name?", "source": "juraj_google_style"} -{"code": "def start_vm(access_token, subscription_id, resource_group, vm_name):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', resource_group,\n '/providers/Microsoft.Compute/virtualMachines/',\n vm_name,\n '/start',\n '?api-version=', COMP_API])\n return do_post(endpoint, '', access_token)", "docstring": "Start a virtual machine.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n vm_name (str): Name of the virtual machine.\n\nReturns:\n HTTP response.", "source": "juraj_google_style"} -{"code": "def create_model(self, ModelName, PrimaryContainer, *args, **kwargs): # pylint: disable=unused-argument\n\n LocalSagemakerClient._models[ModelName] = _LocalModel(ModelName, PrimaryContainer)", "docstring": "Create a Local Model Object\n\nArgs:\n ModelName (str): the Model Name\n PrimaryContainer (dict): a SageMaker primary container definition", "source": "juraj_google_style"} -{"code": "def deleted(self, main_type, sub_type, deleted_since, owner=None, filters=None, params=None):\n\n params = params or {}\n\n if filters and filters.filters:\n params['filters'] = filters.filters_string\n if owner:\n params['owner'] = owner\n if deleted_since:\n params['deleteSince'] = deleted_since\n\n if not sub_type:\n url = '/v2/{}/deleted'.format(main_type)\n else:\n url = '/v2/{}/{}/deleted'.format(main_type, sub_type)\n\n return self.tcex.session.get(url, params=params)", "docstring": "Args:\n owner:\n filters:\n main_type:\n sub_type:\n deleted_since:\n params:\n\n Return:", "source": "juraj_google_style"} -{"code": "def normalize_version(version):\n\n rv = []\n for x in version.split(\".\"):\n try:\n rv.append(int(x))\n except ValueError:\n for y in re.split(\"([0-9]+)\", x):\n if y == '':\n continue\n try:\n rv.append(int(y))\n except ValueError:\n rv.append(y)\n return rv", "docstring": "Helper function to normalize version.\n Returns a comparable object.\n\nArgs:\n version (str) version, e.g. 
\"0.1.0\"", "source": "juraj_google_style"} -{"code": "def build_chunk(oscillators):\n\n step_random_processes(oscillators)\n subchunks = []\n for osc in oscillators:\n osc.amplitude.step_amp()\n osc_chunk = osc.get_samples(config.CHUNK_SIZE)\n if osc_chunk is not None:\n subchunks.append(osc_chunk)\n if len(subchunks):\n new_chunk = sum(subchunks)\n else:\n new_chunk = numpy.zeros(config.CHUNK_SIZE)\n # If we exceed the maximum amplitude, handle it gracefully\n chunk_amplitude = amplitude.find_amplitude(new_chunk)\n if chunk_amplitude > config.MAX_AMPLITUDE:\n # Normalize the amplitude chunk to mitigate immediate clipping\n new_chunk = amplitude.normalize_amplitude(new_chunk,\n config.MAX_AMPLITUDE)\n # Pick some of the offending oscillators (and some random others)\n # and lower their drift targets\n avg_amp = (sum(osc.amplitude.value for osc in oscillators) /\n len(oscillators))\n for osc in oscillators:\n if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or\n rand.prob_bool(0.01)):\n osc.amplitude.drift_target = rand.weighted_rand(\n [(-5, 1), (0, 10)])\n osc.amplitude.change_rate = rand.weighted_rand(\n osc.amplitude.change_rate_weights)\n return new_chunk.astype(config.SAMPLE_DATA_TYPE).tostring()", "docstring": "Build an audio chunk and progress the oscillator states.\n\nArgs:\n oscillators (list): A list of oscillator.Oscillator objects\n to build chunks from\n\nReturns:\n str: a string of audio sample bytes ready to be written to a wave file", "source": "juraj_google_style"} -{"code": "def get_by_block(self, block_number):\n\n blocklist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_BLOCK).snapshot()\n block_bytes = block_number.to_bytes(4, 'little')\n results = []\n for val in blocklist_snapshot.iterator(prefix=block_bytes, include_key=False):\n event = SmartContractEvent.FromByteArray(val)\n results.append(event)\n\n return results", "docstring": "Look up notifications for a block\n\nArgs:\n block_number (int): height of block to search for notifications\n\nReturns:\n list: a list of notifications", "source": "juraj_google_style"} -{"code": "def info(self, collector_id):\n\n cid = self.collector_id\n if collector_id:\n cid = collector_id\n\n url = '{0}/{1}'.format(self.url, cid)\n request = requests.get(url, auth=self.auth)\n return request.json()", "docstring": "Return a dict of collector.\n\nArgs:\n collector_id (int): id of collector (optional)", "source": "juraj_google_style"} -{"code": "def if_sqlserver_disable_constraints(session: SqlASession,\n tablename: str) -> None:\n # noqa\n engine = get_engine_from_session(session)\n if is_sqlserver(engine):\n quoted_tablename = quote_identifier(tablename, engine)\n session.execute(\n \"ALTER TABLE {} NOCHECK CONSTRAINT all\".format(\n quoted_tablename))\n yield\n session.execute(\n \"ALTER TABLE {} WITH CHECK CHECK CONSTRAINT all\".format(\n quoted_tablename))\n else:\n yield", "docstring": "If we're running under SQL Server, disable constraint checking for the\n specified table while the resource is held.\n\nArgs:\n session: SQLAlchemy :class:`Session`\n tablename: table name\n\n See\n https://stackoverflow.com/questions/123558/sql-server-2005-t-sql-to-temporarily-disable-a-trigger", "source": "juraj_google_style"} -{"code": "def is_dsub_operation(op):\n\n if not is_pipeline(op):\n return False\n\n for name in ['dsub-version', 'job-id', 'job-name', 'user-id']:\n if not get_label(op, name):\n return False\n\n return True", "docstring": "Determine if a pipelines operation is a dsub request.\n\n We don't have a 
rigorous way to identify an operation as being submitted\n by dsub. Our best option is to check for certain fields that have always\n been part of dsub operations.\n\n - labels: job-id, job-name, and user-id have always existed. The dsub-version\n label has always existed for the google-v2 provider.\n\nArgs:\n op: a pipelines operation.\n\nReturns:\n Boolean, true if the pipeline run was generated by dsub.", "source": "juraj_google_style"} -{"code": "def get_equivalent_kpoints(self, index):\n\n # if the kpoint has no label it can\"t have a repetition along the band\n # structure line object\n\n if self.kpoints[index].label is None:\n return [index]\n\n list_index_kpoints = []\n for i in range(len(self.kpoints)):\n if self.kpoints[i].label == self.kpoints[index].label:\n list_index_kpoints.append(i)\n\n return list_index_kpoints", "docstring": "Returns the list of kpoint indices equivalent (meaning they are the\n same frac coords) to the given one.\n\nArgs:\n index: the kpoint index\n\nReturns:\n a list of equivalent indices\n\n TODO: now it uses the label we might want to use coordinates instead\n (in case there was a mislabel)", "source": "juraj_google_style"} -{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n\n local_buffer = utils.BytearrayStream()\n\n if self._unique_identifier:\n self._unique_identifier.write(\n local_buffer,\n kmip_version=kmip_version\n )\n else:\n raise exceptions.InvalidField(\n \"The GetAttributes response payload is missing the unique \"\n \"identifier field.\"\n )\n\n if kmip_version < enums.KMIPVersion.KMIP_2_0:\n for attribute in self._attributes:\n attribute.write(local_buffer, kmip_version=kmip_version)\n else:\n if self._attributes:\n # TODO (ph) Add a new utility to avoid using TemplateAttributes\n template_attribute = objects.TemplateAttribute(\n attributes=self.attributes\n )\n attributes = objects.convert_template_attribute_to_attributes(\n template_attribute\n )\n attributes.write(local_buffer, kmip_version=kmip_version)\n else:\n raise exceptions.InvalidField(\n \"The GetAttributes response payload is missing the \"\n \"attributes list.\"\n )\n\n self.length = local_buffer.length()\n super(GetAttributesResponsePayload, self).write(\n output_buffer,\n kmip_version=kmip_version\n )\n output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the GetAttributes response payload to a\n stream.\n\nArgs:\n output_buffer (stream): A data stream in which to encode object\n data, supporting a write method; usually a BytearrayStream\n object.\n kmip_version (KMIPVersion): An enumeration defining the KMIP\n version with which the object will be encoded. Optional,\n defaults to KMIP 1.0.", "source": "juraj_google_style"} -{"code": "def device_set(self, device):\n\n\n\t\tif device['id'].startswith('0x'):\n\t\t\tself.device_id = device['id'][2:]\n\t\telif device['id'].startswith('ios:'):\n\t\t\tself.device_id = device['id'].replace(':', '')\n\t\telse:\n\t\t\tself.device_id = device['id']", "docstring": "Set device used by :class:`MobileClient` instance.\n\n Parameters:\n device (dict): A device dict as returned by :meth:`devices`.", "source": "juraj_google_style"} -{"code": "def unpack_rpc_payload(resp_format, payload):\n\n\n code = _create_argcode(resp_format, payload)\n return struct.unpack(code, payload)", "docstring": "Unpack an RPC payload according to resp_format.\n\nArgs:\n resp_format (str): a struct format code (without the <) for the\n parameter format for this RPC. 
This format code may include the final\n character V, which means that it expects a variable length bytearray.\n payload (bytes): The binary payload that should be unpacked.\n\nReturns:\n list: A list of the unpacked payload items.", "source": "juraj_google_style"} -{"code": "def get_factors_iterative2(n):\n\n\n ans, stack, x = [], [], 2\n while True:\n if x > n // x:\n if not stack:\n return ans\n ans.append(stack + [n])\n x = stack.pop()\n n *= x\n x += 1\n elif n % x == 0:\n stack.append(x)\n n //= x\n else:\n x += 1", "docstring": "[summary]\n analog as above\n\nArgs:\n n {[int]} -- [description]\n\nReturns:\n [list of lists] -- [all factors of n]", "source": "juraj_google_style"} -{"code": "def _Matches(path, pattern_list):\n\n # Note: This code does not scale to large pattern_list sizes.\n return any(fnmatch.fnmatchcase(path, pattern) for pattern in pattern_list)", "docstring": "Returns true if path matches any patten found in pattern_list.\n\nArgs:\n path: A dot separated path to a package, class, method or variable\n pattern_list: A list of wildcard patterns\n\nReturns:\n True if path matches any wildcard found in pattern_list.", "source": "juraj_google_style"} -{"code": "def GetHasher(cls, hasher_name):\n\n hasher_name = hasher_name.lower()\n if hasher_name not in cls._hasher_classes:\n raise KeyError(\n 'hasher class not set for name: {0:s}.'.format(hasher_name))\n\n hasher_class = cls._hasher_classes[hasher_name]\n return hasher_class()", "docstring": "Retrieves an instance of a specific hasher.\n\nArgs:\n hasher_name (str): the name of the hasher to retrieve.\n\nReturns:\n BaseHasher: hasher.\n\nRaises:\n KeyError: if hasher class is not set for the corresponding name.", "source": "juraj_google_style"} -{"code": "def __init__(self, alts_list):\n\n self.alts = alts_list\n self.alts_set = set(alts_list)\n self.m = len(alts_list)\n if len(self.alts) != len(self.alts_set):\n raise ValueError(\"Alternatives must not contain duplicates\")\n self.alts_to_ranks = None # Maps alternatives to ranking (projective)\n self.ranks_to_alts = None", "docstring": "Description:\n Initializes the aggregator with the set of alternatives\n and the number of candidates\n Parameters:\n alts_list: the set of integer alternatives (a.k.a candidates)", "source": "juraj_google_style"} -{"code": "def url_to_fn(url):\n\n url = url.replace(\"http://\", \"\").replace(\"https://\", \"\")\n url = url.split(\"?\")[0]\n\n return url.replace(\"%\", \"_\").replace(\"/\", \"_\")", "docstring": "Convert `url` to filename used to download the datasets.\n\n ``http://kitakitsune.org/xe`` -> ``kitakitsune.org_xe``.\n\nArgs:\n url (str): URL of the resource.\n\nReturns:\n str: Normalized URL.", "source": "juraj_google_style"} -{"code": "def setup(app):\n\n\n for name, (default, rebuild, _) in ref.CONFIG_VALUES.iteritems():\n app.add_config_value(name, default, rebuild)\n\n app.add_directive('javaimport', ref.JavarefImportDirective)\n app.add_role('javaref', ref.JavarefRole(app))\n\n app.connect('builder-inited', initialize_env)\n app.connect('env-purge-doc', ref.purge_imports)\n app.connect('env-merge-info', ref.merge_imports)\n app.connect('build-finished', ref.cleanup)", "docstring": "Register the extension with Sphinx.\n\nArgs:\n app: The Sphinx application.", "source": "juraj_google_style"} -{"code": "def __init__(self, choices=None, validator=None, **kwargs):\n\n self.choices = choices\n subvalidator = validator or String()\n self.validator = List(validator=subvalidator)\n\n # Check the choices match the validator\n 
for choice in self.choices:\n subvalidator.Validate(choice)\n super(MultiChoice, self).__init__(**kwargs)", "docstring": "Create a multichoice object and validate choices.\n\nArgs:\n choices: list of available choices\n validator: validator to use for each of the list *items* the validator for\n the top level is a list.\n **kwargs: passed through to parent class.", "source": "juraj_google_style"} -{"code": "def consult_robots_txt(self, request: HTTPRequest) -> bool:\n\n if not self._robots_txt_checker:\n return True\n\n result = yield from self._robots_txt_checker.can_fetch(request)\n return result", "docstring": "Consult by fetching robots.txt as needed.\n\nArgs:\n request: The request to be made\n to get the file.\n\nReturns:\n True if can fetch\n\n Coroutine", "source": "juraj_google_style"} -{"code": "def from_json(cls, data):\n\n required_keys = ('location', 'design_days')\n for key in required_keys:\n assert key in data, 'Required key \"{}\" is missing!'.format(key)\n\n return cls(Location.from_json(data['location']),\n [DesignDay.from_json(des_day) for des_day in data['design_days']])", "docstring": "Create a DDY from a dictionary.\n\nArgs:\n data = {\n \"location\": ladybug Location schema,\n \"design_days\": [] // list of ladybug DesignDay schemas}", "source": "juraj_google_style"} -{"code": "def execute_command(self, command):\n\n logger.debug('Executing commands:\\n %s' % command)\n\n err_msg = 'Something happened when executing some commands on device'\n\n chan = self.ssh.get_transport().open_session()\n chan.settimeout(5)\n\n chan.exec_command(command)\n\n error_chan = chan.makefile_stderr()\n output_chan = chan.makefile()\n\n error = ''\n output = ''\n for e in error_chan.read():\n error = error + self._read_wrapper(e)\n for o in output_chan.read():\n output = output + self._read_wrapper(o)\n\n if len(error) > 0:\n msg = '%s %s:\\n%s\\n%s' % (err_msg, self.ssh.get_host_keys().keys()[0], command, error)\n logger.error(msg)\n raise exceptions.CommandExecutionException(msg)\n\n regex = re.compile('Command fail')\n if len(regex.findall(output)) > 0:\n msg = '%s %s:\\n%s\\n%s' % (err_msg, self.ssh.get_host_keys().keys()[0], command, output)\n logger.error(msg)\n raise exceptions.CommandExecutionException(msg)\n\n output = output.splitlines()\n\n # We look for the prompt and remove it\n i = 0\n for line in output:\n current_line = line.split('#')\n\n if len(current_line) > 1:\n output[i] = current_line[1]\n else:\n output[i] = current_line[0]\n i += 1\n\n return output[:-1]", "docstring": "This method will execute the commands on the device without as if you were just connected to it (it will not\n enter into any vdom). 
This method is not recommended unless you are 100% sure of what you are doing.\n\nArgs:\n * **command** (str) -- Command to execute.\n\nReturns:\n A list of strings containing the output.\n\nRaises:\n exceptions.CommandExecutionException -- If it detects any problem with the command.", "source": "juraj_google_style"} -{"code": "def insort_event_right(self, event, lo=0, hi=None):\n\n\n if lo < 0:\n raise ValueError('lo must be non-negative')\n if hi is None:\n hi = len(self.queue)\n while lo < hi:\n mid = (lo + hi) // 2\n if event[0] < self.queue[mid][0]:\n hi = mid\n else:\n lo = mid + 1\n self.queue.insert(lo, event)", "docstring": "Insert event in queue, and keep it sorted assuming queue is sorted.\n\n If event is already in queue, insert it to the right of the rightmost\n event (to keep FIFO order).\n\n Optional args lo (default 0) and hi (default len(a)) bound the\n slice of a to be searched.\n\nArgs:\n event: a (time in sec since unix epoch, callback, args, kwds) tuple.", "source": "juraj_google_style"} -{"code": "def rouge_l_fscore(predictions, labels, **unused_kwargs):\n\n outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n # Convert the outputs and labels to a [batch_size, input_length] tensor.\n outputs = tf.squeeze(outputs, axis=[-1, -2])\n labels = tf.squeeze(labels, axis=[-1, -2])\n rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),\n tf.float32)\n return rouge_l_f_score, tf.constant(1.0)", "docstring": "ROUGE scores computation between labels and predictions.\n\n This is an approximate ROUGE scoring method since we do not glue word pieces\n or decode the ids and tokenize the output.\n\nArgs:\n predictions: tensor, model predictions\n labels: tensor, gold output.\n\nReturns:\n rouge_l_fscore: approx rouge-l f1 score.", "source": "juraj_google_style"} -{"code": "def setNetworkName(self, networkName='GRL'):\n\n print '%s call setNetworkName' % self.port\n print networkName\n try:\n cmd = 'networkname %s' % networkName\n datasetCmd = 'dataset networkname %s' % networkName\n self.hasActiveDatasetToCommit = True\n return self.__sendCommand(cmd)[0] == 'Done' and self.__sendCommand(datasetCmd)[0] == 'Done'\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger(\"setNetworkName() Error: \" + str(e))", "docstring": "set Thread Network name\n\nArgs:\n networkName: the networkname string to be set\n\nReturns:\n True: successful to set the Thread Networkname\n False: fail to set the Thread Networkname", "source": "juraj_google_style"} -{"code": "def save(self, sess, save_path, timestep=None):\n\n\n if self._saver is None:\n raise TensorForceError(\"register_saver_ops should be called before save\")\n return self._saver.save(\n sess=sess,\n save_path=save_path,\n global_step=timestep,\n write_meta_graph=False,\n write_state=True, # Do we need this?\n )", "docstring": "Saves this component's managed variables.\n\nArgs:\n sess: The session for which to save the managed variables.\n save_path: The path to save data to.\n timestep: Optional, the timestep to append to the file name.\n\nReturns:\n Checkpoint path where the model was saved.", "source": "juraj_google_style"} -{"code": "def SubtractFromBalance(self, assetId, fixed8_val):\n\n found = False\n for key, balance in self.Balances.items():\n if key == assetId:\n self.Balances[assetId] = self.Balances[assetId] - fixed8_val\n found = True\n if not found:\n self.Balances[assetId] = fixed8_val * Fixed8(-1)", "docstring": "Subtract amount to the specified balance.\n\nArgs:\n assetId (UInt256):\n fixed8_val 
(Fixed8): amount to add.", "source": "juraj_google_style"} -{"code": "def get_default_connection_info(self, provider_name):\n\n provider = self._provider_client.get_by_name(provider_name)\n if provider:\n return provider['defaultConnectionInfo']\n else:\n return {}", "docstring": "Gets default connection info for a specific provider.\n\nArgs:\n provider_name: Name of the provider.\n\nReturns:\n dict: Default connection information.", "source": "juraj_google_style"} -{"code": "def _find_current_phase(self, global_step):\n\n epoch_size = sum(phase.steps for phase in self._phases)\n epoch = int(global_step // epoch_size)\n steps_in = global_step % epoch_size\n for phase in self._phases:\n if steps_in < phase.steps:\n return phase, epoch, steps_in\n steps_in -= phase.steps", "docstring": "Determine the current phase based on the global step.\n\n This ensures continuing the correct phase after restoring checkoints.\n\nArgs:\n global_step: The global number of steps performed across all phases.\n\nReturns:\n Tuple of phase object, epoch number, and phase steps within the epoch.", "source": "juraj_google_style"} -{"code": "def _safe_get(self, revision, key):\n\n if self.has_revision(revision):\n return self.raw_answers[revision].get(key)\n else:\n return None", "docstring": "Get an answer data (vote or rationale) by revision\n\nArgs:\n revision (int): the revision number for student answer, could be\n 0 (original) or 1 (revised)\n key (str); key for retrieve answer data, could be VOTE_KEY or\n RATIONALE_KEY\n\nReturns:\n the answer data or None if revision doesn't exists", "source": "juraj_google_style"} -{"code": "def parse(self, data, extent, desc_tag):\n # type: (bytes, int, UDFTag) -> None\n\n if self._initialized:\n raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor already initialized')\n\n (tag_unused, self.vol_desc_seqnum, self.part_flags, self.part_num,\n part_contents, part_contents_use, self.access_type,\n self.part_start_location, self.part_length, impl_ident,\n self.implementation_use, reserved_unused) = struct.unpack_from(self.FMT, data, 0)\n\n self.desc_tag = desc_tag\n\n self.part_contents = UDFEntityID()\n self.part_contents.parse(part_contents)\n if self.part_contents.identifier[:6] != b'+NSR02':\n raise pycdlibexception.PyCdlibInvalidISO(\"Partition Contents Identifier not '+NSR02'\")\n\n self.impl_ident = UDFEntityID()\n self.impl_ident.parse(impl_ident)\n\n self.part_contents_use = UDFPartitionHeaderDescriptor()\n self.part_contents_use.parse(part_contents_use)\n\n self.orig_extent_loc = extent\n\n self._initialized = True", "docstring": "Parse the passed in data into a UDF Partition Volume Descriptor.\n\n Parameters:\n data - The data to parse.\n extent - The extent that this descriptor currently lives at.\n desc_tag - A UDFTag object that represents the Descriptor Tag.\n\nReturns:\n Nothing.", "source": "juraj_google_style"} -{"code": "def to_hg_scheme_url(cls, url):\n\n regexes = cls._get_url_scheme_regexes()\n for scheme_key, pattern, regex in regexes:\n match = regex.match(url)\n if match is not None:\n groups = match.groups()\n if len(groups) == 2:\n return u''.join(\n scheme_key,\n '://',\n pattern.replace('{1}', groups[0]),\n groups[1])\n elif len(groups) == 1:\n return u''.join(\n scheme_key,\n '://',\n pattern,\n groups[0])", "docstring": "Convert a URL to local mercurial URL schemes\n\nArgs:\n url (str): URL to map to local mercurial URL schemes\n\n example::\n\n # schemes.gh = git://github.com/\n >> remote_url = 
git://github.com/westurner/dotfiles'\n >> to_hg_scheme_url(remote_url)\n << gh://westurner/dotfiles", "source": "juraj_google_style"} -{"code": "def transpose(self, name=None, activate_final=None):\n\n if name is None:\n name = self.module_name + \"_transpose\"\n if activate_final is None:\n activate_final = self.activate_final\n output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]\n output_sizes.reverse()\n return MLP(\n name=name,\n output_sizes=output_sizes,\n activation=self.activation,\n activate_final=activate_final,\n initializers=self.initializers,\n partitioners=self.partitioners,\n regularizers=self.regularizers,\n use_bias=self.use_bias,\n use_dropout=self.use_dropout)", "docstring": "Returns transposed `MLP`.\n\nArgs:\n name: Optional string specifying the name of the transposed module. The\n default name is constructed by appending \"_transpose\"\n to `self.module_name`.\n activate_final: Optional boolean determining if the activation and batch\n normalization, if turned on, are applied to the final layer.\n\nReturns:\n Matching transposed `MLP` module.", "source": "juraj_google_style"} -{"code": "def d_grade_ipix(ipix, nside_in, nside_out, nest=False):\n\n\n if nside_in==nside_out: return ipix\n if not (nside_in > nside_out): \n raise ValueError(\"nside_out must be less than nside_in\")\n\n return hp.vec2pix(nside_out, *hp.pix2vec(nside_in, ipix, nest), nest=nest)", "docstring": "Return the indices of the super-pixels which contain each of the\n sub-pixels (nside_in > nside_out).\n\n Parameters:\n -----------\n ipix : index of the input subpixels\n nside_in : nside of the input subpix\n nside_out : nside of the desired superpixels\n\nReturns:\n --------\n ipix_out : superpixels for each subpixel", "source": "juraj_google_style"} -{"code": "def get_updates(\n self,\n display_all_distributions=False,\n verbose=False\n ): # pragma: no cover\n\n if verbose:\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format='%(message)s',\n )\n logging.info('Checking installed packages for updates...')\n\n updates = self._get_environment_updates(\n display_all_distributions=display_all_distributions\n )\n\n if updates:\n for update in updates:\n logging.info(update)\n\n if updates and self._csv_file_name:\n self.write_updates_to_csv(updates)\n\n if updates and self._new_config:\n self.write_new_config(updates)\n\n return updates", "docstring": "When called, get the environment updates and write updates to a CSV\n file and if a new config has been provided, write a new configuration\n file.\n\nArgs:\n display_all_distributions (bool): Return distribution even if it is\n up-to-date.\n verbose (bool): If ``True``, log to terminal to terminal.", "source": "juraj_google_style"} -{"code": "def GetFileSystem(self, path_spec):\n\n identifier = self._GetFileSystemCacheIdentifier(path_spec)\n return self._file_system_cache.GetObject(identifier)", "docstring": "Retrieves a file system object defined by path specification.\n\nArgs:\n path_spec (PathSpec): path specification.\n\nReturns:\n FileSystem: a file system object or None if not cached.", "source": "juraj_google_style"} -{"code": "def __sendCommand(self, cmd):\n\n logging.info('%s: sendCommand[%s]', self.port, cmd)\n if self.logThreadStatus == self.logStatus['running']:\n self.logThreadStatus = self.logStatus['pauseReq']\n while self.logThreadStatus != self.logStatus['paused'] and self.logThreadStatus != self.logStatus['stop']:\n pass\n\n ssh_stdin = None\n ssh_stdout = None\n ssh_stderr = None\n try:\n # 
command retransmit times\n retry_times = 3\n while retry_times > 0:\n retry_times -= 1\n try:\n if self._is_net:\n ssh_stdin, ssh_stdout, ssh_stderr = self.handle.exec_command(cmd)\n else:\n self._sendline(cmd)\n self._expect(cmd)\n except Exception as e:\n logging.exception('%s: failed to send command[%s]: %s', self.port, cmd, str(e))\n if retry_times == 0:\n raise\n else:\n break\n\n line = None\n response = []\n retry_times = 20\n stdout_lines = []\n stderr_lines = []\n if self._is_net:\n stdout_lines = ssh_stdout.readlines()\n stderr_lines = ssh_stderr.readlines()\n if stderr_lines:\n for stderr_line in stderr_lines:\n if re.search(r'Not\\s+Found|failed\\s+with\\s+error', stderr_line.strip(), re.M | re.I):\n print \"Command failed:\" + stderr_line\n return 'Fail'\n print \"Got line: \" + stderr_line\n logging.info('%s: the read line is[%s]', self.port, stderr_line)\n response.append(str(stderr_line.strip()))\n elif stdout_lines:\n for stdout_line in stdout_lines:\n logging.info('%s: the read line is[%s]', self.port, stdout_line)\n if re.search(r'Not\\s+Found|failed\\s+with\\s+error', stdout_line.strip(), re.M | re.I):\n print \"Command failed\"\n return 'Fail'\n print \"Got line: \" + stdout_line\n logging.info('%s: send command[%s] done!', self.port, cmd)\n response.append(str(stdout_line.strip()))\n response.append(WPAN_CARRIER_PROMPT)\n return response\n else:\n while retry_times > 0:\n line = self._readline()\n print \"read line: %s\" % line\n logging.info('%s: the read line is[%s]', self.port, line)\n if line:\n response.append(line)\n if re.match(WPAN_CARRIER_PROMPT, line):\n break\n elif re.search(r'Not\\s+Found|failed\\s+with\\s+error', line, re.M | re.I):\n print \"Command failed\"\n return 'Fail'\n\n retry_times -= 1\n time.sleep(0.1)\n\n if retry_times == 0:\n raise Exception('%s: failed to find end of response' % self.port)\n logging.info('%s: send command[%s] done!', self.port, cmd)\n return response\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger('sendCommand() Error: ' + str(e))\n raise", "docstring": "send specific command to reference unit over serial port\n\nArgs:\n cmd: OpenThread_WpanCtl command string\n\nReturns:\n Fail: Failed to send the command to reference unit and parse it\n Value: successfully retrieve the desired value from reference unit\n Error: some errors occur, indicates by the followed specific error number", "source": "juraj_google_style"} -{"code": "def service_configuration_check(config):\n\n ipv4_enabled = config.getboolean('daemon', 'ipv4')\n ipv6_enabled = config.getboolean('daemon', 'ipv6')\n services = config.sections()\n # we don't need it during sanity check for services check\n services.remove('daemon')\n ip_prefixes = []\n\n for service in services:\n for option, getter in SERVICE_OPTIONS_TYPE.items():\n try:\n getattr(config, getter)(service, option)\n except configparser.NoOptionError as error:\n if option not in SERVICE_OPTIONAL_OPTIONS:\n raise ValueError(error)\n except configparser.Error as error:\n raise ValueError(error)\n except ValueError as exc:\n msg = (\"invalid data for '{opt}' option in service check \"\n \"{name}: {err}\"\n .format(opt=option, name=service, err=exc))\n raise ValueError(msg)\n\n if (config.get(service, 'on_disabled') != 'withdraw' and\n config.get(service, 'on_disabled') != 'advertise'):\n msg = (\"'on_disabled' option has invalid value ({val}) for \"\n \"service check {name}, 'on_disabled option should be set \"\n \"either to 'withdraw' or to 'advertise'\"\n .format(name=service,\n 
val=config.get(service, 'on_disabled')))\n raise ValueError(msg)\n\n ip_prefixes.append(config.get(service, 'ip_prefix'))\n\n if not valid_ip_prefix(config.get(service, 'ip_prefix')):\n msg = (\"invalid value ({val}) for 'ip_prefix' option in service \"\n \"check {name}. It should be an IP PREFIX in form of \"\n \"ip/prefixlen.\"\n .format(name=service, val=config.get(service, 'ip_prefix')))\n raise ValueError(msg)\n\n _ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix'))\n if not ipv6_enabled and _ip_prefix.version == 6:\n raise ValueError(\"IPv6 support is disabled in \"\n \"anycast-healthchecker while there is an IPv6 \"\n \"prefix configured for {name} service check\"\n .format(name=service))\n if not ipv4_enabled and _ip_prefix.version == 4:\n raise ValueError(\"IPv4 support is disabled in \"\n \"anycast-healthchecker while there is an IPv4 \"\n \"prefix configured for {name} service check\"\n .format(name=service))\n\n cmd = shlex.split(config.get(service, 'check_cmd'))\n try:\n proc = subprocess.Popen(cmd)\n proc.kill()\n except (OSError, subprocess.SubprocessError) as exc:\n msg = (\"failed to run check command '{cmd}' for service check \"\n \"{name}: {err}\"\n .format(name=service,\n cmd=config.get(service, 'check_cmd'),\n err=exc))\n raise ValueError(msg)\n\n occurrences_of_ip_prefixes = Counter(ip_prefixes)\n for ip_prefix, counter in occurrences_of_ip_prefixes.items():\n if counter > 1:\n raise ValueError(\"{ip} is used by {c} service checks\"\n .format(ip=ip_prefix, c=counter))", "docstring": "Perform a sanity check against options for each service check.\n\nArgs:\n config (obj): A configparser object which holds our configuration.\n\nReturns:\n None if all sanity checks are successfully passed otherwise raises a\n ValueError exception.", "source": "juraj_google_style"} -{"code": "def main(params=None):\n\n if params == None:\n parser = getParser()\n args = parser.parse_args(params)\n else:\n args = params\n\n print(general.title(banner.text))\n\n extraWords = args.extra_words\n\n try:\n if args.name == None and args.surname1 == None and args.surname2 == None and args.city == None and args.country == None and args.year == None:\n print(\"\\nCollecting information about the profile\")\n print(\"----------------------------------------\\n\")\n\n args.name = raw_input(general.emphasis(\"Insert a name: \".ljust(35, \" \"))).replace(' ','')\n args.surname1 = raw_input(general.emphasis(\"Insert the first surname: \".ljust(35, \" \"))).replace(' ','')\n args.surname2 = raw_input(general.emphasis(\"Insert the second surname: \".ljust(35, \" \"))).replace(' ','')\n args.year = raw_input(general.emphasis(\"Insert a year (e. g.: birthyear): \".ljust(35, \" \"))).replace(' ','')\n args.city = raw_input(general.emphasis(\"Insert a city: \".ljust(35, \" \"))).replace(' ','')\n args.country = raw_input(general.emphasis(\"Insert a country: \".ljust(35, \" \"))).replace(' ','')\n\n if args.extra_words == []:\n print(\"\\nAdditional transformations to be added\")\n print(\"--------------------------------------\\n\")\n inputText = raw_input(general.emphasis(\"Extra words to add (',' separated): \".ljust(35, \" \"))).replace(' ','')\n extraWords += inputText.lower().split(',')\n except KeyboardInterrupt:\n print(\"\\n\\nThe user manually aborted the program. 
Exiting...\")\n sys.exit(2)\n\n lista=[]\n\n print(\"\\nInput data:\")\n print(\"-----------\\n\")\n if args.name != \"\":\n print(\"Name: \".ljust(20, \" \") + args.name)\n if args.surname1 != \"\":\n print(\"First Surname: \".ljust(20, \" \") + args.surname1)\n if args.surname2 != \"\":\n print(\"Second Surname: \".ljust(20, \" \") + args.surname2)\n if args.year != \"\":\n print(\"Year: \".ljust(20, \" \") + args.year)\n if args.city != \"\":\n print(\"City: \".ljust(20, \" \") + args.city)\n if args.country != \"\":\n print(\"Country: \".ljust(20, \" \") + args.country)\n\n aliases = generate(\n name=args.name,\n surname1=args.surname1,\n surname2=args.surname2,\n city=args.city,\n country=args.country,\n year=args.year,\n useNumbers=args.numbers,\n useCommonWords=args.common_words,\n useLeet=args.leet,\n useLocales=args.locales,\n extraWords=extraWords\n )\n\n print(\"Writing the results onto the file:\\n\\t\" + general.emphasis(args.outputFile))\n\n oF=open(args.outputFile, \"w\")\n for l in aliases:\n oF.write(l+\"\\n\")\n oF.close()\n\n\n # Urging users to place an issue on Github...\n print(banner.footer)", "docstring": "Main function to launch alias_generator.\n\nArgs:\n -----\n params: A list with the parameters as grabbed by the terminal. It is\n None when this is called by an entry_point. If it is called by osrf\n the data is already parsed.", "source": "juraj_google_style"} -{"code": "def _entry_allocated_bitmap(self, entry_number):\n\n index, offset = divmod(entry_number, 8)\n return bool(self._bitmap[index] & (1 << offset))", "docstring": "Checks if a particular index is allocated.\n\nArgs:\n entry_number (int): Index to verify\n\nReturns:\n bool: True if it is allocated, False otherwise.", "source": "juraj_google_style"} -{"code": "def random_init_mapping(candidate_mapping):\n\n # if needed, a fixed seed could be passed here to generate same random (to help debugging)\n random.seed()\n matched_dict = {}\n result = []\n for c in candidate_mapping:\n candidates = list(c)\n if not candidates:\n # -1 indicates no possible mapping\n result.append(-1)\n continue\n found = False\n while candidates:\n # randomly generate an index in [0, length of candidates)\n rid = random.randint(0, len(candidates) - 1)\n candidate = candidates[rid]\n # check if it has already been matched\n if candidate in matched_dict:\n candidates.pop(rid)\n else:\n matched_dict[candidate] = 1\n result.append(candidate)\n found = True\n break\n if not found:\n result.append(-1)\n return result", "docstring": "Generate a random node mapping.\n\nArgs:\n candidate_mapping: candidate_mapping: candidate node match list\n\nReturns:\n randomly-generated node mapping between two AMRs", "source": "juraj_google_style"} -{"code": "def get_parameter_names(self, include_frozen=False):\n\n if include_frozen:\n return self.parameter_names\n return tuple(p\n for p, f in zip(self.parameter_names, self.unfrozen_mask)\n if f)", "docstring": "Get a list of the parameter names\n\nArgs:\n include_frozen (Optional[bool]): Should the frozen parameters be\n included in the returned value? 
(default: ``False``)", "source": "juraj_google_style"} -{"code": "def sign(self, message):\n\n message = _helpers._to_bytes(message, encoding='utf-8')\n return rsa.pkcs1.sign(message, self._key, 'SHA-256')", "docstring": "Signs a message.\n\nArgs:\n message: bytes, Message to be signed.\n\nReturns:\n string, The signature of the message for the given key.", "source": "juraj_google_style"} -{"code": "def filter_by_hoys(self, hoys):\n\n _moys = tuple(int(hour * 60) for hour in hoys)\n return self.filter_by_moys(_moys)", "docstring": "Filter the Data Collection based on an analysis period.\n\nArgs:\n hoys: A List of hours of the year 0..8759\n\nReturns:\n A new Data Collection with filtered data", "source": "juraj_google_style"} -{"code": "def actnorm_3d(name, x, logscale_factor=3.):\n\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n x = tf.unstack(x, axis=1)\n x_normed = []\n for ind, x_step in enumerate(x):\n x_step, _ = actnorm(\"actnorm_%d\" % ind, x_step,\n logscale_factor=logscale_factor)\n x_normed.append(x_step)\n return tf.stack(x_normed, axis=1), None", "docstring": "Applies actnorm to each time-step independently.\n\n There are a total of 2*n_channels*n_steps parameters learnt.\n\nArgs:\n name: variable scope.\n x: 5-D Tensor, (NTHWC)\n logscale_factor: Increases the learning rate of the scale by\n logscale_factor.\n\nReturns:\n x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.", "source": "juraj_google_style"} -{"code": "def rmod(self, other, axis=\"columns\", level=None, fill_value=None):\n\n return self._binary_op(\n \"rmod\", other, axis=axis, level=level, fill_value=fill_value\n )", "docstring": "Mod this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\n other: The object to use to apply the div against this.\n axis: The axis to div over.\n level: The Multilevel index level to apply div over.\n fill_value: The value to fill NaNs with.\n\nReturns:\n A new DataFrame with the rdiv applied.", "source": "juraj_google_style"} -{"code": "def _get_fans(shape):\n r\n if len(shape) == 2:\n fan_in = shape[0]\n fan_out = shape[1]\n elif len(shape) == 4 or len(shape) == 5:\n # assuming convolution kernels (2D or 3D).\n kernel_size = np.prod(shape[:2])\n fan_in = shape[-2] * kernel_size\n fan_out = shape[-1] * kernel_size\n else:\n # no specific assumptions\n fan_in = np.sqrt(np.prod(shape))\n fan_out = np.sqrt(np.prod(shape))\n return fan_in, fan_out", "docstring": "r\"\"\"Returns the size of input dimension and output dimension, given `shape`.\n\nArgs:\n shape: A list of integers.\n\nReturns:\n fan_in: An int. The value of input dimension.\n fan_out: An int. 
The value of output dimension.", "source": "juraj_google_style"} -{"code": "def optional(self, value = None):\n\n\n\t\t# If there's no value, this is a getter\n\t\tif value is None:\n\t\t\treturn this._optional\n\n\t\t# Else, set the flag\n\t\telse:\n\t\t\tthis._optional = value and True or False", "docstring": "Optional\n\n Getter/Setter method for optional flag\n\nArgs:\n value (bool): If set, the method is a setter\n\nReturns:\n bool | None", "source": "juraj_google_style"} -{"code": "def sort_dependencies(self, image, dependencies=None):\n\n if dependencies is None:\n dependencies = OrderedDict() # using this as an ordered set - not storing any values\n\n if image in dependencies:\n return\n\n requires = self.ymldefs[image].get('requires', [])\n\n for dep in requires:\n self.sort_dependencies(dep, dependencies)\n\n dependencies[image] = None\n return dependencies.keys()", "docstring": "Topologically sort the docker commands by their requirements\n\nNote:\n Circular \"requires\" dependencies are assumed to have already been checked in\n get_external_base_image, they are not checked here\n\nArgs:\n image (str): process this docker image's dependencies\n dependencies (OrderedDict): running cache of sorted dependencies (ordered dict)\n\nReturns:\n List[str]: list of dependencies a topologically-sorted build order", "source": "juraj_google_style"} -{"code": "def _label_path_from_index(self, index):\n\n label_file = os.path.join(self.label_dir, index + self.label_extension)\n assert os.path.exists(label_file), 'Path does not exist: {}'.format(label_file)\n return label_file", "docstring": "given image index, find out annotation path\n\n Parameters:\n ----------\n index: int\n index of a specific image\n\nReturns:\n ----------\n full path of annotation file", "source": "juraj_google_style"} -{"code": "def __init__(self, linter_name, path, msg, line_nr=None, col=None):\n\n # Set all attributes in the constructor for convenience.\n # pylint: disable=too-many-arguments\n if line_nr:\n line_nr = int(line_nr)\n if col:\n col = int(col)\n self._linter_name = linter_name\n self.path = path\n self.line_nr = line_nr\n self.msg = msg\n self.col = col", "docstring": "Optionally set all attributes.\n\nArgs:\n path (str): Relative file path.\n line (int): Line number.\n msg (str): Explanation of what is wrong.\n col (int): Column where the problem begins.", "source": "juraj_google_style"} -{"code": "def _get(self, url, params=None):\n\n if not params:\n params = {}\n\n params.update({'login': self.login, 'key': self.key})\n\n response_json = requests.get(self.api_url + url, params).json()\n\n return self._process_response(response_json)", "docstring": "Used by every other method, it makes a GET request with the given params.\n\nArgs:\n url (str): relative path of a specific service (account_info, ...).\n params (:obj:`dict`, optional): contains parameters to be sent in the GET request.\n\nReturns:\n dict: results of the response of the GET request.", "source": "juraj_google_style"} -{"code": "def wv45(msg):\n\n d = hex2bin(data(msg))\n if d[12] == '0':\n return None\n\n ws = bin2int(d[13:15])\n return ws", "docstring": "Wake vortex.\n\nArgs:\n msg (String): 28 bytes hexadecimal message string\n\nReturns:\n int: Wake vortex level. 
0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "juraj_google_style"} -{"code": "def additive_coupling(name, x, mid_channels=512, reverse=False,\n activation=\"relu\", dropout=0.0):\n\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n output_channels = common_layers.shape_list(x)[-1] // 2\n x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)\n\n z1 = x1\n shift = conv_stack(\"nn\", x1, mid_channels, output_channels=output_channels,\n activation=activation, dropout=dropout)\n\n if not reverse:\n z2 = x2 + shift\n else:\n z2 = x2 - shift\n return tf.concat([z1, z2], axis=3), 0.0", "docstring": "Reversible additive coupling layer.\n\nArgs:\n name: variable scope.\n x: 4-D Tensor, shape=(NHWC).\n mid_channels: number of channels in the coupling layer.\n reverse: Forward or reverse operation.\n activation: \"relu\" or \"gatu\"\n dropout: default, 0.0\n\nReturns:\n output: 4-D Tensor, shape=(NHWC)\n objective: 0.0", "source": "juraj_google_style"} -{"code": "def read(self, filename, binary_mode=False, size=None, offset=None):\n\n s3 = boto3.resource(\"s3\")\n bucket, path = self.bucket_and_path(filename)\n args = {}\n endpoint = 0\n if size is not None or offset is not None:\n if offset is None:\n offset = 0\n endpoint = '' if size is None else (offset + size)\n args['Range'] = 'bytes={}-{}'.format(offset, endpoint)\n try:\n stream = s3.Object(bucket, path).get(**args)['Body'].read()\n except botocore.exceptions.ClientError as exc:\n if exc.response['Error']['Code'] == '416':\n if size is not None:\n # Asked for too much, so request just to the end. Do this\n # in a second request so we don't check length in all cases.\n client = boto3.client(\"s3\")\n obj = client.head_object(Bucket=bucket, Key=path)\n len = obj['ContentLength']\n endpoint = min(len, offset + size)\n if offset == endpoint:\n # Asked for no bytes, so just return empty\n stream = b''\n else:\n args['Range'] = 'bytes={}-{}'.format(offset, endpoint)\n stream = s3.Object(bucket, path).get(**args)['Body'].read()\n else:\n raise\n if binary_mode:\n return bytes(stream)\n else:\n return stream.decode('utf-8')", "docstring": "Reads contents of a file to a string.\n\nArgs:\n filename: string, a path\n binary_mode: bool, read as binary if True, otherwise text\n size: int, number of bytes or characters to read, otherwise\n read all the contents of the file from the offset\n offset: int, offset into file to read from, otherwise read\n from the very beginning\n\nReturns:\n Subset of the contents of the file as a string or bytes.", "source": "juraj_google_style"} -{"code": "def get_sites_in_sphere(self, pt, r):\n\n neighbors = []\n for site in self._sites:\n dist = site.distance_from_point(pt)\n if dist <= r:\n neighbors.append((site, dist))\n return neighbors", "docstring": "Find all sites within a sphere from a point.\n\nArgs:\n pt (3x1 array): Cartesian coordinates of center of sphere.\n r (float): Radius of sphere.\n\nReturns:\n [(site, dist) ...] 
since most of the time, subsequent processing\n requires the distance.", "source": "juraj_google_style"} -{"code": "def save(self, filename):\n\n handle = open(filename, 'wb')\n handle.write(P_LAY % self._code())\n handle.close()", "docstring": "Saves pipeline as a Python source code file.\n\nArgs:\n - filename(``path``) Path to save the pipeline source code.", "source": "juraj_google_style"} -{"code": "def __parse_entry(entry_line):\n\n if entry_line.startswith(\"!\"):\n entry_line = sub(r\"!\\w*?_\", '', entry_line)\n else:\n entry_line = entry_line.strip()[1:]\n try:\n entry_type, entry_name = [i.strip() for i in entry_line.split(\"=\", 1)]\n except ValueError:\n entry_type = [i.strip() for i in entry_line.split(\"=\", 1)][0]\n entry_name = ''\n return entry_type, entry_name", "docstring": "Parse the SOFT file entry name line that starts with '^', '!' or '#'.\n\nArgs:\n entry_line (:obj:`str`): Line from SOFT to be parsed.\n\nReturns:\n :obj:`2-tuple`: Type of entry, value of entry.", "source": "juraj_google_style"} -{"code": "def generate_csr(private_key_bytes, subject_name, fqdn_list):\n\n return (\n cryptography.x509.CertificateSigningRequestBuilder()\n .subject_name(subject_name)\n .add_extension(\n extension=cryptography.x509.SubjectAlternativeName(\n [cryptography.x509.DNSName(v) for v in fqdn_list]\n ),\n critical=False,\n )\n .sign(\n private_key=private_key_bytes,\n algorithm=cryptography.hazmat.primitives.hashes.SHA256(),\n backend=cryptography.hazmat.backends.default_backend(),\n )\n )", "docstring": "Generate a Certificate Signing Request (CSR).\n\nArgs:\n private_key_bytes: bytes\n Private key with which the CSR will be signed.\n\n subject_name: str\n Certificate Subject Name\n\n fqdn_list:\n List of Fully Qualified Domain Names (FQDN) and/or IP addresses for which\n this certificate will provide authentication.\n\n E.g.: ['my.membernode.org', '1.2.3.4']", "source": "juraj_google_style"} -{"code": "def _get_target_encoder(self, x, y):\n\n\n assert len(x) == len(y)\n\n # NaN cannot be used as a key for dict. 
So replace it with a random integer\n df = pd.DataFrame({y.name: y, x.name: x.fillna(NAN_INT)})\n return df.groupby(x.name)[y.name].mean().to_dict()", "docstring": "Return a mapping from categories to average target values.\n\nArgs:\n x (pandas.Series): a categorical column to encode.\n y (pandas.Series): the target column\n\nReturns:\n target_encoder (dict): mapping from categories to average target values", "source": "juraj_google_style"} -{"code": "def getsize(self, path):\n\n try:\n file_obj = self.filesystem.resolve(path)\n if (self.filesystem.ends_with_path_separator(path) and\n S_IFMT(file_obj.st_mode) != S_IFDIR):\n error_nr = (errno.EINVAL if self.filesystem.is_windows_fs\n else errno.ENOTDIR)\n self.filesystem.raise_os_error(error_nr, path)\n return file_obj.st_size\n except IOError as exc:\n raise os.error(exc.errno, exc.strerror)", "docstring": "Return the file object size in bytes.\n\nArgs:\n path: path to the file object.\n\nReturns:\n file size in bytes.", "source": "juraj_google_style"} -{"code": "def __init__(self, tokenizer=None, trie=None):\n\n pyee.EventEmitter.__init__(self)\n self.tokenizer = tokenizer or EnglishTokenizer()\n self.trie = trie or Trie()\n self.regular_expressions_entities = []\n self._regex_strings = set()\n self.tagger = EntityTagger(self.trie, self.tokenizer, self.regular_expressions_entities)\n self.intent_parsers = []", "docstring": "Initialize the IntentDeterminationEngine\n\nArgs:\n tokenizer(tokenizer) : tokenizer used to break up spoken text\n example EnglishTokenizer()\n trie(Trie): tree of matches to Entites", "source": "juraj_google_style"} -{"code": "def CallDhclient(\n interfaces, logger, dhclient_script=None):\n\n logger.info('Enabling the Ethernet interfaces %s.', interfaces)\n\n dhclient_command = ['dhclient']\n\n if dhclient_script and os.path.exists(dhclient_script):\n dhclient_command += ['-sf', dhclient_script]\n\n try:\n subprocess.check_call(dhclient_command + ['-x'] + interfaces)\n subprocess.check_call(dhclient_command + interfaces)\n except subprocess.CalledProcessError:\n logger.warning('Could not enable interfaces %s.', interfaces)", "docstring": "Configure the network interfaces using dhclient.\n\nArgs:\n interfaces: list of string, the output device names to enable.\n logger: logger object, used to write to SysLog and serial port.\n dhclient_script: string, the path to a dhclient script used by dhclient.", "source": "juraj_google_style"} -{"code": "def get_consensus_module(module_name):\n\n module_package = module_name\n if module_name == 'genesis':\n module_package = (\n 'sawtooth_validator.journal.consensus.genesis.'\n 'genesis_consensus'\n )\n elif module_name == 'devmode':\n module_package = (\n 'sawtooth_validator.journal.consensus.dev_mode.'\n 'dev_mode_consensus'\n )\n\n try:\n return importlib.import_module(module_package)\n except ImportError:\n raise UnknownConsensusModuleError(\n 'Consensus module \"{}\" does not exist.'.format(module_name))", "docstring": "Returns a consensus module by name.\n\nArgs:\n module_name (str): The name of the module to load.\n\nReturns:\n module: The consensus module.\n\nRaises:\n UnknownConsensusModuleError: Raised if the given module_name does\n not correspond to a consensus implementation.", "source": "juraj_google_style"} -{"code": "def assert_visible(self, selector, testid=None, **kwargs):\n\n self.info_log(\n \"Assert visible selector(%s) testid(%s)\" % (selector, testid)\n )\n\n highlight = kwargs.get(\n 'highlight',\n 
BROME_CONFIG['highlight']['highlight_on_assertion_success']\n )\n self.debug_log(\"effective highlight: %s\" % highlight)\n\n wait_until_visible = kwargs.get(\n 'wait_until_visible',\n BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible'] # noqa\n )\n self.debug_log(\"effective wait_until_visible: %s\" % wait_until_visible)\n\n if wait_until_visible:\n self.wait_until_visible(selector, raise_exception=False)\n\n element = self.find(\n selector,\n raise_exception=False,\n wait_until_visible=False,\n wait_until_present=False\n )\n if element and element.is_displayed(raise_exception=False):\n if highlight:\n element.highlight(\n style=BROME_CONFIG['highlight']['style_on_assertion_success'] # noqa\n )\n if testid is not None:\n self.create_test_result(testid, True)\n\n return True\n else:\n if testid is not None:\n self.create_test_result(testid, False)\n\n return False", "docstring": "Assert that the element is visible in the dom\n\nArgs:\n selector (str): the selector used to find the element\n testid (str): the test_id or a str\n\n Kwargs:\n wait_until_visible (bool)\n highlight (bool)\n\nReturns:\n bool: True is the assertion succeed; False otherwise.", "source": "juraj_google_style"} -{"code": "def MakeSuiteFromHist(hist, name=None):\n\n if name is None:\n name = hist.name\n\n # make a copy of the dictionary\n d = dict(hist.GetDict())\n return MakeSuiteFromDict(d, name)", "docstring": "Makes a normalized suite from a Hist object.\n\nArgs:\n hist: Hist object\n name: string name\n\nReturns:\n Suite object", "source": "juraj_google_style"} -{"code": "def get_resource_url(cls, resource, base_url):\n\n if resource.Meta.resource_name:\n url = '{}/{}'.format(base_url, resource.Meta.resource_name)\n else:\n p = inflect.engine()\n plural_name = p.plural(resource.Meta.name.lower())\n url = '{}/{}'.format(base_url, plural_name)\n return cls._parse_url_and_validate(url)", "docstring": "Construct the URL for talking to this resource.\n\n i.e.:\n\n http://myapi.com/api/resource\n\n Note that this is NOT the method for calling individual instances i.e.\n\n http://myapi.com/api/resource/1\n\nArgs:\n resource: The resource class instance\n base_url: The Base URL of this API service.\n\nReturns:\n resource_url: The URL for this resource", "source": "juraj_google_style"} -{"code": "def uint8sc(im):\n\n im = np.asarray(im)\n immin = im.min()\n immax = im.max()\n imrange = immax - immin\n return cv2.convertScaleAbs(im - immin, alpha=255 / imrange)", "docstring": "Scale the image to uint8\n\n Parameters:\n -----------\n im: 2d array\n The image\n\nReturns:\n --------\n im: 2d array (dtype uint8)\n The scaled image to uint8", "source": "juraj_google_style"} -{"code": "def delete(self, uri, force=False, timeout=-1, custom_headers=None):\n\n if force:\n uri += '?force=True'\n\n logger.debug(\"Delete resource (uri = %s)\" % (str(uri)))\n\n task, body = self._connection.delete(uri, custom_headers=custom_headers)\n\n if not task:\n # 204 NO CONTENT\n # Successful return from a synchronous delete operation.\n return True\n\n task = self._task_monitor.wait_for_task(task, timeout=timeout)\n\n return task", "docstring": "Deletes current resource.\n\nArgs:\n force: Flag to delete the resource forcefully, default is False.\n timeout: Timeout in seconds.\n custom_headers: Allows to set custom http headers.", "source": "juraj_google_style"} -{"code": "def restore(self, state):\n\n self._clear()\n self._parseUserInfo({'labels': state['labels']})\n self._parseNodes(state['nodes'])\n self._keep_version = 
state['keep_version']", "docstring": "Unserialize saved note data.\n\nArgs:\n state (dict): Serialized state to load.", "source": "juraj_google_style"} -{"code": "def __init__(self, url_formatter, mapsources):\n\n super().__init__(url_formatter)\n self.map_folders = {\n root: {\n \"folders\": folders,\n \"maps\": maps\n } for root, folders, maps in walk_mapsources(mapsources)\n }\n self.add_maps(parent=self.kml_doc)", "docstring": "Create a KML master document.\n\nArgs:\n mapsources (list of MapSource):", "source": "juraj_google_style"} -{"code": "def get_variables(scope=None, suffix=None):\n\n candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]\n if suffix is not None:\n candidates = [var for var in candidates if var.op.name.endswith(suffix)]\n return candidates", "docstring": "Gets the list of variables, filtered by scope and/or suffix.\n\nArgs:\n scope: an optional scope for filtering the variables to return.\n suffix: an optional suffix for filtering the variables to return.\n\nReturns:\n a copied list of variables with scope and suffix.", "source": "juraj_google_style"} -{"code": "def search(self, search_phrase, limit=None):\n\n\n query, query_params = self._make_query_from_terms(search_phrase, limit=limit)\n\n self._parsed_query = (str(query), query_params)\n\n assert isinstance(query, TextClause)\n\n datasets = {}\n\n def make_result(vid=None, b_score=0, p_score=0):\n res = DatasetSearchResult()\n res.b_score = b_score\n res.p_score = p_score\n res.partitions = set()\n res.vid = vid\n return res\n\n if query_params:\n results = self.execute(query, **query_params)\n\n for result in results:\n vid, dataset_score = result\n\n datasets[vid] = make_result(vid, b_score=dataset_score)\n\n\n logger.debug('Extending datasets with partitions.')\n\n for partition in self.backend.partition_index.search(search_phrase):\n\n if partition.dataset_vid not in datasets:\n datasets[partition.dataset_vid] = make_result(partition.dataset_vid)\n\n datasets[partition.dataset_vid].p_score += partition.score\n datasets[partition.dataset_vid].partitions.add(partition)\n\n return list(datasets.values())", "docstring": "Finds datasets by search phrase.\n\nArgs:\n search_phrase (str or unicode):\n limit (int, optional): how many results to return. 
None means without limit.\n\nReturns:\n list of DatasetSearchResult instances.", "source": "juraj_google_style"} -{"code": "def compose(*funcs):\n\n if not funcs:\n return lambda *args: args[0] if args else None\n\n if len(funcs) == 1:\n return funcs[0]\n\n last = funcs[-1]\n rest = funcs[0:-1]\n return lambda *args: reduce(lambda ax, func: func(ax),\n reversed(rest), last(*args))", "docstring": "chained function composition wrapper\n\n creates function f, where f(x) = arg0(arg1(arg2(...argN(x))))\n\n if *funcs is empty, an identity function is returned.\n\nArgs:\n *funcs: list of functions to chain\n\nReturns:\n a new function composed of chained calls to *args", "source": "juraj_google_style"} -{"code": "def transform_string_replace(source, needle, haystack, name=None):\n\n\n with ops.name_scope(name, \"TransformStringReplace\", [source]):\n source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)\n if isinstance(source, tf.SparseTensor):\n result = tf.SparseTensor(\n indices=source.indices,\n values=ops_module.transform_string_replace(source.values, needle, haystack),\n dense_shape=source.dense_shape\n )\n else:\n result = ops_module.transform_string_replace(source, needle, haystack)\n\n return result", "docstring": "Replace all substrings from `needle` to corresponding strings in `haystack` with source.\n\nArgs:\n source: `Tensor` or `SparseTensor` of any shape, source strings for replacing.\n needle: List of strings to search in source\n haystack: List of strings to replace with. Should have same length as `needle`.\n name: A name for the operation (optional).\n\nReturns:\n `Tensor` or `SparseTensor` of same shape and size as input.", "source": "juraj_google_style"} -{"code": "def DeleteRecords(cls, ids, token):\n\n with data_store.DB.GetMutationPool() as mutation_pool:\n mutation_pool.QueueDeleteRecords(ids)", "docstring": "Delete records identified by ids.\n\nArgs:\n ids: A list of ids provided by ClaimRecords.\n token: The database access token to delete with.\n\nRaises:\n LockError: If the queue is not locked.", "source": "juraj_google_style"} -{"code": "def join_dags(self, names=None):\n\n return self._client.send(\n Request(\n action='join_dags',\n payload={'names': names}\n )\n ).success", "docstring": "Wait for the specified dags to terminate.\n\n This function blocks until the specified dags terminate. 
If no dags are specified\n wait for all dags of the workflow, except the dag of the task calling this signal,\n to terminate.\n\nArgs:\n names (list): The names of the dags that have to terminate.\n\nReturns:\n bool: True if all the signal was sent successfully.", "source": "juraj_google_style"} -{"code": "def AddSerializedFile(self, serialized_file_desc_proto):\n\n\n # pylint: disable=g-import-not-at-top\n from google.protobuf import descriptor_pb2\n file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(\n serialized_file_desc_proto)\n self.Add(file_desc_proto)", "docstring": "Adds the FileDescriptorProto and its types to this pool.\n\nArgs:\n serialized_file_desc_proto: A bytes string, serialization of the\n FileDescriptorProto to add.", "source": "juraj_google_style"} -{"code": "def _comparison_functions(cls, partial=False):\n\n\n def prerelease_cmp(a, b):\n\n if a and b:\n return identifier_list_cmp(a, b)\n elif a:\n # Versions with prerelease field have lower precedence\n return -1\n elif b:\n return 1\n else:\n return 0\n\n def build_cmp(a, b):\n\n if a == b:\n return 0\n else:\n return NotImplemented\n\n def make_optional(orig_cmp_fun):\n\n @functools.wraps(orig_cmp_fun)\n def alt_cmp_fun(a, b):\n if a is None or b is None:\n return 0\n return orig_cmp_fun(a, b)\n\n return alt_cmp_fun\n\n if partial:\n return [\n base_cmp, # Major is still mandatory\n make_optional(base_cmp),\n make_optional(base_cmp),\n make_optional(prerelease_cmp),\n make_optional(build_cmp),\n ]\n else:\n return [\n base_cmp,\n base_cmp,\n base_cmp,\n prerelease_cmp,\n build_cmp,\n ]", "docstring": "Retrieve comparison methods to apply on version components.\n\n This is a private API.\n\nArgs:\n partial (bool): whether to provide 'partial' or 'strict' matching.\n\nReturns:\n 5-tuple of cmp-like functions.", "source": "juraj_google_style"} -{"code": "def _create_controller_info_record(self, controller_module_name):\n\n module = self._controller_modules[controller_module_name]\n controller_info = None\n try:\n controller_info = module.get_info(\n copy.copy(self._controller_objects[controller_module_name]))\n except AttributeError:\n logging.warning('No optional debug info found for controller '\n '%s. To provide it, implement `get_info`.',\n controller_module_name)\n try:\n yaml.dump(controller_info)\n except TypeError:\n logging.warning('The info of controller %s in class \"%s\" is not '\n 'YAML serializable! 
Coercing it to string.',\n controller_module_name, self._class_name)\n controller_info = str(controller_info)\n return records.ControllerInfoRecord(\n self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,\n controller_info)", "docstring": "Creates controller info record for a particular controller type.\n\n Info is retrieved from all the controller objects spawned from the\n specified module, using the controller module's `get_info` function.\n\nArgs:\n controller_module_name: string, the name of the controller module\n to retrieve info from.\n\nReturns:\n A records.ControllerInfoRecord object.", "source": "juraj_google_style"} -{"code": "def populate_settings_dir(force: bool = False) -> bool:\n\n res = False\n if _default_settings_path == _settings_path:\n return res\n\n for src in list(_default_settings_path.glob('**/*.json')):\n dest = _settings_path / src.relative_to(_default_settings_path)\n if not force and dest.exists():\n continue\n res = True\n dest.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(src, dest)\n return res", "docstring": "Populate settings directory with default settings files\n\nArgs:\n force: if ``True``, replace existing settings files with default ones\n\nReturns:\n ``True`` if any files were copied and ``False`` otherwise", "source": "juraj_google_style"} -{"code": "def upload_marcxml(self, marcxml, mode):\n\n if mode not in [\"-i\", \"-r\", \"-c\", \"-a\", \"-ir\"]:\n raise NameError, \"Incorrect mode \" + str(mode)\n\n # Are we running locally? If so, submit directly\n if self.local:\n (code, marcxml_filepath) = tempfile.mkstemp(prefix=\"upload_%s\" % \\\n time.strftime(\"%Y%m%d_%H%M%S_\",\n time.localtime()))\n marcxml_file_d = os.fdopen(code, \"w\")\n marcxml_file_d.write(marcxml)\n marcxml_file_d.close()\n return task_low_level_submission(\"bibupload\", \"\", mode, marcxml_filepath)\n else:\n params = urllib.urlencode({'file': marcxml,\n 'mode': mode})\n ## We don't use self.browser as batchuploader is protected by IP\n opener = urllib2.build_opener()\n opener.addheaders = [('User-Agent', CFG_USER_AGENT)]\n return opener.open(self.server_url + \"/batchuploader/robotupload\", params,)", "docstring": "Uploads a record to the server\n\n Parameters:\n marcxml - *str* the XML to upload.\n mode - *str* the mode to use for the upload.\n \"-i\" insert new records\n \"-r\" replace existing records\n \"-c\" correct fields of records\n \"-a\" append fields to records\n \"-ir\" insert record or replace if it exists", "source": "juraj_google_style"} -{"code": "def __init__(self, cookie_identifier):\n\n data_type = '{0:s}:{1:s}'.format(self.DATA_TYPE, cookie_identifier)\n\n super(GoogleAnalyticsEventData, self).__init__(data_type=data_type)\n self.cookie_name = None\n self.domain_hash = None\n self.pages_viewed = None\n self.sessions = None\n self.sources = None\n self.url = None\n self.visitor_id = None", "docstring": "Initializes event data.\n\nArgs:\n cookie_identifier (str): unique identifier of the cookie.", "source": "juraj_google_style"} -{"code": "def union(self, other):\n\n operation = bool.__or__\n self.cross_product(other, operation)\n return self", "docstring": "Constructs an unminimized DFA recognizing the union of the languages of two given DFAs.\n\nArgs:\n other (DFA): The other DFA that will be used\n for the union operation\n\nReturns:\n DFA: The resulting DFA", "source": "juraj_google_style"} -{"code": "def __init__(self, dir=dir, size=10000, upstream=None, **kwargs):\n\n\n from ambry.dbexceptions import ConfigurationError\n\n 
super(FsLimitedCache, self).__init__(dir, upstream=upstream, **kwargs)\n\n self._size = size\n self.maxsize = int(size) * 1048578 # size in MB\n\n self.readonly = False\n self.usreadonly = False\n self._database = None\n\n self.use_db = True\n\n if not os.path.isdir(self.cache_dir):\n os.makedirs(self.cache_dir)\n\n if not os.path.isdir(self.cache_dir):\n raise ConfigurationError(\n \"Cache dir '{}' is not valid\".format(\n self.cache_dir))", "docstring": "Init a new FileSystem Cache\n\nArgs:\n cache_dir\n maxsize. Maximum size of the cache, in GB", "source": "juraj_google_style"} -{"code": "def __init__(self, channel):\n\n self.MemberAdd = channel.unary_unary(\n '/etcdserverpb.Cluster/MemberAdd',\n request_serializer=rpc__pb2.MemberAddRequest.SerializeToString,\n response_deserializer=rpc__pb2.MemberAddResponse.FromString,\n )\n self.MemberRemove = channel.unary_unary(\n '/etcdserverpb.Cluster/MemberRemove',\n request_serializer=rpc__pb2.MemberRemoveRequest.SerializeToString,\n response_deserializer=rpc__pb2.MemberRemoveResponse.FromString,\n )\n self.MemberUpdate = channel.unary_unary(\n '/etcdserverpb.Cluster/MemberUpdate',\n request_serializer=rpc__pb2.MemberUpdateRequest.SerializeToString,\n response_deserializer=rpc__pb2.MemberUpdateResponse.FromString,\n )\n self.MemberList = channel.unary_unary(\n '/etcdserverpb.Cluster/MemberList',\n request_serializer=rpc__pb2.MemberListRequest.SerializeToString,\n response_deserializer=rpc__pb2.MemberListResponse.FromString,\n )", "docstring": "Constructor.\n\nArgs:\n channel: A grpc.Channel.", "source": "juraj_google_style"} -{"code": "def make_tstore_conn(params, **kwargs):\n\n log.setLevel(params.get('log_level', __LOG_LEVEL__))\n log.debug(\"\\n%s\", params)\n params.update(kwargs)\n try:\n vendor = RdfwConnections['triplestore'][params.get('vendor')]\n except KeyError:\n vendor = RdfwConnections['triplestore']['blazegraph']\n conn = vendor(**params)\n return conn", "docstring": "Returns a triplestore connection\n\nArgs:\n attr_name: The name the connection will be assigned in the\n config manager\n params: The paramaters of the connection\n\n kwargs:\n log_level: logging level to use", "source": "juraj_google_style"} -{"code": "def _calculateCrcString(inputstring):\n\n _checkString(inputstring, description='input CRC string')\n\n # Preload a 16-bit register with ones\n register = 0xFFFF\n\n for char in inputstring:\n register = (register >> 8) ^ _CRC16TABLE[(register ^ ord(char)) & 0xFF]\n\n return _numToTwoByteString(register, LsbFirst=True)", "docstring": "Calculate CRC-16 for Modbus.\n\nArgs:\n inputstring (str): An arbitrary-length message (without the CRC).\n\nReturns:\n A two-byte CRC string, where the least significant byte is first.", "source": "juraj_google_style"} -{"code": "def __init__(self, name, context=None):\n\n super(Job, self).__init__(name)\n if context is None:\n context = datalab.Context.default()\n self._context = context\n self._api = discovery.build('ml', 'v1', credentials=self._context.credentials)\n if not name.startswith('projects/'):\n name = 'projects/' + self._context.project_id + '/jobs/' + name\n self._name = name\n self._refresh_state()", "docstring": "Initializes an instance of a CloudML Job.\n\nArgs:\n name: the name of the job. 
It can be an operation full name\n (\"projects/[project_id]/jobs/[operation_name]\") or just [operation_name].\n context: an optional Context object providing project_id and credentials.", "source": "juraj_google_style"} -{"code": "def insert_arguments_into_query(compilation_result, arguments):\n\n _ensure_arguments_are_provided(compilation_result.input_metadata, arguments)\n\n if compilation_result.language == MATCH_LANGUAGE:\n return insert_arguments_into_match_query(compilation_result, arguments)\n elif compilation_result.language == GREMLIN_LANGUAGE:\n return insert_arguments_into_gremlin_query(compilation_result, arguments)\n elif compilation_result.language == SQL_LANGUAGE:\n return insert_arguments_into_sql_query(compilation_result, arguments)\n else:\n raise AssertionError(u'Unrecognized language in compilation result: '\n u'{}'.format(compilation_result))", "docstring": "Insert the arguments into the compiled GraphQL query to form a complete query.\n\nArgs:\n compilation_result: a CompilationResult object derived from the GraphQL compiler\n arguments: dict, mapping argument name to its value, for every parameter the query expects.\n\nReturns:\n string, a query in the appropriate output language, with inserted argument data", "source": "juraj_google_style"} -{"code": "def _faster_to_representation(self, instance):\n\n\n ret = {}\n fields = self._readable_fields\n\n is_fast = isinstance(instance, prefetch.FastObject)\n id_fields = self._readable_id_fields\n\n for field in fields:\n attribute = None\n\n # we exclude dynamic fields here because the proper fastquery\n # dereferencing happens in the `get_attribute` method now\n if (\n is_fast and\n not isinstance(\n field,\n (DynamicGenericRelationField, DynamicRelationField)\n )\n ):\n if field in id_fields and field.source not in instance:\n # TODO - make better.\n attribute = instance.get(field.source + '_id')\n ret[field.field_name] = attribute\n continue\n else:\n try:\n attribute = instance[field.source]\n except KeyError:\n # slower, but does more stuff\n # Also, some temp debugging\n if hasattr(instance, field.source):\n attribute = getattr(instance, field.source)\n else:\n # Fall back on DRF behavior\n attribute = field.get_attribute(instance)\n print(\n 'Missing %s from %s' % (\n field.field_name,\n self.__class__.__name__\n )\n )\n else:\n try:\n attribute = field.get_attribute(instance)\n except SkipField:\n continue\n\n if attribute is None:\n # We skip `to_representation` for `None` values so that\n # fields do not have to explicitly deal with that case.\n ret[field.field_name] = None\n else:\n ret[field.field_name] = field.to_representation(attribute)\n\n return ret", "docstring": "Modified to_representation with optimizations.\n\n 1) Returns a plain old dict as opposed to OrderedDict.\n (Constructing ordered dict is ~100x slower than `{}`.)\n 2) Ensure we use a cached list of fields\n (this optimization exists in DRF 3.2 but not 3.1)\n\nArgs:\n instance: a model instance or data object\n\nReturns:\n Dict of primitive datatypes.", "source": "juraj_google_style"} -{"code": "def FetchBlobsForSignedBinary(\n binary_urn,\n token = None\n):\n\n if _ShouldUseLegacyDatastore():\n try:\n aff4_stream = aff4.FACTORY.Open(\n binary_urn, aff4_type=collects.GRRSignedBlob, mode=\"r\", token=token)\n except aff4.InstantiationError:\n raise SignedBinaryNotFoundError(binary_urn)\n timestamp = aff4_stream.Get(aff4_stream.Schema.TYPE).age\n return (blob for blob in aff4_stream), timestamp\n else:\n try:\n references, timestamp = 
data_store.REL_DB.ReadSignedBinaryReferences(\n _SignedBinaryIDFromURN(binary_urn))\n except db.UnknownSignedBinaryError:\n raise SignedBinaryNotFoundError(binary_urn)\n blob_ids = [r.blob_id for r in references.items]\n raw_blobs = (data_store.BLOBS.ReadBlob(blob_id) for blob_id in blob_ids)\n blobs = (\n rdf_crypto.SignedBlob.FromSerializedString(raw_blob)\n for raw_blob in raw_blobs)\n return blobs, timestamp", "docstring": "Retrieves blobs for the given binary from the datastore.\n\nArgs:\n binary_urn: RDFURN that uniquely identifies the binary.\n token: ACL token to use with the legacy (non-relational) datastore.\n\nReturns:\n A tuple containing an iterator for all the binary's blobs and an\n RDFDatetime representing when the binary's contents were saved\n to the datastore.\n\nRaises:\n SignedBinaryNotFoundError: If no signed binary with the given URN exists.", "source": "juraj_google_style"} -{"code": "def __init__(self, job_context, shard_state):\n\n self.job_context = job_context\n self.id = shard_state.shard_id\n self.number = shard_state.shard_number\n self.attempt = shard_state.retries + 1\n self._state = shard_state", "docstring": "Init.\n\n The signature of __init__ is subject to change.\n\n Read only properties:\n job_context: JobContext object.\n id: str. of format job_id-shard_number.\n number: int. shard number. 0 indexed.\n attempt: int. The current attempt at executing this shard.\n Starting at 1.\n\nArgs:\n job_context: map_job.JobConfig.\n shard_state: model.ShardState.", "source": "juraj_google_style"} -{"code": "def coordinate_tensor(shape, axis):\n\n if axis < 0:\n axis = tf.size(shape) + axis # Convert to positive for the one_hot indice\n\n r = tf.range(shape[axis])\n r_shape = tf.one_hot(\n axis, tf.size(shape), on_value=-1, off_value=1, dtype=tf.int32)\n return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape)", "docstring": "Return a tensor with given shape containing coordinate along given axis.\n\nArgs:\n shape: a Tensor representing the shape of the output Tensor\n axis: an integer\n\nReturns:\n A tensor with shape shape and type tf.int32, where each elements its\n coordinate along the given axis.", "source": "juraj_google_style"} -{"code": "def GreaterThan(self, value):\n\n self._awql = self._CreateSingleValueCondition(value, '>')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"greater than\".\n\nArgs:\n value: The value to be used in the WHERE condition.\n\nReturns:\n The query builder that this WHERE builder links to.", "source": "juraj_google_style"} -{"code": "def post_pipeline(self, pipeline):\n\n url = \"{0}/pipelines\".format(API_URL)\n\n if isinstance(pipeline, str):\n pipeline_json = pipeline\n else:\n pipeline_json = json.dumps(pipeline)\n\n pipeline_dict = json.loads(pipeline_json)\n\n self.log.debug('Pipeline JSON:\\n%s', pipeline_json)\n\n pipeline_response = requests.post(\n url, data=pipeline_json, headers=self.header, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n\n self.log.debug('Pipeline creation response:\\n%s', pipeline_response.text)\n\n if not pipeline_response.ok:\n raise SpinnakerPipelineCreationFailed('Pipeline for {0}: {1}'.format(self.app_name,\n pipeline_response.json()))\n\n self.log.info('Successfully created \"%s\" pipeline in application \"%s\".', pipeline_dict['name'],\n pipeline_dict['application'])", "docstring": "Send Pipeline JSON to Spinnaker.\n\nArgs:\n pipeline (json): json of the pipeline to be created in Spinnaker", "source": "juraj_google_style"} -{"code": "def 
cardinal(self, to):\n\n return sum(1 for _ in filter(\n lambda d: not d.external and d.target in to, self.dependencies))", "docstring": "Return the number of dependencies of this module to the given node.\n\nArgs:\n to (Package/Module): the target node.\n\nReturns:\n int: number of dependencies.", "source": "juraj_google_style"} -{"code": "def merge_all_models_into_first_model(biop_structure):\n\n from string import ascii_uppercase\n idx = 1\n first_model = biop_structure[0]\n\n for m in biop_structure.get_models():\n # Don't duplicate the original model\n if first_model.id == m.id:\n continue\n for c in m.get_chains():\n c.id = ascii_uppercase[idx]\n first_model.add(c)\n idx += 1", "docstring": "Merge all existing models into a Structure's first_model attribute.\n\n This directly modifies the Biopython Structure object. Chains IDs will start from A and increment for each new\n chain (model that is converted).\n\nArgs:\n biop_structure (Structure): Structure with multiple models that should be merged", "source": "juraj_google_style"} -{"code": "def incoming_edges(self, node):\n\n edges = self.edges()\n in_edges = []\n for out_node, in_node in edges:\n if node is in_node:\n in_edges.append((out_node, in_node))\n return tuple(in_edges)", "docstring": "Returns a ``tuple`` of incoming edges for a **node object**.\n\nArgs:\n - node(``object``) **node object** present in the graph to be queried\n for incoming edges.", "source": "juraj_google_style"} -{"code": "def _log_submission(submission, student_item):\n\n logger.info(\n u\"Created submission uuid={submission_uuid} for \"\n u\"(course_id={course_id}, item_id={item_id}, \"\n u\"anonymous_student_id={anonymous_student_id})\"\n .format(\n submission_uuid=submission[\"uuid\"],\n course_id=student_item[\"course_id\"],\n item_id=student_item[\"item_id\"],\n anonymous_student_id=student_item[\"student_id\"]\n )\n )", "docstring": "Log the creation of a submission.\n\nArgs:\n submission (dict): The serialized submission model.\n student_item (dict): The serialized student item model.\n\nReturns:\n None", "source": "juraj_google_style"} -{"code": "def _add_string_to_commastring(self, field, string):\n # type: (str, str) -> bool\n\n if string in self._get_stringlist_from_commastring(field):\n return False\n strings = '%s,%s' % (self.data.get(field, ''), string)\n if strings[0] == ',':\n strings = strings[1:]\n self.data[field] = strings\n return True", "docstring": "Add a string to a comma separated list of strings\n\nArgs:\n field (str): Field containing comma separated list\n string (str): String to add\n\nReturns:\n bool: True if string added or False if string already present", "source": "juraj_google_style"} -{"code": "def synthesize(self, duration, tick_frequency):\n\n sr = self.samplerate.samples_per_second\n # create a short, tick sound\n tick = np.random.uniform(low=-1., high=1., size=int(sr * .1))\n tick *= np.linspace(1, 0, len(tick))\n # create silence\n samples = np.zeros(int(sr * (duration / Seconds(1))))\n ticks_per_second = Seconds(1) / tick_frequency\n # introduce periodic ticking sound\n step = int(sr // ticks_per_second)\n for i in range(0, len(samples), step):\n size = len(samples[i:i + len(tick)])\n samples[i:i + len(tick)] += tick[:size]\n return AudioSamples(samples, self.samplerate)", "docstring": "Synthesize periodic \"ticks\", generated from white noise and an envelope\n\nArgs:\n duration (numpy.timedelta64): The total duration of the sound to be\n synthesized\n tick_frequency (numpy.timedelta64): The frequency of the 
ticking\n sound", "source": "juraj_google_style"} -{"code": "def to_df(self, **kwargs):\n\n\n return pd.read_sql(sql=self.statement, con=self.session.bind, **kwargs)", "docstring": "[pandas.read_sql]\n\nArgs:\n Query {[type]} -- [description]\n\nReturns:\n [pd.DataFrame or generate] -- [description]", "source": "juraj_google_style"} -{"code": "def __init__(self, values, mode='rgb', alpha=1.0, wref=_DEFAULT_WREF):\n\n if not(isinstance(values, tuple)):\n raise TypeError('values must be a tuple')\n\n if mode=='rgb':\n self.__rgb = values\n self.__hsl = Color.RgbToHsl(*values)\n elif mode=='hsl':\n self.__hsl = values\n self.__rgb = Color.HslToRgb(*values)\n else:\n raise ValueError('Invalid color mode: ' + mode)\n\n self.__a = alpha\n self.__wref = wref", "docstring": "Instantiate a new grapefruit.Color object.\n\n Parameters:\n :values:\n The values of this color, in the specified representation.\n :mode:\n The representation mode used for values.\n :alpha:\n the alpha value (transparency) of this color.\n :wref:\n The whitepoint reference, default is 2° D65.", "source": "juraj_google_style"} -{"code": "def __init__(self, protocol):\n\n self._protocol = protocol\n self._current_consumer = self._HEADER\n self._message = None\n self._buf_header = None", "docstring": "Configure a Receiver with a specific Bokeh protocol version.\n\nArgs:\n protocol (Protocol) :\n A Bokeh protocol object to use to assemble collected message\n fragments.", "source": "juraj_google_style"} -{"code": "def __init__(self, idx, name=\"select_input\"):\n\n super(SelectInput, self).__init__(name=name)\n self._check_type(idx)\n self._idx = idx", "docstring": "Module constructor.\n\nArgs:\n idx: Indexes of the tensors to select. If `idx` is an integer, then\n a `Tensor` is returned. If `idx` is a (nested) list/tuple, then a\n (nested) tuple of `Tensor` is returned.\n name: Name of the module.\n\nRaises:\n TypeError: If `idx` is not an list, tuple or integer.", "source": "juraj_google_style"} -{"code": "def get_drift_corrected_structures(self, start=None, stop=None, step=None):\n\n coords = np.array(self.structure.cart_coords)\n species = self.structure.species_and_occu\n lattices = self.lattices\n nsites, nsteps, dim = self.corrected_displacements.shape\n\n for i in range(start or 0, stop or nsteps, step or 1):\n latt = lattices[0] if len(lattices) == 1 else lattices[i]\n yield Structure(\n latt, species,\n coords + self.corrected_displacements[:, i, :],\n coords_are_cartesian=True)", "docstring": "Returns an iterator for the drift-corrected structures. Use of\n iterator is to reduce memory usage as # of structures in MD can be\n huge. 
You don't often need all the structures all at once.\n\nArgs:\n start, stop, step (int): applies a start/stop/step to the iterator.\n Faster than applying it after generation, as it reduces the\n number of structures created.", "source": "juraj_google_style"} -{"code": "def _modeIsValid(self, mode):\n\n try:\n # Suport for version 2 of wrappers\n return mode in self.modes.keys()\n except AttributeError as e:\n # Legacy for mantaining old wrappers\n if mode in self.isValidMode.keys():\n if mode in self.isValidMode.keys():\n return True\n return False", "docstring": "Verification of whether the mode is a correct option to be used.\n\nArgs:\n -----\n mode: Mode to be executed.\n\nReturns:\n -------\n True if the mode exists in the three main folders.", "source": "juraj_google_style"} -{"code": "def add_file_ident_desc(self, new_fi_desc, logical_block_size):\n # type: (UDFFileIdentifierDescriptor, int) -> int\n\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized')\n\n if self.icb_tag.file_type != 4:\n raise pycdlibexception.PyCdlibInvalidInput('Can only add a UDF File Identifier to a directory')\n\n self.fi_descs.append(new_fi_desc)\n\n num_bytes_to_add = UDFFileIdentifierDescriptor.length(len(new_fi_desc.fi))\n\n old_num_extents = 0\n # If info_len is 0, then this is a brand-new File Entry, and thus the\n # number of extents it is using is 0.\n if self.info_len > 0:\n old_num_extents = utils.ceiling_div(self.info_len, logical_block_size)\n\n self.info_len += num_bytes_to_add\n new_num_extents = utils.ceiling_div(self.info_len, logical_block_size)\n\n self.log_block_recorded = new_num_extents\n\n self.alloc_descs[0][0] = self.info_len\n if new_fi_desc.is_dir():\n self.file_link_count += 1\n\n return new_num_extents - old_num_extents", "docstring": "A method to add a new UDF File Identifier Descriptor to this UDF File\n Entry.\n\n Parameters:\n new_fi_desc - The new UDF File Identifier Descriptor to add.\n logical_block_size - The logical block size to use.\n\nReturns:\n The number of extents added due to adding this File Identifier Descriptor.", "source": "juraj_google_style"} -{"code": "def __init__(self, tcex, domain, data_type, mapping=None):\n\n self.tcex = tcex\n self.domain = domain\n self.data_type = data_type\n self.mapping = mapping or {'dynamic': False}\n\n # properties\n\n # This module requires a token.\n if self.tcex.default_args.tc_token is None:\n raise RuntimeError(\n 'The DataModel TcEx Module requires a Token to interact with the '\n 'ThreatConnect platform.'\n )\n\n self._create_index() # create the initial index.\n self._update_mappings()", "docstring": "Initialize class properties.\n\nArgs:\n tcex ([type]): [description]\n domain (str): A value of “system”, “organization”, or “local”.\n data_type (str): A free form type name for the data.\n mapping (dict, optional): Defaults to None. 
Elasticsearch mappings data.\n\nRaises:\n RuntimeError: [description]", "source": "juraj_google_style"} -{"code": "def unescape(inp, quote='\"'):\n\n if len(inp) < 2:\n return inp\n\n output = \"\"\n unesc = False\n for act in inp:\n if act == quote and unesc:\n output = output[:-1]\n\n output += act\n\n if act == \"\\\\\":\n unesc = not unesc\n else:\n unesc = False\n\n return output", "docstring": "Unescape `quote` in string `inp`.\n\n Example usage::\n\n >> unescape('hello \\\\\"')\n 'hello \"'\n\nArgs:\n inp (str): String in which `quote` will be unescaped.\n quote (char, default \"): Specify which character will be unescaped.\n\nReturns:\n str: Unescaped string.", "source": "juraj_google_style"} -{"code": "def list_nics(access_token, subscription_id):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/providers/Microsoft.Network/',\n '/networkInterfaces?api-version=', NETWORK_API])\n return do_get(endpoint, access_token)", "docstring": "List the network interfaces in a subscription.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n\nReturns:\n HTTP response. JSON body of NICs list with properties.", "source": "juraj_google_style"} -{"code": "def __getattr__(self, name):\n\n # try:\n # print('xxxxx', name, self._PROBES())\n # xx = self.read_probes(name)\n # print(xx)\n # return xx\n # # return self.read_probes(name)\n # except:\n # # restores standard behavior for missing keys\n # if not str(name) in ['_initialized', '_settings']:\n # print('class ' + type(self).__name__ + ' has no attribute ' + str(name))\n # raise AttributeError('class ' + type(self).__name__ + ' has no attribute ' + str(name))\n\n if not str(name) in ['_initialized', '_settings']:\n try:\n # print('xxxxx name', name)\n xx = self.read_probes(name)\n # print(xx)\n return xx\n # return self.read_probes(name)\n except:\n # restores standard behavior for missing keys\n print(('class ' + type(self).__name__ + ' has no attribute ' + str(name)))\n raise AttributeError('class ' + type(self).__name__ + ' has no attribute ' + str(name))", "docstring": "allows to read instrument inputs in the form value = instrument.input\n\nArgs:\n name: name of input channel\n\n Returns: value of input channel", "source": "juraj_google_style"} -{"code": "def fit(self, sents, **kwargs):\n\n tokens = list(itertools.chain.from_iterable(sents))\n counter = Counter(tokens)\n self.vocab = self.build_vocab(counter, **kwargs)", "docstring": "Builds a vocabulary object based on the tokens in the input.\n\nArgs:\n sents: A list of lists of tokens (representing sentences)\n\n Vocab kwargs include:\n max_size\n min_freq\n specials\n unk_init", "source": "juraj_google_style"} -{"code": "def __setitem__(self, key, value):\n\n if key is None:\n key = self.default_key(value)\n if key in self:\n raise KeyError(\n \"key %s already registered in registry %s\" % (key, self._name))\n if not callable(value):\n raise ValueError(\"value must be callable\")\n self.validate(key, value)\n self._registry[key] = value\n self.on_set(key, value)", "docstring": "Validate, set, and (if successful) call `on_set` for the given item.\n\nArgs:\n key: key to store value under. 
If `None`, `self.default_key(value)` is\n used.\n value: callable stored under the given key.\n\nRaises:\n KeyError: if key is already in registry.", "source": "juraj_google_style"} -{"code": "def paracrawl_v3_pairs(paracrawl_file):\n\n raw_sentences = _raw_sentences(paracrawl_file)\n for s_en in raw_sentences:\n try:\n s_xx = next(raw_sentences)\n if s_en and s_xx: # Prevent empty string examples.\n yield s_en, s_xx\n except StopIteration:\n tf.logging.error(\n 'Unmatched final sentence while reading in sentence pairs: [%s]',\n s_en)", "docstring": "Generates raw (English, other) pairs from a ParaCrawl V3.0 data file.\n\nArgs:\n paracrawl_file: A ParaCrawl V3.0 en-.. data file.\n\nYields:\n Pairs of (sentence_en, sentence_xx), as Unicode strings.\n\nRaises:\n StopIteration: If the file ends while this method is in the middle of\n creating a translation pair.", "source": "juraj_google_style"} -{"code": "def ParseTab(page, bbox, columns = None):\n\n tab_rect = fitz.Rect(bbox).irect\n xmin, ymin, xmax, ymax = tuple(tab_rect)\n\n if tab_rect.isEmpty or tab_rect.isInfinite:\n print(\"Warning: incorrect rectangle coordinates!\")\n return []\n\n if type(columns) is not list or columns == []:\n coltab = [tab_rect.x0, tab_rect.x1]\n else:\n coltab = sorted(columns)\n\n if xmin < min(coltab):\n coltab.insert(0, xmin)\n if xmax > coltab[-1]:\n coltab.append(xmax)\n\n words = page.getTextWords()\n\n if words == []:\n print(\"Warning: page contains no text\")\n return []\n\n alltxt = []\n\n # get words contained in table rectangle and distribute them into columns\n for w in words:\n ir = fitz.Rect(w[:4]).irect # word rectangle\n if ir in tab_rect:\n cnr = 0 # column index\n for i in range(1, len(coltab)): # loop over column coordinates\n if ir.x0 < coltab[i]: # word start left of column border\n cnr = i - 1\n break\n alltxt.append([ir.x0, ir.y0, ir.x1, cnr, w[4]])\n\n if alltxt == []:\n print(\"Warning: no text found in rectangle!\")\n return []\n\n alltxt.sort(key = itemgetter(1)) # sort words vertically\n\n # create the table / matrix\n spantab = [] # the output matrix\n\n for y, zeile in groupby(alltxt, itemgetter(1)):\n schema = [\"\"] * (len(coltab) - 1)\n for c, words in groupby(zeile, itemgetter(3)):\n entry = \" \".join([w[4] for w in words])\n schema[c] = entry\n spantab.append(schema)\n\n return spantab", "docstring": "Returns the parsed table of a page in a PDF / (open) XPS / EPUB document.\n Parameters:\n page: fitz.Page object\n bbox: containing rectangle, list of numbers [xmin, ymin, xmax, ymax]\n columns: optional list of column coordinates. 
If None, columns are generated\n Returns the parsed table as a list of lists of strings.\n The number of rows is determined automatically\n from parsing the specified rectangle.", "source": "juraj_google_style"} -{"code": "def ParseOptions(cls, options, output_module):\n\n if not isinstance(output_module, mysql_4n6time.MySQL4n6TimeOutputModule):\n raise errors.BadConfigObject(\n 'Output module is not an instance of MySQL4n6TimeOutputModule')\n\n MySQL4n6TimeDatabaseArgumentsHelper.ParseOptions(options, output_module)\n shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.ParseOptions(\n options, output_module)", "docstring": "Parses and validates options.\n\nArgs:\n options (argparse.Namespace): parser options.\n output_module (OutputModule): output module to configure.\n\nRaises:\n BadConfigObject: when the output module object is of the wrong type.", "source": "juraj_google_style"} -{"code": "def open_file_for_write(filepath, mode=None):\n\n stream = StringIO()\n yield stream\n content = stream.getvalue()\n\n filepath = os.path.realpath(filepath)\n tmpdir = tmpdir_manager.mkdtemp()\n cache_filepath = os.path.join(tmpdir, os.path.basename(filepath))\n\n debug_print(\"Writing to %s (local cache of %s)\", cache_filepath, filepath)\n\n with atomic_write(filepath, overwrite=True) as f:\n f.write(content)\n\n if mode is not None:\n os.chmod(filepath, mode)\n\n with open(cache_filepath, 'w') as f:\n f.write(content)\n\n file_cache[filepath] = cache_filepath", "docstring": "Writes both to given filepath, and tmpdir location.\n\n This is to get around the problem with some NFS's where immediately reading\n a file that has just been written is problematic. Instead, any files that we\n write, we also write to /tmp, and reads of these files are redirected there.\n\nArgs:\n filepath (str): File to write.\n mode (int): Same mode arg as you would pass to `os.chmod`.\n\nYields:\n File-like object.", "source": "juraj_google_style"} -{"code": "def __init__(self, num_experts, gates):\n\n self._gates = gates\n self._num_experts = num_experts\n\n where = tf.to_int32(tf.where(tf.transpose(gates) > 0))\n self._expert_index, self._batch_index = tf.unstack(where, num=2, axis=1)\n self._part_sizes_tensor = tf.reduce_sum(tf.to_int32(gates > 0), [0])\n self._nonzero_gates = tf.gather(\n tf.reshape(self._gates, [-1]),\n self._batch_index * num_experts + self._expert_index)", "docstring": "Create a SparseDispatcher.\n\nArgs:\n num_experts: an integer.\n gates: a `Tensor` of shape `[batch_size, num_experts]`.\n\nReturns:\n a SparseDispatcher", "source": "juraj_google_style"} -{"code": "def __init__(self, instance_id: str = None):\n\n self.instance_id = instance_id\n if instance_id:\n self.channel_id += \"#\" + instance_id", "docstring": "Initialize the channel.\n Inherited initializer must call the \"super init\" method\n at the beginning.\n\nArgs:\n instance_id: Instance ID of the channel.", "source": "juraj_google_style"} -{"code": "def get_data(name, train_batch_size, test_batch_size):\n\n if name not in ['mnist', 'cifar10']:\n raise ValueError(\n 'Expected dataset \\'mnist\\' or \\'cifar10\\', but got %s' % name)\n dataset = getattr(tf.keras.datasets, name)\n num_classes = 10\n\n # Extract the raw data.\n raw_data = dataset.load_data()\n (images_train, labels_train), (images_test, labels_test) = raw_data\n\n # Normalize inputs and fix types.\n images_train = images_train.astype(np.float32) / 255.\n images_test = images_test.astype(np.float32) / 255.\n labels_train = labels_train.astype(np.int32).squeeze()\n 
labels_test = labels_test.astype(np.int32).squeeze()\n\n # Add a dummy 'color channel' dimension if it is not present.\n if images_train.ndim == 3:\n images_train = np.expand_dims(images_train, -1)\n images_test = np.expand_dims(images_test, -1)\n\n # Put the data onto the graph as constants.\n train_data = tf.data.Dataset.from_tensor_slices((images_train, labels_train))\n test_data = tf.data.Dataset.from_tensor_slices((images_test, labels_test))\n\n # Create iterators for each dataset.\n train_iterator = (\n train_data\n # Note: For larger datasets e.g. ImageNet, it will not be feasible to have\n # a shuffle buffer this large.\n .shuffle(buffer_size=len(images_train))\n .batch(train_batch_size)\n .repeat()\n .make_one_shot_iterator()\n )\n test_iterator = test_data.batch(test_batch_size).make_initializable_iterator()\n return dict(\n train_iterator=train_iterator,\n test_iterator=test_iterator,\n num_classes=num_classes)", "docstring": "Gets training and testing dataset iterators.\n\nArgs:\n name: String. Name of dataset, either 'mnist' or 'cifar10'.\n train_batch_size: Integer. Batch size for training.\n test_batch_size: Integer. Batch size for testing.\n\nReturns:\n Dict containing:\n train_iterator: A tf.data.Iterator, over training data.\n test_iterator: A tf.data.Iterator, over test data.\n num_classes: Integer. Number of class labels.", "source": "juraj_google_style"} -{"code": "def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):\n\n # Get the AdGroupAdService\n adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809')\n\n expanded_text_ad = {\n 'xsi_type': 'ExpandedTextAd',\n 'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name,\n 'headlinePart2': 'Only {=%s.Price}' % feed_name,\n 'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name,\n 'finalUrls': ['http://www.example.com'],\n }\n\n # We add the same ad to both ad groups. 
When they serve, they will show\n # different values, since they match different feed items.\n operations = [{\n 'operator': 'ADD',\n 'operand': {\n 'adGroupId': adgroup,\n 'ad': expanded_text_ad\n }\n } for adgroup in adgroup_ids]\n\n response = adgroup_ad_service.mutate(operations)\n\n if response and 'value' in response:\n for ad in response['value']:\n print ('Created an ad with ID \"%s\", type \"%s\", and status \"%s\".'\n % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))\n else:\n raise errors.GoogleAdsError('No ads were added.')", "docstring": "Creates ExpandedTextAds that use ad customizations for specified AdGroups.\n\nArgs:\n client: an AdWordsClient instance.\n adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.\n feed_name: the name of the feed used to apply customizations.\n\nRaises:\n GoogleAdsError: if no ExpandedTextAds were added.", "source": "juraj_google_style"} -{"code": "def read_submissions_from_directory(dirname, use_gpu):\n\n result = []\n for sub_dir in os.listdir(dirname):\n submission_path = os.path.join(dirname, sub_dir)\n try:\n if not os.path.isdir(submission_path):\n continue\n if not os.path.exists(os.path.join(submission_path, 'metadata.json')):\n continue\n with open(os.path.join(submission_path, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n submission_type = metadata['type']\n if submission_type == 'attack' or submission_type == 'targeted_attack':\n submission = Attack(submission_path, container, entry_point, use_gpu)\n elif submission_type == 'defense':\n submission = Defense(submission_path, container, entry_point, use_gpu)\n else:\n raise ValueError('Invalid type of submission: %s' % submission_type)\n result.append(submission)\n except (IOError, KeyError, ValueError):\n print('Failed to read submission from directory ', submission_path)\n return result", "docstring": "Scans directory and read all submissions.\n\nArgs:\n dirname: directory to scan.\n use_gpu: whether submissions should use GPU. 
This argument is\n used to pick proper Docker container for each submission and create\n instance of Attack or Defense class.\n\nReturns:\n List with submissions (subclasses of Submission class).", "source": "juraj_google_style"} -{"code": "def getDescriptor(self, desc_type, desc_index, length, endpoint = -1):\n r\n return control.get_descriptor(self.dev, length, desc_type, desc_index)", "docstring": "r\"\"\"Retrieves a descriptor from the device identified by the type\n and index of the descriptor.\n\nArgs:\n desc_type: descriptor type.\n desc_index: index of the descriptor.\n len: descriptor length.\n endpoint: ignored.", "source": "juraj_google_style"} -{"code": "def _parse_local_interface(self, config):\n\n match = re.search(r'local-interface (\\w+)', config)\n value = match.group(1) if match else None\n return dict(local_interface=value)", "docstring": "Scans the config block and parses the local-interface value\n\nArgs:\n config (str): The config block to scan\n\nReturns:\n dict: A dict object that is intended to be merged into the\n resource dict", "source": "juraj_google_style"} -{"code": "def __init__(self, text: str, name: YangIdentifier = None, rev: str = None):\n\n super().__init__(text)\n self.name = name\n self.rev = rev", "docstring": "Initialize the parser instance.\n\nArgs:\n name: Expected module name.\n rev: Expected revision date.", "source": "juraj_google_style"} -{"code": "def get_help_datapacks(filepath, prefix=\"!\"):\n\n\n help_contents = get_help_data(filepath)\n\n datapacks = []\n\n # Add the content\n for d in help_contents:\n heading = d\n content = \"\"\n\n if \"commands\" in d.lower():\n for c in help_contents[d]:\n if \"name\" not in c:\n continue\n\n content += \"- `\"\n command = prefix + c[\"name\"]\n content += \"{}\".format(command)\n if \"params\" in c:\n for param in c[\"params\"]:\n content += \" [{}]\".format(param)\n content += \"`: \"\n if \"description\" in c:\n content += c[\"description\"]\n content += \"\\n\"\n else:\n content += help_contents[d]\n\n datapacks.append((heading, content, False))\n\n return datapacks", "docstring": "Load help text from a file and give it as datapacks\n\nArgs:\n filepath (str): The file to load help text from\n prefix (str): The prefix to use for commands\n\nReturns:\n datapacks (list): The datapacks from the file", "source": "juraj_google_style"} -{"code": "def md5sum(self, f):\n\n m = hashlib.md5()\n fh = open(f, 'r')\n while 1:\n chunk = fh.read(BUF_SIZE)\n if not chunk: break\n m.update(chunk)\n fh.close()\n return m.hexdigest()", "docstring": "md5sums a file, returning the hex digest\n\n Parameters:\n - f filename string", "source": "juraj_google_style"} -{"code": "def ticker(self, contract: Contract) -> Ticker:\n\n return self.wrapper.tickers.get(id(contract))", "docstring": "Get ticker of the given contract. It must have been requested before\n with reqMktData with the same contract object. 
The ticker may not be\n ready yet if called directly after :meth:`.reqMktData`.\n\nArgs:\n contract: Contract to get ticker for.", "source": "juraj_google_style"} -{"code": "def get_multiplicon_seeds(self, redundant=False):\n\n for node in self._multiplicon_graph.nodes():\n if not len(self._multiplicon_graph.in_edges(node)):\n if not self.is_redundant_multiplicon(node):\n yield node\n elif redundant:\n yield node\n else:\n continue\n else:\n continue", "docstring": "Return a generator of the IDs of multiplicons that are initial\n seeding 'pairs' in level 2 multiplicons.\n\nArgs:\n o redundant - if true, report redundant multiplicons", "source": "juraj_google_style"} -{"code": "def vsiprefix(path):\n\n vpath = path.lower()\n scheme = VSI_SCHEMES.get(urlparse(vpath).scheme, '')\n for ext in VSI_TYPES:\n if ext in vpath:\n filesys = VSI_TYPES[ext]\n break\n else:\n filesys = ''\n if filesys and scheme:\n filesys = filesys[:-1]\n return ''.join((filesys, scheme, path))", "docstring": "Returns a GDAL virtual filesystem prefixed path.\n\nArgs:\n path -- file path as str", "source": "juraj_google_style"} -{"code": "def _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None):\n\n shape1 = convert_to_shape(shape1)\n shape2 = convert_to_shape(shape2)\n given_output_shape = convert_to_shape(given_output_shape)\n if given_output_shape is not None:\n return given_output_shape\n if is_subsequence(shape1.dims, shape2.dims):\n return shape2\n if is_subsequence(shape2.dims, shape1.dims):\n return shape1\n return Shape(\n shape1.dims + [d for d in shape2.dims if d not in shape1.dims])", "docstring": "Infer shape of the output of a binary op with broadcasting.\n\n If the output shape is not given with given_output_shape, then we check\n to see if one of the shapes is a subsequence of the other one, and we\n return the one that is the supersequence. Otherwise, we list the dimensions\n of shape1, followed by all new dimensions in shape2.\n\nArgs:\n shape1: a Shape\n shape2: a Shape\n given_output_shape: an optional Shape\n\nReturns:\n a Shape", "source": "juraj_google_style"} -{"code": "def onTagDel(self, name, func):\n\n if '*' in name:\n self.ontagdelglobs.add(name, func)\n else:\n self.ontagdels[name].append(func)", "docstring": "Register a callback for tag deletion.\n\nArgs:\n name (str): The name of the tag or tag glob.\n func (function): The callback func(node, tagname, tagval).", "source": "juraj_google_style"} -{"code": "def add_user(\n self, user, first_name=None, last_name=None, email=None, password=None):\n\n self.service.add_user(\n user, first_name, last_name, email, password,\n self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Add a new user.\n\nArgs:\n user (string): User name.\n first_name (optional[string]): User's first name. Defaults to None.\n last_name (optional[string]): User's last name. Defaults to None.\n email: (optional[string]): User's email address. Defaults to None.\n password: (optional[string]): User's password. 
Defaults to None.\n\nRaises:\n requests.HTTPError on failure.", "source": "juraj_google_style"} -{"code": "def send_global(self, room: str, message: Message) -> None:\n\n self._global_send_queue.put((room, message))\n self._global_send_event.set()", "docstring": "Sends a message to one of the global rooms\n\n These rooms aren't being listened on and therefore no reply could be heard, so these\n messages are sent in a send-and-forget async way.\n The actual room name is composed from the suffix given as parameter and chain name or id\n e.g.: raiden_ropsten_discovery\n Params:\n room: name suffix as passed in config['global_rooms'] list\n message: Message instance to be serialized and sent", "source": "juraj_google_style"} -{"code": "def _output_dir(\n self,\n ext,\n is_instance=False,\n interpolatable=False,\n autohinted=False,\n is_variable=False,\n ):\n\n\n assert not (is_variable and any([is_instance, interpolatable]))\n # FIXME? Use user configurable destination folders.\n if is_variable:\n dir_prefix = \"variable_\"\n elif is_instance:\n dir_prefix = \"instance_\"\n else:\n dir_prefix = \"master_\"\n dir_suffix = \"_interpolatable\" if interpolatable else \"\"\n output_dir = dir_prefix + ext + dir_suffix\n if autohinted:\n output_dir = os.path.join(\"autohinted\", output_dir)\n return output_dir", "docstring": "Generate an output directory.\n\nArgs:\n ext: extension string.\n is_instance: The output is instance font or not.\n interpolatable: The output is interpolatable or not.\n autohinted: The output is autohinted or not.\n is_variable: The output is variable font or not.\n\nReturns:\n output directory string.", "source": "juraj_google_style"} -{"code": "def _data_from_df(df):\n\n _df = df.copy()\n\n # Flatten columns\n if isinstance(df.columns, pd.MultiIndex):\n try:\n _df.columns = ['_'.join(col) for col in _df.columns.values]\n except TypeError:\n raise TypeError('Could not flatten MultiIndex columns. 
'\n 'use string column names or flatten manually')\n # Transform columns CategoricalIndex in list\n if isinstance(df.columns, pd.CategoricalIndex):\n _df.columns = df.columns.tolist()\n # Flatten index\n index_name = ColumnDataSource._df_index_name(df)\n if index_name == 'index':\n _df.index = pd.Index(_df.index.values)\n else:\n _df.index = pd.Index(_df.index.values, name=index_name)\n _df.reset_index(inplace=True)\n\n tmp_data = {c: v.values for c, v in _df.iteritems()}\n\n new_data = {}\n for k, v in tmp_data.items():\n new_data[k] = v\n\n return new_data", "docstring": "Create a ``dict`` of columns from a Pandas ``DataFrame``,\n suitable for creating a ColumnDataSource.\n\nArgs:\n df (DataFrame) : data to convert\n\nReturns:\n dict[str, np.array]", "source": "juraj_google_style"} -{"code": "def format_comment(comment_data):\n\n format_pieces = []\n # Line and column information\n if 'line' in comment_data:\n format_pieces.append('line {line}')\n if 'column' in comment_data:\n if format_pieces:\n format_pieces.append(', ')\n format_pieces.append('col {column}')\n if format_pieces:\n format_pieces.append(': ')\n\n # Severity and Id information\n if 'severity' in comment_data:\n format_pieces.append('{severity}: ')\n\n if 'message_id' in comment_data:\n format_pieces.append('[{message_id}]: ')\n\n # The message\n if 'message' in comment_data:\n format_pieces.append('{message}')\n\n return ''.join(format_pieces).format(**comment_data)", "docstring": "Formats the data returned by the linters.\n\n Given a dictionary with the fields: line, column, severity, message_id,\n message, will generate a message like:\n\n 'line {line}, col {column}: {severity}: [{message_id}]: {message}'\n\n Any of the fields may nbe absent.\n\nArgs:\n comment_data: dictionary with the linter data.\n\nReturns:\n a string with the formatted message.", "source": "juraj_google_style"} -{"code": "def setUpMethods(self, port):\n\n assert isinstance(port, WSDLTools.Port), \\\n 'expecting WSDLTools.Port not: ' %type(port)\n\n binding = port.getBinding()\n portType = port.getPortType()\n service = port.getService()\n s = self._services[service.name]\n for bop in binding.operations:\n try:\n op = portType.operations[bop.name]\n except KeyError, ex:\n raise WsdlGeneratorError,\\\n 'Port(%s) PortType(%s) missing operation(%s) defined in Binding(%s)' \\\n %(port.name, portType.name, op.name, binding.name)\n\n soap_action = wsaction_in = wsaction_out = None\n if op.input is not None:\n wsaction_in = op.getInputAction()\n if op.output is not None:\n wsaction_out = op.getOutputAction()\n\n for ext in bop.extensions:\n if isinstance(ext, WSDLTools.SoapOperationBinding) is False: continue\n soap_action = ext.soapAction\n if not soap_action: break\n if wsaction_in is None: break\n if wsaction_in == soap_action: break\n if self.strict is False:\n warnings.warn(\\\n 'Port(%s) operation(%s) in Binding(%s) soapAction(%s) != WS-Action(%s)' \\\n %(port.name, op.name, binding.name, soap_action, wsaction_in),\n )\n break\n raise WsdlGeneratorError,\\\n 'Port(%s) operation(%s) in Binding(%s) soapAction(%s) MUST match WS-Action(%s)' \\\n %(port.name, op.name, binding.name, soap_action, wsaction_in)\n\n method_name = self.getMethodName(op.name)\n\n m = s.newMethod()\n print >>m, '%sdef %s(self, ps, address):' %(self.getIndent(level=1), method_name)\n\n msgin_name = msgout_name = None\n msgin,msgout = op.getInputMessage(),op.getOutputMessage()\n if msgin is not None: \n msgin_name = TextProtect(msgin.name)\n if msgout is not None: \n msgout_name = 
TextProtect(msgout.name)\n\n indent = self.getIndent(level=2)\n for l in self.createMethodBody(msgin_name, msgout_name):\n print >>m, indent + l\n\n print >>m, ''\n print >>m, '%ssoapAction[\\'%s\\'] = \\'%s\\'' %(self.getIndent(level=1), wsaction_in, method_name)\n print >>m, '%swsAction[\\'%s\\'] = \\'%s\\'' %(self.getIndent(level=1), method_name, wsaction_out)\n print >>m, '%sroot[(%s.typecode.nspname,%s.typecode.pname)] = \\'%s\\'' \\\n %(self.getIndent(level=1), msgin_name, msgin_name, method_name)", "docstring": "set up all methods representing the port operations.\n Parameters:\n port -- Port that defines the operations.", "source": "juraj_google_style"} -{"code": "def export(self, top=True):\n\n out = []\n if top:\n out.append(self._internal_name)\n out.append(self._to_str(self.holiday_name))\n out.append(self._to_str(self.holiday_day))\n return \",\".join(out)", "docstring": "Exports object to its string representation.\n\nArgs:\n top (bool): if True appends `internal_name` before values.\n All non list objects should be exported with value top=True,\n all list objects, that are embedded in as fields inlist objects\n should be exported with `top`=False\n\nReturns:\n str: The objects string representation", "source": "juraj_google_style"} -{"code": "def space(self, newlines=1):\n\n space = Space()\n for line in range(newlines):\n space.add_line('\\n')\n self._container.structure.insert(self._idx, space)\n self._idx += 1\n return self", "docstring": "Creates a vertical space of newlines\n\nArgs:\n newlines (int): number of empty lines\n\nReturns:\n self for chaining", "source": "juraj_google_style"} -{"code": "def parse_delete(prs, conn):\n\n prs_delete = prs.add_parser(\n 'delete', help='delete a record of specific zone')\n set_option(prs_delete, 'domain')\n conn_options(prs_delete, conn)\n prs_delete.set_defaults(func=delete)", "docstring": "Delete record.\n\nArgs:\n prs: parser object of argparse\n conn: dictionary of connection information", "source": "juraj_google_style"} -{"code": "def get_accounts_for_service(cls, service_type):\n\n return [\n a for a in cls.get_accounts().values()\n if a.service_type == service_type\n ]", "docstring": "Get a list of accounts for a given music service.\n\nArgs:\n service_type (str): The service_type to use.\n\nReturns:\n list: A list of `Account` instances.", "source": "juraj_google_style"} -{"code": "def normalize(self, mode=\"max\", value=1):\n\n if mode.lower() == \"sum\":\n factor = np.sum(self.y, axis=0)\n elif mode.lower() == \"max\":\n factor = np.max(self.y, axis=0)\n else:\n raise ValueError(\"Unsupported normalization mode %s!\" % mode)\n\n self.y /= factor / value", "docstring": "Normalize the spectrum with respect to the sum of intensity\n\nArgs:\n mode (str): Normalization mode. Supported modes are \"max\" (set the\n max y value to value, e.g., in XRD patterns), \"sum\" (set the\n sum of y to a value, i.e., like a probability density).\n value (float): Value to normalize to. 
Defaults to 1.", "source": "juraj_google_style"} -{"code": "def serve_doc(app, url):\n\n @app.route(url, doc=False)\n def index(env, req):\n ret = ''\n for d in env['doc']:\n ret += 'URL: {url}, supported methods: {methods}{doc}\\n'.format(**d)\n return ret", "docstring": "Serve API documentation extracted from request handler docstrings\n\n Parameters:\n * app: Grole application object\n * url: URL to serve at", "source": "juraj_google_style"} -{"code": "def __init__(self, output_mediator):\n\n super(ElasticsearchOutputModule, self).__init__(output_mediator)\n self._raw_fields = False", "docstring": "Initializes an Elasticsearch output module.\n\nArgs:\n output_mediator (OutputMediator): mediates interactions between output\n modules and other components, such as storage and dfvfs.", "source": "juraj_google_style"} -{"code": "def _ParseLogLine(self, parser_mediator, structure):\n\n if not self._xchat_year:\n return\n\n time_elements_tuple = self._GetTimeElementsTuple(structure)\n\n try:\n date_time = dfdatetime_time_elements.TimeElements(\n time_elements_tuple=time_elements_tuple)\n date_time.is_local_time = True\n except ValueError:\n parser_mediator.ProduceExtractionWarning(\n 'invalid date time value: {0!s}'.format(structure.date_time))\n return\n\n self._last_month = time_elements_tuple[1]\n\n event_data = XChatLogEventData()\n event_data.nickname = structure.nickname\n # The text string contains multiple unnecessary whitespaces that need to\n # be removed, thus the split and re-join.\n event_data.text = ' '.join(structure.text.split())\n\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_ADDED,\n time_zone=parser_mediator.timezone)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a log line.\n\nArgs:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n structure (pyparsing.ParseResults): structure of tokens derived from\n a line of a text file.", "source": "juraj_google_style"} -{"code": "def list_deployment_operations(access_token, subscription_id, rg_name, deployment_name):\n\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourcegroups/', rg_name,\n '/providers/Microsoft.Resources/deployments/', deployment_name,\n '/operations',\n '?api-version=', BASE_API])\n return do_get(endpoint, access_token)", "docstring": "List all operations involved in a given deployment.\n\nArgs:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n rg_name (str): Azure resource group name.\n\nReturns:\n HTTP response. 
JSON body.", "source": "juraj_google_style"} -{"code": "def framebuffer(self, color_attachments=(), depth_attachment=None) -> 'Framebuffer':\n\n\n if type(color_attachments) is Texture or type(color_attachments) is Renderbuffer:\n color_attachments = (color_attachments,)\n\n ca_mglo = tuple(x.mglo for x in color_attachments)\n da_mglo = None if depth_attachment is None else depth_attachment.mglo\n\n res = Framebuffer.__new__(Framebuffer)\n res.mglo, res._size, res._samples, res._glo = self.mglo.framebuffer(ca_mglo, da_mglo)\n res._color_attachments = tuple(color_attachments)\n res._depth_attachment = depth_attachment\n res.ctx = self\n res.extra = None\n return res", "docstring": "A :py:class:`Framebuffer` is a collection of buffers that can be used as the destination for rendering.\n The buffers for Framebuffer objects reference images from either Textures or Renderbuffers.\n\nArgs:\n color_attachments (list): A list of :py:class:`Texture` or :py:class:`Renderbuffer` objects.\n depth_attachment (Renderbuffer or Texture): The depth attachment.\n\nReturns:\n :py:class:`Framebuffer` object", "source": "juraj_google_style"} -{"code": "def load_state(self, in_path):\n\n\n with open(in_path, \"r\") as infile:\n state = json.load(infile)\n\n self.restore_state(state)", "docstring": "Load the current state of this emulated object from a file.\n\n The file should have been produced by a previous call to save_state.\n\nArgs:\n in_path (str): The path to the saved state dump that you wish\n to load.", "source": "juraj_google_style"} -{"code": "def upload(s3_conn, filepath, s3_path):\n\n bucket_name, prefix = split_s3_path(s3_path)\n bucket = s3_conn.get_bucket(bucket_name)\n filename = os.path.basename(filepath)\n\n key = boto.s3.key.Key(\n bucket=bucket,\n name='{}/{}'.format(prefix, filename)\n )\n logging.info('uploading from %s to %s', filepath, key)\n key.set_contents_from_filename(filepath)", "docstring": "Uploads the given file to s3\n\nArgs:\n s3_conn: (boto.s3.connection) an s3 connection\n filepath (str) the local filename\n s3_path (str) the destination path on s3", "source": "juraj_google_style"} -{"code": "def Collect(\n self, knowledge_base, artifact_definition, searcher):\n\n for source in artifact_definition.sources:\n if source.type_indicator not in (\n artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY,\n artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):\n continue\n\n if source.type_indicator == (\n artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):\n key_value_pairs = [{'key': key} for key in source.keys]\n else:\n key_value_pairs = source.key_value_pairs\n\n for key_value_pair in key_value_pairs:\n key_path = key_value_pair['key']\n\n # The artifact definitions currently incorrectly define\n # CurrentControlSet so we correct it here for now.\n # Also see: https://github.com/ForensicArtifacts/artifacts/issues/120\n key_path_upper = key_path.upper()\n if key_path_upper.startswith('%%CURRENT_CONTROL_SET%%'):\n key_path = '{0:s}{1:s}'.format(\n 'HKEY_LOCAL_MACHINE\\\\System\\\\CurrentControlSet', key_path[23:])\n\n find_spec = registry_searcher.FindSpec(key_path_glob=key_path)\n\n for key_path in searcher.Find(find_specs=[find_spec]):\n try:\n registry_key = searcher.GetKeyByPath(key_path)\n except IOError as exception:\n raise errors.PreProcessFail((\n 'Unable to retrieve Windows Registry key: {0:s} with error: '\n '{1!s}').format(key_path, exception))\n\n if registry_key:\n value_name = key_value_pair.get('value', None)\n self._ParseKey(knowledge_base, 
registry_key, value_name)", "docstring": "Collects values using a Windows Registry value artifact definition.\n\nArgs:\n knowledge_base (KnowledgeBase): to fill with preprocessing information.\n artifact_definition (artifacts.ArtifactDefinition): artifact definition.\n searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to\n preprocess the Windows Registry.\n\nRaises:\n PreProcessFail: if the Windows Registry key or value cannot be read.", "source": "juraj_google_style"} -{"code": "def _check_property(self, rest=None, require_indexed=True):\n\n if require_indexed and not self._indexed:\n raise InvalidPropertyError('Property is unindexed %s' % self._name)\n if rest:\n raise InvalidPropertyError('Referencing subproperty %s.%s '\n 'but %s is not a structured property' %\n (self._name, rest, self._name))", "docstring": "Internal helper to check this property for specific requirements.\n\n Called by Model._check_properties().\n\nArgs:\n rest: Optional subproperty to check, of the form 'name1.name2...nameN'.\n\nRaises:\n InvalidPropertyError if this property does not meet the given\n requirements or if a subproperty is specified. (StructuredProperty\n overrides this method to handle subproperties.)", "source": "juraj_google_style"} -{"code": "def load_bmp(path):\n\n surface = object.__new__(Surface)\n surface._ptr = check_ptr_err(lib.SDL_LoadBMP_RW(lib.SDL_RWFromFile(path, \"rb\"), 1))\n return surface", "docstring": "Load a surface from a file.\n\nArgs:\n path (str): Path to the BMP file to load.\n\nReturns:\n Surface: A surface containing the pixels loaded from the file.\n\nRaises:\n SDLError: If the file cannot be loaded.", "source": "juraj_google_style"} -{"code": "def trace_region_count(self):\n\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return data.value", "docstring": "Retrieves a count of the number of available trace regions.\n\nArgs:\n self (JLink): the ``JLink`` instance.\n\nReturns:\n Count of the number of available trace regions.", "source": "juraj_google_style"} -{"code": "def _add_wire(self, wire):\n\n if wire not in self.wires:\n self.wires.append(wire)\n self._max_node_id += 1\n input_map_wire = self.input_map[wire] = self._max_node_id\n\n self._max_node_id += 1\n output_map_wire = self._max_node_id\n\n wire_name = \"%s[%s]\" % (wire[0].name, wire[1])\n\n inp_node = DAGNode(data_dict={'type': 'in', 'name': wire_name, 'wire': wire},\n nid=input_map_wire)\n outp_node = DAGNode(data_dict={'type': 'out', 'name': wire_name, 'wire': wire},\n nid=output_map_wire)\n self._id_to_node[input_map_wire] = inp_node\n self._id_to_node[output_map_wire] = outp_node\n\n self.input_map[wire] = inp_node\n self.output_map[wire] = outp_node\n\n self._multi_graph.add_node(inp_node)\n self._multi_graph.add_node(outp_node)\n\n self._multi_graph.add_edge(inp_node,\n outp_node)\n\n self._multi_graph.adj[inp_node][outp_node][0][\"name\"] \\\n = \"%s[%s]\" % (wire[0].name, wire[1])\n self._multi_graph.adj[inp_node][outp_node][0][\"wire\"] \\\n = wire\n else:\n raise DAGCircuitError(\"duplicate wire %s\" % (wire,))", "docstring": "Add a qubit or bit to the circuit.\n\nArgs:\n wire (tuple): (Register,int) containing a register instance and index\n This adds a pair of in and out nodes connected by an edge.\n\nRaises:\n DAGCircuitError: if trying to add duplicate wire", "source": "juraj_google_style"}