code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC):
    """Perform a perspective transform of the given PIL Image.

    Args:
        img (PIL Image): Image to be transformed.
        startpoints: Four corner points of the source quadrilateral.
        endpoints: Four corner points of the destination quadrilateral.
        interpolation: PIL resampling filter. Default: ``Image.BICUBIC``.

    Returns:
        PIL Image: Perspectively transformed image.

    Raises:
        TypeError: If ``img`` is not a PIL Image.
    """
    if (not _is_pil_image(img)):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    # Solve for the 8 perspective coefficients mapping endpoints -> startpoints.
    coeffs = _get_perspective_coeffs(startpoints, endpoints)
    return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation)
Perform perspective transform of the given PIL Image. Args: img (PIL Image): Image to be transformed. startpoints (list): four corner points of the source quadrilateral. endpoints (list): four corner points of the destination quadrilateral. interpolation: Default- Image.BICUBIC Returns: PIL Image: Perspectively transformed Image.
codesearchnet
def query_band(self, value):
    """Set the connection's query_band property.

    Args:
        value: New query_band value (string), or ``None`` to clear the
            ``query-band-spec`` attribute from the connection XML.

    Returns:
        Nothing.
    """
    self._query_band = value
    if value is not None:
        self._connectionXML.set('query-band-spec', value)
        return
    # Clearing an attribute that was never set is a silent no-op.
    try:
        del self._connectionXML.attrib['query-band-spec']
    except KeyError:
        pass
Set the connection's query_band property. Args: value: New query_band value. String. Returns: Nothing.
codesearchnet
def get_connection_string(params, hide_password=True):
    """Get a database connection string.

    The dataset extraction had truncated the ``'://'`` scheme separator;
    restored here to the standard ``driver://user:password@host:port/db`` form.

    Args:
        params (dict): database configuration with keys 'driver', 'user',
            'password', 'host', 'port', 'database' (all but 'driver' and
            'database' optional).
        hide_password (bool): if True, the password is replaced by a
            placeholder in the returned string (use this for logging).

    Returns:
        str: connection string

    Raises:
        ValueError: if 'database' is missing/None.
        RuntimeError: if a password is needed but unavailable.
    """
    connection_string = params['driver'] + '://'
    user = params.get('user', None)
    password = params.get('password', None)
    host = params.get('host', None)
    port = params.get('port', None)
    database = params.get('database', None)
    if database is None:
        raise ValueError("Field 'database' of connection parameters cannot be None.")
    if password is None and user is not None:
        # Fall back to the keyring-backed store when no password is configured.
        password = Client._get_password(params)
        if password is None:
            raise RuntimeError("Password not defined and not available in keyring.")
    if host is not None:
        if user is not None:
            connection_string += user
            if len(password) > 0:
                if hide_password:
                    connection_string += ":[password hidden]"
                else:
                    connection_string += ":" + password
            connection_string += "@"
        connection_string += host
        if port is not None:
            connection_string += ':' + str(port)
    connection_string += '/' + database
    return connection_string
Get a database connection string Args: params (dict): database configuration, as defined in :mod:`ozelot.config` hide_password (bool): if True, the password is hidden in the returned string (use this for logging purposes). Returns: str: connection string
juraj-google-style
def add_tags(self):
    """Add ``app_group`` and ``app_name`` tags to this app's security group.

    Failures from EC2 are logged as warnings, not raised (best-effort tagging).

    Returns:
        True: Upon completion (even if tagging failed and was only logged).
    """
    session = boto3.session.Session(profile_name=self.env, region_name=self.region)
    resource = session.resource('ec2')
    group_id = get_security_group_id(self.app_name, self.env, self.region)
    security_group = resource.SecurityGroup(group_id)
    try:
        tag = security_group.create_tags(DryRun=False,
                                         Tags=[{'Key': 'app_group', 'Value': self.group},
                                               {'Key': 'app_name', 'Value': self.app_name}])
        self.log.debug('Security group has been tagged: %s', tag)
    except botocore.exceptions.ClientError as error:
        # Deliberately swallowed: tagging is non-critical.
        self.log.warning(error)
    return True
Add tags to security group. Returns: True: Upon successful completion.
codesearchnet
def insert(self, iterable, index=0, data=None, weight=1.0):
    """Insert a new node into the trie.

    Args:
        iterable (hashable): key used to look the item up later.
        index (int): current position within ``iterable`` during recursion.
        data (object): data associated with the key.
        weight (float): the weight given to the item added.
    """
    if index == len(iterable):
        # End of the key: mark this node terminal and record its payload.
        self.is_terminal = True
        self.key = iterable
        self.weight = weight
        if data:
            self.data.add(data)
    else:
        token = iterable[index]
        if token not in self.children:
            self.children[token] = TrieNode()
        # Bug fix: forward `weight` through the recursion; previously it was
        # dropped, so the terminal node always ended up with the default 1.0.
        self.children[token].insert(iterable, index + 1, data, weight)
Insert new node into tree Args: iterable(hashable): key used to look the item up in the future. data(object): data associated with the key index(int): an index used for insertion. weight(float): the weight given to the item added.
codesearchnet
def int(name, default=None, allow_none=False, fallback=None):
    """Read an environment variable and coerce it to an ``int``.

    Args:
        name: The environment variable name.
        default: The default value to use if no environment variable is found.
        allow_none: If the return value can be ``None`` (i.e. optional).
        fallback: Fallback value passed through to ``read``.

    Returns:
        The integer value, or ``None`` when unset and ``allow_none`` is True.
    """
    raw_value = read(name, default, allow_none, fallback=fallback)
    if isinstance(raw_value, builtins.str):
        raw_value = raw_value.strip()
    if raw_value is None and allow_none:
        return None
    # NOTE: this module deliberately shadows the builtin `int`, hence
    # the explicit `builtins.int` call.
    return builtins.int(raw_value)
Get an integer environment value or the default. Args: name: The environment variable name default: The default value to use if no environment variable is found allow_none: If the return value can be `None` (i.e. optional) fallback: Fallback value passed through to the underlying read
juraj-google-style
def plot_state_hinton(rho, title='', figsize=None):
    """Plot a Hinton diagram for the quantum state.

    Args:
        rho (ndarray): state vector or density matrix.
        title (str): plot title.
        figsize (tuple): figure size in inches; defaults to (8, 5).

    Returns:
        matplotlib.Figure: the figure of the visualization.

    Raises:
        ImportError: if matplotlib is not installed.
    """
    if not HAS_MATPLOTLIB:
        raise ImportError('Must have Matplotlib installed.')
    rho = _validate_input_state(rho)
    if figsize is None:
        figsize = (8, 5)
    # Number of qubits, inferred from the density matrix dimension.
    num = int(np.log2(len(rho)))
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)
    # Normalize square sizes to the nearest power of two above the max magnitude.
    max_weight = 2 ** np.ceil(np.log(np.abs(rho).max()) / np.log(2))
    datareal = np.real(rho)
    dataimag = np.imag(rho)
    column_names = [bin(i)[2:].zfill(num) for i in range(2**num)]
    row_names = [bin(i)[2:].zfill(num) for i in range(2**num)]
    lx = len(datareal[0])
    ly = len(datareal[:, 0])
    # Left panel: real part. White squares = positive, black = negative;
    # square area encodes magnitude.
    ax1.patch.set_facecolor('gray')
    ax1.set_aspect('equal', 'box')
    ax1.xaxis.set_major_locator(plt.NullLocator())
    ax1.yaxis.set_major_locator(plt.NullLocator())
    for (x, y), w in np.ndenumerate(datareal):
        color = 'white' if w > 0 else 'black'
        size = np.sqrt(np.abs(w) / max_weight)
        rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
                             facecolor=color, edgecolor=color)
        ax1.add_patch(rect)
    ax1.set_xticks(np.arange(0, lx+0.5, 1))
    ax1.set_yticks(np.arange(0, ly+0.5, 1))
    ax1.set_yticklabels(row_names, fontsize=14)
    ax1.set_xticklabels(column_names, fontsize=14, rotation=90)
    ax1.autoscale_view()
    ax1.invert_yaxis()
    ax1.set_title('Real[rho]', fontsize=14)
    # Right panel: imaginary part.
    ax2.patch.set_facecolor('gray')
    ax2.set_aspect('equal', 'box')
    ax2.xaxis.set_major_locator(plt.NullLocator())
    ax2.yaxis.set_major_locator(plt.NullLocator())
    for (x, y), w in np.ndenumerate(dataimag):
        color = 'white' if w > 0 else 'black'
        size = np.sqrt(np.abs(w) / max_weight)
        rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
                             facecolor=color, edgecolor=color)
        ax2.add_patch(rect)
    # Only label the imaginary panel when it is non-trivial.
    # NOTE(review): source was whitespace-flattened; the extent of this `if`
    # block (labels inside, autoscale/title outside) is inferred — confirm
    # against the original qiskit source.
    if np.any(dataimag != 0):
        ax2.set_xticks(np.arange(0, lx+0.5, 1))
        ax2.set_yticks(np.arange(0, ly+0.5, 1))
        ax2.set_yticklabels(row_names, fontsize=14)
        ax2.set_xticklabels(column_names, fontsize=14, rotation=90)
    ax2.autoscale_view()
    ax2.invert_yaxis()
    ax2.set_title('Imag[rho]', fontsize=14)
    if title:
        fig.suptitle(title, fontsize=16)
    plt.tight_layout()
    # Close so the figure is not shown twice in interactive backends;
    # the caller still receives the Figure object.
    plt.close(fig)
    return fig
Plot a hinton diagram for the quantum state. Args: rho (ndarray): Numpy array for state vector or density matrix. title (str): a string that represents the plot title figsize (tuple): Figure size in inches. Returns: matplotlib.Figure: The matplotlib.Figure of the visualization Raises: ImportError: Requires matplotlib.
juraj-google-style
def save_image(tensor, filename, nrow=8, padding=2, normalize=False,
               range=None, scale_each=False, pad_value=0):
    """Save a given Tensor into an image file.

    Args:
        tensor (Tensor or list): Image(s) to save. A mini-batch is laid out
            as a grid via ``make_grid``.
        filename: destination path.
        nrow, padding, normalize, range, scale_each, pad_value: forwarded to
            ``make_grid`` (see its documentation).
    """
    from PIL import Image
    grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
                     normalize=normalize, range=range, scale_each=scale_each)
    # Map [0, 1] floats to [0, 255] uint8 with rounding (the +0.5 before clamp),
    # and move channels last for PIL (HWC).
    ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    im.save(filename)
Save a given Tensor into an image file. Args: tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, saves the tensor as a grid of images by calling ``make_grid``. **kwargs: Other arguments are documented in ``make_grid``.
juraj-google-style
def get_token(self):
    """Retrieve the auth token, authenticating only when no token is cached.

    Returns:
        Simplenote API token as a string (bytes tokens are decoded as UTF-8).
    """
    if self.token is None:
        self.token = self.authenticate(self.username, self.password)
    try:
        # Bytes token from the API: decode to text.
        return str(self.token, 'utf-8')
    except TypeError:
        # Already a str (py3) — return unchanged.
        return self.token
Method to retrieve an auth token. The cached global token is looked up and returned if it exists. If it is `None` a new one is requested and returned. Returns: Simplenote API token as string
codesearchnet
def is_diagonal_scale(scale):
    """Returns `True` if `scale` is a `LinearOperator` known to be diagonal.

    Args:
        scale: `LinearOperator` instance.

    Returns:
        Python `bool`.

    Raises:
        TypeError: If `scale` is not a `LinearOperator`.
    """
    if not isinstance(scale, tf.linalg.LinearOperator):
        raise TypeError("Expected argument 'scale' to be instance of LinearOperator"
                        ". Found: %s" % scale)
    diagonal_operator_types = (tf.linalg.LinearOperatorIdentity,
                               tf.linalg.LinearOperatorScaledIdentity,
                               tf.linalg.LinearOperatorDiag)
    return isinstance(scale, diagonal_operator_types)
Returns `True` if `scale` is a `LinearOperator` that is known to be diag. Args: scale: `LinearOperator` instance. Returns: Python `bool`. Raises: TypeError: If `scale` is not a `LinearOperator`.
juraj-google-style
def __init__(self, receive_port):
    """Receives health pills from a debugger and routes them to handlers.

    Args:
        receive_port: The port at which to receive health pills from the
            TensorFlow debugger.
    """
    super(InteractiveDebuggerDataServer, self).__init__(
        receive_port, InteractiveDebuggerDataStreamHandler)
    # Channels carrying messages between the gRPC stream and the frontend.
    self._incoming_channel = queue.Queue()
    self._outgoing_channel = comm_channel_lib.CommChannel()
    # Run states consult the server's current breakpoints lazily via lambda.
    self._run_states = RunStates(breakpoints_func=lambda: self.breakpoints)
    self._tensor_store = tensor_store_lib.TensorStore()
    self._source_manager = SourceManager()
    # Re-register the servicer with a handler constructor pre-bound to the
    # shared channels/state so each stream handler shares them.
    curried_handler_constructor = functools.partial(
        InteractiveDebuggerDataStreamHandler,
        self._incoming_channel, self._outgoing_channel,
        self._run_states, self._tensor_store)
    grpc_debug_server.EventListenerBaseServicer.__init__(
        self, receive_port, curried_handler_constructor)
Receives health pills from a debugger and writes them to disk. Args: receive_port: The port at which to receive health pills from the TensorFlow debugger. always_flush: A boolean indicating whether the EventsWriter will be flushed after every write. Can be used for testing.
juraj-google-style
def add_streamer(self, binary_descriptor):
    """Add a streamer to the sensor_graph using a binary streamer descriptor.

    Args:
        binary_descriptor (bytes): An encoded binary streamer descriptor.

    Returns:
        int: A packed error code (NO_ERROR on success, or
        NO_MORE_STREAMER_RESOURCES when the graph is full).
    """
    streamer = streamer_descriptor.parse_binary_descriptor(binary_descriptor)
    try:
        self.graph.add_streamer(streamer)
        # Track status for the streamer at the index it was just assigned.
        self.streamer_status[(len(self.graph.streamers) - 1)] = StreamerStatus()
        return Error.NO_ERROR
    except ResourceUsageError:
        return _pack_sgerror(SensorGraphError.NO_MORE_STREAMER_RESOURCES)
Add a streamer to the sensor_graph using a binary streamer descriptor. Args: binary_descriptor (bytes): An encoded binary streamer descriptor. Returns: int: A packed error code
codesearchnet
def get_config_parameter(config: ConfigParser, section: str, param: str,
                         fn: Callable[[Any], Any], default: Any) -> Any:
    """Fetch parameter from a ``configparser`` ``.INI`` file.

    Args:
        config: :class:`ConfigParser` object
        section: section name within config file
        param: name of parameter within section
        fn: function to apply to the string parameter (e.g. ``int``)
        default: default value

    Returns:
        parameter value, or ``None`` if ``default is None``, or ``fn(default)``
    """
    try:
        return fn(config.get(section, param))
    except (TypeError, ValueError, NoOptionError):
        log.warning(
            "Configuration variable {} not found or improper in section [{}]; "
            "using default of {!r}", param, section, default)
        # A None default is returned as-is; otherwise coerce it like the value.
        return default if default is None else fn(default)
Fetch parameter from ``configparser`` ``.INI`` file. Args: config: :class:`ConfigParser` object section: section name within config file param: name of parameter within section fn: function to apply to string parameter (e.g. ``int``) default: default value Returns: parameter value, or ``None`` if ``default is None``, or ``fn(default)``
juraj-google-style
def log_error(cls, msg):
    """Log `msg` to both the error logger and the debug logger instances.

    Args:
        msg: `str`. The error message to log.
    """
    # Error logger first, then debug logger (order preserved deliberately).
    for emit in (cls.error_logger.error, cls.debug_logger.debug):
        emit(msg)
Logs the provided error message to both the error logger and the debug logger logging instances. Args: msg: `str`. The error message to log.
codesearchnet
def element(self, using, value):
    """Find a child element of the current element.

    Supports: Android, iOS, Web(WebView).

    Args:
        using (str): The element location strategy.
        value (str): The value for the location strategy.

    Returns:
        WebElement object.

    Raises:
        WebDriverException.
    """
    payload = {'using': using, 'value': value}
    return self._execute(Command.FIND_CHILD_ELEMENT, payload)
find an element in the current element. Support: Android iOS Web(WebView) Args: using(str): The element location strategy. value(str): The value of the location strategy. Returns: WebElement Object. Raises: WebDriverException.
codesearchnet
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns the dense `Tensor` representing this numeric feature.

    Args:
        inputs: A `_LazyBuilder` object to access inputs.
        weight_collections: Unused; no variables are created here.
        trainable: Unused; no variables are created here.

    Returns:
        Dense `Tensor` created within `_transform_feature`.
    """
    # Accepted only for interface compatibility with other columns.
    del weight_collections, trainable
    return inputs.get(self)
Returns dense `Tensor` representing numeric feature. Args: inputs: A `_LazyBuilder` object to access inputs. weight_collections: Unused `weight_collections` since no variables are created in this function. trainable: Unused `trainable` bool since no variables are created in this function. Returns: Dense `Tensor` created within `_transform_feature`.
github-repos
def stringize(self, rnf_profile):
    """Create the RNF representation of this segment.

    Args:
        rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).

    Returns:
        str: '(genome,chr,direction,left,right)' with zero-padded fields.
    """
    # Coordinates are padded to whichever is wider: the profile width or
    # the coordinates themselves.
    width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right)))
    parts = (
        str(self.genome_id).zfill(rnf_profile.genome_id_width),
        str(self.chr_id).zfill(rnf_profile.chr_id_width),
        self.direction,
        str(self.left).zfill(width),
        str(self.right).zfill(width),
    )
    return '({},{},{},{},{})'.format(*parts)
Create RNF representation of this segment. Args: rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).
codesearchnet
def get_kpoint_weights(self, kpoints, atol=1e-05):
    """Calculate the weights for a list of kpoints.

    Fixes extraction-garbled indexing (``kpts[(:, i)]`` -> ``kpts[:, i]``)
    and renames a comprehension variable that shadowed the axis index.

    Args:
        kpoints (Sequence): Sequence of kpoints (np.arrays are fine). The
            code does not check for duplicates.
        atol (float): Tolerance for fractional coordinate comparisons.

    Returns:
        List of weights, in the SAME order as kpoints.

    Raises:
        ValueError: if the kpoints cannot be matched 1:1 to the irreducible grid.
    """
    kpts = np.array(kpoints)
    shift = []
    mesh = []
    # Infer the Monkhorst-Pack mesh and shift per axis from the coordinates.
    for i in range(3):
        nonzero = [c for c in kpts[:, i] if abs(c) > 1e-05]
        if len(nonzero) != len(kpts):
            # A zero coordinate exists on this axis -> unshifted (Gamma-centred) mesh.
            if not nonzero:
                mesh.append(1)
            else:
                m = np.abs(np.round(1 / np.array(nonzero)))
                mesh.append(int(max(m)))
            shift.append(0)
        else:
            # No zero coordinate -> half-step shifted mesh.
            m = np.abs(np.round(0.5 / np.array(nonzero)))
            mesh.append(int(max(m)))
            shift.append(1)
    mapping, grid = spglib.get_ir_reciprocal_mesh(
        np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
    mapping = list(mapping)
    grid = (np.array(grid) + np.array(shift) * (0.5, 0.5, 0.5)) / mesh
    weights = []
    mapped = defaultdict(int)
    for k in kpoints:
        for i, g in enumerate(grid):
            if np.allclose(pbc_diff(k, g), (0, 0, 0), atol=atol):
                mapped[tuple(g)] += 1
                # Weight = multiplicity of this point's irreducible representative.
                weights.append(mapping.count(mapping[i]))
                break
    if len(mapped) != len(set(mapping)) or not all(v == 1 for v in mapped.values()):
        raise ValueError('Unable to find 1:1 corresponding between input kpoints and irreducible grid!')
    return [w / sum(weights) for w in weights]
Calculate the weights for a list of kpoints. Args: kpoints (Sequence): Sequence of kpoints. np.arrays is fine. Note that the code does not check that the list of kpoints provided does not contain duplicates. atol (float): Tolerance for fractional coordinates comparisons. Returns: List of weights, in the SAME order as kpoints.
codesearchnet
def additive_coupling(name, x, mid_channels=512, reverse=False, activation="relu", dropout=0.0):
    """Reversible additive coupling layer.

    Args:
        name: variable scope.
        x: 4-D Tensor, shape=(NHWC).
        mid_channels: number of channels in the coupling layer.
        reverse: Forward or reverse operation.
        activation: "relu" or "gatu".
        dropout: dropout rate, default 0.0.

    Returns:
        output: 4-D Tensor, shape=(NHWC)
        objective: 0.0 (the additive coupling has zero log-determinant)
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        output_channels = common_layers.shape_list(x)[-1]
        # Split channels in half: z1 passes through, z2 is shifted by a
        # function of x1, making the transform trivially invertible.
        x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
        z1 = x1
        shift = conv_stack("nn", x1, mid_channels, output_channels=output_channels,
                           activation=activation, dropout=dropout)
        if not reverse:
            z2 = x2 + shift
        else:
            z2 = x2 - shift
        return tf.concat([z1, z2], axis=3), 0.0
Reversible additive coupling layer. Args: name: variable scope. x: 4-D Tensor, shape=(NHWC). mid_channels: number of channels in the coupling layer. reverse: Forward or reverse operation. activation: "relu" or "gatu" dropout: default, 0.0 Returns: output: 4-D Tensor, shape=(NHWC) objective: 0.0
juraj-google-style
def TrimVariableTable(self, new_size):
    """Trims the variable table in the formatted breakpoint message.

    Removes trailing entries in the variables table, then scans the entire
    breakpoint message and redirects references to trimmed variables to
    var_index 0 ("buffer full").

    Args:
        new_size: desired size of the variables table.
    """
    def ProcessBufferFull(variables):
        # Recursively rewrite references (including nested members) that
        # point past the new table size.
        for variable in variables:
            var_index = variable.get('varTableIndex')
            if var_index is not None and (var_index >= new_size):
                variable['varTableIndex'] = 0
            members = variable.get('members')
            if members is not None:
                ProcessBufferFull(members)

    del self._var_table[new_size:]
    ProcessBufferFull(self.breakpoint['evaluatedExpressions'])
    for stack_frame in self.breakpoint['stackFrames']:
        ProcessBufferFull(stack_frame['arguments'])
        ProcessBufferFull(stack_frame['locals'])
    # The surviving table entries may themselves reference trimmed entries.
    ProcessBufferFull(self._var_table)
Trims the variable table in the formatted breakpoint message. Removes trailing entries in variables table. Then scans the entire breakpoint message and replaces references to the trimmed variables to point to var_index of 0 ("buffer full"). Args: new_size: desired size of variables table.
juraj-google-style
def get_course_final_price(self, mode, currency='$', enterprise_catalog_uuid=None):
    """Get the course mode's discounted price after applying any entitlement.

    Args:
        mode (dict): course mode containing at least 'sku', 'min_price' and
            'original_price'.
        currency (str): currency symbol used for formatting.
        enterprise_catalog_uuid: optional catalog to scope the basket calculation.

    Returns:
        str: Discounted price of the course mode, or the original price when
        no discount applies.
    """
    try:
        price_details = self.client.baskets.calculate.get(
            sku=[mode['sku']], username=self.user.username, catalog=enterprise_catalog_uuid)
    except (SlumberBaseException, ConnectionError, Timeout) as exc:
        # Best effort: on ecommerce API failure fall back to the list price.
        LOGGER.exception('Failed to get price details for sku %s due to: %s', mode['sku'], str(exc))
        price_details = {}
    price = price_details.get('total_incl_tax', mode['min_price'])
    if (price != mode['min_price']):
        return format_price(price, currency)
    return mode['original_price']
Get course mode's SKU discounted price after applying any entitlement available for this user. Returns: str: Discounted price of the course mode.
codesearchnet
def ExpandWindowsEnvironmentVariables(data_string, knowledge_base):
    r"""Take a string and expand any windows environment variables.

    Args:
        data_string: A string, e.g. "%SystemRoot%\\LogFiles"
        knowledge_base: A knowledgebase object.

    Returns:
        A string with available environment variables expanded. Unknown
        variables are left as-is (e.g. "%Unknown%").
    """
    win_environ_regex = re.compile(r"%([^%]+?)%")
    components = []
    offset = 0
    for match in win_environ_regex.finditer(data_string):
        # Text before the %var% reference.
        components.append(data_string[offset:match.start()])
        var_name = match.group(1)
        kb_value = getattr(knowledge_base, "environ_%s" % var_name.lower(), None)
        if isinstance(kb_value, string_types) and kb_value:
            components.append(kb_value)
        else:
            # Unknown/empty variable: keep the literal %var% form.
            components.append("%%%s%%" % var_name)
        offset = match.end()
    components.append(data_string[offset:])
    return "".join(components)
r"""Take a string and expand any windows environment variables. Args: data_string: A string, e.g. "%SystemRoot%\\LogFiles" knowledge_base: A knowledgebase object. Returns: A string with available environment variables expanded. If we can't expand we just return the string with the original variables.
juraj-google-style
def HasOutputClass(cls, name):
    """Determines if a specific output class is registered with the manager.

    Args:
        name (str): name of the output module.

    Returns:
        bool: True if the output class is registered.
    """
    # Non-string names can never be registered.
    if isinstance(name, py2to3.STRING_TYPES):
        return name.lower() in cls._output_classes
    return False
Determines if a specific output class is registered with the manager. Args: name (str): name of the output module. Returns: bool: True if the output class is registered.
juraj-google-style
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    """Get country information from an M49 numeric code.

    Args:
        m49 (int): M49 numeric code for which to get country information.
        use_live (bool): Try to use latest data from the web rather than the
            file in the package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): Exception to raise when the
            country is not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information, or None when unresolved.
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_info_from_iso3(iso3, exception=exception)
Get country name from M49 code Args: m49 (int): M49 numeric code for which to get country information use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str]]: Country information
juraj-google-style
async def remove_participant(self, p: Participant):
    """Remove a participant from the tournament.

    |methcoro|

    Args:
        p: the participant to remove.

    Raises:
        APIException
    """
    (await self.connection('DELETE', 'tournaments/{}/participants/{}'.format(self._id, p._id)))
    # Keep the local cache in sync with the remote deletion.
    if (p in self.participants):
        self.participants.remove(p)
remove a participant from the tournament |methcoro| Args: p: the participant to remove Raises: APIException
codesearchnet
def from_raw(self, raw: RawScalar) -> Optional[ScalarValue]:
    """Return a cooked value of the receiver type.

    Args:
        raw: Raw value obtained from the JSON parser.

    Returns:
        The string value, or None when `raw` is not a string.
    """
    return raw if isinstance(raw, str) else None
Return a cooked value of the receiver type. Args: raw: Raw value obtained from JSON parser.
codesearchnet
def userinfo(self, access_token):
    """Return the user profile for an Auth0 access token.

    The URL literal was truncated in extraction; reconstructed from the
    auth0-python client (`GET https://<domain>/userinfo` with a Bearer header).
    Works only if `openid` was granted as a scope for the access_token.

    Args:
        access_token (str): Auth0 access token (obtained during login).

    Returns:
        The user profile.
    """
    return self.get(url='https://%s/userinfo' % self.domain,
                    headers={'Authorization': 'Bearer %s' % access_token})
Returns the user information based on the Auth0 access token. This endpoint will work only if openid was granted as a scope for the access_token. Args: access_token (str): Auth0 access token (obtained during login). Returns: The user profile.
codesearchnet
def missing_values(self, flatten: bool=True) -> Dict[Union[str, int], Any]:
Returns missing values from this object. Args: flatten: If True, convert nested structures into a flattened dict using key path (delimited by '.' and '[]') as key. Returns: A dict of key to MISSING_VALUE.
github-repos
def _MaybeCaptured(t):
    """If t is a captured value placeholder, returns the original captured value.

    Args:
        t: Tensor

    Returns:
        A tensor, potentially from a different Graph/FuncGraph.
    """
    # Only graph-mode placeholders inside a FuncGraph can be captures.
    if not isinstance(t, ops.EagerTensor) and _IsFunction(t.op.graph) and (t.op.type == 'Placeholder'):
        for input_t, placeholder_t in _Captures(t.op.graph):
            if t is placeholder_t:
                # Recurse: the captured value may itself be a capture
                # from an enclosing FuncGraph.
                return _MaybeCaptured(input_t)
    return t
If t is a captured value placeholder, returns the original captured value. Args: t: Tensor Returns: A tensor, potentially from a different Graph/FuncGraph.
github-repos
def sanity_check_type(self, other):
    """Raise unless `other` is a TypeSpec compatible with `self`.

    Args:
        other: A `TypeSpec`.

    Raises:
        ValueError: If there is no TypeSpec that is compatible with both
            `self` and `other`.
    """
    # Checks run in order; each guards the attribute access of the next.
    if type(self) is not type(other):
        raise ValueError(
            'No TypeSpec is compatible with both %s and %s' % (self, other))
    if self._input_workers.serialize() != other._input_workers.serialize():
        raise ValueError(
            '_input_workers is not compatible with both %s and %s' % (self, other))
    if self._strategy is not other._strategy:
        raise ValueError(
            'tf.distribute strategy is not compatible with both %s and %s' % (self, other))
Returns the most specific TypeSpec compatible with `self` and `other`. Args: other: A `TypeSpec`. Raises: ValueError: If there is no TypeSpec that is compatible with both `self` and `other`.
github-repos
def _DownloadScript(self, url, dest_dir):
    """Download the contents of the URL to the destination.

    Several literals were truncated at '://' in extraction; reconstructed
    from the GCE guest-environment metadata-scripts module.

    Args:
        url: string, the URL to download.
        dest_dir: string, the path to a directory for storing metadata scripts.

    Returns:
        string, the path to the file storing the metadata script.
    """
    # A gs:// URL is rewritten to its HTTPS equivalent and fetched with auth.
    if url.startswith(r'gs://'):
        url = re.sub('^gs://', 'https://storage.googleapis.com/', url)
        return self._DownloadAuthUrl(url, dest_dir)

    header = r'http[s]?://'
    domain = r'storage\.googleapis\.com'
    bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'
    obj = r'(?P<obj>[^\*\?]+)'

    # Virtual-hosted style: http(s)://<bucket>.storage.googleapis.com/<obj>
    gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj))
    match = gs_regex.match(url)
    if match:
        return self._DownloadAuthUrl(url, dest_dir)

    # Path style: http(s)://storage.googleapis.com/<bucket>/<obj>
    gs_regex = re.compile(
        r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj))
    match = gs_regex.match(url)
    if match:
        return self._DownloadAuthUrl(url, dest_dir)

    # Anything else is fetched without authentication.
    return self._DownloadUrl(url, dest_dir)
Download the contents of the URL to the destination. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script.
juraj-google-style
def update_branch(profile, name, sha):
    """Move a branch's HEAD to a new SHA.

    Args:
        profile: A profile from ``simplygithub.authentication.profile`` telling
            this module the ``repo`` to connect to and the ``token`` to use.
        name: The name of the branch to update.
        sha: The commit SHA to point the branch's HEAD to.

    Returns:
        A dict with data about the branch.
    """
    return refs.update_ref(profile, 'heads/' + name, sha)
Move a branch's HEAD to a new SHA. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the branch to update. sha The commit SHA to point the branch's HEAD to. Returns: A dict with data about the branch.
codesearchnet
def check_email_exists_by_subject(self, subject, match_recipient=None):
    """Search the inbox for an email by subject.

    Args:
        subject (str): Subject to search for.
        match_recipient (str): Recipient to match exactly; ignored when None.

    Returns:
        bool: True if a matching email was found, False otherwise.
    """
    self._mail.select("inbox")
    try:
        matches = self.__search_email_by_subject(subject, match_recipient)
        if len(matches) <= 0:
            return False
        else:
            return True
    except Exception as e:
        # NOTE(review): re-raising unchanged — the try/except adds nothing
        # and could be removed; kept for byte compatibility.
        raise e
Searches for Email by Subject. Returns True or False. Args: subject (str): Subject to search for. Kwargs: match_recipient (str) : Recipient to match exactly. (don't care if not specified) Returns: True - email found, False - email not found
juraj-google-style
def ConfigsToTest():
    """Iterator over depthwise-convolution shapes, strides and paddings.

    Returns:
        List of tuples (input_size, filter_size, out_size, stride, padding,
        dilations) describing one depthwise convolution each.
    """
    def _config(input_size, filter_size, out_size,
                stride=1, padding='SAME', dilations=None):
        # Bundle one configuration as a plain tuple.
        return (input_size, filter_size, out_size, stride, padding, dilations)

    configs = [
        _config([4, 5, 5, 48], [1, 1, 48, 2], [4, 5, 5, 96]),
        _config([4, 8, 8, 84], [1, 3, 84, 1], [4, 8, 8, 84]),
        _config([4, 17, 17, 48], [3, 1, 48, 4], [4, 17, 17, 192]),
        _config([4, 9, 27, 8], [3, 3, 8, 1], [4, 9, 27, 8]),
        _config([4, 31, 31, 7], [3, 3, 7, 1], [4, 31, 31, 7]),
        _config([4, 35, 35, 2], [5, 5, 2, 1], [4, 35, 35, 2]),
        _config([4, 147, 147, 2], [3, 3, 2, 8], [4, 49, 49, 16], 3, padding='VALID'),
        _config([3, 299, 299, 3], [3, 2, 3, 8], [3, 150, 150, 24], 2),
        _config([5, 183, 183, 1], [5, 5, 1, 2], [5, 92, 92, 2], 2),
        _config([5, 183, 183, 1], [5, 5, 1, 2], [5, 183, 183, 2], dilations=[2, 2]),
        _config([5, 41, 35, 2], [4, 7, 2, 2], [5, 32, 23, 4], padding='VALID', dilations=[3, 2]),
    ]
    return configs
Iterator for different convolution shapes, strides and paddings. Returns: List of tuples (input_size, filter_size, out_size, stride, padding, dilations), the depthwise convolution parameters.
github-repos
def notify_by(self, invoice_id, medium, **kwargs):
    """Send/resend invoice notifications to the customer via email/sms.

    Args:
        invoice_id: Id of the invoice to notify about.
        medium: Notification medium, 'email' or 'sms'.
        **kwargs: Extra options forwarded to ``post_url``.

    Returns:
        {"success": true}
    """
    url = '{0}/{1}/notify_by/{2}'.format(self.base_url, invoice_id, medium)
    return self.post_url(url, {}, **kwargs)
Send/Resend notifications to customer via email/sms Args: invoice_id : Id for trigger notify medium : Medium for triggering notification via email or sms Returns: {"success": true}
juraj-google-style
def __init__(self, kind=None, project=None, namespace=None, ancestor=None, filters=(), projection=(), order=(), distinct_on=(), limit=None):
    """Represents a Datastore query.

    Args:
        kind: (str) The kind to query.
        project: (str) Required. Project associated with the query.
        namespace: (Optional) Namespace to restrict results to.
        ancestor: (Optional) Key of the ancestor to which results are restricted.
        filters: Property filters, a sequence of (property_name, operator, value).
        projection: Fields returned as part of query results.
        order: Field names used to order results; prefix with '-' for descending.
        distinct_on: Field names used to group query results.
        limit: (int) Maximum number of results to return.
    """
    self.kind = kind
    self.project = project
    self.namespace = namespace
    self.ancestor = ancestor
    # Normalize falsy filter containers (e.g. None) to an empty tuple.
    self.filters = filters or ()
    self.projection = projection
    self.order = order
    self.distinct_on = distinct_on
    self.limit = limit
Represents a Datastore query. Args: kind: (str) The kind to query. project: (str) Required. Project associated with query. namespace: (str, ValueProvider(str)) (Optional) Namespace to restrict results to. ancestor: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Key`) (Optional) key of the ancestor to which this query's results are restricted. filters: (sequence of tuple[str, str, str], sequence of tuple[ValueProvider(str), ValueProvider(str), ValueProvider(str)]) Property filters applied by this query. The sequence is ``(property_name, operator, value)``. projection: (sequence of string) fields returned as part of query results. order: (sequence of string) field names used to order query results. Prepend ``-`` to a field name to sort it in descending order. distinct_on: (sequence of string) field names used to group query results. limit: (int) Maximum amount of results to return.
github-repos
def _post_process(self, feed_item, item):
    """Maps ids and names of related entities back into the Bulkdozer feed.

    When Bulkdozer finishes processing an item it writes updated names and
    ids of related objects; this keeps the ad feed in sync.

    Args:
        feed_item: Feed item representing the ad from the Bulkdozer feed.
        item: The DCM ad being updated or created.
    """
    campaign = self._campaign_dao.get(feed_item, required=True)
    feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']
    feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']
    landing_page = self._landing_page_dao.get(feed_item, required=True)
    if landing_page:
        feed_item[FieldMap.AD_LANDING_PAGE_ID] = landing_page['id']
    # Propagate updated ids/names into each assignment sub-feed.
    self._sub_entity_map(feed_item['creative_assignment'], item, campaign)
    self._sub_entity_map(feed_item['placement_assignment'], item, campaign)
    self._sub_entity_map(feed_item['event_tag_assignment'], item, campaign)
Maps ids and names of related entities so they can be updated in the Bulkdozer feed. When Bulkdozer is done processing an item, it writes back the updated names and ids of related objects, this method makes sure those are updated in the ad feed. Args: feed_item: Feed item representing the ad from the Bulkdozer feed. item: The DCM ad being updated or created.
github-repos
def read_at(self, d, index=False):
    """Get the interval at a particular 'depth' (elevation, age, anything).

    Args:
        d (Number): The 'depth' to query.
        index (bool): Whether to return the index instead of the interval.

    Returns:
        Interval: The interval (or its index when ``index=True``) at the
        specified depth, or ``None`` if the depth is outside the range.
    """
    for position, interval in enumerate(self):
        if interval.spans(d):
            return position if index else interval
    return None
Get the index of the interval at a particular 'depth' (though this might be an elevation or age or anything). Args: d (Number): The 'depth' to query. index (bool): Whether to return the index instead of the interval. Returns: Interval: The interval, or if ``index==True`` the index of the interval, at the specified 'depth', or ``None`` if the depth is outside the striplog's range.
juraj-google-style
def from_keras_log(csv_path, output_dir_path, **kwargs):
    """Plot accuracy and loss from a Keras CSV log.

    Args:
        csv_path: The path to the CSV log with the actual data.
        output_dir_path: Directory where the resulting plots should end up.
        **kwargs: Forwarded to the plotting helper.
    """
    # sep=None with the python engine lets pandas sniff the delimiter.
    data = pd.read_csv(csv_path, sep=None, engine='python')
    _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)
Plot accuracy and loss from a Keras CSV log. Args: csv_path: The path to the CSV log with the actual data. output_dir_path: The path to the directory where the resultings plots should end up.
codesearchnet
def transform_and_overwrite_file(self, file_path: str, transformation: Optional[Callable[[Iterator[str]], Iterator[str]]]=None) -> None:
    """Transforms the contents of `file_path`, overwriting the file.

    Args:
        file_path: Path to the file whose contents are to be transformed.
        transformation: Function mapping an iterator over the file's lines to
            an iterator over transformed lines. Defaults to
            ``self.annotate_test_file``.
    """
    transform = self.annotate_test_file if transformation is None else transformation
    # Write to a temp file first, then atomically replace the original.
    with open(file_path, mode='r') as source:
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as sink:
            sink.writelines(transform(source))
    shutil.move(sink.name, file_path)
Transforms the contents of `file_path`, overwriting the file. Args: file_path: The path to the file whose contents are to be transformed. transformation: A function that takes an iterator over the lines of an HLO file and returns an iterator over the lines of the transformed file. If this is left as `None`, `self.annotate_test_file` will be used.
github-repos
def restore(cdiff, a):
    """Restore the edited text from the original using a compressed diff.

    Args:
        cdiff (dict): compressed diff returned by
            :func:`~acorn.logging.diff.compress`, keyed by line index.
        a (str or list): *original* string or list of strings used as the
            reference to restore the edited version.

    Returns:
        list: lines of the restored (edited) text.
    """
    left = a.splitlines(1) if isinstance(a, string_types) else a
    lrest = []
    iline = 0
    for i, line in enumerate(left):
        if iline not in cdiff:
            # Unchanged line: re-tag with the ndiff "keep" prefix.
            lrest.append(" " + line)
            iline += 1
        else:
            # Changed block: splice in the stored ndiff lines and advance the
            # cursor by the net number of lines added/removed.
            cs = [l[0] for l in cdiff[iline]]
            add = cs.count('+') - cs.count('-')
            lrest.extend(cdiff[iline])
            iline += add + 1
    # Blocks appended past the end of the original text.
    for i in sorted(cdiff.keys()):
        if i >= len(left):
            lrest.extend(cdiff[i])
    from difflib import restore
    # difflib.restore with which=2 reconstructs the "after" side of the diff.
    return list(restore(lrest, 2))
Restores the full text of either the edited text using the compressed diff. Args: cdiff (dict): compressed diff returned by :func:`~acorn.logging.diff.compress`. a (str or list): *original* string or list of strings to use as a reference to restore the edited version.
juraj-google-style
def get_table_schema(schema):
    """Transform the table schema into a bigquery.TableSchema instance.

    Args:
        schema: The schema to be used if the BigQuery table to write has to be
            created; a JSON string, a dict, or None.

    Returns:
        The parsed bigquery.TableSchema, or None when `schema` is None.

    Raises:
        TypeError: for any other argument type.
    """
    if schema is None:
        return None
    if isinstance(schema, str):
        return bigquery_tools.parse_table_schema_from_json(schema)
    if isinstance(schema, dict):
        # Round-trip through JSON so the same parser handles both forms.
        return bigquery_tools.parse_table_schema_from_json(json.dumps(schema))
    raise TypeError('Unexpected schema argument: %s.' % schema)
Transform the table schema into a bigquery.TableSchema instance. Args: schema: The schema to be used if the BigQuery table to write has to be created. This is a dictionary object created in the WriteToBigQuery transform. Returns: table_schema: The schema to be used if the BigQuery table to write has to be created but in the bigquery.TableSchema format.
github-repos
def replace_code(code: str, replace_pattern: str) -> str:
    """Replace `code` by a pattern of the form `with X1->X2,Y1->Y2,Z1->Z2`.

    Args:
        code (`str`): The code to be modified.
        replace_pattern (`str`): The pattern used to modify `code`.

    Returns:
        `str`: The modified code.
    """
    if len(replace_pattern) == 0:
        return code
    raw_patterns = replace_pattern.replace('with', '').split(',')
    for parsed in (_re_replace_pattern.search(p) for p in raw_patterns):
        if parsed is None:
            continue
        obj1, obj2, option = parsed.groups()
        code = re.sub(obj1, obj2, code)
        if option.strip() == 'all-casing':
            # Also apply the substitution in lower- and upper-case variants.
            code = re.sub(obj1.lower(), obj2.lower(), code)
            code = re.sub(obj1.upper(), obj2.upper(), code)
    return code
Replace `code` by a pattern of the form `with X1->X2,Y1->Y2,Z1->Z2`. Args: code (`str`): The code to be modified. replace_pattern (`str`): The pattern used to modify `code`. Returns: `str`: The modified code.
github-repos
def _assert_sparse_compatible(sparse_tensors):
    """Check that all of `sparse_tensors` share `indices` and `dense_shape`.

    Args:
        sparse_tensors: A list of sparse tensors.

    Returns:
        A list of assertion ops to be used as control dependencies.
    """
    reference = sparse_tensors[0]
    checks = []
    for candidate in sparse_tensors[1:]:
        checks.append(check_ops.assert_equal(
            reference.dense_shape, candidate.dense_shape, message='Mismatched shapes!'))
        checks.append(check_ops.assert_equal(
            reference.indices, candidate.indices, message='Mismatched indices!'))
    return checks
Check that all of `sparse_tensors` have same `indices` and `dense_shape`. Args: sparse_tensors: A list of sparse tensors. Returns: An op to be used as a control dependency.
github-repos
def print_summary(model, line_length=None, positions=None, print_fn=None): if print_fn is None: print_fn = print if model.__class__.__name__ == 'Sequential': sequential_like = True elif not model._is_graph_network: sequential_like = True else: sequential_like = True nodes_by_depth = model._nodes_by_depth.values() nodes = [] for v in nodes_by_depth: if len(v) > 1 or (len(v) == 1 and len(nest.flatten(v[0].keras_inputs)) > 1): sequential_like = False break nodes += v if sequential_like: for layer in model.layers: flag = False for node in layer._inbound_nodes: if node in nodes: if flag: sequential_like = False break else: flag = True if not sequential_like: break if sequential_like: line_length = line_length or 65 positions = positions or [0.45, 0.85, 1.0] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] to_display = ['Layer (type)', 'Output Shape', 'Param else: line_length = line_length or 98 positions = positions or [0.33, 0.55, 0.67, 1.0] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] to_display = ['Layer (type)', 'Output Shape', 'Param relevant_nodes = [] for v in model._nodes_by_depth.values(): relevant_nodes += v def print_row(fields, positions): line = '' for i in range(len(fields)): if i > 0: line = line[:-1] + ' ' line += str(fields[i]) line = line[:positions[i]] line += ' ' * (positions[i] - len(line)) print_fn(line) print_fn('Model: "{}"'.format(model.name)) print_fn('_' * line_length) print_row(to_display, positions) print_fn('=' * line_length) def print_layer_summary(layer): try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' except RuntimeError: output_shape = '?' 
name = layer.name cls_name = layer.__class__.__name__ if not layer.built and (not getattr(layer, '_is_graph_network', False)): params = '0 (unused)' else: params = layer.count_params() fields = [name + ' (' + cls_name + ')', output_shape, params] print_row(fields, positions) def print_layer_summary_with_connections(layer): try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' connections = [] for node in layer._inbound_nodes: if relevant_nodes and node not in relevant_nodes: continue for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound(): connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index, tensor_index)) name = layer.name cls_name = layer.__class__.__name__ if not connections: first_connection = '' else: first_connection = connections[0] fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection] print_row(fields, positions) if len(connections) > 1: for i in range(1, len(connections)): fields = ['', '', '', connections[i]] print_row(fields, positions) layers = model.layers for i in range(len(layers)): if sequential_like: print_layer_summary(layers[i]) else: print_layer_summary_with_connections(layers[i]) if i == len(layers) - 1: print_fn('=' * line_length) else: print_fn('_' * line_length) if hasattr(model, '_collected_trainable_weights'): trainable_count = count_params(model._collected_trainable_weights) else: trainable_count = count_params(model.trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count)) print_fn('Trainable params: {:,}'.format(trainable_count)) print_fn('Non-trainable params: {:,}'.format(non_trainable_count)) print_fn('_' * line_length)
Prints a summary of a model. Args: model: Keras model instance. line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. It defaults to `print` (prints to stdout).
github-repos
def __init__(self, graph, fetches, feeds, feed_handles=None):
    """Creates a fetch handler.

    Args:
        graph: Graph of the fetches. Used to check for fetchability and to
            convert all fetches to tensors or ops as needed.
        fetches: An arbitrary fetch structure: singleton, list, tuple,
            namedtuple, or dict.
        feeds: A feed dict where keys are Tensors.
        feed_handles: A dict from feed Tensors to TensorHandle objects used
            as direct feeds.
    """
    with graph.as_default():
        # Normalize the arbitrary fetch structure into a mapper over
        # unique fetches.
        self._fetch_mapper = _FetchMapper.for_fetch(fetches)
    self._fetches = []
    self._targets = []
    self._feeds = feeds
    self._feed_handles = feed_handles or {}
    # One bool per unique fetch: True marks "Operation (no value)".
    self._ops = []
    self._fetch_handles = {}
    for fetch in self._fetch_mapper.unique_fetches():
        if isinstance(fetch, ops.Operation):
            self._assert_fetchable(graph, fetch)
            self._targets.append(fetch)
            self._ops.append(True)
        else:
            self._assert_fetchable(graph, fetch.op)
            self._fetches.append(fetch)
            self._ops.append(False)
            # Remember the dtype fed into session-handle producers so the
            # results can be wrapped as handles later.
            if isinstance(fetch, tensor.Tensor) and (fetch.op.type == 'GetSessionHandle' or fetch.op.type == 'GetSessionHandleV2'):
                self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype
    # Fetches that are also directly fed do not have to be run.
    self._final_fetches = [x for x in self._fetches if x.ref() not in feeds]
Creates a fetch handler. Args: graph: Graph of the fetches. Used to check for fetchability and to convert all fetches to tensors or ops as needed. fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple, or dict. feeds: A feed dict where keys are Tensors. feed_handles: A dict from feed Tensors to TensorHandle objects used as direct feeds.
github-repos
def group(self, group_type, name, **kwargs):
    """Add Group data to this Batch object.

    Args:
        group_type (str): The ThreatConnect-defined Group type.
        name (str): The name for this Group.
        **kwargs: Optional Group attributes such as ``date_added`` and
            ``xid``.

    Returns:
        obj: An instance of Group.
    """
    new_group = Group(group_type, name, **kwargs)
    return self._group(new_group)
Add Group data to Batch object. Args: group_type (str): The ThreatConnect-defined Group type. name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Group.
juraj-google-style
def _get_elmt_amt_in_rxt(self, rxt): return sum([rxt.get_el_amount(e) for e in self.pd.elements])
Computes total number of atoms in a reaction formula for elements not in external reservoir. This method is used in the calculation of reaction energy per mol of reaction formula. Args: rxt (Reaction): a reaction. Returns: Total number of atoms for non_reservoir elements.
juraj-google-style
def query(self, connection, query, fetch=True):
    """Create virtual tables for partitions referenced by ``query`` and run it.

    Args:
        connection: database connection to execute against.
        query (str): a single SQL statement.
        fetch (bool): fetch result from database if True, do not fetch
            otherwise.

    Raises:
        BadSQLError: if ``query`` contains zero or more than one statement.
    """
    self.install_module(connection)
    statements = sqlparse.parse(sqlparse.format(query, strip_comments=True))
    logger.debug('Finding and installing all partitions from query. \n query: {}'.format(query))
    # NOTE(review): new_query is never populated below; it is only logged.
    # Presumably a partition-rewriting step once filled it in -- confirm.
    new_query = []
    if len(statements) > 1:
        raise BadSQLError("Can only query a single statement")
    if len(statements) == 0:
        raise BadSQLError("DIdn't get any statements in '{}'".format(query))
    statement = statements[0]
    logger.debug(
        'Searching statement for partition ref.\n statement: {}'.format(statement.to_unicode()))
    logger.debug(
        'Executing updated query after partition install.'
        '\n query before update: {}\n query to execute (updated query): {}'
        .format(statement, new_query))
    return self._execute(connection, statement.to_unicode(), fetch=fetch)
Creates virtual tables for all partitions found in the query and executes query. Args: query (str): sql query fetch (bool): fetch result from database if True, do not fetch otherwise.
juraj-google-style
def process_result_value(self, value, dialect):
    """Decode a stored usage-mask integer into enum members.

    Args:
        value (int): integer bitmask stored in the database.
        dialect (string): SQL dialect (unused).

    Returns:
        New list of ``enums.CryptographicUsageMask`` members whose bits are
        set in ``value``; empty when ``value`` is falsy.
    """
    if not value:
        return []
    return [flag for flag in enums.CryptographicUsageMask if flag.value & value]
Returns a new list of enums.CryptographicUsageMask Enums. This converts the integer value into the list of enums. Args: value(int): The integer value stored in the database that is used to create the list of enums.CryptographicUsageMask Enums. dialect(string): SQL dialect
juraj-google-style
def get_metrics(self, name=None):
    """Get metrics for this operator.

    Args:
        name (str, optional): only return metrics matching ``name``, where
            ``name`` can be a regular expression. All metrics for this
            operator are returned when omitted.

    Returns:
        list(Metric): list of matching metrics.
    """
    return self._get_elements(self.metrics, 'metrics', Metric, name=name)
Get metrics for this operator. Args: name(str, optional): Only return metrics matching `name`, where `name` can be a regular expression. If `name` is not supplied, then all metrics for this operator are returned. Returns: list(Metric): List of matching metrics. Retrieving a list of metrics whose name contains the string "temperatureSensor" could be performed as follows. Example: >>> from streamsx import rest >>> sc = rest.StreamingAnalyticsConnection() >>> instances = sc.get_instances() >>> operator = instances[0].get_operators()[0] >>> metrics = op.get_metrics(name='*temperatureSensor*')
codesearchnet
def get_available_transcript_languages(video_id):
    """Get available transcript languages for a video.

    Args:
        video_id (unicode): an id identifying the Video.

    Returns:
        list of transcript language codes associated with the Video.
    """
    available_languages = VideoTranscript.objects.filter(
        video__edx_video_id=video_id
    ).values_list(
        'language_code', flat=True
    )
    # Materialize the queryset so callers get a plain list.
    return list(available_languages)
Get available transcript languages Arguments: video_id(unicode): An id identifying the Video. Returns: A list containing transcript language codes for the Video.
juraj-google-style
def iter(self):
    """Yield all resources, transparently paging through the API.

    If the caller pre-set a 'page' filter in ``self.params``, only that
    single page is fetched; otherwise pages are fetched until an empty
    response is returned.

    Yields:
        Instances of ``self.type`` built from each returned item.
    """
    page = 1
    fetch_all = True
    url = '{}/{}'.format(__endpoint__, self.type.RESOURCE)
    if ('page' in self.params):
        # An explicit page was requested: fetch just that one page.
        page = self.params['page']
        fetch_all = False
    response = RestClient.get(url, self.params)[self.type.RESOURCE]
    while len(response):
        for item in response:
            (yield self.type(item))
        if (not fetch_all):
            break
        else:
            # Advance the page filter and fetch the next batch.
            page += 1
            self.where(page=page)
            response = RestClient.get(url, self.params)[self.type.RESOURCE]
Gets all resources, automating paging through data Returns: iterable of object: Iterable of resource objects
codesearchnet
def _ProcessAMCacheFileKey(self, am_entry, parser_mediator):
    """Parses an Amcache Root/File key for events.

    Args:
        am_entry (pyregf.key): amcache File key.
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
    """
    amcache_datetime = am_entry.get_value_by_name(
        self._AMCACHE_DATETIME).get_data_as_integer()
    event_data = AmcacheEventData()
    event_data.full_path = am_entry.get_value_by_name(
        self._AMCACHE_FULL_PATH).get_data_as_string()
    # The stored SHA-1 carries a 4-character prefix that is stripped here.
    event_data.sha1 = am_entry.get_value_by_name(
        self._AMCACHE_SHA1).get_data_as_string()[4:]
    # The remaining values are optional; only set attributes that exist.
    productname = am_entry.get_value_by_name(self._AMCACHE_PRODUCTNAME)
    if productname:
        event_data.productname = productname.get_data_as_string()
    companyname = am_entry.get_value_by_name(self._AMCACHE_COMPANYNAME)
    if companyname:
        event_data.companyname = companyname.get_data_as_string()
    fileversion = am_entry.get_value_by_name(self._AMCACHE_FILEVERSION)
    if fileversion:
        event_data.fileversion = fileversion.get_data_as_string()
    languagecode = am_entry.get_value_by_name(self._AMCACHE_LANGUAGECODE)
    if languagecode:
        event_data.languagecode = languagecode.get_data_as_integer()
    filesize = am_entry.get_value_by_name(self._AMCACHE_FILESIZE)
    if filesize:
        event_data.filesize = filesize.get_data_as_integer()
    filedescription = am_entry.get_value_by_name(self._AMCACHE_FILEDESCRIPTION)
    if filedescription:
        event_data.filedescription = filedescription.get_data_as_string()
    linkerts = am_entry.get_value_by_name(self._AMCACHE_LINKERTS)
    if linkerts:
        event_data.linkerts = linkerts.get_data_as_integer()
    lastmodifiedts = am_entry.get_value_by_name(self._AMCACHE_LASTMODIFIEDTS)
    if lastmodifiedts:
        event_data.lastmodifiedts = lastmodifiedts.get_data_as_integer()
    createdts = am_entry.get_value_by_name(self._AMCACHE_CREATEDTS)
    if createdts:
        event_data.createdts = createdts.get_data_as_integer()
    programid = am_entry.get_value_by_name(self._AMCACHE_PROGRAMID)
    if programid:
        event_data.programid = programid.get_data_as_string()
    # Always produce a modification event from the key's own timestamp.
    event = time_events.DateTimeValuesEvent(
        filetime.Filetime(amcache_datetime),
        definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
    if event_data.createdts:
        event = time_events.DateTimeValuesEvent(
            filetime.Filetime(event_data.createdts),
            definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    if event_data.lastmodifiedts:
        event = time_events.DateTimeValuesEvent(
            filetime.Filetime(event_data.lastmodifiedts),
            definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    if event_data.linkerts:
        # Linker timestamps are POSIX, unlike the FILETIME values above.
        event = time_events.DateTimeValuesEvent(
            posix_time.PosixTime(event_data.linkerts),
            definitions.TIME_DESCRIPTION_CHANGE)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an Amcache Root/File key for events. Args: am_entry (pyregf.key): amcache File key. parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs.
juraj-google-style
def ask_stories(self, raw=False, limit=None):
    """Return the latest Ask HN stories.

    Args:
        raw (bool): return raw json objects instead of story objects.
        limit (int): maximum number of stories to return.

    Returns:
        list of Ask HN stories (or their raw json when ``raw`` is True).
    """
    stories = self._get_stories('askstories', limit)
    if not raw:
        return stories
    return [story.raw for story in stories]
Returns list of item ids of latest Ask HN stories Args: limit (int): specifies the number of stories to be returned. raw (bool): Flag to indicate whether to transform all objects into raw json. Returns: `list` object containing ids of Ask HN stories.
codesearchnet
def ListPlugins(logdir):
    """List all the plugins that have registered assets in logdir.

    If the plugins directory does not exist, an empty list is returned,
    keeping compatibility with old logdirs that have no plugins written.

    Args:
        logdir: A directory that was created by a TensorFlow events writer.

    Returns:
        a list of plugin names, as strings
    """
    assets_dir = os.path.join(logdir, _PLUGINS_DIR)
    try:
        entries = tf.io.gfile.listdir(assets_dir)
    except tf.errors.NotFoundError:
        return []
    plugins = []
    for entry in entries:
        # Some filesystems mark directories with a trailing slash already.
        if entry.endswith('/') or _IsDirectory(assets_dir, entry):
            plugins.append(entry.rstrip('/'))
    return plugins
List all the plugins that have registered assets in logdir. If the plugins_dir does not exist, it returns an empty list. This maintains compatibility with old directories that have no plugins written. Args: logdir: A directory that was created by a TensorFlow events writer. Returns: a list of plugin names, as strings
juraj-google-style
def get_descriptor_defaults(self, api_info, hostname=None): if self.__request: hostname = self.__request.reconstruct_hostname() protocol = self.__request.url_scheme else: hostname = (hostname or util.get_app_hostname() or api_info.hostname) protocol = 'http' if ((hostname and hostname.startswith('localhost')) or util.is_running_on_devserver()) else 'https' full_base_path = '{0}{1}/{2}/'.format(api_info.base_path, api_info.name, api_info.path_version) base_url = '{0}: root_url = '{0}: defaults = { 'kind': 'discovery 'discoveryVersion': 'v1', 'id': '{0}:{1}'.format(api_info.name, api_info.path_version), 'name': api_info.name, 'version': api_info.api_version, 'icons': { 'x16': 'https: 'x32': 'https: }, 'protocol': 'rest', 'servicePath': '{0}/{1}/'.format(api_info.name, api_info.path_version), 'batchPath': 'batch', 'basePath': full_base_path, 'rootUrl': root_url, 'baseUrl': base_url, 'description': 'This is an API', } if api_info.description: defaults['description'] = api_info.description if api_info.title: defaults['title'] = api_info.title if api_info.documentation: defaults['documentationLink'] = api_info.documentation if api_info.canonical_name: defaults['canonicalName'] = api_info.canonical_name return defaults
Gets a default configuration for a service. Args: api_info: _ApiInfo object for this service. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary with the default configuration.
juraj-google-style
def synthesize(self, duration):
    """Synthesize uniform white noise.

    Args:
        duration (numpy.timedelta64): the duration of the synthesized sound.

    Returns:
        AudioSamples containing the generated noise at this synthesizer's
        sample rate.
    """
    rate = self.samplerate.samples_per_second
    n_samples = int(rate * (duration / Seconds(1)))
    noise = np.random.uniform(low=-1.0, high=1.0, size=n_samples)
    return AudioSamples(noise, self.samplerate)
Synthesize white noise Args: duration (numpy.timedelta64): The duration of the synthesized sound
codesearchnet
def populations():
    """Load Backpage city populations from the bundled CityPops.csv.

    Returns:
        dict mapping Backpage city name (str) to citizen population (int).
    """
    city_pops = {}
    fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
    # The old 'rU' universal-newline mode was removed in Python 3.11;
    # csv.reader expects files opened with newline='' instead.
    with open(fname, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            city_pops[row[0]] = int(row[1])
    return city_pops
Get a dictionary of Backpage city names mapped to their citizen populations. Returns: dictionary of Backpage city names mapped to their populations (integers)
codesearchnet
def unique_name(self, name, mark_as_used=True):
    """Like tf.Graph.unique_name, returns a unique operation name for ``name``.

    Args:
        name: The name for an operation.
        mark_as_used: whether to mark this name as being used.

    Returns:
        A string to use as the name for the operation.
    """
    scope_name = tf.get_variable_scope().name
    if scope_name:
        name = ((scope_name + '/') + name)
    # Bookkeeping is case-insensitive: 'Foo' and 'foo' collide.
    name_key = name.lower()
    i = self._names_in_use.get(name_key, 0)
    if mark_as_used:
        self._names_in_use[name_key] = (i + 1)
    if (i > 0):
        base_name_key = name_key
        # Probe '<name>_<i>' suffixes until an unused key is found.
        while (name_key in self._names_in_use):
            name_key = ('%s_%d' % (base_name_key, i))
            i += 1
        if mark_as_used:
            self._names_in_use[name_key] = 1
        # Return the (original-case) name with the suffix that was free.
        name = ('%s_%d' % (name, (i - 1)))
    return name
Like tf.Graph.unique_name, returns a unique operation name for `name`. Args: name: The name for an operation. mark_as_used: whether to mark this name as being used. Returns: A string to use as the name for the operation.
codesearchnet
def get_events(self, event_title, regex=False):
    """Search the server for events with the provided title.

    Args:
        event_title: the title (or regex pattern) of the event.
        regex: treat ``event_title`` as a regular expression if True.

    Returns:
        The decoded JSON response (an object with 'meta' and 'objects'
        keys), or None if the server did not return HTTP 200.
    """
    # The API expects regex as an integer flag, not a boolean.
    regex_val = 0
    if regex:
        regex_val = 1
    r = requests.get('{0}/events/?api_key={1}&username={2}&c-title={3}&regex={4}'.format(self.url, self.api_key, self.username, event_title, regex_val), verify=self.verify)
    if (r.status_code == 200):
        json_obj = json.loads(r.text)
        return json_obj
    else:
        log.error('Non-200 status code from get_event: {}'.format(r.status_code))
        return None
Search for events with the provided title Args: event_title: The title of the event Returns: An event JSON object returned from the server with the following: { "meta":{ "limit": 20, "next": null, "offset": 0, "previous": null, "total_count": 3 }, "objects": [{}, {}, etc] } or None if an error occurred.
codesearchnet
def load_requires_from_file(filepath):
    """Read a package list (one requirement per line) from a given file path.

    Blank lines are ignored so trailing newlines or spacer lines do not
    produce empty package names.

    Args:
        filepath: file path of the package list.

    Returns:
        a list of stripped, non-empty package names.
    """
    with open(filepath) as fp:
        return [line.strip() for line in fp if line.strip()]
Read a package list from a given file path. Args: filepath: file path of the package list. Returns: a list of package names.
codesearchnet
def __init__(self, _channel, loop=None, executor=None, standalone_pool_for_streaming=False): self._channel = _channel if loop is None: loop = _asyncio.get_event_loop() self._loop = loop self._executor = executor self._standalone_pool = standalone_pool_for_streaming self._subscribe_map = {}
Constructor. Args: _channel: wrapped grpc.Channel loop: asyncio event loop executor: a thread pool, or None to use the default pool of the loop standalone_pool_for_streaming: create a new thread pool (with 1 thread) for each streaming method
juraj-google-style
def reward(self, action=None):
    """Reward function for the task.

    The dense reward has three components:
      reaching: in [0, 1], encourages the arm to approach the cube
      grasping: in {0, 0.25}, non-zero while both fingers touch the cube
      lifting: in {0, 1}, non-zero once the success check passes
    The sparse reward only consists of the lifting component.

    Args:
        action (np array): unused for this task.

    Returns:
        reward (float): the reward.
    """
    reward = 0.
    if self._check_success():
        reward = 1.0
    if self.reward_shaping:
        cube_pos = self.sim.data.body_xpos[self.cube_body_id]
        gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
        dist = np.linalg.norm(gripper_site_pos - cube_pos)
        # Smoothly saturating distance bonus in [0, 1].
        reaching_reward = 1 - np.tanh(10.0 * dist)
        reward += reaching_reward
        touch_left_finger = False
        touch_right_finger = False
        # Scan active contacts for cube/finger pairs (either ordering of
        # geom1/geom2).
        for i in range(self.sim.data.ncon):
            c = self.sim.data.contact[i]
            if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cube_geom_id:
                touch_left_finger = True
            if c.geom1 == self.cube_geom_id and c.geom2 in self.l_finger_geom_ids:
                touch_left_finger = True
            if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cube_geom_id:
                touch_right_finger = True
            if c.geom1 == self.cube_geom_id and c.geom2 in self.r_finger_geom_ids:
                touch_right_finger = True
        if touch_left_finger and touch_right_finger:
            # Grasp bonus when both fingers contact the cube.
            reward += 0.25
    return reward
Reward function for the task. The dense reward has three components. Reaching: in [0, 1], to encourage the arm to reach the cube Grasping: in {0, 0.25}, non-zero if arm is grasping the cube Lifting: in {0, 1}, non-zero if arm has lifted the cube The sparse reward only consists of the lifting component. Args: action (np array): unused for this task Returns: reward (float): the reward
juraj-google-style
def exists(self, filename):
    """Report whether a file exists on the distribution point.

    The file type is determined by extension: packages live under
    "Packages", everything else under "Scripts".

    Args:
        filename: Filename to check (no path, e.g.
            "AdobeFlashPlayer-14.0.0.176.pkg").

    Returns:
        bool: True when the file exists on the mount point.
    """
    subfolder = "Packages" if is_package(filename) else "Scripts"
    target = os.path.join(self.connection["mount_point"], subfolder, filename)
    return os.path.exists(target)
Report whether a file exists on the distribution point. Determines file type by extension. Args: filename: Filename you wish to check. (No path! e.g.: "AdobeFlashPlayer-14.0.0.176.pkg")
juraj-google-style
def parse(self, arguments):
    """Parse one or more arguments with the installed parser.

    Args:
        arguments: a single argument or a list of arguments (typically a
            list of default values); a single argument is converted
            internally into a list containing one item.
    """
    parsed = self._parse(arguments)
    if not self.present:
        # First parse: take ownership of the new list.
        self.value = parsed
    else:
        self.value.extend(parsed)
    self.present += len(parsed)
Parses one or more arguments with the installed parser. Args: arguments: a single argument or a list of arguments (typically a list of default values); a single argument is converted internally into a list containing one item.
juraj-google-style
def xxd_output_to_bytes(input_cc_file):
    """Convert an xxd-generated C++ source file back into bytes (immutable).

    Args:
        input_cc_file: Full path name to the C++ source file dumped by xxd.

    Raises:
        IOError: If input_cc_file cannot be opened.

    Returns:
        bytes corresponding to the array in the input cc file.
    """
    hex_line = re.compile(r'\W*(0x[0-9a-fA-F,x ]+).*')
    result = bytearray()
    with open(input_cc_file) as source:
        for line in source:
            match = hex_line.match(line)
            if match is None:
                continue
            # The captured group is a comma-separated run of hex literals.
            for token in match.group(1).split(','):
                if token:
                    result.append(int(token, 16))
    return bytes(result)
Converts xxd output C++ source file to bytes (immutable). Args: input_cc_file: Full path name to the C++ source file dumped by xxd Raises: RuntimeError: If input_cc_file path is invalid. IOError: If input_cc_file cannot be opened. Returns: A bytearray corresponding to the input cc file array.
github-repos
def __fetch_route53_zone_records(self, zone_id):
    """Return all resource records for a specific Route53 zone.

    Args:
        zone_id (`str`): Name / ID of the hosted zone.

    Returns:
        `list` of record dicts with id, name, type, ttl and value entries.
    """
    route53 = self.session.client('route53')
    done = False
    nextName = nextType = None
    records = {}
    try:
        while (not done):
            # Continue a paginated listing when markers are present.
            if (nextName and nextType):
                response = route53.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=nextName, StartRecordType=nextType)
            else:
                response = route53.list_resource_record_sets(HostedZoneId=zone_id)
            if response['IsTruncated']:
                nextName = response['NextRecordName']
                nextType = response['NextRecordType']
            else:
                done = True
            if ('ResourceRecordSets' in response):
                for record in response['ResourceRecordSets']:
                    # Hash keys de-duplicate records seen across pages.
                    record_id = self._get_resource_hash(zone_id, record)
                    if ('AliasTarget' in record):
                        # Alias records carry no TTL or resource records of
                        # their own; normalize to type ALIAS with ttl 0.
                        value = record['AliasTarget']['DNSName']
                        records[record_id] = {'id': record_id, 'name': record['Name'].rstrip('.'), 'type': 'ALIAS', 'ttl': 0, 'value': [value]}
                    else:
                        value = [y['Value'] for y in record['ResourceRecords']]
                        records[record_id] = {'id': record_id, 'name': record['Name'].rstrip('.'), 'type': record['Type'], 'ttl': record['TTL'], 'value': value}
        return list(records.values())
    finally:
        del route53
Return all resource records for a specific Route53 zone Args: zone_id (`str`): Name / ID of the hosted zone Returns: `dict`
codesearchnet
def peek(self, iroute: "InstanceRoute") -> Optional[Value]:
    """Return a value within the receiver's subtree, or ``None``.

    Args:
        iroute: Instance route (relative to the receiver).

    Returns:
        The value reached by following ``iroute``, or ``None`` as soon as
        any step yields no value.
    """
    value, schema = self.value, self.schema_node
    for selector in iroute:
        value, schema = selector.peek_step(value, schema)
        if value is None:
            return None
    return value
Return a value within the receiver's subtree. Args: iroute: Instance route (relative to the receiver).
juraj-google-style
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):
    """Convert segmentation model outputs into semantic segmentation maps.

    Only supports PyTorch.

    Args:
        outputs: raw model outputs carrying a ``logits`` tensor of shape
            (batch, num_classes, height, width).
        target_sizes (`List[Tuple]`, *optional*): requested final
            (height, width) per prediction; predictions are not resized
            when unset.

    Returns:
        list of per-image `torch.Tensor` class-id maps of shape
        (height, width).
    """
    logits = outputs.logits
    if target_sizes is None:
        # No resizing requested: argmax over the class axis per image.
        batch_maps = logits.argmax(dim=1)
        return [batch_maps[idx] for idx in range(batch_maps.shape[0])]
    if len(logits) != len(target_sizes):
        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
    if is_torch_tensor(target_sizes):
        target_sizes = target_sizes.numpy()
    semantic_maps = []
    for idx in range(len(logits)):
        # Upsample each image's logits to its requested size before argmax.
        upsampled = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
        semantic_maps.append(upsampled[0].argmax(dim=0))
    return semantic_maps
Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MobileNetV2ForSemanticSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple]` of length `batch_size`, *optional*): List of tuples corresponding to the requested final size (height, width) of each prediction. If unset, predictions will not be resized. Returns: semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
github-repos
def fixity(self, response_format=None):
    """Issue a fixity check for this resource and return the parsed graph.

    Args:
        response_format: preferred serialization; defaults to the
            repository's default serialization.
            NOTE(review): computed but not passed to the request below --
            confirm whether it should set an Accept header.

    Returns:
        (dict): {'verdict': (bool) verdict of the fixity check,
                 'premis_graph': (rdflib.Graph) parsed PREMIS graph}
    """
    if (not response_format):
        response_format = self.repo.default_serialization
    response = self.repo.api.http_request('GET', ('%s/fcr:fixity' % self.uri))
    fixity_graph = self.repo.api.parse_rdf_payload(response.content, response.headers)
    # NOTE(review): with multiple outcome triples the verdict reflects only
    # the last one iterated (graph iteration order) -- confirm whether any
    # failure should make the verdict False.
    for outcome in fixity_graph.objects(None, self.rdf.prefixes.premis.hasEventOutcome):
        if (outcome.toPython() == 'SUCCESS'):
            verdict = True
        else:
            verdict = False
    return {'verdict': verdict, 'premis_graph': fixity_graph}
Issues fixity check, return parsed graph Args: None Returns: (dict): ('verdict':(bool): verdict of fixity check, 'premis_graph':(rdflib.Graph): parsed PREMIS graph from check)
codesearchnet
def _add_value(self, field_name: str, value, provenance_path=None) -> bool: if not isinstance(value, list): value = [value] all_valid = True for x in value: valid = self._add_single_value(field_name, x, provenance_path=provenance_path) all_valid = all_valid and valid return all_valid
Helper function to add values to a knowledge graph Args: field_name: a field in the knowledge graph, assumed correct value: any Python type Returns: True if the value is compliant with the field schema, False otherwise
juraj-google-style
def _execute(self, command, params=None): if (not params): params = {} params['id'] = self._id return self._parent.execute(command, params)
Executes a command against the underlying HTML element. Args: command: The name of the command to _execute as a string. params: A dictionary of named parameters to send with the command. Returns: The command's JSON response loaded into a dictionary object.
codesearchnet
def read_var_bytes(self, max_size=sys.maxsize) -> bytes:
    """Read a variable-length byte string from the stream.

    Args:
        max_size (int): (Optional) maximum number of bytes to read.

    Returns:
        bytes: the data read, whose length is given by the leading
        variable-length integer.
    """
    size = self.read_var_int(max_size)
    return self.read_bytes(size)
Read a variable length of bytes from the stream. Args: max_size (int): (Optional) maximum number of bytes to read. Returns: bytes:
codesearchnet
def content_metadata_uploads(self, mirror=False):
    """Upload artifacts to S3, setting metadata on content_metadata paths.

    Directories listed in ``s3props['content_metadata']`` are uploaded in a
    second pass with their configured Content-Encoding; everything else is
    uploaded first without metadata.

    Args:
        mirror (bool): If true, uses a flat directory structure instead of
            nesting under a version.

    Returns:
        bool: True if uploaded.

    Raises:
        S3ArtifactNotFound: if a configured content path is empty.
    """
    excludes_str = ''
    includes_cmds = []
    cmd_base = self._get_upload_cmd(mirror=mirror)
    for content in self.s3props.get('content_metadata'):
        full_path = os.path.join(self.artifact_path, content['path'])
        if not os.listdir(full_path):
            raise S3ArtifactNotFound
        # Exclude this path from the plain upload; it gets its own pass.
        excludes_str += '--exclude "{}/*" '.format(content['path'])
        # NOTE(review): the trailing comma inside '--exclude "*",' looks
        # accidental -- confirm the aws CLI tolerates it.
        include_cmd = '{} --exclude "*", --include "{}/*"'.format(cmd_base, content['path'])
        include_cmd += ' --content-encoding {} --metadata-directive REPLACE'.format(content['content-encoding'])
        includes_cmds.append(include_cmd)
    exclude_cmd = '{} {}'.format(cmd_base, excludes_str)
    result = subprocess.run(exclude_cmd, check=True, shell=True, stdout=subprocess.PIPE)
    LOG.info("Uploaded files without metadata with command: %s", exclude_cmd)
    LOG.debug("Upload Command Output: %s", result.stdout)
    for include_cmd in includes_cmds:
        result = subprocess.run(include_cmd, check=True, shell=True, stdout=subprocess.PIPE)
        LOG.info("Uploaded files with metadata with command: %s", include_cmd)
        LOG.debug("Upload Command Output: %s", result.stdout)
    return True
Finds all specified encoded directories and uploads in multiple parts, setting metadata for objects. Args: mirror (bool): If true, uses a flat directory structure instead of nesting under a version. Returns: bool: True if uploaded
juraj-google-style
def _create_graph(structure_dict):
    """Build a pydot graph from the pipeline structure dict.

    Args:
        structure_dict (dict): dict returned by step.upstream_structure,
            with 'nodes' and 'edges' entries.

    Returns:
        graph (pydot.Dot): object representing the upstream pipeline
            structure (with regard to the current Step).
    """
    graph = pydot.Dot()
    for node_name in structure_dict['nodes']:
        graph.add_node(pydot.Node(node_name))
    for source, target in structure_dict['edges']:
        graph.add_edge(pydot.Edge(source, target))
    return graph
Creates pydot graph from the pipeline structure dict. Args: structure_dict (dict): dict returned by step.upstream_structure Returns: graph (pydot.Dot): object representing upstream pipeline structure (with regard to the current Step).
codesearchnet
def loop(coord, timer_interval_secs, target, args=None, kwargs=None):
    """Start a LooperThread that calls a function periodically.

    If `timer_interval_secs` is None the thread calls `target(args)`
    repeatedly. Otherwise `target(args)` is called every
    `timer_interval_secs` seconds. The thread terminates when a stop of the
    coordinator is requested.

    Args:
        coord: A Coordinator.
        timer_interval_secs: Number. Time boundaries at which to call
            `target`.
        target: A callable object.
        args: Optional arguments to pass to `target` when calling it.
        kwargs: Optional keyword arguments to pass to `target`.

    Returns:
        The started thread.
    """
    looper_thread = LooperThread(
        coord, timer_interval_secs, target=target, args=args, kwargs=kwargs)
    looper_thread.start()
    return looper_thread
Start a LooperThread that calls a function periodically. If `timer_interval_secs` is None the thread calls `target(args)` repeatedly. Otherwise `target(args)` is called every `timer_interval_secs` seconds. The thread terminates when a stop of the coordinator is requested. Args: coord: A Coordinator. timer_interval_secs: Number. Time boundaries at which to call `target`. target: A callable object. args: Optional arguments to pass to `target` when calling it. kwargs: Optional keyword arguments to pass to `target` when calling it. Returns: The started thread.
github-repos
def get(self, request, *args, **kwargs):
    """Django view GET handler.

    Adds the items of extra_context, crumbs, title, the suit flag and the
    dashboard grid to the template context.

    Args:
        request: Django's request object.
        *args: request args.
        **kwargs: request kwargs.

    Returns:
        response: render to response with the assembled context.
    """
    context = self.get_context_data(**kwargs)
    context.update(self.extra_context)
    context['crumbs'] = self.get_crumbs()
    context['title'] = self.title
    # Enable django-suit template tweaks only when the app is installed.
    context['suit'] = 'suit' in settings.INSTALLED_APPS
    if context.get('dashboard_grid', None) is None and self.grid:
        context['dashboard_grid'] = self.grid
    return self.render_to_response(context)
Django view get function. Add items of extra_context, crumbs and grid to context. Args: request (): Django's request object. *args (): request args. **kwargs (): request kwargs. Returns: response: render to response with context.
juraj-google-style
def add_extension_to_message(extension: message.Message, msg: message.Message) -> None:
    """Recursively parses extension and adds to message.

    Args:
        extension: The FHIR extension to serialize and add.
        msg: The message to add the extension onto.

    Raises:
        InvalidFhirError: In the event that a value is set on the extension,
            but the corresponding message field to copy it to is repeated
            (extension values are singular only).
    """
    desc = msg.DESCRIPTOR
    # Map each inlined-extension URL to its target field; 'id' is handled
    # separately below.
    fields_by_url = {get_inlined_extension_url(field): field for field in desc.fields if field.name != 'id'}
    id_field = desc.fields_by_name.get('id')
    if proto_utils.field_is_set(extension, id_field):
        proto_utils.set_value_at_field(msg, id_field, cast(Any, extension).id)
    if proto_utils.field_is_set(extension, 'value'):
        # A simple-value extension must map onto exactly one singular field.
        if len(fields_by_url) != 1:
            raise fhir_errors.InvalidFhirError(f'Expected a single field, found {len(fields_by_url)}; {desc.full_name} is an invalid extension type.')
        field = list(fields_by_url.items())[0][1]
        if proto_utils.field_is_repeated(field):
            raise fhir_errors.InvalidFhirError(f'Expected {field.full_name} to be a singular field. {desc.full_name} is an invalid extension type.')
        _add_extension_value_to_message(extension, msg, field)
        return
    # Otherwise recurse into the child extensions.
    child_extensions = proto_utils.get_value_at_field(extension, 'extension')
    for child_extension in child_extensions:
        field = fields_by_url.get(child_extension.url.value)
        if field is None:
            raise ValueError(f'Message of type: {desc.full_name} has no field with name: {child_extension.url.value}.')
        if proto_utils.field_is_set(child_extension, 'value'):
            _add_extension_value_to_message(child_extension, msg, field)
            continue
        if not proto_utils.field_is_repeated(field):
            # A singular field may be populated at most once, from at most
            # one nested extension.
            if proto_utils.field_is_set(msg, field):
                raise ValueError(f'Field: {field.full_name} is already set on message: {desc.full_name}.')
            if proto_utils.field_content_length(child_extension, 'extension') > 1:
                raise ValueError(f'Cardinality mismatch between field: {field.full_name} and extension: {desc.full_name}.')
        child_message = proto_utils.set_in_parent_or_add(msg, field)
        add_extension_to_message(child_extension, child_message)
Recursively parses extension and adds to message. Args: extension: The FHIR extension to serialize and add. msg: The message to add the extension onto Raises: InvalidFhirError: In the event that a value is set on the extension, but the corresponding message field to copy it to is repeated (extension values are singular only).
github-repos
def _as_document(self, dataset):
    """Converts a dataset to a document to be indexed by the FTS index.

    Args:
        dataset (orm.Dataset): dataset to convert.

    Returns:
        dict: document with keys vid/title/doc/keywords, matching
            BaseDatasetIndex._schema.
    """
    assert isinstance(dataset, Dataset)
    execute = object_session(dataset).connection().execute
    # NOTE(review): text() is called with no SQL — the query string appears to
    # have been lost; it is expected to select column descriptions for the
    # bound :dataset_vid. Confirm against the original source.
    query = text()
    # Flatten the query result rows into one whitespace-joined string.
    columns = u('\n').join(
        [u(' ').join(list(text_type(e) for e in t)) for t in execute(query, dataset_vid=str(dataset.identity.vid))])
    # Full-text body: title, summary, identity fields and the column dump.
    doc = '\n'.join([u('{}').format(x) for x in [dataset.config.metadata.about.title,
                                                 dataset.config.metadata.about.summary,
                                                 dataset.identity.id_,
                                                 dataset.identity.vid,
                                                 dataset.identity.source,
                                                 dataset.identity.name,
                                                 dataset.identity.vname,
                                                 columns]])
    parts = u('{}').format(dataset.identity.source).split('.')
    # All dotted suffixes and prefixes of the source, so partial domain
    # searches (e.g. 'gov' or 'example.gov') match.
    sources = (['.'.join(g) for g in [parts[-i:] for i in range(2, len(parts) + 1)]]
               + ['.'.join(g) for g in [parts[:i] for i in range(0, len(parts))]])

    def resum(g):
        # Summarize a GVid string; fall back to the raw value if unparsable.
        try:
            return str(GVid.parse(g).summarize())
        except (KeyError, ValueError):
            return g

    def as_list(value):
        # Coerce scalars/strings/iterables into a plain list (empty for falsy).
        if not value:
            return []
        if isinstance(value, string_types):
            lst = [value]
        else:
            try:
                lst = list(value)
            except TypeError:
                lst = [value]
        return lst

    about_time = as_list(dataset.config.metadata.about.time)
    about_grain = as_list(dataset.config.metadata.about.grain)

    keywords = (
        list(dataset.config.metadata.about.groups)
        + list(dataset.config.metadata.about.tags)
        + about_time
        + [resum(g) for g in about_grain]
        + sources)

    document = dict(
        vid=u('{}').format(dataset.identity.vid),
        title=u('{} {}').format(dataset.identity.name, dataset.config.metadata.about.title),
        doc=u('{}').format(doc),
        keywords=' '.join(u('{}').format(x) for x in keywords)
    )

    return document
Converts a dataset to a document to be indexed by the FTS index. Args: dataset (orm.Dataset): dataset to convert. Returns: dict whose structure matches BaseDatasetIndex._schema.
juraj-google-style
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
    """Analyze the pseudopotential files contained in directory dirname.

    Args:
        dirname: directory path.
        exclude_exts: iterable of file extensions that should be skipped
            (accepted with or without the leading dot).
        exclude_fnames: iterable of file names that should be skipped.

    Returns:
        List of pseudopotential objects (one per successfully parsed file).
        Parsed paths are appended to ``self._parsed_paths`` and failures to
        ``self._wrong_paths``.
    """
    # Normalize extensions into a fresh set instead of assigning back into
    # `exclude_exts`: the old code did item assignment, which crashes on the
    # default tuple argument and mutated the caller's list.
    excluded_exts = set()
    for ext in exclude_exts:
        ext = ext.strip()
        excluded_exts.add(ext if ext.startswith(".") else "." + ext)
    excluded_fnames = set(exclude_fnames)

    paths = []
    for fname in os.listdir(dirname):
        _, ext = os.path.splitext(fname)
        path = os.path.join(dirname, fname)
        if (ext in excluded_exts or fname in excluded_fnames
                or fname.startswith(".") or not os.path.isfile(path)):
            continue
        paths.append(path)

    pseudos = []
    for path in paths:
        # Parse failures are recorded in _wrong_paths, not propagated; catch
        # Exception rather than a bare except so KeyboardInterrupt escapes.
        try:
            pseudo = self.parse(path)
        except Exception:
            pseudo = None

        if pseudo is not None:
            pseudos.append(pseudo)
            # BUG FIX: extend(path) appended the path one character at a time.
            self._parsed_paths.append(path)
        else:
            self._wrong_paths.append(path)

    return pseudos
Analyze the files contained in directory dirname. Args: dirname: directory path exclude_exts: list of file extensions that should be skipped. exclude_fnames: list of file names that should be skipped. Returns: List of pseudopotential objects.
juraj-google-style
def as_vartype(vartype):
    """Cast various inputs to a valid vartype object.

    Args:
        vartype (:class:`.Vartype`/str/set): Variable type. Accepted input
            values:

            * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
            * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``

    Returns:
        :class:`.Vartype`: Either :class:`.Vartype.SPIN` or
        :class:`.Vartype.BINARY`.

    See also:
        :func:`~dimod.decorators.vartype_argument`
    """
    # Already a Vartype member: nothing to do.
    if isinstance(vartype, Vartype):
        return vartype

    try:
        # Strings look up by name; frozensets by value; anything else is
        # coerced through frozenset first.
        if isinstance(vartype, str):
            return Vartype[vartype]
        if isinstance(vartype, frozenset):
            return Vartype(vartype)
        return Vartype(frozenset(vartype))
    except (ValueError, KeyError):
        raise TypeError(("expected input vartype to be one of: "
                         "Vartype.SPIN, 'SPIN', {-1, 1}, "
                         "Vartype.BINARY, 'BINARY', or {0, 1}."))
Cast various inputs to a valid vartype object. Args: vartype (:class:`.Vartype`/str/set): Variable type. Accepted input values: * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}`` * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}`` Returns: :class:`.Vartype`: Either :class:`.Vartype.SPIN` or :class:`.Vartype.BINARY`. See also: :func:`~dimod.decorators.vartype_argument`
juraj-google-style
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Saves the tokenizer's vocabulary dictionary to `save_directory`.

    Args:
        save_directory (`str`):
            Path to an existing directory where the vocabulary file is
            written. If it is not a directory an error is logged and nothing
            is saved.
        filename_prefix (`Optional[str]`, *optional*):
            A prefix to add to the name of the saved vocabulary file.

    Returns:
        Tuple[str]: One-tuple with the path of the written vocabulary file
        (implicitly `None` when `save_directory` is not a directory).
    """
    if not os.path.isdir(save_directory):
        logger.error(f'Vocabulary path ({save_directory}) should be a directory')
        return

    prefix = filename_prefix + '-' if filename_prefix else ''
    vocab_path = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES['vocab'])

    with open(vocab_path, 'w') as vocab_file:
        vocab_file.write(json.dumps(self.encoder))

    return (vocab_path,)
Saves the tokenizer's vocabulary dictionary to the provided save_directory. Args: save_directory (`str`): A path to the directory where the vocabulary will be saved. It will be created if it doesn't exist. filename_prefix (`Optional[str]`, *optional*): A prefix to add to the names of the files saved by the tokenizer.
github-repos
def _as_variant_tensor(self):
    """Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.

    Returns:
        A scalar `tf.Tensor` of `tf.variant` type, which represents this
        dataset.

    Raises:
        NotImplementedError: Always; concrete subclasses must override.
    """
    error_message = f'{type(self)}.as_variant_tensor()'
    raise NotImplementedError(error_message)
Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset. Returns: A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
github-repos
def prepare_image_question_encoder(image_feat, question, hparams):
    """Prepare encoder inputs and attention biases.

    Args:
        image_feat: a Tensor of image features.
        question: a Tensor of question embeddings.
        hparams: run hyperparameters.

    Returns:
        encoder_input: a Tensor, bottom of encoder stack.
        encoder_self_attention_bias: a bias tensor for use in encoder
            self-attention.
        encoder_decoder_attention_bias: a bias tensor for use in
            encoder-decoder attention.
    """
    # The padding bias is derived from the concatenation of the raw
    # (pre-positional-signal) inputs.
    combined = tf.concat([image_feat, question], axis=1)
    ignore_padding = common_attention.attention_bias_ignore_padding(
        common_attention.embedding_to_padding(combined))

    # Positional information is added to the question only, per hparams.pos.
    if hparams.pos == "timing":
        question = common_attention.add_timing_signal_1d(question)
    elif hparams.pos == "emb":
        question = common_attention.add_positional_embedding(
            question, hparams.max_length, "inputs_positional_embedding", None)

    encoder_input = tf.concat([image_feat, question], axis=1)
    # Self-attention and encoder-decoder biases are identical here: both only
    # mask padding positions.
    return encoder_input, ignore_padding, ignore_padding
Prepare encoder. Args: image_feat: a Tensor. question: a Tensor. hparams: run hyperparameters Returns: encoder_input: a Tensor, bottom of encoder stack encoder_self_attention_bias: a bias tensor for use in encoder self-attention encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder attention
juraj-google-style
def load_region(adapter, case_id, hgnc_id=None, chrom=None, start=None, end=None):
    """Load all variants in a region defined by a HGNC id or coordinates.

    Args:
        adapter (MongoAdapter)
        case_id (str): Case id
        hgnc_id (int): If all variants from a gene should be uploaded
        chrom (str): If variants from coordinates should be uploaded
        start (int): Start position for region
        end (int): Stop position for region

    Raises:
        ValueError: If the gene (when `hgnc_id` is given) or the case does
            not exist in the database.
    """
    if hgnc_id:
        gene_obj = adapter.hgnc_gene(hgnc_id)
        if not gene_obj:
            # BUG FIX: the ValueError was previously constructed but never
            # raised, so a missing gene fell through to a TypeError below.
            raise ValueError("Gene {} does not exist in database".format(hgnc_id))
        # Gene coordinates override any explicitly supplied region.
        chrom = gene_obj['chromosome']
        start = gene_obj['start']
        end = gene_obj['end']

    case_obj = adapter.case(case_id=case_id)
    if not case_obj:
        raise ValueError("Case {} does not exist in database".format(case_id))

    # Clinical SNVs are always loaded for the region.
    log.info("Load clinical SNV variants for case: {0} region: chr {1}, start"
             " {2}, end {3}".format(case_obj['_id'], chrom, start, end))
    adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                          category='snv', chrom=chrom, start=start, end=end)

    # SV and STR categories are only loaded when the case has those VCFs.
    vcf_sv_file = case_obj['vcf_files'].get('vcf_sv')
    if vcf_sv_file:
        log.info("Load clinical SV variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                              category='sv', chrom=chrom, start=start, end=end)

    vcf_str_file = case_obj['vcf_files'].get('vcf_str')
    if vcf_str_file:
        log.info("Load clinical STR variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                              category='str', chrom=chrom, start=start, end=end)

    # Research variants are only loaded for cases flagged as research.
    if case_obj['is_research']:
        log.info("Load research SNV variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='research',
                              category='snv', chrom=chrom, start=start, end=end)

        vcf_sv_research = case_obj['vcf_files'].get('vcf_sv_research')
        if vcf_sv_research:
            log.info("Load research SV variants for case: {0} region: chr {1},"
                     " start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
            adapter.load_variants(case_obj=case_obj, variant_type='research',
                                  category='sv', chrom=chrom, start=start, end=end)
Load all variants in a region defined by a HGNC id Args: adapter (MongoAdapter) case_id (str): Case id hgnc_id (int): If all variants from a gene should be uploaded chrom (str): If variants from coordinates should be uploaded start (int): Start position for region end (int): Stop position for region
juraj-google-style
def _StopAnalysisProcesses(self, abort=False):
    """Stops the analysis processes.

    Args:
        abort (bool): True to indicate the stop is issued on abort.
    """
    logger.debug('Stopping analysis processes.')

    self._StopMonitoringProcesses()

    if abort:
        # Signal all the processes to abort before draining queues.
        self._AbortTerminate()

    if not self._use_zeromq:
        logger.debug('Emptying queues.')
        for event_queue in self._event_queues.values():
            event_queue.Empty()

    # Wake the processes so they are not blocked waiting for new queue items.
    for event_queue in self._event_queues.values():
        event_queue.PushItem(plaso_queue.QueueAbort(), block=False)

    # Try waiting for the processes to exit normally first.
    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)

    for event_queue in self._event_queues.values():
        event_queue.Close(abort=abort)

    if abort:
        # Kill any remaining processes outright.
        self._AbortKill()
    else:
        # Check if the processes are still alive and terminate them if needed,
        # then force-close the queues.
        self._AbortTerminate()
        self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)

        for event_queue in self._event_queues.values():
            event_queue.Close(abort=True)
Stops the analysis processes. Args: abort (bool): True to indicated the stop is issued on abort.
juraj-google-style
def get(self, uri):
    """Gets an index resource by URI.

    Args:
        uri: The resource URI, relative to this client's base URI.

    Returns:
        dict: The index resource.
    """
    return self._client.get(self.URI + uri)
Gets an index resource by URI. Args: uri: The resource URI. Returns: dict: The index resource.
juraj-google-style
def __remove_alias(type_):
    """Implementation detail: strip typedef aliases from a type.

    Args:
        type_ (type_t): type

    Returns:
        type_t: the underlying type with typedef aliases resolved (compound
        types have their base resolved in place).
    """
    is_typedef = (isinstance(type_, cpptypes.declarated_t)
                  and isinstance(type_.declaration, typedef.typedef_t))
    if is_typedef:
        # Follow the typedef chain recursively.
        return __remove_alias(type_.declaration.decl_type)
    if isinstance(type_, cpptypes.compound_t):
        # Pointers/references/arrays: resolve the pointee/base in place.
        type_.base = __remove_alias(type_.base)
    return type_
Implementation detail. Args: type_ (type_t): type Returns: type_t: the type associated to the inputted type
juraj-google-style
def get_fieldset_index(fieldsets, index_or_name):
    """Return the index of a fieldset in the ``fieldsets`` list.

    Args:
        fieldsets (list): The original ``fieldsets`` list.
        index_or_name (int or str): The name of the reference fieldset, or
            directly its numeric index.

    Returns:
        (int) The index of the fieldset in the ``fieldsets`` list.

    Raises:
        KeyError: If no fieldset with the given name exists.
    """
    # Numeric indexes pass straight through. ``six.integer_types`` is just
    # ``(int,)`` on Python 3, so the six dependency was dropped.
    if isinstance(index_or_name, int):
        return index_or_name
    for index, fieldset in enumerate(fieldsets):
        if fieldset[0] == index_or_name:
            return index
    raise KeyError("Key not found: '{}'.".format(index_or_name))
Return the index of a fieldset in the ``fieldsets`` list. Args: fieldsets (list): The original ``fieldsets`` list. index_or_name (int or str): The value of the reference element, or directly its numeric index. Returns: (int) The index of the fieldset in the ``fieldsets`` list.
juraj-google-style
def PushEvent(self, event):
    """Pushes an event onto the heap.

    Args:
        event (EventObject): event.
    """
    # Order primarily by timestamp, then description, then the attribute
    # string, keeping the event object itself as the payload.
    attribute_string = event.GetAttributeValuesString()
    entry = (event.timestamp, event.timestamp_desc, attribute_string, event)
    heapq.heappush(self._heap, entry)
Pushes an event onto the heap. Args: event (EventObject): event.
juraj-google-style
def execute(self, sensor_graph, scope_stack):
    """Execute this statement on the sensor_graph given the current scope tree.

    This adds a single node to the sensor graph with the trigger_streamer
    function as its processing function.

    Args:
        sensor_graph (SensorGraph): The sensor graph that we are building or
            modifying.
        scope_stack (list(Scope)): A stack of nested scopes that may influence
            how this statement allocates clocks or other stream resources.
    """
    parent = scope_stack[-1]
    alloc = parent.allocator

    # Output stream for the new node; attach=True ties it to the graph.
    output = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)

    # The enclosing scope supplies the stream and condition that trigger us.
    trigger_stream, trigger_cond = parent.trigger_chain()

    # Constant stream that carries the index of the streamer to trigger.
    streamer_const = alloc.allocate_stream(DataStream.ConstantType, attach=True)

    sensor_graph.add_node(u"({} {} && {} always) => {} using trigger_streamer".format(trigger_stream, trigger_cond, streamer_const, output))
    sensor_graph.add_constant(streamer_const, self.index)
Execute this statement on the sensor_graph given the current scope tree. This adds a single node to the sensor graph with the trigger_streamer function as is processing function. Args: sensor_graph (SensorGraph): The sensor graph that we are building or modifying scope_stack (list(Scope)): A stack of nested scopes that may influence how this statement allocates clocks or other stream resources.
juraj-google-style
def params_size(num_components, component_params_size, name=None):
    """Number of `params` needed to create a `MixtureSameFamily` distribution.

    Arguments:
        num_components: Number of component distributions in the mixture
            distribution.
        component_params_size: Number of parameters needed to create a single
            component distribution.
        name: The name to use for the op to compute the number of parameters
            (if such an op needs to be created).

    Returns:
        params_size: The number of parameters needed to create the mixture
        distribution.
    """
    with tf.compat.v1.name_scope(name, 'MixtureSameFamily_params_size',
                                 [num_components, component_params_size]):
        components = tf.convert_to_tensor(
            value=num_components, name='num_components', dtype_hint=tf.int32)
        per_component = tf.convert_to_tensor(
            value=component_params_size, name='component_params_size')
        # Prefer static (Python) values so the result is a plain int whenever
        # the inputs are statically known.
        components = dist_util.prefer_static_value(components)
        per_component = dist_util.prefer_static_value(per_component)
        # One mixture weight per component plus each component's own params.
        return components + (components * per_component)
Number of `params` needed to create a `MixtureSameFamily` distribution. Arguments: num_components: Number of component distributions in the mixture distribution. component_params_size: Number of parameters needed to create a single component distribution. name: The name to use for the op to compute the number of parameters (if such an op needs to be created). Returns: params_size: The number of parameters needed to create the mixture distribution.
codesearchnet
def arctan(self: EventSetOrNode) -> EventSetOrNode:
    """Calculates the inverse tangent of an
    [`EventSet`][temporian.EventSet]'s features.

    Can only be used on floating point features.

    Returns:
        EventSetOrNode with the inverse tangent of the input features.
    """
    # Imported locally to avoid a circular import with the operators package.
    from temporian.core.operators.unary import arctan as arctan_op

    return arctan_op(self)
Calculates the inverse tangent of an [`EventSet`][temporian.EventSet]'s features. Can only be used on floating point features. Example: ```python >>> a = tp.event_set( ... timestamps=[1, 2, 3, 4], ... features={"M": [0, 1.0, -1.0, 5.0]}, ... ) >>> a.arctan() indexes: ... timestamps: [1. 2. 3. 4.] 'M': [ 0. 0.7854 -0.7854 1.3734] ... ``` Returns: EventSetOrNode with inverse tangent of input features.
github-repos
def update_email_asset(self, asset_id, name, asset_type):
    """Updates an Email Asset by delegating to the generic asset updater.

    Args:
        asset_id: Identifier of the email asset to update.
        name: The name provided to the email asset.
        asset_type: The type provided to the email asset.
    """
    asset_kind = 'EMAIL'
    self.update_asset(asset_kind, asset_id, name, asset_type)
Updates an Email Asset. Args: asset_id: The id of the email asset to update. name: The name provided to the email asset. asset_type: The type provided to the email asset. Returns: None.
juraj-google-style
def add_handler(self, handler):
    """Register an additional capture handler.

    Args:
        handler: A dictionary of handler configuration for the handler that
            should be added. See :func:`__init__` for details on valid
            parameters. The dict is annotated in place with a logger and
            zeroed read counters before being registered.
    """
    handler.update(
        logger=self._get_logger(handler),
        reads=0,
        data_read=0,
    )
    self.capture_handlers.append(handler)
Add an additional handler Args: handler: A dictionary of handler configuration for the handler that should be added. See :func:`__init__` for details on valid parameters.
juraj-google-style
def _RunIpRoute(self, args=None, options=None):
    """Run a command with ip route and return the response.

    Args:
        args: list, the string ip route command args to execute.
        options: dict, the string parameters to append to the ip route
            command.

    Returns:
        string, the standard output from the ip route command execution, or
        the empty string on any failure.
    """
    command = ['ip', 'route'] + list(args or [])
    # Each options entry contributes "key value" to the command line.
    for option_pair in (options or {}).items():
        command.extend(option_pair)

    try:
        process = subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
    except OSError as e:
        self.logger.warning('Exception running %s. %s.', command, str(e))
        return ''

    if process.returncode:
        message = 'Non-zero exit status running %s. %s.'
        self.logger.warning(message, command, stderr.strip())
        return ''

    return stdout.decode('utf-8', 'replace')
Run a command with ip route and return the response. Args: args: list, the string ip route command args to execute. options: dict, the string parameters to append to the ip route command. Returns: string, the standard output from the ip route command execution.
juraj-google-style
def backward_propagation(parameters, cache, X, Y):
    """Implement backward propagation for the 2-layer tanh/sigmoid network.

    Arguments:
        parameters -- python dictionary containing our parameters ("W1", "W2")
        cache -- a dictionary containing "A1" and "A2" from the forward pass
        X -- input data of shape (2, number of examples)
        Y -- "true" labels vector of shape (1, number of examples)

    Returns:
        grads -- python dictionary with gradients dW1, db1, dW2, db2
    """
    m = X.shape[1]

    # W1 is looked up to preserve the original KeyError contract even though
    # the gradient formulas below do not need it.
    _ = parameters["W1"]
    W2 = parameters["W2"]
    A1 = cache["A1"]
    A2 = cache["A2"]

    # Output layer (sigmoid + cross-entropy collapses to A2 - Y).
    dZ2 = A2 - Y
    dW2 = 1.0 / m * np.dot(dZ2, A1.T)
    db2 = 1.0 / m * np.sum(dZ2, axis=1, keepdims=True)

    # Hidden layer; tanh derivative is 1 - A1**2.
    dZ1 = W2.T * dZ2 * (1 - np.power(A1, 2))
    dW1 = 1.0 / m * np.dot(dZ1, X.T)
    db1 = 1.0 / m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
Implement the backward propagation using the instructions above. Arguments: parameters -- python dictionary containing our parameters cache -- a dictionary containing "Z1", "A1", "Z2" and "A2". X -- input data of shape (2, number of examples) Y -- "true" labels vector of shape (1, number of examples) Returns: grads -- python dictionary containing your gradients with respect to different parameters
juraj-google-style
def cmd_list(options):
    """Gather data for instances matching args and call display func.

    Args:
        options (object): contains args and data from parser.
    """
    i_info, param_str = gather_data(options)
    if not i_info:
        print("No instances found with parameters: {}".format(param_str))
        return
    # Resolve AMI names before display, then hand off to the list renderer.
    awsc.get_all_aminames(i_info)
    list_instances(i_info, "Instance List - " + param_str + "\n")
Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser.
juraj-google-style
def numbafy(fn, args, compiler='jit', **nbkws):
    """Compile a string, sympy expression or symengine expression using numba.

    Args:
        fn: Symbolic expression as sympy/symengine expression or string.
        args (iterable): Symbolic arguments.
        compiler: String name or callable numba compiler.
        nbkws: Compiler keyword arguments (if none provided, smart defaults
            are used). For jit-style compilers the optional ``signature``
            keyword is used; for vectorizing compilers, ``signatures``.

    Returns:
        func: Compiled function.

    Warning:
        For vectorized functions, valid signatures are (almost always)
        required.
    """
    kwargs = {}
    if not isinstance(args, (tuple, list)):
        args = (args,)

    # Resolve a compiler given by name to the numba callable.
    if isinstance(compiler, six.string_types):
        compiler_ = getattr(nb, compiler, None)
        # BUG FIX: the original tested `compiler is None` (the string, never
        # None), so an unknown compiler name slipped through unreported.
        if compiler_ is None:
            raise AttributeError('No numba function with name {}.'.format(compiler))
        compiler = compiler_

    if compiler in (nb.jit, nb.njit, nb.autojit):
        kwargs.update(jitkwargs)
        sig = nbkws.pop('signature', None)
    else:
        kwargs.update(veckwargs)
        sig = nbkws.pop('signatures', None)
        if sig is None:
            warn("Vectorization without 'signatures' can lead to wrong results!")
    kwargs.update(nbkws)

    if isinstance(fn, sy.Expr):
        fn = sy.expand_func(fn)
    func = sy.lambdify(args, fn, modules='numpy')

    # The signature, when present, is passed positionally to the decorator
    # factory; on a RuntimeError (e.g. a caching failure) retry uncached.
    decorator_args = () if sig is None else (sig,)
    try:
        func = compiler(*decorator_args, **kwargs)(func)
    except RuntimeError:
        kwargs['cache'] = False
        func = compiler(*decorator_args, **kwargs)(func)
    return func
Compile a string, sympy expression or symengine expression using numba. Not all functions are supported by Python's numerical package (numpy). For difficult cases, valid Python code (as string) may be more suitable than symbolic expressions coming from sympy, symengine, etc. When compiling vectorized functions, include valid signatures (see `numba`_ documentation). Args: fn: Symbolic expression as sympy/symengine expression or string args (iterable): Symbolic arguments compiler: String name or callable numba compiler nbkws: Compiler keyword arguments (if none provided, smart defaults are used) Returns: func: Compiled function Warning: For vectorized functions, valid signatures are (almost always) required.
codesearchnet