| code | docstring | source |
|---|---|---|
def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'
) -> 'QubitOrder':
    if isinstance(val, collections.abc.Iterable):
return QubitOrder.explicit(val)
if isinstance(val, QubitOrder):
return val
    raise ValueError(
        "Don't know how to interpret <{}> as a QubitOrder.".format(val))
|
Converts a value into a QubitOrder.
Args:
    val: An iterable of qubits or a QubitOrder.
Returns:
    The QubitOrder implied by the value.
|
juraj-google-style
|
def get_all_prefixes(module_name):
parts = module_name.split('.')
name = parts[0]
out = [name]
for part in parts[1:]:
name = '.'.join([name, part])
out.append(name)
return out
|
Return all the prefixes of a module name.
e.g. x.y.z => x, x.y, x.y.z
Args:
module_name: module name
Returns:
List of prefixes
|
github-repos
|
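A quick self-contained check of the prefix expansion described above:

```python
assert get_all_prefixes('x.y.z') == ['x', 'x.y', 'x.y.z']
assert get_all_prefixes('os') == ['os']  # a single-part name yields itself
```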
def run_multiple_processes(args_list: List[List[str]],
die_on_failure: bool = True) -> None:
for procargs in args_list:
start_process(procargs)
wait_for_processes(die_on_failure=die_on_failure)
|
Fire up multiple processes, and wait for them to finish.
Args:
args_list: command arguments for each process
die_on_failure: see :func:`wait_for_processes`
|
juraj-google-style
|
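A minimal usage sketch, assuming `start_process` and `wait_for_processes` from the same module are in scope; the commands are illustrative only:

```python
# Launch two commands in parallel and block until both finish.
run_multiple_processes(
    args_list=[['echo', 'first'], ['echo', 'second']],
    die_on_failure=True,  # propagate failure if any child exits non-zero
)
```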
def _log_band_edge_information(bs, edge_data):
if bs.is_spin_polarized:
spins = edge_data['band_index'].keys()
b_indices = [(', '.join([str((i + 1)) for i in edge_data['band_index'][spin]]) + '({})'.format(spin.name.capitalize())) for spin in spins]
b_indices = ', '.join(b_indices)
else:
b_indices = ', '.join([str((i + 1)) for i in edge_data['band_index'][Spin.up]])
kpoint = edge_data['kpoint']
    kpt_str = '({k[0]:.3f}, {k[1]:.3f}, {k[2]:.3f})'  # assumed format template for fractional coordinates
    kpoint_str = kpt_str.format(k=kpoint.frac_coords)
k_indices = ', '.join(map(str, edge_data['kpoint_index']))
if kpoint.label:
k_loc = kpoint.label
else:
branch = bs.get_branch(edge_data['kpoint_index'][0])[0]
k_loc = 'between {}'.format(branch['name'])
logging.info(' Energy: {:.3f} eV'.format(edge_data['energy']))
logging.info(' k-point: {}'.format(kpoint_str))
logging.info(' k-point location: {}'.format(k_loc))
logging.info(' k-point indices: {}'.format(k_indices))
logging.info(' Band indices: {}'.format(b_indices))
|
Log data about the valence band maximum or conduction band minimum.
Args:
bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
The band structure.
edge_data (dict): The :obj:`dict` from ``bs.get_vbm()`` or
``bs.get_cbm()``
|
codesearchnet
|
def db_ws010c(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
            raise ValueError('value {} needs to be of type float for field `db_ws010c`'.format(value))
self._db_ws010c = value
|
Corresponds to IDD Field `db_ws010c`
Mean coincident dry-bulb temperature to wind speed corresponding to 1.0% cumulative frequency for coldest month
Args:
value (float): value for IDD Field `db_ws010c`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets, expected_rows, expected_cols):
rows = tensor_in_sizes[0]
cols = tensor_in_sizes[1]
t_rows = array_ops.tile([[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')
t_rows_4d = array_ops.transpose(array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3), [0, 2, 1, 3])
t_cols = array_ops.tile([[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')
t_cols_4d = array_ops.transpose(array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3), [0, 2, 1, 3])
t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])
glimpse_rows = array_ops.transpose(image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3])
glimpse_cols = array_ops.transpose(image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3])
with self.cached_session() as sess:
value_rows, value_cols = self.evaluate([glimpse_rows, glimpse_cols])
self.assertEqual(value_rows.shape[1], glimpse_sizes[0])
self.assertEqual(value_rows.shape[2], glimpse_sizes[1])
self.assertEqual(value_cols.shape[1], glimpse_sizes[0])
self.assertEqual(value_cols.shape[2], glimpse_sizes[1])
min_random_val = 0
max_random_val = max(rows, cols)
for i in range(glimpse_sizes[0]):
for j in range(glimpse_sizes[1]):
if expected_rows[i] is None or expected_cols[j] is None:
self.assertGreaterEqual(value_rows[0][i][j][0], min_random_val)
self.assertLessEqual(value_rows[0][i][j][0], max_random_val)
self.assertGreaterEqual(value_cols[0][i][j][0], min_random_val)
self.assertLessEqual(value_cols[0][i][j][0], max_random_val)
else:
self.assertEqual(value_rows[0][i][j][0], expected_rows[i])
self.assertEqual(value_cols[0][i][j][0], expected_cols[j])
|
Verifies the output values of the glimpse extraction kernel.
Args:
tensor_in_sizes: Input tensor dimensions in [input_rows, input_cols].
glimpse_sizes: Dimensions of the glimpse in [glimpse_rows, glimpse_cols].
offsets: Relative location of the center of the glimpse in the input
image expressed as [row_offset, col_offset].
expected_rows: A list containing the expected row numbers (None for
out of bound entries that are expected to be replaced by uniform
random entries in [0,1) ).
expected_cols: Same as expected_rows, but for column numbers.
|
github-repos
|
def ParseAutofillRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = ChromeAutofillEventData()
event_data.field_name = self._GetRowValue(query_hash, row, 'name')
event_data.value = self._GetRowValue(query_hash, row, 'value')
event_data.usage_count = self._GetRowValue(query_hash, row, 'count')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'date_created')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if (event_data.usage_count > 1):
timestamp = self._GetRowValue(query_hash, row, 'date_last_used')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_USED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses an autofill entry row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
codesearchnet
|
def load_subset_weights_from_hdf5_group(f):
weight_names = load_attributes_from_hdf5_group(f, 'weight_names')
return [np.asarray(f[weight_name]) for weight_name in weight_names]
|
Load layer weights of a model from hdf5.
Args:
f: A pointer to a HDF5 group.
Returns:
List of NumPy arrays of the weight values.
Raises:
ValueError: in case of mismatch between provided model
and weights file.
|
github-repos
|
def get_storage_pools(self, id_or_uri):
uri = self._client.build_uri(id_or_uri) + "/storage-pools"
return self._client.get(uri)
|
Gets a list of storage pools belonging to the storage system referred to by the
Path property {ID} parameter or URI.
Args:
    id_or_uri: Can be either the storage system ID (serial number) or the storage system URI.
Returns:
    list: Storage pools.
|
juraj-google-style
|
def update(self, token_id: int):
raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
|
Reads in a token and returns booleans that indicate the progress made by it. Unlike
`does_advance(self, token_id: int)`, this function will update the state of this object.
This isn't to test whether a certain token will advance the progress; it's to update the object's state as if
the token has been generated. This becomes important if token_id != desired token (refer to the else statement
in PhrasalConstraint).
Args:
token_id(`int`):
The id of a newly generated token in the beam search.
Return:
stepped(`bool`):
Whether this constraint has become one step closer to being fulfilled.
completed(`bool`):
Whether this constraint has been completely fulfilled by this token being generated.
reset (`bool`):
Whether this constraint has reset its progress by this token being generated.
|
github-repos
|
def update_shapes_dict_for_target_fn(target_fn, shapes_dict, call_spec, class_name):
if utils.is_default(target_fn):
return None
sig = inspect.signature(target_fn)
expected_names = []
for name, param in sig.parameters.items():
if param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY, param.KEYWORD_ONLY):
expected_names.append(name)
if len(expected_names) == 1:
key = expected_names[0]
values = tuple(shapes_dict.values())
if values:
input_shape = values[0]
else:
input_shape = None
return {key: input_shape}
kwargs = {}
for name in expected_names:
method_name = target_fn.__name__
error_preamble = f'For a `{method_name}()` method with more than one argument, all arguments should have a `_shape` suffix and match an argument from `call()`. E.g. `{method_name}(self, foo_shape, bar_shape)` '
if not name.endswith('_shape'):
raise ValueError(f"{error_preamble} For layer '{class_name}', Received `{method_name}()` argument `{name}`, which does not end in `_shape`.")
expected_call_arg = utils.removesuffix(name, '_shape')
if expected_call_arg not in call_spec.arguments_dict:
raise ValueError(f"{error_preamble} For layer '{class_name}', received `{method_name}()` argument `{name}`, but `call()` does not have argument `{expected_call_arg}`.")
if name in shapes_dict:
kwargs[name] = shapes_dict[name]
return kwargs
|
Updates a `shapes_dict` for `build()` or `compute_output_shape()`.
This function will align a dictionary of the shapes of all tensors
passed to `call` with the signatures of `build()` or
`compute_output_shape()`.
The alignment is as follows:
- If `build()` or `compute_output_shape()` accept only one argument,
forward the shape of the first positional argument from call without
checking any argument names.
- If `build()` or `compute_output_shape()` accept multiple arguments,
enforce that all argument names match a call argument name, e.g.
`foo_shape` would match call argument `foo`.
Returns:
An updated `shapes_dict` that can be used to invoke
`target_fn(**shapes_dict)`.
|
github-repos
|
def is_cpu(self):
return (self._device.get_info(cl.device_info.TYPE) == cl.device_type.CPU)
|
Check if the device associated with this environment is a CPU.
Returns:
    boolean: True if the device is a CPU, False otherwise.
|
codesearchnet
|
def save_issue_data_task(self, issue, task_id, namespace='open'):
issue_data = self.get_saved_issue_data(issue, namespace)
    if 'tasks' not in issue_data:
issue_data['tasks'] = [task_id]
elif task_id not in issue_data['tasks']:
issue_data['tasks'].append(task_id)
|
Saves an issue's data (tasks, etc.) to local data.
Args:
    issue:
        `int`. Github issue number.
    task_id:
        `int`. Asana task ID.
    namespace:
        `str`. Namespace for storing this issue.
|
juraj-google-style
|
def _decode(obj):
if obj is None:
return u''
if six.PY3 and isinstance(obj, six.binary_type):
return obj.decode('latin1')
elif six.PY3:
return str(obj)
elif isinstance(obj, six.text_type):
return obj
else:
return str(obj).decode('utf-8')
|
Decode an object to unicode.
Args:
obj (bytes or str or unicode or anything serializable): object to be decoded
Returns:
    The object decoded to unicode.
|
juraj-google-style
|
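A few illustrative calls under Python 3 (where `six.PY3` is true), one per branch:

```python
assert _decode(None) == u''
assert _decode(b'caf\xe9') == u'café'  # bytes are decoded as latin1 on Python 3
assert _decode(42) == u'42'            # anything else goes through str()
```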
def __init__(self, **connection_args):
super(DistributionServer, self).__init__(**connection_args)
self.connection["url"] = self.connection["jss"].base_url
|
Set up a connection to a distribution server.
Args:
connection_args: Dict, with required key:
jss: A JSS Object.
|
juraj-google-style
|
def _read_coord_h5(files, shapes, header, twod):
meshes = []
for h5file, shape in zip(files, shapes):
meshes.append({})
with h5py.File(h5file, 'r') as h5f:
for coord, mesh in h5f.items():
meshes[-1][coord] = mesh[()].reshape(shape).T
meshes[-1][coord] = _make_3d(meshes[-1][coord], twod)
header['ncs'] = _ncores(meshes, twod)
header['nts'] = list((meshes[0]['X'].shape[i] - 1) * header['ncs'][i]
for i in range(3))
header['nts'] = np.array([max(1, val) for val in header['nts']])
meshes = _conglomerate_meshes(meshes, header)
if np.any(meshes['Z'][:, :, 0] != 0):
header['x_mesh'] = np.copy(meshes['Y'])
header['y_mesh'] = np.copy(meshes['Z'])
header['z_mesh'] = np.copy(meshes['X'])
header['r_mesh'] = np.sqrt(header['x_mesh']**2 + header['y_mesh']**2 +
header['z_mesh']**2)
header['t_mesh'] = np.arccos(header['z_mesh'] / header['r_mesh'])
header['p_mesh'] = np.roll(
np.arctan2(header['y_mesh'], -header['x_mesh']) + np.pi, -1, 1)
header['e1_coord'] = header['t_mesh'][:, 0, 0]
header['e2_coord'] = header['p_mesh'][0, :, 0]
header['e3_coord'] = header['r_mesh'][0, 0, :]
else:
header['e1_coord'] = meshes['X'][:, 0, 0]
header['e2_coord'] = meshes['Y'][0, :, 0]
header['e3_coord'] = meshes['Z'][0, 0, :]
    header['aspect'] = (header['e1_coord'][-1] - header['e1_coord'][0],
                        header['e2_coord'][-1] - header['e2_coord'][0])
header['rcmb'] = header['e3_coord'][0]
if header['rcmb'] == 0:
header['rcmb'] = -1
else:
header['e3_coord'] = header['e3_coord'] - header['rcmb']
if twod is None or 'X' in twod:
header['e1_coord'] = header['e1_coord'][:-1]
if twod is None or 'Y' in twod:
header['e2_coord'] = header['e2_coord'][:-1]
header['e3_coord'] = header['e3_coord'][:-1]
|
Read all coord hdf5 files of a snapshot.
Args:
files (list of pathlib.Path): list of NodeCoordinates files of
a snapshot.
shapes (list of (int,int)): shape of mesh grids.
header (dict): geometry info.
twod (str): 'XZ', 'YZ' or None depending on what is relevant.
|
juraj-google-style
|
def hpo_terms(store, query=None, limit=None):
hpo_phenotypes = {}
if limit:
limit = int(limit)
hpo_phenotypes['phenotypes'] = list(store.hpo_terms(text=query, limit=limit))
return hpo_phenotypes
|
Retrieves a list of HPO terms from scout database
Args:
store (obj): an adapter to the scout database
query (str): the term to search in the database
limit (str): the number of desired results
Returns:
hpo_phenotypes (dict): the complete list of HPO objects stored in scout
|
codesearchnet
|
def pretty_print(input_word, anagrams, by_length=False):
scores = {}
if by_length:
noun = "tiles"
for word, score in anagrams:
try:
scores[len(word)].append("{0} ({1:d})".format(word, score))
except KeyError:
scores[len(word)] = ["{0} ({1:d})".format(word, score)]
else:
noun = "points"
for word, score in anagrams:
try:
scores[score].append(word)
except KeyError:
scores[score] = [word]
print("Anagrams for {0}{1}:".format(input_word, " (score)" * by_length))
if not valid_scrabble_word(input_word):
print("{0} is not possible in Scrabble.".format(input_word))
for key, value in sorted(scores.items(), reverse=True):
print("{0:d} {1}: {2}".format(key, noun, ", ".join(value)))
|
Prints the anagram results sorted by score to stdout.
Args:
input_word: the base word we searched on
anagrams: generator of (word, score) from anagrams_in_word
by_length: a boolean to declare printing by length instead of score
|
juraj-google-style
|
def default(self, obj):
from ..model import Model
from ..colors import Color
from .has_props import HasProps
if pd and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj, force_list=True)
elif isinstance(obj, np.ndarray):
return transform_array(obj, force_list=True)
elif isinstance(obj, collections.deque):
return list(map(self.default, obj))
elif isinstance(obj, Model):
return obj.ref
elif isinstance(obj, HasProps):
return obj.properties_with_values(include_defaults=False)
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
|
The required ``default`` method for ``JSONEncoder`` subclasses.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
|
juraj-google-style
|
def dump_orm_object_as_insert_sql(engine: Engine,
obj: object,
fileobj: TextIO) -> None:
insp = inspect(obj)
meta = MetaData(bind=engine)
table_name = insp.mapper.mapped_table.name
table = Table(table_name, meta, autoload=True)
query = select(table.columns)
for orm_pkcol in insp.mapper.primary_key:
core_pkcol = table.columns.get(orm_pkcol.name)
pkval = getattr(obj, orm_pkcol.name)
query = query.where(core_pkcol == pkval)
cursor = engine.execute(query)
row = cursor.fetchone()
row_dict = dict(row)
statement = table.insert(values=row_dict)
insert_str = get_literal_query(statement, bind=engine)
writeline_nl(fileobj, insert_str)
|
Takes a SQLAlchemy ORM object, and writes ``INSERT`` SQL to replicate it
to the output file-like object.
Args:
engine: SQLAlchemy :class:`Engine`
obj: SQLAlchemy ORM object to write
fileobj: file-like object to write to
|
juraj-google-style
|
def get_iterator_type(script_settings, subscripts={}):
if 'iterator_type' in script_settings:
if script_settings['iterator_type'] == 'Loop':
iterator_type = 'loop'
elif script_settings['iterator_type'] == 'Parameter Sweep':
iterator_type = 'sweep'
else:
raise TypeError('unknown iterator type')
else:
if 'sweep_param' in script_settings:
iterator_type = 'sweep'
elif 'num_loops' in script_settings:
iterator_type = 'loop'
else:
raise TypeError('unknown iterator type')
return iterator_type
|
Figures out the iterator type based on the script settings and (optionally) subscripts.
Args:
    script_settings: dict of script settings that may contain an 'iterator_type' entry.
    subscripts: dict of subscripts (currently unused).
Returns:
    str: Either 'loop' or 'sweep'.
|
juraj-google-style
|
def tf_retrieve_indices(self, indices):
states = dict()
for name in sorted(self.states_memory):
states[name] = tf.gather(params=self.states_memory[name], indices=indices)
internals = dict()
for name in sorted(self.internals_memory):
internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)
actions = dict()
for name in sorted(self.actions_memory):
actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)
terminal = tf.gather(params=self.terminal_memory, indices=indices)
reward = tf.gather(params=self.reward_memory, indices=indices)
if self.include_next_states:
assert util.rank(indices) == 1
next_indices = (indices + 1) % self.capacity
next_states = dict()
for name in sorted(self.states_memory):
next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)
next_internals = dict()
for name in sorted(self.internals_memory):
next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)
return dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals
)
else:
return dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
|
Fetches experiences for given indices.
Args:
indices: Index tensor
Returns: Batch of experiences
|
juraj-google-style
|
def _format_field_name(self, field_name) -> str:
field = self._get_model_field(field_name)
return self.qn(field.column)
|
Formats a field's name for usage in SQL.
Arguments:
field_name:
The field name to format.
Returns:
The specified field name formatted for
usage in SQL.
|
codesearchnet
|
def list_bucket(self, bucket):
self.response.write('Listbucket result:\n')
page_size = 1
stats = gcs.listbucket((bucket + '/foo'), max_keys=page_size)
while True:
count = 0
for stat in stats:
count += 1
self.response.write(repr(stat))
self.response.write('\n')
if ((count != page_size) or (count == 0)):
break
stats = gcs.listbucket((bucket + '/foo'), max_keys=page_size, marker=stat.filename)
|
Create several files and paginate through them.
Production apps should set page_size to a practical value.
Args:
bucket: bucket.
|
codesearchnet
|
def readCmd(cls, cmd):
args = shlex.split(cmd)
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(proc_stdout, proc_stderr) = proc.communicate(input=None)
return proc_stdout.decode()
|
Run a command and return its stdout as a str.
Args:
    cmd: string, the command line to run.
Returns:
    str: the command's output.
|
juraj-google-style
|
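A usage sketch, assuming `readCmd` is exposed as a classmethod on a host class (called `Shell` here purely for illustration):

```python
out = Shell.readCmd('echo hello')
print(out)  # 'hello\n' on POSIX systems
```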
def __init__(
self, knowledge_base, formatter_mediator, fields_filter=None,
preferred_encoding='utf-8'):
super(OutputMediator, self).__init__()
self._formatter_mediator = formatter_mediator
self._knowledge_base = knowledge_base
self._preferred_encoding = preferred_encoding
self._timezone = pytz.UTC
self.fields_filter = fields_filter
|
Initializes an output mediator.
Args:
knowledge_base (KnowledgeBase): knowledge base.
formatter_mediator (FormatterMediator): formatter mediator.
fields_filter (Optional[FilterObject]): filter object that indicates
which fields to output.
preferred_encoding (Optional[str]): preferred encoding to output.
|
juraj-google-style
|
def toregex(text, exact=False):
if isregex(text):
return text
escaped = re.escape(normalize_text(text))
if exact:
escaped = '\\A{}\\Z'.format(escaped)
return re.compile(escaped)
|
Returns a compiled regular expression for the given text.
Args:
text (str | RegexObject): The text to match.
exact (bool, optional): Whether the generated regular expression should match exact
strings. Defaults to False.
Returns:
RegexObject: A compiled regular expression that will match the text.
|
codesearchnet
|
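A usage sketch, assuming the module's `isregex` and `normalize_text` helpers behave as their names suggest (pass-through for compiled patterns, light text normalization otherwise):

```python
pattern = toregex('hello world', exact=True)
assert pattern.match('hello world')           # anchored with \A...\Z
assert pattern.match('hello world!') is None  # exact means no trailing text
```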
def concat_excel_reports(addresses, output_file_name, endpoint, report_type, retry, api_key, api_secret, files_path):
master_workbook = openpyxl.Workbook()
if ((api_key is not None) and (api_secret is not None)):
client = ApiClient(api_key, api_secret)
else:
client = ApiClient()
errors = []
for (index, addr) in enumerate(addresses):
print('Processing {}'.format(addr[0]))
result = _get_excel_report(client, endpoint, addr[0], addr[1], report_type, retry)
if (not result['success']):
print('Error retrieving report for {}'.format(addr[0]))
print(result['content'])
errors.append({'address': addr[0], 'message': result['content']})
continue
orig_wb = openpyxl.load_workbook(filename=io.BytesIO(result['content']))
_save_individual_file(orig_wb, files_path, addr[0])
for sheet_name in orig_wb.get_sheet_names():
if (sheet_name in master_workbook.get_sheet_names()):
master_ws = master_workbook.get_sheet_by_name(sheet_name)
else:
master_ws = master_workbook.create_sheet(sheet_name)
orig_rows = orig_wb.get_sheet_by_name(sheet_name).rows
if ((sheet_name == 'Summary') or (sheet_name == 'Chart Data')):
_process_non_standard_sheet(master_ws, orig_rows, addr, index)
continue
_process_standard_sheet(master_ws, orig_rows, addr, index)
master_workbook.remove(master_workbook.worksheets[0])
if (len(errors) > 0):
errors_sheet = master_workbook.create_sheet('Errors')
for (error_idx, error) in enumerate(errors):
errors_sheet.cell(row=(error_idx + 1), column=1, value=error['address'])
errors_sheet.cell(row=(error_idx + 1), column=2, value=error['message'])
adjust_column_width_workbook(master_workbook)
output_file_path = os.path.join(files_path, output_file_name)
master_workbook.save(output_file_path)
print('Saved output to {}'.format(output_file_path))
|
Creates an Excel file made up of combining the Value Report or Rental Report Excel
output for the provided addresses.
Args:
addresses: A list of (address, zipcode) tuples
output_file_name: A file name for the Excel output
endpoint: One of 'value_report' or 'rental_report'
report_type: One of 'full' or 'summary'
retry: optional boolean to retry if rate limit is reached
api_key: optional API Key
api_secret: optional API Secret
files_path: Path to save individual files. If None, don't save files
|
codesearchnet
|
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
if (IsBlankLine(line) and (not nesting_state.InNamespaceBody()) and (not nesting_state.InExternC())):
elided = clean_lines.elided
prev_line = elided[(linenum - 1)]
prevbrace = prev_line.rfind('{')
if ((prevbrace != (- 1)) and (prev_line[prevbrace:].find('}') == (- 1))):
exception = False
if Match(' {6}\\w', prev_line):
search_position = (linenum - 2)
while ((search_position >= 0) and Match(' {6}\\w', elided[search_position])):
search_position -= 1
exception = ((search_position >= 0) and (elided[search_position][:5] == ' :'))
else:
exception = (Match(' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)', prev_line) or Match(' {4}:', prev_line))
if (not exception):
error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block should be deleted.')
if ((linenum + 1) < clean_lines.NumLines()):
next_line = raw[(linenum + 1)]
if (next_line and Match('\\s*}', next_line) and (next_line.find('} else ') == (- 1))):
error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block should be deleted.')
matched = Match('\\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3, ('Do not leave a blank line after "%s:"' % matched.group(1)))
next_line_start = 0
if ((linenum + 1) < clean_lines.NumLines()):
next_line = raw[(linenum + 1)]
next_line_start = (len(next_line) - len(next_line.lstrip()))
CheckComment(line, filename, linenum, next_line_start, error)
line = clean_lines.elided[linenum]
if (Search('\\w\\s+\\[', line) and (not Search('(?:delete|return)\\s+\\[', line))):
error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [')
if (Search('for *\\(.*[^:]:[^: ]', line) or Search('for *\\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop')
|
Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
|
codesearchnet
|
def convert_optional_traversals_to_compound_match_query(match_query, complex_optional_roots, location_to_optional_roots):
tree = construct_optional_traversal_tree(complex_optional_roots, location_to_optional_roots)
rooted_optional_root_location_subsets = tree.get_all_rooted_subtrees_as_lists()
omitted_location_subsets = [(set(complex_optional_roots) - set(subset)) for subset in rooted_optional_root_location_subsets]
sorted_omitted_location_subsets = sorted(omitted_location_subsets)
compound_match_traversals = []
for omitted_locations in reversed(sorted_omitted_location_subsets):
new_match_traversals = []
for match_traversal in match_query.match_traversals:
location = match_traversal[0].as_block.location
optional_root_locations_stack = location_to_optional_roots.get(location, None)
if (optional_root_locations_stack is not None):
optional_root_location = optional_root_locations_stack[(- 1)]
else:
optional_root_location = None
if ((optional_root_location is None) or (optional_root_location not in omitted_locations)):
new_match_traversal = _prune_traverse_using_omitted_locations(match_traversal, set(omitted_locations), complex_optional_roots, location_to_optional_roots)
new_match_traversals.append(new_match_traversal)
else:
pass
compound_match_traversals.append(new_match_traversals)
match_queries = [MatchQuery(match_traversals=match_traversals, folds=match_query.folds, output_block=match_query.output_block, where_block=match_query.where_block) for match_traversals in compound_match_traversals]
return CompoundMatchQuery(match_queries=match_queries)
|
Return 2^n distinct MatchQuery objects in a CompoundMatchQuery.
Given a MatchQuery containing `n` optional traverses that expand vertex fields,
construct `2^n` different MatchQuery objects:
one for each possible subset of optional edges that can be followed.
For each edge `e` in a subset of optional edges chosen to be omitted,
discard all traversals following `e`, and add filters specifying that `e` *does not exist*.
Args:
match_query: MatchQuery object containing n `@optional` scopes which expand vertex fields
complex_optional_roots: list of @optional locations (location preceding an @optional
traverse) that expand vertex fields within
location_to_optional_roots: dict mapping from location -> optional_roots where location is
within some number of @optionals and optional_roots is a list
of optional root locations preceding the successive @optional
scopes within which the location resides
Returns:
CompoundMatchQuery object containing 2^n MatchQuery objects,
one for each possible subset of the n optional edges being followed
|
codesearchnet
|
def match(self, expected, actual, assert_items_equal=False):
if isinstance(expected, np.ndarray):
expected = expected.tolist()
if isinstance(actual, np.ndarray):
actual = actual.tolist()
self.assertEqual(type(expected), type(actual))
if nest.is_nested(expected):
self.assertEqual(len(expected), len(actual))
if isinstance(expected, dict):
for key1, key2 in zip(sorted(expected), sorted(actual)):
self.assertEqual(key1, key2)
self.match(expected[key1], actual[key2])
elif assert_items_equal:
for item1, item2 in zip(sorted(expected), sorted(actual)):
self.match(item1, item2)
else:
for item1, item2 in zip(expected, actual):
self.match(item1, item2)
elif isinstance(expected, sparse_tensor.SparseTensorValue):
self.match((expected.indices, expected.values, expected.dense_shape), (actual.indices, actual.values, actual.dense_shape))
elif isinstance(expected, ragged_tensor_value.RaggedTensorValue):
self.match((expected.values, expected.row_splits), (actual.values, actual.row_splits))
else:
self.assertEqual(expected, actual)
|
Matches nested structures.
Recursively matches shape and values of `expected` and `actual`.
Handles scalars, numpy arrays and other python sequence containers
e.g. list, dict, as well as SparseTensorValue and RaggedTensorValue.
Args:
expected: Nested structure 1.
actual: Nested structure 2.
assert_items_equal: Tests the output has the expected elements regardless
of order.
Raises:
AssertionError if matching fails.
|
github-repos
|
def add_child(self, key, value):
if type(value) in (list, tuple, dict):
if type(value)==dict:
for k in value.keys():
self.add_child(k, value[k])
return
i = 0
for child in value:
self.add_child(key[i], child)
i = i + 1
return
if hasattr(value, 'attributes'):
value.attributes['data-parent-widget'] = self.identifier
value._parent = self
if key in self.children:
self._render_children_list.remove(key)
self._render_children_list.append(key)
self.children[key] = value
|
Adds a child to the Tag
To retrieve the child call get_child or access to the Tag.children[key] dictionary.
Args:
key (str): Unique child's identifier, or iterable of keys
value (Tag, str): can be a Tag, an iterable of Tag or a str. In case of iterable
of Tag is a dict, each item's key is set as 'key' param
|
juraj-google-style
|
def moves_from_last_n_games(self, n, moves, shuffle,
column_family, column):
self.wait_for_fresh_games()
latest_game = self.latest_game_number
utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game))
if latest_game == 0:
raise ValueError('Cannot find a latest game in the table')
start = int(max(0, latest_game - n))
ds = self.moves_from_games(start, latest_game, moves, shuffle,
column_family, column)
return ds
|
Randomly choose a given number of moves from the last n games.
Args:
n: number of games at the end of this GameQueue to source.
moves: number of moves to be sampled from `n` games.
shuffle: if True, shuffle the selected moves.
column_family: name of the column family containing move examples.
column: name of the column containing move examples.
Returns:
a dataset containing the selected moves.
|
juraj-google-style
|
def interpolate_graph(message, graph):
    parsed_message, _, node_tags = parse_message(message)
error_message = ['Graph execution error:', '']
for tag in node_tags:
try:
op = graph.get_operation_by_name(tag.name)
except KeyError:
continue
else:
error_message.append(_build_node_error_message(op))
    error_message.append(parsed_message.strip())
return '\n'.join(error_message)
|
Interpolates an error message.
The error message can contain tags of form `{{node_type node_name}}`
which will be parsed to identify the tf.Graph and op. If the op contains
traceback, the traceback will be attached to the error message.
Args:
message: A string to interpolate.
graph: ops.Graph object containing all nodes referenced in the error
message.
Returns:
The error message string with node definition traceback.
|
github-repos
|
def prefixlen_to_mask(prefixlen):
prefixlen = (prefixlen or '32')
addr = ('0.0.0.0/%s' % prefixlen)
return str(netaddr.IPNetwork(addr).netmask)
|
Converts a prefix length to a dotted decimal subnet mask
Args:
prefixlen (str): The prefix length value to convert
Returns:
    str: The subnet mask as a dotted decimal string
|
codesearchnet
|
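Example conversions (requires the `netaddr` package):

```python
assert prefixlen_to_mask('24') == '255.255.255.0'
assert prefixlen_to_mask(None) == '255.255.255.255'  # falls back to /32
```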
def fit(dataset_train: Dataset, dataset_val: typing.Optional[Dataset], features: typing.List[str], iters: int, weights_filename: str, log_filename: str, out_span: int) -> jax.Array:
with open(weights_filename, 'w') as f:
f.write('')
with open(log_filename, 'w') as f:
f.write('iter\ttrain_accuracy\ttrain_precision\ttrain_recall\ttrain_fscore')
if dataset_val:
f.write('\ttest_accuracy\ttest_precision\ttest_recall\ttest_fscore')
f.write('\n')
print('Outputting learned weights to %s ...' % weights_filename)
M = len(features)
scores = jnp.zeros(M)
feature_score_buffer: typing.List[typing.Tuple[str, float]] = []
N_train = dataset_train.Y.shape[0]
N_test = dataset_val.Y.shape[0] if dataset_val else 0
Y_train = dataset_train.Y > 0
Y_test = dataset_val.Y > 0 if dataset_val else None
w = jnp.abs(dataset_train.Y) / jnp.sum(jnp.abs(dataset_train.Y))
def output_progress(t: int) -> None:
with open(weights_filename, 'a') as f:
f.write('\n'.join(('%s\t%.6f' % p for p in feature_score_buffer)) + '\n')
feature_score_buffer.clear()
print('=== %s ===' % t)
print()
with open(log_filename, 'a') as f:
pred_train = pred(scores, dataset_train.X_rows, dataset_train.X_cols, N_train)
metrics_train = get_metrics(pred_train, Y_train)
print('train accuracy:\t%.5f' % metrics_train.accuracy)
print('train prec.:\t%.5f' % metrics_train.precision)
print('train recall:\t%.5f' % metrics_train.recall)
print('train fscore:\t%.5f' % metrics_train.fscore)
print()
f.write('%d\t%.5f\t%.5f\t%.5f\t%.5f' % (t, metrics_train.accuracy, metrics_train.precision, metrics_train.recall, metrics_train.fscore))
if dataset_val:
pred_test = pred(scores, dataset_val.X_rows, dataset_val.X_cols, N_test)
metrics_test = get_metrics(pred_test, Y_test)
print('test accuracy:\t%.5f' % metrics_test.accuracy)
print('test prec.:\t%.5f' % metrics_test.precision)
print('test recall:\t%.5f' % metrics_test.recall)
print('test fscore:\t%.5f' % metrics_test.fscore)
print()
f.write('\t%.5f\t%.5f\t%.5f\t%.5f' % (metrics_test.accuracy, metrics_test.precision, metrics_test.recall, metrics_test.fscore))
f.write('\n')
for t in range(iters):
w, scores, best_feature_index, score = update(w, scores, dataset_train.X_rows, dataset_train.X_cols, Y_train)
w.block_until_ready()
feature = features[best_feature_index]
feature_score_buffer.append((feature, score))
if (t + 1) % out_span == 0:
output_progress(t + 1)
if len(feature_score_buffer) > 0:
output_progress(t + 1)
return scores
|
Trains an AdaBoost binary classifier.
Args:
dataset_train (Dataset): A training dataset.
dataset_val (Optional[Dataset]): A validation dataset.
features (List[str]): Features, which correspond to the columns of entries.
iters (int): A number of training iterations.
weights_filename (str): A file path to write the learned weights.
log_filename (str): A file path to log the accuracy along with training.
out_span (int): Iteration span to output metrics and weights.
Returns:
scores (jax.Array): The contribution scores.
|
github-repos
|
def __init__(self, fold_scope_location):
super(FoldCountContextField, self).__init__(fold_scope_location)
self.fold_scope_location = fold_scope_location
self.validate()
|
Construct a new FoldCountContextField object for this fold.
Args:
fold_scope_location: FoldScopeLocation specifying the fold whose size is being output.
Returns:
new FoldCountContextField object
|
juraj-google-style
|
def _get_hash(self):
if (self.optionals and self.optionals.ISBN):
isbn = self.optionals.ISBN.replace('-', '')
if (len(isbn) <= 10):
return ('97880' + isbn)
return isbn
if (self.optionals and self.optionals.EAN):
return self.optionals.EAN
return (self.title + ','.join(map((lambda x: x.name), self.authors)))
|
Create hash of the class.
Hash should be unique for given ebook, so ISBN is main component of the
hash if provided.
Returns:
str: Hash.
|
codesearchnet
|
def type_check(self, instance):
raise NotImplementedError
|
Determines if the type of 'instance' satisfies this type constraint.
Args:
instance: An instance of a Python object.
Raises:
:class:`TypeError`: The passed **instance** doesn't satisfy
this :class:`TypeConstraint`. Subclasses of
:class:`TypeConstraint` are free to raise any of the subclasses of
:class:`TypeError` defined above, depending on
the manner of the type hint error.
All :class:`TypeConstraint` sub-classes must define this method in order
for the class object to be created.
|
github-repos
|
def _step(time, output_ta_t, prev_output, *states):
current_input = tuple((ta[time] for ta in input_ta))
current_input = tree.pack_sequence_as(inputs, current_input)
mask_t = masking_fn(time)
output, new_states = step_function(current_input, tuple(states) + tuple(constants))
flat_output = tree.flatten(output)
flat_mask_output = flat_zero_output if zero_output_for_mask else tree.flatten(prev_output)
flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)
flat_state = tree.flatten(states)
flat_new_state = tree.flatten(new_states)
flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)
new_states = tree.pack_sequence_as(new_states, flat_final_state)
ta_index_to_write = time if return_all_outputs else 0
for ta, out in zip(output_ta_t, flat_new_output):
ta[ta_index_to_write] = out
return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)
|
RNN step function.
Args:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
|
github-repos
|
def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):
identifier_mappings = {}
for esedb_record in esedb_table.records:
if parser_mediator.abort:
break
identifier, mapped_value = self._ParseIdentifierMappingRecord(
parser_mediator, esedb_table.name, esedb_record)
if identifier is None or mapped_value is None:
continue
if identifier in identifier_mappings:
parser_mediator.ProduceExtractionWarning(
'identifier: {0:d} already exists in mappings.'.format(identifier))
continue
identifier_mappings[identifier] = mapped_value
return identifier_mappings
|
Extracts identifier mappings from the SruDbIdMapTable table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
esedb_table (pyesedb.table): table.
Returns:
dict[int, str]: mapping of numeric identifiers to their string
representation.
|
juraj-google-style
|
def generator_container(generator_function):
@functools.wraps(generator_function)
def generator_container_wrapper(*args, **kwargs):
return GeneratorContainer(generator_function, *args, **kwargs)
return generator_container_wrapper
|
Function Decorator: Containerize calls to a generator function.
Args:
generator_function(func): The generator function being containerized.
Returns:
func: A wrapper function that containerizes the calls to the generator
function.
|
juraj-google-style
|
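A decoration sketch, assuming `GeneratorContainer` from the surrounding module wraps the generator:

```python
@generator_container
def count_up(limit):
    for i in range(limit):
        yield i

container = count_up(3)  # a GeneratorContainer, not a raw generator
```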
def error_print(msg, color=colorama.Fore.RED, file=sys.stderr):
if CLI_QUIET:
return
file.write('{sep}{bright}{color}Error: {normal}{msg}{sep}{reset}'.format(
sep=_linesep_for_file(file), bright=colorama.Style.BRIGHT, color=color,
normal=colorama.Style.NORMAL, msg=msg, reset=colorama.Style.RESET_ALL))
file.flush()
|
Print the error message to the file in the specified color.
Args:
msg: The error message to be printed.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together here, but note that style
strings will not be applied.
    file: A file object to which the error text will be written. Intended
        for use with CLI output file objects, specifically sys.stderr.
|
juraj-google-style
|
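A usage sketch (requires `colorama`); the module-level `CLI_QUIET` flag must be falsy for anything to be printed:

```python
error_print('config file not found')  # bright red, written to sys.stderr
error_print('deprecated flag used', color=colorama.Fore.YELLOW)
```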
def initial_value_of_masked_time_series(time_series_tensor, broadcast_mask):
num_timesteps = tf.shape(input=time_series_tensor)[(- 1)]
unmasked_negindices = (tf.cast((~ broadcast_mask), tf.int32) * tf.range(num_timesteps, 0, (- 1)))
first_unmasked_indices = (num_timesteps - tf.reduce_max(input_tensor=unmasked_negindices, axis=(- 1)))
if (first_unmasked_indices.shape.ndims is None):
        raise NotImplementedError('Cannot compute initial values of a masked time series with dynamic rank.')
return tf.squeeze(tf.compat.v1.batch_gather(params=time_series_tensor, indices=first_unmasked_indices[(..., tf.newaxis)]), axis=(- 1))
|
Get the first unmasked entry of each time series in the batch.
Args:
time_series_tensor: float `Tensor` of shape [..., num_timesteps].
broadcast_mask: bool `Tensor` of same shape as `time_series`.
|
codesearchnet
|
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')
decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
rngs = {}
if dropout_rng is not None:
rngs['dropout'] = dropout_rng
inputs = {'params': params or self.params}
if past_key_values:
inputs['cache'] = past_key_values
mutable = ['cache']
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = module.model.variables['params']['shared']['embedding']
lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)
else:
lm_logits = module.lm_head(hidden_states)
lm_logits += module.final_logits_bias
return (lm_logits, outputs)
outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)
if past_key_values is None:
lm_logits, decoder_outputs = outputs
else:
(lm_logits, decoder_outputs), past = outputs
if return_dict:
outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)
else:
outputs = (lm_logits,) + decoder_outputs[1:]
if past_key_values is not None and return_dict:
outputs['past_key_values'] = unfreeze(past['cache'])
return outputs
elif past_key_values is not None and (not return_dict):
outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]
return outputs
|
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
>>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
```
|
github-repos
|
def update_instance(self, data):
for (key, val) in iteritems(data):
if (not hasattr(self, key)):
raise AttributeError('No field named {key} for model {model}'.format(key=key, model=self.__class__.__name__))
setattr(self, key, val)
self.save()
return self
|
Update a single record by id with the provided data.
Args:
data (dict): The new data to update the record with.
Returns:
self: This is an instance of itself with the updated data.
Raises:
AttributeError: This is raised if a key in the ``data`` isn't
a field on the model.
|
codesearchnet
|
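A usage sketch against a hypothetical `User` model with a `name` field; unknown keys raise `AttributeError`:

```python
user = User.query.get(1)               # hypothetical ORM lookup
user.update_instance({'name': 'Ada'})  # sets the field and calls save()
user.update_instance({'bogus': 1})     # raises AttributeError
```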
def _list(self, dir_or_prefix):
if not self.exists(dir_or_prefix):
return
def list_files(root):
for dirpath, _, files in os.walk(root):
for filename in files:
yield self.join(dirpath, filename)
try:
for f in list_files(dir_or_prefix):
try:
yield FileMetadata(f, os.path.getsize(f), os.path.getmtime(f))
except OSError:
pass
except Exception as e:
raise BeamIOError('List operation failed', {dir_or_prefix: e})
|
List files in a location.
Listing is non-recursive, for filesystems that support directories.
Args:
dir_or_prefix: (string) A directory or location prefix (for filesystems
that don't have directories).
Returns:
Generator of ``FileMetadata`` objects.
Raises:
``BeamIOError``: if listing fails, but not if no files were found.
|
github-repos
|
def unravel_sections(section_data):
sections = []
    for section_type, subsection_list in section_data.items():
        for section in subsection_list:
            section['sectionType'] = section_type
            sections.append(section)
return sections
|
Unravels section type dictionary into flat list of sections with
section type set as an attribute.
Args:
section_data(dict): Data return from py:method::get_sections
Returns:
list: Flat list of sections with ``sectionType`` set to
type (i.e. recitation, lecture, etc)
|
juraj-google-style
|
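A worked example of the flattening (insertion order is preserved on Python 3.7+):

```python
section_data = {
    'lecture': [{'room': '10-250'}],
    'recitation': [{'room': '2-136'}, {'room': '4-145'}],
}
unravel_sections(section_data)
# [{'room': '10-250', 'sectionType': 'lecture'},
#  {'room': '2-136', 'sectionType': 'recitation'},
#  {'room': '4-145', 'sectionType': 'recitation'}]
```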
def _UnserializableObjectFallback(self, obj):
if isinstance(obj, libpython.PyInstanceObjectPtr):
in_class = obj.pyop_field('in_class')
result_dict = in_class.pyop_field('cl_dict').proxyval(set())
instanceproxy = obj.proxyval(set())
result_dict.update(instanceproxy.attrdict)
result_dict['__pyringe_type_name__'] = instanceproxy.cl_name
result_dict['__pyringe_address__'] = instanceproxy.address
return result_dict
if isinstance(obj, libpython.HeapTypeObjectPtr):
try:
type_ptr = obj.field('ob_type')
tp_dict = type_ptr.cast(GdbCache.TYPE)['tp_dict'].cast(GdbCache.DICT)
result_dict = libpython.PyDictObjectPtr(tp_dict).proxyval(set())
except gdb.error:
result_dict = {}
try:
result_dict.update(obj.get_attr_dict().proxyval(set()))
result_dict['__pyringe_type_name__'] = obj.safe_tp_name()
result_dict['__pyringe_address__'] = long(obj._gdbval)
return result_dict
except TypeError:
pass
try:
proxy = obj.proxyval(set())
if isinstance(proxy, dict):
return {str(key): val for (key, val) in proxy.iteritems()}
return proxy
except AttributeError:
return str(obj)
|
Handles sanitizing of unserializable objects for Json.
For instances of heap types, we take the class dict, augment it with the
instance's __dict__, tag it and transmit it over to the RPC client to be
reconstructed there. (Works with both old and new style classes)
Args:
obj: The object to Json-serialize
Returns:
A Json-serializable version of the parameter
|
codesearchnet
|
def AddDescriptor(self, desc):
if not isinstance(desc, descriptor.Descriptor):
raise TypeError('Expected instance of descriptor.Descriptor.')
self._descriptors[desc.full_name] = desc
self._AddFileDescriptor(desc.file)
|
Adds a Descriptor to the pool, non-recursively.
If the Descriptor contains nested messages or enums, the caller must
explicitly register them. This method also registers the FileDescriptor
associated with the message.
Args:
desc: A Descriptor.
|
juraj-google-style
|
def __init__(self, **options):
self._def = {}
for opt_name, opt_meta in options.items():
if _is_valid(opt_name):
self._def[opt_name] = opt_meta
self[opt_name] = opt_meta.default
else:
raise error.OptionError(opt_name)
|
Initialization of instances.
Args:
options (:class:`ConfOpt`): option metadata. The name of each
*option* is the name of the keyword argument passed on to this
function. Option names should be valid identifiers, otherwise
an :class:`~loam.error.OptionError` is raised.
|
juraj-google-style
|
def find_synonymous_field(field, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, root_preference=1.02):
fields = (util.listify(field) + list(synonyms(field)))
model = get_model(model, app)
available_field_names = model._meta.get_all_field_names()
(best_match, best_ratio) = (None, None)
for (i, field_name) in enumerate(fields):
match = fuzzy.extractOne(str(field_name), available_field_names)
if (match and (match[1] >= score_cutoff)):
if ((not best_match) or (match[1] > (root_preference * best_ratio))):
(best_match, best_ratio) = match
return best_match
|
Use a dictionary of synonyms and fuzzy string matching to find a similarly named field
Returns:
A single model field name (string)
Examples:
>>> find_synonymous_field('date', model='WikiItem')
'date_time'
>>> find_synonymous_field('time', model='WikiItem')
'date_time'
|
codesearchnet
|
def kill_reporter(self, check_alive=True):
if PY3:
self._kill_process_type(ray_constants.PROCESS_TYPE_REPORTER, check_alive=check_alive)
|
Kill the reporter.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
|
codesearchnet
|
def set_expected_update_frequency(self, update_frequency):
try:
int(update_frequency)
except ValueError:
update_frequency = Dataset.transform_update_frequency(update_frequency)
if (not update_frequency):
raise HDXError('Invalid update frequency supplied!')
self.data['data_update_frequency'] = update_frequency
|
Set expected update frequency
Args:
update_frequency (str): Update frequency
Returns:
None
|
codesearchnet
|
def to_service(self, service, version):
service_url = self._service_locator.get_service_url(service, version)
return self.__copy_and_set('service_url', self.__strip_trailing_slashes(service_url))
|
Sets the service name and version the request should target
Args:
service (str): The name of the service as displayed in the services.json file
version (str): The version of the service as displayed in the services.json file
Returns:
The request builder instance in order to chain calls
|
juraj-google-style
|
def fetch(self):
raise NotImplementedError('Must be implemented in subclasses.')
|
Wait for the result of `RemoteValue` and return the numpy result.
This makes the value concrete by copying the remote value to local.
Returns:
The numpy array structure of the actual output of the `tf.function`
associated with this `RemoteValue`, previously returned by a
`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` call.
This can be a single value, or a structure of values, depending on the
output of the `tf.function`.
Raises:
tf.errors.CancelledError: If the function that produces this `RemoteValue`
is aborted or cancelled due to failure.
|
github-repos
|
def _ReadRecordSchemaAttributes(self, tables, file_object, record_offset):
record_header = self._ReadRecordHeader(file_object, record_offset)
attribute_value_offsets = self._ReadRecordAttributeValueOffset(
file_object, record_offset + 24, 6)
file_offset = file_object.tell()
attribute_values_data_offset = file_offset - record_offset
attribute_values_data_size = record_header.data_size - (
file_offset - record_offset)
attribute_values_data = file_object.read(attribute_values_data_size)
relation_identifier = self._ReadAttributeValueInteger(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[0])
attribute_identifier = self._ReadAttributeValueInteger(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[1])
attribute_name_data_type = self._ReadAttributeValueInteger(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[2])
attribute_name = self._ReadAttributeValueString(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[3])
attribute_data_type = self._ReadAttributeValueInteger(
attribute_values_data, record_offset, attribute_values_data_offset,
attribute_value_offsets[5])
table = tables.get(relation_identifier, None)
if not table:
raise errors.ParseError(
'Missing table for relation identifier: 0x{0:08}'.format(
relation_identifier))
if attribute_name is None and attribute_value_offsets[1] != 0:
attribute_value_offset = attribute_value_offsets[1]
attribute_value_offset -= attribute_values_data_offset + 1
attribute_name = attribute_values_data[
attribute_value_offset:attribute_value_offset + 4]
attribute_name = attribute_name.decode('ascii')
column = KeychainDatabaseColumn()
column.attribute_data_type = attribute_data_type
column.attribute_identifier = attribute_identifier
column.attribute_name = attribute_name
table.columns.append(column)
table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES, None)
if not table:
raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_ATTRIBUTES table.')
record = collections.OrderedDict({
'RelationID': relation_identifier,
'AttributeID': attribute_identifier,
'AttributeNameFormat': attribute_name_data_type,
'AttributeName': attribute_name,
'AttributeFormat': attribute_data_type})
table.records.append(record)
|
Reads a schema attributes (CSSM_DL_DB_SCHEMA_ATTRIBUTES) record.
Args:
tables (dict[int, KeychainDatabaseTable]): tables per identifier.
file_object (file): file-like object.
record_offset (int): offset of the record relative to the start of
the file.
Raises:
ParseError: if the record cannot be read.
|
juraj-google-style
|
def iou(boxes1, boxes2):
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
|
Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
|
juraj-google-style
|
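A worked numeric example, assuming the companion `area` and `intersection` helpers use the usual `[ymin, xmin, ymax, xmax]` convention:

```python
import numpy as np

boxes1 = np.array([[0.0, 0.0, 2.0, 2.0]])  # area 4
boxes2 = np.array([[1.0, 1.0, 3.0, 3.0]])  # area 4, 1x1 overlap with boxes1
print(iou(boxes1, boxes2))                 # [[1 / (4 + 4 - 1)]] ~= [[0.1429]]
```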
def rename_custom_ops(model, map_custom_op_renames):
for op_code in model.operatorCodes:
if op_code.customCode:
op_code_str = op_code.customCode.decode('ascii')
if op_code_str in map_custom_op_renames:
op_code.customCode = map_custom_op_renames[op_code_str].encode('ascii')
|
Rename custom ops so they use the same naming style as builtin ops.
Args:
model: The input tflite model.
map_custom_op_renames: A mapping from old to new custom op names.
|
github-repos
|
def create_van_der_corput_samples(idx, number_base=2):
assert (number_base > 1)
idx = (numpy.asarray(idx).flatten() + 1)
out = numpy.zeros(len(idx), dtype=float)
base = float(number_base)
active = numpy.ones(len(idx), dtype=bool)
while numpy.any(active):
out[active] += ((idx[active] % number_base) / base)
        idx //= number_base
base *= number_base
active = (idx > 0)
return out
|
Van der Corput samples.
Args:
idx (int, numpy.ndarray):
The index of the sequence. If array is provided, all values in
array is returned.
number_base (int):
The numerical base from where to create the samples from.
Returns (float, numpy.ndarray):
Van der Corput samples.
|
codesearchnet
|
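A quick check of the sequence above (after the integer-division fix): in base 2 the first four samples reverse the bits of the index around the radix point.
print(create_van_der_corput_samples([0, 1, 2, 3]))
# -> [0.5, 0.25, 0.75, 0.125]
|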
def get_file_size(file_object):
position = file_object.tell()
file_object.seek(0, 2)
file_size = file_object.tell()
file_object.seek(position, 0)
return file_size
|
Returns the size, in bytes, of a file. Expects an object that supports
seek and tell methods.
Args:
file_object (file_object) - The object that represents the file
Returns:
(int): size of the file, in bytes
|
juraj-google-style
|
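A minimal usage sketch with an in-memory stream; note that the current read position is preserved:
import io
buf = io.BytesIO(b'hello world')
buf.read(5)                # move the read position to offset 5
print(get_file_size(buf))  # 11
print(buf.tell())          # 5 -- the original position is restored
|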
def from_json_and_lambdas(cls, file: str, lambdas):
with open(file, 'r') as f:
data = json.load(f)
return cls.from_dict(data, lambdas)
|
Builds a GrFN from a JSON object.
Args:
cls: The class variable for object creation.
file: Filename of a GrFN JSON file.
lambdas: The lambda functions used when building the GrFN.
Returns:
type: A GroundedFunctionNetwork object.
|
codesearchnet
|
def __init__(self, num_tasks):
self._num_tasks = num_tasks
self._next_task = 0
|
Create a new `_RoundRobinStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
|
github-repos
|
def is_ready(self, node_id, metadata_priority=True):
if not self._can_send_request(node_id):
return False
if metadata_priority:
if self._metadata_refresh_in_progress:
return False
if self.cluster.ttl() == 0:
return False
return True
|
Check whether a node is ready to send more requests.
In addition to connection-level checks, this method also is used to
block additional requests from being sent during a metadata refresh.
Arguments:
node_id (int): id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if the node is ready and metadata is not refreshing
|
juraj-google-style
|
def _entry_allocated_bitmap(self, entry_number):
(index, offset) = divmod(entry_number, 8)
return bool((self._bitmap[index] & (1 << offset)))
|
Checks if a particular index is allocated.
Args:
entry_number (int): Index to verify
Returns:
bool: True if it is allocated, False otherwise.
|
codesearchnet
|
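A worked example of the divmod/bit-shift arithmetic used above, with a hypothetical two-byte bitmap:
bitmap = bytes([0b00000000, 0b00000100])    # entry 10 lives in byte 1, bit 2
index, offset = divmod(10, 8)               # -> (1, 2)
print(bool(bitmap[index] & (1 << offset)))  # True: entry 10 is allocated
print(bool(bitmap[0] & (1 << 0)))           # False: entry 0 is not
|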
def unzip(archive, destination, filenames=None):
close = False
try:
if (not isinstance(archive, zipfile.ZipFile)):
archive = zipfile.ZipFile(archive, 'r', allowZip64=True)
close = True
logger.info(('Extracting: %s -> %s' % (archive.filename, destination)))
if isinstance(filenames, str):
filenames = [filenames]
if (filenames is None):
filenames = archive.namelist()
for filename in filenames:
if filename.endswith('/'):
shell.mkdir(os.path.join(destination, filename))
elif (not _extract_file(archive, destination, filename)):
raise Exception()
logger.info(('Extracting zip archive "%s" succeeded' % archive.filename))
return True
except Exception:
logger.exception(('Error while unzipping archive %s' % archive.filename))
return False
finally:
if close:
archive.close()
|
Unzip a zip archive into destination directory.
It unzips either the whole archive or specific file(s) from the archive.
Usage:
>>> output = os.path.join(os.getcwd(), 'output')
>>> # Archive can be an instance of a ZipFile class
>>> archive = zipfile.ZipFile('test.zip', 'r')
>>> # Or just a filename
>>> archive = 'test.zip'
>>> # Extracts all files
>>> unzip(archive, output)
>>> # Extract only one file
>>> unzip(archive, output, 'my_file.txt')
>>> # Extract a list of files
>>> unzip(archive, output, ['my_file1.txt', 'my_file2.txt'])
>>> unzip_file('test.zip', 'my_file.txt', output)
Args:
archive (zipfile.ZipFile or str): Zipfile object to extract from or
path to the zip archive.
destination (str): Path to the output directory.
filenames (str or list of str or None): Path(s) to the filename(s)
inside the zip archive that you want to extract.
|
codesearchnet
|
def __init__(self, name=IGNORED, origin=IGNORED, context=IGNORED):
if context != IGNORED and (not isinstance(context, dict)):
raise ValueError('context must be a Python dictionary.')
self.name = name
self.origin = origin
self.context = context
|
Creates a MetricsStructuredNameMatcher.
Any property not passed in to the constructor will be ignored when matching.
Args:
name: A string with the metric name.
origin: A string with the metric namespace.
context: A key:value dictionary that will be matched to the
structured name.
|
github-repos
|
def __init__(self, promise):
super(BrokenPromise, self).__init__()
self._promise = promise
|
Configure the broken promise error.
Args:
promise (Promise): The promise that was not satisfied.
|
juraj-google-style
|
def dqdv_cycle(cycle, splitter=True, **kwargs):
c_first = cycle.loc[(cycle['direction'] == (- 1))]
c_last = cycle.loc[(cycle['direction'] == 1)]
converter = Converter(**kwargs)
converter.set_data(c_first['capacity'], c_first['voltage'])
converter.inspect_data()
converter.pre_process_data()
converter.increment_data()
converter.post_process_data()
voltage_first = converter.voltage_processed
incremental_capacity_first = converter.incremental_capacity
if splitter:
voltage_first = np.append(voltage_first, np.NaN)
incremental_capacity_first = np.append(incremental_capacity_first, np.NaN)
converter = Converter(**kwargs)
converter.set_data(c_last['capacity'], c_last['voltage'])
converter.inspect_data()
converter.pre_process_data()
converter.increment_data()
converter.post_process_data()
voltage_last = converter.voltage_processed[::(- 1)]
incremental_capacity_last = converter.incremental_capacity[::(- 1)]
voltage = np.concatenate((voltage_first, voltage_last))
incremental_capacity = np.concatenate((incremental_capacity_first, incremental_capacity_last))
return (voltage, incremental_capacity)
|
Convenience function for creating dq-dv data from a given capacity and
voltage cycle.
Args:
cycle (pandas.DataFrame): the cycle data ('voltage', 'capacity',
'direction' (1 or -1)).
splitter (bool): insert a np.NaN row between charge and discharge.
Returns:
(tuple): voltage (numpy.array) and incremental capacity (numpy.array).
Example:
>>> cycle_df = my_data.get_cap(
>>> ... 1,
>>> ... categorical_column=True,
>>> ... method = "forth-and-forth"
>>> ... )
>>> voltage, incremental = ica.dqdv_cycle(cycle_df)
|
codesearchnet
|
def plot_summaries(self, show=False, save=True, figure_type=None):
if (not figure_type):
figure_type = self.default_figure_type
if (not (figure_type in self.default_figure_types)):
logger.debug('unknown figure type selected')
figure_type = self.default_figure_type
(color_list, symbol_list) = self._create_colors_markers_list()
summary_df = self.summary_df
selected_summaries = self.selected_summaries
batch_dir = self.batch_dir
batch_name = self.name
(fig, ax) = plot_summary_figure(self.info_df, summary_df, color_list, symbol_list, selected_summaries, batch_dir, batch_name, show=show, save=save, figure_type=figure_type)
self.figure[figure_type] = fig
self.axes[figure_type] = ax
|
Plot summary graphs.
Args:
show: shows the figure if True.
save: saves the figure if True.
figure_type: optional, figure type to create.
|
codesearchnet
|
def display_required_items(msg_type):
print("Configure a profile for: " + msg_type)
print("You will need the following information:")
for k, v in CONFIG[msg_type]["settings"].items():
print(" * " + v)
print("Authorization/credentials required:")
for k, v in CONFIG[msg_type]["auth"].items():
print(" * " + v)
|
Display the required items needed to configure a profile for the given
message type.
Args:
:msg_type: (str) message type to create config entry.
|
juraj-google-style
|
def parse_attributes(attributes=None, classname=None):
if not attributes:
attributes = {}
attributes.setdefault('class', DEFAULT_CLASS_NAME)
if classname:
attributes['class'] = classname
return attributes
|
Parses attributes,
Args:
attributes (dict): Input attributes.
classname (:obj:`str`, optional): Class name of output SPAN tags.
Returns:
Parsed attributes. (dict)
|
juraj-google-style
|
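A short usage sketch; DEFAULT_CLASS_NAME is defined elsewhere in the source module, so the value below is an assumption for illustration:
DEFAULT_CLASS_NAME = 'highlight'  # hypothetical default
print(parse_attributes())                      # {'class': 'highlight'}
print(parse_attributes(classname='match'))     # {'class': 'match'}
print(parse_attributes({'id': 'x'}, 'match'))  # {'id': 'x', 'class': 'match'}
|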
def write_files(abs_data, basename='absorption', prefix=None, directory=None):
for (i, absorption) in enumerate(abs_data):
num_txt = ('_{}'.format((i + 1)) if (len(abs_data) > 1) else '')
prefix_txt = ('{}_'.format(prefix) if prefix else '')
filename = (((prefix_txt + basename) + num_txt) + '.dat')
if directory:
filename = os.path.join(directory, filename)
header = 'energy(eV)'
if (len(absorption[1].shape) == 2):
header += ' alpha_xx alpha_yy alpha_zz'
data = np.concatenate((absorption[0][:, None], absorption[1]), axis=1)
else:
header += ' alpha'
data = np.stack((absorption[0], absorption[1]), axis=1)
np.savetxt(filename, data, header=header)
|
Write the absorption or loss spectra to a file.
Note that this function expects to receive an iterable series of spectra.
Args:
abs_data (tuple): Series (either :obj:`list` or :obj:`tuple`) of
optical absorption or loss spectra. Each spectrum should be
formatted as a :obj:`tuple` of :obj:`list` of :obj:`float`. If the
data has been averaged, each spectrum should be::
([energies], [alpha])
Else, if the data has not been averaged, each spectrum should be::
([energies], [alpha_xx, alpha_yy, alpha_zz]).
basename (:obj:`str`, optional): Base name used for the output files.
prefix (:obj:`str`, optional): Prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
|
codesearchnet
|
def merge_requests(self, **kwargs):
path = ('%s/%s/merge_requests' % (self.manager.path, self.get_id()))
return self.manager.gitlab.http_get(path, **kwargs)
|
List the merge requests related to the commit.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the references could not be retrieved
Returns:
list: The merge requests related to the commit.
|
codesearchnet
|
def __init__(self, name, exchange, topics=None, enable_ping=True,
listen_all=False):
self.name = name
self.exchange = exchange
self.topics = list(topics) if topics else []
self.listeners = []
self.listen_all = listen_all
if enable_ping:
self.listeners.append(self._handle_ping)
if 'ping' not in self.topics:
self.topics.append('ping')
self._channel = None
self._conn = None
self._queue_name = None
|
Initialize the client with connection settings.
Args:
name: name of the client
exchange: name of the exchange to connect to
topics: list of routing keys to listen to
enable_ping: enable answering ping requests
listen_all: listen to every routing key on the exchange
By default, the 'ping' routing key will be added in order to enable
responses to ping requests unless specified otherwise.
|
juraj-google-style
|
def _get_filename_from_url(url):
parse = urlparse(url)
return os.path.basename(parse.path)
|
Return a filename from a URL
Args:
url (str): URL to extract filename from
Returns:
(str): Filename in URL
|
codesearchnet
|
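A usage sketch; on Python 3, urlparse comes from urllib.parse (the original module may import it differently):
from urllib.parse import urlparse
import os
print(_get_filename_from_url('https://example.com/files/data.csv?v=2'))  # data.csv
print(_get_filename_from_url('https://example.com/files/'))              # '' (no filename)
|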
def gumbel_sample(shape):
uniform_samples = tf.random_uniform(shape, minval=1e-05, maxval=0.99998)
return (- tf.log((- tf.log(uniform_samples))))
|
Sample from the Gumbel distribution, protect from overflows.
Args:
shape: Shape of Gumbel samples.
Returns:
Noise drawn from Gumbel distribution.
|
codesearchnet
|
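The clamping matters because tf.log(0) would produce -inf, so bounding the uniform draw away from 0 and 1 keeps both logs finite. A hedged sketch of the usual Gumbel-max trick built on this helper (TF1-style API, matching the tf.random_uniform call above):
import tensorflow as tf  # TF1-style API assumed
logits = tf.constant([[1.0, 2.0, 3.0]])
noisy = logits + gumbel_sample(tf.shape(logits))
sample = tf.argmax(noisy, axis=-1)  # samples follow softmax(logits)
|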
def compute_inv_covariance(L_aug, Y, k, p):
return np.linalg.inv(compute_covariance(L_aug, Y, k, p))
|
Given label matrix L and labels Y, compute the inverse of the covariance matrix.
Args:
L: (np.array) [n, d] The augmented (indicator) label matrix
Y: (np.array int) [n] The true labels in {1,...,k}
|
juraj-google-style
|
def group_items(items, groupids):
if callable(groupids):
keyfunc = groupids
pair_list = ((keyfunc(item), item) for item in items)
else:
pair_list = zip(groupids, items)
groupid_to_items = defaultdict(list)
for (key, item) in pair_list:
groupid_to_items[key].append(item)
return groupid_to_items
|
r"""
Groups a list of items by group id.
Args:
items (Iterable): a list of items to group
groupids (Iterable or Callable): a corresponding list of item groupids
or a function mapping an item to a groupid.
Returns:
dict: groupid_to_items: maps a groupid to a list of items
CommandLine:
python -m ubelt.util_dict group_items
Example:
>>> import ubelt as ub
>>> items = ['ham', 'jam', 'spam', 'eggs', 'cheese', 'banana']
>>> groupids = ['protein', 'fruit', 'protein', 'protein', 'dairy', 'fruit']
>>> groupid_to_items = ub.group_items(items, groupids)
>>> print(ub.repr2(groupid_to_items, nl=0))
{'dairy': ['cheese'], 'fruit': ['jam', 'banana'], 'protein': ['ham', 'spam', 'eggs']}
|
codesearchnet
|
def remove_temp_dir_with_filepath(filepath, strategy):
remove_temp_dirpath(os.path.dirname(filepath), strategy)
|
Removes the temp path for file after writing is finished.
Args:
filepath: Original filepath that would be used without distribution.
strategy: The tf.distribute strategy object currently used.
|
github-repos
|
def do_conneg(accept, supported):
for result in parse_accept_header(accept):
mime_type = result[0]
if (mime_type in supported):
return mime_type
return None
|
Parse accept header and look for preferred type in supported list.
Arguments:
accept - HTTP Accept header
supported - list of MIME types supported by the server
Returns:
supported MIME type with highest q value in request, else None.
FIXME - Should replace this with negotiator2
|
codesearchnet
|
def request(self, request_method, api_method, *args, **kwargs):
url = self._build_url(api_method)
resp = requests.request(request_method, url, *args, **kwargs)
try:
rv = resp.json()
except ValueError:
raise RequestFailedError(resp, 'not a json body')
if not resp.ok:
raise RequestFailedError(resp, rv.get('error'))
return rv
|
Perform a request.
Args:
request_method: HTTP method for this request.
api_method: API method name for this request.
*args: Extra arguments to pass to the request.
**kwargs: Extra keyword arguments to pass to the request.
Returns:
A dict contains the request response data.
Raises:
RequestFailedError: Raises when BearyChat's OpenAPI responses
with status code != 2xx
|
juraj-google-style
|
async def get_jsone_context_and_template(chain, parent_link, decision_link, tasks_for):
if tasks_for == 'action':
jsone_context, tmpl = await get_action_context_and_template(
chain, parent_link, decision_link
)
else:
tmpl = await get_in_tree_template(decision_link)
jsone_context = await populate_jsone_context(
chain, parent_link, decision_link, tasks_for
)
return jsone_context, tmpl
|
Get the appropriate json-e context and template for any parent task.
Args:
chain (ChainOfTrust): the chain of trust.
parent_link (LinkOfTrust): the parent link to test.
decision_link (LinkOfTrust): the parent link's decision task link.
tasks_for (str): the reason the parent link was created (cron,
hg-push, action)
Returns:
(dict, dict): the json-e context and template.
|
juraj-google-style
|
def add(self, timestamp, information):
try:
item = Schema(CollectorStage.schema_event_items()).validate({
'timestamp': timestamp, 'information': information
})
self.events.append(item)
except SchemaError as exception:
Logger.get_logger(__name__).error(exception)
raise RuntimeError(str(exception))
|
Add event information.
Args:
timestamp (int): event timestamp.
information (dict): event information.
Raises:
RuntimeError: when validation of parameters has failed.
|
juraj-google-style
|
def delete(self, key, noreply=None):
if noreply is None:
noreply = self.default_noreply
cmd = b'delete ' + self.check_key(key)
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'delete', noreply)
if noreply:
return True
return results[0] == b'DELETED'
|
The memcached "delete" command.
Args:
key: str, see class docs for details.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the key was deleted, and False if it wasn't found.
|
juraj-google-style
|
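A hedged usage sketch against a pymemcache-style client (the class this method appears to belong to); the connection details are assumptions:
from pymemcache.client.base import Client
client = Client(('localhost', 11211))
client.set('greeting', b'hello')
print(client.delete('greeting'))               # True: key existed and was removed
print(client.delete('missing'))                # False: nothing to delete
print(client.delete('missing', noreply=True))  # True: reply intentionally skipped
|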
def recipe_anonymize(config, auth, from_project, from_dataset, to_project, to_dataset):
anonymize(config, {'auth': auth, 'bigquery': {'from': {'project': from_project, 'dataset': from_dataset}, 'to': {'project': to_project, 'dataset': to_dataset}}})
|
Copies tables and views from one dataset to another and anonymizes all rows.
Used to create sample datasets for dashboards.
Args:
auth (authentication) - Credentials used.
from_project (string) - Original project to read from.
from_dataset (string) - Original dataset to read from.
to_project (string) - Project the anonymous data will be written to.
to_dataset (string) - Dataset the anonymous data will be written to.
|
github-repos
|
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
return Element2D.clone(self, data, shared_data, new_type,
*args, **overrides)
|
Clones the object, overriding data and parameters.
Args:
data: New data replacing the existing data
shared_data (bool, optional): Whether to use existing data
new_type (optional): Type to cast object to
*args: Additional arguments to pass to constructor
**overrides: New keyword arguments to pass to constructor
Returns:
Cloned Spline
|
juraj-google-style
|
def FromBinary(cls, script_data, allow_unknown=True, show_rpcs=False):
curr = 0
records = []
header = cls.ParseHeader(script_data)
curr = header.header_length
cls.logger.debug('Parsed script header: %s, skipping %d bytes', header, curr)
record_count = 0
record_data = bytearray()
partial_match = None
match_offset = 0
while (curr < len(script_data)):
if ((len(script_data) - curr) < UpdateRecord.HEADER_LENGTH):
raise ArgumentError('Script ended with a partial record', remaining_length=(len(script_data) - curr))
(total_length, record_type) = struct.unpack_from('<LB', script_data[curr:])
cls.logger.debug('Found record of type %d, length %d', record_type, total_length)
record_data += script_data[curr:(curr + total_length)]
record_count += 1
curr += total_length
try:
if (show_rpcs and (record_type == SendRPCRecord.MatchType())):
cls.logger.debug(' {0}'.format(hexlify(record_data)))
record = SendRPCRecord.FromBinary(record_data[UpdateRecord.HEADER_LENGTH:], record_count)
elif (show_rpcs and (record_type == SendErrorCheckingRPCRecord.MatchType())):
cls.logger.debug(' {0}'.format(hexlify(record_data)))
record = SendErrorCheckingRPCRecord.FromBinary(record_data[UpdateRecord.HEADER_LENGTH:], record_count)
else:
record = UpdateRecord.FromBinary(record_data, record_count)
except DeferMatching as defer:
if (defer.partial_match is not None):
partial_match = defer.partial_match
match_offset = curr
continue
except DataError:
if ((record_count > 1) and partial_match):
record = partial_match
curr = match_offset
elif (not allow_unknown):
raise
elif (allow_unknown and (record_count > 1)):
raise ArgumentError('A record matched an initial record subset but failed matching a subsequent addition without leaving a partial_match')
else:
record = UnknownRecord(record_type, record_data[UpdateRecord.HEADER_LENGTH:])
record_count = 0
record_data = bytearray()
partial_match = None
match_offset = 0
records.append(record)
return UpdateScript(records)
|
Parse a binary update script.
Args:
script_data (bytearray): The binary data containing the script.
allow_unknown (bool): Allow the script to contain unknown records
so long as they have correct headers to allow us to skip them.
show_rpcs (bool): Show SendRPCRecord matches for each record rather than
the more specific operation
Raises:
ArgumentError: If the script contains malformed data that cannot
be parsed.
DataError: If the script contains unknown records and allow_unknown=False
Returns:
UpdateScript: The parsed update script.
|
codesearchnet
|
def create_queue(self, register=False):
queue = asyncio.Queue(loop=self._loop)
if register:
self._work_queues.add(queue)
return queue
|
Create a new work queue and optionally register it.
This will make sure the queue is attached to the correct event loop.
You can optionally choose to automatically register it so that
wait_idle() will block until the queue is empty.
Args:
register (bool): Whether to call register_workqueue() automatically.
Returns:
asyncio.Queue: The newly created queue.
|
codesearchnet
|
def read_uint16(self, little_endian=True):
if little_endian:
endian = "<"
else:
endian = ">"
return self.unpack('%sH' % endian, 2)
|
Read 2 bytes as an unsigned integer value from the stream.
Args:
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int: the unsigned integer value read.
|
juraj-google-style
|
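The endianness flag maps directly onto the struct format prefix; a standalone check:
import struct
data = b'\x34\x12'
print(struct.unpack('<H', data)[0])  # 4660  == 0x1234 (little endian)
print(struct.unpack('>H', data)[0])  # 13330 == 0x3412 (big endian)
|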
def print_info(self, obj=None, buf=sys.stdout):
if (not obj):
self._print_info(buf)
return True
b = False
for fn in (self._print_tool_info, self._print_package_info, self._print_suite_info, self._print_context_info):
b_ = fn(obj, buf, b)
b |= b_
if b_:
print('', file=buf)
if (not b):
((print >> buf), ("Rez does not know what '%s' is" % obj))
return b
|
Print a status message about the given object.
If an object is not provided, status info is shown about the current
environment - what the active context is if any, and what suites are
visible.
Args:
obj (str): String which may be one of the following:
- A tool name;
- A package name, possibly versioned;
- A context filepath;
- A suite filepath;
- The name of a context in a visible suite.
|
codesearchnet
|
def FormatAST(ast, style_config=None, lines=None):
style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))
llines = pyparser.ParseCode(ast)
for lline in llines:
lline.CalculateFormattingInformation()
lines = _LineRangesToSet(lines)
_MarkLinesToFormat(llines, lines)
return reformatter.Reformat(_SplitSemicolons(llines), lines)
|
Format a parsed lib2to3 pytree.
This provides an alternative entry point to YAPF.
Arguments:
ast: The lib2to3 pytree to format.
style_config: (string) Either a style name or a path to a file that contains
formatting style settings. If None is specified, use the default style
as set in style.DEFAULT_STYLE_FACTORY
lines: (list of tuples of integers) A list of tuples of lines, [start, end],
that we want to format. The lines are 1-based indexed. It can be used by
third-party code (e.g., IDEs) when reformatting a snippet of code rather
than a whole file.
Returns:
The source formatted according to the given formatting style.
|
github-repos
|
def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window:
if (size is None):
(width, height) = (1280, 720)
else:
(width, height) = size
if ((samples < 0) or ((samples & (samples - 1)) != 0)):
raise Exception(('Invalid number of samples: %d' % samples))
window = Window.__new__(Window)
window.wnd = glwnd.create_window(width, height, samples, fullscreen, title, threaded)
return window
|
Create the main window.
Args:
size (tuple): The width and height of the window.
samples (int): The number of samples.
Keyword Args:
fullscreen (bool): Fullscreen?
title (bool): The title of the window.
threaded (bool): Threaded?
Returns:
Window: The main window.
|
codesearchnet
|
def get_role(self, item, state_root, from_state=False):
if from_state:
if self._identity_view is None:
self.update_view(state_root)
value = self._identity_view.get_role(item)
return value
value = self._cache.get(item)
if value is None:
if self._identity_view is None:
self.update_view(state_root)
value = self._identity_view.get_role(item)
self._cache[item] = value
return value
|
Used to retrieve an identity role.
Args:
item (string): the name of the role to be fetched
state_root(string): The state root of the previous block.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head.
|
juraj-google-style
|
def success(channel, post):
datapacks = [("Game", post[0], True), ("Upvotes", post[2], True)]
gui = ui_embed.UI(
channel,
"Link",
post[1],
modulename=modulename,
colour=0xFF8800,
thumbnail=post[1],
datapacks=datapacks
)
return gui
|
Creates an embed UI containing the Reddit post
Args:
channel (discord.Channel): The Discord channel to bind the embed to
post (tuple): Tuples of (field, value, percentile)
Returns:
ui_embed.UI: The embed UI object
|
juraj-google-style
|
def append(self, annotation):
self._annotations[annotation.id] = annotation
self._dirty = True
return annotation
|
Add an annotation.
Args:
annotation (gkeepapi.node.Annotation): An Annotation object.
Returns:
gkeepapi.node.Annotation: The Annotation.
|
codesearchnet
|
def _Aff4Size(aff4_obj):
if (not isinstance(aff4_obj, aff4.AFF4Stream)):
message = 'Expected an instance of `%s` but received `%s`'
raise TypeError((message % (aff4.AFF4Stream, type(aff4_obj))))
return int(aff4_obj.Get(aff4_obj.Schema.SIZE))
|
Retrieves the total size in bytes of an AFF4 object.
Args:
aff4_obj: An AFF4 stream instance to retrieve size for.
Returns:
An integer representing number of bytes.
Raises:
TypeError: If `aff4_obj` is not an instance of AFF4 stream.
|
codesearchnet
|
def publishFeatureCollections(self, configs):
if self.securityhandler is None:
print ("Security handler required")
return
config = None
res = None
resItm = None
try:
res = []
if isinstance(configs, list):
for config in configs:
if 'ReplaceTag' in config:
resItm = {"ReplaceTag":config['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
if 'Zip' in config:
resItm['FCInfo'] = self._publishFeatureCollection(config=config)
if not resItm['FCInfo'] is None and 'id' in resItm['FCInfo']:
print ("%s feature collection created" % resItm['FCInfo']['id'])
res.append(resItm)
else:
print (str(resItm['FCInfo']))
return res
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishFeatureCollections",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
resItm = None
config = None
del resItm
del config
gc.collect()
|
Publishes feature collections to a feature service.
Args:
configs (list): A list of JSON configuration feature service details to publish.
Returns:
dict: A dictionary of results objects.
|
juraj-google-style
|
def memory_write16(self, addr, data, zone=None):
return self.memory_write(addr, data, zone, 16)
|
Writes half-words to memory of a target system.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to write to
data (list): list of half-words to write
zone (str): optional memory zone to access
Returns:
Number of half-words written to target.
Raises:
JLinkException: on memory access error.
|
juraj-google-style
|
def laid_out_pcoord(self, mesh_axis):
divisor = list_product(self.shape.to_integer_list[(mesh_axis + 1):])
modulus = self.shape[mesh_axis].size
def my_fn(pnum):
return (pnum // divisor) % modulus
return self.slicewise(my_fn, self.laid_out_pnum())
|
Returns a LaidOutTensor containing the processor coordinate.
Args:
mesh_axis: int.
Returns:
LaidOutTensor where each slice is an integer scalar.
|
codesearchnet
|
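A worked example of the divisor/modulus arithmetic above (pure Python; the mesh shape is assumed for illustration):
import math
shape = [2, 3, 4]                            # hypothetical mesh shape
mesh_axis = 1
divisor = math.prod(shape[mesh_axis + 1:])   # 4: product of trailing sizes
modulus = shape[mesh_axis]                   # 3: size of the axis itself
pnum = 17                                    # row-major coordinates (1, 1, 1)
print((pnum // divisor) % modulus)           # 1: the coordinate along axis 1
|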