code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def collect_and_execute_subfields(
    self,
    return_type: GraphQLObjectType,
    field_nodes: List[FieldNode],
    path: ResponsePath,
    result: Any,
) -> AwaitableOrValue[Dict[str, Any]]:
    """Collect the sub-fields of this object value and execute them.

    Gathers the sub-field nodes for *return_type* and delegates to
    ``execute_fields`` to complete *result* at *path*.
    """
    subfields = self.collect_subfields(return_type, field_nodes)
    return self.execute_fields(return_type, result, path, subfields)
def conn_az(cred, crid):
    """Establish a libcloud connection to the Azure ARM service.

    ``cred`` maps the tenant/subscription/app credential keys;
    ``crid`` keys the returned driver object in the result dict.
    SSL or credential errors abort via ``abort_err``.
    """
    driver = get_driver(Provider.AZURE_ARM)
    try:
        az_obj = driver(tenant_id=cred['az_tenant_id'],
                        subscription_id=cred['az_sub_id'],
                        key=cred['az_app_id'],
                        secret=cred['az_app_sec'])
    except SSLError as e:
        abort_err("\r SSL Error with Azure: {}".format(e))
    except InvalidCredsError as e:
        abort_err("\r Error with Azure Credentials: {}".format(e))
    # NOTE(review): if abort_err returns instead of exiting, az_obj may be
    # unbound here -- presumably abort_err terminates; confirm.
    return {crid: az_obj}
def _get_java_env(self):
    """Set env vars from connection if set.

    Starts from the parent submitter's environment; for a REST delegator
    (cloud service) the domain/instance ids are removed, otherwise the
    domain id and REST credentials are filled in from the connection.
    """
    env = super(_DistributedSubmitter, self)._get_java_env()
    if self._streams_connection is not None:
        sc = self._streams_connection
        if isinstance(sc._delegator, streamsx.rest_primitives._StreamsRestDelegator):
            # REST/cloud deployment: domain and instance ids do not apply.
            env.pop('STREAMS_DOMAIN_ID', None)
            env.pop('STREAMS_INSTANCE_ID', None)
        else:
            env['STREAMS_DOMAIN_ID'] = sc.get_domains()[0].id
            # Explicit service definition in config overrides REST settings.
            if not ConfigParams.SERVICE_DEFINITION in self._config():
                env['STREAMS_REST_URL'] = sc.resource_url
                env['STREAMS_USERNAME'] = sc.session.auth[0]
                env['STREAMS_PASSWORD'] = sc.session.auth[1]
    return env
def scalar_term(self, st):
    """Wrap *st* for text and HTML substitutions.

    Bytes become a _ScalarTermS, text a _ScalarTermU, None an empty
    _ScalarTermU; any other object is returned unchanged.
    """
    if st is None:
        return _ScalarTermU(u(''), self._jinja_sub)
    if isinstance(st, binary_type):
        return _ScalarTermS(st, self._jinja_sub)
    if isinstance(st, text_type):
        return _ScalarTermU(st, self._jinja_sub)
    return st
def write_lines(self, lines, level=0):
    """Append every line in *lines* at the given indentation level."""
    for entry in lines:
        self.write_line(entry, level)
def copy(self):
    """Return a copy of this object.

    disp and flux are always copied; variance and headers are copied only
    when present (None is propagated unchanged).
    """
    variance = self.variance.copy() if self.variance is not None else None
    headers = self.headers.copy() if self.headers is not None else None
    return self.__class__(self.disp.copy(), self.flux.copy(),
                          variance=variance, headers=headers)
def prepare_sql(sql, add_semicolon=True, invalid_starts=('--', '/*', '*/', ';')):
    """Thin wrapper over the PrepareSQL class; returns the prepared SQL string."""
    return PrepareSQL(sql, add_semicolon, invalid_starts).prepared
def round_to_float(number, precision):
    """Round *number* to the nearest multiple of *precision*.

    The final multiply is done in Decimal (via str) so typical decimal
    precisions (0.1, 0.01, ...) yield clean floats.
    """
    # The old code floored twice (`floor` of a `//` result); a single true
    # division plus floor is equivalent and clearer.
    steps = floor((number + precision / 2) / precision)
    return float(Decimal(str(steps)) * Decimal(str(precision)))
def write_branch_data(self, file):
    """Write one row per case branch (columns = BRANCH_ATTRS) to a "Branches" sheet.

    NOTE(review): the *file* parameter is unused here; the sheet is added to
    self.book -- confirm the caller saves the workbook elsewhere.
    """
    branch_sheet = self.book.add_sheet("Branches")
    for i, branch in enumerate(self.case.branches):
        for j, attr in enumerate(BRANCH_ATTRS):
            branch_sheet.write(i, j, getattr(branch, attr))
def _parse_output_keys(val):
    """Parse expected output keys from a comma-separated string.

    "a,b:x;y" -> {"a": None, "b": ["x", "y"]}.
    """
    out = {}
    for key in val.split(","):
        if ":" in key:
            # Split only on the first colon so attribute lists may themselves
            # contain ':' without raising ValueError.
            name, attrs = key.split(":", 1)
            out[name] = attrs.split(";")
        else:
            out[key] = None
    return out
def serve(application, host='127.0.0.1', port=8080):
    """Serve *application* with gevent's WSGI-HTTP server (blocks forever)."""
    WSGIServer((host, int(port)), application).serve_forever()
def new_port():
    """Find a free local TCP port in [12042, 16042) and return its number.

    Note: the port is released again before returning, so a race with
    another process claiming it is possible.
    """
    for port in range(12042, 16042):
        # Fresh socket per attempt: the old code reused one socket after
        # close() and leaked it when every bind failed.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        try:
            s.bind(('127.0.0.1', port))
            return port
        except socket.error:
            pass
        finally:
            s.close()
    raise Exception('No local port available')
def _process_docs(self, anexec, docblocks, parent, module, docsearch):
    """Associate the docstrings from docblocks with *anexec*'s parameters.

    The lookup key is "<parent>.<name>"; a miss is a silent no-op.
    docblocks values are (raw docs, start, end) triples.
    """
    key = "{}.{}".format(parent.name, anexec.name)
    if key in docblocks:
        docs = self.docparser.to_doc(docblocks[key][0], anexec.name)
        anexec.docstart, anexec.docend = (docblocks[key][1], docblocks[key][2])
        self.docparser.process_execdocs(docs, anexec, key)
def _get_xml(xml_str):
    """Parse opennebula's reply as XML and raise if it isn't.

    Raises SaltCloudSystemExit carrying the raw reply (the original
    XMLSyntaxError is discarded).
    """
    try:
        xml_data = etree.XML(xml_str)
    except etree.XMLSyntaxError as err:
        raise SaltCloudSystemExit('opennebula returned: {0}'.format(xml_str))
    return xml_data
def trace_decorator(self):
    """Return a decorator that wraps a function call in a tracer span.

    The span is always closed, even when the wrapped function raises,
    and the wrapper preserves the wrapped function's metadata.
    """
    import functools

    def decorator(func):
        @functools.wraps(func)  # keep __name__/__doc__ of the traced function
        def wrapper(*args, **kwargs):
            self.tracer.start_span(name=func.__name__)
            try:
                return func(*args, **kwargs)
            finally:
                # Previously the span leaked when func raised.
                self.tracer.end_span()
        return wrapper
    return decorator
def generate(
    cls: Type[T], data: Generic, name: str = None, *, recursive: bool = True
) -> T:
    """Build attrs classes and objects from a mapping, recursively.

    Creates a new class named *name* (default cls.__name__) with one
    optional attribute per key of *data*; nested mappings become nested
    generated classes when *recursive* is true.
    """
    if name is None:
        name = cls.__name__
    # One ib(default=None) per top-level key, subclassing cls.
    kls = make_class(name, {k: ib(default=None) for k in data}, bases=(cls,))
    data = {
        k: (
            cls.generate(v, k.title())
            if recursive and isinstance(v, Mapping)
            else v
        )
        for k, v in data.items()
    }
    return kls(**data)
def render_field(field, **kwargs):
    """Render a form field to Bootstrap markup via the configured renderer."""
    renderer_cls = get_field_renderer(**kwargs)
    return renderer_cls(field, **kwargs).render()
def EndOfEventAction(self, event):
    """At end of event, collect sensitive-detector hits and run the processor chain.

    Each processor receives the previous one's output; a processor that
    returns a falsy value is logged but the (empty) value is still passed on.
    """
    self.log.debug('Processesing simulated event %d', event.GetEventID())
    docs = self.sd.getDocs()
    self.sd.clearDocs()
    for processor in self.processors:
        docs = processor.process(docs)
        if not docs:
            self.log.warning('%s did not return documents in process()!',
                             processor.__class__.__name__)
def seq_ratio(word1, word2):
    """Return the SequenceMatcher similarity of two words as a 0-100 integer."""
    similarity = SequenceMatcher(None, word1, word2).ratio()
    return int(round(similarity * 100))
def validate_account_id(sts_client, account_id):
    """Exit the process unless the caller identity matches *account_id*."""
    resp = sts_client.get_caller_identity()
    if 'Account' in resp:
        if resp['Account'] == account_id:
            LOGGER.info('Verified current AWS account matches required '
                        'account id %s.',
                        account_id)
        else:
            LOGGER.error('Current AWS account %s does not match '
                         'required account %s in Runway config.',
                         resp['Account'],
                         account_id)
            sys.exit(1)
    else:
        # get_caller_identity responded without an Account field.
        LOGGER.error('Error checking current account ID')
        sys.exit(1)
def run(q_prompt=False):
    """Run a prompt-toolkit based q/kdb+ REPL.

    Sizes the console, optionally loads the script named in sys.argv[1]
    (exiting with status 1 on a kdb error), then starts the REPL loop.
    """
    lines, columns = console_size()
    q(r'\c %d %d' % (lines, columns))
    if len(sys.argv) > 1:
        try:
            q(r'\l %s' % sys.argv[1])
        except kerr as e:
            print(e)
            raise SystemExit(1)
        # Consume the script argument once loaded.  (Previously this `del`
        # sat in the *else* branch and raised IndexError when no argument
        # was given.)
        del sys.argv[1]
    if q_prompt:
        q()
    ptp.run()
def basename(path: Optional[str]) -> Optional[str]:
    """Return the final component of *path*, or None when *path* is None."""
    if path is None:
        return None
    return os.path.basename(path)
def __get_return_value(self, messageKey, value):
    """Pick the value to return for a translation lookup.

    A truthy *value* wins; otherwise the fallback translator is consulted,
    and failing that the key itself is returned.
    """
    if value:
        return value
    if self._fallback:
        return self._fallback.gettext(messageKey)
    return messageKey
def register_scr_task(self, *args, **kwargs):
    """Register a screening task: register_task with task_class forced to ScrTask."""
    kwargs["task_class"] = ScrTask
    return self.register_task(*args, **kwargs)
def hide_errors(self):
    """Blank every method's last output and hide the module in the i3bar."""
    for info in self.methods.values():
        info["last_output"] = {}
    self.allow_config_clicks = False
    self.error_hide = True
    self.set_updated()
def bencode(obj):
    """Bencode *obj* (int, str, unicode, list, dict) and return it as a str.

    Python 2 code.  Empty strings encode to None and are silently dropped
    from lists and dicts; dict keys are emitted in sorted order; unicode
    is UTF-8 encoded first.
    """
    if isinstance(obj, int):
        return "i" + str(obj) + "e"
    if isinstance(obj, str):
        if not obj:
            # NOTE(review): empty strings yield None and get skipped by the
            # container branches below -- confirm this is intentional.
            return None
        return str(len(obj)) + ":" + obj
    if isinstance(obj, list):
        res = "l"
        for elem in obj:
            elem = bencode(elem)
            if elem:
                res += elem
        return res + "e"
    if isinstance(obj, dict):
        res = "d"
        for key in sorted(obj.keys()):
            if key in obj:
                value = bencode(obj[key])
                key = bencode(key)
                if key and value:
                    res += key + value
        return res + "e"
    if isinstance(obj, unicode):
        return bencode(obj.encode('utf-8'))
    # NOTE(review): OrderedDict subclasses dict, so the dict branch above
    # already catches it -- this branch is unreachable.
    if isinstance(obj, collections.OrderedDict):
        return bencode(dict(obj))
    raise Exception("Unknown object: %s (%s)" % (repr(obj), repr(type(obj))))
def subsample(self, down_to=1, new_path=None):
    """Pick *down_to* sequences pseudo-randomly into a new FASTA.  (Python 2.)"""
    # Destination: temp file, an existing FASTA object, or a fresh path.
    if new_path is None: subsampled = self.__class__(new_temp_path())
    elif isinstance(new_path, FASTA): subsampled = new_path
    else: subsampled = self.__class__(new_path)
    if down_to > len(self):
        message = "Can't subsample %s down to %i. Only down to %i."
        print Color.ylw + message % (self, down_to, len(self)) + Color.end
        self.copy(new_path)
        # NOTE(review): returns None here rather than the copy -- confirm
        # callers handle that.
        return
    subsampled.create()
    for seq in isubsample(self, down_to): subsampled.add_seq(seq)
    subsampled.close()
    assert len(subsampled) == down_to
    return subsampled
def _comp_bbox(el, el2):
    """Return 1 if el2's box lies inside el's, -1 for the reverse, else 0."""
    have_keys = (_comp_bbox_keys_required <= set(el.keys())
                 and _comp_bbox_keys_required <= set(el2.keys()))
    if have_keys:
        if _box_in_box(el2, el):
            return 1
        if _box_in_box(el, el2):
            return -1
    return 0
def save(self):
    """Serialize the settings and write them to self.file_path."""
    content = self.dumps()
    fileutils.save_text_to_file(content, self.file_path)
def _get_query_parts(self, query_str, search_options=None):
    """Split a quote-aware query string into [{'string': part}, ...].

    Python 2 code (shlex needs bytes).  An empty query yields one empty
    part.  Raises NipapValueError on None input or unbalanced quotes.
    """
    if search_options is None:
        search_options = {}
    if query_str is None:
        raise NipapValueError("'query_string' must not be None")
    query_str_parts = []
    try:
        for part in shlex.split(query_str.encode('utf-8')):
            query_str_parts.append({ 'string': part.decode('utf-8') })
    except ValueError as exc:
        # shlex reports unbalanced quotes with this exact message.
        if unicode(exc) == 'No closing quotation':
            raise NipapValueError(unicode(exc))
        raise exc
    if len(query_str_parts) == 0:
        query_str_parts.append({ 'string': '' })
    return query_str_parts
def simple_predictive_probability_multistate(M_c, X_L_list, X_D_list, Y, Q):
    """Return the simple predictive probability averaged (log-mean-exp) over samples."""
    logprobs = [float(simple_predictive_probability(M_c, X_L, X_D, Y, Q))
                for X_L, X_D in zip(X_L_list, X_D_list)]
    return logmeanexp(logprobs)
def comparator(operator):
    """Guard a VersionInfo binary operator with a type check.

    The wrapped method returns NotImplemented unless *other* is a
    VersionInfo or dict, letting Python try the reflected operation.
    """
    @wraps(operator)
    def wrapper(self, other):
        comparable = isinstance(other, (VersionInfo, dict))
        if not comparable:
            return NotImplemented
        return operator(self, other)
    return wrapper
def _select_loci(c):
    """Keep only loci whose sequence count exceeds 80% of the biggest locus.

    Python 2 code (iteritems).  Sequences that lose all of their loci are
    re-attached to the largest locus.
    """
    loci_len = {k: len(v) for k, v in c.loci2seq.iteritems()}
    logger.debug("_select_loci: number of loci %s" % len(c.loci2seq.keys()))
    loci_len_sort = sorted(loci_len.iteritems(), key=operator.itemgetter(1), reverse=True)
    max_size = loci_len_sort[0][1]
    logger.debug("_select_loci: max size %s" % max_size)
    loci_clean = {locus: c.loci2seq[locus] for locus, size in loci_len_sort if size > 0.8 * max_size}
    c.loci2seq = loci_clean
    # Orphaned sequences get re-added to the most abundant locus.
    removed = list(set(c.idmembers.keys()) - set(_get_seqs(c)))
    c.add_id_member(removed, loci_len_sort[0][0])
    logger.debug("_select_loci: number of loci %s after cleaning" % len(c.loci2seq.keys()))
    return c
def rpc_get_account_record(self, address, token_type, **con_info):
    """Return the current state of an account, or an error dict.

    Validates address and token type (400), converts c32 addresses to
    base58, and returns 404 when no such account exists.
    """
    if not check_account_address(address):
        return {'error': 'Invalid address', 'http_status': 400}
    if not check_token_type(token_type):
        return {'error': 'Invalid token type', 'http_status': 400}
    if is_c32_address(address):
        address = c32ToB58(address)
    db = get_db_state(self.working_dir)
    account = db.get_account(address, token_type)
    db.close()
    if account is None:
        return {'error': 'No such account', 'http_status': 404}
    state = self.export_account_state(account)
    return self.success_response({'account': state})
def snippets(self):
    """All snippet names in this DAP: matching files with the '.yaml' suffix stripped."""
    return [strip_suffix(f, '.yaml') for f in self._stripped_files if self._snippets_pattern.match(f)]
def reset(self, seed):
    """Re-seed this generator's seed generator and propagate to every clone."""
    logger.debug(f'Resetting {self} (seed={seed})')
    self.seed_generator.reset(seed)
    for clone in self.clones:
        clone.reset(seed)
def _process_first_group(self, group):
    """Process the first group of a rule.

    "XXXX-YYYY" expands to the inclusive codepoint range; anything else is
    parsed as a single codepoint.  Unparseable input yields [].
    """
    parts = group.split("-")
    if len(parts) == 2:
        start = self._parse_codepoint(parts[0])
        end = self._parse_codepoint(parts[1])
    else:
        # Single codepoint, or malformed input with several dashes.
        # (Previously a group containing more than one dash left start/end
        # unbound and raised UnboundLocalError.)
        start = self._parse_codepoint(group)
        end = start
    result = []
    if (start > -1) and (end >= start):
        for index in range(start, end + 1):
            result.append(gf.safe_unichr(index))
    return result
def _transform_snapshots_for_blockhash(storage: SQLiteStorage, cache: BlockHashCache) -> None:
    """Upgrade stored snapshots by adding blockhashes (incl. pending transactions).

    Transforms run in a pool sized to the snapshot count and results are
    written back in one update.
    """
    snapshots = storage.get_snapshots()
    snapshot_records = [
        TransformSnapshotRecord(
            data=snapshot.data,
            identifier=snapshot.identifier,
            storage=storage,
            cache=cache,
        )
        for snapshot in snapshots
    ]
    # NOTE(review): Pool(len(snapshots)) with zero snapshots presumably
    # errors -- confirm callers never invoke this on an empty table.
    pool_generator = Pool(len(snapshots)).imap(_do_transform_snapshot, snapshot_records)
    updated_snapshots_data = []
    for result in pool_generator:
        updated_snapshots_data.append(result)
    storage.update_snapshots(updated_snapshots_data)
def packages(ciprcfg, env, opts, console):
    """List installed packages for this project on the console.

    With opts.long_details each package also shows its directory and source.
    """
    for name, source in ciprcfg.packages.items():
        console.normal('- %s' % name)
        if opts.long_details:
            console.normal('  - directory: %s' % path.join(env.package_dir, name))
            console.normal('  - source: %s' % source)
def check_for_allowed_file(f):
    """Return True if *f* ends with a supported sequence-file extension.

    Logs an error and raises SystemExit otherwise.
    """
    # endswith accepts a tuple -- one C-level call replaces the manual loop.
    if f.endswith(tuple(SUPPORTED_EXTENSIONS)):
        return True
    log.error("Failed upload: Not an allowed file extension: %s", f)
    raise SystemExit
def value(self, data):
    """Return this field's value from *data* as an int, or the default.

    NOTE(review): any falsy raw value (0, '', None, missing) falls back to
    the default -- confirm 0 vs '0' handling is intended.
    """
    value = data.get(self.name)
    if value:
        return int(value)
    return self.default
def crossvalidate(self, foldsfile):
    """Train and test via Timbl cross-validation; return the accuracy.

    *foldsfile* lists the filenames of all folds.  The Timbl API instance
    is created fresh (bytes args on Python 2) and released afterwards.
    """
    options = "-F " + self.format + " " + self.timbloptions + " -t cross_validate"
    print("Instantiating Timbl API : " + options,file=stderr)
    if sys.version < '3':
        self.api = timblapi.TimblAPI(b(options), b"")
    else:
        self.api = timblapi.TimblAPI(options, "")
    if self.debug:
        print("Enabling debug for timblapi",file=stderr)
        self.api.enableDebug()
    print("Calling Timbl Test : " + options,file=stderr)
    if sys.version < '3':
        self.api.test(b(foldsfile),b'',b'')
    else:
        self.api.test(u(foldsfile),'','')
    a = self.api.getAccuracy()
    # Free the API handle; accuracy has already been read.
    del self.api
    return a
def _parse_arguments():
    """Parse known CLI args (base + tc argument sets); return (args, unknown)."""
    parser = get_base_arguments(get_parser())
    parser = get_tc_arguments(parser)
    args, unknown = parser.parse_known_args()
    return args, unknown
def isCode(self, block, column):
    """Return True if the character at *column* of *block* is code (per syntax data)."""
    dataObject = block.userData()
    data = dataObject.data if dataObject is not None else None
    return self._syntax.isCode(data, column)
def inheritance_patch(attrs):
    """Patch tango attribute objects before metaclass processing.

    READ_WRITE attributes lacking an fset get the conventional
    "write_<name>" method (or their declared write_method_name) from *attrs*.
    """
    for key, obj in attrs.items():
        if isinstance(obj, attribute):
            if getattr(obj, 'attr_write', None) == AttrWriteType.READ_WRITE:
                if not getattr(obj, 'fset', None):
                    method_name = obj.write_method_name or "write_" + key
                    obj.fset = attrs.get(method_name)
def _build_filtered_query(self, f, operator):
    """Install *f* (a Filter or a raw filter spec) as the filter-tree root."""
    self._filtered = True
    if isinstance(f, Filter):
        root = f
    else:
        root = Filter(operator).filter(f)
    self._filter_dsl = root
def split_history_item(history):
    """Return (log_file, description) for a history line.

    Description is None when the line is a bare log-file name (i.e. it does
    not shell-split into exactly two tokens).
    """
    try:
        log_file, description = shlex.split(history)
    except ValueError:
        return history.strip(), None
    return log_file, description
def add_stylesheets(self, *css_files):
    """Inline each CSS file's text into the document's <style> element."""
    for css_file in css_files:
        self.main_soup.style.append(self._text_file(css_file))
def handle_resource_update_success(resource):
    """Recover an ERRED resource, mark in-progress states OK, clear errors.

    Only the touched fields are saved; a log line records the success.
    """
    update_fields = []
    if resource.state == resource.States.ERRED:
        resource.recover()
        update_fields.append('state')
    if resource.state in (resource.States.UPDATING, resource.States.CREATING):
        resource.set_ok()
        update_fields.append('state')
    if resource.error_message:
        resource.error_message = ''
        update_fields.append('error_message')
    if update_fields:
        resource.save(update_fields=update_fields)
    # NOTE(review): success is logged at warning level -- presumably to make
    # recoveries visible; confirm the level is intended.
    logger.warning('%s %s (PK: %s) was successfully updated.' % (
        resource.__class__.__name__, resource, resource.pk))
def boto_client(self, service, *args, **kwargs):
    """Create a boto client for *service* with session-level kwargs applied."""
    return self.boto_session.client(service, *args, **self.configure_boto_session_method_kwargs(service, kwargs))
def encode_setid(uint128):
    """Encode a uint128 set id as a stripped, lowercase base32 byte string."""
    high, low = divmod(uint128, 1 << 64)
    packed = struct.pack('<QQ', low, high)
    # 16 packed bytes -> 32 base32 chars incl. 6 '=' padding; strip and lower.
    return b32encode(packed)[:-6].lower()
def _chunk(iterable, size):
    """Split an iterable into chunks of a fixed size (last may be shorter).

    Yields iterators over each chunk.  Uses a private sentinel as the
    zip_longest fill value so legitimate None elements are preserved
    (the old None-based check silently truncated such chunks).
    """
    sentinel = object()
    args = (iter(iterable),) * size
    return (
        itertools.takewhile(lambda x: x is not sentinel, group)
        for group in itertools.zip_longest(*args, fillvalue=sentinel)
    )
def generic_find_uq_constraint_name(table, columns, insp):
    """Return the name of *table*'s unique constraint covering exactly *columns*.

    Returns None when no constraint matches (alembic migration helper).
    """
    return next(
        (uq['name'] for uq in insp.get_unique_constraints(table)
         if set(uq['column_names']) == columns),
        None,
    )
def inputs_outputs(self):
    """Fetch and return this method's inputs/outputs info as parsed JSON.

    Raises via _check_response_code unless the API answers 200.
    """
    r = fapi.get_inputs_outputs(self.namespace, self.name,
                                self.snapshot_id, self.api_url)
    fapi._check_response_code(r, 200)
    return r.json()
def fetch_state_data(self, states):
    """Fetch census estimates for every table/variable, per state.

    For each estimate, state-, county- and district-level numbers are
    pulled for every state in *states* (tqdm shows progress).
    """
    print("Fetching census data")
    for table in CensusTable.objects.all():
        api = self.get_series(table.series)
        for variable in table.variables.all():
            # Census variable code, e.g. "<table>_<variable>".
            estimate = "{}_{}".format(table.code, variable.code)
            print(
                ">> Fetching {} {} {}".format(
                    table.year, table.series, estimate
                )
            )
            for state in tqdm(states):
                self.get_state_estimates_by_state(
                    api=api,
                    table=table,
                    variable=variable,
                    estimate=estimate,
                    state=state,
                )
                self.get_county_estimates_by_state(
                    api=api,
                    table=table,
                    variable=variable,
                    estimate=estimate,
                    state=state,
                )
                self.get_district_estimates_by_state(
                    api=api,
                    table=table,
                    variable=variable,
                    estimate=estimate,
                    state=state,
                )
def decode(self, ids):
    """Decode a list of subword integer ids into text.

    Byte-valued subwords are buffered and UTF-8 decoded together
    ("replace" on errors); text subwords have the trailing underscore
    trimmed and may reinsert a space.
    """
    ids = text_encoder.pad_decr(ids)
    subword_ids = ids
    del ids
    subwords = []
    # Pending raw bytes waiting to be decoded as one UTF-8 run.
    prev_bytes = []
    def consume_prev_bytes():
        # Flush buffered bytes into a text subword; returns the new buffer.
        if prev_bytes:
            bytestr = b"".join(prev_bytes)
            bytes_text = bytestr.decode("utf-8", "replace")
            subwords.append(bytes_text)
        return []
    for subword_id in subword_ids:
        subword = self._id_to_subword(subword_id)
        if isinstance(subword, six.binary_type):
            prev_bytes.append(subword)
        else:
            prev_bytes = consume_prev_bytes()
            trimmed, add_space = _trim_underscore_and_tell(subword)
            subwords.append(trimmed)
            if add_space:
                subwords.append(" ")
    # Flush any trailing byte run.
    prev_bytes = consume_prev_bytes()
    return tf.compat.as_text("".join(subwords))
def first_ipv4(self) -> Optional[AddressInfo]:
    """Return the first AF_INET address info, or None when there is none."""
    for candidate in self._address_infos:
        if candidate.family == socket.AF_INET:
            return candidate
    return None
def _load_config(self):
    """Load and merge the project configuration from YAML layers.

    No-op when a config was already supplied.  Requires a git repo root
    and the project config file; merges global, global-local, project,
    project-local and additional-yaml layers in that order.
    """
    if (
        self.config
    ):
        # An override config was passed in initially; keep it.
        return
    repo_root = self.repo_root
    if not repo_root:
        raise NotInProject(
            "No git repository was found in the current path. You must be in a git repository to set up and use CCI for a project."
        )
    if not self.config_project_path:
        raise ProjectConfigNotFound(
            "The file {} was not found in the repo root: {}. Are you in a CumulusCI Project directory?".format(
                self.config_filename, repo_root
            )
        )
    with open(self.config_project_path, "r") as f_config:
        project_config = ordered_yaml_load(f_config)
    if project_config:
        self.config_project.update(project_config)
    if self.config_project_local_path:
        with open(self.config_project_local_path, "r") as f_local_config:
            local_config = ordered_yaml_load(f_local_config)
        if local_config:
            self.config_project_local.update(local_config)
    if self.additional_yaml:
        additional_yaml_config = ordered_yaml_load(self.additional_yaml)
        if additional_yaml_config:
            self.config_additional_yaml.update(additional_yaml_config)
    # Later layers win over earlier ones in merge_config.
    self.config = merge_config(
        OrderedDict(
            [
                ("global_config", self.config_global),
                ("global_local", self.config_global_local),
                ("project_config", self.config_project),
                ("project_local_config", self.config_project_local),
                ("additional_yaml", self.config_additional_yaml),
            ]
        )
    )
def start(self, build_requests=None, callback=None):
    """Run the client in a background thread.

    Optionally replaces the stored callback and build requests first.
    """
    if callback:
        self.callback = callback
    if build_requests:
        self.build_requests = build_requests
    self.sw = threading.Thread(target=self.run)
    self.sw.start()
def read(self, size=None):
    """Read up to *size* bytes (everything when None) from the stream.

    Raises EOFError on a short read; successful reads are mirrored into
    the capture stream when capturing is active.
    """
    blob = self.s.read(size)
    if size is not None and len(blob) < size:
        # NOTE(review): the partial blob is discarded (and not captured)
        # on EOF -- confirm that is acceptable to callers.
        raise EOFError
    if self._captured:
        self._captured.write(blob)
    return blob
def check_relations(self, relations):
    """Recursively validate dotted relation paths and mark them for inclusion.

    Each entry looks like "rel" or "rel.subrel..."; the local part must be
    a relationship field on this schema.  Raises ValueError for unknown or
    non-relationship fields.
    """
    for rel in relations:
        if not rel:
            continue
        # Split off only the first segment; the tail recurses below.
        fields = rel.split('.', 1)
        local_field = fields[0]
        if local_field not in self.fields:
            raise ValueError('Unknown field "{}"'.format(local_field))
        field = self.fields[local_field]
        if not isinstance(field, BaseRelationship):
            raise ValueError('Can only include relationships. "{}" is a "{}"'
                             .format(field.name, field.__class__.__name__))
        field.include_data = True
        if len(fields) > 1:
            field.schema.check_relations(fields[1:])
def atlasdb_num_peers( con=None, path=None ):
    """Return MAX(peer_index) from the peers table (None when the table is empty)."""
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "SELECT MAX(peer_index) FROM peers;"
        args = ()
        cur = dbcon.cursor()
        res = atlasdb_query_execute( cur, sql, args )
        ret = []
        for row in res:
            tmp = {}
            tmp.update(row)
            ret.append(tmp)
        # An aggregate MAX() query always yields exactly one row.
        assert len(ret) == 1
        return ret[0]['MAX(peer_index)']
def load(self, model, value):
    """Structure unstructured *value* into *model* via cattrs, recursively.

    Conversion failures are re-raised as SerializationException.
    """
    try:
        return self._cattrs_converter.structure(value, model)
    except (ValueError, TypeError) as e:
        raise SerializationException(str(e))
def annoy():
    "Annoy everyone with meaningless banter"
    # Each a* generator yields the lines of one refrain; a random one is
    # chosen and returned already invoked (an active generator).
    def a1():
        yield 'OOOOOOOHHH, WHAT DO YOU DO WITH A DRUNKEN SAILOR'
        yield 'WHAT DO YOU DO WITH A DRUNKEN SAILOR'
        yield "WHAT DO YOU DO WITH A DRUNKEN SAILOR, EARLY IN THE MORNIN'?"
    def a2():
        yield "I'M HENRY THE EIGHTH I AM"
        yield "HENRY THE EIGHTH I AM I AM"
        yield (
            "I GOT MARRIED TO THE GIRL NEXT DOOR; SHE'S BEEN MARRIED "
            "SEVEN TIMES BEFORE")
    def a3():
        yield "BOTHER!"
        yield "BOTHER BOTHER BOTHER!"
        yield "BOTHER BOTHER BOTHER BOTHER!"
    def a4():
        yield "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        yield "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
        yield "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
    def a5():
        yield "YOUR MOTHER WAS A HAMSTER!"
        yield "AND YOUR FATHER SMELLED OF ELDERBERRIES!"
    def a6():
        yield(
            "My Tallest! My Tallest! Hey! Hey My Tallest! My Tallest? My "
            "Tallest! Hey! Hey! Hey! My Taaaaaaallist! My Tallest? My "
            "Tallest! Hey! Hey My Tallest! My Tallest? It's me! My Tallest? "
            "My Tallest!")
    return random.choice([a1, a2, a3, a4, a5, a6])()
def list_to_compare_dict(self, list_form):
    """Index mapping fields by name so they can be queried/compared easily.

    Returns None when two entries share a name but differ; ignored
    mapping fields are zeroed out in the result.
    """
    compare_dict = {}
    for field in list_form:
        if field['name'] in compare_dict:
            self.pr_dbg("List has duplicate field %s:\n%s" %
                        (field['name'], compare_dict[field['name']]))
            if compare_dict[field['name']] != field:
                self.pr_dbg("And values are different:\n%s" % field)
                return None
        compare_dict[field['name']] = field
        # Neutralize fields that must not participate in comparisons.
        for ign_f in self.mappings_ignore:
            compare_dict[field['name']][ign_f] = 0
    return compare_dict
def _nvram_file(self):
    """Path of this application's nvram file inside the working directory."""
    filename = "nvram_{:05d}".format(self.application_id)
    return os.path.join(self.working_dir, filename)
def execute(task_function, *args, **kwargs):
    """Run *task_function* asynchronously via Celery's .delay().

    When TEST_DISABLE_ASYNC_DELAY is set, the function runs synchronously
    and its result is returned; otherwise DB connections are closed first
    (so the worker gets fresh ones) and None is returned.
    """
    if get_setting('TEST_DISABLE_ASYNC_DELAY'):
        logger.debug('Running function "%s" synchronously because '\
                     'TEST_DISABLE_ASYNC_DELAY is True'
                     % task_function.__name__)
        return task_function(*args, **kwargs)
    db.connections.close_all()
    task_function.delay(*args, **kwargs)
def _save_tracker_uri_to_file(self):
if not self.tracker_file_name:
return
f = None
try:
f = open(self.tracker_file_name, 'w')
f.write(self.tracker_uri)
except IOError, e:
raise ResumableUploadException(
'Couldn\'t write URI tracker file (%s): %s.\nThis can happen'
'if you\'re using an incorrectly configured upload tool\n'
'(e.g., gsutil configured to save tracker files to an '
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
finally:
if f:
f.close() | Saves URI to tracker file if one was passed to constructor. |
def encoded_class(block, offset=0):
    """Return the class mapped to the magic number found at *offset* in *block*.

    Raises InvalidFileFormatNull on empty input, InvalidFileFormat when no
    known magic matches at that offset.
    """
    if not block:
        raise InvalidFileFormatNull
    for key in __magicmap__:
        # find() is bounded so the magic must start exactly at `offset`.
        if block.find(key, offset, offset + len(key)) > -1:
            return __magicmap__[key]
    raise InvalidFileFormat
def start(self, timeout=None):
    """Start the worker thread; wait until it reports started or errored.

    Returns True when self.started is set within the timeout, False on
    error or timeout.
    """
    self.thread.start()
    start_time = time.time()
    # NOTE(review): a timeout of 0 falls back to self.timeout because of
    # the truthiness test -- confirm that is intended.
    if not timeout:
        timeout = self.timeout
    while start_time + timeout > time.time():
        self.thread.join(1)
        if self.started:
            return True
        if self.error:
            return False
    return False
def local_bind_hosts(self):
    """Return the IP addresses listening for the tunnels (started servers only)."""
    self._check_is_started()
    return [_server.local_host for _server in self._server_list if
            _server.local_host is not None]
def read_int_option (self, section, option, key=None, min=None, max=None):
    """Read an integer option into self.config, validating bounds.

    Raises LinkCheckerError when the value is below *min* or above *max*.
    Missing options are silently ignored; *key* defaults to *option*.
    """
    if self.has_option(section, option):
        num = self.getint(section, option)
        if min is not None and num < min:
            raise LinkCheckerError(
                _("invalid value for %s: %d must not be less than %d") % (option, num, min))
        # BUG FIX: the upper-bound check used `num < max`, so oversized
        # values were never rejected; it must be `num > max`.
        if max is not None and num > max:
            raise LinkCheckerError(
                _("invalid value for %s: %d must not be greater than %d") % (option, num, max))
        if key is None:
            key = option
        self.config[key] = num
def min_ems(self, value: float) -> 'Size':
    """Set the minimum size in ems; returns self for fluent chaining."""
    raise_not_number(value)
    self.minimum = f'{value}em'
    return self
def Reset(self):
    """Reset the lexer so a new data feed can be processed."""
    self.state = "INITIAL"
    self.state_stack = []
    self.buffer = ""
    self.processed_buffer = ""
    self.processed = 0
    self.error = 0
    self.verbose = 0
def debug_form_contents(form: cgi.FieldStorage,
                        to_stderr: bool = True,
                        to_logger: bool = False) -> None:
    """Write every key/value pair of a CGI form to stderr and/or the log.

    BUG FIX: each stderr entry now ends with a newline; previously all
    entries ran together on one line.
    """
    for k in form.keys():
        text = "{0} = {1}".format(k, form.getvalue(k))
        if to_stderr:
            sys.stderr.write(text + "\n")
        if to_logger:
            log.info(text)
def d8hdisttostrm(np, p, src, dist, thresh, workingdir=None,
                  mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
    """Run the TauDEM D8 horizontal-distance-down-to-stream tool.

    *np* is the MPI process count; *p*/*src* are inputs, *dist* the output,
    *thresh* the stream threshold.  Returns TauDEM.run's result.
    """
    fname = TauDEM.func_name('d8hdisttostrm')
    return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                      {'-p': p, '-src': src},
                      workingdir,
                      {'-thresh': thresh},
                      {'-dist': dist},
                      {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                      {'logfile': log_file, 'runtimefile': runtime_file})
def _add_edges(self, ast_node, trunk=None):
    """Add every bond in the parsed SMARTS tree as an edge in the graph.

    Walks atom/branch children; a branch's first atom bonds back to the
    current *trunk* atom, consecutive atoms bond to each other.
    """
    atom_indices = self._atom_indices
    for atom in ast_node.tail:
        if atom.head == 'atom':
            atom_idx = atom_indices[id(atom)]
            if atom.is_first_kid and atom.parent().head == 'branch':
                # First atom of a branch bonds to the trunk atom.
                trunk_idx = atom_indices[id(trunk)]
                self.add_edge(atom_idx, trunk_idx)
            if not atom.is_last_kid:
                if atom.next_kid.head == 'atom':
                    next_idx = atom_indices[id(atom.next_kid)]
                    self.add_edge(atom_idx, next_idx)
                elif atom.next_kid.head == 'branch':
                    # Following branches hang off this atom.
                    trunk = atom
            else:
                # Last child at this level: nothing further to connect.
                return
        elif atom.head == 'branch':
            self._add_edges(atom, trunk)
def from_string(self, value):
    """Convert a "{int: float, ...}" string (braces optional) to a dict.

    Keys become str(int); values become float.  Raises ValueError for
    entries that are not exactly "key:value".
    """
    if value.startswith('{') and value.endswith('}'):
        body = value[1:-1].strip()
    else:
        body = value.strip()
    parsed = {}
    for entry in body.split(','):
        pieces = entry.split(':')
        if len(pieces) != 2:
            raise ValueError('invalid entry in dictionary: ' + entry)
        parsed[str(int(pieces[0].strip()))] = float(pieces[1].strip())
    return parsed
def thread_local_property(name):
    """Create a property backed by ``self._thread_local_<name>.value``.

    The getter returns None when the backing attribute (presumably a
    threading.local created elsewhere -- confirm) has no value yet; the
    setter assumes the backing attribute already exists.
    """
    name = '_thread_local_' + name
    def fget(self):
        try:
            return getattr(self, name).value
        except AttributeError:
            return None
    def fset(self, value):
        getattr(self, name).value = value
    return property(fget=fget, fset=fset)
def Copy(self,
         old_urn,
         new_urn,
         age=NEWEST_TIME,
         limit=None,
         update_timestamps=False):
    """Make a copy of one AFF4 object's attributes under a different URN.

    With update_timestamps, values are written with fresh timestamps
    (only valid when reading the newest version, i.e. age == NEWEST_TIME).
    """
    new_urn = rdfvalue.RDFURN(new_urn)
    if update_timestamps and age != NEWEST_TIME:
        raise ValueError(
            "Can't update timestamps unless reading the latest version.")
    values = {}
    for predicate, value, ts in data_store.DB.ResolvePrefix(
        old_urn,
        AFF4_PREFIXES,
        timestamp=self.ParseAgeSpecification(age),
        limit=limit):
        if update_timestamps:
            # A None timestamp means "now" at write time.
            values.setdefault(predicate, []).append((value, None))
        else:
            values.setdefault(predicate, []).append((value, ts))
    if values:
        with data_store.DB.GetMutationPool() as pool:
            pool.MultiSet(new_urn, values, replace=False)
            self._UpdateChildIndex(new_urn, pool)
def _path_parts(self, pth):
    """Split *pth* on both slash styles into its directory components.

    A leading drive (or an empty root component) gets its path separator
    restored so it stays usable as a path prefix.
    """
    parts = re.split(r"[\\/]", pth)
    if parts and os.path.splitdrive(parts[0]) == (parts[0], ''):
        parts[0] += os.path.sep
    return parts
def _write_gen_model_stats(self, iteration:int)->None:
    "Writes gradient statistics for generator to Tensorboard."
    generator = self.learn.gan_trainer.generator
    self.stats_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='gen_model_stats')
    # Remember that stats are current for this iteration.
    self.gen_stats_updated = True
def attrgetcol(self, groupname, attrname):
    """Return the attribute's value for every row of the group, in row order."""
    nrows = self.attrnrows(groupname)
    return [self.attrget(groupname, attrname, row) for row in range(nrows)]
def tags(tag_references):
    """Return a formatted, comma-separated listing of tag names (blank when empty)."""
    if not tag_references:
        return blank()
    names = [
        name
        for name in (utils.lookup(detail, 'tag', 'name') for detail in tag_references)
        if name is not None
    ]
    return listing(names, separator=', ')
def relation_(self, table, origin_field, search_field, destination_field=None,
              id_field="id"):
    """Return a new DataSwim instance with a column filled from a foreign-key relation."""
    df = self._relation(table, origin_field,
                        search_field, destination_field, id_field)
    return self._duplicate_(df)
def deploy(overwrite=False):
    """Deploy a versioned project on the host.

    With *overwrite* the virtualenv is recreated; a fresh (or overwritten)
    deployment additionally creates the db, virtualenv and requirements.
    """
    check_settings()
    if overwrite:
        rmvirtualenv()
    deploy_funcs = [deploy_project,deploy_templates, deploy_static, deploy_media, deploy_webconf, deploy_wsgi]
    if not patch_project() or overwrite:
        deploy_funcs = [deploy_db,mkvirtualenv,pip_install_requirements] + deploy_funcs
    for func in deploy_funcs: func()
def _check_row_size(self, array):
    """Record the first row's length and reject rows of any other length.

    Python 2 raise syntax.  Raises ArraySizeError on a size mismatch.
    """
    if not self._row_size:
        self._row_size = len(array)
    elif self._row_size != len(array):
        raise ArraySizeError, "array should contain %d elements" \
            % self._row_size
def equipable_classes(self):
    """Return the non-empty classes that _can_ use the item.

    Falls back to the currently equipped classes when the schema item has
    no "used_by_classes" entry.
    """
    sitem = self._schema_item
    return [c for c in sitem.get("used_by_classes", self.equipped.keys()) if c]
def generate_cache_key(cached, **kwargs):
    """Auto-generate a cache key for a queryset or model.  (Python 2 code.)

    Querysets key on their SQL; models key on app_label.module_name plus
    the kwargs.  Raises for other types or an empty resulting key.
    """
    if isinstance(cached, QuerySet):
        key = str(cached.query)
    elif isinstance(cached, (Model, ModelBase)):
        key = '%s.%s:%s' % (cached._meta.app_label,
                            cached._meta.module_name,
                            ','.join('%s=%s' % item for item in kwargs.iteritems()))
    else:
        raise AttributeError("Objects must be queryset or model.")
    if not key:
        raise Exception('Cache key cannot be empty.')
    key = clean_cache_key(key)
    return key
def names_dict(self):
    """Dict with only the generated names: name, vname, vid plus fqname.  (Python 2.)"""
    INCLUDE_KEYS = ['name', 'vname', 'vid']
    d = {k: v for k, v in iteritems(self.dict) if k in INCLUDE_KEYS}
    d['fqname'] = self.fqname
    return d
def reverseCommit(self):
    """Put the document back into its 'before' state (text and styling)."""
    self.baseClass.setText(self.textBefore)
    self.qteWidget.SCISetStylingEx(0, 0, self.styleBefore)
def translate_syntax_error(error, source=None):
    """Rewrite a template syntax error so traceback systems display it nicely.

    Marks the error as translated and fabricates exc_info pointing at the
    template file/line.
    """
    error.source = source
    error.translated = True
    exc_info = (error.__class__, error, None)
    filename = error.filename
    if filename is None:
        filename = '<unknown>'
    return fake_exc_info(exc_info, filename, error.lineno)
def fill(self, path):
    """Record the contents of the env's bin/ and site-packages directories.

    NOTE(review): assumes *path* ends with a path separator (plain string
    concatenation) and that exactly one site-packages glob match exists.
    """
    self.bindir = set(os.listdir(path + 'bin/'))
    self.lib_sitepackages = set(os.listdir(glob.glob(
        path + 'lib/python?.?/site-packages/')[0]))
def npz_convert(self, infile, item):
    """Convert a numpy NPZ file (with a 'features' array) to h5features output."""
    data = np.load(infile)
    labels = self._labels(data)
    features = data['features']
    self._write(item, labels, features)
def purge_old_request_logs(delete_before_days=7):
    """Delete RequestLog rows older than *delete_before_days*; return delete() result."""
    delete_before_date = timezone.now() - timedelta(days=delete_before_days)
    logs_deleted = RequestLog.objects.filter(
        created_on__lte=delete_before_date).delete()
    return logs_deleted
def register_form_factory(Form, app):
    """Return the registration form, extended with a recaptcha field when keys are configured."""
    if app.config.get('RECAPTCHA_PUBLIC_KEY') and \
            app.config.get('RECAPTCHA_PRIVATE_KEY'):
        class RegisterForm(Form):
            recaptcha = FormField(RegistrationFormRecaptcha, separator='.')
        return RegisterForm
    return Form
async def _reset_protocol(self, exc=None):
    """Reset the protocol after an error.

    Shuts the current protocol down, drops the reference and notifies all
    observation error callbacks with *exc* before clearing them.
    """
    protocol = await self._get_protocol()
    await protocol.shutdown()
    self._protocol = None
    for ob_error in self._observations_err_callbacks:
        ob_error(exc)
    self._observations_err_callbacks.clear()
def render_headers(self):
    """Render this field's headers, priority keys first, CRLF-joined.

    Headers with falsy values are omitted; output ends with a blank line.
    """
    priority = ('Content-Disposition', 'Content-Type', 'Content-Location')
    rendered = []
    for key in priority:
        value = self.headers.get(key, False)
        if value:
            rendered.append('%s: %s' % (key, value))
    for key, value in self.headers.items():
        if key in priority:
            continue
        if value:
            rendered.append('%s: %s' % (key, value))
    rendered.append('\r\n')
    return '\r\n'.join(rendered)
def _python_installed(ret, python, user=None):
    """Check whether the given python version is installed via pyenv.

    Mutates and returns *ret*: result/comment are set only when the
    version is found; 'default' records whether it is the pyenv default.
    """
    default = __salt__['pyenv.default'](runas=user)
    for version in __salt__['pyenv.versions'](user):
        if version == python:
            ret['result'] = True
            ret['comment'] = 'Requested python exists.'
            ret['default'] = default == python
            break
    return ret
def _item_sources(self):
    """Mappings searched, in order, when completing item keys."""
    return [self.data_vars, self.coords, {d: self[d] for d in self.dims},
            LevelCoordinatesSource(self)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.