code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _get_parents_from_parts(kwargs):
    """Get the parents given all the children parameters.

    Mutates and returns *kwargs*: adds a 'child_parent_<i>' entry for each
    child level and a 'resource_parent' entry for the resource itself.
    """
    parent_builder = []
    if kwargs['last_child_num'] is not None:
        # Root resource segment, e.g. '<type>/<name>/'.
        parent_builder.append('{type}/{name}/'.format(**kwargs))
        for index in range(1, kwargs['last_child_num']):
            # Each child level may live under its own provider namespace.
            child_namespace = kwargs.get('child_namespace_{}'.format(index))
            if child_namespace is not None:
                parent_builder.append('providers/{}/'.format(child_namespace))
            # Parent path of child *index* is everything built so far.
            kwargs['child_parent_{}'.format(index)] = ''.join(parent_builder)
            # Double .format(): the first inserts *index* into the placeholder
            # names, the second substitutes the actual child type/name values.
            parent_builder.append(
                '{{child_type_{0}}}/{{child_name_{0}}}/'
                .format(index).format(**kwargs))
        # The last child gets a parent entry, but its own type/name segment is
        # not appended to the builder (it is the resource being addressed).
        child_namespace = kwargs.get('child_namespace_{}'.format(kwargs['last_child_num']))
        if child_namespace is not None:
            parent_builder.append('providers/{}/'.format(child_namespace))
        kwargs['child_parent_{}'.format(kwargs['last_child_num'])] = ''.join(parent_builder)
    # resource_parent only makes sense when a resource name is present.
    kwargs['resource_parent'] = ''.join(parent_builder) if kwargs['name'] else None
    return kwargs | Get the parents given all the children parameters. |
def getFeature(self, compoundId):
feature = self._getFeatureById(compoundId.featureId)
feature.id = str(compoundId)
return feature | find a feature and return ga4gh representation, use compoundId as
featureId |
def handle_command_line(options):
options = merge(options, constants.DEFAULT_OPTIONS)
engine = plugins.ENGINES.get_engine(
options[constants.LABEL_TEMPLATE_TYPE],
options[constants.LABEL_TMPL_DIRS],
options[constants.LABEL_CONFIG_DIR],
)
if options[constants.LABEL_TEMPLATE] is None:
if options[constants.POSITIONAL_LABEL_TEMPLATE] is None:
raise exceptions.NoTemplate(constants.ERROR_NO_TEMPLATE)
else:
engine.render_string_to_file(
options[constants.POSITIONAL_LABEL_TEMPLATE],
options[constants.LABEL_CONFIG],
options[constants.LABEL_OUTPUT],
)
else:
engine.render_to_file(
options[constants.LABEL_TEMPLATE],
options[constants.LABEL_CONFIG],
options[constants.LABEL_OUTPUT],
)
engine.report()
HASH_STORE.save_hashes()
exit_code = reporter.convert_to_shell_exit_code(
engine.number_of_templated_files()
)
return exit_code | act upon command options |
def tokenise(string, strict=False, replace=False,
             diphtongs=False, tones=False, unknown=False, merge=None):
    """Tokenise an IPA string into a list of tokens.

    Words are split on whitespace (underscores are treated as spaces) and
    tokenised individually; the per-word token lists are concatenated.
    """
    # '_' is accepted as a word separator alongside ordinary whitespace.
    words = string.strip().replace('_', ' ').split()
    output = []
    for word in words:
        tokens = tokenise_word(word, strict, replace, tones, unknown)
        if diphtongs:
            # Group adjacent tokens that form diphtongs into single tokens.
            tokens = group(are_diphtong, tokens)
        if merge is not None:
            # Caller-supplied predicate for additional within-word grouping.
            tokens = group(merge, tokens)
        output.extend(tokens)
    return output | Tokenise an IPA string into a list of tokens. Raise ValueError if there is
a problem; if strict=True, this includes the string not being compliant to
the IPA spec.
If replace=True, replace some common non-IPA symbols with their IPA
counterparts. If diphtongs=True, try to group diphtongs into single tokens.
If tones=True, do not ignore tone symbols. If unknown=True, do not ignore
symbols that cannot be classified into a relevant category. If merge is not
None, use it for within-word token grouping.
Part of ipatok's public API. |
def get_forecast_api(self, longitude: str, latitude: str) -> {}:
api_url = APIURL_TEMPLATE.format(longitude, latitude)
response = urlopen(api_url)
data = response.read().decode('utf-8')
json_data = json.loads(data)
return json_data | gets data from API |
def add_exception_handler(self, exception_handler):
if exception_handler is None:
raise RuntimeConfigException(
"Valid Exception Handler instance to be provided")
if not isinstance(exception_handler, AbstractExceptionHandler):
raise RuntimeConfigException(
"Input should be an ExceptionHandler instance")
self.exception_handlers.append(exception_handler) | Register input to the exception handlers list.
:param exception_handler: Exception Handler instance to be
registered.
:type exception_handler: AbstractExceptionHandler
:return: None |
def at(self, time_str):
    """Schedule the job every day at a specific time.

    Calling this is only valid for jobs scheduled to run every N day(s)
    or every N hour(s).

    :param time_str: A string in `XX:YY` format.
    :return: The invoked job instance (for chaining).
    """
    # Only daily/hourly jobs (or jobs pinned to a start day) support at().
    assert self.unit in ('days', 'hours') or self.start_day
    hour, minute = time_str.split(':')
    minute = int(minute)
    if self.unit == 'days' or self.start_day:
        hour = int(hour)
        assert 0 <= hour <= 23
    elif self.unit == 'hours':
        # For hourly jobs only the minute component is meaningful.
        hour = 0
    assert 0 <= minute <= 59
    self.at_time = datetime.time(hour, minute)
    return self | Schedule the job every day at a specific time.
Calling this is only valid for jobs scheduled to run
every N day(s).
:param time_str: A string in `XX:YY` format.
:return: The invoked job instance |
def compare(self, path, prefixed_path, source_storage):
comparitor = getattr(self, 'compare_%s' % self.comparison_method, None)
if not comparitor:
comparitor = self._create_comparitor(self.comparison_method)
return comparitor(path, prefixed_path, source_storage) | Returns True if the file should be copied. |
def escape(self, text, quote = True):
if isinstance(text, bytes):
return escape_b(text, quote)
else:
return escape(text, quote) | Escape special characters in HTML |
def GetClientVersion(client_id, token=None):
if data_store.RelationalDBEnabled():
sinfo = data_store.REL_DB.ReadClientStartupInfo(client_id=client_id)
if sinfo is not None:
return sinfo.client_info.client_version
else:
return config.CONFIG["Source.version_numeric"]
else:
with aff4.FACTORY.Open(client_id, token=token) as client:
cinfo = client.Get(client.Schema.CLIENT_INFO)
if cinfo is not None:
return cinfo.client_version
else:
return config.CONFIG["Source.version_numeric"] | Returns last known GRR version that the client used. |
def export(self, source=None):
uidentities = {}
uids = api.unique_identities(self.db, source=source)
for uid in uids:
enrollments = [rol.to_dict()
for rol in api.enrollments(self.db, uuid=uid.uuid)]
u = uid.to_dict()
u['identities'].sort(key=lambda x: x['id'])
uidentities[uid.uuid] = u
uidentities[uid.uuid]['enrollments'] = enrollments
blacklist = [mb.excluded for mb in api.blacklist(self.db)]
obj = {'time': str(datetime.datetime.now()),
'source': source,
'blacklist': blacklist,
'organizations': {},
'uidentities': uidentities}
return json.dumps(obj, default=self._json_encoder,
indent=4, separators=(',', ': '),
sort_keys=True) | Export a set of unique identities.
Method to export unique identities from the registry. Identities schema
will follow Sorting Hat JSON format.
When source parameter is given, only those unique identities which have
one or more identities from the given source will be exported.
:param source: source of the identities to export
:returns: a JSON formatted str |
def _x_start_ok(self, client_properties, mechanism, response, locale):
if self.server_capabilities.get('consumer_cancel_notify'):
if 'capabilities' not in client_properties:
client_properties['capabilities'] = {}
client_properties['capabilities']['consumer_cancel_notify'] = True
if self.server_capabilities.get('connection.blocked'):
if 'capabilities' not in client_properties:
client_properties['capabilities'] = {}
client_properties['capabilities']['connection.blocked'] = True
args = AMQPWriter()
args.write_table(client_properties)
args.write_shortstr(mechanism)
args.write_longstr(response)
args.write_shortstr(locale)
self._send_method((10, 11), args) | Select security mechanism and locale
This method selects a SASL security mechanism. ASL uses SASL
(RFC2222) to negotiate authentication and encryption.
PARAMETERS:
client_properties: table
client properties
mechanism: shortstr
selected security mechanism
A single security mechanisms selected by the client,
which must be one of those specified by the server.
RULE:
The client SHOULD authenticate using the highest-
level security profile it can handle from the list
provided by the server.
RULE:
The mechanism field MUST contain one of the
security mechanisms proposed by the server in the
Start method. If it doesn't, the server MUST close
the socket.
response: longstr
security response data
A block of opaque data passed to the security
mechanism. The contents of this data are defined by
the SASL security mechanism. For the PLAIN security
mechanism this is defined as a field table holding two
fields, LOGIN and PASSWORD.
locale: shortstr
selected message locale
A single message local selected by the client, which
must be one of those specified by the server. |
def oauth_request(self, url, method, **kwargs):
if self.session is None:
self.session = self.get_session()
return self._internal_request(self.session, url, method, **kwargs) | Makes a request to url using an oauth session
:param str url: url to send request to
:param str method: type of request (get/put/post/patch/delete)
:param kwargs: extra params to send to the request api
:return: Response of the request
:rtype: requests.Response |
def UNEXPOSED(self, _cursor_type):
_decl = _cursor_type.get_declaration()
name = self.get_unique_name(_decl)
if self.is_registered(name):
obj = self.get_registered(name)
else:
obj = self.parse_cursor(_decl)
return obj | Handles unexposed types.
Returns the canonical type instead. |
def _get_cmap(kwargs: dict) -> colors.Colormap:
from matplotlib.colors import ListedColormap
cmap = kwargs.pop("cmap", default_cmap)
if isinstance(cmap, list):
return ListedColormap(cmap)
if isinstance(cmap, str):
try:
cmap = plt.get_cmap(cmap)
except BaseException as exc:
try:
import seaborn as sns
sns_palette = sns.color_palette(cmap, n_colors=256)
cmap = ListedColormap(sns_palette, name=cmap)
except ImportError:
raise exc
return cmap | Get the colour map for plots that support it.
Parameters
----------
cmap : str or colors.Colormap or list of colors
A map or an instance of cmap. This can also be a seaborn palette
(if seaborn is installed). |
def polynomial_exp_mod( base, exponent, polymod, p ):
assert exponent < p
if exponent == 0: return [ 1 ]
G = base
k = exponent
if k%2 == 1: s = G
else: s = [ 1 ]
while k > 1:
k = k // 2
G = polynomial_multiply_mod( G, G, polymod, p )
if k%2 == 1: s = polynomial_multiply_mod( G, s, polymod, p )
return s | Polynomial exponentiation modulo a polynomial over ints mod p.
Polynomials are represented as lists of coefficients
of increasing powers of x. |
def try_pick_piece_of_work(self, worker_id, submission_id=None):
client = self._datastore_client
unclaimed_work_ids = None
if submission_id:
unclaimed_work_ids = [
k for k, v in iteritems(self.work)
if is_unclaimed(v) and (v['submission_id'] == submission_id)
]
if not unclaimed_work_ids:
unclaimed_work_ids = [k for k, v in iteritems(self.work)
if is_unclaimed(v)]
if unclaimed_work_ids:
next_work_id = random.choice(unclaimed_work_ids)
else:
return None
try:
with client.transaction() as transaction:
work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,
KIND_WORK, next_work_id)
work_entity = client.get(work_key, transaction=transaction)
if not is_unclaimed(work_entity):
return None
work_entity['claimed_worker_id'] = worker_id
work_entity['claimed_worker_start_time'] = get_integer_time()
transaction.put(work_entity)
except Exception:
return None
return next_work_id | Tries pick next unclaimed piece of work to do.
Attempt to claim work piece is done using Cloud Datastore transaction, so
only one worker can claim any work piece at a time.
Args:
worker_id: ID of current worker
submission_id: if not None then this method will try to pick
piece of work for this submission
Returns:
ID of the claimed work piece |
def phases_with(self, **kwargs) -> [PhaseOutput]:
return [phase for phase in self.phases if
all([getattr(phase, key) == value for key, value in kwargs.items()])] | Filters phases. If no arguments are passed all phases are returned. Arguments must be key value pairs, with
phase, data or pipeline as the key.
Parameters
----------
kwargs
Filters, e.g. pipeline=pipeline1 |
def generate_hotp(secret, counter=4):
msg = struct.pack('>Q', counter)
digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()
ob = digest[19]
if PY2:
ob = ord(ob)
pos = ob & 15
base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
token = base % 1000000
return token | Generate a HOTP code.
:param secret: A secret token for the authentication.
:param counter: HOTP is a counter based algorithm. |
def settled(self, block_identifier: BlockSpecification) -> bool:
return self.token_network.channel_is_settled(
participant1=self.participant1,
participant2=self.participant2,
block_identifier=block_identifier,
channel_identifier=self.channel_identifier,
) | Returns if the channel is settled. |
def get_perceval_params_from_url(cls, urls):
params = []
dparam = cls.get_arthur_params_from_url(urls)
params.append(dparam["url"])
return params | Get the perceval params given the URLs for the data source |
def _get_from_dapi_or_mirror(link):
exception = False
try:
req = requests.get(_api_url() + link, timeout=5)
except requests.exceptions.RequestException:
exception = True
attempts = 1
while exception or str(req.status_code).startswith('5'):
if attempts > 5:
raise DapiCommError('Could not connect to the API endpoint, sorry.')
exception = False
try:
req = requests.get(_api_url(attempts % 2) + link, timeout=5*attempts)
except requests.exceptions.RequestException:
exception = True
attempts += 1
return req | Tries to get the link form DAPI or the mirror |
def bind(value, name):
if isinstance(value, Markup):
return value
elif requires_in_clause(value):
raise MissingInClauseException(
)
else:
return _bind_param(_thread_local.bind_params, name, value) | A filter that prints %s, and stores the value
in an array, so that it can be bound using a prepared statement
This filter is automatically applied to every {{variable}}
during the lexing stage, so developers can't forget to bind |
def log_warning(msg, logger="TaskLogger"):
tasklogger = get_tasklogger(logger)
tasklogger.warning(msg)
return tasklogger | Log a WARNING message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger |
def sorted_source_files(self):
assert self.final, 'Call build() before using the graph.'
out = []
for node in nx.topological_sort(self.graph):
if isinstance(node, NodeSet):
out.append(node.nodes)
else:
out.append([node])
return list(reversed(out)) | Returns a list of targets in topologically sorted order. |
def collapse_focussed(self):
if implementsCollapseAPI(self._tree):
w, focuspos = self.get_focus()
self._tree.collapse(focuspos)
self._walker.clear_cache()
self.refresh() | Collapse currently focussed position; works only if the underlying
tree allows it. |
def drive_enclosures(self):
if not self.__drive_enclures:
self.__drive_enclures = DriveEnclosures(self.__connection)
return self.__drive_enclures | Gets the Drive Enclosures API client.
Returns:
DriveEnclosures: |
def get_language_database():
lang = None
language = get_language()
if language:
for x in settings.LANGUAGES_DATABASES:
if x.upper() == language.upper():
lang = language
break
if lang is None:
lang = settings.LANGUAGES_DATABASES[0]
return lang.lower() | Return the language to be used to search the database contents |
def read_vpcs_stdout(self):
output = ""
if self._vpcs_stdout_file:
try:
with open(self._vpcs_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warn("Could not read {}: {}".format(self._vpcs_stdout_file, e))
return output | Reads the standard output of the VPCS process.
Only use when the process has been stopped or has crashed. |
def libvlc_media_duplicate(p_md):
f = _Cfunctions.get('libvlc_media_duplicate', None) or \
_Cfunction('libvlc_media_duplicate', ((1,),), class_result(Media),
ctypes.c_void_p, Media)
return f(p_md) | Duplicate a media descriptor object.
@param p_md: a media descriptor object. |
def get_port(self, adapter_number, port_number):
for port in self.ports:
if port.adapter_number == adapter_number and port.port_number == port_number:
return port
return None | Return the port for this adapter_number and port_number
or returns None if the port is not found |
def _decref_dependencies_recursive(self, term, refcounts, garbage):
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
if refcounts[parent] == 0:
garbage.add(parent)
self._decref_dependencies_recursive(parent, refcounts, garbage) | Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies` |
def register_migration(self, migration: 'Migration'):
if migration.from_ver >= migration.to_ver:
raise ValueError('Migration cannot downgrade verson')
if migration.from_ver != self._final_ver:
raise ValueError('Cannot register disjoint migration')
self._migrations[migration.from_ver] = migration
self._final_ver = migration.to_ver | Register a migration.
You can only register migrations in order. For example, you can
register migrations from version 1 to 2, then 2 to 3, then 3 to
4. You cannot register 1 to 2 followed by 3 to 4. |
def from_deformation(cls, deformation):
dfm = Deformation(deformation)
return cls(0.5 * (np.dot(dfm.trans, dfm) - np.eye(3))) | Factory method that returns a Strain object from a deformation
gradient
Args:
deformation (3x3 array-like): |
def contains_point(self, p):
    """Returns whether this node or a child overlaps p."""
    # Check intervals stored at this node first.
    for iv in self.s_center:
        if iv.contains_point(p):
            return True
    # Descend left (False -> branch 0) or right (True -> branch 1) of center.
    branch = self[p > self.x_center]
    # NOTE(review): may return a falsy branch object rather than a strict
    # bool when no child exists on that side.
    return branch and branch.contains_point(p) | Returns whether this node or a child overlaps p. |
def filter_query(self, query):
for filter_class in list(self.filter_classes):
query = filter_class().filter_query(self.request, query, self)
return query | Filter the given query using the filter classes specified on the view if any are specified. |
def sentiment(
text = None,
confidence = False
):
try:
words = text.split(" ")
words = [word for word in words if word]
features = word_features(words)
classification = classifier.classify(features)
confidence_classification = classifier.prob_classify(features).prob(classification)
except:
classification = None
confidence_classification = None
if confidence:
return (
classification,
confidence_classification
)
else:
return classification | This function accepts a string text input. It calculates the sentiment of
the text, "pos" or "neg". By default, it returns this calculated sentiment.
If selected, it returns a tuple of the calculated sentiment and the
classificaton confidence. |
def get_patch_op(self, keypath, value, op='replace'):
if isinstance(value, bool):
value = str(value).lower()
return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value} | Return an object that describes a change of configuration on the given staging.
Setting will be applied on all available HTTP methods. |
def drdpgr(body, lon, lat, alt, re, f):
body = stypes.stringToCharP(body)
lon = ctypes.c_double(lon)
lat = ctypes.c_double(lat)
alt = ctypes.c_double(alt)
re = ctypes.c_double(re)
f = ctypes.c_double(f)
jacobi = stypes.emptyDoubleMatrix()
libspice.drdpgr_c(body, lon, lat, alt, re, f, jacobi)
return stypes.cMatrixToNumpy(jacobi) | This routine computes the Jacobian matrix of the transformation
from planetographic to rectangular coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdpgr_c.html
:param body: Body with which coordinate system is associated.
:type body: str
:param lon: Planetographic longitude of a point (radians).
:type lon: float
:param lat: Planetographic latitude of a point (radians).
:type lat: float
:param alt: Altitude of a point above reference spheroid.
:type alt: float
:param re: Equatorial radius of the reference spheroid.
:type re: float
:param f: Flattening coefficient.
:type f: float
:return: Matrix of partial derivatives.
:rtype: 3x3-Element Array of floats |
def templates_in(path):
ext = '.cpp'
return (
Template(f[0:-len(ext)], load_file(os.path.join(path, f)))
for f in os.listdir(path) if f.endswith(ext)
) | Enumerate the templates found in path |
def on_batch_end(self, train, **kwargs):
"Take the stored results and puts it in `self.stats`"
if train: self.stats.append(self.hooks.stored) | Take the stored results and puts it in `self.stats` |
def run_commands(commands,
directory,
env=None
):
if env is None:
env = os.environ.copy()
for step in commands:
if isinstance(step, (list, six.string_types)):
execution_dir = directory
raw_command = step
elif step.get('command'):
execution_dir = os.path.join(directory,
step.get('cwd')) if step.get('cwd') else directory
raw_command = step['command']
else:
raise AttributeError("Invalid command step: %s" % step)
command_list = raw_command.split(' ') if isinstance(raw_command, six.string_types) else raw_command
if platform.system().lower() == 'windows':
command_list = fix_windows_command_list(command_list)
with change_dir(execution_dir):
check_call(command_list, env=env) | Run list of commands. |
def apply_inverse(self, y):
self._recompute()
return self.solver.solve(self._process_input(y)) | Apply the inverse of the covariance matrix to a vector or matrix
Solve ``K.x = y`` for ``x`` where ``K`` is the covariance matrix of
the GP with the white noise and ``yerr`` components included on the
diagonal.
Args:
y (array[n] or array[n, nrhs]): The vector or matrix ``y``
described above.
Returns:
array[n] or array[n, nrhs]: The solution to the linear system.
This will have the same shape as ``y``.
Raises:
ValueError: For mismatched dimensions. |
def _fetch_data(self):
r
with h5py.File(self.name + '.hdf5') as f:
for item in f.keys():
obj_name, propname = item.split('|')
propname = propname.split('_')
propname = propname[0] + '.' + '_'.join(propname[1:])
self[obj_name][propname] = f[item] | r"""
Retrieve data from an HDF5 file and place onto correct objects in the
project
See Also
--------
_dump_data
Notes
-----
In principle, after data is fetched from and HDF5 file, it should
physically stay there until it's called upon. This let users manage
the data as if it's in memory, even though it isn't. This behavior
has not been confirmed yet, which is why these functions are hidden. |
def _sync_reminders(self, reminders_json):
for reminder_json in reminders_json:
reminder_id = reminder_json['id']
task_id = reminder_json['item_id']
if task_id not in self.tasks:
continue
task = self.tasks[task_id]
self.reminders[reminder_id] = Reminder(reminder_json, task) | Populate the user's reminders from a JSON encoded list. |
def _count_relevant_tb_levels(tb):
length = contiguous_unittest_frames = 0
while tb:
length += 1
if _is_unittest_frame(tb):
contiguous_unittest_frames += 1
else:
contiguous_unittest_frames = 0
tb = tb.tb_next
return length - contiguous_unittest_frames | Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``. |
def eglQueryString(display, name):
out = _lib.eglQueryString(display, name)
if not out:
raise RuntimeError('Could not query %s' % name)
return out | Query string from display |
def check_account_address(address):
if address == 'treasury' or address == 'unallocated':
return True
if address.startswith('not_distributed_') and len(address) > len('not_distributed_'):
return True
if re.match(OP_C32CHECK_PATTERN, address):
try:
c32addressDecode(address)
return True
except:
pass
return check_address(address) | verify that a string is a valid account address.
Can be a b58-check address, a c32-check address, as well as the string "treasury" or "unallocated" or a string starting with 'not_distributed_'
>>> check_account_address('16EMaNw3pkn3v6f2BgnSSs53zAKH4Q8YJg')
True
>>> check_account_address('16EMaNw3pkn3v6f2BgnSSs53zAKH4Q8YJh')
False
>>> check_account_address('treasury')
True
>>> check_account_address('unallocated')
True
>>> check_account_address('neither')
False
>>> check_account_address('not_distributed')
False
>>> check_account_address('not_distributed_')
False
>>> check_account_address('not_distributed_asdfasdfasdfasdf')
True
>>> check_account_address('SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7')
True
>>> check_account_address('SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ8')
False |
def service_param_string(params):
p = []
k = []
for param in params:
name = fix_param_name(param['name'])
if 'required' in param and param['required'] is True:
p.append(name)
else:
if 'default' in param:
k.append('{name}={default}'.format(name=name, default=param['default']))
else:
k.append('{name}=None'.format(name=name))
p.sort(lambda a, b: len(a) - len(b))
k.sort()
a = p + k
return ', '.join(a) | Takes a param section from a metadata class and returns a param string for the service method |
def get_finished_results(self):
task_and_results = []
for pending_result in self.pending_results:
if pending_result.ready():
ret = pending_result.get()
task_id, result = ret
task = self.task_id_to_task[task_id]
self.process_all_messages_in_queue()
task.after_run(result)
task_and_results.append((task, result))
self.pending_results.remove(pending_result)
return task_and_results | Go through pending results and retrieve the results if they are done.
Then start child tasks for the task that finished. |
def _hjoin_multiline(join_char, strings):
cstrings = [string.split("\n") for string in strings]
max_num_lines = max(len(item) for item in cstrings)
pp = []
for k in range(max_num_lines):
p = [cstring[k] for cstring in cstrings]
pp.append(join_char + join_char.join(p) + join_char)
return "\n".join([p.rstrip() for p in pp]) | Horizontal join of multiline strings |
def compile_regex(self, pattern, flags=0):
pattern_re = regex.compile('(?P<substr>%\{(?P<fullname>(?P<patname>\w+)(?::(?P<subname>\w+))?)\})')
while 1:
matches = [md.groupdict() for md in pattern_re.finditer(pattern)]
if len(matches) == 0:
break
for md in matches:
if md['patname'] in self.pattern_dict:
if md['subname']:
if '(?P<' in self.pattern_dict[md['patname']]:
repl = regex.sub('\(\?P<(\w+)>', '(?P<%s>' % md['subname'],
self.pattern_dict[md['patname']], 1)
else:
repl = '(?P<%s>%s)' % (md['subname'],
self.pattern_dict[md['patname']])
else:
repl = self.pattern_dict[md['patname']]
pattern = pattern.replace(md['substr'], repl)
else:
return
return regex.compile(pattern, flags) | Compile regex from pattern and pattern_dict |
def make_session(scraper):
cache_path = os.path.join(scraper.config.data_path, 'cache')
cache_policy = scraper.config.cache_policy
cache_policy = cache_policy.lower().strip()
session = ScraperSession()
session.scraper = scraper
session.cache_policy = cache_policy
adapter = CacheControlAdapter(
FileCache(cache_path),
cache_etags=True,
controller_class=PolicyCacheController
)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session | Instantiate a session with the desired configuration parameters,
including the cache policy. |
def limit_inference(iterator, size):
yield from islice(iterator, size)
has_more = next(iterator, False)
if has_more is not False:
yield Uninferable
return | Limit inference amount.
Limit inference amount to help with performance issues with
exponentially exploding possible results.
:param iterator: Inference generator to limit
:type iterator: Iterator(NodeNG)
:param size: Maximum mount of nodes yielded plus an
Uninferable at the end if limit reached
:type size: int
:yields: A possibly modified generator
:rtype param: Iterable |
def move_to(self, folder):
if isinstance(folder, Folder):
self.move_to(folder.id)
else:
self._move_to(folder) | Moves the email to the folder specified by the folder parameter.
Args:
folder: A string containing the folder ID the message should be moved to, or a Folder instance |
def task_delete(self, **kw):
def validate(task):
if task['status'] == Status.DELETED:
raise ValueError("Task is already deleted.")
return self._task_change_status(Status.DELETED, validate, **kw) | Marks a task as deleted, optionally specifying a completion
date with the 'end' argument. |
def email(self, name, to, from_addr, subject, body, header, owner=None, **kwargs):
return Email(self.tcex, name, to, from_addr, subject, body, header, owner=owner, **kwargs) | Create the Email TI object.
Args:
owner:
to:
from_addr:
name:
subject:
header:
body:
**kwargs:
Return: |
def task_remove_user(self, *args, **kwargs):
if not self.cur_task:
return
i = self.task_user_tablev.currentIndex()
item = i.internalPointer()
if item:
user = item.internal_data()
self.cur_task.users.remove(user)
item.set_parent(None) | Remove the selected user from the task
:returns: None
:rtype: None
:raises: None |
def _filter_by_statement(self, statement):
self.__class__._check_conditional_statement(statement, 1)
_filt_values, _filt_datetimes = [], []
for i, a in enumerate(self._values):
if eval(statement, {'a': a}):
_filt_values.append(a)
_filt_datetimes.append(self.datetimes[i])
return _filt_values, _filt_datetimes | Filter the data collection based on a conditional statement. |
def _maybe_append_formatted_extension(numobj, metadata, num_format, number):
if numobj.extension:
if num_format == PhoneNumberFormat.RFC3966:
return number + _RFC3966_EXTN_PREFIX + numobj.extension
else:
if metadata.preferred_extn_prefix is not None:
return number + metadata.preferred_extn_prefix + numobj.extension
else:
return number + _DEFAULT_EXTN_PREFIX + numobj.extension
return number | Appends the formatted extension of a phone number to formatted number,
if the phone number had an extension specified. |
def modify(self, **kwargs):
for key in kwargs:
if key not in ['email', 'cellphone', 'countrycode', 'countryiso',
'defaultsmsprovider', 'directtwitter',
'twitteruser', 'name']:
sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
'of <PingdomContact>.modify()\n')
response = self.pingdom.request('PUT', 'notification_contacts/%s' % self.id, kwargs)
return response.json()['message'] | Modify a contact.
Returns status message
Optional Parameters:
* name -- Contact name
Type: String
* email -- Contact email address
Type: String
* cellphone -- Cellphone number, without the country code part. In
some countries you are supposed to exclude leading zeroes.
(Requires countrycode and countryiso)
Type: String
* countrycode -- Cellphone country code (Requires cellphone and
countryiso)
Type: String
* countryiso -- Cellphone country ISO code. For example: US (USA),
GB (Britain) or SE (Sweden) (Requires cellphone and
countrycode)
Type: String
* defaultsmsprovider -- Default SMS provider
Type: String ['clickatell', 'bulksms', 'esendex',
'cellsynt']
* directtwitter -- Send tweets as direct messages
Type: Boolean
Default: True
* twitteruser -- Twitter user
Type: String |
def upload(self, file, path):
with open(file, "rb") as f:
resp = self._sendRequest("PUT", path, data=f)
if resp.status_code != 201:
raise YaDiskException(resp.status_code, resp.content) | Upload file. |
def use_tsig(self, keyring, keyname=None, fudge=300,
             original_id=None, tsig_error=0, other_data='',
             algorithm=dns.tsig.default_algorithm):
    """Arrange for a TSIG signature to be added when this message is sent.

    :param keyring: dict mapping key names to TSIG secrets.
    :param keyname: name of the key to use (dns.name.Name or str); if
        None, an arbitrary ("first") key from the keyring is used, so
        callers with multi-key keyrings should always pass a keyname.
    :param fudge: TSIG time fudge in seconds; default 300.
    :param original_id: TSIG original id; defaults to the message's id.
    :param tsig_error: TSIG error code; default 0.
    :param other_data: TSIG other data.
    :param algorithm: TSIG algorithm; defaults to dns.tsig.default_algorithm.
    """
    self.keyring = keyring
    if keyname is None:
        # BUG FIX: dict.keys() is a non-indexable view on Python 3, so
        # self.keyring.keys()[0] raised TypeError.  next(iter(...)) picks
        # an arbitrary key on both Python 2 and 3.
        self.keyname = next(iter(self.keyring))
    else:
        # BUG FIX: `unicode` does not exist on Python 3; `str` covers
        # text key names there.
        if isinstance(keyname, str):
            keyname = dns.name.from_text(keyname)
        self.keyname = keyname
    self.keyalgorithm = algorithm
    self.fudge = fudge
    if original_id is None:
        self.original_id = self.id
    else:
        self.original_id = original_id
    self.tsig_error = tsig_error
    self.other_data = other_data
def read_requirements(path,
                      strict_bounds,
                      conda_format=False,
                      filter_names=None):
    """Read a requirements.txt file given as a path relative to this file.

    When `strict_bounds` is falsey, pinned versions are rewritten as
    lower bounds; when `conda_format` is truthy, entries are converted to
    conda's spelling and system-version filtering is disabled.
    """
    full_path = join(dirname(abspath(__file__)), path)
    with open(full_path) as stream:
        requirements = _filter_requirements(
            stream.readlines(),
            filter_names=filter_names,
            filter_sys_version=not conda_format,
        )
    if not strict_bounds:
        requirements = [_with_bounds(req) for req in requirements]
    if conda_format:
        requirements = [_conda_format(req) for req in requirements]
    return list(requirements)
def connect_array(self, address, connection_key, connection_type, **kwargs):
    """Connect this array with another one.

    :param address: IP address or DNS name of the other array.
    :param connection_key: Connection key of the other array.
    :param connection_type: Type(s) of connection desired (currently
        only "replication" is supported by the REST API).
    :param kwargs: Extra fields passed straight through to the
        **POST array/connection** request; they override the base fields.
    :returns: Dictionary describing the connection to the other array.
    """
    payload = {
        "management_address": address,
        "connection_key": connection_key,
        "type": connection_type,
        **kwargs,
    }
    return self._request("POST", "array/connection", payload)
def _dump(self):
try:
self.temp.seek(0)
arr = np.fromfile(self.temp, self.dtype)
self.count_arr = arr['count']
self.elapsed_arr = arr['elapsed']
if self.calc_stats:
self.count_mean = np.mean(self.count_arr)
self.count_std = np.std(self.count_arr)
self.elapsed_mean = np.mean(self.elapsed_arr)
self.elapsed_std = np.std(self.elapsed_arr)
self._output()
finally:
self.temp.close()
self._cleanup() | dump data for an individual metric. For internal use only. |
def _avoid_duplicate_arrays(types):
arrays = [t for t in types if isinstance(t, dict) and t["type"] == "array"]
others = [t for t in types if not (isinstance(t, dict) and t["type"] == "array")]
if arrays:
items = set([])
for t in arrays:
if isinstance(t["items"], (list, tuple)):
items |= set(t["items"])
else:
items.add(t["items"])
if len(items) == 1:
items = items.pop()
else:
items = sorted(list(items))
arrays = [{"type": "array", "items": items}]
return others + arrays | Collapse arrays when we have multiple types. |
def _collect_state_names(self, variable):
"Return a list of states that the variable takes in the data"
states = sorted(list(self.data.ix[:, variable].dropna().unique()))
return states | Return a list of states that the variable takes in the data |
def build_simple_fault_source_node(fault_source):
    """Parse a simple fault source to a Node.

    :param fault_source: instance of
        openquake.hazardlib.source.simple_fault.SimpleFaultSource
    :returns: instance of openquake.baselib.node.Node
    """
    children = [build_simple_fault_geometry(fault_source)]
    children += get_fault_source_nodes(fault_source)
    attributes = get_source_attributes(fault_source)
    return Node("simpleFaultSource", attributes, nodes=children)
def handle_activity_legacy(_: str, __: int, tokens: ParseResults) -> ParseResults:
    """Upgrade a BEL 1.0 legacy activity to its modern representation."""
    mapped_activity = language.activity_labels[tokens[MODIFIER]]
    tokens[MODIFIER] = ACTIVITY
    tokens[EFFECT] = {
        NAME: mapped_activity,
        NAMESPACE: BEL_DEFAULT_NAMESPACE,
    }
    log.log(5, 'upgraded legacy activity to %s', mapped_activity)
    return tokens
def dump_limits(conf_file, limits_file, debug=False):
    """Dump the current limits from the Redis database.

    :param conf_file: Name of the configuration file, for connecting
                      to the Redis database.
    :param limits_file: Name of the XML file that the limits will be
                        dumped to.  Use '-' to dump to stdout.
    :param debug: If True, debugging messages are emitted while dumping
                  the limits.
    """
    conf = config.Config(conf_file=conf_file)
    db = conf.get_database()
    limits_key = conf['control'].get('limits_key', 'limits')
    # Rehydrate every serialized limit stored in the sorted set.
    lims = [limits.Limit.hydrate(db, msgpack.loads(lim))
            for lim in db.zrange(limits_key, 0, -1)]
    root = etree.Element('limits')
    limit_tree = etree.ElementTree(root)
    for idx, lim in enumerate(lims):
        if debug:
            # BUG FIX: the old "print >>sys.stderr, ..." form is a
            # SyntaxError on Python 3; use the print() function instead.
            print("Dumping limit index %d: %r" % (idx, lim), file=sys.stderr)
        make_limit_node(root, lim)
    if limits_file == '-':
        limits_file = sys.stdout
    if debug:
        print("Dumping limits to file %r" % limits_file, file=sys.stderr)
    limit_tree.write(limits_file, xml_declaration=True, encoding='UTF-8',
                     pretty_print=True)
def _cache_key_select_state(method, self, workflow_id, field_id, field_title):
    """Return the key used to decide whether select_state must be recomputed."""
    timestamp = update_timer()
    return (timestamp, workflow_id, field_id, field_title)
def iswc(name=None):
    """Build a parser for an ISWC field.

    An ISWC code follows the pattern TNNNNNNNNNC: a literal 'T' header,
    nine digits and a control digit, e.g. T0345246801.

    :param name: display name for the field (defaults to 'ISWC Field')
    :return: a parser for the ISWC field
    """
    label = 'ISWC Field' if name is None else name
    parser = pp.Regex('T[0-9]{10}')
    parser.setName(label)
    parser.leaveWhitespace()
    return parser.setResultsName('iswc')
def check_valid_package(package,
                        cyg_arch='x86_64',
                        mirrors=None):
    """Check whether the package exists on any of the given mirrors.

    Args:
        package: the name of the package
        cyg_arch: the cygwin architecture
        mirrors: list of {mirror_url: key} dicts to check; defaults to
            the built-in default mirror.

    Returns (bool): True if the package is found on some mirror.

    CLI Example:

    .. code-block:: bash

        salt '*' cyg.check_valid_package <package name>
    """
    if mirrors is None:
        mirrors = [{DEFAULT_MIRROR: DEFAULT_MIRROR_KEY}]
    LOG.debug('Checking Valid Mirrors: %s', mirrors)
    # The mirror key is not needed for the lookup, only the URL.
    return any(
        package in _get_all_packages(mirror_url, cyg_arch)
        for mirror in mirrors
        for mirror_url in mirror
    )
def _get_pdb_format_version(self):
if not self.format_version:
version = None
version_lines = None
try:
version_lines = [line for line in self.parsed_lines['REMARK'] if int(line[7:10]) == 4 and line[10:].strip()]
except: pass
if version_lines:
assert(len(version_lines) == 1)
version_line = version_lines[0]
version_regex = re.compile('.*?FORMAT V.(.*),')
mtch = version_regex.match(version_line)
if mtch and mtch.groups(0):
try:
version = float(mtch.groups(0)[0])
except:
pass
self.format_version = version | Remark 4 indicates the version of the PDB File Format used to generate the file. |
def _add_function(self, func, identify_observed):
key = self.make_key(func)
if key not in self.observers:
self.observers[key] = ObserverFunction(
func, identify_observed, (key, self.observers))
return True
else:
return False | Add a function as an observer.
Args:
func: The function to register as an observer.
identify_observed: See docstring for add_observer.
Returns:
True if the function is added, otherwise False. |
def list_formats(self, node, path=(), formats=None):
    """List the object formats in sorted order.

    :param node: Root node to start listing the formats from.
    :param path: Node names walked so far (tuple).
    :param formats: Accumulator list shared across the recursion.
    :return: Sorted list of dotted format paths.
    """
    if formats is None:  # BUG FIX: was `formats == None`; identity test is correct
        formats = []
    for child in node.children:
        self.list_formats(child, path + (child.name,), formats)
    # The root call has an empty path and does not itself name a format;
    # the old `path and formats.append(...)` side-effect idiom is made explicit.
    if path:
        formats.append(".".join(path))
    return sorted(formats)
def run(self):
    """General passive maintenance process controller for the Salt master.

    Loops forever; once every `loop_interval` seconds it cleans stale
    jobs/tokens/auth/proc data and drives the git-pillar, schedule,
    key-cache, presence and key-rotation handlers.
    """
    salt.utils.process.appendproctitle(self.__class__.__name__)
    self._post_fork_init()
    last_run = int(time.time())
    old_present = set()
    while True:
        now = int(time.time())
        if now - last_run >= self.loop_interval:
            salt.daemons.masterapi.clean_old_jobs(self.opts)
            salt.daemons.masterapi.clean_expired_tokens(self.opts)
            salt.daemons.masterapi.clean_pub_auth(self.opts)
            salt.daemons.masterapi.clean_proc_dir(self.opts)
            self.handle_git_pillar()
            self.handle_schedule()
            self.handle_key_cache()
            self.handle_presence(old_present)
            self.handle_key_rotate(now)
            salt.utils.verify.check_max_open_files(self.opts)
            last_run = now
        time.sleep(self.loop_interval)
def delimit(delimiters, content):
    """Wrap `content` in the first and last elements of `delimiters`.

    Example: delimit('[]', 'foo') -> '[foo]'.
    Raises ValueError unless `delimiters` has exactly two elements.
    """
    if len(delimiters) != 2:
        raise ValueError(
            "`delimiters` must be of length 2. Got %r" % delimiters
        )
    opener, closer = delimiters
    return opener + content + closer
def predict(fqdn, result, *argl, **argd):
    """Analyze the result of a generic predict operation performed by sklearn.

    Args:
        fqdn (str): fully-qualified name of the method that was called.
        result: result of calling the method with `fqdn`.
        argl (tuple): positional arguments passed to the method call;
            the first one is expected to be the machine/estimator.
        argd (dict): keyword arguments passed to the method call.
    """
    if not argl:
        return None
    machine = argl[0]
    if isclassifier(machine):
        return classify_predict(fqdn, result, None, *argl, **argd)
    if isregressor(machine):
        return regress_predict(fqdn, result, None, *argl, **argd)
    return None
async def on_raw_error(self, message):
    """Server encountered an error and will now close the connection."""
    details = ' '.join(message.params)
    await self.on_data_error(protocol.ServerError(details))
def dir_list(load):
    """Return a list of all directories in a specified environment.

    Collects every ancestor directory of every served file path.
    (Paths are expected to be relative; `os.path.dirname` never
    terminates for an absolute root such as '/'.)
    """
    dirs = set()
    for file_path in file_list(load):
        parent = os.path.dirname(file_path)
        while parent:
            dirs.add(parent)
            parent = os.path.dirname(parent)
    return list(dirs)
def _path_is_executable_others(path):
prevpath = None
while path and path != prevpath:
try:
if not os.stat(path).st_mode & stat.S_IXOTH:
return False
except OSError:
return False
prevpath = path
path, _ = os.path.split(path)
return True | Check every part of path for executable permission |
def _recursiveSetNodePath(self, nodePath):
self._nodePath = nodePath
for childItem in self.childItems:
childItem._recursiveSetNodePath(nodePath + '/' + childItem.nodeName) | Sets the nodePath property and updates it for all children. |
def set_iam_policy(self, policy, client=None):
    """Update the IAM policy for the bucket via the setIamPolicy API.

    If `user_project` is set, the API request is billed to that project.

    :param policy: google.api_core.iam.Policy instance used to update
        the bucket's IAM policy.
    :param client: optional client to use; falls back to the client
        stored on the bucket.
    :returns: the Policy built from the setIamPolicy API response.
    """
    client = self._require_client(client)
    query_params = {}
    if self.user_project is not None:
        query_params["userProject"] = self.user_project
    payload = policy.to_api_repr()
    payload["resourceId"] = self.path
    response = client._connection.api_request(
        method="PUT",
        path="%s/iam" % (self.path,),
        query_params=query_params,
        data=payload,
        _target_object=None,
    )
    return Policy.from_api_repr(response)
def iconGlyph(self):
    """Return a dimension icon for dimension scales, an array icon otherwise."""
    is_dim_scale = self._h5Dataset.attrs.get('CLASS', None) == b'DIMENSION_SCALE'
    return RtiIconFactory.DIMENSION if is_dim_scale else RtiIconFactory.ARRAY
def add_handler(self, handler: Handler, group: int = 0):
    """Register an update handler.

    At most one handler within a group handles a single update; to
    handle the same update more than once, register handlers under
    different group ids (lower group id == higher priority).

    Args:
        handler: The handler to be registered.
        group: The group identifier, defaults to 0.

    Returns:
        A tuple of (handler, group).
    """
    if not isinstance(handler, DisconnectHandler):
        self.dispatcher.add_handler(handler, group)
    else:
        # Disconnect handlers bypass the dispatcher entirely.
        self.disconnect_handler = handler.callback
    return handler, group
def extract_archive(client, archive_path, extract_path=None):
    """Extract the archive over SSH using the provided client.

    When `extract_path` is given the archive is extracted there,
    otherwise in the remote current directory.
    """
    parts = ['tar -xf {path}'.format(path=archive_path)]
    if extract_path:
        parts.append('-C {0}'.format(extract_path))
    return execute_ssh_command(client, ' '.join(parts))
def _load_image_set_index(self, shuffle):
self.num_images = 0
for db in self.imdbs:
self.num_images += db.num_images
indices = list(range(self.num_images))
if shuffle:
random.shuffle(indices)
return indices | get total number of images, init indices
Parameters
----------
shuffle : bool
whether to shuffle the initial indices |
def substitute(self, var_map, cont=False, tag=None):
    """Substitute sub-expressions on both the lhs and the rhs.

    Args:
        var_map (dict): entries of the form ``{expr: substitution}``.
    """
    # Delegates to the module-level `substitute` function via apply().
    return self.apply(substitute, var_map=var_map, tag=tag, cont=cont)
def query(self, queryString):
    """Execute a query against the service and return the matching data."""
    self._setHeaders('query')
    service = self._sforce.service
    return service.query(queryString)
def push_fbo(self, fbo, offset, csize):
    """Push an FBO on the stack and activate it.

    Subsequent rendering goes to the framebuffer rather than the
    canvas's back buffer, and the viewport is set to cover the
    framebuffer's color buffer.

    Parameters
    ----------
    fbo : instance of FrameBuffer
        The framebuffer object.
    offset : tuple
        Location of the fbo origin relative to the canvas's framebuffer
        origin.
    csize : tuple
        Size of the region in the canvas's framebuffer covered by this
        framebuffer object.
    """
    self._fb_stack.append((fbo, offset, csize))
    try:
        fbo.activate()
        height, width = fbo.color_buffer.shape[:2]
        self.push_viewport((0, 0, width, height))
    except Exception:
        # Roll back the stack entry if activation/viewport setup fails.
        self._fb_stack.pop()
        raise
    self._update_transforms()
def profile_remove(name, **kwargs):
    """Remove the named profile from the storage."""
    ctx = Context(**kwargs)
    storage = ctx.repo.create_secure_service('storage')
    ctx.execute_action('profile:remove', storage=storage, name=name)
def add_group(self, group_attribs=None, parent=None):
    """Add an empty group element to the SVG and return it.

    Defaults to the document root as parent; warns (but proceeds) if
    the given parent does not belong to this Document.
    """
    if parent is None:
        parent = self.tree.getroot()
    elif not self.contains_group(parent):
        warnings.warn('The requested group {0} does not belong to '
                      'this Document'.format(parent))
    # Copy so the caller's dict is never mutated by the element.
    attribs = {} if group_attribs is None else dict(group_attribs)
    tag = '{{{0}}}g'.format(SVG_NAMESPACE['svg'])
    return SubElement(parent, tag, attribs)
def get_image_format(filename):
    """Get the image format of a file, or an error/comic marker.

    Returns the PIL format string (e.g. 'PNG'), a sequenced variant for
    animated images, a comic-archive format if the file is one, or
    ERROR_FORMAT when the file is not a recognizable image.
    """
    image = None
    bad_image = 1
    image_format = NONE_FORMAT
    sequenced = False
    try:
        # NOTE(review): PIL's verify() appears to return None on success,
        # leaving bad_image falsy; the file is reopened because verify()
        # leaves the image unusable -- confirm against the PIL docs.
        bad_image = Image.open(filename).verify()
        image = Image.open(filename)
        image_format = image.format
        sequenced = _is_image_sequenced(image)
    except (OSError, IOError, AttributeError):
        # Unreadable / not-an-image files fall through with the defaults.
        pass
    if sequenced:
        # Animated images get a distinct "sequenced" format label.
        image_format = gif.SEQUENCED_TEMPLATE.format(image_format)
    elif image is None or bad_image or image_format == NONE_FORMAT:
        image_format = ERROR_FORMAT
    # A comic archive classification overrides whatever PIL decided.
    comic_format = comic.get_comic_format(filename)
    if comic_format:
        image_format = comic_format
    if (Settings.verbose > 1) and image_format == ERROR_FORMAT and \
            (not Settings.list_only):
        print(filename, "doesn't look like an image or comic archive.")
    return image_format
def get_minimum(self, other):
    """Return a new Point whose coordinates are the component-wise
    minimum of this point and `other`.

    NOTE(review): the previous docstring claimed this "updates this
    vector" in place, but the code mutates nothing and returns a fresh
    Point -- docstring corrected to match the code.
    """
    return Point(min(self.x, other.x), min(self.y, other.y))
def userdata_to_db(session, method='update', autocommit=False):
    """Add catchments from a user folder to the database.

    The folder comes from the ``[import] folder`` key in ``config.ini``;
    when that key is missing this is silently a no-op.

    :param session: database session to use
    :param method: ``create`` (new catchments only) or ``update``
        (update existing, create otherwise)
    :param autocommit: whether to commit the session immediately
    """
    try:
        import_folder = config['import']['folder']
    except KeyError:
        # No import folder configured: nothing to do.
        return
    if import_folder:
        folder_to_db(import_folder, session, method=method,
                     autocommit=autocommit)
def InjectString(self, codestring, wait_for_completion=True):
    """Try to inject python code into the current thread.

    Args:
      codestring: Python snippet to execute in inferior (may contain
        newlines).
      wait_for_completion: Block until execution of the snippet has
        completed.
    """
    if not (self.inferior.is_running and self.inferior.gdb.IsAttached()):
        logging.error('Not attached to any process.')
        return
    try:
        self.inferior.gdb.InjectString(
            self.inferior.position,
            codestring,
            wait_for_completion=wait_for_completion)
    except RuntimeError:
        # print_exc() is shorthand for print_exception(*sys.exc_info()).
        traceback.print_exc()
def activation_done(self, *args, **kwargs):
    """Complete the activation, or save only, depending on the form submit."""
    if not self._save:
        super().activation_done(*args, **kwargs)
    else:
        self.save_task()
def system_properties_absent(name, server=None):
    """Ensure that the named glassfish system property does not exist.

    name
        Name of the system property

    Returns the usual salt state dict with result/comment/changes.
    NOTE(review): ret['name'] is left as '' rather than set to `name`,
    and the connection-failure branch stores a non-standard
    ret['error'] key -- confirm against the salt state-return contract.
    """
    ret = {'name': '', 'result': None, 'comment': None, 'changes': {}}
    try:
        data = __salt__['glassfish.get_system_properties'](server=server)
    except requests.ConnectionError as error:
        # NOTE(review): `error` is bound but never used.
        if __opts__['test']:
            ret['changes'] = {'Name': name}
            ret['result'] = None
            return ret
        else:
            ret['error'] = "Can't connect to the server"
            return ret
    if name in data:
        if not __opts__['test']:
            try:
                __salt__['glassfish.delete_system_properties'](name, server=server)
                ret['result'] = True
                ret['comment'] = 'System properties deleted'
            except CommandExecutionError as error:
                # The raised error object itself becomes the comment.
                ret['comment'] = error
                ret['result'] = False
        else:
            # Test mode: report what would have happened.
            ret['result'] = None
            ret['comment'] = 'System properties would have been deleted'
            ret['changes'] = {'Name': name}
    else:
        ret['result'] = True
        ret['comment'] = 'System properties are already absent'
    return ret
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.