code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def create_gist(self, description, files, public=True):
    """Create a new gist.

    If no login was provided, the gist will be anonymous.

    :param str description: (required), description of gist
    :param dict files: (required), file names mapped to dicts with a
        ``content`` key, e.g. ``{'spam.txt': {'content': 'File contents ...'}}``
    :param bool public: (optional), make the gist public if True
    :returns: :class:`Gist <github3.gists.Gist>` or None if creation failed
    """
    new_gist = {'description': description, 'public': public,
                'files': files}
    url = self._build_url('gists')
    # Name the decoded payload explicitly; calling it ``json`` would
    # shadow the stdlib module of the same name.
    data = self._json(self._post(url, data=new_gist), 201)
    return Gist(data, self) if data else None
If no login was provided, it will be anonymous.
:param str description: (required), description of gist
:param dict files: (required), file names with associated dictionaries
for content, e.g. ``{'spam.txt': {'content': 'File contents
...'}}``
:param bool public: (optional), make the gist public if True
:returns: :class:`Gist <github3.gists.Gist>` |
def _makeButtons(self):
    """Create the OK button and wire its click signal to ``_completed``."""
    ok_button = urwid.Button(u"OK")
    self.button = ok_button
    urwid.connect_signal(ok_button, "click", self._completed)
    return [self.button]
def normalise(self, to_currency):
    """Normalise this balance into a single currency.

    Args:
        to_currency (str): Destination currency

    Returns:
        Balance: A new balance holding a single Money value in ``to_currency``.
    """
    total = Money(currency=to_currency)
    for money in self._money_obs:
        total += converter.convert(money, to_currency)
    return Balance([total])
Args:
to_currency (str): Destination currency
Returns:
(Balance): A new balance object containing a single Money value in the specified currency |
def random(args):
    """%prog random fasta 100 > random100.fasta

    Take a number of records randomly from fasta.
    """
    from random import sample
    p = OptionParser(random.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    fastafile, N = args
    N = int(N)
    assert N > 0
    f = Fasta(fastafile)
    fw = must_open("stdout", "w")
    # Draw N distinct record keys and emit their records as FASTA.
    for key in sample(f.keys(), N):
        SeqIO.write([f[key]], fw, "fasta")
    fw.close()
Take number of records randomly from fasta |
def create_zone(domain, profile, type='master', ttl=None):
    '''
    Create a new zone.

    :param domain: Zone domain name (e.g. example.com)
    :type  domain: ``str``

    :param profile: The profile key
    :type  profile: ``str``

    :param type: Zone type (master / slave).
    :type  type: ``str``

    :param ttl: TTL for new records. (optional)
    :type  ttl: ``int``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_dns.create_zone google.com profile1
    '''
    conn = _get_driver(profile=profile)
    # libcloud DNS drivers create zones via ``create_zone``;
    # ``create_record`` has a different signature (name, zone, type, data)
    # and would fail with these arguments.
    zone = conn.create_zone(domain, type=type, ttl=ttl)
    return _simple_zone(zone)
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param profile: The profile key
:type profile: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
CLI Example:
.. code-block:: bash
salt myminion libcloud_dns.create_zone google.com profile1 |
def GetClientStates(self, client_list, client_chunk=50):
    """Take in a client list and yield (urn, dict) with age and hostname."""
    for client_group in collection.Batch(client_list, client_chunk):
        client_fds = aff4.FACTORY.MultiOpen(
            client_group,
            mode="r",
            aff4_type=aff4_grr.VFSGRRClient,
            token=self.token)
        for fd in client_fds:
            yield (fd.urn, {
                "age": fd.Get(fd.Schema.PING),
                "hostname": fd.Get(fd.Schema.HOSTNAME),
            })
def minimize_dihedrals(self):
    r"""Give a representation of the dihedral with minimized absolute value.

    Maps every dihedral angle into :math:`-\pi \leq \delta \leq \pi`
    (i.e. ``(-180, 180]`` degrees), which yields the shortest rotational
    movement when interpolating between two zmatrices.

    Returns:
        Zmat: Zmatrix with accordingly changed angles and dihedrals.
    """
    # NOTE: a stray bare ``r`` statement (leftover raw-docstring prefix)
    # used to precede this body and raised NameError at call time.
    new = self.copy()

    def convert_d(d):
        # Reduce to [0, 360) first, then shift values >= 180 down by 360.
        r = d % 360
        return r - (r // 180) * 360

    new.unsafe_loc[:, 'dihedral'] = convert_d(new.loc[:, 'dihedral'])
    return new
Mathematically speaking the angles in a zmatrix are
representations of an equivalence class.
We will denote an equivalence relation with :math:`\sim`
and use :math:`\alpha` for an angle and :math:`\delta` for a dihedral
angle. Then the following equations hold true.
.. math::
(\alpha, \delta) &\sim (-\alpha, \delta + \pi) \\
\alpha &\sim \alpha \mod 2\pi \\
\delta &\sim \delta \mod 2\pi
This function asserts:
.. math::
-\pi \leq \delta \leq \pi
The main application of this function is the construction of
a transforming movement from ``zmat1`` to ``zmat2``.
This is under the assumption that ``zmat1`` and ``zmat2`` are the same
molecules (regarding their topology) and have the same
construction table (:meth:`~Cartesian.get_construction_table`)::
with cc.TestOperators(False):
D = zm2 - zm1
zmats1 = [zm1 + D * i / n for i in range(n)]
zmats2 = [zm1 + D.minimize_dihedrals() * i / n for i in range(n)]
The movement described by ``zmats1`` might be too large,
because going from :math:`5^\circ` to :math:`355^\circ` is
:math:`350^\circ` in this case and not :math:`-10^\circ` as
in ``zmats2`` which is the desired :math:`\Delta` in most cases.
Args:
None
Returns:
Zmat: Zmatrix with accordingly changed angles and dihedrals. |
def _load_data_flow_models(self):
    """Add a model for each data flow of the state."""
    self.data_flows = []
    for flow in self.state.data_flows.values():
        self._add_model(self.data_flows, flow, DataFlowModel)
def check_key(user,
              key,
              enc,
              comment,
              options,
              config='.ssh/authorized_keys',
              cache_keys=None,
              fingerprint_hash_type=None):
    '''
    Check to see if a key needs updating, returns "update", "add" or "exists"

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.check_key <user> <key> <enc> <comment> <options>
    '''
    if cache_keys is None:
        cache_keys = []
    enc = _refine_enc(enc)
    current = auth_keys(user,
                        config=config,
                        fingerprint_hash_type=fingerprint_hash_type)
    if key not in current:
        return 'add'
    # Key is present: compare the full formatted line to detect changes
    # in encoding, comment or options.
    nline = _format_auth_line(key, enc, comment, options)
    existing = current[key]
    cline = _format_auth_line(key,
                              existing['enc'],
                              existing['comment'],
                              existing['options'])
    return 'update' if cline != nline else 'exists'
CLI Example:
.. code-block:: bash
salt '*' ssh.check_key <user> <key> <enc> <comment> <options> |
def save_notebook(self, body):
    """Save the executed notebook next to the user-provided output path.

    The notebook is written as indented JSON to ``notebook_name`` inside
    the directory of ``output_path``.

    :param body: JSON-serializable notebook document to write.
    """
    directory = os.path.dirname(self.output_path)
    full_path = os.path.join(directory, self.notebook_name)
    try:
        with open(full_path, 'w') as fh:
            fh.write(json.dumps(body, indent=2))
    except (OSError, ValueError):
        # OSError: invalid/unwritable path; ValueError: body is not
        # JSON-serializable. The original only caught ValueError, so
        # path errors escaped despite the message below.
        print('ERROR: Could not save executed notebook to path: ' +
              self.output_path +
              ' -- Please provide a valid absolute path.')
def assets_set_asset(self, asset_name, file, **kwargs):
    """Set an asset image by name via the ``assets.setAsset`` endpoint.

    :param asset_name: name of the asset (used as the multipart field name)
    :param file: path to the image file to upload
    """
    # Guess the MIME type from the file name for the multipart upload.
    content_type = mimetypes.MimeTypes().guess_type(file)
    # NOTE(review): the handle opened here is never explicitly closed;
    # it is left to the HTTP layer / GC to release it.
    files = {
        asset_name: (file, open(file, 'rb'), content_type[0], {'Expires': '0'}),
    }
    return self.__call_api_post('assets.setAsset', kwargs=kwargs, use_json=False, files=files)
def feed(self, json_item):
    """Feed a json item into the Kafka topic.

    @param json_item: The loaded json object
    """
    # Bound the whole produce cycle by KAFKA_FEED_TIMEOUT; on timeout the
    # decorated call returns the fallback value False instead of blocking.
    @MethodTimer.timeout(self.settings['KAFKA_FEED_TIMEOUT'], False)
    def _feed(json_item):
        producer = self._create_producer()
        topic = self.settings['KAFKA_INCOMING_TOPIC']
        if not self.logger.json:
            self.logger.info('Feeding JSON into {0}\n{1}'.format(
                topic, json.dumps(json_item, indent=4)))
        else:
            self.logger.info('Feeding JSON into {0}\n'.format(topic),
                             extra={'value': json_item})
        if producer is not None:
            producer.send(topic, json_item)
            producer.flush()
            # Bounded close so a wedged broker cannot hang the caller.
            producer.close(timeout=10)
            return True
        else:
            return False
    result = _feed(json_item)
    if result:
        self.logger.info("Successfully fed item to Kafka")
    else:
        self.logger.error("Failed to feed item into Kafka")
@param json_item: The loaded json object |
def filter_matching(self, urls):
    """Get URLs with hosts matching any listed ones.

    :param urls: an iterable containing URLs to filter
    :returns: a generator yielding matching URLs
    :raises InvalidURLError: if there are any invalid URLs in the sequence
    """
    yield from (url for url in urls if urlparse(url).hostname in self)
:param urls: an iterable containing URLs to filter
:returns: a generator yielding matching URLs
:raises InvalidURLError: if there are any invalid URLs in
the sequence |
def from_parameter(cls: Type[UnlockParameterType], parameter: str) -> Optional[Union[SIGParameter, XHXParameter]]:
    """Return an unlock parameter instance parsed from *parameter*.

    SIG is tried first, then XHX; None when neither recognizes the string.

    :param parameter: Parameter string
    :return: SIGParameter or XHXParameter instance, or None
    """
    for candidate_cls in (SIGParameter, XHXParameter):
        candidate = candidate_cls.from_parameter(parameter)
        if candidate:
            return candidate
    return None
:param parameter: Parameter string
:return: |
def getblockhash(self, index: int) -> str:
    """Return the hash of the block at *index*; index 0 is the genesis block."""
    response = self.api_fetch('getblockhash?index=' + str(index))
    return cast(str, response)
def _generate_full_recipe_message(self, destination, message, add_path_step):
if add_path_step and self.recipe_pointer:
recipe_path = self.recipe_path + [self.recipe_pointer]
else:
recipe_path = self.recipe_path
return {
"environment": self.environment,
"payload": message,
"recipe": self.recipe.recipe,
"recipe-path": recipe_path,
"recipe-pointer": destination,
} | Factory function to generate independent message objects for
downstream recipients with different destinations. |
def is_empty(self):
    """Check for an empty image.

    :raises RuntimeError: when channels and shape disagree about emptiness.
    """
    no_channels = self.channels == []
    no_shape = self.shape == (0, 0)
    # Channels and shape must agree: either both empty or both populated.
    if no_channels != no_shape:
        raise RuntimeError("Channels-shape mismatch.")
    return no_channels and no_shape
def stringify_seconds(seconds=0):
    """Convert a number of seconds to a human-readable "HHh MMm SSs" string.

    :param seconds: duration in seconds (anything int() accepts)
    :returns: formatted string, e.g. ``"1h 1m 1s"``
    """
    seconds = int(seconds)
    hours, minutes, secs = 0, 0, 0
    if seconds > 0:
        # divmod keeps everything in integer arithmetic; the original
        # used true division, which produces floats under Python 3.
        minutes, secs = divmod(seconds, 60)
        hours, minutes = divmod(minutes, 60)
    return "%dh %dm %ds" % (hours, minutes, secs)
HHh MMm SSs format. |
def _log_task_info(headers, extra_task_info=None):
ran_at = time.time()
task_eta = float(headers.get('X-Appengine-Tasketa', 0.0))
task_info = {
'retry_count': headers.get('X-Appengine-Taskretrycount', ''),
'execution_count': headers.get('X-Appengine-Taskexecutioncount', ''),
'task_eta': task_eta,
'ran': ran_at,
'gae_latency_seconds': ran_at - task_eta
}
if extra_task_info:
task_info['extra'] = extra_task_info
logging.debug('TASK-INFO: %s', json.dumps(task_info)) | Processes the header from task requests to log analytical data. |
def intinlist(lst):
    """Return True if any item in *lst* can be interpreted as an int.

    :param lst: iterable of arbitrary items
    :returns: True as soon as one item converts via int(), else False
    """
    for item in lst:
        try:
            int(item)
        except (ValueError, TypeError):
            # Not int-convertible (bad string, None, ...): keep scanning.
            # The original only caught ValueError, so e.g. None raised.
            continue
        return True
    return False
def get_changed_devices(self, timestamp):
    """Get device data changed since the last timestamp.

    This is a (long-polling) blocking call; pass None for the initial state.

    :param timestamp: dict with 'loadtime' and 'dataversion' from a previous
        call, or None to request a full snapshot immediately.
    :returns: [device_data, timestamp] — the timestamp can be passed back in.
    :raises PyveraError: on empty, non-JSON, or garbled responses.
    """
    if timestamp is None:
        # Initial request: no long-poll parameters, returns current state.
        payload = {}
    else:
        payload = {
            'timeout': SUBSCRIPTION_WAIT,
            'minimumdelay': SUBSCRIPTION_MIN_WAIT
        }
        payload.update(timestamp)
    # 'lu_sdata' is Vera's incremental device-status request id.
    payload.update({
        'id': 'lu_sdata',
    })
    logger.debug("get_changed_devices() requesting payload %s", str(payload))
    # Double the normal timeout: the server may hold the request open
    # up to SUBSCRIPTION_WAIT before answering.
    r = self.data_request(payload, TIMEOUT*2)
    r.raise_for_status()
    # Vera sometimes returns empty or truncated bodies; normalize all of
    # these to PyveraError so callers can retry uniformly.
    if r.text == "":
        raise PyveraError("Empty response from Vera")
    try:
        result = r.json()
    except ValueError as ex:
        raise PyveraError("JSON decode error: " + str(ex))
    if not ( type(result) is dict
             and 'loadtime' in result and 'dataversion' in result ):
        raise PyveraError("Unexpected/garbled response from Vera")
    device_data = result.get('devices')
    timestamp = {
        'loadtime': result.get('loadtime'),
        'dataversion': result.get('dataversion')
    }
    return [device_data, timestamp]
This is done via a blocking call, pass NONE for initial state. |
def get_wharton_gsrs_formatted(self, sessionid, date=None):
    """Return the Wharton GSR listing converted to the studyspaces format."""
    return self.switch_format(self.get_wharton_gsrs(sessionid, date))
def _safe_type(value):
if type(value) is str: dtype = 'string'
if type(value) is unicode: dtype = 'string'
if type(value) is int: dtype = 'integer'
if type(value) is float: dtype = 'real'
return dtype | Converts Python type names to XGMML-safe type names. |
def version_msg():
    """Return the Cookiecutter version message template, location and Python version.

    The literal ``%(version)s`` placeholder is intentionally left in the
    string: click's ``version_option`` substitutes it later.
    """
    # sys.version[:3] breaks for two-digit minor versions (gives '3.1'
    # for Python 3.10+); build the string from version_info instead.
    python_version = '%d.%d' % (sys.version_info[0], sys.version_info[1])
    location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    message = u'Cookiecutter %(version)s from {} (Python {})'
    return message.format(location, python_version)
def get_start_of_line_position(self, after_whitespace=False):
    """Return the cursor-relative position of the start of this line.

    :param after_whitespace: when True, point past the leading whitespace.
    """
    if not after_whitespace:
        return -len(self.current_line_before_cursor)
    line = self.current_line
    indent_width = len(line) - len(line.lstrip())
    return indent_width - self.cursor_position_col
def unpack(self, unpacker):
    """Unpack a code block from a buffer, updating this instance in place.

    Reads the stack/locals limits, the raw bytecode, the exception table
    and the nested attributes, in that order.
    """
    # _HHI: two unsigned shorts (max stack, max locals) followed by the
    # int byte-length of the bytecode.
    (a, b, c) = unpacker.unpack_struct(_HHI)
    self.max_stack = a
    self.max_locals = b
    self.code = unpacker.read(c)
    uobjs = unpacker.unpack_objects
    self.exceptions = tuple(uobjs(JavaExceptionInfo, self))
    self.attribs.unpack(unpacker)
of this instance |
def append(self, item):
    """Append a single item, growing the wrapped numpy array if necessary.

    :param item: value to store at the current write position
    :returns: self, to allow chaining
    """
    try:
        # EAFP: write at the cursor; IndexError means the backing array
        # is full and must be grown before retrying the write.
        self._data[self._position] = item
    except IndexError:
        self._grow()
        self._data[self._position] = item
    self._position += 1
    return self
if necessary |
def is_valid_resource_name(rname, exception_type=None):
    """Validate a resource name against ARM guidelines.

    Individual services may be more restrictive than this check.

    :param rname: The resource name being validated.
    :type rname: str
    :param exception_type: Raises this Exception if invalid.
    :type exception_type: :class:`Exception`
    :returns: Whether the name is valid.
    :rtype: bool
    """
    if _ARMNAME_RE.match(rname):
        return True
    if exception_type:
        raise exception_type()
    return False
:param rname: The resource name being validated.
:type rname: str
:param exception_type: Raises this Exception if invalid.
:type exception_type: :class:`Exception`
:returns: A boolean describing whether the name is valid.
:rtype: bool |
def to_versions(self):
    """Return exact versions as Version objects, or None if there are none.

    A bound is "exact" when both of its ends are inclusive and equal.
    """
    exact_versions = [
        bound.lower.version
        for bound in self.bounds
        if bound.lower.inclusive and bound.upper.inclusive
        and bound.lower.version == bound.upper.version
    ]
    return exact_versions or None
are no exact version ranges present. |
def get_object_position(self, object_name, relative_to_object=None):
    """Get the position of *object_name*, optionally relative to another object."""
    handle = self.get_object_handle(object_name)
    # -1 is the remote API's sentinel for "absolute position".
    if relative_to_object is None:
        relative_handle = -1
    else:
        relative_handle = self.get_object_handle(relative_to_object)
    return self.call_remote_api('simxGetObjectPosition',
                                handle, relative_handle,
                                streaming=True)
def _nat_rules_for_internet_access(self, acl_no, network, netmask,
                                   inner_itfc, outer_itfc, vrf_name):
    """Configure the NAT rules for an internal network on the ASR1k.

    Three steps: create an ACL for the internal IP range, enable dynamic
    source NAT for that ACL and the router's VRF, then mark the inner and
    outer interfaces as NAT inside/outside.

    :param acl_no: ACL number of the internal network.
    :param network: internal network
    :param netmask: netmask of the internal network.
    :param inner_itfc: (name of) interface connected to the internal network
    :param outer_itfc: (name of) interface connected to the external network
    :param vrf_name: VRF corresponding to this virtual router
    """
    acl_present = self._check_acl(acl_no, network, netmask)
    if not acl_present:
        conf_str = snippets.CREATE_ACL % (acl_no, network, netmask)
        self._edit_running_config(conf_str, 'CREATE_ACL')
    pool_name = "%s_nat_pool" % vrf_name
    conf_str = asr1k_snippets.SET_DYN_SRC_TRL_POOL % (acl_no, pool_name,
                                                      vrf_name)
    try:
        self._edit_running_config(conf_str, 'SET_DYN_SRC_TRL_POOL')
    except Exception as dyn_nat_e:
        # Known netconf quirk on this device: the config is applied but
        # an error is still reported — log and continue.
        LOG.info("Ignore exception for SET_DYN_SRC_TRL_POOL: %s. "
                 "The config seems to be applied properly but netconf "
                 "seems to report an error.", dyn_nat_e)
    conf_str = snippets.SET_NAT % (inner_itfc, 'inside')
    self._edit_running_config(conf_str, 'SET_NAT')
    conf_str = snippets.SET_NAT % (outer_itfc, 'outside')
    self._edit_running_config(conf_str, 'SET_NAT')
Configuring NAT rules in the ASR1k is a three step process. First
create an ACL for the IP range of the internal network. Then enable
dynamic source NATing on the external interface of the ASR1k for this
ACL and VRF of the neutron router. Finally enable NAT on the interfaces
of the ASR1k where the internal and external networks are connected.
:param acl_no: ACL number of the internal network.
:param network: internal network
:param netmask: netmask of the internal network.
:param inner_itfc: (name of) interface connected to the internal
network
:param outer_itfc: (name of) interface connected to the external
network
:param vrf_name: VRF corresponding to this virtual router
:return: True if configuration succeeded
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
IOSXEConfigException |
def _get_average_time_stamp(action_set):
total_time_stamps = sum(rule.time_stamp * rule.numerosity
for rule in action_set)
total_numerosity = sum(rule.numerosity for rule in action_set)
return total_time_stamps / (total_numerosity or 1) | Return the average time stamp for the rules in this action
set. |
def param_remove(self, param: 'str') -> None:
    """Remove a parameter from this model.

    :param param: name of the parameter to be removed
    :type param: str
    """
    # Drop the param from every registered dict attribute...
    for dict_attr in self._param_attr_dicts:
        self.__dict__[dict_attr].pop(param, None)
    # ...and from every registered list attribute.
    for list_attr in self._param_attr_lists:
        container = self.__dict__[list_attr]
        if param in container:
            container.remove(param)
:param param: name of the parameter to be removed
:type param: str |
def convert_json_object(obographdoc, **args):
    """Return a dict wrapping a networkx MultiDiGraph of the ontologies
    serialized as an obograph json object.

    The first graph in the document provides id/meta; all graphs are
    merged into the same digraph.
    """
    digraph = networkx.MultiDiGraph()
    xref_graph = networkx.MultiGraph()
    logical_definitions = []
    property_chain_axioms = []
    context = obographdoc.get('@context',{})
    logging.info("CONTEXT: {}".format(context))
    mapper = OboJsonMapper(digraph=digraph, context=context)
    ogs = obographdoc['graphs']
    base_og = ogs[0]
    for og in ogs:
        # Accumulates nodes/edges plus xrefs, logical definitions and
        # property chain axioms into the shared structures above.
        mapper.add_obograph_digraph(og, xref_graph=xref_graph,
                                    logical_definitions=logical_definitions,
                                    property_chain_axioms=property_chain_axioms, **args)
    return {
        'id': base_og.get('id'),
        'meta': base_og.get('meta'),
        'graph': mapper.digraph,
        'xref_graph': xref_graph,
        'graphdoc': obographdoc,
        'logical_definitions': logical_definitions,
        'property_chain_axioms': property_chain_axioms
    }
serialized as a json object |
def get_build_file_path(self, build_module) -> str:
    """Return a full, OS-native path to the build file of `build_module`.

    The returned path is always native, regardless of the formats of
    project_root (native) and build_module (with '/').
    """
    module = norm_proj_path(build_module, '')
    # The project root itself uses the project-level build file name.
    file_name = BUILD_PROJ_FILE if '' == module else self.build_file_name
    return str(Path(self.project_root) / module / file_name)
The returned path will always be OS-native, regardless of the format
of project_root (native) and build_module (with '/'). |
def submit_ham(self, params):
    """Submit a ham (false-positive spam) comment to Akismet.

    :param params: dict that must include 'blog', 'user_ip', 'user_agent'
    :returns: True when Akismet acknowledges with "true", else False
    :raises MissingParams: when a required key is absent
    """
    for required in ['blog', 'user_ip', 'user_agent']:
        if required not in params:
            raise MissingParams(required)
    response = self._request('submit-ham', params)
    # ``is 200`` compared object identity and only worked via CPython's
    # small-int caching; equality is the correct comparison.
    if response.status == 200:
        return response.read() == "true"
    return False
def _encode_resp(self, value):
    """Recursively encode *value* into a RESP wire-format payload.

    :param mixed value: bytes, str, int, float, or a (possibly nested) list
    :rtype: bytes
    :raises ValueError: for unsupported types
    """
    if isinstance(value, bytes):
        # Bulk string: $<len>\r\n<data>\r\n
        length = ascii(len(value)).encode('ascii')
        return b''.join([b'$', length, CRLF, value, CRLF])
    if isinstance(value, str):
        return self._encode_resp(value.encode('utf-8'))
    if isinstance(value, (int, float)):
        # Numbers are sent as their ASCII representation (bulk string).
        return self._encode_resp(ascii(value).encode('ascii'))
    if isinstance(value, list):
        # Array: *<count>\r\n followed by each encoded element.
        parts = [b'*', ascii(len(value)).encode('ascii'), CRLF]
        parts.extend(self._encode_resp(item) for item in value)
        return b''.join(parts)
    raise ValueError('Unsupported type: {0}'.format(type(value)))
:param mixed value: The list of command parts to encode
:rtype: bytes |
def before(self, *nodes: Union[AbstractNode, str]) -> None:
    """Insert *nodes* before this node.

    Any ``str`` in nodes is converted to a Text node. No-op when this
    node has no parent.
    """
    parent = self.parentNode
    if parent:
        parent.insertBefore(_to_node_list(nodes), self)
If nodes contains ``str``, it will be converted to Text node. |
def rnumlistwithreplacement(howmany, max, min=0):
    """Return a list of *howmany* integers in [min, max] from www.random.org.

    The minimum value defaults to zero.

    :raises Exception: when the www.random.org quota has run out
    """
    if checkquota() < 1:
        raise Exception("Your www.random.org quota has already run out.")
    url = build_request_parameterWR(howmany, min, max)
    request = urllib.request.Request(url)
    request.add_header('User-Agent', 'randomwrapy/0.1 very alpha')
    response = urllib.request.build_opener().open(request)
    return response.read().split()
The minimum value defaults to zero. |
def update_count(cat_id):
    """Update the cached post count of the tag/category *cat_id*."""
    post_count = TabPost2Tag.select().where(
        TabPost2Tag.tag_id == cat_id
    ).count()
    TabTag.update(count=post_count).where(TabTag.uid == cat_id).execute()
def get_script(self):
    """Get the configuration script of the logical enclosure by ID or URI.

    Return:
        str: Configuration script.
    """
    script_uri = "{}/script".format(self.data["uri"])
    return self._helper.do_get(script_uri)
Return:
str: Configuration script. |
def compute(chart, date, fixedObjects=False):
    """Return a profection chart for a given date.

    :param chart: natal chart to profect
    :param date: target date (object exposing a ``jd`` Julian-day attribute)
    :param fixedObjects: when True, keep chart objects at their natal
        positions and rotate only houses and angles.
    """
    sun = chart.getObject(const.SUN)
    prevSr = ephem.prevSolarReturn(date, sun.lon)
    nextSr = ephem.nextSolarReturn(date, sun.lon)
    # Fraction of the current solar year elapsed, scaled to 30 degrees.
    rotation = 30 * (date.jd - prevSr.jd) / (nextSr.jd - prevSr.jd)
    age = math.floor((date.jd - chart.date.jd) / 365.25)
    # One 30-degree sign per full year of age, plus the partial year.
    rotation = 30 * age + rotation
    pChart = chart.copy()
    for obj in pChart.objects:
        if not fixedObjects:
            obj.relocate(obj.lon + rotation)
    for house in pChart.houses:
        house.relocate(house.lon + rotation)
    for angle in pChart.angles:
        angle.relocate(angle.lon + rotation)
    return pChart
date. Receives argument 'fixedObjects' to
fix chart objects in their natal locations. |
def u16(self, name, value=None, align=None):
    """Add an unsigned 2-byte integer field to the template.

    Convenience wrapper that calls ``uint`` with a fixed length of 2.
    """
    self.uint(2, name, value, align)
This is an convenience method that simply calls `Uint` keyword with predefined length. |
def reload(self, schedule):
    """Reload the schedule from a saved schedule data structure.

    :param schedule: schedule dict, optionally wrapped under a
        'schedule' key.
    """
    self.intervals = {}
    # Unwrap {'schedule': {...}} payloads coming from saved files.
    inner = schedule['schedule'] if 'schedule' in schedule else schedule
    self.opts.setdefault('schedule', {}).update(inner)
def editpermissions_anonymous_user_view(self, request, forum_id=None):
    """Render the form for editing anonymous-user permissions on a forum.

    Displays a form defining which permissions are granted to the
    anonymous user for the considered forum (global when forum_id is None).
    """
    forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None
    context = self.get_forum_perms_base_context(request, forum)
    context['forum'] = forum
    context['title'] = '{} - {}'.format(_('Forum permissions'), _('Anonymous user'))
    context['form'] = self._get_permissions_form(
        request, UserForumPermission, {'forum': forum, 'anonymous_user': True},
    )
    return render(request, self.editpermissions_anonymous_user_view_template_name, context)
The view displays a form to define which permissions are granted for the anonymous user for
the considered forum. |
def insert_sequences_into_tree(aln, moltype, params={}):
    """Return a tree from placement of sequences using ParsInsert.

    NOTE(review): the mutable default ``params={}`` is shared across
    calls; safe only while callers never mutate it.

    :param aln: alignment text (phylip-convertible)
    :param moltype: unused in this body — presumably kept for interface
        parity with sibling inserters; confirm against callers.
    """
    new_aln=get_align_for_phylip(StringIO(aln))
    aln2 = Alignment(new_aln)
    seqs = aln2.toFasta()
    parsinsert_app = ParsInsert(params=params)
    result = parsinsert_app(seqs)
    # Parse the resulting Newick tree into PhyloNode objects.
    tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
    result.cleanUp()
    return tree
def info_community(self,teamid):
    """Get community/team member info (tab-separated rows) by team id.

    :param teamid: team id used in the ``tid`` query parameter
    :returns: list of strings: first cell, player id (from the pid= href),
        link text, plus the third and fourth table cells.
    """
    headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/standings.phtml',"User-Agent": user_agent}
    req = self.session.get('http://'+self.domain+'/teamInfo.phtml?tid='+teamid,headers=headers).content
    soup = BeautifulSoup(req)
    info = []
    # Skip the header row; each remaining <tr> is one member.
    for i in soup.find('table',cellpadding=2).find_all('tr')[1:]:
        info.append('%s\t%s\t%s\t%s\t%s'%(i.find('td').text,i.find('a')['href'].split('pid=')[1],i.a.text,i.find_all('td')[2].text,i.find_all('td')[3].text))
    return info
def set_last_build_date(self):
    """Parse the feed's <lastbuilddate> and store it (None when absent)."""
    tag = self.soup.find('lastbuilddate')
    try:
        self.last_build_date = tag.string
    except AttributeError:
        # find() returned None: the feed has no lastbuilddate element.
        self.last_build_date = None
def sort_set(s):
    """Return a sorted list of the contents of a set.

    Sort order is determined by ``_sort_set_key``; results are memoized
    per frozenset in ``_sort_set_memo``.

    :raises TypeError: when *s* is not a Set
    """
    if not isinstance(s, Set):
        raise TypeError("sets only")
    key = frozenset(s)
    try:
        return _sort_set_memo[key]
    except KeyError:
        result = _sort_set_memo[key] = sorted(key, key=_sort_set_key)
        return result
return _sort_set_memo[s] | Return a sorted list of the contents of a set
This is intended to be used to iterate over world state, where you just need keys
to be in some deterministic order, but the sort order should be obvious from the key.
Non-strings come before strings and then tuples. Tuples compare element-wise as normal.
But ultimately all comparisons are between values' ``repr``.
This is memoized. |
def map_output(self, node_id, node_output_name, parameter_name):
    """Map the output from a node to a workflow output.

    :param node_id: The id of the node to map from.
    :param node_output_name: The node task's output parameter name.
    :param parameter_name: The workflow output parameter name.
    """
    self.output_mapping[parameter_name] = (node_id, node_output_name)
    # Record the workflow output as a dependent of the source node.
    self.dependents_by_node_id.setdefault(node_id, set()).add(
        'output_{}'.format(parameter_name))
:param node_id: The id of the node to map from.
:param node_output_name: The output parameter name for the node task to map to the workflow output.
:param parameter_name: The workflow output parameter name. |
def get_possible_actions(self):
    """Return a sequence of the possible actions in this environment.

    For small action sets (<= 20) duplicates are removed when hashable
    and the result is sorted when orderable, then each action is logged;
    larger sets are returned as-is with only their count logged.

    Usage:
        possible_actions = scenario.get_possible_actions()
    """
    possible_actions = self.wrapped.get_possible_actions()
    if len(possible_actions) <= 20:
        try:
            # Dedupe only when every action is hashable.
            possible_actions = list(set(possible_actions))
        except TypeError:
            possible_actions = list(possible_actions)
        try:
            possible_actions.sort()
        except TypeError:
            # Unorderable action types: keep the arbitrary order.
            pass
        self.logger.info('Possible actions:')
        for action in possible_actions:
            self.logger.info(' %s', action)
    else:
        self.logger.info("%d possible actions.", len(possible_actions))
    return possible_actions
executed within the environment.
Usage:
possible_actions = scenario.get_possible_actions()
Arguments: None
Return:
A sequence containing the possible actions which can be
executed within this scenario. |
def generic_distribution(target, seeds, func):
    r"""Return values drawn from a frozen Scipy distribution for given seeds.

    Applies the distribution's ``ppf`` (inverse CDF) to the seed values.

    Parameters
    ----------
    target : OpenPNM Object
        The object this model is associated with; provides access to the
        seed values via dictionary lookup.
    seeds : string
        Dictionary key on *target* containing random values in [0, 1].
    func : object
        An 'rv_frozen' object from scipy.stats with all of its parameters
        pre-specified.
    """
    # NOTE: a stray bare ``r`` statement (leftover raw-docstring prefix)
    # used to precede this body and raised NameError at call time.
    seed_values = target[seeds]
    return func.ppf(seed_values)
return value | r"""
Accepts an 'rv_frozen' object from the Scipy.stats submodule and returns
values from the distribution for the given seeds
This uses the ``ppf`` method of the stats object
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
seeds : string, optional
The dictionary key on the Geometry object containing random seed values
(between 0 and 1) to use in the statistical distribution.
func : object
An 'rv_frozen' object from the Scipy.stats library with all of the
parameters pre-specified.
Examples
--------
The following code illustrates the process of obtaining a 'frozen' Scipy
stats object and adding it as a model:
>>> import scipy
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[3, 3, 3])
>>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts)
>>> geo.add_model(propname='pore.seed',
... model=op.models.geometry.pore_seed.random)
Now retrieve the stats distribution and add to ``geo`` as a model:
>>> stats_obj = scipy.stats.weibull_min(c=2, scale=.0001, loc=0)
>>> geo.add_model(propname='pore.size',
... model=op.models.geometry.pore_size.generic_distribution,
... seeds='pore.seed',
... func=stats_obj)
>>> import matplotlib.pyplot as plt
>>> fig = plt.hist(stats_obj.ppf(q=scipy.rand(1000)), bins=50) |
def crashlog_cleanrange(from_day, up_to_day, **kwargs):
    """Remove all crashlogs from one date up to another.

    Dates can be specified as DAY-[MONTH-[YEAR]], e.g. today, yesterday,
    10, 10-09, 10-09-2015.
    """
    ctx = Context(**kwargs)
    action_kwargs = {
        'storage': ctx.repo.create_secure_service('storage'),
        'from_day': from_day,
        'to_day': up_to_day,
    }
    ctx.execute_action('crashlog:cleanwhen', **action_kwargs)
The date can be specified as DAY-[MONTH-[YEAR]].
Example:
today, yesterday, 10, 10-09, 10-09-2015 |
def sort(self, request, reverse=False):
    """Order the current collection by the configured sort column.

    Returns the collection unchanged when the column is unknown.
    """
    field = self.model._meta.fields.get(self.columns_sort)
    if not field:
        return self.collection
    ordering = field.desc() if reverse else field
    return self.collection.order_by(ordering)
def main():
    """Render PNGs of the analytic rate expressions (and their derivatives
    with respect to t) found in ``chempy.kinetics.integrated``.
    """
    t, kf, t0, major, minor, prod, beta = sympy.symbols(
        't k_f t0 Y Z X beta', negative=False)
    for f in funcs:
        args = [t, kf, prod, major, minor]
        if f in (pseudo_rev, binary_rev):
            # Reversible forms additionally take the backward rate kf/beta.
            args.insert(2, kf/beta)
        expr = f(*args, backend='sympy')
        with open(f.__name__ + '.png', 'wb') as ofh:
            sympy.printing.preview(expr, output='png', filename='out.png',
                                   viewer='BytesIO', outputbuffer=ofh)
        with open(f.__name__ + '_diff.png', 'wb') as ofh:
            sympy.printing.preview(expr.diff(t).subs({t0: 0}).simplify(),
                                   output='png', filename='out.png',
                                   viewer='BytesIO', outputbuffer=ofh)
expressions found in ``chempy.kinetics.integrated``. |
def get_tree_from_sha(self, ref):
    """Return a pygit2.Tree matching *ref*, or None when lookup fails."""
    lookup_errors = (KeyError, TypeError, ValueError, AttributeError)
    try:
        return self.repo.revparse_single(ref).tree
    except lookup_errors:
        return None
def should_run(self):
    """Check if the plugin should run or skip execution.

    :return: bool, False if plugin should skip execution
    """
    if self.is_orchestrator():
        # This plugin only makes sense on workers.
        self.log.warning("%s plugin set to run on orchestrator. Skipping", self.key)
        return False
    if self.operator_manifests_extract_platform != self.platform:
        # Exactly one platform is responsible for uploading metadata.
        self.log.info("Only platform [%s] will upload operators metadata. Skipping",
                      self.operator_manifests_extract_platform)
        return False
    if is_scratch_build():
        self.log.info("Scratch build. Skipping")
        return False
    if not self.has_operator_manifest():
        self.log.info("Operator manifests label not set in Dockerfile. Skipping")
        return False
    return True
:return: bool, False if plugin should skip execution |
def _build_b(self, force=False):
r
if force:
self._pure_b = None
if self._pure_b is None:
b = np.zeros(shape=(self.Np, ), dtype=float)
self._pure_b = b
self.b = self._pure_b.copy() | r"""
Builds the RHS matrix, without applying any boundary conditions or
source terms. This method is trivial and basically creates a column
vector of 0's.
Parameters
----------
force : Boolean (default is ``False``)
If set to ``True`` then the b matrix is built from new. If
``False`` (the default), a cached version of b is returned. The
cached version is *clean* in the sense that no boundary conditions
or sources terms have been added to it. |
def mangle_volume(citation_elements):
    """Make sure the volume letter is before the volume number.

    e.g. transforms 100B to B100 (JOURNAL elements only, in place).

    :param citation_elements: list of dicts with 'type' and 'volume' keys
    :returns: the same citation_elements list
    """
    # The original used a ``ur"..."`` literal, which is a SyntaxError on
    # Python 3; a plain raw string is equivalent here.
    volume_re = re.compile(r"(\d+)([A-Z])", re.U | re.I)
    for el in citation_elements:
        if el['type'] == 'JOURNAL':
            matches = volume_re.match(el['volume'])
            if matches:
                el['volume'] = matches.group(2) + matches.group(1)
    return citation_elements
e.g. transforms 100B to B100 |
def load_code_info(self):
    """Return a PhaseGroup with code info loaded for all contained phases.

    NOTE(review): the calls below resolve to a *module-level*
    ``load_code_info`` helper — this method only shadows that name as an
    attribute, not in the module namespace.
    """
    return PhaseGroup(
        setup=load_code_info(self.setup),
        main=load_code_info(self.main),
        teardown=load_code_info(self.teardown),
        name=self.name)
def aesthetics(cls):
    """Return all the aesthetics for this geom.

    geoms should not override this method.
    """
    main = cls.DEFAULT_AES.keys() | cls.REQUIRED_AES
    # 'group' is always accepted; British spellings are added as aliases
    # for any American-spelled colour aesthetics present.
    aliases = {'group'}
    for us_name, uk_name in (('color', 'colour'),
                             ('outlier_color', 'outlier_colour')):
        if us_name in main:
            aliases.add(uk_name)
    return main | aliases
geoms should not override this method. |
def page_crawled(self, page_resp):
    """Return whether the page was already crawled (text-hash lookup).

    New pages are added to the page cache as a side effect.
    """
    text = utils.parse_text(page_resp)
    page_hash = utils.hash_text(''.join(text))
    if page_hash in self.page_cache:
        return True
    utils.cache_page(self.page_cache, page_hash, self.args['cache_size'])
    return False
Add new pages to the page cache.
Return whether page was found in cache. |
def run_pty(self, command):
    """Execute *command* on the remote host with a pseudo-terminal.

    Returns a string containing the output of the command.
    """
    boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
    channel = self._ssh_client.get_transport().open_session()
    channel.get_pty()
    channel.exec_command(command)
    # NOTE(review): only the first 1024 bytes of output are returned;
    # longer output is truncated.
    return channel.recv(1024)
Returns a string containing the output of the command. |
def collect_dashboard_js(collector):
    """Generate compiled dashboard javascript for each configured dashboard."""
    dashmat = collector.configuration["dashmat"]
    modules = collector.configuration["__active_modules__"]
    compiled_static_prep = dashmat.compiled_static_prep
    compiled_static_folder = dashmat.compiled_static_folder
    npm_deps = list_npm_modules(collector, no_print=True)
    react_server = ReactServer()
    react_server.prepare(npm_deps, compiled_static_folder)
    for dashboard in collector.configuration["dashboards"].values():
        log.info("Generating compiled javascript for dashboard:{0}".format(dashboard.path))
        # Make the path filesystem-safe: escape '_' first, then map '/' to '_'.
        filename = dashboard.path.replace("_", "__").replace("/", "_")
        location = os.path.join(compiled_static_folder, "dashboards", "{0}.js".format(filename))
        if os.path.exists(location):
            # Remove stale output so the file is always regenerated.
            os.remove(location)
        generate_dashboard_js(dashboard, react_server, compiled_static_folder, compiled_static_prep, modules)
def view(A, offset=0):
    """Get an in-place view on the (offset) diagonal elements of a 2D array.

    This is actually a view (!) on the diagonal of the array, so you can
    in-place adjust the view.

    :param :class:`ndarray` A: 2 dimensional numpy array
    :param int offset: view offset to give back (negative entries allowed)
    :rtype: :class:`ndarray` view of diag(A)
    """
    from numpy.lib.stride_tricks import as_strided
    assert A.ndim == 2, "only implemented for 2 dimensions"
    assert A.shape[0] == A.shape[1], "attempting to get the view of non-square matrix?!"
    n = A.shape[0]
    # Stepping one row plus one column at a time walks down a diagonal.
    diag_stride = (n + 1) * A.itemsize
    if offset > 0:
        start, length = A[0, offset:], n - offset
    elif offset < 0:
        start, length = A[-offset:, 0], n + offset
    else:
        start, length = A, n
    return as_strided(start, shape=(length, ), strides=(diag_stride, ))
This is actually a view (!) on the diagonal of the array, so you can
in-place adjust the view.
:param :class:`ndarray` A: 2 dimensional numpy array
:param int offset: view offset to give back (negative entries allowed)
:rtype: :class:`ndarray` view of diag(A)
>>> import numpy as np
>>> X = np.arange(9).reshape(3,3)
>>> view(X)
array([0, 4, 8])
>>> d = view(X)
>>> d += 2
>>> view(X)
array([ 2, 6, 10])
>>> view(X, offset=-1)
array([3, 7])
>>> subtract(X, 3, offset=-1)
array([[ 2, 1, 2],
[ 0, 6, 5],
[ 6, 4, 10]]) |
def console_set_alignment(con: tcod.console.Console, alignment: int) -> None:
    """Change this console's current alignment mode.

    * tcod.LEFT
    * tcod.CENTER
    * tcod.RIGHT

    Args:
        con (Console): Any Console instance.
        alignment (int):

    .. deprecated:: 8.5
        Set :any:`Console.default_alignment` instead.
    """
    console_ptr = _console(con)
    lib.TCOD_console_set_alignment(console_ptr, alignment)
* tcod.LEFT
* tcod.CENTER
* tcod.RIGHT
Args:
con (Console): Any Console instance.
alignment (int):
.. deprecated:: 8.5
Set :any:`Console.default_alignment` instead. |
def compatible_api_version(server_version):
    """Check if this server API version is compatible to us.

    Only major version '1' is accepted.  Any failure while inspecting
    the version string is logged and treated as incompatible.
    """
    try:
        major = server_version.split('.')[0]
        if major == '1':
            return True
        logger.error(
            'Server API version (%s) is too new for us. Please update the executor installation.' % server_version)
        return False
    except Exception:
        logger.error(
            'Cannot understand the server API version (%s). Please update the executor installation.' % server_version)
        return False
def get_tab(self, tab_name, allow_disabled=False):
    """Return the named tab from this tab group, or ``None``.

    ``None`` is returned when the tab is unknown, not allowed, or
    disabled; pass ``allow_disabled=True`` to return disabled tabs too.
    """
    tab = self._tabs.get(tab_name, None)
    if not tab:
        return None
    if not tab._allowed:
        return None
    if tab._enabled or allow_disabled:
        return tab
    return None
If the tab is not allowed or not enabled this method returns ``None``.
If the tab is disabled but you wish to return it anyway, you can pass
``True`` to the allow_disabled argument. |
def _call_setnx(self, command, value):
    """Run *command* through the normal pipeline and index the value
    only if it was actually set (SETNX semantics) and this field is
    indexable.
    """
    outcome = self._traverse_command(command, value)
    if self.indexable and value is not None and outcome:
        self.index(value)
    return outcome
def _selectedRepoRow(self):
    """Return the row index of the currently selected repo, or ``None``
    when nothing is selected.
    """
    selection = self.reposTableWidget.selectionModel().selectedRows()
    for model_index in selection:
        # Only the first selected row matters.
        return model_index.row()
    return None
def conformPadding(cls, chars):
    """Conform alternate padding formats (e.g. ``'%04d'``) to the chars
    defined in ``PAD_MAP``.

    Values that already start with a ``PAD_MAP`` character (and empty
    values) pass through unmodified.

    Args:
        chars (str): input padding chars
    Returns:
        str: conformed padding chars
    Raises:
        ValueError: If chars contains invalid padding characters
    """
    if not chars or chars[0] in PAD_MAP:
        return chars
    return cls.getPaddingChars(cls.getPaddingNum(chars))
return pad | Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters |
def new_issuer(self, issuer_idx, info=None):
    """Add a new issuer to the dataset and return it.

    If ``issuer_idx`` is already in use, a unique variant of the id is
    generated before the issuer is created.

    Parameters:
        issuer_idx (str): The id to associate the issuer with.
        info (dict, list): Additional info of the issuer.
    Returns:
        Issuer: The newly added issuer.
    """
    chosen_idx = issuer_idx
    if chosen_idx in self._issuers.keys():
        chosen_idx = naming.index_name_if_in_list(chosen_idx, self._issuers.keys())
    created = issuers.Issuer(chosen_idx, info=info)
    self._issuers[chosen_idx] = created
    return created
Parameters:
issuer_idx (str): The id to associate the issuer with. If None or already exists, one is
generated.
info (dict, list): Additional info of the issuer.
Returns:
Issuer: The newly added issuer. |
def predict(self, X):
    """Predict for X, preserving the input container type.

    Dask arrays are predicted lazily block-by-block and dask dataframes
    partition-by-partition; any other input (NumPy array, pandas
    dataframe, scipy sparse matrix) is forwarded directly to the
    post-fit estimator.

    Parameters
    ----------
    X : array-like

    Returns
    -------
    y : array-like
    """
    self._check_method("predict")
    X = self._check_array(X)
    estimator = self._postfit_estimator
    if isinstance(X, da.Array):
        # Per-block prediction; the feature axis is dropped from the output.
        return X.map_blocks(
            _predict, dtype="int", estimator=estimator, drop_axis=1
        )
    if isinstance(X, dd._Frame):
        return X.map_partitions(
            _predict, estimator=estimator, meta=np.array([1])
        )
    return _predict(X, estimator=estimator)
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
Parameters
----------
X : array-like
Returns
-------
y : array-like |
def getControllerRoleForTrackedDeviceIndex(self, unDeviceIndex):
    """Return the controller type associated with a device index.

    This function is deprecated in favor of the new IVRInput system.
    """
    return self.function_table.getControllerRoleForTrackedDeviceIndex(unDeviceIndex)
def _memcache_key(self, timestamped=False):
    """Make a key suitable as a memcache entry.

    The key encodes everything a resolve depends on: the request, the
    repository uids, filter/orderer hashes, build mode and relevant
    config.  When ``timestamped`` is true and a timestamp is set, it is
    appended so time-locked resolves get distinct keys.
    """
    request = tuple(str(req) for req in self.package_requests)
    repo_ids = tuple(
        package_repository_manager.get_repository(path).uid
        for path in self.package_paths
    )
    parts = [
        "resolve",
        request,
        repo_ids,
        self.package_filter_hash,
        self.package_orderers_hash,
        self.building,
        config.prune_failed_graph,
    ]
    if timestamped and self.timestamp:
        parts.append(self.timestamp)
    return str(tuple(parts))
def send_request(cls, sock, working_dir, command, *arguments, **environment):
    """Send the initial Nailgun request over the specified socket.

    Arguments, environment entries, the working directory and finally
    the command are written as typed chunks, in that order.
    """
    for arg in arguments:
        cls.write_chunk(sock, ChunkType.ARGUMENT, arg)
    for env_pair in environment.items():
        payload = cls.ENVIRON_SEP.join(cls._decode_unicode_seq(env_pair))
        cls.write_chunk(sock, ChunkType.ENVIRONMENT, payload)
    cls.write_chunk(sock, ChunkType.WORKING_DIR, working_dir)
    cls.write_chunk(sock, ChunkType.COMMAND, command)
def labels(self, qids):
    """Return the Wikidata API query string for entity labels.

    :param qids: up to 50 Wikidata entity ids (e.g. ``['Q42']``)
    :raises ValueError: when more than 50 ids are given
    """
    if len(qids) > 50:
        raise ValueError("The limit is 50.")
    self.domain = 'www.wikidata.org'
    self.uri = self.wiki_uri(self.domain)
    query = self.WIKIDATA.substitute(
        WIKI=self.uri,
        ENDPOINT=self.endpoint,
        LANG=self.variant or self.lang,
        PROPS='labels')
    joined_ids = '|'.join(qids)
    query = query + "&ids=%s" % joined_ids
    self.set_status('labels', joined_ids)
    return query
def parse_datetime(date_str):
    """Parse *date_str* into a dict describing the date.

    Returns ``{'is_common_era': bool, 'parsed_datetime': value}`` where
    ``value`` is a ``datetime`` for CE dates, the (possibly expanded)
    original string for BCE dates, and ``None`` for the wildcard ``'*'``.
    For BCE dates, only the year part is supported.
    """
    parts = date_str.split("-")
    # A leading '-' (empty first split element) marks a BCE year.
    is_common_era = parts[0] != ''
    if len(parts) == 2:
        # Exactly two '-'-separated parts: pad out to a full timestamp.
        date_str = date_str + "-01-01T00:00:00Z"
    result = {
        'is_common_era': is_common_era,
        'parsed_datetime': None
    }
    if not is_common_era:
        # BCE dates are returned as strings, unparsed.
        result['parsed_datetime'] = date_str
        return result
    if date_str == '*':
        return result
    # Missing fields default to Jan 1st, midnight of the current year.
    default = datetime.datetime.now().replace(
        hour=0, minute=0, second=0, microsecond=0,
        day=1, month=1
    )
    result['parsed_datetime'] = parse(date_str, default=default)
    return result
for BCE dates, only supports the year part. |
def trigger(self, when=1):
    """Return a window with this window's size and a trigger policy.

    ``when`` may be an ``int`` (trigger every *when* tuples) or a
    ``datetime.timedelta`` (periodic trigger); anything else raises
    ``ValueError``.

    Returns:
        Window: Window that will be triggered.
    """
    triggered = Window(self.stream, self._config['type'])
    cfg = triggered._config
    cfg['evictPolicy'] = self._config['evictPolicy']
    cfg['evictConfig'] = self._config['evictConfig']
    if self._config['evictPolicy'] == 'TIME':
        cfg['evictTimeUnit'] = 'MILLISECONDS'
    if isinstance(when, datetime.timedelta):
        cfg['triggerPolicy'] = 'TIME'
        cfg['triggerConfig'] = int(when.total_seconds() * 1000.0)
        cfg['triggerTimeUnit'] = 'MILLISECONDS'
    elif isinstance(when, int):
        cfg['triggerPolicy'] = 'COUNT'
        cfg['triggerConfig'] = when
    else:
        raise ValueError(when)
    return triggered
When the window is triggered is defined by `when`.
If `when` is an `int` then the window is triggered every
`when` tuples. For example, with ``when=5`` the window
will be triggered every five tuples.
If `when` is an `datetime.timedelta` then it is the period
of the trigger. With a `timedelta` representing one minute
then the window is triggered every minute.
By default, when `trigger` has not been called on a `Window`
it triggers for every tuple inserted into the window
(equivalent to ``when=1``).
Args:
when: The size of the window, either an `int` to define the
number of tuples or `datetime.timedelta` to define the
duration of the window.
Returns:
Window: Window that will be triggered.
.. warning:: A trigger is only supported for a sliding window
such as one created by :py:meth:`last`. |
def base_url(self, value):
    """Set the Base URL property, updating all associated services."""
    logger.debug('StackInABox({0}): Updating URL from {1} to {2}'
                 .format(self.__id, self.__base_url, value))
    self.__base_url = value
    # Every registered service gets its url rebuilt from the new base.
    for service_name, (matcher, service) in six.iteritems(self.services):
        service.base_url = StackInABox.__get_service_url(value,
                                                         service.name)
        logger.debug('StackInABox({0}): Service {1} has url {2}'
                     .format(self.__id, service.name, service.base_url))
def exclusively(f):
    """Decorate a function to make it thread-safe by serializing
    invocations using the instance's ``_lock``.
    """
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        with self._lock:
            return f(self, *args, **kwargs)
    return wrapper
using a per-instance lock. |
def colorize(text, color, background=False):
    """Colorize *text* with a named style, hex code or RGB tuple.

    When *color* names a known style, a styled Color is returned;
    otherwise *color* is converted to ANSI and applied to the
    foreground, or to the background when ``background`` is true.

    ::

        colorize('hello', 'ff0000')
        colorize('hello', '#ff0000')
        colorize('hello', (255, 0, 0))
    """
    if color in _styles:
        styled = Color(text)
        styled.styles = [_styles.index(color) + 1]
        return styled
    painted = Color(text)
    ansi = _color2ansi(color)
    if background:
        painted.bgcolor = ansi
    else:
        painted.fgcolor = ansi
    return painted
:param text: the text you want to paint
:param color: a hex color or rgb color
:param background: decide to colorize background
::
colorize('hello', 'ff0000')
colorize('hello', '#ff0000')
colorize('hello', (255, 0, 0)) |
def validate(instance):
    """Validate *instance* against the JSON schema built from its class.

    :param object instance: The instance to validate
    :raises jsonschema.exceptions.ValidationError: On failed validation
    """
    as_dict = to_dict(instance, dict_type=dict)
    schema = build_schema(instance.__class__)
    jsonschema.validate(as_dict, schema)
:param object instance: The instance to validate
:raises jsonschema.exceptions.ValidationError: On failed validation |
def get_grapheme_cluster_break_property(value, is_bytes=False):
    """Get `GRAPHEME CLUSTER BREAK` property table entry for *value*.

    A leading ``^`` negates the lookup; aliases are resolved through
    ``unidata.unicode_alias`` before indexing the table.
    """
    table = unidata.ascii_grapheme_cluster_break if is_bytes else unidata.unicode_grapheme_cluster_break
    alias_map = unidata.unicode_alias['graphemeclusterbreak']
    if value.startswith('^'):
        stripped = value[1:]
        key = '^' + alias_map.get(stripped, stripped)
    else:
        key = alias_map.get(value, value)
    return table[key]
def get_uvi_history(self, params_dict):
    """Invoke the UV Index History endpoint and return raw JSON data.

    :param params_dict: dict with 'lat', 'lon', 'start' and 'end' keys
    :returns: a string containing raw JSON data
    :raises: *ValueError*, *APICallError*
    """
    # All parameters are passed to the API as strings.
    params = {key: str(params_dict[key])
              for key in ('lat', 'lon', 'start', 'end')}
    uri = http_client.HttpClient.to_url(UV_INDEX_HISTORY_URL,
                                        self._API_key,
                                        None)
    _, json_data = self._client.cacheable_get_json(uri, params=params)
    return json_data
:param params_dict: dict of parameters
:returns: a string containing raw JSON data
:raises: *ValueError*, *APICallError* |
def get(self) -> Union[Event, None]:
    """Get the latest event from the queue.

    Returns ``None`` when no (new) event has been published; otherwise
    the stored event data is looked up, enriched with its id and this
    subscriber, and returned as an :class:`Event`.
    """
    message = self._queue.get_message()
    if not (message and message['type'] == 'message'):
        return None
    event_id = DB.get_event(self._pub_key, self._processed_key)
    # Event payloads are stored as Python-literal strings.
    event_dict = ast.literal_eval(DB.get_hash_value(self._data_key, event_id))
    event_dict['id'] = event_id
    event_dict['subscriber'] = self._subscriber
    return Event.from_config(event_dict)
Call this method to query the queue for the latest event.
If no event has been published None is returned.
Returns:
Event or None |
def set_field_value(self, name, value):
    """Set *value* on field *name*, tracking it in ``__modified_data__``.

    Writes to unknown or non-writable fields are silently ignored.
    Setting a field back to its original value clears any pending
    modification instead of recording one.
    """
    # Resolve aliases to the field's canonical name.
    name = self.get_real_name(name)
    if not name or not self._can_write_field(name):
        return
    # A write resurrects a previously deleted field.
    if name in self.__deleted_fields__:
        self.__deleted_fields__.remove(name)
    if self.__original_data__.get(name) == value:
        # Value equals the original: drop any pending modification.
        try:
            self.__modified_data__.pop(name)
        except KeyError:
            pass
    else:
        self.__modified_data__[name] = value
        self._prepare_child(value)
    # NOTE(review): the dump's indentation is ambiguous here; this
    # read-only handling is reconstructed at function level — confirm
    # against upstream whether it belongs inside the else branch.
    if name not in self.__structure__ or not self.__structure__[name].read_only:
        return
    # Read-only fields propagate the flag to values that support it.
    try:
        value.set_read_only(True)
    except AttributeError:
        pass
def cmd_work(self, connection, sender, target, payload):
    """Do some (pretend) work, announcing start and finish on *target*.

    ``payload`` is the number of seconds to sleep; defaults to 5 when
    empty.
    """
    connection.action(target, "is doing something...")
    duration = int(payload or "5")
    time.sleep(duration)
    connection.action(target, "has finished !")
    connection.privmsg(target, "My answer is: 42.")
def class_instance_as_index(node):
    """Get the value as an index for the given instance.

    If the instance provides an ``__index__`` method inferring to a
    constant int, that constant node is returned; otherwise ``None``.
    """
    context = contextmod.InferenceContext()
    context.callcontext = contextmod.CallContext(args=[node])
    try:
        bound_methods = (
            inf for inf in node.igetattr("__index__", context=context)
            if isinstance(inf, bases.BoundMethod)
        )
        for method in bound_methods:
            for outcome in method.infer_call_result(node, context=context):
                if isinstance(outcome, nodes.Const) and isinstance(outcome.value, int):
                    return outcome
    except exceptions.InferenceError:
        pass
    return None
If an instance provides an __index__ method, then it can
be used in some scenarios where an integer is expected,
for instance when multiplying or subscripting a list. |
def xpathNewContext(self):
    """Create a new xmlXPathContext for this document.

    :raises xpathError: when the underlying C call fails
    """
    ctxt = libxml2mod.xmlXPathNewContext(self._o)
    if ctxt is None:
        raise xpathError('xmlXPathNewContext() failed')
    return xpathContext(_obj=ctxt)
def score(self):
    """Return the sum of the accidental dignities score, computing and
    caching the per-property scores on first use.
    """
    props = self.scoreProperties
    if not props:
        props = self.getScoreProperties()
        self.scoreProperties = props
    return sum(props.values())
score. |
def save_screenshot(driver, name):
    """Save a screenshot of the browser into ``$SCREENSHOT_DIR``.

    When the driver cannot take screenshots, or ``SCREENSHOT_DIR`` is
    unset, a warning is logged and nothing is saved.

    Args:
        driver (selenium.webdriver): The Selenium-controlled browser.
        name (str): Base name for the output file (".png" is appended).
    Returns:
        None
    """
    if not hasattr(driver, 'save_screenshot'):
        msg = (
            u"Browser does not support screenshots. "
            u"Could not save screenshot '{name}'"
        ).format(name=name)
        LOGGER.warning(msg)
        return
    screenshot_dir = os.environ.get('SCREENSHOT_DIR')
    if not screenshot_dir:
        LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')
        return
    if not os.path.exists(screenshot_dir):
        os.makedirs(screenshot_dir)
    driver.save_screenshot(os.path.join(screenshot_dir, name + '.png'))
The location of the screenshot can be configured
by the environment variable `SCREENSHOT_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name for the screenshot, which will be used in the output file name.
Returns:
None |
def get_contact(self, jid):
    """Return the roster entry for *jid*.

    Args:
        jid (aioxmpp.JID): jid of the contact
    Raises:
        ContactNotFound: when the jid is not in the roster
        AttributeError: when jid is not an aioxmpp.JID object
    """
    try:
        contacts = self.get_contacts()
        return contacts[jid.bare()]
    except KeyError:
        raise ContactNotFound
    except AttributeError:
        raise AttributeError("jid must be an aioxmpp.JID object")
Args:
jid (aioxmpp.JID): jid of the contact
Returns:
dict: the roster of contacts |
def forecast(self, parensemble=None):
    """Move the ensemble forward by running the model once per
    realization (the EnKF forecast step).

    Realizations whose runs failed are dropped from both the parameter
    and observation ensembles before the latter is returned.
    """
    if parensemble is None:
        parensemble = self.parensemble
    self.logger.log("evaluating ensemble")
    failed_runs, obsensemble = self._calc_obs(parensemble)
    if failed_runs is not None:
        self.logger.warn("dropping failed realizations")
        parensemble.loc[failed_runs, :] = np.NaN
        parensemble = parensemble.dropna()
        obsensemble.loc[failed_runs, :] = np.NaN
        obsensemble = obsensemble.dropna()
    self.logger.log("evaluating ensemble")
    return obsensemble
once for each realization |
def compute_default_choice(self):
    """Compute the best-performing choice, persist it in redis as the
    experiment's default, and return it (``None`` when there are no
    choices).
    """
    choices = self.choices
    if not choices:
        return None
    best = max(choices, key=lambda choice: choice.performance)
    self.redis.hset(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name,
                    "default-choice", best.name)
    self.refresh()
    return best
def get_sessions(self, app_path):
    """Return all currently active sessions for an application.

    Args:
        app_path (str): The configured application path.
    Returns:
        list[ServerSession]
    Raises:
        ValueError: when no application is configured at *app_path*.
    """
    apps = self._applications
    if app_path in apps:
        return list(apps[app_path].sessions)
    raise ValueError("Application %s does not exist on this server" % app_path)
Args:
app_path (str) :
The configured application path for the application to return
sessions for.
Returns:
list[ServerSession] |
def value_from_message(self, message):
    """Convert a DateTimeMessage to a ``datetime``.

    Without a time-zone offset a naive UTC datetime is returned;
    otherwise the stored milliseconds are shifted back to UTC and an
    aware datetime in that zone is built.
    """
    message = super(DateTimeField, self).value_from_message(message)
    offset = message.time_zone_offset
    if offset is None:
        return datetime.datetime.utcfromtimestamp(
            message.milliseconds / 1000.0)
    # Stored milliseconds are local; remove the offset (in minutes).
    adjusted_ms = message.milliseconds - 60000 * offset
    tz = util.TimeZoneOffset(offset)
    return datetime.datetime.fromtimestamp(adjusted_ms / 1000.0, tz=tz)
Args:
A DateTimeMessage instance.
Returns:
A datetime instance. |
def overrides(method):
    """Decorator marking *method* as overriding a superclass method.

    Runs at class-definition time: verifies that some base class of the
    class currently being defined has a method of the same name, copies
    its docstring onto *method* when *method* has none, and refuses to
    override methods marked ``__finalized__``.

    :raises AssertionError: if no super class method matches the name,
        or the matching super class method is finalized
    :return: *method*, possibly with a docstring added from super class
    """
    # Frame depth 2 is the class body applying this decorator;
    # _get_base_classes resolves its base-class expressions to classes.
    for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
        if hasattr(super_class, method.__name__):
            super_method = getattr(super_class, method.__name__)
            # Finalized super methods must not be overridden.
            if hasattr(super_method, "__finalized__"):
                finalized = getattr(super_method, "__finalized__")
                if finalized:
                    raise AssertionError('Method "%s" is finalized' %
                                         method.__name__)
            # Inherit the docstring when the override has none.
            if not method.__doc__:
                method.__doc__ = super_method.__doc__
            return method
    raise AssertionError('No super class method found for "%s"' %
                         method.__name__)
superclass.
The decorator code is executed while loading class. Using this method
should have minimal runtime performance implications.
This is based on my idea about how to do this and fwc:s highly improved
algorithm for the implementation fwc:s
algorithm : http://stackoverflow.com/a/14631397/308189
my answer : http://stackoverflow.com/a/8313042/308189
How to use:
from overrides import overrides
class SuperClass(object):
def method(self):
return 2
class SubClass(SuperClass):
@overrides
def method(self):
return 1
:raises AssertionError if no match in super classes for the method name
:return method with possibly added (if the method doesn't have one)
docstring from super class |
def finish_review(self, success=True, error=False):
    """Mark our review as finished by posting a commit status.

    No-op unless status reporting is enabled.  ``error`` takes
    precedence over ``success``.
    """
    if not self.set_status:
        return
    if error:
        state = "error"
        description = "Static analysis error! inline-plz failed to run."
    elif success:
        state = "success"
        description = "Static analysis complete! No errors found in your PR."
    else:
        state = "failure"
        description = "Static analysis complete! Found errors in your PR."
    self.github_repo.create_status(
        state=state,
        description=description,
        context="inline-plz",
        sha=self.last_sha,
    )
def GPIO_config(self, gpio_enable, gpio_config):
    """Enable or disable slave-select pins as gpio.

    Writes the enable mask to register 0xF6 and the direction/config
    mask to register 0xF7 on the device.
    """
    self.bus.write_byte_data(self.address, 0xF6, gpio_enable)
    self.bus.write_byte_data(self.address, 0xF7, gpio_config)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.