| Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
|---|---|---|
373,200
|
def _to_sqlite3_by_table(self, conn, table_name):
    def _insert_item(item):
        if isinstance(item, dict):
            cols = ', '.join(item.keys())
            placeholders = ', '.join('?' * len(item))
            sql = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(table_name, cols, placeholders)
            conn.execute(sql, tuple(item.values()))
        elif is_namedtuple(item):
            cols = ', '.join(item._fields)
            placeholders = ', '.join('?' * len(item))
            sql = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(table_name, cols, placeholders)
            conn.execute(sql, item)
        elif isinstance(item, (list, tuple)):
            placeholders = ', '.join('?' * len(item))
            sql = 'INSERT INTO {0} VALUES ({1})'.format(table_name, placeholders)
            conn.execute(sql, item)
        else:
            raise TypeError(
                'item must be a dict, namedtuple, tuple or list, got {0}'.format(type(item)))
    self.for_each(_insert_item)
|
Saves the sequence to the specified table of a sqlite3 database.
Each element can be a dictionary, namedtuple, tuple or list.
Target table must be created in advance.
:param conn: path or sqlite connection, cursor
:param table_name: table name string
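Example (illustrative sketch; the `seq` constructor, sequence contents and
table schema here are assumptions, not from the original source):
>>> import sqlite3
>>> conn = sqlite3.connect(':memory:')
>>> _ = conn.execute('CREATE TABLE user (id INT, name TEXT)')
>>> seq([{'id': 1, 'name': 'a'}, (2, 'b')])._to_sqlite3_by_table(conn, 'user')
>>> conn.execute('SELECT COUNT(*) FROM user').fetchone()[0]
2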
|
373,201
|
def add_to_recent(self, notebook):
if notebook not in self.recent_notebooks:
self.recent_notebooks.insert(0, notebook)
self.recent_notebooks = self.recent_notebooks[:20]
|
Add an entry to recent notebooks.
We only maintain the list of the 20 most recent notebooks.
|
373,202
|
def create(self, width, height):
return Image.new(self.mode, (width, height))
|
Create an image of the configured mode.
Parameters
----------
width: `int`
Image width.
height: `int`
Image height.
Returns
-------
`PIL.Image.Image`
|
373,203
|
def call_from_executor(self, callback, _max_postpone_until=None):
assert _max_postpone_until is None or isinstance(_max_postpone_until, float)
self._calls_from_executor.append((callback, _max_postpone_until))
if self._schedule_pipe:
try:
                os.write(self._schedule_pipe[1], b'x')
except (AttributeError, IndexError, OSError):
pass
|
Call this function in the main event loop.
Similar to Twisted's ``callFromThread``.
:param _max_postpone_until: `None` or `time.time` value. For internal
use. If the eventloop is saturated, consider this task to be low
priority and postpone maximum until this timestamp. (For instance,
repaint is done using low priority.)
|
373,204
|
def GetTransPosition(df,field,dic,refCol="transcript_id"):
try:
gen=str(int(df[field]))
transid=df[refCol]
bases=dic.get(transid).split(",")
bases=bases.index(str(gen))+1
    except Exception:
bases=np.nan
return bases
|
Maps a genome position to a transcript position.
:param df: a Pandas dataframe
:param field: the head of the column containing the genomic position
:param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'}
:param refCol: header of the reference column with IDs, eg. 'transcript_id'
:returns: position on transcript
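Example (illustrative; the dataframe row and mapping dict are hypothetical):
>>> import pandas as pd
>>> dic = {'ENST23923910': '234,235,236,1021'}
>>> row = pd.Series({'pos': 235, 'transcript_id': 'ENST23923910'})
>>> GetTransPosition(row, 'pos', dic)
2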
|
373,205
|
def onCancelButton(self, event):
if self.grid.changes:
dlg1 = wx.MessageDialog(self, caption="Message:",
message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
dlg1.Destroy()
self.Destroy()
else:
self.Destroy()
if self.main_frame:
self.main_frame.Show()
self.main_frame.Raise()
|
Quit grid with warning if unsaved changes present
|
373,206
|
def examples_section(doc, header_level):
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#' * (header_level + 1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
|
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
|
373,207
|
def get_inflators_cn_to_cn(target_year):
    # NOTE: the lines loading the aggregates were lost in extraction; the
    # original read per-year national accounts masses from columns named
    # 'consoCN_COICOP_{}'.format(<year>) and converted them with .to_dict().
    data_year_cn_aggregates = ...  # lost in extraction
    target_year_cn_aggregates = ...  # lost in extraction
    return dict(
        (key, target_year_cn_aggregates[key] / data_year_cn_aggregates[key])
        for key in data_year_cn_aggregates.keys()
    )
|
Computes the ageing inflator ("inflateur de vieillissement") from national accounts aggregates.
|
373,208
|
def _get_body_instance(self):
simple_body = {
MultipartType.OFPMP_FLOW: FlowStatsRequest,
MultipartType.OFPMP_AGGREGATE: AggregateStatsRequest,
MultipartType.OFPMP_PORT_STATS: PortStatsRequest,
MultipartType.OFPMP_QUEUE: QueueStatsRequest,
MultipartType.OFPMP_GROUP: GroupStatsRequest,
MultipartType.OFPMP_METER: MeterMultipartRequest,
MultipartType.OFPMP_EXPERIMENTER: ExperimenterMultipartHeader
}
array_of_bodies = {MultipartType.OFPMP_TABLE_FEATURES: TableFeatures}
if isinstance(self.multipart_type, UBInt16):
self.multipart_type = self.multipart_type.enum_ref(
self.multipart_type.value)
pyof_class = simple_body.get(self.multipart_type, None)
if pyof_class:
return pyof_class()
array_of_class = array_of_bodies.get(self.multipart_type, None)
if array_of_class:
return FixedTypeList(pyof_class=array_of_class)
    return BinaryData(b'')
|
Return the body instance.
|
373,209
|
def _sumLists(a, b):
val = 0
for i in map(lambda a, b: a * b, a, b):
val += i
return val
|
Algorithm to check validity of NBI and NIF.
Receives a string with a number to validate.
|
373,210
|
def add_handler_spec(f, handler_spec, *, kwargs=None):
handler_dict = automake_magic_attr(f)
if kwargs is None:
kwargs = {}
if kwargs != handler_dict.setdefault(handler_spec, kwargs):
raise ValueError(
"The additional keyword arguments to the handler are incompatible")
|
Attach a handler specification (see :class:`HandlerSpec`) to a function.
:param f: Function to attach the handler specification to.
:param handler_spec: Handler specification to attach to the function.
:type handler_spec: :class:`HandlerSpec`
:param kwargs: additional keyword arguments passed to the function
carried in the handler spec.
:type kwargs: :class:`dict`
:raises ValueError: if the handler was registered with
different `kwargs` before
This uses a private attribute, whose exact name is an implementation
detail. The `handler_spec` is stored in a :class:`dict` bound to the
attribute.
.. versionadded:: 0.11
The `kwargs` argument. If two handlers with the same spec, but
different arguments are registered for one function, an error
will be raised. So you should always include all possible
arguments, this is the responsibility of the calling decorator.
|
373,211
|
def pick_four_unique_nodes_quickly(n, seed=None):
rng = get_rng(seed)
k = rng.randint(n**4)
a = k % n
b = k // n % n
c = k // n ** 2 % n
d = k // n ** 3 % n
if (a != b and a != c and a != d and b != c and b != d and c != d):
return (a, b, c, d)
else:
return pick_four_unique_nodes_quickly(n, rng)
|
This is equivalent to np.random.choice(n, 4, replace=False)
Another fellow suggested np.random.random_sample(n).argpartition(4) which is
clever but still substantially slower.
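Usage sketch (assuming a numpy-style get_rng helper as in the source):
>>> a, b, c, d = pick_four_unique_nodes_quickly(10, seed=0)
>>> len({a, b, c, d})
4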
|
373,212
|
def get_listeners(self, event_type: str) -> List[Callable]:
if event_type not in self.events:
        raise ValueError(f"event type {event_type!r} is not registered")  # message reconstructed; original f-string lost
return self.events.get_listeners(event_type)
|
Get all listeners of a particular type of event.
|
373,213
|
def plotEzJz(self, *args, **kwargs):
    labeldict = {'t': r'$t$', 'R': r'$R$', 'vR': r'$v_R$', 'vT': r'$v_T$',
                 'z': r'$z$', 'vz': r'$v_z$', 'phi': r'$\phi$',
                 'x': r'$x$', 'y': r'$y$', 'vx': r'$v_x$', 'vy': r'$v_y$'}
    if 'pot' not in kwargs:
        try:
            pot = self._pot
        except AttributeError:
            raise AttributeError("Integrate orbit first or specify pot=")
    else:
        pot = kwargs.pop('pot')
    d1 = kwargs.pop('d1', 't')
    self.EzJz = [(evaluatePotentials(pot, self.orbit[ii, 0], self.orbit[ii, 3],
                                     t=self.t[ii], use_physical=False) -
                  evaluatePotentials(pot, self.orbit[ii, 0], 0.,
                                     phi=self.orbit[ii, 5], t=self.t[ii],
                                     use_physical=False) +
                  self.orbit[ii, 4]**2. / 2.) /
                 nu.sqrt(evaluateDensities(pot, self.orbit[ii, 0], 0.,
                                           phi=self.orbit[ii, 5],
                                           t=self.t[ii],
                                           use_physical=False))
                 for ii in range(len(self.t))]
    if 'xlabel' not in kwargs:
        kwargs['xlabel'] = labeldict[d1]
    if 'ylabel' not in kwargs:
        kwargs['ylabel'] = r'$E_z/\sqrt{\rho}$'  # label reconstructed; original raw string lost
    if d1 == 't':
        return plot.bovy_plot(nu.array(self.t),
                              nu.array(self.EzJz) / self.EzJz[0],
                              *args, **kwargs)
    elif d1 == 'z':
        return plot.bovy_plot(self.orbit[:, 3],
                              nu.array(self.EzJz) / self.EzJz[0],
                              *args, **kwargs)
    elif d1 == 'R':
        return plot.bovy_plot(self.orbit[:, 0],
                              nu.array(self.EzJz) / self.EzJz[0],
                              *args, **kwargs)
    elif d1 == 'vR':
        return plot.bovy_plot(self.orbit[:, 1],
                              nu.array(self.EzJz) / self.EzJz[0],
                              *args, **kwargs)
    elif d1 == 'vT':
        return plot.bovy_plot(self.orbit[:, 2],
                              nu.array(self.EzJz) / self.EzJz[0],
                              *args, **kwargs)
    elif d1 == 'vz':
        return plot.bovy_plot(self.orbit[:, 4],
                              nu.array(self.EzJz) / self.EzJz[0],
                              *args, **kwargs)
|
NAME:
plotEzJz
PURPOSE:
plot E_z(.)/sqrt(dens(R)) along the orbit
INPUT:
pot= Potential instance or list of instances in which the orbit was
integrated
d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz'
+bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2010-08-08 - Written - Bovy (NYU)
|
373,214
|
def get_adapter_for_persistent_model(self, persistent_model, rest_model=None):
persistent_signature = self.generate_signature(persistent_model)
if persistent_signature in self._persistent_map:
sub_map = self._persistent_map[persistent_signature]
if rest_model is None:
return self._persistent_map[persistent_signature][self.DEFAULT_REST_ADAPTER]
else:
rest_sig = self.generate_signature(rest_model)
if rest_sig in sub_map:
return self._persistent_map[persistent_signature][rest_sig]
raise TypeError("No registered Data Adapter for class %s" % persistent_signature)
|
:param persistent_model: instance of persistent model
:param rest_model: specific REST model
:return: the matching model adapter
:rtype: ModelAdapter
|
373,215
|
async def expand_now(self, building: UnitTypeId=None, max_distance: Union[int, float]=10, location: Optional[Point2]=None):
if not building:
start_townhall_type = {Race.Protoss: UnitTypeId.NEXUS, Race.Terran: UnitTypeId.COMMANDCENTER, Race.Zerg: UnitTypeId.HATCHERY}
building = start_townhall_type[self.race]
assert isinstance(building, UnitTypeId)
if not location:
location = await self.get_next_expansion()
if location:
await self.build(building, near=location, max_distance=max_distance, random_alternative=False, placement_step=1)
|
Takes new expansion.
|
373,216
|
def uri_tree_encode(uri_tree, type_host = HOST_REG_NAME):
scheme, authority, path, query, fragment = uri_tree
if authority:
user, passwd, host, port = authority
if user:
user = pct_encode(user, USER_ENCDCT)
if passwd:
passwd = pct_encode(passwd, PASSWD_ENCDCT)
if host and type_host == HOST_REG_NAME:
host = pct_encode(host, REG_NAME_ENCDCT)
if isinstance(port, (int, long)):
port = str(port)
authority = (user, passwd, host, port)
if path:
path = pct_encode(path, P_ENCDCT)
if (not authority) and (not scheme):
        sppath = path.split('/', 1)
        if ':' in sppath[0]:
            sppath[0] = sppath[0].replace(':', '%3A')
        path = '/'.join(sppath)
if query:
query = tuple([(query_elt_encode(x, QUERY_KEY_ENCDCT),
query_elt_encode(y, QUERY_VAL_ENCDCT)) for (x, y) in query])
if fragment:
fragment = pct_encode(fragment, FRAG_ENCDCT)
return (scheme, authority, path, query, fragment)
|
Percent/Query encode a raw URI tree.
|
373,217
|
def get_ip(request):
    if getsetting('LOCAL_GEOLOCATION_IP'):
        return getsetting('LOCAL_GEOLOCATION_IP')
    forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if not forwarded_for:
        return UNKNOWN_IP
    for ip in forwarded_for.split(','):
        ip = ip.strip()
        if not ip.startswith('10.') and not ip == '127.0.0.1':
            return ip
    return UNKNOWN_IP
|
Return the IP address inside the HTTP_X_FORWARDED_FOR var inside
the `request` object.
The return value of this function can be overridden by the
`LOCAL_GEOLOCATION_IP` variable in the `conf` module.
This function will skip local IPs (starting with 10. and equals to
127.0.0.1).
|
373,218
|
def search_in_dirs(fname, search_dpaths=[], shortcircuit=True,
return_tried=False, strict=False):
fpath_list = []
tried_list = []
for dpath in search_dpaths:
fpath = join(dpath, fname)
if return_tried:
tried_list.append(fpath)
if exists(fpath):
if shortcircuit:
if return_tried:
return fpath, tried_list
return fpath
else:
fpath_list.append(fpath)
if strict and len(fpath_list) == 0:
        msg = ('fname=%r was not found in any search directory' % (fname,))  # message reconstructed; original literal lost
        if return_tried:
            msg += ' Tried: ' + ', '.join(tried_list)
raise Exception(msg)
if shortcircuit:
if return_tried:
return None, tried_list
return None
else:
if return_tried:
return fpath_list, tried_list
return fpath_list
|
search_in_dirs
Args:
fname (str): file name
search_dpaths (list):
shortcircuit (bool):
return_tried (bool): return tried paths
strict (bool): (default = False)
Returns:
fpath: None
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> fname = 'Inno Setup 5\ISCC.exe'
>>> search_dpaths = ut.get_install_dirs()
>>> shortcircuit = True
>>> fpath = ut.search_in_dirs(fname, search_dpaths, shortcircuit)
>>> print(fpath)
|
373,219
|
def psql_csv_run(sql_command, error_handler=None):
    # literals reconstructed from the surrounding logic; exact strings lost in extraction
    csv_query = ('COPY ({query}) TO STDOUT WITH CSV HEADER;'
                 .format(query=sql_command))
    new_env = os.environ.copy()
    new_env.setdefault('PGOPTIONS', '')
    new_env["PGOPTIONS"] += ' --statement-timeout=0'
    psql_proc = popen_nonblock([PSQL_BIN, '-d', 'postgres', '--no-password',
                                '--no-psqlrc', '-c', csv_query],
                               stdout=PIPE,
                               env=new_env)
    stdout = psql_proc.communicate()[0].decode('utf-8')
    if psql_proc.returncode != 0:
        if error_handler is not None:
            error_handler(psql_proc)
        else:
            assert error_handler is None
            raise UserException(
                'could not csv-execute a query successfully via psql',
                'Query was "{query}".'.format(query=sql_command),
            )
    assert psql_proc.returncode == 0
    return csv.reader(iter(stdout.strip().split('\n')))
|
Runs psql and returns a CSVReader object from the query
This CSVReader includes header names as the first record in all
situations. The output is fully buffered into Python.
|
373,220
|
def release(self, tid, fh):
try:
try:
self.fds[fh].unregisterHandler(fh)
except AttributeError:
pass
del self.fds[fh]
except KeyError:
raise FuseOSError(errno.EBADF)
return 0
|
Close file. Descriptor is removed from ``self.fds``.
Parameters
----------
tid : str
Path to file. Ignored.
fh : int
File descriptor to release.
|
373,221
|
def containsUid(self, uid):
if self.uid == uid:
return True
for child in self.children:
if child.containsUid(uid):
return True
return False
|
containsUid - Check if the uid (unique internal ID) appears anywhere as a direct child to this node, or the node itself.
@param uid <uuid.UUID> - uuid to check
@return <bool> - True if #uid is this node's uid, or is the uid of any children at any level down
|
373,222
|
def _post_md5_skip_on_check(self, key, filename, size, md5_match):
with self._md5_meta_lock:
rfile = self._md5_map.pop(key)
lpath = pathlib.Path(filename)
if md5_match:
if size is None:
size = lpath.stat().st_size
with self._transfer_lock:
self._transfer_set.remove(
blobxfer.operations.download.Downloader.
create_unique_transfer_operation_id(rfile))
self._download_total -= 1
self._download_bytes_total -= size
if self._general_options.dry_run:
                logger.info('[DRY RUN] MD5 match, skipping: {} -> {}'.format(
                    rfile.path, lpath))  # message reconstructed; original literal lost
else:
if self._general_options.dry_run:
with self._transfer_lock:
self._transfer_set.remove(
blobxfer.operations.download.Downloader.
create_unique_transfer_operation_id(rfile))
self._download_total -= 1
self._download_bytes_total -= size
                logger.info(
                    '[DRY RUN] MD5 mismatch, not skipping: {} -> {}'.format(
                        rfile.path, lpath))  # message reconstructed; original literal lost
else:
self._add_to_download_queue(lpath, rfile)
|
Perform post MD5 skip on check
:param Downloader self: this
:param str key: md5 map key
:param str filename: local filename
:param int size: size of checked data
:param bool md5_match: if MD5 matches
|
373,223
|
def updateEvolution(self):
self.pEvolution = np.zeros((self.pCount,self.pNextCount))
for j in range(self.pCount):
pNow = self.pGrid[j]
pNextMean = self.pNextIntercept + self.pNextSlope*pNow
dist = approxUniform(N=self.pNextCount,bot=pNextMean-self.pNextWidth,top=pNextMean+self.pNextWidth)[1]
self.pEvolution[j,:] = dist
|
Updates the "population punk proportion" evolution array. Fashion victims
believe that the proportion of punks in the subsequent period is a linear
function of the proportion of punks this period, subject to a uniform
shock. Given the attributes pNextIntercept, pNextSlope, pNextCount,
pNextWidth, and pGrid, this method generates a new array for the
attribute pEvolution, representing a discrete approximation of next period
states for each current period state in pGrid.
Parameters
----------
none
Returns
-------
none
|
373,224
|
def login(context, request):
__ac_name = request.get("__ac_name", None)
__ac_password = request.get("__ac_password", None)
logger.info("*** LOGIN %s ***" % __ac_name)
if __ac_name is None:
api.fail(400, "__ac_name is missing")
if __ac_password is None:
api.fail(400, "__ac_password is missing")
acl_users = api.get_tool("acl_users")
acl_users.credentials_cookie_auth.login()
if api.is_anonymous():
api.fail(401, "Invalid Credentials")
return get(context, request, username=__ac_name)
|
Login Route
Login route to authenticate a user against Plone.
|
373,225
|
def pp_file_to_dataframe(pp_filename):
df = pd.read_csv(pp_filename, delim_whitespace=True,
header=None, names=PP_NAMES,usecols=[0,1,2,3,4])
df.loc[:,"name"] = df.name.apply(str).apply(str.lower)
return df
|
read a pilot point file to a pandas Dataframe
Parameters
----------
pp_filename : str
pilot point file
Returns
-------
df : pandas.DataFrame
a dataframe with pp_utils.PP_NAMES for columns
|
373,226
|
def _get_dvs(service_instance, dvs_name):
switches = list_dvs(service_instance)
if dvs_name in switches:
inventory = get_inventory(service_instance)
container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
for item in container.view:
if item.name == dvs_name:
return item
return None
|
Return a reference to a Distributed Virtual Switch object.
:param service_instance: PyVmomi service instance
:param dvs_name: Name of DVS to return
:return: A PyVmomi DVS object
|
373,227
|
def example(self) -> str:
    # format strings reconstructed; original literals lost in extraction
    if self.rgb_mode:
        colorcode = 'rgb({}, {}, {})'.format(*self.rgb)
    else:
        colorcode = '{}'.format(self.code)
    return '{code}: {s}'.format(code=colorcode, s=self)
|
Same as str(self), except the color codes are actually used.
|
373,228
|
def ajax_count_plus(self, slug):
output = {
        'count': 1 if MWiki.view_count_plus(slug) else 0,  # key reconstructed; original literal lost
}
return json.dump(output, self)
|
Increase the post view count by one via AJAX.
|
373,229
|
def simBirth(self,which_agents):
N = np.sum(which_agents)
self.aLvlNow[which_agents] = drawLognormal(N,mu=self.aLvlInitMean,sigma=self.aLvlInitStd,seed=self.RNG.randint(0,2**31-1))
self.eStateNow[which_agents] = 1.0
self.t_age[which_agents] = 0
self.t_cycle[which_agents] = 0
return None
|
Makes new consumers for the given indices. Initialized variables include aNrm, as
well as time variables t_age and t_cycle. Normalized assets are drawn from a lognormal
distribution given by aLvlInitMean and aLvlInitStd.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
|
373,230
|
def get_selinux_context(path):
    out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)
    try:
        ret = re.search(r'\w+:\w+:\w+:\w+', out).group(0)
    except AttributeError:
        ret = (
            'No selinux context information is available for {0}'.format(path)
        )
    return ret
|
Get an SELinux context from a given path
CLI Example:
.. code-block:: bash
salt '*' file.get_selinux_context /etc/hosts
|
373,231
|
def destroy_balancer(balancer_id, profile, **libcloud_kwargs):
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
balancer = conn.get_balancer(balancer_id)
return conn.destroy_balancer(balancer, **libcloud_kwargs)
|
Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.destroy_balancer balancer_1 profile1
|
373,232
|
def _aggregate_one_result(
self, sock_info, slave_ok, cmd, collation=None, session=None):
result = self._command(
sock_info,
cmd,
slave_ok,
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
        batch = result['cursor']['firstBatch']
return batch[0] if batch else None
|
Internal helper to run an aggregate that returns a single result.
|
373,233
|
def _dataframe_from_csv(reader, delimiter, with_header, skipspace):
sep = delimiter
header = 0
if not with_header:
header = None
return pd.read_csv(
reader,
header=header,
sep=sep,
skipinitialspace=skipspace,
        encoding='utf-8'  # value reconstructed; original literal lost
)
|
Returns csv data as a pandas Dataframe object
|
373,234
|
def template(self, key):
try:
return self._templates[key]
except KeyError:
return Template.Plugins[key]
|
Returns the template associated with this scaffold.
:param key | <str>
:return <projex.scaffold.Template> || None
|
373,235
|
def load_parameter_definitions(self, sheet_name: str = None):
definitions = self.excel_handler.load_definitions(sheet_name, filename=self.filename)
self.definition_version = self.excel_handler.version
return definitions
|
Load variable text from rows in excel file.
If no spreadsheet arg is given, all spreadsheets are loaded.
The first cell in the first row in a spreadsheet must contain the keyword 'variable' or the sheet is ignored.
Any cells used as titles (with no associated value) are also added to the returned dictionary. However, the
values associated with each header will be None. For example, given the spreadsheet:
| variable | A | B |
|----------|---|---|
| Title | | |
| Entry | 1 | 2 |
The following list of definitions would be returned:
[ { variable: 'Title', A: None, B: None }
, { variable: 'Entry', A: 1 , B: 2 }
]
:param sheet_name:
:return: list of dicts with {header col name : cell value} pairs
|
373,236
|
def _network_event_lifecycle_cb(conn, net, event, detail, opaque):
    # dict keys reconstructed following the salt libvirt event conventions
    _salt_send_event(opaque, conn, {
        'network': {
            'name': net.name(),
            'uuid': net.UUIDString()
        },
        'event': _get_libvirt_enum_string('network', event),
        'detail': 'unknown'
    })
|
Network lifecycle events handler
|
373,237
|
def record_make_all_subfields_volatile(rec):
for tag in rec.keys():
for field_position, field in enumerate(rec[tag]):
for subfield_position, subfield in enumerate(field[0]):
if subfield[1][:9] != "VOLATILE:":
record_modify_subfield(rec, tag, subfield[0], "VOLATILE:" + subfield[1],
subfield_position, field_position_local=field_position)
|
Turns all subfields to volatile
|
373,238
|
def _CreateReadAccessHelper(self):
h = CheckAccessHelper("read")
h.Allow("aff4:/")
h.Allow("aff4:/users")
h.Allow("aff4:/users/*", self._IsHomeDir)
h.Allow("aff4:/foreman", self._UserHasAdminLabel)
h.Allow("aff4:/blobs")
h.Allow("aff4:/blobs/*")
h.Allow("aff4:/FP")
h.Allow("aff4:/FP/*")
h.Allow("aff4:/files")
h.Allow("aff4:/files/*")
h.Allow("aff4:/index")
h.Allow("aff4:/index/*")
h.Allow("aff4:/client_index")
h.Allow("aff4:/client_index/*")
h.Allow("aff4:/ACL")
h.Allow("aff4:/ACL/*")
h.Allow("aff4:/stats")
h.Allow("aff4:/stats/*")
h.Allow("aff4:/config")
h.Allow("aff4:/config/*")
h.Allow("aff4:/flows")
h.Allow("aff4:/flows/*")
h.Allow("aff4:/hunts")
h.Allow("aff4:/hunts/*")
h.Allow("aff4:/cron")
h.Allow("aff4:/cron/*")
h.Allow("aff4:/audit")
h.Allow("aff4:/audit/*")
h.Allow("aff4:/audit/logs")
h.Allow("aff4:/audit/logs/*")
h.Allow(self.CLIENT_URN_PATTERN)
h.Allow(self.CLIENT_URN_PATTERN + "/*", self._HasAccessToClient)
h.Allow("aff4:/artifact_store")
h.Allow("aff4:/artifact_store/*")
return h
|
Creates a CheckAccessHelper for controlling read access.
This function and _CreateQueryAccessHelper essentially define GRR's ACL
policy. Please refer to these 2 functions to either review or modify
GRR's ACLs.
Read access gives you the ability to open and read aff4 objects for which
you already have the URN.
Returns:
CheckAccessHelper for controlling read access.
|
373,239
|
def fetch(self):
params = values.of({})
payload = self._version.fetch(
            'GET',
self._uri,
params=params,
)
return TaskQueueInstance(
self._version,
payload,
            workspace_sid=self._solution['workspace_sid'],
            sid=self._solution['sid'],
)
|
Fetch a TaskQueueInstance
:returns: Fetched TaskQueueInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
|
373,240
|
def get_top(modality_type, value=None):
if modality_type in (ModalityType.AUDIO,
ModalityType.AUDIO_SPECTRAL,
ModalityType.GENERIC_L2_LOSS,
ModalityType.IDENTITY,
ModalityType.IDENTITY_SYMBOL,
ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
ModalityType.SPEECH_RECOGNITION,
ModalityType.VIDEO_IDENTITY):
return identity_top
elif modality_type in (ModalityType.CLASS_LABEL,
ModalityType.MULTI_LABEL,
ModalityType.ONE_HOT_CLASS_LABEL,
ModalityType.SIGMOID_CLASS_LABEL):
return class_label_top
elif modality_type in (ModalityType.CTC_SYMBOL,
ModalityType.SYMBOL,
ModalityType.SYMBOL_WEIGHTS_ALL):
return symbol_top
elif modality_type == ModalityType.IMAGE:
return image_top
elif modality_type == ModalityType.IMAGE_CHANNEL_COMPRESS:
return image_channel_compress_top
elif modality_type == ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM:
return image_channel_embeddings_top
elif modality_type in (ModalityType.REAL,
ModalityType.REAL_L2_LOSS,
ModalityType.REAL_LOG_POISSON_LOSS):
return real_top
elif modality_type == ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL:
return sigmoid_max_pooling_class_label_top
elif modality_type == ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL:
return softmax_average_pooling_class_label_top
elif modality_type == ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL:
return softmax_last_timestep_class_label_top
elif modality_type == ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL:
return softmax_max_pooling_class_label_top
elif modality_type == ModalityType.SYMBOL_ONE_HOT:
return symbol_one_hot_top
elif modality_type in (ModalityType.VIDEO,
ModalityType.VIDEO_BITWISE,
ModalityType.VIDEO_PIXEL_NOISE):
return video_top
elif modality_type in (ModalityType.VIDEO_L1,
ModalityType.VIDEO_L2):
return video_l1_top
elif modality_type in (ModalityType.VIDEO_L1_RAW,
ModalityType.VIDEO_L2_RAW):
return video_raw_top
return value
|
Gets default top transformation; if none available, return value.
|
373,241
|
def register_whoosheer(self, wh):
self.whoosheers.append(wh)
for model in wh.models:
        event.listen(model, 'after_{0}'.format(INSERT_KWD), self.after_insert)
        event.listen(model, 'after_{0}'.format(UPDATE_KWD), self.after_update)
        event.listen(model, 'after_{0}'.format(DELETE_KWD), self.after_delete)
        query_class = getattr(model, 'query_class', None)
if query_class is not None and isclass(query_class):
if issubclass(query_class, self.query):
pass
elif query_class not in (BaseQuery, SQLAQuery, WhoosheeQuery):
query_class_name = query_class.__name__
model.query_class = type(
"Whooshee{}".format(query_class_name), (query_class, self.query), {}
)
else:
model.query_class = self.query
else:
model.query_class = self.query
if self.app:
wh.app = self.app
return wh
|
This will register the given whoosheer on `whoosheers`, create the
necessary SQLAlchemy event listeners, replace the `query_class` with
our own query class which will provide the search functionality,
and store the app on the whoosheer, so that we can always work
with that.
:param wh: The whoosheer which should be registered.
|
373,242
|
def tag(self, path, name):
    # string literals and dict keys reconstructed; originals lost in extraction
    if not path[len(path) - 1] == '/':
        path += '/'
    config = self.get_config()
    folder = self.find_folder({
        'path': path
    }, config)
    if not folder:
        raise custom_errors.FileNotInConfig(path)
    old_name = folder['name']
    folder['name'] = name
    dir_config = self.adapter.get_dir_config(path)
    dir_config['name'] = name
    self.adapter.set_dir_config(dir_config)
    self.set_config(config)
    return old_name
|
Change name associated with path
|
373,243
|
def get_analysis_data_for(self, ar):
analyses = ar.objectValues("Analysis")
out = []
for an in analyses:
info = self.get_base_info(an)
info.update({
"service_uid": an.getServiceUID(),
})
out.append(info)
return out
|
Return the Analysis data for this AR
|
373,244
|
def raw(self, query, settings=None, stream=False):
query = self._substitute(query, None)
return self._send(query, settings=settings, stream=stream).text
|
Performs a query and returns its output as text.
- `query`: the SQL query to execute.
- `settings`: query settings to send as HTTP GET parameters
- `stream`: if true, the HTTP response from ClickHouse will be streamed.
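Usage sketch (the `client` instance and query are assumptions, not from the
original source):
>>> text = client.raw('SELECT count() FROM system.tables')  # plain-text result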
|
373,245
|
def calculate_perf_counter_100ns_queuelen_type(previous, current, property_name):
n0 = previous[property_name]
n1 = current[property_name]
d0 = previous["Timestamp_Sys100NS"]
d1 = current["Timestamp_Sys100NS"]
if n0 is None or n1 is None:
return
return (n1 - n0) / (d1 - d0)
|
PERF_COUNTER_100NS_QUEUELEN_TYPE
Average length of a queue to a resource over time in 100 nanosecond units.
https://msdn.microsoft.com/en-us/library/aa392905(v=vs.85).aspx
Formula (n1 - n0) / (d1 - d0)
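Worked example with hypothetical sample values: a counter that grew by 300
queue-length units over 600 units of 100ns time gives an average queue
length of 0.5.
>>> previous = {'QueueLength': 100, 'Timestamp_Sys100NS': 0}
>>> current = {'QueueLength': 400, 'Timestamp_Sys100NS': 600}
>>> calculate_perf_counter_100ns_queuelen_type(previous, current, 'QueueLength')
0.5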
|
373,246
|
def log_analyzer2(path):
with handle(MalformedLogEntryError,
lambda (c):
invoke_restart(,
+ c.text)):
for filename in find_all_logs(path):
analyze_log(filename)
|
This procedure considers every line which can't be parsed
as a line with ERROR level.
|
373,247
|
def connect(self, *args, **kwargs):
self._bootstrap_cm_list_from_file()
CMClient.connect(self, *args, **kwargs)
|
Attempt to establish connection, see :meth:`.CMClient.connect`
|
373,248
|
def get_vm(self, resource_group_name, vm_name):
s where the decorator comes in
this will return all the data about the virtual machine
instanceView')
|
you need to retry this just in case the credentials token expires,
that's where the decorator comes in
this will return all the data about the virtual machine
|
373,249
|
def from_string(cls, string, format_=None, fps=None, **kwargs):
fp = io.StringIO(string)
return cls.from_file(fp, format_, fps=fps, **kwargs)
|
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
|
373,250
|
def unpack_rows(self, column_types, connection):
for _ in iter_range(self.num_rows):
yield tuple(typ.from_resultset(self.payload, connection) for typ in column_types)
|
Unpack rows for data (from a select statement) from payload and yield a single row at a time.
:param column_types: a tuple of column descriptors
e.g. (<class 'pyhdb.protocol.types.String'>, <class 'pyhdb.protocol.types.ClobType'>)
:param connection: a db connection object
:returns: a generator object
|
373,251
|
def delete_rrset(self, zone_name, rtype, owner_name):
return self.rest_api_connection.delete("/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name)
|
Deletes an RRSet.
Arguments:
zone_name -- The zone containing the RRSet to be deleted. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
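Usage sketch (the `client` instance and zone data are assumptions):
>>> resp = client.delete_rrset('example.com.', 'A', 'foo')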
|
373,252
|
def duplicate_files(self):
result=list()
    files = SubmissionFile.valid_ones.order_by('md5')  # ordering key reconstructed to match the md5 groupby below
for key, dup_group in groupby(files, lambda f: f.md5):
file_list=[entry for entry in dup_group]
if len(file_list)>1:
for entry in file_list:
if entry.submissions.filter(assignment=self).count()>0:
result.append([key, file_list])
break
return result
|
Search for duplicates of submission file uploads for this assignment.
This includes searching in other courses, whether inactive or not.
Returns a list of lists, where each inner list is a set of duplicate submissions
with at least one of them belonging to this assignment.
|
373,253
|
def insert_pattern(pattern, model, index=0):
if not pattern:
return False
pattern = pattern.replace(QChar(QChar.ParagraphSeparator), QString("\n"))
pattern = foundations.common.get_first_item(foundations.strings.to_string(pattern).split("\n"))
model.insert_pattern(foundations.strings.to_string(pattern), index)
return True
|
Inserts given pattern into given Model.
:param pattern: Pattern.
:type pattern: unicode
:param model: Model.
:type model: PatternsModel
:param index: Insertion index.
:type index: int
:return: Method success.
:rtype: bool
|
373,254
|
def url(context, view, subdomain=UNSET, *args, **kwargs):
if subdomain is UNSET:
        request = context.get('request')
        if request is not None:
            subdomain = getattr(request, 'subdomain', None)
        else:
            subdomain = None
    elif subdomain == '':
subdomain = None
return reverse(view, subdomain=subdomain, args=args, kwargs=kwargs)
|
Resolves a URL in a template, using subdomain-based URL resolution.
If no subdomain is provided and a ``request`` is in the template context
when rendering, the URL will be resolved relative to the current request's
subdomain. If no ``request`` is provided, the URL will be resolved relative
to current domain with the ``settings.ROOT_URLCONF``.
Usage::
{% load subdomainurls %}
{% url 'view-name' subdomain='subdomain' %}
.. note:: This tag uses the variable URL syntax introduced in Django
1.3 as ``{% load url from future %}`` and was made the standard in Django
1.5. If you are upgrading a legacy application from one of the previous
template tag formats, make sure to quote your constant string URL names
to avoid :exc:`~django.core.urlresolver.NoReverseMatch` errors during
template rendering.
|
373,255
|
def track_progress(
measure: MeasureProgress,
target: MetricProgress,
interval_check: float,
capture_maybe: Optional[CaptureProgress] = None
) -> None:
def measure_to_target() -> MeasureComparison:
return list(zip(measure(), target))
def is_finished(progress: MeasureComparison) -> bool:
return all(p >= t for p, t in progress)
capture = capture_maybe or capture_print()
rt_started = now_real()
while True:
advance(interval_check)
rt_elapsed = now_real() - rt_started
progress = measure_to_target()
ratio_progress_min = min(m / t for m, t in progress)
if ratio_progress_min == 0.0:
rt_total_projected = inf
else:
rt_total_projected = rt_elapsed / ratio_progress_min
capture(ratio_progress_min, rt_total_projected - rt_elapsed, progress)
if is_finished(progress):
stop()
break
|
Tracks progress against a certain end condition of the simulation (for instance, a certain duration on the simulated
clock), reporting this progress as the simulation chugs along. Stops the simulation once the target has been
reached. By default, the progress is reported as printout on standard output, in a manner that works best for
digital terminals.
|
373,256
|
def intersects_segment(self, seg):
return any(value.intersects_segment(seg) for value in self.itervalues())
|
Returns True if any segmentlist in self intersects the
segment, otherwise returns False.
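Usage sketch (assuming the glue.segments flavour of segment objects):
>>> d = segmentlistdict({'H1': segmentlist([segment(0, 10)])})
>>> d.intersects_segment(segment(5, 15))
True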
|
373,257
|
def load_jupyter_server_extension(nb_server_app):
    # URL path literals reconstructed; originals lost in extraction
    app = nb_server_app.web_app
    host_pattern = '.*$'
    app.add_handlers(host_pattern, [
        (utils.url_path_join(app.settings['base_url'], '/http_over_websocket'),
         handlers.HttpOverWebSocketHandler),
        (utils.url_path_join(app.settings['base_url'],
                             '/http_over_websocket/diagnose'),
         handlers.HttpOverWebSocketDiagnosticHandler),
    ])
    print('jupyter_http_over_ws extension initialized')  # message reconstructed
|
Called by Jupyter when this module is loaded as a server extension.
|
373,258
|
def makeAnimation(self):
aclip=mpy.AudioFileClip("sound.wav")
self.iS=self.iS.set_audio(aclip)
self.iS.write_videofile("mixedVideo.webm",15,audio=True)
print("wrote "+"mixedVideo.webm")
|
Use pymovie to render (visual+audio)+text overlays.
|
373,259
|
def fetch(self, category, filter_classified=False, **kwargs):
if category not in self.categories:
cause = "%s category not valid for %s" % (category, self.__class__.__name__)
raise BackendError(cause=cause)
if filter_classified and self.archive:
cause = "classified fields filtering is not compatible with archiving items"
raise BackendError(cause=cause)
if self.archive:
self.archive.init_metadata(self.origin, self.__class__.__name__, self.version, category,
kwargs)
self.client = self._init_client()
for item in self.fetch_items(category, **kwargs):
if filter_classified:
item = self.filter_classified_data(item)
yield self.metadata(item, filter_classified=filter_classified)
|
Fetch items from the repository.
The method retrieves items from a repository.
To removed classified fields from the resulting items, set
the parameter `filter_classified`. Take into account this
parameter is incompatible with archiving items. Raw client
data are archived before any other process. Therefore,
classified data are stored within the archive. To prevent
from possible data leaks or security issues when users do
not need these fields, archiving and filtering are not
compatible.
:param category: the category of the items fetched
:param filter_classified: remove classified fields from the resulting items
:param kwargs: a list of other parameters (e.g., from_date, offset, etc.
specific for each backend)
:returns: a generator of items
:raises BackendError: either when the category is not valid or
'filter_classified' and 'archive' are active at the same time.
|
373,260
|
def swap(self, core, other):
    params = {
        'action': 'SWAP',  # keys reconstructed from the Solr CoreAdmin API
        'core': core,
        'other': other,
    }
return self._get_url(self.url, params=params)
|
http://wiki.apache.org/solr/CoreAdmin#head-928b872300f1b66748c85cebb12a59bb574e501b
|
373,261
|
def get_dataset(self, name, multi_instance=0):
return [elem for elem in self._data_list
if elem.name == name and elem.multi_id == multi_instance][0]
|
get a specific dataset.
example:
try:
gyro_data = ulog.get_dataset('sensor_gyro')
except (KeyError, IndexError, ValueError) as error:
print(type(error), "(sensor_gyro):", error)
:param name: name of the dataset
:param multi_instance: the multi_id, defaults to the first
:raises KeyError, IndexError, ValueError: if name or instance not found
|
373,262
|
def render(self, template, **kwargs):
kwargs["cache_key"] = "%s" % kwargs["url"].values()
kwargs["lang"] = self.get_locale()
kwargs["assets"] = self.assets
kwargs["main_collections"] = self.main_collections(kwargs["lang"])
kwargs["cache_active"] = self.cache is not None
kwargs["cache_time"] = 0
kwargs["cache_key"], kwargs["cache_key_i18n"] = self.make_cache_keys(request.endpoint, kwargs["url"])
kwargs["template"] = template
for plugin in self.__plugins_render_views__:
kwargs.update(plugin.render(**kwargs))
return render_template(kwargs["template"], **kwargs)
|
Render a route template and adds information to this route.
:param template: Template name.
:type template: str
:param kwargs: dictionary of named arguments used to be passed to the template
:type kwargs: dict
:return: Http Response with rendered template
:rtype: flask.Response
|
373,263
|
def scale_up(self, n, pods=None, **kwargs):
    maximum = dask.config.get('kubernetes.count.max')  # config key reconstructed; original literal lost
if maximum is not None and maximum < n:
logger.info("Tried to scale beyond maximum number of workers %d > %d",
n, maximum)
n = maximum
pods = pods or self._cleanup_terminated_pods(self.pods())
to_create = n - len(pods)
new_pods = []
for i in range(3):
try:
for _ in range(to_create):
new_pods.append(self.core_api.create_namespaced_pod(
self.namespace, self.pod_template))
to_create -= 1
break
except kubernetes.client.rest.ApiException as e:
            if e.status == 500 and 'ServerTimeout' in e.body:
                logger.info("Server timeout, retry #%d", i + 1)
time.sleep(1)
last_exception = e
continue
else:
raise
else:
raise last_exception
return new_pods
|
Make sure we have n dask-workers available for this cluster
Examples
--------
>>> cluster.scale_up(20) # ask for twenty workers
|
373,264
|
def validate_proof(proof: List[Keccak256], root: Keccak256, leaf_element: Keccak256) -> bool:
hash_ = leaf_element
for pair in proof:
hash_ = hash_pair(hash_, pair)
return hash_ == root
|
Checks that `leaf_element` was contained in the tree represented by
`merkleroot`.
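Usage sketch (leaf_a/leaf_b are hypothetical leaf hashes; assumes hash_pair
is the same pairing function used to build the tree):
>>> root = hash_pair(leaf_a, leaf_b)
>>> validate_proof([leaf_b], root, leaf_a)
True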
|
373,265
|
def get_database_columns(self, tables=None, database=None):
source = database if database else self.database
tables = tables if tables else self.tables
return {tbl: self.get_columns(tbl) for tbl in tqdm(tables, total=len(tables),
                                      desc='Getting columns from {0}'.format(source))}  # description reconstructed; original literal lost
|
Retrieve a dictionary of columns.
|
373,266
|
def meta(self):
if not self._pv.meta_data_property or not self._meta_target:
return {}
return getattr(self._meta_target, self._pv.meta_data_property)
|
Value of the bound meta-property on the target.
|
373,267
|
def vocab_account_type(instance):
    # property names and message text reconstructed from the docstring and STIX conventions
    for key, obj in instance['objects'].items():
        if 'type' in obj and obj['type'] == 'user-account':
            try:
                acct_type = obj['account_type']
            except KeyError:
                continue
            if acct_type not in enums.ACCOUNT_TYPE_OV:
                yield JSONError("Object '%s' is a User Account Object "
                                "with an 'account_type' of '%s', which is not a "
                                "value in the account-type-ov vocabulary."
                                % (key, acct_type), instance['id'], 'account-type')
|
Ensure a user-account objects' 'account-type' property is from the
account-type-ov vocabulary.
|
373,268
|
def get_code_language(self):
js_source = self.get_js_source()
if self.options.get("include_html", False):
resources = get_sphinx_resources(include_bokehjs_api=True)
html_source = BJS_HTML.render(
css_files=resources.css_files,
js_files=resources.js_files,
bjs_script=js_source)
return [html_source, "html"]
else:
return [js_source, "javascript"]
|
This is largely copied from bokeh.sphinxext.bokeh_plot.run
|
373,269
|
def _to_json(self, include_references=True):
if include_references:
return json.dumps(self._resource._properties, cls=PotionJSONEncoder)
else:
return json.dumps(
{
k: v
for k, v in self._resource._properties.items()
if not isinstance(v, Resource) and not k.startswith("$")
},
cls=PotionJSONEncoder,
)
|
Convert the model to JSON using the PotionJSONEncoder and automatically
resolving the resource as needed (`_properties` call handles this).
|
373,270
|
def stream_events(self, filter: Callable[[Event], bool] = None, *, max_queue_size: int = 0):
return stream_events([self], filter, max_queue_size=max_queue_size)
|
Shortcut for calling :func:`stream_events` with this signal in the first argument.
|
373,271
|
def types(**args):
def l(func):
        if hasattr(func, '__annotations__'):
func.__annotations__.update(args)
else:
func.__annotations__ = args
return func
return l
|
Specifies the types used for the arguments of a published service.
@types(a=int, b = str)
def f(a, b):
pass
|
373,272
|
def list_commands(self, ctx):
self.connect(ctx)
if not hasattr(ctx, "widget"):
return super(Engineer, self).list_commands(ctx)
return ctx.widget.engineer_list_commands() + super(Engineer, self).list_commands(ctx)
|
list all commands exposed to engineer
|
373,273
|
async def set_property_value(self, turn_context: TurnContext, property_name: str, value: object) -> None:
    if turn_context is None:
        raise TypeError('turn_context cannot be None')  # message reconstructed; original literal lost
    if not property_name:
        raise TypeError('property_name cannot be None or empty')  # message reconstructed
cached_state = turn_context.turn_state.get(self._context_service_key)
cached_state.state[property_name] = value
|
Sets a property value in the state cache for this turn.
:param turn_context: The context object for this turn.
:param property_name: The name of the property to set.
:param value: The value to set on the property.
:return: None
|
373,274
|
def files(self):
    # dict keys and literals reconstructed; originals lost in extraction
    if self._rundir['files'] is UNDETERMINED:
        out_stem = pathlib.Path(self.par['ioin']['output_file_stem'] + '_')
        out_dir = self.path / out_stem.parent
        if out_dir.is_dir():
            self._rundir['files'] = set(out_dir.iterdir())
        else:
            self._rundir['files'] = set()
    return self._rundir['files']
|
Set of found binary files output by StagYY.
|
373,275
|
def _rspiral(width, height):
x0 = 0
y0 = 0
x1 = width - 1
y1 = height - 1
while x0 < x1 and y0 < y1:
for x in range(x0, x1):
yield x, y0
for y in range(y0, y1):
yield x1, y
for x in range(x1, x0, -1):
yield x, y1
for y in range(y1, y0, -1):
yield x0, y
x0 += 1
y0 += 1
x1 -= 1
y1 -= 1
if x0 == x1:
for y in range(y0, y1 + 1):
yield x0, y
elif y0 == y1:
for x in range(x0, x1 + 1):
yield x, y0
|
Reversed spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
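Example (illustrative):
>>> list(_rspiral(3, 2))
[(0, 0), (1, 0), (2, 0), (2, 1), (1, 1), (0, 1)]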
|
373,276
|
def main():
    configure_obj = None
    # NOTE: every option string and help text of this argument parser was lost
    # in extraction; the flag spellings below are inferred from the attribute
    # names used later (config, host, user, passwd, keyfile, force,
    # login_mysql, login_server) and the surviving dest=/action=/nargs=
    # keywords, not recovered from the original source.
    parser = argparse.ArgumentParser(description='Server configuration tool')
    parser.add_argument('-H', '--host', help='server host')
    parser.add_argument('-u', '--user', help='login user')
    parser.add_argument('-p', '--passwd', help='login password')
    parser.add_argument('-k', '--keyfile', help='ssh key file')
    parser.add_argument('-f', '--force', dest='force', action='store_true',
                        help='force the operation')
    parser.set_defaults(force=False)
    parser.add_argument('-m', '--login-mysql', dest='login_mysql',
                        action='store_true', help='log in to mysql')
    parser.set_defaults(login_mysql=False)
    parser.add_argument('-s', '--login-server', dest='login_server',
                        action='store_true', help='log in to the server')
    parser.set_defaults(login_server=False)
    parser.add_argument('-c', '--config', nargs=2,
                        help='config file and section')
    args = parser.parse_args()
    if args.config is None and args.host is None:
        raise ValueError('either a config file or a host must be given')  # message reconstructed
    if args.host is not None:
        if (args.user and (args.passwd or args.keyfile)) is None:
            raise ValueError('a user and a password or key file are required')  # message reconstructed
    configure = None
    if args.config is not None:
        configure = configparser.ConfigParser()
        configure.read(args.config[0])
        config_sections = configure.sections()
        if args.config[1] not in config_sections:
            raise KeyError(
                'section {0} not found; available sections: {1}'.format(
                    args.config[1], config_sections))  # message reconstructed
        configure_obj = configure[args.config[1]]
    ServerBase(
        args, configure_obj,
        configure=configure
    )
|
Check args
|
373,277
|
def get_item(self, table_name, key_dict,
consistent_read=False,
expression_attribute_names=None,
projection_expression=None,
return_consumed_capacity=None):
        payload = {'TableName': table_name,
                   'Key': utils.marshall(key_dict),
                   'ConsistentRead': consistent_read}
        if expression_attribute_names:
            payload['ExpressionAttributeNames'] = expression_attribute_names
        if projection_expression:
            payload['ProjectionExpression'] = projection_expression
        if return_consumed_capacity:
            _validate_return_consumed_capacity(return_consumed_capacity)
            payload['ReturnConsumedCapacity'] = return_consumed_capacity
        return self.execute('GetItem', payload)
|
Invoke the `GetItem`_ function.
:param str table_name: table to retrieve the item from
:param dict key_dict: key to use for retrieval. This will
be marshalled for you so a native :class:`dict` works.
:param bool consistent_read: Determines the read consistency model: If
set to :py:data`True`, then the operation uses strongly consistent
reads; otherwise, the operation uses eventually consistent reads.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param str projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- INDEXES: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying INDEXES
will only return consumed capacity information for table(s).
- TOTAL: The response includes only the aggregate consumed
capacity for the operation.
- NONE: No consumed capacity details are included in the
response.
:rtype: tornado.concurrent.Future
.. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_GetItem.html
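Usage sketch (the `client` instance, table name and key are assumptions):
>>> future = client.get_item('users', {'id': 'abc123'}, consistent_read=True)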
|
373,278
|
def to_networkx(self, labels=None, edge_labels=False):
    # attribute keys and edge tags reconstructed; original literals lost
    import networkx as nx
    graph = nx.DiGraph()
    for node in self._traverse_nodes():
        u = node.key
        graph.add_node(u)
        graph.nodes[u]['value'] = node.value
        if labels is not None:
            label = ','.join([str(getattr(node, k)) for k in labels])
            graph.nodes[u]['label'] = label
        if node.left is not None:
            v = node.left.key
            graph.add_node(v)
            graph.add_edge(u, v)
            if edge_labels:
                graph.edge[u][v]['label'] = 'L'
        if node.right is not None:
            v = node.right.key
            graph.add_node(v)
            graph.add_edge(u, v)
            if edge_labels:
                graph.edge[u][v]['label'] = 'R'
    return graph
|
Get a networkx representation of the binary search tree.
|
373,279
|
def decode(self, bytes, raw=False):
sec = super(Time32Type, self).decode(bytes)
return sec if raw else dmc.toLocalTime(sec)
|
decode(bytearray, raw=False) -> value
Decodes the given bytearray containing the elapsed time in
seconds since the GPS epoch and returns the corresponding
Python :class:`datetime`.
If the optional parameter ``raw`` is ``True``, the integral
number of seconds will be returned instead.
|
373,280
|
def _unpack_case(self, case):
base_mva = case.base_mva
b = case.connected_buses
l = case.online_branches
g = case.online_generators
nb = len(b)
nl = len(l)
ng = len(g)
return b, l, g, nb, nl, ng, base_mva
|
Returns the contents of the case to be used in the OPF.
|
373,281
|
def _include_file(context, uri, calling_uri, **kwargs):
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(
context._clean_inheritance_tokens(),
template)
callable_(ctx, **_kwargs_for_include(callable_, context._data, **kwargs))
|
locate the template from the given uri and include it in
the current output.
|
373,282
|
def write(self, outfile=None, section=None):
    with io.open(outfile or self.user_config_file(), 'w') as f:  # mode reconstructed; original literal lost
self.data.write(outfile=f, section=section)
|
Write the current config to a file (defaults to user config).
:param str outfile: The path to the file to write to.
:param None/str section: The config section to write, or :data:`None`
to write the entire config.
|
373,283
|
def get_suitable_slot_for_duplicate(self, src_slot):
slot_from = to_int(src_slot, 0)
if slot_from < 1:
return -1
container = self.get_container_at(slot_from)
if not container or not IAnalysisRequest.providedBy(container):
return -1
    occupied = self.get_slot_positions(type='all')  # literal reconstructed
wst = self.getWorksheetTemplate()
if not wst:
slot_to = max(occupied) + 1
return slot_to
layout = wst.getLayout()
for pos in layout:
        if pos['type'] != 'd' or to_int(pos['dup']) != slot_from:  # layout keys reconstructed; original literals lost
            continue
        slot_to = int(pos['pos'])
if slot_to in occupied:
continue
return slot_to
occupied.append(len(layout))
slot_to = max(occupied) + 1
return slot_to
|
Returns the suitable position for a duplicate analysis, taking into
account if there is a WorksheetTemplate assigned to this worksheet.
By default, returns a new slot at the end of the worksheet unless there
is a slot defined for a duplicate of the src_slot in the worksheet
template layout not yet used.
:param src_slot:
:return: suitable slot position for a duplicate of src_slot
|
373,284
|
def calibration_stimulus(self, mode):
    if mode == 'tone':
        return self.tone_calibrator.stimulus
    elif mode == 'noise':
        return self.bs_calibrator.stimulus
|
Gets the stimulus model for calibration
:param mode: Type of stimulus to get: tone or noise
:type mode: str
:returns: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`
|
373,285
|
def _merge_align_bams(data):
for key in (["work_bam"], ["work_bam_plus", "disc"], ["work_bam_plus", "sr"], ["umi_bam"]):
in_files = tz.get_in(key, data, [])
if not isinstance(in_files, (list, tuple)):
in_files = [in_files]
in_files = [x for x in in_files if x and x != "None"]
if in_files:
ext = "-%s" % key[-1] if len(key) > 1 else ""
out_file = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data),
"%s-sort%s.bam" % (dd.get_sample_name(data), ext))
merged_file = merge_bam_files(in_files, utils.safe_makedir(os.path.dirname(out_file)),
data, out_file=out_file)
data = tz.update_in(data, key, lambda x: merged_file)
else:
data = tz.update_in(data, key, lambda x: None)
if "align_bam" in data and "work_bam" in data:
data["align_bam"] = data["work_bam"]
return data
|
Merge multiple alignment BAMs, including split and discordant reads.
|
373,286
|
def _build_header(self):
logger.debug("===============")
logger.debug("Building header")
logger.debug("===============")
self.template += hs.header
|
Adds the header template to the master template string
|
373,287
|
def segmentlistdict_fromsearchsummary_in(xmldoc, program = None):
stbl = lsctables.SearchSummaryTable.get_table(xmldoc)
ptbl = lsctables.ProcessTable.get_table(xmldoc)
return stbl.get_in_segmentlistdict(program and ptbl.get_ids_by_program(program))
|
Convenience wrapper for a common case usage of the segmentlistdict
class: searches the process table in xmldoc for occurances of a
program named program, then scans the search summary table for
matching process IDs and constructs a segmentlistdict object from
the in segments in those rows.
Note: the segmentlists in the segmentlistdict are not necessarily
coalesced, they contain the segments as they appear in the
search_summary table.
|
373,288
|
def draw(self):
if not self.visible:
return
self.window.blit(self.image, self.loc)
|
Draws the image at the given location.
|
373,289
|
def start_health_check(self, recipient):
if recipient not in self.addresses_events:
self.whitelist(recipient)
    ping_nonce = self.nodeaddresses_to_nonces.setdefault(
        recipient,
        {'nonce': 0},  # key reconstructed; original literal lost
    )
events = healthcheck.HealthEvents(
event_healthy=Event(),
event_unhealthy=Event(),
)
self.addresses_events[recipient] = events
greenlet_healthcheck = gevent.spawn(
healthcheck.healthcheck,
self,
recipient,
self.event_stop,
events.event_healthy,
events.event_unhealthy,
self.nat_keepalive_retries,
self.nat_keepalive_timeout,
self.nat_invitation_timeout,
ping_nonce,
)
    greenlet_healthcheck.name = f'Healthcheck for {pex(recipient)}'  # name format reconstructed; original f-string lost
greenlet_healthcheck.link_exception(self.on_error)
self.greenlets.append(greenlet_healthcheck)
|
Starts a task for healthchecking `recipient` if there is not
one yet.
It also whitelists the address
|
373,290
|
def validate(self):
for validator in self.validators:
try:
validator(self.obj)
except ValidationError as e:
self.errors.append(e.error)
if not self.errors and self._has_unvalidated_prefix():
self._move_to_validated()
return not self.errors
|
Validates the given Amazon S3 file with :attr:`validators`. If errors
occur they are appended to :attr:`errors`. If the file is valid and a
`AWS_UNVALIDATED_PREFIX` config is present, its value will be removed
from the file key.
:return: a boolean indicating if the file was valid.
|
373,291
|
def dump(self):
return json.dumps(
self.primitive,
sort_keys=True,
ensure_ascii=False,
        separators=(',', ': '))  # separators reconstructed; original literals lost
|
Item as a JSON representation.
|
373,292
|
def request(self, url, post=None, method="GET"):
dsid = self.get_dsid()
baseurl = "https://auth.api.swedbank.se/TDE_DAP_Portal_REST_WEB/api/v1/%s?dsid=%s" % (
url, dsid)
if self.pch is None:
self.pch = build_opener(HTTPCookieProcessor(self.cj))
if post:
post = bytearray(post, "utf-8")
request = Request(baseurl, data=post)
request.add_header("Content-Type", "application/json")
else:
request = Request(baseurl)
request.add_header("User-Agent", self.useragent)
request.add_header("Authorization", self.get_authkey())
request.add_header("Accept", "*/*")
request.add_header("Accept-Language", "sv-se")
request.add_header("Connection", "keep-alive")
request.add_header("Proxy-Connection", "keep-alive")
self.cj.set_cookie(
            Cookie(version=0, name='dsid', value=dsid, port=None,
                   port_specified=False, domain='.swedbank.se',  # name/domain/path reconstructed; original literals lost
                   domain_specified=False, domain_initial_dot=False,
                   path='/',
                   path_specified=True, secure=False, expires=None,
                   discard=True, comment=None, comment_url=None,
                   rest={'HttpOnly': None}, rfc2109=False))
request.get_method = lambda: method
tmp = self.pch.open(request)
self.data = tmp.read().decode("utf8")
|
Make the request
|
373,293
|
def sign(self, issuer_cert, issuer_key, digest):
digest_obj = _lib.EVP_get_digestbyname(digest)
_openssl_assert(digest_obj != _ffi.NULL)
_lib.X509_CRL_set_issuer_name(
self._crl, _lib.X509_get_subject_name(issuer_cert._x509))
_lib.X509_CRL_sort(self._crl)
result = _lib.X509_CRL_sign(self._crl, issuer_key._pkey, digest_obj)
_openssl_assert(result != 0)
|
Sign the CRL.
Signing a CRL enables clients to associate the CRL itself with an
issuer. Before a CRL is meaningful to other OpenSSL functions, it must
be signed by an issuer.
This method implicitly sets the issuer's name based on the issuer
certificate and private key used to sign the CRL.
.. versionadded:: 16.1.0
:param X509 issuer_cert: The issuer's certificate.
:param PKey issuer_key: The issuer's private key.
:param bytes digest: The digest method to sign the CRL with.
|
373,294
|
def save_controls(self, parameterstep=None,
                  simulationstep=None,
                  auxfiler=None):
if auxfiler:
auxfiler.save(parameterstep, simulationstep)
for element in printtools.progressbar(self):
element.model.parameters.save_controls(
parameterstep=parameterstep,
simulationstep=simulationstep,
auxfiler=auxfiler)
|
Save the control parameters of the |Model| object handled by
each |Element| object and, if given, the ones handled by the
given |Auxfiler| object.
|
373,295
|
def Shah(m, x, D, rhol, mul, kl, Cpl, P, Pc):
VL = m/(rhol*pi/4*D**2)
ReL = Reynolds(V=VL, D=D, rho=rhol, mu=mul)
Prl = Prandtl(Cp=Cpl, k=kl, mu=mul)
hL = turbulent_Dittus_Boelter(ReL, Prl)*kl/D
Pr = P/Pc
return hL*((1-x)**0.8 + 3.8*x**0.76*(1-x)**0.04/Pr**0.38)
|
r'''Calculates heat transfer coefficient for condensation
of a fluid inside a tube, as presented in [1]_ and again by the same
author in [2]_; also given in [3]_. Requires no properties of the gas.
Uses the Dittus-Boelter correlation for single phase heat transfer
coefficient, with a Reynolds number assuming all the flow is liquid.
.. math::
h_{TP} = h_L\left[(1-x)^{0.8} +\frac{3.8x^{0.76}(1-x)^{0.04}}
{P_r^{0.38}}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific interval [-]
D : float
Diameter of the channel [m]
rhol : float
Density of the liquid [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
P : float
Pressure of the fluid, [Pa]
Pc : float
Critical pressure of the fluid, [Pa]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ is well written and unambiguous as to how to apply this equation.
Examples
--------
>>> Shah(m=1, x=0.4, D=.3, rhol=800, mul=1E-5, kl=0.6, Cpl=2300, P=1E6, Pc=2E7)
2561.2593415479214
References
----------
.. [1] Shah, M. M. "A General Correlation for Heat Transfer during Film
Condensation inside Pipes." International Journal of Heat and Mass
Transfer 22, no. 4 (April 1, 1979): 547-56.
doi:10.1016/0017-9310(79)90058-9.
.. [2] Shah, M. M., Heat Transfer During Film Condensation in Tubes and
Annuli: A Review of the Literature, ASHRAE Transactions, vol. 87, no.
3, pp. 1086-1100, 1981.
.. [3] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991.
|
373,296
|
def push_log(self, info, level, *args, **kwargs):
log.log(level, info, *args, **kwargs)
|
Writes logs. To be fully implemented by subclasses.
:param info: Log message content.
:type info: unicode | str
:param level: Logging level.
:type level: int
:param args: Positional arguments to pass to logger.
:param kwargs: Keyword arguments to pass to logger.
|
373,297
|
def set_trunk_groups(self, intf, value=None, default=False, disable=False):
    if default:
        cmd = 'default switchport trunk group'
        return self.configure_interface(intf, cmd)
    if disable:
        cmd = 'no switchport trunk group'
        return self.configure_interface(intf, cmd)
    current_value = self.get(intf)['trunk_groups']
failure = False
value = make_iterable(value)
for name in set(value).difference(current_value):
if not self.add_trunk_group(intf, name):
failure = True
for name in set(current_value).difference(value):
if not self.remove_trunk_group(intf, name):
failure = True
return not failure
|
Configures the switchport trunk group value
Args:
intf (str): The interface identifier to configure.
value (str): The set of values to configure the trunk group
default (bool): Configures the trunk group default value
disable (bool): Negates all trunk group settings
Returns:
True if the config operation succeeds otherwise False
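Usage sketch (the `node` instance and trunk group names are assumptions):
>>> ok = node.set_trunk_groups('Ethernet1', value=['tg1', 'tg2'])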
|
373,298
|
def add_videos_to_playlist(self, access_token, playlist_id, video_ids):
    url = 'https://openapi.youku.com/v2/playlists/video/add.json'  # endpoint reconstructed from the linked API doc
    data = {
        'client_id': self.client_id,
        'access_token': access_token,
        'playlist_id': playlist_id,
        'video_ids': video_ids
    }
    r = requests.post(url, data=data)
    check_error(r)
    return r.json()['id']
|
doc: http://open.youku.com/docs/doc?id=75
|
373,299
|
def get(self, url=None, params=None, retry=True):
headers = self._gen_headers(self.access_token, url)
attempts = 1
while attempts <= HTTP_ATTEMPTS_MAX:
try:
res = requests.get(url,
headers=headers,
params=params,
timeout=15,
verify=self.certs)
res.raise_for_status()
return res.json()
except requests.exceptions.RequestException as e:
attempts += 1
if res.status_code in [400]:
raise e
elif retry and res.status_code in [403]:
self.relogin_oauth2()
|
Execute HTTP GET
|