375,200
|
def _to_dict(self):
    _dict = {}
    if hasattr(self, 'dialog_node') and self.dialog_node is not None:
        _dict['dialog_node'] = self.dialog_node
    if hasattr(self, 'description') and self.description is not None:
        _dict['description'] = self.description
    if hasattr(self, 'conditions') and self.conditions is not None:
        _dict['conditions'] = self.conditions
    if hasattr(self, 'parent') and self.parent is not None:
        _dict['parent'] = self.parent
    if hasattr(self,
               'previous_sibling') and self.previous_sibling is not None:
        _dict['previous_sibling'] = self.previous_sibling
    if hasattr(self, 'output') and self.output is not None:
        _dict['output'] = self.output._to_dict()
    if hasattr(self, 'context') and self.context is not None:
        _dict['context'] = self.context
    if hasattr(self, 'metadata') and self.metadata is not None:
        _dict['metadata'] = self.metadata
    if hasattr(self, 'next_step') and self.next_step is not None:
        _dict['next_step'] = self.next_step._to_dict()
    if hasattr(self, 'title') and self.title is not None:
        _dict['title'] = self.title
    if hasattr(self, 'node_type') and self.node_type is not None:
        _dict['type'] = self.node_type
    if hasattr(self, 'event_name') and self.event_name is not None:
        _dict['event_name'] = self.event_name
    if hasattr(self, 'variable') and self.variable is not None:
        _dict['variable'] = self.variable
    if hasattr(self, 'actions') and self.actions is not None:
        _dict['actions'] = [x._to_dict() for x in self.actions]
    if hasattr(self, 'digress_in') and self.digress_in is not None:
        _dict['digress_in'] = self.digress_in
    if hasattr(self, 'digress_out') and self.digress_out is not None:
        _dict['digress_out'] = self.digress_out
    if hasattr(self,
               'digress_out_slots') and self.digress_out_slots is not None:
        _dict['digress_out_slots'] = self.digress_out_slots
    if hasattr(self, 'user_label') and self.user_label is not None:
        _dict['user_label'] = self.user_label
    if hasattr(self, 'disabled') and self.disabled is not None:
        _dict['disabled'] = self.disabled
    if hasattr(self, 'created') and self.created is not None:
        _dict['created'] = datetime_to_string(self.created)
    if hasattr(self, 'updated') and self.updated is not None:
        _dict['updated'] = datetime_to_string(self.updated)
    return _dict
|
Return a json dictionary representing this model.
|
375,201
|
def aggregate_weights(weights, drop_date=False):
    dwts = pd.DataFrame(weights,
                        columns=["generic", "contract", "weight", "date"])
    dwts = dwts.pivot_table(index=["date", "contract"],
                            columns=["generic"], values="weight", fill_value=0)
    dwts = dwts.astype(float)
    dwts = dwts.sort_index()
    if drop_date:
        dwts.index = dwts.index.levels[-1]
    return dwts
|
Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the MultiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names.
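A minimal usage sketch (hypothetical sample data; pandas imported as ``pd``):

weights = [("CL1", "CLX16", 0.5, pd.Timestamp("2016-10-20")),
           ("CL1", "CLZ16", 0.5, pd.Timestamp("2016-10-20"))]
aggregate_weights(weights)
# -> one "CL1" column of 0.5 loadings, indexed by (date, contract)
aggregate_weights(weights, drop_date=True)
# -> same loadings, indexed by contract only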
|
375,202
|
def init(names, host=None, saltcloud_mode=False, quiet=False, **kwargs):
    path = kwargs.get('path', None)
    if quiet:
        log.warning("'quiet' argument is being deprecated."
                    ' Please migrate to --quiet')
    ret = {'comment': '', 'result': True}
    if host is None:
        ret['comment'] = 'A host must be provided'
        ret['result'] = False
        return ret
    if isinstance(names, six.string_types):
        names = names.split(',')
    if not isinstance(names, list):
        ret['comment'] = 'Container names are not formed as a list'
        ret['result'] = False
        return ret
    # check that the host is alive
    client = salt.client.get_local_client(__opts__['conf_file'])
    alive = False
    try:
        if client.cmd(host, 'test.ping', timeout=20).get(host, None):
            alive = True
    except (TypeError, KeyError):
        pass
    if not alive:
        ret['comment'] = 'Host {0} is not reachable'.format(host)
        ret['result'] = False
        return ret
    log.info('Searching for LXC Hosts')
    data = __salt__['lxc.list'](host, quiet=True, path=path)
    for host, containers in six.iteritems(data):
        for name in names:
            if name in sum(six.itervalues(containers), []):
                log.info(
                    'Container \'%s\' already exists on host \'%s\'',
                    name, host
                )
    if host not in data:
        ret['comment'] = 'Host \'{0}\' was not found'.format(host)
        ret['result'] = False
        return ret
    kw = salt.utils.args.clean_kwargs(**kwargs)
    pub_key = kw.get('pub_key', None)
    priv_key = kw.get('priv_key', None)
    explicit_auth = pub_key and priv_key
    approve_key = kw.get('approve_key', True)
    seeds = {}
    seed_arg = kwargs.get('seed', True)
    if approve_key and not explicit_auth:
        skey = salt.key.Key(__opts__)
        all_minions = skey.all_keys().get('minions', [])
        for name in names:
            seed = seed_arg
            if name in all_minions:
                try:
                    if client.cmd(name, 'test.ping', timeout=20).get(name, None):
                        seed = False
                except (TypeError, KeyError):
                    pass
            seeds[name] = seed
            kv = salt.utils.virt.VirtKey(host, name, __opts__)
            if kv.authorize():
                log.info('Container key will be preauthorized')
            else:
                ret['comment'] = 'Container key preauthorization failed'
                ret['result'] = False
                return ret
    log.info('Creating container(s) \'%s\' on host \'%s\'', names, host)
    cmds = []
    for name in names:
        args = [name]
        kw = salt.utils.args.clean_kwargs(**kwargs)
        if saltcloud_mode:
            kw = copy.deepcopy(kw)
            kw['name'] = name
            saved_kwargs = kw
            kw = client.cmd(
                host, 'lxc.cloud_init_interface', args + [kw],
                tgt_type='list', timeout=600).get(host, {})
            kw.update(saved_kwargs)
        name = kw.pop('name', name)
        # be sure not to seed an already-seeded host
        kw['seed'] = seeds.get(name, seed_arg)
        if not kw['seed']:
            kw.pop('seed_cmd', '')
        cmds.append(
            (host,
             name,
             client.cmd_iter(host, 'lxc.init', args, kwarg=kw, timeout=600)))
    done = ret.setdefault('done', [])
    errors = ret.setdefault('errors', _OrderedDict())
    for ix, acmd in enumerate(cmds):
        hst, container_name, cmd = acmd
        containers = ret.setdefault(hst, [])
        herrs = errors.setdefault(hst, _OrderedDict())
        serrs = herrs.setdefault(container_name, [])
        sub_ret = next(cmd)
        error = None
        if isinstance(sub_ret, dict) and host in sub_ret:
            j_ret = sub_ret[hst]
            container = j_ret.get('ret', {})
            if container and isinstance(container, dict):
                if not container.get('result', False):
                    error = container
            else:
                error = 'Invalid return for {0}: {1} {2}'.format(
                    container_name, container, sub_ret)
        else:
            error = sub_ret
            if not error:
                error = 'unknown error (no return)'
        if error:
            ret['result'] = False
            serrs.append(error)
        else:
            container['container_name'] = name
            containers.append(container)
            done.append(container)
    ret['result'] = bool(len(done))
    for container in done:
        container_name = container['container_name']
        key = os.path.join(__opts__['pki_dir'], 'minions', container_name)
        if explicit_auth:
            fcontent = ''
            if os.path.exists(key):
                with salt.utils.files.fopen(key) as fic:
                    fcontent = salt.utils.stringutils.to_unicode(fic.read()).strip()
            pub_key = salt.utils.stringutils.to_unicode(pub_key)
            if pub_key.strip() != fcontent:
                with salt.utils.files.fopen(key, 'w') as fic:
                    fic.write(salt.utils.stringutils.to_str(pub_key))
                    fic.flush()
        mid = j_ret.get('mid', None)
        if not mid:
            continue
        def testping(**kw):
            mid_ = kw['mid']
            ping = client.cmd(mid_, 'test.ping', timeout=20)
            time.sleep(1)
            if ping:
                return 'OK'
            raise Exception('Unresponsive {0}'.format(mid_))
        ping = salt.utils.cloud.wait_for_fun(testping, timeout=21, mid=mid)
        if ping != 'OK':
            ret['ping_status'] = False
            ret['result'] = False
    if not done:
        ret['result'] = False
    if not quiet:
        __jid_event__.fire_event({'message': ret}, 'progress')
    return ret
|
Initialize a new container
.. code-block:: bash
salt-run lxc.init name host=minion_id [cpuset=cgroups_cpuset] \\
[cpushare=cgroups_cpushare] [memory=cgroups_memory] \\
[template=lxc_template_name] [clone=original name] \\
[profile=lxc_profile] [network_profile=network_profile] \\
[nic=network_profile] [nic_opts=nic_opts] \\
[start=(true|false)] [seed=(true|false)] \\
[install=(true|false)] [config=minion_config] \\
[snapshot=(true|false)]
names
Name of the containers, supports a single name or a comma delimited
list of names.
host
Minion on which to initialize the container **(required)**
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
saltcloud_mode
init the container with the saltcloud opts format instead
See lxc.init_interface module documentation
cpuset
cgroups cpuset.
cpushare
cgroups cpu shares.
memory
cgroups memory limit, in MB
.. versionchanged:: 2015.5.0
If no value is passed, no limit is set. In earlier Salt versions,
not passing this value causes a 1024MB memory limit to be set, and
it was necessary to pass ``memory=0`` to set no limit.
template
Name of LXC template on which to base this container
clone
Clone this container from an existing container
profile
A LXC profile (defined in config or pillar).
network_profile
Network profile to use for the container
.. versionadded:: 2015.5.2
nic
.. deprecated:: 2015.5.0
Use ``network_profile`` instead
nic_opts
Extra options for network interfaces. E.g.:
``{"eth0": {"mac": "aa:bb:cc:dd:ee:ff", "ipv4": "10.1.1.1", "ipv6": "2001:db8::ff00:42:8329"}}``
start
Start the newly created container.
seed
Seed the container with the minion config and autosign its key.
Default: true
install
If salt-minion is not already installed, install it. Default: true
config
Optional config parameters. By default, the id is set to
the name of the container.
|
375,203
|
def sources_to_nr_vars(sources):
sources = default_sources(**sources)
try:
return OrderedDict((SOURCE_VAR_TYPES[name], nr)
for name, nr in sources.iteritems())
except KeyError as e:
        raise KeyError(('Invalid source type %s. '
                        'Valid source types are %s') % (e, SOURCE_VAR_TYPES.keys()))
|
Converts a source type to number of sources mapping into
a source numbering variable to number of sources mapping.
If, for example, we have 'point', 'gaussian' and 'sersic'
source types, then passing the following dict as an argument
sources_to_nr_vars({'point':10, 'gaussian': 20})
will return an OrderedDict
{'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 }
|
375,204
|
def connect_from(self, vertex, weight=1):
for edge in self.edges_in:
if vertex == edge.vertex_out:
return edge
return Edge(vertex, self, weight)
|
Connect another vertex to this one.
Args:
vertex (Vertex): vertex to connect from.
weight (int): weight of the edge.
Returns:
Edge: the newly created edge.
|
375,205
|
def f_measure(precision, recall, beta=1.0):
if precision == 0 and recall == 0:
return 0.0
return (1 + beta**2)*precision*recall/((beta**2)*precision + recall)
|
Compute the f-measure from precision and recall scores.
Parameters
----------
precision : float in (0, 1]
Precision
recall : float in (0, 1]
Recall
beta : float > 0
Weighting factor for f-measure
(Default value = 1.0)
Returns
-------
f_measure : float
The weighted f-measure
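A quick worked example with the default ``beta``: precision 0.5 and recall 1.0
give F1 = 2*0.5*1.0/(0.5 + 1.0) ≈ 0.667.
>>> round(f_measure(0.5, 1.0), 3)
0.667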
|
375,206
|
def get_distinct_values_from_cols(self, l_col_list):
uniq_vals = []
for l_col_name in l_col_list:
uniq_vals.append(set(self.get_col_data_by_name(l_col_name)))
if len(l_col_list) == 0:
return []
    elif len(l_col_list) == 1:
        return sorted(uniq_vals[0])
elif len(l_col_list) == 2:
res = []
res = [(a, b) for a in uniq_vals[0] for b in uniq_vals[1]]
return res
else:
print ("TODO ")
return -44
|
returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
so it is not guaranteed to have values in each case.
|
375,207
|
def _create_hosting_device_templates_from_config(self):
hdt_dict = config.get_specific_config()
attr_info = ciscohostingdevicemanager.RESOURCE_ATTRIBUTE_MAP[
ciscohostingdevicemanager.DEVICE_TEMPLATES]
adm_context = bc.context.get_admin_context()
for hdt_uuid, kv_dict in hdt_dict.items():
hdt_uuid = config.uuidify(hdt_uuid)
try:
self.get_hosting_device_template(adm_context, hdt_uuid)
is_create = False
except ciscohostingdevicemanager.HostingDeviceTemplateNotFound:
is_create = True
        kv_dict['id'] = hdt_uuid
        kv_dict['tenant_id'] = self.l3_tenant_id()
config.verify_resource_dict(kv_dict, True, attr_info)
hdt = {ciscohostingdevicemanager.DEVICE_TEMPLATE: kv_dict}
try:
if is_create:
self.create_hosting_device_template(adm_context, hdt)
else:
                self.update_hosting_device_template(adm_context,
                                                    kv_dict['id'], hdt)
except n_exc.NeutronException:
with excutils.save_and_reraise_exception():
                LOG.error('Failed to create hosting device template %s',
                          hdt_uuid)
|
To be called late during plugin initialization so that any hosting
device templates defined in the config file are properly inserted into
the DB.
|
375,208
|
def send_response(self, response):
response_bytes = response.encode(config.CODEC)
        log.debug("About to send response: %r", response_bytes)
self.socket.send(response_bytes)
|
Send a unicode object as reply to the most recently-issued command
|
375,209
|
def transform_cb(self, setting, value):
        self.make_callback('transform')  # callback name reconstructed; the original literal was lost
whence = 0
self.redraw(whence=whence)
|
Handle callback related to changes in transformations.
|
375,210
|
def solution(self, x0, y0):
def soln(x):
if numpy.size(x) > 1:
x = [soln.x] + list(x)
ans = self(soln.y, interval=x)
soln.x = x[-1]
soln.y = ans[-1]
return ans
else:
soln.y = self(soln.y, interval=(soln.x, x))
soln.x = x
return soln.y
soln.x = x0
soln.y = y0
return soln
|
Create a solution function ``y(x)`` such that ``y(x0) = y0``.
A list of solution values ``[y(x0), y(x1) ...]`` is returned if the
function is called with a list ``[x0, x1 ...]`` of ``x`` values.
|
375,211
|
def cmd_create(self, name, auto=False):
    LOGGER.setLevel('INFO')
    LOGGER.propagate = 0
    # config key names reconstructed; the original literals were lost in extraction
    router = Router(self.database,
                    migrate_dir=self.app.config['MIGRATIONS_DIR'],
                    migrate_table=self.app.config['MIGRATIONS_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
|
Create a new migration.
|
375,212
|
def astra_parallel_3d_geom_to_vec(geometry):
angles = geometry.angles
mid_pt = geometry.det_params.mid_pt
vectors = np.zeros((angles.shape[-1], 12))
vectors[:, 0:3] = -geometry.det_to_src(angles, mid_pt)
vectors[:, 3:6] = geometry.det_point_position(angles, mid_pt)
det_axes = moveaxis(geometry.det_axes(angles), -2, 0)
px_sizes = geometry.det_partition.cell_sides
vectors[:, 9:12] = det_axes[0] * px_sizes[0]
vectors[:, 6:9] = det_axes[1] * px_sizes[1]
new_ind = []
for i in range(4):
new_ind += [2 + 3 * i, 1 + 3 * i, 0 + 3 * i]
vectors = vectors[:, new_ind]
return vectors
|
Create vectors for ASTRA projection geometries from ODL geometry.
The 3D vectors are used to create an ASTRA projection geometry for
parallel beam geometries, see ``'parallel3d_vec'`` in the
`ASTRA projection geometry documentation`_.
Each row of the returned vectors corresponds to a single projection
and consists of ::
(rayX, rayY, rayZ, dX, dY, dZ, uX, uY, uZ, vX, vY, vZ)
with
- ``ray``: the ray direction
- ``d`` : the center of the detector
- ``u`` : the vector from detector pixel ``(0,0)`` to ``(0,1)``
- ``v`` : the vector from detector pixel ``(0,0)`` to ``(1,0)``
Parameters
----------
geometry : `Geometry`
ODL projection geometry from which to create the ASTRA geometry.
Returns
-------
vectors : `numpy.ndarray`
Array of shape ``(num_angles, 12)`` containing the vectors.
References
----------
.. _ASTRA projection geometry documentation:
http://www.astra-toolbox.com/docs/geom3d.html#projection-geometries
|
375,213
|
def events_system(self):
response = self._get(url.events_system)
self._check_response(response, 200)
return self._create_response(response).get("events")
|
Get all system events. Uses GET to /events/system interface.
:Returns: (list) Events
|
375,214
|
def translate_changes(initial_change):
agenda = [initial_change]
result = []
while agenda:
change = agenda.pop(0)
if isinstance(change, rope_change.ChangeSet):
agenda.extend(change.changes)
elif isinstance(change, rope_change.ChangeContents):
result.append({: ,
: change.resource.real_path,
: change.new_contents,
: change.get_description()})
elif isinstance(change, rope_change.CreateFile):
result.append({: ,
: ,
: change.resource.real_path})
elif isinstance(change, rope_change.CreateFolder):
result.append({: ,
: ,
: change.resource.real_path})
elif isinstance(change, rope_change.MoveResource):
result.append({: ,
: (
if change.new_resource.is_folder()
else ),
: change.resource.real_path,
: change.new_resource.real_path})
elif isinstance(change, rope_change.RemoveResource):
if change.resource.is_folder():
result.append({: ,
: ,
: change.resource.real_path})
else:
result.append({: ,
: ,
: change.resource.real_path})
return result
|
Translate rope.base.change.Change instances to dictionaries.
See Refactor.get_changes for an explanation of the resulting
dictionary.
|
375,215
|
def object_as_dict(obj):
return {c.key: getattr(obj, c.key)
for c in inspect(obj).mapper.column_attrs}
|
Turn an SQLAlchemy model into a dict of field names and values.
Based on https://stackoverflow.com/a/37350445/1579058
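A minimal usage sketch, assuming a hypothetical declarative model ``User``
with ``id`` and ``name`` columns and an open ``session``:

from sqlalchemy import inspect  # used inside object_as_dict

user = session.query(User).first()
object_as_dict(user)  # -> e.g. {'id': 1, 'name': 'alice'}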
|
375,216
|
def p_arr_decl_initialized(p):
def check_bound(boundlist, remaining):
lineno = p.lineno(8)
if not boundlist:
if not isinstance(remaining, list):
return True
for row in remaining:
if not check_bound(boundlist[1:], row):
return False
return True
if p[8] is None:
p[0] = None
return
if check_bound(p[4].children, p[8]):
id_, lineno = p[2][0]
SYMBOL_TABLE.declare_array(id_, lineno, p[6], p[4], default_value=p[8])
p[0] = None
|
var_arr_decl : DIM idlist LP bound_list RP typedef RIGHTARROW const_vector
| DIM idlist LP bound_list RP typedef EQ const_vector
|
375,217
|
def remove(self, nodes):
nodes = nodes if isinstance(nodes, list) else [nodes]
for node in nodes:
k = self.id(node)
self.edges = list(filter(lambda e: e[0] != k and e[1] != k, self.edges))
del self.nodes[k]
|
Remove one or more nodes and their edges.
|
375,218
|
def indexXY(self, index):
rect = self.visualRect(index)
return rect.x(), rect.y()
|
Returns the top left coordinates of the item for the given index
:param index: index for the item
:type index: :qtdoc:`QModelIndex`
:returns: (int, int) -- (x, y) view coordinates of item
|
375,219
|
def is_contiguous(self):
if self._keyframe is None:
        raise RuntimeError('keyframe not set')
if self._keyframe.is_contiguous:
return self._offsetscounts[0][0], self._keyframe.is_contiguous[1]
return None
|
Return offset and size of contiguous data, else None.
|
375,220
|
def __serve_forever(self):
thread_list = {}
    while not self.screen.is_end:
        logger.debug("Iter through the following server list: {}".format(self.get_servers_list()))
        for v in self.get_servers_list():
            key = v["key"]
            thread = thread_list.get(key, None)
            if thread is None or not thread.is_alive():
thread = threading.Thread(target=self.__update_stats, args=[v])
thread_list[key] = thread
thread.start()
if self.screen.active_server is None:
self.screen.update(self.get_servers_list())
else:
self.__display_server(self.get_servers_list()[self.screen.active_server])
for thread in thread_list.values():
thread.join()
|
Main client loop.
|
375,221
|
def _proxy(self):
if self._context is None:
self._context = AvailablePhoneNumberCountryContext(
self._version,
            account_sid=self._solution['account_sid'],
            country_code=self._solution['country_code'],
)
return self._context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext
|
375,222
|
def submit_all(self):
for args in self.task_args:
self.submit(*args)
return self.get_results()
|
:returns: an IterResult object
|
375,223
|
def fast_kde(x, y, gridsize=(200, 200), extents=None, nocorrelation=False, weights=None):
    x, y = np.asarray(x), np.asarray(y)
    x, y = np.squeeze(x), np.squeeze(y)
    if x.size != y.size:
        raise ValueError('Input x & y arrays must be the same size!')
    nx, ny = gridsize
    n = x.size
    if weights is None:
        # Default: equally weight all samples
        weights = np.ones(n)
    else:
        weights = np.squeeze(np.asarray(weights))
        if weights.size != x.size:
            raise ValueError(
                'Input weights must be a vector the same length as x & y!')
    if extents is None:
        xmin, xmax = x.min(), x.max()
        ymin, ymax = y.min(), y.max()
    else:
        xmin, xmax, ymin, ymax = list(map(float, extents))
    dx = (xmax - xmin) / (nx - 1)
    dy = (ymax - ymin) / (ny - 1)
    # Bin the points onto the grid (a weighted 2D histogram via a sparse matrix)
    xyi = np.vstack((x, y)).T
    xyi -= [xmin, ymin]
    xyi /= [dx, dy]
    xyi = np.floor(xyi, xyi).T
    grid = sp.sparse.coo_matrix((weights, xyi), shape=(nx, ny)).toarray()
    cov = np.cov(xyi)
    if nocorrelation:
        cov[1, 0] = 0
        cov[0, 1] = 0
    # Scott's rule of thumb for the kernel bandwidth
    scotts_factor = np.power(n, -1.0 / 6)
    std_devs = np.diag(np.sqrt(cov))
    kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs).astype(int)
    inv_cov = np.linalg.inv(cov * scotts_factor**2)
    xx = np.arange(kern_nx, dtype=float) - kern_nx / 2.0
    yy = np.arange(kern_ny, dtype=float) - kern_ny / 2.0
    xx, yy = np.meshgrid(xx, yy)
    # Evaluate the gaussian kernel on its support
    kernel = np.vstack((xx.flatten(), yy.flatten()))
    kernel = np.dot(inv_cov, kernel) * kernel
    kernel = np.sum(kernel, axis=0) / 2.0
    kernel = np.exp(-kernel)
    kernel = kernel.reshape((kern_ny, kern_nx))
    # Convolve the histogram with the kernel to get the KDE
    grid = sp.signal.convolve2d(grid, kernel, mode='same', boundary='fill').T
    return grid
|
Performs a gaussian kernel density estimate over a regular grid using a
convolution of the gaussian kernel with a 2D histogram of the data.
This function is typically several orders of magnitude faster than
scipy.stats.kde.gaussian_kde for large (>1e7) numbers of points and
produces an essentially identical result.
Input:
x: The x-coords of the input data points
y: The y-coords of the input data points
gridsize: (default: 200x200) A (nx,ny) tuple of the size of the output
grid
extents: (default: extent of input data) A (xmin, xmax, ymin, ymax)
tuple of the extents of output grid
nocorrelation: (default: False) If True, the correlation between the
x and y coords will be ignored when performing the KDE.
weights: (default: None) An array of the same shape as x & y that
weighs each sample (x_i, y_i) by each value in weights (w_i).
Defaults to an array of ones the same size as x & y.
Output:
A gridded 2D kernel density estimate of the input points.
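A minimal usage sketch with synthetic data:

import numpy as np

x = np.random.randn(10000)
y = 0.5 * x + 0.5 * np.random.randn(10000)
grid = fast_kde(x, y, gridsize=(128, 128))  # density estimate on a 128x128 grid
# display with e.g. matplotlib's plt.imshow(grid)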
|
375,224
|
def zone(self) -> Optional[str]:
if self._device_category == DC_BASEUNIT:
return None
    return '{}-{}'.format(self._group_number, self._unit_number)  # format reconstructed; the exact literal was lost in extraction
|
Zone the device is assigned to.
|
375,225
|
def compiled_quil(self):
prog = self._raw.get("program", {}).get("compiled-quil", None)
if prog is not None:
return parse_program(prog)
else:
        if self._raw['status'] == 'ERROR':
return self.result()
|
If the Quil program associated with the Job was compiled (e.g., to translate it to the
QPU's natural gateset) return this compiled program.
:rtype: Optional[Program]
|
375,226
|
def line(self, lines):
shapeType = POLYLINE
self._shapeparts(parts=lines, shapeType=shapeType)
|
Creates a POLYLINE shape.
Lines is a collection of lines, each made up of a list of xy values.
|
375,227
|
def readlink(self, path):
path = self._adjust_cwd(path)
    self._log(DEBUG, 'readlink(%r)' % path)
    t, msg = self._request(CMD_READLINK, path)
    if t != CMD_NAME:
        raise SFTPError('Expected name response')
    count = msg.get_int()
    if count == 0:
        return None
    if count != 1:
        raise SFTPError('Readlink returned %d results' % count)
return _to_unicode(msg.get_string())
|
Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str
|
375,228
|
def write_xml(xml, output_file=None):
gen_filename = "requirements-{:%Y%m%d%H%M%S}.xml".format(datetime.datetime.now())
utils.write_xml(xml, output_loc=output_file, filename=gen_filename)
|
Outputs the XML content into a file.
|
375,229
|
def missing_parameter_values(self, parameter_values):
if not self._is_valid_parameter_values(parameter_values):
raise InvalidParameterValues("Parameter values are required to process a policy template")
return list(set(self.parameters.keys()) - set(parameter_values.keys()))
|
Checks if the given input contains values for all parameters used by this template
:param dict parameter_values: Dictionary of values for each parameter used in the template
:return list: List of names of parameters that are missing.
:raises InvalidParameterValues: When parameter values is not a valid dictionary
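For example, if ``self.parameters`` declares ``RoleName`` and ``Stage`` but the caller passes only ``{'RoleName': 'admin'}``, the method returns ``['Stage']``.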
|
375,230
|
def getDataset(self, itemId):
    if self._url.lower().find('datasets') > -1:
url = self._url
else:
url = self._url + "/datasets"
return OpenDataItem(url=url,
itemId=itemId,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
|
gets a dataset class
|
375,231
|
def get_profile(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "get_profile" not in self._inner_api_calls:
self._inner_api_calls[
"get_profile"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_profile,
default_retry=self._method_configs["GetProfile"].retry,
default_timeout=self._method_configs["GetProfile"].timeout,
client_info=self._client_info,
)
request = profile_service_pb2.GetProfileRequest(name=name)
return self._inner_api_calls["get_profile"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Gets the specified profile.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ProfileServiceClient()
>>>
>>> name = client.profile_path('[PROJECT]', '[TENANT]', '[PROFILE]')
>>>
>>> response = client.get_profile(name)
Args:
name (str): Required.
Resource name of the profile to get.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/profiles/{profile\_id}",
for example, "projects/api-test-project/tenants/foo/profiles/bar".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Profile` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
375,232
|
def get_collection_instance(klass, api_client=None, request_api=True, **kwargs):
    _type = klass
    if api_client is None and request_api:
        api_client = api.APIClient()
    if isinstance(klass, dict):
        _type = klass['type']
    obj = CollectionResource(_type, api_client, **kwargs)
    return obj
def sort(self, direction="ASC"):
    direction = direction.upper()
    if direction in ['ASC', 'DESC']:
        self.sort = direction
    else:
        raise SaleskingException("SORT_INVALIDDIRECTION",
                                 "Invalid sorting direction - please choose either ASC or DESC")
def sort_by(self, property):
    seek = u"sort_by"
    # schema path reconstructed; the original subscripts were lost in extraction
    if seek in self.schema['links']['instances']['properties']:
        if property in self.schema['links']['instances']['properties'][seek]['enum']:
            self.sort_by = property
            return self
        else:
            raise SaleskingException("SORTBY_INVALIDPROPERTY",
                                     "Invalid property for sorting")
    else:
        raise SaleskingException("SORTBY_CANNOTSORT",
                                 "object type doesn't support sorting")
|
Instantiates the collection lookup of the JSON type klass
:param klass: json file name
:param api_client: transportation api
:param request_api: if True uses the default APIClient
|
375,233
|
def stickers_translate_get(self, api_key, s, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
return self.stickers_translate_get_with_http_info(api_key, s, **kwargs)
else:
(data) = self.stickers_translate_get_with_http_info(api_key, s, **kwargs)
return data
|
Sticker Translate Endpoint
The translate API draws on search, but uses the Giphy `special sauce` to handle translating from one vocabulary to another. In this case, words and phrases to GIFs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.stickers_translate_get(api_key, s, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: Giphy API Key. (required)
:param str s: Search term. (required)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
|
375,234
|
def _competition(self, x):
index = 0
minimum = euclidean_distance_square(self._weights[0], x)
for i in range(1, self._size, 1):
candidate = euclidean_distance_square(self._weights[i], x)
if candidate < minimum:
index = i
minimum = candidate
return index
|
!
@brief Calculates neuron winner (distance, neuron index).
@param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
@return (uint) Returns index of neuron that is winner.
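A minimal standalone sketch of the same winner-take-all search, with a plain
reimplementation of the ``euclidean_distance_square`` helper:

def euclidean_distance_square(a, b):
    return sum((u - v) ** 2 for u, v in zip(a, b))

weights = [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]]
x = [0.9, 1.2]
winner = min(range(len(weights)), key=lambda i: euclidean_distance_square(weights[i], x))
assert winner == 1  # the second neuron is closest to x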
|
375,235
|
def _kill_process(self, pid, cgroups=None, sig=signal.SIGKILL):
if self._user is not None:
if not cgroups:
cgroups = find_cgroups_of_process(pid)
pids = cgroups.get_all_tasks(FREEZER)
try:
if pid == next(pids):
pid = next(pids)
except StopIteration:
pass
finally:
pids.close()
self._kill_process0(pid, sig)
|
Try to send signal to given process, either directly or with sudo.
Because we cannot send signals to the sudo process itself,
this method checks whether the target is the sudo process
and redirects the signal to sudo's child in this case.
|
375,236
|
def _read_register(self, reg):
self.buf[0] = reg
with self.i2c_device as i2c:
i2c.write(self.buf, end=1, stop=False)
i2c.readinto(self.buf, end=2)
return self.buf[0] << 8 | self.buf[1]
|
Read 16 bit register value.
|
375,237
|
def remove_cache(self, namespace, key=None):
    # table and column names reconstructed; the original SQL literals were lost
    if key is None:
        self.cursor.execute(
            'DELETE FROM cache WHERE namespace = ?', (namespace,))
    else:
        self.cursor.execute(
            'DELETE FROM cache WHERE namespace = ? AND key = ?',
            (namespace, key))
|
Remove all cached values for the specified namespace,
optionally specifying a key
|
375,238
|
def coroutine(func):
def decorator(*args, **kwargs):
generator = func(*args, **kwargs)
next(generator)
return lambda *args: generator.send(args)
return decorator
|
A decorator to wrap a generator function into a callable interface.
>>> @coroutine
... def sum(count):
... sum = 0
... for _ in range(0, count):
... # note that generator arguments are passed as a tuple, hence `num, = ...` instead of `num = ...`
... num, = yield sum
... sum += num
... yield sum
...
>>> add = sum(2)
>>> add(2)
2
>>> add(3)
5
>>> add(4)
Traceback (most recent call last):
...
StopIteration
As you can see, this lets you keep state between calls easily, as expected from a generator, while calling the
function looks like a function. The same without `@coroutine` would look like this:
>>> def sum(count):
... sum = 0
... for _ in range(0, count):
... num = yield sum
... sum += num
... yield sum
...
>>> add = sum(2)
>>> next(add) # initial next call is necessary
0
>>> add.send(2) # to call the function, next or send must be used
2
>>> add.send(3)
5
>>> add.send(4)
Traceback (most recent call last):
...
StopIteration
Here is an example that shows how to translate traditional functions to use this decorator:
>>> def foo(a, b):
... # do some foo
... return a + b
...
>>> def bar(c):
... # do some bar
... return 2*c
...
>>> foo(1, 2)
3
>>> bar(3)
6
>>> @coroutine
... def func_maker():
... a, b = yield
... # do some foo
... c, = yield foo(a, b)
... # do some bar
... yield bar(c)
...
>>> func_once = func_maker()
>>> func_once(1, 2)
3
>>> func_once(3)
6
The two differences are that a) using traditional functions, func1 and func2 don't share any context and b) using
the decorator, both calls use the same function name, and calling the function is limited to twice (in this case).
|
375,239
|
def endswith(self, search_str):
    for entry in reversed(list(open(self._jrnl_file, 'r'))[-5:]):
if search_str in entry:
return True
return False
|
Check whether the provided string exists in Journal file.
Only checks the last 5 lines of the journal file. This method is
usually used when tracking a journal from an active Revit session.
Args:
search_str (str): string to search for
Returns:
bool: if True the search string is found
|
375,240
|
def purge(vm_, dirs=False, removables=None, **kwargs):
    # NOTE: body reconstructed around the string literals that survived extraction
    conn = __get_conn(**kwargs)
    dom = _get_domain(conn, vm_)
    salt.utils.versions.warn_until(
        'Sodium',
        'removables argument default value is True, but will be changed to '
        'False by default in {version}. Please set to True to maintain the '
        'current behavior in the future.')
    if removables is None:
        removables = True
    disks = _get_disks(dom)
    if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown' \
            and dom.destroy() != 0:
        return False
    directories = set()
    for disk in disks:
        if not removables and disks[disk]['type'] in ['cdrom', 'floppy']:
            continue
        elif disks[disk].get('zfs', False):
            # TODO create solution for 'dataset is busy'
            time.sleep(3)
            fs_name = disks[disk]['file'][len('/dev/zvol/'):]
            log.info('Destroying VM ZFS volume %s', fs_name)
            __salt__['zfs.destroy'](name=fs_name, force=True)
        else:
            os.remove(disks[disk]['file'])
            directories.add(os.path.dirname(disks[disk]['file']))
    if dirs:
        for dir_ in directories:
            shutil.rmtree(dir_)
    if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
        # This flag is only available in libvirt 1.2.8+
        try:
            dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
        except libvirt.libvirtError:
            dom.undefine()
    else:
        dom.undefine()
    conn.close()
    return True
|
Recursively destroy and delete a persistent virtual machine, pass True for
dirs to also delete the directories containing the virtual machine disk
images - USE WITH EXTREME CAUTION!
Pass removables=False to avoid deleting cdrom and floppy images. To avoid
disruption, the default but dangerous value is True. This will be changed
to the safer False default value in Sodium.
:param vm_: domain name
:param dirs: pass True to remove containing directories
:param removables: pass True to remove removable devices
.. versionadded:: 2019.2.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.purge <domain> removables=False
|
375,241
|
def _infer_all_output_dims(self, inputs):
batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
out_channels = (self.output_channels,)
if self._n == 1:
out_shape = (1,) + self.output_shape
else:
out_shape = self.output_shape
if self._data_format.startswith("NC"):
out_shape_tuple = out_channels + out_shape
elif self._data_format.startswith("N") and self._data_format.endswith("C"):
out_shape_tuple = out_shape + out_channels
output_shape = tf.concat([batch_size, out_shape_tuple], 0)
return output_shape
|
Calculate the output shape for `inputs` after a deconvolution.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
Returns:
output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
|
375,242
|
def more_than_one_error(self, field):
    msg = 'More than one {0} defined.'.format(field)  # message reconstructed; the original literal was lost
self.logger.log(msg)
self.error = True
|
Logs a 'more than one' error.
``field`` is the field/property that has more than one definition.
|
375,243
|
def from_dynacRepr(cls, pynacRepr):
pynacList = pynacRepr[1][0]
L = float(pynacList[3])
TTF = float(pynacList[4])
TTFprime = float(pynacList[5])
TTFprimeprime = float(pynacList[13])
EField = float(pynacList[10])
phase = float(pynacList[11])
F = float(pynacList[14])
atten = float(pynacList[15])
gap = cls(L, TTF, TTFprime, TTFprimeprime, EField, phase, F, atten)
    gap.gapID = Param(val=int(pynacList[0]), unit=None)
    # units below reconstructed; the original literals were lost in extraction
    gap.energy = Param(val=float(pynacList[1]), unit='MeV')
    gap.beta = Param(val=float(pynacList[2]), unit=None)
    gap.S = Param(val=float(pynacList[6]), unit=None)
    gap.SP = Param(val=float(pynacList[7]), unit=None)
    gap.quadLength = Param(val=float(pynacList[8]), unit='cm')
    gap.quadStrength = Param(val=float(pynacList[9]), unit='T/m')
    gap.accumLen = Param(val=float(pynacList[12]), unit='cm')
return gap
|
Construct an ``AccGap`` instance from the Pynac lattice element
|
375,244
|
def distrib_release():
    with settings(hide('running', 'stdout')):
        kernel = (run('uname -s') or '').strip().lower()
        if kernel == LINUX:
            return run('lsb_release -r --short')
        elif kernel == SUNOS:
            return run('uname -v')
|
Get the release number of the distribution.
Example::
from burlap.system import distrib_id, distrib_release
if distrib_id() == 'CentOS' and distrib_release() == '6.1':
print(u"CentOS 6.2 has been released. Please upgrade.")
|
375,245
|
def t_IDENTIFER(self, t):
    r'\#?[a-zA-Z_][a-zA-Z_0-9]*'
    t.type = SpecParser.reserved.get(t.value, 'IDENTIFER')
    return t
|
r'\#?[a-zA-Z_][a-zA-Z_0-9]*
|
375,246
|
def write_document(self, document: BioCDocument):
tree = self.encoder.encode(document)
self.__writer.send(tree)
|
Encode and write a single document.
|
375,247
|
def get_hash(fName, readSize, dire=pDir()):
if not fileExists(fName, dire):
return -1
readSize = readSize * 1024
fName = os.path.join(dire, fName)
    with open(fName, 'rb') as f:
size = os.path.getsize(fName)
if size < readSize * 2:
return -1
data = f.read(readSize)
f.seek(-readSize, os.SEEK_END)
data += f.read(readSize)
return md5(data).hexdigest()
|
Creates an MD5 fingerprint of a file by hashing its first and last ``readSize`` KiB.
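For example (``pDir`` and ``fileExists`` are the module's own helpers; the file name is hypothetical):

h = get_hash("movie.mkv", readSize=64)  # MD5 over the first and last 64 KiB
# h == -1 if the file is missing or smaller than 2 * readSize KiB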
|
375,248
|
def redact_secrets(line):
def redact(match):
if match.group(2) in SECRET_WHITELIST:
return match.group(0)
        return match.group(1) + '<redacted>'  # replacement token reconstructed; the original literal was lost
return SECRET_PATTERN.sub(redact, line)
|
Returns a sanitized string for any ``line`` that looks like it contains a
secret (i.e. matches SECRET_PATTERN).
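A minimal sketch of how the pieces fit together; the pattern, whitelist, and
replacement token below are illustrative assumptions, not the module's real values:

import re

SECRET_WHITELIST = frozenset(["public_key"])  # assumed whitelist
SECRET_PATTERN = re.compile(r"((\w*(?:secret|token|password|key)\w*)\s*[=:]\s*)\S+", re.I)

redact_secrets("api_token = abc123")  # -> 'api_token = <redacted>'
redact_secrets("public_key = xyz")    # unchanged: the key is whitelisted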
|
375,249
|
def pager(__text: str, *, pager: Optional[str] = 'less'):
if pager:
run([pager, ], input=__text.encode())
else:
print(__text)
|
Pass output through pager.
See :manpage:`less(1)`, if you wish to configure the default pager. For
example, you may wish to check ``FRSX`` options.
Args:
__text: Text to page
pager: Pager to use
|
375,250
|
def has_active_subscription(self, plan=None):
if plan is None:
valid_subscriptions = self._get_valid_subscriptions()
if len(valid_subscriptions) == 0:
return False
elif len(valid_subscriptions) == 1:
return True
else:
raise TypeError(
"plan cannot be None if more than one valid subscription exists for this customer."
)
else:
if isinstance(plan, StripeModel):
plan = plan.id
return any(
[
subscription.is_valid()
for subscription in self.subscriptions.filter(plan__id=plan)
]
)
|
Checks to see if this customer has an active subscription to the given plan.
:param plan: The plan for which to check for an active subscription. If plan is None and
there exists only one active subscription, this method will check if that subscription
is valid. Calling this method with no plan and multiple valid subscriptions for this customer will
throw an exception.
:type plan: Plan or string (plan ID)
:returns: True if there exists an active subscription, False otherwise.
:throws: TypeError if ``plan`` is None and more than one active subscription exists for this customer.
|
375,251
|
def run(self):
t_last_click = -1
while True:
d = self.device.read(13)
if d is not None and self._enabled:
if d[0] == 1:
self.y = convert(d[1], d[2])
self.x = convert(d[3], d[4])
self.z = convert(d[5], d[6]) * -1.0
self.roll = convert(d[7], d[8])
self.pitch = convert(d[9], d[10])
self.yaw = convert(d[11], d[12])
self._control = [
self.x,
self.y,
self.z,
self.roll,
self.pitch,
self.yaw,
]
elif d[0] == 3:
if d[1] == 1:
t_click = time.time()
elapsed_time = t_click - t_last_click
t_last_click = t_click
self.single_click_and_hold = True
if d[1] == 0:
self.single_click_and_hold = False
if d[1] == 2:
self._reset_state = 1
self._enabled = False
self._reset_internal_state()
|
Listener method that keeps pulling new messages.
|
375,252
|
def count_of_certain_kind(kind):
recs = TabPost.select().where(TabPost.kind == kind)
return recs.count()
|
Get the count of certain kind.
|
375,253
|
def browse(fileNames=None,
inspectorFullName=None,
select=None,
profile=DEFAULT_PROFILE,
resetProfile=False,
resetAllProfiles=False,
resetRegistry=False):
from argos.qt import QtWidgets, QtCore
from argos.application import ArgosApplication
from argos.repo.testdata import createArgosTestData
try:
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
except Exception as ex:
logger.debug("AA_UseHighDpiPixmaps not available in PyQt4: {}".format(ex))
argosApp = ArgosApplication()
if resetProfile:
argosApp.deleteProfile(profile)
if resetAllProfiles:
argosApp.deleteAllProfiles()
if resetRegistry:
argosApp.deleteRegistries()
argosApp.loadOrInitRegistries()
argosApp.loadFiles(fileNames)
if DEBUGGING:
argosApp.repo.insertItem(createArgosTestData())
argosApp.loadProfile(profile=profile, inspectorFullName=inspectorFullName)
if select:
for mainWindow in argosApp.mainWindows:
mainWindow.trySelectRtiByPath(select)
return argosApp.execute()
|
Opens the main window(s) for the persistent settings of the given profile,
and executes the application.
:param fileNames: List of file names that will be added to the repository
:param inspectorFullName: The full path name of the inspector that will be loaded
:param select: a path of the repository item that will selected at start up.
:param profile: the name of the profile that will be loaded
:param resetProfile: if True, the profile will be reset to its standard settings.
:param resetAllProfiles: if True, all profiles will be reset to their standard settings.
:param resetRegistry: if True, the registry will be reset to its standard settings.
:return:
|
375,254
|
def iter_symbols(code):
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, six.string_types):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name
|
Yield names and strings used by `code` and its nested code objects
|
375,255
|
def set_weight(self, weight):
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
        weight = list_to_1d_numpy(weight, name='weight')
        self.set_field('weight', weight)
return self
|
Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
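A short usage sketch with synthetic data (standard LightGBM usage; chaining
works because ``set_weight`` returns ``self``):

import numpy as np
import lightgbm as lgb

data = lgb.Dataset(np.random.rand(100, 5), label=np.random.randint(2, size=100))
data = data.set_weight(np.ones(100))  # an all-ones weight is stored as None (no-op)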
|
375,256
|
def stop(self, devices):
for device in devices:
        self.logger.info('Powering off device %s', device.id)
try:
device.power_off()
except packet.baseapi.Error:
            raise PacketManagerException('Unable to power off device {}'.format(device.id))
|
Power-Off one or more running devices.
|
375,257
|
def fw_retry_failures(self):
if not self.fw_init:
return
try:
self.fw_retry_failures_create()
self.fw_retry_failures_delete()
except Exception as exc:
LOG.error("Exception in retry failures %s", str(exc))
|
Top level retry routine called.
|
375,258
|
def from_dict(cls, data):
    fulfillment = data['fulfillment']
    if not isinstance(fulfillment, (Fulfillment, type(None))):
        try:
            fulfillment = Fulfillment.from_uri(data['fulfillment'])
        except ASN1DecodeError:
            raise InvalidSignature("Fulfillment URI couldn't be parsed")
    fulfills = TransactionLink.from_dict(data['fulfills'])
    return cls(fulfillment, data['owners_before'], fulfills)
|
Transforms a Python dictionary to an Input object.
Note:
Optionally, this method can also serialize a Cryptoconditions-
Fulfillment that is not yet signed.
Args:
data (dict): The Input to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Input`
Raises:
InvalidSignature: If an Input's URI couldn't be parsed.
|
375,259
|
def connect_to_images(region=None, public=True):
return _create_client(ep_name="image", region=region, public=public)
|
Creates a client for working with Images.
|
375,260
|
def alpha_view(qimage):
bytes = byte_view(qimage, byteorder = None)
if bytes.shape[2] != 4:
raise ValueError("For alpha_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
return bytes[...,_bgra[3]]
|
Returns alpha view of a given 32-bit color QImage_'s memory.
The result is a 2D numpy.uint8 array, equivalent to
byte_view(qimage)[...,3]. The image must have 32 bit pixel size,
i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. Note that it is
not enforced that the given qimage has a format that actually
*uses* the alpha channel -- for Format_RGB32, the alpha channel
usually contains 255 everywhere.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype uint8
|
375,261
|
def add_dicts(*args):
counters = [Counter(arg) for arg in args]
return dict(reduce(operator.add, counters))
|
Adds two or more dicts together. Common keys will have their values added.
For example::
>>> t1 = {'a':1, 'b':2}
>>> t2 = {'b':1, 'c':3}
>>> t3 = {'d':4}
>>> add_dicts(t1, t2, t3)
{'a': 1, 'c': 3, 'b': 3, 'd': 4}
|
375,262
|
def Deserialize(self, reader):
super(SpentCoinState, self).Deserialize(reader)
self.TransactionHash = reader.ReadUInt256()
self.TransactionHeight = reader.ReadUInt32()
count = reader.ReadVarInt()
items = [0] * count
for i in range(0, count):
index = reader.ReadUInt16()
height = reader.ReadUInt32()
items[i] = SpentCoinItem(index=index, height=height)
self.Items = items
|
Deserialize full object.
Args:
reader (neocore.IO.BinaryReader):
|
375,263
|
def clone(self):
result = copy.copy(self)
result.compound_masses = copy.deepcopy(self.compound_masses)
return result
|
Create a complete copy of self.
:returns: A MaterialPackage that is identical to self.
|
375,264
|
def parse_version(v):
parts = v.split(".")
    parts = (parts + 3 * ["0"])[:3]
return tuple(int(x) for x in parts)
|
Take a string version and convert it to a tuple (for easier comparison), e.g.:
"1.2.3" --> (1, 2, 3)
"1.2" --> (1, 2, 0)
"1" --> (1, 0, 0)
|
375,265
|
def create(
cls,
path,
template_engine=None,
output_filename=None,
output_ext=None,
view_name=None
):
if isinstance(path, dict):
return StatikViewComplexPath(
path,
template_engine,
output_filename=output_filename,
output_ext=output_ext,
view_name=view_name
)
elif isinstance(path, basestring):
return StatikViewSimplePath(
path,
output_filename=output_filename,
output_ext=output_ext,
view_name=view_name
)
else:
raise ValueError(
"Unrecognised structure for \"path\" configuration in view: %s" % view_name
)
|
Create the relevant subclass of StatikView based on the given path variable and
parameters.
|
375,266
|
def colon_subscripts(u):
if u.__class__ in (node.arrayref,node.cellarrayref):
for w in u.args:
if w.__class__ is node.expr and w.op == ":":
w._replace(op="::")
|
Array colon subscripts foo(1:10) and colon expressions 1:10 look
too similar to each other. Now is the time to find out who is who.
|
375,267
|
def remove(self, key, column_path, timestamp, consistency_level):
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_remove(key, column_path, timestamp, consistency_level)
return d
|
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
Parameters:
- key
- column_path
- timestamp
- consistency_level
|
375,268
|
def fig_to_geojson(fig=None, **kwargs):
if fig is None:
fig = plt.gcf()
renderer = LeafletRenderer(**kwargs)
exporter = Exporter(renderer)
exporter.run(fig)
return renderer.geojson()
|
Returns a figure's GeoJSON representation as a dictionary
All arguments passed to fig_to_html()
Returns
-------
GeoJSON dictionary
|
375,269
|
def add_type(self, type: type, serialize: Callable[[Any], str], unserialize: Callable[[str], Any]) -> None:
self.types.append(HierarkeyType(type=type, serialize=serialize, unserialize=unserialize))
|
Adds serialization support for a new type.
:param type: The type to add support for.
:param serialize: A callable that takes an object of type ``type`` and returns a string.
:param unserialize: A callable that takes a string and returns an object of type ``type``.
|
375,270
|
def setText(self, text: str):
undoObj = UndoSetText(self, text)
self.qteUndoStack.push(undoObj)
|
Undo safe wrapper for the native ``setText`` method.
|Args|
* ``text`` (**str**): text to insert at the specified position.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
|
375,271
|
def controldata(self):
    result = {}
    try:
        # call and parsing reconstructed; the original body was lost in
        # extraction (the data-directory attribute is an assumption)
        data = subprocess.check_output(['pg_controldata', self._data_dir])
        for line in data.decode('utf-8').splitlines():
            if line and ':' in line:
                name, value = line.split(':', 1)
                result[name.strip()] = value.strip()
    except subprocess.CalledProcessError:
        logger.exception("Error when calling pg_controldata")
    return result
|
return the contents of pg_controldata, or non-True value if pg_controldata call failed
|
375,272
|
def listidentifiers(**kwargs):
e_tree, e_listidentifiers = verb(**kwargs)
result = get_records(**kwargs)
for record in result.items:
        pid = oaiid_fetcher(record['id'], record['json']['_source'])
        header(
            e_listidentifiers,
            identifier=pid.pid_value,
            datestamp=record['updated'],
            sets=record['json']['_source'].get('_oai', {}).get('sets', []),
        )
)
resumption_token(e_listidentifiers, result, **kwargs)
return e_tree
|
Create OAI-PMH response for verb ListIdentifiers.
|
375,273
|
def forwardMessage(self, chat_id, from_chat_id, message_id,
disable_notification=None):
p = _strip(locals())
    return self._api_request('forwardMessage', _rectify(p))
|
See: https://core.telegram.org/bots/api#forwardmessage
|
375,274
|
def coerce(self, value):
if isinstance(value, LinearOrderedCell) and (self.domain == value.domain or \
list_diff(self.domain, value.domain) == []):
return value
elif value in self.domain:
return LinearOrderedCell(self.domain, value, value)
    elif isinstance(value, (list, tuple)) and all(map(lambda v: v in self.domain, value)):
if len(value) == 1:
return LinearOrderedCell(self.domain, value[0], value[0])
elif len(value) == 2:
return LinearOrderedCell(self.domain, *value)
else:
sorted_vals = sorted(value, key=lambda x: self.to_i(x))
return LinearOrderedCell(self.domain, sorted_vals[0], sorted_vals[-1])
else:
raise Exception("Cannot coerce %s into LinearOrderedCell" % (str(value)))
|
Takes one or two values in the domain and returns a LinearOrderedCell
with the same domain
|
375,275
|
def delete_subnet_group(name, region=None, key=None, keyid=None,
profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
            return {'results': bool(conn)}
        r = conn.delete_db_subnet_group(DBSubnetGroupName=name)
        return {'deleted': bool(r), 'message':
                'Deleted RDS subnet group {0}.'.format(name)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
|
Delete an RDS subnet group.
CLI example::
salt myminion boto_rds.delete_subnet_group my-subnet-group \
region=us-east-1
|
375,276
|
def gen_lazy_function(self):
    if self._value is None:
        if self._random is not None:
            self.value = self._random(**self._parents.value)
        else:
            raise ValueError(
                'Stochastic ' +
                self.__name__ +
                "'s value is None and it has no random method to sample from.")
    # argument container rebuilt; the intervening lines were lost in extraction
    arguments = {}
    arguments.update(self.parents)
    arguments['value'] = self
    arguments = DictContainer(arguments)
self._logp = LazyFunction(fun=self._logp_fun,
arguments=arguments,
ultimate_args=self.extended_parents | set(
[self]),
cache_depth=self._cache_depth)
self._logp.force_compute()
self._logp_partial_gradients = {}
for parameter, function in six.iteritems(self._logp_partial_gradient_functions):
lazy_logp_partial_gradient = LazyFunction(fun=function,
arguments=arguments,
ultimate_args=self.extended_parents | set(
[self]),
cache_depth=self._cache_depth)
self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient
|
Will be called by Node at instantiation.
|
375,277
|
def clear(self):
for variable in self._project.variables.list(all=True):
variable.delete()
|
Clears all of the build variables.
|
375,278
|
def _get_default_values(self, default_values=None):
if not default_values:
default_values = self.DEFAULT_VALUES
if default_values:
api_version = str(self._connection._apiVersion)
values = default_values.get(api_version, {}).copy()
else:
values = {}
return values
|
Gets the default values set for a resource
|
375,279
|
def answerPreCheckoutQuery(self, pre_checkout_query_id, ok,
error_message=None):
p = _strip(locals())
    return self._api_request('answerPreCheckoutQuery', _rectify(p))
|
See: https://core.telegram.org/bots/api#answerprecheckoutquery
|
375,280
|
def get_internal_urls(self):
internal_urls = self.get_subfields("856", "u", i1="4", i2="0")
internal_urls.extend(self.get_subfields("998", "a"))
internal_urls.extend(self.get_subfields("URL", "u"))
return map(lambda x: x.replace("&", "&"), internal_urls)
|
URL's, which may point to edeposit, aleph, kramerius and so on.
Fields ``856u40``, ``998a`` and ``URLu``.
Returns:
list: List of internal URLs.
|
375,281
|
def hardware_flexport_flexport_type_instance(self, **kwargs):
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
flexport = ET.SubElement(hardware, "flexport")
id_key = ET.SubElement(flexport, "id")
    id_key.text = kwargs.pop('id')
    flexport_type = ET.SubElement(flexport, "flexport_type")
    instance = ET.SubElement(flexport_type, "instance")
    instance.text = kwargs.pop('instance')
    callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
375,282
|
def getResultFromProcess(res, tempname, process):
if not isinstance(res, (UndefinedValue, Exception)):
value = getRepresentation(tempname, process)
return value, res
else:
return res, str(res)
|
Get a value from process; return a tuple of (value, res) if successful
|
375,283
|
def parse_bdstoken(content):
    bdstoken = ''
    # pattern reconstructed; the original regex literal was lost in extraction
    bds_re = re.compile(r'bdstoken\s*[=:]\s*[\'"]([0-9a-f]+)[\'"]', re.IGNORECASE)
bds_match = bds_re.search(content)
if bds_match:
bdstoken = bds_match.group(1)
return bdstoken
|
Parse the bdstoken and related info from the page.
This info lives in a <script> at the bottom of the page, and only appears in pages fetched after authorization.
For compatibility, the cssselect module is deliberately not used for parsing here.
@return the bdstoken
|
375,284
|
def speed_rms(Temperature, element, isotope):
    atom = Atom(element, isotope)
    return sqrt(3*Temperature*k_B/atom.mass)
|
r"""This function calculates the average speed (in meters per second)
of an atom in a vapour assuming a Maxwell-Boltzmann velocity distribution.
This is simply
sqrt(8*k_B*T/m/pi)
where k_B is Boltzmann's constant, T is the temperature (in Kelvins) and
m is the mass of the atom (in kilograms).
>>> print speed_rms(25+273.15,"Rb",85)
295.945034349
>>> print speed_rms(25+273.15,"Cs",133)
236.550383496
|
375,285
|
def restore(self, hist_uid):
    # permission key reconstructed; the original literal was lost in extraction
    if not self.check_post_role()['ADMIN']:
        return False
    histinfo = MWikiHist.get_by_uid(hist_uid)
    if not histinfo:
        return False
    postinfo = MWiki.get_by_uid(histinfo.wiki_id)
    cur_cnt = tornado.escape.xhtml_unescape(postinfo.cnt_md)
    old_cnt = tornado.escape.xhtml_unescape(histinfo.cnt_md)
    MWiki.update_cnt(
        histinfo.wiki_id,
        {'cnt_md': old_cnt, 'user_name': self.userinfo.user_name}
    )
    MWikiHist.update_cnt(
        histinfo.uid,
        {'cnt_md': cur_cnt, 'user_name': postinfo.user_name}
    )
    # kind values and redirect targets reconstructed from the wiki/page split
    if postinfo.kind == '1':
        self.redirect('/wiki/{0}'.format(postinfo.title))
    elif postinfo.kind == '2':
        self.redirect('/page/{0}.html'.format(postinfo.uid))
|
Restore by ID
|
375,286
|
def main(argv, reactor=None):
if reactor is None:
from twisted.internet import gtk2reactor
gtk2reactor.install()
from twisted.internet import reactor
try:
AWSStatusIndicator(reactor)
gobject.set_application_name("aws-status")
reactor.run()
except ValueError:
pass
|
Run the client GUI.
Typical use:
>>> sys.exit(main(sys.argv))
@param argv: The arguments to run it with, e.g. sys.argv.
@param reactor: The reactor to use. Must be compatible with gtk as this
module uses gtk APIs.
@return exitcode: The exit code it returned, as per sys.exit.
|
375,287
|
def close(self):
if self._sock:
with utils.ignore_except():
self._sock.close()
self._sock = None
self._recvbuf = []
    self._recvbuf_partial = ''
|
Close the connection.
:param purge: If True (the default), the receive buffer will
be purged.
|
375,288
|
def upload_image(self,
image_file,
referer_url=None,
title=None,
desc=None,
created_at=None,
collection_id=None):
    url = self.upload_url + '/api/upload'
    data = {}
    if referer_url is not None:
        data['referer_url'] = referer_url
    if title is not None:
        data['title'] = title
    if desc is not None:
        data['desc'] = desc
    if created_at is not None:
        data['created_at'] = str(created_at)
    if collection_id is not None:
        data['collection_id'] = collection_id
    files = {
        'imagedata': image_file
    }
    response = self._request_url(
        url, 'post', data=data, files=files, with_access_token=True)
headers, result = self._parse_and_check(response)
return Image.from_dict(result)
|
Upload an image
:param image_file: File-like object of an image file
:param referer_url: Referer site URL
:param title: Site title
:param desc: Comment
:param created_at: Image's created time in unix time
:param collection_id: Collection ID
|
375,289
|
def create_or_replace_primary_key(self,
table: str,
fieldnames: Sequence[str]) -> int:
    sql = ("SELECT COUNT(*) FROM information_schema.table_constraints"
           " WHERE table_name=? AND table_schema={}"
           " AND constraint_name='PRIMARY'").format(
               self.get_current_schema_expr())
row = self.fetchone(sql, table)
has_pk_already = True if row[0] >= 1 else False
drop_pk_if_exists = " DROP PRIMARY KEY," if has_pk_already else ""
fieldlist = ",".join([self.delimit(f) for f in fieldnames])
sql = ("ALTER TABLE " + self.delimit(table) +
drop_pk_if_exists +
" ADD PRIMARY KEY(" + fieldlist + ")")
return self.db_exec(sql)
|
Make a primary key, or replace it if it exists.
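For example, ``create_or_replace_primary_key('patient', ['patient_id', 'visit_id'])`` on a table that already has a primary key executes roughly (with identifiers delimited via ``self.delimit``):

ALTER TABLE patient DROP PRIMARY KEY, ADD PRIMARY KEY(patient_id,visit_id)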
|
375,290
|
def ok(self):
try:
v = int(self._value)
if v < 0:
return False
else:
return True
    except (TypeError, ValueError):
return False
|
Returns True if OK to use, else False
|
375,291
|
def Start(self):
try:
self.stats = {}
self.BeginProcessing()
processed_count = 0
if data_store.RelationalDBEnabled():
for client_info_batch in _IterateAllClients(
recency_window=self.recency_window):
for client_info in client_info_batch:
self.ProcessClientFullInfo(client_info)
processed_count += len(client_info_batch)
self.Log("Processed %d clients.", processed_count)
self.HeartBeat()
else:
root_children = aff4.FACTORY.Open(
aff4.ROOT_URN, token=self.token).OpenChildren(mode="r")
for batch in collection.Batch(root_children, CLIENT_READ_BATCH_SIZE):
for child in batch:
if not isinstance(child, aff4_grr.VFSGRRClient):
continue
last_ping = child.Get(child.Schema.PING)
self.ProcessLegacyClient(last_ping, child)
processed_count += 1
self.HeartBeat()
self.FinishProcessing()
for fd in itervalues(self.stats):
fd.Close()
logging.info("%s: processed %d clients.", self.__class__.__name__,
processed_count)
except Exception as e:
logging.exception("Error while calculating stats: %s", e)
raise
|
Retrieve all the clients for the AbstractClientStatsCollectors.
|
375,292
|
def adapt_files(solver):
    print("adapting {0}'s files".format(solver))
    root = os.path.join('solvers', solver)
for arch in to_extract[solver]:
arch = os.path.join(root, arch)
extract_archive(arch, solver, put_inside=True)
for fnames in to_move[solver]:
old = os.path.join(root, fnames[0])
new = os.path.join(root, fnames[1])
os.rename(old, new)
for f in to_remove[solver]:
f = os.path.join(root, f)
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
|
Rename and remove files whenever necessary.
|
375,293
|
def get_match(sport, team1, team2):
sport = sport.lower()
team1_pattern = re.compile(team1, re.I)
team2_pattern = re.compile(team2, re.I)
matches = get_sport(sport)
for match in matches:
        if (re.search(team1_pattern, match.home_team) or re.search(team1_pattern, match.away_team)) \
                and (re.search(team2_pattern, match.away_team) or re.search(team2_pattern, match.home_team)):
return match
raise errors.MatchError(sport, [team1, team2])
|
Get live scores for a single match
:param sport: the sport being played
:type sport: string
:param team1: first team participating in the match
:type team1: string
:param team2: second team participating in the match
:type team2: string
:return: A specific match
:rtype: Match
|
375,294
|
def assign(self, pm):
if isinstance(pm, QPixmap):
self._pm = pm
else:
self._xpmstr = pm
self._pm = None
self._icon = None
|
Reassign pixmap or xpm string array to wrapper
|
375,295
|
def ssh_version():
    ret = subprocess.Popen(
        ['ssh', '-V'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE).communicate()
    try:
        version_parts = ret[1].split(b',')[0].split(b'_')[1]
        parts = []
        for part in version_parts.split(b'.'):
            try:
                parts.append(int(part))
            except ValueError:
                return tuple(parts)
        return tuple(parts)
except IndexError:
return (2, 0)
|
Returns the version of the installed ssh command
|
375,296
|
def set_up(self, test_args=(), clear=True, debug=False):
self.test_args = test_args
self.debug, self.clear = debug, clear
|
Sets properties right before calling run.
``test_args`` The arguments to pass to the test runner.
``clear`` Boolean. Set to True if we should clear console before
running the tests.
``debug`` Boolean. Set to True if we want to print debugging
information.
|
375,297
|
def execute():
    boto_server_error_retries = 3
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table_num_consec_read_checks = \
                CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0
        try:
            table_num_consec_write_checks = \
                CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0
        try:
            # ensure provisioning for the table itself
            table_num_consec_read_checks, table_num_consec_write_checks = \
                table.ensure_provisioning(
                    table_name,
                    table_key,
                    table_num_consec_read_checks,
                    table_num_consec_write_checks)
            CHECK_STATUS['tables'][table_name] = {
                'reads': table_num_consec_read_checks,
                'writes': table_num_consec_write_checks
            }
            # collect GSIs whose names match the configured patterns
            gsi_names = set()
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']
                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()
                except AttributeError:
                    # no GSIs are configured for this table
                    continue
                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug(
                                'Table {0} GSI {1} matches '
                                'GSI config key {2}'.format(
                                    table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))
                    except re.error:
                        logger.error('Invalid regular expression: "{0}"'.format(
                            gsi_key))
                        sys.exit(1)
            for gsi_name, gsi_key in sorted(gsi_names):
                unique_gsi_name = ' - '.join([table_name, gsi_name])
                try:
                    gsi_num_consec_read_checks = \
                        CHECK_STATUS['gsis'][unique_gsi_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0
                try:
                    gsi_num_consec_write_checks = \
                        CHECK_STATUS['gsis'][unique_gsi_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0
                gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
                    gsi.ensure_provisioning(
                        table_name,
                        table_key,
                        gsi_name,
                        gsi_key,
                        gsi_num_consec_read_checks,
                        gsi_num_consec_write_checks)
                CHECK_STATUS['gsis'][unique_gsi_name] = {
                    'reads': gsi_num_consec_read_checks,
                    'writes': gsi_num_consec_write_checks
                }
        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]
            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(
                    table_name,
                    table_name))
                continue
        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}". '
                    'Message: {2}'.format(
                        error.status,
                        error.reason,
                        error.message))
                logger.error(
                    'Please bug report if this error persists')
                boto_server_error_retries -= 1
                continue
            else:
                raise
    if not get_global_option('run_once'):
        logger.debug('Sleeping {0} seconds until next check'.format(
            get_global_option('check_interval')))
        time.sleep(get_global_option('check_interval'))
|
Ensure provisioning
|
375,298
|
def infile(self):
    # OPTIONS keys reconstructed; the original literals were lost in extraction
    return os.path.join(OPTIONS['folder'],
                        '{0}.{1}'.format(self.name, OPTIONS['extension']))
|
Path of the input file
|
375,299
|
def p_rule(self, rule):
    if len(rule[1]) == 4:
        rule[0] = Guideline(rule[1][1], rule[1][2], rule[1][3])
    else:
        indentsize = rule[1][0]
        number = rule[1][1]
        text = rule[1][2]
        parent = None
        if self.prev_indent > indentsize:
            self.current_rule = self.current_rule[0:indentsize+1]
        # construction of the regulation node was lost in extraction;
        # reconstructed on the assumption it mirrors the Guideline branch
        if self.current_rule:
            parent = self.current_rule[-1]
        reg = Regulation(number, text)
        if parent:
            parent.add_child(reg)
        else:
            rule[0] = reg
        if self.prev_indent >= indentsize:
            self.current_rule.pop()
        self.current_rule.append(reg)
        self.prev_indent = indentsize
|
rule : GUIDELINE
| REGULATION
|