Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
373,800
|
def cancel_completion(self):
    """Cancel the active completion.

    Should be called when the completer has to be dismissed: resets the
    internal state and clears the console's temporary buffer where the
    completion suggestions are shown.
    """
    self._consecutive_tab = 0
    self._slice_start = 0
    self._console_widget._clear_temporary_buffer()
    self._index = (0, 0)
    # Unconditionally dropping the interval is equivalent to the old
    # truthiness-guarded assignment, without the redundant branch.
    self._sliding_interval = None
|
Cancel the completion.
Should be called when the completer has to be dismissed.
This resets the internal variables, clearing the temporary buffer
of the console where the completions are shown.
|
373,801
|
def commitreturn(self, cursor, qstring, vals=()):
    """Execute *qstring* with *vals* and return the first fetched row.

    careful: don't pass cursor (it's from decorator)
    """
    cursor.execute(qstring, vals)
    return cursor.fetchall()[0]
|
careful: don't pass cursor (it's from decorator)
|
373,802
|
def _string_to_sign(item, table_name, attribute_actions):
    """Generate the string to sign from an encrypted item and configuration.

    :param dict item: Encrypted DynamoDB item
    :param str table_name: Table name to use when generating the string to sign
    :param AttributeActions attribute_actions: Actions to take for item
    :returns: canonical bytes to sign
    :rtype: bytes
    """
    hasher = hashes.Hash(hashes.SHA256(), backend=default_backend())
    data_to_sign = bytearray()
    # Bind the signature to the table by hashing a framed copy of its name.
    data_to_sign.extend(_hash_data(hasher=hasher, data="TABLE>{}<TABLE".format(table_name).encode(TEXT_ENCODING)))
    # Sort keys so the string to sign is deterministic regardless of dict order.
    for key in sorted(item.keys()):
        action = attribute_actions.action(key)
        if action is CryptoAction.DO_NOTHING:
            # Unprotected attributes are excluded from the signature entirely.
            continue
        data_to_sign.extend(_hash_data(hasher=hasher, data=key.encode(TEXT_ENCODING)))
        # A fixed marker records whether the attribute was signed-only or encrypted.
        if action is CryptoAction.SIGN_ONLY:
            data_to_sign.extend(SignatureValues.PLAINTEXT.sha256)
        else:
            data_to_sign.extend(SignatureValues.ENCRYPTED.sha256)
        data_to_sign.extend(_hash_data(hasher=hasher, data=serialize_attribute(item[key])))
    return bytes(data_to_sign)
|
Generate the string to sign from an encrypted item and configuration.
:param dict item: Encrypted DynamoDB item
:param str table_name: Table name to use when generating the string to sign
:param AttributeActions attribute_actions: Actions to take for item
|
373,803
|
def plot_correlation(self, freq=None, title=None,
                     figsize=(12, 6), **kwargs):
    """Utility function to plot return correlations as a heatmap.

    Args:
        * freq (str): Pandas data frequency alias string
        * title (str): Plot title; defaults to the object's standard title
        * figsize (tuple (x,y)): figure size
        * kwargs: passed to Pandas' plot_corr_heatmap function
    """
    if title is None:
        # NOTE(review): the second argument to _get_default_plot_title looks
        # truncated in this copy (trailing comma) - confirm against upstream.
        title = self._get_default_plot_title(
            freq, )
    rets = self._get_series(freq).to_returns().dropna()
    return rets.plot_corr_heatmap(title=title, figsize=figsize, **kwargs)
|
Utility function to plot correlations.
Args:
* freq (str): Pandas data frequency alias string
* title (str): Plot title
* figsize (tuple (x,y)): figure size
* kwargs: passed to Pandas' plot_corr_heatmap function
|
373,804
|
def verify(self, key):
    """Verify the signature on this SPKI structure.

    :param PKey key: The public key the signature is supposedly from.
    :return: ``True`` if the signature is correct.
    :rtype: bool
    :raises OpenSSL.crypto.Error: If the signature is invalid, or there
        was a problem verifying the signature.
    """
    result = _lib.NETSCAPE_SPKI_verify(self._spki, key._pkey)
    if not result > 0:
        # A non-positive return code means OpenSSL reported an error.
        _raise_current_error()
    return True
|
Verifies a signature on a certificate request.
:param PKey key: The public key that signature is supposedly from.
:return: ``True`` if the signature is correct.
:rtype: bool
:raises OpenSSL.crypto.Error: If the signature is invalid, or there was
a problem verifying the signature.
|
373,805
|
def JUMPI(self, dest, cond):
    """Conditionally alter the program counter.

    Symbolically selects the jump target: if *cond* is nonzero the pc
    becomes *dest*, otherwise it falls through to the next instruction.
    """
    # ITEBV builds a 256-bit if-then-else bitvector over the condition.
    self.pc = Operators.ITEBV(256, cond != 0, dest, self.pc + self.instruction.size)
    # Record that a jump-destination check is pending for the taken branch.
    self._set_check_jmpdest(cond != 0)
|
Conditionally alter the program counter
|
373,806
|
def deobfuscate(cls, data):
    """Reverse the obfuscation done by the :meth:`obfuscate` method.

    If an identifier arrives without correct base64 padding this
    function appends the missing ``=`` characters before decoding.

    :param bytes data: urlsafe-base64 payload, possibly missing padding
    :return: the decoded bytes
    """
    # urlsafe_b64decode requires input whose length is a multiple of 4;
    # restore any padding stripped during obfuscation.
    padding = b"=" * (-len(data) % 4)
    return base64.urlsafe_b64decode(data + padding)
|
Reverses the obfuscation done by the :meth:`obfuscate` method.
If an identifier arrives without correct base64 padding this
function will append it to the end.
|
373,807
|
def write_pdf(pdf_obj, destination):
    """Write a PDF object out to a file.

    :param pdf_obj: PDF object to be written to file
    :param destination: Destination path
    """
    source = PdfFileReader(pdf_obj)
    output = PdfFileWriter()
    # Copy every page from the source document into the writer.
    for index in range(source.getNumPages()):
        output.addPage(source.getPage(index))
    with open(destination, "wb") as stream:
        output.write(stream)
|
Write PDF object to file
:param pdf_obj: PDF object to be written to file
:param destination: Destination path
|
373,808
|
def alpha_(self,x):
    """Create a mappable function alpha to apply to each xmin in a list of xmins.

    This is essentially the slow version of fplfit/cplfit, though it could
    probably be sped up with a clever use of parallel_map. Not intended to
    be used by users.
    """
    def alpha(xmin,x=x):
        # Keep only the samples at or above the candidate xmin.
        x = [i for i in x if i>=xmin]
        n = sum(x)
        divsum = sum([math.log(i/xmin) for i in x])
        if divsum == 0:
            # NOTE(review): float() is 0.0; an infinite/NaN sentinel such as
            # float('inf') seems more likely intended here - confirm upstream.
            return float()
        a = 1 + float(n) / divsum
        return a
    return alpha
|
Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though it could probably
be sped up with a clever use of parallel_map. Not intended to be used by users.
|
373,809
|
def resize(self, new_data_size):
    """Resize the underlying file to *new_data_size* bytes and update the
    recorded chunk sizes accordingly.
    """
    resize_bytes(
        self.__fileobj, self.data_size, new_data_size, self.data_offset)
    self._update_size(new_data_size)
|
Resize the file and update the chunk sizes
|
373,810
|
def SdkSetup(self):
    """Return Microsoft Windows SDK Setup paths.

    Only applies to Visual C++ 9.0 and earlier; newer toolchains return
    an empty list.
    """
    if self.vc_ver > 9.0:
        return []
    # NOTE(review): the second os.path.join argument (likely 'Setup') was
    # lost in this copy of the source - restore from upstream.
    return [os.path.join(self.si.WindowsSdkDir, )]
|
Microsoft Windows SDK Setup
|
373,811
|
def post_mortem(tb=None, host=, port=5555, patch_stdstreams=False):
    """Start post-mortem debugging for the provided traceback object.

    If no traceback is provided the debugger tries to obtain a traceback
    for the last unhandled exception.

    :param tb: traceback for post-mortem debugging
    :type tb: types.TracebackType
    :param host: web-UI hostname or IP-address
    :type host: str
    :param port: web-UI port. If ``port=-1``, choose a random port value
        between 32768 and 65536.
    :type port: int
    :param patch_stdstreams: redirect all standard input and output
        streams to the web-UI.
    :type patch_stdstreams: bool
    :raises ValueError: if no valid traceback is provided and the Python
        interpreter is not handling any exception

    NOTE(review): several string literals (the ``host`` default, the
    ValueError message and the console separator strings) were lost in
    this copy of the source - restore them from upstream before running.
    """
    if tb is None:
        # Fall back to the traceback of the exception currently being handled.
        t, v, tb = sys.exc_info()
        exc_data = traceback.format_exception(t, v, tb)
    else:
        exc_data = traceback.format_tb(tb)
    if tb is None:
        raise ValueError(
        )
    # Reuse the active debugger instance if one exists.
    pdb = WebPdb.active_instance
    if pdb is None:
        pdb = WebPdb(host, port, patch_stdstreams)
    else:
        pdb.remove_trace()
    pdb.console.writeline()
    pdb.console.writeline(.join(exc_data))
    pdb.reset()
    pdb.interaction(None, tb)
|
Start post-mortem debugging for the provided traceback object
If no traceback is provided the debugger tries to obtain a traceback
for the last unhandled exception.
Example::
try:
# Some error-prone code
assert ham == spam
except:
web_pdb.post_mortem()
:param tb: traceback for post-mortem debugging
:type tb: types.TracebackType
:param host: web-UI hostname or IP-address
:type host: str
:param port: web-UI port. If ``port=-1``, choose a random port value
between 32768 and 65536.
:type port: int
:param patch_stdstreams: redirect all standard input and output
streams to the web-UI.
:type patch_stdstreams: bool
:raises ValueError: if no valid traceback is provided and the Python
interpreter is not handling any exception
|
373,812
|
def create(text,score,prompt_string, dump_data=False):
    """Create a machine learning model from input text, associated scores and a prompt.

    text - A list of strings containing the text of the essays
    score - a list of integers containing score values
    prompt_string - the common prompt for the set of essays
    dump_data - if True, write the input data to disk for debugging

    NOTE(review): the string keys used to index ``results`` were lost in
    this copy of the source - restore them from upstream.
    """
    if dump_data:
        dump_input_data(text, score)
    # Pick a classification/regression algorithm based on the score values.
    algorithm = select_algorithm(score)
    results = {: [], : False, : 0, : 0,
               : "", : "", : algorithm,
               : score, : text, : prompt_string}
    if len(text)!=len(score):
        msg = "Target and text lists must be same length."
        results[].append(msg)
        log.exception(msg)
        return results
    try:
        e_set = model_creator.create_essay_set(text, score, prompt_string)
    except:
        msg = "essay set creation failed."
        results[].append(msg)
        log.exception(msg)
    try:
        # Extract features and train the model, keeping cross-validation errors.
        feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model(e_set, algorithm = algorithm)
        results[]=cv_error_results[]
        results[]=cv_error_results[]
        results[]=feature_ext
        results[]=classifier
        results[] = algorithm
        results[]=True
    except:
        msg = "feature extraction and model creation failed."
        results[].append(msg)
        log.exception(msg)
    return results
|
Creates a machine learning model from input text, associated scores, a prompt, and a path to the model
TODO: Remove model path argument, it is needed for now to support legacy code
text - A list of strings containing the text of the essays
score - a list of integers containing score values
prompt_string - the common prompt for the set of essays
|
373,813
|
def _expand_json(self, j):
decompressed_json = copy.copy(j)
decompressed_json.pop(, None)
compressed_data = base64.b64decode(j[])
original_json = zlib.decompress(compressed_data).decode()
decompressed_json[] = json.loads(original_json)
return decompressed_json
|
Decompress the BLOB portion of the usernotes.
Arguments:
j: the JSON returned from the wiki page (dict)
Returns a Dict with the 'blob' key removed and a 'users' key added
|
373,814
|
def resize(self, new_size):
    """Resize this disk.

    The Linode Instance this disk belongs to must have sufficient space
    available to accommodate the new size, and must be offline.

    **NOTE** If resizing a disk down, the filesystem on the disk must still
    fit on the new disk size. You may need to resize the filesystem on the
    disk first before performing this action.

    :param new_size: The intended new size of the disk, in MB
    :type new_size: int
    :returns: True if the resize was initiated successfully.
    :rtype: bool
    """
    # NOTE(review): the endpoint format-string literal was lost in this
    # copy of the source (likely '{}/resize') - restore from upstream.
    self._client.post(.format(Disk.api_endpoint), model=self, data={"size": new_size})
    return True
|
Resizes this disk. The Linode Instance this disk belongs to must have
sufficient space available to accommodate the new size, and must be
offline.
**NOTE** If resizing a disk down, the filesystem on the disk must still
fit on the new disk size. You may need to resize the filesystem on the
disk first before performing this action.
:param new_size: The intended new size of the disk, in MB
:type new_size: int
:returns: True if the resize was initiated successfully.
:rtype: bool
|
373,815
|
def cli(ctx, config, quiet):
    """AWS ECS Docker Deployment Tool - root command group.

    Loads the configuration file into the Click context and stores the
    quiet flag for downstream commands.

    NOTE(review): the context-key and log-banner string literals were
    lost in this copy of the source - restore them from upstream.
    """
    ctx.obj = {}
    ctx.obj[] = load_config(config.read())
    ctx.obj[] = quiet
    log(ctx, + rnd_scotty_quote() + )
|
AWS ECS Docker Deployment Tool
|
373,816
|
def populate(self, blueprint, documents):
    """Populate the database with *documents* generated for *blueprint*.

    Finishes the raw documents, splits off the blueprint's meta fields,
    wraps each document in the blueprint's frame class and bulk-inserts
    them, firing the on_fake/on_faked hooks around the insert.
    """
    documents = self.finish(blueprint, documents)
    frames = []
    for document in documents:
        # Meta fields become frame attributes rather than stored fields.
        meta_document = {}
        for field_name in blueprint._meta_fields:
            meta_document[field_name] = document[field_name]
            document.pop(field_name)
        frame = blueprint.get_frame_cls()(document)
        for key, value in meta_document.items():
            setattr(frame, key, value)
        frames.append(frame)
    blueprint.on_fake(frames)
    frames = blueprint.get_frame_cls().insert_many(frames)
    blueprint.on_faked(frames)
    return frames
|
Populate the database with documents
|
373,817
|
def update(self):
    """Called before the listing renders.

    Filters the available review states according to the bika setup
    (sampling, scheduling, preservation, rejection), hides the client
    link for regulatory inspectors, wires up the Add action, and adds
    print/copy transitions when the corresponding workflows are enabled.

    NOTE(review): two key/value string literals in the Add action dict
    were lost in this copy of the source - restore them from upstream.
    """
    super(AnalysisRequestsView, self).update()
    self.workflow = api.get_tool("portal_workflow")
    self.member = self.mtool.getAuthenticatedMember()
    self.roles = self.member.getRoles()
    setup = api.get_bika_setup()
    # Drop review states for workflow phases that are disabled in setup.
    if not setup.getSamplingWorkflowEnabled():
        self.review_states = filter(
            lambda x: x.get("id") != "to_be_sampled", self.review_states)
    if not setup.getScheduleSamplingEnabled():
        self.review_states = filter(
            lambda x: x.get("id") != "scheduled_sampling",
            self.review_states)
    if not setup.getSamplePreservationEnabled():
        self.review_states = filter(
            lambda x: x.get("id") != "to_be_preserved", self.review_states)
    if not setup.getRejectionReasons():
        self.review_states = filter(
            lambda x: x.get("id") != "rejected", self.review_states)
    # Regulatory inspectors without lab roles must not see client links.
    self.hideclientlink = "RegulatoryInspector" in self.roles \
        and "Manager" not in self.roles \
        and "LabManager" not in self.roles \
        and "LabClerk" not in self.roles
    if self.context.portal_type == "AnalysisRequestsFolder" and \
            (self.mtool.checkPermission(AddAnalysisRequest, self.context)):
        self.context_actions[_("Add")] = \
            {"url": "ar_add?ar_count=1",
             : ,
             "icon": "++resource++bika.lims.images/add.png"}
    self.editresults = -1
    self.clients = {}
    self.printwfenabled = \
        self.context.bika_setup.getPrintingWorkflowEnabled()
    printed_colname = "Printed"
    if not self.printwfenabled and printed_colname in self.columns:
        # Printing workflow disabled: remove the Printed column everywhere.
        del self.columns[printed_colname]
        tmprvs = []
        for rs in self.review_states:
            tmprs = rs
            tmprs["columns"] = [c for c in rs.get("columns", []) if
                                c != printed_colname]
            tmprvs.append(tmprs)
        self.review_states = tmprvs
    elif self.printwfenabled:
        # Printing workflow enabled: add a Print transition to every state.
        review_states = []
        for review_state in self.review_states:
            review_state.get("custom_transitions", []).extend(
                [{"id": "print_sample",
                  "title": _("Print"),
                  "url": "workflow_action?action=print_sample"}, ])
            review_states.append(review_state)
        self.review_states = review_states
    if self.copy_to_new_allowed:
        # Offer a Copy-to-new transition in every remaining review state.
        review_states = []
        for review_state in self.review_states:
            review_state.get("custom_transitions", []).extend(
                [{"id": "copy_to_new",
                  "title": _("Copy to new"),
                  "url": "workflow_action?action=copy_to_new"}, ])
            review_states.append(review_state)
        self.review_states = review_states
|
Called before the listing renders
|
373,818
|
def _create_entry(self, name, values, fbterm=False):
    """Render the joined *values* as a palette entry, store it as an
    attribute named *name* on self, and return it.

    NOTE(review): the join-separator string literal was lost in this copy
    of the source (likely ';') - restore from upstream.
    """
    if fbterm:
        attr = _PaletteEntryFBTerm(self, name.upper(), .join(values))
    else:
        attr = _PaletteEntry(self, name.upper(), .join(values))
    setattr(self, name, attr)
    return attr
|
Render first values as string and place as first code,
save, and return attr.
|
373,819
|
def welcome_if_new(self, node):
    """Given a new node, send it all the keys/values it should be storing,
    then add it to the routing table.

    @param node: A new node that just joined (or that we just found out
        about).

    Process:
    For each key in storage, get k closest nodes.  If newnode is closer
    than the furthest in that list, and the node for this server
    is closer than the closest in that list, then store the key/value
    on the new node (per section 2.5 of the paper)
    """
    if not self.router.is_new_node(node):
        return
    log.info("never seen %s before, adding to router", node)
    for key, value in self.storage:
        keynode = Node(digest(key))
        neighbors = self.router.find_neighbors(keynode)
        if neighbors:
            last = neighbors[-1].distance_to(keynode)
            new_node_close = node.distance_to(keynode) < last
            first = neighbors[0].distance_to(keynode)
            this_closest = self.source_node.distance_to(keynode) < first
        # With no neighbors we are the only holder, so the new node should
        # replicate everything; otherwise apply the section-2.5 criteria.
        # (The replication check must sit OUTSIDE the `if neighbors` block,
        # or the empty-neighbors case can never trigger a store.)
        if not neighbors or (new_node_close and this_closest):
            asyncio.ensure_future(self.call_store(node, key, value))
    self.router.add_contact(node)
|
Given a new node, send it all the keys/values it should be storing,
then add it to the routing table.
@param node: A new node that just joined (or that we just found out
about).
Process:
For each key in storage, get k closest nodes. If newnode is closer
than the furtherst in that list, and the node for this server
is closer than the closest in that list, then store the key/value
on the new node (per section 2.5 of the paper)
|
373,820
|
def run(self):
    r"""
    Overrides the default run() method.
    Performs the complete analysis on the model specified during initialisation.

    :return: an ODE problem which can be further used in inference and simulation.
    :rtype: :class:`~means.core.problems.ODEProblem`
    """
    # Bug fix: a dangling `r` token (the remnant of this raw docstring)
    # previously raised NameError at call time; the docstring is restored.
    max_order = self.__max_order
    stoichiometry_matrix = self.model.stoichiometry_matrix
    propensities = self.model.propensities
    species = self.model.species
    # Compute counters for raw (k) and central (n) moments up to max_order.
    n_counter, k_counter = generate_n_and_k_counters(max_order, species)
    dmu_over_dt = generate_dmu_over_dt(species, propensities, n_counter, stoichiometry_matrix)
    central_moments_exprs = eq_central_moments(n_counter, k_counter, dmu_over_dt, species, propensities, stoichiometry_matrix, max_order)
    central_from_raw_exprs = raw_to_central(n_counter, species, k_counter)
    # Express the equations purely in terms of central moments.
    central_moments_exprs = self._substitute_raw_with_central(central_moments_exprs, central_from_raw_exprs, n_counter, k_counter)
    mfk = self._generate_mass_fluctuation_kinetics(central_moments_exprs, dmu_over_dt, n_counter)
    # Apply the configured moment-closure to truncate the hierarchy.
    mfk = self.closure.close(mfk, central_from_raw_exprs, n_counter, k_counter)
    prob_lhs = self._generate_problem_left_hand_side(n_counter, k_counter)
    out_problem = ODEProblem("MEA", prob_lhs, mfk, sp.Matrix(self.model.parameters))
    return out_problem
|
r"""
Overrides the default run() method.
Performs the complete analysis on the model specified during initialisation.
:return: an ODE problem which can be further used in inference and simulation.
:rtype: :class:`~means.core.problems.ODEProblem`
|
373,821
|
def geo_area(arg, use_spheroid=None):
    """Compute the area of a geospatial value.

    Parameters
    ----------
    arg : geometry or geography
    use_spheroid : default None

    Returns
    -------
    area : double scalar
    """
    return ops.GeoArea(arg, use_spheroid).to_expr()
|
Compute area of a geo spatial data
Parameters
----------
arg : geometry or geography
use_spheroid: default None
Returns
-------
area : double scalar
|
373,822
|
def random_string_array(max_len=1, min_len=1,
                        elem_max_len=1, elem_min_len=1,
                        strings=string.ascii_letters, **kwargs):
    """Build a SeedStringArray of randomly generated strings.

    :param max_len: max value of len(array)
    :param min_len: min value of len(array)
    :param elem_max_len: max value of len(array[index])
    :param elem_min_len: min value of len(array[index])
    :param strings: allowed string characters in each element of array,
        or predefined list of strings, or function pointer
    :param **kwargs: keyworded arguments for strings if it's a function pointer
    :return: SeedStringArray
    """
    count = random.randint(min_len, max_len)
    elements = [
        Randomize.random_string(max_len=elem_max_len, min_len=elem_min_len,
                                chars=strings, **kwargs).value
        for _ in range(count)
    ]
    return SeedStringArray(elements)
|
:param max_len: max value of len(array)
:param min_len: min value of len(array)
:param elem_max_len: max value of len(array[index])
:param elem_min_len: min value of len(array[index])
:param strings: allowed string characters in each element of array,
or predefined list of strings, or function pointer
:param **kwargs: keyworded arguments for strings if it's a function pointer
:return: SeedStringArray
|
373,823
|
def ParseContainersTable(
        self, parser_mediator, database=None, table=None, **unused_kwargs):
    """Parses the Containers table.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        database (Optional[pyesedb.file]): ESE database.
        table (Optional[pyesedb.table]): table.

    Raises:
        ValueError: if the database or table value is missing.

    NOTE(review): the record-value key names, error-message strings and the
    per-container table-name format string were lost in this copy of the
    source - restore them from upstream.
    """
    if database is None:
        raise ValueError()
    if table is None:
        raise ValueError()
    for esedb_record in table.records:
        if parser_mediator.abort:
            break
        record_values = self._GetRecordValues(
            parser_mediator, table.name, esedb_record)
        event_data = MsieWebCacheContainersEventData()
        event_data.container_identifier = record_values.get(, None)
        event_data.directory = record_values.get(, None)
        event_data.name = record_values.get(, None)
        event_data.set_identifier = record_values.get(, None)
        timestamp = record_values.get(, None)
        if timestamp:
            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
            event = time_events.DateTimeValuesEvent(
                date_time, )
            parser_mediator.ProduceEventWithEventData(event, event_data)
        timestamp = record_values.get(, None)
        if timestamp:
            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
            parser_mediator.ProduceEventWithEventData(event, event_data)
        container_identifier = record_values.get(, None)
        container_name = record_values.get(, None)
        if not container_identifier or not container_name:
            continue
        # Each container row points at its own per-container table.
        table_name = .format(container_identifier)
        esedb_table = database.get_table_by_name(table_name)
        if not esedb_table:
            parser_mediator.ProduceExtractionWarning(
                .format(table_name))
            continue
        self._ParseContainerTable(parser_mediator, esedb_table, container_name)
|
Parses the Containers table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
|
373,824
|
def clear(self):
    """Clear the current state: detach every widget from the extra-keywords
    layout and reset the widget registry.
    """
    total = self.extra_keywords_layout.count()
    # Walk the layout backwards so removals do not shift pending indices.
    for index in range(total - 1, -1, -1):
        self.extra_keywords_layout.itemAt(index).widget().setParent(None)
    self.widgets_dict = OrderedDict()
|
Clear current state.
|
373,825
|
def delete_key(self, key_to_delete):
    """Delete the specified key from the S3 bucket.

    :param key_to_delete: key to remove from self.bucket_name
    :return: True on success, False if the delete call failed

    NOTE(review): the logger-name suffix and the log-message format
    strings were lost in this copy of the source - restore from upstream.
    """
    log = logging.getLogger(self.cls_logger + )
    log.info(.format(k=key_to_delete))
    try:
        self.s3client.delete_object(Bucket=self.bucket_name, Key=key_to_delete)
    except ClientError:
        _, ex, trace = sys.exc_info()
        log.error(.format(k=key_to_delete, e=str(ex)))
        return False
    else:
        log.info(.format(k=key_to_delete))
        return True
|
Deletes the specified key
:param key_to_delete:
:return:
|
373,826
|
def query_boost_version(boost_root):
    """Read the Boost version from a given boost_root.

    Scans a version file under *boost_root* for the version token and
    returns it, or a fallback value when the file or token is absent.

    NOTE(review): the file name, open() mode/encoding, the marker compared
    against parts[1] and the fallback version literal were lost in this
    copy of the source - restore them from upstream.
    """
    boost_version = None
    if os.path.exists(os.path.join(boost_root,)):
        with codecs.open(os.path.join(boost_root,), , ) as f:
            for line in f.readlines():
                parts = line.split()
                if len(parts) >= 5 and parts[1] == :
                    boost_version = parts[3]
                    break
    if not boost_version:
        boost_version =
    return boost_version
|
Read in the Boost version from a given boost_root.
|
373,827
|
def merge_conf(to_hash, other_hash, path=None):
    """Recursively merge *other_hash* into *to_hash* (in place).

    Nested dicts present in both are merged recursively; any other value
    in *other_hash* overwrites the one in *to_hash*.

    :param to_hash: dict updated in place
    :param other_hash: dict whose entries are merged in
    :param path: key path accumulated across recursive calls (internal)
    :return: *to_hash*, for convenience
    """
    # Avoid the shared mutable default argument; build a fresh list per call.
    if path is None:
        path = []
    for key in other_hash:
        if (key in to_hash and isinstance(to_hash[key], dict)
                and isinstance(other_hash[key], dict)):
            merge_conf(to_hash[key], other_hash[key], path + [str(key)])
        else:
            to_hash[key] = other_hash[key]
    return to_hash
|
merges other_hash into to_hash
|
373,828
|
def split_python_text_into_lines(text):
    """Split Python source *text* into logical lines.

    Physical lines are concatenated until their parentheses balance, so a
    statement spread over several lines (e.g. a multi-line call) comes
    back as a single entry.

    :param text: Python source code
    :return: list of logical lines

    TODO: extend balancing beyond parentheses (brackets, braces, strings).
    """
    def _parens_balanced(line, left="(", right=")"):
        # Iterative depth count; equivalent to the recursive original.
        depth = 0
        for ch in line:
            if ch == left:
                depth += 1
            elif ch == right:
                depth -= 1
                if depth < 0:
                    # More closers than openers can never balance later.
                    return False
        return depth == 0

    logical_lines = []
    pending = ""
    for line in text.split("\n"):
        pending += line
        if _parens_balanced(pending):
            logical_lines.append(pending)
            pending = ""
    # Bug fix: the original returned the raw split lines instead of the
    # merged logical lines it had just built.
    return logical_lines
|
# TODO: make it so this function returns text where one statement is on one
# line; that means no splitting up of things like function definitions into
# multiple lines
|
373,829
|
def load_dict(self, source, namespace='', make_namespaces=False):
    """Import values from a dictionary structure. Nesting can be used to
    represent namespaces.

    >>> ConfigDict().load_dict({'name': {'space': {'key': 'value'}}})
    {'name.space.key': 'value'}

    :param source: (possibly nested) dict of values to import
    :param namespace: prefix to prepend to every generated key
    :param make_namespaces: also create Namespace entries for sub-dicts
    :return: self
    """
    # Iterative depth-first walk; each stack entry is (key-prefix, sub-dict).
    stack = [(namespace, source)]
    while stack:
        prefix, source = stack.pop()
        if not isinstance(source, dict):
            raise TypeError('Source is not a dict (%r)' % type(source))
        for key, value in source.items():
            if not isinstance(key, str):
                raise TypeError('Key is not a string (%r)' % type(key))
            full_key = prefix + '.' + key if prefix else key
            if isinstance(value, dict):
                stack.append((full_key, value))
                if make_namespaces:
                    self[full_key] = self.Namespace(self, full_key)
            else:
                self[full_key] = value
    return self
|
Import values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> ConfigDict().load_dict({'name': {'space': {'key': 'value'}}})
{'name.space.key': 'value'}
|
373,830
|
def list_enrollment_claims(self, **kwargs):
    """List enrollment claims, paginated.

    :param kwargs: filter and sort options, validated against
        :class:`EnrollmentClaim`
    :return: a :class:`PaginatedResponse` of :class:`EnrollmentClaim`
    """
    kwargs = self._verify_sort_options(kwargs)
    kwargs = self._verify_filters(kwargs, EnrollmentClaim)
    api = self._get_api(enrollment.PublicAPIApi)
    return PaginatedResponse(
        api.get_device_enrollments,
        lwrap_type=EnrollmentClaim,
        **kwargs
    )
|
List enrollment claims.
|
373,831
|
def to_dict(self, remove_nones=False):
    """Return a dict representation of the `DidlResource`.

    Args:
        remove_nones (bool, optional): Optionally remove dictionary
            elements when their value is `None`.

    Returns:
        dict: a dict representing the `DidlResource`
    """
    content = {
        'uri': self.uri,
        'protocol_info': self.protocol_info,
        'import_uri': self.import_uri,
        'size': self.size,
        'duration': self.duration,
        'bitrate': self.bitrate,
        'sample_frequency': self.sample_frequency,
        'bits_per_sample': self.bits_per_sample,
        'nr_audio_channels': self.nr_audio_channels,
        'resolution': self.resolution,
        'color_depth': self.color_depth,
        'protection': self.protection,
    }
    if remove_nones:
        # Collect first, then delete, to avoid mutating while iterating.
        for key in [k for k in content if content[k] is None]:
            del content[key]
    return content
|
Return a dict representation of the `DidlResource`.
Args:
remove_nones (bool, optional): Optionally remove dictionary
elements when their value is `None`.
Returns:
dict: a dict representing the `DidlResource`
|
373,832
|
def interleave(*args):
    """Interleave the elements of the provided arrays.

    >>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
    >>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
    >>> interleave(a, b)
    [(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]

    This is useful for combining multiple vertex attributes into a single
    vertex buffer. The shader attributes can be assigned a slice of the
    vertex buffer.
    """
    return [tuple(flatten(group)) for group in zip(*args)]
|
Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer.
|
373,833
|
def wait_port_open(server, port, timeout=None):
    """Wait for a network service to appear.

    @param server: host to connect to (str)
    @param port: port (int)
    @param timeout: in seconds, if None or 0 wait forever
    @return: True if the port opened, False on timeout or name-resolution
        failure; if timeout is falsy this may only return True or throw an
        unhandled network exception
    """
    import socket
    import time
    sleep_s = 0
    end = None
    if timeout:
        end = time.time() + timeout
    while True:
        logging.debug("Sleeping for %s second(s)", sleep_s)
        time.sleep(sleep_s)
        s = socket.socket()
        try:
            if end is not None:
                next_timeout = end - time.time()
                if next_timeout < 0:
                    return False
                s.settimeout(next_timeout)
            logging.info("connect %s %d", server, port)
            s.connect((server, port))
        except ConnectionError as err:
            # Service not up yet; back off to 1-second polling and retry.
            logging.debug("ConnectionError %s", err)
            if sleep_s == 0:
                sleep_s = 1
        except socket.gaierror as err:
            # Name resolution failed; retrying will not help.
            logging.debug("gaierror %s", err)
            return False
        except socket.timeout:
            if timeout:
                return False
        except TimeoutError:
            raise
        else:
            logging.info("wait_port_open: port %s:%s is open", server, port)
            return True
        finally:
            # Bug fix: the original only closed the socket on success and
            # leaked one per failed attempt; always close before retrying.
            s.close()
|
Wait for network service to appear
@param server: host to connect to (str)
@param port: port (int)
@param timeout: in seconds, if None or 0 wait forever
@return: True of False, if timeout is None may return only True or
throw unhandled network exception
|
373,834
|
def get_image(self, size=SIZE_EXTRA_LARGE):
    """Return the user's avatar.

    size can be one of:
        SIZE_EXTRA_LARGE
        SIZE_LARGE
        SIZE_MEDIUM
        SIZE_SMALL
    """
    response = self._request(self.ws_prefix + ".getInfo", True)
    images = _extract_all(response, "image")
    return images[size]
|
Returns the user's avatar
size can be one of:
SIZE_EXTRA_LARGE
SIZE_LARGE
SIZE_MEDIUM
SIZE_SMALL
|
373,835
|
def _handle_pagerange(pagerange):
    """Yield start and end pages from a DfR pagerange field.

    Parameters
    ----------
    pagerange : str or unicode
        DfR-style pagerange, e.g. "pp. 435-444".

    Returns
    -------
    start : str
        Start page.
    end : str
        End page.
    """
    try:
        # Raw string: avoids relying on Python's leniency for unrecognized
        # escape sequences (a DeprecationWarning on modern interpreters).
        pr = re.compile(r"pp\.\s([0-9]+)\-([0-9]+)")
        start, end = re.findall(pr, pagerange)[0]
    except IndexError:
        # No parsable range found; report page 0 for both ends.
        start = end = 0
    return unicode(start), unicode(end)
|
Yields start and end pages from DfR pagerange field.
Parameters
----------
pagerange : str or unicode
DfR-style pagerange, e.g. "pp. 435-444".
Returns
-------
start : str
Start page.
end : str
End page.
|
373,836
|
def add_port_profile(self, profile_name, vlan_id, device_id):
    """Add a port profile and its vlan_id to the table.

    Only inserts when no profile already exists for this vlan/device pair.

    :return: the newly created PortProfile, or None when one already existed
    """
    if not self.get_port_profile_for_vlan(vlan_id, device_id):
        port_profile = ucsm_model.PortProfile(profile_id=profile_name,
                                              vlan_id=vlan_id,
                                              device_id=device_id,
                                              created_on_ucs=False)
        with self.session.begin(subtransactions=True):
            self.session.add(port_profile)
        return port_profile
|
Adds a port profile and its vlan_id to the table.
|
373,837
|
def ConsultarCertificacionUltNroOrden(self, pto_emision=1):
    """Query the last order number registered for CG (grain certification).

    Stores the result in ``self.NroOrden`` and returns True.

    NOTE(review): the auth and response dictionary key literals were lost
    in this copy of the source - restore them from upstream.
    """
    ret = self.client.cgConsultarUltimoNroOrden(
        auth={
            : self.Token, : self.Sign,
            : self.Cuit, },
        ptoEmision=pto_emision,
    )
    ret = ret[]
    self.__analizar_errores(ret)
    self.NroOrden = ret[]
    return True
|
Consulta el último No de orden registrado para CG
|
373,838
|
def mbar_log_W_nk(u_kn, N_k, f_k):
    """Calculate the log weight matrix.

    Parameters
    ----------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
    N_k : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state
    f_k : np.ndarray, shape=(n_states), dtype='float'
        The reduced free energies of each state

    Returns
    -------
    logW_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
        The normalized log weights.

    Notes
    -----
    Equation (9) in the JCP MBAR paper.
    """
    u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
    unnormalized = f_k - u_kn.T
    # Normalize per sample over states, weighting by sample counts.
    log_denominator_n = logsumexp(unnormalized, b=N_k, axis=1)
    return unnormalized - log_denominator_n[:, np.newaxis]
|
Calculate the log weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
logW_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized log weights.
Notes
-----
Equation (9) in JCP MBAR paper.
|
373,839
|
def _enum_member_error(err, eid, name, value, bitmask):
    """Build the exception for a failed enum-member operation.

    Looks up the exception class and message template for *err* in
    ENUM_ERROR_MAP and formats it with the enum's details.

    NOTE(review): the format-string literal was lost in this copy of the
    source - restore it from upstream.
    """
    exception, msg = ENUM_ERROR_MAP[err]
    enum_name = idaapi.get_enum_name(eid)
    return exception((
    ).format(
        enum_name,
        name,
        value,
        bitmask,
        msg
    ))
|
Format enum member error.
|
373,840
|
def set_next_week_day(val, week_day, iso=False):
    """Set week day.

    New date will be greater or equal than input date.

    :param val: datetime or date
    :type val: datetime.datetime | datetime.date
    :param week_day: Week day to set
    :type week_day: int
    :param iso: week_day in ISO format, or not
    :type iso: bool
    :return: datetime.datetime | datetime.date
    """
    # sign=1 moves forward in time to reach the requested week day.
    return _set_week_day(val, week_day,
                         val.isoweekday() if iso else val.weekday(), sign=1)
|
Set week day.
New date will be greater or equal than input date.
:param val: datetime or date
:type val: datetime.datetime | datetime.date
:param week_day: Week day to set
:type week_day: int
:param iso: week_day in ISO format, or not
:type iso: bool
:return: datetime.datetime | datetime.date
|
373,841
|
def _register_bindings(self, data):
    """connection_handler method which is called when we connect to pusher.

    Responsible for binding callbacks to channels before we connect.

    :param data: connection payload from pusher (unused here)
    :return:
    """
    self._register_diff_order_book_channels()
    self._register_live_orders_channels()
    self._register_live_trades_channels()
    self._register_order_book_channels()
|
connection_handler method which is called when we connect to pusher.
Responsible for binding callbacks to channels before we connect.
:return:
|
373,842
|
def blend_html_colour_to_white(html_colour, alpha):
    """Alpha-blend an HTML colour onto a white background.

    :param html_colour: Colour string like FF552B or #334455
    :param alpha: blend factor in [0, 1]; 1 keeps the colour, 0 gives white
    :return: blended colour, with a leading ``#`` iff the input had one
    """
    html_colour = html_colour.upper()
    has_hash = html_colour.startswith('#')
    if has_hash:
        html_colour = html_colour[1:]
    r = int(html_colour[0:2], 16)
    g = int(html_colour[2:4], 16)
    b = int(html_colour[4:6], 16)
    # Standard alpha compositing against a white (255) background.
    r = int(alpha * r + (1 - alpha) * 255)
    g = int(alpha * g + (1 - alpha) * 255)
    b = int(alpha * b + (1 - alpha) * 255)
    out = '{:02X}{:02X}{:02X}'.format(r, g, b)
    if has_hash:
        out = '#' + out
    return out
|
:param html_colour: Colour string like FF552B or #334455
:param alpha: Alpha value
:return: Html colour alpha blended onto white
|
373,843
|
def classify(self, text=u):
    """Predict the language of the given text.

    :param text: Unicode text to be classified.
    :return: the detected language label

    NOTE(review): the default-text, tokenize-mode, unknown-language and
    result-key string literals were lost in this copy of the source -
    restore them from upstream.
    """
    text = self.lm.normalize(text)
    tokenz = LM.tokenize(text, mode=)
    result = self.lm.calculate(doc_terms=tokenz)
    # Low-confidence results are mapped to the unknown-language label.
    if self.unk and self.lm.karbasa(result) < self.min_karbasa:
        lang =
    else:
        lang = result[]
    return lang
|
Predicts the Language of a given text.
:param text: Unicode text to be classified.
|
373,844
|
def update_resources_from_resfile(self, srcpath, types=None, names=None,
                                  languages=None):
    """Update or add resources from dll/exe file srcpath.

    types = a list of resource types to update (None = all)
    names = a list of resource names to update (None = all)
    languages = a list of resource languages to update (None = all)
    """
    UpdateResourcesFromResFile(self.filename, srcpath, types, names,
                               languages)
|
Update or add resources from dll/exe file srcpath.
types = a list of resource types to update (None = all)
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
|
373,845
|
def mount_status_send(self, target_system, target_component, pointing_a, pointing_b, pointing_c, force_mavlink1=False):
    """Message with some status from APM to GCS about camera or antenna mount.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    pointing_a       : pitch(deg*100) (int32_t)
    pointing_b       : roll(deg*100) (int32_t)
    pointing_c       : yaw(deg*100) (int32_t)
    """
    return self.send(self.mount_status_encode(target_system, target_component, pointing_a, pointing_b, pointing_c), force_mavlink1=force_mavlink1)
|
Message with some status from APM to GCS about camera or antenna mount
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
pointing_a : pitch(deg*100) (int32_t)
pointing_b : roll(deg*100) (int32_t)
pointing_c : yaw(deg*100) (int32_t)
|
373,846
|
def set(cls, obj, keys, value, fill_list_value=None):
    """Set *value* at the dotted *keys* path on *obj*.

    If any of the given keys does not exist, create the intermediate
    containers (dicts for string keys, lists for numeric keys).

    :param obj: root container (dict or list) to modify
    :param keys: dot-separated key path, e.g. ``"a.b.0.c"``
    :param value: value to store at the final key
    :param fill_list_value: template copied into list gaps when extending
    :raises cls.Missing: when a path segment cannot be resolved or created
    """
    current = obj
    keys_list = keys.split(".")
    for idx, key in enumerate(keys_list, 1):
        if type(current) == list:
            # List containers require integer indices.
            try:
                key = int(key)
            except ValueError:
                raise cls.Missing(key)
        try:
            if idx == len(keys_list):
                # Final segment: write the value (padding lists as needed).
                if type(current) == list:
                    safe_list_set(
                        current,
                        key,
                        lambda: copy.copy(fill_list_value),
                        value
                    )
                else:
                    current[key] = value
                return
            if type(key) == int:
                try:
                    current[key]
                except IndexError:
                    # Missing list slot: create the container type implied
                    # by the NEXT key in the path.
                    cnext = container_for_key(keys_list[idx])
                    if type(cnext) == list:
                        def fill_with():
                            return []
                    else:
                        def fill_with():
                            return {}
                    safe_list_set(
                        current,
                        key,
                        fill_with,
                        [] if type(cnext) == list else {}
                    )
            else:
                if key not in current:
                    # Missing dict key: create the intermediate container.
                    current[key] = container_for_key(keys_list[idx])
            current = current[key]
        except (IndexError, KeyError, TypeError):
            raise cls.Missing(key)
|
sets the value for the given keys on obj. if any of the given
keys does not exist, create the intermediate containers.
|
373,847
|
def first_spark_call():
    """Return a CallSite representing the first Spark call in the current
    call stack, or None when the stack is empty.
    """
    frames = traceback.extract_stack()
    if not frames:
        return None
    # The innermost frame lives inside the Spark tree; its directory
    # identifies which frames belong to Spark itself.
    innermost_file = frames[-1][0]
    sparkpath = os.path.dirname(innermost_file)
    first_spark_frame = len(frames) - 1
    for idx, (fname, _lineno, _fun, _what) in enumerate(frames):
        if fname.startswith(sparkpath):
            first_spark_frame = idx
            break
    if first_spark_frame == 0:
        fname, lineno, fun, _what = frames[0]
        return CallSite(function=fun, file=fname, linenum=lineno)
    # Report the Spark function but the user-side file/line that called it.
    _sfile, _sline, sfun, _swhat = frames[first_spark_frame]
    ufile, uline, _ufun, _uwhat = frames[first_spark_frame - 1]
    return CallSite(function=sfun, file=ufile, linenum=uline)
|
Return a CallSite representing the first Spark call in the current call stack.
|
373,848
|
def is_recording():
    """Get status on recording/not recording.

    Returns
    -------
    Current state of recording (bool).
    """
    curr = ctypes.c_bool()
    # Query the autograd recording flag from the native library.
    check_call(_LIB.MXAutogradIsRecording(ctypes.byref(curr)))
    return curr.value
|
Get status on recording/not recording.
Returns
-------
Current state of recording.
|
373,849
|
def uuids(self):
    """Yield the uuid of each item of ``self._seq``.

    File instances contribute their ``uuid`` attribute; plain strings are
    assumed to already be uuids and are yielded as-is.

    NOTE(review): the ValueError message format string was lost in this
    copy of the source - restore it from upstream.
    """
    for f in self._seq:
        if isinstance(f, File):
            yield f.uuid
        elif isinstance(f, six.string_types):
            yield f
        else:
            raise ValueError(
                .format(type(f)))
|
Extract uuid from each item of specified ``seq``.
|
373,850
|
def satosa_logging(logger, level, message, state, **kwargs):
    """Log *message* prefixed with the session ID taken from *state*.

    :type logger: logging
    :type level: int
    :type message: str
    :type state: satosa.state.State
    :param logger: Logger to use
    :param level: Logger level (ex: logging.DEBUG/logging.WARN/...)
    :param message: Message
    :param state: The current state
    :param kwargs: set exc_info=True to get an exception stack trace in the log
    """
    session_id = "UNKNOWN"
    if state is not None:
        try:
            session_id = state[LOGGER_STATE_KEY]
        except KeyError:
            # First log line for this state: mint and remember a session id.
            session_id = uuid4().urn
            state[LOGGER_STATE_KEY] = session_id
    logger.log(level, "[{id}] {msg}".format(id=session_id, msg=message), **kwargs)
|
Adds a session ID to the message.
:type logger: logging
:type level: int
:type message: str
:type state: satosa.state.State
:param logger: Logger to use
:param level: Logger level (ex: logging.DEBUG/logging.WARN/...)
:param message: Message
:param state: The current state
:param kwargs: set exc_info=True to get an exception stack trace in the log
|
373,851
|
def loop(self, *tags):
    """Iterate over the requested tags for every word in the Sentence.

    For example, Sentence.loop(POS, LEMMA) yields tuples of the
    part-of-speech tags and lemmata. Possible tags: WORD, LEMMA, POS,
    CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. Any order
    or combination of tags can be supplied.
    """
    for index in range(len(self.words)):
        values = [self.get(index, tag=t) for t in tags]
        yield tuple(values)
|
Iterates over the tags in the entire Sentence,
For example, Sentence.loop(POS, LEMMA) yields tuples of the part-of-speech tags and lemmata.
Possible tags: WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag.
Any order or combination of tags can be supplied.
|
373,852
|
def InitSiteCheck(self):
    """Make an interactive grid in which users can edit site names
    as well as which location a site belongs to.

    NOTE(review): several string literals (propagated column names, table
    names, button labels/names) were lost in this copy of the source -
    restore them from upstream.
    """
    # Push averaged values up from samples to sites before editing.
    self.contribution.propagate_average_up(cols=[, , ],
                                           target_df_name=,
                                           source_df_name=)
    self.contribution.propagate_lithology_cols()
    site_df = self.contribution.tables[].df
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
                                            , , self.panel,
                                            main_frame=self.main_frame)
    self.grid_frame.exitButton.SetLabel()
    grid = self.grid_frame.grid
    # Continue moves on to the location check; Back returns to samples.
    self.grid_frame.Bind(wx.EVT_BUTTON,
                         lambda event: self.onContinue(event, grid, self.InitLocCheck),
                         self.grid_frame.exitButton)
    self.backButton = wx.Button(self.grid_frame.panel, id=-1, label=,
                                name=)
    self.Bind(wx.EVT_BUTTON,
              lambda event: self.onbackButton(event, self.InitSampCheck),
              self.backButton)
    self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
    self.grid_frame.do_fit(None, self.min_size)
    self.grid_frame.Centre()
    return
|
make an interactive grid in which users can edit site names
as well as which location a site belongs to
|
373,853
|
def failed_hosts(self) -> Dict[str, "MultiResult"]:
    """Return the subset of host results that failed to complete the task."""
    failed = {}
    for host, host_result in self.result.items():
        if host_result.failed:
            failed[host] = host_result
    return failed
|
Hosts that failed to complete the task
|
373,854
|
def run(command, *args):
    """Dispatch *command* to the matching subcommand module and run it.

    Returns the subcommand's result, or 1 for an unknown command.

    NOTE(review): the command-name string literals were lost in this copy
    of the source - restore them from upstream.
    """
    if command == :
        return clusters.run(command, *args)
    elif command == :
        return topologies.run(command, *args)
    elif command == :
        return physicalplan.run_containers(command, *args)
    elif command == :
        return physicalplan.run_metrics(command, *args)
    elif command == :
        return logicalplan.run_components(command, *args)
    elif command == :
        return logicalplan.run_spouts(command, *args)
    elif command == :
        return logicalplan.run_bolts(command, *args)
    elif command == :
        return help.run(command, *args)
    elif command == :
        return version.run(command, *args)
    return 1
|
run command
|
373,855
|
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
    """Map the Gaussian gradient magnitude of *image* into (0, 1].

    Flat areas come out near 1, borders near 0 — a preprocessing step for
    morphological_geodesic_active_contour. *alpha* controls the steepness
    of the inversion; *sigma* is the Gaussian smoothing width.
    """
    # NOTE(review): the ``mode=`` string literal was stripped from this
    # snippet (scikit-image uses mode='nearest' here); restore before use.
    gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode=)
    return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
|
Inverse of gradient magnitude.
Compute the magnitude of the gradients in the image and then inverts the
result in the range [0, 1]. Flat areas are assigned values close to 1,
while areas close to borders are assigned values close to 0.
This function or a similar one defined by the user should be applied over
the image as a preprocessing step before calling
`morphological_geodesic_active_contour`.
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume.
alpha : float, optional
Controls the steepness of the inversion. A larger value will make the
transition between the flat areas and border areas steeper in the
resulting array.
sigma : float, optional
Standard deviation of the Gaussian filter applied over the image.
Returns
-------
gimage : (M, N) or (L, M, N) array
Preprocessed image (or volume) suitable for
`morphological_geodesic_active_contour`.
|
373,856
|
def update_ssl_termination(self, securePort=None, enabled=None,
        secureTrafficOnly=None):
    """Delegate an SSL-termination settings update to the manager.

    Only the supplied settings change; existing certificates/keys are
    left untouched.
    """
    manager = self.manager
    return manager.update_ssl_termination(
        self,
        securePort=securePort,
        enabled=enabled,
        secureTrafficOnly=secureTrafficOnly,
    )
|
Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys.
|
373,857
|
def handle_left_double_click(self, info):
    """Toggle focus on a left double-click of the VideoWidget.

    Calls cb_focus() when currently unfocused, cb_unfocus() otherwise,
    then flips the stored state.
    """
    # Idiom fix: use `not self.double_click_focus` instead of `== False`
    # and `not(...)`; behavior is unchanged.
    if not self.double_click_focus:
        print(self.pre, "handle_left_double_click: focus on")
        self.cb_focus()
    else:
        print(self.pre, "handle_left_double_click: focus off")
        self.cb_unfocus()
    # Remember the new state for the next double-click.
    self.double_click_focus = not self.double_click_focus
|
Whatever we want to do, when the VideoWidget has been double-clicked with the left button
|
373,858
|
def _parse_guild_disband_info(self, info_container):
    """Extract the guild's disband condition and date, when present.

    *info_container* is the parsed information container tag; if the
    disband pattern does not match, nothing is set.
    """
    match = disband_regex.search(info_container.text)
    if match is None:
        return
    self.disband_condition = match.group(2)
    self.disband_date = parse_tibia_date(match.group(1).replace("\xa0", " "))
|
Parses the guild's disband info, if available.
Parameters
----------
info_container: :class:`bs4.Tag`
The parsed content of the information container.
|
373,859
|
def register(self, resource_class, content_type, configuration=None):
    """Register a representer factory for (resource class, content type).

    For mapping representers, also creates or refines the attribute
    mapping, merging *configuration* into any mapping inherited from a
    base class.

    NOTE(review): the two ValueError message literals were stripped from
    this snippet.
    """
    if not issubclass(resource_class, Resource):
        raise ValueError(
             % resource_class)
    if not content_type in self.__rpr_classes:
        raise ValueError(
             % content_type)
    rpr_cls = self.__rpr_classes[content_type]
    # Remember the factory callable for this (class, content type) pair.
    self.__rpr_factories[(resource_class, content_type)] = \
        rpr_cls.create_from_resource_class
    if issubclass(rpr_cls, MappingResourceRepresenter):
        mp_reg = self.__mp_regs[content_type]
        mp = mp_reg.find_mapping(resource_class)
        if mp is None:
            # No mapping yet anywhere in the hierarchy: create a fresh one.
            new_mp = mp_reg.create_mapping(resource_class, configuration)
        elif not configuration is None:
            if resource_class is mp.mapped_class:
                # Exact match: merge the new configuration in place.
                mp.configuration.update(configuration)
                new_mp = mp
            else:
                # Inherited mapping: clone it for the subclass, then merge.
                new_mp = mp_reg.create_mapping(
                                        resource_class,
                                        configuration=mp.configuration)
                new_mp.configuration.update(configuration)
        elif not resource_class is mp.mapped_class:
            # Inherited mapping, no new configuration: clone as-is.
            new_mp = mp_reg.create_mapping(resource_class,
                                           configuration=mp.configuration)
        else:
            # Exact mapping already registered and nothing to merge.
            new_mp = None
        if not new_mp is None:
            mp_reg.set_mapping(new_mp)
|
Registers a representer factory for the given combination of resource
class and content type.
:param configuration: representer configuration. A default instance
will be created if this is not given.
:type configuration:
:class:`everest.representers.config.RepresenterConfiguration`
|
373,860
|
def speaker_durations(utterances: List[Utterance]) -> List[Tuple[str, int]]:
    """Group *utterances* by speaker and total each speaker's duration.

    Returns a list of (speaker name, total duration) tuples.
    """
    by_speaker = make_speaker_utters(utterances)
    return [(speaker, total_duration(utters))
            for speaker, utters in by_speaker.items()]
|
Takes a list of utterances and itemizes them by speaker, returning a
list of tuples of the form (Speaker Name, duration).
|
373,861
|
def sof(self):
    """Return the first start-of-frame (SOFn) marker in this sequence.

    Raises KeyError when no SOF marker is present.
    """
    for marker in self._markers:
        if marker.marker_code in JPEG_MARKER_CODE.SOF_MARKER_CODES:
            return marker
    raise KeyError()
|
First start of frame (SOFn) marker in this sequence.
|
373,862
|
def execute(self, env, args):
    """Create a new task (optionally cloned) and open its config editor.

    *env* is the runtime Environment; *args* comes from the arg parser.

    NOTE(review): both FocusError message literals were stripped from this
    snippet (``u`` prefixes remain).
    """
    task_name = args.task_name
    clone_task = args.clone_task
    if not env.task.create(task_name, clone_task):
        raise errors.FocusError(u
                                .format(task_name))
    # Unless suppressed, drop the user into an editor for the new config.
    if not args.skip_edit:
        task_config = env.task.get_config_path(task_name)
        if not _edit_task_config(env, task_config, confirm=True):
            raise errors.FocusError(u
                                    .format(task_config))
|
Creates a new task.
`env`
Runtime ``Environment`` instance.
`args`
Arguments object from arg parser.
|
373,863
|
def swap_twitter_subject(subject, body):
    """Replace a boilerplate 'Tweet from...' subject with the first
    meaningful line found in *body*; returns (subject, body).

    NOTE(review): the ``startswith()`` prefix, the ``split()`` separator
    and the regex pattern literals were stripped from this snippet.
    """
    if subject.startswith():
        lines = body.split()
        for idx, line in enumerate(lines):
            if re.match(r, line) is not None:
                try:
                    # Take the line following the matched marker line.
                    subject = lines[idx + 1]
                except IndexError:
                    pass
                break
    return subject, body
|
If subject starts from 'Tweet from...'
then we need to get first meaning line from the body.
|
373,864
|
def get_profile_dir ():
    """Return the directory holding all Firefox profiles for the user.

    NOTE(review): the ``os.name ==`` comparison literals ('nt' / 'posix')
    were stripped from this snippet; ``dirpath`` is unbound for any other
    os.name — confirm against the original source.
    """
    if os.name == :
        # Windows: %APPDATA%\Mozilla\Firefox\Profiles
        basedir = unicode(os.environ["APPDATA"], nt_filename_encoding)
        dirpath = os.path.join(basedir, u"Mozilla", u"Firefox", u"Profiles")
    elif os.name == :
        # POSIX: ~/.mozilla/firefox
        basedir = unicode(os.environ["HOME"])
        dirpath = os.path.join(basedir, u".mozilla", u"firefox")
    return dirpath
|
Return path where all profiles of current user are stored.
|
373,865
|
def setItemStyle(self, itemStyle):
    """Set this widget item's display style.

    When switching to the Group style with no icon yet set, install the
    default group (and expanded-group) icons.

    NOTE(review): the resource-name literals inside ``find()`` were
    stripped from this snippet.
    """
    self._itemStyle = itemStyle
    if itemStyle == XGanttWidgetItem.ItemStyle.Group and \
       self.icon(0).isNull():
        ico = projexui.resources.find()
        expand_ico = projexui.resources.find()
        self.setIcon(0, QIcon(ico))
        self.setExpandedIcon(0, QIcon(expand_ico))
|
Sets the item style that will be used for this widget. If you are
trying to set a style on an item that has children, make sure to turn
off the useGroupStyleWithChildren option, or it will always display as
a group.
:param itemStyle | <XGanttWidgetItem.ItemStyle>
|
373,866
|
def clamped(self, point_or_rect):
    """Return *point_or_rect* constrained by this rectangle.

    Points are clipped elementwise into [mins, maxes].

    NOTE(review): the Rect branch returns min(mins)/max(maxes), i.e. the
    bounding *union* of the two rects rather than an intersection-style
    clamp — confirm this is the intended semantics.
    """
    if isinstance(point_or_rect, Rect):
        return Rect(np.minimum(self.mins, point_or_rect.mins),
                    np.maximum(self.maxes, point_or_rect.maxes))
    return np.clip(point_or_rect, self.mins, self.maxes)
|
Returns the point or rectangle clamped to this rectangle.
|
373,867
|
def _bracket_exact_exec(self, symbol):
    """Resolve *symbol* against the module's executables, its interfaces,
    then the builtin cache, returning the first hit; falls back to the
    module's dependency elements.
    """
    module = self.context.module
    for collection in (module.executables, module.interfaces, cache.builtin):
        if symbol in collection:
            return collection[symbol]
    return module.get_dependency_element(symbol)
|
Checks builtin, local and global executable collections for the
specified symbol and returns it as soon as it is found.
|
373,868
|
def save_project(self, project):
    """Sync a saved/updated project with the datastore.

    Active projects are created (if missing) and their name/institute
    refreshed; inactive projects are deleted if present.
    """
    pid = project.pid
    if not project.is_active:
        logger.debug("project is not active")
        if self.get_project(pid) is not None:
            self._delete_project(pid)
        return
    logger.debug("project is active")
    if self.get_project(pid) is None:
        self._create_project(pid)
    # Datastore name field is limited to 40 characters.
    name = self._truncate(project.name, 40)
    self._set_project(pid, name, project.institute)
|
Called when project is saved/updated.
|
373,869
|
def vline_score(self, x, ymin, ymax):
    """Look up the precomputed vertical-line score for column *x* over
    rows [ymin, ymax] (precomputed for speed)."""
    return self._vline_score[x, ymin, ymax]
|
Returns the number of unbroken paths of qubits
>>> [(x,y,1,k) for y in range(ymin,ymax+1)]
for :math:`k = 0,1,\cdots,L-1`. This is precomputed for speed.
|
373,870
|
def convert_to(obj, ac_ordered=False, ac_dict=None, **options):
    """Recursively convert mapping objects to dicts (or *ac_dict*).

    :param obj: A mapping object, list-like object or other primitive
    :param ac_ordered: Use OrderedDict instead of dict to keep item order
    :param ac_dict: Callable used to build the mapping object
    :param options: Extra keyword arguments passed through recursion
    :return: Converted object (primitives are returned unchanged)
    """
    options.update(ac_ordered=ac_ordered, ac_dict=ac_dict)
    if anyconfig.utils.is_dict_like(obj):
        converted = _make_recur(obj, convert_to, **options)
    elif anyconfig.utils.is_list_like(obj):
        converted = _make_iter(obj, convert_to, **options)
    else:
        converted = obj
    return converted
|
Convert a mapping objects to a dict or object of 'to_type' recursively.
Borrowed basic idea and implementation from bunch.unbunchify. (bunch is
distributed under MIT license same as this.)
:param obj: A mapping objects or other primitive object
:param ac_ordered: Use OrderedDict instead of dict to keep order of items
:param ac_dict: Callable to convert 'obj' to mapping object
:param options: Optional keyword arguments.
:return: A dict or OrderedDict or object of 'cls'
>>> OD = anyconfig.compat.OrderedDict
>>> convert_to(OD((('a', 1) ,)), cls=dict)
{'a': 1}
>>> convert_to(OD((('a', OD((('b', OD((('c', 1), ))), ))), )), cls=dict)
{'a': {'b': {'c': 1}}}
|
373,871
|
def get_child_repository_ids(self, repository_id):
    """Return the Ids of the children of the given repository.

    Prefers the catalog session when one is configured; otherwise falls
    back to the hierarchy session.
    """
    catalog_session = self._catalog_session
    if catalog_session is not None:
        return catalog_session.get_child_catalog_ids(catalog_id=repository_id)
    return self._hierarchy_session.get_children(id_=repository_id)
|
Gets the ``Ids`` of the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
373,872
|
def _preoptimize_model(self, initials, method):
    """Search a few candidate regression-coefficient starting vectors and
    return the one with the lowest negative log-likelihood.

    :param initials: np.array of initial values (unused in the visible body
        — presumably consumed by the caller; confirm)
    :param method: 'MLE' or 'PML' (unused in the visible body)
    :return: np.array of the best starting latent-variable values found
    """
    # Candidate constant coefficient vectors for the X regressors.
    start_values = []
    start_values.append(np.ones(len(self.X_names))*-2.0)
    start_values.append(np.ones(len(self.X_names))*-3.0)
    start_values.append(np.ones(len(self.X_names))*-4.0)
    start_values.append(np.ones(len(self.X_names))*-5.0)
    best_start = self.latent_variables.get_z_starting_values()
    best_lik = self.neg_loglik(self.latent_variables.get_z_starting_values())
    proposal_start = best_start.copy()
    for start in start_values:
        # Only the leading X-coefficient slots are replaced in each trial.
        proposal_start[:len(self.X_names)] = start
        proposal_likelihood = self.neg_loglik(proposal_start)
        if proposal_likelihood < best_lik:
            best_lik = proposal_likelihood
            best_start = proposal_start.copy()
    return best_start
|
Preoptimizes the model by estimating a static model, then a quick search of good dynamic parameters
Parameters
----------
initials : np.array
A vector of inital values
method : str
One of 'MLE' or 'PML' (the optimization options)
Returns
----------
Y_exp : np.array
Vector of past values and predictions
|
373,873
|
def getcellvalue(self, window_name, object_name, row_index, column=0):
    """Return the value of a table cell identified by row and column.

    *window_name*/*object_name* follow LDTP naming (full name, convention
    or Unix glob); raises LdtpServerException for a disabled object or an
    out-of-range index.

    NOTE(review): the exception message literals were stripped from this
    snippet (bare ``( % row_index)`` etc.).
    """
    object_handle = self._get_object_handle(window_name, object_name)
    if not object_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    count = len(object_handle.AXRows)
    if row_index < 0 or row_index > count:
        raise LdtpServerException( % row_index)
    cell = object_handle.AXRows[row_index]
    count = len(cell.AXChildren)
    if column < 0 or column > count:
        raise LdtpServerException( % column)
    obj = cell.AXChildren[column]
    # Skip over an AXColumn wrapper if one is present.
    if not re.search("AXColumn", obj.AXRole):
        obj = cell.AXChildren[column]
    return obj.AXValue
|
Get cell value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_index: Row index to get
@type row_index: integer
@param column: Column index to get, default value 0
@type column: integer
@return: cell value on success.
@rtype: string
|
373,874
|
def batched_expiration_maintenance(self, elapsed_time):
    """Batched version of expiration_maintenance() (Cython-backed).

    Runs as many refresh iterations as *elapsed_time* covers, updates the
    zero-fraction estimate and the count estimate, and returns the time
    interval actually processed.
    """
    num_iterations = self.num_batched_maintenance(elapsed_time)
    # maintenance() advances the refresh head and reports nonzero cells.
    self.refresh_head, nonzero = maintenance(self.cellarray, self.nbr_bits, num_iterations, self.refresh_head)
    if num_iterations != 0:
        # Fraction of touched cells that were nonzero.
        self.estimate_z = float(nonzero) / float(num_iterations)
        self._estimate_count()
    processed_interval = num_iterations * self.compute_refresh_time()
    return processed_interval
|
Batched version of expiration_maintenance()
Cython version
|
373,875
|
def normalize_arxiv_category(category):
    """Normalize an arXiv category to its schema-compliant form.

    Maps obsolete categories to their current equivalents and fixes
    capitalization; unknown categories are returned unchanged.

    NOTE(review): the ``replace(, )`` literals ('-' and '.') were
    stripped from this snippet.
    """
    category = _NEW_CATEGORIES.get(category.lower(), category)
    for valid_category in valid_arxiv_categories():
        if (category.lower() == valid_category.lower() or
                category.lower().replace(, ) == valid_category.lower()):
            return valid_category
    return category
|
Normalize arXiv category to be schema compliant.
This properly capitalizes the category and replaces the dash by a dot if
needed. If the category is obsolete, it also gets converted it to its
current equivalent.
Example:
>>> from inspire_schemas.utils import normalize_arxiv_category
>>> normalize_arxiv_category('funct-an') # doctest: +SKIP
u'math.FA'
|
373,876
|
def func_from_info(self):
    """Resolve and return the callable described by ``self.funcinfo``.

    Supports model-class/model-instance methods and plain module-level
    functions; raises ValueError for an unknown functype.

    NOTE(review): the ``info[]`` key literals and the functype comparison
    literals were stripped from this snippet.
    """
    info = self.funcinfo
    functype = info[]
    if functype in [, , ]:
        # Model-based callables: resolve the class, then class or instance member.
        the_modelclass = get_module_member_by_dottedpath(info[])
        if functype == :
            the_modelobject = the_modelclass.objects.get(pk=info[])
            the_callable = get_member(the_modelobject, info[])
        else:
            the_callable = get_member(the_modelclass, info[])
        return the_callable
    elif functype == :
        # Plain module-level function.
        mod = import_module(info[])
        the_callable = get_member(mod, info[])
        return the_callable
    else:
        raise ValueError(f"Unknown functype '{functype} in task {self.pk} ({self.label})")
|
Find and return a callable object from a task info dictionary
|
373,877
|
def exit_fullscreen(self):
    """Leave terminal fullscreen mode and restore the normal cursor."""
    write = self.term.stream.write
    write(self.term.exit_fullscreen)
    write(self.term.normal_cursor)
|
Invoke before printing out anything.
This method should be replaced by or merged to blessings package
|
373,878
|
def has_successor(self, u, v, t=None):
    """Return True if node u has successor v at time t.

    Equivalent to the edge u->v existing; with t=None the flattened
    graph is consulted.
    """
    # A successor relationship is exactly an interaction u -> v.
    return self.has_interaction(u, v, t)
|
Return True if node u has successor v at time t (optional).
This is true if graph has the edge u->v.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : snapshot id (default=None)
If None will be returned the presence of the interaction on the flattened graph.
|
373,879
|
def one_or_more(
    schema: dict, unique_items: bool = True, min: int = 1, max: int = None
) -> dict:
    """Build a schema accepting one item matching *schema*, or an array
    of such items.

    :param schema: The item schema
    :param unique_items: Whether array items must be unique
    :param min: Array ``minItems``
    :param max: Array ``maxItems`` (omitted when falsy)
    """
    array_form = dict(
        type="array",
        items=schema,
        minItems=min,
        uniqueItems=unique_items,
    )
    if max:
        array_form["maxItems"] = max
    return {"oneOf": [array_form, schema]}
|
Helper function to construct a schema that validates items matching
`schema` or an array containing items matching `schema`.
:param schema: The schema to use
:param unique_items: Flag if array items should be unique
:param min: Correlates to ``minLength`` attribute of JSON Schema array
:param max: Correlates to ``maxLength`` attribute of JSON Schema array
|
373,880
|
def _calc_inst_pmf(self):
    """Calculate the epsilon-greedy instrumental sampling distribution.

    Mixes an asymptotically-optimal stratum pmf (weighted by the current
    F-measure estimate) with the stratum weights, using mixing factor
    epsilon, then stores it (per-step history or overwrite).
    """
    t = self.t_
    epsilon = self.epsilon
    alpha = self.alpha
    preds = self._preds_avg_in_strata
    weights = self.strata.weights_[:,np.newaxis]
    p1 = self._BB_model.theta_[:,np.newaxis]
    p0 = 1 - p1
    # Before any updates, fall back to the initial F-measure guess.
    if t==0:
        F = self._F_guess[self.opt_class]
    else:
        F = self._estimate[t - 1, self.opt_class]
    # Replace non-finite estimates by the guess so the pmf stays valid.
    nonfinite = ~np.isfinite(F)
    F[nonfinite] = self._F_guess[self.opt_class][nonfinite]
    sqrt_arg = np.sum(preds * (alpha**2 * F**2 * p0 + (1 - F)**2 * p1) + \
                      (1 - preds) * (1 - alpha)**2 * F**2 * p1, \
                      axis=1, keepdims=True)
    inst_pmf = weights * np.sqrt(sqrt_arg)
    # Normalize, then epsilon-mix with the stratum weights.
    inst_pmf /= np.sum(inst_pmf)
    inst_pmf *= (1 - epsilon)
    inst_pmf += epsilon * weights
    if self.record_inst_hist:
        self._inst_pmf[:,t] = inst_pmf.ravel()
    else:
        self._inst_pmf = inst_pmf.ravel()
|
Calculate the epsilon-greedy instrumental distribution
|
373,881
|
def reloadFileAtIndex(self, itemIndex, rtiClass=None):
    """Reload the file item at *itemIndex* by removing its repo tree item
    and inserting a fresh one at the same position.

    The replacement item is of type *rtiClass*; when None, the old
    item's class is reused.
    """
    parentIndex = itemIndex.parent()
    oldItem = self.getItem(itemIndex)
    position = oldItem.childNumber()
    fileName = oldItem.fileName
    itemClass = type(oldItem) if rtiClass is None else rtiClass
    self.deleteItemAtIndex(itemIndex)
    return self.loadFile(fileName, itemClass, position=position, parentIndex=parentIndex)
|
Reloads the item at the index by removing the repo tree item and inserting a new one.
The new item will have by of type rtiClass. If rtiClass is None (the default), the
new rtiClass will be the same as the old one.
|
373,882
|
def update_config(self):
    """Write the in-memory SExtractor configuration out to its files:
    convolution filter, parameter list, neural-network weights and the
    main configuration file.

    NOTE(review): this snippet is heavily corrupted — the config key
    literals, open() modes and two write/print format strings were
    stripped or truncated; restore them from the original source.
    """
    filter = self.config[]
    rows = len(filter)
    cols = len(filter[0])
    # Convolution filter file: header then one row of values per line.
    filter_f = __builtin__.open(self.config[], )
    filter_f.write("CONV NORM\n")
    filter_f.write("
                   (rows, cols))
    for row in filter:
        filter_f.write(" ".join(map(repr, row)))
        filter_f.write("\n")
    filter_f.close()
    # Output-parameter list: one parameter name per line.
    parameters_f = __builtin__.open(self.config[], )
    for parameter in self.config[]:
        print(parameter, file=parameters_f)
    parameters_f.close()
    # Neural-network weights file.
    nnw_f = __builtin__.open(self.config[], )
    nnw_f.write(nnw_config)
    nnw_f.close()
    # Main configuration file: key/value pairs, skipping special keys.
    main_f = __builtin__.open(self.config[], )
    for key in self.config.keys():
        if (key in SExtractor._SE_config_special_keys):
            continue
        if (key == "PHOT_AUTOPARAMS"):
            value = " ".join(map(str, self.config[key]))
        else:
            value = str(self.config[key])
        print(("%-16s %-16s
    main_f.close()
|
Update the configuration files according to the current
in-memory SExtractor configuration.
|
373,883
|
def create_reserved_ip_address(self, name, label=None, location=None):
    """Reserve an IPv4 address for the subscription (async POST).

    *name* is required; *label* is an optional tracking label (<= 100
    chars); *location* must match the consuming cloud service's location.

    NOTE(review): the first argument literal of ``_validate_not_none``
    (the parameter name string) was stripped from this snippet.
    """
    _validate_not_none(, name)
    return self._perform_post(
        self._get_reserved_ip_path(),
        _XmlSerializer.create_reserved_ip_to_xml(name, label, location),
        as_async=True)
|
Reserves an IPv4 address for the specified subscription.
name:
Required. Specifies the name for the reserved IP address.
label:
Optional. Specifies a label for the reserved IP address. The label
can be up to 100 characters long and can be used for your tracking
purposes.
location:
Required. Specifies the location of the reserved IP address. This
should be the same location that is assigned to the cloud service
containing the deployment that will use the reserved IP address.
To see the available locations, you can use list_locations.
|
373,884
|
def getAllMetadata(self, remote, address):
    """Fetch all metadata of a device from the server.

    Returns None when no server connection is configured.
    """
    if self._server is None:
        return None
    return self._server.getAllMetadata(remote, address)
|
Get all metadata of device
|
373,885
|
def detect(self, text):
    """Detect the language of *text* (batch detection via a list input).

    Returns a Detected(lang, confidence) — or a list of them when *text*
    is a list.

    NOTE(review): the ``dest=``/``src=`` literals (e.g. 'en'/'auto'), the
    default ``src`` value and the join separator were stripped from this
    snippet.
    """
    # Batch mode: detect each element independently.
    if isinstance(text, list):
        result = []
        for item in text:
            lang = self.detect(item)
            result.append(lang)
        return result
    data = self._translate(text, dest=, src=)
    src =
    confidence = 0.0
    try:
        # Language/confidence live at index 8 of the translate response.
        src = .join(data[8][0])
        confidence = data[8][-2][0]
    except Exception:
        # Best-effort: fall back to the defaults on any response shape issue.
        pass
    result = Detected(lang=src, confidence=confidence)
    return result
|
Detect language of the input text
:param text: The source text(s) whose language you want to identify.
Batch detection is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:rtype: Detected
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
<Detected lang=ko confidence=0.27041003>
>>> translator.detect('この文章は日本語で書かれました。')
<Detected lang=ja confidence=0.64889508>
>>> translator.detect('This sentence is written in English.')
<Detected lang=en confidence=0.22348526>
>>> translator.detect('Tiu frazo estas skribita en Esperanto.')
<Detected lang=eo confidence=0.10538048>
Advanced usage:
>>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
>>> for lang in langs:
... print(lang.lang, lang.confidence)
ko 1
ja 0.92929292
en 0.96954316
fr 0.043500196
|
373,886
|
def _format_disk_metrics(self, metrics):
    """Cast raw disk stats (value + unit strings) to integers in bytes.

    Unparseable values are dropped; unknown units map the metric to None.

    NOTE(review): this snippet is corrupted — the warning/error message
    literals were stripped and the value/unit unpacking line is missing
    (``unit`` and ``val`` appear unbound); restore from the original.
    """
    for name, raw_val in metrics.iteritems():
        if raw_val:
            match = DISK_STATS_RE.search(raw_val)
            if match is None or len(match.groups()) != 2:
                self.log.warning(t parse value %s for disk metric %s. Dropping it.s error prone.
            unit = unit.lower()
            try:
                # Convert to bytes using the multiplier for this unit.
                val = int(float(val) * UNIT_MAP[unit])
                metrics[name] = val
            except KeyError:
                self.log.error( % (unit, name))
                metrics[name] = None
    return metrics
|
Cast the disk stats to float and convert them to bytes
|
373,887
|
def save(self, filename, config):
    """Serialize *config* as sorted, indented JSON (via HCEncoder) and
    write it to *filename* (with ~ expansion).

    NOTE(review): the open() mode literal and the separators strings were
    stripped from this snippet; also note the file handle is never
    explicitly closed.
    """
    return open(os.path.expanduser(filename), ).write(json.dumps(config, cls=HCEncoder, sort_keys=True, indent=2, separators=(, )))
|
Saves a config to disk as sorted, indented JSON.
|
373,888
|
def to_fixed(stype):
    """Return the instruction sequence converting the value in DE,HL
    (of source type *stype*) to fixed-point DE,HL.

    NOTE(review): the appended instruction literals and the ``stype ==``
    comparison literal were stripped from this snippet.
    """
    output = []
    if is_int_type(stype):
        # Widen the integer first, then append the int->fixed instructions.
        output = to_word(stype)
        output.append()
        output.append()
    elif stype == :
        output.append()
        # This conversion relies on a runtime routine.
        REQUIRES.add()
    return output
|
Returns the instruction sequence for converting the given
type stored in DE,HL to fixed DE,HL.
|
373,889
|
def clear_history(pymux, variables):
    """Clear the scrollback buffer of the active pane."""
    pane = pymux.arrangement.get_active_pane()
    if pane.display_scroll_buffer:
        # Cannot clear while the scroll buffer is being displayed.
        raise CommandException()
    pane.process.screen.clear_history()
|
Clear scrollback buffer.
|
373,890
|
def bids_to_you(self):
    """Scrape and return bids made to this account as rows of
    [player, owner, team, price, bid_date, trans_date, status].

    NOTE(review): the URL fragments, header key and BeautifulSoup
    find() selectors were stripped from this snippet.
    """
    headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",: +self.domain+,"User-Agent": user_agent}
    req = self.session.get(+self.domain+,headers=headers).content
    soup = BeautifulSoup(req)
    table = []
    # Skip the header row, parse each bid row into a flat list.
    for i in soup.find(,{,}).find_all()[1:]:
        player,owner,team,price,bid_date,trans_date,status = self._parse_bid_table(i)
        table.append([player,owner,team,price,bid_date,trans_date,status])
    return table
|
Get bids made to you
@return: [[player,owner,team,money,date,datechange,status],]
|
373,891
|
def train(self, data_iterator):
    """Train a Keras model on a worker, sending asynchronous weight
    deltas to the parameter server after each epoch or each batch
    (depending on ``self.frequency``).

    NOTE(review): the ``train_config[]`` key literals ('epochs',
    'batch_size') and the ``frequency ==`` comparison literals were
    stripped from this snippet.
    """
    # Duplicate the iterator so features and labels can be drawn separately.
    feature_iterator, label_iterator = tee(data_iterator, 2)
    x_train = np.asarray([x for x, y in feature_iterator])
    y_train = np.asarray([y for x, y in label_iterator])
    if x_train.size == 0:
        return
    optimizer = get_optimizer(self.master_optimizer)
    self.model = model_from_yaml(self.yaml, self.custom_objects)
    self.model.compile(optimizer=optimizer,
                       loss=self.master_loss, metrics=self.master_metrics)
    self.model.set_weights(self.parameters.value)
    epochs = self.train_config[]
    batch_size = self.train_config.get()
    nb_train_sample = x_train.shape[0]
    nb_batch = int(np.ceil(nb_train_sample / float(batch_size)))
    index_array = np.arange(nb_train_sample)
    # (start, end) index pairs for each mini-batch.
    batches = [
        (i * batch_size, min(nb_train_sample, (i + 1) * batch_size))
        for i in range(0, nb_batch)
    ]
    if self.frequency == :
        # Epoch frequency: pull parameters, train one epoch, push deltas.
        for epoch in range(epochs):
            weights_before_training = self.client.get_parameters()
            self.model.set_weights(weights_before_training)
            self.train_config[] = 1
            if x_train.shape[0] > batch_size:
                self.model.fit(x_train, y_train, **self.train_config)
            self.train_config[] = epochs
            weights_after_training = self.model.get_weights()
            deltas = subtract_params(
                weights_before_training, weights_after_training)
            self.client.update_parameters(deltas)
    elif self.frequency == :
        # Batch frequency: sync with the parameter server every batch.
        for epoch in range(epochs):
            if x_train.shape[0] > batch_size:
                for (batch_start, batch_end) in batches:
                    weights_before_training = self.client.get_parameters()
                    self.model.set_weights(weights_before_training)
                    batch_ids = index_array[batch_start:batch_end]
                    x = slice_arrays(x_train, batch_ids)
                    y = slice_arrays(y_train, batch_ids)
                    self.model.train_on_batch(x, y)
                    weights_after_training = self.model.get_weights()
                    deltas = subtract_params(
                        weights_before_training, weights_after_training)
                    self.client.update_parameters(deltas)
    else:
        raise ValueError(
            .format(self.frequency))
    yield []
|
Train a keras model on a worker and send asynchronous updates
to parameter server
|
373,892
|
def _detect(self):
    """Detect storage variables that are never initialized.

    Walks every implemented function of every contract, recursively
    visiting calls, and returns one JSON result per uninitialized
    storage variable found.
    """
    results = []
    self.results = []
    # Cache of already-visited paths, shared across the recursion.
    self.visited_all_paths = {}
    for contract in self.slither.contracts:
        for function in contract.functions:
            if function.is_implemented:
                # Seed the entry point with its uninitialized storage locals.
                uninitialized_storage_variables = [v for v in function.local_variables if v.is_storage and v.uninitialized]
                function.entry_point.context[self.key] = uninitialized_storage_variables
                self._detect_uninitialized(function, function.entry_point, [])
    for(function, uninitialized_storage_variable) in self.results:
        var_name = uninitialized_storage_variable.name
        info = "{} in {}.{} ({}) is a storage variable never initialiazed\n"
        info = info.format(var_name, function.contract.name, function.name, uninitialized_storage_variable.source_mapping_str)
        json = self.generate_json_result(info)
        self.add_variable_to_json(uninitialized_storage_variable, json)
        self.add_function_to_json(function, json)
        results.append(json)
    return results
|
Detect uninitialized storage variables
Recursively visit the calls
Returns:
dict: [contract name] = set(storage variable uninitialized)
|
373,893
|
def ack(self, frame):
    """Handle the ACK command: acknowledge receipt of a message.

    Raises ProtocolError when the frame carries no message-id.
    """
    message_id = frame.message_id
    if not message_id:
        raise ProtocolError("No message-id specified for ACK command.")
    queue_manager = self.engine.queue_manager
    queue_manager.ack(self.engine.connection, frame)
|
Handles the ACK command: Acknowledges receipt of a message.
|
373,894
|
def _non_idempotent_tasks(self, output):
    """Parse an Ansible run's output and return the names of the
    non-idempotent tasks as '<host>: <task>' strings.

    NOTE(review): the regex pattern literals, the split separator and the
    startswith() prefixes were stripped from this snippet.
    """
    output = re.sub(r, , output)
    output = util.strip_ansi_escape(output)
    output_lines = output.split()
    res = []
    task_line =
    for _, line in enumerate(output_lines):
        if line.startswith():
            # Remember the most recent TASK header line.
            task_line = line
        elif line.startswith():
            # A changed-host line: pair the host with the current task.
            host_name = re.search(r, line).groups()[0]
            task_name = re.search(r, task_line).groups()[0]
            res.append(u.format(host_name, task_name))
    return res
|
Parses the output to identify the non idempotent tasks.
:param (str) output: A string containing the output of the ansible run.
:return: A list containing the names of the non idempotent tasks.
|
373,895
|
def columns_by_index(self) -> Dict[str, List[Well]]:
    """Navigate the labware by column name, dictionary-style.

    E.g. ``labware.columns_by_index()['1']`` yields ['A1', 'B1', ...].

    :return: Dictionary of Well lists keyed by column name
    """
    # group=2 selects the column component of the well grid.
    return self._create_indexed_dictionary(group=2)
|
Accessor function used to navigate through a labware by column name.
With indexing one can treat it as a typical python dictionary.
To access row A for example,
simply write: labware.columns_by_index()['1']
This will output ['A1', 'B1', 'C1', 'D1'...].
:return: Dictionary of Well lists keyed by column name
|
373,896
|
def flip_iterable_dict(d: dict) -> dict:
    """Invert *d*: each element of an iterable value becomes a key mapping
    back to its original key.

    Built by pairing every value element with its key and merging the
    pairs; later entries win on collisions (dict() semantics).
    """
    value_keys = disjoint_union((cartesian_product((v, k))
                                 for k, v in d.items()))
    return dict(value_keys)
|
Transform dictionary to unpack values to map to respective key.
|
373,897
|
def _get_log_covariance(self, log_variance_mat, log_expectation_symbols, covariance_matrix, x, y):
    # NOTE(review): the line below is the remnant of a stripped raw
    # docstring (originally r"""...""" with the log-covariance formula);
    # as-is it is a bare name and would raise NameError.
    r
    # Diagonal entries are the log variances themselves.
    if x == y:
        return log_variance_mat[x, x]
    elif self.is_multivariate:
        # log(1 + Cov(x,y) / exp(logE[x] + logE[y] + (logVar[x]+logVar[y])/2))
        denom = sp.exp(log_expectation_symbols[x] +
                       log_expectation_symbols[y] +
                       (log_variance_mat[x, x] + log_variance_mat[y, y])/ sp.Integer(2))
        return sp.log(sp.Integer(1) + covariance_matrix[x, y] / denom)
    else:
        # Univariate case: off-diagonal covariances are zero.
        return sp.Integer(0)
|
r"""
Compute log covariances according to:\\
:math:`\log{(Cov(x_i,x_j))} = \frac { 1 + Cov(x_i,x_j)}{\exp[\log \mathbb{E}(x_i) + \log \mathbb{E}(x_j)+\frac{1}{2} (\log Var(x_i) + \log Var(x_j)]}`
:param log_variance_mat: a column matrix of log variance
:param log_expectation_symbols: a column matrix of log expectations
:param covariance_matrix: a matrix of covariances
:param x: x-coordinate in matrix of log variances and log covariances
:param y: y-coordinate in matrix of log variances and log covariances
:return: the log covariance between x and y
|
373,898
|
def _parse_xml(child, parser):
    """Parse a <subroutine> or <function> XML tag into a Subroutine or
    Function object, including its parameter/summary/usage children.

    Returns None for any other tag.
    """
    name, modifiers, dtype, kind = _parse_common(child)
    name = _isense_builtin_symbol(name)
    # Bug fix: 'parent' was unbound (NameError) for tags other than
    # "subroutine"/"function"; initialize it explicitly.
    parent = None
    if child.tag == "subroutine":
        parent = Subroutine(name, modifiers, None)
    elif child.tag == "function":
        parent = Function(name, modifiers, dtype, kind, None)
    if parent is not None:
        for kid in child:
            if kid.tag == "parameter":
                _parse_parameter(kid, parser, parent)
            elif kid.tag == "summary":
                _parse_summary(kid, parser, parent)
            elif kid.tag == "usage":
                _parse_usage(kid, parser, parent)
    return parent
|
Parses the specified child XML tag and creates a Subroutine or
Function object out of it.
|
373,899
|
def global_env_valid(env):
    """Return True if *env* is a valid account-scoped ("global"/"mgmt")
    env per EFConfig; raise ValueError otherwise.
    """
    valid_envs = EFConfig.ACCOUNT_SCOPED_ENVS
    if env in valid_envs:
        return True
    raise ValueError("Invalid global env: {}; global envs are: {}".format(env, valid_envs))
|
Given an env, determine if it's a valid "global" or "mgmt" env as listed in EFConfig
Args:
env: the env to check
Returns:
True if the env is a valid global env in EFConfig
Raises:
ValueError with message if the env is not valid
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.