repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
openpaperwork/paperwork-backend | paperwork_backend/common/page.py | https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/common/page.py#L272-L282 | def __get_keywords(self):
"""
Get all the keywords related of this page
Returns:
An array of strings
"""
txt = self.text
for line in txt:
for word in split_words(line):
yield(word) | [
"def",
"__get_keywords",
"(",
"self",
")",
":",
"txt",
"=",
"self",
".",
"text",
"for",
"line",
"in",
"txt",
":",
"for",
"word",
"in",
"split_words",
"(",
"line",
")",
":",
"yield",
"(",
"word",
")"
] | Get all the keywords related of this page
Returns:
An array of strings | [
"Get",
"all",
"the",
"keywords",
"related",
"of",
"this",
"page"
] | python | train |
RI-imaging/qpformat | qpformat/file_formats/series_hdf5_hyperspy.py | https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/series_hdf5_hyperspy.py#L101-L113 | def verify(path):
"""Verify that `path` has the HyperSpy file format"""
valid = False
try:
h5 = h5py.File(path, mode="r")
except (OSError, IsADirectoryError):
pass
else:
if ("file_format" in h5.attrs and
h5.attrs["file_format"].lower() == "hyperspy" and
"Experiments" in h5):
valid = True
return valid | [
"def",
"verify",
"(",
"path",
")",
":",
"valid",
"=",
"False",
"try",
":",
"h5",
"=",
"h5py",
".",
"File",
"(",
"path",
",",
"mode",
"=",
"\"r\"",
")",
"except",
"(",
"OSError",
",",
"IsADirectoryError",
")",
":",
"pass",
"else",
":",
"if",
"(",
... | Verify that `path` has the HyperSpy file format | [
"Verify",
"that",
"path",
"has",
"the",
"HyperSpy",
"file",
"format"
] | python | train |
fhcrc/seqmagick | seqmagick/transform.py | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L242-L254 | def cut_sequences_relative(records, slices, record_id):
"""
Cuts records to slices, indexed by non-gap positions in record_id
"""
with _record_buffer(records) as r:
try:
record = next(i for i in r() if i.id == record_id)
except StopIteration:
raise ValueError("Record with id {0} not found.".format(record_id))
new_slices = _update_slices(record, slices)
for record in multi_cut_sequences(r(), new_slices):
yield record | [
"def",
"cut_sequences_relative",
"(",
"records",
",",
"slices",
",",
"record_id",
")",
":",
"with",
"_record_buffer",
"(",
"records",
")",
"as",
"r",
":",
"try",
":",
"record",
"=",
"next",
"(",
"i",
"for",
"i",
"in",
"r",
"(",
")",
"if",
"i",
".",
... | Cuts records to slices, indexed by non-gap positions in record_id | [
"Cuts",
"records",
"to",
"slices",
"indexed",
"by",
"non",
"-",
"gap",
"positions",
"in",
"record_id"
] | python | train |
BlackEarth/bf | bf/pdf.py | https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/pdf.py#L11-L60 | def gswrite(self, fn=None, device='jpeg', res=600, alpha=4, quality=90, gs=None):
"use ghostscript to create output file(s) from the PDF"
gs = (gs or self.gs or os.environ.get('gs') or 'gs')
# count the number of pages
if fn is None:
fn = os.path.splitext(self.fn)[0] + DEVICE_EXTENSIONS[device]
fn = File(fn=fn).fn # normalize path
if not os.path.exists(os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn))
log.debug("PDF.gswrite():\n\tself.fn = %s\n\tout fn = %s" % (self.fn, fn))
if os.path.splitext(self.fn)[-1].lower()=='.pdf':
cmd = [gs, '-q', '-dNODISPLAY', '-c',
"(%s) (r) file runpdfbegin pdfpagecount = quit" % self.fn]
log.debug(cmd)
out = subprocess.check_output(cmd).decode('utf-8').strip()
if out=='': out = '1'
pages = int(out)
log.debug("%d page(s)" % pages)
else:
pages = 1
if pages > 1:
# add a counter to the filename, which tells gs to create a file for every page in the input
fb, ext = os.path.splitext(fn)
n = len(re.split('.', str(pages))) - 1
counter = "-%%0%dd" % n
fn = fb + counter + ext
# remove any existing output filenames that match the pattern
for existingfn in glob(fb+'*'+ext):
log.debug("REMOVING %s" % existingfn)
os.remove(existingfn)
callargs = [gs, '-q', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dUseCropBox',
'-sDEVICE=%s' % device, '-r%d' % res]
if device=='jpeg':
callargs += ['-dJPEGQ=%d' % quality]
if 'png' in device or 'jpeg' in device or 'tiff' in device:
callargs += [
'-dTextAlphaBits=%d' % alpha,
'-dGraphicsAlphaBits=%d' % alpha]
callargs += ['-sOutputFile=%s' % fn, self.fn]
try:
log.debug(callargs)
subprocess.check_output(callargs)
except subprocess.CalledProcessError as e:
log.error(callargs)
log.error(str(e.output, 'utf-8'))
fns = sorted(glob(re.sub('%\d+d','*', fn)))
log.debug('\n\t'+'\n\t'.join(fns))
return fns | [
"def",
"gswrite",
"(",
"self",
",",
"fn",
"=",
"None",
",",
"device",
"=",
"'jpeg'",
",",
"res",
"=",
"600",
",",
"alpha",
"=",
"4",
",",
"quality",
"=",
"90",
",",
"gs",
"=",
"None",
")",
":",
"gs",
"=",
"(",
"gs",
"or",
"self",
".",
"gs",
... | use ghostscript to create output file(s) from the PDF | [
"use",
"ghostscript",
"to",
"create",
"output",
"file",
"(",
"s",
")",
"from",
"the",
"PDF"
] | python | train |
saltstack/salt | salt/states/boto_vpc.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_vpc.py#L1377-L1454 | def nat_gateway_absent(name=None, subnet_name=None, subnet_id=None,
region=None, key=None, keyid=None, profile=None,
wait_for_delete_retries=0):
'''
Ensure the nat gateway in the named subnet is absent.
This function requires boto3.
.. versionadded:: 2016.11.0
name
Name of the state.
subnet_name
Name of the subnet within which the nat gateway should exist
subnet_id
Id of the subnet within which the nat gateway should exist.
Either subnet_name or subnet_id must be provided.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
wait_for_delete_retries
NAT gateway may take some time to be go into deleted or failed state.
During the deletion process, subsequent release of elastic IPs may fail;
this state will automatically retry this number of times to ensure
the NAT gateway is in deleted or failed state before proceeding.
Default is set to 0 for backward compatibility.
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
r = __salt__['boto_vpc.describe_nat_gateways'](subnet_name=subnet_name,
subnet_id=subnet_id,
region=region, key=key, keyid=keyid,
profile=profile)
if not r:
ret['comment'] = 'Nat gateway does not exist.'
return ret
if __opts__['test']:
ret['comment'] = 'Nat gateway is set to be removed.'
ret['result'] = None
return ret
for gw in r:
rtbl_id = gw.get('NatGatewayId')
r = __salt__['boto_vpc.delete_nat_gateway'](nat_gateway_id=rtbl_id,
release_eips=True,
region=region,
key=key, keyid=keyid,
profile=profile,
wait_for_delete=True,
wait_for_delete_retries=wait_for_delete_retries)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to delete nat gateway: {0}'.format(r['error']['message'])
return ret
ret['comment'] = ', '.join((ret['comment'], 'Nat gateway {0} deleted.'.format(rtbl_id)))
ret['changes']['old'] = {'nat_gateway': rtbl_id}
ret['changes']['new'] = {'nat_gateway': None}
return ret | [
"def",
"nat_gateway_absent",
"(",
"name",
"=",
"None",
",",
"subnet_name",
"=",
"None",
",",
"subnet_id",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"wait_for_delete_retr... | Ensure the nat gateway in the named subnet is absent.
This function requires boto3.
.. versionadded:: 2016.11.0
name
Name of the state.
subnet_name
Name of the subnet within which the nat gateway should exist
subnet_id
Id of the subnet within which the nat gateway should exist.
Either subnet_name or subnet_id must be provided.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
wait_for_delete_retries
NAT gateway may take some time to be go into deleted or failed state.
During the deletion process, subsequent release of elastic IPs may fail;
this state will automatically retry this number of times to ensure
the NAT gateway is in deleted or failed state before proceeding.
Default is set to 0 for backward compatibility. | [
"Ensure",
"the",
"nat",
"gateway",
"in",
"the",
"named",
"subnet",
"is",
"absent",
"."
] | python | train |
openstax/cnx-easybake | cnxeasybake/oven.py | https://github.com/openstax/cnx-easybake/blob/f8edf018fb7499f6f18af0145c326b93a737a782/cnxeasybake/oven.py#L589-L595 | def push_pending_elem(self, element, pseudo):
"""Create and place pending target element onto stack."""
self.push_target_elem(element, pseudo)
elem = etree.Element('div')
actions = self.state[self.state['current_step']]['actions']
actions.append(('move', elem))
actions.append(('target', Target(elem))) | [
"def",
"push_pending_elem",
"(",
"self",
",",
"element",
",",
"pseudo",
")",
":",
"self",
".",
"push_target_elem",
"(",
"element",
",",
"pseudo",
")",
"elem",
"=",
"etree",
".",
"Element",
"(",
"'div'",
")",
"actions",
"=",
"self",
".",
"state",
"[",
"... | Create and place pending target element onto stack. | [
"Create",
"and",
"place",
"pending",
"target",
"element",
"onto",
"stack",
"."
] | python | train |
PMBio/limix-backup | limix/stats/geno_summary.py | https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/stats/geno_summary.py#L38-L61 | def calc_LD(M,pos,i_start=[0],max_dist=1000000):
"""calculate linkage disequilibrium correlations:
M: genotype matrix
pos: position vector
i_start: index to start from for LD calculation
dist: distance
"""
RV = []
DIST = []
for start in i_start:
pos0 = pos[start]
v0 = M[:,start]
Iselect = sp.nonzero(sp.absolute(pos-pos0)<=max_dist)[0]
rv = sp.zeros(len(Iselect))
for i in range(len(Iselect)):
rv[i] = (sp.corrcoef(v0,M[:,Iselect[i]])[0,1])**2
#sort by distance
dist = sp.absolute(pos[Iselect]-pos0)
RV.extend(rv)
DIST.extend(dist)
RV = sp.array(RV)
DIST = sp.array(DIST)
II = DIST.argsort()
return [DIST[II],RV[II]] | [
"def",
"calc_LD",
"(",
"M",
",",
"pos",
",",
"i_start",
"=",
"[",
"0",
"]",
",",
"max_dist",
"=",
"1000000",
")",
":",
"RV",
"=",
"[",
"]",
"DIST",
"=",
"[",
"]",
"for",
"start",
"in",
"i_start",
":",
"pos0",
"=",
"pos",
"[",
"start",
"]",
"v... | calculate linkage disequilibrium correlations:
M: genotype matrix
pos: position vector
i_start: index to start from for LD calculation
dist: distance | [
"calculate",
"linkage",
"disequilibrium",
"correlations",
":",
"M",
":",
"genotype",
"matrix",
"pos",
":",
"position",
"vector",
"i_start",
":",
"index",
"to",
"start",
"from",
"for",
"LD",
"calculation",
"dist",
":",
"distance"
] | python | train |
materialsproject/pymatgen | pymatgen/command_line/bader_caller.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/command_line/bader_caller.py#L314-L366 | def from_path(cls, path, suffix=""):
"""
Convenient constructor that takes in the path name of VASP run
to perform Bader analysis.
Args:
path (str): Name of directory where VASP output files are
stored.
suffix (str): specific suffix to look for (e.g. '.relax1'
for 'CHGCAR.relax1.gz').
"""
def _get_filepath(filename):
name_pattern = filename + suffix + '*' if filename != 'POTCAR' \
else filename + '*'
paths = glob.glob(os.path.join(path, name_pattern))
fpath = None
if len(paths) >= 1:
# using reverse=True because, if multiple files are present,
# they likely have suffixes 'static', 'relax', 'relax2', etc.
# and this would give 'static' over 'relax2' over 'relax'
# however, better to use 'suffix' kwarg to avoid this!
paths.sort(reverse=True)
warning_msg = "Multiple files detected, using %s" \
% os.path.basename(paths[0]) if len(paths) > 1 \
else None
fpath = paths[0]
else:
warning_msg = "Could not find %s" % filename
if filename in ['AECCAR0', 'AECCAR2']:
warning_msg += ", cannot calculate charge transfer."
elif filename == "POTCAR":
warning_msg += ", interpret Bader results with caution."
if warning_msg:
warnings.warn(warning_msg)
return fpath
chgcar_filename = _get_filepath("CHGCAR")
if chgcar_filename is None:
raise IOError("Could not find CHGCAR!")
potcar_filename = _get_filepath("POTCAR")
aeccar0 = _get_filepath("AECCAR0")
aeccar2 = _get_filepath("AECCAR2")
if (aeccar0 and aeccar2):
# `chgsum.pl AECCAR0 AECCAR2` equivalent to obtain chgref_file
chgref = Chgcar.from_file(aeccar0) + Chgcar.from_file(aeccar2)
chgref_filename = "CHGREF"
chgref.write_file(chgref_filename)
else:
chgref_filename = None
return cls(chgcar_filename, potcar_filename=potcar_filename,
chgref_filename=chgref_filename) | [
"def",
"from_path",
"(",
"cls",
",",
"path",
",",
"suffix",
"=",
"\"\"",
")",
":",
"def",
"_get_filepath",
"(",
"filename",
")",
":",
"name_pattern",
"=",
"filename",
"+",
"suffix",
"+",
"'*'",
"if",
"filename",
"!=",
"'POTCAR'",
"else",
"filename",
"+",... | Convenient constructor that takes in the path name of VASP run
to perform Bader analysis.
Args:
path (str): Name of directory where VASP output files are
stored.
suffix (str): specific suffix to look for (e.g. '.relax1'
for 'CHGCAR.relax1.gz'). | [
"Convenient",
"constructor",
"that",
"takes",
"in",
"the",
"path",
"name",
"of",
"VASP",
"run",
"to",
"perform",
"Bader",
"analysis",
"."
] | python | train |
halcy/Mastodon.py | mastodon/Mastodon.py | https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L2911-L2918 | def __decode_webpush_b64(self, data):
"""
Re-pads and decodes urlsafe base64.
"""
missing_padding = len(data) % 4
if missing_padding != 0:
data += '=' * (4 - missing_padding)
return base64.urlsafe_b64decode(data) | [
"def",
"__decode_webpush_b64",
"(",
"self",
",",
"data",
")",
":",
"missing_padding",
"=",
"len",
"(",
"data",
")",
"%",
"4",
"if",
"missing_padding",
"!=",
"0",
":",
"data",
"+=",
"'='",
"*",
"(",
"4",
"-",
"missing_padding",
")",
"return",
"base64",
... | Re-pads and decodes urlsafe base64. | [
"Re",
"-",
"pads",
"and",
"decodes",
"urlsafe",
"base64",
"."
] | python | train |
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L289-L292 | def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth) | [
"def",
"get_privileges",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"url",
"(",
"'GET_USER_PRIVILEGES'",
")",
"return",
"self",
".",
"dispatch",
"(",
"'GET'",
",",
"url",
",",
"auth",
"=",
"self",
".",
"auth",
")"
] | Get privledges for this user. | [
"Get",
"privledges",
"for",
"this",
"user",
"."
] | python | train |
celery/django-celery | djcelery/views.py | https://github.com/celery/django-celery/blob/5d1ecb09c6304d22cc447c7c08fba0bd1febc2ef/djcelery/views.py#L46-L57 | def apply(request, task_name):
"""View applying a task.
**Note:** Please use this with caution. Preferably you shouldn't make this
publicly accessible without ensuring your code is safe!
"""
try:
task = tasks[task_name]
except KeyError:
raise Http404('apply: no such task')
return task_view(task)(request) | [
"def",
"apply",
"(",
"request",
",",
"task_name",
")",
":",
"try",
":",
"task",
"=",
"tasks",
"[",
"task_name",
"]",
"except",
"KeyError",
":",
"raise",
"Http404",
"(",
"'apply: no such task'",
")",
"return",
"task_view",
"(",
"task",
")",
"(",
"request",
... | View applying a task.
**Note:** Please use this with caution. Preferably you shouldn't make this
publicly accessible without ensuring your code is safe! | [
"View",
"applying",
"a",
"task",
"."
] | python | train |
openstack/proliantutils | proliantutils/redfish/redfish.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/redfish.py#L649-L727 | def get_server_capabilities(self):
"""Returns the server capabilities
raises: IloError on an error from iLO.
"""
capabilities = {}
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
sushy_manager = self._get_sushy_manager(PROLIANT_MANAGER_ID)
try:
count = len(sushy_system.pci_devices.gpu_devices)
boot_mode = rf_utils.get_supported_boot_mode(
sushy_system.supported_boot_mode)
capabilities.update(
{'pci_gpu_devices': count,
'ilo_firmware_version': sushy_manager.firmware_version,
'rom_firmware_version': sushy_system.rom_version,
'server_model': sushy_system.model,
'nic_capacity': sushy_system.pci_devices.max_nic_capacity,
'boot_mode_bios': boot_mode.boot_mode_bios,
'boot_mode_uefi': boot_mode.boot_mode_uefi})
tpm_state = sushy_system.bios_settings.tpm_state
all_key_to_value_expression_tuples = [
('sriov_enabled',
sushy_system.bios_settings.sriov == sys_cons.SRIOV_ENABLED),
('cpu_vt',
sushy_system.bios_settings.cpu_vt == (
sys_cons.CPUVT_ENABLED)),
('trusted_boot',
(tpm_state == sys_cons.TPM_PRESENT_ENABLED
or tpm_state == sys_cons.TPM_PRESENT_DISABLED)),
('secure_boot', self._has_secure_boot()),
('iscsi_boot',
(sushy_system.bios_settings.iscsi_resource.
is_iscsi_boot_supported())),
('hardware_supports_raid',
len(sushy_system.smart_storage.array_controllers.
members_identities) > 0),
('has_ssd',
common_storage.has_ssd(sushy_system)),
('has_rotational',
common_storage.has_rotational(sushy_system)),
('has_nvme_ssd',
common_storage.has_nvme_ssd(sushy_system))
]
all_key_to_value_expression_tuples += (
[('logical_raid_level_' + x, True)
for x in sushy_system.smart_storage.logical_raid_levels])
all_key_to_value_expression_tuples += (
[('drive_rotational_' + str(x) + '_rpm', True)
for x in
common_storage.get_drive_rotational_speed_rpm(sushy_system)])
capabilities.update(
{key: 'true'
for (key, value) in all_key_to_value_expression_tuples
if value})
memory_data = sushy_system.memory.details()
if memory_data.has_nvdimm_n:
capabilities.update(
{'persistent_memory': (
json.dumps(memory_data.has_persistent_memory)),
'nvdimm_n': (
json.dumps(memory_data.has_nvdimm_n)),
'logical_nvdimm_n': (
json.dumps(memory_data.has_logical_nvdimm_n))})
except sushy.exceptions.SushyError as e:
msg = (self._("The Redfish controller is unable to get "
"resource or its members. Error "
"%(error)s)") % {'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
return capabilities | [
"def",
"get_server_capabilities",
"(",
"self",
")",
":",
"capabilities",
"=",
"{",
"}",
"sushy_system",
"=",
"self",
".",
"_get_sushy_system",
"(",
"PROLIANT_SYSTEM_ID",
")",
"sushy_manager",
"=",
"self",
".",
"_get_sushy_manager",
"(",
"PROLIANT_MANAGER_ID",
")",
... | Returns the server capabilities
raises: IloError on an error from iLO. | [
"Returns",
"the",
"server",
"capabilities"
] | python | train |
saltstack/salt | salt/engines/slack.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L288-L306 | def _groups_from_pillar(self, pillar_name):
'''
pillar_prefix is the pillar.get syntax for the pillar to be queried.
Group name is gotten via the equivalent of using
``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))``
in a jinja template.
returns a dictionary (unless the pillar is mis-formatted)
XXX: instead of using Caller, make the minion to use configurable so there could be some
restrictions placed on what pillars can be used.
'''
if pillar_name and __opts__['__role'] == 'minion':
pillar_groups = __salt__['pillar.get'](pillar_name, {})
log.debug('Got pillar groups %s from pillar %s', pillar_groups, pillar_name)
log.debug('pillar groups is %s', pillar_groups)
log.debug('pillar groups type is %s', type(pillar_groups))
else:
pillar_groups = {}
return pillar_groups | [
"def",
"_groups_from_pillar",
"(",
"self",
",",
"pillar_name",
")",
":",
"if",
"pillar_name",
"and",
"__opts__",
"[",
"'__role'",
"]",
"==",
"'minion'",
":",
"pillar_groups",
"=",
"__salt__",
"[",
"'pillar.get'",
"]",
"(",
"pillar_name",
",",
"{",
"}",
")",
... | pillar_prefix is the pillar.get syntax for the pillar to be queried.
Group name is gotten via the equivalent of using
``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))``
in a jinja template.
returns a dictionary (unless the pillar is mis-formatted)
XXX: instead of using Caller, make the minion to use configurable so there could be some
restrictions placed on what pillars can be used. | [
"pillar_prefix",
"is",
"the",
"pillar",
".",
"get",
"syntax",
"for",
"the",
"pillar",
"to",
"be",
"queried",
".",
"Group",
"name",
"is",
"gotten",
"via",
"the",
"equivalent",
"of",
"using",
"salt",
"[",
"pillar",
".",
"get",
"]",
"(",
"{}",
":",
"{}",
... | python | train |
googleapis/dialogflow-python-client-v2 | dialogflow_v2/gapic/contexts_client.py | https://github.com/googleapis/dialogflow-python-client-v2/blob/8c9c8709222efe427b76c9c8fcc04a0c4a0760b5/dialogflow_v2/gapic/contexts_client.py#L407-L468 | def update_context(self,
context,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates the specified context.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.ContextsClient()
>>>
>>> # TODO: Initialize ``context``:
>>> context = {}
>>>
>>> response = client.update_context(context)
Args:
context (Union[dict, ~google.cloud.dialogflow_v2.types.Context]): Required. The context to update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Context`
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Context` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_context' not in self._inner_api_calls:
self._inner_api_calls[
'update_context'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_context,
default_retry=self._method_configs['UpdateContext'].retry,
default_timeout=self._method_configs['UpdateContext']
.timeout,
client_info=self._client_info,
)
request = context_pb2.UpdateContextRequest(
context=context,
update_mask=update_mask,
)
return self._inner_api_calls['update_context'](
request, retry=retry, timeout=timeout, metadata=metadata) | [
"def",
"update_context",
"(",
"self",
",",
"context",
",",
"update_mask",
"=",
"None",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"me... | Updates the specified context.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.ContextsClient()
>>>
>>> # TODO: Initialize ``context``:
>>> context = {}
>>>
>>> response = client.update_context(context)
Args:
context (Union[dict, ~google.cloud.dialogflow_v2.types.Context]): Required. The context to update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Context`
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Context` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | [
"Updates",
"the",
"specified",
"context",
"."
] | python | train |
openego/ding0 | ding0/core/__init__.py | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L80-L84 | def add_mv_grid_district(self, mv_grid_district):
"""Adds a MV grid_district to _mv_grid_districts if not already existing"""
# TODO: use setter method here (make attribute '_mv_grid_districts' private)
if mv_grid_district not in self.mv_grid_districts():
self._mv_grid_districts.append(mv_grid_district) | [
"def",
"add_mv_grid_district",
"(",
"self",
",",
"mv_grid_district",
")",
":",
"# TODO: use setter method here (make attribute '_mv_grid_districts' private)",
"if",
"mv_grid_district",
"not",
"in",
"self",
".",
"mv_grid_districts",
"(",
")",
":",
"self",
".",
"_mv_grid_dist... | Adds a MV grid_district to _mv_grid_districts if not already existing | [
"Adds",
"a",
"MV",
"grid_district",
"to",
"_mv_grid_districts",
"if",
"not",
"already",
"existing"
] | python | train |
caffeinehit/django-follow | follow/models.py | https://github.com/caffeinehit/django-follow/blob/765a4795e58f57fbf96efdb7838d0c7222db2e56/follow/models.py#L21-L30 | def create(self, user, obj, **kwargs):
"""
Create a new follow link between a user and an object
of a registered model type.
"""
follow = Follow(user=user)
follow.target = obj
follow.save()
return follow | [
"def",
"create",
"(",
"self",
",",
"user",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"follow",
"=",
"Follow",
"(",
"user",
"=",
"user",
")",
"follow",
".",
"target",
"=",
"obj",
"follow",
".",
"save",
"(",
")",
"return",
"follow"
] | Create a new follow link between a user and an object
of a registered model type. | [
"Create",
"a",
"new",
"follow",
"link",
"between",
"a",
"user",
"and",
"an",
"object",
"of",
"a",
"registered",
"model",
"type",
"."
] | python | train |
phaethon/kamene | kamene/utils6.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/utils6.py#L646-L655 | def in6_getnsmac(a): # return multicast Ethernet address associated with multicast v6 destination
"""
Return the multicast mac address associated with provided
IPv6 address. Passed address must be in network format.
"""
a = struct.unpack('16B', a)[-4:]
mac = '33:33:'
mac += (':'.join(map(lambda x: '%.2x' %x, a)))
return mac | [
"def",
"in6_getnsmac",
"(",
"a",
")",
":",
"# return multicast Ethernet address associated with multicast v6 destination",
"a",
"=",
"struct",
".",
"unpack",
"(",
"'16B'",
",",
"a",
")",
"[",
"-",
"4",
":",
"]",
"mac",
"=",
"'33:33:'",
"mac",
"+=",
"(",
"':'",... | Return the multicast mac address associated with provided
IPv6 address. Passed address must be in network format. | [
"Return",
"the",
"multicast",
"mac",
"address",
"associated",
"with",
"provided",
"IPv6",
"address",
".",
"Passed",
"address",
"must",
"be",
"in",
"network",
"format",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/phase_diagram.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/phase_diagram.py#L1638-L1670 | def _get_3d_plot(self, label_stable=True):
"""
Shows the plot using pylab. Usually I won"t do imports in methods,
but since plotting is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way.
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = p3.Axes3D(fig)
font = FontProperties()
font.set_weight("bold")
font.set_size(20)
(lines, labels, unstable) = self.pd_plot_data
count = 1
newlabels = list()
for x, y, z in lines:
ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
markerfacecolor="r", markersize=10)
for coords in sorted(labels.keys()):
entry = labels[coords]
label = entry.name
if label_stable:
if len(entry.composition.elements) == 1:
ax.text(coords[0], coords[1], coords[2], label)
else:
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(count, latexify(label)))
count += 1
plt.figtext(0.01, 0.01, "\n".join(newlabels))
ax.axis("off")
return plt | [
"def",
"_get_3d_plot",
"(",
"self",
",",
"label_stable",
"=",
"True",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"mpl_toolkits",
".",
"mplot3d",
".",
"axes3d",
"as",
"p3",
"from",
"matplotlib",
".",
"font_manager",
"import",
"Fon... | Shows the plot using pylab. Usually I won"t do imports in methods,
but since plotting is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way. | [
"Shows",
"the",
"plot",
"using",
"pylab",
".",
"Usually",
"I",
"won",
"t",
"do",
"imports",
"in",
"methods",
"but",
"since",
"plotting",
"is",
"a",
"fairly",
"expensive",
"library",
"to",
"load",
"and",
"not",
"all",
"machines",
"have",
"matplotlib",
"inst... | python | train |
globality-corp/microcosm-flask | microcosm_flask/conventions/registry.py | https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/conventions/registry.py#L56-L64 | def request(schema):
"""
Decorate a function with a request schema.
"""
def wrapper(func):
setattr(func, REQUEST, schema)
return func
return wrapper | [
"def",
"request",
"(",
"schema",
")",
":",
"def",
"wrapper",
"(",
"func",
")",
":",
"setattr",
"(",
"func",
",",
"REQUEST",
",",
"schema",
")",
"return",
"func",
"return",
"wrapper"
] | Decorate a function with a request schema. | [
"Decorate",
"a",
"function",
"with",
"a",
"request",
"schema",
"."
] | python | train |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L3943-L3955 | def get_stp_mst_detail_output_cist_cist_bridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
cist_bridge_id = ET.SubElement(cist, "cist-bridge-id")
cist_bridge_id.text = kwargs.pop('cist_bridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_stp_mst_detail_output_cist_cist_bridge_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_stp_mst_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_stp_mst_detail\"",
")",
"config",
"=... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/core/crashhandler.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/crashhandler.py#L180-L199 | def make_report(self,traceback):
"""Return a string containing a crash report."""
sec_sep = self.section_sep
report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
rpt_add = report.append
rpt_add(sys_info())
try:
config = pformat(self.app.config)
rpt_add(sec_sep)
rpt_add('Application name: %s\n\n' % self.app_name)
rpt_add('Current user configuration structure:\n\n')
rpt_add(config)
except:
pass
rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
return ''.join(report) | [
"def",
"make_report",
"(",
"self",
",",
"traceback",
")",
":",
"sec_sep",
"=",
"self",
".",
"section_sep",
"report",
"=",
"[",
"'*'",
"*",
"75",
"+",
"'\\n\\n'",
"+",
"'IPython post-mortem report\\n\\n'",
"]",
"rpt_add",
"=",
"report",
".",
"append",
"rpt_ad... | Return a string containing a crash report. | [
"Return",
"a",
"string",
"containing",
"a",
"crash",
"report",
"."
] | python | test |
CI-WATER/gsshapy | gsshapy/modeling/model.py | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L196-L211 | def set_mask_from_shapefile(self, shapefile_path, cell_size):
"""
Adds a mask from a shapefile
"""
# make sure paths are absolute as the working directory changes
shapefile_path = os.path.abspath(shapefile_path)
# ADD MASK
with tmp_chdir(self.project_directory):
mask_name = '{0}.msk'.format(self.project_manager.name)
msk_file = WatershedMaskFile(project_file=self.project_manager,
session=self.db_session)
msk_file.generateFromWatershedShapefile(shapefile_path,
cell_size=cell_size,
out_raster_path=mask_name,
load_raster_to_db=self.load_rasters_to_db) | [
"def",
"set_mask_from_shapefile",
"(",
"self",
",",
"shapefile_path",
",",
"cell_size",
")",
":",
"# make sure paths are absolute as the working directory changes",
"shapefile_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"shapefile_path",
")",
"# ADD MASK",
"with",... | Adds a mask from a shapefile | [
"Adds",
"a",
"mask",
"from",
"a",
"shapefile"
] | python | train |
iron-io/iron_cache_python | iron_cache.py | https://github.com/iron-io/iron_cache_python/blob/f68f5a5e216e3189397ffd7d243de0d53bf7c764/iron_cache.py#L88-L105 | def get(self, key, cache=None):
"""Query the server for an item, parse the JSON, and return the result.
Keyword arguments:
key -- the key of the item that you'd like to retrieve. Required.
cache -- the name of the cache that the item resides in. Defaults to
None, which uses self.name. If no name is set, raises a
ValueError.
"""
if cache is None:
cache = self.name
if cache is None:
raise ValueError("Cache name must be set")
cache = quote_plus(cache)
key = quote_plus(key)
url = "caches/%s/items/%s" % (cache, key)
result = self.client.get(url)
return Item(values=result["body"]) | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"cache",
"=",
"None",
")",
":",
"if",
"cache",
"is",
"None",
":",
"cache",
"=",
"self",
".",
"name",
"if",
"cache",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cache name must be set\"",
")",
"cache",
... | Query the server for an item, parse the JSON, and return the result.
Keyword arguments:
key -- the key of the item that you'd like to retrieve. Required.
cache -- the name of the cache that the item resides in. Defaults to
None, which uses self.name. If no name is set, raises a
ValueError. | [
"Query",
"the",
"server",
"for",
"an",
"item",
"parse",
"the",
"JSON",
"and",
"return",
"the",
"result",
"."
] | python | train |
fhcrc/seqmagick | seqmagick/subcommands/quality_filter.py | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L455-L485 | def filter_record(self, record):
"""
Filter a single record
"""
quality_scores = record.letter_annotations['phred_quality']
# Simple case - window covers whole sequence
if len(record) <= self.window_size:
mean_score = mean(quality_scores)
if mean_score >= self.min_mean_score:
return record
else:
raise FailedFilter(mean_score)
# Find the right clipping point. Start clipping at the beginning of the
# sequence, then extend the window to include regions with acceptable
# mean quality scores.
clip_right = 0
for i, a in enumerate(
moving_average(quality_scores, self.window_size)):
if a >= self.min_mean_score:
clip_right = i + self.window_size
else:
break
if clip_right:
return record[:clip_right]
else:
# First window failed - record fails
raise FailedFilter() | [
"def",
"filter_record",
"(",
"self",
",",
"record",
")",
":",
"quality_scores",
"=",
"record",
".",
"letter_annotations",
"[",
"'phred_quality'",
"]",
"# Simple case - window covers whole sequence",
"if",
"len",
"(",
"record",
")",
"<=",
"self",
".",
"window_size",
... | Filter a single record | [
"Filter",
"a",
"single",
"record"
] | python | train |
earwig/mwparserfromhell | mwparserfromhell/parser/tokenizer.py | https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/parser/tokenizer.py#L1450-L1465 | def tokenize(self, text, context=0, skip_style_tags=False):
"""Build a list of tokens from a string of wikicode and return it."""
split = self.regex.split(text)
self._text = [segment for segment in split if segment]
self._head = self._global = self._depth = 0
self._bad_routes = set()
self._skip_style_tags = skip_style_tags
try:
tokens = self._parse(context)
except BadRoute: # pragma: no cover (untestable/exceptional case)
raise ParserError("Python tokenizer exited with BadRoute")
if self._stacks: # pragma: no cover (untestable/exceptional case)
err = "Python tokenizer exited with non-empty token stack"
raise ParserError(err)
return tokens | [
"def",
"tokenize",
"(",
"self",
",",
"text",
",",
"context",
"=",
"0",
",",
"skip_style_tags",
"=",
"False",
")",
":",
"split",
"=",
"self",
".",
"regex",
".",
"split",
"(",
"text",
")",
"self",
".",
"_text",
"=",
"[",
"segment",
"for",
"segment",
... | Build a list of tokens from a string of wikicode and return it. | [
"Build",
"a",
"list",
"of",
"tokens",
"from",
"a",
"string",
"of",
"wikicode",
"and",
"return",
"it",
"."
] | python | train |
lsbardel/python-stdnet | stdnet/apps/columnts/npts.py | https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/npts.py#L19-L23 | def front(self, *fields):
'''Return the front pair of the structure'''
ts = self.irange(0, 0, fields=fields)
if ts:
return ts.start(), ts[0] | [
"def",
"front",
"(",
"self",
",",
"*",
"fields",
")",
":",
"ts",
"=",
"self",
".",
"irange",
"(",
"0",
",",
"0",
",",
"fields",
"=",
"fields",
")",
"if",
"ts",
":",
"return",
"ts",
".",
"start",
"(",
")",
",",
"ts",
"[",
"0",
"]"
] | Return the front pair of the structure | [
"Return",
"the",
"front",
"pair",
"of",
"the",
"structure"
] | python | train |
recurly/recurly-client-python | recurly/resource.py | https://github.com/recurly/recurly-client-python/blob/682217c4e85ec5c8d4e41519ee0620d2dc4d84d7/recurly/resource.py#L673-L685 | def post(self, url, body=None):
"""Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body"""
response = self.http_request(url, 'POST', body or self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status not in (200, 201, 204):
self.raise_http_error(response)
self._url = response.getheader('Location')
if response.status in (200, 201):
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml)) | [
"def",
"post",
"(",
"self",
",",
"url",
",",
"body",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"http_request",
"(",
"url",
",",
"'POST'",
",",
"body",
"or",
"self",
",",
"{",
"'Content-Type'",
":",
"'application/xml; charset=utf-8'",
"}",
")",... | Sends this `Resource` instance to the service with a
``POST`` request to the given URL. Takes an optional body | [
"Sends",
"this",
"Resource",
"instance",
"to",
"the",
"service",
"with",
"a",
"POST",
"request",
"to",
"the",
"given",
"URL",
".",
"Takes",
"an",
"optional",
"body"
] | python | train |
thriftrw/thriftrw-python | thriftrw/idl/parser.py | https://github.com/thriftrw/thriftrw-python/blob/4f2f71acd7a0ac716c9ea5cdcea2162aa561304a/thriftrw/idl/parser.py#L224-L245 | def p_service(self, p):
'''service : SERVICE IDENTIFIER '{' function_seq '}' annotations
| SERVICE IDENTIFIER EXTENDS IDENTIFIER \
'{' function_seq '}' annotations
'''
if len(p) == 7:
p[0] = ast.Service(
name=p[2],
functions=p[4],
parent=None,
annotations=p[6],
lineno=p.lineno(2),
)
else:
p[0] = ast.Service(
name=p[2],
functions=p[6],
parent=ast.ServiceReference(p[4], p.lineno(4)),
annotations=p[8],
lineno=p.lineno(2),
) | [
"def",
"p_service",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"7",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Service",
"(",
"name",
"=",
"p",
"[",
"2",
"]",
",",
"functions",
"=",
"p",
"[",
"4",
"]",
",",
"parent... | service : SERVICE IDENTIFIER '{' function_seq '}' annotations
| SERVICE IDENTIFIER EXTENDS IDENTIFIER \
'{' function_seq '}' annotations | [
"service",
":",
"SERVICE",
"IDENTIFIER",
"{",
"function_seq",
"}",
"annotations",
"|",
"SERVICE",
"IDENTIFIER",
"EXTENDS",
"IDENTIFIER",
"\\",
"{",
"function_seq",
"}",
"annotations"
] | python | train |
vanheeringen-lab/gimmemotifs | gimmemotifs/moap.py | https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/moap.py#L817-L955 | def moap(inputfile, method="hypergeom", scoring=None, outfile=None, motiffile=None, pwmfile=None, genome=None, fpr=0.01, ncpus=None,
subsample=None):
"""Run a single motif activity prediction algorithm.
Parameters
----------
inputfile : str
:1File with regions (chr:start-end) in first column and either cluster
name in second column or a table with values.
method : str, optional
Motif activity method to use. Any of 'hypergeom', 'lasso',
'lightningclassification', 'lightningregressor', 'bayesianridge',
'rf', 'xgboost'. Default is 'hypergeom'.
scoring: str, optional
Either 'score' or 'count'
outfile : str, optional
Name of outputfile to save the fitted activity values.
motiffile : str, optional
Table with motif scan results. First column should be exactly the same
regions as in the inputfile.
pwmfile : str, optional
File with motifs in pwm format. Required when motiffile is not
supplied.
genome : str, optional
Genome name, as indexed by gimme. Required when motiffile is not
supplied
fpr : float, optional
FPR for motif scanning
ncpus : int, optional
Number of threads to use. Default is the number specified in the config.
Returns
-------
pandas DataFrame with motif activity
"""
if scoring and scoring not in ['score', 'count']:
raise ValueError("valid values are 'score' and 'count'")
config = MotifConfig()
if inputfile.endswith("feather"):
df = pd.read_feather(inputfile)
df = df.set_index(df.columns[0])
else:
# read data
df = pd.read_table(inputfile, index_col=0, comment="#")
clf = Moap.create(method, ncpus=ncpus)
if clf.ptype == "classification":
if df.shape[1] != 1:
raise ValueError("1 column expected for {}".format(method))
else:
if np.dtype('object') in set(df.dtypes):
raise ValueError(
"columns should all be numeric for {}".format(method))
if motiffile is None:
if genome is None:
raise ValueError("need a genome")
pwmfile = pwmfile_location(pwmfile)
try:
motifs = read_motifs(pwmfile)
except:
sys.stderr.write("can't read motifs from {}".format(pwmfile))
raise
# initialize scanner
s = Scanner(ncpus=ncpus)
sys.stderr.write(pwmfile + "\n")
s.set_motifs(pwmfile)
s.set_genome(genome)
s.set_background(genome=genome)
# scan for motifs
sys.stderr.write("scanning for motifs\n")
motif_names = [m.id for m in read_motifs(pwmfile)]
scores = []
if method == 'classic' or scoring == "count":
s.set_threshold(fpr=fpr)
for row in s.count(list(df.index)):
scores.append(row)
else:
for row in s.best_score(list(df.index), normalize=True):
scores.append(row)
motifs = pd.DataFrame(scores, index=df.index, columns=motif_names)
else:
motifs = pd.read_table(motiffile, index_col=0, comment="#")
if outfile and os.path.exists(outfile):
out = pd.read_table(outfile, index_col=0, comment="#")
ncols = df.shape[1]
if ncols == 1:
ncols = len(df.iloc[:,0].unique())
if out.shape[0] == motifs.shape[1] and out.shape[1] == ncols:
logger.warn("%s output already exists... skipping", method)
return out
if subsample is not None:
n = int(subsample * df.shape[0])
logger.debug("Subsampling %d regions", n)
df = df.sample(n)
motifs = motifs.loc[df.index]
if method == "lightningregressor":
outdir = os.path.dirname(outfile)
tmpname = os.path.join(outdir, ".lightning.tmp")
clf.fit(motifs, df, tmpdir=tmpname)
shutil.rmtree(tmpname)
else:
clf.fit(motifs, df)
if outfile:
with open(outfile, "w") as f:
f.write("# maelstrom - GimmeMotifs version {}\n".format(__version__))
f.write("# method: {} with motif {}\n".format(method, scoring))
if genome:
f.write("# genome: {}\n".format(genome))
if motiffile:
f.write("# motif table: {}\n".format(motiffile))
f.write("# {}\n".format(clf.act_description))
with open(outfile, "a") as f:
clf.act_.to_csv(f, sep="\t")
return clf.act_ | [
"def",
"moap",
"(",
"inputfile",
",",
"method",
"=",
"\"hypergeom\"",
",",
"scoring",
"=",
"None",
",",
"outfile",
"=",
"None",
",",
"motiffile",
"=",
"None",
",",
"pwmfile",
"=",
"None",
",",
"genome",
"=",
"None",
",",
"fpr",
"=",
"0.01",
",",
"ncp... | Run a single motif activity prediction algorithm.
Parameters
----------
inputfile : str
:1File with regions (chr:start-end) in first column and either cluster
name in second column or a table with values.
method : str, optional
Motif activity method to use. Any of 'hypergeom', 'lasso',
'lightningclassification', 'lightningregressor', 'bayesianridge',
'rf', 'xgboost'. Default is 'hypergeom'.
scoring: str, optional
Either 'score' or 'count'
outfile : str, optional
Name of outputfile to save the fitted activity values.
motiffile : str, optional
Table with motif scan results. First column should be exactly the same
regions as in the inputfile.
pwmfile : str, optional
File with motifs in pwm format. Required when motiffile is not
supplied.
genome : str, optional
Genome name, as indexed by gimme. Required when motiffile is not
supplied
fpr : float, optional
FPR for motif scanning
ncpus : int, optional
Number of threads to use. Default is the number specified in the config.
Returns
-------
pandas DataFrame with motif activity | [
"Run",
"a",
"single",
"motif",
"activity",
"prediction",
"algorithm",
".",
"Parameters",
"----------",
"inputfile",
":",
"str",
":",
"1File",
"with",
"regions",
"(",
"chr",
":",
"start",
"-",
"end",
")",
"in",
"first",
"column",
"and",
"either",
"cluster",
... | python | train |
cloudbase/python-hnvclient | hnv/client.py | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L2920-L2929 | def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data.get("properties", {})
raw_content = properties.get("statistics", None)
if raw_content is not None:
statistics = BGPPeersStatistics.from_raw_data(raw_content)
properties["statistics"] = statistics
return super(BGPPeers, cls).process_raw_data(raw_data) | [
"def",
"process_raw_data",
"(",
"cls",
",",
"raw_data",
")",
":",
"properties",
"=",
"raw_data",
".",
"get",
"(",
"\"properties\"",
",",
"{",
"}",
")",
"raw_content",
"=",
"properties",
".",
"get",
"(",
"\"statistics\"",
",",
"None",
")",
"if",
"raw_conten... | Create a new model using raw API response. | [
"Create",
"a",
"new",
"model",
"using",
"raw",
"API",
"response",
"."
] | python | train |
ff0000/scarlet | scarlet/cms/widgets.py | https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/widgets.py#L460-L476 | def get_qs(self):
"""
Returns a mapping that will be used to generate
the query string for the api url. Any values
in the the `limit_choices_to` specified on the
foreign key field and any arguments specified on
self.extra_query_kwargs are converted to a format
that can be used in a query string and returned as
a dictionary.
"""
qs = url_params_from_lookup_dict(self.rel.limit_choices_to)
if not qs:
qs = {}
if self.extra_query_kwargs:
qs.update(self.extra_query_kwargs)
return qs | [
"def",
"get_qs",
"(",
"self",
")",
":",
"qs",
"=",
"url_params_from_lookup_dict",
"(",
"self",
".",
"rel",
".",
"limit_choices_to",
")",
"if",
"not",
"qs",
":",
"qs",
"=",
"{",
"}",
"if",
"self",
".",
"extra_query_kwargs",
":",
"qs",
".",
"update",
"("... | Returns a mapping that will be used to generate
the query string for the api url. Any values
in the the `limit_choices_to` specified on the
foreign key field and any arguments specified on
self.extra_query_kwargs are converted to a format
that can be used in a query string and returned as
a dictionary. | [
"Returns",
"a",
"mapping",
"that",
"will",
"be",
"used",
"to",
"generate",
"the",
"query",
"string",
"for",
"the",
"api",
"url",
".",
"Any",
"values",
"in",
"the",
"the",
"limit_choices_to",
"specified",
"on",
"the",
"foreign",
"key",
"field",
"and",
"any"... | python | train |
openstack/networking-arista | networking_arista/common/db_lib.py | https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L286-L296 | def get_ports(device_owners=None, vnic_type=None, port_id=None, active=True):
"""Returns list of all ports in neutron the db"""
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
ports = (session
.query(port_model)
.filter_unnecessary_ports(device_owners, vnic_type, active))
if port_id:
ports = ports.filter(port_model.id == port_id)
return ports.all() | [
"def",
"get_ports",
"(",
"device_owners",
"=",
"None",
",",
"vnic_type",
"=",
"None",
",",
"port_id",
"=",
"None",
",",
"active",
"=",
"True",
")",
":",
"session",
"=",
"db",
".",
"get_reader_session",
"(",
")",
"with",
"session",
".",
"begin",
"(",
")... | Returns list of all ports in neutron the db | [
"Returns",
"list",
"of",
"all",
"ports",
"in",
"neutron",
"the",
"db"
] | python | train |
not-na/peng3d | peng3d/gui/button.py | https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/gui/button.py#L705-L828 | def redraw_bg(self):
# Convenience variables
sx,sy = self.widget.size
x,y = self.widget.pos
bx,by = self.border
# Button background
# Outer vertices
# x y
v1 = x, y+sy
v2 = x+sx, y+sy
v3 = x, y
v4 = x+sx, y
# Inner vertices
# x y
v5 = x+bx, y+sy-by
v6 = x+sx-bx, y+sy-by
v7 = x+bx, y+by
v8 = x+sx-bx, y+by
# 5 Quads, for edges and the center
qb1 = v5+v6+v2+v1
qb2 = v8+v4+v2+v6
qb3 = v3+v4+v8+v7
qb4 = v3+v7+v5+v1
qc = v7+v8+v6+v5
v = qb1+qb2+qb3+qb4+qc
self.vlist.vertices = v
bg = self.submenu.bg[:3] if isinstance(self.submenu.bg,list) or isinstance(self.submenu.bg,tuple) else [242,241,240]
o,i = bg, [min(bg[0]+8,255),min(bg[1]+8,255),min(bg[2]+8,255)]
s,h = [max(bg[0]-40,0),max(bg[1]-40,0),max(bg[2]-40,0)], [min(bg[0]+12,255),min(bg[1]+12,255),min(bg[2]+12,255)]
# Outer,Inner,Shadow,Highlight
if self.borderstyle == "flat":
if self.widget.pressed:
i = s
cb1 = i+i+i+i
cb2 = i+i+i+i
cb3 = i+i+i+i
cb4 = i+i+i+i
cc = i+i+i+i
elif self.borderstyle == "gradient":
if self.widget.pressed:
i = s
elif self.widget.is_hovering:
i = [min(i[0]+6,255),min(i[1]+6,255),min(i[2]+6,255)]
cb1 = i+i+o+o
cb2 = i+o+o+i
cb3 = o+o+i+i
cb4 = o+i+i+o
cc = i+i+i+i
elif self.borderstyle == "oldshadow":
if self.widget.pressed:
i = s
s,h = h,s
elif self.widget.is_hovering:
i = [min(i[0]+6,255),min(i[1]+6,255),min(i[2]+6,255)]
s = [min(s[0]+6,255),min(s[1]+6,255),min(s[2]+6,255)]
cb1 = h+h+h+h
cb2 = s+s+s+s
cb3 = s+s+s+s
cb4 = h+h+h+h
cc = i+i+i+i
elif self.borderstyle == "material":
if self.widget.pressed:
i = [max(bg[0]-20,0),max(bg[1]-20,0),max(bg[2]-20,0)]
elif self.widget.is_hovering:
i = [max(bg[0]-10,0),max(bg[1]-10,0),max(bg[2]-10,0)]
cb1 = s+s+o+o
cb2 = s+o+o+s
cb3 = o+o+s+s
cb4 = o+s+s+o
cc = i+i+i+i
else:
raise ValueError("Invalid Border style")
c = cb1+cb2+cb3+cb4+cc
self.vlist.colors = c
# Cross
# Old method that displayed a tick
"""if not self.widget.pressed:
self.vlist_cross.colors = 6*bg
else:
if self.borderstyle=="flat":
c = [min(bg[0]+8,255),min(bg[1]+8,255),min(bg[2]+8,255)]
elif self.borderstyle=="gradient":
c = h
elif self.borderstyle=="oldshadow":
c = h
elif self.borderstyle=="material":
c = s
self.vlist_cross.colors = 6*c
# Convenience variables
sx,sy = self.widget.size
x,y = self.widget.pos
bx,by = self.border
v1 = x+bx, y+(sy-by*2)/2+by
v2 = x+sx/2, y+(sy-by*2)/4+by
v3 = v6
v4 = x+sx, y+sy
v5 = x+sx/2, y+by
v6 = x+bx, y+(sy-by*2)/4+by
self.vlist_cross.vertices = v2+v1+v6+v5+v4+v3"""
# TODO: add better visual indicator
v1 = x+bx*1.5, y+sy-by*1.5
v2 = x+sx-bx*1.5, y+sy-by*1.5
v3 = x+bx*1.5, y+by*1.5
v4 = x+sx-bx*1.5, y+by*1.5
self.vlist_check.colors = self.checkcolor*4 if self.widget.pressed else i*4
self.vlist_check.vertices = v3+v4+v2+v1 | [
"def",
"redraw_bg",
"(",
"self",
")",
":",
"# Convenience variables",
"sx",
",",
"sy",
"=",
"self",
".",
"widget",
".",
"size",
"x",
",",
"y",
"=",
"self",
".",
"widget",
".",
"pos",
"bx",
",",
"by",
"=",
"self",
".",
"border",
"# Button background",
... | if not self.widget.pressed:
self.vlist_cross.colors = 6*bg
else:
if self.borderstyle=="flat":
c = [min(bg[0]+8,255),min(bg[1]+8,255),min(bg[2]+8,255)]
elif self.borderstyle=="gradient":
c = h
elif self.borderstyle=="oldshadow":
c = h
elif self.borderstyle=="material":
c = s
self.vlist_cross.colors = 6*c
# Convenience variables
sx,sy = self.widget.size
x,y = self.widget.pos
bx,by = self.border
v1 = x+bx, y+(sy-by*2)/2+by
v2 = x+sx/2, y+(sy-by*2)/4+by
v3 = v6
v4 = x+sx, y+sy
v5 = x+sx/2, y+by
v6 = x+bx, y+(sy-by*2)/4+by
self.vlist_cross.vertices = v2+v1+v6+v5+v4+v3 | [
"if",
"not",
"self",
".",
"widget",
".",
"pressed",
":",
"self",
".",
"vlist_cross",
".",
"colors",
"=",
"6",
"*",
"bg",
"else",
":",
"if",
"self",
".",
"borderstyle",
"==",
"flat",
":",
"c",
"=",
"[",
"min",
"(",
"bg",
"[",
"0",
"]",
"+",
"8",... | python | test |
Scoppio/RagnarokEngine3 | RagnarokEngine3/RE3.py | https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L194-L201 | def distance(vec1, vec2):
"""Calculate the distance between two Vectors"""
if isinstance(vec1, Vector2) \
and isinstance(vec2, Vector2):
dist_vec = vec2 - vec1
return dist_vec.length()
else:
raise TypeError("vec1 and vec2 must be Vector2's") | [
"def",
"distance",
"(",
"vec1",
",",
"vec2",
")",
":",
"if",
"isinstance",
"(",
"vec1",
",",
"Vector2",
")",
"and",
"isinstance",
"(",
"vec2",
",",
"Vector2",
")",
":",
"dist_vec",
"=",
"vec2",
"-",
"vec1",
"return",
"dist_vec",
".",
"length",
"(",
"... | Calculate the distance between two Vectors | [
"Calculate",
"the",
"distance",
"between",
"two",
"Vectors"
] | python | train |
jaumebonet/libconfig | libconfig/config.py | https://github.com/jaumebonet/libconfig/blob/9b34cefcbaf9a326e3f3cd517896c2933cf61a3b/libconfig/config.py#L37-L70 | def register_option(self, key, subkey, default, _type, definition,
values=None, locked=False):
"""Create a new option.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:param default: Default value of the option. Type varies and it is
described by ``_type``.
:param str _type: Type of the value of the option. Available
options are: [``int``, ``float``, ``bool``, ``text``,
``string``, ``path_in``, ``path_out``].
:param str definition: Brief explanation of the option.
:type definition: :class:`str`
:param values: Available values for the option.
:type values: :func:`list` of accepted ``_type``
:param bool locked: If True, option cannot be altered.
:raise:
:AlreadyRegisteredError: If ``key`` or ``subkey`` already
define an option.
"""
if not self.open:
return
key, subkey = _lower_keys(key, subkey)
_entry_must_not_exist(self.gc, key, subkey)
ev.value_eval(default, _type)
values = None if values is False else values
new_opt = pd.Series([key, subkey, default, _type, default,
locked, definition, values], index=self.clmn)
self.gc = self.gc.append(new_opt, ignore_index=True) | [
"def",
"register_option",
"(",
"self",
",",
"key",
",",
"subkey",
",",
"default",
",",
"_type",
",",
"definition",
",",
"values",
"=",
"None",
",",
"locked",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"open",
":",
"return",
"key",
",",
"subkey... | Create a new option.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:param default: Default value of the option. Type varies and it is
described by ``_type``.
:param str _type: Type of the value of the option. Available
options are: [``int``, ``float``, ``bool``, ``text``,
``string``, ``path_in``, ``path_out``].
:param str definition: Brief explanation of the option.
:type definition: :class:`str`
:param values: Available values for the option.
:type values: :func:`list` of accepted ``_type``
:param bool locked: If True, option cannot be altered.
:raise:
:AlreadyRegisteredError: If ``key`` or ``subkey`` already
define an option. | [
"Create",
"a",
"new",
"option",
"."
] | python | train |
VIVelev/PyDojoML | dojo/cluster/kmeans.py | https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/cluster/kmeans.py#L39-L42 | def _init_random_centroids(self):
"""Initialize the centroids as k random samples of X (k = n_clusters)
"""
self.centroids = self._X[np.random.choice(list(range(self._X.shape[0])), size=self.n_clusters), :] | [
"def",
"_init_random_centroids",
"(",
"self",
")",
":",
"self",
".",
"centroids",
"=",
"self",
".",
"_X",
"[",
"np",
".",
"random",
".",
"choice",
"(",
"list",
"(",
"range",
"(",
"self",
".",
"_X",
".",
"shape",
"[",
"0",
"]",
")",
")",
",",
"siz... | Initialize the centroids as k random samples of X (k = n_clusters) | [
"Initialize",
"the",
"centroids",
"as",
"k",
"random",
"samples",
"of",
"X",
"(",
"k",
"=",
"n_clusters",
")"
] | python | train |
LuqueDaniel/pybooru | pybooru/api_danbooru.py | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L141-L155 | def post_flag_list(self, creator_id=None, creator_name=None, post_id=None,
reason_matches=None, is_resolved=None, category=None):
"""Function to flag a post (Requires login).
Parameters:
creator_id (int): The user id of the flag's creator.
creator_name (str): The name of the flag's creator.
post_id (int): The post id if the flag.
"""
params = {
'search[creator_id]': creator_id,
'search[creator_name]': creator_name,
'search[post_id]': post_id,
}
return self._get('post_flags.json', params, auth=True) | [
"def",
"post_flag_list",
"(",
"self",
",",
"creator_id",
"=",
"None",
",",
"creator_name",
"=",
"None",
",",
"post_id",
"=",
"None",
",",
"reason_matches",
"=",
"None",
",",
"is_resolved",
"=",
"None",
",",
"category",
"=",
"None",
")",
":",
"params",
"=... | Function to flag a post (Requires login).
Parameters:
creator_id (int): The user id of the flag's creator.
creator_name (str): The name of the flag's creator.
post_id (int): The post id if the flag. | [
"Function",
"to",
"flag",
"a",
"post",
"(",
"Requires",
"login",
")",
"."
] | python | train |
lrq3000/pyFileFixity | pyFileFixity/lib/brownanrs/ff.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/ff.py#L69-L121 | def find_prime_polynomials(generator=2, c_exp=8, fast_primes=False, single=False):
'''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.'''
# fast_primes will output less results but will be significantly faster.
# single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.
# A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.
# Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.
# For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) will be galois field it becomes 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then not anymore a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial.
# Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.
# Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).
# Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no determistic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf
# Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986.
# Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field
root_charac = 2 # we're in GF(2)
field_charac = int(root_charac**c_exp - 1)
field_charac_next = int(root_charac**(c_exp+1) - 1)
prim_candidates = []
if fast_primes:
prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible
prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes
else:
prim_candidates = _range(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)
# Start of the main loop
correct_primes = []
for prim in prim_candidates: # try potential candidates primitive irreducible polys
seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)
conflict = False # flag to know if there was at least one conflict
# Second loop, build the whole Galois Field
x = GF2int(1)
for i in _range(field_charac):
# Compute the next value in the field (ie, the next power of alpha/generator)
x = x.multiply(generator, prim, field_charac+1)
# Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)
if x > field_charac or seen[x] == 1:
conflict = True
break
# Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha
else:
seen[x] = 1
# End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!
if not conflict:
correct_primes.append(prim)
if single: return prim
# Return the list of all prime polynomials
return correct_primes | [
"def",
"find_prime_polynomials",
"(",
"generator",
"=",
"2",
",",
"c_exp",
"=",
"8",
",",
"fast_primes",
"=",
"False",
",",
"single",
"=",
"False",
")",
":",
"# fast_primes will output less results but will be significantly faster.",
"# single will output the first prime po... | Compute the list of prime polynomials for the given generator and galois field characteristic exponent. | [
"Compute",
"the",
"list",
"of",
"prime",
"polynomials",
"for",
"the",
"given",
"generator",
"and",
"galois",
"field",
"characteristic",
"exponent",
"."
] | python | train |
PatrikValkovic/grammpy | grammpy/transforms/ChomskyForm/transform_from_chomsky_normal_form.py | https://github.com/PatrikValkovic/grammpy/blob/879ce0ef794ac2823acc19314fcd7a8aba53e50f/grammpy/transforms/ChomskyForm/transform_from_chomsky_normal_form.py#L20-L64 | def transform_from_chomsky_normal_form(root):
# type: (Nonterminal) -> Nonterminal
"""
Transform the tree created by grammar in the Chomsky Normal Form to original rules.
:param root: Root of parsed tree.
:return: Modified tree.
"""
# Transforms leaves
items = Traversing.post_order(root)
items = filter(lambda x: isinstance(x, (ChomskyTermRule, ChomskyTerminalReplaceRule)), items)
de = deque(items)
while de:
rule = de.popleft()
if isinstance(rule, ChomskyTermRule):
upper_nonterm = rule.from_symbols[0] # type: Nonterminal
term = rule.to_symbols[0]
Manipulations.replaceNode(upper_nonterm, term)
elif isinstance(rule, ChomskyTerminalReplaceRule):
created_rule = rule.from_rule() # type: Rule
Manipulations.replaceRule(rule, created_rule)
de.append(created_rule)
# Transform inner nodes
items = Traversing.post_order(root)
items = filter(lambda x: isinstance(x, ChomskySplitRule), items)
de = deque(items)
while de:
rule = de.popleft()
if isinstance(rule, ChomskySplitRule):
created_rule = rule.from_rule() # type: Rule
# parent nonterminals
for p in rule.from_symbols: # type: Nonterminal
p._set_to_rule(created_rule)
created_rule._from_symbols.append(p)
# left child
left_child = rule.to_symbols[0] # type: Nonterminal
left_child._set_from_rule(created_rule)
created_rule._to_symbols.append(left_child)
# right childs
for ch in rule.to_symbols[1].to_rule.to_symbols: # type: Nonterminal
ch._set_from_rule(created_rule)
created_rule.to_symbols.append(ch)
# add back if the rules is ChomskySplitRule again
de.appendleft(created_rule)
return root | [
"def",
"transform_from_chomsky_normal_form",
"(",
"root",
")",
":",
"# type: (Nonterminal) -> Nonterminal",
"# Transforms leaves",
"items",
"=",
"Traversing",
".",
"post_order",
"(",
"root",
")",
"items",
"=",
"filter",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x... | Transform the tree created by grammar in the Chomsky Normal Form to original rules.
:param root: Root of parsed tree.
:return: Modified tree. | [
"Transform",
"the",
"tree",
"created",
"by",
"grammar",
"in",
"the",
"Chomsky",
"Normal",
"Form",
"to",
"original",
"rules",
".",
":",
"param",
"root",
":",
"Root",
"of",
"parsed",
"tree",
".",
":",
"return",
":",
"Modified",
"tree",
"."
] | python | train |
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/win32/context_amd64.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/win32/context_amd64.py#L426-L465 | def from_dict(cls, ctx):
'Instance a new structure from a Python native type.'
ctx = Context(ctx)
s = cls()
ContextFlags = ctx['ContextFlags']
s.ContextFlags = ContextFlags
for key in cls._others:
if key != 'VectorRegister':
setattr(s, key, ctx[key])
else:
w = ctx[key]
v = (M128A * len(w))()
i = 0
for x in w:
y = M128A()
y.High = x >> 64
y.Low = x - (x >> 64)
v[i] = y
i += 1
setattr(s, key, v)
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in cls._control:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in cls._integer:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in cls._segments:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in cls._debug:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:
xmm = s.FltSave.xmm
for key in cls._mmx:
y = M128A()
y.High = x >> 64
y.Low = x - (x >> 64)
setattr(xmm, key, y)
return s | [
"def",
"from_dict",
"(",
"cls",
",",
"ctx",
")",
":",
"ctx",
"=",
"Context",
"(",
"ctx",
")",
"s",
"=",
"cls",
"(",
")",
"ContextFlags",
"=",
"ctx",
"[",
"'ContextFlags'",
"]",
"s",
".",
"ContextFlags",
"=",
"ContextFlags",
"for",
"key",
"in",
"cls",... | Instance a new structure from a Python native type. | [
"Instance",
"a",
"new",
"structure",
"from",
"a",
"Python",
"native",
"type",
"."
] | python | train |
xtuml/pyxtuml | xtuml/meta.py | https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/meta.py#L987-L1007 | def relate(from_instance, to_instance, rel_id, phrase=''):
'''
Relate *from_instance* to *to_instance* across *rel_id*. For reflexive
association, a *phrase* indicating the direction must also be provided.
The two instances are related to each other by copying the identifying
attributes from the instance on the TO side of a association to the instance
n the FROM side. Updated values which affect existing associations are
propagated. A set of all affected instances will be returned.
'''
if None in [from_instance, to_instance]:
return False
inst1, inst2, ass = _find_link(from_instance, to_instance, rel_id, phrase)
if not ass.source_link.connect(inst1, inst2):
raise RelateException(from_instance, to_instance, rel_id, phrase)
if not ass.target_link.connect(inst2, inst1):
raise RelateException(from_instance, to_instance, rel_id, phrase)
return True | [
"def",
"relate",
"(",
"from_instance",
",",
"to_instance",
",",
"rel_id",
",",
"phrase",
"=",
"''",
")",
":",
"if",
"None",
"in",
"[",
"from_instance",
",",
"to_instance",
"]",
":",
"return",
"False",
"inst1",
",",
"inst2",
",",
"ass",
"=",
"_find_link",... | Relate *from_instance* to *to_instance* across *rel_id*. For reflexive
association, a *phrase* indicating the direction must also be provided.
The two instances are related to each other by copying the identifying
attributes from the instance on the TO side of a association to the instance
n the FROM side. Updated values which affect existing associations are
propagated. A set of all affected instances will be returned. | [
"Relate",
"*",
"from_instance",
"*",
"to",
"*",
"to_instance",
"*",
"across",
"*",
"rel_id",
"*",
".",
"For",
"reflexive",
"association",
"a",
"*",
"phrase",
"*",
"indicating",
"the",
"direction",
"must",
"also",
"be",
"provided",
".",
"The",
"two",
"insta... | python | test |
6809/MC6809 | MC6809/components/mc6809_ops_logic.py | https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_ops_logic.py#L153-L169 | def LSL(self, a):
"""
Shifts all bits of accumulator A or B or memory location M one place to
the left. Bit zero is loaded with a zero. Bit seven of accumulator A or
B or memory location M is shifted into the C (carry) bit.
This is a duplicate assembly-language mnemonic for the single machine
instruction ASL.
source code forms: LSL Q; LSLA; LSLB
CC bits "HNZVC": naaas
"""
r = a << 1
self.clear_NZVC()
self.update_NZVC_8(a, a, r)
return r | [
"def",
"LSL",
"(",
"self",
",",
"a",
")",
":",
"r",
"=",
"a",
"<<",
"1",
"self",
".",
"clear_NZVC",
"(",
")",
"self",
".",
"update_NZVC_8",
"(",
"a",
",",
"a",
",",
"r",
")",
"return",
"r"
] | Shifts all bits of accumulator A or B or memory location M one place to
the left. Bit zero is loaded with a zero. Bit seven of accumulator A or
B or memory location M is shifted into the C (carry) bit.
This is a duplicate assembly-language mnemonic for the single machine
instruction ASL.
source code forms: LSL Q; LSLA; LSLB
CC bits "HNZVC": naaas | [
"Shifts",
"all",
"bits",
"of",
"accumulator",
"A",
"or",
"B",
"or",
"memory",
"location",
"M",
"one",
"place",
"to",
"the",
"left",
".",
"Bit",
"zero",
"is",
"loaded",
"with",
"a",
"zero",
".",
"Bit",
"seven",
"of",
"accumulator",
"A",
"or",
"B",
"or... | python | train |
pybluez/pybluez | macos/_bluetoothsockets.py | https://github.com/pybluez/pybluez/blob/e0dc4093dcbaa3ecb3fa24f8ccf22bbfe6b57fc9/macos/_bluetoothsockets.py#L616-L635 | def __waituntil(self, stopwaiting, timeoutmsg):
"""
Waits until stopwaiting() returns True, or until the wait times out
(according to the self.__timeout value).
This is to make a function wait until a buffer has been filled. i.e.
stopwaiting() should return True when the buffer is no longer empty.
"""
if not stopwaiting():
if self.__timeout == 0:
# in non-blocking mode (immediate timeout)
# push event loop to really be sure there is no data available
_macutil.looponce()
if not stopwaiting():
# trying to perform operation now would block
raise _socket.error(errno.EAGAIN, os.strerror(errno.EAGAIN))
else:
# block and wait until we get data, or time out
if not _macutil.waituntil(stopwaiting, self.__timeout):
raise _socket.timeout(timeoutmsg) | [
"def",
"__waituntil",
"(",
"self",
",",
"stopwaiting",
",",
"timeoutmsg",
")",
":",
"if",
"not",
"stopwaiting",
"(",
")",
":",
"if",
"self",
".",
"__timeout",
"==",
"0",
":",
"# in non-blocking mode (immediate timeout)",
"# push event loop to really be sure there is n... | Waits until stopwaiting() returns True, or until the wait times out
(according to the self.__timeout value).
This is to make a function wait until a buffer has been filled. i.e.
stopwaiting() should return True when the buffer is no longer empty. | [
"Waits",
"until",
"stopwaiting",
"()",
"returns",
"True",
"or",
"until",
"the",
"wait",
"times",
"out",
"(",
"according",
"to",
"the",
"self",
".",
"__timeout",
"value",
")",
"."
] | python | train |
mikedh/trimesh | trimesh/path/path.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L353-L365 | def vertex_nodes(self):
"""
Get a list of which vertex indices are nodes,
which are either endpoints or points where the
entity makes a direction change.
Returns
--------------
nodes : (n, 2) int
Indexes of self.vertices which are nodes
"""
nodes = np.vstack([e.nodes for e in self.entities])
return nodes | [
"def",
"vertex_nodes",
"(",
"self",
")",
":",
"nodes",
"=",
"np",
".",
"vstack",
"(",
"[",
"e",
".",
"nodes",
"for",
"e",
"in",
"self",
".",
"entities",
"]",
")",
"return",
"nodes"
] | Get a list of which vertex indices are nodes,
which are either endpoints or points where the
entity makes a direction change.
Returns
--------------
nodes : (n, 2) int
Indexes of self.vertices which are nodes | [
"Get",
"a",
"list",
"of",
"which",
"vertex",
"indices",
"are",
"nodes",
"which",
"are",
"either",
"endpoints",
"or",
"points",
"where",
"the",
"entity",
"makes",
"a",
"direction",
"change",
"."
] | python | train |
jeremylow/pyshk | pyshk/models.py | https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L462-L510 | def AsDict(self, dt=True):
"""
A dict representation of this Shake instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this Shake instance
"""
data = {}
if self.sharekey:
data['sharekey'] = self.sharekey
if self.name:
data['name'] = self.name
if self.user:
data['user'] = self.user.AsDict()
if self.title:
data['title'] = self.title
if self.description:
data['description'] = self.description
if self.posted_at:
if dt:
data['posted_at'] = self.posted_at
else:
data['posted_at'] = self.posted_at_iso
if self.permalink:
data['permalink'] = self.permalink
if self.width:
data['width'] = self.width
if self.height:
data['height'] = self.height
if self.image_url:
data['image_url'] = self.image_url
if self.source_url:
data['source_url'] = self.source_url
data['views'] = self.views
data['likes'] = self.likes
data['saves'] = self.saves
data['comments'] = self.comments
data['nsfw'] = self.nsfw
data['saved'] = self.saved
data['liked'] = self.liked
return data | [
"def",
"AsDict",
"(",
"self",
",",
"dt",
"=",
"True",
")",
":",
"data",
"=",
"{",
"}",
"if",
"self",
".",
"sharekey",
":",
"data",
"[",
"'sharekey'",
"]",
"=",
"self",
".",
"sharekey",
"if",
"self",
".",
"name",
":",
"data",
"[",
"'name'",
"]",
... | A dict representation of this Shake instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this Shake instance | [
"A",
"dict",
"representation",
"of",
"this",
"Shake",
"instance",
"."
] | python | train |
angr/angr | angr/analyses/cfg/segment_list.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/segment_list.py#L154-L257 | def _insert_and_merge_core(self, pos, direction):
"""
The core part of method _insert_and_merge.
:param int pos: The starting position.
:param str direction: If we are traversing forwards or backwards in the list. It determines where the "sort"
of the overlapping memory block comes from. If everything works as expected, "sort" of
the overlapping block is always equal to the segment occupied most recently.
:return: A tuple of (merged (bool), new position to begin searching (int), change in total bytes (int)
:rtype: tuple
"""
bytes_changed = 0
if direction == "forward":
if pos == len(self._list) - 1:
return False, pos, 0
previous_segment = self._list[pos]
previous_segment_pos = pos
segment = self._list[pos + 1]
segment_pos = pos + 1
else: # if direction == "backward":
if pos == 0:
return False, pos, 0
segment = self._list[pos]
segment_pos = pos
previous_segment = self._list[pos - 1]
previous_segment_pos = pos - 1
merged = False
new_pos = pos
if segment.start <= previous_segment.end:
# we should always have new_start+new_size >= segment.start
if segment.sort == previous_segment.sort:
# They are of the same sort - we should merge them!
new_end = max(previous_segment.end, segment.start + segment.size)
new_start = min(previous_segment.start, segment.start)
new_size = new_end - new_start
self._list[segment_pos] = Segment(new_start, new_end, segment.sort)
self._list.pop(previous_segment_pos)
bytes_changed = -(segment.size + previous_segment.size - new_size)
merged = True
new_pos = previous_segment_pos
else:
# Different sorts. It's a bit trickier.
if segment.start == previous_segment.end:
# They are adjacent. Just don't merge.
pass
else:
# They are overlapping. We will create one, two, or three different blocks based on how they are
# overlapping
new_segments = [ ]
if segment.start < previous_segment.start:
new_segments.append(Segment(segment.start, previous_segment.start, segment.sort))
sort = previous_segment.sort if direction == "forward" else segment.sort
new_segments.append(Segment(previous_segment.start, previous_segment.end, sort))
if segment.end < previous_segment.end:
new_segments.append(Segment(segment.end, previous_segment.end, previous_segment.sort))
elif segment.end > previous_segment.end:
new_segments.append(Segment(previous_segment.end, segment.end, segment.sort))
else: # segment.start >= previous_segment.start
if segment.start > previous_segment.start:
new_segments.append(Segment(previous_segment.start, segment.start, previous_segment.sort))
sort = previous_segment.sort if direction == "forward" else segment.sort
if segment.end > previous_segment.end:
new_segments.append(Segment(segment.start, previous_segment.end, sort))
new_segments.append(Segment(previous_segment.end, segment.end, segment.sort))
elif segment.end < previous_segment.end:
new_segments.append(Segment(segment.start, segment.end, sort))
new_segments.append(Segment(segment.end, previous_segment.end, previous_segment.sort))
else:
new_segments.append(Segment(segment.start, segment.end, sort))
# merge segments in new_segments array if they are of the same sort
i = 0
while len(new_segments) > 1 and i < len(new_segments) - 1:
s0 = new_segments[i]
s1 = new_segments[i + 1]
if s0.sort == s1.sort:
new_segments = new_segments[ : i] + [ Segment(s0.start, s1.end, s0.sort) ] + new_segments[i + 2 : ]
else:
i += 1
# Put new segments into self._list
old_size = sum([ seg.size for seg in self._list[previous_segment_pos : segment_pos + 1] ])
new_size = sum([ seg.size for seg in new_segments ])
bytes_changed = new_size - old_size
self._list = self._list[ : previous_segment_pos] + new_segments + self._list[ segment_pos + 1 : ]
merged = True
if direction == "forward":
new_pos = previous_segment_pos + len(new_segments)
else:
new_pos = previous_segment_pos
return merged, new_pos, bytes_changed | [
"def",
"_insert_and_merge_core",
"(",
"self",
",",
"pos",
",",
"direction",
")",
":",
"bytes_changed",
"=",
"0",
"if",
"direction",
"==",
"\"forward\"",
":",
"if",
"pos",
"==",
"len",
"(",
"self",
".",
"_list",
")",
"-",
"1",
":",
"return",
"False",
",... | The core part of method _insert_and_merge.
:param int pos: The starting position.
:param str direction: If we are traversing forwards or backwards in the list. It determines where the "sort"
of the overlapping memory block comes from. If everything works as expected, "sort" of
the overlapping block is always equal to the segment occupied most recently.
:return: A tuple of (merged (bool), new position to begin searching (int), change in total bytes (int)
:rtype: tuple | [
"The",
"core",
"part",
"of",
"method",
"_insert_and_merge",
"."
] | python | train |
ioos/compliance-checker | compliance_checker/cf/cf.py | https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L754-L763 | def _dims_in_order(self, dimension_order):
'''
:param list dimension_order: A list of axes
:rtype: bool
:return: Returns True if the dimensions are in order U*, T, Z, Y, X,
False otherwise
'''
regx = regex.compile(r'^[^TZYX]*T?Z?Y?X?$')
dimension_string = ''.join(dimension_order)
return regx.match(dimension_string) is not None | [
"def",
"_dims_in_order",
"(",
"self",
",",
"dimension_order",
")",
":",
"regx",
"=",
"regex",
".",
"compile",
"(",
"r'^[^TZYX]*T?Z?Y?X?$'",
")",
"dimension_string",
"=",
"''",
".",
"join",
"(",
"dimension_order",
")",
"return",
"regx",
".",
"match",
"(",
"di... | :param list dimension_order: A list of axes
:rtype: bool
:return: Returns True if the dimensions are in order U*, T, Z, Y, X,
False otherwise | [
":",
"param",
"list",
"dimension_order",
":",
"A",
"list",
"of",
"axes",
":",
"rtype",
":",
"bool",
":",
"return",
":",
"Returns",
"True",
"if",
"the",
"dimensions",
"are",
"in",
"order",
"U",
"*",
"T",
"Z",
"Y",
"X",
"False",
"otherwise"
] | python | train |
DLR-RM/RAFCON | source/rafcon/gui/controllers/graphical_editor_gaphas.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/graphical_editor_gaphas.py#L746-L831 | def add_state_view_for_model(self, state_m, parent_v=None, rel_pos=(0, 0), size=(100, 100), hierarchy_level=1):
"""Creates a `StateView` (recursively) and adds it to the canvas
The method uses the `StateModel` `state_m` to create the according `StateView`. For all content within
`state_m`, such as connections, states and ports, the views are also created. All views are added to the canvas.
:param rafcon.gui.models.state.StateModel state_m: The state to be drawn
:param rafcon.gui.mygaphas.items.state.StateView parent_v: The parent state view of new state view `state_m`
:param tuple rel_pos: The default relative position (x, y) if there is no relative position stored
:param tuple size: The default size (width, height) if there is no size stored
:param float hierarchy_level: The hierarchy level of the state
:return: The created `StateView`
:rtype: StateView
"""
assert isinstance(state_m, AbstractStateModel)
state_meta = state_m.get_meta_data_editor()
# Use default values if no size information is stored
if not gui_helper_meta_data.contains_geometric_info(state_meta['size']):
state_meta = state_m.set_meta_data_editor('size', size)
size = state_meta['size']
# Use default values if no position information is stored
if not gui_helper_meta_data.contains_geometric_info(state_meta['rel_pos']):
state_meta = state_m.set_meta_data_editor('rel_pos', rel_pos)
rel_pos = state_meta['rel_pos']
if isinstance(state_m, LibraryStateModel):
if not state_m.meta_data_was_scaled:
gui_helper_meta_data.scale_library_ports_meta_data(state_m, gaphas_editor=True)
state_v = StateView(state_m, size, hierarchy_level)
# Draw state above data flows and NameView but beneath transitions
num_data_flows = len(state_m.state.parent.data_flows) if isinstance(state_m.parent, ContainerStateModel) else 0
index = 1 if not parent_v else num_data_flows + 1
# if self.model.root_state is state_m:
# print("init root_state", state_m, state_v)
# else:
# print("init state", state_m, state_v)
# print([hash(elem) for elem in state_m.state.outcomes.values()])
self.canvas.add(state_v, parent_v, index=index)
state_v.matrix.translate(*rel_pos)
state_v.add_income(state_m.income)
for outcome_m in state_m.outcomes:
state_v.add_outcome(outcome_m)
for input_port_m in state_m.input_data_ports:
state_v.add_input_port(input_port_m)
for output_port_m in state_m.output_data_ports:
state_v.add_output_port(output_port_m)
if parent_v is not None:
# Keep state within parent
pass
if isinstance(state_m, LibraryStateModel) and state_m.show_content() and state_m.state_copy_initialized:
gui_helper_meta_data.scale_library_content(state_m)
self.add_state_view_for_model(state_m.state_copy, state_v, hierarchy_level=hierarchy_level + 1)
elif isinstance(state_m, ContainerStateModel):
num_child_state = 0
for scoped_variable_m in state_m.scoped_variables:
state_v.add_scoped_variable(scoped_variable_m)
for child_state_m in state_m.states.values():
# generate optional meta data for child state - not used if valid meta data already in child state model
child_rel_pos, child_size = gui_helper_meta_data.generate_default_state_meta_data(state_m, self.canvas,
num_child_state)
num_child_state += 1
self.add_state_view_for_model(child_state_m, state_v, child_rel_pos, child_size, hierarchy_level + 1)
for transition_m in state_m.transitions:
self.add_transition_view_for_model(transition_m, state_m)
for data_flow_m in state_m.data_flows:
self.add_data_flow_view_for_model(data_flow_m, state_m)
return state_v | [
"def",
"add_state_view_for_model",
"(",
"self",
",",
"state_m",
",",
"parent_v",
"=",
"None",
",",
"rel_pos",
"=",
"(",
"0",
",",
"0",
")",
",",
"size",
"=",
"(",
"100",
",",
"100",
")",
",",
"hierarchy_level",
"=",
"1",
")",
":",
"assert",
"isinstan... | Creates a `StateView` (recursively) and adds it to the canvas
The method uses the `StateModel` `state_m` to create the according `StateView`. For all content within
`state_m`, such as connections, states and ports, the views are also created. All views are added to the canvas.
:param rafcon.gui.models.state.StateModel state_m: The state to be drawn
:param rafcon.gui.mygaphas.items.state.StateView parent_v: The parent state view of new state view `state_m`
:param tuple rel_pos: The default relative position (x, y) if there is no relative position stored
:param tuple size: The default size (width, height) if there is no size stored
:param float hierarchy_level: The hierarchy level of the state
:return: The created `StateView`
:rtype: StateView | [
"Creates",
"a",
"StateView",
"(",
"recursively",
")",
"and",
"adds",
"it",
"to",
"the",
"canvas"
] | python | train |
pricingassistant/mrq | mrq/utils.py | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/utils.py#L105-L117 | def load_class_by_path(taskpath):
""" Given a taskpath, returns the main task class. """
return getattr(
importlib.import_module(
re.sub(
r"\.[^.]+$",
"",
taskpath)),
re.sub(
r"^.*\.",
"",
taskpath)) | [
"def",
"load_class_by_path",
"(",
"taskpath",
")",
":",
"return",
"getattr",
"(",
"importlib",
".",
"import_module",
"(",
"re",
".",
"sub",
"(",
"r\"\\.[^.]+$\"",
",",
"\"\"",
",",
"taskpath",
")",
")",
",",
"re",
".",
"sub",
"(",
"r\"^.*\\.\"",
",",
"\"... | Given a taskpath, returns the main task class. | [
"Given",
"a",
"taskpath",
"returns",
"the",
"main",
"task",
"class",
"."
] | python | train |
tensorflow/datasets | tensorflow_datasets/core/utils/tf_utils.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/tf_utils.py#L132-L148 | def assert_shape_match(shape1, shape2):
"""Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None)
"""
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(shape2)
if shape1.ndims is None or shape2.ndims is None:
raise ValueError('Shapes must have known rank. Got %s and %s.' %
(shape1.ndims, shape2.ndims))
shape1.assert_same_rank(shape2)
shape1.assert_is_compatible_with(shape2) | [
"def",
"assert_shape_match",
"(",
"shape1",
",",
"shape2",
")",
":",
"shape1",
"=",
"tf",
".",
"TensorShape",
"(",
"shape1",
")",
"shape2",
"=",
"tf",
".",
"TensorShape",
"(",
"shape2",
")",
"if",
"shape1",
".",
"ndims",
"is",
"None",
"or",
"shape2",
"... | Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None) | [
"Ensure",
"the",
"shape1",
"match",
"the",
"pattern",
"given",
"by",
"shape2",
"."
] | python | train |
insomnia-lab/libreant | users/__init__.py | https://github.com/insomnia-lab/libreant/blob/55d529435baf4c05a86b8341899e9f5e14e50245/users/__init__.py#L110-L131 | def init_db(dbURL, pwd_salt_size=None, pwd_rounds=None):
'''Initialize users database
initialize database and create necessary tables
to handle users oprations.
:param dbURL: database url, as described in :func:`init_proxy`
'''
if not dbURL:
dbURL = 'sqlite:///:memory:'
logging.getLogger(__name__).debug("Initializing database: {}".format(dict(url=dbURL,
pwd_salt_size=pwd_salt_size,
pwd_rounds=pwd_rounds)))
try:
db = init_proxy(dbURL)
global pwdCryptCtx
pwdCryptCtx = gen_crypt_context(salt_size=pwd_salt_size, rounds=pwd_rounds)
create_tables(db)
return db
except Exception as e:
e.args = (e.args[0] + ' [users database]',)
raise | [
"def",
"init_db",
"(",
"dbURL",
",",
"pwd_salt_size",
"=",
"None",
",",
"pwd_rounds",
"=",
"None",
")",
":",
"if",
"not",
"dbURL",
":",
"dbURL",
"=",
"'sqlite:///:memory:'",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"debug",
"(",
"\"Initializ... | Initialize users database
initialize database and create necessary tables
to handle users oprations.
:param dbURL: database url, as described in :func:`init_proxy` | [
"Initialize",
"users",
"database"
] | python | train |
andreikop/qutepart | qutepart/completer.py | https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/completer.py#L134-L141 | def _makeListOfCompletions(self, wordBeforeCursor, wholeWord):
"""Make list of completions, which shall be shown
"""
onlySuitable = [word for word in self._wordSet \
if word.startswith(wordBeforeCursor) and \
word != wholeWord]
return sorted(onlySuitable) | [
"def",
"_makeListOfCompletions",
"(",
"self",
",",
"wordBeforeCursor",
",",
"wholeWord",
")",
":",
"onlySuitable",
"=",
"[",
"word",
"for",
"word",
"in",
"self",
".",
"_wordSet",
"if",
"word",
".",
"startswith",
"(",
"wordBeforeCursor",
")",
"and",
"word",
"... | Make list of completions, which shall be shown | [
"Make",
"list",
"of",
"completions",
"which",
"shall",
"be",
"shown"
] | python | train |
openego/eTraGo | etrago/cluster/networkclustering.py | https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/cluster/networkclustering.py#L65-L141 | def cluster_on_extra_high_voltage(network, busmap, with_time=True):
""" Main function of the EHV-Clustering approach. Creates a new clustered
pypsa.Network given a busmap mapping all bus_ids to other bus_ids of the
same network.
Parameters
----------
network : pypsa.Network
Container for all network components.
busmap : dict
Maps old bus_ids to new bus_ids.
with_time : bool
If true time-varying data will also be aggregated.
Returns
-------
network : pypsa.Network
Container for all network components of the clustered network.
"""
network_c = Network()
buses = aggregatebuses(
network, busmap, {
'x': _leading(
busmap, network.buses), 'y': _leading(
busmap, network.buses)})
# keep attached lines
lines = network.lines.copy()
mask = lines.bus0.isin(buses.index)
lines = lines.loc[mask, :]
# keep attached links
links = network.links.copy()
mask = links.bus0.isin(buses.index)
links = links.loc[mask, :]
# keep attached transformer
transformers = network.transformers.copy()
mask = transformers.bus0.isin(buses.index)
transformers = transformers.loc[mask, :]
io.import_components_from_dataframe(network_c, buses, "Bus")
io.import_components_from_dataframe(network_c, lines, "Line")
io.import_components_from_dataframe(network_c, links, "Link")
io.import_components_from_dataframe(network_c, transformers, "Transformer")
if with_time:
network_c.snapshots = network.snapshots
network_c.set_snapshots(network.snapshots)
network_c.snapshot_weightings = network.snapshot_weightings.copy()
# dealing with generators
network.generators.control = "PV"
network.generators['weight'] = 1
new_df, new_pnl = aggregategenerators(network, busmap, with_time)
io.import_components_from_dataframe(network_c, new_df, 'Generator')
for attr, df in iteritems(new_pnl):
io.import_series_from_dataframe(network_c, df, 'Generator', attr)
# dealing with all other components
aggregate_one_ports = components.one_port_components.copy()
aggregate_one_ports.discard('Generator')
for one_port in aggregate_one_ports:
new_df, new_pnl = aggregateoneport(
network, busmap, component=one_port, with_time=with_time)
io.import_components_from_dataframe(network_c, new_df, one_port)
for attr, df in iteritems(new_pnl):
io.import_series_from_dataframe(network_c, df, one_port, attr)
network_c.determine_network_topology()
return network_c | [
"def",
"cluster_on_extra_high_voltage",
"(",
"network",
",",
"busmap",
",",
"with_time",
"=",
"True",
")",
":",
"network_c",
"=",
"Network",
"(",
")",
"buses",
"=",
"aggregatebuses",
"(",
"network",
",",
"busmap",
",",
"{",
"'x'",
":",
"_leading",
"(",
"bu... | Main function of the EHV-Clustering approach. Creates a new clustered
pypsa.Network given a busmap mapping all bus_ids to other bus_ids of the
same network.
Parameters
----------
network : pypsa.Network
Container for all network components.
busmap : dict
Maps old bus_ids to new bus_ids.
with_time : bool
If true time-varying data will also be aggregated.
Returns
-------
network : pypsa.Network
Container for all network components of the clustered network. | [
"Main",
"function",
"of",
"the",
"EHV",
"-",
"Clustering",
"approach",
".",
"Creates",
"a",
"new",
"clustered",
"pypsa",
".",
"Network",
"given",
"a",
"busmap",
"mapping",
"all",
"bus_ids",
"to",
"other",
"bus_ids",
"of",
"the",
"same",
"network",
"."
] | python | train |
davidhuser/dhis2.py | dhis2/utils.py | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L25-L46 | def load_csv(path, delimiter=','):
"""
Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV
"""
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path)) | [
"def",
"load_csv",
"(",
"path",
",",
"delimiter",
"=",
"','",
")",
":",
"try",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"csvfile",
":",
"reader",
"=",
"DictReader",
"(",
"csvfile",
",",
"delimiter",
"=",
"delimiter",
")",
"for",
"row... | Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV | [
"Load",
"CSV",
"file",
"from",
"path",
"and",
"yield",
"CSV",
"rows"
] | python | train |
theislab/scvelo | scvelo/tools/velocity.py | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/velocity.py#L104-L183 | def velocity(data, vkey='velocity', mode=None, fit_offset=False, fit_offset2=False, filter_genes=False,
groups=None, groupby=None, groups_for_fit=None, use_raw=False, perc=[5, 95], copy=False):
"""Estimates velocities in a gene-specific manner
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
mode: `'deterministic'`, `'stochastic'` or `'bayes'` (default: `'stochastic'`)
Whether to run the estimation using the deterministic or stochastic model of transcriptional dynamics.
`'bayes'` solves the stochastic model and accounts for heteroscedasticity, but is slower than `'stochastic'`.
fit_offset: `bool` (default: `False`)
Whether to fit with offset for first order moment dynamics.
fit_offset2: `bool`, (default: `False`)
Whether to fit with offset for second order moment dynamics.
filter_genes: `bool` (default: `True`)
Whether to remove genes that are not used for further velocity analysis.
groups: `str`, `list` (default: `None`)
Subset of groups, e.g. [‘g1’, ‘g2’, ‘g3’], to which velocity analysis shall be restricted.
groupby: `str`, `list` or `np.ndarray` (default: `None`)
Key of observations grouping to consider.
groups_for_fit: `str`, `list` or `np.ndarray` (default: `None`)
Subset of groups, e.g. [‘g1’, ‘g2’, ‘g3’], to which steady-state fitting shall be restricted.
use_raw: `bool` (default: `False`)
Whether to use raw data for estimation.
perc: `float` (default: `None`)
Percentile, e.g. 98, upon for extreme quantile fit (to better capture steady states for velocity estimation).
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata` with the attributes
velocity: `.layers`
velocity vectors for each individual cell
variance_velocity: `.layers`
velocity vectors for the cell variances
velocity_offset, velocity_beta, velocity_gamma, velocity_r2: `.var`
parameters
"""
adata = data.copy() if copy else data
if not use_raw and 'Ms' not in adata.layers.keys(): moments(adata)
logg.info('computing velocities', r=True)
strings_to_categoricals(adata)
categories = adata.obs[groupby].cat.categories \
if groupby is not None and groups is None and groups_for_fit is None else [None]
for cat in categories:
groups = cat if cat is not None else groups
cell_subset = groups_to_bool(adata, groups, groupby)
_adata = adata if groups is None else adata[cell_subset]
velo = Velocity(_adata, groups_for_fit=groups_for_fit, groupby=groupby, use_raw=use_raw)
velo.compute_deterministic(fit_offset, perc=perc)
if any([mode is not None and mode in item for item in ['stochastic', 'bayes', 'alpha']]):
if filter_genes and len(set(velo._velocity_genes)) > 1:
adata._inplace_subset_var(velo._velocity_genes)
residual = velo._residual[:, velo._velocity_genes]
_adata = adata if groups is None else adata[cell_subset]
velo = Velocity(_adata, residual=residual, groups_for_fit=groups_for_fit, groupby=groupby)
velo.compute_stochastic(fit_offset, fit_offset2, mode, perc=perc)
write_residuals(adata, vkey, velo._residual, cell_subset)
write_residuals(adata, 'variance_' + vkey, velo._residual2, cell_subset)
write_pars(adata, vkey, velo.get_pars(), velo.get_pars_names(), add_key=cat)
if filter_genes and len(set(velo._velocity_genes)) > 1:
adata._inplace_subset_var(velo._velocity_genes)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint('added \n'
' \'' + vkey + '\', velocity vectors for each individual cell (adata.layers)')
return adata if copy else None | [
"def",
"velocity",
"(",
"data",
",",
"vkey",
"=",
"'velocity'",
",",
"mode",
"=",
"None",
",",
"fit_offset",
"=",
"False",
",",
"fit_offset2",
"=",
"False",
",",
"filter_genes",
"=",
"False",
",",
"groups",
"=",
"None",
",",
"groupby",
"=",
"None",
","... | Estimates velocities in a gene-specific manner
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
mode: `'deterministic'`, `'stochastic'` or `'bayes'` (default: `'stochastic'`)
Whether to run the estimation using the deterministic or stochastic model of transcriptional dynamics.
`'bayes'` solves the stochastic model and accounts for heteroscedasticity, but is slower than `'stochastic'`.
fit_offset: `bool` (default: `False`)
Whether to fit with offset for first order moment dynamics.
fit_offset2: `bool`, (default: `False`)
Whether to fit with offset for second order moment dynamics.
filter_genes: `bool` (default: `True`)
Whether to remove genes that are not used for further velocity analysis.
groups: `str`, `list` (default: `None`)
Subset of groups, e.g. [‘g1’, ‘g2’, ‘g3’], to which velocity analysis shall be restricted.
groupby: `str`, `list` or `np.ndarray` (default: `None`)
Key of observations grouping to consider.
groups_for_fit: `str`, `list` or `np.ndarray` (default: `None`)
Subset of groups, e.g. [‘g1’, ‘g2’, ‘g3’], to which steady-state fitting shall be restricted.
use_raw: `bool` (default: `False`)
Whether to use raw data for estimation.
perc: `float` (default: `None`)
Percentile, e.g. 98, upon for extreme quantile fit (to better capture steady states for velocity estimation).
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata` with the attributes
velocity: `.layers`
velocity vectors for each individual cell
variance_velocity: `.layers`
velocity vectors for the cell variances
velocity_offset, velocity_beta, velocity_gamma, velocity_r2: `.var`
parameters | [
"Estimates",
"velocities",
"in",
"a",
"gene",
"-",
"specific",
"manner"
] | python | train |
brutasse/rache | rache/__init__.py | https://github.com/brutasse/rache/blob/fa9cf073376a8c731a13924b84fb8422a771a4ab/rache/__init__.py#L90-L102 | def delete_job(job_id, connection=None):
"""Deletes a job.
:param job_id: unique identifier for this job
>>> delete_job('http://example.com/test')
"""
if connection is None:
connection = r
with connection.pipeline() as pipe:
pipe.delete(job_key(job_id))
pipe.zrem(REDIS_KEY, job_id)
pipe.execute() | [
"def",
"delete_job",
"(",
"job_id",
",",
"connection",
"=",
"None",
")",
":",
"if",
"connection",
"is",
"None",
":",
"connection",
"=",
"r",
"with",
"connection",
".",
"pipeline",
"(",
")",
"as",
"pipe",
":",
"pipe",
".",
"delete",
"(",
"job_key",
"(",... | Deletes a job.
:param job_id: unique identifier for this job
>>> delete_job('http://example.com/test') | [
"Deletes",
"a",
"job",
"."
] | python | train |
tanghaibao/jcvi | jcvi/assembly/postprocess.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/postprocess.py#L365-L385 | def overlapbatch(args):
"""
%prog overlapbatch ctgfasta poolfasta
Fish out the sequences in `poolfasta` that overlap with `ctgfasta`.
Mix and combine using `minimus2`.
"""
p = OptionParser(overlap.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ctgfasta, poolfasta = args
f = Fasta(ctgfasta)
for k, rec in f.iteritems_ordered():
fastafile = k + ".fasta"
fw = open(fastafile, "w")
SeqIO.write([rec], fw, "fasta")
fw.close()
overlap([fastafile, poolfasta]) | [
"def",
"overlapbatch",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"overlap",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(... | %prog overlapbatch ctgfasta poolfasta
Fish out the sequences in `poolfasta` that overlap with `ctgfasta`.
Mix and combine using `minimus2`. | [
"%prog",
"overlapbatch",
"ctgfasta",
"poolfasta"
] | python | train |
rgmining/ria | ria/credibility.py | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/credibility.py#L109-L119 | def review_score(self, reviewer, product):
"""Find a review score from a given reviewer to a product.
Args:
reviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`.
product: Product i.e. an instance of :class:`ria.bipartite.Product`.
Returns:
A review object representing the review from the reviewer to the product.
"""
return self._g.retrieve_review(reviewer, product).score | [
"def",
"review_score",
"(",
"self",
",",
"reviewer",
",",
"product",
")",
":",
"return",
"self",
".",
"_g",
".",
"retrieve_review",
"(",
"reviewer",
",",
"product",
")",
".",
"score"
] | Find a review score from a given reviewer to a product.
Args:
reviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`.
product: Product i.e. an instance of :class:`ria.bipartite.Product`.
Returns:
A review object representing the review from the reviewer to the product. | [
"Find",
"a",
"review",
"score",
"from",
"a",
"given",
"reviewer",
"to",
"a",
"product",
"."
] | python | train |
postlund/pyatv | pyatv/__init__.py | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/__init__.py#L43-L49 | def add_service(self, zeroconf, service_type, name):
"""Handle callback from zeroconf when a service has been discovered."""
self.lock.acquire()
try:
self._internal_add(zeroconf, service_type, name)
finally:
self.lock.release() | [
"def",
"add_service",
"(",
"self",
",",
"zeroconf",
",",
"service_type",
",",
"name",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"self",
".",
"_internal_add",
"(",
"zeroconf",
",",
"service_type",
",",
"name",
")",
"finally",
... | Handle callback from zeroconf when a service has been discovered. | [
"Handle",
"callback",
"from",
"zeroconf",
"when",
"a",
"service",
"has",
"been",
"discovered",
"."
] | python | train |
enkore/i3pystatus | i3pystatus/pulseaudio/__init__.py | https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/pulseaudio/__init__.py#L67-L90 | def init(self):
"""Creates context, when context is ready context_notify_cb is called"""
# Wrap callback methods in appropriate ctypefunc instances so
# that the Pulseaudio C API can call them
self._context_notify_cb = pa_context_notify_cb_t(
self.context_notify_cb)
self._sink_info_cb = pa_sink_info_cb_t(self.sink_info_cb)
self._update_cb = pa_context_subscribe_cb_t(self.update_cb)
self._success_cb = pa_context_success_cb_t(self.success_cb)
self._server_info_cb = pa_server_info_cb_t(self.server_info_cb)
# Create the mainloop thread and set our context_notify_cb
# method to be called when there's updates relating to the
# connection to Pulseaudio
_mainloop = pa_threaded_mainloop_new()
_mainloop_api = pa_threaded_mainloop_get_api(_mainloop)
context = pa_context_new(_mainloop_api, "i3pystatus_pulseaudio".encode("ascii"))
pa_context_set_state_callback(context, self._context_notify_cb, None)
pa_context_connect(context, None, 0, None)
pa_threaded_mainloop_start(_mainloop)
self.colors = self.get_hex_color_range(self.color_muted, self.color_unmuted, 100)
self.sinks = [] | [
"def",
"init",
"(",
"self",
")",
":",
"# Wrap callback methods in appropriate ctypefunc instances so",
"# that the Pulseaudio C API can call them",
"self",
".",
"_context_notify_cb",
"=",
"pa_context_notify_cb_t",
"(",
"self",
".",
"context_notify_cb",
")",
"self",
".",
"_sin... | Creates context, when context is ready context_notify_cb is called | [
"Creates",
"context",
"when",
"context",
"is",
"ready",
"context_notify_cb",
"is",
"called"
] | python | train |
KristianOellegaard/django-health-check | health_check/views.py | https://github.com/KristianOellegaard/django-health-check/blob/575f811b7224dba0ef5f113791ca6aab20711041/health_check/views.py#L28-L36 | def from_string(cls, value):
"""Return single instance parsed from given accept header string."""
match = cls.pattern.search(value)
if match is None:
raise ValueError('"%s" is not a valid media type' % value)
try:
return cls(match.group('mime_type'), float(match.group('weight') or 1))
except ValueError:
return cls(value) | [
"def",
"from_string",
"(",
"cls",
",",
"value",
")",
":",
"match",
"=",
"cls",
".",
"pattern",
".",
"search",
"(",
"value",
")",
"if",
"match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'\"%s\" is not a valid media type'",
"%",
"value",
")",
"try",
... | Return single instance parsed from given accept header string. | [
"Return",
"single",
"instance",
"parsed",
"from",
"given",
"accept",
"header",
"string",
"."
] | python | train |
QUANTAXIS/QUANTAXIS | QUANTAXIS/QAMarket/QAShipaneBroker.py | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAMarket/QAShipaneBroker.py#L215-L266 | def query_positions(self, accounts):
"""查询现金和持仓
Arguments:
accounts {[type]} -- [description]
Returns:
dict-- {'cash_available':xxx,'hold_available':xxx}
"""
try:
data = self.call("positions", {'client': accounts})
if data is not None:
cash_part = data.get('subAccounts', {}).get('人民币', False)
if cash_part:
cash_available = cash_part.get('可用金额', cash_part.get('可用'))
position_part = data.get('dataTable', False)
if position_part:
res = data.get('dataTable', False)
if res:
hold_headers = res['columns']
hold_headers = [
cn_en_compare[item] for item in hold_headers
]
hold_available = pd.DataFrame(
res['rows'],
columns=hold_headers
)
if len(hold_available) == 1 and hold_available.amount[0] in [
None,
'',
0
]:
hold_available = pd.DataFrame(
data=None,
columns=hold_headers
)
return {
'cash_available':
cash_available,
'hold_available':
hold_available.assign(
amount=hold_available.amount.apply(float)
).loc[:,
['code',
'amount']].set_index('code').amount
}
else:
print(data)
return False, 'None ACCOUNT'
except:
return False | [
"def",
"query_positions",
"(",
"self",
",",
"accounts",
")",
":",
"try",
":",
"data",
"=",
"self",
".",
"call",
"(",
"\"positions\"",
",",
"{",
"'client'",
":",
"accounts",
"}",
")",
"if",
"data",
"is",
"not",
"None",
":",
"cash_part",
"=",
"data",
"... | 查询现金和持仓
Arguments:
accounts {[type]} -- [description]
Returns:
dict-- {'cash_available':xxx,'hold_available':xxx} | [
"查询现金和持仓"
] | python | train |
openvax/datacache | datacache/database_helpers.py | https://github.com/openvax/datacache/blob/73bcac02d37cf153710a07fbdc636aa55cb214ca/datacache/database_helpers.py#L114-L142 | def build_tables(
table_names_to_dataframes,
table_names_to_primary_keys={},
table_names_to_indices={}):
"""
Parameters
----------
table_names_to_dataframes : dict
Dictionary mapping each table name to a DataFrame
table_names_to_primary_keys : dict
Dictionary mapping each table to its primary key
table_names_to_indices : dict
Dictionary mapping each table to a set of indices
Returns list of DatabaseTable objects
"""
tables = []
for table_name, df in table_names_to_dataframes.items():
table_indices = table_names_to_indices.get(table_name, [])
primary_key = table_names_to_primary_keys.get(table_name)
table = DatabaseTable.from_dataframe(
name=table_name,
df=df,
indices=table_indices,
primary_key=primary_key)
tables.append(table)
return tables | [
"def",
"build_tables",
"(",
"table_names_to_dataframes",
",",
"table_names_to_primary_keys",
"=",
"{",
"}",
",",
"table_names_to_indices",
"=",
"{",
"}",
")",
":",
"tables",
"=",
"[",
"]",
"for",
"table_name",
",",
"df",
"in",
"table_names_to_dataframes",
".",
"... | Parameters
----------
table_names_to_dataframes : dict
Dictionary mapping each table name to a DataFrame
table_names_to_primary_keys : dict
Dictionary mapping each table to its primary key
table_names_to_indices : dict
Dictionary mapping each table to a set of indices
Returns list of DatabaseTable objects | [
"Parameters",
"----------",
"table_names_to_dataframes",
":",
"dict",
"Dictionary",
"mapping",
"each",
"table",
"name",
"to",
"a",
"DataFrame"
] | python | train |
SmileyChris/django-countries | django_countries/__init__.py | https://github.com/SmileyChris/django-countries/blob/68b0934e8180d47bc15eff2887b6887aaa6e0228/django_countries/__init__.py#L137-L160 | def translate_pair(self, code):
"""
Force a country to the current activated translation.
:returns: ``CountryTuple(code, translated_country_name)`` namedtuple
"""
name = self.countries[code]
if code in self.OLD_NAMES:
# Check if there's an older translation available if there's no
# translation for the newest name.
with override(None):
source_name = force_text(name)
name = force_text(name)
if name == source_name:
for old_name in self.OLD_NAMES[code]:
with override(None):
source_old_name = force_text(old_name)
old_name = force_text(old_name)
if old_name != source_old_name:
name = old_name
break
else:
name = force_text(name)
return CountryTuple(code, name) | [
"def",
"translate_pair",
"(",
"self",
",",
"code",
")",
":",
"name",
"=",
"self",
".",
"countries",
"[",
"code",
"]",
"if",
"code",
"in",
"self",
".",
"OLD_NAMES",
":",
"# Check if there's an older translation available if there's no",
"# translation for the newest na... | Force a country to the current activated translation.
:returns: ``CountryTuple(code, translated_country_name)`` namedtuple | [
"Force",
"a",
"country",
"to",
"the",
"current",
"activated",
"translation",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/dotenv/main.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/dotenv/main.py#L317-L348 | def run_command(command, env):
"""Run command in sub process.
Runs the command in a sub process with the variables from `env`
added in the current environment variables.
Parameters
----------
command: List[str]
The command and it's parameters
env: Dict
The additional environment variables
Returns
-------
int
The return code of the command
"""
# copy the current environment variables and add the vales from
# `env`
cmd_env = os.environ.copy()
cmd_env.update(env)
p = Popen(command,
universal_newlines=True,
bufsize=0,
shell=False,
env=cmd_env)
_, _ = p.communicate()
return p.returncode | [
"def",
"run_command",
"(",
"command",
",",
"env",
")",
":",
"# copy the current environment variables and add the vales from",
"# `env`",
"cmd_env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"cmd_env",
".",
"update",
"(",
"env",
")",
"p",
"=",
"Popen",
... | Run command in sub process.
Runs the command in a sub process with the variables from `env`
added in the current environment variables.
Parameters
----------
command: List[str]
The command and it's parameters
env: Dict
The additional environment variables
Returns
-------
int
The return code of the command | [
"Run",
"command",
"in",
"sub",
"process",
"."
] | python | train |
blockchain/api-v1-client-python | blockchain/blockexplorer.py | https://github.com/blockchain/api-v1-client-python/blob/52ea562f824f04303e75239364e06722bec8620f/blockchain/blockexplorer.py#L59-L84 | def get_address(address, filter=None, limit=None, offset=None, api_code=None):
"""Get data for a single address including an address balance and list of relevant transactions.
:param str address: address(base58 or hash160) to look up
:param FilterType filter: the filter for transactions selection (optional)
:param int limit: limit number of transactions to display (optional)
:param int offset: number of transactions to skip when display (optional)
:param str api_code: Blockchain.info API code (optional)
:return: an instance of :class:`Address` class
"""
resource = 'address/{0}?format=json'.format(address)
if filter is not None:
if isinstance(filter, FilterType):
resource += '&filter=' + str(filter.value)
else:
raise ValueError('Filter must be of FilterType enum')
if limit is not None:
resource += '&limit=' + str(limit)
if offset is not None:
resource += '&offset=' + str(offset)
if api_code is not None:
resource += '&api_code=' + api_code
response = util.call_api(resource)
json_response = json.loads(response)
return Address(json_response) | [
"def",
"get_address",
"(",
"address",
",",
"filter",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"api_code",
"=",
"None",
")",
":",
"resource",
"=",
"'address/{0}?format=json'",
".",
"format",
"(",
"address",
")",
"if",
"filt... | Get data for a single address including an address balance and list of relevant transactions.
:param str address: address(base58 or hash160) to look up
:param FilterType filter: the filter for transactions selection (optional)
:param int limit: limit number of transactions to display (optional)
:param int offset: number of transactions to skip when display (optional)
:param str api_code: Blockchain.info API code (optional)
:return: an instance of :class:`Address` class | [
"Get",
"data",
"for",
"a",
"single",
"address",
"including",
"an",
"address",
"balance",
"and",
"list",
"of",
"relevant",
"transactions",
"."
] | python | train |
a1ezzz/wasp-general | wasp_general/task/thread.py | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/thread.py#L247-L258 | def check_events(self):
""" Check "stopping"-events ('ready_event', 'stop_event', 'exception_event') if one of them is set.
Usually True value means that thread is meant to be stopped, means that it is finished its job or
some error has happened or this thread was asked to stop
:return: bool
"""
return (
self.ready_event().is_set() is True or
self.stop_event().is_set() is True or
self.exception_event().is_set() is True
) | [
"def",
"check_events",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"ready_event",
"(",
")",
".",
"is_set",
"(",
")",
"is",
"True",
"or",
"self",
".",
"stop_event",
"(",
")",
".",
"is_set",
"(",
")",
"is",
"True",
"or",
"self",
".",
"exceptio... | Check "stopping"-events ('ready_event', 'stop_event', 'exception_event') if one of them is set.
Usually True value means that thread is meant to be stopped, means that it is finished its job or
some error has happened or this thread was asked to stop
:return: bool | [
"Check",
"stopping",
"-",
"events",
"(",
"ready_event",
"stop_event",
"exception_event",
")",
"if",
"one",
"of",
"them",
"is",
"set",
".",
"Usually",
"True",
"value",
"means",
"that",
"thread",
"is",
"meant",
"to",
"be",
"stopped",
"means",
"that",
"it",
"... | python | train |
StackStorm/pybind | pybind/nos/v7_2_0/rbridge_id/router/router_bgp/router_bgp_attributes/graceful_shutdown/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/rbridge_id/router/router_bgp/router_bgp_attributes/graceful_shutdown/__init__.py#L127-L148 | def _set_gshut_route_map(self, v, load=False):
"""
Setter method for gshut_route_map, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/graceful_shutdown/gshut_route_map (rmap-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_gshut_route_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gshut_route_map() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="gshut-route-map", rest_name="route-map", parent=self, choice=(u'ch-gshut-options', u'ca-gshut-timer-route-map'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route map for graceful shutdown attributes', u'alt-name': u'route-map', u'cli-reset-container': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """gshut_route_map must be of a type compatible with rmap-type""",
'defined-type': "brocade-bgp:rmap-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="gshut-route-map", rest_name="route-map", parent=self, choice=(u'ch-gshut-options', u'ca-gshut-timer-route-map'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route map for graceful shutdown attributes', u'alt-name': u'route-map', u'cli-reset-container': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='rmap-type', is_config=True)""",
})
self.__gshut_route_map = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_gshut_route_map",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
... | Setter method for gshut_route_map, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/graceful_shutdown/gshut_route_map (rmap-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_gshut_route_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gshut_route_map() directly. | [
"Setter",
"method",
"for",
"gshut_route_map",
"mapped",
"from",
"YANG",
"variable",
"/",
"rbridge_id",
"/",
"router",
"/",
"router_bgp",
"/",
"router_bgp_attributes",
"/",
"graceful_shutdown",
"/",
"gshut_route_map",
"(",
"rmap",
"-",
"type",
")",
"If",
"this",
... | python | train |
inspirehep/harvesting-kit | harvestingkit/bibrecord.py | https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/bibrecord.py#L761-L779 | def record_move_fields(rec, tag, field_positions_local,
field_position_local=None):
"""
Move some fields to the position specified by 'field_position_local'.
:param rec: a record structure as returned by create_record()
:param tag: the tag of the fields to be moved
:param field_positions_local: the positions of the fields to move
:param field_position_local: insert the field before that
field_position_local. If unspecified, appends
the fields :return: the field_position_local
is the operation was successful
"""
fields = record_delete_fields(
rec, tag,
field_positions_local=field_positions_local)
return record_add_fields(
rec, tag, fields,
field_position_local=field_position_local) | [
"def",
"record_move_fields",
"(",
"rec",
",",
"tag",
",",
"field_positions_local",
",",
"field_position_local",
"=",
"None",
")",
":",
"fields",
"=",
"record_delete_fields",
"(",
"rec",
",",
"tag",
",",
"field_positions_local",
"=",
"field_positions_local",
")",
"... | Move some fields to the position specified by 'field_position_local'.
:param rec: a record structure as returned by create_record()
:param tag: the tag of the fields to be moved
:param field_positions_local: the positions of the fields to move
:param field_position_local: insert the field before that
field_position_local. If unspecified, appends
the fields :return: the field_position_local
is the operation was successful | [
"Move",
"some",
"fields",
"to",
"the",
"position",
"specified",
"by",
"field_position_local",
"."
] | python | valid |
gem/oq-engine | openquake/hazardlib/gsim/campbell_2003.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/campbell_2003.py#L126-L133 | def _compute_term2(self, C, mag, rrup):
"""
This computes the term f2 in equation 32, page 1021
"""
c78_factor = (C['c7'] * np.exp(C['c8'] * mag)) ** 2
R = np.sqrt(rrup ** 2 + c78_factor)
return C['c4'] * np.log(R) + (C['c5'] + C['c6'] * mag) * rrup | [
"def",
"_compute_term2",
"(",
"self",
",",
"C",
",",
"mag",
",",
"rrup",
")",
":",
"c78_factor",
"=",
"(",
"C",
"[",
"'c7'",
"]",
"*",
"np",
".",
"exp",
"(",
"C",
"[",
"'c8'",
"]",
"*",
"mag",
")",
")",
"**",
"2",
"R",
"=",
"np",
".",
"sqrt... | This computes the term f2 in equation 32, page 1021 | [
"This",
"computes",
"the",
"term",
"f2",
"in",
"equation",
"32",
"page",
"1021"
] | python | train |
olivier-m/rafter | rafter/contrib/schematics/filters.py | https://github.com/olivier-m/rafter/blob/aafcf8fd019f24abcf519307c4484cc6b4697c04/rafter/contrib/schematics/filters.py#L91-L142 | def filter_validate_response(get_response, params):
"""
This filter process the returned response. It does 2 things:
- If the response is a ``sanic.response.HTTPResponse`` and not a
:class:`rafter.http.Response`, return it immediately.
- It processes, validates and serializes this response when a schema
is provided.
That means that you can always return a normal Sanic's HTTPResponse
and thus, bypass the validation process when you need to do so.
.. important::
The response validation is only effective when:
- A ``response_schema`` has been provided by the resource definition
- The resource returns a :class:`rafter.http.Response` instance
or arbitrary data.
"""
schema = params.get('response_schema')
async def decorated_filter(request, *args, **kwargs):
response = await get_response(request, *args, **kwargs)
if isinstance(response, HTTPResponse) and \
not isinstance(response, Response):
return response
if not isinstance(response, Response):
raise TypeError('response is not an instance '
'of rafter.http.Response.')
if schema:
data = {
'body': response.data,
'headers': response.headers
}
try:
model = schema(data, strict=False, validate=False)
model.validate()
result = model.to_primitive()
response.body = result.get('body', None)
response.headers.update(result.get('headers', {}))
except BaseError as e:
log.exception(e)
abort(500, 'Wrong data output')
return response
return decorated_filter | [
"def",
"filter_validate_response",
"(",
"get_response",
",",
"params",
")",
":",
"schema",
"=",
"params",
".",
"get",
"(",
"'response_schema'",
")",
"async",
"def",
"decorated_filter",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"r... | This filter process the returned response. It does 2 things:
- If the response is a ``sanic.response.HTTPResponse`` and not a
:class:`rafter.http.Response`, return it immediately.
- It processes, validates and serializes this response when a schema
is provided.
That means that you can always return a normal Sanic's HTTPResponse
and thus, bypass the validation process when you need to do so.
.. important::
The response validation is only effective when:
- A ``response_schema`` has been provided by the resource definition
- The resource returns a :class:`rafter.http.Response` instance
or arbitrary data. | [
"This",
"filter",
"process",
"the",
"returned",
"response",
".",
"It",
"does",
"2",
"things",
":"
] | python | train |
tensorflow/cleverhans | examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L224-L255 | def init_from_storage_write_to_datastore(self,
batch_size=100,
allowed_epsilon=None,
skip_image_ids=None,
max_num_images=None):
"""Initializes dataset batches from the list of images in the datastore.
Args:
batch_size: batch size
allowed_epsilon: list of allowed epsilon or None to use default
skip_image_ids: list of image ids to skip
max_num_images: maximum number of images to read
"""
if allowed_epsilon is None:
allowed_epsilon = copy.copy(DEFAULT_EPSILON)
# init dataset batches from data in storage
self._dataset_batches = {}
# read all blob names from storage
images = self._read_image_list(skip_image_ids)
if max_num_images:
images = images[:max_num_images]
for batch_idx, batch_start in enumerate(range(0, len(images), batch_size)):
batch = images[batch_start:batch_start+batch_size]
batch_id = DATASET_BATCH_ID_PATTERN.format(batch_idx)
batch_epsilon = allowed_epsilon[batch_idx % len(allowed_epsilon)]
self.add_batch(batch_id, {'epsilon': batch_epsilon})
for image_id, image_path in batch:
self.add_image(batch_id, image_id,
{'dataset_image_id': os.path.basename(image_path)[:-4],
'image_path': image_path})
# write data to datastore
self.write_to_datastore() | [
"def",
"init_from_storage_write_to_datastore",
"(",
"self",
",",
"batch_size",
"=",
"100",
",",
"allowed_epsilon",
"=",
"None",
",",
"skip_image_ids",
"=",
"None",
",",
"max_num_images",
"=",
"None",
")",
":",
"if",
"allowed_epsilon",
"is",
"None",
":",
"allowed... | Initializes dataset batches from the list of images in the datastore.
Args:
batch_size: batch size
allowed_epsilon: list of allowed epsilon or None to use default
skip_image_ids: list of image ids to skip
max_num_images: maximum number of images to read | [
"Initializes",
"dataset",
"batches",
"from",
"the",
"list",
"of",
"images",
"in",
"the",
"datastore",
"."
] | python | train |
filestack/filestack-python | filestack/models/filestack_client.py | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_client.py#L96-L198 | def upload(self, url=None, filepath=None, multipart=True, params=None, upload_processes=None, intelligent=False):
"""
Uploads a file either through a local filepath or external_url.
Uses multipart by default and Intelligent Ingestion by default (if enabled).
You can specify the number of multipart processes and pass in parameters.
returns [Filestack.Filelink]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file')
# to use different storage:
client = FilestackClient.new('API_KEY', storage='dropbox')
filelink = client.upload(filepath='/path/to/file', params={'container': 'my-container'})
# to use an external URL:
filelink = client.upload(external_url='https://www.example.com')
# to disable intelligent ingestion:
filelink = client.upload(filepath='/path/to/file', intelligent=False)
```
"""
if params: # Check the structure of parameters
STORE_SCHEMA.check(params)
if filepath and url: # Raise an error for using both filepath and external url
raise ValueError("Cannot upload file and external url at the same time")
if filepath: # Uploading from local drive
if intelligent:
response = intelligent_ingestion.upload(
self.apikey, filepath, self.storage, params=params, security=self.security
)
elif multipart:
response = upload_utils.multipart_upload(
self.apikey, filepath, self.storage,
upload_processes=upload_processes, params=params, security=self.security
)
handle = response['handle']
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else: # Uploading with multipart=False
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
path = '{path}/{storage}'.format(path=STORE_PATH, storage=self.storage)
if self.security:
path = "{path}?policy={policy}&signature={signature}".format(
path=path, policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
response = utils.make_call(
API_URL, 'post', path=path, params=params, files=files
)
else: # Uploading from an external URL
tasks = []
request_url_list = []
if utils.store_params_checker(params):
store_task = utils.store_params_maker(params)
tasks.append(store_task)
if self.security:
tasks.append(
'security=p:{policy},s:{signature}'.format(
policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
)
tasks = '/'.join(tasks)
if tasks:
request_url_list.extend((CDN_URL, self.apikey, tasks, url))
else:
request_url_list.extend((CDN_URL, self.apikey, url))
request_url = '/'.join(request_url_list)
response = requests.post(request_url, headers=HEADERS)
if response.ok:
response = response.json()
handle = re.match(
r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)',
response['url']
).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception('Invalid API response') | [
"def",
"upload",
"(",
"self",
",",
"url",
"=",
"None",
",",
"filepath",
"=",
"None",
",",
"multipart",
"=",
"True",
",",
"params",
"=",
"None",
",",
"upload_processes",
"=",
"None",
",",
"intelligent",
"=",
"False",
")",
":",
"if",
"params",
":",
"# ... | Uploads a file either through a local filepath or external_url.
Uses multipart by default and Intelligent Ingestion by default (if enabled).
You can specify the number of multipart processes and pass in parameters.
returns [Filestack.Filelink]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file')
# to use different storage:
client = FilestackClient.new('API_KEY', storage='dropbox')
filelink = client.upload(filepath='/path/to/file', params={'container': 'my-container'})
# to use an external URL:
filelink = client.upload(external_url='https://www.example.com')
# to disable intelligent ingestion:
filelink = client.upload(filepath='/path/to/file', intelligent=False)
``` | [
"Uploads",
"a",
"file",
"either",
"through",
"a",
"local",
"filepath",
"or",
"external_url",
".",
"Uses",
"multipart",
"by",
"default",
"and",
"Intelligent",
"Ingestion",
"by",
"default",
"(",
"if",
"enabled",
")",
".",
"You",
"can",
"specify",
"the",
"numbe... | python | train |
saltstack/salt | salt/utils/network.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L2005-L2054 | def parse_host_port(host_port):
"""
Takes a string argument specifying host or host:port.
Returns a (hostname, port) or (ip_address, port) tuple. If no port is given,
the second (port) element of the returned tuple will be None.
host:port argument, for example, is accepted in the forms of:
- hostname
- hostname:1234
- hostname.domain.tld
- hostname.domain.tld:5678
- [1234::5]:5678
- 1234::5
- 10.11.12.13:4567
- 10.11.12.13
"""
host, port = None, None # default
_s_ = host_port[:]
if _s_[0] == "[":
if "]" in host_port:
host, _s_ = _s_.lstrip("[").rsplit("]", 1)
host = ipaddress.IPv6Address(host).compressed
if _s_[0] == ":":
port = int(_s_.lstrip(":"))
else:
if len(_s_) > 1:
raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port))
else:
if _s_.count(":") == 1:
host, _hostport_separator_, port = _s_.partition(":")
try:
port = int(port)
except ValueError as _e_:
log.error('host_port "%s" port value "%s" is not an integer.', host_port, port)
raise _e_
else:
host = _s_
try:
if not isinstance(host, ipaddress._BaseAddress):
host_ip = ipaddress.ip_address(host).compressed
host = host_ip
except ValueError:
log.debug('"%s" Not an IP address? Assuming it is a hostname.', host)
if host != sanitize_host(host):
log.error('bad hostname: "%s"', host)
raise ValueError('bad hostname: "{}"'.format(host))
return host, port | [
"def",
"parse_host_port",
"(",
"host_port",
")",
":",
"host",
",",
"port",
"=",
"None",
",",
"None",
"# default",
"_s_",
"=",
"host_port",
"[",
":",
"]",
"if",
"_s_",
"[",
"0",
"]",
"==",
"\"[\"",
":",
"if",
"\"]\"",
"in",
"host_port",
":",
"host",
... | Takes a string argument specifying host or host:port.
Returns a (hostname, port) or (ip_address, port) tuple. If no port is given,
the second (port) element of the returned tuple will be None.
host:port argument, for example, is accepted in the forms of:
- hostname
- hostname:1234
- hostname.domain.tld
- hostname.domain.tld:5678
- [1234::5]:5678
- 1234::5
- 10.11.12.13:4567
- 10.11.12.13 | [
"Takes",
"a",
"string",
"argument",
"specifying",
"host",
"or",
"host",
":",
"port",
"."
] | python | train |
ibis-project/ibis | ibis/expr/api.py | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L2051-L2073 | def _lpad(self, length, pad=' '):
"""
Returns string of given length by truncating (on right)
or padding (on left) original string
Parameters
----------
length : int
pad : string, default is ' '
Examples
--------
>>> import ibis
>>> table = ibis.table([('strings', 'string')])
>>> expr = table.strings.lpad(5, '-')
>>> expr = ibis.literal('a').lpad(5, '-') # 'a' becomes '----a'
>>> expr = ibis.literal('abcdefg').lpad(5, '-') # 'abcdefg' becomes 'abcde' # noqa: E501
Returns
-------
padded : string
"""
return ops.LPad(self, length, pad).to_expr() | [
"def",
"_lpad",
"(",
"self",
",",
"length",
",",
"pad",
"=",
"' '",
")",
":",
"return",
"ops",
".",
"LPad",
"(",
"self",
",",
"length",
",",
"pad",
")",
".",
"to_expr",
"(",
")"
] | Returns string of given length by truncating (on right)
or padding (on left) original string
Parameters
----------
length : int
pad : string, default is ' '
Examples
--------
>>> import ibis
>>> table = ibis.table([('strings', 'string')])
>>> expr = table.strings.lpad(5, '-')
>>> expr = ibis.literal('a').lpad(5, '-') # 'a' becomes '----a'
>>> expr = ibis.literal('abcdefg').lpad(5, '-') # 'abcdefg' becomes 'abcde' # noqa: E501
Returns
-------
padded : string | [
"Returns",
"string",
"of",
"given",
"length",
"by",
"truncating",
"(",
"on",
"right",
")",
"or",
"padding",
"(",
"on",
"left",
")",
"original",
"string"
] | python | train |
Legobot/Legobot | Legobot/Lego.py | https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Lego.py#L73-L82 | def cleanup(self):
"""
Clean up finished children.
:return: None
"""
self.lock.acquire()
logger.debug('Acquired lock in cleanup for ' + str(self))
self.children = [child for child in self.children if child.is_alive()]
self.lock.release() | [
"def",
"cleanup",
"(",
"self",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"logger",
".",
"debug",
"(",
"'Acquired lock in cleanup for '",
"+",
"str",
"(",
"self",
")",
")",
"self",
".",
"children",
"=",
"[",
"child",
"for",
"child",
"in",... | Clean up finished children.
:return: None | [
"Clean",
"up",
"finished",
"children",
"."
] | python | train |
PMEAL/OpenPNM | openpnm/utils/Project.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/utils/Project.py#L509-L524 | def _new_object(self, objtype, name=None):
r"""
"""
if objtype.startswith('net'):
obj = openpnm.network.GenericNetwork(project=self, name=name)
elif objtype.startswith('geo'):
obj = openpnm.geometry.GenericGeometry(project=self, name=name)
elif objtype.startswith('pha'):
obj = openpnm.phases.GenericPhase(project=self, name=name)
elif objtype.startswith('phy'):
obj = openpnm.physics.GenericPhysics(project=self, name=name)
elif objtype.startswith('alg'):
obj = openpnm.algorithm.GenericAlgorithm(project=self, name=name)
else:
obj = openpnm.core.Base(project=self, name=name)
return obj | [
"def",
"_new_object",
"(",
"self",
",",
"objtype",
",",
"name",
"=",
"None",
")",
":",
"if",
"objtype",
".",
"startswith",
"(",
"'net'",
")",
":",
"obj",
"=",
"openpnm",
".",
"network",
".",
"GenericNetwork",
"(",
"project",
"=",
"self",
",",
"name",
... | r""" | [
"r"
] | python | train |
joelfrederico/SciSalt | scisalt/scipy/LinLsqFit_mod.py | https://github.com/joelfrederico/SciSalt/blob/7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f/scisalt/scipy/LinLsqFit_mod.py#L91-L102 | def X(self):
"""
The :math:`X` weighted properly by the errors from *y_error*
"""
if self._X is None:
X = _copy.deepcopy(self.X_unweighted)
# print 'X shape is {}'.format(X.shape)
for i, el in enumerate(X):
X[i, :] = el/self.y_error[i]
# print 'New X shape is {}'.format(X.shape)
self._X = X
return self._X | [
"def",
"X",
"(",
"self",
")",
":",
"if",
"self",
".",
"_X",
"is",
"None",
":",
"X",
"=",
"_copy",
".",
"deepcopy",
"(",
"self",
".",
"X_unweighted",
")",
"# print 'X shape is {}'.format(X.shape)",
"for",
"i",
",",
"el",
"in",
"enumerate",
"(",
"X",
")"... | The :math:`X` weighted properly by the errors from *y_error* | [
"The",
":",
"math",
":",
"X",
"weighted",
"properly",
"by",
"the",
"errors",
"from",
"*",
"y_error",
"*"
] | python | valid |
Toblerity/rtree | rtree/index.py | https://github.com/Toblerity/rtree/blob/5d33357c8e88f1a8344415dc15a7d2440211b281/rtree/index.py#L396-L430 | def count(self, coordinates):
"""Return number of objects that intersect the given coordinates.
:param coordinates: sequence or array
This may be an object that satisfies the numpy array
protocol, providing the index's dimension * 2 coordinate
pairs representing the `mink` and `maxk` coordinates in
each dimension defining the bounds of the query window.
The following example queries the index for any objects any objects
that were stored in the index intersect the bounds given in the
coordinates::
>>> from rtree import index
>>> idx = index.Index()
>>> idx.insert(4321,
... (34.3776829412, 26.7375853734, 49.3776829412,
... 41.7375853734),
... obj=42)
>>> print(idx.count((0, 0, 60, 60)))
1
"""
p_mins, p_maxs = self.get_coordinate_pointers(coordinates)
p_num_results = ctypes.c_uint64(0)
core.rt.Index_Intersects_count(self.handle,
p_mins,
p_maxs,
self.properties.dimension,
ctypes.byref(p_num_results))
return p_num_results.value | [
"def",
"count",
"(",
"self",
",",
"coordinates",
")",
":",
"p_mins",
",",
"p_maxs",
"=",
"self",
".",
"get_coordinate_pointers",
"(",
"coordinates",
")",
"p_num_results",
"=",
"ctypes",
".",
"c_uint64",
"(",
"0",
")",
"core",
".",
"rt",
".",
"Index_Interse... | Return number of objects that intersect the given coordinates.
:param coordinates: sequence or array
This may be an object that satisfies the numpy array
protocol, providing the index's dimension * 2 coordinate
pairs representing the `mink` and `maxk` coordinates in
each dimension defining the bounds of the query window.
The following example queries the index for any objects any objects
that were stored in the index intersect the bounds given in the
coordinates::
>>> from rtree import index
>>> idx = index.Index()
>>> idx.insert(4321,
... (34.3776829412, 26.7375853734, 49.3776829412,
... 41.7375853734),
... obj=42)
>>> print(idx.count((0, 0, 60, 60)))
1 | [
"Return",
"number",
"of",
"objects",
"that",
"intersect",
"the",
"given",
"coordinates",
"."
] | python | test |
mzucker/noteshrink | noteshrink.py | https://github.com/mzucker/noteshrink/blob/7d876e5b43923c6bf8d64b7ef18f6855bfb30ce3/noteshrink.py#L311-L333 | def load(input_filename):
'''Load an image with Pillow and convert it to numpy array. Also
returns the image DPI in x and y as a tuple.'''
try:
pil_img = Image.open(input_filename)
except IOError:
sys.stderr.write('warning: error opening {}\n'.format(
input_filename))
return None, None
if pil_img.mode != 'RGB':
pil_img = pil_img.convert('RGB')
if 'dpi' in pil_img.info:
dpi = pil_img.info['dpi']
else:
dpi = (300, 300)
img = np.array(pil_img)
return img, dpi | [
"def",
"load",
"(",
"input_filename",
")",
":",
"try",
":",
"pil_img",
"=",
"Image",
".",
"open",
"(",
"input_filename",
")",
"except",
"IOError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'warning: error opening {}\\n'",
".",
"format",
"(",
"input_file... | Load an image with Pillow and convert it to numpy array. Also
returns the image DPI in x and y as a tuple. | [
"Load",
"an",
"image",
"with",
"Pillow",
"and",
"convert",
"it",
"to",
"numpy",
"array",
".",
"Also",
"returns",
"the",
"image",
"DPI",
"in",
"x",
"and",
"y",
"as",
"a",
"tuple",
"."
] | python | train |
ibis-project/ibis | ibis/impala/client.py | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/impala/client.py#L33-L40 | def create_table(self, table_name, obj=None, **kwargs):
"""
Dispatch to ImpalaClient.create_table. See that function's docstring
for more
"""
return self.client.create_table(
table_name, obj=obj, database=self.name, **kwargs
) | [
"def",
"create_table",
"(",
"self",
",",
"table_name",
",",
"obj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"client",
".",
"create_table",
"(",
"table_name",
",",
"obj",
"=",
"obj",
",",
"database",
"=",
"self",
".",
"na... | Dispatch to ImpalaClient.create_table. See that function's docstring
for more | [
"Dispatch",
"to",
"ImpalaClient",
".",
"create_table",
".",
"See",
"that",
"function",
"s",
"docstring",
"for",
"more"
] | python | train |
sci-bots/pygtkhelpers | pygtkhelpers/ui/objectlist/column.py | https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/objectlist/column.py#L252-L266 | def render_tooltip(self, tooltip, obj):
"""Render the tooltip for this column for an object
"""
if self.tooltip_attr:
val = getattr(obj, self.tooltip_attr)
elif self.tooltip_value:
val = self.tooltip_value
else:
return False
setter = getattr(tooltip, TOOLTIP_SETTERS.get(self.tooltip_type))
if self.tooltip_type in TOOLTIP_SIZED_TYPES:
setter(val, self.tooltip_image_size)
else:
setter(val)
return True | [
"def",
"render_tooltip",
"(",
"self",
",",
"tooltip",
",",
"obj",
")",
":",
"if",
"self",
".",
"tooltip_attr",
":",
"val",
"=",
"getattr",
"(",
"obj",
",",
"self",
".",
"tooltip_attr",
")",
"elif",
"self",
".",
"tooltip_value",
":",
"val",
"=",
"self",... | Render the tooltip for this column for an object | [
"Render",
"the",
"tooltip",
"for",
"this",
"column",
"for",
"an",
"object"
] | python | train |
globocom/GloboNetworkAPI-client-python | networkapiclient/ApiEnvironment.py | https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiEnvironment.py#L67-L80 | def update_environment(self, environment, environment_ids):
"""
Method to update environment
:param environment_ids: Ids of Environment
"""
uri = 'api/v3/environment/%s/' % environment_ids
data = dict()
data['environments'] = list()
data['environments'].append(environment)
return super(ApiEnvironment, self).put(uri, data) | [
"def",
"update_environment",
"(",
"self",
",",
"environment",
",",
"environment_ids",
")",
":",
"uri",
"=",
"'api/v3/environment/%s/'",
"%",
"environment_ids",
"data",
"=",
"dict",
"(",
")",
"data",
"[",
"'environments'",
"]",
"=",
"list",
"(",
")",
"data",
... | Method to update environment
:param environment_ids: Ids of Environment | [
"Method",
"to",
"update",
"environment"
] | python | train |
cthoyt/onto2nx | src/onto2nx/ontospy/core/ontospy.py | https://github.com/cthoyt/onto2nx/blob/94c86e5e187cca67534afe0260097177b66e02c8/src/onto2nx/ontospy/core/ontospy.py#L721-L731 | def nextClass(self, classuri):
"""Returns the next class in the list of classes. If it's the last one, returns the first one."""
if classuri == self.classes[-1].uri:
return self.classes[0]
flag = False
for x in self.classes:
if flag == True:
return x
if x.uri == classuri:
flag = True
return None | [
"def",
"nextClass",
"(",
"self",
",",
"classuri",
")",
":",
"if",
"classuri",
"==",
"self",
".",
"classes",
"[",
"-",
"1",
"]",
".",
"uri",
":",
"return",
"self",
".",
"classes",
"[",
"0",
"]",
"flag",
"=",
"False",
"for",
"x",
"in",
"self",
".",... | Returns the next class in the list of classes. If it's the last one, returns the first one. | [
"Returns",
"the",
"next",
"class",
"in",
"the",
"list",
"of",
"classes",
".",
"If",
"it",
"s",
"the",
"last",
"one",
"returns",
"the",
"first",
"one",
"."
] | python | train |
orbingol/NURBS-Python | geomdl/operations.py | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L1419-L1442 | def normal(obj, params, **kwargs):
""" Evaluates the normal vector of the curves or surfaces at the input parameter values.
This function is designed to evaluate normal vectors of the B-Spline and NURBS shapes at single or
multiple parameter positions.
:param obj: input geometry
:type obj: abstract.Curve or abstract.Surface
:param params: parameters
:type params: float, list or tuple
:return: a list containing "point" and "vector" pairs
:rtype: tuple
"""
normalize = kwargs.get('normalize', True)
if isinstance(obj, abstract.Curve):
if isinstance(params, (list, tuple)):
return ops.normal_curve_single_list(obj, params, normalize)
else:
return ops.normal_curve_single(obj, params, normalize)
if isinstance(obj, abstract.Surface):
if isinstance(params[0], float):
return ops.normal_surface_single(obj, params, normalize)
else:
return ops.normal_surface_single_list(obj, params, normalize) | [
"def",
"normal",
"(",
"obj",
",",
"params",
",",
"*",
"*",
"kwargs",
")",
":",
"normalize",
"=",
"kwargs",
".",
"get",
"(",
"'normalize'",
",",
"True",
")",
"if",
"isinstance",
"(",
"obj",
",",
"abstract",
".",
"Curve",
")",
":",
"if",
"isinstance",
... | Evaluates the normal vector of the curves or surfaces at the input parameter values.
This function is designed to evaluate normal vectors of the B-Spline and NURBS shapes at single or
multiple parameter positions.
:param obj: input geometry
:type obj: abstract.Curve or abstract.Surface
:param params: parameters
:type params: float, list or tuple
:return: a list containing "point" and "vector" pairs
:rtype: tuple | [
"Evaluates",
"the",
"normal",
"vector",
"of",
"the",
"curves",
"or",
"surfaces",
"at",
"the",
"input",
"parameter",
"values",
"."
] | python | train |
stevearc/dql | dql/engine.py | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L77-L94 | def iter_insert_items(tree):
""" Iterate over the items to insert from an INSERT statement """
if tree.list_values:
keys = tree.attrs
for values in tree.list_values:
if len(keys) != len(values):
raise SyntaxError(
"Values '%s' do not match attributes " "'%s'" % (values, keys)
)
yield dict(zip(keys, map(resolve, values)))
elif tree.map_values:
for item in tree.map_values:
data = {}
for (key, val) in item:
data[key] = resolve(val)
yield data
else:
raise SyntaxError("No insert data found") | [
"def",
"iter_insert_items",
"(",
"tree",
")",
":",
"if",
"tree",
".",
"list_values",
":",
"keys",
"=",
"tree",
".",
"attrs",
"for",
"values",
"in",
"tree",
".",
"list_values",
":",
"if",
"len",
"(",
"keys",
")",
"!=",
"len",
"(",
"values",
")",
":",
... | Iterate over the items to insert from an INSERT statement | [
"Iterate",
"over",
"the",
"items",
"to",
"insert",
"from",
"an",
"INSERT",
"statement"
] | python | train |
jgorset/django-respite | respite/utils/parsers.py | https://github.com/jgorset/django-respite/blob/719469d11baf91d05917bab1623bd82adc543546/respite/utils/parsers.py#L10-L29 | def parse_content_type(content_type):
"""
Return a tuple of content type and charset.
:param content_type: A string describing a content type.
"""
if '; charset=' in content_type:
return tuple(content_type.split('; charset='))
else:
if 'text' in content_type:
encoding = 'ISO-8859-1'
else:
try:
format = formats.find_by_content_type(content_type)
except formats.UnknownFormat:
encoding = 'ISO-8859-1'
else:
encoding = format.default_encoding or 'ISO-8859-1'
return (content_type, encoding) | [
"def",
"parse_content_type",
"(",
"content_type",
")",
":",
"if",
"'; charset='",
"in",
"content_type",
":",
"return",
"tuple",
"(",
"content_type",
".",
"split",
"(",
"'; charset='",
")",
")",
"else",
":",
"if",
"'text'",
"in",
"content_type",
":",
"encoding"... | Return a tuple of content type and charset.
:param content_type: A string describing a content type. | [
"Return",
"a",
"tuple",
"of",
"content",
"type",
"and",
"charset",
"."
] | python | train |
ternaris/marv | marv_node/node.py | https://github.com/ternaris/marv/blob/c221354d912ff869bbdb4f714a86a70be30d823e/marv_node/node.py#L39-L73 | def input(name, default=None, foreach=None):
"""Decorator to declare input for a node.
Plain inputs, that is plain python objects, are directly passed to
the node. Whereas streams generated by other nodes are requested
and once the handles of all input streams are available the node
is instantiated.
Args:
name (str): Name of the node function argument the input will
be passed to.
default: An optional default value for the input. This can be
any python object or another node.
foreach (bool): This parameter is currently not supported and
only for internal usage.
Returns:
The original function decorated with this input
specification. A function is turned into a node by the
:func:`node` decorator.
"""
assert default is None or foreach is None
value = foreach if foreach is not None else default
value = StreamSpec(value) if isinstance(value, Node) else value
foreach = foreach is not None
spec = InputSpec(name, value, foreach)
def deco(func):
"""Add {!r} to function.""".format(spec)
specs = func.__dict__.setdefault('__marv_input_specs__', OrderedDict())
if spec.name in specs:
raise InputNameCollision(spec.name)
specs[spec.name] = spec
return func
return deco | [
"def",
"input",
"(",
"name",
",",
"default",
"=",
"None",
",",
"foreach",
"=",
"None",
")",
":",
"assert",
"default",
"is",
"None",
"or",
"foreach",
"is",
"None",
"value",
"=",
"foreach",
"if",
"foreach",
"is",
"not",
"None",
"else",
"default",
"value"... | Decorator to declare input for a node.
Plain inputs, that is plain python objects, are directly passed to
the node. Whereas streams generated by other nodes are requested
and once the handles of all input streams are available the node
is instantiated.
Args:
name (str): Name of the node function argument the input will
be passed to.
default: An optional default value for the input. This can be
any python object or another node.
foreach (bool): This parameter is currently not supported and
only for internal usage.
Returns:
The original function decorated with this input
specification. A function is turned into a node by the
:func:`node` decorator. | [
"Decorator",
"to",
"declare",
"input",
"for",
"a",
"node",
"."
] | python | train |
DLR-RM/RAFCON | source/rafcon/gui/helpers/state.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/helpers/state.py#L40-L63 | def negative_check_for_model_in_expected_future_models(target_state_m, model, msg, delete=True, with_logger=None):
""" Checks if the expected future models list/set includes still a specific model
Return False if the handed model is still in and also creates a warning message as feedback.
:param StateModel target_state_m: The state model which expected_future_models attribute should be checked
:param Model model: Model to check for.
:param str msg: Message for the logger if a model is still in.
:param bool delete: Flag to delete respective model from list/set.
:param with_logger: A optional logger to use in case of logging messages
:rtype: bool
:return: True if empty and False if still model in set/list
"""
if with_logger is None:
with_logger = logger
# check that the model in the list expected_future_model was used
if model in target_state_m.expected_future_models:
with_logger.warning("{0} -> still in is: {1} Please inform the developer how to reproduce this."
"".format(msg, model))
if delete:
# TODO think about to destroy this models
target_state_m.expected_future_models.remove(model)
return False
return True | [
"def",
"negative_check_for_model_in_expected_future_models",
"(",
"target_state_m",
",",
"model",
",",
"msg",
",",
"delete",
"=",
"True",
",",
"with_logger",
"=",
"None",
")",
":",
"if",
"with_logger",
"is",
"None",
":",
"with_logger",
"=",
"logger",
"# check that... | Checks if the expected future models list/set includes still a specific model
Return False if the handed model is still in and also creates a warning message as feedback.
:param StateModel target_state_m: The state model which expected_future_models attribute should be checked
:param Model model: Model to check for.
:param str msg: Message for the logger if a model is still in.
:param bool delete: Flag to delete respective model from list/set.
:param with_logger: A optional logger to use in case of logging messages
:rtype: bool
:return: True if empty and False if still model in set/list | [
"Checks",
"if",
"the",
"expected",
"future",
"models",
"list",
"/",
"set",
"includes",
"still",
"a",
"specific",
"model"
] | python | train |
sholsapp/py509 | py509/x509.py | https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/x509.py#L71-L93 | def make_certificate_signing_request(pkey, digest='sha512', **name):
"""Make a certificate signing request.
:param OpenSSL.crypto.PKey pkey: A private key.
:param str digest: A valid digest to use. For example, `sha512`.
:param name: Key word arguments containing subject name parts: C, ST, L, O,
OU, CN.
:return: A certificate signing request.
:rtype: :class:`OpenSSL.crypto.X509Request`
"""
csr = crypto.X509Req()
subj = csr.get_subject()
subj.C = name.get('C', 'US')
subj.ST = name.get('ST', 'CA')
subj.L = name.get('L', 'Home')
subj.O = name.get('O', 'Home')
subj.OU = name.get('OU', 'Unit')
subj.CN = name.get('CN', 'Common')
csr.set_pubkey(pkey)
csr.set_version(3)
csr.sign(pkey, digest)
return csr | [
"def",
"make_certificate_signing_request",
"(",
"pkey",
",",
"digest",
"=",
"'sha512'",
",",
"*",
"*",
"name",
")",
":",
"csr",
"=",
"crypto",
".",
"X509Req",
"(",
")",
"subj",
"=",
"csr",
".",
"get_subject",
"(",
")",
"subj",
".",
"C",
"=",
"name",
... | Make a certificate signing request.
:param OpenSSL.crypto.PKey pkey: A private key.
:param str digest: A valid digest to use. For example, `sha512`.
:param name: Key word arguments containing subject name parts: C, ST, L, O,
OU, CN.
:return: A certificate signing request.
:rtype: :class:`OpenSSL.crypto.X509Request` | [
"Make",
"a",
"certificate",
"signing",
"request",
"."
] | python | train |
async def fetch_user_profile(self, user_id):
    """|coro|

    Gets an arbitrary user's profile. This can only be used by non-bot accounts.

    Parameters
    ------------
    user_id: :class:`int`
        The ID of the user to fetch their profile for.

    Raises
    -------
    Forbidden
        Not allowed to fetch profiles.
    HTTPException
        Fetching the profile failed.

    Returns
    --------
    :class:`.Profile`
        The profile of the user.
    """
    state = self._connection
    data = await self.http.get_user_profile(user_id)

    # Resolve every mutual guild ID against the connection cache, keeping
    # only the guilds that are actually known to this client.
    guilds = []
    for raw_guild in data.get('mutual_guilds', []):
        guild = state._get_guild(int(raw_guild['id']))
        if guild:
            guilds.append(guild)

    user_payload = data['user']
    return Profile(flags=user_payload.get('flags', 0),
                   premium_since=utils.parse_time(data.get('premium_since')),
                   mutual_guilds=guilds,
                   user=User(data=user_payload, state=state),
                   connected_accounts=data['connected_accounts'])
"async",
"def",
"fetch_user_profile",
"(",
"self",
",",
"user_id",
")",
":",
"state",
"=",
"self",
".",
"_connection",
"data",
"=",
"await",
"self",
".",
"http",
".",
"get_user_profile",
"(",
"user_id",
")",
"def",
"transform",
"(",
"d",
")",
":",
"retur... | |coro|
Gets an arbitrary user's profile. This can only be used by non-bot accounts.
Parameters
------------
user_id: :class:`int`
The ID of the user to fetch their profile for.
Raises
-------
Forbidden
Not allowed to fetch profiles.
HTTPException
Fetching the profile failed.
Returns
--------
:class:`.Profile`
The profile of the user. | [
"|coro|"
] | python | train |
RJT1990/pyflux | pyflux/ensembles/mixture_of_experts.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ensembles/mixture_of_experts.py#L153-L188 | def _model_predict_is(self, h, recalculate=False, fit_once=True):
""" Outputs ensemble model predictions for the end-of-period data
Parameters
----------
h : int
How many steps at the end of the series to run the ensemble on
recalculate: boolean
Whether to recalculate the predictions or not
fit_once : boolean
Whether to fit the model once at the beginning, or with every iteration
Returns
----------
- pd.DataFrame of the model predictions, index of dates
"""
if len(self.model_predictions_is) == 0 or h != self.h or recalculate is True:
for no, model in enumerate(self.model_list):
if no == 0:
result = model.predict_is(h, fit_once=fit_once)
result.columns = [model.model_name]
else:
new_frame = model.predict_is(h, fit_once=fit_once)
new_frame.columns = [model.model_name]
result = pd.concat([result,new_frame], axis=1)
self.model_predictions_is = result
self.h = h
return result
else:
return self.model_predictions_is | [
"def",
"_model_predict_is",
"(",
"self",
",",
"h",
",",
"recalculate",
"=",
"False",
",",
"fit_once",
"=",
"True",
")",
":",
"if",
"len",
"(",
"self",
".",
"model_predictions_is",
")",
"==",
"0",
"or",
"h",
"!=",
"self",
".",
"h",
"or",
"recalculate",
... | Outputs ensemble model predictions for the end-of-period data
Parameters
----------
h : int
How many steps at the end of the series to run the ensemble on
recalculate: boolean
Whether to recalculate the predictions or not
fit_once : boolean
Whether to fit the model once at the beginning, or with every iteration
Returns
----------
- pd.DataFrame of the model predictions, index of dates | [
"Outputs",
"ensemble",
"model",
"predictions",
"for",
"the",
"end",
"-",
"of",
"-",
"period",
"data",
"Parameters",
"----------",
"h",
":",
"int",
"How",
"many",
"steps",
"at",
"the",
"end",
"of",
"the",
"series",
"to",
"run",
"the",
"ensemble",
"on"
] | python | train |
def header_body_from_content(content):
    """\
    Tries to extract the header and the message from the cable content.

    The header is something like

    UNCLASSIFIED ...
    SUBJECT ...
    REF ...

    while the message begins usually with a summary

    1. SUMMARY ...
    ...
    10. ...

    Returns (header, msg) or (None, None) if the header/message cannot be
    detected.

    `content`
        The "content" part of a cable.
    """
    m = _CLASSIFIED_BY_PATTERN.search(content)
    # NOTE: the previous ``m and m.end() or 0`` / ``m and m.start() or None``
    # idiom silently collapsed a legitimate match at offset 0 into the
    # "no match" value; conditional expressions avoid that pitfall.
    idx = m.end() if m else 0
    m = _SUMMARY_PATTERN.search(content)
    summary_idx = m.start() if m else None
    m = _FIRST_PARAGRAPH_PATTERN.search(content)
    para_idx = m.start() if m else None
    # The body starts at the earliest of summary/first paragraph, but never
    # before the end of the classification statement.
    if summary_idx is not None and para_idx is not None:
        idx = max(idx, min(summary_idx, para_idx))
    elif summary_idx is not None:
        idx = max(summary_idx, idx)
    elif para_idx is not None:
        idx = max(para_idx, idx)
    if idx > 0:
        return content[:idx], content[idx:]
    return None, None
"def",
"header_body_from_content",
"(",
"content",
")",
":",
"m",
"=",
"_CLASSIFIED_BY_PATTERN",
".",
"search",
"(",
"content",
")",
"idx",
"=",
"m",
"and",
"m",
".",
"end",
"(",
")",
"or",
"0",
"m",
"=",
"_SUMMARY_PATTERN",
".",
"search",
"(",
"content"... | \
Tries to extract the header and the message from the cable content.
The header is something like
UNCLASSIFIED ...
SUBJECT ...
REF ...
while the message begins usually with a summary
1. SUMMARY ...
...
10. ...
Returns (header, msg) or (None, None) if the header/message cannot be
detected.
`content`
The "content" part of a cable. | [
"\\",
"Tries",
"to",
"extract",
"the",
"header",
"and",
"the",
"message",
"from",
"the",
"cable",
"content",
"."
] | python | train |
def node_transmit(node_id):
    """Transmit to another node.

    The sender's node id must be specified in the url.

    As with node.transmit() the key parameters are what and to_whom.
    However, the values these accept are more limited than for the back end
    due to the necessity of serialization.

    If what and to_whom are not specified they will default to None.
    Alternatively you can pass an int (e.g. '5') or a class name (e.g.
    'Info' or 'Agent'). Passing an int will get that info/node, passing
    a class name will pass the class. Note that if the class you are specifying
    is a custom class it will need to be added to the dictionary of
    known_classes in your experiment code.

    You may also pass the values property1, property2, property3, property4
    and property5. If passed this will fill in the relevant values of the
    transmissions created with the values you specified.

    For example, to transmit all infos of type Meme to the node with id 10:
    reqwest({
        url: "/node/" + my_node_id + "/transmit",
        method: 'post',
        type: 'json',
        data: {
            what: "Meme",
            to_whom: 10,
        },
    });
    """
    exp = experiment(session)
    what = request_parameter(parameter="what", optional=True)
    to_whom = request_parameter(parameter="to_whom", optional=True)

    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/transmit, node does not exist")

    # Resolve `what`: an integer string refers to a specific Info; any other
    # string is looked up as a class name in exp.known_classes.
    if what is not None:
        try:
            what = int(what)
            what = models.Info.get(what)
            if what is None:
                return error_response(
                    error_type="/node/transmit POST, info does not exist",
                    participant=node.participant)
        except Exception:
            try:
                what = exp.known_classes[what]
            except Exception:
                return error_response(
                    error_type="/node/transmit POST, info does not exist",
                    participant=node.participant)

    # Resolve `to_whom` the same way, but against Node / node classes.
    if to_whom is not None:
        try:
            to_whom = int(to_whom)
            to_whom = models.Node.get(to_whom)
            # BUGFIX: this previously tested `what is None`, which both
            # missed a nonexistent recipient and wrongly failed whenever
            # `what` was legitimately unspecified.
            if to_whom is None:
                return error_response(
                    error_type="/node/transmit POST, node does not exist",
                    participant=node.participant)
        except Exception:
            try:
                to_whom = exp.known_classes[to_whom]
            except Exception:
                return error_response(
                    error_type="/node/transmit POST, node does not exist",
                    participant=node.participant)

    # execute the request
    try:
        transmissions = node.transmit(what=what, to_whom=to_whom)
        for t in transmissions:
            assign_properties(t)
        session.commit()
        # ping the experiment
        exp.transmission_post_request(
            node=node,
            transmissions=transmissions)
        session.commit()
    except Exception:
        return error_response(error_type="/node/transmit POST, server error",
                              participant=node.participant)

    # return the data
    return success_response(field="transmissions",
                            data=[t.__json__() for t in transmissions],
                            request_type="transmit")
"def",
"node_transmit",
"(",
"node_id",
")",
":",
"exp",
"=",
"experiment",
"(",
"session",
")",
"what",
"=",
"request_parameter",
"(",
"parameter",
"=",
"\"what\"",
",",
"optional",
"=",
"True",
")",
"to_whom",
"=",
"request_parameter",
"(",
"parameter",
"=... | Transmit to another node.
The sender's node id must be specified in the url.
As with node.transmit() the key parameters are what and to_whom.
However, the values these accept are more limited than for the back end
due to the necessity of serialization.
If what and to_whom are not specified they will default to None.
Alternatively you can pass an int (e.g. '5') or a class name (e.g.
'Info' or 'Agent'). Passing an int will get that info/node, passing
a class name will pass the class. Note that if the class you are specifying
is a custom class it will need to be added to the dictionary of
known_classes in your experiment code.
You may also pass the values property1, property2, property3, property4
and property5. If passed this will fill in the relevant values of the
transmissions created with the values you specified.
For example, to transmit all infos of type Meme to the node with id 10:
reqwest({
url: "/node/" + my_node_id + "/transmit",
method: 'post',
type: 'json',
data: {
what: "Meme",
to_whom: 10,
},
}); | [
"Transmit",
"to",
"another",
"node",
"."
] | python | train |
def _calc_vcf_stats(in_file):
    """Calculate statistics on VCF for filtering, saving to a file for quick re-runs.
    """
    out_file = "%s-stats.yaml" % utils.splitext_plus(in_file)[0]
    # Fast path: a previous run already serialized the stats to disk.
    if utils.file_exists(out_file):
        with open(out_file) as in_handle:
            return yaml.safe_load(in_handle)
    # First run: compute and persist the stats so later calls are cheap.
    stats = {"avg_depth": _average_called_depth(in_file)}
    with open(out_file, "w") as out_handle:
        yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False)
    return stats
"def",
"_calc_vcf_stats",
"(",
"in_file",
")",
":",
"out_file",
"=",
"\"%s-stats.yaml\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"in_file",
")",
"[",
"0",
"]",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"stats",
"=",
"{",
"\"a... | Calculate statistics on VCF for filtering, saving to a file for quick re-runs. | [
"Calculate",
"statistics",
"on",
"VCF",
"for",
"filtering",
"saving",
"to",
"a",
"file",
"for",
"quick",
"re",
"-",
"runs",
"."
] | python | train |
def _find_column_label_positions(self, match_value_or_fct, levels=None):
    """Check the original DataFrame's column labels to find the locations of
    columns, and return the indexing adjusted to this region (shifted right
    when the index columns are included)."""
    matches = find_locations(self.df.columns, match_value_or_fct, levels)
    if not matches or not self.inc_index:
        return matches
    # Translate back: offset every match by the number of index columns.
    return [pos + self.nidxs for pos in matches]
"def",
"_find_column_label_positions",
"(",
"self",
",",
"match_value_or_fct",
",",
"levels",
"=",
"None",
")",
":",
"allmatches",
"=",
"find_locations",
"(",
"self",
".",
"df",
".",
"columns",
",",
"match_value_or_fct",
",",
"levels",
")",
"if",
"allmatches",
... | Check the original DataFrame's column labels to find the locations of columns. And return the adjusted
column indexing within region (offset if including index) | [
"Check",
"the",
"original",
"DataFrame",
"s",
"column",
"labels",
"to",
"find",
"the",
"locations",
"of",
"columns",
".",
"And",
"return",
"the",
"adjusted",
"column",
"indexing",
"within",
"region",
"(",
"offset",
"if",
"including",
"index",
")"
] | python | train |
def watch_logfile(self, logfile_path):
    """Analyzes queries from the tail of a given log file.

    Tails the file indefinitely, processing each new line and periodically
    refreshing the aggregated report on stderr; the final report is always
    written to stdout, even on Ctrl-C.

    :param logfile_path: path of the log file to tail
    :return: 0 on (interrupted) completion
    """
    self._run_stats['logSource'] = logfile_path
    log_parser = LogParser()
    # For each new line in the logfile ...
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        # BUGFIX: the file handle was previously never closed, and
        # `firstLine` was never reset, so timeRange['start'] was
        # overwritten on every line (start always equaled end).
        with open(logfile_path) as logfile:
            first_line = True
            for line in self._tail_file(logfile, WATCH_INTERVAL_SECONDS):
                if first_line:
                    self._run_stats['timeRange']['start'] = get_line_time(line)
                    first_line = False
                self._process_query(line, log_parser)
                self._run_stats['timeRange']['end'] = get_line_time(line)
                if time.time() >= output_time:
                    self._output_aggregated_report(sys.stderr)
                    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        self._output_aggregated_report(sys.stdout)

    return 0
"def",
"watch_logfile",
"(",
"self",
",",
"logfile_path",
")",
":",
"self",
".",
"_run_stats",
"[",
"'logSource'",
"]",
"=",
"logfile_path",
"log_parser",
"=",
"LogParser",
"(",
")",
"# For each new line in the logfile ...",
"output_time",
"=",
"time",
".",
"time"... | Analyzes queries from the tail of a given log file | [
"Analyzes",
"queries",
"from",
"the",
"tail",
"of",
"a",
"given",
"log",
"file"
] | python | train |
def add_to_space_size(self, addition_bytes):
    # type: (int) -> None
    '''
    A method to add bytes to the space size tracked by this Volume
    Descriptor.

    Parameters:
     addition_bytes - The number of bytes to add to the space size.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
    # Space size is tracked in extents, not bytes: round the byte count up
    # to a whole number of logical blocks before accumulating.
    extents_to_add = utils.ceiling_div(addition_bytes, self.log_block_size)
    self.space_size += extents_to_add
"def",
"add_to_space_size",
"(",
"self",
",",
"addition_bytes",
")",
":",
"# type: (int) -> None",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'This Volume Descriptor is not yet initialized'",
")",
"# The ... | A method to add bytes to the space size tracked by this Volume
Descriptor.
Parameters:
addition_bytes - The number of bytes to add to the space size.
Returns:
Nothing. | [
"A",
"method",
"to",
"add",
"bytes",
"to",
"the",
"space",
"size",
"tracked",
"by",
"this",
"Volume",
"Descriptor",
"."
] | python | train |
def from_export(cls, endpoint):
    # type: (ExportEndpoint) -> EndpointDescription
    """
    Converts an ExportEndpoint bean to an EndpointDescription

    :param endpoint: An ExportEndpoint bean
    :return: An EndpointDescription bean
    """
    assert isinstance(endpoint, ExportEndpoint)

    # Start from the service properties of the exported endpoint
    properties = endpoint.get_properties()

    # Keys required on the import side
    properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid
    properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations
    properties[
        pelix.remote.PROP_EXPORTED_INTERFACES
    ] = endpoint.specifications

    # Export-side keys must not appear in the description
    # NOTE(review): PROP_EXPORTED_INTERFACES is set above and then removed
    # here, matching the original behavior — confirm this is intentional.
    for key in (
        pelix.remote.PROP_EXPORTED_CONFIGS,
        pelix.remote.PROP_EXPORTED_INTERFACES,
        pelix.remote.PROP_EXPORTED_INTENTS,
        pelix.remote.PROP_EXPORTED_INTENTS_EXTRA,
    ):
        properties.pop(key, None)

    # Other information
    properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name
    properties[
        pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID
    ] = endpoint.framework

    return EndpointDescription(None, properties)
"def",
"from_export",
"(",
"cls",
",",
"endpoint",
")",
":",
"# type: (ExportEndpoint) -> EndpointDescription",
"assert",
"isinstance",
"(",
"endpoint",
",",
"ExportEndpoint",
")",
"# Service properties",
"properties",
"=",
"endpoint",
".",
"get_properties",
"(",
")",
... | Converts an ExportEndpoint bean to an EndpointDescription
:param endpoint: An ExportEndpoint bean
:return: An EndpointDescription bean | [
"Converts",
"an",
"ExportEndpoint",
"bean",
"to",
"an",
"EndpointDescription"
] | python | train |
def load_genotypes(self, pheno_covar):
    """Load all data into memory and propagate valid individuals to \
    pheno_covar.

    :param pheno_covar: Phenotype/covariate object is updated with subject
        information
    :return: None
    """
    # Work out which whitespace-separated column holds the first genotype
    # and which holds the phenotype, based on the optional PED columns
    # (FID, parents, sex, phenotype, liability) configured on DataParser.
    first_genotype = 6
    pheno_col = 5
    if not DataParser.has_sex:
        first_genotype -= 1
        pheno_col -= 1
    if not DataParser.has_parents:
        first_genotype -= 2
        pheno_col -= 2
    if not DataParser.has_pheno:
        first_genotype -= 1
    if not DataParser.has_fid:
        first_genotype -= 1
        pheno_col -= 1
    if DataParser.has_liability:
        first_genotype += 1
    sex_col = pheno_col - 1
    individual_mask = []
    self.individual_mask = []
    dropped_individuals = []

    # number of missing SNPs we can tolerate before dropping an individual
    max_missing_for_individual = numpy.sum(
        self.snp_mask[:, 0]==0) * DataParser.ind_miss_tol

    # Count lines via an external shell call to size the allele buffer.
    # NOTE(review): relies on `gzip`/`wc` being on PATH — confirm this is
    # acceptable on all target platforms.
    if DataParser.compressed_pedigree:
        ind_count, err = sys_call("gzip -cd %s | wc -l" %
                                  ("%s.gz" % (self.datasource)))
    else:
        ind_count, err = sys_call("wc -l %s" % (self.datasource))
    ind_count = int(ind_count[0].split()[0]) + 1
    snp_count = numpy.sum(self.snp_mask[:, 0] == 0)

    # One row per individual, one (allele, allele) pair per unmasked SNP.
    allelic_data = numpy.empty((ind_count, snp_count, 2), dtype='S1')

    valid_allele_count = 0
    # NOTE(review): the input file handle is never explicitly closed.
    if DataParser.compressed_pedigree:
        input_file = gzip.open("%s.gz" % self.datasource, 'rb')
    else:
        input_file = open(self.datasource)

    # Pass 1: read each individual, apply the SNP mask, and keep only
    # subjects that pass the inclusion filters and missingness threshold.
    for line in input_file:
        line = line.strip()
        if len(line) > 0:
            raw_data = line.strip().split()
            alleles = numpy.ma.MaskedArray(
                numpy.array(raw_data[first_genotype:]).reshape(-1, 2),
                self.snp_mask).compressed().reshape(-1, 2)

            # Convert the alleles into genotypes
            indid = ":".join(raw_data[0:2])
            if not DataParser.has_fid:
                indid = raw_data[0]

            # Ignore any subjects that are to be excluded and remove those
            # that have too much missingness
            if DataParser.valid_indid(indid):
                missing = numpy.sum(alleles[:, 0] ==
                                    DataParser.missing_representation)
                if missing > max_missing_for_individual:
                    individual_mask += [1, 1]
                    self.individual_mask.append(1)
                    dropped_individuals.append(indid)
                else:
                    sex = None
                    phenotype = None
                    if DataParser.has_pheno:
                        phenotype = float(raw_data[pheno_col])
                    if DataParser.has_sex:
                        sex = int(raw_data[sex_col])
                    if pheno_covar is not None:
                        pheno_covar.add_subject(indid, sex, phenotype)
                    individual_mask += [0, 0]
                    self.individual_mask.append(0)
                    allelic_data[valid_allele_count] = alleles
                    valid_allele_count += 1
            else:
                individual_mask += [1, 1]
                self.individual_mask.append(1)

    self.ind_count = valid_allele_count
    # Trim the buffer down to the subjects actually kept.
    allelic_data = allelic_data[0:valid_allele_count]
    self.genotypes = numpy.empty((snp_count, valid_allele_count))
    max_missing_individuals = DataParser.snp_miss_tol * ind_count
    dropped_loci = []
    valid_snps = 0
    valid_markers = []
    valid_rsids = []
    valid_maf = []
    valid_allele_list = []
    allele_count2s = []

    # Pass 2: per SNP, determine major/minor alleles, compute MAF, encode
    # genotypes as minor-allele counts, and drop loci failing the filters.
    # (Python 2 code: note the use of xrange.)
    for i in xrange(0, snp_count):
        snp_geno = allelic_data[:,i]
        alleles = list(set(numpy.unique(snp_geno)) -
                       set([DataParser.missing_representation]))

        if len(alleles) > 2:
            raise TooManyAlleles(chr=self.markers[i][0],
                                 rsid=self.rsids[i],
                                 alleles=alleles)

        allele_count1 = numpy.sum(snp_geno==alleles[0])
        allele_count2 = 0
        maf = 0

        if len(alleles) > 1:
            allele_count2 = numpy.sum(snp_geno==alleles[1])
            real_allele_count2 = allele_count2  # NOTE(review): unused

            # Swap so allele_count1/alleles[0] always refer to the major allele.
            if allele_count2 > allele_count1:
                sorted_alleles = [alleles[1], alleles[0]]
                alleles = sorted_alleles
                allele_count = allele_count1
                allele_count1 = allele_count2
                allele_count2 = allele_count
            maf = allele_count2 / float(allele_count1 + allele_count2)
            allele_count2s.append(allele_count2)
            major_allele = alleles[0]
            minor_allele = alleles[1]

            # Genotype = number of minor alleles carried (0/1/2);
            # missing subjects get the sentinel storage value.
            genotype_data = numpy.sum(snp_geno==alleles[1], axis=1)
            genotype_data[
                snp_geno[:, 0]==DataParser.missing_representation] = \
                DataParser.missing_storage
        else:
            major_allele = alleles[0]
            minor_allele = '?'

        # NOTE(review): if the very first SNP is monomorphic (len(alleles)==1),
        # genotype_data is referenced here before ever being assigned —
        # latent NameError; for later SNPs it silently reuses the previous
        # locus' data. Confirm whether monomorphic loci can reach this point.
        missing = numpy.sum(genotype_data==DataParser.missing_storage)
        # Drop monomorphic loci, loci outside the MAF window, and loci with
        # too many missing individuals.
        if maf == 0 or maf < DataParser.min_maf or \
            maf > DataParser.max_maf or \
            max_missing_individuals < missing:
            locus_details = self.markers[i]
            DataParser.boundary.dropped_snps[
                locus_details[0]].add(locus_details[1])
            dropped_loci.append("%s:%s" % (locus_details[0],
                                           locus_details[1]))
            self.invalid_loci.append(i)
        else:
            self.genotypes[valid_snps, :] = genotype_data
            valid_snps += 1
            valid_markers.append(list(self.markers[i]))
            valid_rsids.append(self.rsids[i])
            valid_allele_list.append([major_allele, minor_allele])
            valid_maf.append(maf)

    # Publish only the loci that survived filtering.
    self.markers = valid_markers
    self.alleles = valid_allele_list
    self.rsids = valid_rsids
    self.locus_count = valid_snps
    self.genotypes = self.genotypes[0:self.locus_count, :]
    self.allele_count2s = allele_count2s
"def",
"load_genotypes",
"(",
"self",
",",
"pheno_covar",
")",
":",
"first_genotype",
"=",
"6",
"pheno_col",
"=",
"5",
"if",
"not",
"DataParser",
".",
"has_sex",
":",
"first_genotype",
"-=",
"1",
"pheno_col",
"-=",
"1",
"if",
"not",
"DataParser",
".",
"has... | Load all data into memory and propagate valid individuals to \
pheno_covar.
:param pheno_covar: Phenotype/covariate object is updated with subject
information
:return: None | [
"Load",
"all",
"data",
"into",
"memory",
"and",
"propagate",
"valid",
"individuals",
"to",
"\\",
"pheno_covar",
"."
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.