function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def as_list(data, use_pandas=True, header=True):
    """
    Convert an H2O data object into a python-specific object.

    WARNING! This will pull all data local!

    If Pandas is available (and use_pandas is True), then pandas will be used to parse the
    data frame. Otherwise, a list-of-lists populated by character data will be returned (so
    the types of data will all be str).

    :param data: an H2O data object.
    :param use_pandas: If True, try to use pandas for reading in the data.
    :param header: If True, return column names as first element in list
    :returns: List of lists (Rows x Columns).
    """
    # Validate argument types up front so a clear error is raised before any data pull.
    assert_is_type(data, H2OFrame)
    assert_is_type(use_pandas, bool)
    assert_is_type(header, bool)
    # Delegate to the frame's own conversion; this downloads the entire frame locally.
    return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header)
6169,
1943,
6169,
208,
1393862887
] |
def load_dataset(relative_path):
    """Imports a data file within the 'h2o_data' folder.

    :param relative_path: path of the file, relative to this module or to its
        'h2o_data' subfolder; the '.csv' extension may be omitted.
    :returns: the uploaded frame (result of ``upload_file``).
    :raises H2OValueError: if no matching file exists.
    """
    assert_is_type(relative_path, str)
    h2o_dir = os.path.split(__file__)[0]
    # Try the path as given, then inside 'h2o_data', then with a '.csv' suffix added.
    for possible_file in [os.path.join(h2o_dir, relative_path),
                          os.path.join(h2o_dir, "h2o_data", relative_path),
                          os.path.join(h2o_dir, "h2o_data", relative_path + ".csv")]:
        if os.path.exists(possible_file):
            return upload_file(possible_file)
    # File not found -- raise an error!
    raise H2OValueError("Data file %s cannot be found" % relative_path)
6169,
1943,
6169,
208,
1393862887
] |
def flow():
    """
    Open H2O Flow in your browser.
    """
    # new=1 asks the browser to open the Flow UI in a new window.
    webbrowser.open(connection().base_url, new = 1)
6169,
1943,
6169,
208,
1393862887
] |
def _create_zip_file(dest_filename, *content_list):
    """Write (filename, content) pairs into a zip archive.

    :param dest_filename: name of the zip file to create.
    :param content_list: (filename, file_content) tuples to store in the archive.
    :returns: dest_filename, for convenient chaining.
    """
    from .utils.shared_utils import InMemoryZipArch
    with InMemoryZipArch(dest_filename) as zip_arch:
        for filename, file_content in content_list:
            zip_arch.append(filename, file_content)
    return dest_filename
6169,
1943,
6169,
208,
1393862887
] |
def upload_custom_metric(func, func_file="metrics.py", func_name=None, class_name=None, source_provider=None):
"""
Upload given metrics function into H2O cluster.
The metrics can have different representation:
- method
- class: needs to inherit from water.udf.CFunc2 and implement method apply(actual, predict)
returning double
- string: the same as in class case, but the class is given as a string
:param func: metrics representation: string, class, function
:param func_file: internal name of file to save given metrics representation
:param func_name: name for h2o key under which the given metric is saved
:param class_name: name of class wrapping the metrics function
:param source_provider: a function which provides a source code for given function
:return: reference to uploaded metrics function
"""
import tempfile
import inspect
# Use default source provider
if not source_provider:
source_provider = _default_source_provider
# The template wraps given metrics representation
_CFUNC_CODE_TEMPLATE = """# Generated code | h2oai/h2o-dev | [
6169,
1943,
6169,
208,
1393862887
] |
def _check_connection():
    """Raise H2OConnectionError if no cluster connection is active."""
    # Fail fast with an actionable message rather than an obscure AttributeError later.
    if not h2oconn or not h2oconn.cluster:
        raise H2OConnectionError("Not connected to a cluster. Did you run `h2o.connect()`?")
6169,
1943,
6169,
208,
1393862887
] |
def import_frame():
    """Deprecated."""
    # Delegates to import_file(); note the return value is discarded here.
    import_file()
6169,
1943,
6169,
208,
1393862887
] |
def parse():
    """Deprecated."""
    # Intentionally a no-op, kept only so old callers do not break.
    pass
6169,
1943,
6169,
208,
1393862887
] |
def cluster_info():
    """Deprecated."""
    # Kept for backward compatibility; use cluster().show_status() instead.
    _check_connection()
    cluster().show_status()
6169,
1943,
6169,
208,
1393862887
] |
def cluster_status():
    """Deprecated."""
    # Kept for backward compatibility; use cluster().show_status(True) instead.
    _check_connection()
    cluster().show_status(True)
6169,
1943,
6169,
208,
1393862887
] |
def shutdown(prompt=False):
    """Deprecated."""
    # Kept for backward compatibility; use cluster().shutdown() instead.
    _check_connection()
    cluster().shutdown(prompt)
6169,
1943,
6169,
208,
1393862887
] |
def network_test():
    """Deprecated."""
    # Kept for backward compatibility; use cluster().network_test() instead.
    _check_connection()
    cluster().network_test()
6169,
1943,
6169,
208,
1393862887
] |
def get_timezone():
    """Deprecated."""
    # Kept for backward compatibility; use cluster().timezone instead.
    _check_connection()
    return cluster().timezone
6169,
1943,
6169,
208,
1393862887
] |
def set_timezone(value):
    """Deprecated."""
    # Kept for backward compatibility; assign cluster().timezone instead.
    _check_connection()
    cluster().timezone = value
6169,
1943,
6169,
208,
1393862887
] |
def get_host_target(env):
    """Determine the canonical (host, target) architecture pair.

    :param env: construction environment; HOST_ARCH / TARGET_ARCH override
        auto-detection.
    :returns: (host, target, req_target_platform) where the first two are
        canonical architecture names and the last is the raw user request
        (or None).
    :raises ValueError: if either architecture is unrecognized.
    """
    debug('vc.py:get_host_target()')
    host_platform = env.get('HOST_ARCH')
    if not host_platform:
        host_platform = platform.machine()
        # TODO(2.5): the native Python platform.machine() function returns
        # '' on all Python versions before 2.6, after which it also uses
        # PROCESSOR_ARCHITECTURE.
        if not host_platform:
            host_platform = os.environ.get('PROCESSOR_ARCHITECTURE', '')

    # Retain user requested TARGET_ARCH
    req_target_platform = env.get('TARGET_ARCH')
    debug('vc.py:get_host_target() req_target_platform:%s' % req_target_platform)

    if req_target_platform:
        # If user requested a specific platform then only try that one.
        target_platform = req_target_platform
    else:
        target_platform = host_platform

    # Fix: 'except KeyError, e' is Python-2-only syntax and the bound 'e'
    # was never used; the bare form below is valid on Python 2.6+ and 3.
    try:
        host = _ARCH_TO_CANONICAL[host_platform.lower()]
    except KeyError:
        msg = "Unrecognized host architecture %s"
        raise ValueError(msg % repr(host_platform))

    try:
        target = _ARCH_TO_CANONICAL[target_platform.lower()]
    except KeyError:
        all_archs = str(_ARCH_TO_CANONICAL.keys())
        raise ValueError("Unrecognized target architecture %s\n\tValid architectures: %s" % (target_platform, all_archs))

    return (host, target, req_target_platform)
8411,
456,
8411,
240,
1366731633
] |
def msvc_version_to_maj_min(msvc_version):
    """Split an MSVC version string such as '9.0' or '10.0Exp' into ints.

    :param msvc_version: version string; non-digit/non-dot characters are
        stripped before parsing.
    :returns: (major, minor) tuple of ints.
    :raises ValueError: if the numeric part is not exactly 'major.minor'.
    """
    import string
    # Keep only digits and dots, e.g. '10.0Exp' -> '10.0'.
    # (Uses stdlib string.digits instead of the module-level alias.)
    msvc_version_numeric = ''.join([x for x in msvc_version if x in string.digits + '.'])

    t = msvc_version_numeric.split(".")
    if len(t) != 2:
        raise ValueError("Unrecognized version %s (%s)" % (msvc_version, msvc_version_numeric))
    try:
        maj = int(t[0])
        # Renamed from 'min', which shadowed the builtin; also fixed the
        # Python-2-only 'except ValueError, e' syntax below.
        minor = int(t[1])
        return maj, minor
    except ValueError:
        raise ValueError("Unrecognized version %s (%s)" % (msvc_version, msvc_version_numeric))
8411,
456,
8411,
240,
1366731633
] |
def find_vc_pdir(msvc_version):
    """Try to find the product directory for the given
    version.

    Note
    ----
    If for some reason the requested version could not be found, an
    exception which inherits from VisualCException will be raised."""
    root = 'Software\\'
    if common.is_win64():
        # Look under the 32-bit registry view on 64-bit Windows.
        root = root + 'Wow6432Node\\'
    try:
        hkeys = _VCVER_TO_PRODUCT_DIR[msvc_version]
    except KeyError:
        debug("Unknown version of MSVC: %s" % msvc_version)
        raise UnsupportedVersion("Unknown version %s" % msvc_version)
    for key in hkeys:
        key = root + key
        try:
            comps = common.read_reg(key)
        except WindowsError, e:
            debug('find_vc_dir(): no VC registry key %s' % repr(key))
        else:
            debug('find_vc_dir(): found VC in registry: %s' % comps)
            if msvc_version == "15.0":
                # VS2017 registers its install root; the compiler lives under VC.
                comps = os.path.join(comps, "VC")
            if os.path.exists(comps):
                return comps
            else:
                debug('find_vc_dir(): reg says dir is %s, but it does not exist. (ignoring)'\
                    % comps)
    # NOTE(review): 'comps' holds the value from the last loop iteration here;
    # if hkeys were ever empty this raise would itself fail with NameError -- confirm.
    raise MissingConfiguration("registry dir %s not found on the filesystem" % comps)
    return None  # NOTE(review): unreachable -- the raise above always fires first.
8411,
456,
8411,
240,
1366731633
] |
def cached_get_installed_vcs():
    """Return the list of installed VC versions, computing it only once."""
    global __INSTALLED_VCS_RUN
    if __INSTALLED_VCS_RUN is None:
        # First call: do the (expensive) detection and memoize the result.
        ret = get_installed_vcs()
        __INSTALLED_VCS_RUN = ret
    return __INSTALLED_VCS_RUN
8411,
456,
8411,
240,
1366731633
] |
def reset_installed_vcs():
    """Make it try again to find VC. This is just for the tests.

    Bug fix: without the ``global`` declaration the assignment created a
    function-local variable, so the module-level cache used by
    ``cached_get_installed_vcs()`` was never actually cleared.
    """
    global __INSTALLED_VCS_RUN
    __INSTALLED_VCS_RUN = None
8411,
456,
8411,
240,
1366731633
] |
def script_env(script, args=None):
    """Run a VC setup batch script and return the environment it produces.

    Output is cached per (script, args) so each batch file runs only once.

    :param script: path of the batch script to execute.
    :param args: extra arguments passed to the script (hashable; used in the
        cache key).
    :returns: parsed environment dict from ``common.parse_output``.
    :raises BatchFileExecutionError: if the script reports a configuration error.
    """
    cache_key = (script, args)
    stdout = script_env_stdout_cache.get(cache_key, None)
    if stdout is None:
        stdout = common.get_output(script, args)
        script_env_stdout_cache[cache_key] = stdout

        # Stupid batch files do not set return code: we take a look at the
        # beginning of the output for an error message instead
        olines = stdout.splitlines()
        # Guard against completely empty output, which previously raised
        # an IndexError on olines[0].
        if olines and olines[0].startswith("The specified configuration type is missing"):
            raise BatchFileExecutionError("\n".join(olines[:2]))

    return common.parse_output(stdout)
8411,
456,
8411,
240,
1366731633
] |
def msvc_setup_env_once(env):
    """Run msvc_setup_env(env) at most once for this environment."""
    try:
        has_run = env["MSVC_SETUP_RUN"]
    except KeyError:
        has_run = False

    if not has_run:
        msvc_setup_env(env)
        # Mark the environment so repeated calls become no-ops.
        env["MSVC_SETUP_RUN"] = True
8411,
456,
8411,
240,
1366731633
] |
def msvc_setup_env(env):
    """Locate an MSVC setup batch script and merge its variables into env.

    Returns None when no compiler is found or when MSVC_USE_SCRIPT is
    explicitly False; otherwise mutates env in place.
    """
    debug('msvc_setup_env()')

    version = get_default_version(env)
    if version is None:
        warn_msg = "No version of Visual Studio compiler found - C/C++ " \
                   "compilers most likely not set correctly"
        # Nuitka: Useless warning for us.
        # SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
        return None
    debug('msvc_setup_env: using specified MSVC version %s\n' % repr(version))

    # XXX: we set-up both MSVS version for backward
    # compatibility with the msvs tool
    env['MSVC_VERSION'] = version
    env['MSVS_VERSION'] = version
    env['MSVS'] = {}

    use_script = env.get('MSVC_USE_SCRIPT', True)
    if SCons.Util.is_String(use_script):
        # A user-supplied batch script path: run it directly.
        debug('vc.py:msvc_setup_env() use_script 1 %s\n' % repr(use_script))
        d = script_env(use_script)
    elif use_script:
        # Auto-detect a batch script matching the requested version.
        d = msvc_find_valid_batch_script(env,version)
        debug('vc.py:msvc_setup_env() use_script 2 %s\n' % d)
        if not d:
            return d
    else:
        debug('MSVC_USE_SCRIPT set to False')
        warn_msg = "MSVC_USE_SCRIPT set to False, assuming environment " \
                   "set correctly."
        # Nuitka: We use this on purpose.
        # SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
        return None

    # Prepend every variable from the batch script into the build environment.
    for k, v in d.items():
        debug('vc.py:msvc_setup_env() env:%s -> %s'%(k,v))
        env.PrependENVPath(k, v, delete_existing=True)
8411,
456,
8411,
240,
1366731633
] |
def _parse_args() -> argparse.Namespace:
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(
        dest='command', metavar='<command>', required=True)

    # 'project' sub-command: dump the components of an MCUXpresso project.
    project_parser = subparsers.add_parser(
        'project', help='output components of an MCUXpresso project')
    add = project_parser.add_argument
    add('manifest_filename', type=pathlib.Path)
    add('--include', type=str, action='append')
    add('--exclude', type=str, action='append')
    add('--prefix', dest='path_prefix', type=str)

    return parser.parse_args()
161,
44,
161,
1,
1615327645
] |
def format_raw(self, netenv, indent="", embedded=True,
               indirect_attrs=True):
    """Render a network environment as an indented plain-text block."""
    # First line: the object's class marker and name.
    details = [indent + "{0:c}: {0.name}".format(netenv)]
    details.append(self.redirect_raw(netenv.dns_environment, indent + "  "))
    # Optional attributes are only shown when present.
    if netenv.location:
        details.append(self.redirect_raw(netenv.location, indent + "  "))
    if netenv.comments:
        details.append(indent + "  Comments: %s" % netenv.comments)
    return "\n".join(details)
12,
16,
12,
38,
1361797498
] |
def setUp(self):
    """Configure the file-comparison test for the page_breaks04 workbook."""
    self.set_filename('page_breaks04.xlsx')

    # Printer settings are binary and environment-dependent; exclude them
    # (and the elements that reference them) from the comparison.
    self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
                         'xl/worksheets/_rels/sheet1.xml.rels']
    self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
                            'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
3172,
594,
3172,
18,
1357261626
] |
def _file_extension_default(self):
    """Default file extension for exported output."""
    return '.html'
1,
5,
1,
10,
1496432585
] |
def _default_template_path_default(self):
    """Default search path for the HTML templates, relative to this module."""
    return os.path.join("..", "templates", "html")
1,
5,
1,
10,
1496432585
] |
def _template_file_default(self):
    """Name of the default template file."""
    return 'full'
1,
5,
1,
10,
1496432585
] |
def default_config(self):
    """Default traitlets configuration for HTML export."""
    c = Config({
        'NbConvertBase': {
            # Order matters: richer widget/HTML mime types are preferred
            # over plainer representations when both are available.
            'display_data_priority' : ['application/vnd.jupyter.widget-state+json',
                                       'application/vnd.jupyter.widget-view+json',
                                       'application/javascript',
                                       'text/html',
                                       'text/markdown',
                                       'image/svg+xml',
                                       'text/latex',
                                       'image/png',
                                       'image/jpeg',
                                       'text/plain'
                                       ]
            },
        'CSSHTMLHeaderPreprocessor':{
            'enabled':True
            },
        'HighlightMagicsPreprocessor': {
            'enabled':True
            }
        })
    # Merge with the parent exporter's defaults; our settings take precedence.
    c.merge(super(HTMLExporter,self).default_config)
    return c
1,
5,
1,
10,
1496432585
] |
def default_filters(self):
    """Yield the parent exporter's filters plus the markdown->HTML filter."""
    for pair in super(HTMLExporter, self).default_filters():
        yield pair
    yield ('markdown2html', self.markdown2html)
1,
5,
1,
10,
1496432585
] |
def extract夢見る世界(item):
    # Stub parser: the body is empty, so the function implicitly returns None.
    """
    Parser for '夢見る世界'
    """
191,
16,
191,
3,
1437712243
] |
def _default_stream():
    """Open and return the bundled zhuyin/pinyin conversion table."""
    # NOTE(review): the file object is returned open; callers appear
    # responsible for closing it -- confirm against call sites.
    return open(cjkdata.get_resource('tables/zhuyin_pinyin_conv_table'))
20,
6,
20,
2,
1369698266
] |
def parse_lines(istream):
    """Yield whitespace-split fields for each non-comment line of istream."""
    istream = stream_codec(istream)
    for line in istream:
        # Lines beginning with '#' are comments in the table file.
        if not line.startswith('#'):
            yield line.rstrip().split()
20,
6,
20,
2,
1369698266
] |
def pinyin_to_zhuyin_table(istream=None):
    """ Returns a dictionary mapping pinyin to zhuyin. """
    # (Docstring fixed: the table is keyed by pinyin, with zhuyin values.)
    with _get_stream_context(istream) as istream:
        table = {}
        for zhuyin, pinyin in parse_lines(istream):
            table[pinyin] = zhuyin

        return table
20,
6,
20,
2,
1369698266
] |
def pinyin_regex_pattern(istream=None):
    """ Returns a pinyin regex pattern, with optional tone number. """
    all_pinyin = get_all_pinyin(istream)

    # Sort from longest to shortest, so as to make maximum matches whenever
    # possible.
    all_pinyin = sorted(all_pinyin, key=len, reverse=True)

    # Build a generic pattern for a single pinyin with an optional tone.
    pattern = '(%s)([0-5]?)' % '|'.join(all_pinyin)

    return pattern
20,
6,
20,
2,
1369698266
] |
def _LogDevicesOnFailure(msg):
    """On BaseError, log msg plus the visible adb devices, then re-raise."""
    # Generator-style context manager body (presumably decorated with
    # contextlib.contextmanager where defined -- decorator not visible here).
    try:
        yield
    except base_error.BaseError:
        logging.exception(msg)
        # Dump the adb device list to help diagnose the failure.
        logging.error('Devices visible to adb:')
        for entry in adb_wrapper.AdbWrapper.Devices(desired_state=None,
                                                    long_list=True):
            logging.error('  %s: %s',
                          entry[0].GetDeviceSerial(),
                          ' '.join(entry[1:]))
        raise
21,
16,
21,
3,
1435959644
] |
def Asan(args):
    """Set up ASan on a device, yield to the caller, then revert the device."""
    # Generator-style context manager (presumably decorated with
    # contextlib.contextmanager where defined -- decorator not visible here).
    env = os.environ.copy()
    env['ADB'] = args.adb

    try:
        with _LogDevicesOnFailure('Failed to set up the device.'):
            device = device_utils.DeviceUtils.HealthyDevices(
                device_arg=args.device)[0]
            disable_verity = device.build_version_sdk >= version_codes.MARSHMALLOW
            if disable_verity:
                device.EnableRoot()
                # TODO(crbug.com/790202): Stop logging output after diagnosing
                # issues on android-asan.
                verity_output = device.adb.DisableVerity()
                if verity_output:
                    logging.info('disable-verity output:')
                    for line in verity_output.splitlines():
                        logging.info('  %s', line)
                device.Reboot()
            # Call EnableRoot prior to asan_device_setup.sh to ensure it doesn't
            # get tripped up by the root timeout.
            device.EnableRoot()
            setup_cmd = [_SCRIPT_PATH, '--lib', args.lib]
            if args.device:
                setup_cmd += ['--device', args.device]
            subprocess.check_call(setup_cmd, env=env)

        yield

    finally:
        with _LogDevicesOnFailure('Failed to tear down the device.'):
            # NOTE(review): if setup failed before 'device'/'disable_verity'
            # were bound, this block raises NameError -- confirm that is intended.
            device.EnableRoot()
            teardown_cmd = [_SCRIPT_PATH, '--revert']
            if args.device:
                teardown_cmd += ['--device', args.device]
            subprocess.check_call(teardown_cmd, env=env)
            if disable_verity:
                # TODO(crbug.com/790202): Stop logging output after diagnosing
                # issues on android-asan.
                verity_output = device.adb.EnableVerity()
                if verity_output:
                    logging.info('enable-verity output:')
                    for line in verity_output.splitlines():
                        logging.info('  %s', line)
                device.Reboot()
21,
16,
21,
3,
1435959644
] |
def post_request_json(url, data, headers=None):
    """Send json data via POST request and return response

    Args:
        url(str): url to send request to
        data(dict): data to be sent
        headers(dict): optional request headers

    Returns:
        json_response(dict): {"status_code": ..., "content": ...} on success,
            or {"message": ...} describing the failure.
    """
    json_response = {}
    try:
        LOG.debug(f"Sending POST request with json data to {url}")
        # requests accepts headers=None, so the previous if/else branching
        # on headers was redundant.
        resp = requests.post(url, headers=headers, json=data)
        json_response["content"] = resp.json()
    except Exception as ex:
        # Best-effort API: report the error instead of raising to the caller.
        return {"message": f"An error occurred while sending a POST request to url {url} -> {ex}"}

    json_response["status_code"] = resp.status_code
    return json_response
122,
41,
122,
149,
1412930641
] |
def delete_request_json(url, headers=None, data=None):
    """Send a DELETE request to a remote API and return its response

    Args:
        url(str): url to send request to
        headers(dict): eventual request HEADERS to use in request
        data(dict): eventual request data to ba passed as a json object

    Returns:
        json_response(dict): {"status_code": ..., "content": ...} on success,
            or {"message": ...} describing the failure.
    """
    json_response = {}
    try:
        LOG.debug(f"Sending DELETE request to {url}")
        # requests accepts headers=None and json=None, so the previous
        # three-way branching was redundant.
        resp = requests.delete(url, headers=headers, json=data)
        json_response["content"] = resp.json()
    except Exception as ex:
        # Best-effort API: report the error instead of raising to the caller.
        return {"message": f"An error occurred while sending a DELETE request to url {url} -> {ex}"}

    json_response["status_code"] = resp.status_code
    return json_response
122,
41,
122,
149,
1412930641
] |
def fetch_resource(url, json=False):
    """Fetch a resource and return the resulting lines in a list or a json object

    Send file_name to get more clean log messages

    Args:
        url(str)
        json(bool): if result should be in json

    Returns:
        data(list or dict): resource split into lines, or its parsed json body
    """
    data = None
    if url.startswith("ftp"):
        # requests do not handle ftp
        response = urllib.request.urlopen(url, timeout=TIMEOUT)
        if isinstance(response, Exception):
            raise response
        data = response.read().decode("utf-8")
        return data.split("\n")

    response = get_request(url)

    if json:
        LOG.info("Return in json")
        data = response.json()
    else:
        content = response.text
        if response.url.endswith(".gz"):
            LOG.info("gzipped!")
            # Re-assemble the streamed chunks, then gunzip: 16 + MAX_WBITS
            # tells zlib to expect a gzip header.
            encoded_content = b"".join(chunk for chunk in response.iter_content(chunk_size=128))
            content = zlib.decompress(encoded_content, 16 + zlib.MAX_WBITS).decode("utf-8")

        data = content.split("\n")
    return data
122,
41,
122,
149,
1412930641
] |
def fetch_genes_to_hpo_to_disease():
    """Fetch the latest version of the map from genes to phenotypes

    Returns:
        res(list(str)): A list with the lines formatted this way:

        #Format: entrez-gene-id<tab>entrez-gene-symbol<tab>HPO-Term-Name<tab>\
        HPO-Term-ID<tab>Frequency-Raw<tab>Frequency-HPO<tab>
        Additional Info from G-D source<tab>G-D source<tab>disease-ID for link

        72      ACTG2   HP:0002027      Abdominal pain  -       mim2gene        OMIM:155310
        72      ACTG2   HP:0000368      Low-set, posteriorly rotated ears       HP:0040283      orphadata
        ORPHA:2604
    """
    # Download via the shared fetch_resource helper; returns the raw lines.
    url = HPO_URL.format("genes_to_phenotype.txt")
    return fetch_resource(url)
122,
41,
122,
149,
1412930641
] |
def fetch_hpo_files(genes_to_phenotype=False, phenotype_to_genes=False, hpo_terms=False):
    """
    Fetch the necessary HPO files from http://compbio.charite.de

    Args:
        genes_to_phenotype(bool): if file genes_to_phenotype.txt is required
        phenotype_to_genes(bool): if file phenotype_to_genes.txt is required
        hpo_terms(bool):if file hp.obo is required

    Returns:
        hpo_files(dict): A dictionary with the necessary files
    """
    LOG.info("Fetching HPO information from http://compbio.charite.de")

    # Table-driven dispatch: (result key, requested flag, fetcher callable).
    requested = (
        ("genes_to_phenotype", genes_to_phenotype, fetch_genes_to_hpo_to_disease),
        ("phenotype_to_genes", phenotype_to_genes, fetch_hpo_to_genes_to_disease),
        ("hpo_terms", hpo_terms, fetch_hpo_terms),
    )

    hpo_files = {}
    for key, wanted, fetcher in requested:
        if wanted is True:
            hpo_files[key] = fetcher()
    return hpo_files
122,
41,
122,
149,
1412930641
] |
def fetch_ensembl_biomart(attributes, filters, build=None):
    """Fetch data from ensembl biomart

    Args:
        attributes(list): List of selected attributes
        filters(dict): Select what filters to use
        build(str): '37' or '38' (defaults to '37' when None)

    Returns:
        client(EnsemblBiomartClient)
    """
    build = build or "37"

    client = EnsemblBiomartClient(build=build, filters=filters, attributes=attributes)
    LOG.info("Selecting attributes: %s", ", ".join(attributes))
    LOG.info("Use filter: %s", filters)

    return client
122,
41,
122,
149,
1412930641
] |
def fetch_ensembl_transcripts(build=None, chromosomes=None):
    """Fetch the ensembl genes

    Args:
        build(str): ['37', '38']
        chromosomes(iterable(str)): defaults to the full CHROMOSOMES list

    Returns:
        result(iterable): Ensembl formated transcript lines
    """
    chromosomes = chromosomes or CHROMOSOMES
    LOG.info("Fetching ensembl transcripts")
    # Biomart attributes selecting transcript coordinates and RefSeq links.
    attributes = [
        "chromosome_name",
        "ensembl_gene_id",
        "ensembl_transcript_id",
        "transcript_start",
        "transcript_end",
        "refseq_mrna",
        "refseq_mrna_predicted",
        "refseq_ncrna",
    ]

    filters = {"chromosome_name": chromosomes}

    return fetch_ensembl_biomart(attributes, filters, build)
122,
41,
122,
149,
1412930641
] |
def fetch_hgnc():
    """Fetch the hgnc genes file from
        ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt

    Returns:
        hgnc_gene_lines(list(str))
    """
    file_name = "hgnc_complete_set.txt"
    url = "ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/{0}".format(file_name)
    LOG.info("Fetching HGNC genes from %s", url)

    # fetch_resource handles the ftp scheme and returns the raw lines.
    hgnc_lines = fetch_resource(url)

    return hgnc_lines
122,
41,
122,
149,
1412930641
] |
def test_create_reply_with_text_not_render(self):
    """A plain string becomes a text reply; render() must not raise."""
    text = "test"
    reply = create_reply(text, render=False)
    self.assertEqual("text", reply.type)
    self.assertEqual(text, reply.content)
    reply.render()
3364,
745,
3364,
44,
1410527008
] |
def test_create_reply_with_message(self):
    """Replying to a message swaps its source and target users."""
    from wechatpy.messages import TextMessage

    msg = TextMessage(
        {
            "FromUserName": "user1",
            "ToUserName": "user2",
        }
    )
    reply = create_reply("test", msg, render=False)
    # The reply is addressed back to the original sender.
    self.assertEqual("user1", reply.target)
    self.assertEqual("user2", reply.source)
    reply.render()
3364,
745,
3364,
44,
1410527008
] |
def test_create_reply_with_articles(self):
    """A list of article dicts becomes a news reply; render() must not raise."""
    articles = [
        {
            "title": "test 1",
            "description": "test 1",
            "image": "http://www.qq.com/1.png",
            "url": "http://www.qq.com/1",
        },
        {
            "title": "test 2",
            "description": "test 2",
            "image": "http://www.qq.com/2.png",
            "url": "http://www.qq.com/2",
        },
        {
            "title": "test 3",
            "description": "test 3",
            "image": "http://www.qq.com/3.png",
            "url": "http://www.qq.com/3",
        },
    ]
    reply = create_reply(articles, render=False)
    self.assertEqual("news", reply.type)
    reply.render()
3364,
745,
3364,
44,
1410527008
] |
def __init__(
self, plotly_name="tickformatstops", parent_name="contour.colorbar", **kwargs | plotly/python-api | [
13052,
2308,
13052,
1319,
1385013188
] |
def create(kernel):
    # Build the creature object for this NPC template.
    result = Creature()

    result.template = "object/mobile/shared_dressed_marooned_pirate_tran_m.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","trandoshan_base_male")
    # NOTE(review): no return statement is visible in this excerpt; the full
    # file presumably returns 'result' -- confirm before relying on the value.
62,
37,
62,
37,
1297996365
] |
def create(kernel):
    # Build the creature object for this NPC template.
    result = Creature()

    result.template = "object/mobile/shared_dressed_commoner_naboo_human_female_07.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","human_base_female")
    # NOTE(review): no return statement is visible in this excerpt; the full
    # file presumably returns 'result' -- confirm before relying on the value.
62,
37,
62,
37,
1297996365
] |
def test_global(self):
    """Global defaults: set overwrites, add appends, get returns the first."""
    clear_user_default("key1")
    set_global_default("key1", "value1")
    self.assertEqual(get_global_default("key1"), "value1")

    set_global_default("key1", "value2")
    self.assertEqual(get_global_default("key1"), "value2")

    add_global_default("key1", "value3")
    # get_global_default still returns the first stored value.
    self.assertEqual(get_global_default("key1"), "value2")
    self.assertEqual(get_defaults()["key1"], ["value2", "value3"])
    self.assertEqual(get_user_default_as_list("key1"), ["value2", "value3"])
4495,
2418,
4495,
1493,
1307520856
] |
def test_global_if_not_user(self):
    """User lookup falls back to the global default when no user value exists."""
    set_global_default("key4", "value4")
    self.assertEqual(get_user_default("key4"), "value4")
4495,
2418,
4495,
1493,
1307520856
] |
def test_clear_global(self):
    """Clearing a default by value removes it from subsequent lookups."""
    set_global_default("key6", "value6")
    self.assertEqual(get_user_default("key6"), "value6")
    clear_default("key6", value="value6")
    self.assertEqual(get_user_default("key6"), None)
4495,
2418,
4495,
1493,
1307520856
] |
def __init__(self, targets=None, priority=None, source=None, batch=False,
             **kwargs):
    """
    Initialize D7 Networks Object

    :param targets: phone number(s) to notify.
    :param priority: message priority; unknown values fall back to the default.
    :param source: optional source address (string).
    :param batch: enable batch sending mode.
    :raises TypeError: if no valid target phone numbers remain after parsing.
    """
    super(NotifyD7Networks, self).__init__(**kwargs)

    # The Priority of the message
    if priority not in D7NETWORK_SMS_PRIORITIES:
        self.priority = self.template_args['priority']['default']
    else:
        self.priority = priority

    # Prepare Batch Mode Flag
    self.batch = batch

    # Setup our source address (if defined)
    self.source = None \
        if not isinstance(source, six.string_types) else source.strip()

    # Parse our targets
    self.targets = list()

    for target in parse_list(targets):
        # Validate targets and drop bad ones:
        result = IS_PHONE_NO.match(target)
        if result:
            # Further check our phone # for it's digit count
            # if it's less than 10, then we can assume it's
            # a poorly specified phone no and spit a warning
            result = ''.join(re.findall(r'\d+', result.group('phone')))
            if len(result) < 11 or len(result) > 14:
                self.logger.warning(
                    'Dropped invalid phone # '
                    '({}) specified.'.format(target),
                )
                continue

            # store valid phone number
            self.targets.append(result)
            continue

        self.logger.warning(
            'Dropped invalid phone # ({}) specified.'.format(target))

    # At least one valid target is required for a usable notifier.
    if len(self.targets) == 0:
        msg = 'There are no valid targets identified to notify.'
        self.logger.warning(msg)
        raise TypeError(msg)

    return
574,
83,
574,
2,
1415773777
] |
def url(self, privacy=False, *args, **kwargs):
    """
    Returns the URL built dynamically based on specified arguments.
    """
    # Define any arguments set
    args = {
        'format': self.notify_format,
        'overflow': self.overflow_mode,
        'verify': 'yes' if self.verify_certificate else 'no',
        'batch': 'yes' if self.batch else 'no',
    }

    # Only include a priority in the URL when it differs from the default.
    if self.priority != self.template_args['priority']['default']:
        args['priority'] = str(self.priority)

    if self.source:
        args['from'] = self.source

    # Password is masked when privacy=True via self.pprint.
    return '{schema}://{user}:{password}@{targets}/?{args}'.format(
        schema=self.secure_protocol,
        user=NotifyD7Networks.quote(self.user, safe=''),
        password=self.pprint(
            self.password, privacy, mode=PrivacyMode.Secret, safe=''),
        targets='/'.join(
            [NotifyD7Networks.quote(x, safe='') for x in self.targets]),
        args=NotifyD7Networks.urlencode(args))
574,
83,
574,
2,
1415773777
] |
def test_404():
    """Unknown endpoints must return 404 for both GET and POST."""
    with db_connection.env(login_active=False):
        req = app.request('/invalidendpoint', method='GET')
        assert req.status == "404 Not Found"
        req = app.request('/invalidendpoint', method='POST')
        assert req.status == "404 Not Found"
175,
17,
175,
5,
1467750068
] |
def test_exists_stats():
    """/stats accepts GET and rejects POST."""
    with db_connection.env(login_active=False):
        req = app.request('/stats', 'GET')
        assert req.status == "200 OK"
        req = app.request('/stats', 'POST')
        assert req.status == "405 Method Not Allowed"
175,
17,
175,
5,
1467750068
] |
def test_exists_links():
    """/links accepts GET and rejects POST."""
    with db_connection.env(login_active=False):
        req = app.request('/links', 'GET')
        assert req.status == "200 OK"
        req = app.request('/links', 'POST')
        assert req.status == "405 Method Not Allowed"
175,
17,
175,
5,
1467750068
] |
def test_exists_portinfo():
    """/portinfo accepts both GET and POST (unlike the other endpoints)."""
    with db_connection.env(login_active=False):
        req = app.request('/portinfo', 'GET')
        assert req.status == "200 OK"
        req = app.request('/portinfo', 'POST')
        assert req.status == "200 OK"
175,
17,
175,
5,
1467750068
] |
def test_exists_table():
    """/table accepts GET and rejects POST."""
    with db_connection.env(login_active=False):
        req = app.request('/table', 'GET')
        assert req.status == "200 OK"
        req = app.request('/table', 'POST')
        assert req.status == "405 Method Not Allowed"
175,
17,
175,
5,
1467750068
] |
def test_exists_settings_page():
    """/settings_page accepts GET and rejects POST."""
    with db_connection.env(login_active=False):
        req = app.request('/settings_page', 'GET')
        assert req.status == "200 OK"
        req = app.request('/settings_page', 'POST')
        assert req.status == "405 Method Not Allowed"
175,
17,
175,
5,
1467750068
] |
def find_abf_policy(test, id):
    """Return True if an ABF policy with the given id is currently configured."""
    dumped = test.vapi.abf_policy_dump()
    return any(entry.policy.policy_id == id for entry in dumped)
923,
525,
923,
24,
1499444980
] |
def __init__(self,
             test,
             policy_id,
             acl,
             paths):
    # Keep a handle to the test case so VAPI calls can be issued later.
    self._test = test
    self.policy_id = policy_id
    self.acl = acl
    self.paths = paths
    # Pre-encode the FIB paths once for reuse in add/del API calls.
    self.encoded_paths = []
    for path in self.paths:
        self.encoded_paths.append(path.encode())
923,
525,
923,
24,
1499444980
] |
def remove_vpp_config(self):
    """Remove this ABF policy from VPP (is_add=0 on the add/del API)."""
    self._test.vapi.abf_policy_add_del(
        0,
        {'policy_id': self.policy_id,
         'acl_index': self.acl.acl_index,
         'n_paths': len(self.paths),
         'paths': self.encoded_paths})
923,
525,
923,
24,
1499444980
] |
def object_id(self):
    """Unique registry identifier for this ABF policy object."""
    return "abf-policy-{0}".format(self.policy_id)
923,
525,
923,
24,
1499444980
] |
def __init__(self,
             test,
             policy_id,
             sw_if_index,
             priority,
             is_ipv6=0):
    # Keep a handle to the test case so VAPI calls can be issued later.
    self._test = test
    self.policy_id = policy_id
    self.sw_if_index = sw_if_index
    self.priority = priority
    self.is_ipv6 = is_ipv6
] |
def remove_vpp_config(self):
    """Detach this ABF policy from its interface (is_add=0 on the API)."""
    self._test.vapi.abf_itf_attach_add_del(
        0,
        {'policy_id': self.policy_id,
         'sw_if_index': self.sw_if_index,
         'priority': self.priority,
         'is_ipv6': self.is_ipv6})
923,
525,
923,
24,
1499444980
] |
def object_id(self):
    """Unique registry identifier for this ABF interface attachment."""
    return "abf-attach-{0}-{1}".format(self.policy_id, self.sw_if_index)
923,
525,
923,
24,
1499444980
] |
def setUpClass(cls):
    """Delegate class-level setup to the framework base class."""
    super(TestAbf, cls).setUpClass()
923,
525,
923,
24,
1499444980
] |
def tearDownClass(cls):
    """Delegate class-level teardown to the framework base class."""
    super(TestAbf, cls).tearDownClass()
923,
525,
923,
24,
1499444980
] |
def tearDown(self):
    """Unconfigure and shut down every packet-generator interface."""
    for i in self.pg_interfaces:
        i.unconfig_ip4()
        i.unconfig_ip6()
        i.admin_down()
    super(TestAbf, self).tearDown()
923,
525,
923,
24,
1499444980
] |
def test_abf6(self):
    """ IPv6 ACL Based Forwarding
    """

    #
    # Simple test for matching IPv6 packets
    #

    #
    # Rule 1
    #
    rule_1 = AclRule(is_permit=1, proto=17, ports=1234,
                     src_prefix=IPv6Network("2001::2/128"),
                     dst_prefix=IPv6Network("2001::1/128"))
    acl_1 = VppAcl(self, rules=[rule_1])
    acl_1.add_vpp_config()

    #
    # ABF policy for ACL 1 - path via interface 1
    #
    abf_1 = VppAbfPolicy(self, 10, acl_1,
                         [VppRoutePath("3001::1",
                                       0xffffffff)])
    abf_1.add_vpp_config()

    # Attach the policy to pg0 with priority 45.
    attach_1 = VppAbfAttach(self, 10, self.pg0.sw_if_index,
                            45, is_ipv6=True)
    attach_1.add_vpp_config()

    #
    # a packet matching the rule
    #
    p = (Ether(src=self.pg0.remote_mac,
               dst=self.pg0.local_mac) /
         IPv6(src="2001::2", dst="2001::1") /
         UDP(sport=1234, dport=1234) /
         Raw(b'\xa5' * 100))

    #
    # packets are dropped because there is no route to the policy's
    # next hop
    #
    self.send_and_assert_no_replies(self.pg1, p * NUM_PKTS, "no route")

    #
    # add a route resolving the next-hop
    #
    route = VppIpRoute(self, "3001::1", 32,
                       [VppRoutePath(self.pg1.remote_ip6,
                                     self.pg1.sw_if_index)])
    route.add_vpp_config()

    #
    # now expect packets forwarded.
    #
    self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg1)
923,
525,
923,
24,
1499444980
] |
def __getattr__(self, attr):
    """Proxy attribute access to tempest.lib data_utils, caching the result."""
    if attr == 'rand_name':
        # NOTE(flwang): This is a proxy to generate a random name that
        # includes a random number and a prefix 'tempest'
        attr_obj = partial(lib_data_utils.rand_name,
                           prefix='tempest')
    else:
        attr_obj = getattr(lib_data_utils, attr)

    # Cache on the instance so __getattr__ only fires once per name.
    self.__dict__[attr] = attr_obj
    return attr_obj
2,
2,
2,
1,
1410968777
] |
def get_service_list():
    """Map service tag names to whether each service is configured available."""
    service_list = {
        'compute': CONF.service_available.nova,
        'image': CONF.service_available.glance,
        'volume': CONF.service_available.cinder,
        # NOTE(masayukig): We have two network services which are neutron and
        # nova-network. And we have no way to know whether nova-network is
        # available or not. After the pending removal of nova-network from
        # nova, we can treat the network/neutron case in the same manner as
        # the other services.
        'network': True,
        # NOTE(masayukig): Tempest tests always require the identity service.
        # So we should set this True here.
        'identity': True,
        'object_storage': CONF.service_available.swift,
    }
    return service_list
2,
2,
2,
1,
1410968777
] |
def decorator(f):
    """Tag f with service types and skip it when a service is unavailable."""
    # NOTE(review): 'args' is a free variable from the enclosing decorator
    # factory (not visible in this excerpt) -- the requested service tags.
    known_services = get_service_list()

    # Validate the tags eagerly, at decoration time.
    for service in args:
        if service not in known_services:
            raise InvalidServiceTag('%s is not a valid service' % service)
    decorators.attr(type=list(args))(f)

    @functools.wraps(f)
    def wrapper(*func_args, **func_kwargs):
        # Re-check availability at call time, since config may differ.
        service_list = get_service_list()

        for service in args:
            if not service_list[service]:
                msg = 'Skipped because the %s service is not available' % (
                    service)
                raise testtools.TestCase.skipException(msg)
        return f(*func_args, **func_kwargs)
    return wrapper
2,
2,
2,
1,
1410968777
] |
def requires_ext(**kwargs):
    """Skip the decorated test unless a given API extension is enabled.

    Keyword Args:
        extension: name of the extension that must be enabled.
        service: name of the service the extension belongs to.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            service = kwargs['service']
            extension = kwargs['extension']
            if is_extension_enabled(extension, service):
                return func(*func_args, **func_kwargs)
            msg = "Skipped because %s extension: %s is not enabled" % (
                service, extension)
            raise testtools.TestCase.skipException(msg)
        return wrapper
    return decorator
2,
2,
2,
1,
1410968777
] |
def get_parser(self, prog_name):
    """Build the argument parser for the user-create command.

    Argument declaration order is preserved deliberately: argparse lists
    options in the help output in the order they are added.
    """
    parser = super(CreateUser, self).get_parser(prog_name)
    parser.add_argument(
        'name',
        metavar='<name>',
        help='New user name',
    )
    parser.add_argument(
        '--domain',
        metavar='<domain>',
        help='Default domain (name or ID)',
    )
    parser.add_argument(
        '--project',
        metavar='<project>',
        help='Default project (name or ID)',
    )
    common.add_project_domain_option_to_parser(parser)
    parser.add_argument(
        '--password',
        metavar='<password>',
        help='Set user password',
    )
    parser.add_argument(
        '--password-prompt',
        dest="password_prompt",
        action="store_true",
        help='Prompt interactively for password',
    )
    parser.add_argument(
        '--email',
        metavar='<email-address>',
        help='Set user email address',
    )
    parser.add_argument(
        '--description',
        metavar='<description>',
        help='User description',
    )
    # --enable and --disable are mutually exclusive; enabled is the default.
    enable_group = parser.add_mutually_exclusive_group()
    enable_group.add_argument(
        '--enable',
        action='store_true',
        help='Enable user (default)',
    )
    enable_group.add_argument(
        '--disable',
        action='store_true',
        help='Disable user',
    )
    parser.add_argument(
        '--or-show',
        action='store_true',
        help=_('Return existing user'),
    )
    return parser
22,
15,
22,
1,
1472615846
] |
def get_parser(self, prog_name):
    """Build the argument parser for the user-delete command."""
    parser = super(DeleteUser, self).get_parser(prog_name)
    # One or more users may be deleted in a single invocation.
    parser.add_argument('users', metavar='<user>', nargs="+",
                        help='User(s) to delete (name or ID)')
    parser.add_argument('--domain', metavar='<domain>',
                        help='Domain owning <user> (name or ID)')
    return parser
22,
15,
22,
1,
1472615846
] |
def get_parser(self, prog_name):
    """Build the argument parser for the user-list command."""
    parser = super(ListUser, self).get_parser(prog_name)
    parser.add_argument('--domain', metavar='<domain>',
                        help='Filter users by <domain> (name or ID)')
    # Filtering by group membership and by project are mutually exclusive.
    exclusive = parser.add_mutually_exclusive_group()
    exclusive.add_argument('--group', metavar='<group>',
                           help='Filter users by <group> membership (name or ID)')
    exclusive.add_argument('--project', metavar='<project>',
                           help='Filter users by <project> (name or ID)')
    parser.add_argument('--long', action='store_true', default=False,
                        help='List additional fields in output')
    return parser
22,
15,
22,
1,
1472615846
] |
def get_parser(self, prog_name):
    """Build the argument parser for the user-set (update) command.

    Argument declaration order is preserved deliberately: argparse lists
    options in the help output in the order they are added.
    """
    parser = super(SetUser, self).get_parser(prog_name)
    parser.add_argument(
        'user',
        metavar='<user>',
        help='User to change (name or ID)',
    )
    parser.add_argument(
        '--name',
        metavar='<name>',
        help='Set user name',
    )
    parser.add_argument(
        '--project',
        metavar='<project>',
        help='Set default project (name or ID)',
    )
    common.add_project_domain_option_to_parser(parser)
    parser.add_argument(
        '--password',
        metavar='<password>',
        help='Set user password',
    )
    parser.add_argument(
        '--password-prompt',
        dest="password_prompt",
        action="store_true",
        help='Prompt interactively for password',
    )
    parser.add_argument(
        '--email',
        metavar='<email-address>',
        help='Set user email address',
    )
    parser.add_argument(
        '--description',
        metavar='<description>',
        help='Set user description',
    )
    # --enable and --disable are mutually exclusive; enabled is the default.
    enable_group = parser.add_mutually_exclusive_group()
    enable_group.add_argument(
        '--enable',
        action='store_true',
        help='Enable user (default)',
    )
    enable_group.add_argument(
        '--disable',
        action='store_true',
        help='Disable user',
    )
    return parser
22,
15,
22,
1,
1472615846
] |
def get_parser(self, prog_name):
    """Build the argument parser for the user password-change command."""
    parser = super(SetPasswordUser, self).get_parser(prog_name)
    parser.add_argument('--password', metavar='<new-password>',
                        help='New user password')
    parser.add_argument('--original-password', metavar='<original-password>',
                        help='Original user password')
    return parser
22,
15,
22,
1,
1472615846
] |
def get_parser(self, prog_name):
    """Build the argument parser for the user-show command."""
    parser = super(ShowUser, self).get_parser(prog_name)
    parser.add_argument('user', metavar='<user>',
                        help='User to display (name or ID)')
    parser.add_argument('--domain', metavar='<domain>',
                        help='Domain owning <user> (name or ID)')
    return parser
22,
15,
22,
1,
1472615846
] |
def __init__(self, **params):
    """Initialize the camera-backed image source; no frame captured yet."""
    super(CameraImage, self).__init__(**params)
    # Most recently decoded frame, or None until the first frame arrives.
    self._image = None
51,
31,
51,
228,
1348109103
] |
def _decode_image(self,fmt,w,h,bpp,fdiv,data):
    """Decode a raw camera frame into a grayscale PIL image in self._image.

    NOTE(review): ``Image.fromstring`` is the legacy PIL API (removed in
    Pillow 3.0 in favor of ``frombytes``); this code assumes the old PIL is
    installed -- confirm the pinned dependency before modernizing.
    ``bpp`` and ``fdiv`` are accepted but unused here.
    """
    # fmt==1 appears to mark an already-grayscale ('L') frame -- TODO confirm
    # against the camera protocol.
    if fmt==1:
        self._image = Image.new('L',(w,h))
        self._image.fromstring(data,'raw')
    else:
        # JPALERT: if not grayscale, then assume color. This
        # should be expanded for other modes.
        rgb_im = Image.new('RGB',(w,h))
        rgb_im.fromstring(data,'raw')
        self._image = ImageOps.grayscale(rgb_im)
51,
31,
51,
228,
1348109103
] |
def _get_image(self,params):
    """Fetch the most recent camera frame, decoding it into self._image.

    Returns True if a new frame was decoded, False if no new frame was
    available and the previously decoded image should be reused.
    Blocks only when no frame has ever been received.
    """
    im_spec = None
    if self._image is None:
        # No frame yet: block until the camera thread delivers one.
        im_spec = self.camera.image_queue.get()
        self.camera.image_queue.task_done()
    # Drain the queue so we always use the most recent frame, discarding
    # any stale ones that accumulated since the last call.
    # NOTE(review): empty()/get_nowait() is not atomic; a concurrent
    # consumer could make get_nowait raise queue.Empty here -- appears to
    # rely on this being the only consumer. Confirm.
    while not self.camera.image_queue.empty():
        im_spec = self.camera.image_queue.get_nowait()
        self.camera.image_queue.task_done()
    if im_spec:
        # If we got a new image from the queue, then
        # construct a PIL image from it.
        self._decode_image(*im_spec)
        return True
    else:
        return False
51,
31,
51,
228,
1348109103
] |
def start(self):
    """Start hook; intentionally a no-op for this class."""
    pass
51,
31,
51,
228,
1348109103
] |
def get_namespace_choices():
    """
    Return the NAMESPACE_CHOICES mapping of supported milestone namespaces.
    """
    return NAMESPACE_CHOICES
5,
3,
5,
6,
1390926698
] |
def add_prerequisite_course(course_key, prerequisite_course_key):
    """Create a milestone linking a course to its prerequisite.

    The new milestone is attached as a 'requires' relationship on the
    course identified by ``course_key`` and as a 'fulfills' relationship
    on the course identified by ``prerequisite_course_key``.  No-op when
    the pre-requisite courses feature is disabled.
    """
    if not is_prerequisite_courses_enabled():
        return None
    name = _('Course {course_id} requires {prerequisite_course_id}').format(
        course_id=str(course_key),
        prerequisite_course_id=str(prerequisite_course_key)
    )
    milestone = milestones_api.add_milestone({
        'name': name,
        'namespace': str(prerequisite_course_key),
        'description': _('System defined milestone'),
    })
    # Wire up both sides of the relationship.
    milestones_api.add_course_milestone(course_key, 'requires', milestone)
    milestones_api.add_course_milestone(prerequisite_course_key, 'fulfills', milestone)
5,
3,
5,
6,
1390926698
] |
def set_prerequisite_courses(course_key, prerequisite_course_keys):
    """Replace the course's prerequisite milestones with a new set.

    All existing 'requires' milestones on ``course_key`` are removed, then
    one milestone is created per entry in ``prerequisite_course_keys``.
    Passing an empty list (or None) therefore just clears the
    prerequisites.  No-op when the feature is disabled.
    """
    if not is_prerequisite_courses_enabled():
        return None
    # Drop every existing requirement milestone before recreating them.
    existing = milestones_api.get_course_milestones(course_key=course_key, relationship="requires")
    for milestone in (existing or []):
        remove_prerequisite_course(course_key, milestone)
    # Create a fresh milestone for each requested prerequisite course.
    for key_string in (prerequisite_course_keys or []):
        add_prerequisite_course(course_key, CourseKey.from_string(key_string))
5,
3,
5,
6,
1390926698
] |
def get_prerequisite_courses_display(course_descriptor):
    """Return display info for a course's prerequisites.

    Each entry is a dict with 'key' (the CourseKey) and 'display' (the
    human-readable course name).  Returns an empty list when the feature
    is disabled or the course has no prerequisites.
    """
    if not (is_prerequisite_courses_enabled() and course_descriptor.pre_requisite_courses):
        return []
    display_info = []
    for course_id in course_descriptor.pre_requisite_courses:
        key = CourseKey.from_string(course_id)
        required_course = modulestore().get_course(key)
        display_info.append({
            'key': key,
            'display': get_course_display_string(required_course),
        })
    return display_info
5,
3,
5,
6,
1390926698
] |
def fulfill_course_milestone(course_key, user):
    """Record that the user has completed the course identified by course_key.

    Every milestone this course 'fulfills' is granted to the user, which
    unlocks any other courses gated on those milestones.  No-op when the
    milestones app is disabled.
    """
    if not ENABLE_MILESTONES_APP.is_enabled():
        return None
    try:
        fulfilled = milestones_api.get_course_milestones(course_key=course_key, relationship="fulfills")
    except InvalidMilestoneRelationshipTypeException:
        # Relationship types have not been seeded yet; seed them and retry once.
        seed_milestone_relationship_types()
        fulfilled = milestones_api.get_course_milestones(course_key=course_key, relationship="fulfills")
    for milestone in fulfilled:
        milestones_api.add_user_milestone({'id': user.id}, milestone)
5,
3,
5,
6,
1390926698
] |
def get_required_content(course_key, user):
    """Return the content ids the user must complete to unlock this course.

    Consults the milestones subsystem for the user's outstanding milestone
    fulfillment paths and collects every content block named in them.
    Returns an empty list when the milestones app is disabled or the user
    has no outstanding paths.

    Raises InvalidUserException for anonymous users when the course run has
    any milestones, since locked content must not be exposed anonymously.
    """
    required_content = []
    if not ENABLE_MILESTONES_APP.is_enabled():
        return required_content
    course_run_id = str(course_key)
    if not user.is_authenticated:
        # Anonymous courseware access is disallowed outright for course
        # runs with milestones, to avoid leaking locked content.
        if get_course_milestones(course_run_id):
            raise InvalidUserException('Anonymous access is not allowed for course runs with milestones set.')
        return required_content
    try:
        milestone_paths = get_course_milestones_fulfillment_paths(
            course_run_id,
            serialize_user(user)
        )
    except InvalidMilestoneRelationshipTypeException:
        return required_content
    # Collect the content blocks from every outstanding fulfillment path.
    for milestone_path in milestone_paths.values():
        content = milestone_path.get('content')
        if content and len(content):  # lint-amnesty, pylint: disable=len-as-condition
            required_content.extend(content)
    return required_content
5,
3,
5,
6,
1390926698
] |
def is_valid_course_key(key):
    """Return True when ``key`` is a CourseKey or parses into one."""
    try:
        parsed = CourseKey.from_string(key)
    except InvalidKeyError:
        # Fall back to checking the raw value itself (it may already be a
        # CourseKey instance).
        parsed = key
    return isinstance(parsed, CourseKey)
5,
3,
5,
6,
1390926698
] |
def generate_milestone_namespace(namespace, course_key=None):
    """
    Returns a specifically-formatted namespace string for the specified type
    """
    # NOTE(review): implicitly returns None for any namespace other than
    # 'entrance_exams' (including other values present in NAMESPACE_CHOICES)
    # -- confirm that is intended, or whether this excerpt is truncated.
    if namespace in list(NAMESPACE_CHOICES.values()):
        if namespace == 'entrance_exams':
            return '{}.{}'.format(str(course_key), NAMESPACE_CHOICES['ENTRANCE_EXAM'])
5,
3,
5,
6,
1390926698
] |
def add_milestone(milestone_data):
    """Adapter for milestones_api.add_milestone; None when the app is disabled."""
    if ENABLE_MILESTONES_APP.is_enabled():
        return milestones_api.add_milestone(milestone_data)
    return None
5,
3,
5,
6,
1390926698
] |
def get_milestone_relationship_types():
    """Adapter for milestones_api.get_milestone_relationship_types.

    Returns an empty dict when the milestones app is disabled.
    """
    if ENABLE_MILESTONES_APP.is_enabled():
        return milestones_api.get_milestone_relationship_types()
    return {}
5,
3,
5,
6,
1390926698
] |
def get_course_milestones(course_id):
    """Adapter for milestones_api.get_course_milestones.

    Returns an empty list when the milestones app is disabled.
    """
    if ENABLE_MILESTONES_APP.is_enabled():
        return milestones_api.get_course_milestones(course_id)
    return []
5,
3,
5,
6,
1390926698
] |
def get_course_content_milestones(course_id, content_id=None, relationship='requires', user_id=None):
    """Adapter for milestones_api.get_course_content_milestones.

    When ``user_id`` is given, the per-user milestone list is cached in the
    request cache (one entry per user per relationship) so repeated calls
    within a request hit the API only once.  With ``content_id`` set, the
    result is filtered down to that single content block; otherwise all
    content milestones for the course are returned.  Empty list when the
    milestones app is disabled.
    """
    if not ENABLE_MILESTONES_APP.is_enabled():
        return []
    if user_id is None:
        # No per-user caching without a user.
        return milestones_api.get_course_content_milestones(course_id, content_id, relationship)
    user_cache = get_cache(REQUEST_CACHE_NAME).setdefault(user_id, {})
    if relationship not in user_cache:
        user_cache[relationship] = milestones_api.get_course_content_milestones(
            course_key=course_id,
            relationship=relationship,
            user={"id": user_id}
        )
    milestones = user_cache[relationship]
    if content_id is None:
        return milestones
    wanted = str(content_id)
    return [m for m in milestones if m['content_id'] == wanted]
5,
3,
5,
6,
1390926698
] |
def remove_content_references(content_id):
    """Adapter for milestones_api.remove_content_references; None when disabled."""
    if ENABLE_MILESTONES_APP.is_enabled():
        return milestones_api.remove_content_references(content_id)
    return None
5,
3,
5,
6,
1390926698
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.