function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def view_or_basicauth(view, request, test_func, realm = "", *args, **kwargs):
"""
This is a helper function used by both 'logged_in_or_basicauth' and
'has_perm_or_basicauth' that does the nitty of determining if they
are already logged in or if they have provided proper http-authorization
and returning the view if all goes well, otherwise responding with a 401.
"""
if test_func(request.user):
# Already logged in, just return the view.
#
return view(request, *args, **kwargs) | schubergphilis/twitterwall | [
3,
1,
3,
3,
1371292479
] |
def logged_in_or_basicauth(realm = ""):
"""
A simple decorator that requires a user to be logged in. If they are not
logged in the request is examined for a 'authorization' header. | schubergphilis/twitterwall | [
3,
1,
3,
3,
1371292479
] |
def your_view:
... | schubergphilis/twitterwall | [
3,
1,
3,
3,
1371292479
] |
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.is_authenticated(),
realm, *args, **kwargs)
return wrapper | schubergphilis/twitterwall | [
3,
1,
3,
3,
1371292479
] |
def has_perm_or_basicauth(perm, realm = ""):
"""
This is similar to the above decorator 'logged_in_or_basicauth'
except that it requires the logged in user to have a specific
permission. | schubergphilis/twitterwall | [
3,
1,
3,
3,
1371292479
] |
def your_view:
... | schubergphilis/twitterwall | [
3,
1,
3,
3,
1371292479
] |
def view_decorator(func):
def wrapper(request, *args, **kwargs):
return view_or_basicauth(func, request,
lambda u: u.has_perm(perm),
realm, *args, **kwargs)
return wrapper | schubergphilis/twitterwall | [
3,
1,
3,
3,
1371292479
] |
def _get_label_map(label_map):
"""Gets the label map dict."""
if isinstance(label_map, list):
label_map_dict = {}
for i, label in enumerate(label_map):
# 0 is resevered for background.
label_map_dict[i + 1] = label
label_map = label_map_dict
label_map = label_util.get_label_map(label_map)
if 0 in label_map and label_map[0] != 'background':
raise ValueError('0 must be resevered for background.')
label_map.pop(0, None)
name_set = set()
for idx, name in label_map.items():
if not isinstance(idx, int):
raise ValueError('The key (label id) in label_map must be integer.')
if not isinstance(name, str):
raise ValueError('The value (label name) in label_map must be string.')
if name in name_set:
raise ValueError('The value: %s (label name) can\'t be duplicated.' %
name)
name_set.add(name)
return label_map | tensorflow/examples | [
6911,
7012,
6911,
106,
1531779116
] |
def __init__(self,
tfrecord_file_patten,
size,
label_map,
annotations_json_file=None):
"""Initialize DataLoader for object detector.
Args:
tfrecord_file_patten: Glob for tfrecord files. e.g. "/tmp/coco*.tfrecord".
size: The size of the dataset.
label_map: Variable shows mapping label integers ids to string label
names. 0 is the reserved key for `background` and doesn't need to be
included in label_map. Label names can't be duplicated. Supported
formats are:
1. Dict, map label integers ids to string label names, such as {1:
'person', 2: 'notperson'}. 2. List, a list of label names such as
['person', 'notperson'] which is
the same as setting label_map={1: 'person', 2: 'notperson'}.
3. String, name for certain dataset. Accepted values are: 'coco', 'voc'
and 'waymo'. 4. String, yaml filename that stores label_map.
annotations_json_file: JSON with COCO data format containing golden
bounding boxes. Used for validation. If None, use the ground truth from
the dataloader. Refer to
https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
for the description of COCO data format.
"""
super(DataLoader, self).__init__(dataset=None, size=size)
self.tfrecord_file_patten = tfrecord_file_patten
self.label_map = _get_label_map(label_map)
self.annotations_json_file = annotations_json_file | tensorflow/examples | [
6911,
7012,
6911,
106,
1531779116
] |
def from_pascal_voc(
cls,
images_dir: str,
annotations_dir: str,
label_map: Union[List[str], Dict[int, str], str],
annotation_filenames: Optional[Collection[str]] = None,
ignore_difficult_instances: bool = False,
num_shards: int = 100,
max_num_images: Optional[int] = None,
cache_dir: Optional[str] = None,
cache_prefix_filename: Optional[str] = None) -> DetectorDataLoader:
"""Loads from dataset with PASCAL VOC format.
Refer to
https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
for the description of PASCAL VOC data format.
LabelImg Tool (https://github.com/tzutalin/labelImg) can annotate the image
and save annotations as XML files in PASCAL VOC data format.
Annotations are in the folder: `annotations_dir`.
Raw images are in the foloder: `images_dir`.
Args:
images_dir: Path to directory that store raw images.
annotations_dir: Path to the annotations directory.
label_map: Variable shows mapping label integers ids to string label
names. 0 is the reserved key for `background`. Label names can't be
duplicated. Supported format: 1. Dict, map label integers ids to string
label names, e.g.
{1: 'person', 2: 'notperson'}. 2. List, a list of label names. e.g.
['person', 'notperson'] which is
the same as setting label_map={1: 'person', 2: 'notperson'}.
3. String, name for certain dataset. Accepted values are: 'coco', 'voc'
and 'waymo'. 4. String, yaml filename that stores label_map.
annotation_filenames: Collection of annotation filenames (strings) to be
loaded. For instance, if there're 3 annotation files [0.xml, 1.xml,
2.xml] in `annotations_dir`, setting annotation_filenames=['0', '1']
makes this method only load [0.xml, 1.xml].
ignore_difficult_instances: Whether to ignore difficult instances.
`difficult` can be set inside `object` item in the annotation xml file.
num_shards: Number of shards for output file.
max_num_images: Max number of imags to process.
cache_dir: The cache directory to save TFRecord, metadata and json file.
When cache_dir is not set, a temporary folder will be created and will
not be removed automatically after training which makes it can be used
later.
cache_prefix_filename: The cache prefix filename. If not set, will
automatically generate it based on `image_dir`, `annotations_dir` and
`annotation_filenames`.
Returns:
ObjectDetectorDataLoader object.
"""
label_map = _get_label_map(label_map)
# If `cache_prefix_filename` is None, automatically generates a hash value.
if cache_prefix_filename is None:
cache_prefix_filename = util.get_cache_prefix_filename_from_pascal(
images_dir=images_dir,
annotations_dir=annotations_dir,
annotation_filenames=annotation_filenames,
num_shards=num_shards)
cache_files = util.get_cache_files(
cache_dir=cache_dir,
cache_prefix_filename=cache_prefix_filename,
num_shards=num_shards)
# If not cached, writes data into tfrecord_file_paths and
# annotations_json_file_path.
# If `num_shards` differs, it's still not cached.
if not util.is_cached(cache_files):
cache_writer = util.PascalVocCacheFilesWriter(
label_map=label_map,
images_dir=images_dir,
num_shards=num_shards,
max_num_images=max_num_images,
ignore_difficult_instances=ignore_difficult_instances)
cache_writer.write_files(
cache_files=cache_files,
annotations_dir=annotations_dir,
annotation_filenames=annotation_filenames)
return cls.from_cache(cache_files.cache_prefix) | tensorflow/examples | [
6911,
7012,
6911,
106,
1531779116
] |
def from_csv(
cls,
filename: str,
images_dir: Optional[str] = None,
delimiter: str = ',',
quotechar: str = '"',
num_shards: int = 10,
max_num_images: Optional[int] = None,
cache_dir: Optional[str] = None,
cache_prefix_filename: Optional[str] = None | tensorflow/examples | [
6911,
7012,
6911,
106,
1531779116
] |
def from_cache(cls, cache_prefix):
"""Loads the data from cache.
Args:
cache_prefix: The cache prefix including the cache directory and the cache
prefix filename, e.g: '/tmp/cache/train'.
Returns:
ObjectDetectorDataLoader object.
"""
# Gets TFRecord files.
tfrecord_file_patten = cache_prefix + '*.tfrecord'
if not tf.io.gfile.glob(tfrecord_file_patten):
raise ValueError('TFRecord files are empty.')
# Loads meta_data.
meta_data_file = cache_prefix + util.META_DATA_FILE_SUFFIX
if not tf.io.gfile.exists(meta_data_file):
raise ValueError('Metadata file %s doesn\'t exist.' % meta_data_file)
with tf.io.gfile.GFile(meta_data_file, 'r') as f:
meta_data = yaml.load(f, Loader=yaml.FullLoader)
# Gets annotation json file.
ann_json_file = cache_prefix + util.ANN_JSON_FILE_SUFFIX
if not tf.io.gfile.exists(ann_json_file):
ann_json_file = None
return DataLoader(tfrecord_file_patten, meta_data['size'],
meta_data['label_map'], ann_json_file) | tensorflow/examples | [
6911,
7012,
6911,
106,
1531779116
] |
def __init__(self, api, settings):
self.api = api
self.settings = settings | cloudControl/cctrl | [
22,
15,
22,
3,
1327055471
] |
def create(self, args):
"""
Create a new user.
"""
if not self.settings.user_registration_enabled:
print messages['RegisterDisabled'].format(self.settings.user_registration_url)
return
self.api.set_token(None)
if args.name and args.email and args.password:
name = args.name[0]
email = args.email[0]
password = args.password[0]
else:
name = raw_input('Username: ')
try:
email, password = get_credentials(self.settings, create=True)
except PasswordsDontMatchException:
return
try:
self.api.create_user(name, email, password)
except NotImplementedError:
raise InputErrorException('CommandNotImplemented')
print messages['UserCreatedNowCheckEmail'] | cloudControl/cctrl | [
22,
15,
22,
3,
1327055471
] |
def delete(self, args):
"""
Delete your user account.
"""
users = self.api.read_users()
if not args.force_delete:
question = raw_input('Do you really want to delete your user? ' +
'Type "Yes" without the quotes to delete: ')
else:
question = 'Yes'
if question.lower() == 'yes':
try:
self.api.delete_user(users[0]['username'])
except NotImplementedError:
raise InputErrorException('CommandNotImplemented')
except ForbiddenError:
raise InputErrorException('DeleteAppsBeforeUser')
# After we have deleted our user we should also delete
# the token_file to avoid confusion
self.api.set_token(None)
else:
raise InputErrorException('SecurityQuestionDenied') | cloudControl/cctrl | [
22,
15,
22,
3,
1327055471
] |
def listKeys(self, args):
"""
List your public keys.
"""
users = self.api.read_users()
if args.id:
key = self.api.read_user_key(users[0]['username'], args.id)
print_key(key)
else:
keys = self.api.read_user_keys(users[0]['username'])
print_keys(keys) | cloudControl/cctrl | [
22,
15,
22,
3,
1327055471
] |
def logout(self, args):
"""
Logout a user by deleting the token.json file.
"""
self.api.set_token(None) | cloudControl/cctrl | [
22,
15,
22,
3,
1327055471
] |
def setup(self, args):
user_config = get_user_config(self.settings)
ssh_key_path = self._get_setup_ssh_key_path(user_config, args)
if not is_key_valid(ssh_key_path):
# If given key path is not default and does not exist
# we raise an error
if ssh_key_path != get_default_ssh_key_path():
raise InputErrorException('WrongPublicKey')
# If given key path was the default one, we create the key
# pair for the user
print >> sys.stderr, "Key '{0}' seems to not be a RSA public key or not found!".format(ssh_key_path)
create_new_default_ssh_keys()
ssh_key_content = readContentOf(ssh_key_path)
ssh_auth = self._get_setup_ssh_auth(self.settings, user_config, args)
if args.email:
set_user_config(self.settings, email=args.email)
try:
users = self.api.read_users()
self.api.create_user_key(
users[0]['username'],
ssh_key_content)
except ConflictDuplicateError:
# Key already added, nothing to do.
pass
set_user_config(self.settings,
ssh_auth=ssh_auth,
ssh_path=ssh_key_path) | cloudControl/cctrl | [
22,
15,
22,
3,
1327055471
] |
def config(ctx):
aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
ctx.obj['manager'] = aim_cfg.ConfigManager(aim_ctx, '') | noironetworks/aci-integration-module | [
9,
13,
9,
19,
1456526919
] |
def update(ctx, host):
"""Current database version."""
host = host or ''
ctx.obj['manager'].to_db(ctx.obj['conf'], host=host) | noironetworks/aci-integration-module | [
9,
13,
9,
19,
1456526919
] |
def test_sample_names_spaces(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-names-with-spaces"))
self.assertEqual(1, len(runs))
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertEqual(sample.get_id(), sample.get_id().strip()) | phac-nml/irida-miseq-uploader | [
3,
1,
3,
3,
1453404960
] |
def test_completed_upload(self):
runs = find_runs_in_directory(path.join(path_to_module, "completed"))
self.assertEqual(0, len(runs)) | phac-nml/irida-miseq-uploader | [
3,
1,
3,
3,
1453404960
] |
def raw_page(self):
return self | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def raw_page(self):
return self | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def raw_page(self):
return self | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def raw_page(self):
return self | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def __init__(self, *args, **kwargs):
super(CommandTestCase, self).__init__(*args, **kwargs)
self.client = None
self.cluster_info = None
self.class_id1 = None | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_boolean(self):
rec = self.client.command('create vertex v content {"abcdef":false,'
'"qwerty":TRUE}')
assert rec[0].abcdef is not True, "abcdef expected False: '%s'" % rec[
0].abcdef
assert rec[0].qwerty is True, "qwerty expected True: '%s'" % rec[
0].qwerty
rec_value = self.client.query('select from v')
assert rec_value[0].abcdef is not True, "abcdef expected False: '%s'" % \
rec_value[0].abcdef
assert rec_value[0].qwerty is True, "qwerty expected True: '%s'" % \
rec_value[0].qwerty | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_record_create_embedded_list(self):
# this should succeed with no exception
self.client.record_create(self.class_id1, {'@my_v_class': {'a': ['bar', 'bar']}}) | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_new_orient_dict(self):
rec = self.client.command('create vertex v content {"a":false,'
'"q":TRUE}')
assert rec[0].a is False
assert rec[0].q is True
import re
# this can differ from orientDB versions, so i use a regular expression
assert re.match('[0-1]', str(rec[0]._version))
assert rec[0]._rid == '#10:0'
rec = {'a': 1, 'b': 2, 'c': 3}
rec_position = self.client.record_create(3, rec)
assert rec_position.a == 1
assert rec_position.b == 2
assert rec_position.c == 3
# this can differ from orientDB versions, so i use a regular expression
assert re.match('[0-1]', str(rec_position._version))
assert rec_position._rid == '#3:0'
res = self.client.query("select from " + rec_position._rid)
assert res[0].a == 1
assert res[0].b == 2
assert res[0].c == 3
# this can differ from orientDB versions, so i use a regular expression
assert re.match('[0-1]', str(res[0]._version))
assert res[0]._rid == '#3:0'
print(res[0].oRecordData['a']) | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_nested_objects_1(self):
res = self.client.command(
'create vertex v content {"b":[[1]],"a":{},"d":[12],"c":["x"]}'
)
print(res[0]) | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_nested_objects_3(self):
res = self.client.command(
'create vertex v content {"b":[[1,{"abc":2}]]}'
)
print(res[0])
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1]['abc'] == 2 | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_nested_objects_5(self):
res = self.client.command(
'create vertex v content '
'{"b":[[1,{"dx":[1,2]},"abc"]],"a":{},"d":[12],"c":["x"],"s":111}'
)
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1]['dx'][0] == 1
assert res[0].oRecordData['b'][0][1]['dx'][1] == 2
assert res[0].oRecordData['b'][0][2] == "abc"
assert res[0].oRecordData['a'] == {}
assert res[0].oRecordData['d'][0] == 12
assert res[0].oRecordData['c'][0] == "x"
assert res[0].oRecordData['s'] == 111
print(res[0]) | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_nested_objects_7(self):
res = self.client.command(
'create vertex v content '
'{"b":[{"xx":{"xxx":[1,2,"abc"]}}]}'
)
assert isinstance(res[0].oRecordData['b'], list)
assert isinstance(res[0].oRecordData['b'][0], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx'], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx']['xxx'], list)
assert res[0].oRecordData['b'][0]['xx']['xxx'][0] == 1
assert res[0].oRecordData['b'][0]['xx']['xxx'][1] == 2
assert res[0].oRecordData['b'][0]['xx']['xxx'][2] == "abc"
print(res[0]) | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_nested_objects_9(self):
res = self.client.command(
'create vertex v content '
'{"a":[[1,2],[3,4],[5,6],null]}'
)
assert isinstance(res[0].oRecordData['a'], list)
assert isinstance(res[0].oRecordData['a'][0], list)
assert isinstance(res[0].oRecordData['a'][1], list)
assert isinstance(res[0].oRecordData['a'][2], list)
assert res[0].oRecordData['a'][0][0] == 1
assert res[0].oRecordData['a'][0][1] == 2
print(res[0]) | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_nested_objects_11(self):
res = self.client.command(
'create vertex v content '
'{"embedded_map":{"one":{"three":4}}}'
)
assert isinstance(res[0].oRecordData['embedded_map'], dict)
assert isinstance(res[0].oRecordData['embedded_map']['one'], dict)
assert res[0].oRecordData['embedded_map']['one']["three"] == 4
print(res[0]) | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_nested_objects_13(self):
res = self.client.command(
'create vertex v content '
'{"a":1,"b":{},"c":3}'
)
assert res[0].oRecordData['a'] == 1
assert isinstance(res[0].oRecordData['b'], dict)
assert len(res[0].oRecordData['b']) == 0
assert res[0].oRecordData['c'] == 3
print(res[0]) | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def test_db_list(self):
self.client.connect("root", "root")
databases = self.client.db_list()
assert databases.oRecordData['databases']['GratefulDeadConcerts'] | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def __init__(self, session_factory):
self.session_factory = session_factory | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def resolve(resource_type):
if not isinstance(resource_type, type):
raise ValueError(resource_type)
else:
m = resource_type
return m | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def __init__(self, manager):
self.manager = manager
self.query = self.resource_query_factory(self.manager.session_factory) | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def get_resources(self, query):
return self.query.filter(self.manager) | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def augment(self, resources):
return resources | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def __init__(self, manager):
self.manager = manager | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def get_resources(self, _):
log.warning('The Azure Resource Graph source '
'should not be used in production scenarios at this time.')
session = self.manager.get_session()
client = session.client('azure.mgmt.resourcegraph.ResourceGraphClient')
# empty scope will return all resource
query_scope = ""
if self.manager.resource_type.resource_type != 'armresource':
query_scope = "where type =~ '%s'" % self.manager.resource_type.resource_type
query = QueryRequest(
query=query_scope,
subscriptions=[session.get_subscription_id()]
)
res = client.resources(query)
cols = [c['name'] for c in res.data['columns']]
data = [dict(zip(cols, r)) for r in res.data['rows']]
return data | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def augment(self, resources):
return resources | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def filter(self, resource_manager, **params):
"""Query a set of resources."""
m = self.resolve(resource_manager.resource_type) # type: ChildTypeInfo
parents = resource_manager.get_parent_manager()
# Have to query separately for each parent's children.
results = []
for parent in parents.resources():
try:
subset = resource_manager.enumerate_resources(parent, m, **params)
if subset:
# If required, append parent resource ID to all child resources
if m.annotate_parent:
for r in subset:
r[m.parent_key] = parent[parents.resource_type.id]
results.extend(subset)
except Exception as e:
log.warning('Child enumeration failed for {0}. {1}'
.format(parent[parents.resource_type.id], e))
if m.raise_on_exception:
raise e
return results | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def __repr__(cls):
return "<Type info service:%s client: %s>" % (
cls.service,
cls.client) | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def extra_args(cls, resource_manager):
return {} | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def extra_args(cls, parent_resource):
return {} | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def __new__(cls, name, parents, attrs):
if 'filter_registry' not in attrs:
attrs['filter_registry'] = FilterRegistry(
'%s.filters' % name.lower())
if 'action_registry' not in attrs:
attrs['action_registry'] = ActionRegistry(
'%s.actions' % name.lower())
return super(QueryMeta, cls).__new__(cls, name, parents, attrs) | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def __init__(self, data, options):
super(QueryResourceManager, self).__init__(data, options)
self.source = self.get_source(self.source_type)
self._session = None | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def get_permissions(self):
return () | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def get_session(self):
if self._session is None:
self._session = local_session(self.session_factory)
return self._session | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def get_cache_key(self, query):
return {'source_type': self.source_type, 'query': query} | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def get_model(cls):
return ResourceQuery.resolve(cls.resource_type) | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def source_type(self):
return self.data.get('source', 'describe-azure') | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def check_resource_limit(self, selection_count, population_count):
"""Check if policy's execution affects more resources then its limit.
"""
p = self.ctx.policy
max_resource_limits = MaxResourceLimit(p, selection_count, population_count)
return max_resource_limits.check_resource_limits() | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def register_actions_and_filters(registry, resource_class):
resource_class.action_registry.register('notify', Notify)
if 'logic-app' not in resource_class.action_registry:
resource_class.action_registry.register('logic-app', LogicAppAction) | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def source_type(self):
source = self.data.get('source', self.child_source)
if source == 'describe':
source = self.child_source
return source | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def get_session(self):
if self._session is None:
session = super(ChildResourceManager, self).get_session()
if self.resource_type.resource != constants.RESOURCE_ACTIVE_DIRECTORY:
session = session.get_session_for_resource(self.resource_type.resource)
self._session = session
return self._session | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def register_child_specific(registry, resource_class):
if not issubclass(resource_class, ChildResourceManager):
return
# If Child Resource doesn't annotate parent, there is no way to filter based on
# parent properties.
if resource_class.resource_type.annotate_parent:
resource_class.filter_registry.register('parent', ParentFilter) | kapilt/cloud-custodian | [
2,
2,
2,
8,
1461493242
] |
def __init__(self, request, domains, *args, **kwargs):
super(DcDomainForm, self).__init__(request, None, *args, **kwargs)
self.fields['name'].choices = domains.values_list('name', 'name') | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def __init__(self, request, domain, *args, **kwargs):
super(AdminDomainForm, self).__init__(request, domain, *args, **kwargs)
self.fields['owner'].choices = get_owners(request).values_list('username', 'username')
if not request.user.is_staff:
self.fields['dc_bound'].widget.attrs['disabled'] = 'disabled' | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def _final_data(self, data=None):
data = super(AdminDomainForm, self)._final_data(data=data)
if self.action == 'create': # Add dc parameter when doing POST (required by api.db.utils.get_virt_object)
data['dc'] = self._request.dc.name
return data | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def __init__(self, request, data, _all=False, **kwargs):
super(DnsRecordFilterForm, self).__init__(data, **kwargs)
domains = Domain.objects.order_by('name')
user, dc = request.user, request.dc
if request.GET.get('deleted', False):
domains = domains.exclude(access=Domain.INTERNAL)
else:
domains = domains.exclude(access__in=Domain.INVISIBLE)
if user.is_staff and _all:
domain_choices = [(d.name, d.name) for d in domains]
else:
dc_domain_ids = list(dc.domaindc_set.values_list('domain_id', flat=True))
domains = domains.filter(Q(id__in=dc_domain_ids) | Q(user=user.id))
domain_choices = [(d.name, d.name) for d in domains
if (user.is_staff or d.user == user.id or d.dc_bound == dc.id)]
self.fields['domain'].choices = domain_choices | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def __init__(self, request, domain, record, *args, **kwargs):
self.domain = domain
super(DnsRecordForm, self).__init__(request, record, *args, **kwargs) | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def api_call_args(self, domain_name):
if self.action == 'create':
return domain_name,
else:
return domain_name, self.cleaned_data['id'] | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def __init__(self, request, domain, record, *args, **kwargs):
self.domain = domain
super(MultiDnsRecordForm, self).__init__(request, record, *args, **kwargs) | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def __init__(self, task_id, msg, obj=None):
super(DetailLog, self).__init__()
self.task_id = task_id
self.msg = msg
self.obj = obj
self.dc_id = None # Do not change this, unless you know what you are doing (the "vm_zoneid_changed" case) | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def get_detail(self):
return '\n'.join('%s: %s' % (getLevelName(level), message) for level, message in self) | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def save(self, status):
"""Save task log entry if result is not None"""
if hasattr(status, '__iter__'):
status = [i for i in status if i is not None] # remove None from result
if status:
success = all(status)
else:
success = None
else:
success = status
if success is None:
return
if success:
task_status = states.SUCCESS
else:
task_status = states.FAILURE
task_log(self.task_id, self.msg, obj=self.obj, task_status=task_status, task_result=True,
detail=self.get_detail(), dc_id=self.dc_id, update_user_tasks=False) | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def wrap(fun):
@wraps(fun, assigned=available_attrs(fun))
def inner(task_id, sender, **kwargs):
logger.info('Primary task %s issued a secondary mgmt monitoring task %s', sender, task_id)
status = None
# Every monitoring task should collect logs
# NOTE: However, the monitoring task is responsible for setting up the object related to the log entry
kwargs['log'] = log = DetailLog(sender, msg)
try:
status = fun(task_id, sender, **kwargs)
except Exception as exc:
status = False
log.add(CRITICAL, exc)
raise exc
finally:
log.save(status)
return status
return inner | erigones/esdc-ce | [
106,
27,
106,
56,
1478554493
] |
def rsync_get_file(uri_from, uri_to, user, host, port, key):
cmd = [
'rsync',
'-e',
'ssh -i {} -p {} {}'.format(key, port, ' '.join(SSH_OPTIONS)),
'{}@{}:{}'.format(user, host, uri_from),
uri_to,
]
_call(cmd) | galaxyproject/pulsar | [
36,
43,
36,
69,
1403216408
] |
def scp_get_file(uri_from, uri_to, user, host, port, key):
cmd = [
'scp',
'-P', str(port),
'-i', key
] + SSH_OPTIONS + [
'{}@{}:{}'.format(user, host, uri_from),
uri_to,
]
_call(cmd) | galaxyproject/pulsar | [
36,
43,
36,
69,
1403216408
] |
def _ensure_dir(uri_to, key, port, user, host):
directory = os.path.dirname(uri_to)
cmd = [
'ssh',
'-i', key,
'-p', str(port),
] + SSH_OPTIONS + [
'{}@{}'.format(user, host),
'mkdir', '-p', directory,
]
_call(cmd) | galaxyproject/pulsar | [
36,
43,
36,
69,
1403216408
] |
def setUpClass(cls):
ds = mldb.create_dataset({'id' : 'ds', 'type' : 'sparse.mutable'})
ds.record_row('row0', [['x', 'A', 0]])
ds.record_row('row1', [['x', 'B', 0]])
ds.commit() | mldbai/mldb | [
639,
98,
639,
28,
1449592456
] |
def test_int(self):
n = mldb.get('/v1/query', q="select x from (select 17 as x)", format='atom').json()
self.assertEqual(17, n) | mldbai/mldb | [
639,
98,
639,
28,
1449592456
] |
def test_string(self):
n = mldb.get('/v1/query', q="select x from (select 'blah' as x)", format='atom').json()
self.assertEqual('blah', n) | mldbai/mldb | [
639,
98,
639,
28,
1449592456
] |
def test_error_columns(self):
msg = "Query with atom format returned multiple columns"
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select x,y from (select false as x, 1 as y)", format='atom').json() | mldbai/mldb | [
639,
98,
639,
28,
1449592456
] |
def test_multiple_rows_limit(self):
n = mldb.get('/v1/query', q="select x from ds limit 1", format='atom').json()
self.assertEqual('B', n) | mldbai/mldb | [
639,
98,
639,
28,
1449592456
] |
def test_error_no_column(self):
msg = "Query with atom format returned no column"
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select COLUMN EXPR (WHERE columnName() IN ('Z')) from (select 17 as x)", format='atom').json() | mldbai/mldb | [
639,
98,
639,
28,
1449592456
] |
def test_java() -> None:
sources = {
"src/org/pantsbuild/test/Hello.java": dedent(
"""\
package org.pantsbuild.test;
public class Hello {{
public static void main(String[] args) {{
System.out.println("Hello, World!");
}}
}}
"""
),
"src/org/pantsbuild/test/BUILD": dedent(
"""\
java_sources()
deploy_jar(
name="test_deploy_jar",
main="org.pantsbuild.test.Hello",
dependencies=[":test"],
)
"""
),
"lockfile": EMPTY_RESOLVE,
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.experimental.java",
f"--source-root-patterns=['{tmpdir}/src']",
"--pants-ignore=__pycache__",
f'--jvm-resolves={{"empty": "{tmpdir}/lockfile"}}',
"--jvm-default-resolve=empty",
"run",
f"{tmpdir}/src/org/pantsbuild/test:test_deploy_jar",
]
result = run_pants(args)
assert result.stdout.strip() == "Hello, World!" | pantsbuild/pants | [
2553,
518,
2553,
833,
1355765944
] |
def main(args: Array[String]): Unit = {{
println("Hello, World!") | pantsbuild/pants | [
2553,
518,
2553,
833,
1355765944
] |
def __init__(self, output_dir=None, **kwargs):
self.output_dir = output_dir | tensorflow/examples | [
6911,
7012,
6911,
106,
1531779116
] |
def benchmark_with_function_custom_loops_300_epochs_2_gpus(self):
kwargs = utils.get_cifar10_kwargs()
kwargs.update({'epochs': 300, 'data_format': 'channels_first',
'bottleneck': False, 'compression': 1., 'num_gpu': 2,
'batch_size': 128})
self._run_and_report_benchmark(**kwargs) | tensorflow/examples | [
6911,
7012,
6911,
106,
1531779116
] |
def _run_and_report_benchmark(self, top_1_min=.944, top_1_max=.949, **kwargs):
    """Run the benchmark and report metrics.report.

    Args:
      top_1_min: Min value for top_1 accuracy. Default range is SOTA.
      top_1_max: Max value for top_1 accuracy.
      **kwargs: All args passed to the test.
    """
    started = time.time()
    train_loss, train_acc, _, test_acc = distributed_train.main(**kwargs)
    elapsed = time.time() - started

    # Test accuracy carries the SOTA acceptance band; the rest are
    # informational.
    metrics = [
        {'name': 'accuracy_top_1',
         'value': test_acc,
         'min_value': top_1_min,
         'max_value': top_1_max},
        {'name': 'training_accuracy_top_1', 'value': train_acc},
        {'name': 'train_loss', 'value': train_loss},
    ]
    self.report_benchmark(wall_time=elapsed, metrics=metrics)
6911,
7012,
6911,
106,
1531779116
] |
def create_project(location, with_ui=False, template=None):
    """
    Create a ZeroVM application project by writing a default `zapp.yaml` in
    the specified directory `location`.

    :param location:
        Directory location to place project files.
    :param with_ui:
        Defaults to `False`. If `True`, add basic UI template files as well
        to ``location``.
    :param template:
        Default: ``None``. If no template is specified, use the default
        project template. (See `zpmlib.zapptemplate`.)
    :returns: List of created project files.
    """
    # Guard clauses: create the directory if missing, reject non-directories.
    if not os.path.exists(location):
        os.makedirs(location)
    elif not os.path.isdir(location):
        raise RuntimeError("Target `location` must be a directory")

    # Run the template builder; with ``template`` set to None this loop is
    # essentially a no-op.
    # TODO: just use the afc._created_files
    created_files = []
    with util.AtomicFileCreator() as afc:
        for file_type, path, contents in zapptemplate.template(
                location, template, with_ui=with_ui):
            afc.create_file(file_type, path, contents)
            created_files.append(path)
    return created_files
6,
7,
6,
10,
1384778504
] |
def _generate_job_desc(zapp):
"""
Generate the boot/system.map file contents from the zapp config file.
:param zapp:
`dict` of the contents of a ``zapp.yaml`` file.
:returns:
`dict` of the job description
"""
job = []
# TODO(mg): we should eventually reuse zvsh._nvram_escape
def escape(value):
for c in '\\", \n':
value = value.replace(c, '\\x%02x' % ord(c))
return value
def translate_args(cmdline):
# On Python 2, the yaml module loads non-ASCII strings as
# unicode objects. In Python 2.7.2 and earlier, we must give
# shlex.split a str -- but it is an error to give shlex.split
# a bytes object in Python 3.
need_decode = not isinstance(cmdline, str)
if need_decode:
cmdline = cmdline.encode('utf8')
args = shlex.split(cmdline)
if need_decode:
args = [arg.decode('utf8') for arg in args]
return ' '.join(escape(arg) for arg in args)
for zgroup in zapp['execution']['groups']:
# Copy everything, but handle 'env', 'path', and 'args' specially:
jgroup = dict(zgroup)
path = zgroup['path']
# if path is `file://image:exe`, exec->name is "exe"
# if path is `swift://~/container/obj`, exec->name is "obj"
exec_name = None
if path.startswith('file://'):
exec_name = path.split(':')[-1]
elif path.startswith('swift://'):
# If obj is a pseudo path, like foo/bar/obj, we need to
# handle this as well with a careful split.
# If the object path is something like `swift://~/container/obj`,
# then exec_name will be `obj`.
# If the object path is something like
# `swift://./container/foo/bar/obj`, then the exec_name will be
# `foo/bar/obj`.
exec_name = path.split('/', 4)[-1]
jgroup['exec'] = {
'path': zgroup['path'],
'args': translate_args(zgroup['args']),
}
if exec_name is not None:
jgroup['exec']['name'] = exec_name
del jgroup['path'], jgroup['args']
if 'env' in zgroup:
jgroup['exec']['env'] = zgroup['env']
del jgroup['env']
job.append(jgroup)
return job | zerovm/zerovm-cli | [
6,
7,
6,
10,
1384778504
] |
def _prepare_job(tar, zapp, zapp_swift_url):
    """
    :param tar:
        The application .zapp file, as a :class:`tarfile.TarFile` object.
    :param dict zapp:
        Parsed contents of the application `zapp.yaml` specification, as a
        `dict`.
    :param str zapp_swift_url:
        Path of the .zapp in Swift, which looks like this::

            'swift://AUTH_abcdef123/test_container/hello.zapp'

        See :func:`_get_swift_zapp_url`.
    :returns:
        Extracted contents of the boot/system.map with the swift path to
        the .zapp added to the `devices` for each `group`.

        So if the job looks like this::

            [{'exec': {'args': 'hello.py', 'path': 'file://python2.7:python'},
              'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
              'name': 'hello'}]

        the output will look like something like this::

            [{'exec': {u'args': 'hello.py', 'path': 'file://python2.7:python'},
              'devices': [
                {'name': 'python2.7'},
                {'name': 'stdout'},
                {'name': 'image',
                 'path': 'swift://AUTH_abcdef123/test_container/hello.zapp'},
              ],
              'name': 'hello'}]
    """
    system_map = tar.extractfile(SYSTEM_MAP_ZAPP_PATH)
    # The decode() keeps this working on Python 3, where read() is bytes.
    job = json.loads(system_map.read().decode('utf-8'))
    image_device = {'name': 'image', 'path': zapp_swift_url}
    for group in job:
        group['devices'].append(image_device)
    return job
6,
7,
6,
10,
1384778504
] |
def _add_file_to_tar(root, path, tar, arcname=None):
    """
    Add a single file to an open tar archive.

    :param root:
        Root working directory.
    :param path:
        File path, relative to ``root``.
    :param tar:
        Open :class:`tarfile.TarFile` object to add the file to.
    :param arcname:
        Name for the file inside the archive. Defaults to the file's path
        relative to ``root``, i.e. the same name and path it has on disk.
    """
    # Lazy %-style arguments let logging skip the formatting entirely when
    # the INFO level is disabled (was: eager 'adding %s' % path).
    LOG.info('adding %s', path)
    path = os.path.join(root, path)
    if arcname is None:
        # In the archive, give the file the same name and path.
        arcname = os.path.relpath(path, root)
    tar.add(path, arcname=arcname)
6,
7,
6,
10,
1384778504
] |
def _post_job(url, token, data, http_conn=None, response_dict=None,
              content_type='application/json', content_length=None,
              response_body_buffer=None):
    """POST a job to the ZeroCloud execution endpoint.

    Modelled after swiftclient.client.post_account: reuses ``http_conn``
    when given, otherwise opens a fresh connection to ``url``.
    """
    headers = {
        'X-Auth-Token': token,
        'X-Zerovm-Execute': '1.0',
        'Content-Type': content_type,
    }
    if content_length:
        headers['Content-Length'] = str(content_length)

    parsed, conn = http_conn if http_conn else swiftclient.http_connection(url)
    conn.request('POST', parsed.path, data, headers)
    resp = conn.getresponse()
    body = resp.read()

    swiftclient.http_log((url, 'POST'), {'headers': headers}, resp, body)
    swiftclient.store_response(resp, response_dict)
    if response_body_buffer is not None:
        response_body_buffer.write(body)
6,
7,
6,
10,
1384778504
] |
def authenticate(self):
    """
    Authenticate with the provided credentials and cache the storage URL
    and auth token as `self.url` and `self.token`, respectively.
    """
    # get_auth() is presumably inherited from swiftclient's Connection and
    # returns the (storage_url, auth_token) pair -- confirm upstream.
    self.url, self.token = self.get_auth()
6,
7,
6,
10,
1384778504
] |
def post_zapp(self, data, response_dict=None, content_length=None,
              response_body_buffer=None):
    """POST a gzipped .zapp for execution, via the connection's retry wrapper.

    The first argument to ``_retry`` (``None``) is the reset function --
    nothing needs resetting between attempts here. The content type is
    fixed to application/x-gzip since a .zapp is a gzipped tarball.
    """
    return self._retry(None, _post_job, data,
                       response_dict=response_dict,
                       content_type='application/x-gzip',
                       content_length=content_length,
                       response_body_buffer=response_body_buffer)
6,
7,
6,
10,
1384778504
] |
def _deploy_zapp(conn, target, zapp_path, auth_opts, force=False):
    """Upload all of the necessary files for a zapp.

    Returns the name of an uploaded index file, or the target if no
    index.html file was uploaded.

    :param bool force:
        Force deployment, even if the target container is not empty. This
        means that files could be overwritten and could cause consistency
        problems with these objects in Swift.
    """
    base_container = target.split('/')[0]
    try:
        _, objects = conn.get_container(base_container)
        if objects and not force:
            raise zpmlib.ZPMException(
                "Target container ('%s') is not empty.\nDeploying to a "
                "non-empty container can cause consistency problems with "
                "overwritten objects.\nSpecify the flag `--force/-f` to "
                "overwrite anyway."
                % base_container
            )
    except swiftclient.exceptions.ClientException:
        # Container doesn't exist yet; create it.
        LOG.info("Container '%s' not found. Creating it...", base_container)
        conn.put_container(base_container)

    # If we get here, everything with the container is fine.
    index = target + '/'
    for path, data, content_type in _generate_uploads(conn, target,
                                                      zapp_path, auth_opts):
        if path.endswith('/index.html'):
            index = path
        container, obj = path.split('/', 1)
        conn.put_object(container, obj, data, content_type=content_type)
    return index
6,
7,
6,
10,
1384778504
] |
def _prepare_auth(version, args, conn):
"""
:param str version:
Auth version: "0.0", "1.0", or "2.0". "0.0" indicates "no auth".
:param args:
:class:`argparse.Namespace` instance, with attributes representing the
various authentication parameters
:param conn:
:class:`ZeroCloudConnection` instance.
"""
version = str(float(version))
auth = {'version': version}
if version == '0.0':
auth['swiftUrl'] = conn.url
elif version == '1.0':
auth['authUrl'] = args.auth
auth['username'] = args.user
auth['password'] = args.key
else:
# TODO(mg): inserting the username and password in the
# uploaded file makes testing easy, but should not be done in
# production. See issue #46.
auth['authUrl'] = args.os_auth_url
auth['tenant'] = args.os_tenant_name
auth['username'] = args.os_username
auth['password'] = args.os_password
return auth | zerovm/zerovm-cli | [
6,
7,
6,
10,
1384778504
] |
def deploy_project(args):
    """Deploy a zapp to Swift and, with --execute, run it immediately."""
    conn = _get_zerocloud_conn(args)
    conn.authenticate()
    ui_auth_version = conn.auth_version
    # We can now reset the auth for the web UI, if needed
    if args.no_ui_auth:
        ui_auth_version = '0.0'
    auth = _prepare_auth(ui_auth_version, args, conn)
    # Markup() keeps jinja2 from escaping the JSON when it is injected
    # into the UI templates.
    auth_opts = jinja2.Markup(json.dumps(auth))
    deploy_index = _deploy_zapp(conn, args.target, args.zapp, auth_opts,
                                force=args.force)
    print('app deployed to\n %s/%s' % (conn.url, deploy_index))
    if args.execute:
        # for compatibility with the option name in 'zpm execute'
        args.container = args.target
        resp_body_buffer = BytesIO()
        resp = execute(args, response_body_buffer=resp_body_buffer)
        resp_body_buffer.seek(0)
        # Anything outside 2xx is a failure; surface the response body.
        if resp['status'] < 200 or resp['status'] >= 300:
            raise zpmlib.ZPMException(resp_body_buffer.read())
        if args.summary:
            total_time, exec_table = _get_exec_table(resp)
            print('Execution summary:')
            print(exec_table)
            print('Total time: %s' % total_time)
        # Buffer is still at position 0 here (only the error branch read it).
        sys.stdout.write(resp_body_buffer.read())
6,
7,
6,
10,
1384778504
] |
def _get_exec_table_data(headers):
"""Extract a stats table from execution HTTP response headers.
Stats include things like node name, execution time, number of
reads/writes, bytes read/written, etc.
:param dict headers:
`dict` of response headers from a job execution request. It must
contain at least ``x-nexe-system``, ``x-nexe-status``,
``x-nexe-retcode``, ``x-nexe-cdr-line``.
:returns:
Tuple of two items. The first is the total time for the executed job
(as a `str`). The second is a table (2d `list`) of execution data
extracted from ``X-Nexe-System`` and ``X-Nexe-Cdr-Line`` headers.
Each row in the table consists of the following data:
* node name
* node time
* system time
* user time
* number of disk reads
* number of bytes read from disk
* number of disk writes
* number of bytes written to disk
* number of network reads
* number of bytes read from network
* number of network writes
* number of bytes written to network
"""
node_names = iter(headers['x-nexe-system'].split(','))
statuses = iter(headers['x-nexe-status'].split(','))
retcodes = iter(headers['x-nexe-retcode'].split(','))
cdr = headers['x-nexe-cdr-line']
cdr_data = [x.strip() for x in cdr.split(',')]
total_time = cdr_data.pop(0)
cdr_data = iter(cdr_data)
def adviter(x):
return six.advance_iterator(x)
table_data = []
while True:
try:
node_name = adviter(node_names)
status = adviter(statuses)
retcode = adviter(retcodes)
node_time = adviter(cdr_data)
cdr = adviter(cdr_data).split()
row = [node_name, status, retcode, node_time] + cdr
table_data.append(row)
except StopIteration:
break
return total_time, table_data | zerovm/zerovm-cli | [
6,
7,
6,
10,
1384778504
] |
def __init__(self, **kwargs):
    """Build an H2OXGBoostEstimator, validating every keyword argument
    against the set of known hyper-parameter names."""
    super(H2OXGBoostEstimator, self).__init__()
    self._parms = {}
    # Despite the name, this is a set of all accepted hyper-parameter names.
    names_list = {"model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_models",
                  "keep_cross_validation_predictions", "keep_cross_validation_fold_assignment",
                  "score_each_iteration", "fold_assignment", "fold_column", "response_column", "ignored_columns",
                  "ignore_const_cols", "offset_column", "weights_column", "stopping_rounds", "stopping_metric",
                  "stopping_tolerance", "max_runtime_secs", "seed", "distribution", "tweedie_power",
                  "categorical_encoding", "quiet_mode", "export_checkpoints_dir", "ntrees", "max_depth", "min_rows",
                  "min_child_weight", "learn_rate", "eta", "sample_rate", "subsample", "col_sample_rate",
                  "colsample_bylevel", "col_sample_rate_per_tree", "colsample_bytree", "max_abs_leafnode_pred",
                  "max_delta_step", "monotone_constraints", "score_tree_interval", "min_split_improvement", "gamma",
                  "nthread", "max_bins", "max_leaves", "min_sum_hessian_in_leaf", "min_data_in_leaf", "sample_type",
                  "normalize_type", "rate_drop", "one_drop", "skip_drop", "tree_method", "grow_policy", "booster",
                  "reg_lambda", "reg_alpha", "dmatrix_type", "backend", "gpu_id"}
    # NOTE(review): "Lambda" is rewritten to "lambda_", but "lambda_" is not
    # in names_list above, so it would fall through to H2OValueError below.
    # Confirm whether the Lambda alias is still supported for this estimator.
    if "Lambda" in kwargs: kwargs["lambda_"] = kwargs.pop("Lambda")
    for pname, pvalue in kwargs.items():
        if pname == 'model_id':
            # model_id bypasses setattr validation and is stored directly.
            self._id = pvalue
            self._parms["model_id"] = pvalue
        elif pname in names_list:
            # Using setattr(...) will invoke type-checking of the arguments
            setattr(self, pname, pvalue)
        else:
            raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue))
6169,
1943,
6169,
208,
1393862887
] |
def training_frame(self):
    """
    Id of the training data frame.

    Type: ``H2OFrame``.
    """
    # Returns None until a training frame has been assigned.
    return self._parms.get("training_frame")
6169,
1943,
6169,
208,
1393862887
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.