function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def _subscribe_extend(tensor, side_effects):
  """Helper method to extend the list of side_effects for a subscribed tensor.

  Args:
    tensor: A `tf.Tensor` as returned by subscribe().
    side_effects: List of side_effect functions, see subscribe for details.

  Returns:
    The given subscribed tensor (for API consistency).
  """
  # A subscribed tensor is an identity op with exactly one input: the source.
  assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
      tensor.op.name)
  source_tensor = tensor.op.inputs[0]

  # Build the side effect graphs and add their outputs to the list of control
  # dependencies for the subscribed tensor.
  outs = []
  name_scope = source_tensor.op.name + '/subscription/'
  with ops.name_scope(name_scope):
    for s in side_effects:
      outs += s(source_tensor)

  # Side effects may return tensors or ops; normalize everything to ops
  # before wiring them in as control inputs.
  out_ops = [out.op if isinstance(out, ops.Tensor) else out for out in outs]
  tensor.op._add_control_inputs(out_ops)  # pylint: disable=protected-access
  return tensor
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _subscribe(tensor, side_effects, control_cache): """Helper method that subscribes a single tensor to a list of side_effects. This method will check if the given tensor has already been subscribed or if it's a tensor returned by a previous call to `subscribe()` and, if so, will reuse the existing identity op, appending the given side effects to the list of existing ones. Args: tensor: The `tf.Tensor` to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: `_ControlOutputCache` helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it was already been subscribed. """ # Check if the given tensor has a numpy compatible type (see dtypes.py). # If not, we cannot subscribe it, so we just return the original tensor. if not tensor.dtype.is_numpy_compatible: logging.debug(('Tensor {} has an un-supported {} type and cannot be ' 'subscribed.').format(tensor.name, tensor.dtype)) return tensor if _is_subscribed_identity(tensor): return _subscribe_extend(tensor, side_effects) # Check if the given tensor has already been subscribed by inspecting its # outputs. name_scope = tensor.op.name + '/subscription/Identity' consumers = tensor.consumers() matching_ops = [op for op in consumers if op.name.startswith(name_scope)] assert len(matching_ops) <= 1, ('Op {} must only have one subscription ' 'op connected to it').format(tensor.op.name) if len(matching_ops) == 1: candidate_tensor = matching_ops[0].outputs[0] if _is_subscribed_identity(candidate_tensor): return _subscribe_extend(candidate_tensor, side_effects) return _subscribe_new(tensor, side_effects, control_cache)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _preserve_control_flow_context(tensor): """Preserve the control flow context for the given tensor. Sets the graph context to the tensor's context so that side effect ops are added under the same context. This is needed when subscribing to tensors defined within a conditional block or a while loop. In these cases we need that the side-effect ops are created within the same control flow context as that of the tensor they are attached to. Args: tensor: tensor whose context should be preserved. Yields: None """ # pylint: disable=protected-access context = tensor.op._get_control_flow_context() # pylint: enable=protected-access if context: context.Enter() try: yield finally: if context: context.Exit()
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(self, description='operation'):
  """Create a timer for `description` and start it immediately.

  Args:
    description: human-readable label for the timed operation.
  """
  self._starttime = None
  self._endtime = None
  self._description = description
  # The timer starts as soon as it is constructed.
  self.Start()
ric2b/Vivaldi-browser
[ 131, 27, 131, 3, 1490828945 ]
def GetDelta(self):
  """Returns the rounded delta.

  Also stops the timer if Stop() has not already been called.
  """
  # Auto-stop (quietly) so callers can read the delta without an explicit
  # Stop() call.
  if self._endtime is None:
    self.Stop(log=False)
  elapsed = self._endtime - self._starttime
  # Short operations get two decimals; long ones only need one.
  precision = 2 if elapsed < 10 else 1
  return round(elapsed, precision)
ric2b/Vivaldi-browser
[ 131, 27, 131, 3, 1490828945 ]
def test_mixed_float16_policy(self):
  """GlobalAveragePooling1D must accept a bool mask under mixed_float16."""
  with policy.policy_scope('mixed_float16'):
    inputs1 = keras.Input(shape=(36, 512), dtype='float16')
    inputs2 = keras.Input(shape=(36,), dtype='bool')
    average_layer = keras.layers.pooling.GlobalAveragePooling1D()
    # The second positional argument is the mask; building the op under the
    # mixed precision policy must not raise.
    _ = average_layer(inputs1, inputs2)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_globalpooling_1d_masking_support(self):
  """Masked timesteps must be excluded from the global average."""
  model = keras.Sequential()
  model.add(keras.layers.Masking(mask_value=0., input_shape=(None, 4)))
  model.add(keras.layers.GlobalAveragePooling1D())
  model.compile(loss='mae', optimizer='rmsprop')

  model_input = np.random.random((2, 3, 4))
  # Zero out every timestep after the first in sample 0 so Masking masks them.
  model_input[0, 1:, :] = 0
  output = model.predict(model_input)
  # With the tail masked, the "average" is exactly the first timestep.
  self.assertAllClose(output[0], model_input[0, 0, :])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_globalpooling_2d_with_ragged(self):
  """Ragged and dense inputs must give identical GlobalMaxPooling2D output."""
  ragged_data = ragged_factory_ops.constant(
      [[[[1.0], [1.0]], [[2.0], [2.0]], [[3.0], [3.0]]],
       [[[1.0], [1.0]], [[2.0], [2.0]]]],
      ragged_rank=1)
  dense_data = ragged_data.to_tensor()

  def run_model(data, **input_kwargs):
    # Build a one-layer model around GlobalMaxPooling2D and run predict.
    inp = keras.Input(shape=(None, 2, 1), dtype='float32', **input_kwargs)
    out = keras.layers.GlobalMaxPooling2D()(inp)
    model = keras.models.Model(inputs=inp, outputs=out)
    return model.predict(data, steps=1)

  output_ragged = run_model(ragged_data, ragged=True)
  output_dense = run_model(dense_data)
  self.assertAllEqual(output_ragged, output_dense)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_globalpooling_2d(self):
  """Smoke-test global max/average 2D pooling in both data formats."""
  cases = [
      (keras.layers.pooling.GlobalMaxPooling2D, 'channels_first',
       (3, 4, 5, 6)),
      (keras.layers.pooling.GlobalMaxPooling2D, 'channels_last',
       (3, 5, 6, 4)),
      (keras.layers.pooling.GlobalAveragePooling2D, 'channels_first',
       (3, 4, 5, 6)),
      (keras.layers.pooling.GlobalAveragePooling2D, 'channels_last',
       (3, 5, 6, 4)),
  ]
  for layer_cls, data_format, input_shape in cases:
    testing_utils.layer_test(
        layer_cls,
        kwargs={'data_format': data_format},
        input_shape=input_shape)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_globalpooling_1d_keepdims(self):
  """keepdims=True must keep a length-1 pooled axis in both data formats."""
  for layer_cls in (keras.layers.pooling.GlobalMaxPooling1D,
                    keras.layers.pooling.GlobalAveragePooling1D):
    # channels_last (default): the steps axis collapses to 1.
    testing_utils.layer_test(
        layer_cls,
        kwargs={'keepdims': True},
        input_shape=(3, 4, 5),
        expected_output_shape=(None, 1, 5))
    # channels_first: the trailing steps axis collapses to 1.
    testing_utils.layer_test(
        layer_cls,
        kwargs={'data_format': 'channels_first', 'keepdims': True},
        input_shape=(3, 4, 5),
        expected_output_shape=(None, 4, 1))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_globalpooling_3d_keepdims(self):
  """keepdims=True must keep length-1 pooled axes for 3D global pooling."""
  for layer_cls in (keras.layers.pooling.GlobalMaxPooling3D,
                    keras.layers.pooling.GlobalAveragePooling3D):
    testing_utils.layer_test(
        layer_cls,
        kwargs={'data_format': 'channels_first', 'keepdims': True},
        input_shape=(3, 4, 3, 4, 3),
        expected_output_shape=(None, 4, 1, 1, 1))
    testing_utils.layer_test(
        layer_cls,
        kwargs={'data_format': 'channels_last', 'keepdims': True},
        input_shape=(3, 4, 3, 4, 3),
        expected_output_shape=(None, 1, 1, 1, 3))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_maxpooling_2d(self):
  """MaxPooling2D smoke test over two stride settings."""
  for strides in ((1, 1), (2, 2)):
    testing_utils.layer_test(
        keras.layers.MaxPooling2D,
        kwargs=dict(strides=strides, padding='valid', pool_size=(3, 3)),
        input_shape=(3, 5, 6, 4))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_maxpooling_3d(self):
  """MaxPooling3D smoke test in both data formats."""
  common = {'padding': 'valid', 'pool_size': (3, 3, 3)}
  testing_utils.layer_test(
      keras.layers.MaxPooling3D,
      kwargs=dict(common, strides=2),
      input_shape=(3, 11, 12, 10, 4))
  testing_utils.layer_test(
      keras.layers.MaxPooling3D,
      kwargs=dict(common, strides=3, data_format='channels_first'),
      input_shape=(3, 4, 11, 12, 10))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_maxpooling_1d(self):
  """MaxPooling1D smoke test over padding/stride combos and channels_first."""
  for padding in ('valid', 'same'):
    for stride in (1, 2):
      testing_utils.layer_test(
          keras.layers.MaxPooling1D,
          kwargs=dict(strides=stride, padding=padding),
          input_shape=(3, 5, 4))
  testing_utils.layer_test(
      keras.layers.MaxPooling1D,
      kwargs={'data_format': 'channels_first'},
      input_shape=(3, 2, 6))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def setUp(self):
    """Prepare an lldb/pexpect test: build artifacts and breakpoint line."""
    # Call super's setUp().
    TestBase.setUp(self)
    self.child = None
    self.child_prompt = '(lldb) '
    self.strict_sources = False
    # Source filename.
    self.source = 'main.m'
    # Output filename.
    self.exe_name = self.getBuildArtifact("a.out")
    # Build dictionary consumed by self.build()/setTearDownCleanup().
    self.d = {'OBJC_SOURCES': self.source, 'EXE': self.exe_name}
    # Locate breakpoint.
    self.line = line_number(self.source, '// break here')
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def run_lldb_to_breakpoint(self, exe, source_file, line,
                           settings_commands=None):
    """Spawn lldb under pexpect, set a breakpoint, and run to it.

    Args:
        exe: path to the built inferior executable.
        source_file: file in which to set the breakpoint.
        line: 1-based line number for the breakpoint.
        settings_commands: optional list of extra `settings set` commands
            issued before the breakpoint is set.
    """
    # Set self.child_prompt, which is "(lldb) ".
    prompt = self.child_prompt
    # So that the child gets torn down after the test.
    import pexpect
    import sys
    # pexpect needs the unicode-aware spawn on Python 3.
    if sys.version_info.major == 3:
        self.child = pexpect.spawnu('%s %s %s' % (lldbtest_config.lldbExec,
                                                  self.lldbOption, exe))
    else:
        self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec,
                                                 self.lldbOption, exe))
    child = self.child

    # Turn on logging for what the child sends back.
    if self.TraceOn():
        child.logfile_read = sys.stdout

    # Disable showing of source lines at our breakpoint.
    # This is necessary for the logging tests, because the very
    # text we want to match for output from the running inferior
    # will show up in the source as well.  We don't want the source
    # output to erroneously make a match with our expected output.
    self.runCmd("settings set stop-line-count-before 0")
    self.expect_prompt()
    self.runCmd("settings set stop-line-count-after 0")
    self.expect_prompt()

    # Run any test-specific settings commands now.
    if settings_commands is not None:
        for setting_command in settings_commands:
            self.runCmd(setting_command)
            self.expect_prompt()

    # Set the breakpoint, and run to it.
    child.sendline('breakpoint set -f %s -l %d' % (source_file, line))
    child.expect_exact(prompt)
    child.sendline('run')
    child.expect_exact(prompt)

    # Ensure we stopped at a breakpoint.
    self.runCmd("thread list")
    self.expect(re.compile(r"stop reason = .*breakpoint"))
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def expect_prompt(self, exactly=True):
    """Wait for the lldb prompt; `exactly` selects exact-string matching."""
    self.expect(self.child_prompt, exactly=exactly)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def do_test(self, expect_regexes=None, settings_commands=None):
    """ Run a test.

    Builds the inferior, runs it under lldb to the breakpoint, continues,
    and checks the child output against `expect_regexes`.
    """
    self.build(dictionary=self.d)
    self.setTearDownCleanup(dictionary=self.d)
    exe = self.getBuildArtifact(self.exe_name)
    self.run_lldb_to_breakpoint(exe, self.source, self.line,
                                settings_commands=settings_commands)
    self.expect_prompt()

    # Now go.
    self.runCmd("process continue")
    self.expect(expect_regexes)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def setUp(self):
    """Create a fresh temp dir and a LocalOutputManager rooted in it."""
    self._output_dir = tempfile.mkdtemp()
    self._output_manager = local_output_manager.LocalOutputManager(
        self._output_dir)
ric2b/Vivaldi-browser
[ 131, 27, 131, 3, 1490828945 ]
def tearDown(self):
    """Remove the per-test output directory created in setUp."""
    doomed = self._output_dir
    shutil.rmtree(doomed)
ric2b/Vivaldi-browser
[ 131, 27, 131, 3, 1490828945 ]
def __new__(cls, value):
    """Interning constructor: one VerNum instance per distinct str(value).

    Instances are cached in the class-level `_instances` dict so equal
    version strings share one object.
    """
    val = str(value)
    if val not in cls._instances:
        cls._instances[val] = super(VerNum, cls).__new__(cls)
    ret = cls._instances[val]
    return ret
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __add__(self, value):
    """Return a new VerNum equal to self + value (integer addition)."""
    return VerNum(int(self) + int(value))
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __eq__(self, value):
    """Version numbers compare equal when their integer values match."""
    lhs, rhs = int(self), int(value)
    return lhs == rhs
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __lt__(self, value):
    """Order version numbers by their integer values."""
    lhs, rhs = int(self), int(value)
    return lhs < rhs
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __ge__(self, value):
    """Greater-or-equal comparison on the integer values."""
    lhs, rhs = int(self), int(value)
    return lhs >= rhs
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __repr__(self):
    """Debug representation, e.g. ``<VerNum(3)>``."""
    return "<VerNum(%s)>" % (self.value,)
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __int__(self):
    """Integer value of this version number."""
    raw = self.value
    return int(raw)
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __hash__(self):
    """Hash on the underlying string value (consistent with interning)."""
    raw = self.value
    return hash(raw)
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __init__(self, path):
    """Collect current version scripts in repository
    and store them in self.versions

    Args:
        path: filesystem path of the migration repository's version dir.

    Raises:
        Exception: if the directory uses the deprecated one-directory-per-
            version layout.
    """
    super(Collection, self).__init__(path)

    # Create temporary list of files, allowing skipped version numbers.
    files = os.listdir(path)
    if '1' in files:
        # deprecation
        raise Exception('It looks like you have a repository in the old '
                        'format (with directories for each version). '
                        'Please convert repository before proceeding.')

    # Group filenames by their leading version number.
    tempVersions = dict()
    for filename in files:
        match = self.FILENAME_WITH_VERSION.match(filename)
        if match:
            num = int(match.group(1))
            tempVersions.setdefault(num, []).append(filename)
        else:
            pass  # Must be a helper file or something, let's ignore it.

    # Create the versions member where the keys
    # are VerNum's and the values are Version's.
    self.versions = dict()
    for num, files in tempVersions.items():
        self.versions[VerNum(num)] = Version(num, path, files)
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def latest(self):
    """:returns: Latest version in Collection"""
    # VerNum(0) seeds the max so an empty collection reports version 0.
    candidates = [VerNum(0)]
    candidates.extend(self.versions.keys())
    return max(candidates)
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def create_new_python_version(self, description, **k):
    """Create Python files for new version

    Args:
        description: free-text description, sanitized into the filename.
        **k: forwarded to `script.PythonScript.create`; may contain
            `use_timestamp_numbering` to control version numbering.
    """
    ver = self._next_ver_num(k.pop('use_timestamp_numbering', False))
    extra = str_to_filename(description)

    if extra:
        if extra == '_':
            # A description that sanitized to a lone underscore adds nothing.
            extra = ''
        elif not extra.startswith('_'):
            extra = '_%s' % extra

    # Zero-padded 3-digit version prefix, e.g. "007_add_index.py".
    filename = '%03d%s.py' % (ver, extra)
    filepath = self._version_path(filename)

    script.PythonScript.create(filepath, **k)
    self.versions[ver] = Version(ver, self.path, [filename])
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def version(self, vernum=None):
    """Returns required version.

    If vernum is not given latest version will be returned otherwise
    required version will be returned.

    :raises: exceptions.VersionNotFoundError if the respective migration
        script file of the version is not present in the migration
        repository.
    """
    if vernum is None:
        # NOTE(review): `latest` is referenced without calling -- presumably
        # it is a @property on this class; confirm at its definition site.
        vernum = self.latest
    try:
        return self.versions[VerNum(vernum)]
    except KeyError:
        raise exceptions.VersionNotFoundError(
            ("Database schema file with version %(args)s doesn't "
             "exist.") % {'args': VerNum(vernum)})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def clear(cls):
    # Delegates to the parent class's clear(); presumably decorated as a
    # @classmethod at the (not visible) definition site -- confirm.
    super(Collection, cls).clear()
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __init__(self, vernum, path, filelist):
    """Collect the scripts that make up a single repository version."""
    self.version = VerNum(vernum)

    # Collect scripts in this folder.
    self.sql = dict()
    self.python = None

    # Loop variable renamed from `script` to avoid shadowing the `script`
    # module used elsewhere in this file.
    for filename in filelist:
        self.add_script(os.path.join(path, filename))
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def add_script(self, path):
    """Add script to Collection/Version, dispatching on file extension."""
    if path.endswith(Extensions.py):
        self._add_script_py(path)
        return
    if path.endswith(Extensions.sql):
        self._add_script_sql(path)
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def _add_script_sql(self, path):
    """Parse an SQL script filename and file it under (dbms, operation).

    Expected form: ###_description_database_operation.sql, with a special
    case for the three-token 'ibm_db_sa' database name.

    Raises:
        exceptions.ScriptError: if the filename does not match the scheme.
    """
    basename = os.path.basename(path)
    match = self.SQL_FILENAME.match(basename)

    if match:
        basename = basename.replace('.sql', '')
        parts = basename.split('_')
        if len(parts) < 3:
            raise exceptions.ScriptError(
                "Invalid SQL script name %s " % basename + \
                "(needs to be ###_description_database_operation.sql)")

        version = parts[0]
        op = parts[-1]

        # NOTE(mriedem): check for ibm_db_sa as the database in the name
        if 'ibm_db_sa' in basename:
            if len(parts) == 6:
                # 'ibm_db_sa' spans three underscore-separated tokens.
                dbms = '_'.join(parts[-4: -1])
            else:
                raise exceptions.ScriptError(
                    "Invalid ibm_db_sa SQL script name '%s'; "
                    "(needs to be "
                    "###_description_ibm_db_sa_operation.sql)" % basename)
        else:
            dbms = parts[-2]
    else:
        raise exceptions.ScriptError(
            "Invalid SQL script name %s " % basename + \
            "(needs to be ###_description_database_operation.sql)")

    # File the script into a dictionary
    self.sql.setdefault(dbms, {})[op] = script.SqlScript(path)
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def setUp(self):
    """Save the CWD and sys.argv so tearDown can restore them."""
    super(BuildDumbTestCase, self).setUp()
    self.old_location = os.getcwd()
    # Keep both the original argv list object and a copy of its contents.
    self.old_sys_argv = sys.argv, sys.argv[:]
FFMG/myoddweb.piger
[ 16, 2, 16, 2, 1456065110 ]
def test_simple_built(self):
    """End-to-end bdist_dumb run: build a tiny package and check the zip."""
    # let's create a simple package
    tmp_dir = self.mkdtemp()
    pkg_dir = os.path.join(tmp_dir, 'foo')
    os.mkdir(pkg_dir)
    self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
    self.write_file((pkg_dir, 'foo.py'), '#')
    self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
    self.write_file((pkg_dir, 'README'), '')

    dist = Distribution({'name': 'foo', 'version': '0.1',
                         'py_modules': ['foo'],
                         'url': 'xxx', 'author': 'xxx',
                         'author_email': 'xxx'})
    dist.script_name = 'setup.py'
    os.chdir(pkg_dir)

    sys.argv = ['setup.py']
    cmd = bdist_dumb(dist)

    # so the output is the same no matter
    # what is the platform
    cmd.format = 'zip'

    cmd.ensure_finalized()
    cmd.run()

    # see what we have
    dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
    base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name)

    self.assertEqual(dist_created, [base])

    # now let's check what we have in the zip file
    fp = zipfile.ZipFile(os.path.join('dist', base))
    try:
        contents = fp.namelist()
    finally:
        fp.close()

    contents = sorted(filter(None, map(os.path.basename, contents)))
    wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py']
    if not sys.dont_write_bytecode:
        # Bytecode is only present when the interpreter writes .pyc files.
        wanted.append('foo.%s.pyc' % sys.implementation.cache_tag)
    self.assertEqual(contents, sorted(wanted))
FFMG/myoddweb.piger
[ 16, 2, 16, 2, 1456065110 ]
def upgrade():
    """De-duplicate user.ckan_api, then enforce uniqueness.

    Rows sharing a ckan_api value keep only the first (by id); duplicates
    are NULLed so the unique constraint can be created without conflict.
    """
    query = '''UPDATE "user" SET ckan_api=null WHERE id IN (SELECT id FROM (SELECT id, row_number() over (partition BY ckan_api ORDER BY id) AS rnum FROM "user") t WHERE t.rnum > 1); '''
    op.execute(query)
    op.create_unique_constraint('ckan_api_uq', 'user', ['ckan_api'])
PyBossa/pybossa
[ 716, 269, 716, 21, 1321773782 ]
def set_test_params(self):
    """Framework config: no nodes are spawned; the CLI path is exercised."""
    self.supports_cli = True
    self.num_nodes = 0
NeblioTeam/neblio
[ 119, 45, 119, 29, 1501023994 ]
def run_test(self):
    # Intentionally empty: this test only exercises framework setup/teardown.
    pass
NeblioTeam/neblio
[ 119, 45, 119, 29, 1501023994 ]
def add_gallery_post(generator):
    """Attach `album` and sorted `galleryimages` to articles that declare a
    'gallery' metadata key, scanning PATH/images/gallery/<album>/."""
    base_path = generator.settings.get('PATH')
    gallery_root = os.path.join(base_path, 'images/gallery')

    for article in generator.articles:
        if 'gallery' not in article.metadata.keys():
            continue
        album = article.metadata.get('gallery')
        album_path = os.path.join(gallery_root, album)
        images = []
        if os.path.isdir(album_path):
            for entry in os.listdir(album_path):
                full = os.path.join(os.path.join(gallery_root, album), entry)
                # Skip dotfiles and anything that is not a regular file.
                if not entry.startswith('.') and os.path.isfile(full):
                    images.append(entry)
            article.album = album
            article.galleryimages = sorted(images)
ioos/system-test
[ 7, 14, 7, 59, 1387901856 ]
def generate_gallery_page(generator):
    """Attach a {album: [images]} dict to pages using the 'gallery' template.

    Scans PATH/images/gallery/<album>/ directories, collecting non-hidden
    regular files per album, each list sorted by filename.

    Args:
        generator: Pelican generator with `settings` and `pages`.
    """
    contentpath = generator.settings.get('PATH')
    gallerycontentpath = os.path.join(contentpath, 'images/gallery')
    for page in generator.pages:
        if page.metadata.get('template') == 'gallery':
            gallery = dict()
            for a in os.listdir(gallerycontentpath):
                albumpath = os.path.join(gallerycontentpath, a)
                if not a.startswith('.') and os.path.isdir(albumpath):
                    for i in os.listdir(albumpath):
                        # Bug fix: the original re-tested a.startswith('.')
                        # here, so hidden files INSIDE an album leaked into
                        # the gallery; test the file name `i` instead.
                        if not i.startswith('.') and os.path.isfile(
                                os.path.join(albumpath, i)):
                            gallery.setdefault(a, []).append(i)
                    # Bug fix: only sort albums that contributed files --
                    # the original `gallery[a].sort()` raised KeyError for
                    # albums containing no eligible files.
                    if a in gallery:
                        gallery[a].sort()
            page.gallery = gallery
ioos/system-test
[ 7, 14, 7, 59, 1387901856 ]
def input_fn_train():
    """Example input_fn stub.

    Returns (x, y) where y is the label's class index; this placeholder
    intentionally returns None.
    """
    # Bug fix: the original `def input_fn_train:` was a SyntaxError --
    # the parameter list parentheses were missing.
    # returns x, y (where y represents label's class index).
    pass
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def _get_feature_dict(features):
    """Normalize features to a dict; bare tensors are keyed by ''."""
    if not isinstance(features, dict):
        return {"": features}
    return features
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def train_op_fn(loss):
    """Build a training op: Adagrad with a fixed 0.3 learning rate.

    global_step=None means the step counter is not incremented here.
    """
    return optimizers.optimize_loss(
        loss, global_step=None, learning_rate=0.3, optimizer="Adagrad")
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def input_fn_train():
    """Example input_fn stub.

    Returns (x, y) where y is the label's class index; this placeholder
    intentionally returns None.
    """
    # Bug fix: the original `def input_fn_train:` was a SyntaxError --
    # the parameter list parentheses were missing.
    # returns x, y (where y represents label's class index).
    pass
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def __init__(self,
             model_dir=None,
             n_classes=2,
             weight_column_name=None,
             config=None,
             feature_engineering_fn=None,
             label_keys=None):
    """Initializes a DebugClassifier instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        It must be greater than 1. Note: Class labels are integers
        representing the class index (i.e. values from 0 to n_classes-1).
        For arbitrary label values (e.g. string labels), convert to class
        indices first.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during
        training. It will be multiplied by the loss of the example.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features
        and labels which are the output of `input_fn` and returns features
        and labels which will be fed into the model.
      label_keys: Optional list of strings with size `[n_classes]` defining
        the label vocabulary. Only supported for `n_classes` > 2.

    Returns:
      A `DebugClassifier` estimator.

    Raises:
      ValueError: If `n_classes` < 2.
    """
    # The multi-class head carries all classification-specific behavior;
    # the shared debug_model_fn consumes it via params.
    params = {"head":
              head_lib.multi_class_head(
                  n_classes=n_classes,
                  weight_column_name=weight_column_name,
                  enable_centered_bias=True,
                  label_keys=label_keys)}

    super(DebugClassifier, self).__init__(
        model_fn=debug_model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def predict_proba(self,
                  input_fn=None,
                  batch_size=None):
    """Returns prediction probabilities for given features.

    Args:
      input_fn: Input function.
      batch_size: Override default batch size.

    Returns:
      An iterable of predicted probabilities with shape [batch_size,
      n_classes].
    """
    key = prediction_key.PredictionKey.PROBABILITIES
    preds = self.predict(
        input_fn=input_fn, batch_size=batch_size, outputs=[key])
    # Lazily unwrap the requested output key from each prediction dict.
    return (pred[key] for pred in preds)
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def input_fn_train():
    """Example input_fn stub.

    Returns (x, y) where y is the label's class index; this placeholder
    intentionally returns None.
    """
    # Bug fix: the original `def input_fn_train:` was a SyntaxError --
    # the parameter list parentheses were missing.
    # returns x, y (where y represents label's class index).
    pass
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def __init__(self,
             model_dir=None,
             label_dimension=1,
             weight_column_name=None,
             config=None,
             feature_engineering_fn=None):
    """Initializes a DebugRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor`
        objects (typically, these have shape `[batch_size,
        label_dimension]`).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during
        training. It will be multiplied by the loss of the example.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features
        and labels which are the output of `input_fn` and returns features
        and labels which will be fed into the model.

    Returns:
      A `DebugRegressor` estimator.
    """
    # The regression head carries all regression-specific behavior; the
    # shared debug_model_fn consumes it via params.
    params = {
        "head":
            head_lib.regression_head(
                weight_column_name=weight_column_name,
                label_dimension=label_dimension,
                enable_centered_bias=True)
    }

    super(DebugRegressor, self).__init__(
        model_fn=debug_model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
npuichigo/ttsflow
[ 16, 6, 16, 1, 1500635633 ]
def __init__(self, dataset_url):
    """Accumulator for the pieces of an SQL-like query against a dataset."""
    self.dataset_url = dataset_url
    self.SELECT = Counter()   # column expression -> reference count
    self.WHERE = None         # single combined boolean expression (string)
    self.GROUPBY = list()     # GROUP BY terms (strings)
    self.ORDERBY = list()     # ORDER BY terms
    self.OFFSET = None        # slice start, set lazily
    self.LIMIT = None         # slice length, set lazily
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def removeSELECT(self, obj):
    """Decrement the refcount of a SELECT column, dropping it at zero."""
    logging.debug("Removing SELECT {}".format(obj))
    if obj in self.SELECT:
        self.SELECT[obj] -= 1
        if not self.SELECT[obj]:
            del self.SELECT[obj]
        logging.debug(self.SELECT)
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def addWHERE(self, where, boolean=None):
    """Attach a WHERE clause, AND/OR-combining it with any existing one."""
    if where is None:
        return
    if self.WHERE is None:
        # First clause: store as-is.
        self.WHERE = where
        return
    # Subsequent clauses must say how to combine with what is there.
    if boolean not in ("OR", "AND"):
        raise RuntimeError("Boolean instruction must OR or AND")
    self.WHERE = "({} {} {})".format(self.WHERE, boolean, where)
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def addGROUPBY(self, value):
    """Append a GROUP BY term, stored as its string form."""
    text = str(value)
    self.GROUPBY.append(text)
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def setOFFSET(self, value):
    # Basically the start of slicing. This can normally be a negative
    # number in python. For now (and probably forever), not supported.
    # i.e. my_list[-10:] is not supported
    if type(value) is not int:
        raise RuntimeError("Can only slice with integer")
    if value < 0:
        raise RuntimeError("Slicing with negative index is not allowed")
    # Repeated calls keep the largest offset seen so far.
    if self.OFFSET is None or self.OFFSET < value:
        self.OFFSET = value
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def addORDERBY(self, value):
    """Append an ORDER BY term (stored as-is, not stringified)."""
    self.ORDERBY += [value]
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def mergeQuery(self, query, how=None):
    """Merge another Query's clauses into this one.

    Args:
        query: the Query to absorb.
        how: boolean connector ("AND"/"OR") forwarded to the WHERE merge.

    Raises:
        RuntimeError: if both queries define OFFSET, or both define LIMIT
            (only one side may slice).
    """
    self.mergeSELECT(query)
    self.mergeWHERE(query, how)
    self.mergeGROUPBY(query)
    self.mergeORDERBY(query)

    # Slicing cannot be combined: take whichever side defined it.
    if self.OFFSET is not None and query.OFFSET is not None:
        raise RuntimeError("Multiple slicing asked")
    if self.OFFSET is None:
        self.OFFSET = query.OFFSET

    if self.LIMIT is not None and query.LIMIT is not None:
        raise RuntimeError("Multiple slicing asked")
    if self.LIMIT is None:
        self.LIMIT = query.LIMIT
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def executeQuery(self, format):
    """Run the built query against the dataset's /query endpoint.

    Args:
        format: value for the REST "format" parameter.

    Returns:
        The decoded JSON response body, or {} when the request failed or
        the body was not valid JSON.
    """
    query = self.buildQuery()
    query["format"] = format
    logging.debug("REST params\n{}".format(json.dumps(query)))

    select_url = self.dataset_url + "/query"
    # Bug fix: `response` was unbound in the error paths when requests.get
    # itself raised, turning a logged failure into a NameError.
    response = None
    try:
        # logging.info(select_url)
        response = requests.get(select_url, params=query)
        logging.info("URL poked {}".format(response.url))
    except requests.HTTPError as e:
        logging.error("Code: {}\nReason: {}".format(
            e.status_code, e.reason))
        if response is not None:
            logging.error("Content: {}".format(response.content))
        logging.error(traceback.format_exc())

    if response is None:
        # The request never completed; keep the best-effort contract.
        return {}

    if response.status_code != 200:
        logging.error("Code: {}\nReason: {}".format(
            response.status_code, response.reason))
        logging.error("Content: {}".format(response.content))
        logging.error(traceback.format_exc())

    try:
        return response.json()
    except ValueError:
        # Bug fix: narrowed from a bare `except:` -- only swallow JSON
        # decode failures, not KeyboardInterrupt/SystemExit etc.
        return {}
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def __and__(self, value):
    """Combine two Query objects with a boolean AND of their clauses."""
    # NOTE(review): implicitly returns None when `value` is not a Query;
    # returning NotImplemented would be the conventional choice -- confirm
    # callers do not rely on the None.
    if isinstance(value, Query):
        query = self.copy()
        # self.addWHERE('AND')
        query.mergeQuery(value, "AND")
        return query
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def __ror__(self, value):
    # Reflected OR (non-Query | Query) is deliberately unsupported.
    raise NotImplementedError()
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def __repr__(self):
    """Pretty-printed JSON of the fully built query parameters."""
    return json.dumps(self.buildQuery(), indent=4)
datacratic/pymldb
[ 8, 12, 8, 4, 1426693694 ]
def __ls(broadcast_vars, iterator):
    """ Get the list of files in the worker-local directory """
    # Runs on the executor: pairs the worker hostname with the contents of
    # its SparkFiles root (the per-job temp directory).
    return [__get_hostname(), os.listdir(SparkFiles.getRootDirectory())]
moutai/sparkonda
[ 11, 2, 11, 2, 1448656530 ]
def __unpack_conda_env(broadcast_vars, iterator): """ Command to install the conda env in the local worker """ # transform '/path/to/conda' to 'path' worker_conda_folder = __parse_worker_local_path(broadcast_vars['CONDA_ENV_LOCATION']) # delete existing unpacked env root 'path' folder cmd = 'rm -rf %s' % worker_conda_folder subprocess.check_output(cmd.split(' ')) # unpack the env with tarfile.open(broadcast_vars['CONDA_ENV_NAME'] + '.tar') as tar_handle: tar_handle.errorlevel = UNPACKING_ERROR_LEVEL tar_handle.extractall() # remove the env tar file cmd = 'rm -rf %s.tar' % broadcast_vars['CONDA_ENV_NAME'] subprocess.check_output(cmd.split(' ')) return [__get_hostname(), 'done']
moutai/sparkonda
[ 11, 2, 11, 2, 1448656530 ]
def __get_hostname():
    """ Get the hostname of the worker """
    # Imported locally so the function is self-contained when shipped to
    # executors.
    import socket
    hostname = socket.gethostname()
    return hostname
moutai/sparkonda
[ 11, 2, 11, 2, 1448656530 ]
def __get_local_module_file_location():
    """Discover the location of the sparkonda module source file.

    Returns the path to the .py source even when the module was loaded
    from a compiled .pyc file.
    """
    module_file = os.path.abspath(__file__)
    # Bug fix: the original used str.replace('pyc', 'py'), which rewrites
    # ANY occurrence of 'pyc' in the path (e.g. a directory named
    # '.../pycharm/...'); only strip the compiled-extension suffix.
    if module_file.endswith('.pyc'):
        module_file = module_file[:-1]
    return module_file
moutai/sparkonda
[ 11, 2, 11, 2, 1448656530 ]
def __tar_env():
    """Tar the local conda env into /tmp/<CONDA_ENV_NAME>.tar.

    (The original docstring said "Untar"; this function *creates* the
    archive by walking CONDA_ENV_LOCATION and adding every file.)
    """
    with tarfile.open('/tmp/' + CONDA_ENV_NAME + '.tar', 'w') as tar_handle:
        for root, dirs, files in os.walk(CONDA_ENV_LOCATION):
            for cur_file in files:
                tar_handle.add(os.path.join(root, cur_file))
moutai/sparkonda
[ 11, 2, 11, 2, 1448656530 ]
def pack_conda_env(overwrite=True):
    """ Pack the local conda env located at CONDA_ENV_LOCATION

    Args:
        overwrite: when True the tarball is rebuilt unconditionally;
            when False it is only built if missing.
    """
    if overwrite:
        print('Overwriting tar file if exists at:'
              + '/tmp/' + CONDA_ENV_NAME + '.tar')
        __tar_env()
    else:
        if not os.path.exists('/tmp/%s.tar' % CONDA_ENV_NAME):
            print('Tar file does not exist, creating new tar file at:'
                  + '/tmp/' + CONDA_ENV_NAME + '.tar')
            __tar_env()
moutai/sparkonda
[ 11, 2, 11, 2, 1448656530 ]
def list_cwd_files(sc, debug=False):
    """ List the files in the temporary directory of each executor
    given a sparkcontext

    Runs the __ls helper on every executor via prun.
    """
    return prun(sc, __ls, debug=debug)
moutai/sparkonda
[ 11, 2, 11, 2, 1448656530 ]
def remove_conda_env(sc):
    """ Remove the conda env from each executor
    given a sparkcontext

    Runs the __rm_conda_env helper on every executor via prun.
    """
    return prun(sc, __rm_conda_env)
moutai/sparkonda
[ 11, 2, 11, 2, 1448656530 ]
def file(self):
    # Context-manager-style accessor for the region file handle.
    # Python 2 code: the bare name `file` below resolves to the py2 builtin
    # (the method name only shadows it via attribute access, not here).
    openfile = lambda: file(self.path, "rb+")
    if PocketChunksFile.holdFileOpen:
        # Lazily open one persistent handle and hand out a non-closing
        # wrapper so `with self.file as f:` does not close it.
        if self._file is None:
            self._file = openfile()
        return notclosing(self._file)
    else:
        # One fresh handle per use; closed normally by the caller's with.
        return openfile()
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def __init__(self, path):
    """Open (creating if needed) a region file and index its sectors.

    Python 2 code (`file`, `print` statement, integer `/`, `xrange`).
    Pads the file to a whole number of 4KiB sectors, reads the chunk
    offset table from sector 0, builds the free-sector map, and repairs
    the file if the table is inconsistent.
    """
    self.path = path
    self._file = None
    if not os.path.exists(path):
        # Create an empty file so the open-for-update below succeeds.
        file(path, "w").close()

    with self.file as f:
        filesize = os.path.getsize(path)
        if filesize & 0xfff:
            # Round the size up to the next sector boundary.
            filesize = (filesize | 0xfff) + 1
            f.truncate(filesize)

        if filesize == 0:
            # Brand-new file: reserve sector 0 for the offset table.
            filesize = self.SECTOR_BYTES
            f.truncate(filesize)

        f.seek(0)
        offsetsData = f.read(self.SECTOR_BYTES)

        # py2 integer division: sector count of the whole file.
        self.freeSectors = [True] * (filesize / self.SECTOR_BYTES)
        self.freeSectors[0] = False

        # Offset table: one little-endian u4 per chunk slot.
        self.offsets = fromstring(offsetsData, dtype='<u4')

    needsRepair = False
    for index, offset in enumerate(self.offsets):
        # Each entry packs: high 24 bits = first sector, low 8 = length.
        sector = offset >> 8
        count = offset & 0xff

        for i in xrange(sector, sector + count):
            if i >= len(self.freeSectors):
                # raise RegionMalformed("Region file offset table points to sector {0} (past the end of the file)".format(i))
                print "Region file offset table points to sector {0} (past the end of the file)".format(i)
                needsRepair = True
                break
            if self.freeSectors[i] is False:
                # Two chunks claim the same sector -- table is corrupt.
                logger.debug("Double-allocated sector number %s (offset %s @ %s)", i, offset, index)
                needsRepair = True
            self.freeSectors[i] = False

    if needsRepair:
        self.repair()

    logger.info("Found region file {file} with {used}/{total} sectors used and {chunks} chunks present".format(
        file=os.path.basename(path), used=self.usedSectors, total=self.sectorCount, chunks=self.chunkCount))
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def usedSectors(self):
    """Number of sectors currently allocated (i.e. not free)."""
    free = sum(self.freeSectors)
    return len(self.freeSectors) - free
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def sectorCount(self):
    """Total number of sectors tracked by this region file."""
    sectors = self.freeSectors
    return len(sectors)
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def chunkCount(self):
    """Number of chunks present: offset-table entries with a nonzero word."""
    present = self.offsets > 0
    return sum(present)
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def _readChunk(self, cx, cz):
    """Return the raw sector data for chunk (cx, cz), or None when the chunk
    is absent or its offset entry is invalid."""
    cx &= 0x1f
    cz &= 0x1f
    offset = self.getOffset(cx, cz)
    if offset == 0:
        return None

    # offset packs (first sector << 8 | sector count)
    firstSector = offset >> 8
    numSectors = offset & 0xff
    if numSectors == 0:
        return None
    # reject entries pointing past the end of the file
    if firstSector + numSectors > len(self.freeSectors):
        return None

    with self.file as f:
        f.seek(firstSector * self.SECTOR_BYTES)
        data = f.read(numSectors * self.SECTOR_BYTES)
    assert len(data) > 0
    logger.debug("REGION LOAD %s,%s sector %s", cx, cz, firstSector)
    return data
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def saveChunk(self, chunk):
    """Write a chunk's serialized data into the region file, reusing its
    existing sectors when they fit, otherwise reallocating (from a free run
    or by growing the file).

    Fixes vs. original: ``//`` instead of ``/`` for the sector count
    (identical under Python 2, required under true division) and ``range``
    instead of ``xrange``.
    """
    cx, cz = chunk.chunkPosition
    cx &= 0x1f
    cz &= 0x1f
    offset = self.getOffset(cx, cz)
    sectorNumber = offset >> 8
    sectorsAllocated = offset & 0xff

    data = chunk._savedData()
    # header + payload, rounded up to whole sectors
    sectorsNeeded = (len(data) + self.CHUNK_HEADER_SIZE) // self.SECTOR_BYTES + 1
    if sectorsNeeded >= 256:
        # sector count must fit in one byte of the packed offset
        return

    # NOTE(review): `format` below is the builtin passed straight through to
    # writeSector in the original code — looks like a leftover argument;
    # preserved as-is pending a check of writeSector's signature.
    if sectorNumber != 0 and sectorsAllocated >= sectorsNeeded:
        logger.debug("REGION SAVE {0},{1} rewriting {2}b".format(cx, cz, len(data)))
        self.writeSector(sectorNumber, data, format)
    else:
        # we need to allocate new sectors

        # mark the sectors previously used for this chunk as free
        for i in range(sectorNumber, sectorNumber + sectorsAllocated):
            self.freeSectors[i] = True

        # scan for a run of free sectors long enough to hold the chunk
        runLength = 0
        try:
            runStart = self.freeSectors.index(True)
            for i in range(runStart, len(self.freeSectors)):
                if runLength:
                    if self.freeSectors[i]:
                        runLength += 1
                    else:
                        runLength = 0
                elif self.freeSectors[i]:
                    runStart = i
                    runLength = 1
                if runLength >= sectorsNeeded:
                    break
        except ValueError:
            pass

        # we found a free space large enough
        if runLength >= sectorsNeeded:
            logger.debug("REGION SAVE {0},{1}, reusing {2}b".format(cx, cz, len(data)))
            sectorNumber = runStart
            self.setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
            self.writeSector(sectorNumber, data, format)
            self.freeSectors[sectorNumber:sectorNumber + sectorsNeeded] = [False] * sectorsNeeded
        else:
            # no free space large enough found -- we need to grow the file
            logger.debug("REGION SAVE {0},{1}, growing by {2}b".format(cx, cz, len(data)))
            with self.file as f:
                f.seek(0, 2)
                filesize = f.tell()
                sectorNumber = len(self.freeSectors)
                assert sectorNumber * self.SECTOR_BYTES == filesize
                filesize += sectorsNeeded * self.SECTOR_BYTES
                f.truncate(filesize)
            self.freeSectors += [False] * sectorsNeeded
            self.setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
            self.writeSector(sectorNumber, data, format)
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def containsChunk(self, cx, cz):
    """True when an offset-table entry exists for chunk (cx, cz)."""
    offset = self.getOffset(cx, cz)
    return offset != 0
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def setOffset(self, cx, cz, offset):
    """Store the packed (sector << 8 | count) offset for chunk (cx, cz) in the
    in-memory table and rewrite the whole offset table at the start of the file.

    Fix vs. original: ``tobytes()`` replaces ``tostring()``, which is
    deprecated and removed in modern NumPy; the written bytes are identical.
    """
    cx &= 0x1f
    cz &= 0x1f
    self.offsets[cx + cz * 32] = offset
    with self.file as f:
        f.seek(0)
        f.write(self.offsets.tobytes())
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def allChunks(self):
    """List of (cx, cz) coordinates for every chunk present in the chunk file."""
    return [coords for coords in self.chunkFile.chunkCoords()]
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def getChunk(self, cx, cz):
    """Return the chunk at (cx, cz), loading it through the chunk file and
    caching it on first access.

    Raises ChunkNotPresent when either coordinate is outside 0..31.
    """
    if not (0 <= cx <= 31 and 0 <= cz <= 31):
        raise ChunkNotPresent((cx, cz, self))
    cached = self._loadedChunks.get((cx, cz))
    if cached is not None:
        return cached
    chunk = self.chunkFile.loadChunk(cx, cz, self)
    self._loadedChunks[cx, cz] = chunk
    return chunk
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def _isLevel(cls, filename):
    """Return True when *filename* points at (or directly into) a world folder
    containing both chunks.dat and level.dat."""
    required = ("chunks.dat", "level.dat")
    if not os.path.isdir(filename):
        # a direct path to either required file is accepted; check its folder
        if os.path.basename(filename) not in required:
            return False
        filename = os.path.dirname(filename)
    return all(os.path.exists(os.path.join(filename, name)) for name in required)
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def containsChunk(self, cx, cz):
    """True when (cx, cz) lies inside the 32x32 grid and has chunk data."""
    inside = 0 <= cx <= 31 and 0 <= cz <= 31
    if not inside:
        return False
    return self.chunkFile.getOffset(cx, cz) != 0
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def chunksNeedingLighting(self):
    """Yield the (cx, cz) position of every loaded chunk flagged for relighting.

    Fix vs. original: ``.values()`` replaces the Python-2-only
    ``.itervalues()``; the iterated chunks and results are the same.
    """
    for chunk in self._loadedChunks.values():
        if chunk.needsLighting:
            yield chunk.chunkPosition
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def __init__(self, cx, cz, data, world):
    """Slice the raw pocket chunk byte blob into its per-field arrays, then
    unpack and reshape them."""
    self.chunkPosition = (cx, cz)
    self.world = world
    raw = fromstring(data, dtype='uint8')
    # consume the blob front to back; field sizes are fixed by the format
    cursor = 0
    for name, size in (('Blocks', 32768), ('Data', 16384),
                       ('SkyLight', 16384), ('BlockLight', 16384)):
        setattr(self, name, raw[cursor:cursor + size])
        cursor += size
    self.DirtyColumns = raw[cursor:cursor + 256]

    self.unpackChunkData()
    self.shapeChunkData()
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def shapeChunkData(self):
    """Reshape the flat per-field arrays in place to (16, 16, world height);
    DirtyColumns becomes a 16x16 grid."""
    side = 16
    height = self.world.Height
    # in-place .shape assignment (not reshape) so the arrays stay views
    for name in ('Blocks', 'SkyLight', 'BlockLight', 'Data'):
        getattr(self, name).shape = (side, side, height)
    self.DirtyColumns.shape = side, side
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def packData(dataArray):
    """Pack pairs of 4-bit values along the height axis into single bytes.

    NOTE(review): reads ``self`` as a free variable, so this is a nested
    helper inside a method — only callable from that scope.

    Fix vs. original: ``//`` instead of ``/`` so the reshape dimension stays
    an int under true division (identical result in Python 2).
    """
    assert dataArray.shape[2] == self.world.Height
    # pair consecutive height entries: (..., even, odd) -> one byte
    data = array(dataArray).reshape(16, 16, self.world.Height // 2, 2)
    data[..., 1] <<= 4
    data[..., 1] |= data[..., 0]
    return array(data[:, :, :, 1])
Khroki/MCEdit-Unified
[ 467, 109, 467, 116, 1410734091 ]
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories, only_in_packages=True, show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.

    The dictionary looks like::

        {'package': [files]}

    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.

    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).

    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.

    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).

    Note patterns use wildcards, or can be exact paths (including
    leading ``./``), and all searching is case-insensitive.

    Fix vs. original: the Python-2-only ``print >> sys.stderr`` statements
    are replaced with ``sys.stderr.write`` (same messages, portable).
    """
    out = {}
    # breadth-first walk; each entry: (dir, path prefix, package, restrict flag)
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            sys.stderr.write(
                                'Directory %s ignored by pattern %s\n'
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                if (os.path.isfile(os.path.join(fn, '__init__.py'))
                        and not prefix):
                    # a new package: descend with a fresh prefix
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    # plain data directory: extend the prefix
                    stack.append(
                        (fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            sys.stderr.write(
                                'File %s ignored by pattern %s\n'
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out
tbarbugli/django_email_multibackend
[ 16, 10, 16, 1, 1366216440 ]
def main():
    """Load a .config and print every user-settable symbol that has no user
    value, optionally with its help text."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__)
    parser.add_argument(
        "--show-help", "-l",
        action="store_true",
        help="Show any help texts as well")
    parser.add_argument(
        "kconfig",
        metavar="KCONFIG",
        nargs="?",
        default="Kconfig",
        help="Top-level Kconfig file (default: Kconfig)")
    args = parser.parse_args()

    kconf = Kconfig(args.kconfig, suppress_traceback=True)
    # Make it possible to filter this message out
    print(kconf.load_config(), file=sys.stderr)

    for sym in kconf.unique_defined_syms:
        if sym.user_value is not None:
            continue
        # Only show symbols that can be toggled. Choice symbols are a special
        # case in that sym.assignable will be (2,) (length 1) for visible
        # symbols in choices in y mode, but they can still be toggled by
        # selecting some other symbol.
        toggleable = len(sym.assignable) > 1 or \
            (sym.visibility and
             (sym.orig_type in (INT, HEX, STRING) or sym.choice))
        if not toggleable:
            continue

        # Don't reuse the 'config_string' format for bool/tristate symbols,
        # to show n-valued symbols as 'CONFIG_FOO=n' instead of
        # '# CONFIG_FOO is not set'. This matches the C tools.
        if sym.orig_type in (BOOL, TRISTATE):
            line = "{}{}={}\n".format(
                kconf.config_prefix, sym.name, TRI_TO_STR[sym.tri_value])
        else:
            line = sym.config_string
        print(line, end="")

        if not args.show_help:
            continue
        for node in sym.nodes:
            if node.help is not None:
                # Indent by two spaces. textwrap.indent() is not
                # available in Python 2 (it's 3.3+).
                print("\n".join("  " + hline
                                for hline in node.help.split("\n")))
                break
ulfalizer/Kconfiglib
[ 362, 141, 362, 29, 1341946757 ]
def __init__(self, session, request, cache):
    """Keep the web session/request/cache handles and bootstrap the logic module."""
    self.session, self.request, self.cache = session, request, cache
    # required for GraphUtils access to access points
    Logic.factory('vanilla')
theonlydude/RandomMetroidSolver
[ 33, 22, 33, 11, 1508516621 ]
def __init__(self, title=None, **kwargs):
    """Store the optional title and copy recognised keyword options onto self."""
    if title is not None:
        self.title = title
    # only accept options that already exist as class-level attributes
    for key, value in kwargs.items():
        if hasattr(self.__class__, key):
            setattr(self, key, value)
    if not self.children:
        self.children = []
    if not self.css_classes:
        self.css_classes = []
    # boolean flag to ensure that the module is initialized only once
    self._initialized = False
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def init_with_context(self, context):
    """Show the session-stored page history and push the current page onto it."""
    request = context['request']
    # the visited-pages stack lives in the session
    history = request.session.get('history', [])
    self.children.extend(history)
    # record the current page, newest first
    history.insert(0, {
        'title': context['title'],
        'url': request.META['PATH_INFO']
    })
    # keep only the ten most recent entries
    if len(history) > 10:
        history = history[:10]
    request.session['history'] = history
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def is_empty(self):
    """
    Return True if the module has no content and False otherwise.

    A module is empty when both ``pre_content`` and ``post_content`` are
    unset (``None``) and it has no children.

    >>> mod = DashboardModule()
    >>> mod.is_empty()
    True
    >>> mod.pre_content = 'foo'
    >>> mod.is_empty()
    False
    """
    if self.pre_content is not None:
        return False
    if self.post_content is not None:
        return False
    return len(self.children) == 0
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def _prepare_children(self):
    """Hook for container modules to prepare their children; no-op here."""
    return
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def __init__(self, **kwargs):
    """Dashboard with a single tabbed group holding two app lists."""
    Dashboard.__init__(self, **kwargs)
    tabs = modules.Group(
        title="My group",
        display="tabs",
        children=[
            modules.AppList(
                title='Administration',
                models=('django.contrib.*',)
            ),
            modules.AppList(
                title='Applications',
                exclude=('django.contrib.*',)
            ),
        ],
    )
    self.children.append(tabs)
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def init_with_context(self, context):
    """Initialise every child module once, applying group-wide restrictions."""
    if self._initialized:
        return
    for module in self.children:
        # to simplify the whole stuff, modules have some limitations,
        # they cannot be dragged, collapsed or closed
        for attr in ('collapsible', 'draggable', 'deletable'):
            setattr(module, attr, False)
        if self.force_show_title:
            # titles only make sense when modules are stacked
            module.show_title = (self.display == 'stacked')
        module.init_with_context(context)
    self._initialized = True
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def _prepare_children(self):
    """Assign each child a unique id prefixed with this group's id, then
    recurse into the children."""
    seen = set()
    for index, module in enumerate(self.children):
        # fall back to the 1-based position when the child has no id yet
        fallback = module.id or index + 1
        module.id = uniquify("%s_%s" % (self.id, fallback), seen)
        module._prepare_children()
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def __init__(self, **kwargs):
    """Example dashboard showing the three accepted link entry formats."""
    Dashboard.__init__(self, **kwargs)
    links = (
        {
            'title': 'Python website',
            'url': 'http://www.python.org',
            'external': True,
            'description': 'Python programming language rocks !',
        },
        ['Django website', 'http://www.djangoproject.com', True],
        ['Some internal link', '/some/internal/link/'],
    )
    self.children.append(modules.LinkList(layout='inline', children=links))
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def init_with_context(self, context):
    """Normalise tuple/list link entries into dicts; dict entries pass through."""
    if self._initialized:
        return
    normalised = []
    for link in self.children:
        if not isinstance(link, (tuple, list,)):
            normalised.append(link)
            continue
        # positional format: [title, url, external?, description?]
        entry = {'title': link[0], 'url': link[1]}
        if len(link) >= 3:
            entry['external'] = link[2]
        if len(link) >= 4:
            entry['description'] = link[3]
        normalised.append(entry)
    self.children = normalised
    self._initialized = True
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]
def __init__(self, **kwargs):
    """Dashboard splitting the installed apps into contrib vs. everything else."""
    Dashboard.__init__(self, **kwargs)
    # will only list the django.contrib apps
    contrib = modules.AppList(
        title='Administration',
        models=('django.contrib.*',)
    )
    # will list all apps except the django.contrib ones
    other = modules.AppList(
        title='Applications',
        exclude=('django.contrib.*',)
    )
    self.children.append(contrib)
    self.children.append(other)
liberation/django-admin-tools
[ 2, 2, 2, 3, 1371719510 ]