function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def _pyro_noncentered_model(J, sigma, y=None): import pyro import pyro.distributions as dist
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def pyro_noncentered_schools(data, draws, chains): """Non-centered eight schools implementation in Pyro.""" import torch from pyro.infer import MCMC, NUTS
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def _numpyro_noncentered_model(J, sigma, y=None): import numpyro import numpyro.distributions as dist
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def numpyro_schools_model(data, draws, chains): """Centered eight schools implementation in NumPyro.""" from jax.random import PRNGKey from numpyro.infer import MCMC, NUTS
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def pystan_noncentered_schools(data, draws, chains): """Non-centered eight schools implementation for pystan.""" schools_code = """ data { int<lower=0> J; real y[J]; real<lower=0> sigma[J]; }
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def pymc3_noncentered_schools(data, draws, chains): """Non-centered eight schools implementation for pymc3.""" import pymc3 as pm
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def library_handle(library): """Import a library and return the handle.""" if library == "pystan": try: module = importlib.import_module("pystan") except ImportError: module = importlib.import_module("stan") else: module = importlib.import_module(library) return module
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def load_cached_models(eight_schools_data, draws, chains, libs=None): """Load pymc3, pystan, emcee, and pyro models from pickle.""" here = os.path.dirname(os.path.abspath(__file__)) supported = ( ("pystan", pystan_noncentered_schools), ("pymc3", pymc3_noncentered_schools), ("emcee", emcee_schools_model), ("pyro", pyro_noncentered_schools), ("numpyro", numpyro_schools_model), ) data_directory = os.path.join(here, "saved_models") models = {}
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def pystan_version(): """Check PyStan version.
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def test_precompile_models(eight_schools_params, draws, chains): """Precompile model files.""" load_cached_models(eight_schools_params, draws, chains)
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def running_on_ci() -> bool: """Return True if running on CI machine.""" return os.environ.get("ARVIZ_CI_MACHINE") is not None
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def importorskip( modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
arviz-devs/arviz
[ 1351, 324, 1351, 165, 1438170670 ]
def minPathSum(self, grid: List[List[int]]) -> int: row = len(grid) col = len(grid[0]) dp = [[0]*col for i in range(row)] minPath = 0 return self.findPath(grid, row-1, col-1, dp)
saisankargochhayat/algo_quest
[ 2, 1, 2, 1, 1473454289 ]
def process_nbest(fread, fwrite): nEmptySentNum = 0 with open(fread, 'rt') as f1, open(fwrite, 'wt') as f2: for a in [line.split() for line in f1]: if len(a) == 1: nEmptySentNum += 1 a.append('<UNK>') f2.write(' '.join(a) + '\n') print('[nbest] empty sentence num = {}'.format(nEmptySentNum))
wbengine/SPMILM
[ 18, 10, 18, 1, 1470972882 ]
def __init__(self, x_dim: float, y_dim: float, tunneling: float, coulomb: float, periodic: bool=True, iterations: int=1, adiabatic_evolution_time: Optional[float]=None, qubits: Optional[Sequence[cirq.Qid]]=None ) -> None: """ Args: iterations: The number of iterations of the basic template to include in the circuit. The number of parameters grows linearly with this value. adiabatic_evolution_time: The time scale for Hamiltonian evolution used to determine the default initial parameters of the ansatz. This is the value A from the docstring of this class. If not specified, defaults to the sum of the absolute values of the entries of the two-body tensor of the Hamiltonian. qubits: Qubits to be used by the ansatz circuit. If not specified, then qubits will automatically be generated by the `_generate_qubits` method. """ self.x_dim = x_dim self.y_dim = y_dim self.tunneling = tunneling self.coulomb = coulomb self.periodic = periodic self.iterations = iterations if adiabatic_evolution_time is None: adiabatic_evolution_time = 0.1*abs(coulomb)*iterations self.adiabatic_evolution_time = cast(float, adiabatic_evolution_time) super().__init__(qubits)
quantumlib/OpenFermion-Cirq
[ 265, 89, 265, 31, 1521507533 ]
def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]: """Bounds on the parameters.""" bounds = [] for param in self.params(): s = 1.0 if param.letter == 'V' else 2.0 bounds.append((-s, s)) return bounds
quantumlib/OpenFermion-Cirq
[ 265, 89, 265, 31, 1521507533 ]
def operations(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE: """Produce the operations of the ansatz circuit.""" for i in range(self.iterations): # Apply one- and two-body interactions with a swap network that # reverses the order of the modes def one_and_two_body_interaction(p, q, a, b) -> cirq.OP_TREE: th_symbol = LetterWithSubscripts('Th', i) tv_symbol = LetterWithSubscripts('Tv', i) v_symbol = LetterWithSubscripts('V', i) if _is_horizontal_edge( p, q, self.x_dim, self.y_dim, self.periodic): yield cirq.ISwapPowGate(exponent=-th_symbol).on(a, b) if _is_vertical_edge( p, q, self.x_dim, self.y_dim, self.periodic): yield cirq.ISwapPowGate(exponent=-tv_symbol).on(a, b) if _are_same_site_opposite_spin(p, q, self.x_dim*self.y_dim): yield cirq.CZPowGate(exponent=v_symbol).on(a, b) yield swap_network( qubits, one_and_two_body_interaction, fermionic=True) qubits = qubits[::-1] # Apply one- and two-body interactions again. This time, reorder # them so that the entire iteration is symmetric def one_and_two_body_interaction_reversed_order(p, q, a, b ) -> cirq.OP_TREE: th_symbol = LetterWithSubscripts('Th', i) tv_symbol = LetterWithSubscripts('Tv', i) v_symbol = LetterWithSubscripts('V', i) if _are_same_site_opposite_spin(p, q, self.x_dim*self.y_dim): yield cirq.CZPowGate(exponent=v_symbol).on(a, b) if _is_vertical_edge( p, q, self.x_dim, self.y_dim, self.periodic): yield cirq.ISwapPowGate(exponent=-tv_symbol).on(a, b) if _is_horizontal_edge( p, q, self.x_dim, self.y_dim, self.periodic): yield cirq.ISwapPowGate(exponent=-th_symbol).on(a, b) yield swap_network( qubits, one_and_two_body_interaction_reversed_order, fermionic=True, offset=True) qubits = qubits[::-1]
quantumlib/OpenFermion-Cirq
[ 265, 89, 265, 31, 1521507533 ]
def _is_horizontal_edge(p, q, x_dim, y_dim, periodic): n_sites = x_dim*y_dim if p < n_sites and q >= n_sites or q < n_sites and p >= n_sites: return False if p >= n_sites and q >= n_sites: p -= n_sites q -= n_sites return (q == _right_neighbor(p, x_dim, y_dim, periodic) or p == _right_neighbor(q, x_dim, y_dim, periodic))
quantumlib/OpenFermion-Cirq
[ 265, 89, 265, 31, 1521507533 ]
def _are_same_site_opposite_spin(p, q, n_sites): return abs(p-q) == n_sites
quantumlib/OpenFermion-Cirq
[ 265, 89, 265, 31, 1521507533 ]
def _bottom_neighbor(site, x_dimension, y_dimension, periodic): if y_dimension == 1: return None if site + x_dimension + 1 > x_dimension*y_dimension: if periodic: return site + x_dimension - x_dimension*y_dimension else: return None return site + x_dimension
quantumlib/OpenFermion-Cirq
[ 265, 89, 265, 31, 1521507533 ]
def setUp(self): super().setUp() self._connection_config = metadata_store_pb2.ConnectionConfig() self._connection_config.sqlite.SetInParent() self._metadata = self.enter_context( metadata.Metadata(connection_config=self._connection_config)) self._store = self._metadata.store
tensorflow/tfx
[ 1905, 649, 1905, 157, 1549300476 ]
def testStrategy(self): # Model with id 1, will be blessed. model_one = standard_artifacts.Model() model_one.uri = 'model_one' model_one.id = 1 # Model with id 2, will be blessed. model_two = standard_artifacts.Model() model_two.uri = 'model_two' model_two.id = 2 # Model with id 3, will not be blessed. model_three = standard_artifacts.Model() model_three.uri = 'model_three' model_three.id = 3 model_blessing_one = standard_artifacts.ModelBlessing() self._set_model_blessing_bit(model_blessing_one, model_one.id, 1) model_blessing_two = standard_artifacts.ModelBlessing() self._set_model_blessing_bit(model_blessing_two, model_two.id, 1) strategy = latest_blessed_model_strategy.LatestBlessedModelStrategy() result = strategy.resolve_artifacts( self._store, { 'model': [model_one, model_two, model_three], 'model_blessing': [model_blessing_one, model_blessing_two] }) self.assertIsNotNone(result) self.assertEqual([a.uri for a in result['model']], ['model_two'])
tensorflow/tfx
[ 1905, 649, 1905, 157, 1549300476 ]
def format_raw(self, location, indent="", embedded=True, indirect_attrs=True): details = [indent + "{0:c}: {0.name}".format(location)] if location.fullname: details.append(indent + " Fullname: {}".format(location.fullname)) if hasattr(location, 'timezone'): details.append(indent + " Timezone: {}".format(location.timezone)) # Rack could have been a separate formatter, but since this is # the only difference... if isinstance(location, Rack): details.append(indent + " Row: {}".format(location.rack_row)) details.append(indent + " Column: {}".format(location.rack_column)) elif isinstance(location, Building): details.append(indent + " Address: {}".format(location.address)) details.append(indent + " Next Rack ID: {}".format(location.next_rackid)) details.append(indent + " Network Devices Require Racks: {}".format(location.netdev_rack)) elif isinstance(location, Room) and location.floor: details.append(indent + " Floor: {}".format(location.floor)) if location.uri: details.append(indent + " Location URI: {}".format(location.uri)) if location.comments: details.append(indent + " Comments: {}".format(location.comments)) if location.parents: details.append(indent + " Location Parents: [{}]".format(", ".join(format(p) for p in location.parents))) if location.default_dns_domain: details.append(indent + " Default DNS Domain: {0.name}".format(location.default_dns_domain)) return "\n".join(details)
quattor/aquilon
[ 12, 16, 12, 38, 1361797498 ]
def csv_fields(self, location): """Yield a CSV-ready list of selected attribute values for location.""" # Columns 0 and 1 details = [location.location_type, location.name] # Columns 2 and 3 if location.parent: details.append(location.parent.location_type) details.append(location.parent.name) else: details.extend([None, None]) # Columns 4 and 5 if isinstance(location, Rack): details.append(location.rack_row) details.append(location.rack_column) else: details.extend([None, None]) # Column 6 if hasattr(location, 'timezone'): details.append(location.timezone) else: details.append(None) # Column 7 details.append(location.fullname) # Column 8 if location.default_dns_domain: details.append(location.default_dns_domain) else: details.append(None) yield details
quattor/aquilon
[ 12, 16, 12, 38, 1361797498 ]
def on_init(self): pass
scionrep/scioncc
[ 3, 10, 3, 1, 1435685091 ]
def on_quit(self): pass
scionrep/scioncc
[ 3, 10, 3, 1, 1435685091 ]
def extractNotsogoodtranslatorWordpressCom(item): ''' Parser for 'notsogoodtranslator.wordpress.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
fake-name/ReadableWebProxy
[ 191, 16, 191, 3, 1437712243 ]
def extractDhragonisslytherinWordpressCom(item): ''' Parser for 'dhragonisslytherin.wordpress.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
fake-name/ReadableWebProxy
[ 191, 16, 191, 3, 1437712243 ]
def session_type(): if 'IPython' not in sys.modules: # IPython hasn't been imported, definitely not return "python" from IPython import get_ipython # check for `kernel` attribute on the IPython instance if getattr(get_ipython(), 'kernel', None) is not None: return "kernel" return "ipython"
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def get_relationship_variable_id(path): _, r = path[0] child_link_name = r.child_variable.id for _, r in path[1:]: parent_link_name = child_link_name child_link_name = '%s.%s' % (r.parent_entity.id, parent_link_name) return child_link_name
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def check_schema_version(cls, cls_type): if isinstance(cls_type, str): if cls_type == 'entityset': from featuretools.entityset.serialize import SCHEMA_VERSION version_string = cls.get('schema_version') elif cls_type == 'features': from featuretools.feature_base.features_serializer import SCHEMA_VERSION version_string = cls.features_dict['schema_version'] current = SCHEMA_VERSION.split('.') saved = version_string.split('.') warning_text_upgrade = ('The schema version of the saved %s' '(%s) is greater than the latest supported (%s). ' 'You may need to upgrade featuretools. Attempting to load %s ...' % (cls_type, version_string, SCHEMA_VERSION, cls_type)) for c_num, s_num in zip_longest(current, saved, fillvalue=0): if c_num > s_num: break elif c_num < s_num: warnings.warn(warning_text_upgrade) break warning_text_outdated = ('The schema version of the saved %s' '(%s) is no longer supported by this version' 'of featuretools. Attempting to load %s ...' % (cls_type, version_string, cls_type)) # Check if saved has older major version. if current[0] > saved[0]: warnings.warn(warning_text_outdated)
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def use_s3fs_es(file_path, path, read=True): s3 = s3fs.S3FileSystem(anon=True) if read: s3.get(path, file_path) else: s3.put(file_path, path)
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def __init__(self, num_filters, filter_length, **kwargs): self.num_filters = num_filters self.filter_length = filter_length super(Conv1D, self).__init__(**kwargs)
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def _initialize(self): self.weights_init.initialize(self.parameters[0], self.rng)
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def __init__(self, match_dim, conv_n, conv_num_filters=1, state_transformer=None, attended_transformer=None, energy_computer=None, prior=None, energy_normalizer=None, **kwargs): super(SequenceContentAndConvAttention, self).__init__(**kwargs) if not state_transformer: state_transformer = Linear(use_bias=False) self.match_dim = match_dim self.state_transformer = state_transformer self.state_transformers = Parallel(input_names=self.state_names, prototype=state_transformer, name="state_trans") if not attended_transformer: # Only this contributor to the match vector # is allowed to have biases attended_transformer = Linear(name="preprocess") if not energy_normalizer: energy_normalizer = 'softmax' self.energy_normalizer = energy_normalizer if not energy_computer: energy_computer = ShallowEnergyComputer( name="energy_comp", use_bias=self.energy_normalizer != 'softmax') self.filter_handler = Linear(name="handler", use_bias=False) self.attended_transformer = attended_transformer self.energy_computer = energy_computer if not prior: prior = dict(type='expanding', initial_begin=0, initial_end=10000, min_speed=0, max_speed=0) self.prior = prior self.conv_n = conv_n self.conv_num_filters = conv_num_filters self.conv = Conv1D(conv_num_filters, 2 * conv_n + 1) self.children = [self.state_transformers, self.attended_transformer, self.energy_computer, self.filter_handler, self.conv]
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def compute_energies(self, attended, preprocessed_attended, previous_weights, states): if not preprocessed_attended: preprocessed_attended = self.preprocess(attended) transformed_states = self.state_transformers.apply(as_dict=True, **states) # Broadcasting of transformed states should be done automatically match_vectors = sum(transformed_states.values(), preprocessed_attended) conv_result = self.conv.apply(previous_weights) match_vectors += self.filter_handler.apply( conv_result[:, :, self.conv_n:-self.conv_n] .dimshuffle(0, 2, 1)).dimshuffle(1, 0, 2) energies = self.energy_computer.apply(match_vectors).reshape( match_vectors.shape[:-1], ndim=match_vectors.ndim - 1) return energies
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def mask_row(offset, length, empty_row): return tensor.set_subtensor(empty_row[offset:offset+length], 1)
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def take_glimpses(self, attended, preprocessed_attended=None, attended_mask=None, weights=None, step=None, **states): # Cut the considered window. p = self.prior length = attended.shape[0] prior_type = p.get('type', 'expanding') if prior_type=='expanding': begin = p['initial_begin'] + step[0] * p['min_speed'] end = p['initial_end'] + step[0] * p['max_speed'] begin = tensor.maximum(0, tensor.minimum(length - 1, begin)) end = tensor.maximum(0, tensor.minimum(length, end)) additional_mask = None elif prior_type.startswith('window_around'): #check whether we want the mean or median! if prior_type == 'window_around_mean': position_in_attended = tensor.arange(length, dtype=floatX)[None, :] expected_last_source_pos = (weights * position_in_attended).sum(axis=1) elif prior_type == 'window_around_median': ali_to_05 = tensor.extra_ops.cumsum(weights, axis=1) - 0.5 ali_to_05 = (ali_to_05>=0) ali_median_pos = ali_to_05[:,1:] - ali_to_05[:,:-1] expected_last_source_pos = tensor.argmax(ali_median_pos, axis=1) expected_last_source_pos = theano.gradient.disconnected_grad( expected_last_source_pos) else: raise ValueError #the window taken around each element begins = tensor.floor(expected_last_source_pos - p['before']) ends = tensor.ceil(expected_last_source_pos + p['after']) #the global window to optimize computations begin = tensor.maximum(0, begins.min()).astype('int64') end = tensor.minimum(length, ends.max()).astype('int64') #the new mask, already cut to begin:end position_in_attended_cut = tensor.arange( begin * 1., end * 1., 1., dtype=floatX)[None, :] additional_mask = ((position_in_attended_cut > begins[:,None]) * (position_in_attended_cut < ends[:,None])) else: raise Exception("Unknown prior type: %s", prior_type) begin = tensor.floor(begin).astype('int64') end = tensor.ceil(end).astype('int64') attended_cut = attended[begin:end] preprocessed_attended_cut = (preprocessed_attended[begin:end] if preprocessed_attended else None) attended_mask_cut = ( (attended_mask[begin:end] 
if attended_mask else None) * (additional_mask.T if additional_mask else 1)) weights_cut = weights[:, begin:end] # Call energies_cut = self.compute_energies(attended_cut, preprocessed_attended_cut, weights_cut, states) weights_cut = self.compute_weights(energies_cut, attended_mask_cut) weighted_averages = self.compute_weighted_averages(weights_cut, attended_cut) # Paste new_weights = new_energies = tensor.zeros_like(weights.T) new_weights = tensor.set_subtensor(new_weights[begin:end], weights_cut) new_energies = tensor.set_subtensor(new_energies[begin:end], energies_cut) return weighted_averages, new_weights.T, new_energies.T, step + 1
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def take_glimpses_inputs(self): return (['attended', 'preprocessed_attended', 'attended_mask', 'weights', 'step'] + self.state_names)
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def compute_weights(self, energies, attended_mask): if self.energy_normalizer == 'softmax': logger.debug("Using softmax attention weights normalization") energies = energies - energies.max(axis=0) unnormalized_weights = tensor.exp(energies) elif self.energy_normalizer == 'logistic': logger.debug("Using smoothfocus (logistic sigm) " "attention weights normalization") unnormalized_weights = tensor.nnet.sigmoid(energies) elif self.energy_normalizer == 'relu': logger.debug("Using ReLU attention weights normalization") unnormalized_weights = tensor.maximum(energies/1000., 0.0) else: raise Exception("Unknown energey_normalizer: {}" .format(self.energy_computer)) if attended_mask: unnormalized_weights *= attended_mask # If mask consists of all zeros use 1 as the normalization coefficient normalization = (unnormalized_weights.sum(axis=0) + tensor.all(1 - attended_mask, axis=0)) return unnormalized_weights / normalization
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def initial_glimpses(self, batch_size, attended): return ([tensor.zeros((batch_size, self.attended_dim))] + 2 * [tensor.concatenate([ tensor.ones((batch_size, 1)), tensor.zeros((batch_size, attended.shape[0] - 1))], axis=1)] + [tensor.zeros((batch_size,), dtype='int64')])
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def initial_glimpses_outputs(self): return ['weight_averages', 'weights', 'energies', 'step']
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def preprocess(self, attended): return self.attended_transformer.apply(attended)
rizar/attention-lvcsr
[ 259, 103, 259, 11, 1443211188 ]
def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str """ return self["color"]
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def color(self, val): self["color"] = val
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman".
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def family(self, val): self["family"] = val
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"]
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def size(self, val): self["size"] = val
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". size """
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def create(kernel): result = Intangible() result.template = "object/draft_schematic/food/shared_drink_aludium_pu36.iff" result.attribute_template_id = -1 result.stfName("string_id_table","")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def create(kernel): result = Tangible() result.template = "object/tangible/deed/event_perk/shared_fire_pit_deed.iff" result.attribute_template_id = 2 result.stfName("event_perk","fire_pit_name")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def from_election_to_elections(apps, schema_editor): # We can't import the Person model directly as it may be a newer # version than this migration expects. We use the historical version. Election = apps.get_model("elections", "Election") Candidate = apps.get_model("elections", "Candidate") for candidate in Candidate.objects.all(): candidate.elections.add(candidate.election)
ciudadanointeligente/votainteligente-portal-electoral
[ 43, 34, 43, 122, 1375904659 ]
def addTemplate(core): mobileTemplate = MobileTemplate()
ProjectSWGCore/NGECore2
[ 23, 70, 23, 56, 1372673790 ]
def InfSourceParser(self, SectionString, InfSectionObject, FileName): SectionMacros = {} ValueList = [] SourceList = [] StillCommentFalg = False HeaderComments = [] LineComment = None SectionContent = '' for Line in SectionString: SrcLineContent = Line[0] SrcLineNo = Line[1]
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def __init__(self, config_entry: config_entries.ConfigEntry) -> None: """Initialize google options flow.""" self.config_entry = config_entry
home-assistant/home-assistant
[ 58698, 22318, 58698, 2794, 1379402988 ]
def async_get_options_flow( config_entry: config_entries.ConfigEntry,
home-assistant/home-assistant
[ 58698, 22318, 58698, 2794, 1379402988 ]
def ControllerAgentClockSync(issue_ts, name): """Record the clock sync marker for controller tracing agent. Unlike with the other tracing agents, the tracing controller should not call this directly. Rather, it is called via callback from the other tracing agents when they write a trace. """ trace_event.clock_sync(name, issue_ts=issue_ts)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def __init__(self): # TODO(https://crbug.com/1262296): Update this after Python2 trybots retire. # pylint: disable=super-with-arguments super(TracingControllerAgent, self).__init__() self._log_path = None
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def StartAgentTracing(self, config, timeout=None): """Start tracing for the controller tracing agent. Start tracing for the controller tracing agent. Note that the tracing controller records the "controller side" of the clock sync records, and nothing else. """ del config if not trace_event.trace_can_enable(): raise RuntimeError('Cannot enable trace_event;' ' ensure py_utils is in PYTHONPATH') controller_log_file = tempfile.NamedTemporaryFile(delete=False) self._log_path = controller_log_file.name controller_log_file.close() trace_event.trace_enable(self._log_path) return True
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def StopAgentTracing(self, timeout=None): """Stops tracing for the controller tracing agent. """ # pylint: disable=no-self-use # This function doesn't use self, but making it a member function # for consistency with the other TracingAgents trace_event.trace_disable() return True
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def GetResults(self, timeout=None): """Gets the log output from the controller tracing agent. This output only contains the "controller side" of the clock sync records. """ with open(self._log_path, 'r') as outfile: data = ast.literal_eval(outfile.read() + ']') # Explicitly set its own clock domain. This will stop the Systrace clock # domain from incorrectly being collapsed into the on device clock domain. formatted_data = { 'traceEvents': data, 'metadata': { 'clock-domain': 'SYSTRACE', } } return trace_result.TraceResult(TRACE_DATA_CONTROLLER_NAME, json.dumps(formatted_data))
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def RecordClockSyncMarker(self, sync_id, callback): raise NotImplementedError
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def __init__(self, agents_with_config, controller_config): """Create tracing controller. Create a tracing controller object. Note that the tracing controller is also a tracing agent. Args: agents_with_config: List of tracing agents for this controller with the corresponding tracing configuration objects. controller_config: Configuration options for the tracing controller. """ self._child_agents = None self._child_agents_with_config = agents_with_config self._controller_agent = TracingControllerAgent() self._controller_config = controller_config self._trace_in_progress = False self.all_results = None
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def get_child_agents(self): return self._child_agents
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def StopTracing(self):
  """Issue clock sync marker and stop tracing for all tracing agents.

  This function stops both the controller tracing agent and the child
  tracing agents. It issues a clock sync marker prior to stopping tracing.

  Returns:
    Boolean indicating whether or not the stop tracing succeeded
    for all agents.
  """
  assert self._trace_in_progress, 'No trace in progress.'
  self._trace_in_progress = False

  # Issue the clock sync marker and stop the child tracing agents.
  self._IssueClockSyncMarker()
  succ_agents = []
  for agent in self._child_agents:
    if agent.StopAgentTracing(timeout=self._controller_config.timeout):
      succ_agents.append(agent)
    else:
      print('Agent %s not stopped.' % str(agent))

  # Stop the controller tracing agent. Controller tracing agent
  # must be stopped successfully to proceed.
  if not self._controller_agent.StopAgentTracing(
      timeout=self._controller_config.timeout):
    print('Unable to stop controller tracing agent.')
    return False

  # Print warning if all agents not stopped.
  na = len(self._child_agents)
  ns = len(succ_agents)
  if ns < na:
    print('Warning: Only %d of %d tracing agents stopped.' % (ns, na))
    # Only successfully-stopped agents are kept for result collection.
    self._child_agents = succ_agents

  # Collect the results from all the stopped tracing agents.
  all_results = []
  for agent in self._child_agents + [self._controller_agent]:
    try:
      result = agent.GetResults(
          timeout=self._controller_config.collection_timeout)
      if not result:
        print('Warning: Timeout when getting results from %s.' % str(agent))
        continue
      if result.source_name in [r.source_name for r in all_results]:
        print ('Warning: Duplicate tracing agents named %s.' %
               result.source_name)
      all_results.append(result)
    # Check for exceptions. If any exceptions are seen, reraise and abort.
    # Note that a timeout exception will be swallowed by the timeout
    # mechanism and will not get to that point (it will return False instead
    # of the trace result, which will be dealt with above)
    except:
      print('Warning: Exception getting results from %s:' % str(agent))
      print('Try checking android device storage permissions for chrome')
      print(sys.exc_info()[0])
      raise
  self.all_results = all_results
  return all_results
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def _IssueClockSyncMarker(self):
  """Record a clock sync marker on every child agent that supports it."""
  for agent in self._child_agents:
    if not agent.SupportsExplicitClockSync():
      continue
    agent.RecordClockSyncMarker(GetUniqueSyncID(), ControllerAgentClockSync)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def __init__(self, agent, config):
  """Pair a tracing agent with its tracing configuration."""
  self.config = config
  self.agent = agent
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def __init__(self, output_file, trace_time, write_json,
             link_assets, asset_dir, timeout, collection_timeout,
             device_serial_number, target, trace_buf_size):
  """Store the tracing-controller configuration options.

  All arguments are stored unmodified on attributes of the same name.
  """
  tracing_agents.TracingConfig.__init__(self)
  self.output_file = output_file
  self.trace_time = trace_time
  self.write_json = write_json
  self.link_assets = link_assets
  self.asset_dir = asset_dir
  self.timeout = timeout
  self.collection_timeout = collection_timeout
  self.device_serial_number = device_serial_number
  self.target = target
  self.trace_buf_size = trace_buf_size
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def get_challenge_for_url(url):
    """ Gets the challenge cached for the given URL, or None if absent.
    :param url: the URL the challenge is cached for.
    :rtype: HttpBearerChallenge """
    if not url:
        raise ValueError("URL cannot be None")

    cache_key = _get_cache_key(url)
    with _lock:
        challenge = _cache.get(cache_key)
    return challenge
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def remove_challenge_for_url(url):
    """ Removes the cached challenge for the specified URL.

    Removal is idempotent: a URL with no cached challenge is a no-op
    instead of raising KeyError.

    :param url: the URL for which to remove the cached challenge
    :raises ValueError: if url is empty or None
    """
    if not url:
        raise ValueError("URL cannot be empty")

    url = parse.urlparse(url)
    with _lock:
        # pop() with a default instead of `del` so removing a never-cached
        # URL does not raise KeyError.
        # NOTE(review): get_challenge_for_url keys the cache via
        # _get_cache_key(url); confirm that matches url.netloc used here.
        _cache.pop(url.netloc, None)
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def status(self) -> str:
    """Identify this pool implementation in status output."""
    return "NonClosingPool"
timabbott/zulip
[ 2, 7, 2, 1, 1443209656 ]
def recreate(self) -> 'NonClosingPool':
    """Return a new pool of the same class, copying this pool's settings.

    Forwards the configuration attributes of the wrapped pool to the
    constructor of a fresh instance.
    """
    # NOTE(review): relies on SQLAlchemy-private attributes (_creator,
    # _recycle, _use_threadlocal, ...); verify these still exist on the
    # pinned SQLAlchemy version before upgrading.
    return self.__class__(creator=self._creator,
                          recycle=self._recycle,
                          use_threadlocal=self._use_threadlocal,
                          reset_on_return=self._reset_on_return,
                          echo=self.echo,
                          logging_name=self._orig_logging_name,
                          _dispatch=self.dispatch)
timabbott/zulip
[ 2, 7, 2, 1, 1443209656 ]
def make_image():
    """Render a 500x500 grayscale test image of six cartoon faces.

    Used as synthetic input for the contour-detection demo.

    Returns:
        A (500, 500) uint8 numpy array of white shapes on black.
    """
    img = np.zeros((500, 500), np.uint8)
    black, white = 0, 255
    # range() instead of the Python-2-only xrange() so this also runs on
    # Python 3; iteration behavior is unchanged.
    for i in range(6):
        dx = int((i % 2) * 250 - 30)
        dy = int((i / 2.) * 150)
        if i == 0:
            # Extra line strokes on the first figure only.
            for j in range(11):
                angle = (j + 5) * np.pi / 21
                c, s = np.cos(angle), np.sin(angle)
                x1, y1 = np.int32([dx + 100 + j * 10 - 80 * c, dy + 100 - 90 * s])
                x2, y2 = np.int32([dx + 100 + j * 10 - 30 * c, dy + 100 - 30 * s])
                cv2.line(img, (x1, y1), (x2, y2), white)

        cv2.ellipse(img, (dx + 150, dy + 100), (100, 70), 0, 0, 360, white, -1)
        cv2.ellipse(img, (dx + 115, dy + 70), (30, 20), 0, 0, 360, black, -1)
        cv2.ellipse(img, (dx + 185, dy + 70), (30, 20), 0, 0, 360, black, -1)
        cv2.ellipse(img, (dx + 115, dy + 70), (15, 15), 0, 0, 360, white, -1)
        cv2.ellipse(img, (dx + 185, dy + 70), (15, 15), 0, 0, 360, white, -1)
        cv2.ellipse(img, (dx + 115, dy + 70), (5, 5), 0, 0, 360, black, -1)
        cv2.ellipse(img, (dx + 185, dy + 70), (5, 5), 0, 0, 360, black, -1)
        cv2.ellipse(img, (dx + 150, dy + 100), (10, 5), 0, 0, 360, black, -1)
        cv2.ellipse(img, (dx + 150, dy + 150), (40, 10), 0, 0, 360, black, -1)
        cv2.ellipse(img, (dx + 27, dy + 100), (20, 35), 0, 0, 360, white, -1)
        cv2.ellipse(img, (dx + 273, dy + 100), (20, 35), 0, 0, 360, white, -1)
    return img
makelove/OpenCV-Python-Tutorial
[ 2997, 1086, 2997, 11, 1445060268 ]
def update(levels):
    """Redraw the contour visualization for the given trackbar position.

    Trackbar callback; reads the module-level h, w, contours and
    hierarchy variables.

    Args:
        levels: raw trackbar position. Offset by -3 below so the slider
            midpoint corresponds to drawing hierarchy depth 0.
    """
    vis = np.zeros((h, w, 3), np.uint8)
    levels = levels - 3
    # contourIdx trick: (-1, 2)[levels <= 0] selects -1 (draw all
    # contours) when levels > 0, and contour index 2 when levels <= 0.
    # abs(levels) is the maximum hierarchy depth to draw.
    cv2.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255),
        3, cv2.LINE_AA, hierarchy, abs(levels) )
    cv2.imshow('contours', vis)
makelove/OpenCV-Python-Tutorial
[ 2997, 1086, 2997, 11, 1445060268 ]
def setUp(self): super(DebugIdentityV2OpTest, self).setUp() # Testing using a small circular-buffer size. self.circular_buffer_size = 4 self.tfdbg_run_id = "test_tfdbg_run" self.writer = debug_events_writer.DebugEventsWriter( self.dump_root, self.tfdbg_run_id, self.circular_buffer_size)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testSingleTensorFullTensorDebugModeWithCircularBufferBehavior(self):
  """FULL_TENSOR traces honor the writer's circular-buffer size."""

  @def_function.function
  def write_debug_trace(x):
    square = math_ops.square(x)
    gen_debug_ops.debug_identity_v2(
        square,
        tfdbg_context_id="deadbeaf",
        op_name="Square",
        output_slot=0,
        tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
        debug_urls=["file://%s" % self.dump_root])
    sqrt = math_ops.sqrt(x)
    gen_debug_ops.debug_identity_v2(
        sqrt,
        tfdbg_context_id="beafdead",
        op_name="Sqrt",
        output_slot=0,
        tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
        debug_urls=["file://%s" % self.dump_root])
    return square + sqrt

  x = np.array([3.0, 4.0])
  # Only the graph-execution trace of the last iteration should be written
  # to self.dump_root.
  for _ in range(self.circular_buffer_size // 2 + 1):
    self.assertAllClose(
        write_debug_trace(x), [9.0 + np.sqrt(3.0), 16.0 + 2.0])

  with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
    # Check that the .metadata DebugEvents data file has been created, even
    # before FlushExecutionFiles() is called.
    self.assertGreater(reader.starting_wall_time(), 0)
    self.assertTrue(reader.tensorflow_version())
    self.assertTrue(reader.tfdbg_file_version().startswith("debug.Event"))

    graph_trace_iter = reader.graph_execution_traces_iterators()[0]
    # Before FlushExecutionFiles() is called, the .graph_execution_traces
    # file ought to be empty.
    with self.assertRaises(StopIteration):
      next(graph_trace_iter)

    # Flush the circular buffer.
    self.writer.FlushExecutionFiles()
    graph_trace_iter = reader.graph_execution_traces_iterators()[0]

    # The circular buffer has a size of 4. So only the data from the
    # last two iterations should have been written to self.dump_root.
    for _ in range(2):
      debug_event = next(graph_trace_iter).debug_event
      self.assertGreater(debug_event.wall_time, 0)
      trace = debug_event.graph_execution_trace
      self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
      self.assertEqual(trace.op_name, "Square")
      self.assertEqual(trace.output_slot, 0)
      self.assertEqual(trace.tensor_debug_mode,
                       debug_event_pb2.TensorDebugMode.FULL_TENSOR)
      tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
      self.assertAllClose(tensor_value, [9.0, 16.0])

      debug_event = next(graph_trace_iter).debug_event
      self.assertGreater(debug_event.wall_time, 0)
      trace = debug_event.graph_execution_trace
      self.assertEqual(trace.tfdbg_context_id, "beafdead")
      self.assertEqual(trace.op_name, "Sqrt")
      self.assertEqual(trace.output_slot, 0)
      self.assertEqual(trace.tensor_debug_mode,
                       debug_event_pb2.TensorDebugMode.FULL_TENSOR)
      tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
      self.assertAllClose(tensor_value, [np.sqrt(3.0), 2.0])

    # Only the graph-execution trace of the last iteration should be written
    # to self.dump_root.
    with self.assertRaises(StopIteration):
      next(graph_trace_iter)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testControlFlow(self):
  """DebugIdentityV2 inside a while loop records the last N loop values."""

  @def_function.function
  def collatz(x):
    counter = constant_op.constant(0, dtype=dtypes.int32)
    while math_ops.greater(x, 1):
      counter = counter + 1
      gen_debug_ops.debug_identity_v2(
          x,
          tfdbg_context_id="deadbeaf",
          op_name="x",
          output_slot=0,
          tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
          debug_urls=["file://%s" % self.dump_root])
      if math_ops.equal(x % 2, 0):
        x = math_ops.div(x, 2)
      else:
        x = x * 3 + 1
    return counter

  x = constant_op.constant(10, dtype=dtypes.int32)
  self.evaluate(collatz(x))

  self.writer.FlushExecutionFiles()
  with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
    graph_trace_iter = reader.graph_execution_traces_iterators()[0]
    try:
      x_values = []
      timestamp = 0
      while True:
        debug_event = next(graph_trace_iter).debug_event
        # Wall times must be strictly increasing across traces.
        self.assertGreater(debug_event.wall_time, timestamp)
        timestamp = debug_event.wall_time
        trace = debug_event.graph_execution_trace
        self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
        self.assertEqual(trace.op_name, "x")
        self.assertEqual(trace.output_slot, 0)
        self.assertEqual(trace.tensor_debug_mode,
                         debug_event_pb2.TensorDebugMode.FULL_TENSOR)
        x_values.append(int(tensor_util.MakeNdarray(trace.tensor_proto)))
    except StopIteration:
      pass

    # Due to the circular buffer, only the last 4 iterations of
    # [10, 5, 16, 8, 4, 2] should have been written.
    self.assertAllEqual(x_values, [16, 8, 4, 2])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testTwoDumpRoots(self):
  """A single op can fan its trace out to two debug URLs at once."""
  another_dump_root = os.path.join(self.dump_root, "another")
  another_debug_url = "file://%s" % another_dump_root
  another_writer = debug_events_writer.DebugEventsWriter(
      another_dump_root, "test_tfdbg_run")

  @def_function.function
  def write_debug_trace(x):
    # DebugIdentityV2 is a stateful op. It ought to be included by auto
    # control dependency.
    square = math_ops.square(x)
    gen_debug_ops.debug_identity_v2(
        square,
        tfdbg_context_id="deadbeaf",
        tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
        debug_urls=["file://%s" % self.dump_root, another_debug_url])
    return square + 1.0

  x = np.array([3.0, 4.0])
  self.assertAllClose(write_debug_trace(x), np.array([10.0, 17.0]))
  self.writer.FlushExecutionFiles()
  another_writer.FlushExecutionFiles()
  another_writer.Close()

  # Both dump roots must contain the same single trace.
  for debug_root in (self.dump_root, another_dump_root):
    with debug_events_reader.DebugEventsReader(debug_root) as reader:
      graph_trace_iter = reader.graph_execution_traces_iterators()[0]

      debug_event = next(graph_trace_iter).debug_event
      trace = debug_event.graph_execution_trace
      self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
      # op_name was not passed to the op, so it is empty here.
      self.assertEqual(trace.op_name, "")
      self.assertEqual(trace.tensor_debug_mode,
                       debug_event_pb2.TensorDebugMode.FULL_TENSOR)
      tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
      self.assertAllClose(tensor_value, [9.0, 16.0])

      with self.assertRaises(StopIteration):
        next(graph_trace_iter)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testInvokingDebugIdentityV2OpBeforeCreatingDebugEventsWriterWorks(self):
  """The op may run before a DebugEventsWriter exists for its dump root."""
  circular_buffer_size = 3

  @def_function.function
  def write_debug_trace(x):
    # DebugIdentityV2 is a stateful op. It ought to be included by auto
    # control dependency.
    square = math_ops.square(x)
    gen_debug_ops.debug_identity_v2(
        square,
        tfdbg_context_id="deadbeaf",
        op_name="Square",
        output_slot=0,
        tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
        debug_urls=["file://%s" % self.dump_root],
        circular_buffer_size=circular_buffer_size)
    return square

  # The DebugIdentityV2 ops are invokes *before* a DebugEventsWriter at the
  # same dump root is created.
  for i in range(circular_buffer_size * 2):
    self.assertAllClose(
        write_debug_trace(np.array([i]).astype(np.float32)), [i**2.0])
  writer = debug_events_writer.DebugEventsWriter(self.dump_root,
                                                 "test_tfdbg_run",
                                                 circular_buffer_size)
  writer.FlushNonExecutionFiles()
  writer.FlushExecutionFiles()

  with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
    graph_trace_iter = reader.graph_execution_traces_iterators()[0]
    graph_execution_traces = []
    while True:
      try:
        graph_execution_traces.append(
            next(graph_trace_iter).debug_event.graph_execution_trace)
      except StopIteration:
        break
    # Only the last circular_buffer_size traces survive.
    self.assertLen(graph_execution_traces, circular_buffer_size)
    for i in range(circular_buffer_size):
      self.assertAllClose(
          tensor_util.MakeNdarray(graph_execution_traces[i].tensor_proto),
          [(i + circular_buffer_size)**2.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpReduceInfNanThreeSlots(self):
  """REDUCE_INF_NAN_THREE_SLOTS yields [-inf seen, +inf seen, nan seen]."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(
                debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS)))

  self.assertAllEqual(
      debug_summary(constant_op.constant([])), [0.0, 0.0, 0.0])
  self.assertAllEqual(
      debug_summary(constant_op.constant(42.0)), [0.0, 0.0, 0.0])
  self.assertAllEqual(
      debug_summary(constant_op.constant([3.0, 4.0])), [0.0, 0.0, 0.0])
  self.assertAllEqual(
      debug_summary(constant_op.constant(np.array([3.0, -np.inf]))),
      [-np.inf, 0.0, 0.0])
  self.assertAllEqual(
      debug_summary(constant_op.constant(np.array([[0, 0], [np.nan, 0]]))),
      [0.0, 0.0, np.nan])
  self.assertAllEqual(
      debug_summary(
          constant_op.constant(np.array([[0, 0], [np.nan, np.inf]]))),
      [0.0, np.inf, np.nan])
  self.assertAllEqual(
      debug_summary(
          constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]]))),
      [-np.inf, np.inf, np.nan])

  x = np.zeros([100, 100], dtype=np.float16)
  x[32, 47] = np.nan
  self.assertAllEqual(
      debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
  x = np.zeros([97, 97], dtype=np.float32)
  x[50, 83] = -np.inf
  self.assertAllEqual(
      debug_summary(constant_op.constant(x)), [-np.inf, 0.0, 0.0])
  x[1, 41] = np.nan
  self.assertAllEqual(
      debug_summary(constant_op.constant(x)), [-np.inf, 0.0, np.nan])
  x = np.zeros([9701], dtype=np.float64)
  x[9700] = np.nan
  self.assertAllEqual(
      debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpLargeTensorIDError(self): modes = [ debug_event_pb2.TensorDebugMode.CURT_HEALTH, debug_event_pb2.TensorDebugMode.CONCISE_HEALTH, debug_event_pb2.TensorDebugMode.SHAPE, ] # Maximum allowed tensor_id tensor_id = np.power(2, 53, dtype=np.int64) for mode in modes: self.evaluate( gen_debug_ops.debug_numeric_summary_v2( constant_op.constant(42.0), tensor_debug_mode=mode, tensor_id=tensor_id, output_dtype=dtypes.float64)) # Incrementing by one should error tensor_id += 1 for mode in modes: with self.assertRaises(errors.InvalidArgumentError): self.evaluate( gen_debug_ops.debug_numeric_summary_v2( constant_op.constant(42.0), tensor_debug_mode=mode, tensor_id=tensor_id, output_dtype=dtypes.float64))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpCurtHealthValuesSmall(self):
  """CURT_HEALTH on small tensors: [tensor_id, any-inf-or-nan flag]."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  tensor, tensor_id = debug_summary(constant_op.constant([]))
  self.assertAllEqual(tensor, [tensor_id, 0.0])

  tensor, tensor_id = debug_summary(constant_op.constant(42.0))
  self.assertAllEqual(tensor, [tensor_id, 0.0])

  tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
  self.assertAllEqual(tensor, [tensor_id, 0.0])

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([3.0, -np.inf])))
  self.assertAllEqual(tensor, [tensor_id, 1.0])

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
  self.assertAllEqual(tensor, [tensor_id, 1.0])

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
  self.assertAllEqual(tensor, [tensor_id, 1.0])

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
  self.assertAllEqual(tensor, [tensor_id, 1.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpCurtHealthValuesLarge(self):
  """CURT_HEALTH flags a single bad element in large tensors."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  x = np.zeros([100, 100], dtype=np.float16)
  x[32, 47] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [tensor_id, 1.0])
  x = np.zeros([97, 97], dtype=np.float32)
  x[50, 83] = -np.inf
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [tensor_id, 1.0])
  x[1, 41] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [tensor_id, 1.0])
  x = np.zeros([9701], dtype=np.float64)
  x[9700] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [tensor_id, 1.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpCurtHealthConsistency(self):
  """Repeated CURT_HEALTH summaries of the same tensor are identical."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  x = np.zeros([100, 100], dtype=np.float16)
  x[43, 99] = np.nan
  c = constant_op.constant(x)
  tensor_1, tensor_id_1 = debug_summary(c)
  tensor_2, tensor_id_2 = debug_summary(c)
  self.assertAllEqual(tensor_1, tensor_2)
  self.assertEqual(tensor_id_1, tensor_id_2)

  x = np.zeros([100, 100, 50], dtype=np.float64)
  x[0, 0, 1] = np.inf
  c = constant_op.constant(x)
  tensor_1, tensor_id_1 = debug_summary(c)
  tensor_2, tensor_id_2 = debug_summary(c)
  self.assertAllEqual(tensor_1, tensor_2)
  self.assertEqual(tensor_id_1, tensor_id_2)

  c = constant_op.constant(np.ones((100, 200), np.double))
  tensor_1, tensor_id_1 = debug_summary(c)
  tensor_2, tensor_id_2 = debug_summary(c)
  self.assertAllEqual(tensor_1, tensor_2)
  self.assertEqual(tensor_id_1, tensor_id_2)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpDeterminism(self):
  """With deterministic ops on GPU, unsupported modes raise Unimplemented."""
  x = np.zeros([100, 100, 50], dtype=np.float64)
  x = constant_op.constant(x)
  modes = (
      debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
      debug_event_pb2.TensorDebugMode.FULL_HEALTH,
  )
  for mode in modes:
    debug_mode = debug_event_pb2.TensorDebugMode.Name(mode)
    with test_util.deterministic_ops():
      # Only the CUDA kernel lacks a deterministic implementation; the
      # check below is skipped when no GPU is available.
      if test_util.is_gpu_available(cuda_only=True):
        with self.assertRaisesRegex(
            errors_impl.UnimplementedError, "Determinism is not yet "
            "supported for DebugNumericSummaryV2 when tensor_debug_mode is "
            + debug_mode + "."):
          self.evaluate(
              gen_debug_ops.debug_numeric_summary_v2(
                  x,
                  tensor_debug_mode=mode,
                  tensor_id=x._id,
                  output_dtype=dtypes.float64))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpConciseHealthSmall(self):
  """CONCISE_HEALTH: [tensor_id, num_elements, -inf, +inf, nan counts]."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(
                debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  tensor, tensor_id = debug_summary(constant_op.constant([]))
  self.assertAllEqual(tensor, [tensor_id, 0.0, 0.0, 0.0, 0.0])

  tensor, tensor_id = debug_summary(constant_op.constant(42.0))
  self.assertAllEqual(tensor, [tensor_id, 1.0, 0.0, 0.0, 0.0])

  tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
  self.assertAllEqual(tensor, [tensor_id, 2.0, 0.0, 0.0, 0.0])

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([3.0, -np.inf])))
  self.assertAllEqual(tensor, [tensor_id, 2.0, 1.0, 0.0, 0.0])

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
  self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 0.0, 1.0])

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
  self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 1.0, 1.0])

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
  self.assertAllEqual(tensor, [tensor_id, 4.0, 1.0, 1.0, 1.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpConciseHealthLarge(self):
  """CONCISE_HEALTH counts bad values correctly in large tensors."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(
                debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  x = np.zeros([100, 100], dtype=np.float16)
  x[32, :] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [tensor_id, 10000.0, 0.0, 0.0, 100.0])
  x = np.zeros([97, 97], dtype=np.float32)
  x[50, 83:85] = -np.inf
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 0.0])
  x[1:9, 41] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 8.0])
  x = np.zeros([9701], dtype=np.float64)
  x[9700] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [tensor_id, 9701, 0.0, 0.0, 1.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpConciseHealthConsistency(self):
  """Repeated CONCISE_HEALTH summaries of the same tensor are identical."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(
                debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  # Assert the same op is returns a consistent value
  x = np.zeros([100, 100], dtype=np.float16)
  x[3, 4] = -np.inf
  c = constant_op.constant(x)
  tensor_1, tensor_id_1 = debug_summary(c)
  tensor_2, tensor_id_2 = debug_summary(c)
  self.assertAllEqual(tensor_1, tensor_2)
  self.assertEqual(tensor_id_1, tensor_id_2)

  c = constant_op.constant(np.ones((100, 200), np.double))
  tensor_1, tensor_id_1 = debug_summary(c)
  tensor_2, tensor_id_2 = debug_summary(c)
  self.assertAllEqual(tensor_1, tensor_2)
  self.assertEqual(tensor_id_1, tensor_id_2)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpShapeEmpty(self):
  """SHAPE mode on a scalar: rank 0, size 1, zero-padded dims."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  tensor, tensor_id = debug_summary(constant_op.constant(0.0))
  self.assertAllEqual(
      tensor, [tensor_id, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpShapeSmall(self):
  """SHAPE mode: [tensor_id, dtype enum, rank, size, up to 6 dims].

  NOTE(review): the second slot matches TF DataType enum values
  (1 = float32, 2 = float64, 19 = float16 below) -- confirm against
  types.proto if the expectations ever change.
  """

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  x = np.zeros([3, 4], dtype=np.float32)
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(
      tensor, [tensor_id, 1.0, 2.0, 12.0, 3.0, 4.0, 0.0, 0.0, 0.0, 0.0])

  x = np.ones([1, 2, 3, 4, 5, 6], dtype=np.float16)
  x[0, 1, 2, 2, 2, 2] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(
      tensor,
      [tensor_id, 19, 6.0, 2 * 3 * 4 * 5 * 6, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

  x = np.zeros([2], dtype=np.float32)
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(
      tensor, [tensor_id, 1.0, 1.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0])

  tensor, tensor_id = debug_summary(constant_op.constant([]))
  self.assertAllEqual(
      tensor, [tensor_id, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpShapeLarge(self):
  """SHAPE mode truncates to the trailing 6 dims of a rank-7 tensor."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  x = np.ones([1, 2, 3, 4, 5, 6, 7], dtype=np.double)
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  self.assertAllEqual(tensor, [
      tensor_id, 2.0, 7.0, 2 * 3 * 4 * 5 * 6 * 7, 2.0, 3.0, 4.0, 5.0, 6.0,
      7.0
  ])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpFullHealthSmall(self):
  """FULL_HEALTH layout per the expectations below:

  [tensor_id, device_id(-1), dtype enum, rank, size,
   -inf count, +inf count, nan count, neg count, zero count, pos count].
  """

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  tensor, tensor_id = debug_summary(constant_op.constant([]))
  expected = [tensor_id, -1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
  self.assertAllEqual(tensor, expected)

  tensor, tensor_id = debug_summary(constant_op.constant(42.0))
  expected = [tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]
  self.assertAllEqual(tensor, expected)

  tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
  expected = [tensor_id, -1, 1, 1, 2, 0, 0, 0, 0, 0, 2]
  self.assertAllEqual(tensor, expected)

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([3, -np.inf], dtype=np.float32)))
  expected = [tensor_id, -1, 1, 1, 2, 1, 0, 0, 0, 0, 1]
  self.assertAllEqual(tensor, expected)

  tensor, tensor_id = debug_summary(
      constant_op.constant(np.array([[0, 0], [np.nan, 0]], dtype=np.float64)))
  expected = [tensor_id, -1, 2, 2, 4, 0, 0, 1, 0, 3, 0]
  self.assertAllEqual(tensor, expected)

  tensor, tensor_id = debug_summary(
      constant_op.constant(
          np.array([[0, 0], [np.nan, np.inf]], dtype=np.float16)))
  expected = [tensor_id, -1, 19, 2, 4, 0, 1, 1, 0, 2, 0]
  self.assertAllEqual(tensor, expected)

  tensor, tensor_id = debug_summary(
      constant_op.constant(
          np.array([[0, np.inf], [np.nan, -np.inf]], dtype=np.float32)))
  expected = [tensor_id, -1, 1, 2, 4, 1, 1, 1, 0, 1, 0]
  self.assertAllEqual(tensor, expected)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpFullHealthLarge(self):
  """FULL_HEALTH counts match a NumPy reference on large tensors."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  def tensor_counts(arr):
    # Reference implementation of the FULL_HEALTH tail:
    # [rank, size, -inf, +inf, nan, negative, zero, positive] counts.
    counts = [len(np.shape(arr)), np.size(arr), 0, 0, 0, 0, 0, 0]
    for n in np.ravel(arr):
      if np.isneginf(n):
        counts[2] += 1
      elif np.isposinf(n):
        counts[3] += 1
      elif np.isnan(n):
        counts[4] += 1
      elif n < 0.:
        counts[5] += 1
      elif n == 0.:
        counts[6] += 1
      else:
        counts[7] += 1
    return counts

  x = np.zeros([50, 50], dtype=np.float16)
  x[32, 47] = np.nan
  x[0:4, 3] = np.inf
  x[40:50, 40:50] = 10
  x[3, 20] = -10
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  expected = [tensor_id, -1, 19] + tensor_counts(x)
  self.assertAllEqual(tensor, expected)

  x = np.ones([25, 25, 50], dtype=np.float32) * np.inf
  x[:, :, 1] = np.nan
  x[:, :, 2] = -np.inf
  x[:, :, 3] = -1
  x[:, :, 4] = 0
  x[:, :, 5] = 1
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  expected = [tensor_id, -1, 1] + tensor_counts(x)
  self.assertAllEqual(tensor, expected)
  x[0, 0, 0] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  expected = [
      tensor_id,
      -1,
      1,
  ] + tensor_counts(x)
  self.assertAllEqual(tensor, expected)
  x = np.zeros([9701], dtype=np.float64)
  x[9700] = np.nan
  tensor, tensor_id = debug_summary(constant_op.constant(x))
  expected = [tensor_id, -1, 2] + tensor_counts(x)
  self.assertAllEqual(tensor, expected)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDebugNumericSummaryV2OpFullHealthConsistency(self):
  """Repeated FULL_HEALTH summaries of the same tensor are identical."""

  def debug_summary(x):
    return self.evaluate(
        gen_debug_ops.debug_numeric_summary_v2(
            x,
            tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
            tensor_id=x._id,
            output_dtype=dtypes.float64)), x._id

  # Assert the same op is returns a consistent value
  x = np.zeros([100, 100], dtype=np.float16)
  x[32, 47] = np.nan
  x[0:4, 3] = np.inf
  x[90:100, 90:100] = 10
  x[3, 20] = -10
  c = constant_op.constant(x)
  tensor_1, tensor_id_1 = debug_summary(c)
  tensor_2, tensor_id_2 = debug_summary(c)
  self.assertAllEqual(tensor_1, tensor_2)
  self.assertEqual(tensor_id_1, tensor_id_2)

  x = np.ones((100, 200, 3, 10), np.double)
  x[1, 30, 2] = 10
  x[5, :, 0, 1] = np.nan
  x[90:100, 150, :, :] = np.inf
  c = constant_op.constant(x)
  tensor_1, tensor_id_1 = debug_summary(c)
  tensor_2, tensor_id_2 = debug_summary(c)
  self.assertAllEqual(tensor_1, tensor_2)
  self.assertEqual(tensor_id_1, tensor_id_2)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testCheckNumericsV2OpNegativeAndPositiveInfAndNaN(self):
  """CheckNumericsV2 op distinguishes - & + infs when nan is present."""
  with self.session(graph=ops.Graph()):
    t1 = constant_op.constant([-1.0, 1.0, 0.0])
    t2 = constant_op.constant([0.0, 0.0, 0.0])
    # t1 / t2 produces [-inf, +inf, nan]; the error message must list
    # all three categories.
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        r"pass through test.*had -Inf, \+Inf, and NaN values"):
      self.evaluate(
          array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test(self):
    # NOTE(review): the "##|" sequences below look like caret/selection
    # markers from a Pydev editor test fixture, and the print statement is
    # Python-2 syntax. Left byte-identical; confirm this block is fixture
    # data before "fixing" it.
    print "I##|nitializing A", "test"##|
    attribute = "hello"
aptana/Pydev
[ 239, 85, 239, 6, 1250792405 ]
def my_method(self):
    # Python-2 print statement: emits the instance's `attribute` value.
    print self.attribute
aptana/Pydev
[ 239, 85, 239, 6, 1250792405 ]
def _get_context(req):
    """Return the nova request context stored in the WSGI environ."""
    environ = req.environ
    return environ['nova.context']
ntt-sic/nova
[ 1, 2, 1, 1, 1382427064 ]
def wrapped(self, req, id, body, *args, **kwargs):
    """Require the body to be exactly {'host': ...} and forward the host."""
    if len(body) != 1 or "host" not in body:
        raise exc.HTTPBadRequest()
    return fn(self, req, id, body['host'], *args, **kwargs)
ntt-sic/nova
[ 1, 2, 1, 1, 1382427064 ]