Dataset schema (per-row fields):

    body                    string, 26 to 98.2k characters
    body_hash               int64
    docstring               string, 1 to 16.8k characters
    path                    string, 5 to 230 characters
    name                    string, 1 to 96 characters
    repository_name         string, 7 to 89 characters
    lang                    string, 1 distinct value (python)
    body_without_docstring  string, 20 to 98.2k characters
body:
    def preferred_variants(self, pkg_name):
        """Facts on concretization preferences, as read from packages.yaml"""
        preferences = spack.package_prefs.PackagePrefs
        preferred_variants = preferences.preferred_variants(pkg_name)
        if not preferred_variants:
            return

        for variant_name in sorted(preferred_variants):
            variant = preferred_variants[variant_name]
            values = variant.value
            # a preference may hold a single value or a tuple of values
            if not isinstance(values, tuple):
                values = (values,)

            # validate the variant and its values against the package
            spec = spack.spec.Spec(pkg_name)
            spec.update_variant_validate(variant_name, values)

            for value in values:
                self.variant_values_from_specs.add((pkg_name, variant.name, value))
                self.gen.fact(fn.variant_default_value_from_packages_yaml(
                    pkg_name, variant.name, value))
body_hash: 7,701,278,619,298,708,000
docstring: Facts on concretization preferences, as read from packages.yaml
path: lib/spack/spack/solver/asp.py
name: preferred_variants
repository_name: AaltoSciComp/spack
lang: python
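The scalar-or-tuple normalization above is worth calling out: a packages.yaml preference may give a variant either one value or several. A minimal standalone sketch of the same idiom (the `emit_fact` helper and the sample preferences are illustrative stand-ins, not Spack API):

    def normalize(values):
        # treat a bare scalar as a one-element tuple
        return values if isinstance(values, tuple) else (values,)

    def emit_fact(pkg, variant, value):
        # stand-in for self.gen.fact(fn.variant_default_value_from_packages_yaml(...))
        print('variant_default_value_from_packages_yaml({0}, {1}, {2})'.format(
            pkg, variant, value))

    preferences = {'mpi': True, 'cuda_arch': ('70', '80')}
    for name in sorted(preferences):
        for value in normalize(preferences[name]):
            emit_fact('hdf5', name, value)

Running the sketch emits one fact per (variant, value) pair, mirroring how the method fans a tuple of preferred values out into individual ASP facts.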
body:
    def checked_spec_clauses(self, *args, **kwargs):
        """Wrap a call to spec clauses into a try/except block that raises
        a comprehensible error message in case of failure.
        """
        requestor = kwargs.pop('required_from', None)
        try:
            clauses = self.spec_clauses(*args, **kwargs)
        except RuntimeError as exc:
            # annotate the error with the package that required the clauses
            msg = str(exc)
            if requestor:
                msg += ' [required from package "{0}"]'.format(requestor)
            raise RuntimeError(msg)
        return clauses
body_hash: 5,603,958,043,790,484,000
docstring: Wrap a call to spec clauses into a try/except block that raises a comprehensible error message in case of failure.
path: lib/spack/spack/solver/asp.py
name: checked_spec_clauses
repository_name: AaltoSciComp/spack
lang: python
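The pattern here, catch, annotate with the caller's context, and re-raise, is generic enough to sketch on its own; nothing below is Spack API, just the same shape with hedged names:

    def call_with_context(fn, *args, required_from=None, **kwargs):
        """Run fn, appending the requesting package to any RuntimeError."""
        try:
            return fn(*args, **kwargs)
        except RuntimeError as exc:
            msg = str(exc)
            if required_from:
                msg += ' [required from package "{0}"]'.format(required_from)
            # make the causal chain explicit
            raise RuntimeError(msg) from exc

Plain `raise RuntimeError(msg)`, as in the original, still surfaces the first error through Python 3's implicit exception context; `from exc` merely makes the chain explicit in the traceback.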
body:
    def spec_clauses(self, spec, body=False, transitive=True):
        """Return a list of clauses that the spec mandates be true.

        Arguments:
            spec (spack.spec.Spec): the spec to analyze
            body (bool): if True, generate clauses to be used in rule bodies
                (final values) instead of rule heads (setters).
            transitive (bool): if False, don't generate clauses from
                dependencies (default True)
        """
        clauses = []

        class Head(object):
            node = fn.node
            virtual_node = fn.virtual_node
            node_platform = fn.node_platform_set
            node_os = fn.node_os_set
            node_target = fn.node_target_set
            variant_value = fn.variant_set
            node_compiler = fn.node_compiler_set
            node_compiler_version = fn.node_compiler_version_set
            node_flag = fn.node_flag_set

        class Body(object):
            node = fn.node
            virtual_node = fn.virtual_node
            node_platform = fn.node_platform
            node_os = fn.node_os
            node_target = fn.node_target
            variant_value = fn.variant_value
            node_compiler = fn.node_compiler
            node_compiler_version = fn.node_compiler_version
            node_flag = fn.node_flag

        f = Body if body else Head

        if spec.name:
            clauses.append(
                f.node(spec.name) if not spec.virtual else f.virtual_node(spec.name))

        clauses.extend(self.spec_versions(spec))

        # architecture
        arch = spec.architecture
        if arch:
            if arch.platform:
                clauses.append(f.node_platform(spec.name, arch.platform))
            if arch.os:
                clauses.append(f.node_os(spec.name, arch.os))
            if arch.target:
                clauses.extend(self.target_ranges(spec, f.node_target))

        # variants
        for vname, variant in sorted(spec.variants.items()):
            values = variant.value
            if not isinstance(values, (list, tuple)):
                values = [values]

            for value in values:
                # '*' means any value, so it adds no constraint
                if value == '*':
                    continue

                # validate the variant value only if the spec is not concrete
                if not spec.concrete:
                    reserved_names = spack.directives.reserved_names
                    if not spec.virtual and vname not in reserved_names:
                        try:
                            variant_def = spec.package.variants[vname]
                        except KeyError:
                            msg = 'variant "{0}" not found in package "{1}"'
                            raise RuntimeError(msg.format(vname, spec.name))
                        else:
                            variant_def.validate_or_raise(variant, spec.package)

                clauses.append(f.variant_value(spec.name, vname, value))
                # record this as a possible value for the variant
                self.variant_values_from_specs.add((spec.name, vname, value))

        # compiler and compiler version
        if spec.compiler:
            clauses.append(f.node_compiler(spec.name, spec.compiler.name))

            if spec.compiler.concrete:
                clauses.append(f.node_compiler_version(
                    spec.name, spec.compiler.name, spec.compiler.version))
            elif spec.compiler.versions:
                clauses.append(fn.node_compiler_version_satisfies(
                    spec.name, spec.compiler.name, spec.compiler.versions))
                self.compiler_version_constraints.add((spec.name, spec.compiler))

        # compiler flags
        for flag_type, flags in spec.compiler_flags.items():
            for flag in flags:
                clauses.append(f.node_flag(spec.name, flag_type, flag))

        if spec.concrete:
            clauses.append(fn.concrete(spec.name))

        # add all clauses from dependencies
        if transitive:
            for dep in spec.traverse(root=False):
                clauses.extend(self.spec_clauses(dep, body, transitive=False))

        return clauses
body_hash: 5,841,212,043,630,514,000
docstring: Return a list of clauses that the spec mandates be true. Arguments: spec (spack.spec.Spec): the spec to analyze body (bool): if True, generate clauses to be used in rule bodies (final values) instead of rule heads (setters). transitive (bool): if False, don't generate clauses from dependencies (default True)
path: lib/spack/spack/solver/asp.py
name: spec_clauses
repository_name: AaltoSciComp/spack
lang: python
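The Head/Body classes above act as parallel namespaces: same attribute names, different fact constructors, so every call site can stay flag-agnostic. A self-contained sketch of the pattern, with tuple-returning stand-ins instead of ASP functions:

    class Head:
        @staticmethod
        def variant_value(pkg, name, value):
            return ('variant_set', pkg, name, value)    # rule head: a setter

    class Body:
        @staticmethod
        def variant_value(pkg, name, value):
            return ('variant_value', pkg, name, value)  # rule body: a final value

    def clause(body=False):
        f = Body if body else Head
        return f.variant_value('hdf5', 'mpi', 'True')

    print(clause(body=False))   # ('variant_set', 'hdf5', 'mpi', 'True')
    print(clause(body=True))    # ('variant_value', 'hdf5', 'mpi', 'True')

The selection happens once (`f = Body if body else Head`); everything after that is written against the shared attribute names.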
body:
    def build_version_dict(self, possible_pkgs, specs):
        """Declare any versions in specs not declared in packages."""
        self.declared_versions = collections.defaultdict(list)
        self.possible_versions = collections.defaultdict(set)
        self.deprecated_versions = collections.defaultdict(set)

        packages_yaml = spack.config.get('packages')
        packages_yaml = _normalize_packages_yaml(packages_yaml)
        for pkg_name in possible_pkgs:
            pkg = spack.repo.get(pkg_name)

            # sort best-first: preferred, then non-develop, then newest
            def key_fn(item):
                version, info = item
                return (info.get('preferred', False),
                        not version.isdevelop(),
                        version)

            for idx, item in enumerate(
                    sorted(pkg.versions.items(), key=key_fn, reverse=True)):
                v, version_info = item
                self.possible_versions[pkg_name].add(v)
                self.declared_versions[pkg_name].append(DeclaredVersion(
                    version=v, idx=idx, origin=version_provenance.package_py))
                deprecated = version_info.get('deprecated', False)
                if deprecated:
                    self.deprecated_versions[pkg_name].add(v)

            # versions preferred in packages.yaml
            version_preferences = packages_yaml.get(pkg_name, {}).get('version', [])
            for idx, v in enumerate(version_preferences):
                self.declared_versions[pkg_name].append(DeclaredVersion(
                    version=v, idx=idx, origin=version_provenance.packages_yaml))

        for spec in specs:
            for dep in spec.traverse():
                if dep.versions.concrete:
                    self.declared_versions[dep.name].append(DeclaredVersion(
                        version=dep.version, idx=0, origin=version_provenance.spec))
                    self.possible_versions[dep.name].add(dep.version)
body_hash: -8,180,885,107,792,739,000
docstring: Declare any versions in specs not declared in packages.
path: lib/spack/spack/solver/asp.py
name: build_version_dict
repository_name: AaltoSciComp/spack
lang: python
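The sort key is the subtle part: `reverse=True` over the tuple `(preferred, not develop, version)` puts preferred versions first, then non-develop versions, then newer ones. A toy reproduction with plain data; here string comparison stands in for Spack's Version ordering (which would misorder e.g. '1.10' vs '1.9'), so this only demonstrates the tuple logic:

    versions = {
        '2.0-dev': {'preferred': False, 'develop': True},
        '1.9':     {'preferred': False, 'develop': False},
        '1.8':     {'preferred': True,  'develop': False},
    }

    def key_fn(item):
        version, info = item
        return (info.get('preferred', False),
                not info.get('develop', False),
                version)

    print([v for v, _ in sorted(versions.items(), key=key_fn, reverse=True)])
    # ['1.8', '1.9', '2.0-dev']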
body:
    def _supported_targets(self, compiler_name, compiler_version, targets):
        """Get a list of which targets are supported by the compiler.

        Results are ordered most to least recent.
        """
        supported = []
        for target in targets:
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    target.optimization_flags(compiler_name, compiler_version)
                supported.append(target)
            except archspec.cpu.UnsupportedMicroarchitecture:
                continue
            except ValueError:
                continue
        return sorted(supported, reverse=True)
body_hash: -1,959,975,852,145,534,000
docstring: Get a list of which targets are supported by the compiler. Results are ordered most to least recent.
path: lib/spack/spack/solver/asp.py
name: _supported_targets
repository_name: AaltoSciComp/spack
lang: python
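This is a filter-by-probe: an item is kept only if a probing call completes without raising. The same shape with hedged stand-ins for archspec's target API:

    def keep_supported(items, probe):
        supported = []
        for item in items:
            try:
                probe(item)         # stand-in for target.optimization_flags(...)
            except ValueError:      # stand-in for UnsupportedMicroarchitecture
                continue
            supported.append(item)
        return sorted(supported, reverse=True)

    def probe(x):
        if x > 2:
            raise ValueError('unsupported')

    print(keep_supported([3, 1, 2], probe))   # [2, 1]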
body:
    def target_defaults(self, specs):
        """Add facts about targets and target compatibility."""
        self.gen.h2('Default target')
        platform = spack.architecture.platform()
        uarch = archspec.cpu.TARGETS.get(platform.default)

        self.gen.h2('Target compatibility')
        # the host microarchitecture, its ancestors, and any other
        # target in the same family
        compatible_targets = [uarch] + uarch.ancestors
        additional_targets_in_family = sorted(
            [t for t in archspec.cpu.TARGETS.values()
             if t.family.name == uarch.family.name and t not in compatible_targets],
            key=lambda x: len(x.ancestors),
            reverse=True,
        )
        compatible_targets += additional_targets_in_family
        compilers = self.possible_compilers

        # targets supported by at least one compiler
        best_targets = set([uarch.family.name])
        for compiler in sorted(compilers):
            supported = self._supported_targets(
                compiler.name, compiler.version, compatible_targets)

            # if the declared compiler version supports no target, retry
            # with the version the compiler itself reports
            if not supported:
                compiler_obj = spack.compilers.compilers_for_spec(compiler)
                compiler_obj = compiler_obj[0]
                supported = self._supported_targets(
                    compiler.name, compiler_obj.real_version, compatible_targets)

            if not supported:
                continue

            for target in supported:
                best_targets.add(target.name)
                self.gen.fact(fn.compiler_supports_target(
                    compiler.name, compiler.version, target.name))
            self.gen.fact(fn.compiler_supports_target(
                compiler.name, compiler.version, uarch.family.name))

        # add any targets explicitly mentioned in the input specs
        for spec in specs:
            if not spec.architecture or not spec.architecture.target:
                continue

            target = archspec.cpu.TARGETS.get(spec.target.name)
            if not target:
                self.target_ranges(spec, None)
                continue

            if target not in compatible_targets:
                compatible_targets.append(target)

        i = 0
        for target in compatible_targets:
            self.gen.fact(fn.target(target.name))
            self.gen.fact(fn.target_family(target.name, target.family.name))
            for parent in sorted(target.parents):
                self.gen.fact(fn.target_parent(target.name, parent.name))

            # best-supported targets get small (preferred) weights;
            # everything else gets a large penalty weight
            if target.name in best_targets:
                self.gen.fact(fn.default_target_weight(target.name, i))
                i += 1
            else:
                self.gen.fact(fn.default_target_weight(target.name, 100))
            self.gen.newline()
body_hash: 5,557,989,692,404,767,000
docstring: Add facts about targets and target compatibility.
path: lib/spack/spack/solver/asp.py
name: target_defaults
repository_name: AaltoSciComp/spack
lang: python
body:
    def define_version_constraints(self):
        """Define what version_satisfies(...) means in ASP logic."""
        for pkg_name, versions in sorted(self.version_constraints):
            # the possible versions of the package that satisfy the constraint
            allowed_versions = [
                v for v in sorted(self.possible_versions[pkg_name])
                if v.satisfies(versions)
            ]

            # if the constraint matches one version exactly, keep only that one
            exact_match = [v for v in allowed_versions if v == versions]
            if exact_match:
                allowed_versions = exact_match

            for v in allowed_versions:
                self.gen.fact(fn.version_satisfies(pkg_name, versions, v))

            self.gen.newline()
body_hash: 4,387,517,118,108,470,300
docstring: Define what version_satisfies(...) means in ASP logic.
path: lib/spack/spack/solver/asp.py
name: define_version_constraints
repository_name: AaltoSciComp/spack
lang: python
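The narrowing step deserves a second look: when the constraint names one exact version that is itself a possible version, the range match collapses to that single version. A toy version with strings, where the crude `startswith` check stands in for the real `satisfies` relation:

    possible = ['1.8.0', '1.8.1', '1.9.0']
    constraint = '1.8.1'

    allowed = [v for v in possible if v.startswith('1.8')]   # pretend 'satisfies'
    exact = [v for v in allowed if v == constraint]
    if exact:
        allowed = exact    # an exact pin beats a range match

    print(allowed)   # ['1.8.1']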
body:
    def define_virtual_constraints(self):
        """Define versions for constraints on virtuals.

        Must be called before define_version_constraints().
        """
        # aggregate constraints into per-virtual sets
        constraint_map = collections.defaultdict(lambda: set())
        for pkg_name, versions in self.version_constraints:
            if not spack.repo.path.is_virtual(pkg_name):
                continue
            constraint_map[pkg_name].add(versions)

        # extract the endpoint versions mentioned in a version expression
        def versions_for(v):
            if isinstance(v, spack.version.Version):
                return [v]
            elif isinstance(v, spack.version.VersionRange):
                result = [v.start] if v.start else []
                result += [v.end] if v.end else []
                return result
            elif isinstance(v, spack.version.VersionList):
                return sum((versions_for(e) for e in v), [])
            else:
                raise TypeError('expected version type, found: %s' % type(v))

        # add the versions mentioned in constraints to the possible
        # versions of each virtual package
        for pkg_name, versions in sorted(constraint_map.items()):
            possible_versions = set(sum([versions_for(v) for v in versions], []))
            for version in sorted(possible_versions):
                self.possible_versions[pkg_name].add(version)
body_hash: -3,161,284,530,170,989,600
docstring: Define versions for constraints on virtuals. Must be called before define_version_constraints().
path: lib/spack/spack/solver/asp.py
name: define_virtual_constraints
repository_name: AaltoSciComp/spack
lang: python
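`versions_for` is a small recursive flatten over a three-case version algebra (single version, range, list). The same shape with plain data types standing in for the spack.version classes:

    def endpoints(v):
        if isinstance(v, int):        # stand-in for Version
            return [v]
        elif isinstance(v, tuple):    # stand-in for VersionRange(start, end)
            return [e for e in v if e is not None]
        elif isinstance(v, list):     # stand-in for VersionList
            return sum((endpoints(e) for e in v), [])
        else:
            raise TypeError('expected version type, found: %s' % type(v))

    print(endpoints([3, (2, 5), [(None, 7), 1]]))   # [3, 2, 5, 7, 1]

As a side note, `sum(lists, [])` is quadratic in the number of sublists; for the handful of constraints per virtual that is irrelevant, but `itertools.chain` would be the scalable spelling.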
body:
    def define_variant_values(self):
        """Validate variant values from the command line.

        Also add valid variant values from the command line to the
        possible values for a variant.
        """
        for pkg, variant, value in sorted(self.variant_values_from_specs):
            self.gen.fact(fn.variant_possible_value(pkg, variant, value))
body_hash: 5,043,408,363,552,209,000
docstring: Validate variant values from the command line. Also add valid variant values from the command line to the possible values for a variant.
path: lib/spack/spack/solver/asp.py
name: define_variant_values
repository_name: AaltoSciComp/spack
lang: python
body:
    def setup(self, driver, specs, tests=False):
        """Generate an ASP program with relevant constraints for specs.

        This calls methods on the solve driver to set up the problem with
        facts and rules from all possible dependencies of the input
        specs, as well as constraints from the specs themselves.

        Arguments:
            specs (list): list of Specs to solve
        """
        self._condition_id_counter = itertools.count()

        # preliminary checks
        check_packages_exist(specs)

        # get list of all possible dependencies
        self.possible_virtuals = set(x.name for x in specs if x.virtual)
        possible = spack.package.possible_dependencies(
            *specs,
            virtuals=self.possible_virtuals,
            deptype=spack.dependency.all_deptypes)
        pkgs = set(possible)

        # driver is used by all the functions below to add facts and
        # rules to the ASP program
        self.gen = driver

        self.possible_compilers = self.generate_possible_compilers(specs)
        self.build_version_dict(possible, specs)

        self.gen.h1('General Constraints')
        self.available_compilers()
        self.compiler_defaults()
        self.compiler_supports_os()

        # architecture defaults
        self.platform_defaults()
        self.os_defaults(specs)
        self.target_defaults(specs)

        self.virtual_providers()
        self.provider_defaults()
        self.external_packages()
        self.flag_defaults()

        self.gen.h1('Package Constraints')
        for pkg in sorted(pkgs):
            self.gen.h2('Package rules: %s' % pkg)
            self.pkg_rules(pkg, tests=tests)
            self.gen.h2('Package preferences: %s' % pkg)
            self.preferred_variants(pkg)
            self.preferred_targets(pkg)

        # inject dev_path from an active environment, if any
        env = ev.active_environment()
        if env:
            for spec in sorted(specs):
                for dep in spec.traverse():
                    _develop_specs_from_env(dep, env)

        self.gen.h1('Spec Constraints')
        for spec in sorted(specs):
            self.gen.h2('Spec: %s' % str(spec))
            self.gen.fact(
                fn.virtual_root(spec.name) if spec.virtual else fn.root(spec.name))
            for clause in self.spec_clauses(spec):
                self.gen.fact(clause)
                if clause.name == 'variant_set':
                    self.gen.fact(fn.variant_default_value_from_cli(*clause.args))

        self.gen.h1('Variant Values defined in specs')
        self.define_variant_values()

        self.gen.h1('Virtual Constraints')
        self.define_virtual_constraints()

        self.gen.h1('Version Constraints')
        self.define_version_constraints()

        self.gen.h1('Compiler Version Constraints')
        self.define_compiler_version_constraints()

        self.gen.h1('Target Constraints')
        self.define_target_constraints()
body_hash: -8,143,368,605,306,560,000
docstring: Generate an ASP program with relevant constraints for specs. This calls methods on the solve driver to set up the problem with facts and rules from all possible dependencies of the input specs, as well as constraints from the specs themselves. Arguments: specs (list): list of Specs to solve
path: lib/spack/spack/solver/asp.py
name: setup
repository_name: AaltoSciComp/spack
lang: python
body:
    def external_spec_selected(self, pkg, idx):
        """This means that the external spec at index idx
        has been selected for this package.
        """
        packages_yaml = spack.config.get('packages')
        packages_yaml = _normalize_packages_yaml(packages_yaml)
        spec_info = packages_yaml[pkg]['externals'][int(idx)]
        self._specs[pkg].external_path = spec_info.get('prefix', None)
        self._specs[pkg].external_modules = spack.spec.Spec._format_module_list(
            spec_info.get('modules', None))
        self._specs[pkg].extra_attributes = spec_info.get('extra_attributes', {})
body_hash: -2,211,581,437,128,111,000
docstring: This means that the external spec at index idx has been selected for this package.
path: lib/spack/spack/solver/asp.py
name: external_spec_selected
repository_name: AaltoSciComp/spack
lang: python
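For orientation, the packages.yaml entry this method indexes has roughly the following shape once loaded; the values here are illustrative, not from any real site configuration:

    packages_yaml = {
        'openmpi': {
            'externals': [
                {'spec': 'openmpi@4.1.1',
                 'prefix': '/usr',
                 'modules': ['openmpi/4.1.1'],
                 'extra_attributes': {}},
            ],
        },
    }

    spec_info = packages_yaml['openmpi']['externals'][0]
    print(spec_info.get('prefix', None))    # /usr
    print(spec_info.get('modules', None))   # ['openmpi/4.1.1']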
body:
    def reorder_flags(self):
        """Order compiler flags on specs in predefined order.

        We order flags so that any node's flags will take priority over
        those of its dependents. That is, the deepest node in the DAG's
        flags will appear last on the compile line, in the order they
        were specified.

        The solver determines which flags are on nodes; this routine
        imposes order afterwards.
        """
        # nodes with no flags get flag order from the compiler config
        compilers = dict((c.spec, c) for c in all_compilers_in_config())
        for pkg in self._flag_compiler_defaults:
            spec = self._specs[pkg]
            compiler_flags = compilers[spec.compiler].flags
            check_same_flags(spec.compiler_flags, compiler_flags)
            spec.compiler_flags.update(compiler_flags)

        # index the specs that were given on the command line
        cmd_specs = dict(
            (s.name, s)
            for spec in self._command_line_specs
            for s in spec.traverse())
        for pkg, sources in self._flag_sources.items():
            spec = self._specs[pkg]

            # order flag sources by their position in the DAG
            order = [s.name for s in spec.traverse(order='post', direction='parents')]
            sorted_sources = sorted(sources, key=lambda s: order.index(s))

            # merge flags from each source in order
            flags = collections.defaultdict(lambda: [])
            for source_name in sorted_sources:
                source = cmd_specs[source_name]
                for name, flag_list in source.compiler_flags.items():
                    extend_flag_list(flags[name], flag_list)

            check_same_flags(spec.compiler_flags, flags)
            spec.compiler_flags.update(flags)
body_hash: -8,122,179,783,565,153,000
docstring: Order compiler flags on specs in predefined order. We order flags so that any node's flags will take priority over those of its dependents. That is, the deepest node in the DAG's flags will appear last on the compile line, in the order they were specified. The solver determines which flags are on nodes; this routine imposes order afterwards.
path: lib/spack/spack/solver/asp.py
name: reorder_flags
repository_name: AaltoSciComp/spack
lang: python
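Merging flag lists from several sources in a fixed order reduces to an ordered dict-of-lists merge, where flags from later sources land later on the compile line. A minimal sketch with plain dicts; `extend_flag_list` in the original presumably avoids duplicates, which a plain extend does not:

    import collections

    sources = [
        {'cflags': ['-O2']},                      # earlier source
        {'cflags': ['-O3'], 'ldflags': ['-lm']},  # later source, appears last
    ]

    flags = collections.defaultdict(list)
    for source in sources:
        for name, flag_list in source.items():
            flags[name].extend(flag_list)

    print(dict(flags))   # {'cflags': ['-O2', '-O3'], 'ldflags': ['-lm']}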
body:
    def test_homepage(self):
        """Make sure that the homepage works fine"""
        response = self.client.get(url_for('home_view'))
        assert b'Add a feature request:' in response.data
        assert b'List feature requests:' in response.data
body_hash: -1,474,053,232,443,634,700
docstring: Make sure that the homepage works fine
path: test_core.py
name: test_homepage
repository_name: spapas/feature-requests
lang: python
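These tests reference self.client and call url_for() outside any request, which only works if the surrounding harness pushes an application context and sets SERVER_NAME (flask_testing.TestCase performs this kind of setup). A minimal hand-rolled equivalent; the real suite would build the application and its SQLAlchemy db from the project's factory, which is omitted here:

    import unittest
    from flask import Flask

    class BaseTestCase(unittest.TestCase):
        def setUp(self):
            self.app = Flask(__name__)
            self.app.config.update(
                TESTING=True,
                SERVER_NAME='localhost',   # lets url_for() build URLs in tests
            )
            self.ctx = self.app.app_context()
            self.ctx.push()
            self.client = self.app.test_client()

        def tearDown(self):
            self.ctx.pop()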
body:
    def test_empty_listpage(self):
        """Make sure that the empty list page works fine"""
        response = self.client.get(url_for('home_view'))
        response = self.client.get(url_for('feature_requests_view'))
        assert b'No feature requests found.' in response.data
body_hash: 9,047,264,463,829,702,000
docstring: Make sure that the empty list page works fine
path: test_core.py
name: test_empty_listpage
repository_name: spapas/feature-requests
lang: python
body:
    def test_non_empty_listpage(self):
        """Make sure that the list page can display multiple entries"""
        fr = FeatureRequest(title='Title', description='Desc', client=None,
                            client_priority=1, target_date=datetime.date(2018, 1, 1),
                            product_area=None)
        db.session.add(fr)
        fr2 = FeatureRequest(title='Title', description='Desc', client=None,
                             client_priority=1, target_date=datetime.date(2018, 1, 1),
                             product_area=None)
        db.session.add(fr2)
        db.session.commit()
        response = self.client.get(url_for('feature_requests_view'))
        assert response.data.count(b'Update') == 2
        assert response.data.count(b'Delete') == 2
        assert url_for('feature_requests_update',
                       feature_request_id=1).encode() in response.data
        assert url_for('feature_requests_delete',
                       feature_request_id=1).encode() in response.data
body_hash: 2,469,986,225,949,389,300
docstring: Make sure that the list page can display multiple entries
path: test_core.py
name: test_non_empty_listpage
repository_name: spapas/feature-requests
lang: python
body:
    def test_createpage(self):
        """Make sure that the create page works"""
        response = self.client.get(url_for('feature_requests_create'))
        assert b'Add Feature Request' in response.data
        assert b"<form method='POST'>" in response.data
        assert b'form-group has-error' not in response.data
body_hash: -5,400,908,960,627,150,000
docstring: Make sure that the create page works
path: test_core.py
name: test_createpage
repository_name: spapas/feature-requests
lang: python
body:
    def test_createpage_error(self):
        """The create page should return an error when POST data is missing"""
        response = self.client.post(
            url_for('feature_requests_create'),
            data=dict(title='Title', description='Desc', client=None,
                      client_priority=1, target_date=datetime.date(2018, 1, 1),
                      product_area=None))
        assert b'form-group has-error' in response.data
        assert b"<form method='POST'>" in response.data
        assert response.status == '200 OK'
body_hash: -6,914,650,206,671,616,000
docstring: The create page should return an error when POST data is missing
path: test_core.py
name: test_createpage_error
repository_name: spapas/feature-requests
lang: python
body:
    def test_createpage_success(self):
        """The create page should return a 302 FOUND redirect when an entry
        is submitted"""
        client = Client('C1')
        db.session.add(client)
        product_area = ProductArea('PA1')
        db.session.add(product_area)
        db.session.commit()
        response = self.client.post(
            url_for('feature_requests_create'),
            data=dict(title='Title', description='Desc', client=client.id,
                      client_priority=1, target_date=datetime.date(2018, 1, 1),
                      product_area=product_area.id))
        assert response.status == '302 FOUND'
body_hash: -6,706,679,454,331,955,000
docstring: The create page should return a 302 FOUND redirect when an entry is submitted
path: test_core.py
name: test_createpage_success
repository_name: spapas/feature-requests
lang: python
body:
    def test_createpage_success_flash(self):
        """The create page should display the proper flash message when an
        object is created"""
        self.add_other_objects()
        response = self.client.post(
            url_for('feature_requests_create'),
            data=dict(title='Title', description='Desc', client=self.cl.id,
                      client_priority=1, target_date=datetime.date(2018, 1, 1),
                      product_area=self.pa.id),
            follow_redirects=True)
        assert response.status == '200 OK'
        assert b'Feature request created!' in response.data
        assert response.data.count(b'Update') == 1
        assert response.data.count(b'Delete') == 1
        assert self.cl.name.encode() in response.data
        assert self.pa.name.encode() in response.data
body_hash: -5,260,746,790,233,260,000
docstring: The create page should display the proper flash message when an object is created
path: test_core.py
name: test_createpage_success_flash
repository_name: spapas/feature-requests
lang: python
body:
    def test_createpage_change_priorities(self):
        """The create page should change the priorities of the other objects
        when a new one has the same priority and client"""
        self.add_other_objects()
        fr = FeatureRequest(title='Title', description='Desc', client=self.cl,
                            client_priority=1, target_date=datetime.date(2018, 1, 1),
                            product_area=self.pa)
        db.session.add(fr)
        db.session.commit()
        assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 1
        response = self.client.post(
            url_for('feature_requests_create'),
            data=dict(title='Title', description='Desc', client=self.cl.id,
                      client_priority=1, target_date=datetime.date(2018, 1, 1),
                      product_area=self.pa.id),
            follow_redirects=True)
        assert response.status == '200 OK'
        assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 2
body_hash: -4,539,755,280,102,366,000
docstring: The create page should change the priorities of the other objects when a new one has the same priority and client
path: test_core.py
name: test_createpage_change_priorities
repository_name: spapas/feature-requests
lang: python
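The assertion at the end implies the create view shifts colliding priorities instead of rejecting them. One plausible implementation consistent with what the tests check, not the repository's actual code:

    def shift_priorities(client, priority):
        """Make room for a new request: bump every request of this client
        whose priority is at or above the incoming one."""
        (FeatureRequest.query
         .filter(FeatureRequest.client == client,
                 FeatureRequest.client_priority >= priority)
         .update({FeatureRequest.client_priority:
                  FeatureRequest.client_priority + 1},
                 synchronize_session=False))
        db.session.commit()

Called before inserting the new FeatureRequest, this produces exactly the 1 -> 2 bump the test asserts.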
body:
    def add_feature_request(self):
        """A reusable method for this class"""
        self.fr = FeatureRequest(title='Title', description='Desc', client=None,
                                 client_priority=1,
                                 target_date=datetime.date(2018, 1, 1),
                                 product_area=None)
        db.session.add(self.fr)
        db.session.commit()
body_hash: 5,368,258,466,379,493,000
docstring: A reusable method for this class
path: test_core.py
name: add_feature_request
repository_name: spapas/feature-requests
lang: python
body:
    def test_updatepage_not_found(self):
        """Make sure that the update page returns 404 when the obj is not found"""
        response = self.client.get(
            url_for('feature_requests_update', feature_request_id=1232))
        assert response.status == '404 NOT FOUND'
body_hash: 3,947,901,401,632,254,500
docstring: Make sure that the update page returns 404 when the obj is not found
path: test_core.py
name: test_updatepage_not_found
repository_name: spapas/feature-requests
lang: python
body:
    def test_updatepage_ok(self):
        """Make sure that the update page is displayed properly along with
        the object"""
        self.add_feature_request()
        response = self.client.get(
            url_for('feature_requests_update', feature_request_id=self.fr.id))
        assert 'Edit Feature Request: {0}'.format(self.fr.id).encode() in response.data
        assert b"<form method='POST'>" in response.data
        assert b'form-group has-error' not in response.data
        assert self.fr.title.encode() in response.data
        assert self.fr.description.encode() in response.data
body_hash: 7,812,104,558,715,032,000
docstring: Make sure that the update page is displayed properly along with the object
path: test_core.py
name: test_updatepage_ok
repository_name: spapas/feature-requests
lang: python
body:
    def test_updatepage_error(self):
        """The update page should return an error when data is missing"""
        self.add_feature_request()
        response = self.client.post(
            url_for('feature_requests_update', feature_request_id=self.fr.id),
            data=dict(title='Title', description='Desc', client=None,
                      client_priority=1, target_date=datetime.date(2018, 1, 1),
                      product_area=None))
        assert b'form-group has-error' in response.data
        assert b"<form method='POST'>" in response.data
        assert response.status == '200 OK'
body_hash: 5,283,285,016,547,896,000
docstring: The update page should return an error when data is missing
path: test_core.py
name: test_updatepage_error
repository_name: spapas/feature-requests
lang: python
body:
    def test_createpage_success(self):
        """The update page should properly update the object"""
        self.add_feature_request()
        self.add_other_objects()
        newtitle = 'The new title'
        response = self.client.post(
            url_for('feature_requests_update', feature_request_id=self.fr.id),
            data=dict(title=newtitle, description='Desc', client=self.cl.id,
                      client_priority=1, target_date=datetime.date(2018, 1, 1),
                      product_area=self.pa.id))
        assert response.status == '302 FOUND'
        assert FeatureRequest.query.filter_by(id=self.fr.id).first().title == newtitle
body_hash: -5,652,872,008,606,689,000
docstring: The update page should properly update the object
path: test_core.py
name: test_createpage_success
repository_name: spapas/feature-requests
lang: python
body:
    def test_updatepage_success_flash(self):
        """Make sure that the flash message is displayed correctly and we are
        redirected to the list view"""
        self.add_feature_request()
        self.add_other_objects()
        response = self.client.post(
            url_for('feature_requests_update', feature_request_id=self.fr.id),
            data=dict(title='Title', description='Desc', client=self.cl.id,
                      client_priority=1, target_date=datetime.date(2018, 1, 1),
                      product_area=self.pa.id),
            follow_redirects=True)
        assert response.status == '200 OK'
        assert b'Feature request updated!' in response.data
        assert response.data.count(b'Update') == 1
        assert response.data.count(b'Delete') == 1
        assert self.cl.name.encode() in response.data
        assert self.pa.name.encode() in response.data
body_hash: 6,706,171,406,238,756,000
docstring: Make sure that the flash message is displayed correctly and we are redirected to the list view
path: test_core.py
name: test_updatepage_success_flash
repository_name: spapas/feature-requests
lang: python
body:
    def test_updatepage_change_priorities(self):
        """The update page should also update the client priorities"""
        self.add_other_objects()
        fr = FeatureRequest(title='Title', description='Desc', client=self.cl,
                            client_priority=1, target_date=datetime.date(2018, 1, 1),
                            product_area=self.pa)
        db.session.add(fr)
        fr2 = FeatureRequest(title='Title', description='Desc', client=self.cl,
                             client_priority=2, target_date=datetime.date(2018, 1, 1),
                             product_area=self.pa)
        db.session.add(fr2)
        db.session.commit()
        assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 1
        assert FeatureRequest.query.filter_by(id=fr2.id).first().client_priority == 2
        response = self.client.post(
            url_for('feature_requests_update', feature_request_id=2),
            data=dict(title='Title', description='Desc', client=self.cl.id,
                      client_priority=1, target_date=datetime.date(2018, 1, 1),
                      product_area=self.pa.id),
            follow_redirects=True)
        assert response.status == '200 OK'
        assert FeatureRequest.query.filter_by(id=fr.id).first().client_priority == 2
        assert FeatureRequest.query.filter_by(id=fr2.id).first().client_priority == 1
body_hash: 5,855,504,665,284,687,000
docstring: The update page should also update the client priorities
path: test_core.py
name: test_updatepage_change_priorities
repository_name: spapas/feature-requests
lang: python
body:
    def add_feature_request(self):
        """A reusable method for this class"""
        self.fr = FeatureRequest(title='Title', description='Desc', client=None,
                                 client_priority=1,
                                 target_date=datetime.date(2018, 1, 1),
                                 product_area=None)
        db.session.add(self.fr)
        db.session.commit()
body_hash: 5,368,258,466,379,493,000
docstring: A reusable method for this class
path: test_core.py
name: add_feature_request
repository_name: spapas/feature-requests
lang: python
body:
    def test_deletepdatepage_only_post(self):
        """Make sure that the delete page returns 405 when requested with get"""
        response = self.client.get(
            url_for('feature_requests_delete', feature_request_id=1232))
        assert response.status == '405 METHOD NOT ALLOWED'
body_hash: 3,604,485,937,408,843,300
docstring: Make sure that the delete page returns 405 when requested with get
path: test_core.py
name: test_deletepdatepage_only_post
repository_name: spapas/feature-requests
lang: python
body:
    def test_deletepdatepage_not_found(self):
        """Make sure that the delete page returns 404 when the obj is not found"""
        response = self.client.post(
            url_for('feature_requests_delete', feature_request_id=1232))
        assert response.status == '404 NOT FOUND'
body_hash: 6,247,266,416,346,871,000
docstring: Make sure that the delete page returns 404 when the obj is not found
path: test_core.py
name: test_deletepdatepage_not_found
repository_name: spapas/feature-requests
lang: python
body:
    def test_deletepage_ok(self):
        """Make sure that the delete page deletes the obj"""
        self.add_feature_request()
        assert db.session.query(
            FeatureRequest.query.filter().exists()).scalar() is True
        response = self.client.post(
            url_for('feature_requests_delete', feature_request_id=self.fr.id))
        assert db.session.query(
            FeatureRequest.query.filter().exists()).scalar() is False
        assert response.status == '302 FOUND'
body_hash: -1,981,492,078,870,599,200
docstring: Make sure that the delete page deletes the obj
path: test_core.py
name: test_deletepage_ok
repository_name: spapas/feature-requests
lang: python
body:
    def test_deletepage_flash_message(self):
        """Make sure that the delete page shows the proper flash message"""
        self.add_feature_request()
        response = self.client.post(
            url_for('feature_requests_delete', feature_request_id=self.fr.id),
            follow_redirects=True)
        assert response.status == '200 OK'
        assert b'Feature request deleted!' in response.data
        assert response.data.count(b'Update') == 0
        assert response.data.count(b'Delete') == 0
body_hash: 2,915,011,334,631,146,000
docstring: Make sure that the delete page shows the proper flash message
path: test_core.py
name: test_deletepage_flash_message
repository_name: spapas/feature-requests
lang: python
body:
    def __init__(self, parent, settings):
        """
        Constructs a Devices frame
        :param parent: Parent Frame
        :param settings: settings class
        """
        self.settings = settings
        ttk.Frame.__init__(self, parent, relief='raised', borderwidth=2)
        self.content = ttk.Frame(self, borderwidth=2)
        self.content.pack(expand=True, fill=tk.X, side='top', anchor='n')
        self.devices = []
        # 'Apparaten' is Dutch for 'Devices'
        label1 = tk.Label(self.content, text='Apparaten',
                          font=('Verdana', 14), relief='groove')
        label1.pack(expand=True, fill=tk.X, side='top')
        self.render_devices()
body_hash: -4,057,271,277,834,414,600
docstring: Constructs a Devices frame :param parent: Parent Frame :param settings: settings class
path: dashboard/entities/Devices.py
name: __init__
repository_name: Hexagoons/GUI-Arduino-Weather-Station
lang: python
def decode_one_batch(params: AttributeDict, model: nn.Module, HLG: Optional[k2.Fsa], H: Optional[k2.Fsa], bpe_model: Optional[spm.SentencePieceProcessor], batch: dict, word_table: k2.SymbolTable, sos_id: int, eos_id: int, G: Optional[k2.Fsa]=None) -> Dict[(str, List[List[str]])]: 'Decode one batch and return the result in a dict. The dict has the\n following format:\n\n - key: It indicates the setting used for decoding. For example,\n if no rescoring is used, the key is the string `no_rescore`.\n If LM rescoring is used, the key is the string `lm_scale_xxx`,\n where `xxx` is the value of `lm_scale`. An example key is\n `lm_scale_0.7`\n - value: It contains the decoding result. `len(value)` equals to\n batch size. `value[i]` is the decoding result for the i-th\n utterance in the given batch.\n Args:\n params:\n It\'s the return value of :func:`get_params`.\n\n - params.method is "1best", it uses 1best decoding without LM rescoring.\n - params.method is "nbest", it uses nbest decoding without LM rescoring.\n - params.method is "nbest-rescoring", it uses nbest LM rescoring.\n - params.method is "whole-lattice-rescoring", it uses whole lattice LM\n rescoring.\n\n model:\n The neural model.\n HLG:\n The decoding graph. Used only when params.method is NOT ctc-decoding.\n H:\n The ctc topo. Used only when params.method is ctc-decoding.\n bpe_model:\n The BPE model. Used only when params.method is ctc-decoding.\n batch:\n It is the return value from iterating\n `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation\n for the format of the `batch`.\n word_table:\n The word symbol table.\n sos_id:\n The token ID of the SOS.\n eos_id:\n The token ID of the EOS.\n G:\n An LM. It is not None when params.method is "nbest-rescoring"\n or "whole-lattice-rescoring". In general, the G in HLG\n is a 3-gram LM, while this G is a 4-gram LM.\n Returns:\n Return the decoding result. 
See above description for the format of\n the returned dict.\n ' if (HLG is not None): device = HLG.device else: device = H.device feature = batch['inputs'] assert (feature.ndim == 3) feature = feature.to(device) supervisions = batch['supervisions'] (nnet_output, memory, memory_key_padding_mask) = model(feature, supervisions) supervision_segments = torch.stack((supervisions['sequence_idx'], (supervisions['start_frame'] // params.subsampling_factor), (supervisions['num_frames'] // params.subsampling_factor)), 1).to(torch.int32) if (H is None): assert (HLG is not None) decoding_graph = HLG else: assert (HLG is None) assert (bpe_model is not None) decoding_graph = H lattice = get_lattice(nnet_output=nnet_output, decoding_graph=decoding_graph, supervision_segments=supervision_segments, search_beam=params.search_beam, output_beam=params.output_beam, min_active_states=params.min_active_states, max_active_states=params.max_active_states, subsampling_factor=params.subsampling_factor) if (params.method == 'ctc-decoding'): best_path = one_best_decoding(lattice=lattice, use_double_scores=params.use_double_scores) token_ids = get_texts(best_path) hyps = bpe_model.decode(token_ids) hyps = [s.split() for s in hyps] key = 'ctc-decoding' return {key: hyps} if (params.method == 'nbest-oracle'): best_path = nbest_oracle(lattice=lattice, num_paths=params.num_paths, ref_texts=supervisions['text'], word_table=word_table, nbest_scale=params.nbest_scale, oov='<UNK>') hyps = get_texts(best_path) hyps = [[word_table[i] for i in ids] for ids in hyps] key = f'oracle_{params.num_paths}_nbest_scale_{params.nbest_scale}' return {key: hyps} if (params.method in ['1best', 'nbest']): if (params.method == '1best'): best_path = one_best_decoding(lattice=lattice, use_double_scores=params.use_double_scores) key = 'no_rescore' else: best_path = nbest_decoding(lattice=lattice, num_paths=params.num_paths, use_double_scores=params.use_double_scores, nbest_scale=params.nbest_scale) key = f'no_rescore-nbest-scale-{params.nbest_scale}-{params.num_paths}' hyps = get_texts(best_path) hyps = [[word_table[i] for i in ids] for ids in hyps] return {key: hyps} assert (params.method in ['nbest-rescoring', 'whole-lattice-rescoring', 'attention-decoder']) lm_scale_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] lm_scale_list += [0.8, 0.9, 1.0, 1.1, 1.2, 1.3] lm_scale_list += [1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] if (params.method == 'nbest-rescoring'): best_path_dict = rescore_with_n_best_list(lattice=lattice, G=G, num_paths=params.num_paths, lm_scale_list=lm_scale_list, nbest_scale=params.nbest_scale) elif (params.method == 'whole-lattice-rescoring'): best_path_dict = rescore_with_whole_lattice(lattice=lattice, G_with_epsilon_loops=G, lm_scale_list=lm_scale_list) elif (params.method == 'attention-decoder'): rescored_lattice = rescore_with_whole_lattice(lattice=lattice, G_with_epsilon_loops=G, lm_scale_list=None) best_path_dict = rescore_with_attention_decoder(lattice=rescored_lattice, num_paths=params.num_paths, model=model, memory=memory, memory_key_padding_mask=memory_key_padding_mask, sos_id=sos_id, eos_id=eos_id, nbest_scale=params.nbest_scale) else: assert False, f'Unsupported decoding method: {params.method}' ans = dict() if (best_path_dict is not None): for (lm_scale_str, best_path) in best_path_dict.items(): hyps = get_texts(best_path) hyps = [[word_table[i] for i in ids] for ids in hyps] ans[lm_scale_str] = hyps else: for lm_scale in lm_scale_list: ans['empty'] = [([] * lattice.shape[0])] return ans
-3,078,755,603,851,290,600
Decode one batch and return the result in a dict. The dict has the following format: - key: It indicates the setting used for decoding. For example, if no rescoring is used, the key is the string `no_rescore`. If LM rescoring is used, the key is the string `lm_scale_xxx`, where `xxx` is the value of `lm_scale`. An example key is `lm_scale_0.7` - value: It contains the decoding result. `len(value)` equals the batch size. `value[i]` is the decoding result for the i-th utterance in the given batch. Args: params: It's the return value of :func:`get_params`. - params.method is "1best", it uses 1best decoding without LM rescoring. - params.method is "nbest", it uses nbest decoding without LM rescoring. - params.method is "nbest-rescoring", it uses nbest LM rescoring. - params.method is "whole-lattice-rescoring", it uses whole lattice LM rescoring. model: The neural model. HLG: The decoding graph. Used only when params.method is NOT ctc-decoding. H: The ctc topo. Used only when params.method is ctc-decoding. bpe_model: The BPE model. Used only when params.method is ctc-decoding. batch: It is the return value from iterating `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation for the format of the `batch`. word_table: The word symbol table. sos_id: The token ID of the SOS. eos_id: The token ID of the EOS. G: An LM. It is not None when params.method is "nbest-rescoring" or "whole-lattice-rescoring". In general, the G in HLG is a 3-gram LM, while this G is a 4-gram LM. Returns: Return the decoding result. See above description for the format of the returned dict.
egs/librispeech/ASR/conformer_mmi/decode.py
decode_one_batch
aarora8/icefall
python
def decode_one_batch(params: AttributeDict, model: nn.Module, HLG: Optional[k2.Fsa], H: Optional[k2.Fsa], bpe_model: Optional[spm.SentencePieceProcessor], batch: dict, word_table: k2.SymbolTable, sos_id: int, eos_id: int, G: Optional[k2.Fsa]=None) -> Dict[(str, List[List[str]])]: 'Decode one batch and return the result in a dict. The dict has the\n following format:\n\n - key: It indicates the setting used for decoding. For example,\n if no rescoring is used, the key is the string `no_rescore`.\n If LM rescoring is used, the key is the string `lm_scale_xxx`,\n where `xxx` is the value of `lm_scale`. An example key is\n `lm_scale_0.7`\n - value: It contains the decoding result. `len(value)` equals to\n batch size. `value[i]` is the decoding result for the i-th\n utterance in the given batch.\n Args:\n params:\n It\'s the return value of :func:`get_params`.\n\n - params.method is "1best", it uses 1best decoding without LM rescoring.\n - params.method is "nbest", it uses nbest decoding without LM rescoring.\n - params.method is "nbest-rescoring", it uses nbest LM rescoring.\n - params.method is "whole-lattice-rescoring", it uses whole lattice LM\n rescoring.\n\n model:\n The neural model.\n HLG:\n The decoding graph. Used only when params.method is NOT ctc-decoding.\n H:\n The ctc topo. Used only when params.method is ctc-decoding.\n bpe_model:\n The BPE model. Used only when params.method is ctc-decoding.\n batch:\n It is the return value from iterating\n `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation\n for the format of the `batch`.\n word_table:\n The word symbol table.\n sos_id:\n The token ID of the SOS.\n eos_id:\n The token ID of the EOS.\n G:\n An LM. It is not None when params.method is "nbest-rescoring"\n or "whole-lattice-rescoring". In general, the G in HLG\n is a 3-gram LM, while this G is a 4-gram LM.\n Returns:\n Return the decoding result. See above description for the format of\n the returned dict.\n ' if (HLG is not None): device = HLG.device else: device = H.device feature = batch['inputs'] assert (feature.ndim == 3) feature = feature.to(device) supervisions = batch['supervisions'] (nnet_output, memory, memory_key_padding_mask) = model(feature, supervisions) supervision_segments = torch.stack((supervisions['sequence_idx'], (supervisions['start_frame'] // params.subsampling_factor), (supervisions['num_frames'] // params.subsampling_factor)), 1).to(torch.int32) if (H is None): assert (HLG is not None) decoding_graph = HLG else: assert (HLG is None) assert (bpe_model is not None) decoding_graph = H lattice = get_lattice(nnet_output=nnet_output, decoding_graph=decoding_graph, supervision_segments=supervision_segments, search_beam=params.search_beam, output_beam=params.output_beam, min_active_states=params.min_active_states, max_active_states=params.max_active_states, subsampling_factor=params.subsampling_factor) if (params.method == 'ctc-decoding'): best_path = one_best_decoding(lattice=lattice, use_double_scores=params.use_double_scores) token_ids = get_texts(best_path) hyps = bpe_model.decode(token_ids) hyps = [s.split() for s in hyps] key = 'ctc-decoding' return {key: hyps} if (params.method == 'nbest-oracle'): best_path = nbest_oracle(lattice=lattice, num_paths=params.num_paths, ref_texts=supervisions['text'], word_table=word_table, nbest_scale=params.nbest_scale, oov='<UNK>') hyps = get_texts(best_path) hyps = [[word_table[i] for i in ids] for ids in hyps] key = f'oracle_{params.num_paths}_nbest_scale_{params.nbest_scale}' return {key: hyps} if (params.method in ['1best', 'nbest']): if (params.method == '1best'): best_path = one_best_decoding(lattice=lattice, use_double_scores=params.use_double_scores) key = 'no_rescore' else: best_path = nbest_decoding(lattice=lattice, num_paths=params.num_paths, use_double_scores=params.use_double_scores, nbest_scale=params.nbest_scale) key = f'no_rescore-nbest-scale-{params.nbest_scale}-{params.num_paths}' hyps = get_texts(best_path) hyps = [[word_table[i] for i in ids] for ids in hyps] return {key: hyps} assert (params.method in ['nbest-rescoring', 'whole-lattice-rescoring', 'attention-decoder']) lm_scale_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] lm_scale_list += [0.8, 0.9, 1.0, 1.1, 1.2, 1.3] lm_scale_list += [1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] if (params.method == 'nbest-rescoring'): best_path_dict = rescore_with_n_best_list(lattice=lattice, G=G, num_paths=params.num_paths, lm_scale_list=lm_scale_list, nbest_scale=params.nbest_scale) elif (params.method == 'whole-lattice-rescoring'): best_path_dict = rescore_with_whole_lattice(lattice=lattice, G_with_epsilon_loops=G, lm_scale_list=lm_scale_list) elif (params.method == 'attention-decoder'): rescored_lattice = rescore_with_whole_lattice(lattice=lattice, G_with_epsilon_loops=G, lm_scale_list=None) best_path_dict = rescore_with_attention_decoder(lattice=rescored_lattice, num_paths=params.num_paths, model=model, memory=memory, memory_key_padding_mask=memory_key_padding_mask, sos_id=sos_id, eos_id=eos_id, nbest_scale=params.nbest_scale) else: assert False, f'Unsupported decoding method: {params.method}' ans = dict() if (best_path_dict is not None): for (lm_scale_str, best_path) in best_path_dict.items(): hyps = get_texts(best_path) hyps = [[word_table[i] for i in ids] for ids in hyps] ans[lm_scale_str] = hyps else: ans['empty'] = [[] for _ in range(lattice.shape[0])] return ans
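A quick, hypothetical illustration of the dict format this function returns; the key name and the words below are invented, and no model, lattice, or k2 is involved:

hyps_dict = {'lm_scale_0.7': [['HELLO', 'WORLD'], ['GOOD', 'MORNING']]}
for key, hyps in hyps_dict.items():
    assert len(hyps) == 2  # len(value) equals the batch size
    print(key, [' '.join(words) for words in hyps])  # one word list per utterance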
def decode_dataset(dl: torch.utils.data.DataLoader, params: AttributeDict, model: nn.Module, HLG: Optional[k2.Fsa], H: Optional[k2.Fsa], bpe_model: Optional[spm.SentencePieceProcessor], word_table: k2.SymbolTable, sos_id: int, eos_id: int, G: Optional[k2.Fsa]=None) -> Dict[(str, List[Tuple[(List[str], List[str])]])]: 'Decode dataset.\n\n Args:\n dl:\n PyTorch\'s dataloader containing the dataset to decode.\n params:\n It is returned by :func:`get_params`.\n model:\n The neural model.\n HLG:\n The decoding graph. Used only when params.method is NOT ctc-decoding.\n H:\n The ctc topo. Used only when params.method is ctc-decoding.\n bpe_model:\n The BPE model. Used only when params.method is ctc-decoding.\n word_table:\n It is the word symbol table.\n sos_id:\n The token ID for SOS.\n eos_id:\n The token ID for EOS.\n G:\n An LM. It is not None when params.method is "nbest-rescoring"\n or "whole-lattice-rescoring". In general, the G in HLG\n is a 3-gram LM, while this G is a 4-gram LM.\n Returns:\n Return a dict, whose key may be "no-rescore" if no LM rescoring\n is used, or it may be "lm_scale_0.7" if LM rescoring is used.\n Its value is a list of tuples. Each tuple contains two elements:\n The first is the reference transcript, and the second is the\n predicted result.\n ' results = [] num_cuts = 0 try: num_batches = len(dl) except TypeError: num_batches = '?' results = defaultdict(list) for (batch_idx, batch) in enumerate(dl): texts = batch['supervisions']['text'] hyps_dict = decode_one_batch(params=params, model=model, HLG=HLG, H=H, bpe_model=bpe_model, batch=batch, word_table=word_table, G=G, sos_id=sos_id, eos_id=eos_id) for (lm_scale, hyps) in hyps_dict.items(): this_batch = [] assert (len(hyps) == len(texts)) for (hyp_words, ref_text) in zip(hyps, texts): ref_words = ref_text.split() this_batch.append((ref_words, hyp_words)) results[lm_scale].extend(this_batch) num_cuts += len(batch['supervisions']['text']) if ((batch_idx % 100) == 0): batch_str = f'{batch_idx}/{num_batches}' logging.info(f'batch {batch_str}, cuts processed until now is {num_cuts}') return results
-7,345,658,783,875,339,000
Decode dataset. Args: dl: PyTorch's dataloader containing the dataset to decode. params: It is returned by :func:`get_params`. model: The neural model. HLG: The decoding graph. Used only when params.method is NOT ctc-decoding. H: The ctc topo. Used only when params.method is ctc-decoding. bpe_model: The BPE model. Used only when params.method is ctc-decoding. word_table: It is the word symbol table. sos_id: The token ID for SOS. eos_id: The token ID for EOS. G: An LM. It is not None when params.method is "nbest-rescoring" or "whole-lattice-rescoring". In general, the G in HLG is a 3-gram LM, while this G is a 4-gram LM. Returns: Return a dict, whose key may be "no-rescore" if no LM rescoring is used, or it may be "lm_scale_0.7" if LM rescoring is used. Its value is a list of tuples. Each tuple contains two elements: The first is the reference transcript, and the second is the predicted result.
egs/librispeech/ASR/conformer_mmi/decode.py
decode_dataset
aarora8/icefall
python
def decode_dataset(dl: torch.utils.data.DataLoader, params: AttributeDict, model: nn.Module, HLG: Optional[k2.Fsa], H: Optional[k2.Fsa], bpe_model: Optional[spm.SentencePieceProcessor], word_table: k2.SymbolTable, sos_id: int, eos_id: int, G: Optional[k2.Fsa]=None) -> Dict[(str, List[Tuple[(List[str], List[str])]])]: 'Decode dataset.\n\n Args:\n dl:\n PyTorch\'s dataloader containing the dataset to decode.\n params:\n It is returned by :func:`get_params`.\n model:\n The neural model.\n HLG:\n The decoding graph. Used only when params.method is NOT ctc-decoding.\n H:\n The ctc topo. Used only when params.method is ctc-decoding.\n bpe_model:\n The BPE model. Used only when params.method is ctc-decoding.\n word_table:\n It is the word symbol table.\n sos_id:\n The token ID for SOS.\n eos_id:\n The token ID for EOS.\n G:\n An LM. It is not None when params.method is "nbest-rescoring"\n or "whole-lattice-rescoring". In general, the G in HLG\n is a 3-gram LM, while this G is a 4-gram LM.\n Returns:\n Return a dict, whose key may be "no-rescore" if no LM rescoring\n is used, or it may be "lm_scale_0.7" if LM rescoring is used.\n Its value is a list of tuples. Each tuple contains two elements:\n The first is the reference transcript, and the second is the\n predicted result.\n ' results = [] num_cuts = 0 try: num_batches = len(dl) except TypeError: num_batches = '?' results = defaultdict(list) for (batch_idx, batch) in enumerate(dl): texts = batch['supervisions']['text'] hyps_dict = decode_one_batch(params=params, model=model, HLG=HLG, H=H, bpe_model=bpe_model, batch=batch, word_table=word_table, G=G, sos_id=sos_id, eos_id=eos_id) for (lm_scale, hyps) in hyps_dict.items(): this_batch = [] assert (len(hyps) == len(texts)) for (hyp_words, ref_text) in zip(hyps, texts): ref_words = ref_text.split() this_batch.append((ref_words, hyp_words)) results[lm_scale].extend(this_batch) num_cuts += len(batch['supervisions']['text']) if ((batch_idx % 100) == 0): batch_str = f'{batch_idx}/{num_batches}' logging.info(f'batch {batch_str}, cuts processed until now is {num_cuts}') return results
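The returned dict maps each decoding setting to (ref_words, hyp_words) pairs; a toy sketch of consuming it (the pairs are invented, and the error count below is substitution-only, not a real edit-distance WER):

results = {'no_rescore': [(['A', 'B'], ['A', 'C']), (['D'], ['D'])]}
for key, pairs in results.items():
    subs = sum(r != h for ref, hyp in pairs for r, h in zip(ref, hyp))
    print(key, 'substitution-only errors:', subs)  # no_rescore substitution-only errors: 1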
@staticmethod def createSetsFromStrings(structureString='', valueString=None): '\n Construct a list of GeneSet objects based on the given strings.\n The first string, structureString, determines the number of GeneSets to create.\n An empty string will return an empty list.\n A string containing anything else will return one or more subsets.\n The number of subsets is determined by the number of times the separator string " or " occurs in the\n structureString. For example, "(setA) or (setB) or (setC) or geneD or geneE or (setF)" will create\n six subsets. "(setA)", "(setC)", "geneD", etc are substrings that declare the contents of each set.\n There are two accepted ways to format the substrings:\n Method #1:\n substrings example: "aName=[aLow:aBest:aHigh] and bName=[bLow:bBest:bHigh] and cName=value and dName"\n valueString=None\n In this example, four NamedRangedNumber objects will be created in total:\n aName and bName specify rangedNumber values for NamedRangedNumber, cName specifies just one floating point\n number that is converted to a rangedNumber, and dName crates a NamedRangedNumber with the value set to None.\n valueString can be left out entirely.\n Method #2:\n substrings example: "aName and dName and cName and qName"\n valueString example: "aName=[aLow:aBest:aHigh] bName=[bLow:bBest:bHigh] cName=value dName=value fName=value"\n In this example, four NamedRangedNumber objects will be created, but only two of them will be assigned values\n (the other two will have values of None). This happens because the structureString declares what items are in\n the set, while the valueString only assigns values. If a value is given in the second string for a name that is\n not listed in the first, that value is ignored. No item is created for it.\n While it is possible to supply a mixture of methods 1 and 2, it is not recommended practice. Values assigned via\n method 2 take precedence over values assigned via method 1, even if the value assigned is "=None".\n Note that Gene objects are re-used from one set to the next. That is, if the same name is mentioned in two\n different substrings, only one Gene object will be created but it will be placed in two subsets.\n ' givenValues = {} if (valueString is not None): pairs = valueString.split() for pair in pairs: parts = pair.split('=') name = parts[0] if parts[1:2]: givenValues[name] = parts[1] subSets = [] structureString = structureString.strip() if (structureString != ''): collectionStrings = re.split('\\s+or\\s+', structureString) for collectionStr in collectionStrings: items = [] collectionStr = collectionStr.replace('(', ' ').replace(')', ' ').strip() itemStrings = re.split('\\s+and\\s+', collectionStr) for itemString in itemStrings: item = Gene.fromString(itemString) if (item.canonicalName in givenValues): item.set(givenValues[item.canonicalName]) items.append(item) if items: subSets.append(GeneSet(items)) return subSets
-8,194,630,866,699,173,000
Construct a list of GeneSet objects based on the given strings. The first string, structureString, determines the number of GeneSets to create. An empty string will return an empty list. A string containing anything else will return one or more subsets. The number of subsets is determined by the number of times the separator string " or " occurs in the structureString. For example, "(setA) or (setB) or (setC) or geneD or geneE or (setF)" will create six subsets. "(setA)", "(setC)", "geneD", etc are substrings that declare the contents of each set. There are two accepted ways to format the substrings: Method #1: substrings example: "aName=[aLow:aBest:aHigh] and bName=[bLow:bBest:bHigh] and cName=value and dName" valueString=None In this example, four NamedRangedNumber objects will be created in total: aName and bName specify rangedNumber values for NamedRangedNumber, cName specifies just one floating point number that is converted to a rangedNumber, and dName creates a NamedRangedNumber with the value set to None. valueString can be left out entirely. Method #2: substrings example: "aName and dName and cName and qName" valueString example: "aName=[aLow:aBest:aHigh] bName=[bLow:bBest:bHigh] cName=value dName=value fName=value" In this example, four NamedRangedNumber objects will be created, but only two of them will be assigned values (the other two will have values of None). This happens because the structureString declares what items are in the set, while the valueString only assigns values. If a value is given in the second string for a name that is not listed in the first, that value is ignored. No item is created for it. While it is possible to supply a mixture of methods 1 and 2, it is not recommended practice. Values assigned via method 2 take precedence over values assigned via method 1, even if the value assigned is "=None". Note that Gene objects are re-used from one set to the next. That is, if the same name is mentioned in two different substrings, only one Gene object will be created but it will be placed in two subsets.
code/core/Genes.py
createSetsFromStrings
somtirtharoy/jqmm
python
@staticmethod def createSetsFromStrings(structureString='', valueString=None): '\n Construct a list of GeneSet objects based on the given strings.\n The first string, structureString, determines the number of GeneSets to create.\n An empty string will return an empty list.\n A string containing anything else will return one or more subsets.\n The number of subsets is determined by the number of times the separator string " or " occurs in the\n structureString. For example, "(setA) or (setB) or (setC) or geneD or geneE or (setF)" will create\n six subsets. "(setA)", "(setC)", "geneD", etc are substrings that declare the contents of each set.\n There are two accepted ways to format the substrings:\n Method #1:\n substrings example: "aName=[aLow:aBest:aHigh] and bName=[bLow:bBest:bHigh] and cName=value and dName"\n valueString=None\n In this example, four NamedRangedNumber objects will be created in total:\n aName and bName specify rangedNumber values for NamedRangedNumber, cName specifies just one floating point\n number that is converted to a rangedNumber, and dName crates a NamedRangedNumber with the value set to None.\n valueString can be left out entirely.\n Method #2:\n substrings example: "aName and dName and cName and qName"\n valueString example: "aName=[aLow:aBest:aHigh] bName=[bLow:bBest:bHigh] cName=value dName=value fName=value"\n In this example, four NamedRangedNumber objects will be created, but only two of them will be assigned values\n (the other two will have values of None). This happens because the structureString declares what items are in\n the set, while the valueString only assigns values. If a value is given in the second string for a name that is\n not listed in the first, that value is ignored. No item is created for it.\n While it is possible to supply a mixture of methods 1 and 2, it is not recommended practice. Values assigned via\n method 2 take precedence over values assigned via method 1, even if the value assigned is "=None".\n Note that Gene objects are re-used from one set to the next. That is, if the same name is mentioned in two\n different substrings, only one Gene object will be created but it will be placed in two subsets.\n ' givenValues = {} if (valueString is not None): pairs = valueString.split() for pair in pairs: parts = pair.split('=') name = parts[0] if parts[1:2]: givenValues[name] = parts[1] subSets = [] structureString = structureString.strip() if (structureString != ''): collectionStrings = re.split('\\s+or\\s+', structureString) for collectionStr in collectionStrings: items = [] collectionStr = collectionStr.replace('(', ' ').replace(')', ' ').strip() itemStrings = re.split('\\s+and\\s+', collectionStr) for itemString in itemStrings: item = Gene.fromString(itemString) if (item.canonicalName in givenValues): item.set(givenValues[item.canonicalName]) items.append(item) if items: subSets.append(GeneSet(items)) return subSets
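The valueString parsing step above is self-contained enough to demonstrate in isolation; a minimal sketch with invented names (note how a bare name like dName yields no entry because nothing follows an '='):

value_string = 'aName=[1:2:3] cName=0.5 dName'
given_values = {}
for pair in value_string.split():
    parts = pair.split('=')
    if parts[1:2]:  # true only when a value followed the '='
        given_values[parts[0]] = parts[1]
print(given_values)  # {'aName': '[1:2:3]', 'cName': '0.5'}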
def _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu, param_type, device): 'Extracts the TensorFlow function from serialized computation.\n\n Args:\n comp: An instance of `pb.Computation`.\n must_pin_function_to_cpu: A boolean flag to indicate if the computation is\n forced to be on CPUs.\n param_type: A `tff.Type` instance or None.\n device: A `tf.config.LogicalDevice` or None.\n\n Returns:\n A TensorFlow ConcreteFunction.\n ' def function_to_wrap(): 'No-arg function to import graph def.\n\n We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid\n the leftover placeholders that can result from binding arguments to the\n imported graphdef via `input_map`. The correct signature will be added to\n this function later, via the `prune` call below.\n\n Returns:\n Result of importing graphdef backing `comp`.\n ' graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def) init_op = comp.tensorflow.initialize_op if init_op: graph_def = tensorflow_utils.add_control_deps_for_init_op(graph_def, init_op) def _import_fn(): return tf.import_graph_def(graph_merge.uniquify_shared_names(graph_def), name='') if must_pin_function_to_cpu: with tf.device('cpu'): return _import_fn() elif (device is not None): with tf.device(device.name): return _import_fn() else: return _import_fn() wrapped_noarg_fn = tf.compat.v1.wrap_function(function_to_wrap, signature=[]) if (param_type is not None): input_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(comp.tensorflow.parameter) else: input_tensor_names = [] output_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(comp.tensorflow.result) import_graph = wrapped_noarg_fn.graph try: wrapped_fn = wrapped_noarg_fn.prune(feeds=tf.nest.map_structure(import_graph.as_graph_element, input_tensor_names), fetches=tf.nest.map_structure(import_graph.as_graph_element, output_tensor_names)) except KeyError as e: raise TypeError('Caught exception trying to prune graph `{g}` with feeds {feeds} and fetches {fetches}. This indicates that these names may not refer to tensors in the graph. .\nException: {e}'.format(g=import_graph, feeds=input_tensor_names, fetches=output_tensor_names, e=e)) return wrapped_fn
8,839,281,469,228,193,000
Extracts the TensorFlow function from serialized computation. Args: comp: An instance of `pb.Computation`. must_pin_function_to_cpu: A boolean flag to indicate if the computation is forced to be on CPUs. param_type: A `tff.Type` instance or None. device: A `tf.config.LogicalDevice` or None. Returns: A TensorFlow ConcreteFunction.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
_get_wrapped_function_from_comp
ddayzzz/federated
python
def _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu, param_type, device): 'Extracts the TensorFlow function from serialized computation.\n\n Args:\n comp: An instance of `pb.Computation`.\n must_pin_function_to_cpu: A boolean flag to indicate if the computation is\n forced to be on CPUs.\n param_type: A `tff.Type` instance or None.\n device: A `tf.config.LogicalDevice` or None.\n\n Returns:\n A TensorFlow ConcreteFunction.\n ' def function_to_wrap(): 'No-arg function to import graph def.\n\n We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid\n the leftover placeholders that can result from binding arguments to the\n imported graphdef via `input_map`. The correct signature will be added to\n this function later, via the `prune` call below.\n\n Returns:\n Result of importing graphdef backing `comp`.\n ' graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def) init_op = comp.tensorflow.initialize_op if init_op: graph_def = tensorflow_utils.add_control_deps_for_init_op(graph_def, init_op) def _import_fn(): return tf.import_graph_def(graph_merge.uniquify_shared_names(graph_def), name='') if must_pin_function_to_cpu: with tf.device('cpu'): return _import_fn() elif (device is not None): with tf.device(device.name): return _import_fn() else: return _import_fn() wrapped_noarg_fn = tf.compat.v1.wrap_function(function_to_wrap, signature=[]) if (param_type is not None): input_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(comp.tensorflow.parameter) else: input_tensor_names = [] output_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(comp.tensorflow.result) import_graph = wrapped_noarg_fn.graph try: wrapped_fn = wrapped_noarg_fn.prune(feeds=tf.nest.map_structure(import_graph.as_graph_element, input_tensor_names), fetches=tf.nest.map_structure(import_graph.as_graph_element, output_tensor_names)) except KeyError as e: raise TypeError('Caught exception trying to prune graph `{g}` with feeds {feeds} and fetches {fetches}. This indicates that these names may not refer to tensors in the graph. .\nException: {e}'.format(g=import_graph, feeds=input_tensor_names, fetches=output_tensor_names, e=e)) return wrapped_fn
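A standalone sketch of the wrap_function-then-prune pattern used above, with a toy graph instead of a TFF computation (assumes TensorFlow 2.x; the tensor names 'x:0' and 'y:0' are chosen here for illustration, not taken from TFF):

import tensorflow as tf

def fn_to_wrap():
    # no-arg function, as in the code above; the placeholder becomes an
    # input only after prune() attaches a signature
    x = tf.compat.v1.placeholder(tf.float32, shape=[], name='x')
    tf.multiply(x, 3.0, name='y')

wrapped = tf.compat.v1.wrap_function(fn_to_wrap, signature=[])
graph = wrapped.graph
pruned = wrapped.prune(feeds=graph.as_graph_element('x:0'),
                       fetches=graph.as_graph_element('y:0'))
print(pruned(tf.constant(5.0)).numpy())  # 15.0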
def embed_tensorflow_computation(comp, type_spec=None, device=None): 'Embeds a TensorFlow computation for use in the eager context.\n\n Args:\n comp: An instance of `pb.Computation`.\n type_spec: An optional `tff.Type` instance or something convertible to it.\n device: An optional `tf.config.LogicalDevice`.\n\n Returns:\n Either a one-argument or a zero-argument callable that executes the\n computation in eager mode.\n\n Raises:\n TypeError: If arguments are of the wrong types, e.g., in `comp` is not a\n TensorFlow computation.\n ' py_typecheck.check_type(comp, pb.Computation) comp_type = type_serialization.deserialize_type(comp.type) type_spec = computation_types.to_type(type_spec) if (type_spec is not None): if (not type_spec.is_equivalent_to(comp_type)): raise TypeError('Expected a computation of type {}, got {}.'.format(type_spec, comp_type)) else: type_spec = comp_type must_pin_function_to_cpu = type_analysis.contains(type_spec.result, (lambda t: t.is_sequence())) which_computation = comp.WhichOneof('computation') if (which_computation != 'tensorflow'): unexpected_building_block = building_blocks.ComputationBuildingBlock.from_proto(comp) raise TypeError('Expected a TensorFlow computation, found {}.'.format(unexpected_building_block)) if type_spec.is_function(): param_type = type_spec.parameter result_type = type_spec.result else: param_type = None result_type = type_spec wrapped_fn = _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu, param_type, device) param_fns = [] if (param_type is not None): for spec in structure.flatten(type_spec.parameter): if spec.is_tensor(): param_fns.append((lambda x: x)) else: py_typecheck.check_type(spec, computation_types.SequenceType) param_fns.append(tf.data.experimental.to_variant) result_fns = [] for spec in structure.flatten(result_type): if spec.is_tensor(): result_fns.append((lambda x: x)) else: py_typecheck.check_type(spec, computation_types.SequenceType) tf_structure = type_conversions.type_to_tf_structure(spec.element) def fn(x, tf_structure=tf_structure): return tf.data.experimental.from_variant(x, tf_structure) result_fns.append(fn) def _fn_to_return(arg, param_fns, wrapped_fn): param_elements = [] if (arg is not None): arg_parts = structure.flatten(arg) if (len(arg_parts) != len(param_fns)): raise RuntimeError('Expected {} arguments, found {}.'.format(len(param_fns), len(arg_parts))) for (arg_part, param_fn) in zip(arg_parts, param_fns): param_elements.append(param_fn(arg_part)) result_parts = wrapped_fn(*param_elements) resources = [] for op in wrapped_fn.graph.get_operations(): if (op.type == 'VarHandleOp'): resources += op.outputs if resources: for resource in wrapped_fn.prune(feeds={}, fetches=resources)(): tf.raw_ops.DestroyResourceOp(resource=resource) result_elements = [] for (result_part, result_fn) in zip(result_parts, result_fns): result_elements.append(result_fn(result_part)) return structure.pack_sequence_as(result_type, result_elements) fn_to_return = (lambda arg, p=param_fns, w=wrapped_fn: _fn_to_return(arg, p, w)) if must_pin_function_to_cpu: old_fn_to_return = fn_to_return def fn_to_return(x): with tf.device('cpu'): return old_fn_to_return(x) elif (device is not None): old_fn_to_return = fn_to_return def fn_to_return(x): with tf.device(device.name): return old_fn_to_return(x) if (param_type is not None): return (lambda arg: fn_to_return(arg)) else: return (lambda : fn_to_return(None))
5,325,071,796,585,580,000
Embeds a TensorFlow computation for use in the eager context. Args: comp: An instance of `pb.Computation`. type_spec: An optional `tff.Type` instance or something convertible to it. device: An optional `tf.config.LogicalDevice`. Returns: Either a one-argument or a zero-argument callable that executes the computation in eager mode. Raises: TypeError: If arguments are of the wrong types, e.g., if `comp` is not a TensorFlow computation.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
embed_tensorflow_computation
ddayzzz/federated
python
def embed_tensorflow_computation(comp, type_spec=None, device=None): 'Embeds a TensorFlow computation for use in the eager context.\n\n Args:\n comp: An instance of `pb.Computation`.\n type_spec: An optional `tff.Type` instance or something convertible to it.\n device: An optional `tf.config.LogicalDevice`.\n\n Returns:\n Either a one-argument or a zero-argument callable that executes the\n computation in eager mode.\n\n Raises:\n TypeError: If arguments are of the wrong types, e.g., in `comp` is not a\n TensorFlow computation.\n ' py_typecheck.check_type(comp, pb.Computation) comp_type = type_serialization.deserialize_type(comp.type) type_spec = computation_types.to_type(type_spec) if (type_spec is not None): if (not type_spec.is_equivalent_to(comp_type)): raise TypeError('Expected a computation of type {}, got {}.'.format(type_spec, comp_type)) else: type_spec = comp_type must_pin_function_to_cpu = type_analysis.contains(type_spec.result, (lambda t: t.is_sequence())) which_computation = comp.WhichOneof('computation') if (which_computation != 'tensorflow'): unexpected_building_block = building_blocks.ComputationBuildingBlock.from_proto(comp) raise TypeError('Expected a TensorFlow computation, found {}.'.format(unexpected_building_block)) if type_spec.is_function(): param_type = type_spec.parameter result_type = type_spec.result else: param_type = None result_type = type_spec wrapped_fn = _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu, param_type, device) param_fns = [] if (param_type is not None): for spec in structure.flatten(type_spec.parameter): if spec.is_tensor(): param_fns.append((lambda x: x)) else: py_typecheck.check_type(spec, computation_types.SequenceType) param_fns.append(tf.data.experimental.to_variant) result_fns = [] for spec in structure.flatten(result_type): if spec.is_tensor(): result_fns.append((lambda x: x)) else: py_typecheck.check_type(spec, computation_types.SequenceType) tf_structure = type_conversions.type_to_tf_structure(spec.element) def fn(x, tf_structure=tf_structure): return tf.data.experimental.from_variant(x, tf_structure) result_fns.append(fn) def _fn_to_return(arg, param_fns, wrapped_fn): param_elements = [] if (arg is not None): arg_parts = structure.flatten(arg) if (len(arg_parts) != len(param_fns)): raise RuntimeError('Expected {} arguments, found {}.'.format(len(param_fns), len(arg_parts))) for (arg_part, param_fn) in zip(arg_parts, param_fns): param_elements.append(param_fn(arg_part)) result_parts = wrapped_fn(*param_elements) resources = [] for op in wrapped_fn.graph.get_operations(): if (op.type == 'VarHandleOp'): resources += op.outputs if resources: for resource in wrapped_fn.prune(feeds={}, fetches=resources)(): tf.raw_ops.DestroyResourceOp(resource=resource) result_elements = [] for (result_part, result_fn) in zip(result_parts, result_fns): result_elements.append(result_fn(result_part)) return structure.pack_sequence_as(result_type, result_elements) fn_to_return = (lambda arg, p=param_fns, w=wrapped_fn: _fn_to_return(arg, p, w)) if must_pin_function_to_cpu: old_fn_to_return = fn_to_return def fn_to_return(x): with tf.device('cpu'): return old_fn_to_return(x) elif (device is not None): old_fn_to_return = fn_to_return def fn_to_return(x): with tf.device(device.name): return old_fn_to_return(x) if (param_type is not None): return (lambda arg: fn_to_return(arg)) else: return (lambda : fn_to_return(None))
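The param_fns/result_fns above shuttle tf.data datasets across the function boundary as variants; the round trip can be sketched on its own with standard tf.data.experimental APIs, independent of TFF:

import tensorflow as tf

ds = tf.data.Dataset.range(3)
variant = tf.data.experimental.to_variant(ds)
restored = tf.data.experimental.from_variant(
    variant, tf.data.experimental.get_structure(ds))
print(list(restored.as_numpy_iterator()))  # [0, 1, 2]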
def to_representation_for_type(value: Any, tf_function_cache: MutableMapping[(str, Any)], type_spec: Optional[computation_types.Type]=None, device: Optional[tf.config.LogicalDevice]=None) -> Any: 'Verifies or converts the `value` to an eager object matching `type_spec`.\n\n WARNING: This function is only partially implemented. It does not support\n data sets at this point.\n\n The output of this function is always an eager tensor, eager dataset, a\n representation of a TensorFlow computation, or a nested structure of those\n that matches `type_spec`, and when `device` has been specified, everything\n is placed on that device on a best-effort basis.\n\n TensorFlow computations are represented here as zero- or one-argument Python\n callables that accept their entire argument bundle as a single Python object.\n\n Args:\n value: The raw representation of a value to compare against `type_spec` and\n potentially to be converted.\n tf_function_cache: A cache obeying `dict` semantics that can be used to look\n up previously embedded TensorFlow functions.\n type_spec: An instance of `tff.Type`, can be `None` for values that derive\n from `typed_object.TypedObject`.\n device: An optional `tf.config.LogicalDevice` to place the value on (for\n tensor-level values).\n\n Returns:\n Either `value` itself, or a modified version of it.\n\n Raises:\n TypeError: If the `value` is not compatible with `type_spec`.\n ' type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec) if isinstance(value, computation_base.Computation): return to_representation_for_type(computation_impl.ComputationImpl.get_proto(value), tf_function_cache, type_spec, device) elif isinstance(value, pb.Computation): key = (value.SerializeToString(), str(type_spec), (device.name if device else None)) cached_fn = tf_function_cache.get(key) if (cached_fn is not None): return cached_fn embedded_fn = embed_tensorflow_computation(value, type_spec, device) tf_function_cache[key] = embedded_fn return embedded_fn elif type_spec.is_struct(): type_elem = structure.to_elements(type_spec) value_elem = structure.to_elements(structure.from_container(value)) result_elem = [] if (len(type_elem) != len(value_elem)): raise TypeError('Expected a {}-element tuple, found {} elements.'.format(len(type_elem), len(value_elem))) for ((t_name, el_type), (v_name, el_val)) in zip(type_elem, value_elem): if (t_name != v_name): raise TypeError('Mismatching element names in type vs. value: {} vs. {}.'.format(t_name, v_name)) el_repr = to_representation_for_type(el_val, tf_function_cache, el_type, device) result_elem.append((t_name, el_repr)) return structure.Struct(result_elem) elif (device is not None): py_typecheck.check_type(device, tf.config.LogicalDevice) with tf.device(device.name): return to_representation_for_type(value, tf_function_cache, type_spec=type_spec, device=None) elif isinstance(value, EagerValue): return value.internal_representation elif isinstance(value, executor_value_base.ExecutorValue): raise TypeError('Cannot accept a value embedded within a non-eager executor.') elif type_spec.is_tensor(): if (not tf.is_tensor(value)): value = tf.convert_to_tensor(value, dtype=type_spec.dtype) elif hasattr(value, 'read_value'): value = value.read_value() value_type = computation_types.TensorType(value.dtype.base_dtype, value.shape) if (not type_spec.is_assignable_from(value_type)): raise TypeError('The apparent type {} of a tensor {} does not match the expected type {}.'.format(value_type, value, type_spec)) return value elif type_spec.is_sequence(): if isinstance(value, list): value = tensorflow_utils.make_data_set_from_elements(None, value, type_spec.element) py_typecheck.check_type(value, type_conversions.TF_DATASET_REPRESENTATION_TYPES) element_type = computation_types.to_type(value.element_spec) value_type = computation_types.SequenceType(element_type) type_spec.check_assignable_from(value_type) return value else: raise TypeError('Unexpected type {}.'.format(type_spec))
-5,421,512,149,350,450,000
Verifies or converts the `value` to an eager object matching `type_spec`. WARNING: This function is only partially implemented. It does not support data sets at this point. The output of this function is always an eager tensor, eager dataset, a representation of a TensorFlow computation, or a nested structure of those that matches `type_spec`, and when `device` has been specified, everything is placed on that device on a best-effort basis. TensorFlow computations are represented here as zero- or one-argument Python callables that accept their entire argument bundle as a single Python object. Args: value: The raw representation of a value to compare against `type_spec` and potentially to be converted. tf_function_cache: A cache obeying `dict` semantics that can be used to look up previously embedded TensorFlow functions. type_spec: An instance of `tff.Type`, can be `None` for values that derive from `typed_object.TypedObject`. device: An optional `tf.config.LogicalDevice` to place the value on (for tensor-level values). Returns: Either `value` itself, or a modified version of it. Raises: TypeError: If the `value` is not compatible with `type_spec`.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
to_representation_for_type
ddayzzz/federated
python
def to_representation_for_type(value: Any, tf_function_cache: MutableMapping[(str, Any)], type_spec: Optional[computation_types.Type]=None, device: Optional[tf.config.LogicalDevice]=None) -> Any: 'Verifies or converts the `value` to an eager object matching `type_spec`.\n\n WARNING: This function is only partially implemented. It does not support\n data sets at this point.\n\n The output of this function is always an eager tensor, eager dataset, a\n representation of a TensorFlow computation, or a nested structure of those\n that matches `type_spec`, and when `device` has been specified, everything\n is placed on that device on a best-effort basis.\n\n TensorFlow computations are represented here as zero- or one-argument Python\n callables that accept their entire argument bundle as a single Python object.\n\n Args:\n value: The raw representation of a value to compare against `type_spec` and\n potentially to be converted.\n tf_function_cache: A cache obeying `dict` semantics that can be used to look\n up previously embedded TensorFlow functions.\n type_spec: An instance of `tff.Type`, can be `None` for values that derive\n from `typed_object.TypedObject`.\n device: An optional `tf.config.LogicalDevice` to place the value on (for\n tensor-level values).\n\n Returns:\n Either `value` itself, or a modified version of it.\n\n Raises:\n TypeError: If the `value` is not compatible with `type_spec`.\n ' type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec) if isinstance(value, computation_base.Computation): return to_representation_for_type(computation_impl.ComputationImpl.get_proto(value), tf_function_cache, type_spec, device) elif isinstance(value, pb.Computation): key = (value.SerializeToString(), str(type_spec), (device.name if device else None)) cached_fn = tf_function_cache.get(key) if (cached_fn is not None): return cached_fn embedded_fn = embed_tensorflow_computation(value, type_spec, device) tf_function_cache[key] = embedded_fn return embedded_fn elif type_spec.is_struct(): type_elem = structure.to_elements(type_spec) value_elem = structure.to_elements(structure.from_container(value)) result_elem = [] if (len(type_elem) != len(value_elem)): raise TypeError('Expected a {}-element tuple, found {} elements.'.format(len(type_elem), len(value_elem))) for ((t_name, el_type), (v_name, el_val)) in zip(type_elem, value_elem): if (t_name != v_name): raise TypeError('Mismatching element names in type vs. value: {} vs. {}.'.format(t_name, v_name)) el_repr = to_representation_for_type(el_val, tf_function_cache, el_type, device) result_elem.append((t_name, el_repr)) return structure.Struct(result_elem) elif (device is not None): py_typecheck.check_type(device, tf.config.LogicalDevice) with tf.device(device.name): return to_representation_for_type(value, tf_function_cache, type_spec=type_spec, device=None) elif isinstance(value, EagerValue): return value.internal_representation elif isinstance(value, executor_value_base.ExecutorValue): raise TypeError('Cannot accept a value embedded within a non-eager executor.') elif type_spec.is_tensor(): if (not tf.is_tensor(value)): value = tf.convert_to_tensor(value, dtype=type_spec.dtype) elif hasattr(value, 'read_value'): value = value.read_value() value_type = computation_types.TensorType(value.dtype.base_dtype, value.shape) if (not type_spec.is_assignable_from(value_type)): raise TypeError('The apparent type {} of a tensor {} does not match the expected type {}.'.format(value_type, value, type_spec)) return value elif type_spec.is_sequence(): if isinstance(value, list): value = tensorflow_utils.make_data_set_from_elements(None, value, type_spec.element) py_typecheck.check_type(value, type_conversions.TF_DATASET_REPRESENTATION_TYPES) element_type = computation_types.to_type(value.element_spec) value_type = computation_types.SequenceType(element_type) type_spec.check_assignable_from(value_type) return value else: raise TypeError('Unexpected type {}.'.format(type_spec))
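The struct branch pairs type elements with value elements positionally and insists the names agree; the same check in plain Python, with invented element lists:

type_elem = [('a', 'int32'), ('b', 'float32')]
value_elem = [('a', 1), ('b', 2.0)]
if len(type_elem) != len(value_elem):
    raise TypeError('Expected a {}-element tuple, found {} elements.'.format(len(type_elem), len(value_elem)))
for (t_name, _), (v_name, _) in zip(type_elem, value_elem):
    if t_name != v_name:
        raise TypeError('Mismatching element names: {} vs. {}.'.format(t_name, v_name))
print('struct elements align')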
def function_to_wrap(): 'No-arg function to import graph def.\n\n We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid\n the leftover placeholders that can result from binding arguments to the\n imported graphdef via `input_map`. The correct signature will be added to\n this function later, via the `prune` call below.\n\n Returns:\n Result of importing graphdef backing `comp`.\n ' graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def) init_op = comp.tensorflow.initialize_op if init_op: graph_def = tensorflow_utils.add_control_deps_for_init_op(graph_def, init_op) def _import_fn(): return tf.import_graph_def(graph_merge.uniquify_shared_names(graph_def), name='') if must_pin_function_to_cpu: with tf.device('cpu'): return _import_fn() elif (device is not None): with tf.device(device.name): return _import_fn() else: return _import_fn()
1,383,540,077,352,208,600
No-arg function to import graph def. We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid the leftover placeholders that can result from binding arguments to the imported graphdef via `input_map`. The correct signature will be added to this function later, via the `prune` call below. Returns: Result of importing graphdef backing `comp`.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
function_to_wrap
ddayzzz/federated
python
def function_to_wrap(): 'No-arg function to import graph def.\n\n We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid\n the leftover placeholders that can result from binding arguments to the\n imported graphdef via `input_map`. The correct signature will be added to\n this function later, via the `prune` call below.\n\n Returns:\n Result of importing graphdef backing `comp`.\n ' graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def) init_op = comp.tensorflow.initialize_op if init_op: graph_def = tensorflow_utils.add_control_deps_for_init_op(graph_def, init_op) def _import_fn(): return tf.import_graph_def(graph_merge.uniquify_shared_names(graph_def), name='') if must_pin_function_to_cpu: with tf.device('cpu'): return _import_fn() elif (device is not None): with tf.device(device.name): return _import_fn() else: return _import_fn()
def __init__(self, value, tf_function_cache, type_spec=None, device=None): 'Creates an instance of a value in this executor.\n\n Args:\n value: Depending on `type_spec`, either a `tf.Tensor`, `tf.data.Dataset`,\n or a nested structure of these stored in an `Struct`.\n tf_function_cache: A cache obeying `dict` semantics that can be used to\n look up previously embedded TensorFlow functions.\n type_spec: An instance of `tff.Type` that represents a tensor, a dataset,\n or a nested structure of these.\n device: An optional `tf.config.LogicalDevice` on which to place the value.\n ' if (type_spec is None): py_typecheck.check_type(value, typed_object.TypedObject) type_spec = value.type_signature else: type_spec = computation_types.to_type(type_spec) py_typecheck.check_type(type_spec, computation_types.Type) self._type_signature = type_spec self._value = to_representation_for_type(value, tf_function_cache, type_spec, device)
-5,315,987,343,711,920,000
Creates an instance of a value in this executor. Args: value: Depending on `type_spec`, either a `tf.Tensor`, `tf.data.Dataset`, or a nested structure of these stored in a `Struct`. tf_function_cache: A cache obeying `dict` semantics that can be used to look up previously embedded TensorFlow functions. type_spec: An instance of `tff.Type` that represents a tensor, a dataset, or a nested structure of these. device: An optional `tf.config.LogicalDevice` on which to place the value.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
__init__
ddayzzz/federated
python
def __init__(self, value, tf_function_cache, type_spec=None, device=None): 'Creates an instance of a value in this executor.\n\n Args:\n value: Depending on `type_spec`, either a `tf.Tensor`, `tf.data.Dataset`,\n or a nested structure of these stored in an `Struct`.\n tf_function_cache: A cache obeying `dict` semantics that can be used to\n look up previously embedded TensorFlow functions.\n type_spec: An instance of `tff.Type` that represents a tensor, a dataset,\n or a nested structure of these.\n device: An optional `tf.config.LogicalDevice` on which to place the value.\n ' if (type_spec is None): py_typecheck.check_type(value, typed_object.TypedObject) type_spec = value.type_signature else: type_spec = computation_types.to_type(type_spec) py_typecheck.check_type(type_spec, computation_types.Type) self._type_signature = type_spec self._value = to_representation_for_type(value, tf_function_cache, type_spec, device)
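The "explicit spec wins, otherwise read it off the typed value" logic at the top of __init__ can be mimicked without TFF; the class and helper below are invented for illustration:

class Typed:
    type_signature = 'int32'

def resolve_spec(value, type_spec=None):
    # explicit spec takes precedence; otherwise use the value's own signature
    return type_spec if type_spec is not None else value.type_signature

print(resolve_spec(Typed()))             # int32
print(resolve_spec(Typed(), 'float32'))  # float32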
@property def internal_representation(self): 'Returns a representation of the eager value embedded in the executor.\n\n This property is only intended for use by the eager executor and tests. Not\n for consumption by consumers of the executor interface.\n ' return self._value
4,524,943,150,084,182,500
Returns a representation of the eager value embedded in the executor. This property is only intended for use by the eager executor and tests. Not for consumption by consumers of the executor interface.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
internal_representation
ddayzzz/federated
python
@property def internal_representation(self): 'Returns a representation of the eager value embedded in the executor.\n\n This property is only intended for use by the eager executor and tests. Not\n for consumption by consumers of the executor interface.\n ' return self._value
def __init__(self, device=None): 'Creates a new instance of an eager executor.\n\n Args:\n device: An optional `tf.config.LogicalDevice` that this executor will\n schedule all of its operations to run on. For example, the list of\n logical devices can be obtained using\n `tf.config.list_logical_devices()`.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the device is not a `tf.config.LogicalDevice`.\n ValueError: If there is no device `device`.\n ' if (not tf.executing_eagerly()): raise RuntimeError('The eager executor may only be used in eager mode.') if (device is not None): py_typecheck.check_type(device, tf.config.LogicalDevice) self._device = device else: self._device = None self._tf_function_cache = cachetools.LRUCache(_TF_FUNCTION_CACHE_SIZE)
1,817,966,913,073,732,900
Creates a new instance of an eager executor. Args: device: An optional `tf.config.LogicalDevice` that this executor will schedule all of its operations to run on. For example, the list of logical devices can be obtained using `tf.config.list_logical_devices()`. Raises: RuntimeError: If not executing eagerly. TypeError: If the device is not a `tf.config.LogicalDevice`. ValueError: If there is no device `device`.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
__init__
ddayzzz/federated
python
def __init__(self, device=None): 'Creates a new instance of an eager executor.\n\n Args:\n device: An optional `tf.config.LogicalDevice` that this executor will\n schedule all of its operations to run on. For example, the list of\n logical devices can be obtained using\n `tf.config.list_logical_devices()`.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the device is not a `tf.config.LogicalDevice`.\n ValueError: If there is no device `device`.\n ' if (not tf.executing_eagerly()): raise RuntimeError('The eager executor may only be used in eager mode.') if (device is not None): py_typecheck.check_type(device, tf.config.LogicalDevice) self._device = device else: self._device = None self._tf_function_cache = cachetools.LRUCache(_TF_FUNCTION_CACHE_SIZE)
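The two collaborators of this constructor are ordinary public APIs and can be exercised directly (assumes the cachetools package is installed; the maxsize of 100 here is arbitrary, standing in for _TF_FUNCTION_CACHE_SIZE):

import tensorflow as tf
import cachetools

devices = tf.config.list_logical_devices('CPU')
print(devices[0].name)  # e.g. /device:CPU:0
cache = cachetools.LRUCache(maxsize=100)
cache['fn-key'] = 'embedded-fn'
print(cache['fn-key'])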
@tracing.trace(span=True) async def create_value(self, value, type_spec=None): 'Embeds `value` of type `type_spec` within this executor.\n\n Args:\n value: An object that represents the value to embed within the executor.\n type_spec: The `tff.Type` of the value represented by this object, or\n something convertible to it. Can optionally be `None` if `value` is an\n instance of `typed_object.TypedObject`.\n\n Returns:\n An instance of `EagerValue`.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the arguments are of the wrong types.\n ValueError: If the type was not specified and cannot be determined from\n the value.\n ' if (not tf.executing_eagerly()): raise RuntimeError('The eager executor may only be used in eager mode.') return EagerValue(value, self._tf_function_cache, type_spec, self._device)
-8,492,140,350,242,627,000
Embeds `value` of type `type_spec` within this executor. Args: value: An object that represents the value to embed within the executor. type_spec: The `tff.Type` of the value represented by this object, or something convertible to it. Can optionally be `None` if `value` is an instance of `typed_object.TypedObject`. Returns: An instance of `EagerValue`. Raises: RuntimeError: If not executing eagerly. TypeError: If the arguments are of the wrong types. ValueError: If the type was not specified and cannot be determined from the value.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
create_value
ddayzzz/federated
python
@tracing.trace(span=True) async def create_value(self, value, type_spec=None): 'Embeds `value` of type `type_spec` within this executor.\n\n Args:\n value: An object that represents the value to embed within the executor.\n type_spec: The `tff.Type` of the value represented by this object, or\n something convertible to it. Can optionally be `None` if `value` is an\n instance of `typed_object.TypedObject`.\n\n Returns:\n An instance of `EagerValue`.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the arguments are of the wrong types.\n ValueError: If the type was not specified and cannot be determined from\n the value.\n ' if (not tf.executing_eagerly()): raise RuntimeError('The eager executor may only be used in eager mode.') return EagerValue(value, self._tf_function_cache, type_spec, self._device)
@tracing.trace async def create_call(self, comp, arg=None): 'Creates a call to `comp` with optional `arg`.\n\n Args:\n comp: As documented in `executor_base.Executor`.\n arg: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` representing the result of the call.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the arguments are of the wrong types.\n ' py_typecheck.check_type(comp, EagerValue) if (arg is not None): py_typecheck.check_type(arg, EagerValue) if (not comp.type_signature.is_function()): raise TypeError('Expected a functional type, found {}'.format(comp.type_signature)) if (comp.type_signature.parameter is not None): return EagerValue(comp.internal_representation(arg.internal_representation), self._tf_function_cache, comp.type_signature.result, self._device) elif (arg is None): return EagerValue(comp.internal_representation(), self._tf_function_cache, comp.type_signature.result, self._device) else: raise TypeError('Cannot pass an argument to a no-argument function.')
7,628,042,971,048,479,000
Creates a call to `comp` with optional `arg`. Args: comp: As documented in `executor_base.Executor`. arg: As documented in `executor_base.Executor`. Returns: An instance of `EagerValue` representing the result of the call. Raises: RuntimeError: If not executing eagerly. TypeError: If the arguments are of the wrong types.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
create_call
ddayzzz/federated
python
@tracing.trace async def create_call(self, comp, arg=None): 'Creates a call to `comp` with optional `arg`.\n\n Args:\n comp: As documented in `executor_base.Executor`.\n arg: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` representing the result of the call.\n\n Raises:\n RuntimeError: If not executing eagerly.\n TypeError: If the arguments are of the wrong types.\n ' py_typecheck.check_type(comp, EagerValue) if (arg is not None): py_typecheck.check_type(arg, EagerValue) if (not comp.type_signature.is_function()): raise TypeError('Expected a functional type, found {}'.format(comp.type_signature)) if (comp.type_signature.parameter is not None): return EagerValue(comp.internal_representation(arg.internal_representation), self._tf_function_cache, comp.type_signature.result, self._device) elif (arg is None): return EagerValue(comp.internal_representation(), self._tf_function_cache, comp.type_signature.result, self._device) else: raise TypeError('Cannot pass an argument to a no-argument function.')
@tracing.trace async def create_struct(self, elements): 'Creates a tuple of `elements`.\n\n Args:\n elements: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` that represents the constructed tuple.\n ' elements = structure.to_elements(structure.from_container(elements)) val_elements = [] type_elements = [] for (k, v) in elements: py_typecheck.check_type(v, EagerValue) val_elements.append((k, v.internal_representation)) type_elements.append((k, v.type_signature)) return EagerValue(structure.Struct(val_elements), self._tf_function_cache, computation_types.StructType([((k, v) if (k is not None) else v) for (k, v) in type_elements]))
-5,155,764,902,810,965,000
Creates a tuple of `elements`. Args: elements: As documented in `executor_base.Executor`. Returns: An instance of `EagerValue` that represents the constructed tuple.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
create_struct
ddayzzz/federated
python
@tracing.trace async def create_struct(self, elements): 'Creates a tuple of `elements`.\n\n Args:\n elements: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` that represents the constructed tuple.\n ' elements = structure.to_elements(structure.from_container(elements)) val_elements = [] type_elements = [] for (k, v) in elements: py_typecheck.check_type(v, EagerValue) val_elements.append((k, v.internal_representation)) type_elements.append((k, v.type_signature)) return EagerValue(structure.Struct(val_elements), self._tf_function_cache, computation_types.StructType([((k, v) if (k is not None) else v) for (k, v) in type_elements]))
@tracing.trace async def create_selection(self, source, index=None, name=None): 'Creates a selection from `source`.\n\n Args:\n source: As documented in `executor_base.Executor`.\n index: As documented in `executor_base.Executor`.\n name: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` that represents the constructed selection.\n\n Raises:\n TypeError: If arguments are of the wrong types.\n ValueError: If either both, or neither of `name` and `index` are present.\n ' py_typecheck.check_type(source, EagerValue) py_typecheck.check_type(source.type_signature, computation_types.StructType) py_typecheck.check_type(source.internal_representation, structure.Struct) if (index is not None): py_typecheck.check_type(index, int) if (name is not None): raise ValueError('Cannot simultaneously specify name {} and index {}.'.format(name, index)) else: return EagerValue(source.internal_representation[index], self._tf_function_cache, source.type_signature[index]) elif (name is not None): py_typecheck.check_type(name, str) return EagerValue(getattr(source.internal_representation, str(name)), self._tf_function_cache, getattr(source.type_signature, str(name))) else: raise ValueError('Must specify either name or index.')
2,041,311,405,275,014,700
Creates a selection from `source`. Args: source: As documented in `executor_base.Executor`. index: As documented in `executor_base.Executor`. name: As documented in `executor_base.Executor`. Returns: An instance of `EagerValue` that represents the constructed selection. Raises: TypeError: If arguments are of the wrong types. ValueError: If either both, or neither of `name` and `index` are present.
tensorflow_federated/python/core/impl/executors/eager_tf_executor.py
create_selection
ddayzzz/federated
python
@tracing.trace async def create_selection(self, source, index=None, name=None): 'Creates a selection from `source`.\n\n Args:\n source: As documented in `executor_base.Executor`.\n index: As documented in `executor_base.Executor`.\n name: As documented in `executor_base.Executor`.\n\n Returns:\n An instance of `EagerValue` that represents the constructed selection.\n\n Raises:\n TypeError: If arguments are of the wrong types.\n ValueError: If either both, or neither of `name` and `index` are present.\n ' py_typecheck.check_type(source, EagerValue) py_typecheck.check_type(source.type_signature, computation_types.StructType) py_typecheck.check_type(source.internal_representation, structure.Struct) if (index is not None): py_typecheck.check_type(index, int) if (name is not None): raise ValueError('Cannot simultaneously specify name {} and index {}.'.format(name, index)) else: return EagerValue(source.internal_representation[index], self._tf_function_cache, source.type_signature[index]) elif (name is not None): py_typecheck.check_type(name, str) return EagerValue(getattr(source.internal_representation, str(name)), self._tf_function_cache, getattr(source.type_signature, str(name))) else: raise ValueError('Must specify either name or index.')
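The create_selection record above enforces a mutually exclusive index/name argument scheme. A minimal pure-Python sketch of that validation pattern, detached from TFF's EagerValue machinery (all names below are illustrative, not TFF's):

    import collections

    def select(source, index=None, name=None):
        # Mirror create_selection: exactly one of index/name may be given.
        if index is not None:
            if name is not None:
                raise ValueError('Cannot simultaneously specify name and index.')
            return source[index]
        if name is not None:
            return getattr(source, name)
        raise ValueError('Must specify either name or index.')

    Point = collections.namedtuple('Point', ['x', 'y'])
    p = Point(1, 2)
    assert select(p, index=0) == 1
    assert select(p, name='y') == 2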
def ensemble(scores): '\n Ensemble by majority vote.\n ' c = Counter() for probs in zip(scores): idx = int(np.argmax(np.array(probs))) c.update([idx]) best = c.most_common(1)[0][0] return best
-556,498,030,192,940,700
Ensemble by majority vote.
ensemble.py
ensemble
gstoica27/tacred-exploration
python
def ensemble(scores): '\n \n ' c = Counter() for probs in zip(scores): idx = int(np.argmax(np.array(probs))) c.update([idx]) best = c.most_common(1)[0][0] return best
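A hedged usage sketch for the ensemble record above, reproduced with the module-level imports it assumes; scores is taken to be one probability vector per model for a single example. Note that zip(scores) wraps each vector in a 1-tuple, which np.argmax then flattens, so the vote still comes out per model:

    from collections import Counter
    import numpy as np

    def ensemble(scores):
        c = Counter()
        for probs in zip(scores):  # probs is a 1-tuple holding one model's vector
            idx = int(np.argmax(np.array(probs)))
            c.update([idx])
        return c.most_common(1)[0][0]

    model_scores = [
        [0.1, 0.7, 0.2],  # model A votes class 1
        [0.6, 0.3, 0.1],  # model B votes class 0
        [0.2, 0.5, 0.3],  # model C votes class 1
    ]
    assert ensemble(model_scores) == 1  # majority of per-model argmax votes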
def __len__(self): 'Return the total number of path components.' i = (- 1) for (i, _) in enumerate(self): pass return (i + 1)
-4,726,724,090,606,615,000
Return the total number of path components.
grr/lib/rdfvalues/paths.py
__len__
mrhania/grr
python
def __len__(self): i = (- 1) for (i, _) in enumerate(self): pass return (i + 1)
def __iter__(self): 'Only iterate over all components from the current pointer.' element = self while element.HasField('pathtype'): (yield element) if element.HasField('nested_path'): element = element.nested_path else: break
-346,827,248,376,829,700
Only iterate over all components from the current pointer.
grr/lib/rdfvalues/paths.py
__iter__
mrhania/grr
python
def __iter__(self): element = self while element.HasField('pathtype'): (yield element) if element.HasField('nested_path'): element = element.nested_path else: break
def Insert(self, index, rdfpathspec=None, **kwarg): 'Insert a single component at index.' if (rdfpathspec is None): rdfpathspec = self.__class__(**kwarg) if (index == 0): nested_proto = self.__class__() nested_proto.SetRawData(self.GetRawData()) self.SetRawData(rdfpathspec.GetRawData()) self.last.nested_path = nested_proto else: previous = self[(index - 1)] rdfpathspec.last.nested_path = previous.nested_path previous.nested_path = rdfpathspec
2,803,779,637,885,970,000
Insert a single component at index.
grr/lib/rdfvalues/paths.py
Insert
mrhania/grr
python
def Insert(self, index, rdfpathspec=None, **kwarg): if (rdfpathspec is None): rdfpathspec = self.__class__(**kwarg) if (index == 0): nested_proto = self.__class__() nested_proto.SetRawData(self.GetRawData()) self.SetRawData(rdfpathspec.GetRawData()) self.last.nested_path = nested_proto else: previous = self[(index - 1)] rdfpathspec.last.nested_path = previous.nested_path previous.nested_path = rdfpathspec
def Append(self, component=None, **kwarg): 'Append a new pathspec component to this pathspec.' if (component is None): component = self.__class__(**kwarg) if self.HasField('pathtype'): self.last.nested_path = component else: for (k, v) in kwarg.items(): setattr(self, k, v) self.SetRawData(component.GetRawData()) return self
-4,659,558,211,762,946,000
Append a new pathspec component to this pathspec.
grr/lib/rdfvalues/paths.py
Append
mrhania/grr
python
def Append(self, component=None, **kwarg): if (component is None): component = self.__class__(**kwarg) if self.HasField('pathtype'): self.last.nested_path = component else: for (k, v) in kwarg.items(): setattr(self, k, v) self.SetRawData(component.GetRawData()) return self
def Pop(self, index=0): 'Removes and returns the pathspec at the specified index.' if (index < 0): index += len(self) if (index == 0): result = self.__class__() result.SetRawData(self.GetRawData()) self.SetRawData(self.nested_path.GetRawData()) else: previous = self[(index - 1)] result = previous.nested_path previous.nested_path = result.nested_path result.nested_path = None return result
3,488,132,426,749,126,700
Removes and returns the pathspec at the specified index.
grr/lib/rdfvalues/paths.py
Pop
mrhania/grr
python
def Pop(self, index=0): if (index < 0): index += len(self) if (index == 0): result = self.__class__() result.SetRawData(self.GetRawData()) self.SetRawData(self.nested_path.GetRawData()) else: previous = self[(index - 1)] result = previous.nested_path previous.nested_path = result.nested_path result.nested_path = None return result
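The __len__, __iter__, Insert, Append, and Pop records above all walk the same singly linked nested_path chain. A minimal pure-Python sketch of that traversal idiom, with GRR's proto machinery stripped out (Node and its fields are hypothetical stand-ins, not GRR classes):

    class Node:
        # Stand-in for one PathSpec component: a path plus an optional nested tail.
        def __init__(self, path, nested=None):
            self.path = path
            self.nested = nested

        def __iter__(self):
            node = self
            while node is not None:
                yield node
                node = node.nested

        def __len__(self):
            return sum(1 for _ in self)

    chain = Node('/dev/sda1', Node('/mnt', Node('etc')))
    assert len(chain) == 3
    assert [n.path for n in chain] == ['/dev/sda1', '/mnt', 'etc']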
def Dirname(self): 'Get a new copied object with only the directory path.' result = self.Copy() while 1: last_directory = posixpath.dirname(result.last.path) if ((last_directory != '/') or (len(result) <= 1)): result.last.path = last_directory result.last.inode = None break result.Pop((- 1)) return result
-4,814,401,091,083,449,000
Get a new copied object with only the directory path.
grr/lib/rdfvalues/paths.py
Dirname
mrhania/grr
python
def Dirname(self): result = self.Copy() while 1: last_directory = posixpath.dirname(result.last.path) if ((last_directory != '/') or (len(result) <= 1)): result.last.path = last_directory result.last.inode = None break result.Pop((- 1)) return result
def AFF4Path(self, client_urn): 'Returns the AFF4 URN this pathspec will be stored under.\n\n Args:\n client_urn: A ClientURN.\n\n Returns:\n A urn that corresponds to this pathspec.\n\n Raises:\n ValueError: If pathspec is not of the correct type.\n ' if (not self.HasField('pathtype')): raise ValueError("Can't determine AFF4 path without a valid pathtype.") first_component = self[0] dev = first_component.path if first_component.HasField('offset'): dev += (':' + str((first_component.offset / 512))) if ((len(self) > 1) and (first_component.pathtype == PathSpec.PathType.OS) and (self[1].pathtype == PathSpec.PathType.TSK)): result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev] start = 1 else: result = [self.AFF4_PREFIXES[first_component.pathtype]] start = 0 for p in self[start]: component = p.path if p.HasField('offset'): component += (':' + str((p.offset / 512))) if p.HasField('stream_name'): component += (':' + p.stream_name) result.append(component) return client_urn.Add('/'.join(result))
3,655,332,079,633,526,300
Returns the AFF4 URN this pathspec will be stored under. Args: client_urn: A ClientURN. Returns: A urn that corresponds to this pathspec. Raises: ValueError: If pathspec is not of the correct type.
grr/lib/rdfvalues/paths.py
AFF4Path
mrhania/grr
python
def AFF4Path(self, client_urn): 'Returns the AFF4 URN this pathspec will be stored under.\n\n Args:\n client_urn: A ClientURN.\n\n Returns:\n A urn that corresponds to this pathspec.\n\n Raises:\n ValueError: If pathspec is not of the correct type.\n ' if (not self.HasField('pathtype')): raise ValueError("Can't determine AFF4 path without a valid pathtype.") first_component = self[0] dev = first_component.path if first_component.HasField('offset'): dev += (':' + str((first_component.offset / 512))) if ((len(self) > 1) and (first_component.pathtype == PathSpec.PathType.OS) and (self[1].pathtype == PathSpec.PathType.TSK)): result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev] start = 1 else: result = [self.AFF4_PREFIXES[first_component.pathtype]] start = 0 for p in self[start]: component = p.path if p.HasField('offset'): component += (':' + str((p.offset / 512))) if p.HasField('stream_name'): component += (':' + p.stream_name) result.append(component) return client_urn.Add('/'.join(result))
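AFF4Path turns byte offsets into sector suffixes with offset / 512. That relies on Python 2 integer division; on Python 3 the same expression yields a float, rendering e.g. 'dev:4.0'. A portable sketch (512-byte sectors assumed, as in the record):

    offset = 2048
    component = 'dev' + ':' + str(offset // 512)  # 'dev:4' on Python 2 and 3 alike
    assert component == 'dev:4'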
def Validate(self): 'GlobExpression is valid.' if (len(self.RECURSION_REGEX.findall(self._value)) > 1): raise ValueError(('Only one ** is permitted per path: %s.' % self._value))
-7,115,543,097,806,057,000
GlobExpression is valid.
grr/lib/rdfvalues/paths.py
Validate
mrhania/grr
python
def Validate(self): if (len(self.RECURSION_REGEX.findall(self._value)) > 1): raise ValueError(('Only one ** is permitted per path: %s.' % self._value))
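Validate simply counts occurrences of the recursive '**' token. A tiny sketch with a hypothetical RECURSION_REGEX (GRR's actual pattern may differ, e.g. it may also allow a depth suffix like '**3'):

    import re

    RECURSION_REGEX = re.compile(r'\*\*')  # assumption: bare '**' token
    assert len(RECURSION_REGEX.findall('/etc/**/passwd')) == 1
    # Two '**' groups would trip the ValueError in the record above:
    assert len(RECURSION_REGEX.findall('/**/etc/**')) > 1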
def InterpolateGrouping(self, pattern): 'Interpolate inline globbing groups.' components = [] offset = 0 for match in GROUPING_PATTERN.finditer(pattern): components.append([pattern[offset:match.start()]]) alternatives = match.group(1).split(',') components.append(set(alternatives)) offset = match.end() components.append([pattern[offset:]]) for vector in itertools.product(*components): (yield u''.join(vector))
-7,670,459,769,591,568,000
Interpolate inline globbing groups.
grr/lib/rdfvalues/paths.py
InterpolateGrouping
mrhania/grr
python
def InterpolateGrouping(self, pattern): components = [] offset = 0 for match in GROUPING_PATTERN.finditer(pattern): components.append([pattern[offset:match.start()]]) alternatives = match.group(1).split(',') components.append(set(alternatives)) offset = match.end() components.append([pattern[offset:]]) for vector in itertools.product(*components): (yield u''.join(vector))
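A self-contained sketch of the same grouping expansion, with a hypothetical GROUPING_PATTERN (GRR's real regex may differ in detail):

    import itertools
    import re

    GROUPING_PATTERN = re.compile(r'\{([^}]+)\}')  # assumed grouping syntax

    def interpolate_grouping(pattern):
        components = []
        offset = 0
        for match in GROUPING_PATTERN.finditer(pattern):
            components.append([pattern[offset:match.start()]])
            components.append(set(match.group(1).split(',')))
            offset = match.end()
        components.append([pattern[offset:]])
        for vector in itertools.product(*components):
            yield ''.join(vector)

    assert sorted(interpolate_grouping('/home/{alice,bob}/.ssh')) == [
        '/home/alice/.ssh', '/home/bob/.ssh']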
def AsRegEx(self): 'Return the current glob as a simple regex.\n\n Note: No interpolation is performed.\n\n Returns:\n A RegularExpression() object.\n ' parts = self.__class__.REGEX_SPLIT_PATTERN.split(self._value) result = ''.join((self._ReplaceRegExPart(p) for p in parts)) return rdf_standard.RegularExpression(('(?i)\\A%s\\Z' % result))
-634,661,955,549,305,600
Return the current glob as a simple regex. Note: No interpolation is performed. Returns: A RegularExpression() object.
grr/lib/rdfvalues/paths.py
AsRegEx
mrhania/grr
python
def AsRegEx(self): 'Return the current glob as a simple regex.\n\n Note: No interpolation is performed.\n\n Returns:\n A RegularExpression() object.\n ' parts = self.__class__.REGEX_SPLIT_PATTERN.split(self._value) result = ''.join((self._ReplaceRegExPart(p) for p in parts)) return rdf_standard.RegularExpression(('(?i)\\A%s\\Z' % result))
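AsRegEx depends on REGEX_SPLIT_PATTERN and _ReplaceRegExPart, which are not shown in these records. As a rough standard-library analogue of the case-insensitive glob-to-regex conversion (fnmatch does not implement GRR's '**' semantics, so this is only an approximation):

    import fnmatch
    import re

    rx = re.compile('(?i)' + fnmatch.translate('*.CONF'))
    assert rx.match('httpd.conf')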
def _get_cluster_id_value(self): '\n Getter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)\n ' return self.__cluster_id_value
2,962,098,623,915,979,000
Getter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)
pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/router_bgp_attributes/cluster_id/__init__.py
_get_cluster_id_value
extremenetworks/pybind
python
def _get_cluster_id_value(self): '\n \n ' return self.__cluster_id_value
def _set_cluster_id_value(self, v, load=False): '\n Setter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_cluster_id_value is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_cluster_id_value() directly.\n ' if hasattr(v, '_utype'): v = v._utype(v) try: t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name='cluster-id-value', rest_name='id', parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True) except (TypeError, ValueError): raise ValueError({'error-string': 'cluster_id_value must be of a type compatible with decimal-number', 'defined-type': 'brocade-bgp:decimal-number', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={\'range\': [\'0..4294967295\']}, int_size=32), restriction_dict={\'range\': [u\'1..65535\']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u\'ch-cluster-id\', u\'ca-cluster-id\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Route-Reflector Cluster-ID as 32 bit quantity\', u\'cli-drop-node-name\': None, u\'alt-name\': u\'id\'}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'decimal-number\', is_config=True)'}) self.__cluster_id_value = t if hasattr(self, '_set'): self._set()
236,462,422,169,550,080
Setter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number) If this variable is read-only (config: false) in the source YANG file, then _set_cluster_id_value is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cluster_id_value() directly.
pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/router_bgp_attributes/cluster_id/__init__.py
_set_cluster_id_value
extremenetworks/pybind
python
def _set_cluster_id_value(self, v, load=False): '\n Setter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_cluster_id_value is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_cluster_id_value() directly.\n ' if hasattr(v, '_utype'): v = v._utype(v) try: t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name='cluster-id-value', rest_name='id', parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True) except (TypeError, ValueError): raise ValueError({'error-string': 'cluster_id_value must be of a type compatible with decimal-number', 'defined-type': 'brocade-bgp:decimal-number', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={\'range\': [\'0..4294967295\']}, int_size=32), restriction_dict={\'range\': [u\'1..65535\']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u\'ch-cluster-id\', u\'ca-cluster-id\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Route-Reflector Cluster-ID as 32 bit quantity\', u\'cli-drop-node-name\': None, u\'alt-name\': u\'id\'}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'decimal-number\', is_config=True)'}) self.__cluster_id_value = t if hasattr(self, '_set'): self._set()
def _get_cluster_id_ipv4_address(self): '\n Getter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)\n ' return self.__cluster_id_ipv4_address
-3,337,470,066,997,727,000
Getter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)
pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/router_bgp_attributes/cluster_id/__init__.py
_get_cluster_id_ipv4_address
extremenetworks/pybind
python
def _get_cluster_id_ipv4_address(self): '\n \n ' return self.__cluster_id_ipv4_address
def _set_cluster_id_ipv4_address(self, v, load=False): '\n Setter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_cluster_id_ipv4_address is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_cluster_id_ipv4_address() directly.\n ' if hasattr(v, '_utype'): v = v._utype(v) try: t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name='cluster-id-ipv4-address', rest_name='ipv4-address', parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({'error-string': 'cluster_id_ipv4_address must be of a type compatible with inet:ipv4-address', 'defined-type': 'inet:ipv4-address', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'pattern\': u\'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?\'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u\'ch-cluster-id\', u\'ca-cluster-id-ipv4-address\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Route-Reflector Cluster-ID as IP address\', u\'alt-name\': u\'ipv4-address\'}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'inet:ipv4-address\', is_config=True)'}) self.__cluster_id_ipv4_address = t if hasattr(self, '_set'): self._set()
5,680,376,728,149,061,000
Setter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_cluster_id_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cluster_id_ipv4_address() directly.
pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/router_bgp_attributes/cluster_id/__init__.py
_set_cluster_id_ipv4_address
extremenetworks/pybind
python
def _set_cluster_id_ipv4_address(self, v, load=False): '\n Setter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_cluster_id_ipv4_address is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_cluster_id_ipv4_address() directly.\n ' if hasattr(v, '_utype'): v = v._utype(v) try: t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name='cluster-id-ipv4-address', rest_name='ipv4-address', parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({'error-string': 'cluster_id_ipv4_address must be of a type compatible with inet:ipv4-address', 'defined-type': 'inet:ipv4-address', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'pattern\': u\'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?\'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u\'ch-cluster-id\', u\'ca-cluster-id-ipv4-address\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Route-Reflector Cluster-ID as IP address\', u\'alt-name\': u\'ipv4-address\'}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'inet:ipv4-address\', is_config=True)'}) self.__cluster_id_ipv4_address = t if hasattr(self, '_set'): self._set()
def test_rss2_feed(self): '\n Test the structure and content of feeds generated by Rss201rev2Feed.\n ' response = self.client.get('/syndication/rss2/') doc = minidom.parseString(response.content) feed_elem = doc.getElementsByTagName('rss') self.assertEqual(len(feed_elem), 1) feed = feed_elem[0] self.assertEqual(feed.getAttribute('version'), '2.0') chan_elem = feed.getElementsByTagName('channel') self.assertEqual(len(chan_elem), 1) chan = chan_elem[0] d = Entry.objects.latest('date').date ltz = tzinfo.LocalTimezone(d) last_build_date = rfc2822_date(d.replace(tzinfo=ltz)) self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category']) self.assertChildNodeContent(chan, {'title': 'My blog', 'description': 'A more thorough description of my blog.', 'link': 'http://example.com/blog/', 'language': 'en', 'lastBuildDate': last_build_date, 'ttl': '600', 'copyright': 'Copyright (c) 2007, Sally Smith'}) self.assertCategories(chan, ['python', 'django']) self.assertChildNodeContent(chan, {'title': 'My blog', 'link': 'http://example.com/blog/'}) self.assertEqual(chan.getElementsByTagName('atom:link')[0].getAttribute('href'), 'http://example.com/syndication/rss2/') d = Entry.objects.get(pk=1).date ltz = tzinfo.LocalTimezone(d) pub_date = rfc2822_date(d.replace(tzinfo=ltz)) items = chan.getElementsByTagName('item') self.assertEqual(len(items), Entry.objects.count()) self.assertChildNodeContent(items[0], {'title': 'My first entry', 'description': 'Overridden description: My first entry', 'link': 'http://example.com/blog/1/', 'guid': 'http://example.com/blog/1/', 'pubDate': pub_date, 'author': 'example@example.com (Sally Smith)'}) self.assertCategories(items[0], ['python', 'testing']) for item in items: self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
-553,182,212,880,170,560
Test the structure and content of feeds generated by Rss201rev2Feed.
tests/regressiontests/syndication/tests.py
test_rss2_feed
Smarsh/django
python
def test_rss2_feed(self): '\n \n ' response = self.client.get('/syndication/rss2/') doc = minidom.parseString(response.content) feed_elem = doc.getElementsByTagName('rss') self.assertEqual(len(feed_elem), 1) feed = feed_elem[0] self.assertEqual(feed.getAttribute('version'), '2.0') chan_elem = feed.getElementsByTagName('channel') self.assertEqual(len(chan_elem), 1) chan = chan_elem[0] d = Entry.objects.latest('date').date ltz = tzinfo.LocalTimezone(d) last_build_date = rfc2822_date(d.replace(tzinfo=ltz)) self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category']) self.assertChildNodeContent(chan, {'title': 'My blog', 'description': 'A more thorough description of my blog.', 'link': 'http://example.com/blog/', 'language': 'en', 'lastBuildDate': last_build_date, 'ttl': '600', 'copyright': 'Copyright (c) 2007, Sally Smith'}) self.assertCategories(chan, ['python', 'django']) self.assertChildNodeContent(chan, {'title': 'My blog', 'link': 'http://example.com/blog/'}) self.assertEqual(chan.getElementsByTagName('atom:link')[0].getAttribute('href'), 'http://example.com/syndication/rss2/') d = Entry.objects.get(pk=1).date ltz = tzinfo.LocalTimezone(d) pub_date = rfc2822_date(d.replace(tzinfo=ltz)) items = chan.getElementsByTagName('item') self.assertEqual(len(items), Entry.objects.count()) self.assertChildNodeContent(items[0], {'title': 'My first entry', 'description': 'Overridden description: My first entry', 'link': 'http://example.com/blog/1/', 'guid': 'http://example.com/blog/1/', 'pubDate': pub_date, 'author': 'example@example.com (Sally Smith)'}) self.assertCategories(items[0], ['python', 'testing']) for item in items: self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
def test_rss091_feed(self): '\n Test the structure and content of feeds generated by RssUserland091Feed.\n ' response = self.client.get('/syndication/rss091/') doc = minidom.parseString(response.content) feed_elem = doc.getElementsByTagName('rss') self.assertEqual(len(feed_elem), 1) feed = feed_elem[0] self.assertEqual(feed.getAttribute('version'), '0.91') chan_elem = feed.getElementsByTagName('channel') self.assertEqual(len(chan_elem), 1) chan = chan_elem[0] self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category']) self.assertChildNodeContent(chan, {'title': 'My blog', 'link': 'http://example.com/blog/'}) self.assertCategories(chan, ['python', 'django']) self.assertEqual(chan.getElementsByTagName('atom:link')[0].getAttribute('href'), 'http://example.com/syndication/rss091/') items = chan.getElementsByTagName('item') self.assertEqual(len(items), Entry.objects.count()) self.assertChildNodeContent(items[0], {'title': 'My first entry', 'description': 'Overridden description: My first entry', 'link': 'http://example.com/blog/1/'}) for item in items: self.assertChildNodes(item, ['title', 'link', 'description']) self.assertCategories(item, [])
-545,652,658,098,288,800
Test the structure and content of feeds generated by RssUserland091Feed.
tests/regressiontests/syndication/tests.py
test_rss091_feed
Smarsh/django
python
def test_rss091_feed(self): '\n \n ' response = self.client.get('/syndication/rss091/') doc = minidom.parseString(response.content) feed_elem = doc.getElementsByTagName('rss') self.assertEqual(len(feed_elem), 1) feed = feed_elem[0] self.assertEqual(feed.getAttribute('version'), '0.91') chan_elem = feed.getElementsByTagName('channel') self.assertEqual(len(chan_elem), 1) chan = chan_elem[0] self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category']) self.assertChildNodeContent(chan, {'title': 'My blog', 'link': 'http://example.com/blog/'}) self.assertCategories(chan, ['python', 'django']) self.assertEqual(chan.getElementsByTagName('atom:link')[0].getAttribute('href'), 'http://example.com/syndication/rss091/') items = chan.getElementsByTagName('item') self.assertEqual(len(items), Entry.objects.count()) self.assertChildNodeContent(items[0], {'title': 'My first entry', 'description': 'Overridden description: My first entry', 'link': 'http://example.com/blog/1/'}) for item in items: self.assertChildNodes(item, ['title', 'link', 'description']) self.assertCategories(item, [])
def test_atom_feed(self): '\n Test the structure and content of feeds generated by Atom1Feed.\n ' response = self.client.get('/syndication/atom/') feed = minidom.parseString(response.content).firstChild self.assertEqual(feed.nodeName, 'feed') self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom') self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author']) for link in feed.getElementsByTagName('link'): if (link.getAttribute('rel') == 'self'): self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/') entries = feed.getElementsByTagName('entry') self.assertEqual(len(entries), Entry.objects.count()) for entry in entries: self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author']) summary = entry.getElementsByTagName('summary')[0] self.assertEqual(summary.getAttribute('type'), 'html')
-7,519,615,970,066,044,000
Test the structure and content of feeds generated by Atom1Feed.
tests/regressiontests/syndication/tests.py
test_atom_feed
Smarsh/django
python
def test_atom_feed(self): '\n \n ' response = self.client.get('/syndication/atom/') feed = minidom.parseString(response.content).firstChild self.assertEqual(feed.nodeName, 'feed') self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom') self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author']) for link in feed.getElementsByTagName('link'): if (link.getAttribute('rel') == 'self'): self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/') entries = feed.getElementsByTagName('entry') self.assertEqual(len(entries), Entry.objects.count()) for entry in entries: self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author']) summary = entry.getElementsByTagName('summary')[0] self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self): '\n Tests that titles are escaped correctly in RSS feeds.\n ' response = self.client.get('/syndication/rss2/') doc = minidom.parseString(response.content) for item in doc.getElementsByTagName('item'): link = item.getElementsByTagName('link')[0] if (link.firstChild.wholeText == 'http://example.com/blog/4/'): title = item.getElementsByTagName('title')[0] self.assertEquals(title.firstChild.wholeText, u'A &amp; B &lt; C &gt; D')
-6,928,047,492,795,901,000
Tests that titles are escaped correctly in RSS feeds.
tests/regressiontests/syndication/tests.py
test_title_escaping
Smarsh/django
python
def test_title_escaping(self): '\n \n ' response = self.client.get('/syndication/rss2/') doc = minidom.parseString(response.content) for item in doc.getElementsByTagName('item'): link = item.getElementsByTagName('link')[0] if (link.firstChild.wholeText == 'http://example.com/blog/4/'): title = item.getElementsByTagName('title')[0] self.assertEquals(title.firstChild.wholeText, u'A &amp; B &lt; C &gt; D')
def test_naive_datetime_conversion(self): '\n Test that datetimes are correctly converted to the local time zone.\n ' response = self.client.get('/syndication/naive-dates/') doc = minidom.parseString(response.content) updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText d = Entry.objects.latest('date').date ltz = tzinfo.LocalTimezone(d) latest = rfc3339_date(d.replace(tzinfo=ltz)) self.assertEqual(updated, latest)
4,198,248,771,720,096,000
Test that datetimes are correctly converted to the local time zone.
tests/regressiontests/syndication/tests.py
test_naive_datetime_conversion
Smarsh/django
python
def test_naive_datetime_conversion(self): '\n \n ' response = self.client.get('/syndication/naive-dates/') doc = minidom.parseString(response.content) updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText d = Entry.objects.latest('date').date ltz = tzinfo.LocalTimezone(d) latest = rfc3339_date(d.replace(tzinfo=ltz)) self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self): "\n Test that datetimes with timezones don't get trodden on.\n " response = self.client.get('/syndication/aware-dates/') doc = minidom.parseString(response.content) updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText self.assertEqual(updated[(- 6):], '+00:42')
4,785,870,899,759,614,000
Test that datetimes with timezones don't get trodden on.
tests/regressiontests/syndication/tests.py
test_aware_datetime_conversion
Smarsh/django
python
def test_aware_datetime_conversion(self): "\n \n " response = self.client.get('/syndication/aware-dates/') doc = minidom.parseString(response.content) updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText self.assertEqual(updated[(- 6):], '+00:42')
def test_feed_url(self): '\n Test that the feed_url can be overridden.\n ' response = self.client.get('/syndication/feedurl/') doc = minidom.parseString(response.content) for link in doc.getElementsByTagName('link'): if (link.getAttribute('rel') == 'self'): self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
-4,254,890,965,348,122,600
Test that the feed_url can be overridden.
tests/regressiontests/syndication/tests.py
test_feed_url
Smarsh/django
python
def test_feed_url(self): '\n \n ' response = self.client.get('/syndication/feedurl/') doc = minidom.parseString(response.content) for link in doc.getElementsByTagName('link'): if (link.getAttribute('rel') == 'self'): self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_item_link_error(self): '\n Test that a ImproperlyConfigured is raised if no link could be found\n for the item(s).\n ' self.assertRaises(ImproperlyConfigured, self.client.get, '/syndication/articles/')
4,261,096,110,716,304,400
Test that an ImproperlyConfigured is raised if no link could be found for the item(s).
tests/regressiontests/syndication/tests.py
test_item_link_error
Smarsh/django
python
def test_item_link_error(self): '\n Test that an ImproperlyConfigured is raised if no link could be found\n for the item(s).\n ' self.assertRaises(ImproperlyConfigured, self.client.get, '/syndication/articles/')
def test_template_feed(self): '\n Test that the item title and description can be overridden with\n templates.\n ' response = self.client.get('/syndication/template/') doc = minidom.parseString(response.content) feed = doc.getElementsByTagName('rss')[0] chan = feed.getElementsByTagName('channel')[0] items = chan.getElementsByTagName('item') self.assertChildNodeContent(items[0], {'title': 'Title in your templates: My first entry', 'description': 'Description in your templates: My first entry', 'link': 'http://example.com/blog/1/'})
-8,862,071,585,553,255,000
Test that the item title and description can be overridden with templates.
tests/regressiontests/syndication/tests.py
test_template_feed
Smarsh/django
python
def test_template_feed(self): '\n Test that the item title and description can be overridden with\n templates.\n ' response = self.client.get('/syndication/template/') doc = minidom.parseString(response.content) feed = doc.getElementsByTagName('rss')[0] chan = feed.getElementsByTagName('channel')[0] items = chan.getElementsByTagName('item') self.assertChildNodeContent(items[0], {'title': 'Title in your templates: My first entry', 'description': 'Description in your templates: My first entry', 'link': 'http://example.com/blog/1/'})
def test_add_domain(self): '\n Test add_domain() prefixes domains onto the correct URLs.\n ' self.assertEqual(views.add_domain('example.com', '/foo/?arg=value'), 'http://example.com/foo/?arg=value') self.assertEqual(views.add_domain('example.com', 'http://djangoproject.com/doc/'), 'http://djangoproject.com/doc/') self.assertEqual(views.add_domain('example.com', 'https://djangoproject.com/doc/'), 'https://djangoproject.com/doc/') self.assertEqual(views.add_domain('example.com', 'mailto:example@example.com'), 'mailto:example@example.com')
9,005,860,053,456,032,000
Test add_domain() prefixes domains onto the correct URLs.
tests/regressiontests/syndication/tests.py
test_add_domain
Smarsh/django
python
def test_add_domain(self): '\n \n ' self.assertEqual(views.add_domain('example.com', '/foo/?arg=value'), 'http://example.com/foo/?arg=value') self.assertEqual(views.add_domain('example.com', 'http://djangoproject.com/doc/'), 'http://djangoproject.com/doc/') self.assertEqual(views.add_domain('example.com', 'https://djangoproject.com/doc/'), 'https://djangoproject.com/doc/') self.assertEqual(views.add_domain('example.com', 'mailto:example@example.com'), 'mailto:example@example.com')
def test_empty_feed_dict(self): '\n Test that an empty feed_dict raises a 404.\n ' response = self.client.get('/syndication/depr-feeds-empty/aware-dates/') self.assertEquals(response.status_code, 404)
6,601,415,122,694,413,000
Test that an empty feed_dict raises a 404.
tests/regressiontests/syndication/tests.py
test_empty_feed_dict
Smarsh/django
python
def test_empty_feed_dict(self): '\n \n ' response = self.client.get('/syndication/depr-feeds-empty/aware-dates/') self.assertEquals(response.status_code, 404)
def test_nonexistent_slug(self): '\n Test that a non-existent slug raises a 404.\n ' response = self.client.get('/syndication/depr-feeds/foobar/') self.assertEquals(response.status_code, 404)
-625,279,591,304,710,500
Test that a non-existent slug raises a 404.
tests/regressiontests/syndication/tests.py
test_nonexistent_slug
Smarsh/django
python
def test_nonexistent_slug(self): '\n \n ' response = self.client.get('/syndication/depr-feeds/foobar/') self.assertEquals(response.status_code, 404)
def test_rss_feed(self): '\n A simple test for Rss201rev2Feed feeds generated by the deprecated\n system.\n ' response = self.client.get('/syndication/depr-feeds/rss/') doc = minidom.parseString(response.content) feed = doc.getElementsByTagName('rss')[0] self.assertEqual(feed.getAttribute('version'), '2.0') chan = feed.getElementsByTagName('channel')[0] self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link']) items = chan.getElementsByTagName('item') self.assertEqual(len(items), Entry.objects.count())
9,172,127,389,360,837,000
A simple test for Rss201rev2Feed feeds generated by the deprecated system.
tests/regressiontests/syndication/tests.py
test_rss_feed
Smarsh/django
python
def test_rss_feed(self): '\n A simple test for Rss201rev2Feed feeds generated by the deprecated\n system.\n ' response = self.client.get('/syndication/depr-feeds/rss/') doc = minidom.parseString(response.content) feed = doc.getElementsByTagName('rss')[0] self.assertEqual(feed.getAttribute('version'), '2.0') chan = feed.getElementsByTagName('channel')[0] self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link']) items = chan.getElementsByTagName('item') self.assertEqual(len(items), Entry.objects.count())
def test_complex_base_url(self): "\n Tests that the base url for a complex feed doesn't raise a 500\n exception.\n " response = self.client.get('/syndication/depr-feeds/complex/') self.assertEquals(response.status_code, 404)
-4,618,111,784,728,741,000
Tests that the base url for a complex feed doesn't raise a 500 exception.
tests/regressiontests/syndication/tests.py
test_complex_base_url
Smarsh/django
python
def test_complex_base_url(self): "\n Tests that the base url for a complex feed doesn't raise a 500\n exception.\n " response = self.client.get('/syndication/depr-feeds/complex/') self.assertEquals(response.status_code, 404)
def search(self, params, max_results=None, results=[]): '\n Do a search.\n ' sleep(random.randint(0, 1)) count = (max_results if (max_results and (max_results <= Linkedin._MAX_SEARCH_COUNT)) else Linkedin._MAX_SEARCH_COUNT) default_params = {'count': count, 'guides': 'List()', 'origin': 'GLOBAL_SEARCH_HEADER', 'q': 'guided', 'start': len(results)} default_params.update(params) res = self.client.session.get(f'{self.client.API_BASE_URL}/search/cluster', params=default_params) data = res.json() total_found = data.get('paging', {}).get('total') if ((len(data['elements']) == 0) or ((max_results is not None) and (len(results) >= max_results)) or (total_found is None) or (len(results) >= total_found) or ((max_results is not None) and ((len(results) / max_results) >= Linkedin._MAX_REPEATED_REQUESTS))): return results results.extend(data['elements'][0]['elements']) self.logger.debug(f'results grew: {len(results)}') return self.search(params, results=results, max_results=max_results)
3,976,749,391,847,237,000
Do a search.
linkedin_api/linkedin.py
search
Alexander-Bakogeorge/linkedin-api
python
def search(self, params, max_results=None, results=[]): '\n \n ' sleep(random.randint(0, 1)) count = (max_results if (max_results and (max_results <= Linkedin._MAX_SEARCH_COUNT)) else Linkedin._MAX_SEARCH_COUNT) default_params = {'count': count, 'guides': 'List()', 'origin': 'GLOBAL_SEARCH_HEADER', 'q': 'guided', 'start': len(results)} default_params.update(params) res = self.client.session.get(f'{self.client.API_BASE_URL}/search/cluster', params=default_params) data = res.json() total_found = data.get('paging', {}).get('total') if ((len(data['elements']) == 0) or ((max_results is not None) and (len(results) >= max_results)) or (total_found is None) or (len(results) >= total_found) or ((max_results is not None) and ((len(results) / max_results) >= Linkedin._MAX_REPEATED_REQUESTS))): return results results.extend(data['elements'][0]['elements']) self.logger.debug(f'results grew: {len(results)}') return self.search(params, results=results, max_results=max_results)
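search (like search_companies, get_company_updates, and get_profile_updates below) accumulates pages through a results=[] default argument. Python evaluates that default once, so successive top-level calls share and keep growing the same list, a classic pitfall. A minimal demonstration and the usual fix (a sketch, not the library's code):

    def grow(item, acc=[]):       # pitfall: one list shared across all calls
        acc.append(item)
        return acc

    assert grow(1) == [1]
    assert grow(2) == [1, 2]      # surprising carry-over from the first call

    def grow_safe(item, acc=None):
        if acc is None:           # fresh list per top-level call
            acc = []
        acc.append(item)
        return acc

    assert grow_safe(1) == [1]
    assert grow_safe(2) == [2]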
def search_people(self, keywords=None, connection_of=None, network_depth=None, regions=None, industries=None): '\n Do a people search.\n ' guides = ['v->PEOPLE'] if connection_of: guides.append(f'facetConnectionOf->{connection_of}') if network_depth: guides.append(f'facetNetwork->{network_depth}') if regions: guides.append(f"facetGeoRegion->{'|'.join(regions)}") if industries: guides.append(f"facetIndustry->{'|'.join(industries)}") params = {'guides': 'List({})'.format(','.join(guides))} if keywords: params['keywords'] = keywords data = self.search(params) results = [] for item in data: search_profile = item['hitInfo']['com.linkedin.voyager.search.SearchProfile'] profile_id = search_profile['id'] distance = search_profile['distance']['value'] results.append({'urn_id': profile_id, 'distance': distance, 'public_id': search_profile['miniProfile']['publicIdentifier']}) return results
1,342,571,766,792,130,300
Do a people search.
linkedin_api/linkedin.py
search_people
Alexander-Bakogeorge/linkedin-api
python
def search_people(self, keywords=None, connection_of=None, network_depth=None, regions=None, industries=None): '\n \n ' guides = ['v->PEOPLE'] if connection_of: guides.append(f'facetConnectionOf->{connection_of}') if network_depth: guides.append(f'facetNetwork->{network_depth}') if regions: guides.append(f"facetGeoRegion->{'|'.join(regions)}") if industries: guides.append(f"facetIndustry->{'|'.join(industries)}") params = {'guides': 'List({})'.format(','.join(guides))} if keywords: params['keywords'] = keywords data = self.search(params) results = [] for item in data: search_profile = item['hitInfo']['com.linkedin.voyager.search.SearchProfile'] profile_id = search_profile['id'] distance = search_profile['distance']['value'] results.append({'urn_id': profile_id, 'distance': distance, 'public_id': search_profile['miniProfile']['publicIdentifier']}) return results
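search_people encodes its facets into LinkedIn's List(...) query syntax. A small sketch of just that parameter-building step (guide values follow the record; the region codes here are made up and the endpoint itself is not exercised):

    guides = ['v->PEOPLE', 'facetNetwork->F', 'facetGeoRegion->au:4910|au:4909']
    params = {'guides': 'List({})'.format(','.join(guides))}
    assert params['guides'] == 'List(v->PEOPLE,facetNetwork->F,facetGeoRegion->au:4910|au:4909)'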
def search_companies(self, max_results=None, results=[]): '\n Do a company search\n Note: try swap from blended search to cluster\n ' sleep(random.randint(2, 5)) '\n default_params = {\n "count": count,\n "guides": "List()",\n "origin": "GLOBAL_SEARCH_HEADER",\n "q": "guided",\n "start": len(results),\n }\n ' default_params = {'origin': 'GLOBAL_SEARCH_HEADER', 'guides': 'List(resultType->companies)', 'count': '10', 'q': 'guided', 'filters': 'List(resultType->companies)', 'start': len(results)} res = self.client.session.get(f'{self.client.API_BASE_URL}/search/blended?keywords=s&origin=GLOBAL_SEARCH_HEADER&count=10&guides=List(resultType-%3Ecompanies)&q=all&filters=List(resultType-%3Ecompanies)&start={len(results)}') data = res.json() total_found = data.get('paging', {}).get('total') if ((len(data['elements']) == 0) or (len(data['elements'][0]['elements']) == 0) or (total_found is None) or ((max_results is not None) and (len(results) >= max_results)) or ((max_results is not None) and ((len(results) / max_results) >= Linkedin._MAX_REPEATED_REQUESTS))): return results results.extend(data['elements'][0]['elements']) self.logger.debug(f'results grew: {len(results)}') return self.search_companies(max_results=max_results, results=results)
-919,447,569,238,439,700
Do a company search Note: try swap from blended search to cluster
linkedin_api/linkedin.py
search_companies
Alexander-Bakogeorge/linkedin-api
python
def search_companies(self, max_results=None, results=[]): '\n Do a company search\n Note: try swap from blended search to cluster\n ' sleep(random.randint(2, 5)) '\n default_params = {\n "count": count,\n "guides": "List()",\n "origin": "GLOBAL_SEARCH_HEADER",\n "q": "guided",\n "start": len(results),\n }\n ' default_params = {'origin': 'GLOBAL_SEARCH_HEADER', 'guides': 'List(resultType->companies)', 'count': '10', 'q': 'guided', 'filters': 'List(resultType->companies)', 'start': len(results)} res = self.client.session.get(f'{self.client.API_BASE_URL}/search/blended?keywords=s&origin=GLOBAL_SEARCH_HEADER&count=10&guides=List(resultType-%3Ecompanies)&q=all&filters=List(resultType-%3Ecompanies)&start={len(results)}') data = res.json() total_found = data.get('paging', {}).get('total') if ((len(data['elements']) == 0) or (len(data['elements'][0]['elements']) == 0) or (total_found is None) or ((max_results is not None) and (len(results) >= max_results)) or ((max_results is not None) and ((len(results) / max_results) >= Linkedin._MAX_REPEATED_REQUESTS))): return results results.extend(data['elements'][0]['elements']) self.logger.debug(f'results grew: {len(results)}') return self.search_companies(max_results=max_results, results=results)
def get_profile_contact_info(self, public_id=None, urn_id=None): '\n Return data for a single profile.\n\n [public_id] - public identifier i.e. tom-quirk-1928345\n [urn_id] - id provided by the related URN\n ' res = self.client.session.get(f'{self.client.API_BASE_URL}/identity/profiles/{(public_id or urn_id)}/profileContactInfo') data = res.json() contact_info = {'email_address': data.get('emailAddress'), 'websites': [], 'phone_numbers': data.get('phoneNumbers', [])} websites = data.get('websites', []) for item in websites: if ('com.linkedin.voyager.identity.profile.StandardWebsite' in item['type']): item['label'] = item['type']['com.linkedin.voyager.identity.profile.StandardWebsite']['category'] elif ('com.linkedin.voyager.identity.profile.CustomWebsite' in item['type']): item['label'] = item['type']['com.linkedin.voyager.identity.profile.CustomWebsite']['label'] del item['type'] contact_info['websites'] = websites return contact_info
3,077,738,592,435,370,500
Return data for a single profile. [public_id] - public identifier i.e. tom-quirk-1928345 [urn_id] - id provided by the related URN
linkedin_api/linkedin.py
get_profile_contact_info
Alexander-Bakogeorge/linkedin-api
python
def get_profile_contact_info(self, public_id=None, urn_id=None): '\n Return data for a single profile.\n\n [public_id] - public identifier i.e. tom-quirk-1928345\n [urn_id] - id provided by the related URN\n ' res = self.client.session.get(f'{self.client.API_BASE_URL}/identity/profiles/{(public_id or urn_id)}/profileContactInfo') data = res.json() contact_info = {'email_address': data.get('emailAddress'), 'websites': [], 'phone_numbers': data.get('phoneNumbers', [])} websites = data.get('websites', []) for item in websites: if ('com.linkedin.voyager.identity.profile.StandardWebsite' in item['type']): item['label'] = item['type']['com.linkedin.voyager.identity.profile.StandardWebsite']['category'] elif ('com.linkedin.voyager.identity.profile.CustomWebsite' in item['type']): item['label'] = item['type']['com.linkedin.voyager.identity.profile.CustomWebsite']['label'] del item['type'] contact_info['websites'] = websites return contact_info
def get_profile(self, public_id=None, urn_id=None): '\n Return data for a single profile.\n\n [public_id] - public identifier i.e. tom-quirk-1928345\n [urn_id] - id provided by the related URN\n ' sleep(random.randint(2, 5)) res = self.client.session.get(f'{self.client.API_BASE_URL}/identity/profiles/{(public_id or urn_id)}/profileView') data = res.json() if (data and ('status' in data) and (data['status'] != 200)): self.logger.info('request failed: {}'.format(data['message'])) return {} profile = data['profile'] if ('miniProfile' in profile): if ('picture' in profile['miniProfile']): profile['displayPictureUrl'] = profile['miniProfile']['picture']['com.linkedin.common.VectorImage']['rootUrl'] profile['profile_id'] = get_id_from_urn(profile['miniProfile']['entityUrn']) del profile['miniProfile'] del profile['defaultLocale'] del profile['supportedLocales'] del profile['versionTag'] del profile['showEducationOnProfileTopCard'] experience = data['positionView']['elements'] for item in experience: if (('company' in item) and ('miniCompany' in item['company'])): if ('logo' in item['company']['miniCompany']): logo = item['company']['miniCompany']['logo'].get('com.linkedin.common.VectorImage') if logo: item['companyLogoUrl'] = logo['rootUrl'] del item['company']['miniCompany'] profile['experience'] = experience skills = [item['name'] for item in data['skillView']['elements']] profile['skills'] = skills education = data['educationView']['elements'] for item in education: if ('school' in item): if ('logo' in item['school']): item['school']['logoUrl'] = item['school']['logo']['com.linkedin.common.VectorImage']['rootUrl'] del item['school']['logo'] profile['education'] = education return profile
4,548,155,974,933,145,000
Return data for a single profile. [public_id] - public identifier i.e. tom-quirk-1928345 [urn_id] - id provided by the related URN
linkedin_api/linkedin.py
get_profile
Alexander-Bakogeorge/linkedin-api
python
def get_profile(self, public_id=None, urn_id=None): '\n Return data for a single profile.\n\n [public_id] - public identifier i.e. tom-quirk-1928345\n [urn_id] - id provided by the related URN\n ' sleep(random.randint(2, 5)) res = self.client.session.get(f'{self.client.API_BASE_URL}/identity/profiles/{(public_id or urn_id)}/profileView') data = res.json() if (data and ('status' in data) and (data['status'] != 200)): self.logger.info('request failed: {}'.format(data['message'])) return {} profile = data['profile'] if ('miniProfile' in profile): if ('picture' in profile['miniProfile']): profile['displayPictureUrl'] = profile['miniProfile']['picture']['com.linkedin.common.VectorImage']['rootUrl'] profile['profile_id'] = get_id_from_urn(profile['miniProfile']['entityUrn']) del profile['miniProfile'] del profile['defaultLocale'] del profile['supportedLocales'] del profile['versionTag'] del profile['showEducationOnProfileTopCard'] experience = data['positionView']['elements'] for item in experience: if (('company' in item) and ('miniCompany' in item['company'])): if ('logo' in item['company']['miniCompany']): logo = item['company']['miniCompany']['logo'].get('com.linkedin.common.VectorImage') if logo: item['companyLogoUrl'] = logo['rootUrl'] del item['company']['miniCompany'] profile['experience'] = experience skills = [item['name'] for item in data['skillView']['elements']] profile['skills'] = skills education = data['educationView']['elements'] for item in education: if ('school' in item): if ('logo' in item['school']): item['school']['logoUrl'] = item['school']['logo']['com.linkedin.common.VectorImage']['rootUrl'] del item['school']['logo'] profile['education'] = education return profile
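get_profile calls a get_id_from_urn helper that is imported elsewhere in linkedin-api and not shown in these records. A plausible minimal implementation, offered only as an assumption about its contract (take the trailing colon-separated segment of the URN):

    def get_id_from_urn(urn):
        # e.g. 'urn:li:fs_miniProfile:AbC123xyz' -> 'AbC123xyz' (assumed behavior)
        return urn.split(':')[-1]

    assert get_id_from_urn('urn:li:fs_miniProfile:AbC123xyz') == 'AbC123xyz'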
def get_profile_connections(self, urn_id): '\n Return a list of profile ids connected to profile of given [urn_id]\n ' return self.search_people(connection_of=urn_id, network_depth='F')
-8,977,781,855,160,709,000
Return a list of profile ids connected to the profile of the given [urn_id]
linkedin_api/linkedin.py
get_profile_connections
Alexander-Bakogeorge/linkedin-api
python
def get_profile_connections(self, urn_id): '\n \n ' return self.search_people(connection_of=urn_id, network_depth='F')
def get_profile_networkinfo(self, urn_id): '\n Return the nework info connected to the profile of the given [urn_id]\n ' sleep(random.randint(2, 5)) res = self.client.session.get(f'{self.client.API_BASE_URL}/identity/profiles/{urn_id}/networkinfo') return res.json()
4,752,674,305,240,124,000
Return the network info connected to the profile of the given [urn_id]
linkedin_api/linkedin.py
get_profile_networkinfo
Alexander-Bakogeorge/linkedin-api
python
def get_profile_networkinfo(self, urn_id): '\n \n ' sleep(random.randint(2, 5)) res = self.client.session.get(f'{self.client.API_BASE_URL}/identity/profiles/{urn_id}/networkinfo') return res.json()
def get_company_updates(self, public_id=None, urn_id=None, max_results=None, results=[]): '"\n Return a list of company posts\n\n [public_id] - public identifier ie - microsoft\n [urn_id] - id provided by the related URN\n ' sleep(random.randint(2, 5)) params = {'companyUniversalName': {(public_id or urn_id)}, 'q': 'companyFeedByUniversalName', 'moduleKey': 'member-share', 'count': Linkedin._MAX_UPDATE_COUNT, 'start': len(results)} res = self.client.session.get(f'{self.client.API_BASE_URL}/feed/updates', params=params) data = res.json() if ((len(data['elements']) == 0) or ((max_results is not None) and (len(results) >= max_results)) or ((max_results is not None) and ((len(results) / max_results) >= Linkedin._MAX_REPEATED_REQUESTS))): return results results.extend(data['elements']) self.logger.debug(f'results grew: {len(results)}') return self.get_company_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
-8,852,638,028,545,315,000
" Return a list of company posts [public_id] - public identifier ie - microsoft [urn_id] - id provided by the related URN
linkedin_api/linkedin.py
get_company_updates
Alexander-Bakogeorge/linkedin-api
python
def get_company_updates(self, public_id=None, urn_id=None, max_results=None, results=[]): '\n Return a list of company posts\n\n [public_id] - public identifier i.e. microsoft\n [urn_id] - id provided by the related URN\n ' sleep(random.randint(2, 5)) params = {'companyUniversalName': {(public_id or urn_id)}, 'q': 'companyFeedByUniversalName', 'moduleKey': 'member-share', 'count': Linkedin._MAX_UPDATE_COUNT, 'start': len(results)} res = self.client.session.get(f'{self.client.API_BASE_URL}/feed/updates', params=params) data = res.json() if ((len(data['elements']) == 0) or ((max_results is not None) and (len(results) >= max_results)) or ((max_results is not None) and ((len(results) / max_results) >= Linkedin._MAX_REPEATED_REQUESTS))): return results results.extend(data['elements']) self.logger.debug(f'results grew: {len(results)}') return self.get_company_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
def get_profile_updates(self, public_id=None, urn_id=None, max_results=None, results=[]): '"\n Return a list of profile posts\n\n [public_id] - public identifier i.e. tom-quirk-1928345\n [urn_id] - id provided by the related URN\n ' sleep(random.randint(2, 5)) params = {'profileId': {(public_id or urn_id)}, 'q': 'memberShareFeed', 'moduleKey': 'member-share', 'count': Linkedin._MAX_UPDATE_COUNT, 'start': len(results)} res = self.client.session.get(f'{self.client.API_BASE_URL}/feed/updates', params=params) data = res.json() if ((len(data['elements']) == 0) or ((max_results is not None) and (len(results) >= max_results)) or ((max_results is not None) and ((len(results) / max_results) >= Linkedin._MAX_REPEATED_REQUESTS))): return results results.extend(data['elements']) self.logger.debug(f'results grew: {len(results)}') return self.get_profile_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
4,512,190,451,809,785,000
" Return a list of profile posts [public_id] - public identifier i.e. tom-quirk-1928345 [urn_id] - id provided by the related URN
linkedin_api/linkedin.py
get_profile_updates
Alexander-Bakogeorge/linkedin-api
python
def get_profile_updates(self, public_id=None, urn_id=None, max_results=None, results=None): '\n Return a list of profile posts\n\n [public_id] - public identifier i.e. tom-quirk-1928345\n [urn_id] - id provided by the related URN\n ' if (results is None): results = [] sleep(random.randint(2, 5)) params = {'profileId': (public_id or urn_id), 'q': 'memberShareFeed', 'moduleKey': 'member-share', 'count': Linkedin._MAX_UPDATE_COUNT, 'start': len(results)} res = self.client.session.get(f'{self.client.API_BASE_URL}/feed/updates', params=params) data = res.json() if ((len(data['elements']) == 0) or ((max_results is not None) and (len(results) >= max_results)) or ((max_results is not None) and ((len(results) / max_results) >= Linkedin._MAX_REPEATED_REQUESTS))): return results results.extend(data['elements']) self.logger.debug(f'results grew: {len(results)}') return self.get_profile_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
def get_current_profile_views(self): '\n Get profile view statistics, including chart data.\n ' res = self.client.session.get(f'{self.client.API_BASE_URL}/identity/panels') data = res.json() return data['elements'][0]['value']['com.linkedin.voyager.identity.me.ProfileViewsByTimePanel']
7,285,217,187,157,486,000
Get profile view statistics, including chart data.
linkedin_api/linkedin.py
get_current_profile_views
Alexander-Bakogeorge/linkedin-api
python
def get_current_profile_views(self): '\n \n ' res = self.client.session.get(f'{self.client.API_BASE_URL}/identity/panels') data = res.json() return data['elements'][0]['value']['com.linkedin.voyager.identity.me.ProfileViewsByTimePanel']
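For completeness, a one-line sketch of the call above. Note the method already unwraps the panels payload down to the ProfileViewsByTimePanel value, so the caller receives the chart data directly; the exact shape of that value is an assumption best checked at runtime.

views = api.get_current_profile_views()  # api: authenticated client from the earlier sketch
print(views)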
def get_school(self, public_id): '\n Return data for a single school.\n\n [public_id] - public identifier i.e. uq\n ' sleep(random.randint(2, 5)) params = {'decoration': '\n (\n autoGenerated,backgroundCoverImage,\n companyEmployeesSearchPageUrl,companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,\n entityUrn,followingInfo,foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,\n paidCompany,partnerCompanyUrl,partnerLogo,partnerLogoImage,rankForTopCompanies,salesNavigatorCompanyUrl,\n school,showcase,staffCount,staffCountRange,staffingCompany,topCompaniesListName,universalName,url,\n companyIndustries*,industries,specialities,\n acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),\n affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),\n groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),\n showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)\n )\n ', 'q': 'universalName', 'universalName': public_id} res = self.client.session.get(f'{self.client.API_BASE_URL}/organization/companies', params=params) data = res.json() if (data and ('status' in data) and (data['status'] != 200)): self.logger.info('request failed: {}'.format(data['message'])) return {} school = data['elements'][0] return school
7,497,702,793,954,886,000
Return data for a single school. [public_id] - public identifier i.e. uq
linkedin_api/linkedin.py
get_school
Alexander-Bakogeorge/linkedin-api
python
def get_school(self, public_id): '\n Return data for a single school.\n\n [public_id] - public identifier i.e. uq\n ' sleep(random.randint(2, 5)) params = {'decoration': '\n (\n autoGenerated,backgroundCoverImage,\n companyEmployeesSearchPageUrl,companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,\n entityUrn,followingInfo,foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,\n paidCompany,partnerCompanyUrl,partnerLogo,partnerLogoImage,rankForTopCompanies,salesNavigatorCompanyUrl,\n school,showcase,staffCount,staffCountRange,staffingCompany,topCompaniesListName,universalName,url,\n companyIndustries*,industries,specialities,\n acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),\n affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),\n groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),\n showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)\n )\n ', 'q': 'universalName', 'universalName': public_id} res = self.client.session.get(f'{self.client.API_BASE_URL}/organization/companies', params=params) data = res.json() if (data and ('status' in data) and (data['status'] != 200)): self.logger.info('request failed: {}'.format(data['message'])) return {} school = data['elements'][0] return school
def get_similar_companies(self, public_id): '\n Return similar companies for a single company.\n\n [public_id] - public identifier i.e. university-of-queensland\n ' sleep(random.randint(2, 5)) res = self.client.session.get(f'{self.client.API_BASE_URL}/organization/companies?count={Linkedin._MAX_SEARCH_COUNT}&companyUniversalName={public_id}&q=similarCompanies&start=0&decorationId=com.linkedin.voyager.deco.organization.web.WebSimilarCompanyCardWithRelevanceReason-3') data = res.json() return data
-165,383,243,870,637,150
Return similar companies for a single company. [public_id] - public identifier i.e. university-of-queensland
linkedin_api/linkedin.py
get_similar_companies
Alexander-Bakogeorge/linkedin-api
python
def get_similar_companies(self, public_id): '\n Return similar companies for a single company.\n\n [public_id] - public identifier i.e. university-of-queensland\n ' sleep(random.randint(2, 5)) res = self.client.session.get(f'{self.client.API_BASE_URL}/organization/companies?count={Linkedin._MAX_SEARCH_COUNT}&companyUniversalName={public_id}&q=similarCompanies&start=0&decorationId=com.linkedin.voyager.deco.organization.web.WebSimilarCompanyCardWithRelevanceReason-3') data = res.json() return data
def get_company(self, public_id): '\n Return data for a single company.\n\n [public_id] - public identifier i.e. university-of-queensland\n ' sleep(random.randint(2, 5)) params = {'decoration': '\n (\n affiliatedCompaniesWithEmployeesRollup,affiliatedCompaniesWithJobsRollup,articlePermalinkForTopCompanies,\n autoGenerated,backgroundCoverImage,companyEmployeesSearchPageUrl,\n companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,entityUrn,followingInfo,\n foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,paidCompany,\n partnerCompanyUrl,partnerLogo,partnerLogoImage,permissions,rankForTopCompanies,\n salesNavigatorCompanyUrl,school,showcase,staffCount,staffCountRange,staffingCompany,\n topCompaniesListName,universalName,url,companyIndustries*,industries,specialities,\n acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),\n affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),\n groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),\n showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)\n )\n ', 'q': 'universalName', 'universalName': public_id} res = self.client.session.get(f'{self.client.API_BASE_URL}/organization/companies', params=params) data = res.json() if (data and ('status' in data) and (data['status'] != 200)): self.logger.info('request failed: {}'.format(data['message'])) return {} company = data['elements'][0] return company
-4,725,619,835,804,699,000
Return data for a single company. [public_id] - public identifier i.e. university-of-queensland
linkedin_api/linkedin.py
get_company
Alexander-Bakogeorge/linkedin-api
python
def get_company(self, public_id): '\n Return data for a single company.\n\n [public_id] - public identifier i.e. university-of-queensland\n ' sleep(random.randint(2, 5)) params = {'decoration': '\n (\n affiliatedCompaniesWithEmployeesRollup,affiliatedCompaniesWithJobsRollup,articlePermalinkForTopCompanies,\n autoGenerated,backgroundCoverImage,companyEmployeesSearchPageUrl,\n companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,entityUrn,followingInfo,\n foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,paidCompany,\n partnerCompanyUrl,partnerLogo,partnerLogoImage,permissions,rankForTopCompanies,\n salesNavigatorCompanyUrl,school,showcase,staffCount,staffCountRange,staffingCompany,\n topCompaniesListName,universalName,url,companyIndustries*,industries,specialities,\n acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),\n affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),\n groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),\n showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)\n )\n ', 'q': 'universalName', 'universalName': public_id} res = self.client.session.get(f'{self.client.API_BASE_URL}/organization/companies', params=params) data = res.json() if (data and ('status' in data) and (data['status'] != 200)): self.logger.info('request failed: {}'.format(data['message'])) return {} company = data['elements'][0] return company
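Both get_school and get_company drive the same /organization/companies endpoint; the long 'decoration' parameter is a Voyager projection that whitelists which fields the server returns, so any key read from the result should appear in that list. A short sketch (the public id is a placeholder; 'name' and 'staffCount' are fields requested in the decoration above):

from linkedin_api import Linkedin  # assumed entry point, as in the earlier sketch
api = Linkedin('user@example.com', 'password')  # placeholder credentials

company = api.get_company('university-of-queensland')
if company:  # the method returns {} when the lookup fails
    print(company.get('name'), company.get('staffCount'))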
def get_conversation_details(self, profile_urn_id): '\n Return the conversation (or "message thread") details for a given [profile_urn_id]\n ' res = self.client.session.get(f'{self.client.API_BASE_URL}/messaging/conversations?keyVersion=LEGACY_INBOX&q=participants&recipients=List({profile_urn_id})') data = res.json() item = data['elements'][0] item['id'] = get_id_from_urn(item['entityUrn']) return item
-4,790,249,521,740,766,000
Return the conversation (or "message thread") details for a given [profile_urn_id]
linkedin_api/linkedin.py
get_conversation_details
Alexander-Bakogeorge/linkedin-api
python
def get_conversation_details(self, profile_urn_id): '\n \n ' res = self.client.session.get(f'{self.client.API_BASE_URL}/messaging/conversations?keyVersion=LEGACY_INBOX&q=participants&recipients=List({profile_urn_id})') data = res.json() item = data['elements'][0] item['id'] = get_id_from_urn(item['entityUrn']) return item
def get_conversations(self): '\n Return list of conversations the user is in.\n ' params = {'keyVersion': 'LEGACY_INBOX'} res = self.client.session.get(f'{self.client.API_BASE_URL}/messaging/conversations', params=params) return res.json()
8,023,586,044,105,106,000
Return list of conversations the user is in.
linkedin_api/linkedin.py
get_conversations
Alexander-Bakogeorge/linkedin-api
python
def get_conversations(self): '\n \n ' params = {'keyVersion': 'LEGACY_INBOX'} res = self.client.session.get(f'{self.client.API_BASE_URL}/messaging/conversations', params=params) return res.json()
def get_conversation(self, conversation_urn_id): '\n Return the full conversation at a given [conversation_urn_id]\n ' res = self.client.session.get(f'{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events') return res.json()
8,517,749,211,933,363,000
Return the full conversation at a given [conversation_urn_id]
linkedin_api/linkedin.py
get_conversation
Alexander-Bakogeorge/linkedin-api
python
def get_conversation(self, conversation_urn_id): '\n \n ' res = self.client.session.get(f'{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events') return res.json()
def send_message(self, conversation_urn_id, message_body): '\n Send a message to the conversation at a given [conversation_urn_id]; returns True on success\n ' params = {'action': 'create'} payload = json.dumps({'eventCreate': {'value': {'com.linkedin.voyager.messaging.create.MessageCreate': {'body': message_body, 'attachments': [], 'attributedBody': {'text': message_body, 'attributes': []}, 'mediaAttachments': []}}}}) res = self.client.session.post(f'{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events', params=params, data=payload) return (res.status_code == 201)
-6,964,454,851,207,718,000
Send a message to the conversation at a given [conversation_urn_id]; returns True on success
linkedin_api/linkedin.py
send_message
Alexander-Bakogeorge/linkedin-api
python
def send_message(self, conversation_urn_id, message_body): '\n \n ' params = {'action': 'create'} payload = json.dumps({'eventCreate': {'value': {'com.linkedin.voyager.messaging.create.MessageCreate': {'body': message_body, 'attachments': [], 'attributedBody': {'text': message_body, 'attributes': []}, 'mediaAttachments': []}}}}) res = self.client.session.post(f'{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events', params=params, data=payload) return (res.status_code == 201)
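The four messaging methods above compose into a simple flow: resolve the conversation for a profile URN, send into it, then re-read the thread. A hedged sketch; the URN and message text are placeholders:

from linkedin_api import Linkedin  # assumed entry point, as in the earlier sketch
api = Linkedin('user@example.com', 'password')  # placeholder credentials

conversation = api.get_conversation_details('profile-urn-placeholder')
conversation_id = conversation['id']  # derived from entityUrn by the method

if api.send_message(conversation_id, 'Hello from the API!'):  # True on HTTP 201
    thread = api.get_conversation(conversation_id)
    print(len(thread.get('elements', [])), 'events in thread')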
def test_too_many_arguments_in_fixture(absolute_path): "\n End-to-end test to check the argument count.\n\n It is required due to how the 'function_type' parameter\n works inside 'flake8'.\n\n Otherwise it is not set, and unit tests cannot cover `is_method` correctly.\n " filename = absolute_path('fixtures', 'config', 'wrong_arguments.py') process = subprocess.Popen(['flake8', '--select', 'Z', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, _) = process.communicate() assert (stdout.count(b'Z211') == 4)
4,681,387,448,552,898,000
End-to-end test to check the argument count. It is required due to how the 'function_type' parameter works inside 'flake8'. Otherwise it is not set, and unit tests cannot cover `is_method` correctly.
tests/test_checkers/test_high_complexity.py
test_too_many_arguments_in_fixture
AlwxSin/wemake-python-styleguide
python
def test_too_many_arguments_in_fixture(absolute_path): "\n End-to-end test to check the argument count.\n\n It is required due to how the 'function_type' parameter\n works inside 'flake8'.\n\n Otherwise it is not set, and unit tests cannot cover `is_method` correctly.\n " filename = absolute_path('fixtures', 'config', 'wrong_arguments.py') process = subprocess.Popen(['flake8', '--select', 'Z', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, _) = process.communicate() assert (stdout.count(b'Z211') == 4)
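On Python 3.7+ the same end-to-end check reads more simply with subprocess.run; a sketch under the assumption that the fixture and the Z211 violation count stay as in the test above (the path literal is a hypothetical stand-in for what absolute_path resolves):

import subprocess

filename = 'tests/fixtures/config/wrong_arguments.py'  # hypothetical resolved path
result = subprocess.run(
    ['flake8', '--select', 'Z', filename],
    capture_output=True,  # replaces the Popen/PIPE/communicate() dance
)
assert result.stdout.count(b'Z211') == 4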
@Backend._assert_backend_available def compile_function(self, function, arguments): 'Compiles a Theano graph into a callable.' return self._compile_function_without_warnings(arguments, function)
-8,960,196,433,114,248,000
Compiles a Theano graph into a callable.
pymanopt/autodiff/backends/_theano.py
compile_function
Andrew-Wyn/pymanopt
python
@Backend._assert_backend_available def compile_function(self, function, arguments): return self._compile_function_without_warnings(arguments, function)
@Backend._assert_backend_available def compute_gradient(self, function, arguments): 'Returns a compiled function computing the gradient of ``function``\n with respect to ``arguments``.\n ' if (len(arguments) == 1): (argument,) = arguments gradient = T.grad(function, argument) return self._compile_function_without_warnings(arguments, gradient) gradient = T.grad(function, arguments) return self._compile_function_without_warnings(arguments, gradient)
-52,295,808,930,264,856
Returns a compiled function computing the gradient of ``function`` with respect to ``arguments``.
pymanopt/autodiff/backends/_theano.py
compute_gradient
Andrew-Wyn/pymanopt
python
@Backend._assert_backend_available def compute_gradient(self, function, arguments): 'Returns a compiled function computing the gradient of ``function``\n with respect to ``arguments``.\n ' if (len(arguments) == 1): (argument,) = arguments gradient = T.grad(function, argument) return self._compile_function_without_warnings(arguments, gradient) gradient = T.grad(function, arguments) return self._compile_function_without_warnings(arguments, gradient)
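A self-contained Theano sketch of what compile_function and compute_gradient wrap: declare symbolic inputs, build a scalar cost, let T.grad produce the gradient graph, and compile both with theano.function. The cost here is illustrative:

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
cost = T.sum(x ** 2)     # f(x) = squared norm of x
grad = T.grad(cost, x)   # symbolic gradient, 2x

f = theano.function([x], cost)
df = theano.function([x], grad)

point = np.array([1.0, 2.0, 3.0])
print(f(point))   # 14.0
print(df(point))  # [2. 4. 6.]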
def _compute_unary_hessian_vector_product(self, gradient, argument): 'Returns a function accepting two arguments to compute a\n Hessian-vector product of a scalar-valued unary function.\n ' argument_type = argument.type() try: Rop = T.Rop(gradient, argument, argument_type) except NotImplementedError: proj = T.sum((gradient * disconnected_grad(argument_type))) Rop = T.grad(proj, argument) return self._compile_function_without_warnings([argument, argument_type], Rop)
-4,414,070,525,013,380,000
Returns a function accepting two arguments to compute a Hessian-vector product of a scalar-valued unary function.
pymanopt/autodiff/backends/_theano.py
_compute_unary_hessian_vector_product
Andrew-Wyn/pymanopt
python
def _compute_unary_hessian_vector_product(self, gradient, argument): 'Returns a function accepting two arguments to compute a\n Hessian-vector product of a scalar-valued unary function.\n ' argument_type = argument.type() try: Rop = T.Rop(gradient, argument, argument_type) except NotImplementedError: proj = T.sum((gradient * disconnected_grad(argument_type))) Rop = T.grad(proj, argument) return self._compile_function_without_warnings([argument, argument_type], Rop)
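The except branch above leans on a standard identity: with v held constant, the Hessian-vector product satisfies H(x) v = grad_x of (grad f(x) dot v), and disconnected_grad is what keeps v out of the differentiation. A small sketch of the same trick in isolation (the cubic cost is illustrative); the n-ary variant below applies it per argument:

import numpy as np
import theano
import theano.tensor as T
from theano.gradient import disconnected_grad

x = T.dvector('x')
v = T.dvector('v')

cost = T.sum(x ** 3)
g = T.grad(cost, x)  # gradient: 3 * x**2

# Differentiate the inner product of g with a constant v:
# the result is H(x) @ v, here 6 * x * v elementwise.
proj = T.sum(g * disconnected_grad(v))
hess_vec = T.grad(proj, x)

hvp = theano.function([x, v], hess_vec)
print(hvp(np.array([1.0, 2.0]), np.array([1.0, 1.0])))  # [ 6. 12.]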
def _compute_nary_hessian_vector_product(self, gradients, arguments): "Returns a function accepting `2 * len(arguments)` arguments to\n compute a Hessian-vector product of a multivariate function.\n\n Notes\n -----\n The implementation is based on TensorFlow's '_hessian_vector_product'\n function in 'tensorflow.python.ops.gradients_impl'.\n " argument_types = [argument.type() for argument in arguments] try: Rop = T.Rop(gradients, arguments, argument_types) except NotImplementedError: proj = [T.sum((gradient * disconnected_grad(argument_type))) for (gradient, argument_type) in zip(gradients, argument_types)] proj_grad = [T.grad(proj_elem, arguments, disconnected_inputs='ignore', return_disconnected='None') for proj_elem in proj] proj_grad_transpose = map(list, zip(*proj_grad)) proj_grad_stack = [T.stacklists([c for c in row if (c is not None)]) for row in proj_grad_transpose] Rop = [T.sum(stack, axis=0) for stack in proj_grad_stack] return self._compile_function_without_warnings(list(itertools.chain(arguments, argument_types)), Rop)
6,644,731,796,320,247,000
Returns a function accepting `2 * len(arguments)` arguments to compute a Hessian-vector product of a multivariate function. Notes ----- The implementation is based on TensorFlow's '_hessian_vector_product' function in 'tensorflow.python.ops.gradients_impl'.
pymanopt/autodiff/backends/_theano.py
_compute_nary_hessian_vector_product
Andrew-Wyn/pymanopt
python
def _compute_nary_hessian_vector_product(self, gradients, arguments): "Returns a function accepting `2 * len(arguments)` arguments to\n compute a Hessian-vector product of a multivariate function.\n\n Notes\n -----\n The implementation is based on TensorFlow's '_hessian_vector_product'\n function in 'tensorflow.python.ops.gradients_impl'.\n " argument_types = [argument.type() for argument in arguments] try: Rop = T.Rop(gradients, arguments, argument_types) except NotImplementedError: proj = [T.sum((gradient * disconnected_grad(argument_type))) for (gradient, argument_type) in zip(gradients, argument_types)] proj_grad = [T.grad(proj_elem, arguments, disconnected_inputs='ignore', return_disconnected='None') for proj_elem in proj] proj_grad_transpose = map(list, zip(*proj_grad)) proj_grad_stack = [T.stacklists([c for c in row if (c is not None)]) for row in proj_grad_transpose] Rop = [T.sum(stack, axis=0) for stack in proj_grad_stack] return self._compile_function_without_warnings(list(itertools.chain(arguments, argument_types)), Rop)
@Backend._assert_backend_available def compute_hessian_vector_product(self, function, arguments): 'Computes the directional derivative of the gradient, which is\n equivalent to computing a Hessian-vector product with the direction\n vector.\n ' if (len(arguments) == 1): (argument,) = arguments gradient = T.grad(function, argument) return self._compute_unary_hessian_vector_product(gradient, argument) gradients = T.grad(function, arguments) return self._compute_nary_hessian_vector_product(gradients, arguments)
-6,657,651,170,826,868,000
Computes the directional derivative of the gradient, which is equivalent to computing a Hessian-vector product with the direction vector.
pymanopt/autodiff/backends/_theano.py
compute_hessian_vector_product
Andrew-Wyn/pymanopt
python
@Backend._assert_backend_available def compute_hessian_vector_product(self, function, arguments): 'Computes the directional derivative of the gradient, which is\n equivalent to computing a Hessian-vector product with the direction\n vector.\n ' if (len(arguments) == 1): (argument,) = arguments gradient = T.grad(function, argument) return self._compute_unary_hessian_vector_product(gradient, argument) gradients = T.grad(function, arguments) return self._compute_nary_hessian_vector_product(gradients, arguments)
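A quadratic form makes the Hessian-vector product checkable by hand: f(x) = x^T A x has gradient (A + A^T) x and constant Hessian A + A^T. A hedged verification sketch using the R-operator directly, the same primitive the try branches above attempt first:

import numpy as np
import theano
import theano.tensor as T

A = np.array([[2.0, 1.0], [0.0, 3.0]])

x = T.dvector('x')
v = T.dvector('v')
cost = T.dot(x, T.dot(A, x))  # f(x) = x^T A x

g = T.grad(cost, x)   # (A + A^T) @ x
Hv = T.Rop(g, x, v)   # directional derivative of g along v

hvp = theano.function([x, v], Hv)
print(hvp(np.zeros(2), np.ones(2)))  # (A + A^T) @ [1, 1] -> [5. 7.]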
def _check_cuda_version(): '\n Make sure that CUDA versions match between the pytorch install and torchvision install\n ' if (not _HAS_OPS): return (- 1) import torch _version = torch.ops.torchvision._cuda_version() if ((_version != (- 1)) and (torch.version.cuda is not None)): tv_version = str(_version) if (int(tv_version) < 10000): tv_major = int(tv_version[0]) tv_minor = int(tv_version[2]) else: tv_major = int(tv_version[0:2]) tv_minor = int(tv_version[3]) t_version = torch.version.cuda t_version = t_version.split('.') t_major = int(t_version[0]) t_minor = int(t_version[1]) if ((t_major != tv_major) or (t_minor != tv_minor)): raise RuntimeError('Detected that PyTorch and torchvision were compiled with different CUDA versions. PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. Please reinstall the torchvision that matches your PyTorch install.'.format(t_major, t_minor, tv_major, tv_minor)) return _version
-8,702,634,230,537,215,000
Make sure that CUDA versions match between the pytorch install and torchvision install
torchvision/extension.py
_check_cuda_version
AryanRaj315/vision
python
def _check_cuda_version(): '\n \n ' if (not _HAS_OPS): return (- 1) import torch _version = torch.ops.torchvision._cuda_version() if ((_version != (- 1)) and (torch.version.cuda is not None)): tv_version = str(_version) if (int(tv_version) < 10000): tv_major = int(tv_version[0]) tv_minor = int(tv_version[2]) else: tv_major = int(tv_version[0:2]) tv_minor = int(tv_version[3]) t_version = torch.version.cuda t_version = t_version.split('.') t_major = int(t_version[0]) t_minor = int(t_version[1]) if ((t_major != tv_major) or (t_minor != tv_minor)): raise RuntimeError('Detected that PyTorch and torchvision were compiled with different CUDA versions. PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. Please reinstall the torchvision that matches your PyTorch install.'.format(t_major, t_minor, tv_major, tv_minor)) return _version
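The string slicing above implies torchvision packs its CUDA version as major*1000 + minor*10 (9020 decodes to 9.2, 10020 to 10.2). A tiny sketch decoding the same integer arithmetically; the function name is mine, and the encoding is inferred from the slicing logic rather than stated by the source:

def decode_cuda_version(version):
    # Packed as major*1000 + minor*10, e.g. 10020 -> (10, 2).
    major = version // 1000
    minor = (version % 1000) // 10
    return major, minor

assert decode_cuda_version(9020) == (9, 2)    # CUDA 9.2
assert decode_cuda_version(10020) == (10, 2)  # CUDA 10.2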