function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def setup_environ_for_server(self):
    """Build a minimal, deterministic environment for server subprocesses.

    Only an explicit allow-list of variables is copied from the host
    environment so test results stay consistent across machines.

    Returns:
        dict mapping variable name -> value for the subprocess environment.
    """
    # We intentionally copy only a subset of the environment when
    # launching subprocesses to ensure consistent test results.
    clean_env = {}
    variables_to_copy = [
        'CHROME_DEVEL_SANDBOX',
        'CHROME_IPC_LOGGING',
        'ASAN_OPTIONS',
        'TSAN_OPTIONS',
        'MSAN_OPTIONS',
        'LSAN_OPTIONS',
        'UBSAN_OPTIONS',
        'VALGRIND_LIB',
        'VALGRIND_LIB_INNER',
        'TMPDIR',
    ]
    # Ensure TMPDIR is always present before it is absolutized below.
    if 'TMPDIR' not in self.host.environ:
        self.host.environ['TMPDIR'] = tempfile.gettempdir()
    # CGIs are run directory-relative so they need an absolute TMPDIR
    self.host.environ['TMPDIR'] = self._filesystem.abspath(
        self.host.environ['TMPDIR'])
    if self.host.platform.is_linux() or self.host.platform.is_freebsd():
        variables_to_copy += [
            'XAUTHORITY', 'HOME', 'LANG', 'LD_LIBRARY_PATH',
            'DBUS_SESSION_BUS_ADDRESS', 'XDG_DATA_DIRS', 'XDG_RUNTIME_DIR'
        ]
        # Default to display :1 when the host has none set.
        clean_env['DISPLAY'] = self.host.environ.get('DISPLAY', ':1')
    if self.host.platform.is_mac():
        clean_env['DYLD_LIBRARY_PATH'] = self._build_path()
        variables_to_copy += [
            'HOME',
        ]
    if self.host.platform.is_win():
        variables_to_copy += [
            'PATH',
        ]
    for variable in variables_to_copy:
        if variable in self.host.environ:
            clean_env[variable] = self.host.environ[variable]
    # --additional-env-var entries are 'NAME=value' strings; value may
    # itself contain '=' (maxsplit=1).
    for string_variable in self.get_option('additional_env_var', []):
        [name, value] = string_variable.split('=', 1)
        clean_env[name] = value
    return clean_env
131,
27,
131,
3,
1490828945
] |
def create_driver(self, worker_number, no_timeout=False):
    """Returns a newly created Driver subclass for starting/stopping the
    test driver.

    Args:
        worker_number: Integer identifying the worker owning this driver.
        no_timeout: If True, the driver runs without a timeout.
    """
    # _driver_class() returns the class object; the second call set of
    # parentheses instantiates it.
    return self._driver_class()(self, worker_number, no_timeout=no_timeout)
131,
27,
131,
3,
1490828945
] |
def start_http_server(self,
                      additional_dirs,
                      number_of_drivers,
                      output_dir=''):
    """Start a web server. Raise an error if it can't start or is already running.

    Ports can stub this out if they don't need a web server to be running.

    Args:
        additional_dirs: Extra directories the server should serve.
        number_of_drivers: Used to size the server pool (4 servers per driver).
        output_dir: Directory for server output; defaults to the port's
            artifacts directory.
    """
    assert not self._http_server, 'Already running an http server.'
    output_dir = output_dir or self.artifacts_directory()
    server = apache_http.ApacheHTTP(
        self,
        output_dir,
        additional_dirs=additional_dirs,
        number_of_servers=(number_of_drivers * 4))
    server.start()
    self._http_server = server
131,
27,
131,
3,
1490828945
] |
def is_wpt_test(test):
    """Whether a test is considered a web-platform-tests test.

    Note: returns the regex match object (truthy/None), not a strict bool.
    """
    return Port.WPT_REGEX.match(test)
131,
27,
131,
3,
1490828945
] |
def is_wpt_idlharness_test(test_file):
    """Returns whether a WPT test is (probably) an idlharness test.

    There is no WPT rule that identifies idlharness tests without reading
    file contents (which would be expensive), so this relies on the naming
    convention of including 'idlharness' in the test filename.
    """
    wpt_match = Port.WPT_REGEX.match(test_file)
    if wpt_match is None:
        return False
    # Take the final path component of the test path within WPT.
    basename = wpt_match.group(2).rpartition('/')[2]
    return 'idlharness' in basename
131,
27,
131,
3,
1490828945
] |
def should_use_wptserve(test):
    """Return truthy if the test should be served by wptserve (i.e. it is
    a web-platform-tests test)."""
    return Port.is_wpt_test(test)
131,
27,
131,
3,
1490828945
] |
def stop_wptserve(self):
    """Shut down the WPT server if one is currently running."""
    server = self._wpt_server
    if not server:
        return
    server.stop()
    self._wpt_server = None
131,
27,
131,
3,
1490828945
] |
def error_handler(err):
    """Deliberate no-op error callback: errors passed here are ignored."""
    pass
131,
27,
131,
3,
1490828945
] |
def http_server_supports_ipv6(self):
    """Return True unless running on Windows.

    Apache < 2.4 on win32 does not support IPv6.
    """
    return not self.host.platform.is_win()
131,
27,
131,
3,
1490828945
] |
def stop_websocket_server(self):
    """Shut down the websocket server if one is currently running."""
    server = self._websocket_server
    if not server:
        return
    server.stop()
    self._websocket_server = None
131,
27,
131,
3,
1490828945
] |
def test_configuration(self):
    """Returns the current TestConfiguration for the port.

    The configuration is built lazily on first access and cached.
    """
    if not self._test_configuration:
        self._test_configuration = TestConfiguration(
            self._version, self._architecture,
            self._options.configuration.lower())
    return self._test_configuration
131,
27,
131,
3,
1490828945
] |
def all_test_configurations(self):
    """Returns a list of TestConfiguration instances, representing all available
    test configurations for this port.
    """
    return self._generate_all_test_configurations()
131,
27,
131,
3,
1490828945
] |
def configuration_specifier_macros(self):
    """Ports may provide a way to abbreviate configuration specifiers to conveniently
    refer to them as one term or alias specific values to more generic ones. For example:

    (vista, win7) -> win # Abbreviate all Windows versions into one namesake.
    (precise, trusty) -> linux # Change specific name of Linux distro to a more generic term.

    Returns a dictionary, each key representing a macro term ('win', for example),
    and value being a list of valid configuration specifiers (such as ['vista', 'win7']).
    """
    return self.CONFIGURATION_SPECIFIER_MACROS
131,
27,
131,
3,
1490828945
] |
def _flag_specific_expectations_path(self):
    """Return the expectations file path for the active flag-specific
    configuration, or None (implicit) when no such configuration is set."""
    config_name = self.flag_specific_config_name()
    if config_name:
        return self.path_to_flag_specific_expectations_file(config_name)
131,
27,
131,
3,
1490828945
] |
def expectations_dict(self):
    """Returns an OrderedDict of name -> expectations strings.

    The names are expected to be (but not required to be) paths in the
    filesystem. If the name is a path, the file can be considered updatable
    for things like rebaselining, so don't use names that are paths if
    they're not paths.

    Generally speaking the ordering should be files in the filesystem in
    cascade order (TestExpectations followed by Skipped, if the port honors
    both formats), then any built-in expectations (e.g., from compile-time
    exclusions), then --additional-expectations options.
    """
    # FIXME: rename this to test_expectations() once all the callers are
    # updated to know about the ordered dict.
    expectations = collections.OrderedDict()
    default_expectations_files = set(self.default_expectations_files())
    ignore_default = self.get_option('ignore_default_expectations', False)
    for path in self.used_expectations_files():
        is_default = path in default_expectations_files
        if ignore_default and is_default:
            continue
        path_exists = self._filesystem.exists(path)
        if is_default:
            # Missing default files are silently skipped.
            if path_exists:
                expectations[path] = self._filesystem.read_text_file(path)
        else:
            if path_exists:
                _log.debug(
                    "reading additional_expectations from path '%s'", path)
                expectations[path] = self._filesystem.read_text_file(path)
            else:
                # TODO(rmhasan): Fix additional expectation paths for
                # not_site_per_process_blink_web_tests, then change this
                # back to raising exceptions for incorrect expectation
                # paths.
                _log.warning(
                    "additional_expectations path '%s' does not exist",
                    path)
    return expectations
131,
27,
131,
3,
1490828945
] |
def bot_expectations(self):
    """Return flakiness expectations scraped from bot results.

    Returns an empty dict unless --ignore-flaky-tests is set; the option's
    value selects which flavor of bot data is returned.
    """
    if not self.get_option('ignore_flaky_tests'):
        return {}
    full_port_name = self.determine_full_port_name(
        self.host, self._options, self.port_name)
    builder_category = self.get_option('ignore_builder_category', 'layout')
    factory = BotTestExpectationsFactory(self.host.builders)
    # FIXME: This only grabs the release builder's flakiness data. If we're
    # running debug, we should grab the debug builder's data instead.
    expectations = factory.expectations_for_port(full_port_name,
                                                 builder_category)
    if not expectations:
        return {}
    ignore_mode = self.get_option('ignore_flaky_tests')
    if ignore_mode == 'very-flaky' or ignore_mode == 'maybe-flaky':
        return expectations.flakes_by_path(ignore_mode == 'very-flaky')
    if ignore_mode == 'unexpected':
        return expectations.unexpected_results_by_path()
    _log.warning("Unexpected ignore mode: '%s'.", ignore_mode)
    return {}
131,
27,
131,
3,
1490828945
] |
def used_expectations_files(self):
    """Returns a list of paths to expectation files that are used.

    The list is computed once (defaults + flag-specific + any
    --additional-expectations paths) and cached on the instance.
    """
    if self._used_expectation_files is None:
        self._used_expectation_files = list(
            self.default_expectations_files())
        flag_specific = self._flag_specific_expectations_path()
        if flag_specific:
            self._used_expectation_files.append(flag_specific)
        for path in self.get_option('additional_expectations', []):
            # Normalize user-supplied paths (~ expansion, absolutize).
            expanded_path = self._filesystem.expanduser(path)
            abs_path = self._filesystem.abspath(expanded_path)
            self._used_expectation_files.append(abs_path)
    return self._used_expectation_files
131,
27,
131,
3,
1490828945
] |
def path_to_generic_test_expectations_file(self):
    """Return the path to the main TestExpectations file in web_tests."""
    return self._filesystem.join(self.web_tests_dir(), 'TestExpectations')
131,
27,
131,
3,
1490828945
] |
def path_to_webdriver_expectations_file(self):
    """Return the path to the WebDriverExpectations file in web_tests."""
    return self._filesystem.join(self.web_tests_dir(),
                                 'WebDriverExpectations')
131,
27,
131,
3,
1490828945
] |
def repository_path(self):
    """Returns the repository path for the chromium code base.

    NOTE(review): this returns the 'build' subdirectory of the chromium
    base — presumably used as a stable in-repo anchor; confirm at callers.
    """
    return self._path_from_chromium_base('build')
131,
27,
131,
3,
1490828945
] |
def clobber_old_port_specific_results(self):
    """Hook for ports to delete stale port-specific results; no-op here."""
    pass
131,
27,
131,
3,
1490828945
] |
def path_to_apache(self):
    """Returns the full path to the apache binary.

    This is needed only by ports that use the apache_http_server module.

    Raises:
        NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError('Port.path_to_apache')
131,
27,
131,
3,
1490828945
] |
def path_to_apache_config_file(self):
    """Returns the full path to the apache configuration file.

    If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
    contents will be used instead.

    This is needed only by ports that use the apache_http_server module.

    Raises:
        IOError: if the env-var-supplied path does not exist.
    """
    config_file_from_env = self.host.environ.get(
        'WEBKIT_HTTP_SERVER_CONF_PATH')
    if config_file_from_env:
        if not self._filesystem.exists(config_file_from_env):
            raise IOError(
                '%s was not found on the system' % config_file_from_env)
        return config_file_from_env
    config_file_name = self._apache_config_file_name_for_platform()
    return self._filesystem.join(self.apache_config_directory(),
                                 config_file_name)
131,
27,
131,
3,
1490828945
] |
def _apache_config_file_name_for_platform(self):
if self.host.platform.is_linux():
distribution = self.host.platform.linux_distribution()
custom_configurations = ['arch', 'debian', 'fedora', 'redhat']
if distribution in custom_configurations:
return '%s-httpd-%s.conf' % (distribution,
self._apache_version())
return 'apache2-httpd-' + self._apache_version() + '.conf' | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def _path_to_image_diff(self):
    """Returns the full path to the image_diff binary, or None if it is not available.

    This is likely used only by diff_image().
    """
    return self._build_path('image_diff')
131,
27,
131,
3,
1490828945
] |
def _driver_class(self):
    """Returns the port's driver implementation (a class, not an instance)."""
    return driver.Driver
131,
27,
131,
3,
1490828945
] |
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
    """Assemble a crash log from a driver's stdout/stderr.

    If stderr contains sanitizer output, it is first piped through the
    asan symbolizer script (serialized across drivers via flock).

    Args:
        name: Process name, or None if unknown.
        pid: Process id, or None if unknown.
        stdout: Bytestring of captured stdout (may be None/empty).
        stderr: Bytestring of captured stderr (may be None/empty).
        newer_than: Unused here; kept for interface compatibility with
            subclasses that filter system crash logs by timestamp.

    Returns:
        (stderr, crash_log_bytes, crash_site) tuple; crash_log_bytes is
        utf-8 encoded.
    """
    if self.output_contains_sanitizer_messages(stderr):
        # Running the symbolizer script can take a lot of memory, so we need to
        # serialize access to it across all the concurrently running drivers.
        llvm_symbolizer_path = self._path_from_chromium_base(
            'third_party', 'llvm-build', 'Release+Asserts', 'bin',
            'llvm-symbolizer')
        if self._filesystem.exists(llvm_symbolizer_path):
            env = self.host.environ.copy()
            env['LLVM_SYMBOLIZER_PATH'] = llvm_symbolizer_path
        else:
            env = None
        sanitizer_filter_path = self._path_from_chromium_base(
            'tools', 'valgrind', 'asan', 'asan_symbolize.py')
        sanitizer_strip_path_prefix = 'Release/../../'
        if self._filesystem.exists(sanitizer_filter_path):
            # 'flock' serializes concurrent symbolizer runs (see above).
            stderr = self._executive.run_command([
                'flock', sys.executable, sanitizer_filter_path,
                sanitizer_strip_path_prefix
            ],
                                                 input=stderr,
                                                 decode_output=False,
                                                 env=env)
    name_str = name or '<unknown process name>'
    pid_str = str(pid or '<unknown>')
    # We require stdout and stderr to be bytestrings, not character strings.
    if stdout:
        stdout_lines = stdout.decode('utf8', 'replace').splitlines()
    else:
        stdout_lines = [u'<empty>']
    if stderr:
        stderr_lines = stderr.decode('utf8', 'replace').splitlines()
    else:
        stderr_lines = [u'<empty>']
    return (stderr,
            ('crash log for %s (pid %s):\n%s\n%s\n' %
             (name_str, pid_str, '\n'.join(
                 ('STDOUT: ' + l) for l in stdout_lines), '\n'.join(
                     ('STDERR: ' + l)
                     for l in stderr_lines))).encode('utf8', 'replace'),
            self._get_crash_site(stderr_lines))
131,
27,
131,
3,
1490828945
] |
def look_for_new_crash_logs(self, crashed_processes, start_time):
    """Hook for ports that can scrape system crash logs; no-op here."""
    pass
131,
27,
131,
3,
1490828945
] |
def sample_process(self, name, pid):
    """Hook for ports that can sample a wedged process; no-op here."""
    pass
131,
27,
131,
3,
1490828945
] |
def _all_virtual_tests(self, tests_by_dir):
tests = []
for suite in self.virtual_test_suites():
if suite.bases:
tests.extend(map(lambda x: suite.full_prefix + x,
self.real_tests_from_dict(suite.bases, tests_by_dir)))
return tests | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def _virtual_tests_for_suite_with_paths(self, suite, paths):
    """Return the virtual test names of `suite` restricted to `paths`.

    Regular tests are expanded via real_tests(); bases living under a WPT
    directory additionally get their WPT URL expansions.
    """
    if not suite.bases:
        return []
    bases = self._get_bases_for_suite_with_paths(suite, paths)
    if not bases:
        return []
    tests = []
    tests.extend(
        map(lambda x: suite.full_prefix + x, self.real_tests(bases)))
    # WPT bases need URL-level expansion (one prefix per base).
    wpt_bases = []
    for base in bases:
        if any(base.startswith(wpt_dir) for wpt_dir in self.WPT_DIRS):
            wpt_bases.append(base)
    if wpt_bases:
        tests.extend(
            self._wpt_test_urls_matching_paths(
                wpt_bases, [suite.full_prefix] * len(wpt_bases)))
    return tests
131,
27,
131,
3,
1490828945
] |
def _path_has_wildcard(self, path):
return '*' in path | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def _lookup_virtual_suite(self, test_name):
if not test_name.startswith('virtual/'):
return None
for suite in self.virtual_test_suites():
if test_name.startswith(suite.full_prefix):
return suite
return None | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def _lookup_virtual_test_args(self, test_name):
normalized_test_name = self.normalize_test_name(test_name)
for suite in self.virtual_test_suites():
if normalized_test_name.startswith(suite.full_prefix):
return suite.args
return [] | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def _build_path_with_target(self, target, *comps):
    """Join path components under the build directory for `target`.

    Falls back to the --target option when `target` is falsy, and to
    'out' when no --build-directory option is set.
    """
    target = target or self.get_option('target')
    return self._filesystem.join(
        self._path_from_chromium_base(),
        self.get_option('build_directory') or 'out', target, *comps)
131,
27,
131,
3,
1490828945
] |
def _get_font_files(self):
    """Returns list of font files that should be used by the test.

    Raises:
        TestRunException: with SYS_DEPS_EXIT_STATUS when any required
            font cannot be found in any of its candidate directories.
    """
    # TODO(sergeyu): Currently FONT_FILES is valid only on Linux. Make it
    # usable on other platforms if necessary.
    result = []
    for (font_dirs, font_file, package) in FONT_FILES:
        exists = False
        for font_dir in font_dirs:
            font_path = self._filesystem.join(font_dir, font_file)
            # Relative entries are resolved against the build directory.
            if not self._filesystem.isabs(font_path):
                font_path = self._build_path(font_path)
            if self._check_file_exists(font_path, '', more_logging=False):
                result.append(font_path)
                exists = True
                break
        if not exists:
            message = 'You are missing %s under %s.' % (font_file,
                                                        font_dirs)
            if package:
                message += ' Try installing %s. See build instructions.' % package
            _log.error(message)
            raise TestRunException(exit_codes.SYS_DEPS_EXIT_STATUS,
                                   message)
    return result
131,
27,
131,
3,
1490828945
] |
def split_webdriver_test_name(test_name):
    """Splits a WebDriver test name into a filename and a subtest name and
    returns both of them. E.g.

    test.py>>foo.html -> (test.py, foo.html)
    test.py -> (test.py, None)
    """
    # partition() splits on the first occurrence, matching find() above.
    base_name, separator, subtest_suffix = test_name.partition(
        Port.WEBDRIVER_SUBTEST_SEPARATOR)
    if not separator:
        return (base_name, None)
    return (base_name, subtest_suffix)
131,
27,
131,
3,
1490828945
] |
def add_webdriver_subtest_suffix(test_name, subtest_name):
    """Appends a subtest name to a WebDriver test name. E.g.

    (test.py, foo.html) -> test.py>>foo.html
    (test.py, None) -> test.py
    """
    if not subtest_name:
        return test_name
    return test_name + Port.WEBDRIVER_SUBTEST_SEPARATOR + subtest_name
131,
27,
131,
3,
1490828945
] |
def split_webdriver_subtest_pytest_name(test_name):
    """Splits a WebDriver test name in pytest format into a filename and a
    subtest name and returns both of them. E.g.

    test.py::foo.html -> (test.py, foo.html)
    test.py -> (test.py, None)
    """
    parts = test_name.split(Port.WEBDRIVER_SUBTEST_PYTEST_SEPARATOR)
    assert len(parts) <= 2, \
        "%s has a length greater than 2 after split by ::" % (test_name)
    if len(parts) == 2:
        return (parts[0], parts[1])
    return (parts[0], None)
131,
27,
131,
3,
1490828945
] |
def add_webdriver_subtest_pytest_suffix(test_name, subtest_name):
    """Append a pytest-style ('::') subtest suffix; pass through when
    subtest_name is None."""
    if subtest_name is not None:
        return test_name + Port.WEBDRIVER_SUBTEST_PYTEST_SEPARATOR + subtest_name
    return test_name
131,
27,
131,
3,
1490828945
] |
def __init__(self, prefix=None, bases=None, args=None):
    """Initialize a virtual test suite description.

    Args:
        prefix: Suite name; must match VALID_FILE_NAME_REGEX.
        bases: List of base test paths the suite maps onto.
        args: Non-empty list of extra command-line arguments for the suite.
    """
    assert VALID_FILE_NAME_REGEX.match(prefix), \
        "Virtual test suite prefix '{}' contains invalid characters".format(prefix)
    assert isinstance(bases, list)
    # args must be present and non-empty — a virtual suite without extra
    # arguments would be identical to its base suite.
    assert args
    assert isinstance(args, list)
    self.full_prefix = 'virtual/' + prefix + '/'
    self.bases = bases
    self.args = args
131,
27,
131,
3,
1490828945
] |
def create(kernel):
    """Build and return the shared_radio decorative furniture template.

    Bug fix: the constructed object was never returned, so callers always
    received None.
    """
    result = Tangible()
    result.template = "object/tangible/furniture/decorative/shared_radio.iff"
    result.attribute_template_id = 6
    result.stfName("frn_n","radio")
    return result
62,
37,
62,
37,
1297996365
] |
def create(kernel):
    """Build and return the hutt heavy s02 tier4 ship template.

    Bug fix: the constructed object was never returned, so callers always
    received None.
    """
    result = Ship()
    result.template = "object/ship/shared_hutt_heavy_s02_tier4.iff"
    result.attribute_template_id = -1
    result.stfName("","")
    return result
62,
37,
62,
37,
1297996365
] |
def create(kernel):
    """Build and return the imperial SDS reactor ship-component template.

    Bug fix: the constructed object was never returned, so callers always
    received None.
    """
    result = Tangible()
    result.template = "object/tangible/ship/components/reactor/shared_rct_sds_imperial_1.iff"
    result.attribute_template_id = 8
    result.stfName("space/space_item","rct_sds_imperial_1_n")
    return result
62,
37,
62,
37,
1297996365
] |
def formfield(self, **kwargs):
    """Return a form field for this model field.

    Bug fix: previously any caller-supplied kwargs (widget, label,
    required, ...) were silently discarded. Merge them over our default
    form_class instead, which also lets callers override the form class —
    the standard Django Field.formfield pattern.
    """
    defaults = {'form_class': InvenTreeURLFormField}
    defaults.update(kwargs)
    return super().formfield(**defaults)
}) | inventree/InvenTree | [
2517,
401,
2517,
134,
1490233450
] |
def __init__(self, **kwargs):
    """Initialize the money field, stripping currency data during migrations.

    During 'migrate'/'makemigrations' runs the currency settings are
    cleared so generated migrations do not depend on runtime currency
    configuration; otherwise the project-wide money defaults are applied.
    A non-negative minimum value validator is added when the caller does
    not supply any validators.
    """
    # detect if creating migration
    if 'migrate' in sys.argv or 'makemigrations' in sys.argv:
        # remove currency information for a clean migration
        kwargs['default_currency'] = ''
        kwargs['currency_choices'] = []
    else:
        # set defaults
        kwargs.update(money_kwargs())
    # Set a minimum value validator
    validators = kwargs.get('validators', [])
    if len(validators) == 0:
        validators.append(
            MinMoneyValidator(0),
        )
    kwargs['validators'] = validators
    super().__init__(**kwargs)
2517,
401,
2517,
134,
1490233450
] |
def __init__(self, *args, **kwargs):
    """Initialize the field with currency defaults taken from settings.

    Caller-supplied kwargs are overridden by money_kwargs() so the field
    always reflects the configured currency information.
    """
    # override initial values with the real info from database
    kwargs.update(money_kwargs())
    super().__init__(*args, **kwargs)
2517,
401,
2517,
134,
1490233450
] |
def __init__(self, **kwargs):
    """Initialize an optional date field rendered with an HTML5 date input.

    Recognized kwargs (all optional): help_text, label, required, initial.
    """
    help_text = kwargs.get('help_text', _('Enter date'))
    label = kwargs.get('label', None)
    required = kwargs.get('required', False)
    initial = kwargs.get('initial', None)
    # type=date makes browsers render a native date picker.
    widget = forms.DateInput(
        attrs={
            'type': 'date',
        }
    )
    forms.DateField.__init__(
        self,
        required=required,
        initial=initial,
        help_text=help_text,
        widget=widget,
        label=label
    )
2517,
401,
2517,
134,
1490233450
] |
def to_python(self, value):
    """Convert the raw form value and round it to the field's precision."""
    converted = super(RoundingDecimalFormField, self).to_python(value)
    return round_decimal(converted, self.decimal_places)
2517,
401,
2517,
134,
1490233450
] |
def to_python(self, value):
    """Convert the raw value and round it to the field's precision."""
    converted = super(RoundingDecimalField, self).to_python(value)
    rounded = round_decimal(converted, self.decimal_places)
    return rounded
2517,
401,
2517,
134,
1490233450
] |
def make_request(body, method='POST'):
    """
    Helper method to make request
    """
    req = Request.blank('/')
    req.method = method
    req.body = body.encode('utf-8')
    return req
14,
52,
14,
3,
1421687011
] |
def Args(parser):
    """Register command-line flags for the set-scheduling command.

    Adds --restart-on-failure (tri-valued) plus the shared maintenance
    policy and instance arguments.
    """
    restart_on_failure = parser.add_argument(
        '--restart-on-failure',
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help='Restart instances if they are terminated by Compute Engine.')
    # Fix: the detailed help previously contained stray quote characters
    # left over from string-literal concatenation in an earlier revision.
    restart_on_failure.detailed_help = """\
        The instances will be restarted if they are terminated by Compute
        Engine. This does not affect terminations performed by the user.
        """
    instance_flags.AddMaintenancePolicyArgs(parser)
    instance_flags.INSTANCE_ARG.AddArgument(parser)
instance_flags.INSTANCE_ARG.AddArgument(parser) | KaranToor/MA450 | [
1,
1,
1,
4,
1484697944
] |
def service(self):
    """Return the compute API service used by this command (instances)."""
    return self.compute.instances
1,
1,
1,
4,
1484697944
] |
def method(self):
    """Return the API method name invoked by this command."""
    return 'SetScheduling'
1,
1,
1,
4,
1484697944
] |
def resource_type(self):
    """Return the API resource collection this command operates on."""
    return 'instances'
1,
1,
1,
4,
1484697944
] |
def _Import(name):
    """Imports the python module of the given name.

    Import errors are logged and swallowed: registration imports are
    best-effort and some modules are expected to be absent.
    """
    tf.logging.info('Importing %s', name)
    try:
        importlib.import_module(name)
        tf.logging.info('Imported %s', name)
    except ImportError as e:
        # It is expected that some imports may be missing.
        tf.logging.info('Could not import %s: %s', name, e)
11,
25,
11,
1,
1606268455
] |
def ImportAllParams(task_root=_TASK_ROOT, task_dirs=_TASK_DIRS):
    """Import every task's params module to populate the model registry.

    Args:
        task_root: Package root containing the task directories.
        task_dirs: Iterable of task directory names under task_root.
    """
    # Import all ModelParams to ensure that they are added to the global registry.
    for task in task_dirs:
        # By our code repository convention, there is a params.py under the task's
        # params directory. params.py imports _all_ modules that may registers a
        # model param.
        _Import('{}.{}.params.params'.format(task_root, task))
11,
25,
11,
1,
1606268455
] |
def _is_dns_available(self, domain):
    """Best-effort probe for working DNS resolution of *domain*.

    Naive check used to conditionally skip fqdn geoip checks; see #25407.
    """
    # socket.error on py2, OSError on py3 (where socket.error is an alias).
    err_class = socket.error if six.PY2 else OSError
    try:
        socket.gethostbyname(domain)
    except err_class:
        return False
    return True
804,
271,
804,
38,
1277149611
] |
def test02_bad_query(self):
    "Testing GeoIP query parameter checking."
    # Constructed with a bogus city database path on purpose.
    cntry_g = GeoIP(city='<foo>')
    # No city database available, these calls should fail.
    with self.assertRaises(GeoIPException):
        cntry_g.city('google.com')
    with self.assertRaises(GeoIPException):
        cntry_g.coords('yahoo.com')
    # Non-string query should raise TypeError
    with self.assertRaises(TypeError):
        cntry_g.country_code(17)
    with self.assertRaises(TypeError):
        cntry_g.country_name(GeoIP)
] |
def test04_city(self):
    "Testing GeoIP city querying methods."
    # Bogus country database path: city lookups must still work.
    g = GeoIP(country='<foo>')
    queries = [self.addr]
    # Only add the fqdn query when DNS actually resolves (see #25407).
    if self._is_dns_available(self.fqdn):
        queries.append(self.fqdn)
    for query in queries:
        # Country queries should still work.
        for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
            self.assertEqual('US', func(query))
        for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
            self.assertEqual('United States', func(query))
        self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
                         g.country(query))
        # City information dictionary.
        d = g.city(query)
        self.assertEqual('USA', d['country_code3'])
        self.assertEqual('San Antonio', d['city'])
        self.assertEqual('TX', d['region'])
        self.assertEqual(210, d['area_code'])
        geom = g.geos(query)
        self.assertIsInstance(geom, GEOSGeometry)
        lon, lat = (-98, 29)
        # lat_lon() returns (lat, lon); swap to compare as (lon, lat).
        lat_lon = g.lat_lon(query)
        lat_lon = (lat_lon[1], lat_lon[0])
        for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
            # Compare to 0 decimal places: database coordinates drift.
            self.assertAlmostEqual(lon, tup[0], 0)
            self.assertAlmostEqual(lat, tup[1], 0)
804,
271,
804,
38,
1277149611
] |
def test_deprecation_warning(self):
    """Instantiating GeoIP should emit exactly one deprecation warning."""
    with warnings.catch_warnings(record=True) as warns:
        warnings.simplefilter('always')
        GeoIP()
    self.assertEqual(len(warns), 1)
    msg = str(warns[0].message)
    self.assertIn('django.contrib.gis.geoip is deprecated', msg)
804,
271,
804,
38,
1277149611
] |
def useless_function(first, second):
    """Test fixture deliberately declared with a mismatched signature.

    NOTE(review): presumably registered as a callback that will be invoked
    with a different number of arguments, to exercise error handling —
    confirm at the registration site. Do not "fix" the signature.
    """
    print("I have the wrong number of arguments.")
21,
16,
21,
3,
1435959644
] |
def another_function(frame, bp_loc, extra_args, dict):
    """Breakpoint callback that records the 'side_effect' extra argument.

    NOTE(review): the (frame, bp_loc, extra_args, dict) signature matches
    lldb's scripted breakpoint callback convention — confirm where it is
    registered. The result is stored on the module-level `side_effect`
    object for the test harness to inspect.
    """
    se_value = extra_args.GetValueForKey("side_effect")
    # 100 = maximum string length to extract.
    se_string = se_value.GetStringValue(100)
    side_effect.fancy = se_string
21,
16,
21,
3,
1435959644
] |
def objective_func(x, rate_mask, lb, ub):
    """Objective function invoked by the annealing optimizer.

    Inspects its caller's frame to print annealing progress (best/current
    cost) once per annealing cycle when invoked from scipy's anneal
    internals. NOTE(review): as shown this block only logs and returns
    None — the actual cost computation appears truncated in this excerpt;
    confirm against the full source before relying on it.
    """
    # Frame hack: peek at the caller's locals to detect the anneal loop.
    caller_frame, _, _, caller_func, _, _ = inspect.stack()[1]
    if caller_func in {'anneal', '_minimize_anneal'}:
        caller_locals = caller_frame.f_locals
        # 'n' == 1 marks the start of an annealing cycle.
        if caller_locals['n'] == 1:
            print(caller_locals['best_state'].cost, caller_locals['current_state'].cost)
5,
8,
5,
11,
1378399252
] |
def estimate(start_values=None):
    """Estimate parameter values by fitting to data.

    Parameters
    ==========
    parameter_values : numpy array of floats, optional
        Starting parameter values. Taken from model's nominal parameter values
        if not specified.

    Returns
    =======
    numpy array of floats, containing fitted parameter values.

    NOTE(review): relies on scipy.optimize.anneal, which was removed in
    SciPy 0.16 — this code requires an older SciPy; confirm the pinned
    version before running.
    """
    # Set starting position to nominal parameter values if not specified
    if start_values is None:
        start_values = nominal_values
    else:
        assert start_values.shape == nominal_values.shape
    # Log-transform the starting position
    x0 = np.log10(start_values[rate_mask])
    # Displacement size for annealing moves
    dx = .02
    # The default 'fast' annealing schedule uses the 'lower' and 'upper'
    # arguments in a somewhat counterintuitive way. See
    # http://projects.scipy.org/scipy/ticket/1126 for more information. This is
    # how to get the search to start at x0 and use a displacement on the order
    # of dx (note that this will affect the T0 estimation which *does* expect
    # lower and upper to be the absolute expected bounds on x).
    lower = x0 - dx / 2
    upper = x0 + dx / 2
    # Log-transform the rate parameter values
    xnominal = np.log10(nominal_values[rate_mask])
    # Hard lower and upper bounds on x
    lb = xnominal - bounds_radius
    ub = xnominal + bounds_radius
    # Perform the annealing
    args = [rate_mask, lb, ub]
    (xmin, Jmin, Tfinal, feval, iters, accept, retval) = \
        scipy.optimize.anneal(objective_func, x0, full_output=True,
                              maxiter=4000, quench=0.5,
                              lower=lower, upper=upper,
                              args=args)
    # Construct vector with resulting parameter values (un-log-transformed)
    params_estimated = start_values.copy()
    params_estimated[rate_mask] = 10 ** xmin
    # Display annealing results
    for v in ('xmin', 'Jmin', 'Tfinal', 'feval', 'iters', 'accept', 'retval'):
        print("%s: %s" % (v, locals()[v]))
    return params_estimated
5,
8,
5,
11,
1378399252
] |
def getElementFromShadowRoot(driver, element, selector):
    """Query *selector* inside *element*'s shadow root.

    Returns None when *element* is None; otherwise delegates to a JS
    querySelector call on the element's shadowRoot.
    """
    if element is None:
        return None
    script = "return arguments[0].shadowRoot.querySelector(arguments[1])"
    return driver.execute_script(script, element, selector)
131,
27,
131,
3,
1490828945
] |
def __init__(self,
             identifier,
             idl_type,
             code_generator_info=None,
             component=None,
             debug_info=None):
    """Initialize a typedef IR node.

    Args:
        identifier: Name of the typedef.
        idl_type: The IDL type this typedef aliases.
        code_generator_info: Optional code-generator metadata.
        component: Optional owning component.
        debug_info: Optional source-location debug info.
    """
    IRMap.IR.__init__(
        self, identifier=identifier, kind=IRMap.IR.Kind.TYPEDEF)
    WithCodeGeneratorInfo.__init__(self, code_generator_info)
    WithComponent.__init__(self, component)
    WithDebugInfo.__init__(self, debug_info)
    self.idl_type = idl_type
14247,
5365,
14247,
62,
1517864132
] |
def convertElementNode(elementNode, geometryOutput):
    "Convert the xml element to a difference xml element."
    group.convertContainerElementNode(elementNode, geometryOutput, Difference())
13,
8,
13,
5,
1345256205
] |
def processElementNode(elementNode):
    "Process the xml element."
    evaluate.processArchivable(Difference, elementNode)
13,
8,
13,
5,
1345256205
] |
def getLoopsFromObjectLoopsList(self, importRadius, visibleObjectLoopsList):
    "Get loops from visible object loops list."
    # Difference semantics: delegate to getDifference.
    return self.getDifference(importRadius, visibleObjectLoopsList)
13,
8,
13,
5,
1345256205
] |
def assertIsSubclass(self, actual, klass):
    """Assert that *actual* is a subclass of *klass*."""
    is_sub = issubclass(actual, klass)
    self.assertTrue(is_sub, "Not a subclass.")
21,
16,
21,
3,
1435959644
] |
def test_new_runner_old_case(self):
    """A unittest2 runner must run plain unittest.TestCase tests cleanly."""
    runner = unittest2.TextTestRunner(resultclass=resultFactory,
                                      stream=StringIO())
    class Test(unittest.TestCase):
        def testOne(self):
            pass
    suite = unittest2.TestSuite((Test('testOne'),))
    result = runner.run(suite)
    self.assertEqual(result.testsRun, 1)
    self.assertEqual(len(result.errors), 0)
21,
16,
21,
3,
1435959644
] |
def testOne(self):
    """Smoke test for assertDictEqual with two empty dicts."""
    self.assertDictEqual({}, {})
21,
16,
21,
3,
1435959644
] |
def batch_size(self):
    """The batch size of input values.

    Abstract; subclasses must override.
    """
    raise NotImplementedError
1,
5,
1,
10,
1496432585
] |
def output_size(self):
    """A (possibly nested tuple of...) integer[s] or `TensorShape` object[s].

    Abstract; subclasses must override.
    """
    raise NotImplementedError
1,
5,
1,
10,
1496432585
] |
def output_dtype(self):
    """A (possibly nested tuple of...) dtype[s].

    Abstract; subclasses must override.
    """
    raise NotImplementedError
1,
5,
1,
10,
1496432585
] |
def initialize(self, name=None):
    """Called before any decoding iterations.

    This methods must compute initial input values and initial state.

    Args:
        name: Name scope for any created operations.

    Returns:
        `(finished, initial_inputs, initial_state)`: initial values of
        'finished' flags, inputs and state.
    """
    raise NotImplementedError
1,
5,
1,
10,
1496432585
] |
def step(self, time, inputs, state, name=None):
    """Called per step of decoding (but only once for dynamic decoding).

    Args:
        time: Scalar `int32` tensor. Current step number.
        inputs: RNNCell input (possibly nested tuple of) tensor[s] for this time
            step.
        state: RNNCell state (possibly nested tuple of) tensor[s] from previous
            time step.
        name: Name scope for any created operations.

    Returns:
        `(outputs, next_state, next_inputs, finished)`: `outputs` is an instance
        of BasicDecoderOutput, `next_state` is a (structure of) state tensors and
        TensorArrays, `next_inputs` is the tensor that should be used as input for
        the next step, `finished` is a boolean tensor telling whether the sequence
        is complete, for each sequence in the batch.
    """
    raise NotImplementedError
1,
5,
1,
10,
1496432585
] |
def _create_zero_outputs(size, dtype, batch_size):
    """Create a zero outputs Tensor structure.

    For each (shape, dtype) pair in the nested `size`/`dtype` structures,
    builds a zeros tensor of shape [batch_size] + shape.
    """
    def _t(s):
        # Pass Tensors through unchanged; convert static shapes into
        # int32 shape tensors.
        return (s if isinstance(s, ops.Tensor) else constant_op.constant(
            tensor_shape.TensorShape(s).as_list(),
            dtype=dtypes.int32,
            name="zero_suffix_shape"))
    def _create(s, d):
        # Prepend the batch dimension to the per-output suffix shape.
        return array_ops.zeros(
            array_ops.concat(
                ([batch_size], _t(s)), axis=0), dtype=d)
    return nest.map_structure(_create, size, dtype)
1,
5,
1,
10,
1496432585
] |
def _shape(batch_size, from_shape):
    """Prepend `batch_size` to `from_shape` as a static TensorShape.

    Returns an unknown TensorShape when `from_shape` is not a TensorShape
    (the static shape cannot be determined).
    """
    if not isinstance(from_shape, tensor_shape.TensorShape):
        return tensor_shape.TensorShape(None)
    else:
        # Resolve batch_size to a static value when possible.
        batch_size = tensor_util.constant_value(
            ops.convert_to_tensor(
                batch_size, name="batch_size"))
        return tensor_shape.TensorShape([batch_size]).concatenate(from_shape)
1,
5,
1,
10,
1496432585
] |
def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs,
              finished, unused_sequence_lengths):
    """while_loop predicate: keep decoding until all sequences finish."""
    return math_ops.logical_not(math_ops.reduce_all(finished))
1,
5,
1,
10,
1496432585
] |
def _maybe_copy_state(new, cur):
    """Carry forward `cur` for finished sequences, else take `new`.

    NOTE(review): `finished` is a free variable captured from the
    enclosing scope (per-sequence done flags) — confirm at the definition
    site of this closure.
    """
    # TensorArrays and scalar states get passed through.
    if isinstance(cur, tensor_array_ops.TensorArray):
        pass_through = True
    else:
        new.set_shape(cur.shape)
        pass_through = (new.shape.ndims == 0)
    return new if pass_through else array_ops.where(finished, cur, new)
1,
5,
1,
10,
1496432585
] |
def strptime(text, dateFormat):
    """Parse `text` per `dateFormat` into a datetime (second precision)."""
    parsed = time.strptime(text, dateFormat)
    year, month, day, hour, minute, second = parsed[:6]
    return datetime(year, month, day, hour, minute, second)
21,
16,
21,
3,
1435959644
] |
def try_import(name):  # pylint: disable=invalid-name
  """Import module `name`; return it, or None (with a warning) on failure."""
  try:
    return importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
    return None
16,
6,
16,
1,
1500635633
] |
def testExponentialLogPDF(self):
    # Check log_prob/prob output shapes, and (when scipy is available)
    # values against scipy.stats.expon, which parameterizes by scale = 1/rate.
    with session.Session():
      batch_size = 6
      lam = constant_op.constant([2.0] * batch_size)
      lam_v = 2.0
      x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
      exponential = exponential_lib.Exponential(rate=lam)
      log_pdf = exponential.log_prob(x)
      self.assertEqual(log_pdf.get_shape(), (6,))
      pdf = exponential.prob(x)
      self.assertEqual(pdf.get_shape(), (6,))
      # Value checks require scipy; skip silently if it is not installed.
      if not stats:
        return
      expected_log_pdf = stats.expon.logpdf(x, scale=1 / lam_v)
      self.assertAllClose(log_pdf.eval(), expected_log_pdf)
      self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
16,
6,
16,
1,
1500635633
] |
def testExponentialMean(self):
  """Shape check for mean(); value check against scipy when available."""
  with session.Session():
    rates = np.array([1.0, 4.0, 2.5])
    dist = exponential_lib.Exponential(rate=rates)
    self.assertEqual(dist.mean().get_shape(), (3,))
    if not stats:
      return
    expected = stats.expon.mean(scale=1 / rates)
    self.assertAllClose(dist.mean().eval(), expected)
16,
6,
16,
1,
1500635633
] |
def testExponentialEntropy(self):
  """Shape check for entropy(); value check against scipy when available."""
  with session.Session():
    rates = np.array([1.0, 4.0, 2.5])
    dist = exponential_lib.Exponential(rate=rates)
    self.assertEqual(dist.entropy().get_shape(), (3,))
    if not stats:
      return
    expected = stats.expon.entropy(scale=1 / rates)
    self.assertAllClose(dist.entropy().eval(), expected)
16,
6,
16,
1,
1500635633
] |
def testExponentialSampleMultiDimensional(self):
    # Draw a large batched sample and verify shape, non-negativity, and
    # (when scipy is available) distribution via a Kolmogorov-Smirnov test
    # against scipy.stats.expon with scale = 1/rate.
    with self.test_session():
      batch_size = 2
      lam_v = [3.0, 22.0]
      lam = constant_op.constant([lam_v] * batch_size)
      exponential = exponential_lib.Exponential(rate=lam)
      n = 100000
      # Fixed seed keeps the statistical check deterministic across runs.
      samples = exponential.sample(n, seed=138)
      self.assertEqual(samples.get_shape(), (n, batch_size, 2))
      sample_values = samples.eval()
      # Exponential support is [0, inf).
      self.assertFalse(np.any(sample_values < 0.0))
      if not stats:
        return
      # KS statistic below 0.01 for each batch entry and each rate.
      for i in range(2):
        self.assertLess(
            stats.kstest(
                sample_values[:, 0, i],
                stats.expon(scale=1.0 / lam_v[i]).cdf)[0],
            0.01)
        self.assertLess(
            stats.kstest(
                sample_values[:, 1, i],
                stats.expon(scale=1.0 / lam_v[i]).cdf)[0],
            0.01)
16,
6,
16,
1,
1500635633
] |
def main(argv):
  # Generate Java sources from .proto files via protoc's javanano plugin,
  # writing either a directory of .java files or a .srcjar.
  # NOTE: Python 2 script (uses the `print` statement).
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)
  parser.add_option("--protoc", help="Path to protoc binary.")
  parser.add_option("--proto-path", help="Path to proto directory.")
  parser.add_option("--java-out-dir",
                    help="Path to output directory for java files.")
  parser.add_option("--srcjar", help="Path to output srcjar.")
  parser.add_option("--stamp", help="File to touch on success.")
  options, args = parser.parse_args(argv)
  build_utils.CheckOptions(options, parser, ['protoc', 'proto_path'])
  # At least one output destination must be chosen.
  if not options.java_out_dir and not options.srcjar:
    print 'One of --java-out-dir or --srcjar must be specified.'
    return 1
  with build_utils.TempDir() as temp_dir:
    # Specify arguments to the generator (passed through to javanano).
    generator_args = ['optional_field_style=reftypes',
                      'store_unknown_fields=true']
    out_arg = '--javanano_out=' + ','.join(generator_args) + ':' + temp_dir
    # Generate Java files using protoc.
    build_utils.CheckOutput(
        [options.protoc, '--proto_path', options.proto_path, out_arg]
        + args)
    if options.java_out_dir:
      # Replace the output directory wholesale so stale files do not linger.
      build_utils.DeleteDirectory(options.java_out_dir)
      shutil.copytree(temp_dir, options.java_out_dir)
    else:
      build_utils.ZipDir(options.srcjar, temp_dir)
  if options.depfile:
    build_utils.WriteDepfile(
        options.depfile,
        args + [options.protoc] + build_utils.GetPythonDependencies())
  if options.stamp:
    build_utils.Touch(options.stamp)
6,
5,
6,
2,
1449773735
] |
def main(argv):
  """Collect configurations for attached devices that are online and rooted.

  Devices that fail either check are skipped with a warning; the surviving
  configurations are written to --output.
  """
  parser = optparse.OptionParser()
  parser.add_option('--stamp', action='store')
  parser.add_option('--output', action='store')
  options, _ = parser.parse_args(argv)

  devices = build_device.GetAttachedDevices()

  usable_configurations = []
  for device in devices:
    configuration, is_online, has_root = (
        build_device.GetConfigurationForDevice(device))
    if not is_online:
      build_utils.PrintBigWarning(
          '%s is not online. Skipping managed install for this device. '
          'Try rebooting the device to fix this warning.' % device)
      continue
    if not has_root:
      build_utils.PrintBigWarning(
          '"adb root" failed on device: %s\n'
          'Skipping managed install for this device.'
          % configuration['description'])
      continue
    usable_configurations.append(configuration)

  if not usable_configurations:
    build_utils.PrintBigWarning(
        'No valid devices attached. Skipping managed install steps.')
  elif len(devices) > 1:
    # Note that this checks len(devices) and not len(usable_configurations).
    # This way, any time there are multiple devices attached it is
    # explicitly stated which device we will install things to even if all but
    # one device were rejected for other reasons (e.g. two devices attached,
    # one w/o root).
    build_utils.PrintBigWarning(
        'Multiple devices attached. '
        'Installing to the preferred device: '
        '%(id)s (%(description)s)' % (usable_configurations[0]))

  build_device.WriteConfigurations(usable_configurations, options.output)
6,
5,
6,
2,
1449773735
] |
def regload(path, leaf):
    """Read registry value `leaf` under HKEY_CURRENT_USER\\`path`.

    Returns None when the value does not exist.
    """
    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
                          _winreg.KEY_ALL_ACCESS)
    try:
        return _winreg.QueryValueEx(key, leaf)[0]
    except WindowsError:
        return None
238,
323,
238,
203,
1321317235
] |
def epoch_time_offset_from_win32_filetime(filetime):
    """Convert from win32 filetime to seconds-since-epoch value.

    MSIE stores create and expire times as Win32 FILETIME, which is 64
    bits of 100 nanosecond intervals since Jan 01 1601.

    mechanize expects time in 32-bit value expressed in seconds since the
    epoch (Jan 01 1970).

    Raises:
        ValueError: if `filetime` predates the Unix epoch.
    """
    if filetime < WIN32_EPOCH:
        raise ValueError("filetime (%d) is before epoch (%d)" %
                         (filetime, WIN32_EPOCH))
    # 10,000,000 FILETIME ticks (100 ns each) per second; divmod[0] drops
    # the sub-second remainder. (Python 2 long literal.)
    return divmod((filetime - WIN32_EPOCH), 10000000L)[0]
238,
323,
238,
203,
1321317235
] |
def binary_to_str(d):
    """Build a string by mapping each element of `d` through binary_to_char."""
    return "".join([binary_to_char(c) for c in d])
238,
323,
238,
203,
1321317235
] |
def __init__(self):
    # Domains whose cookies have not been loaded yet; read by
    # _cookies_for_request and drained by read_all_cookies (presumably
    # keyed by domain — confirm against _delayload_domain).
    self._delayload_domains = {}
238,
323,
238,
203,
1321317235
] |
def _load_cookies_from_file(self, filename):
    """Parse an MSIE cookies text file into a list of record dicts.

    Each record is a run of newline-separated fields terminated by a "*"
    separator line; the numeric fields are decimal integers. Parsing stops
    (without raising) at EOF or at the first malformed record.
    """
    debug("Loading MSIE cookies file: %s", filename)
    cookies = []
    cookies_fh = open(filename)
    try:
        while 1:
            key = cookies_fh.readline()
            if key == "": break  # EOF
            rl = cookies_fh.readline
            # Per-record field readers; each consumes one line.
            def getlong(rl=rl): return long(rl().rstrip())
            def getstr(rl=rl): return rl().rstrip()
            key = key.rstrip()
            value = getstr()
            domain_path = getstr()
            flags = getlong()  # 0x2000 bit is for secure I think
            lo_expire = getlong()
            hi_expire = getlong()
            lo_create = getlong()
            hi_create = getlong()
            sep = getstr()
            # Missing field or wrong separator: treat the file as truncated.
            if "" in (key, value, domain_path, flags, hi_expire, lo_expire,
                      hi_create, lo_create, sep) or (sep != "*"):
                break
            m = self.msie_domain_re.search(domain_path)
            if m:
                domain = m.group(1)
                path = m.group(2)
                cookies.append({"KEY": key, "VALUE": value,
                                "DOMAIN": domain, "PATH": path,
                                "FLAGS": flags, "HIXP": hi_expire,
                                "LOXP": lo_expire, "HICREATE": hi_create,
                                "LOCREATE": lo_create})
    finally:
        cookies_fh.close()
    return cookies
238,
323,
238,
203,
1321317235
] |
def load_from_registry(self, ignore_discard=False, ignore_expires=False,
                       username=None):
    """Load cookies from the MSIE cookies directory named in the registry.

    username: only required on win9x
    """
    # The registry stores the cookies directory; INDEX.DAT inside it holds
    # the actual cookie records.
    cookies_dir = regload(self.reg_path, self.reg_key)
    filename = os.path.normpath(os.path.join(cookies_dir, "INDEX.DAT"))
    self.load(filename, ignore_discard, ignore_expires, username)
238,
323,
238,
203,
1321317235
] |
def __init__(self, filename=None, delayload=False, policy=None):
    # Initialize both bases explicitly: MSIEBase for MSIE-specific state,
    # FileCookieJar for the generic file-backed jar behavior.
    MSIEBase.__init__(self)
    FileCookieJar.__init__(self, filename, delayload, policy)
238,
323,
238,
203,
1321317235
] |
def _cookies_for_request(self, request):
    """Return a list of cookies to be returned to server."""
    # Consider both already-loaded domains and delay-loaded ones.
    all_domains = self._cookies.copy()
    all_domains.update(self._delayload_domains)
    cookies = []
    for domain in all_domains.keys():
        cookies.extend(self._cookies_for_domain(domain, request))
    return cookies
238,
323,
238,
203,
1321317235
] |
def read_all_cookies(self):
    """Eagerly read in all cookies."""
    if not self.delayload:
        return
    # Force each deferred domain to be parsed now.
    for domain in self._delayload_domains.keys():
        self._delayload_domain(domain)
238,
323,
238,
203,
1321317235
] |
def store_hours(self, data):
    """Convert day/hour rows into an OSM-style opening_hours string.

    Args:
        data: iterable of dicts with keys 'idx' (1-based day index,
            1 == Sunday per the weekday table below) and 'value'
            (an "HH:MM - HH:MM" range string).

    Returns:
        An opening_hours string such as 'Mo-Fr 09:00-17:00', '24/7' for
        round-the-clock hours, or None when no usable day rows exist.
    """
    day_groups = []
    this_day_group = {}
    # BUG FIX: 'Tu' and 'Th' were swapped in the original table, labeling
    # Tuesday rows as Thursday and vice versa.
    weekdays = ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
    for day_hour in data:
        # Indices above 7 are not weekdays; skip them.
        if day_hour['idx'] > 7:
            continue
        parts = day_hour['value'].split("-")
        start = parts[0].strip()
        end = parts[1].strip()
        short_day = weekdays[day_hour['idx'] - 1]
        # Normalize to HH:MM-HH:MM regardless of separator spacing.
        hours = '{}:{}-{}:{}'.format(start[:2], start[3:], end[:2], end[3:])
        if not this_day_group:
            this_day_group = {
                'from_day': short_day,
                'to_day': short_day,
                'hours': hours,
            }
        elif hours == this_day_group['hours']:
            # Same hours as the running group: extend its day range.
            this_day_group['to_day'] = short_day
        else:
            # Hours changed: close the running group, start a new one.
            day_groups.append(this_day_group)
            this_day_group = {
                'from_day': short_day,
                'to_day': short_day,
                'hours': hours,
            }
    day_groups.append(this_day_group)

    # No usable rows: the only "group" is the empty starter dict.
    if len(day_groups) == 1 and not day_groups[0]:
        return None

    if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59',
                                                           '00:00-00:00'):
        return '24/7'

    opening_hours = ''
    for day_group in day_groups:
        if day_group['from_day'] == day_group['to_day']:
            opening_hours += '{from_day} {hours}; '.format(**day_group)
        else:
            opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
    # Drop the trailing '; '.
    return opening_hours[:-2]
379,
151,
379,
602,
1465952958
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.