function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def compare(self, range_comparison, range_objs):
    """Evaluate this value against a comparison filter.

    Looks up the comparator named by ``range_comparison`` and applies it
    to this object's cast value followed by the cast values of
    ``range_objs``.
    """
    comparator = get_comparison_func(range_comparison)
    operands = [candidate.cast_value for candidate in range_objs]
    return comparator(self.cast_value, *operands)
def is_set(self):
    """Return True when this attribute holds a DynamoDB set type (SS/NS/BS)."""
    set_types = (DDBType.STRING_SET, DDBType.NUMBER_SET, DDBType.BINARY_SET)
    return self.type in set_types
def is_map(self):
    """Return True when this attribute is a DynamoDB map (``M``) type."""
    return DDBType.MAP == self.type
def _implementation():
    """Return a dict describing the running Python implementation.

    Provides both the implementation name and its version, e.g.
    ``{'name': 'CPython', 'version': '2.7.5'}`` on CPython 2.7.5.
    Reliable on CPython and PyPy; for Jython and IronPython the
    reported version is a best guess.
    """
    name = platform.python_implementation()
    if name == 'PyPy':
        pypy_info = sys.pypy_version_info
        version = '%s.%s.%s' % (pypy_info.major,
                                pypy_info.minor,
                                pypy_info.micro)
        # Non-final PyPy builds append the release level (e.g. 'beta0').
        if pypy_info.releaselevel != 'final':
            version = ''.join([version, pypy_info.releaselevel])
    elif name in ('CPython', 'Jython', 'IronPython'):
        # For Jython/IronPython this is a complete guess, as in the original.
        version = platform.python_version()
    else:
        version = 'Unknown'
    return {'name': name, 'version': version}
def main():
    """Dump the bug-report information as pretty-printed JSON."""
    serialized = json.dumps(info(), sort_keys=True, indent=2)
    print(serialized)
def test_string_serde() -> None:
    """Round-trip a syft String through its protobuf form and back."""
    original = String("Hello OpenMined")
    proto = original._object2proto()
    assert isinstance(proto, String_PB)
    restored = String._proto2object(proto=proto)
    assert isinstance(restored, String)
    # Identity must survive the round trip.
    assert restored.id == original.id
def test_parametric_attention_model_with_single_representation(self):
    """A single-representation model maps (10, 20) token ids to (10, 1, 2) output."""
    model = parametric_attention.SimpleParametricAttention(
        output_dimension=2,
        input_embedding_dimension=2,
        vocab_size=10,
        num_representations=1,
        max_sequence_size=20)
    # Batch of 10 sequences, 20 random token ids each, drawn from the vocab.
    input_batch = tf.convert_to_tensor(
        np.random.randint(low=0, high=10, size=(10, 20)))
    output = model(input_batch)
    self.assertIsInstance(model, tf.keras.Model)
    # One representation per example, each of output_dimension 2.
    self.assertSequenceEqual(output.numpy().shape, [10, 1, 2])
def __init__(self, response):
    """Parse a create-comment API response; comment_id stays -1 on failure."""
    super(CreateCommentResult, self).__init__(response)
    self.comment_id = -1
    if not self.ok:
        return
    body = response.json()
    if "id" in body:
        self.comment_id = int(body["id"])
def __init__(self, response):
    """Parse a select-comments API response into raw comment dicts."""
    super(SelectCommentResult, self).__init__(response)
    self.raw_comments = []
    self.older = False
    self.newer = False
    if not self.ok:
        return
    body = response.json()
    if "comments" in body:
        self.raw_comments = body["comments"]
        # NOTE(review): assumes 'older'/'newer' always accompany 'comments'
        # in the payload — confirm against the kintone comment API.
        self.older = body["older"]
        self.newer = body["newer"]
def test_xl_cell_to_rowcol_abs(self):
    """xl_cell_to_rowcol_abs() maps A1 notation to (row, col, 0, 0)."""
    cases = [
        # row, col, A1 string
        (0, 0, 'A1'),
        (0, 1, 'B1'),
        (0, 2, 'C1'),
        (0, 9, 'J1'),
        (1, 0, 'A2'),
        (2, 0, 'A3'),
        (9, 0, 'A10'),
        (1, 24, 'Y2'),
        (7, 25, 'Z8'),
        (9, 26, 'AA10'),
        (1, 254, 'IU2'),
        (1, 255, 'IV2'),
        (1, 256, 'IW2'),
        (0, 16383, 'XFD1'),
        (1048576, 16384, 'XFE1048577'),
    ]
    for expected_row, expected_col, cell in cases:
        # Non-absolute references: both absolute flags must be 0.
        self.assertEqual(
            xl_cell_to_rowcol_abs(cell),
            (expected_row, expected_col, 0, 0))
def test_equality():
    """QiPackage compares by (name, version)."""
    first_foo = qitoolchain.qipackage.QiPackage("foo", "1.2")
    second_foo = qitoolchain.qipackage.QiPackage("foo", "1.2")
    newer_foo = qitoolchain.qipackage.QiPackage("foo", "1.3")
    other_pkg = qitoolchain.qipackage.QiPackage("bar", "1.2")
    assert first_foo == second_foo
    assert second_foo < newer_foo
    assert first_foo != other_pkg
def test_skip_package_xml(tmpdir):
    """Installing a package copies payload files but not package.xml."""
    pkg_dir = tmpdir.mkdir("foo")
    pkg_dir.join("package.xml").write("""<package name="foo" version="0.1"/>""")
    pkg_dir.ensure("include", "foo.h", file=True)
    pkg_dir.ensure("lib", "libfoo.so", file=True)
    package = qitoolchain.qipackage.QiPackage("foo", path=pkg_dir.strpath)
    dest = tmpdir.join("dest")
    package.install(dest.strpath)
    assert dest.join("include", "foo.h").check(file=True)
    assert dest.join("lib", "libfoo.so").check(file=True)
    # The package metadata itself must not be installed.
    assert not dest.join("package.xml").check(file=True)
def test_backward_compat_runtime_install(tmpdir):
    """Runtime-component install copies libraries only, and reports them."""
    boost_dir = tmpdir.mkdir("boost")
    boost_dir.ensure("include", "boost.h", file=True)
    boost_dir.ensure("lib", "libboost.so", file=True)
    boost_dir.ensure("package.xml", file=True)
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_dir.strpath)
    dest = tmpdir.join("dest")
    installed = package.install(dest.strpath, components=["runtime"])
    # Headers are a build-time component, so they must be skipped.
    assert not dest.join("include", "boost.h").check(file=True)
    libboost_so = dest.join("lib", "libboost.so")
    assert libboost_so.check(file=True)
    assert installed == ["lib/libboost.so"]
def test_include_in_mask(tmpdir):
    """'include' mask rules override an earlier blanket 'exclude' rule."""
    qt_dir = tmpdir.mkdir("qt")
    # "assitant.exe" is intentional test data (sic) — do not "fix" it.
    for exe_name in ("assitant.exe", "moc.exe", "lrelease.exe", "lupdate.exe"):
        qt_dir.ensure("bin", exe_name)
    mask_file = qt_dir.ensure("runtime.mask", file=True)
    mask_file.write(b"""\nexclude bin/.*\\.exe\ninclude bin/lrelease.exe\ninclude bin/lupdate.exe\n""")
    dest = tmpdir.join("dest")
    package = qitoolchain.qipackage.QiPackage("qt", path=qt_dir.strpath)
    package.install(dest.strpath, release=True, components=["runtime"])
    # lrelease.exe was re-included; moc.exe stayed excluded.
    assert dest.join("bin", "lrelease.exe").check(file=True)
    assert not dest.join("bin", "moc.exe").check(file=True)
def test_extract_legacy_bad_top_dir(tmpdir):
    """Extraction works even when the archive top dir mismatches the dest name."""
    src_dir = tmpdir.mkdir("src")
    boost_dir = src_dir.mkdir("boost")
    boost_dir.ensure("lib", "libboost.so", file=True)
    archive = qisys.archive.compress(boost_dir.strpath)
    dest = tmpdir.mkdir("dest").join("boost-1.55")
    qitoolchain.qipackage.extract(archive, dest.strpath)
    assert dest.join("lib", "libboost.so").check(file=True)
def test_extract_modern(tmpdir):
    """Flat ('modern') archives extract directly into the destination."""
    src_dir = tmpdir.mkdir("src")
    src_dir.ensure("package.xml", file=True)
    src_dir.ensure("lib", "libboost.so", file=True)
    archive_path = tmpdir.join("boost.zip")
    archive = qisys.archive.compress(src_dir.strpath,
                                     output=archive_path.strpath,
                                     flat=True)
    dest = tmpdir.mkdir("dest").join("boost-1.55")
    qitoolchain.qipackage.extract(archive, dest.strpath)
    assert dest.join("lib", "libboost.so").check(file=True)
def test_get_set_license(tmpdir):
    """Setting a license persists it so a fresh QiPackage can read it back."""
    boost_dir = tmpdir.mkdir("boost")
    boost_dir.join("package.xml").write("""\n<package name="boost" version="1.58" />\n""")
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_dir.strpath)
    # No license declared yet.
    assert package.license is None
    package.license = "BSD"
    reloaded = qitoolchain.qipackage.QiPackage("boost", path=boost_dir.strpath)
    assert reloaded.license == "BSD"
def test_post_add_does_not_exist(tmpdir):
    """post_add with a missing executable raises qisys.command.NotInPath."""
    boost_dir = tmpdir.mkdir("boost")
    boost_dir.join("package.xml").write(
        b"""\n<package name="boost" version="1.58" post-add="asdf" />\n"""
    )
    package = qitoolchain.qipackage.QiPackage("boost", path=boost_dir.strpath)
    package.load_package_xml()
    with pytest.raises(qisys.command.NotInPath):
        package.post_add()
def __init__(self, id_=None, name=None, definer=None, reference=None):
    """Initialize a kill chain definition with optional metadata."""
    super(KillChain, self).__init__()
    self.id_ = id_
    self.name = name
    self.definer = definer
    self.reference = reference
    # Left unset; could likely be derived as len(self.kill_chain_phases).
    self.number_of_phases = None
def __eq__(self, other):
    """Equality: identical object, or same class with equal dict forms."""
    if other is self:
        return True
    if isinstance(other, self.__class__):
        return self.to_dict() == other.to_dict()
    return False
def _dict_as_list(cls):
    # NOTE(review): presumably tells the serializer to emit this entity as a
    # dict rather than a bare list — confirm against python-stix conventions.
    return False
def __init__(self, phase_id=None, name=None, ordinality=None):
    """Initialize a kill chain phase with optional id, name and ordinality."""
    super(KillChainPhase, self).__init__()
    self.ordinality = ordinality
    self.phase_id = phase_id
    self.name = name
def __ne__(self, other):
    """Inequality: the logical inverse of __eq__."""
    equal = self.__eq__(other)
    return not equal
def __init__(self, phase_id=None, name=None, ordinality=None, kill_chain_id=None, kill_chain_name=None):
    """A phase reference: a KillChainPhase plus the owning chain's id/name."""
    super(KillChainPhaseReference, self).__init__(phase_id, name, ordinality)
    self.kill_chain_name = kill_chain_name
    self.kill_chain_id = kill_chain_id
def __init__(self, *args):
    # Typed list: constrain the element type to KillChainPhaseReference.
    super(_KillChainPhaseReferenceList, self).__init__(type=KillChainPhaseReference, *args)
def _dict_as_list(cls):
    # NOTE(review): same serializer hint as the sibling _dict_as_list above —
    # presumably "do not serialize as a bare list"; confirm before relying on it.
    return False
def __init__(self, *args, **kwargs):
    """Configure tester output options, then register script/template paths."""
    # Set Tester member variables
    self.set_write_output_files(WRITE_OUTPUT_FILES)
    self.set_output_py_dir(OUTPUT_PY_DIR)
    self.set_output_yml_dir(OUTPUT_YML_DIR)
    self.set_debug_level(DEBUG_LEVEL)
    # Store the base path (directory containing this test file).
    self._base_path = os.path.dirname(os.path.abspath(__file__))
    # Call the parent constructor with the test scripts and template
    # search directories resolved relative to this file.
    super(TestGenerate, self).__init__(
        *args,
        script_dirs=[os.path.join(self._base_path, 'smacha_scripts/smacha_test_examples')],
        template_dirs=[
            os.path.join(self._base_path, ROS_TEMPLATES_DIR),
            os.path.join(self._base_path, TEMPLATES_DIR)
        ],
        **kwargs)
def setUp(self):
    """Create a test user and the URLs shared by the token-auth tests."""
    self.username = 'test'
    self.password = 'secret'
    self.auth_url = 'http://testserver' + reverse('auth-password')
    self.test_url = 'http://testserver/api/'
    get_user_model().objects.create_user(
        self.username, 'admin@example.com', self.password
    )
def test_user_can_authenticate_with_token(self):
    """A token obtained from the auth endpoint grants API access."""
    response = self.client.post(
        self.auth_url, data={'username': self.username, 'password': self.password}
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    token = response.data['token']
    # Use the freshly issued token for a subsequent API request.
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
    response = self.client.get(self.test_url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_token_creation_time_is_updated_on_every_request(self):
    """Each authenticated request refreshes the token's 'created' timestamp."""
    response = self.client.post(
        self.auth_url, data={'username': self.username, 'password': self.password}
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    token = response.data['token']
    created1 = Token.objects.values_list('created', flat=True).get(key=token)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
    # Any authenticated request should bump the timestamp.
    self.client.get(self.test_url)
    created2 = Token.objects.values_list('created', flat=True).get(key=token)
    self.assertTrue(created1 < created2)
def test_expired_token_is_recreated_on_successful_authentication(self):
    """Re-authenticating after token_lifetime elapses yields a new token."""
    user = get_user_model().objects.get(username=self.username)
    self.assertIsNotNone(user.token_lifetime)
    response = self.client.post(
        self.auth_url, data={'username': self.username, 'password': self.password}
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    token1 = response.data['token']
    # Jump to the exact moment the token expires.
    mocked_now = timezone.now() + timezone.timedelta(seconds=user.token_lifetime)
    with freeze_time(mocked_now):
        response = self.client.post(
            self.auth_url,
            data={'username': self.username, 'password': self.password},
        )
        token2 = response.data['token']
        self.assertNotEqual(token1, token2)
def test_token_never_expires_if_token_lifetime_is_none(self):
    """With token_lifetime=None the same token is reissued indefinitely."""
    user = get_user_model().objects.get(username=self.username)
    user.token_lifetime = None
    user.save()
    response = self.client.post(
        self.auth_url, data={'username': self.username, 'password': self.password}
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    original_token = response.data['token']
    # Even a full year later the token must remain unchanged.
    year_ahead = timezone.now() + timezone.timedelta(days=365)
    with freeze_time(year_ahead):
        response = self.client.post(
            self.auth_url,
            data={'username': self.username, 'password': self.password},
        )
        token_in_a_year = response.data['token']
        self.assertEqual(original_token, token_in_a_year)
def create(kernel):
    """Build the marooned-pirate (human female) creature template object.

    Args:
        kernel: The service kernel; unused here but part of the factory
            signature shared by all creature template modules.

    Returns:
        The configured Creature instance.
    """
    result = Creature()
    result.template = "object/mobile/shared_dressed_marooned_pirate_hum_f.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","human_base_female")
    # Bug fix: the constructed object was previously discarded (no return),
    # so this factory implicitly yielded None to its caller.
    return result
def __init__(self, plotly_name="y", parent_name="volume.caps", **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Y"),
data_docs=kwargs.pop(
"data_docs",
"""
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
are entirely shaded. On the other hand Applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the y `slices` is 1
meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges. | plotly/python-api | [
13052,
2308,
13052,
1319,
1385013188
] |
def _make_hash_value(self, user, timestamp):
    """Build the hash payload for account-activation tokens.

    Including ``email_confirmed`` invalidates outstanding tokens as soon
    as the user confirms their address; ``timestamp`` lets tokens expire.
    """
    return (
        six.text_type(user.pk) + six.text_type(timestamp) +
        six.text_type(user.profile.email_confirmed)
    )
def _string_to_bytes(text, max_length):
    """Given string and length, convert to byte seq of at most max_length.
    This process mimics docqa/elmo's preprocessing:
    https://github.com/allenai/document-qa/blob/master/docqa/elmo/data.py
    Note that we make use of BOS_CHAR_ID and EOS_CHAR_ID in iterator_utils.py &
    our usage differs from docqa/elmo.
    Args:
      text: tf.string tensor of shape []
      max_length: max number of chars for each word.
    Returns:
      A tf.int32 tensor of the byte encoded text.
    """
    # Raw bytes of the word, reinterpreted as int32 ids.
    byte_ids = tf.to_int32(tf.decode_raw(text, tf.uint8))
    # Reserve two slots for the begin-of-word / end-of-word markers.
    byte_ids = byte_ids[:max_length - 2]
    padding = tf.fill([max_length - tf.shape(byte_ids)[0] - 2], PAD_CHAR_ID)
    byte_ids = tf.concat(
        [[BOW_CHAR_ID], byte_ids, [EOW_CHAR_ID], padding], axis=0)
    tf.logging.info(byte_ids)
    # Fix the static shape to [max_length] for downstream batching.
    byte_ids = tf.reshape(byte_ids, [max_length])
    tf.logging.info(byte_ids.get_shape().as_list())
    # Shift by one so id 0 remains available as padding downstream.
    return byte_ids + 1
def load_vocab(vocab_file):
    """Read a vocabulary file and return (entries, entry_count)."""
    entries = []
    with codecs.getreader("utf-8")(tf.gfile.GFile(vocab_file, "rb")) as reader:
        for line in reader:
            entries.append(line.strip())
    return entries, len(entries)
def create_vocab_tables(src_vocab_file):
    """Creates vocab tables for src_vocab_file and tgt_vocab_file.

    The source and target share one vocabulary, so a single lookup table
    is built and returned for both roles.
    """
    src_vocab_table = lookup_ops.index_table_from_file(
        src_vocab_file, default_value=UNK_ID)
    # Shared vocabulary: reuse the same table object for the target side.
    tgt_vocab_table = src_vocab_table
    return src_vocab_table, tgt_vocab_table
def __init__(self):
    # Base URL of the SEGOS site and the query template for its search page.
    self.base_link = 'http://segos.es'
    self.search_link = '/?search=%s'
    #self.episode_link = '-Season-%01d-Episode-%01d'
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Resolve a TV-show title to a site-relative, URL-encoded path.

    Returns None on any failure — the outer bare except deliberately
    swallows all errors (scraper best-effort style).
    """
    try:
        # NOTE(review): uses self.moviesearch_link, but only search_link is
        # visible in this class's __init__ — confirm it is defined elsewhere.
        query = self.moviesearch_link % (urllib.unquote(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = json.loads(result)
        tvshowtitle = cleantitle.tv(tvshowtitle)
        # Accept the given year plus/minus one to tolerate metadata drift.
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        # (href, title, copyright-year spans) for each search result.
        result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'h2', ret='title')[0], client.parseDOM(i, 'span', attrs = {'itemprop': 'copyrightYear'})) for i in result]
        result = [i for i in result if len(i[2]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        # First result matching both title and year window.
        result = [i[0] for i in result if any(x in i[2][0] for x in years)][0]
        # Strip a leading scheme/host if present, keeping only the path.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape hoster links for a title page; returns a list of source dicts.

    Each dict carries source host, quality (SD/HD/1080p), provider,
    URL and language/version string. Errors yield whatever was
    collected so far (bare except, scraper best-effort style).
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url)
        # Language/version and quality fields from the info panel
        # (labels are Polish: "Język" = language, "Jakość" = quality).
        vtype = re.findall('<div class="col-lg-9 col-md-9 col-sm-9">\s.*<b>Język</b>:(.*?)\.*</div>',result)[0].strip()
        q = re.findall('<div class="col-lg-9 col-md-9 col-sm-9">\s.*<b>Jakość</b>:(.*?)\.*</div>', result)[0].strip()
        quality = 'SD'
        if '720' in q: quality = 'HD'
        if '1080' in q: quality = '1080p'
        # Outbound hoster links live in the div with id="Film".
        links = client.parseDOM(result, 'div', attrs={'id':'Film'})
        links = [client.parseDOM(i, 'a', ret='href', attrs={'target':'_blank'})[0] for i in links]
        for i in links:
            try:
                # Reduce the hoster URL to its bare two-part domain name.
                host = urlparse.urlparse(i).netloc
                host = host.split('.')
                host = host[-2]+"."+host[-1]
                host = host.lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'provider': 'SEGOS', 'url': i, 'vtype':vtype})
            except:
                pass
        return sources
    except:
        return sources
def _device_id(aiohue_sensor):
    """Derive the shared device id: the first 23 chars of the unique id."""
    unique_id = aiohue_sensor.uniqueid
    if not unique_id or len(unique_id) <= 23:
        # Falsy (None/empty) or already short enough: pass through unchanged.
        return unique_id
    return unique_id[:23]
def __init__(self, bridge):
    """Initialize the sensor manager."""
    self.bridge = bridge
    # Per-platform add_entities callbacks, registered later by each platform.
    self._component_add_entities = {}
    # uniqueid -> entity for regular sensors, and for event generators.
    self.current = {}
    self.current_events = {}
    self._enabled_platforms = ("binary_sensor", "sensor")
    # Shared polling coordinator; debounced so rapid refresh requests
    # collapse into one bridge call.
    self.coordinator = DataUpdateCoordinator(
        bridge.hass,
        LOGGER,
        name="sensor",
        update_method=self.async_update_data,
        update_interval=self.SCAN_INTERVAL,
        request_refresh_debouncer=debounce.Debouncer(
            bridge.hass, LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
        ),
    )
def async_update_items(self):
    """Update sensors from the bridge.

    Creates entities (and event generators) for any API sensors not yet
    tracked, then schedules removal of entities whose devices vanished.
    No-op until every enabled platform has registered its add_entities
    callback.
    """
    api = self.bridge.api.sensors
    # Wait until all platforms (binary_sensor, sensor) are ready.
    if len(self._component_add_entities) < len(self._enabled_platforms):
        return
    to_add = {}
    primary_sensor_devices = {}
    current = self.current
    # Physical Hue motion sensors present as three sensors in the API: a
    # presence sensor, a temperature sensor, and a light level sensor. Of
    # these, only the presence sensor is assigned the user-friendly name
    # that the user has given to the device. Each of these sensors is
    # linked by a common device_id, which is the first twenty-three
    # characters of the unique id (then followed by a hyphen and an ID
    # specific to the individual sensor).
    #
    # To set up neat values, and assign the sensor entities to the same
    # device, we first, iterate over all the sensors and find the Hue
    # presence sensors, then iterate over all the remaining sensors -
    # finding the remaining ones that may or may not be related to the
    # presence sensors.
    for item_id in api:
        if api[item_id].type != TYPE_ZLL_PRESENCE:
            continue
        primary_sensor_devices[_device_id(api[item_id])] = api[item_id]
    # Iterate again now we have all the presence sensors, and add the
    # related sensors with nice names where appropriate.
    for item_id in api:
        uniqueid = api[item_id].uniqueid
        # Skip anything we already track (entity or event generator).
        if current.get(uniqueid, self.current_events.get(uniqueid)) is not None:
            continue
        sensor_type = api[item_id].type
        # Check for event generator devices
        event_config = EVENT_CONFIG_MAP.get(sensor_type)
        if event_config is not None:
            base_name = api[item_id].name
            name = event_config["name_format"].format(base_name)
            new_event = event_config["class"](api[item_id], name, self.bridge)
            self.bridge.hass.async_create_task(
                new_event.async_update_device_registry()
            )
            self.current_events[uniqueid] = new_event
        sensor_config = SENSOR_CONFIG_MAP.get(sensor_type)
        if sensor_config is None:
            continue
        base_name = api[item_id].name
        # Prefer the friendly name from the related presence sensor.
        primary_sensor = primary_sensor_devices.get(_device_id(api[item_id]))
        if primary_sensor is not None:
            base_name = primary_sensor.name
        name = sensor_config["name_format"].format(base_name)
        current[uniqueid] = sensor_config["class"](
            api[item_id], name, self.bridge, primary_sensor=primary_sensor
        )
        to_add.setdefault(sensor_config["platform"], []).append(current[uniqueid])
    # Remove entities whose backing devices disappeared from the bridge.
    self.bridge.hass.async_create_task(
        remove_devices(
            self.bridge,
            [value.uniqueid for value in api.values()],
            current,
        )
    )
    for platform, value in to_add.items():
        self._component_add_entities[platform](value)
def available(self):
    """Return True if bridge data is fresh and the device is reachable."""
    return self.bridge.sensor_manager.coordinator.last_update_success and (
        self.allow_unreachable
        # remotes like Hue Tap (ZGPSwitchSensor) have no _reachability_
        or self.sensor.config.get("reachable", True)
    )
def state_class(self):
    """Return the state class of this entity, from STATE_CLASSES, if any."""
    # These sensors report point-in-time measurements (not totals).
    return SensorStateClass.MEASUREMENT
def Args(parser):
    """Register flags for this command.

    Adds the positional subscription name plus one or more ack IDs.
    """
    parser.add_argument(
        'subscription',
        help='Subscription name to ACK messages on.')
    parser.add_argument(
        'ackid',
        nargs='+',
        help='One or more AckId to acknowledge.')
def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.

    Binds one unary-unary callable per DataTransferService RPC method
    (generated gRPC client stub; do not edit by hand).
    """
    self.GetDataSource = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetDataSource',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetDataSourceRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DataSource.FromString,
    )
    self.ListDataSources = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListDataSources',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesResponse.FromString,
    )
    self.CreateTransferConfig = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/CreateTransferConfig',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CreateTransferConfigRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
    )
    self.UpdateTransferConfig = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/UpdateTransferConfig',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.UpdateTransferConfigRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
    )
    self.DeleteTransferConfig = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/DeleteTransferConfig',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferConfigRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    )
    self.GetTransferConfig = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetTransferConfig',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferConfigRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
    )
    self.ListTransferConfigs = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferConfigs',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsResponse.FromString,
    )
    self.ScheduleTransferRuns = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ScheduleTransferRuns',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsResponse.FromString,
    )
    self.GetTransferRun = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetTransferRun',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferRunRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferRun.FromString,
    )
    self.DeleteTransferRun = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/DeleteTransferRun',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferRunRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    )
    self.ListTransferRuns = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferRuns',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsResponse.FromString,
    )
    self.ListTransferLogs = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferLogs',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsResponse.FromString,
    )
    self.CheckValidCreds = channel.unary_unary(
        '/google.cloud.bigquery.datatransfer.v1.DataTransferService/CheckValidCreds',
        request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsResponse.FromString,
    )
def GetDataSource(self, request, context):
    """Retrieves a supported data source and returns its settings,
    which can be used for UI rendering.
    """
    # Generated server stub: report UNIMPLEMENTED to the client and raise
    # so local callers fail loudly too.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def CreateTransferConfig(self, request, context):
    """Creates a new data transfer configuration.
    """
    # Generated server stub: always answers UNIMPLEMENTED.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def DeleteTransferConfig(self, request, context):
    """Deletes a data transfer configuration,
    including any associated transfer runs and logs.
    """
    # Generated server stub: always answers UNIMPLEMENTED.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def ListTransferConfigs(self, request, context):
    """Returns information about all data transfers in the project.
    """
    # Generated server stub: always answers UNIMPLEMENTED.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def GetTransferRun(self, request, context):
    """Returns information about the particular transfer run.
    """
    # Generated server stub: always answers UNIMPLEMENTED.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def ListTransferRuns(self, request, context):
    """Returns information about running and completed jobs.
    """
    # Generated server stub: always answers UNIMPLEMENTED.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def CheckValidCreds(self, request, context):
    """Returns true if valid credentials exist for the given data source and
    requesting user.
    Some data sources doesn't support service account, so we need to talk to
    them on behalf of the end user. This API just checks whether we have OAuth
    token for the particular user, which is a pre-requisite before user can
    create a transfer config.
    """
    # Generated server stub: always answers UNIMPLEMENTED.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def test_api2_runfunc():
    """_runfunc_ok accepts only functions callable with exactly one argument."""
    def noargs():
        pass
    # Zero required parameters: rejected.
    with pytest.raises(SyntaxError):
        execute_python._runfunc_ok(noargs)
    def twoargs(a, b):
        pass
    # Two required parameters: rejected.
    with pytest.raises(SyntaxError):
        execute_python._runfunc_ok(twoargs)
    def two_with_default(a, b=2):
        pass
    # making sure this doesn't raise
    execute_python._runfunc_ok(two_with_default)
    def one_with_default(a=2):
        pass
    # shouldn't raise
    execute_python._runfunc_ok(one_with_default)
    def starargs(*args):
        pass
    # shouldn't raise
    execute_python._runfunc_ok(starargs)
def test_execute_ok(protocol, protocol_file, loop):
    """A well-formed protocol parses and runs to completion without error."""
    proto = parse(protocol.text, protocol.filename)
    ctx = ProtocolContext(loop)
    execute.run_protocol(proto, context=ctx)
def run():
    # Fixture: a run() entry point taking no parameters — presumably used to
    # exercise the invalid-signature path; confirm against the calling test.
    pass
def run(a, b):
    # Fixture: a run() entry point taking two required parameters — presumably
    # exercises the invalid-signature path; confirm against the calling test.
    pass
def test_proto_with_exception(loop):
ctx = ProtocolContext(loop)
exc_in_root = '''metadata={"apiLevel": "2.0"} | Opentrons/labware | [
323,
152,
323,
668,
1436215261
] |
def this_throws():
    """Helper that always raises a generic Exception with message 'hi'."""
    error = Exception("hi")
    raise error
def setUp(self):
    """Test fixture: locate the breakpoint line in main.m and record the
    shared library the test depends on."""
    # Call super's setUp().
    TestBase.setUp(self)
    # Find the line number to break inside main().
    self.source = 'main.m'
    self.line = line_number(self.source, '// Set breakpoint 0 here.')
    # Shared libraries the test expects to load — presumably consumed by
    # the test driver; confirm usage.
    self.shlib_names = ["Container"]
21,
16,
21,
3,
1435959644
] |
def test_expr(self):
    # Delegates to the shared driver defined on the test class.
    self.do_test()
21,
16,
21,
3,
1435959644
] |
def __init__(self, oauth2session, client_type, client_config):
    """
    Args:
        oauth2session (requests_oauthlib.OAuth2Session):
            The OAuth 2.0 session from ``requests-oauthlib``.
        client_type (str): The client type, either ``web`` or
            ``installed``.
        client_config (Mapping[str, Any]): The client
            configuration in the Google `client secrets`_ format.
    .. _client secrets:
        https://developers.google.com/api-client-library/python/guide
        /aaa_client_secrets
    """
    self.client_type = client_type
    """str: The client type, either ``'web'`` or ``'installed'``"""
    # Only the section for the active client type is retained.
    self.client_config = client_config[client_type]
    """Mapping[str, Any]: The OAuth 2.0 client configuration."""
    self.oauth2session = oauth2session
    """requests_oauthlib.OAuth2Session: The OAuth 2.0 session."""
9,
2,
9,
74,
1474583398
] |
def from_client_config(cls, client_config, scopes, **kwargs):
    """Creates a :class:`Flow` from client configuration loaded from a
    Google-format client secrets mapping.
    Args:
        client_config (Mapping[str, Any]): The client
            configuration in the Google `client secrets`_ format.
        scopes (Sequence[str]): The list of scopes to request during the
            flow.
        kwargs: Any additional parameters passed to
            :class:`requests_oauthlib.OAuth2Session`
    Returns:
        Flow: The constructed Flow instance.
    Raises:
        ValueError: If the client configuration is not in the correct
            format.
    .. _client secrets:
        https://developers.google.com/api-client-library/python/guide
        /aaa_client_secrets
    """
    # Determine which top-level section the secrets mapping carries;
    # 'web' takes precedence, matching the original lookup order.
    for candidate in ('web', 'installed'):
        if candidate in client_config:
            client_type = candidate
            break
    else:
        raise ValueError(
            'Client secrets must be for a web or installed app.')
    session, client_config = (
        google.oauth2.oauthlib.session_from_client_config(
            client_config, scopes, **kwargs))
    return cls(session, client_type, client_config)
9,
2,
9,
74,
1474583398
] |
def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
    """Creates a :class:`Flow` instance from a Google client secrets file.
    Args:
        client_secrets_file (str): The path to the client secrets .json
            file.
        scopes (Sequence[str]): The list of scopes to request during the
            flow.
        kwargs: Any additional parameters passed to
            :class:`requests_oauthlib.OAuth2Session`
    Returns:
        Flow: The constructed Flow instance.
    """
    # Load the JSON config, then defer all validation to from_client_config.
    with open(client_secrets_file, 'r') as secrets_file:
        loaded_config = json.load(secrets_file)
    return cls.from_client_config(loaded_config, scopes=scopes, **kwargs)
9,
2,
9,
74,
1474583398
] |
def redirect_uri(self):
    """str: The OAuth 2.0 redirect URI (pass-through to the session)."""
    session = self.oauth2session
    return session.redirect_uri
9,
2,
9,
74,
1474583398
] |
def redirect_uri(self, value):
    # Pass-through setter: store the redirect URI on the underlying session.
    session = self.oauth2session
    session.redirect_uri = value
9,
2,
9,
74,
1474583398
] |
def fetch_token(self, **kwargs):
    """Completes the Authorization Flow and obtains an access token.
    This is the final step in the OAuth 2.0 Authorization Flow, called
    after the user consents. It delegates to
    :meth:`requests_oauthlib.OAuth2Session.fetch_token`, supplying the
    client configuration's token URI (usually Google's token server).
    Args:
        kwargs: Arguments passed through to
            :meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least
            one of ``code`` or ``authorization_response`` must be
            specified.
    Returns:
        Mapping[str, str]: The obtained tokens. Typically you will not use
        the return value directly; call :meth:`credentials` instead to get
        a :class:`~google.auth.credentials.Credentials` instance.
    """
    config = self.client_config
    return self.oauth2session.fetch_token(
        config['token_uri'],
        client_secret=config['client_secret'],
        **kwargs)
9,
2,
9,
74,
1474583398
] |
def credentials(self):
    """Returns credentials from the OAuth 2.0 session.
    :meth:`fetch_token` must be called before accessing this. This method
    constructs a :class:`google.oauth2.credentials.Credentials` class using
    the session's token and the client config.
    Returns:
        google.oauth2.credentials.Credentials: The constructed credentials.
    Raises:
        ValueError: If there is no access token in the session.
    """
    # Construction (including the missing-token check) is delegated to the
    # oauthlib helper.
    return google.oauth2.oauthlib.credentials_from_session(
        self.oauth2session, self.client_config)
9,
2,
9,
74,
1474583398
] |
def __init__(self, attrs=None, years=None):
    """Date select widget.

    ``years`` is an optional iterable of years for the "year" select box;
    when omitted (or empty) it defaults to the decade starting this year.
    """
    self.attrs = attrs or {}
    if not years:
        this_year = datetime.date.today().year
        years = range(this_year, this_year + 10)
    self.years = years
20,
7,
20,
1,
1215189246
] |
def id_for_label(self, id_):
    # The month sub-widget is the label's "for" target.
    return '{}_month'.format(id_)
20,
7,
20,
1,
1215189246
] |
def __init__(self, values, row_splits):
    """Creates a `RaggedTensorValue`.

    Args:
      values: A numpy array of any type and shape; or a RaggedTensorValue.
      row_splits: A 1-D int32 or int64 numpy array.

    Raises:
      TypeError: If `row_splits` or `values` has an unsupported type.
      ValueError: If nested `row_splits` dtypes disagree.
    """
    # row_splits must be a rank-1 integer numpy array (np.generic covers
    # zero-dimensional array scalars).
    splits_ok = (
        isinstance(row_splits, (np.ndarray, np.generic)) and
        row_splits.dtype in (np.int64, np.int32) and
        row_splits.ndim == 1)
    if not splits_ok:
        raise TypeError("row_splits must be a 1D int32 or int64 numpy array")
    if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)):
        raise TypeError("values must be a numpy array or a RaggedTensorValue")
    # For nested ragged values, every row_splits level must share one dtype.
    if isinstance(values, RaggedTensorValue):
        if row_splits.dtype != values.row_splits.dtype:
            raise ValueError("row_splits and values.row_splits must have "
                             "the same dtype")
    self._values = values
    self._row_splits = row_splits
171949,
87931,
171949,
2300,
1446859160
] |
def flat_values(self):
    """The innermost `values` array for this ragged tensor value."""
    # Unwrap nested RaggedTensorValues until a plain array remains.
    inner = self.values
    while isinstance(inner, RaggedTensorValue):
        inner = inner.values
    return inner
171949,
87931,
171949,
2300,
1446859160
] |
def nested_row_splits(self):
    """The row_splits for all ragged dimensions in this ragged tensor value."""
    # Collect one row_splits array per ragged nesting level, outermost first.
    splits = [self.row_splits]
    nested = self.values
    while isinstance(nested, RaggedTensorValue):
        splits.append(nested.row_splits)
        nested = nested.values
    return tuple(splits)
171949,
87931,
171949,
2300,
1446859160
] |
def ragged_rank(self):
    """The number of ragged dimensions in this ragged tensor value."""
    # Nested ragged values contribute their own rank plus this level.
    if isinstance(self._values, RaggedTensorValue):
        return self._values.ragged_rank + 1
    return 1
171949,
87931,
171949,
2300,
1446859160
] |
def shape(self):
    """A tuple indicating the shape of this RaggedTensorValue.

    The outermost dimension is ``len(row_splits) - 1``; the ragged
    dimension is reported as None; trailing dims come from the values.
    """
    nrows = self._row_splits.shape[0] - 1
    return (nrows, None) + tuple(self._values.shape[1:])
171949,
87931,
171949,
2300,
1446859160
] |
def _nested_row_partitions(self):
    """The row_partitions representing this shape."""
    # One RowPartition per ragged dimension, built from its row_splits.
    return [RowPartition.from_row_splits(rs) for rs in self.nested_row_splits]
171949,
87931,
171949,
2300,
1446859160
] |
def __repr__(self):
    # Mirrors tf.RaggedTensor's repr, using the raw numpy reprs.
    return "tf.RaggedTensorValue(values={!r}, row_splits={!r})".format(
        self._values, self._row_splits)
171949,
87931,
171949,
2300,
1446859160
] |
def __init__(self, args):
    """Abstract class to validate and store properties related to Skia Gold.
    Args:
      args: The parsed arguments from an argparse.ArgumentParser.
    """
    # Every property starts unset; _InitializeProperties (implemented by
    # subclasses) fills them in from |args|.
    self._git_revision = None
    self._issue = None
    self._patchset = None
    self._job_id = None
    self._local_pixel_tests = None
    self._no_luci_auth = None
    self._bypass_skia_gold_functionality = None
    self._code_review_system = None
    self._continuous_integration_system = None
    self._local_png_directory = None
    self._InitializeProperties(args)
131,
27,
131,
3,
1490828945
] |
def continuous_integration_system(self):
    """The CI system name, defaulting to 'buildbucket' when unset."""
    if self._continuous_integration_system:
        return self._continuous_integration_system
    return 'buildbucket'
131,
27,
131,
3,
1490828945
] |
def code_review_system(self):
    """The code review system name, defaulting to 'gerrit' when unset."""
    if self._code_review_system:
        return self._code_review_system
    return 'gerrit'
131,
27,
131,
3,
1490828945
] |
def git_revision(self):
    # Resolved lazily via the subclass-provided _GetGitRevision helper.
    return self._GetGitRevision()
131,
27,
131,
3,
1490828945
] |
def issue(self):
    # The code-review issue/CL identifier captured at init time, if any.
    return self._issue
131,
27,
131,
3,
1490828945
] |
def job_id(self):
    # Identifier of the CI job/build this run belongs to, if any.
    return self._job_id
131,
27,
131,
3,
1490828945
] |
def local_pixel_tests(self):
    # Whether this is a local (workstation) run; auto-detected on first use.
    return self._IsLocalRun()
131,
27,
131,
3,
1490828945
] |
def local_png_directory(self):
    # Directory for locally produced PNGs, if one was configured.
    return self._local_png_directory
131,
27,
131,
3,
1490828945
] |
def no_luci_auth(self):
    # Value of the no-LUCI-auth option captured at init time — presumably
    # disables LUCI authentication for goldctl; confirm against callers.
    return self._no_luci_auth
131,
27,
131,
3,
1490828945
] |
def patchset(self):
    # The patchset number of the CL under review, if any.
    return self._patchset
131,
27,
131,
3,
1490828945
] |
def bypass_skia_gold_functionality(self):
    # When truthy, Skia Gold interaction is presumably skipped entirely —
    # confirm against the code that consumes this property.
    return self._bypass_skia_gold_functionality
131,
27,
131,
3,
1490828945
] |
def _GetGitOriginMainHeadSha1():
    # Abstract hook: must be overridden to return the SHA1 of
    # origin/main's HEAD.
    raise NotImplementedError()
131,
27,
131,
3,
1490828945
] |
def _IsLocalRun(self):
    """Determines (and caches) whether this run is local or on a bot."""
    if self._local_pixel_tests is None:
        # SWARMING_SERVER should always be set on swarming bots but would be
        # strange on a workstation, so use its presence as the heuristic.
        self._local_pixel_tests = 'SWARMING_SERVER' not in os.environ
        if self._local_pixel_tests:
            where = 'workstation'
        else:
            where = 'bot'
        logging.warning(
            'Automatically determined that test is running on a %s' % where)
    return self._local_pixel_tests
131,
27,
131,
3,
1490828945
] |
def parse_args(args=None, *, parser_type=None):
    """Parses command-line arguments for updating active chromium milestones.

    Args:
      args: Optional argument list; argparse falls back to sys.argv.
      parser_type: Optional parser factory, injectable for testing.

    Returns:
      The parsed namespace; ``func`` holds the chosen sub-command handler.
    """
    parser_type = parser_type or argparse.ArgumentParser
    parser = parser_type(
        description='Update the active milestones for the chromium project')
    # Default func to None so a missing sub-command can be detected below.
    parser.set_defaults(func=None)
    parser.add_argument('--milestones-json',
                        help='Path to the milestones.json file',
                        default=os.path.join(INFRA_CONFIG_DIR, 'milestones.json'))
    subparsers = parser.add_subparsers()
    # 'activate' sub-command: adds a milestone/branch pair.
    activate_parser = subparsers.add_parser(
        'activate', help='Add an additional active milestone')
    activate_parser.set_defaults(func=activate_cmd)
    activate_parser.add_argument(
        '--milestone',
        required=True,
        help=('The milestone identifier '
              '(e.g. the milestone number for standard release channel)'))
    activate_parser.add_argument(
        '--branch',
        required=True,
        help='The branch name, must correspond to a ref in refs/branch-heads')
    # 'deactivate' sub-command: removes a milestone.
    deactivate_parser = subparsers.add_parser(
        'deactivate', help='Remove an active milestone')
    deactivate_parser.set_defaults(func=deactivate_cmd)
    deactivate_parser.add_argument(
        '--milestone',
        required=True,
        help=('The milestone identifier '
              '(e.g. the milestone number for standard release channel)'))
    args = parser.parse_args(args)
    # argparse does not require a sub-command by default; enforce it here.
    if args.func is None:
        parser.error('no sub-command specified')
    return args
136,
133,
136,
45,
1453904223
] |
def numeric_sort_key(s):
    """Sort key that orders embedded digit runs numerically.

    Splitting with a capturing regex alternates non-numeric and numeric
    pieces: even indices are non-numeric, odd indices are the captured
    digit runs (the 0th or last piece is empty when the string starts or
    ends with a number). Numeric pieces become ``(int, str)`` pairs so
    they compare by value while remaining stable for equal values.
    """
    pieces = _NUMBER_RE.split(s)
    key = []
    for index, piece in enumerate(pieces):
        if index % 2:
            key.append((int(piece), piece))
        else:
            key.append(piece)
    return key
136,
133,
136,
45,
1453904223
] |
def activate_cmd(args):
    """Handler for 'activate': adds a milestone and rewrites milestones.json.

    NOTE(review): this writes the result of add_milestone directly, so
    add_milestone presumably returns the serialized file content as a
    string — confirm.
    """
    with open(args.milestones_json) as f:
        milestones = json.load(f)
    milestones = add_milestone(milestones, args.milestone, args.branch)
    with open(args.milestones_json, 'w') as f:
        f.write(milestones)
136,
133,
136,
45,
1453904223
] |
def deactivate_cmd(args):
    """Handler for 'deactivate': removes a milestone and rewrites
    milestones.json.

    NOTE(review): as with activate_cmd, remove_milestone presumably returns
    the serialized file content as a string — confirm.
    """
    with open(args.milestones_json) as f:
        milestones = json.load(f)
    milestones = remove_milestone(milestones, args.milestone)
    with open(args.milestones_json, 'w') as f:
        f.write(milestones)
136,
133,
136,
45,
1453904223
] |
def isnan(val):
    """Returns True if val is NaN.

    NaN is the only floating-point value that compares unequal to itself,
    so no math-module import is needed.
    """
    return not (val == val)
136,
133,
136,
45,
1453904223
] |
def IsPosInf(val):
    """Returns True if val is positive infinity (infinite and > 0)."""
    if not isinf(val):
        return False
    return val > 0
136,
133,
136,
45,
1453904223
] |
def testBadUtf8String(self, message_module):
    """Parsing a string field containing invalid utf-8 raises, and the
    error names the offending field (pure-python implementation only)."""
    if api_implementation.Type() != 'python':
      self.skipTest("Skipping testBadUtf8String, currently only the python "
                    "api implementation raises UnicodeDecodeError when a "
                    "string field contains bad utf-8.")
    bad_utf8_data = test_util.GoldenFileData('bad_utf8_string')
    with self.assertRaises(UnicodeDecodeError) as context:
      message_module.TestAllTypes.FromString(bad_utf8_data)
    # The error message should identify which field held the bad bytes.
    self.assertIn('TestAllTypes.optional_string', str(context.exception))
136,
133,
136,
45,
1453904223
] |
def testGoldenPackedMessage(self, message_module):
    """Round-trips the golden packed-fields message: parse, compare against
    a freshly-populated message, and re-serialize byte-for-byte."""
    golden_data = test_util.GoldenFileData('golden_packed_fields_message')
    golden_message = message_module.TestPackedTypes()
    parsed_bytes = golden_message.ParseFromString(golden_data)
    all_set = message_module.TestPackedTypes()
    test_util.SetAllPackedFields(all_set)
    # ParseFromString reports how many bytes it consumed; all must be used.
    self.assertEqual(parsed_bytes, len(golden_data))
    self.assertEqual(all_set, golden_message)
    # Serialization must reproduce the golden file exactly.
    self.assertEqual(golden_data, all_set.SerializeToString())
    # A deep copy must serialize identically too.
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())
136,
133,
136,
45,
1453904223
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.