hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace13a2722799e0e06a21b8906bda2bc74044f23 | 431 | py | Python | aula13.py | rodrigosilvanew/pythonexercicios-guanabara | de9de3109d6ff97d5f61ef41cb8e9db6de37a644 | [
"MIT"
] | null | null | null | aula13.py | rodrigosilvanew/pythonexercicios-guanabara | de9de3109d6ff97d5f61ef41cb8e9db6de37a644 | [
"MIT"
] | null | null | null | aula13.py | rodrigosilvanew/pythonexercicios-guanabara | de9de3109d6ff97d5f61ef41cb8e9db6de37a644 | [
"MIT"
] | null | null | null | '''for c in range(6, 0, -1): # ele ignora o último número / se colocar (x, y, -1) ele vai contando de trás pra frente
print(c)
print('FIM')
'''
'''
i = int(input('Início: '))
f = int(input('Fim: '))
p = int(input('Passo: '))
for c in range(i, f+1, p):
print(c)
print('FIM')
'''
s = 0
for c in range(0, 4):
n = int(input('Digite um valor: '))
s += n
print('O somatório de todos os valores foi {}.'.format(s))
| 20.52381 | 121 | 0.559165 |
ace13b6d59d5da570f872916ff92a42a96027774 | 4,993 | py | Python | booru/account/forms.py | NogardRyuu/Boorunaut | 3ff45e6338ae07ba7fbc8159bb44f542d3fb48ba | [
"MIT"
] | 19 | 2018-07-20T22:59:57.000Z | 2021-12-19T07:18:07.000Z | booru/account/forms.py | NogardRyuu/Boorunaut | 3ff45e6338ae07ba7fbc8159bb44f542d3fb48ba | [
"MIT"
] | 64 | 2017-12-23T20:33:02.000Z | 2021-09-03T02:20:16.000Z | booru/account/forms.py | NogardRyuu/Boorunaut | 3ff45e6338ae07ba7fbc8159bb44f542d3fb48ba | [
"MIT"
] | 14 | 2017-12-19T10:53:00.000Z | 2022-03-03T09:46:13.000Z | from django import forms
from django.contrib.auth.forms import (AuthenticationForm, UserCreationForm,
UsernameField)
from django.contrib.auth.models import Group
from django.db.models import Q
from django.template.defaultfilters import slugify
import booru.utils
from booru.account.models import Account
class UniqueUserEmailField(forms.EmailField):
    """
    An EmailField which only is valid if no Account has that email.

    Raises:
        forms.ValidationError: if at least one Account already uses *value*.
    """
    def validate(self, value):
        super().validate(value)
        # filter().exists() handles the zero/one/many cases in one query
        # without fetching a row; the original get()-and-catch pattern
        # raised the same ValidationError for both the single- and
        # multiple-match cases anyway.
        if Account.objects.filter(email=value).exists():
            raise forms.ValidationError("A user with that email already exists.")
class UsernameExistsField(UsernameField):
    """
    A UsernameField that raises an error when the name is
    not registered on the database.

    Used by login-style forms where the username must already exist.
    """
    def validate(self, value):
        super().validate(value)
        # EAFP: a missing Account surfaces as DoesNotExist, which is
        # translated into a form-level validation error.
        try:
            Account.objects.get(username=value)
        except Account.DoesNotExist:
            raise forms.ValidationError("There's no user registered with that username.")
class UsernameNotBlockedField(UsernameExistsField):
    """
    A UsernameExistsField that raises an error when the account
    is banned from the website (i.e. has an active "can_login"
    privilege timeout).
    """
    def validate(self, value):
        # Parent validate() confirms an Account with this *username* exists.
        super().validate(value)
        # NOTE(review): the lookup below is by slug, not username; if the
        # slugified name does not match an Account this raises an uncaught
        # DoesNotExist — confirm slugify(username) is always the slug.
        account = Account.objects.get(slug=slugify(value))
        priv_timeout = account.get_priv_timeout("can_login")
        if priv_timeout.exists(): # is banned
            raise forms.ValidationError("This user is currently banned until {}.".format(priv_timeout.first().expiration))
class UniqueUsernameField(UsernameField):
    """
    A UsernameField that raises an error when the
    name is already in use.

    Uniqueness is checked against the slugified username so that names
    differing only in case/punctuation still collide.

    Raises:
        forms.ValidationError: if an Account with the same slug exists.
    """
    def validate(self, value):
        super().validate(value)
        # filter().exists() handles the zero/one/many cases in one query;
        # the original get()-and-catch pattern raised the same
        # ValidationError for both single and multiple matches.
        if Account.objects.filter(slug=slugify(value)).exists():
            raise forms.ValidationError("There's already an user registered with that username.")
class UserRegisterForm(UserCreationForm):
    """
    Extends the built-in UserCreationForm to include the Account email
    and the form-control CSS class in each widget.
    """
    # Email must be unique across Accounts (enforced by the field itself).
    email = UniqueUserEmailField(required=True, label='Email address')
    # Username must not collide with an existing (slugified) username.
    username = UniqueUsernameField(
        max_length=254,
        widget=forms.TextInput(attrs={'autofocus': True, 'class': 'form-control'}),
    )

    class Meta:
        model = Account
        fields = ("username", "email")

    def __init__(self, *args, **kwargs):
        """Apply Bootstrap's form-control class to the remaining widgets."""
        super().__init__(*args, **kwargs)
        self.fields['password1'].widget = forms.PasswordInput(attrs={'class': 'form-control'})
        self.fields['password2'].widget = forms.PasswordInput(attrs={'class': 'form-control'})
        self.fields['email'].widget = forms.EmailInput(attrs={'class': 'form-control'})
class UserAuthenticationForm(AuthenticationForm):
    """
    Extends the built-in AuthenticationForm to add
    the form-control CSS class in each widget.

    The username field also rejects banned accounts at validation time.
    """
    username = UsernameNotBlockedField(
        max_length=254,
        widget=forms.TextInput(attrs={'autofocus': True, 'class': 'form-control'}),
    )

    def __init__(self, *args, **kwargs):
        """Apply Bootstrap's form-control class to the password widget."""
        super().__init__(*args, **kwargs)
        self.fields['password'].widget = forms.PasswordInput(attrs={'class': 'form-control'})
class UserSettingsForm(forms.ModelForm):
    """
    Form for modifying the user settings
    (safe-only mode, comment visibility and tag blacklist).
    """
    class Meta:
        model = Account
        fields = ["safe_only", "show_comments", "tag_blacklist"]

    def __init__(self, *args, **kwargs):
        """Style the widgets; the tag blacklist is shown but disabled."""
        super().__init__(*args, **kwargs)
        self.fields['safe_only'].widget = forms.CheckboxInput(attrs={'class': 'form-control', 'data-toggle': 'toggle'})
        self.fields['show_comments'].widget = forms.CheckboxInput(attrs={'class': 'form-control', 'data-toggle': 'toggle'})
        # TODO: implement the tag blacklist
        #self.fields['tag_blacklist'].widget = forms.Textarea(attrs={'class': 'form-control', 'rows': '4', 'placeholder': 'Ex.: wall rating:explicit user:girugamesh'})
        self.fields['tag_blacklist'].widget = forms.Textarea(attrs={'class': 'form-control', 'rows': '4', 'placeholder': "This feature wasn't implemented yet.", "disabled": ""})
class StaffUserGroupForm(forms.Form):
    """
    Staff-facing form for assigning a role/group to a user.

    NOTE(review): a plain ``forms.Form`` ignores the inner ``Meta``
    class below; it is kept as-is but has no effect.
    """
    group = forms.ChoiceField()

    class Meta:
        fields = "__all__"

    def __init__(self, *args, **kwargs):
        """Populate the group choices from all roles known to the booru."""
        super(StaffUserGroupForm, self).__init__(*args, **kwargs)
        self.fields['group'].widget = forms.Select(attrs={'class': 'form-control'})
        self.fields['group'].choices = booru.utils.get_all_roles()
| 37.261194 | 177 | 0.662728 |
ace13c1d33da4269ae9949f8c00b6b122cd3248a | 2,884 | py | Python | nsxt/nsxt_transport_node_details.py | VuppalaJagadeesh/ansible-module-chaperone | 052a7a39d08d9b72238bdbddfcebd5ea84307463 | [
"Apache-2.0"
] | null | null | null | nsxt/nsxt_transport_node_details.py | VuppalaJagadeesh/ansible-module-chaperone | 052a7a39d08d9b72238bdbddfcebd5ea84307463 | [
"Apache-2.0"
] | null | null | null | nsxt/nsxt_transport_node_details.py | VuppalaJagadeesh/ansible-module-chaperone | 052a7a39d08d9b72238bdbddfcebd5ea84307463 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
#
# Copyright © 2015 VMware, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#__author__ = 'VJ49'
import yaml
import yamlordereddictloader
from collections import OrderedDict
import logging
# Module-level logger that appends to the Chaperone NSX-T log file.
logger = logging.getLogger('vswitch')
hdlr = logging.FileHandler('/var/log/chaperone/ChaperoneNSXtLog.log')
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(funcName)s: %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(10)  # 10 == logging.DEBUG
def main():
    """Ansible module entry point.

    Reads /var/lib/chaperone/answerfile.yml (preserving key order) and
    builds the list of transport host nodes: every ``esxi_compute*`` key
    containing both 'host' and 'ip' supplies an ``ip_address`` and every
    key containing 'host' and 'vmnic' supplies a ``vmnic``; each vmnic
    entry completes one host record. The result is returned to Ansible
    via ``module.exit_json``; any error is reported via ``fail_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
        ),
        supports_check_mode=True
    )
    final_dict = {}
    sub_dict = {}
    main_dict = {}
    main_list = list()
    # `with` guarantees the answer file is closed (the original leaked
    # the file handle); the ordered loader preserves key order.
    with open('/var/lib/chaperone/answerfile.yml', 'r') as stream1:
        dict1 = yaml.load(stream1, Loader=yamlordereddictloader.Loader)
    try:
        for data in dict1:
            # NOTE(review): sub_dict is collected but never returned;
            # kept for backward compatibility.
            if data.startswith('check_edge_node'):
                sub_dict[data] = dict1[data]
        for content in dict1:
            if content.startswith('esxi_compute'):
                if 'host' in content and 'ip' in content:
                    main_dict["ip_address"] = dict1[content]
                    logger.info(main_dict)
                if 'host' in content and 'vmnic' in content:
                    main_dict["vmnic"] = dict1[content]
                    logger.info(main_dict)
                    # a vmnic entry completes one host record
                    main_list.append(main_dict)
                    main_dict = {}
        final_dict['transport_host_nodes'] = main_list
        module.exit_json(changed=True, id=final_dict, msg="Successfully got the information")
    except Exception as err:
        module.fail_json(changed=False, msg="Failure: %s" % (err))
from ansible.module_utils.basic import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| 38.972973 | 115 | 0.682732 |
ace13d5657f58e5b838f7471c60838e8b17a9f2a | 1,857 | py | Python | mmdet/utils/checkpoint.py | jie311/miemiedetection | b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6 | [
"Apache-2.0"
] | 65 | 2021-12-30T03:30:52.000Z | 2022-03-25T01:44:32.000Z | mmdet/utils/checkpoint.py | jie311/miemiedetection | b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6 | [
"Apache-2.0"
] | 1 | 2021-12-31T01:51:35.000Z | 2022-01-01T14:42:37.000Z | mmdet/utils/checkpoint.py | jie311/miemiedetection | b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6 | [
"Apache-2.0"
] | 7 | 2021-12-31T09:25:06.000Z | 2022-03-10T01:25:09.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import os
import shutil
from loguru import logger
import torch
def load_ckpt(model, ckpt):
    """Load matching weights from *ckpt* into *model*.

    Checkpoint entries are copied only when the key exists in the model's
    state dict and the tensor shapes agree; mismatches are skipped with a
    warning. Loading uses ``strict=False`` so skipped keys are tolerated.

    Args:
        model: a torch module whose parameters are to be (partially) filled.
        ckpt: mapping of parameter name -> tensor (a loaded state dict).

    Returns:
        The same *model* instance, mutated in place.
    """
    filtered = {}
    for name, param in model.state_dict().items():
        if name not in ckpt:
            logger.warning(
                "{} is not in the ckpt. Please double check and see if this is desired.".format(
                    name
                )
            )
            continue
        candidate = ckpt[name]
        if param.shape != candidate.shape:
            logger.warning(
                "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
                    name, candidate.shape, name, param.shape
                )
            )
            continue
        filtered[name] = candidate
    model.load_state_dict(filtered, strict=False)
    return model
def save_checkpoint(state, is_best, save_dir, model_name="", max_keep=10):
    """Save *state* to ``<save_dir>/<model_name>.pth``.

    If *is_best* is true the file is also copied to ``best_ckpt.pth``.
    Afterwards, if more than *max_keep* numerically-named checkpoints
    ("<epoch>.pth" / "<epoch>.pt") exist in *save_dir*, the one with the
    smallest epoch number is deleted (at most one per call, matching the
    original behaviour).

    Args:
        state: object to serialize with ``torch.save`` (usually a dict).
        is_best: whether to duplicate the file as ``best_ckpt.pth``.
        save_dir: directory to write into (created if missing).
        model_name: basename of the checkpoint file, without extension.
        max_keep: maximum number of epoch-numbered checkpoints to retain.
    """
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(save_dir, exist_ok=True)
    filename = os.path.join(save_dir, model_name + ".pth")
    torch.save(state, filename)
    if is_best:
        best_filename = os.path.join(save_dir, "best_ckpt.pth")
        shutil.copyfile(filename, best_filename)

    # Prune the oldest numerically-named checkpoint once the cap is hit.
    epoch_ids = []
    for name in os.listdir(save_dir):
        parts = name.split('.')
        if parts[-1] in ("pth", "pt") and parts[0].isdigit():
            epoch_ids.append(int(parts[0]))
    if len(epoch_ids) > max_keep:  # was `max_keep * 1` — the *1 was a no-op
        oldest = min(epoch_ids)
        del_model = '%s/%d.pth' % (save_dir, oldest)
        if os.path.exists(del_model):
            os.remove(del_model)
ace13d823b6df10bbe4088dd69c0f2bea187c09c | 3,577 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/module_utils/xenserver/test_gather_vm_params_and_facts.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/module_utils/xenserver/test_gather_vm_params_and_facts.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/module_utils/xenserver/test_gather_vm_params_and_facts.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ..common import testcase_bad_xenapi_refs
# Paired fixture files (VM params JSON + expected facts JSON) and the
# human-readable ids consumed by pytest.mark.parametrize below.
testcase_gather_vm_params_and_facts = {
    "params": [
        ["ansible-test-vm-1-params.json", "ansible-test-vm-1-facts.json"],
        ["ansible-test-vm-2-params.json", "ansible-test-vm-2-facts.json"],
        ["ansible-test-vm-3-params.json", "ansible-test-vm-3-facts.json"],
    ],
    "ids": [
        "ansible-test-vm-1",
        "ansible-test-vm-2",
        "ansible-test-vm-3",
    ],
}
@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
def test_gather_vm_params_bad_vm_ref(fake_ansible_module, xenserver, vm_ref):
    """Tests return of empty dict on bad vm_ref (None, empty, malformed)."""
    assert xenserver.gather_vm_params(fake_ansible_module, vm_ref) == {}
def test_gather_vm_facts_no_vm_params(fake_ansible_module, xenserver):
    """Tests return of empty facts dict when vm_params is not available
    (both None and an empty dict must yield {})."""
    assert xenserver.gather_vm_facts(fake_ansible_module, None) == {}
    assert xenserver.gather_vm_facts(fake_ansible_module, {}) == {}
@pytest.mark.parametrize('fixture_data_from_file',
                         testcase_gather_vm_params_and_facts['params'],
                         ids=testcase_gather_vm_params_and_facts['ids'],
                         indirect=True)
def test_gather_vm_params_and_facts(mocker, fake_ansible_module, XenAPI, xenserver, fixture_data_from_file):
    """Tests proper parsing of VM parameters and facts.

    The fixture loads a params/facts JSON pair; the XenAPI session is
    mocked so that every *.get_record call is answered from the params
    fixture, then gather_vm_facts() output is compared against the facts
    fixture.
    """
    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)

    # The fixture returns a dict keyed by the two file names; work out
    # which key is the params file and which is the facts file.
    if "params" in list(fixture_data_from_file.keys())[0]:
        params_file = list(fixture_data_from_file.keys())[0]
        facts_file = list(fixture_data_from_file.keys())[1]
    else:
        params_file = list(fixture_data_from_file.keys())[1]
        facts_file = list(fixture_data_from_file.keys())[0]

    # Answer every XenAPI object-record lookup from the params fixture.
    mocked_returns = {
        "VM.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM'][obj_ref],
        "VM_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM_metrics'][obj_ref],
        "VM_guest_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM_guest_metrics'][obj_ref],
        "VBD.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VBD'][obj_ref],
        "VDI.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VDI'][obj_ref],
        "SR.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['SR'][obj_ref],
        "VIF.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VIF'][obj_ref],
        "network.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['network'][obj_ref],
        "host.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['host'][obj_ref],
    }

    mocked_xenapi.configure_mock(**mocked_returns)

    # Pin the XenServer version so version-dependent fact parsing is stable.
    mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])

    # Use the first (and only) VM reference in the params fixture.
    vm_ref = list(fixture_data_from_file[params_file]['VM'].keys())[0]

    assert xenserver.gather_vm_facts(fake_ansible_module, xenserver.gather_vm_params(fake_ansible_module, vm_ref)) == fixture_data_from_file[facts_file]
ace13f02e9439a16f8995494098518afd0baac4f | 980 | py | Python | app/core/tests/test_commands.py | ombg/pdd-site | 04a3cc3ebc744f45e3ffe86e75af6b7a9d4e31d0 | [
"MIT"
] | null | null | null | app/core/tests/test_commands.py | ombg/pdd-site | 04a3cc3ebc744f45e3ffe86e75af6b7a9d4e31d0 | [
"MIT"
] | null | null | null | app/core/tests/test_commands.py | ombg/pdd-site | 04a3cc3ebc744f45e3ffe86e75af6b7a9d4e31d0 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
    """
    Goal: Mock the behaviour of the Django get_database function.
    It simulates if DB is available or not.
    """
    def test_wait_for_db_ready(self):
        """Test waiting for db when db is available"""
        # Patch the connection handler so the DB appears up on the first try.
        with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            gi.return_value = True
            call_command('wait_for_db')
            self.assertEqual(gi.call_count, 1)

    @patch('time.sleep', return_value=True)  # Make sleep() return immediately
    def test_wait_for_db(self, ts):
        """Test waiting for db"""
        # First five attempts raise OperationalError, the sixth succeeds;
        # the command must keep retrying until the DB is reachable.
        with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            gi.side_effect = [OperationalError] * 5 + [True]
            call_command('wait_for_db')
            self.assertEqual(gi.call_count, 6)
ace13f3b64b57654d2251c20dc2b61e50aeb8dfe | 538 | py | Python | magni/reproducibility/__init__.py | SIP-AAU/Magni | 6328dc98a273506f433af52e6bd394754a844550 | [
"BSD-2-Clause"
] | 42 | 2015-02-09T10:17:26.000Z | 2021-12-21T09:38:04.000Z | magni/reproducibility/__init__.py | SIP-AAU/Magni | 6328dc98a273506f433af52e6bd394754a844550 | [
"BSD-2-Clause"
] | 3 | 2015-03-20T12:00:40.000Z | 2015-03-20T12:01:16.000Z | magni/reproducibility/__init__.py | SIP-AAU/Magni | 6328dc98a273506f433af52e6bd394754a844550 | [
"BSD-2-Clause"
] | 14 | 2015-04-28T03:08:32.000Z | 2021-07-24T13:29:24.000Z | """
..
Copyright (c) 2014-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing functionality for aiding in the quest for more
reproducible research.
Routine listings
----------------
data
Module providing functions that return various data about the system,
magni, files, etc.
io
Module providing input/output functions to databases containing results
from reproducible research.
"""
from magni.reproducibility import data
from magni.reproducibility import io
| 23.391304 | 75 | 0.745353 |
ace13f44931f4e1bc9629161551ef6f715cee6fe | 33,009 | py | Python | scripts/qcgf.py | pizzathief/PyFluxPro | c075c0040b4a9d6c9ab75ca1cef158f1307f8396 | [
"BSD-3-Clause"
] | null | null | null | scripts/qcgf.py | pizzathief/PyFluxPro | c075c0040b4a9d6c9ab75ca1cef158f1307f8396 | [
"BSD-3-Clause"
] | null | null | null | scripts/qcgf.py | pizzathief/PyFluxPro | c075c0040b4a9d6c9ab75ca1cef158f1307f8396 | [
"BSD-3-Clause"
] | null | null | null | # standard modules
import ast
import datetime
import os
import logging
import sys
# 3rd party modules
import numpy
import matplotlib.dates as mdt
import xlrd
# PFP modules
import constants as c
import qcio
import qcts
import qcutils
logger = logging.getLogger("pfp_log")
# GapFillParseControlFile parses the L4 control file
def GapFillParseControlFile(cf, ds, series, ds_alt):
    """
    Dispatch creation of the per-method gap-filling dictionaries for one
    series, based on which gap-fill keys appear in the control file:
    alternate, SOLO, MDS, climatology and merge-series.

    Args:
        cf: control file object.
        ds: data structure holding the tower data.
        series: name of the series to gap fill.
        ds_alt: dict of already-read alternate-data structures, keyed by
            file name (passed through to gfalternate_createdict).
    """
    # find the section containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return empty handed if the series is not in a section
    if len(section) == 0:
        return
    if "GapFillFromAlternate" in cf[section][series].keys():
        # create the alternate dictionary in ds
        gfalternate_createdict(cf, ds, series, ds_alt)
    if "GapFillUsingSOLO" in cf[section][series].keys():
        # create the SOLO dictionary in ds
        gfSOLO_createdict(cf, ds, series)
    if "GapFillUsingMDS" in cf[section][series].keys():
        # create the MDS dictionary in ds
        gfMDS_createdict(cf, ds, series)
    if "GapFillFromClimatology" in cf[section][series].keys():
        # create the climatology dictionary in the data structure
        gfClimatology_createdict(cf, ds, series)
    if "MergeSeries" in cf[section][series].keys():
        # create the merge series dictionary in the data structure
        gfMergeSeries_createdict(cf, ds, series)
def gfalternate_createdict(cf, ds, series, ds_alt):
    """
    Purpose:
     Creates a dictionary in ds to hold information about the alternate data used to gap fill the tower data.
    Usage:
     gfalternate_createdict(cf, ds, series, ds_alt)
     where cf is the control file, ds the tower data structure, series the
     series name and ds_alt a cache of alternate data structures keyed by
     file name (read on demand, side effect: new entries added here).
    Side effects:
     ds.alternate[output] is populated for each output listed in the
     control file; empty output series are created in ds if missing.
    Author: PRI
    Date: August 2014
    """
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section)==0:
        logger.error("GapFillFromAlternate: Series %s not found in control file, skipping ...", series)
        return
    # create the alternate directory in the data structure
    if "alternate" not in dir(ds):
        ds.alternate = {}
    # name of alternate output series in ds
    output_list = cf[section][series]["GapFillFromAlternate"].keys()
    # loop over the outputs listed in the control file
    for output in output_list:
        # create the dictionary keys for this output
        ds.alternate[output] = {}
        ds.alternate[output]["label_tower"] = series
        # source name
        ds.alternate[output]["source"] = cf[section][series]["GapFillFromAlternate"][output]["source"]
        # site name
        ds.alternate[output]["site_name"] = ds.globalattributes["site_name"]
        # alternate data file name
        # first, look in the [Files] section for a generic file name
        file_list = cf["Files"].keys()
        lower_file_list = [item.lower() for item in file_list]
        if ds.alternate[output]["source"].lower() in lower_file_list:
            # found a generic file name
            i = lower_file_list.index(ds.alternate[output]["source"].lower())
            ds.alternate[output]["file_name"] = cf["Files"][file_list[i]]
        else:
            # no generic file name found, look for a file name in the variable section
            ds.alternate[output]["file_name"] = cf[section][series]["GapFillFromAlternate"][output]["file_name"]
        # if the file has not already been read, do it now
        if ds.alternate[output]["file_name"] not in ds_alt:
            ds_alternate = qcio.nc_read_series(ds.alternate[output]["file_name"],fixtimestepmethod="round")
            # align the alternate data's time axis with the tower data
            gfalternate_matchstartendtimes(ds,ds_alternate)
            # cache the alternate data structure for later outputs
            ds_alt[ds.alternate[output]["file_name"]] = ds_alternate
        # get the type of fit; default is ordinary least squares
        ds.alternate[output]["fit_type"] = "OLS"
        if "fit" in cf[section][series]["GapFillFromAlternate"][output]:
            if cf[section][series]["GapFillFromAlternate"][output]["fit"].lower() in ["ols","ols_thru0","mrev","replace","rma","odr"]:
                ds.alternate[output]["fit_type"] = cf[section][series]["GapFillFromAlternate"][output]["fit"]
            else:
                logger.info("gfAlternate: unrecognised fit option for series %s, used OLS", output)
        # correct for lag? default is "yes"
        if "lag" in cf[section][series]["GapFillFromAlternate"][output]:
            if cf[section][series]["GapFillFromAlternate"][output]["lag"].lower() in ["no","false"]:
                ds.alternate[output]["lag"] = "no"
            elif cf[section][series]["GapFillFromAlternate"][output]["lag"].lower() in ["yes","true"]:
                ds.alternate[output]["lag"] = "yes"
            else:
                logger.info("gfAlternate: unrecognised lag option for series %s", output)
        else:
            ds.alternate[output]["lag"] = "yes"
        # choose specific alternate variable?
        if "usevars" in cf[section][series]["GapFillFromAlternate"][output]:
            ds.alternate[output]["usevars"] = ast.literal_eval(cf[section][series]["GapFillFromAlternate"][output]["usevars"])
        # alternate data variable name if different from name used in control file
        if "alternate_name" in cf[section][series]["GapFillFromAlternate"][output]:
            ds.alternate[output]["alternate_name"] = cf[section][series]["GapFillFromAlternate"][output]["alternate_name"]
        else:
            ds.alternate[output]["alternate_name"] = series
        # results of best fit for plotting later on
        ds.alternate[output]["results"] = {"startdate":[],"enddate":[],"No. points":[],"No. filled":[],
                                           "r":[],"Bias":[],"RMSE":[],"Frac Bias":[],"NMSE":[],
                                           "Avg (Tower)":[],"Avg (Alt)":[],
                                           "Var (Tower)":[],"Var (Alt)":[],"Var ratio":[]}
        # create an empty series in ds if the alternate output series doesn't exist yet
        if output not in ds.series.keys():
            data,flag,attr = qcutils.MakeEmptySeries(ds,output)
            qcutils.CreateSeries(ds,output,data,flag,attr)
            qcutils.CreateSeries(ds,series+"_composite",data,flag,attr)
def gfalternate_matchstartendtimes(ds,ds_alternate):
    """
    Purpose:
     Match the start and end times of the alternate and tower data.
     The logic is as follows:
     - if there is no overlap between the alternate and tower data then
       dummy series with missing data are created for the alternate data
       for the period of the tower data
     - if the alternate and tower data overlap then truncate or pad (with
       missing values) the alternate data series so that the periods of the
       tower data and alternate data match.
    Usage:
     gfalternate_matchstartendtimes(ds,ds_alternate)
     where ds is the data structure containing the tower data
           ds_alternate is the data structure containing the alternate data
    Side effects:
     ds_alternate is modified in place; ds.returncodes["GapFillFromAlternate"]
     is set to "normal" on success or "error" on a time-step mismatch.
    Author: PRI
    Date: July 2015
    """
    # check the time steps are the same
    ts_tower = int(ds.globalattributes["time_step"])
    ts_alternate = int(ds_alternate.globalattributes["time_step"])
    if ts_tower!=ts_alternate:
        msg = " GapFillFromAlternate: time step for tower and alternate data are different, returning ..."
        logger.error(msg)
        ds.returncodes["GapFillFromAlternate"] = "error"
        return
    # get the start and end times of the tower and the alternate data and see if they overlap
    ldt_alternate = ds_alternate.series["DateTime"]["Data"]
    start_alternate = ldt_alternate[0]
    ldt_tower = ds.series["DateTime"]["Data"]
    end_tower = ldt_tower[-1]
    # since the datetime is monotonically increasing we need only check the start datetime
    overlap = start_alternate<=end_tower
    # do the alternate and tower data overlap?
    if overlap:
        # index of alternate datetimes that are also in tower datetimes
        #alternate_index = qcutils.FindIndicesOfBInA(ldt_tower,ldt_alternate)
        #alternate_index = [qcutils.find_nearest_value(ldt_tower, dt) for dt in ldt_alternate]
        # index of tower datetimes that are also in alternate datetimes
        #tower_index = qcutils.FindIndicesOfBInA(ldt_alternate,ldt_tower)
        #tower_index = [qcutils.find_nearest_value(ldt_alternate, dt) for dt in ldt_tower]
        tower_index, alternate_index = qcutils.FindMatchingIndices(ldt_tower, ldt_alternate)
        # check that the indices point to the same times
        ldta = [ldt_alternate[i] for i in alternate_index]
        ldtt = [ldt_tower[i] for i in tower_index]
        if ldta!=ldtt:
            # and exit with a helpful message if they dont
            logger.error(" Something went badly wrong and I'm giving up")
            sys.exit()
        # get a list of alternate series (QC flag companions excluded)
        alternate_series_list = [item for item in ds_alternate.series.keys() if "_QCFlag" not in item]
        # number of records in truncated or padded alternate data
        nRecs_tower = len(ldt_tower)
        # force the alternate datetime to be the tower date time
        ds_alternate.series["DateTime"] = ds.series["DateTime"]
        # loop over the alternate series and truncate or pad as required
        # truncation or padding is handled by the indices
        for series in alternate_series_list:
            if series in ["DateTime","DateTime_UTC"]: continue
            # get the alternate data
            data,flag,attr = qcutils.GetSeriesasMA(ds_alternate,series)
            # create an array of missing data of the required length
            data_overlap = numpy.full(nRecs_tower,c.missing_value,dtype=numpy.float64)
            flag_overlap = numpy.ones(nRecs_tower,dtype=numpy.int32)
            # replace missing data with alternate data where times match
            data_overlap[tower_index] = data[alternate_index]
            flag_overlap[tower_index] = flag[alternate_index]
            # write the truncated or padded series back into the alternate data structure
            qcutils.CreateSeries(ds_alternate,series,data_overlap,flag_overlap,attr)
        # update the number of records in the file
        ds_alternate.globalattributes["nc_nrecs"] = nRecs_tower
    else:
        # there is no overlap between the alternate and tower data, create dummy series
        nRecs = len(ldt_tower)
        ds_alternate.globalattributes["nc_nrecs"] = nRecs
        ds_alternate.series["DateTime"] = ds.series["DateTime"]
        alternate_series_list = [item for item in ds_alternate.series.keys() if "_QCFlag" not in item]
        for series in alternate_series_list:
            if series in ["DateTime","DateTime_UTC"]:
                continue
            # keep the attributes but fill the data with the missing value
            _, _, attr = qcutils.GetSeriesasMA(ds_alternate, series)
            data = numpy.full(nRecs, c.missing_value, dtype=numpy.float64)
            flag = numpy.ones(nRecs, dtype=numpy.int32)
            qcutils.CreateSeries(ds_alternate, series, data, flag, attr)
    ds.returncodes["GapFillFromAlternate"] = "normal"
def gfClimatology_createdict(cf, ds, series):
    """
    Creates a dictionary in ds to hold information about the climatological
    data used to gap fill the tower data.

    Args:
        cf: control file object.
        ds: tower data structure; ds.climatology[output] is populated for
            each output listed under GapFillFromClimatology.
        series: name of the series to gap fill.
    """
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series,mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section) == 0:
        logger.error("GapFillFromClimatology: Series %s not found in control file, skipping ...", series)
        return
    # create the climatology directory in the data structure
    if "climatology" not in dir(ds):
        ds.climatology = {}
    # name of alternate output series in ds
    output_list = cf[section][series]["GapFillFromClimatology"].keys()
    # loop over the outputs listed in the control file
    for output in output_list:
        # create the dictionary keys for this output
        ds.climatology[output] = {}
        ds.climatology[output]["label_tower"] = series
        # site name
        ds.climatology[output]["site_name"] = ds.globalattributes["site_name"]
        # Climatology file name
        file_list = cf["Files"].keys()
        lower_file_list = [item.lower() for item in file_list]
        # first, look in the [Files] section for a generic file name
        if "climatology" in lower_file_list:
            # found a generic file name
            i = lower_file_list.index("climatology")
            ds.climatology[output]["file_name"] = cf["Files"][file_list[i]]
        else:
            # no generic file name found, look for a file name in the variable section
            ds.climatology[output]["file_name"] = cf[section][series]["GapFillFromClimatology"][output]["file_name"]
        # climatology variable name if different from name used in control file
        if "climatology_name" in cf[section][series]["GapFillFromClimatology"][output]:
            ds.climatology[output]["climatology_name"] = cf[section][series]["GapFillFromClimatology"][output]["climatology_name"]
        else:
            ds.climatology[output]["climatology_name"] = series
        # climatology gap filling method
        if "method" not in cf[section][series]["GapFillFromClimatology"][output].keys():
            # default if "method" missing is "interpolated_daily"
            ds.climatology[output]["method"] = "interpolated_daily"
        else:
            ds.climatology[output]["method"] = cf[section][series]["GapFillFromClimatology"][output]["method"]
        # create an empty series in ds if the climatology output series doesn't exist yet
        if output not in ds.series.keys():
            data, flag, attr = qcutils.MakeEmptySeries(ds, output)
            qcutils.CreateSeries(ds, output, data, flag, attr)
def gfMDS_createdict(cf, ds, series):
    """
    Purpose:
     Create an information dictionary for MDS gap filling from the contents
     of the control file.  One entry is created in ds.mds per output listed
     under [<section>][<series>][GapFillUsingMDS]; outputs whose target or
     all of whose drivers have no FluxNet label mapping are removed again.
    Usage:
     gfMDS_createdict(cf, ds, series)
    Side effects:
     Creates/extends the ds.mds dictionary.
    Author: PRI
    Date: May 2018
    """
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section) == 0:
        logger.error("GapFillUsingMDS: Series %s not found in control file, skipping ...", series)
        return
    # create the MDS attribute (a dictionary) in ds, this will hold all MDS settings
    if "mds" not in dir(ds):
        ds.mds = {}
    # loop over the outputs listed in the control file
    for output in cf[section][series]["GapFillUsingMDS"].keys():
        mds_cf = cf[section][series]["GapFillUsingMDS"][output]
        # create the dictionary keys for this output
        ds.mds[output] = {}
        # get the target; default is the series name itself
        if "target" in mds_cf:
            ds.mds[output]["target"] = mds_cf["target"]
        else:
            ds.mds[output]["target"] = series
        # site name
        ds.mds[output]["site_name"] = ds.globalattributes["site_name"]
        # NOTE(review): "mds_settings" was parsed but never stored in the original
        # code; the parse is kept so malformed settings still raise, but the
        # result is unused -- confirm whether it should be saved in ds.mds.
        if "mds_settings" in mds_cf:
            ast.literal_eval(mds_cf["mds_settings"])
        # list of drivers
        ds.mds[output]["drivers"] = ast.literal_eval(mds_cf["drivers"])
        # list of tolerances
        ds.mds[output]["tolerances"] = ast.literal_eval(mds_cf["tolerances"])
        # get the ustar filter option
        opt = qcutils.get_keyvaluefromcf(cf, [section, series, "GapFillUsingMDS", output], "turbulence_filter", default="")
        ds.mds[output]["turbulence_filter"] = opt
        # get the day/night filter option
        opt = qcutils.get_keyvaluefromcf(cf, [section, series, "GapFillUsingMDS", output], "daynight_filter", default="")
        ds.mds[output]["daynight_filter"] = opt
    # check that all requested targets and drivers have a mapping to
    # a FluxNet label, remove if they don't
    fluxnet_label_map = {"Fc": "NEE", "Fe": "LE", "Fh": "H",
                         "Fsd": "SW_IN", "Ta": "TA", "VPD": "VPD"}
    # BUG FIX: iterate over copies because entries and drivers are deleted
    # inside the loops; mutating a dict while iterating it raises RuntimeError
    # in Python 3 and removing from a list while iterating it skips elements.
    for mds_label in list(ds.mds.keys()):
        ds.mds[mds_label]["mds_label"] = mds_label
        pfp_target = ds.mds[mds_label]["target"]
        if pfp_target not in fluxnet_label_map:
            msg = " Target ("+pfp_target+") not supported for MDS gap filling"
            logger.warning(msg)
            del ds.mds[mds_label]
        else:
            ds.mds[mds_label]["target_mds"] = fluxnet_label_map[pfp_target]
            pfp_drivers = ds.mds[mds_label]["drivers"]
            for pfp_driver in list(pfp_drivers):
                if pfp_driver not in fluxnet_label_map:
                    msg = "Driver ("+pfp_driver+") not supported for MDS gap filling"
                    logger.warning(msg)
                    pfp_drivers.remove(pfp_driver)
                else:
                    if "drivers_mds" not in ds.mds[mds_label]:
                        ds.mds[mds_label]["drivers_mds"] = []
                    ds.mds[mds_label]["drivers_mds"].append(fluxnet_label_map[pfp_driver])
            # drop the output entirely if no usable drivers remain
            if len(ds.mds[mds_label]["drivers"]) == 0:
                del ds.mds[mds_label]
    return
def gfMergeSeries_createdict(cf, ds, series):
    """
    Creates a dictionary in ds to hold information about the merging of gap filled
    and tower data.

    The entry is placed under ds.merge["prerequisite"] for series that must be
    merged first (so they can be used with existing tower data to re-calculate
    Fg, Fn and Fa) and under ds.merge["standard"] otherwise.
    """
    merge_prereq_list = ["Fsd", "Fsu", "Fld", "Flu", "Ts", "Sws"]
    # get the section of the control file containing the series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section;
    # consistent with the other *_createdict functions (previously this fell
    # through and raised a KeyError on cf[section][series])
    if len(section) == 0:
        logger.error("MergeSeries: Series %s not found in control file, skipping ...", series)
        return
    # create the merge directory in the data structure
    if "merge" not in dir(ds):
        ds.merge = {}
    # series in the "merge first" list get merged before the others
    merge_order = "prerequisite" if series in merge_prereq_list else "standard"
    if merge_order not in ds.merge.keys():
        ds.merge[merge_order] = {}
    # create the dictionary keys for this series
    ds.merge[merge_order][series] = {}
    # output series name
    ds.merge[merge_order][series]["output"] = series
    # list of source series to merge, in order of precedence
    ds.merge[merge_order][series]["source"] = ast.literal_eval(cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.merge[merge_order][series]["output"] not in ds.series.keys():
        data, flag, attr = qcutils.MakeEmptySeries(ds, ds.merge[merge_order][series]["output"])
        qcutils.CreateSeries(ds, ds.merge[merge_order][series]["output"], data, flag, attr)
def gfSOLO_createdict(cf, ds, series):
    """
    Creates a dictionary in ds to hold information about the SOLO data used
    to gap fill the tower data.

    One entry is created in ds.solo for every output listed under
    [<section>][<series>][GapFillUsingSOLO] in the control file.
    """
    # locate the control file section holding this series
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    if len(section) == 0:
        # nothing to do if the series is not in the control file
        logger.error("GapFillUsingSOLO: Series %s not found in control file, skipping ...", series)
        return
    # make sure the data structure has a "solo" attribute to hold the settings
    if "solo" not in dir(ds):
        ds.solo = {}
    # one entry per output listed under [GapFillUsingSOLO]
    for output in cf[section][series]["GapFillUsingSOLO"].keys():
        solo_cf = cf[section][series]["GapFillUsingSOLO"][output]
        entry = ds.solo[output] = {}
        # tower series being gap filled (the "target"); defaults to the series name
        if "target" in solo_cf:
            entry["label_tower"] = solo_cf["target"]
        else:
            entry["label_tower"] = series
        # site name
        entry["site_name"] = ds.globalattributes["site_name"]
        # optional SOLO neural-network settings
        if "solo_settings" in solo_cf:
            ss_list = ast.literal_eval(solo_cf["solo_settings"])
            entry["solo_settings"] = {}
            entry["solo_settings"]["nodes_target"] = int(ss_list[0])
            entry["solo_settings"]["training"] = int(ss_list[1])
            entry["solo_settings"]["factor"] = int(ss_list[2])
            entry["solo_settings"]["learningrate"] = float(ss_list[3])
            entry["solo_settings"]["iterations"] = int(ss_list[4])
        # drivers used as SOLO inputs
        entry["drivers"] = ast.literal_eval(solo_cf["drivers"])
        # optional turbulence (ustar) and day/night filters
        entry["turbulence_filter"] = qcutils.get_keyvaluefromcf(
            cf, [section, series, "GapFillUsingSOLO", output], "turbulence_filter", default="")
        entry["daynight_filter"] = qcutils.get_keyvaluefromcf(
            cf, [section, series, "GapFillUsingSOLO", output], "daynight_filter", default="")
        # accumulators for the best-fit statistics plotted later on
        entry["results"] = {"startdate": [], "enddate": [], "No. points": [], "r": [],
                            "Bias": [], "RMSE": [], "Frac Bias": [], "NMSE": [],
                            "Avg (obs)": [], "Avg (SOLO)": [],
                            "Var (obs)": [], "Var (SOLO)": [], "Var ratio": [],
                            "m_ols": [], "b_ols": []}
        # create an empty series in ds if the SOLO output series doesn't exist yet
        if output not in ds.series.keys():
            data, flag, attr = qcutils.MakeEmptySeries(ds, output)
            qcutils.CreateSeries(ds, output, data, flag, attr)
# functions for GapFillUsingMDS: not implemented yet
def GapFillFluxUsingMDS(cf, ds, series=""):
    """Placeholder for MDS flux gap filling; logs a notice when requested."""
    # locate the control file section for this series; bail out quietly if absent
    section = qcutils.get_cfsection(cf, series=series, mode="quiet")
    if len(section) == 0:
        return
    # the method is configured but the implementation does not exist yet
    if "GapFillFluxUsingMDS" in cf[section][series].keys():
        logger.info(" GapFillFluxUsingMDS: not implemented yet")
    return
# functions for GapFillFromClimatology
def GapFillFromClimatology(ds):
    '''
    Gap fill missing data using data from the climatology spreadsheet produced by
    the climatology.py script.

    Iterates over the outputs registered in ds.climatology (see
    gfClimatology_createdict) and dispatches to the "monthly" or
    "interpolated daily" gap filling routine.
    '''
    if "climatology" not in dir(ds):
        return
    # tell the user what we are going to do
    msg = " Reading climatology file and creating climatology series"
    logger.info(msg)
    # cache of open climatology workbooks keyed by file name, so each file is
    # opened once even when several outputs share it
    cli_xlbooks = {}
    # loop over the series to be gap filled using climatology
    for output in ds.climatology.keys():
        cli_filename = ds.climatology[output]["file_name"]
        if not os.path.exists(cli_filename):
            logger.error(" GapFillFromClimatology: Climatology file %s doesn't exist", cli_filename)
            continue
        if cli_filename not in cli_xlbooks:
            cli_xlbooks[cli_filename] = xlrd.open_workbook(cli_filename)
        # local pointers to the series name and climatology method
        label = ds.climatology[output]["label_tower"]
        method = ds.climatology[output]["method"]
        # choose the gap filling method
        if method == "monthly":
            gfClimatology_monthly(ds, label, output, cli_xlbooks)
        elif method in ("interpolated daily", "interpolated_daily"):
            # BUG FIX: accept both spellings; gfClimatology_createdict writes the
            # default as "interpolated_daily", which previously fell through to
            # the "unrecognised method" branch below
            gfClimatology_interpolateddaily(ds, label, output, cli_xlbooks)
        else:
            logger.error(" GapFillFromClimatology: unrecognised method option for %s", label)
            continue
    # record that this function has been applied to the data structure
    if 'GapFillFromClimatology' not in ds.globalattributes['Functions']:
        ds.globalattributes['Functions'] = ds.globalattributes['Functions']+', GapFillFromClimatology'
def gfClimatology_interpolateddaily(ds,series,output,xlbooks):
    """
    Gap fill using data interpolated over a 2D array where the days are
    the rows and the time of day is the columns.

    Reads the worksheet "<series>i(day)" from the climatology workbook named in
    ds.climatology[output]["file_name"], flattens it to a 1D series with a
    matching datetime axis, then replaces every masked value in the tower
    series with the climatological value at the nearest matching time.
    Filled values get QC flag 40; values that could not be matched get flag 41
    and the missing value.

    :param ds: data structure holding the tower series and ds.climatology info.
    :param series: name of the tower series to gap fill.
    :param output: name of the output series to create in ds.
    :param xlbooks: dict of already-open xlrd workbooks keyed by file name.
    """
    # gap fill from interpolated 30 minute data
    xlfilename = ds.climatology[output]["file_name"]
    sheet_name = series+'i(day)'
    if sheet_name not in xlbooks[xlfilename].sheet_names():
        msg = " gfClimatology: sheet "+sheet_name+" not found, skipping ..."
        logger.warning(msg)
        return
    ldt = ds.series["DateTime"]["Data"]
    thissheet = xlbooks[xlfilename].sheet_by_name(sheet_name)
    datemode = xlbooks[xlfilename].datemode
    # Excel's 1900 date system epoch (day 0 is 1899-12-30)
    basedate = datetime.datetime(1899, 12, 30)
    # first column holds the date, first two rows are headers
    nts = thissheet.ncols - 1
    ndays = thissheet.nrows - 2
    # read the time stamp values from the climatology worksheet
    tsteps = thissheet.row_values(1,start_colx=1,end_colx=nts+1)
    # read the data from the climatology workbook
    val1d = numpy.ma.zeros(ndays*nts,dtype=numpy.float64)
    # initialise an array for the datetime of the climatological values
    cdt = [None]*nts*ndays
    # loop over the rows (days) of data
    for xlRow in range(ndays):
        # get the Excel datetime value
        xldatenumber = int(thissheet.cell_value(xlRow+2,0))
        # convert this to a Python Datetime; 1462 days corrects for workbooks
        # saved in the 1904 date system (datemode == 1)
        xldatetime = basedate+datetime.timedelta(days=xldatenumber+1462*datemode)
        # fill the climatology datetime array
        cdt[xlRow*nts:(xlRow+1)*nts] = [xldatetime+datetime.timedelta(hours=hh) for hh in tsteps]
        # fill the climatological value array
        val1d[xlRow*nts:(xlRow+1)*nts] = thissheet.row_values(xlRow+2,start_colx=1,end_colx=nts+1)
    # get the data to be filled with climatological values
    data,flag,attr = qcutils.GetSeriesasMA(ds,series)
    # get an index of missing values
    idx = numpy.where(numpy.ma.getmaskarray(data)==True)[0]
    #idx = numpy.ma.where(numpy.ma.getmaskarray(data)==True)[0]
    # there must be a better way to do this ...
    # simply using the index (idx) to set a slice of the data array to the gap filled values in val1d
    # does not seem to work (mask stays true on replaced values in data), the work around is to
    # step through the indices, find the time of the missing value in data, find the same time in the
    # gap filled values val1d and set the missing element of data to this element of val1d
    # actually ...
    # this may not be the fastest but it may be the most robust because it matches dates of missing data
    # to dates in the climatology file
    for ii in idx:
        try:
            # match the tower timestamp to the nearest climatology timestamp
            jj = qcutils.find_nearest_value(cdt, ldt[ii])
            data[ii] = val1d[jj]
            flag[ii] = numpy.int32(40)
        except ValueError:
            # no match found: leave the point missing, flag it as unfillable
            data[ii] = numpy.float64(c.missing_value)
            flag[ii] = numpy.int32(41)
    # put the gap filled data back into the data structure
    qcutils.CreateSeries(ds,output,data,flag,attr)
def gfClimatology_monthly(ds, series, output, xlbook):
    """
    Gap fill using monthly climatology.

    Reads the worksheet named after *series* from the climatology workbook and
    builds a (time-of-day, month) lookup table, then replaces missing values
    in the output series with the table value for their half-hour and month.
    Filled values get QC flag 40.

    NOTE(review): the table is hard coded to 48 rows, i.e. 30 minute data --
    confirm this matches the site time step.
    """
    # BUG FIX: the caller (GapFillFromClimatology) passes the dict of cached
    # workbooks keyed by file name, not a single workbook; accept either.
    if isinstance(xlbook, dict):
        xlbook = xlbook[ds.climatology[output]["file_name"]]
    thissheet = xlbook.sheet_by_name(series)
    val1d = numpy.zeros_like(ds.series[series]['Data'])
    values = numpy.zeros([48, 12])
    for month in range(1, 13):
        # each month occupies a 5-column group; column 2 of the group holds the values
        xlCol = (month - 1) * 5 + 2
        values[:, month - 1] = thissheet.col_values(xlCol)[2:50]
    for i in range(len(ds.series[series]['Data'])):
        # BUG FIX: numpy.int was removed in NumPy 1.24; use the builtin int
        h = int(2 * ds.series['Hdh']['Data'][i])
        m = int(ds.series['Month']['Data'][i])
        val1d[i] = values[h, m - 1]
    # replace only the missing values in the output series
    index = numpy.where(abs(ds.series[output]['Data'] - c.missing_value) < c.eps)[0]
    ds.series[output]['Data'][index] = val1d[index]
    ds.series[output]['Flag'][index] = numpy.int32(40)
# functions for GapFillUsingInterpolation
def GapFillUsingInterpolation(cf, ds):
    """
    Purpose:
     Gap fill variables in the data structure using interpolation.
     All variables in the [Variables], [Drivers] and [Fluxes] section
     are processed.
    Usage:
     qcgf.GapFillUsingInterpolation(cf,ds)
     where cf is a control file object
           ds is a data structure
    Author: PRI
    Date: September 2016
    """
    label_list = qcutils.get_label_list_from_cf(cf)
    # default maximum gap length (number of records) from the [Options] section
    default_maxlen = int(qcutils.get_keyvaluefromcf(cf, ["Options"], "MaxGapInterpolate", default=2))
    if default_maxlen == 0:
        msg = " Gap fill by interpolation disabled in control file"
        logger.info(msg)
        return
    for label in label_list:
        section = qcutils.get_cfsection(cf, series=label)
        # reset to the default for every label; previously a per-variable
        # override leaked into the following labels
        maxlen = default_maxlen
        if "MaxGapInterpolate" in cf[section][label]:
            # per-variable override of the maximum gap length
            maxlen = int(qcutils.get_keyvaluefromcf(cf, [section, label], "MaxGapInterpolate", default=2))
            if maxlen == 0:
                msg = " Gap fill by interpolation disabled for "+label
                logger.info(msg)
                continue
        # BUG FIX: maxlen was hard coded to 2 here, ignoring the configured value
        qcts.InterpolateOverMissing(ds, series=label, maxlen=maxlen)
# miscellaneous L4 routines
def gf_getdiurnalstats(DecHour, Data, ts):
    """
    Bin Data by time of day (interval width ts minutes) and return per-bin
    statistics.

    :param DecHour: decimal hour of day for each record (same length as Data).
    :param Data: data values; entries equal to c.missing_value are excluded.
    :param ts: time step in minutes.
    :return: tuple (Num, Hr, Av, Sd, Mx, Mn) of per-interval count, decimal
             hour, mean, standard deviation, maximum and minimum; statistics
             of empty intervals stay masked.
    """
    # number of time-of-day intervals in one day
    nInts = 24 * int((60 / ts) + 0.5)
    Num = numpy.ma.zeros(nInts, dtype=int)
    Hr = numpy.ma.zeros(nInts, dtype=float)
    for i in range(nInts):
        Hr[i] = float(i) * ts / 60.
    # statistics start fully masked; only bins with data are unmasked below
    Av = numpy.ma.masked_all(nInts)
    Sd = numpy.ma.masked_all(nInts)
    Mx = numpy.ma.masked_all(nInts)
    Mn = numpy.ma.masked_all(nInts)
    if numpy.size(Data) != 0:
        for i in range(nInts):
            # records in this time-of-day bin that are not the missing value
            li = numpy.ma.where((abs(DecHour - Hr[i]) < c.eps) & (abs(Data - float(c.missing_value)) > c.eps))
            Num[i] = numpy.size(li)
            if Num[i] != 0:
                Av[i] = numpy.ma.mean(Data[li])
                Sd[i] = numpy.ma.std(Data[li])
                # BUG FIX: the single-argument (reduction) form of
                # numpy.ma.maximum/minimum was removed from NumPy;
                # use numpy.ma.max/min, which behave identically here
                Mx[i] = numpy.ma.max(Data[li])
                Mn[i] = numpy.ma.min(Data[li])
    return Num, Hr, Av, Sd, Mx, Mn
def gf_getdateticks(start, end):
    """
    Choose a matplotlib date locator and formatter suited to the span between
    *start* and *end* (both datetimes) and return them as (locator, formatter).
    """
    from datetime import timedelta as td
    span = end - start
    # (upper bound on the span, locator factory, strftime format), in
    # increasing span order; the first matching rule wins
    rules = [
        (td(minutes=10), lambda: mdt.MinuteLocator(), '%H:%M'),
        (td(minutes=30), lambda: mdt.MinuteLocator(byminute=range(0, 60, 5)), '%H:%M'),
        (td(hours=1), lambda: mdt.MinuteLocator(byminute=range(0, 60, 15)), '%H:%M'),
        (td(hours=6), lambda: mdt.HourLocator(), '%H:%M'),
        (td(days=1), lambda: mdt.HourLocator(byhour=range(0, 24, 3)), '%H:%M'),
        (td(days=3), lambda: mdt.HourLocator(byhour=range(0, 24, 12)), '%d/%m %H'),
        (td(weeks=2), lambda: mdt.DayLocator(), '%d/%m'),
        (td(weeks=12), lambda: mdt.WeekdayLocator(), '%d/%m'),
        (td(weeks=104), lambda: mdt.MonthLocator(), '%d/%m'),
        (td(weeks=208), lambda: mdt.MonthLocator(interval=3), '%d/%m/%y'),
    ]
    for limit, make_locator, fmt_string in rules:
        if span <= limit:
            return make_locator(), mdt.DateFormatter(fmt_string)
    # spans longer than four years: one tick every six months
    return mdt.MonthLocator(interval=6), mdt.DateFormatter('%d/%m/%y')
def ImportSeries(cf,ds):
    """
    Import series listed in the [Imports] section of the control file from
    external netCDF files into the data structure.

    For each entry, reads "file_name" and "var_name", loads the variable from
    the external file, aligns it on the tower datetime axis and creates it as
    a new series in ds.  Records with no matching timestamp keep the missing
    value and flag 1.

    :param cf: control file object; may or may not contain an [Imports] section.
    :param ds: data structure to import the series into.
    """
    # check to see if there is an Imports section
    if "Imports" not in cf.keys(): return
    # number of records
    nRecs = int(ds.globalattributes["nc_nrecs"])
    # get the start and end datetime
    ldt = ds.series["DateTime"]["Data"]
    start_date = ldt[0]
    end_date = ldt[-1]
    # loop over the series in the Imports section
    for label in cf["Imports"].keys():
        import_filename = qcutils.get_keyvaluefromcf(cf,["Imports",label],"file_name",default="")
        if import_filename=="":
            msg = " ImportSeries: import filename not found in control file, skipping ..."
            logger.warning(msg)
            continue
        var_name = qcutils.get_keyvaluefromcf(cf,["Imports",label],"var_name",default="")
        if var_name=="":
            msg = " ImportSeries: variable name not found in control file, skipping ..."
            logger.warning(msg)
            continue
        # read the external netCDF file
        ds_import = qcio.nc_read_series(import_filename)
        ts_import = ds_import.globalattributes["time_step"]
        ldt_import = ds_import.series["DateTime"]["Data"]
        # indices of the tower start and end dates in the imported file
        si = qcutils.GetDateIndex(ldt_import,str(start_date),ts=ts_import,default=0,match="exact")
        ei = qcutils.GetDateIndex(ldt_import,str(end_date),ts=ts_import,default=len(ldt_import)-1,match="exact")
        # start from all-missing data with flag 1
        data = numpy.ma.ones(nRecs)*float(c.missing_value)
        flag = numpy.ma.ones(nRecs)
        data_import,flag_import,attr_import = qcutils.GetSeriesasMA(ds_import,var_name,si=si,ei=ei)
        ldt_import = ldt_import[si:ei+1]
        # map imported timestamps onto the tower datetime axis
        index = qcutils.FindIndicesOfBInA(ldt_import,ldt)
        data[index] = data_import
        flag[index] = flag_import
        qcutils.CreateSeries(ds,label,data,flag,attr_import)
| 50.472477 | 134 | 0.647217 |
ace140eea5238ab68c05533b5e00871e940ad10d | 7,584 | py | Python | tests/ignite/test_utils.py | OBITORASU/ignite | be2bff832b88cfbc0a21828a38c664930f297409 | [
"BSD-3-Clause"
] | 1 | 2021-06-17T12:17:03.000Z | 2021-06-17T12:17:03.000Z | tests/ignite/test_utils.py | lidq92/ignite | f1ecb3aa4ae0fe450f01e1a6dc24b660769b6cd9 | [
"BSD-3-Clause"
] | null | null | null | tests/ignite/test_utils.py | lidq92/ignite | f1ecb3aa4ae0fe450f01e1a6dc24b660769b6cd9 | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
import sys
import warnings
from collections import namedtuple
import pytest
import torch
from ignite.engine import Engine, Events
from ignite.utils import convert_tensor, deprecated, setup_logger, to_onehot
def test_convert_tensor():
    """convert_tensor must pass tensors through (honouring device/non_blocking),
    recurse into lists, tuples, namedtuples and dicts, leave strings untouched
    and raise TypeError on unsupported types."""
    # single tensor, with and without device/non_blocking arguments
    x = torch.tensor([0.0])
    tensor = convert_tensor(x)
    assert torch.is_tensor(tensor)
    x = torch.tensor([0.0])
    tensor = convert_tensor(x, device="cpu", non_blocking=True)
    assert torch.is_tensor(tensor)
    x = torch.tensor([0.0])
    tensor = convert_tensor(x, device="cpu", non_blocking=False)
    assert torch.is_tensor(tensor)
    # list of tensors: container type preserved, elements converted
    x = [torch.tensor([0.0]), torch.tensor([0.0])]
    list_ = convert_tensor(x)
    assert isinstance(list_, list)
    assert torch.is_tensor(list_[0])
    assert torch.is_tensor(list_[1])
    # plain tuple of tensors
    x = (torch.tensor([0.0]), torch.tensor([0.0]))
    tuple_ = convert_tensor(x)
    assert isinstance(tuple_, tuple)
    assert torch.is_tensor(tuple_[0])
    assert torch.is_tensor(tuple_[1])
    # namedtuple: the concrete namedtuple class must be preserved
    Point = namedtuple("Point", ["x", "y"])
    x = Point(torch.tensor([0.0]), torch.tensor([0.0]))
    tuple_ = convert_tensor(x)
    assert isinstance(tuple_, Point)
    assert torch.is_tensor(tuple_[0])
    assert torch.is_tensor(tuple_[1])
    # dict of tensors: keys preserved, values converted
    x = {"a": torch.tensor([0.0]), "b": torch.tensor([0.0])}
    dict_ = convert_tensor(x)
    assert isinstance(dict_, dict)
    assert torch.is_tensor(dict_["a"])
    assert torch.is_tensor(dict_["b"])
    # strings pass through unchanged; unsupported types raise
    assert convert_tensor("a") == "a"
    with pytest.raises(TypeError):
        convert_tensor(12345)
def test_to_onehot():
    """to_onehot must produce a correct one-hot encoding for 1D and N-D index
    tensors, round-trip through argmax, and work under TorchScript both as a
    raw function and inside a torch.nn.Module."""
    # simple 1D case: indices 0..3 over 4 classes is the identity matrix
    indices = torch.tensor([0, 1, 2, 3], dtype=torch.long)
    actual = to_onehot(indices, 4)
    expected = torch.eye(4, dtype=torch.uint8)
    assert actual.equal(expected)
    # round-trip: argmax over the one-hot dim recovers the original indices (1D)
    y = torch.randint(0, 21, size=(1000,))
    y_ohe = to_onehot(y, num_classes=21)
    y2 = torch.argmax(y_ohe, dim=1)
    assert y.equal(y2)
    # same round-trip with a 3D index tensor
    y = torch.randint(0, 21, size=(4, 250, 255))
    y_ohe = to_onehot(y, num_classes=21)
    y2 = torch.argmax(y_ohe, dim=1)
    assert y.equal(y2)
    # and with a 5D index tensor
    y = torch.randint(0, 21, size=(4, 150, 155, 4, 6))
    y_ohe = to_onehot(y, num_classes=21)
    y2 = torch.argmax(y_ohe, dim=1)
    assert y.equal(y2)
    # Test with `TorchScript`
    x = torch.tensor([0, 1, 2, 3])
    # Test the raw `to_onehot` function
    scripted_to_onehot = torch.jit.script(to_onehot)
    assert scripted_to_onehot(x, 4).allclose(to_onehot(x, 4))
    # Test inside `torch.nn.Module`
    class SLP(torch.nn.Module):
        def __init__(self):
            super(SLP, self).__init__()
            self.linear = torch.nn.Linear(4, 1)
        def forward(self, x):
            x = to_onehot(x, 4)
            return self.linear(x.to(torch.float))
    eager_model = SLP()
    scripted_model = torch.jit.script(eager_model)
    assert eager_model(x).allclose(scripted_model(x))
def test_dist_setup_logger():
    """On non-zero distributed ranks, setup_logger must not apply the requested level."""
    rank1_logger = setup_logger("trainer", level=logging.CRITICAL, distributed_rank=1)
    assert rank1_logger.level != logging.CRITICAL
def test_setup_logger(capsys, dirname):
    """setup_logger with reset=True must replace pre-existing handlers with
    exactly one stream handler and one file handler, and the engine log lines
    must appear both on the chosen stream and in the log file."""
    trainer = Engine(lambda e, b: None)
    evaluator = Engine(lambda e, b: None)
    # a freshly created engine logger has no handlers ...
    assert len(trainer.logger.handlers) == 0
    # ... add some dummies so that reset=True has something to remove
    trainer.logger.addHandler(logging.NullHandler())
    trainer.logger.addHandler(logging.NullHandler())
    trainer.logger.addHandler(logging.NullHandler())
    fp = os.path.join(dirname, "log")
    def _test(stream):
        trainer.logger = setup_logger("trainer", stream=stream, filepath=fp, reset=True)
        evaluator.logger = setup_logger("evaluator", stream=stream, filepath=fp, reset=True)
        # reset=True leaves exactly stream handler + file handler
        assert len(trainer.logger.handlers) == 2
        assert len(evaluator.logger.handlers) == 2
        @trainer.on(Events.EPOCH_COMPLETED)
        def _(_):
            evaluator.run([0, 1, 2])
        trainer.run([0, 1, 2, 3, 4, 5], max_epochs=5)
        captured = capsys.readouterr()
        # stream=None defaults to stderr
        if stream is sys.stdout:
            err = captured.out.split("\n")
        else:
            err = captured.err.split("\n")
        with open(fp, "r") as h:
            data = h.readlines()
        # the same messages must be on the stream and in the file
        for source in [err, data]:
            assert "trainer INFO: Engine run starting with max_epochs=5." in source[0]
            assert "evaluator INFO: Engine run starting with max_epochs=1." in source[1]
    _test(stream=None)
    _test(stream=sys.stderr)
    _test(stream=sys.stdout)
    # Needed by windows to release FileHandler in the loggers
    logging.shutdown()
def _setup_a_logger_and_dump(name, message):
    """Create (or fetch) a logger via setup_logger and emit *message* at INFO level."""
    setup_logger(name).info(message)
def test_override_setup_logger(capsys):
    """Calling setup_logger again with reset=True must override the level of an
    already-configured logger, silencing subsequent INFO messages."""
    # first call: logger at default level, INFO message reaches stderr
    _setup_a_logger_and_dump(__name__, "test_override_setup_logger")
    source = capsys.readouterr().err.split("\n")
    assert "tests.ignite.test_utils INFO: test_override_setup_logger" in source[0]
    # change the logger level of _setup_a_logger_and_dump
    setup_logger(name=__name__, level=logging.WARNING, reset=True)
    # second call: INFO is now below the WARNING threshold, nothing is emitted
    _setup_a_logger_and_dump(__name__, "test_override_setup_logger")
    source = capsys.readouterr().err.split("\n")
    assert source[0] == ""
    # Needed by windows to release FileHandler in the loggers
    logging.shutdown()
def test_deprecated():
    """Exercise the @deprecated decorator: docstring rewriting (with and
    without original docs and reasons), DeprecationWarning emission, and the
    raise_exception=True mode.

    NOTE: the inner functions' docstrings are asserted against literally --
    do not edit them."""
    # Test on function without docs, @deprecated without reasons
    @deprecated("0.4.2", "0.6.0")
    def func_no_docs():
        return 24
    assert func_no_docs.__doc__ == "**Deprecated function**.\n\n    .. deprecated:: 0.4.2"
    # Test on function with docs, @deprecated without reasons
    @deprecated("0.4.2", "0.6.0")
    def func_no_reasons():
        """Docs are cool
        """
        return 24
    assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n    Docs are cool\n    .. deprecated:: 0.4.2"
    # Test on function with docs, @deprecated with reasons
    @deprecated("0.4.2", "0.6.0", reasons=("r1", "r2"))
    def func_no_warnings():
        """Docs are very cool
        """
        return 24
    assert (
        func_no_warnings.__doc__
        == "**Deprecated function**.\n\n    Docs are very cool\n    .. deprecated:: 0.4.2\n\n\t\n\t- r1\n\t- r2"
    )
    # Tests that the function emits DeprecationWarning
    @deprecated("0.4.2", "0.6.0", reasons=("r1", "r2"))
    def func_check_warning():
        """Docs are very ...
        """
        return 24
    with pytest.deprecated_call():
        func_check_warning()
    # the decorated function still returns its original value
    assert func_check_warning() == 24
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        func_check_warning()
        # Verify some things
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        assert (
            "This function has been deprecated since version 0.4.2 and will be removed in version 0.6.0."
            + "\n Please refer to the documentation for more details."
            in str(w[-1].message)
        )
    # Test that the function raises Exception
    @deprecated("0.4.2", "0.6.0", reasons=("reason1", "reason2"), raise_exception=True)
    def func_with_everything():
        return 1
    with pytest.raises(Exception) as exec_info:
        func_with_everything()
    assert (
        str(exec_info.value)
        == "This function has been deprecated since version 0.4.2 and will be removed in version 0.6.0."
        + "\n Please refer to the documentation for more details."
    )
def test_smoke__utils():
    # The legacy ignite._utils module must keep re-exporting these names.
    from ignite._utils import apply_to_tensor, apply_to_type, convert_tensor, to_onehot  # noqa: F401
| 29.976285 | 116 | 0.64557 |
ace140faf451916c2ed9a3fbaf862eacc28d59cf | 38 | py | Python | py-Goldsberry/goldsberry/player/__init__.py | LiuHao-THU/nba_logistic_regression | fcb8198b8b2a65ac7835b039dd195d7d3560058e | [
"Apache-2.0"
] | 3 | 2018-02-24T06:54:58.000Z | 2019-12-04T19:04:23.000Z | py-Goldsberry/goldsberry/player/__init__.py | LiuHao-THU/nba_logistic_regression | fcb8198b8b2a65ac7835b039dd195d7d3560058e | [
"Apache-2.0"
] | null | null | null | py-Goldsberry/goldsberry/player/__init__.py | LiuHao-THU/nba_logistic_regression | fcb8198b8b2a65ac7835b039dd195d7d3560058e | [
"Apache-2.0"
] | null | null | null | from goldsberry.player.player import * | 38 | 38 | 0.842105 |
ace140fafd998ce3b8b71a7f4762a691746228a7 | 23,582 | py | Python | PyInstaller/utils/conftest.py | SnoopJeDi/pyinstaller | 20f922cdac0207e943395bcae4a23d597558c5f7 | [
"Apache-2.0"
] | null | null | null | PyInstaller/utils/conftest.py | SnoopJeDi/pyinstaller | 20f922cdac0207e943395bcae4a23d597558c5f7 | [
"Apache-2.0"
] | null | null | null | PyInstaller/utils/conftest.py | SnoopJeDi/pyinstaller | 20f922cdac0207e943395bcae4a23d597558c5f7 | [
"Apache-2.0"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2022, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import copy
import glob
import logging
import os
import re
import shutil
import subprocess
from contextlib import suppress
# Set a handler for the root-logger to inhibit 'basicConfig()' (called in PyInstaller.log) is setting up a stream
# handler writing to stderr. This avoids log messages to be written (and captured) twice: once on stderr and
# once by pytests's caplog.
logging.getLogger().addHandler(logging.NullHandler())
# Manages subprocess timeout.
import psutil # noqa: E402
import py # noqa: E402
import pytest # noqa: E402
import sys # noqa: E402
# Expand sys.path with PyInstaller source.
_ROOT_DIR = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
sys.path.append(_ROOT_DIR)
from PyInstaller import __main__ as pyi_main # noqa: E402
from PyInstaller import configure # noqa: E402
from PyInstaller.compat import architecture, is_darwin, is_linux, is_win, safe_repr # noqa: E402
from PyInstaller.depend.analysis import initialize_modgraph # noqa: E402
from PyInstaller.utils.cliutils import archive_viewer # noqa: E402
from PyInstaller.utils.tests import gen_sourcefile # noqa: E402
from PyInstaller.utils.win32 import winutils # noqa: E402
# Timeout for running the executable. If executable does not exit in this time, it is interpreted as a test failure.
_EXE_TIMEOUT = 3 * 60  # In sec.
# Number of retries we should attempt if the executable times out (each retry re-runs with the same timeout).
_MAX_RETRIES = 2
# All currently supported platforms; must match the marker names declared in setup.cfg [tool:pytest].
SUPPORTED_OSES = {"darwin", "linux", "win32"}
# Fixtures
# --------
@pytest.fixture
def SPEC_DIR(request):
    """Directory where the test spec-files reside, as a py.path.local."""
    return py.path.local(_get_spec_dir(request))
@pytest.fixture
def SCRIPT_DIR(request):
    """Directory where the test scripts reside, as a py.path.local."""
    return py.path.local(_get_script_dir(request))
def pytest_runtest_setup(item):
    """
    Skip platform-specific tests when running on a different OS.

    Tests can be restricted to one or more platforms with the markers declared
    in setup.cfg [tool:pytest]:

    - @pytest.mark.darwin (macOS)
    - @pytest.mark.linux (GNU/Linux)
    - @pytest.mark.win32 (Windows)

    A test carrying any of these markers runs only if sys.platform matches one
    of them. See
    https://pytest.org/en/stable/example/markers.html#marking-platform-specific-tests-with-pytest
    """
    requested_platforms = {mark.name for mark in item.iter_markers()} & SUPPORTED_OSES
    if requested_platforms and sys.platform not in requested_platforms:
        pytest.skip("does not run on %s" % sys.platform)
# Hook wrapper that records the TestReport of every test phase on the item as
# item.rep_setup / item.rep_call / item.rep_teardown, so fixtures can inspect
# the test outcome (e.g. whether the test failed) during teardown.
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Execute all other hooks to obtain the report object.
    outcome = yield
    rep = outcome.get_result()
    # Set a report attribute for each phase of a call, which can be "setup", "call", "teardown".
    setattr(item, "rep_" + rep.when, rep)
# Return the base directory which contains the current test module.
def _get_base_dir(request):
return os.path.dirname(os.path.abspath(request.fspath.strpath))
# Directory with Python scripts for functional tests.
def _get_script_dir(request):
return os.path.join(_get_base_dir(request), 'scripts')
# Directory with testing modules used in some tests.
def _get_modules_dir(request):
return os.path.join(_get_base_dir(request), 'modules')
# Directory with .toc log files.
def _get_logs_dir(request):
return os.path.join(_get_base_dir(request), 'logs')
# Return the directory where data for tests is located.
def _get_data_dir(request):
return os.path.join(_get_base_dir(request), 'data')
# Directory with .spec files used in some tests.
def _get_spec_dir(request):
return os.path.join(_get_base_dir(request), 'specs')
@pytest.fixture
def script_dir(request):
    """Directory containing the functional-test scripts, as a py.path.local."""
    return py.path.local(_get_script_dir(request))
# A helper function to copy from data/dir to tmpdir/data.
def _data_dir_copy(request, subdir_name, tmpdir):
    """
    Copy the test data directory ``data/<subdir_name>`` into
    ``<tmpdir>/data/<subdir_name>`` and return the path of the copy.

    :param request: pytest request object of the current test.
    :param subdir_name: name of the subdirectory under data/ to copy.
    :param tmpdir: per-test tmpdir object (https://pytest.org/latest/tmpdir.html).
    """
    source = py.path.local(_get_data_dir(request)).join(subdir_name)
    destination = tmpdir.join('data', subdir_name)
    shutil.copytree(source.strpath, destination.strpath)
    # Hand back the copy so the test can freely use (and modify) the data.
    return destination
# Fixture providing a per-test copy of the test's data directory.
@pytest.fixture
def data_dir(request, tmpdir):
    """
    Copy the data directory named after the current test (without its 'test_'
    prefix) into tmpdir and return the path of the copy.
    """
    # request.function.__name__ is 'test_<name>'; drop the leading 'test_'.
    test_name = request.function.__name__[5:]
    return _data_dir_copy(request, test_name, tmpdir)
class AppBuilder:
    def __init__(self, tmpdir, request, bundle_mode):
        """
        :param tmpdir: per-test temporary directory; spec/build/dist output goes here.
        :param request: pytest request object of the test using this builder.
        :param bundle_mode: PyInstaller bundle mode -- presumably 'onedir'/'onefile';
                            confirm at the fixture that instantiates AppBuilder.
        """
        self._tmpdir = tmpdir
        self._request = request
        self._mode = bundle_mode
        # PyInstaller work directories, all rooted in the per-test tmpdir
        self._specdir = str(tmpdir)
        self._distdir = str(tmpdir / 'dist')
        self._builddir = str(tmpdir / 'build')
        # set to True by test_spec() so test_script() treats its input as a .spec file
        self._is_spec = False
    def test_spec(self, specfile, *args, **kwargs):
        """
        Test a Python script that is referenced in the supplied .spec file.

        *specfile* is resolved relative to the test's specs/ directory; all
        other arguments are passed straight on to `test_script`.
        """
        __tracebackhide__ = True
        specfile = os.path.join(_get_spec_dir(self._request), specfile)
        # 'test_script' should handle .spec properly as script.
        self._is_spec = True
        return self.test_script(specfile, *args, **kwargs)
    def test_source(self, source, *args, **kwargs):
        """
        Test a Python script given as source code.

        The source will be written into a file named like the test-function. This file will then be passed to
        `test_script`. If you need other related files, e.g., a `.toc`-file for testing the content, put them at
        the normal place. Just mind to take the basename from the test-function's name.

        :param source: Source code to create executable from. This will be saved into a temporary file which is then
                       passed on to `test_script`.
        :param test_id: Test-id for parametrized tests. If given, it will be appended to the script filename, separated
                        by two underscores.

        All other arguments are passed straight on to `test_script`.

        Ensure that the caller of `test_source` is in a UTF-8 encoded file with the correct '# -*- coding: utf-8 -*-'
        marker.
        """
        __tracebackhide__ = True
        # For parametrized test append the test-id.
        scriptfile = gen_sourcefile(self._tmpdir, source, kwargs.setdefault('test_id'))
        del kwargs['test_id']
        return self.test_script(str(scriptfile), *args, **kwargs)
    def test_script(
        self, script, pyi_args=None, app_name=None, app_args=None, runtime=None, run_from_path=False, **kwargs
    ):
        """
        Main method to wrap all phases of testing a Python script: build the
        executable with PyInstaller, then run it and check the outcome.

        :param script: Name of script (or .spec file when called via test_spec) to create executable from.
                       A relative path is resolved against the test's scripts/ directory.
        :param pyi_args: Additional arguments to pass to PyInstaller when creating executable.
        :param app_name: Name of the executable. This is equivalent to argument --name=APPNAME.
                         Defaults to the script's basename without extension.
        :param app_args: Additional arguments to pass to the created executable.
        :param runtime: Time in seconds how long to keep executable running.
        :param run_from_path: If True, run the executable via the system PATH rather than by absolute path.
        :param kwargs: Passed on to _test_executables (e.g. 'retcode' for the expected exit code).
        """
        __tracebackhide__ = True
        def marker(line):
            # Print some marker to stdout and stderr to make it easier to distinguish the phases in the CI test output.
            print('-------', line, '-------')
            print('-------', line, '-------', file=sys.stderr)
        if pyi_args is None:
            pyi_args = []
        if app_args is None:
            app_args = []
        if app_name:
            # When building from a .spec file the name is fixed by the spec, so --name is not passed.
            if not self._is_spec:
                pyi_args.extend(['--name', app_name])
        else:
            # Derive name from script name.
            app_name = os.path.splitext(os.path.basename(script))[0]
        # Relative path means that a script from _script_dir is referenced.
        if not os.path.isabs(script):
            script = os.path.join(_get_script_dir(self._request), script)
        self.script = script
        assert os.path.exists(self.script), 'Script %s not found.' % script
        marker('Starting build.')
        if not self._test_building(args=pyi_args):
            pytest.fail('Building of %s failed.' % script)
        marker('Build finished, now running executable.')
        self._test_executables(app_name, args=app_args, runtime=runtime, run_from_path=run_from_path, **kwargs)
        marker('Running executable finished.')
def _test_executables(self, name, args, runtime, run_from_path, **kwargs):
    """
    Run created executable to make sure it works.

    Multipackage-tests generate more than one exe-file and all of them have to be run.

    :param args: CLI options to pass to the created executable.
    :param runtime: Time in seconds how long to keep the executable running.

    Fails the test via pytest.fail on a .toc mismatch or an unexpected exit code
    (expected code taken from kwargs['retcode'], default 0); returns None.
    """
    __tracebackhide__ = True
    exes = self._find_executables(name)
    # Empty list means that PyInstaller probably failed to create any executable.
    assert exes != [], 'No executable file was found.'
    for exe in exes:
        # Try to find .toc log file. .toc log file has the same basename as exe file.
        toc_log = os.path.join(_get_logs_dir(self._request), os.path.splitext(os.path.basename(exe))[0] + '.toc')
        if os.path.exists(toc_log):
            if not self._examine_executable(exe, toc_log):
                pytest.fail('Matching .toc of %s failed.' % exe)
        retcode = self._run_executable(exe, args, run_from_path, runtime)
        if retcode != kwargs.get('retcode', 0):
            pytest.fail('Running exe %s failed with return-code %s.' % (exe, retcode))
def _find_executables(self, name):
    """
    Search for all executables generated by the test case.

    If the test case is called e.g. 'test_multipackage1', this searches for each of
    'test_multipackage1.exe' and 'multipackage1_?.exe' in both one-file and one-dir mode.

    :param name: Name of the executable to look for.
    :return: List of executables
    """
    onedir_base = os.path.join(self._distdir, name, name)
    onefile_base = os.path.join(self._distdir, name)
    patterns = [
        onedir_base,
        onefile_base,
        onedir_base + '_?',   # Multipackage one-dir
        onefile_base + '_?',  # Multipackage one-file
    ]
    # On Windows, executables carry an .exe suffix.
    if is_win:
        patterns = [pattern + '.exe' for pattern in patterns]
    # On macOS, also look inside .app bundles, e.g.: ./dist/name.app/Contents/MacOS/name
    if is_darwin:
        patterns.append(os.path.join(self._distdir, name + '.app', 'Contents', 'MacOS', name))
    # Expand all glob patterns, keeping only regular files.
    return [
        prog
        for pattern in patterns
        for prog in glob.glob(pattern)
        if os.path.isfile(prog)
    ]
def _run_executable(self, prog, args, run_from_path, runtime):
    """
    Run an executable created by PyInstaller, retrying up to _MAX_RETRIES times on timeout.

    :param prog: Path to the executable.
    :param args: CLI options to pass to the created executable.
    :param run_from_path: If True, run from self._tmpdir with the executable's directory appended
        to $PATH — i.e., pretend we are a shell executing the program from $PATH.
    :param runtime: Time in seconds how long to keep the executable running.
    :return: Exit code of the executable (1 denotes a timeout, see _run_executable_).
    """
    # Run the test in a clean environment to make sure they're really self-contained.
    prog_env = copy.deepcopy(os.environ)
    # pop() with a default removes PATH if present and never raises — replaces the
    # previous set-then-delete dance (prog_env['PATH'] = ''; del prog_env['PATH']).
    prog_env.pop('PATH', None)
    # For Windows we need to keep minimal PATH for successful running of some tests.
    if is_win:
        # Minimum Windows PATH is in most cases: C:\Windows\system32;C:\Windows
        prog_env['PATH'] = os.pathsep.join(winutils.get_system_path())

    exe_path = prog
    if run_from_path:
        # Run executable in the temp directory. Add the directory containing the executable to $PATH. Basically,
        # pretend we are a shell executing the program from $PATH.
        prog_cwd = str(self._tmpdir)
        prog_name = os.path.basename(prog)
        prog_env['PATH'] = os.pathsep.join([prog_env.get('PATH', ''), os.path.dirname(prog)])
    else:
        # Run executable in the directory where it is.
        prog_cwd = os.path.dirname(prog)
        # The executable will be called with argv[0] as relative not absolute path.
        prog_name = os.path.join(os.curdir, os.path.basename(prog))

    args = [prog_name] + args
    # Using sys.stdout/sys.stderr for subprocess fixes printing messages in Windows command prompt. Py.test is then
    # able to collect stdout/stderr messages and display them if a test fails.
    for _ in range(_MAX_RETRIES):
        retcode = self._run_executable_(args, exe_path, prog_env, prog_cwd, runtime)
        if retcode != 1:  # retcode == 1 means a timeout
            break
    return retcode
def _run_executable_(self, args, exe_path, prog_env, prog_cwd, runtime):
    """Spawn the executable once and wait for it; return the exit code.

    On timeout: returns 0 when 'runtime' was requested (the program surviving
    that long counts as success), 1 otherwise, killing the process tree either way.
    """
    process = psutil.Popen(
        args, executable=exe_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=prog_env, cwd=prog_cwd
    )

    def _msg(*text):
        # Prefix every message with the subprocess PID for easier log correlation.
        print('[' + str(process.pid) + '] ', *text)

    # Run executable. stderr is redirected to stdout.
    _msg('RUNNING: ', safe_repr(exe_path), ', args: ', safe_repr(args))

    # 'psutil' allows to use timeout in waiting for a subprocess. If no timeout was specified then it is 'None' -
    # no timeout, just waiting. Runtime is useful mostly for interactive tests.
    try:
        timeout = runtime if runtime else _EXE_TIMEOUT
        stdout, stderr = process.communicate(timeout=timeout)
        retcode = process.returncode
    except (psutil.TimeoutExpired, subprocess.TimeoutExpired):
        if runtime:
            # When 'runtime' is set, the expired timeout is a good sign that the executable was running successfully
            # for a specified time.
            # TODO: is there a better way return success than 'retcode = 0'?
            retcode = 0
        else:
            # Exe is running and it is not interactive. Fail the test.
            retcode = 1
            _msg(f'TIMED OUT while running executable (timeout: {timeout} sec)!')
        # Kill the subprocess and its child processes.
        for p in list(process.children(recursive=True)) + [process]:
            with suppress(psutil.NoSuchProcess):
                p.kill()
        # Collect whatever output was produced before the kill.
        stdout, stderr = process.communicate()

    sys.stdout.buffer.write(stdout)
    sys.stderr.buffer.write(stderr)

    return retcode
def _test_building(self, args):
    """
    Run building of test script.

    :param args: additional CLI options for PyInstaller.

    Return True if build succeeded False otherwise.
    """
    if self._is_spec:
        # Spec-based builds: the spec file already fixes the layout, so only output paths are set.
        default_args = [
            '--distpath', self._distdir,
            '--workpath', self._builddir,
            '--log-level=INFO',
        ]  # yapf: disable
    else:
        default_args = [
            '--debug=bootloader',
            '--noupx',
            '--specpath', self._specdir,
            '--distpath', self._distdir,
            '--workpath', self._builddir,
            '--path', _get_modules_dir(self._request),
            '--log-level=INFO',
        ]  # yapf: disable

    # Choose bundle mode.
    if self._mode == 'onedir':
        default_args.append('--onedir')
    elif self._mode == 'onefile':
        default_args.append('--onefile')
    # if self._mode is None then just the spec file was supplied.

    pyi_args = [self.script] + default_args + args
    # TODO: fix return code in running PyInstaller programmatically.
    PYI_CONFIG = configure.get_config(upx_dir=None)
    # Override CACHEDIR for PyInstaller and put it into self.tmpdir
    PYI_CONFIG['cachedir'] = str(self._tmpdir)
    pyi_main.run(pyi_args, PYI_CONFIG)
    # NOTE(review): presumably pyi_main.run raises on failure, hence success is
    # hard-coded here (see TODO above) — confirm against PyInstaller's main().
    retcode = 0
    return retcode == 0
def _examine_executable(self, exe, toc_log):
    """
    Compare the archive content of *exe* against the regex patterns listed in the
    .toc log file (now used mostly by multipackage test_name).

    :param exe: Path to the executable whose archive is inspected.
    :param toc_log: Path to a file holding a Python-literal list of regex patterns.
    :return: True if every pattern matches some archive entry (.toc files match).
    """
    print('EXECUTING MATCHING:', toc_log)
    # Materialize once; the previous identity comprehension [fn for fn in ...] was redundant.
    fname_list = list(archive_viewer.get_archive_content(exe))
    with open(toc_log, 'r') as f:
        # NOTE: eval() of a trusted, repo-local test fixture; never point this at untrusted input.
        pattern_list = eval(f.read())
    # Alphabetical order of patterns.
    pattern_list.sort()
    missing = []
    for pattern in pattern_list:
        for fname in fname_list:
            if re.match(pattern, fname):
                print('MATCH:', pattern, '-->', fname)
                break
        else:
            # No matching entry found
            missing.append(pattern)
            print('MISSING:', pattern)

    # Not all modules matched. Stop comparing other .toc files and fail the test.
    if missing:
        for m in missing:
            print('Missing', m, 'in', exe)
        return False
    # All patterns matched.
    return True
# Scope 'session' should keep the object unchanged for whole tests. This fixture caches basic module graph dependencies
# that are same for every executable.
@pytest.fixture(scope='session')
def pyi_modgraph():
    """Session-scoped fixture: build the basic module graph once and cache it for all tests."""
    # Explicitly set the log level since the plugin `pytest-catchlog` (un-) sets the root logger's level to NOTSET for
    # the setup phase, which will lead to TRACE messages been written out.
    import PyInstaller.log as logging  # note: intentionally shadows the stdlib 'logging' name locally
    logging.logger.setLevel(logging.DEBUG)
    initialize_modgraph()
# Run by default test as onedir and onefile.
@pytest.fixture(params=['onedir', 'onefile'])
def pyi_builder(tmpdir, monkeypatch, request, pyi_modgraph):
    """Yield an AppBuilder parametrized over onedir/onefile modes, with a clean
    environment and CWD; on POSIX the tmpdir is deleted after a fully passing test."""
    # Save/restore environment variable PATH.
    monkeypatch.setenv('PATH', os.environ['PATH'])
    # PyInstaller or a test case might manipulate 'sys.path'. Reset it for every test.
    monkeypatch.syspath_prepend(None)
    # Set current working directory to the per-test temporary directory.
    monkeypatch.chdir(tmpdir)
    # Clean up configuration and force PyInstaller to do a clean configuration for another app/test. The value is same
    # as the original value.
    monkeypatch.setattr('PyInstaller.config.CONF', {'pathex': []})
    yield AppBuilder(tmpdir, request, request.param)
    # Reclaim disk space: remove the build artifacts, but only when both setup and the
    # test itself passed (failed tests keep their artifacts for debugging).
    if is_darwin or is_linux:
        if request.node.rep_setup.passed:
            if request.node.rep_call.passed:
                if tmpdir.exists():
                    tmpdir.remove(rec=1, ignore_errors=True)
# Fixture for .spec based tests. With .spec it does not make sense to differentiate onefile/onedir mode.
@pytest.fixture
def pyi_builder_spec(tmpdir, request, monkeypatch, pyi_modgraph):
    """AppBuilder fixture for .spec based tests. With a .spec file it does not make
    sense to differentiate onefile/onedir mode, so the mode is None."""
    # Save/restore environment variable PATH.
    monkeypatch.setenv('PATH', os.environ['PATH'])
    # Set current working directory to the per-test temporary directory.
    monkeypatch.chdir(tmpdir)
    # PyInstaller or a test case might manipulate 'sys.path'. Reset it for every test.
    monkeypatch.syspath_prepend(None)
    # Clean up configuration and force PyInstaller to do a clean configuration for another app/test. The value is same
    # as the original value.
    monkeypatch.setattr('PyInstaller.config.CONF', {'pathex': []})
    return AppBuilder(tmpdir, request, None)
# Define a fixture which compiles the data/load_dll_using_ctypes/ctypes_dylib.c program in the tmpdir, returning the
# tmpdir object.
@pytest.fixture()
def compiled_dylib(tmpdir, request):
    """Compile data/ctypes_dylib/ctypes_dylib.c inside a copy placed in tmpdir and
    return the py.path to the produced shared library (.dll/.dylib/.so)."""
    tmp_data_dir = _data_dir_copy(request, 'ctypes_dylib', tmpdir)
    # Compile the ctypes_dylib in the tmpdir: Make tmpdir/data the CWD. Do NOT use monkeypatch.chdir() to change and
    # monkeypatch.undo() to restore the CWD, since this will undo ALL monkeypatches (such as the pyi_builder's additions
    # to sys.path), breaking the test.
    old_wd = tmp_data_dir.chdir()
    try:
        if is_win:
            tmp_data_dir = tmp_data_dir.join('ctypes_dylib.dll')
            # For Mingw-x64 we must pass '-m32' to build 32-bit binaries
            march = '-m32' if architecture == '32bit' else '-m64'
            ret = subprocess.call('gcc -shared ' + march + ' ctypes_dylib.c -o ctypes_dylib.dll', shell=True)
            if ret != 0:
                # Find path to cl.exe file.
                # NOTE(review): distutils.msvccompiler is deprecated/removed in newer Pythons — revisit.
                from distutils.msvccompiler import MSVCCompiler
                comp = MSVCCompiler()
                comp.initialize()
                cl_path = comp.cc
                # Fallback to msvc.
                ret = subprocess.call([cl_path, '/LD', 'ctypes_dylib.c'], shell=False)
        elif is_darwin:
            tmp_data_dir = tmp_data_dir.join('ctypes_dylib.dylib')
            # On Mac OS X we need to detect architecture - 32 bit or 64 bit.
            arch = 'i386' if architecture == '32bit' else 'x86_64'
            cmd = (
                'gcc -arch ' + arch + ' -Wall -dynamiclib '
                'ctypes_dylib.c -o ctypes_dylib.dylib -headerpad_max_install_names'
            )
            ret = subprocess.call(cmd, shell=True)
            id_dylib = os.path.abspath('ctypes_dylib.dylib')
            # Rewrite the install name so the dylib is loadable from its new location.
            ret = subprocess.call('install_name_tool -id %s ctypes_dylib.dylib' % (id_dylib,), shell=True)
        else:
            tmp_data_dir = tmp_data_dir.join('ctypes_dylib.so')
            ret = subprocess.call('gcc -fPIC -shared ctypes_dylib.c -o ctypes_dylib.so', shell=True)
        assert ret == 0, 'Compile ctypes_dylib failed.'
    finally:
        # Reset the CWD directory.
        old_wd.chdir()
    return tmp_data_dir
@pytest.fixture
def pyi_windowed_builder(pyi_builder: AppBuilder):
    """A pyi_builder equivalent for testing --windowed applications."""
    # psutil.Popen() somehow bypasses an application's windowed/console mode so that any application built in
    # --windowed mode but invoked with psutil still receives valid std{in,out,err} handles and behaves exactly like
    # a console application. In short, testing windowed mode with psutil is a null test. We must instead use subprocess.

    def _run_executable_(args, exe_path, prog_env, prog_cwd, runtime):
        # Same parameters as AppBuilder._run_executable_, minus 'self' (assigned as a
        # plain instance attribute below, so it is not bound as a method).
        return subprocess.run([exe_path, *args], env=prog_env, cwd=prog_cwd, timeout=runtime).returncode

    pyi_builder._run_executable_ = _run_executable_
    yield pyi_builder
| 40.5189 | 120 | 0.64422 |
ace141a8dc22bc8c263e252cd4ab9707f22ad6fc | 3,251 | py | Python | analyze/constants.py | supertask/KSUHack | 4eda5eee07015faae173db44bfa576abbb4c9b6b | [
"MIT"
] | null | null | null | analyze/constants.py | supertask/KSUHack | 4eda5eee07015faae173db44bfa576abbb4c9b6b | [
"MIT"
] | null | null | null | analyze/constants.py | supertask/KSUHack | 4eda5eee07015faae173db44bfa576abbb4c9b6b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
from datetime import date
class Constants(object):
"""A class defining a fixed numer for some classes."""
#
# Exit statuses
#
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
#
# Regular expression for finding any strings.
#
STUDENT_ID_RE = re.compile('(g\d{7})')
DOMAIN_RE = re.compile('http[s]?://((?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
URL_RE = re.compile('http\S+')
CSE_URL_DIR_RE = re.compile("www\.cse\.kyoto-su\.ac\.jp\/.*(g\d{7})\/(.*)")
CSE_DOMAIN_RE = re.compile("www\.cse\.kyoto-su\.ac\.jp")
KSU_TEMPLATE_INDEX = "ksu_index.html"
#
# 'g'=bachelor or 'i'=master
# Ex: g1144704 -> 'g'1144704, i1558129 -> 'i'1558129
#
STUDENT_TYPE = 'g'
#
# Probably, 4 is for computer science department.
# Ex: g1144704 -> g11'4'4704
#
DEPARTMENT = 4
#
# A date object indicated today for timestamp, for instance, '2016-12-25'.
#
TODAY = date.today()
#
# Analyzing domain names plus downloaded folder names.
#
CC_DOMAIN = "www.cc.kyoto-su.ac.jp"
CSE_DOMAIN = "www.cse.kyoto-su.ac.jp"
#
# This URLs is used for determining a student id.
#
URLS_FOR_DETERMINING_STUDENT_ID = [
"http://www.cc.kyoto-su.ac.jp/~%s/",
"http://www.cc.kyoto-su.ac.jp/~%s/index-j.html"
]
#
# An entrance year of oldest OB.
#
ENTRANCE_YEAR_OF_OLDEST_OB = 2008
#
# Database
#
ESTIMATED_CSE_STUDENT_DB = "DB/estimated_cse_student_DB.db"
CSE_STUDENT_DB = "DB/cse_student_DB.db"
KEYWORDS_DB = "DB/keywords_DB.db"
#
# A spliting charactor for a list element of the database
#
SPLIT_CHAR = ","
#
#
#
ANALYZING_FOLDERS = ["www.cse.kyoto-su.ac.jp/", "www.cc.kyoto-su.ac.jp/"]
@classmethod
def get_grade(self, year):
"""Estimates a grade from an entrance year using a date.
Example:
if today -> 2016
2016,2015,2014,2013 -> 1,2,3,4
"""
if Constants.TODAY.month < 4:
freshman_year = Constants.TODAY.year - 1
else:
freshman_year = Constants.TODAY.year
return freshman_year - year + 1
@classmethod
def get_year(self, grade):
"""Estimates a year from grade using a date.
Example:
if today -> 2016
1,2,3,4 -> 2016,2015,2014,2013
"""
if Constants.TODAY.month < 4:
freshman_year = Constants.TODAY.year - 1
else:
freshman_year = Constants.TODAY.year
return freshman_year - grade + 1
def main():
    """Run a small self-test / demo for the Constants class.

    Returns Constants.EXIT_SUCCESS on success.
    """
    # print() with a single argument behaves identically on Python 2 and 3,
    # replacing the former Python-2-only print statements.
    print(Constants.EXIT_SUCCESS)
    print(Constants.EXIT_FAILURE)
    print(Constants.STUDENT_ID_RE)
    # 2016,2015,2014,2013 -> 1,2,3,4
    print(Constants.get_grade(2016))
    print(Constants.get_grade(2015))
    print(Constants.get_year(1))
    print(Constants.get_year(2))
    # get_year and get_grade must be inverses of each other for every cohort.
    for year in [2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008]:
        assert Constants.get_year(Constants.get_grade(year)) == year
    return Constants.EXIT_SUCCESS


if __name__ == '__main__':
    sys.exit(main())
| 25.20155 | 110 | 0.591818 |
ace1436edcd0302f08a64b5de11405500675adcb | 32,698 | py | Python | yt/visualization/plot_container.py | NeilZaim/yt | 42fbb920ec36352c3de7606b40279b92b6034e63 | [
"BSD-3-Clause-Clear"
] | 1 | 2020-06-22T10:17:01.000Z | 2020-06-22T10:17:01.000Z | yt/visualization/plot_container.py | NeilZaim/yt | 42fbb920ec36352c3de7606b40279b92b6034e63 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/visualization/plot_container.py | NeilZaim/yt | 42fbb920ec36352c3de7606b40279b92b6034e63 | [
"BSD-3-Clause-Clear"
] | 2 | 2020-05-16T15:29:37.000Z | 2020-06-22T10:17:08.000Z | """
A base class for "image" plots with colorbars.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from yt.extern.six.moves import builtins
from yt.extern.six import \
iteritems, \
string_types
import base64
import numpy as np
import matplotlib
import os
from collections import defaultdict
from functools import wraps
from yt.config import \
ytcfg
from yt.data_objects.time_series import \
DatasetSeries
from yt.funcs import \
get_image_suffix, \
iterable, \
ensure_dir, \
ensure_list, \
issue_deprecation_warning
from yt.units.unit_lookup_table import \
prefixable_units, latex_prefixes
from yt.units.unit_object import \
Unit
from yt.utilities.definitions import \
formatted_length_unit_names
from yt.utilities.exceptions import \
YTNotInsideNotebook
from yt.visualization.color_maps import \
yt_colormaps
def invalidate_data(f):
    """Decorator: after calling *f*, mark the instance's data and plot caches
    as stale so both are regenerated on next access."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        result = f(*args, **kwargs)
        instance = args[0]
        instance._data_valid = False
        instance._plot_valid = False
        return result
    return wrapper
def invalidate_figure(f):
    """Decorator: after calling *f*, drop each plot's cached matplotlib figure,
    axes and colorbar axes, then rebuild them via _setup_plots."""
    @wraps(f)
    def newfunc(*args, **kwargs):
        rv = f(*args, **kwargs)
        # Discard the cached matplotlib objects so they are recreated with the new settings.
        for field in args[0].plots.keys():
            args[0].plots[field].figure = None
            args[0].plots[field].axes = None
            args[0].plots[field].cax = None
        args[0]._setup_plots()
        return rv
    return newfunc
def invalidate_plot(f):
    """Decorator: after calling *f*, flag the plot as needing a redraw."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        result = f(*args, **kwargs)
        args[0]._plot_valid = False
        return result
    return wrapper
def validate_plot(f):
    """Decorator: before calling *f*, lazily regenerate any stale frb/profile
    data and re-run plot setup so *f* operates on up-to-date plots."""
    @wraps(f)
    def newfunc(*args, **kwargs):
        # Data and profile caches only exist on some containers; rebuild them
        # only when present and flagged stale.
        if hasattr(args[0], '_data_valid'):
            if not args[0]._data_valid:
                args[0]._recreate_frb()
        if hasattr(args[0], '_profile_valid'):
            if not args[0]._profile_valid:
                args[0]._recreate_profile()
        if not args[0]._plot_valid:
            # it is the responsibility of _setup_plots to
            # call args[0].run_callbacks()
            args[0]._setup_plots()
        rv = f(*args, **kwargs)
        return rv
    return newfunc
def apply_callback(f):
    """Decorator: instead of executing *f*, record (name, (args, kwargs)) on the
    instance's _callbacks list and return the instance for call chaining."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        instance = args[0]
        instance._callbacks.append((f.__name__, (args, kwargs)))
        return instance
    return wrapper
def accepts_all_fields(f):
    """
    Decorate a method whose second argument is <field> and handle the special
    value 'all' by applying the method to every field already present in the
    PlotContainer instance.

    Expected signature: f(self, field, *args, **kwargs) -> self
    """
    @wraps(f)
    def wrapper(self, field, *args, **kwargs):
        if field == 'all':
            requested = list(self.plots.keys())
        else:
            requested = ensure_list(field)
        # Normalize every entry to a fully-determined field tuple before dispatch.
        for resolved in self.data_source._determine_fields(requested):
            f(self, resolved, *args, **kwargs)
        return self
    return wrapper
def get_log_minorticks(vmin, vmax):
    """calculate positions of linear minorticks on a log colorbar

    Parameters
    ----------
    vmin : float
        the minimum value in the colorbar
    vmax : float
        the maximum value in the colorbar
    """
    exp_lo = np.floor(np.log10(vmin))
    exp_hi = np.floor(np.log10(vmax))
    coef_lo = np.ceil(vmin / 10**exp_lo).astype("int64")
    coef_hi = np.floor(vmax / 10**exp_hi).astype("int64")
    segments = []
    # Walk decade by decade from vmin's to vmax's order of magnitude, collecting
    # the integer multiples of each decade that fall inside [vmin, vmax].
    while coef_lo * 10**exp_lo <= coef_hi * 10**exp_hi:
        if exp_lo < exp_hi:
            # Full (remaining) decade: ticks run from coef_lo up to 9.
            segments.append(np.linspace(coef_lo, 9, 10 - coef_lo) * 10**exp_lo)
            coef_lo = 1
        else:
            # Final decade: stop at coef_hi.
            segments.append(np.linspace(coef_lo, coef_hi, coef_hi - coef_lo + 1) * 10**exp_lo)
        exp_lo += 1
    return np.concatenate(segments) if segments else np.array([])
def get_symlog_minorticks(linthresh, vmin, vmax):
    """calculate positions of linear minorticks on a symmetric log colorbar

    Parameters
    ----------
    linthresh : float
        the threshold for the linear region
    vmin : float
        the minimum value in the colorbar
    vmax : float
        the maximum value in the colorbar
    """
    if vmin > 0 or vmax < 0:
        # The range never crosses zero, so plain log minorticks suffice.
        return get_log_minorticks(vmin, vmax)
    if vmin == 0:
        return np.hstack((0, get_log_minorticks(linthresh, vmax)))
    if vmax == 0:
        # Mirror the positive-side ticks onto the negative axis.
        return np.hstack((-get_log_minorticks(linthresh, -vmin)[::-1], 0))
    return np.hstack((-get_log_minorticks(linthresh, -vmin)[::-1], 0,
                      get_log_minorticks(linthresh, vmax)))
# Registry mapping transform names to FieldTransform instances; populated
# automatically by FieldTransform.__init__.
field_transforms = {}
class FieldTransform(object):
    """Named callable wrapper around a transform function (e.g. np.log10);
    every instance self-registers in the module-level field_transforms dict."""
    def __init__(self, name, func):
        self.name = name
        self.func = func
        # Self-register so transforms can later be looked up by name.
        field_transforms[name] = self

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)
# Canonical transforms. The symlog transform's func slot is deliberately None:
# it is later reused to store the linear threshold (see set_log).
log_transform = FieldTransform('log10', np.log10)
linear_transform = FieldTransform('linear', lambda x: x)
symlog_transform = FieldTransform('symlog', None)
class PlotDictionary(defaultdict):
    """A defaultdict keyed by fully-determined field tuples: every key passed
    to get/set/contains is first normalized via data_source._determine_fields,
    so e.g. 'density' and ('gas', 'density') address the same entry."""
    def __getitem__(self, item):
        return defaultdict.__getitem__(
            self, self.data_source._determine_fields(item)[0])

    def __setitem__(self, item, value):
        return defaultdict.__setitem__(
            self, self.data_source._determine_fields(item)[0], value)

    def __contains__(self, item):
        return defaultdict.__contains__(
            self, self.data_source._determine_fields(item)[0])

    def __init__(self, data_source, default_factory=None):
        # data_source must be stored before any item access, since key
        # normalization relies on it.
        self.data_source = data_source
        return defaultdict.__init__(self, default_factory)
class PlotContainer(object):
"""A container for generic plots"""
_plot_type = None
_plot_valid = False
def __init__(self, data_source, figure_size, fontsize):
    """
    :param data_source: the yt data object this container draws its data from.
    :param figure_size: size in inches; either a scalar (longest axis) or an
        (x, y) pair.
    :param fontsize: base font size in points for all plot text.
    """
    from matplotlib.font_manager import FontProperties

    self.data_source = data_source
    self.ds = data_source.ds
    self.ts = self._initialize_dataset(self.ds)
    if iterable(figure_size):
        self.figure_size = float(figure_size[0]), float(figure_size[1])
    else:
        self.figure_size = float(figure_size)
    # Use the STIX font shipped with matplotlib so rendering does not depend on system fonts.
    font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
    self._font_properties = FontProperties(size=fontsize, fname=font_path)
    self._font_color = None
    self._xlabel = None
    self._ylabel = None
    self._minorticks = {}
    self._field_transform = {}
@accepts_all_fields
@invalidate_plot
def set_log(self, field, log, linthresh=None):
    """set a field to log or linear.

    Parameters
    ----------
    field : string
        the field to set a transform
        if field == 'all', applies to all plots.
    log : boolean
        Log on/off.
    linthresh : float (must be positive)
        linthresh will be enabled for symlog scale only when log is true

    Raises ValueError if linthresh is given but not positive.
    """
    if log:
        if linthresh is not None:
            if not linthresh > 0.:
                raise ValueError('\"linthresh\" must be positive')
            self._field_transform[field] = symlog_transform
            # The symlog transform stores its linear threshold in the
            # (otherwise unused) func slot.
            self._field_transform[field].func = linthresh
        else:
            self._field_transform[field] = log_transform
    else:
        self._field_transform[field] = linear_transform
    return self
def get_log(self, field):
    """get the transform type of a field.

    Parameters
    ----------
    field : string
        the field to get a transform
        if field == 'all', applies to all plots.

    Returns a dict mapping each determined field to True (log) or False.
    """
    # devnote : accepts_all_fields decorator is not applicable here because the return variable isn't self
    log = {}
    if field == 'all':
        fields = list(self.plots.keys())
    else:
        fields = ensure_list(field)
    for field in self.data_source._determine_fields(fields):
        log[field] = (self._field_transform[field] == log_transform)
    return log
@invalidate_plot
def set_transform(self, field, name):
    """Assign the registered transform *name* ('log10', 'linear', 'symlog')
    to *field*; raises KeyError(name) for an unknown transform name."""
    resolved = self.data_source._determine_fields(field)[0]
    try:
        transform = field_transforms[name]
    except KeyError:
        raise KeyError(name)
    self._field_transform[resolved] = transform
    return self
@accepts_all_fields
@invalidate_plot
def set_minorticks(self, field, state):
    """Turn minor ticks on or off in the current plot.

    Displaying minor ticks reduces performance; turn them off
    using set_minorticks('all', False) if drawing speed is a problem.

    Parameters
    ----------
    field : string
        the field to remove minorticks
        if field == 'all', applies to all plots.
    state : bool
        the state indicating 'on' (True) or 'off' (False)
    """
    if isinstance(state, str):
        # Legacy API accepted the strings 'on'/'off'; map them to bools.
        # (issue_deprecation_warning is imported at module level; the previous
        # function-local re-import was redundant.)
        issue_deprecation_warning("Deprecated api, use bools for *state*.")
        state = {"on": True, "off": False}[state.lower()]
    self._minorticks[field] = state
    return self
def _setup_plots(self):
    """Hook for subclasses: build or refresh the matplotlib plot objects.

    Intentionally a no-op in this base class.
    """
    # Left blank to be overridden in subclasses
    pass
def _initialize_dataset(self, ts):
    """Wrap *ts* in a DatasetSeries unless it already is one."""
    if isinstance(ts, DatasetSeries):
        return ts
    if not iterable(ts):
        ts = [ts]
    return DatasetSeries(ts)
def _switch_ds(self, new_ds, data_source=None):
    """Point this container at *new_ds*: rebuild the data object with the same
    construction arguments against the new dataset, convert any stored axis
    limits into the new dataset's unit registry, and invalidate all caches.

    :param data_source: only valid for projections; substitutes the
        projection's underlying data source.
    """
    old_object = self.data_source
    name = old_object._type_name
    # Recreate the construction arguments of the old data object.
    kwargs = dict((n, getattr(old_object, n))
                  for n in old_object._con_args)
    kwargs['center'] = getattr(old_object, 'center', None)
    if data_source is not None:
        if name != "proj":
            raise RuntimeError("The data_source keyword argument "
                               "is only defined for projections.")
        kwargs['data_source'] = data_source

    self.ds = new_ds

    # A _hack_ for ParticleProjectionPlots
    if name == 'Particle':
        from yt.visualization.particle_plots import \
            ParticleAxisAlignedDummyDataSource
        new_object = ParticleAxisAlignedDummyDataSource(ds=self.ds, **kwargs)
    else:
        new_object = getattr(new_ds, name)(**kwargs)

    self.data_source = new_object
    self._data_valid = self._plot_valid = False

    # Re-express stored x/y/z limits through the new dataset's unit registry.
    for d in 'xyz':
        lim_name = d + 'lim'
        if hasattr(self, lim_name):
            lim = getattr(self, lim_name)
            lim = tuple(new_ds.quan(l.value, str(l.units)) for l in lim)
            setattr(self, lim_name, lim)

    # Keep the helper dictionaries pointed at the new data object so their
    # field-name normalization stays consistent.
    self.plots.data_source = new_object
    self._background_color.data_source = new_object
    self._colorbar_label.data_source = new_object
    self._setup_plots()
@validate_plot
def __getitem__(self, item):
    """Return the plot object for *item*, first ensuring all plots are up to
    date (via the validate_plot decorator)."""
    return self.plots[item]
def _set_font_properties(self):
    # Propagate the container-level font settings to each individual plot.
    # Indexing self.plots (a PlotDictionary) normalizes the field key.
    for f in self.plots:
        self.plots[f]._set_font_properties(
            self._font_properties, self._font_color)
@invalidate_plot
@invalidate_figure
def set_font(self, font_dict=None):
    """
    Set the font and font properties.

    Parameters
    ----------
    font_dict : dict
        A dict of keyword parameters to be passed to
        :class:`matplotlib.font_manager.FontProperties`.

        Possible keys include:

        * family - The font family. Can be serif, sans-serif, cursive,
          'fantasy' or 'monospace'.
        * style - The font style. Either normal, italic or oblique.
        * color - A valid color string like 'r', 'g', 'red', 'cobalt',
          and 'orange'.
        * variant - Either normal or small-caps.
        * size - Either a relative value of xx-small, x-small, small,
          medium, large, x-large, xx-large or an absolute font size, e.g. 12
        * stretch - A numeric value in the range 0-1000 or one of
          ultra-condensed, extra-condensed, condensed, semi-condensed,
          normal, semi-expanded, expanded, extra-expanded or ultra-expanded
        * weight - A numeric value in the range 0-1000 or one of ultralight,
          light, normal, regular, book, medium, roman, semibold, demibold,
          demi, bold, heavy, extra bold, or black

        See the matplotlib font manager API documentation for more details.
        https://matplotlib.org/api/font_manager_api.html

    Notes
    -----
    Mathtext axis labels will only obey the `size` and `color` keyword.

    Examples
    --------
    This sets the font to be 24-pt, blue, sans-serif, italic, and
    bold-face.

    >>> slc = SlicePlot(ds, 'x', 'Density')
    >>> slc.set_font({'family':'sans-serif', 'style':'italic',
    ...               'weight':'bold', 'size':24, 'color':'blue'})
    """
    from matplotlib.font_manager import FontProperties

    if font_dict is None:
        font_dict = {}
    if 'color' in font_dict:
        # 'color' is not a FontProperties keyword; track it separately.
        self._font_color = font_dict.pop('color')
    # Set default values if the user does not explicitly set them.
    # this prevents reverting to the matplotlib defaults.
    font_dict.setdefault('family', 'stixgeneral')
    font_dict.setdefault('size', 18)
    self._font_properties = \
        FontProperties(**font_dict)
    return self
def set_font_size(self, size):
    """Set the absolute font size (in points; 1 pt = 1/72 inch) used in the plot.

    Thin convenience wrapper around set_font; see set_font for the full set of
    customization options.
    """
    font_dict = {'size': size}
    return self.set_font(font_dict)
@invalidate_plot
@invalidate_figure
def set_figure_size(self, size):
    """Resize the figure.

    Parameters
    ----------
    size : float
        Length of the figure's longest axis in inches, including margins
        but excluding the colorbar.
    """
    self.figure_size = float(size)
    return self
@validate_plot
def save(self, name=None, suffix=None, mpl_kwargs=None):
    """saves the plot to disk.

    Parameters
    ----------
    name : string or tuple
        The base of the filename. If name is a directory or if name is not
        set, the filename of the dataset is used. For a tuple, the
        resulting path will be given by joining the elements of the
        tuple
    suffix : string
        Specify the image type by its suffix. If not specified, the output
        type will be inferred from the filename. Defaults to PNG.
    mpl_kwargs : dict
        A dict of keyword arguments to be passed to matplotlib.

    >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})

    Returns the list of filenames written, one per plot.
    """
    names = []
    if mpl_kwargs is None: mpl_kwargs = {}
    if isinstance(name, (tuple, list)):
        name = os.path.join(*name)
    if name is None:
        name = str(self.ds)
    name = os.path.expanduser(name)
    # A trailing path separator denotes a directory; create it if needed.
    if name[-1] == os.sep and not os.path.isdir(name):
        ensure_dir(name)
    if os.path.isdir(name) and name != str(self.ds):
        name = name + (os.sep if name[-1] != os.sep else '') + str(self.ds)
    if suffix is None:
        suffix = get_image_suffix(name)
        # The name already carries an image suffix: save as-is, no name mangling.
        if suffix != '':
            for k, v in iteritems(self.plots):
                names.append(v.save(name, mpl_kwargs))
            return names
    # Otherwise build descriptive names: <name>_<plot type>[_<axis>]_<field>[_<weight>].<suffix>
    if hasattr(self.data_source, 'axis'):
        axis = self.ds.coordinates.axis_name.get(
            self.data_source.axis, '')
    else:
        axis = None
    weight = None
    type = self._plot_type
    if type in ['Projection', 'OffAxisProjection']:
        weight = self.data_source.weight_field
        if weight is not None:
            weight = weight[1].replace(' ', '_')
    if 'Cutting' in self.data_source.__class__.__name__:
        type = 'OffAxisSlice'
    for k, v in iteritems(self.plots):
        if isinstance(k, tuple):
            k = k[1]
        if axis:
            n = "%s_%s_%s_%s" % (name, type, axis, k.replace(' ', '_'))
        else:
            # for cutting planes
            n = "%s_%s_%s" % (name, type, k.replace(' ', '_'))
        if weight:
            n += "_%s" % (weight)
        if suffix != '':
            n = ".".join([n, suffix])
        names.append(v.save(n, mpl_kwargs))
    return names
@invalidate_data
def refresh(self):
    """Invalidate the cached data and plots so everything is regenerated on
    the next access or save."""
    # invalidate_data will take care of everything
    return self
@validate_plot
def show(self):
    r"""This will send any existing plots to the IPython notebook.

    If yt is being run from within an IPython session, and it is able to
    determine this, this function will send any existing plots to the
    notebook for display.

    If yt can't determine if it's inside an IPython session, it will raise
    YTNotInsideNotebook.

    Examples
    --------
    >>> from yt.mods import SlicePlot
    >>> slc = SlicePlot(ds, "x", ["Density", "VelocityMagnitude"])
    >>> slc.show()
    """
    # All plots share one interactivity flag; inspect it on the first plot.
    interactivity = self.plots[list(self.plots.keys())[0]].interactivity
    if interactivity:
        # Interactive backends: show each plot window directly.
        for k, v in sorted(iteritems(self.plots)):
            v.show()
    else:
        if "__IPYTHON__" in dir(builtins):
            from IPython.display import display
            display(self)
        else:
            raise YTNotInsideNotebook
@validate_plot
def display(self, name=None, mpl_kwargs=None):
    """Will attempt to show the plot in an IPython notebook.
    Failing that, the plot will be saved to disk."""
    try:
        return self.show()
    except YTNotInsideNotebook:
        # Not in a notebook: fall back to writing image files.
        return self.save(name=name, mpl_kwargs=mpl_kwargs)
@validate_plot
def _repr_html_(self):
    """Return an html representation of the plot object. Will display as a
    png for each WindowPlotMPL instance in self.plots"""
    ret = ''
    for field in self.plots:
        img = base64.b64encode(self.plots[field]._repr_png_()).decode()
        # Single '%' here: str.format passes '%' through verbatim, so the
        # previous '%%' (a leftover from %-style formatting) rendered as the
        # invalid CSS value '100%%'.
        ret += r'<img style="max-width:100%;max-height:100%;" ' \
               r'src="data:image/png;base64,{0}"><br>'.format(img)
    return ret
@invalidate_plot
def set_xlabel(self, label):
    r"""
    Replace the x-axis title (defaults to the global value; fontsize
    defaults to 18).

    Parameters
    ----------
    label : str
        The new string for the x-axis.

    >>> plot.set_xlabel("H2I Number Density (cm$^{-3}$)")
    """
    self._xlabel = label
    return self
@invalidate_plot
def set_ylabel(self, label):
    r"""
    Replace the y-axis title (defaults to the global value).

    Parameters
    ----------
    label : str
        The new string for the y-axis.

    >>> plot.set_ylabel("Temperature (K)")
    """
    self._ylabel = label
    return self
def _get_axes_unit_labels(self, unit_x, unit_y):
axes_unit_labels = ['', '']
comoving = False
hinv = False
for i, un in enumerate((unit_x, unit_y)):
unn = None
if hasattr(self.data_source, 'axis'):
if hasattr(self.ds.coordinates, "image_units"):
# This *forces* an override
unn = self.ds.coordinates.image_units[
self.data_source.axis][i]
elif hasattr(self.ds.coordinates, "default_unit_label"):
axax = getattr(self.ds.coordinates,
"%s_axis" % ("xy"[i]))[self.data_source.axis]
unn = self.ds.coordinates.default_unit_label.get(
axax, None)
if unn is not None:
axes_unit_labels[i] = r'\ \ \left('+unn+r'\right)'
continue
# Use sympy to factor h out of the unit. In this context 'un'
# is a string, so we call the Unit constructor.
expr = Unit(un, registry=self.ds.unit_registry).expr
h_expr = Unit('h', registry=self.ds.unit_registry).expr
# See http://docs.sympy.org/latest/modules/core.html#sympy.core.expr.Expr
h_power = expr.as_coeff_exponent(h_expr)[1]
# un is now the original unit, but with h factored out.
un = str(expr*h_expr**(-1*h_power))
un_unit = Unit(un, registry=self.ds.unit_registry)
cm = Unit('cm').expr
if str(un).endswith('cm') and cm not in un_unit.expr.atoms():
comoving = True
un = un[:-2]
# no length units besides code_length end in h so this is safe
if h_power == -1:
hinv = True
elif h_power != 0:
# It doesn't make sense to scale a position by anything
# other than h**-1
raise RuntimeError
if un not in ['1', 'u', 'unitary']:
if un in formatted_length_unit_names:
un = formatted_length_unit_names[un]
else:
un = Unit(un, registry=self.ds.unit_registry)
un = un.latex_representation()
if hinv:
un = un + r'\,h^{-1}'
if comoving:
un = un + r'\,(1+z)^{-1}'
pp = un[0]
if pp in latex_prefixes:
symbol_wo_prefix = un[1:]
if symbol_wo_prefix in prefixable_units:
un = un.replace(
pp, "{"+latex_prefixes[pp]+"}", 1)
axes_unit_labels[i] = r'\ \ ('+un+')'
return axes_unit_labels
def hide_colorbar(self, field=None):
"""
Hides the colorbar for a plot and updates the size of the
plot accordingly. Defaults to operating on all fields for a
PlotContainer object.
Parameters
----------
field : string, field tuple, or list of strings or field tuples (optional)
The name of the field(s) that we want to hide the colorbar.
If None or 'all' is provided, will default to using all fields available
for this object.
Examples
--------
This will save an image with no colorbar.
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> s = SlicePlot(ds, 2, 'density', 'c', (20, 'kpc'))
>>> s.hide_colorbar()
>>> s.save()
This will save an image with no axis or colorbar.
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> s = SlicePlot(ds, 2, 'density', 'c', (20, 'kpc'))
>>> s.hide_axes()
>>> s.hide_colorbar()
>>> s.save()
"""
if field is None or field == 'all':
field = self.fields
field = ensure_list(field)
for f in field:
self.plots[f].hide_colorbar()
return self
def show_colorbar(self, field=None):
"""
Shows the colorbar for a plot and updates the size of the
plot accordingly. Defaults to operating on all fields for a
PlotContainer object. See hide_colorbar().
Parameters
----------
field : string, field tuple, or list of strings or field tuples (optional)
The name of the field(s) that we want to show the colorbar.
"""
if field is None:
field = self.fields
field = ensure_list(field)
for f in field:
self.plots[f].show_colorbar()
return self
def hide_axes(self, field=None, draw_frame=False):
"""
Hides the axes for a plot and updates the size of the
plot accordingly. Defaults to operating on all fields for a
PlotContainer object.
Parameters
----------
field : string, field tuple, or list of strings or field tuples (optional)
The name of the field(s) that we want to hide the axes.
draw_frame : boolean
If True, the axes frame will still be drawn. Defaults to False.
See note below for more details.
Examples
--------
This will save an image with no axes.
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> s = SlicePlot(ds, 2, 'density', 'c', (20, 'kpc'))
>>> s.hide_axes()
>>> s.save()
This will save an image with no axis or colorbar.
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> s = SlicePlot(ds, 2, 'density', 'c', (20, 'kpc'))
>>> s.hide_axes()
>>> s.hide_colorbar()
>>> s.save()
Note
----
By default, when removing the axes, the patch on which the axes are
drawn is disabled, making it impossible to later change e.g. the
background colour. To force the axes patch to be displayed while still
hiding the axes, set the ``draw_frame`` keyword argument to ``True``.
"""
if field is None:
field = self.fields
field = ensure_list(field)
for f in field:
self.plots[f].hide_axes(draw_frame)
return self
def show_axes(self, field=None):
"""
Shows the axes for a plot and updates the size of the
plot accordingly. Defaults to operating on all fields for a
PlotContainer object. See hide_axes().
Parameters
----------
field : string, field tuple, or list of strings or field tuples (optional)
The name of the field(s) that we want to show the axes.
"""
if field is None:
field = self.fields
field = ensure_list(field)
for f in field:
self.plots[f].show_axes()
return self
class ImagePlotContainer(PlotContainer):
"""A container for plots with colorbars.
"""
_colorbar_valid = False
def __init__(self, data_source, figure_size, fontsize):
super(ImagePlotContainer, self).__init__(
data_source, figure_size, fontsize)
self.plots = PlotDictionary(data_source)
self._callbacks = []
self._colormaps = defaultdict(
lambda: ytcfg.get("yt", "default_colormap"))
self._cbar_minorticks = {}
self._background_color = PlotDictionary(
self.data_source, lambda: 'w')
self._colorbar_label = PlotDictionary(
self.data_source, lambda: None)
@accepts_all_fields
@invalidate_plot
def set_cmap(self, field, cmap):
"""set the colormap for one of the fields
Parameters
----------
field : string
the field to set the colormap
if field == 'all', applies to all plots.
cmap : string or tuple
If a string, will be interpreted as name of the colormap.
If a tuple, it is assumed to be of the form (name, type, number)
to be used for palettable functionality. (name, type, number, bool)
can be used to specify if a reverse colormap is to be used.
"""
self._colorbar_valid = False
self._colormaps[field] = cmap
return self
@accepts_all_fields
@invalidate_plot
def set_background_color(self, field, color=None):
"""set the background color to match provided color
Parameters
----------
field : string
the field to set the colormap
if field == 'all', applies to all plots.
color : string or RGBA tuple (optional)
if set, set the background color to this color
if unset, background color is set to the bottom value of
the color map
"""
if color is None:
cmap = self._colormaps[field]
if isinstance(cmap, string_types):
try:
cmap = yt_colormaps[cmap]
except KeyError:
cmap = getattr(matplotlib.cm, cmap)
color = cmap(0)
self._background_color[field] = color
return self
@accepts_all_fields
@invalidate_plot
def set_zlim(self, field, zmin, zmax, dynamic_range=None):
"""set the scale of the colormap
Parameters
----------
field : string
the field to set a colormap scale
if field == 'all', applies to all plots.
zmin : float
the new minimum of the colormap scale. If 'min', will
set to the minimum value in the current view.
zmax : float
the new maximum of the colormap scale. If 'max', will
set to the maximum value in the current view.
Other Parameters
----------------
dynamic_range : float (default: None)
The dynamic range of the image.
If zmin == None, will set zmin = zmax / dynamic_range
If zmax == None, will set zmax = zmin * dynamic_range
When dynamic_range is specified, defaults to setting
zmin = zmax / dynamic_range.
"""
myzmin = zmin
myzmax = zmax
if zmin == 'min':
myzmin = self.plots[field].image._A.min()
if zmax == 'max':
myzmax = self.plots[field].image._A.max()
if dynamic_range is not None:
if zmax is None:
myzmax = myzmin * dynamic_range
else:
myzmin = myzmax / dynamic_range
if myzmin > 0.0 and self._field_transform[field] == symlog_transform:
self._field_transform[field] = log_transform
self.plots[field].zmin = myzmin
self.plots[field].zmax = myzmax
return self
@invalidate_plot
def set_cbar_minorticks(self, field, state):
"""Deprecated alias, kept for backward compatibility.
turn colorbar minor ticks "on" or "off" in the current plot, according to *state*
Parameters
----------
field : string
the field to remove colorbar minorticks
state : string
the state indicating 'on' or 'off'
"""
issue_deprecation_warning("Deprecated alias, use set_colorbar_minorticks instead.")
boolstate = {"on": True, "off": False}[state.lower()]
return self.set_colorbar_minorticks(field, boolstate)
@accepts_all_fields
@invalidate_plot
def set_colorbar_minorticks(self, field, state):
"""turn colorbar minor ticks on or off in the current plot
Displaying minor ticks reduces performance; turn them off
using set_colorbar_minorticks('all', False) if drawing speed is a problem.
Parameters
----------
field : string
the field to remove colorbar minorticks
if field == 'all', applies to all plots.
state : bool
the state indicating 'on' (True) or 'off' (False)
"""
self._cbar_minorticks[field] = state
return self
@invalidate_plot
def set_colorbar_label(self, field, label):
r"""
Sets the colorbar label.
Parameters
----------
field : str or tuple
The name of the field to modify the label for.
label : str
The new label
>>> plot.set_colorbar_label("density", "Dark Matter Density (g cm$^{-3}$)")
"""
self._colorbar_label[field] = label
return self
def _get_axes_labels(self, field):
return(self._xlabel, self._ylabel, self._colorbar_label[field])
| 33.639918 | 110 | 0.5719 |
ace143cc7ececf99cccc72e89996c1142a080af8 | 3,264 | py | Python | unet.py | nsid10/U-net | f54368f145d70974e040dc585bd23c787122f44b | [
"CC0-1.0"
] | 1 | 2021-07-31T02:23:25.000Z | 2021-07-31T02:23:25.000Z | unet.py | nsid10/U-net | f54368f145d70974e040dc585bd23c787122f44b | [
"CC0-1.0"
] | null | null | null | unet.py | nsid10/U-net | f54368f145d70974e040dc585bd23c787122f44b | [
"CC0-1.0"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.applications import EfficientNetB7
from tensorflow.keras.layers import Concatenate, Conv2D, Conv2DTranspose, Input, MaxPool2D
from tensorflow.keras.optimizers import Adam
from blocks import Conv_block
def unet_block(x, filters: int, a=0.01, dr=0.05):
"""
U-net functional block
Edit this block as needed
Args:
x: Input tensor.
filters (int): No. of filters in convolution layer.
a (float, optional): Leakage rate for ReLU. Defaults to 0.01.
dr (float, optional): Dropout rate. Defaults to 0.05.
Returns:
Output tensor
"""
y = Conv_block(x, filters, a, dr)
return y
def unet_builder(input_shape: tuple, filters=[32, 32, 48, 80, 224], a=0.01, dr=0.05):
"""
Generalized U-net builder
Args:
input_shape (tuple): Input shape of data.
filters (list, optional): Filter size per U-net level. Defaults to [32, 32, 48, 80, 224].
a (float, optional): Leakage rate for ReLU. Defaults to 0.01.
dr (float, optional): Dropout rate. Defaults to 0.05.
Returns:
Keras model
"""
level = len(filters) - 1
contract = list()
# input
start = Input(input_shape)
xx = unet_block(start, filters[0], a, dr)
# contracting path
for ii in range(level):
contract.append(xx)
en = MaxPool2D()(xx)
xx = unet_block(en, filters[ii + 1], a, dr)
# expansive path
for jj in range(level):
ex = Conv2DTranspose(filters[level - jj], (2, 2), strides=(2, 2), padding="same")(xx)
ex = Concatenate(axis=-1)([ex, contract[-jj - 1]])
xx = unet_block(ex, filters[-jj - 2], a, dr)
# output
end = Conv2D(1, (1, 1), activation="sigmoid")(xx)
model = tf.keras.Model(inputs=[start], outputs=[end])
model.compile(optimizer=Adam(0.1), loss="binary_crossentropy", metrics=["acc", "mse"])
return model
def efficientB7_unet_builder(input_shape: tuple, filters=[32, 32, 48, 80, 224], a=0.01, dr=0.05):
"""
EfficientB7 U-net builder
Args:
input_shape (tuple): Input shape of data.
filters (list, optional): Filter size per U-net level. Defaults to [32, 32, 48, 80, 224].
a (float, optional): Leakage rate for ReLU. Defaults to 0.01.
dr (float, optional): Dropout rate. Defaults to 0.05.
Returns:
[type]: [description]
"""
level = len(filters) - 1
contract = list()
Encoder = EfficientNetB7(include_top=False, weights=None, input_shape=input_shape)
# input
start = Encoder.input
xx = unet_block(start, filters[0], a, dr)
# contracting path
for ii in (52, 156, 260, 557):
contract.append(xx)
xx = Encoder.layers[ii].output
# expansive path
for jj in range(level):
ex = Conv2DTranspose(filters[level - jj], (2, 2), strides=(2, 2), padding="same")(xx)
ex = Concatenate(axis=-1)([ex, contract[-jj - 1]])
xx = unet_block(ex, filters[-jj - 2], a, dr)
# output
end = Conv2D(1, (1, 1), activation="sigmoid")(xx)
model = tf.keras.Model(inputs=[start], outputs=[end])
model.compile(optimizer=Adam(0.1), loss="binary_crossentropy", metrics=["acc", "mse"])
return model
| 30.222222 | 97 | 0.619485 |
ace14469f74891d6f2be5f7b34470819eb292e2d | 337 | py | Python | tensorflow_example/csnsus/process_data/__init__.py | songjun54cm/ExampleBank | 195154ae7aa7fdbee358019248bef77028b8e97e | [
"Apache-2.0"
] | null | null | null | tensorflow_example/csnsus/process_data/__init__.py | songjun54cm/ExampleBank | 195154ae7aa7fdbee358019248bef77028b8e97e | [
"Apache-2.0"
] | null | null | null | tensorflow_example/csnsus/process_data/__init__.py | songjun54cm/ExampleBank | 195154ae7aa7fdbee358019248bef77028b8e97e | [
"Apache-2.0"
] | 3 | 2018-12-14T06:33:26.000Z | 2019-01-18T07:51:39.000Z | __author__ = 'JunSong<songjun@corp.netease.com>'
# Date: 2019/1/25
import argparse
def main(config):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', dest='file', type=str, default='example.txt')
args = parser.parse_args()
config = vars(args)
main(config) | 22.466667 | 85 | 0.667656 |
ace14506eb96a77cd37f7e4a1004ee7e769082ce | 10,660 | py | Python | tsbenchmark/tsloader.py | DataCanvasIO/TSBenchmark | e8e45cd0cd206723ad59a8cac2a572e48f43a728 | [
"Apache-2.0"
] | 2 | 2022-03-28T02:10:09.000Z | 2022-03-29T02:56:26.000Z | tsbenchmark/tsloader.py | DataCanvasIO/TSBenchmark | e8e45cd0cd206723ad59a8cac2a572e48f43a728 | [
"Apache-2.0"
] | null | null | null | tsbenchmark/tsloader.py | DataCanvasIO/TSBenchmark | e8e45cd0cd206723ad59a8cac2a572e48f43a728 | [
"Apache-2.0"
] | null | null | null | from tsbenchmark.core.loader import DataSetLoader, TaskLoader
from tsbenchmark.datasets import TSDataset, TSTaskData
import os
from hypernets.utils import logging
import pandas as pd
import yaml
from tsbenchmark.tasks import TSTaskConfig
from tsbenchmark.util import download_util, file_util, df_util
from tsbenchmark import consts
logging.set_level('DEBUG') # TODO
logger = logging.getLogger(__name__)
# BASE_URL = 'https://tsbenchmark.s3.amazonaws.com/datas' # TODO
# DESC_URL = f'{BASE_URL}/dataset_desc.csv'
class TSDataSetDesc:
def __init__(self, data_path, data_source):
self.data_path = data_path
if not os.path.exists(self._desc_file()):
logger.info('Downloading dataset_desc.csv from remote.')
download_util.download(self._desc_file(), f'{data_source}/dataset_desc.csv')
logger.info('Finish download dataset_desc.csv.')
self.dataset_desc = pd.read_csv(self._desc_file())
self.dataset_desc_local = None
if os.path.exists(self._desc_local_file()):
self.dataset_desc_local = pd.read_csv(self._desc_local_file())
def exists(self, dataset_id):
return self.dataset_desc[self.dataset_desc['id'] == dataset_id].shape[0] == 1
def cached(self, dataset_id):
return self.dataset_desc_local is not None and \
self.dataset_desc_local[self.dataset_desc_local['id'] == dataset_id].shape[0] == 1
def update_local(self, dataset_id):
df = pd.read_csv(self._desc_file())
df[df['id'] == dataset_id].to_csv(self._desc_local_file(), index=False, mode='a')
def _desc_file(self):
return os.path.join(self.data_path, 'dataset_desc.csv')
def _desc_local_file(self):
return os.path.join(self.data_path, 'dataset_desc_local.csv')
def train_file_path(self, dataset_id):
return os.path.join(self.dataset_path_local(dataset_id), 'train.csv')
def test_file_path(self, dataset_id):
return os.path.join(self.dataset_path_local(dataset_id), 'test.csv')
def meta_file_path(self, dataset_id):
return os.path.join(self.dataset_path_local(dataset_id), 'metadata.yaml')
def dataset_path_local(self, dataset_id):
dataset = self.dataset_desc_local[self.dataset_desc_local['id'] == dataset_id]
return os.path.join(self.data_path, dataset.type.values[0],
dataset.data_size.values[0], dataset.name.values[0])
def data_size(self, dataset_id):
dataset = self.dataset_desc_local[self.dataset_desc_local['id'] == dataset_id]
return dataset.data_size.values[0]
def data_shape(self, dataset_id):
dataset = self.dataset_desc_local[self.dataset_desc_local['id'] == dataset_id]
return dataset.shape
def _get_metadata(meta_file_path):
f = open(meta_file_path, 'r', encoding='utf-8')
metadata = yaml.load(f.read(), Loader=yaml.FullLoader)
f.close()
return metadata
def _to_dataset(taskdata_id):
if '_' in str(taskdata_id):
strs = taskdata_id.split('_')
dataset_id = int(strs[0])
task_no = int(strs[1])
else:
dataset_id = int(taskdata_id)
task_no = 1
return dataset_id, task_no
class TSDataSetLoader(DataSetLoader):
def __init__(self, data_path, data_source=None):
self.data_path = data_path
self.data_source = consts.DATASETS_SOURCE_MAP[
consts.DATASETS_SOURCE_DEFAULT] if data_source is None else data_source
self.dataset_desc = TSDataSetDesc(data_path, self.data_source)
def list(self, type=None, data_size=None):
df = self.dataset_desc.dataset_desc
df = df_util.filter(df, 'data_size', data_size)
df = df_util.filter(df, 'type', type)
df = df[df['format'] != 'tsf'] # todo support in the future.
return df['id'].values
def exists(self, dataset_id):
return self.dataset_desc.exists(dataset_id)
def data_format(self, dataset_id):
df = self.dataset_desc.dataset_desc
return df[df['id'] == dataset_id]['format'].values[0]
def load_train(self, dataset_id):
self._download_if_not_cached(dataset_id)
df_train = pd.read_csv(self.dataset_desc.train_file_path(dataset_id))
return df_train
def load_test(self, dataset_id):
self._download_if_not_cached(dataset_id)
df_test = pd.read_csv(self.dataset_desc.test_file_path(dataset_id))
return df_test
def load_meta(self, dataset_id):
self._download_if_not_cached(dataset_id)
metadata = _get_metadata(self.dataset_desc.meta_file_path(dataset_id))
metadata['data_size'] = self.dataset_desc.data_size(dataset_id)
metadata['shape'] = self.dataset_desc.data_shape(dataset_id)
metadata['series_name'] = metadata['series_name'].split(
",") if 'series_name' in metadata else None
metadata['covariables_name'] = metadata['covariables_name'].split(
",") if 'covariables_name' in metadata else None
columns = list(self.load_test(dataset_id).columns.values)
columns.remove(metadata['date_name'])
if metadata['series_name'] is None and metadata['covariables_name'] is None:
metadata['series_name'] = columns
elif metadata['series_name'] is None:
for col in metadata['covariables_name']:
columns.remove(col)
metadata['series_name'] = columns
elif metadata['covariables_name'] is None:
if len(columns) != len(metadata['series_name']):
for col in metadata['series_name']:
columns.remove(col)
metadata['covariables_name'] = columns
return metadata
def _download_if_not_cached(self, dataset_id):
if not self.exists(dataset_id):
raise ValueError(f"TaskData {dataset_id} does not exists!")
if not self.dataset_desc.cached(dataset_id):
# 1. Get dataset's meta from dataset_desc.
meta = self.dataset_desc.dataset_desc[self.dataset_desc.dataset_desc['id'] == dataset_id]
task_type = meta['type'].values[0]
data_size = meta['data_size'].values[0]
name = meta['name'].values[0]
# 2. Download tmp zip file from cloud.
tmp_path = file_util.get_dir_path(os.path.join(self.data_path, 'tmp'))
url = f"{self.data_source}/{task_type}/{data_size}/{name}.zip"
import uuid
file_name = str(uuid.uuid1()) + '.zip'
file_tmp = os.path.join(tmp_path, file_name)
download_util.download(file_tmp, url)
# 3. Unzip file under data_path
data_path = os.path.join(self.data_path, task_type, data_size)
file_util.unzip(file_tmp, data_path)
# 4. Record to dataset_desc_local.
if self.dataset_desc.dataset_desc_local is not None:
self.dataset_desc.dataset_desc_local = pd.concat([self.dataset_desc.dataset_desc_local, meta], 0)
else:
self.dataset_desc.dataset_desc_local = meta.copy()
self.dataset_desc.dataset_desc_local.to_csv(self.dataset_desc._desc_local_file(), index=False)
# 5. Remove tmp file.
os.remove(file_tmp)
class TSTaskDataLoader():
def __init__(self, data_path, data_source=None):
self.data_path = data_path
self.dataset_loader = TSDataSetLoader(data_path, data_source)
def list(self, type=None, data_size=None):
df = self.dataset_loader.dataset_desc.dataset_desc
df = df_util.filter(df, 'data_size', data_size)
df = df_util.filter(df, 'type', type)
df = df[df['format'] != 'tsf'] # todo support in the future.
taskdata_list = []
for i, row in df.iterrows():
task_count = row['task_count']
if task_count == 1:
taskdata_list.append(str(row['id']))
else:
taskdata_list = taskdata_list + [str("{}_{}".format(row['id'], i)) for i in range(task_count)]
return taskdata_list
def exists(self, task_data_id):
df = self.dataset_loader.dataset_desc.dataset_desc
dataset_id, task_no = _to_dataset(task_data_id)
row = df[df['id'] == dataset_id]
return row.shape[0] == 1 and int(row['task_count'].values[0]) >= task_no
def load_meta(self, task_data_id):
dataset_id, task_no = _to_dataset(task_data_id)
return self.dataset_loader.load_meta(dataset_id)
def load(self, task_data_id):
return self.load_train(task_data_id), self.load_test(task_data_id)
def load_train(self, task_data_id):
dataset_id, task_no = _to_dataset(task_data_id)
if self.dataset_loader.data_format(dataset_id) == 'csv':
return self.dataset_loader.load_train(dataset_id)
else:
logger.info("To be implement.") # todo
raise NotImplemented
def load_test(self, task_data_id):
dataset_id, task_no = _to_dataset(task_data_id)
if self.dataset_loader.data_format(dataset_id) == 'csv':
return self.dataset_loader.load_test(dataset_id)
else:
logger.info("To be implement.") # todo
raise NotImplemented
class TSTaskLoader(TaskLoader):
def __init__(self, data_path, data_source=None):
self.data_path = data_path
self.taskdata_loader = TSTaskDataLoader(data_path, data_source)
def list(self, type=None, data_size=None):
return self.taskdata_loader.list(type, data_size)
def exists(self, taskconfig_id):
return self.taskdata_loader.exists(taskconfig_id)
def load(self, taskconfig_id):
metadata = self.taskdata_loader.load_meta(taskconfig_id)
dataset_id, task_no = _to_dataset(taskconfig_id)
task = TSTaskConfig(taskconfig_id=taskconfig_id,
dataset_id=dataset_id,
taskdata=TSTaskData(id=taskconfig_id, dataset_id=dataset_id, name=metadata['name'],
taskdata_loader=self.taskdata_loader),
date_name=metadata['date_name'],
task=metadata['task'],
horizon=metadata['horizon'],
data_size=metadata['data_size'],
shape=metadata['shape'],
series_name=metadata['series_name'],
covariables_name=metadata['covariables_name'],
dtformat=metadata['dtformat'])
return task
| 40.842912 | 113 | 0.648124 |
ace146271ac63b7d24b88ad8cad2556fdc51e271 | 2,747 | py | Python | tests/test_models/test_install_config.py | p-sherratt/shellfoundry | d1f35a31123b9e701c801345fb633b6fda5420b7 | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_install_config.py | p-sherratt/shellfoundry | d1f35a31123b9e701c801345fb633b6fda5420b7 | [
"Apache-2.0"
] | 1 | 2021-03-25T23:21:02.000Z | 2021-03-25T23:21:02.000Z | tests/test_models/test_install_config.py | p-sherratt/shellfoundry | d1f35a31123b9e701c801345fb633b6fda5420b7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from mock import Mock, patch
from shellfoundry.models.install_config import InstallConfig
class TestInstallConfig(unittest.TestCase):
def test_two_instances_should_be_equal(self):
config1 = InstallConfig("localhost", 9000, "YOUR_USERNAME", "YOUR_PASSWORD", "Global", "author",
"online_mode", "template_location", "github_login", "github_password")
config2 = InstallConfig("localhost", 9000, "YOUR_USERNAME", "YOUR_PASSWORD", "Global", "author",
"online_mode", "template_location", "github_login", "github_password")
self.assertEqual(config1, config2)
def test_two_instances_should_not_be_equal(self):
config1 = InstallConfig("localhost", 9000, "YOUR_USERNAME", "YOUR_PASSWORD", "Global", "author",
"online_mode", "template_location", "github_login", "github_password")
config2 = InstallConfig("remote", 1, "U", "P", "Local", "co-author", "False", "local_templates_location",
"github_login_new", "github_password_new")
self.assertNotEqual(config1, config2)
@patch("platform.node", Mock(return_value="machine-name-here"))
def test_encrypted_password_field_becomes_decrypted(self):
config = InstallConfig("localhost", 9000, "YOUR_USERNAME", "DAUOAQc=", "Global", "author",
"online_mode", "template_location", "github_login", "github_password")
self.assertEqual("admin", config.password)
@patch("platform.node", Mock(return_value="machine-name-here"))
def test_non_encrypted_password_field_stays_regular(self):
config = InstallConfig("localhost", 9000, "YOUR_USERNAME", "admin", "Global", "author",
"online_mode", "template_location", "github_login", "github_password")
self.assertEqual("admin", config.password)
@patch("platform.node", Mock(return_value="machine-name-here"))
def test_encrypted_github_password_field_becomes_decrypted(self):
config = InstallConfig("localhost", 9000, "CS_USERNAME", "CS_PASSWORD", "Global", "author",
"online_mode", "template_location", "github_login", "DAUOAQc=")
self.assertEqual("admin", config.github_password)
@patch("platform.node", Mock(return_value="machine-name-here"))
def test_non_encrypted_github_password_field_stays_regular(self):
config = InstallConfig("localhost", 9000, "YOUR_USERNAME", "admin", "Global", "author",
"online_mode", "template_location", "github_login", "admin")
self.assertEqual("admin", config.github_password)
| 54.94 | 113 | 0.659265 |
ace14770e88cf0d974074d1374693d541ad14257 | 2,812 | py | Python | ansible_collections/arista/cvp/plugins/module_utils/cv_tools.py | jrecchia1029/ansible-cvp | 7cd2eb4b8748fad295f03b9eb5808587965df470 | [
"Apache-2.0"
] | null | null | null | ansible_collections/arista/cvp/plugins/module_utils/cv_tools.py | jrecchia1029/ansible-cvp | 7cd2eb4b8748fad295f03b9eb5808587965df470 | [
"Apache-2.0"
] | null | null | null | ansible_collections/arista/cvp/plugins/module_utils/cv_tools.py | jrecchia1029/ansible-cvp | 7cd2eb4b8748fad295f03b9eb5808587965df470 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8 -*-
#
# FIXME: required to pass ansible-test
# GNU General Public License v3.0+
#
# Copyright 2019 Arista Networks AS-EMEA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import logging
LOGGER = logging.getLogger('arista.cvp.cv_tools')
def isIterable(testing_object=None):
"""
Test if an object is iterable or not.
Test if an object is iterable or not. If yes return True, else return False.
Parameters
----------
testing_object : any, optional
Object to test if it is iterable or not, by default None
"""
try:
some_object_iterator = iter(testing_object) # noqa # pylint: disable=unused-variable
return True
except TypeError as te: # noqa # pylint: disable=unused-variable
return False
def match_filter(input, filter, default_always='all'):
"""
Function to test if an object match userdefined filter.
Function support list of string and string as filter.
A default value is provided when calling function and if this default value for always matching is configured by user, then return True (Always matching)
If filter is a list, then we iterate over the input and check if it matches an entry in the filter.
Parameters
----------
input : string
Input to test of that match filter or not.
filter : list
List of string to compare against input.
default_always : str, optional
Keyword to consider as always matching, by default 'all'
default_none : str, optional
Keyword to consider as never matching, by default 'none'
Returns
-------
bool
True if input matchs filter, False in other situation
"""
# W102 Workaround to avoid list as default value.
if filter is None:
LOGGER.critical('Filter is not set, configure default value to [\'all\']')
filter = ["all"]
LOGGER.debug(" * is_in_filter - filter is %s", str(filter))
LOGGER.debug(" * is_in_filter - input string is %s", str(input))
if "all" in filter:
return True
elif any(element in input for element in filter):
return True
LOGGER.debug(" * is_in_filter - NOT matched")
return False
| 31.954545 | 157 | 0.690256 |
ace147bc09a6cab257389bf805a1ea9af5c31067 | 898 | py | Python | Ex3/02.py | sreyom31/app-srm | 5ca1ac1a3681e160564b05a7d10db2d1b94e5fd1 | [
"MIT"
] | null | null | null | Ex3/02.py | sreyom31/app-srm | 5ca1ac1a3681e160564b05a7d10db2d1b94e5fd1 | [
"MIT"
] | null | null | null | Ex3/02.py | sreyom31/app-srm | 5ca1ac1a3681e160564b05a7d10db2d1b94e5fd1 | [
"MIT"
] | null | null | null | # Write a Python program to crate four empty classes, CTECH, CINTEL, NWC and DSBS. Now create some instances and check whether they are instances of the said classes or not. Also, check whether the said classes are subclasses of the built-in object class or not.
class CTECH:
pass
class CINTEL:
pass
class NWC:
pass
class DSBS:
pass
student1 = CTECH()
marks1 = CINTEL()
stu=NWC()
ma=DSBS()
print(isinstance(student1, CTECH))
print(isinstance(marks1, CTECH))
print(isinstance(stu, CTECH))
print(isinstance(ma, CTECH))
print(isinstance(marks1, CINTEL))
print(isinstance(student1, CINTEL))
print(isinstance(stu, CINTEL))
print(isinstance(ma, CINTEL))
print("\nCheck whether the said classes are subclasses of the built-in object class or not.")
print(issubclass(CTECH, object))
print(issubclass(CINTEL, object))
print(issubclass(NWC, object))
print(issubclass(DSBS, object)) | 28.0625 | 262 | 0.752784 |
ace149bfe40145b754c470810b774b6129eedea1 | 9,952 | py | Python | python3/overlap.py | RonBeavis/IntrinsicDisorder | 28b7e93fc31f291a068720d0197d1c79359aa727 | [
"Apache-2.0"
] | null | null | null | python3/overlap.py | RonBeavis/IntrinsicDisorder | 28b7e93fc31f291a068720d0197d1c79359aa727 | [
"Apache-2.0"
] | null | null | null | python3/overlap.py | RonBeavis/IntrinsicDisorder | 28b7e93fc31f291a068720d0197d1c79359aa727 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
Copyright © 2022 Ron Beavis
import cgi,cgitb
import sys
import requests
import re
import json
import datetime
def trypsin(_seq):
# a list of elegible sites
ps = [-1]
# residues cut by trypsin
ss = set(['K','R'])
# residues that block cleavage when C-terminal to site
cbad = set(['P'])
# length of protein
lseq = len(_seq)
seqs = list(_seq)
peps = []
# iterate through the sequence
for i,res in enumerate(seqs):
if i == lseq - 1:
continue
# if you just passed a cleavage site, act
if i == 0 or (seqs[i-1] in ss and seqs[i] not in cbad):
j = i + 1
# is the next residue a cleavage site too
if j < lseq-1 and seqs[j] in ss and seqs[j+1] not in cbad:
peps.append({'seq':'%s' % (_seq[i:j+1]),'f':i+1,'l':j+1})
if i == 0:
j += 1
else:
if j == lseq - 1:
peps.append({'seq':'%s' % (_seq[i:j+1]),'f':i+1,'l':j+1})
continue
# find the next cleavage site
while j < lseq-1 and not (seqs[j] in ss and seqs[j+1] not in cbad):
j += 1
if j < lseq -2:
peps.append({'seq':'%s' % (_seq[i:j+1]),'f':i+1,'l':j+1})
j += 1
# deal with the last residue cleavage problem
if j < lseq-1 and seqs[j] in ss and seqs[i+1] not in cbad:
peps.append({'seq':'%s' % (_seq[i:j+1]),'f':i+1,'l':j+1})
elif j >= lseq - 1:
peps.append({'seq':'%s' % (_seq[i:j+1]),'f':i+1,'l':lseq})
else:
pass
# make sure everything is in order
peps = [p for p in sorted(peps, key=lambda k: k['f'])]
return peps
def get_peptides(_e):
    """Fetch the GPMDB Excel-style peptide listing for accession *_e*.

    Returns the response body as a list of lines, or None when the HTTP
    request fails (the error is printed to stdout).
    """
    endpoint = 'http://gpmdb.thegpm.org/protein/model/%s&excel=1' % (_e)
    http = requests.session()
    try:
        response = http.get(endpoint, timeout=20)
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    # normalise Windows line endings before splitting into lines
    normalised = re.sub('\r\n', '\n', response.text)
    return normalised.splitlines()
def get_protein(_l):
    """Return the protein sequence for accession *_l* from GPMDB.

    Returns None when the HTTP request fails (error printed) or when the
    response cannot be parsed as JSON.
    """
    endpoint = 'http://gpmdb.thegpm.org/1/protein/sequence/acc=%s' % (_l)
    http = requests.session()
    try:
        response = http.get(endpoint, timeout=20)
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    # the service answers with a one-element JSON list
    try:
        payload = json.loads(response.text)
    except:
        return None
    return payload[0]
def get_description(_l):
    """Return the protein description for accession *_l* from GPMDB.

    Returns None when the HTTP request fails (error printed) or when the
    response cannot be parsed as JSON.
    """
    endpoint = 'http://gpmdb.thegpm.org/1/protein/description/acc=%s' % (_l)
    http = requests.session()
    try:
        response = http.get(endpoint, timeout=20)
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    # the service answers with a one-element JSON list
    try:
        payload = json.loads(response.text)
    except:
        return None
    return payload[0]
def print_top(_l,_l2):
    """Emit the HTML document head and the opening <body> markup.

    *_l* and *_l2* are the two protein accessions being compared; they are
    interpolated into the OpenGraph and Twitter card metadata so that link
    previews describe this particular overlap page.
    """
    desc = "Sequence overlap %s ∩ %s" % (_l,_l2)
    # document preamble and <title>
    print('''<!DOCTYPE html>
<html lang="en" class="no-js">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta charset="utf-8">
<title>Sequence overlap/intersection display</title>
<meta name="viewport" content="width=device-width,initial-scale=1" />
<meta name="robots" content="index,nofollow,noarchive">''')
    # OpenGraph metadata for social-media link previews
    print('''
<meta property="og:locale" content="en_EN" />
<meta property="og:type" content="website" />
<meta property="og:title" content="GPMDB Sequence overlap/intersection display" />
<meta property="og:description" content="%s" />
<meta property="og:url" content="https://intrinsicdisorder.com" />
<meta property="og:image:width" content="800" />
<meta property="og:image:height" content="400" />
<meta property="og:image" content="https://intrinsicdisorder.com/pics/ov.png" />
<meta property="og:image:secure_url" content="https://intrinsicdisorder.com/pics/ov.png" />
''' % (desc))
    # NOTE(review): 'v' (the label with '|' and ':' replaced by '_') is
    # computed but never used below — presumably intended for the URLs in
    # the Twitter metadata; confirm.
    v = re.sub(r'[\|\:]',r'_',_l)
    # Twitter card metadata
    print('''
<meta name="twitter:url" content="https://intrinsicdisorder.com/a/overlap.py?l=%s&l2=%s">
<meta name="twitter:domain" content="intrinsicdisorder.com">
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:site" content="@norsivaeb" />
<meta name="twitter:description" content="%s" />
<meta name="twitter:title" content="Intrinsic Disorder observed residue overlap - %s" />
<meta name="twitter:image" content="https://intrinsicdisorder.com/pics/ov.png" />
''' % (_l,_l2,desc,desc))
    # inline stylesheet: fonts, residue-highlight classes and button styling
    print('''
<style media="screen" type="text/css">
@font-face {
font-family: 'Anonymous Pro';
font-style: normal;
font-weight: 400;
src: local('Anonymous Pro'), local('Anonymous Pro-Regular'), url('/fonts/AnonymousPro-Regular.ttf');
format('ttf');
}
@font-face {
font-family: 'Anonymous Pro';
font-style: normal;
font-weight: 700;
src: local('Anonymous Pro-Bold'), url('/fonts/AnonymousPro-Bold.ttf');
format('ttf');
}
@font-face {
font-family: 'Anonymous Pro';
font-style: italic;
font-weight: 400;
src: local('Anonymous Pro-Italic'), url('/fonts/AnonymousPro-Italic.ttf');
format('ttf');
}
body {
color: #000000;
background-color: #FFFFFF;
font-weight: normal;
font-family: "Anonymous Pro",serif;
font-size: 13pt;
margin: auto;
}
.cdiv {
display: table;
margin: 0 auto;
}
.num {
color: grey;
font-size: 11pt;
}
.red {
background-color: #ff6666;
color: white;
border: 1px solid white;
border-radius: 5px;
cursor: pointer;
}
.blue {
background-color: #6666ff;
color: white;
border: 1px solid white;
border-radius: 5px;
cursor: pointer;
}
.accession {
background-color: #996633;
color: white;
border: 1px solid white;
border-radius: 5px;
cursor: pointer;
}
.unmarked {
color: grey;
border: 1px solid white;
border-radius: 5px;
cursor: pointer;
}
.mem {
background-color: #aaaaaa;
color: #FFFFFF;
}
.highlight {
background-color: #00cc99;
color: white;
}
div.ex1 {
margin: 3px 3px 3px 3px;
}
.button {
font-weight: normal;
position: relative;
background-color: #4CAF50;
border: none;
font-size: 16px;
color: #FFFFFF;
padding: 3px;
width: 30px;
text-align: center;
-webkit-transition-duration: 0.4s; /* Safari */
transition-duration: 0.4s;
text-decoration: none;
overflow: hidden;
cursor: pointer;
margin-bottom: 8px;
margin-top: 8px;
}
.button:after {
font-weight: normal;
content: "";
background: #f1f1f1;
display: block;
position: absolute;
padding-top: 300%;
padding-left: 10%;
margin-left: -20px !important;
margin-top: -120%;
opacity: 0;
transition: all 0.8s;
}
.button:active:after {
font-weight: normal;
padding: 0;
margin: 0;
opacity: 1;
transition: 0s
}
</style>
</head>''')
    # open the body and the centred content container closed by print_bottom()
    print('''\n<body>
<div id="main"><div id="main_body" class="cdiv">\n''')
    return
def print_bottom():
    """Emit the page footer: a timestamped copyright line and the tags
    closing the containers opened by print_top()."""
    stamp = datetime.datetime.now()
    print('<p id="copyright">%s Intrinsic Disorder</p>' % (stamp))
    closing = '''</div></div></body></html>\n'''
    print(closing)
    return
def print_form(_l1, _l2):
    """Render the accession-entry HTML form, pre-filled with the two
    protein labels *_l1* and *_l2*."""
    markup = [
        '<div class="ex1">',
        '<form style="display: inline;" name="seq_form" action="/a/overlap.py/" METHOD="GET">',
        '<input id="red" name="l" size="20" value="%s" placeholder="ENSP0...." /> overlaps on <input name="l2" size="20" value="%s" placeholder="ENSP0...." />' % (_l1, _l2),
        '<input type="submit" class="button" value="↻" title="refresh display" />',
        '</form>',
    ]
    for line in markup:
        print(line)
# --- CGI entry point: build and render the overlap page ------------------
cgitb.enable()
form = cgi.FieldStorage()
print('Content-type: text/html\n\n')
try:
    label1 = form['l'].value
    label2 = form['l2'].value
    print_top(label1,label2)
except:
    # one or both accession parameters missing: show an empty form and stop
    print_top('','')
    print_form('','')
    print('</body></html>\n')
    exit()
# Collect the observed peptides of the first protein, keyed by sequence
# with L and I collapsed to 'J' (isobaric residues, indistinguishable here)
ls = get_peptides(label1)
protein_1 = {}
for l in ls:
    v = l.strip()
    if v.find('Sequence') != -1:
        continue
    vs = v.split('\t')
    # NOTE(review): vs[5] is read two lines below, so this guard
    # presumably should be 'len(vs) < 6' — confirm against the GPMDB
    # export format before changing.
    if len(vs) < 5:
        continue
    sj = re.sub(r'[LI]',r'J',vs[5])
    if sj not in protein_1:
        # columns 0 and 1 are the first/last residue coordinates
        protein_1[sj] = [(int(vs[0]),int(vs[1]))]
    else:
        protein_1[sj].append((int(vs[0]),int(vs[1])))
# Keep only peptides of the second protein that also occur in the first
protein_2 = {}
ls = get_peptides(label2)
mset = set()
for l in ls:
    v = l.strip()
    if v.find('Sequence') != -1:
        continue
    vs = v.split('\t')
    if len(vs) < 5:
        continue
    sj = re.sub(r'[LI]',r'J',vs[5])
    if sj not in protein_1:
        continue
    if sj not in protein_2:
        protein_2[sj] = [(int(vs[0]),int(vs[1]))]
    else:
        protein_2[sj].append((int(vs[0]),int(vs[1])))
# mset = set of residue positions (1-based) of protein 2 covered by
# peptides shared with protein 1
for p in protein_2:
    ls = protein_2[p]
    for l in ls:
        for i in range(l[0],l[1]+1):
            mset.add(i)
# page header: linked descriptions of both proteins plus overlap statistics
seq = get_protein(label2)
des = re.sub('\[.+',r'',get_description(label1))
print('<p><a href="/protein/model/%s" target="_blank">%s</a>—%s <br/>' % (label1,label1,des))
print('overlap with observed peptides shown on <br/>')
des = re.sub('\[.+',r'',get_description(label2))
print('<a href="/protein/model/%s" target="_blank">%s</a>—%s</p>' % (label2,label2,des))
print('<p><span class="red">overlapping residues</span>: %i<br/>total residues: %i<br/><span class="red">overlap</span>: %.1f%%</p>' % (len(mset),len(seq),100*(len(mset)/len(seq))))
#print('<pre>')
# render the sequence 50 residues per row, highlighting covered residues
display = '<div class="ex1">\n'
for i,s in enumerate(seq):
    if i != 0 and i % 50 == 0:
        display += ' <span class="num">%i</span></div>\n<div class="ex1">' % (i)
    if i+1 in mset:
        display += '<span class="red" title="%s %i">%s</span>' % (s,i+1,s)
    else:
        display += '<span class="unmarked" title="%s %i">%s</span>' % (s,i+1,s)
display += '</div>\n'
print(display)
print_form(label1,label2)
# table of theoretical tryptic peptides with covered residues marked
peps = trypsin(seq)
if len(peps) == 0:
    exit()
print('<hr width="650" style="margin-left: -20px;"/><div id="tryptic_peptides">')
print('<table cellspacing="1" cellpadding="2">')
print('<tr><td width="50">start</td><td width="50">end</td><td>1° tryptic peptide</td></tr>')
full = 0       # count of tryptic peptides fully covered by observed residues
full_aa = 0    # total residues contributed by those fully-covered peptides
for p in peps:
    line = '<tr><td>%i</td><td>%i</td><td>' % (p['f'],p['l'])
    cf = 0
    for i,a in enumerate(p['seq']):
        c = p['f']+i
        if c in mset:
            line += '<span class="red">%s</span>' % (a)
            cf += 1
        else:
            line += '%s' % (a)
    if cf == len(p['seq']):
        full += 1
        full_aa += len(p['seq'])
    line += '</td></tr>\n'
    print(line)
print('<tr><td></td><td></td><td>peptides: %i/%i (%.1f%%), residues: %i/%i (%.1f%%)</td>' % (full,len(peps),100.0*full/len(peps),full_aa,len(seq),100*full_aa/len(seq)))
print('</table></div>')
print_bottom()
| 26.897297 | 181 | 0.618569 |
ace149e843d3cda9a7f2da90c705e418d4d86164 | 27,768 | py | Python | papelex_magento/catalog.py | pauloacmelo/papelex_winthor | 4993aba5c24c4ea7f203058f164c14bc19d64980 | [
"MIT"
] | 1 | 2020-06-14T04:59:42.000Z | 2020-06-14T04:59:42.000Z | papelex_magento/catalog.py | pauloacmelo/papelex_winthor | 4993aba5c24c4ea7f203058f164c14bc19d64980 | [
"MIT"
] | null | null | null | papelex_magento/catalog.py | pauloacmelo/papelex_winthor | 4993aba5c24c4ea7f203058f164c14bc19d64980 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
'''
magento.catalog
Product Catalog API for magento
:license: BSD, see LICENSE for more details
'''
import warnings
from .api import API
class Category(API):
    """XML-RPC proxy for Magento's ``catalog_category`` API."""

    __slots__ = ()

    def currentStore(self, store_view=None):
        """Get, or optionally set, the current store view; returns int."""
        if store_view:
            args = [store_view]
        else:
            args = []
        return int(self.call('catalog_category.currentStore', args))

    def tree(self, parent_id=None, store_view=None):
        """Return the hierarchical tree of categories as a dict."""
        return self.call('catalog_category.tree', [parent_id, store_view])

    def level(self, website=None, store_view=None, parent_category=None):
        """Return one level of categories for a website/store/parent."""
        params = [website, store_view, parent_category]
        return self.call('catalog_category.level', params)

    def info(self, category_id, store_view=None, attributes=None):
        """Return details of one category as a dict."""
        params = [category_id, store_view, attributes]
        return self.call('catalog_category.info', params)

    def create(self, parent_id, data, store_view=None):
        """Create a category under *parent_id*; return its integer ID."""
        new_id = self.call('catalog_category.create',
                           [parent_id, data, store_view])
        return int(new_id)

    def update(self, category_id, data, store_view=None):
        """Update a category; return True on success."""
        ok = self.call('catalog_category.update',
                       [category_id, data, store_view])
        return bool(ok)

    def move(self, category_id, parent_id, after_id=None):
        """Move a category within the tree; return True on success."""
        ok = self.call('catalog_category.move',
                       [category_id, parent_id, after_id])
        return bool(ok)

    def delete(self, category_id):
        """Delete a category; return True on success."""
        return bool(self.call('catalog_category.delete', [category_id]))

    def assignedproducts(self, category_id, store):
        """Return the products assigned to a category as a dict."""
        return self.call('catalog_category.assignedProducts',
                         [category_id, store])

    #: A proxy for :meth:`assignedproducts`
    assigned_products = assignedproducts

    def assignproduct(self, category_id, product, position=None):
        """Assign a product (ID or SKU) to a category; True on success."""
        ok = self.call('catalog_category.assignProduct',
                       [category_id, product, position])
        return bool(ok)

    #: A proxy for :meth:`assignproduct`
    assign_product = assignproduct

    def updateproduct(self, category_id, product, position=None):
        """Update an assigned product's data; True on success."""
        ok = self.call('catalog_category.updateProduct',
                       [category_id, product, position])
        return bool(ok)

    #: A proxy for :meth:`updateproduct`
    update_product = updateproduct

    def removeproduct(self, category_id, product):
        """Remove a product (ID or SKU) from a category; True on success."""
        ok = self.call('catalog_category.removeProduct',
                       [category_id, product])
        return bool(ok)

    #: A proxy for :meth:`removeproduct`
    remove_product = removeproduct
class CategoryAttribute(API):
    """XML-RPC proxy for Magento's category-attribute API.

    Exposes the attribute list of categories and per-attribute options.
    """

    __slots__ = ()

    def currentStore(self, store_view=None):
        """Get, or optionally set, the current store view; returns int."""
        if store_view:
            args = [store_view]
        else:
            args = []
        return int(self.call('catalog_category_attribute.currentStore', args))

    def list(self):
        """Return the category attributes."""
        return self.call('category_attribute.list', [])

    def options(self, attribute_id, store_view=None):
        """Return the options of one attribute as a list of dicts."""
        return self.call('category_attribute.options',
                         [attribute_id, store_view])
class Product(API):
    """XML-RPC proxy for Magento's ``catalog_product`` API."""

    __slots__ = ()

    def currentStore(self, store_view=None):
        """Get, or optionally set, the current store view; returns int."""
        if store_view:
            args = [store_view]
        else:
            args = []
        return int(self.call('catalog_product.currentStore', args))

    def list(self, filters=None, store_view=None):
        """Return products matching *filters* as a list of dicts.

        ``filters`` has the form ``{<attribute>: {<operator>: <value>}}``,
        e.g. ``{'firstname': {'ilike': 'sharoon'}}``.
        """
        return self.call('catalog_product.list', [filters, store_view])

    def info(self, product, store_view=None, attributes=None,
             identifierType=None):
        """Return the data of one product as a dict.

        ``identifierType`` selects whether *product* is an ID or a SKU.
        """
        params = [product, store_view, attributes, identifierType]
        return self.call('catalog_product.info', params)

    def create(self, product_type, attribute_set_id, sku, data):
        """Create a product and return its integer ID."""
        new_id = self.call('catalog_product.create',
                           [product_type, attribute_set_id, sku, data])
        return int(new_id)

    def update(self, product, data, store_view=None, identifierType=None):
        """Update a product (by ID or SKU); return True on success."""
        ok = self.call('catalog_product.update',
                       [product, data, store_view, identifierType])
        return bool(ok)

    def setSpecialPrice(self, product, special_price=None,
                        from_date=None, to_date=None, store_view=None,
                        identifierType=None):
        """Set a product's special price and date range; True on success."""
        ok = self.call('catalog_product.setSpecialPrice',
                       [product, special_price, from_date, to_date,
                        store_view, identifierType])
        return bool(ok)

    def getSpecialPrice(self, product, store_view=None, identifierType=None):
        """Return a product's special-price data as a dict."""
        return self.call('catalog_product.getSpecialPrice',
                         [product, store_view, identifierType])

    def delete(self, product, identifierType=None):
        """Delete a product (by ID or SKU); return True on success."""
        return bool(self.call('catalog_product.delete',
                              [product, identifierType]))
class ProductAttribute(API):
    """XML-RPC proxy for Magento's product-attribute API."""

    __slots__ = ()

    def currentStore(self, store_view=None):
        """Get, or optionally set, the current store view; returns int."""
        if store_view:
            args = [store_view]
        else:
            args = []
        return int(self.call('catalog_product_attribute.currentStore', args))

    def list(self, attribute_set_id):
        """Return the attributes of one attribute set."""
        return self.call('catalog_product_attribute.list', [attribute_set_id])

    def info(self, attribute):
        """Return details of one attribute (by ID or code)."""
        return self.call('catalog_product_attribute.info', [attribute])

    def options(self, attribute, store_view=None):
        """Return the options of one attribute (by ID or code)."""
        return self.call('catalog_product_attribute.options',
                         [attribute, store_view])

    def addOption(self, attribute, data):
        """Add options to an attribute (Magento > 1.7.0); True on success.

        ``data`` example::

            {'label': [{'store_id': [0, 1], 'value': 'Value'}],
             'order': 1, 'is_default': 1}
        """
        return bool(self.call('product_attribute.addOption',
                              [attribute, data]))

    def createOption(self, *a, **kw):
        """Deprecated alias for :meth:`addOption`."""
        warnings.warn(
            "ProductAttribute: createOption is deprecated, use addOption instead."
        )
        return self.addOption(*a, **kw)

    def removeOption(self, attribute, option):
        """Remove one option from an attribute (Magento > 1.7.0)."""
        return bool(self.call('product_attribute.removeOption',
                              [attribute, option]))

    def create(self, data):
        """Create an attribute from *data*; return its integer ID."""
        return self.call('catalog_product_attribute.create', [data])

    def update(self, attribute, data):
        """Update an attribute's entity data."""
        return self.call('catalog_product_attribute.update', [attribute, data])
class ProductAttributeSet(API):
    """XML-RPC proxy for Magento's product attribute-set API."""

    __slots__ = ()

    def list(self):
        """Return the product attribute sets as a list of dicts."""
        return self.call('catalog_product_attribute_set.list', [])

    def create(self, attribute_set_name, skeleton_set_id):
        """Create an attribute set cloned from a skeleton set.

        When unsure, use the "Default" attribute set as the skeleton.
        Returns the new set's integer ID.
        """
        return self.call('catalog_product_attribute_set.create',
                         [attribute_set_name, skeleton_set_id])

    def attributeAdd(self, attribute_id, attribute_set_id):
        """Add an existing attribute to an attribute set."""
        return self.call('catalog_product_attribute_set.attributeAdd',
                         [attribute_id, attribute_set_id])

    def attributeRemove(self, attribute_id, attribute_set_id):
        """Remove an existing attribute from an attribute set."""
        return self.call('catalog_product_attribute_set.attributeRemove',
                         [attribute_id, attribute_set_id])
class ProductTypes(API):
    """XML-RPC proxy for Magento's product-type listing."""

    __slots__ = ()

    def list(self):
        """Return the available product types as a list of dicts."""
        return self.call('catalog_product_type.list', [])
class ProductImages(API):
    """XML-RPC proxy for Magento's product-media (image) API."""

    __slots__ = ()

    def currentStore(self, store_view=None):
        """Get, or optionally set, the current store view; returns int."""
        args = [store_view] if store_view else []
        return int(self.call('catalog_product_attribute_media.currentStore',
                             args))

    def list(self, product, store_view=None, identifierType=None):
        """Return the images of one product (by ID or SKU)."""
        return self.call('catalog_product_attribute_media.list',
                         [product, store_view, identifierType])

    def info(self, product, image_file, store_view=None, identifierType=None):
        """Return the data of one product image."""
        params = [product, image_file, store_view, identifierType]
        return self.call('catalog_product_attribute_media.info', params)

    def types(self, attribute_set_id):
        """Return the image types (image, small_image, thumbnail, ...)."""
        return self.call('catalog_product_attribute_media.types',
                         [attribute_set_id])

    def create(self, product, data, store_view=None, identifierType=None):
        """Upload a new product image; return the image file name.

        ``data`` example::

            {'label': 'description of photo', 'position': '1',
             'exclude': '0', 'types': ['image', 'small_image', 'thumbnail']}
        """
        return self.call('catalog_product_attribute_media.create',
                         [product, data, store_view, identifierType])

    def update(self, product, img_file_name, data, store_view=None,
               identifierType=None):
        """Update a product image; return the image file name.

        *img_file_name* is e.g. ``'/m/y/my_image_thumb.jpg'``; ``data``
        takes the same shape as in :meth:`create`.
        """
        params = [product, img_file_name, data, store_view, identifierType]
        return self.call('catalog_product_attribute_media.update', params)

    def remove(self, product, img_file_name, identifierType=None):
        """Remove a product image by its file name."""
        return self.call('catalog_product_attribute_media.remove',
                         [product, img_file_name, identifierType])
class ProductTierPrice(API):
    """XML-RPC proxy for Magento's product tier-price API."""

    __slots__ = ()

    def info(self, product, identifierType=None):
        """Return the tier prices of one product (by ID or SKU)."""
        return self.call('catalog_product_attribute_tier_price.info',
                         [product, identifierType])

    def update(self, product, data, identifierType=None):
        """Replace a product's tier prices with *data*; True on success.

        All existing tier prices of the product are replaced. ``data`` is
        a list of dicts such as::

            {'website': 'all', 'customer_group_id': '1',
             'qty': '99.0000', 'price': '123.9900'}
        """
        ok = self.call('catalog_product_attribute_tier_price.update',
                       [product, data, identifierType])
        return bool(ok)
class ProductLinks(API):
    """XML-RPC proxy for Magento's product-link API.

    Valid link types are ``cross_sell``, ``up_sell``, ``related`` and
    ``grouped``.
    """

    __slots__ = ()

    def list(self, link_type, product, identifierType=None):
        """Return the products linked to *product* via *link_type*."""
        return self.call('catalog_product_link.list',
                         [link_type, product, identifierType])

    def assign(self, link_type, product, linked_product, data=None,
               identifierType=None):
        """Create a product link; return True on success.

        ``data`` carries link attributes, e.g. ``{'position': '0', 'qty': 1}``.
        """
        ok = self.call('catalog_product_link.assign',
                       [link_type, product, linked_product, data,
                        identifierType])
        return bool(ok)

    def update(self, link_type, product, linked_product, data=None,
               identifierType=None):
        """Update an existing product link; return True on success."""
        ok = self.call('catalog_product_link.update',
                       [link_type, product, linked_product, data,
                        identifierType])
        return bool(ok)

    def remove(self, link_type, product, linked_product, identifierType=None):
        """Remove a product link; return True on success."""
        ok = self.call('catalog_product_link.remove',
                       [link_type, product, linked_product, identifierType])
        return bool(ok)

    def types(self):
        """Return the available link types."""
        return self.call('catalog_product_link.types', [])

    def attributes(self, link_type):
        """Return the attributes of one link type.

        Example result::

            [{'code': 'position', 'type': 'int'},
             {'code': 'qty', 'type': 'decimal'}]
        """
        return self.call('catalog_product_link.attributes', [link_type])
class ProductConfigurable(API):
    """XML-RPC proxy for configurable products.

    These endpoints require zikzakmedia's ``magento_webservices``
    Magento plugin to be installed on the server side.
    """

    __slots__ = ()

    def info(self, product):
        """Return the configurable-product links of *product*."""
        return self.call('ol_catalog_product_link.list', [product])

    def getSuperAttributes(self, product):
        """Return the configurable (super) attributes of *product*."""
        return self.call('ol_catalog_product_link.listSuperAttributes',
                         [product])

    def setSuperAttributeValues(self, product, attribute):
        """Set values for one super attribute of *product*."""
        return self.call('ol_catalog_product_link.setSuperAttributeValues',
                         [product, attribute])

    def update(self, product, linked_products, attributes):
        """Link simple products to a configurable one; True on success."""
        ok = self.call('ol_catalog_product_link.assign',
                       [product, linked_products, attributes])
        return bool(ok)

    def remove(self, product, linked_products):
        """Unlink simple products from a configurable one; True on success."""
        ok = self.call('ol_catalog_product_link.remove',
                       [product, linked_products])
        return bool(ok)
class Inventory(API):
    """XML-RPC proxy for Magento's stock (``cataloginventory``) API."""

    __slots__ = ()

    def list(self, products):
        """Return stock data for the given product IDs/SKUs."""
        return self.call('cataloginventory_stock_item.list', [products])

    def update(self, product, data):
        """Update stock data for one product; return True on success.

        ``data`` example: ``dict(qty=99, is_in_stock='1')``.
        """
        ok = self.call('cataloginventory_stock_item.update', [product, data])
        return bool(ok)

    def update_multi(self, product_data_pairs):
        """Update stock for many products with a single multi call.

        *product_data_pairs* is a list of ``[product, data]`` pairs; the
        multi-call API is used because per-product updates are expensive
        on Magento.
        """
        calls = []
        for product_data_pair in product_data_pairs:
            calls.append(['cataloginventory_stock_item.update',
                          product_data_pair])
        return self.multiCall(calls)
| 31.305524 | 107 | 0.588051 |
ace14aefd9db9e42a9cba7cbe3c15eb55f66bc4b | 7,291 | py | Python | salt/runner.py | pille/salt | 47322575309faac8c4755287d930469caffc1c65 | [
"Apache-2.0"
] | 1 | 2019-06-27T13:03:07.000Z | 2019-06-27T13:03:07.000Z | salt/runner.py | pille/salt | 47322575309faac8c4755287d930469caffc1c65 | [
"Apache-2.0"
] | null | null | null | salt/runner.py | pille/salt | 47322575309faac8c4755287d930469caffc1c65 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Execute salt convenience routines
'''
# Import python libs
from __future__ import print_function
import multiprocessing
import datetime
import time
import logging
import collections
# Import salt libs
import salt.loader
import salt.exceptions
import salt.utils
import salt.minion
import salt.utils.event
from salt.utils.doc import strip_rst as _strip_rst
from salt.utils.event import tagify
from salt.utils.error import raise_error
from salt.output import display_output
logger = logging.getLogger(__name__)
class RunnerClient(object):
'''
The interface used by the :command:`salt-run` CLI tool on the Salt Master
It executes :ref:`runner modules <all-salt.runners>` which run on the Salt
Master.
Importing and using ``RunnerClient`` must be done on the same machine as
the Salt Master and it must be done using the same user that the Salt
Master is running as.
Salt's :conf_master:`external_auth` can be used to authenticate calls. The
eauth user must be authorized to execute runner modules: (``@runner``).
Only the :py:meth:`master_call` below supports eauth.
'''
    def __init__(self, opts):
        '''
        :param opts: the master configuration options dictionary
        '''
        self.opts = opts
        # Load the runner modules available to this master
        self.functions = salt.loader.runner(opts)
    def _proc_runner(self, fun, low, user, tag, jid):
        '''
        Run this method in a multiprocess target to execute the runner in a
        multiprocess and fire the return data on the event bus

        :param fun: name of the runner function to execute
        :param low: low data dictionary passed to the runner
        :param user: name recorded as the invoking user in the event data
        :param tag: base event tag; 'new' and 'ret' events are fired on it
        :param jid: job id recorded in the event data
        '''
        salt.utils.daemonize()
        # Master-side event bus used to publish job start and return data
        event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
        data = {'fun': 'runner.{0}'.format(fun),
                'jid': jid,
                'user': user,
                }
        # Announce that the job has started
        event.fire_event(data, tagify('new', base=tag))
        try:
            data['return'] = self.low(fun, low)
            data['success'] = True
        except Exception as exc:
            # Report the failure through the event bus instead of raising;
            # the caller only observes the fired 'ret' event
            data['return'] = 'Exception occurred in runner {0}: {1}: {2}'.format(
                fun,
                exc.__class__.__name__,
                exc,
            )
            data['success'] = False
        data['user'] = user
        event.fire_event(data, tagify('ret', base=tag))
        # this is a workaround because process reaping is defeating 0MQ linger
        time.sleep(2.0)  # delay so 0MQ event gets out before runner process
        # reaped
def _verify_fun(self, fun):
'''
Check that the function passed really exists
'''
if fun not in self.functions:
err = 'Function {0!r} is unavailable'.format(fun)
raise salt.exceptions.CommandExecutionError(err)
def get_docs(self, arg=None):
'''
Return a dictionary of functions and the inline documentation for each
'''
if arg:
target_mod = arg + '.' if not arg.endswith('.') else arg
docs = [(fun, self.functions[fun].__doc__)
for fun in sorted(self.functions)
if fun == arg or fun.startswith(target_mod)]
else:
docs = [(fun, self.functions[fun].__doc__)
for fun in sorted(self.functions)]
docs = dict(docs)
return _strip_rst(docs)
def cmd(self, fun, arg, kwarg=None):
'''
Execute a runner function
.. code-block:: python
>>> opts = salt.config.master_config('/etc/salt/master')
>>> runner = salt.runner.RunnerClient(opts)
>>> runner.cmd('jobs.list_jobs', [])
{
'20131219215650131543': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:56:50.131543',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
'20131219215921857715': {
'Arguments': [300],
'Function': 'test.sleep',
'StartTime': '2013, Dec 19 21:59:21.857715',
'Target': '*',
'Target-type': 'glob',
'User': 'saltdev'
},
}
'''
if not isinstance(kwarg, dict):
kwarg = {}
self._verify_fun(fun)
args, kwargs = salt.minion.parse_args_and_kwargs(
self.functions[fun],
arg,
kwarg)
return self.functions[fun](*args, **kwargs)
def low(self, fun, low):
'''
Pass in the runner function name and the low data structure
.. code-block:: python
runner.low({'fun': 'jobs.lookup_jid', 'jid': '20131219215921857715'})
'''
self._verify_fun(fun)
l_fun = self.functions[fun]
f_call = salt.utils.format_call(l_fun, low)
ret = l_fun(*f_call.get('args', ()), **f_call.get('kwargs', {}))
return ret
def async(self, fun, low, user='UNKNOWN'):
'''
Execute the runner in a multiprocess and return the event tag to use
to watch for the return
'''
jid = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
tag = tagify(jid, prefix='run')
#low['tag'] = tag
#low['jid'] = jid
proc = multiprocessing.Process(
target=self._proc_runner,
args=(fun, low, user, tag, jid))
proc.start()
return {'tag': tag}
def master_call(self, **kwargs):
'''
Execute a runner function through the master network interface (eauth).
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.master_call(
fun='jobs.list_jobs',
username='saltdev',
password='saltdev',
eauth='pam'
)
'''
load = kwargs
load['cmd'] = 'runner'
# sreq = salt.payload.SREQ(
# 'tcp://{0[interface]}:{0[ret_port]}'.format(self.opts),
# )
sreq = salt.transport.Channel.factory(self.opts, crypt='clear')
ret = sreq.send(load)
if isinstance(ret, collections.Mapping):
if 'error' in ret:
raise_error(**ret['error'])
return ret
class Runner(RunnerClient):
    '''
    Execute the salt runner interface
    '''
    def _print_docs(self):
        '''
        Print the inline documentation for the selected runner function(s).
        '''
        selected = self.opts.get('fun', None)
        docs = super(Runner, self).get_docs(selected)
        for name in sorted(docs):
            display_output('{0}:'.format(name), 'text', self.opts)
            print(docs[name])

    def run(self):
        '''
        Execute the runner sequence: print documentation when requested,
        otherwise invoke the configured runner function.
        '''
        if self.opts.get('doc', False):
            self._print_docs()
            return
        try:
            return super(Runner, self).cmd(
                self.opts['fun'], self.opts['arg'], self.opts)
        except salt.exceptions.SaltException as exc:
            message = str(exc)
            print(message)
            return message
| 32.695067 | 81 | 0.536689 |
ace14bbabb5ca1f1f1d2741dd421c4a0d96ce3f4 | 6,676 | py | Python | _dev/stats/predictive.py | nicolossus/pylfi | 7950aff5c36e7368cbe77b32ef348966b905f5cf | [
"MIT"
] | null | null | null | _dev/stats/predictive.py | nicolossus/pylfi | 7950aff5c36e7368cbe77b32ef348966b905f5cf | [
"MIT"
] | null | null | null | _dev/stats/predictive.py | nicolossus/pylfi | 7950aff5c36e7368cbe77b32ef348966b905f5cf | [
"MIT"
def _predictive(
    rng_key,
    model,
    posterior_samples,
    batch_shape,
    return_sites=None,
    parallel=True,
    model_args=(),
    model_kwargs={},  # NOTE(review): mutable default; read-only here, but fragile
):
    # Run ``model`` once per posterior sample and collect the traced site
    # values. ``batch_shape`` is the leading shape of the posterior samples.
    # Mask out the likelihood so conditioning sites do not contribute.
    model = numpyro.handlers.mask(model, mask=False)

    def single_prediction(val):
        # ``val`` is one (rng_key, sample-dict) pair.
        rng_key, samples = val
        model_trace = trace(seed(substitute(model, samples), rng_key)).get_trace(
            *model_args, **model_kwargs
        )
        if return_sites is not None:
            if return_sites == "":
                # Special signal: return every non-plate site in the trace.
                sites = {
                    k for k, site in model_trace.items() if site["type"] != "plate"
                }
            else:
                sites = return_sites
        else:
            # Default: sample sites not conditioned on, plus deterministics.
            sites = {
                k
                for k, site in model_trace.items()
                if (site["type"] == "sample" and k not in samples)
                or (site["type"] == "deterministic")
            }
        return {
            name: site["value"] for name, site in model_trace.items() if name in sites
        }

    num_samples = int(np.prod(batch_shape))
    if num_samples > 1:
        rng_key = random.split(rng_key, num_samples)
    # A PRNG key is a length-2 uint32 array, hence the trailing (2,).
    rng_key = rng_key.reshape(batch_shape + (2,))
    # With parallel=True, vmap over the whole batch in one chunk.
    chunk_size = num_samples if parallel else 1
    return soft_vmap(
        single_prediction, (rng_key, posterior_samples), len(
            batch_shape), chunk_size
    )
class Predictive(object):
    """
    This class is used to construct predictive distribution. The predictive distribution is obtained
    by running model conditioned on latent samples from `posterior_samples`.
    .. warning::
        The interface for the `Predictive` class is experimental, and
        might change in the future.
    :param model: Python callable containing Pyro primitives.
    :param dict posterior_samples: dictionary of samples from the posterior.
    :param callable guide: optional guide to get posterior samples of sites not present
        in `posterior_samples`.
    :param dict params: dictionary of values for param sites of model/guide.
    :param int num_samples: number of samples
    :param list return_sites: sites to return; by default only sample sites not present
        in `posterior_samples` are returned.
    :param bool parallel: whether to predict in parallel using JAX vectorized map :func:`jax.vmap`.
        Defaults to False.
    :param batch_ndims: the number of batch dimensions in posterior samples. Some usages:
        + set `batch_ndims=0` to get prediction for 1 single sample
        + set `batch_ndims=1` to get prediction for `posterior_samples`
          with shapes `(num_samples x ...)`
        + set `batch_ndims=2` to get prediction for `posterior_samples`
          with shapes `(num_chains x N x ...)`. Note that if `num_samples`
          argument is not None, its value should be equal to `num_chains x N`.
    :return: dict of samples from the predictive distribution.
    """
    def __init__(
        self,
        model,
        posterior_samples=None,
        guide=None,
        params=None,
        num_samples=None,
        return_sites=None,
        parallel=False,
        batch_ndims=1,
    ):
        if posterior_samples is None and num_samples is None:
            raise ValueError(
                "Either posterior_samples or num_samples must be specified."
            )
        posterior_samples = {} if posterior_samples is None else posterior_samples
        # Validate that every sample site shares the same leading batch shape,
        # and infer num_samples from it.
        prototype_site = batch_shape = batch_size = None
        for name, sample in posterior_samples.items():
            if batch_shape is not None and sample.shape[:batch_ndims] != batch_shape:
                raise ValueError(
                    f"Batch shapes at site {name} and {prototype_site} "
                    f"should be the same, but got "
                    f"{sample.shape[:batch_ndims]} and {batch_shape}"
                )
            else:
                prototype_site = name
                batch_shape = sample.shape[:batch_ndims]
                batch_size = int(np.prod(batch_shape))
                if (num_samples is not None) and (num_samples != batch_size):
                    # The samples win over an inconsistent num_samples arg.
                    warnings.warn(
                        "Sample's batch dimension size {} is different from the "
                        "provided {} num_samples argument. Defaulting to {}.".format(
                            batch_size, num_samples, batch_size
                        ),
                        UserWarning,
                    )
                num_samples = batch_size
        if num_samples is None:
            raise ValueError(
                "No sample sites in posterior samples to infer `num_samples`."
            )
        if batch_shape is None:
            # No posterior samples given: synthesize a batch shape from
            # num_samples and the requested number of batch dimensions.
            batch_shape = (1,) * (batch_ndims - 1) + (num_samples,)
        if return_sites is not None:
            assert isinstance(return_sites, (list, tuple, set))
        self.model = model
        self.posterior_samples = {} if posterior_samples is None else posterior_samples
        self.num_samples = num_samples
        self.guide = guide
        self.params = {} if params is None else params
        self.return_sites = return_sites
        self.parallel = parallel
        self.batch_ndims = batch_ndims
        self._batch_shape = batch_shape

    def __call__(self, rng_key, *args, **kwargs):
        """
        Returns dict of samples from the predictive distribution. By default, only sample sites not
        contained in `posterior_samples` are returned. This can be modified by changing the
        `return_sites` keyword argument of this :class:`Predictive` instance.
        :param jax.random.PRNGKey rng_key: random key to draw samples.
        :param args: model arguments.
        :param kwargs: model kwargs.
        """
        posterior_samples = self.posterior_samples
        if self.guide is not None:
            rng_key, guide_rng_key = random.split(rng_key)
            # use return_sites='' as a special signal to return all sites
            guide = substitute(self.guide, self.params)
            posterior_samples = _predictive(
                guide_rng_key,
                guide,
                posterior_samples,
                self._batch_shape,
                return_sites="",
                parallel=self.parallel,
                model_args=args,
                model_kwargs=kwargs,
            )
        model = substitute(self.model, self.params)
        return _predictive(
            rng_key,
            model,
            posterior_samples,
            self._batch_shape,
            return_sites=self.return_sites,
            parallel=self.parallel,
            model_args=args,
            model_kwargs=kwargs,
        )
| 39.270588 | 100 | 0.593619 |
ace14bdb44be4c92b9d80bd82860ce41f8bc7ecb | 8,155 | py | Python | dataset/run_law_squad.py | saltfun/DuReader | cdcd232cea55cb85e78321a78e57a0ba229f550c | [
"Apache-2.0"
] | null | null | null | dataset/run_law_squad.py | saltfun/DuReader | cdcd232cea55cb85e78321a78e57a0ba229f550c | [
"Apache-2.0"
] | null | null | null | dataset/run_law_squad.py | saltfun/DuReader | cdcd232cea55cb85e78321a78e57a0ba229f550c | [
"Apache-2.0"
] | null | null | null | import json
import args
import torch
import pickle
import random
import numpy as np
from tqdm import tqdm
from tokenization import BertTokenizer
# Seed the global RNG from the project-wide config so runs are reproducible.
random.seed(args.seed)
def read_squad_examples(law_input_file, search_input_file, is_training=True,
                        max_seq_length=None):
    """Read two DuReader-style JSON-lines files and build SQuAD-like examples.

    The search file is processed first, then the law file, preserving the
    original output order. The per-file parsing loop used to be duplicated
    verbatim; it now lives once in ``_collect_examples``.

    Args:
        law_input_file: path to the law-domain JSON-lines file.
        search_input_file: path to the search-domain JSON-lines file.
        is_training: when True the end offset is made inclusive (``end - 1``),
            matching the training-time convention.
        max_seq_length: span-end cutoff; defaults to ``args.max_seq_length``
            for backward compatibility with the original hard-coded lookup.

    Returns:
        list of dicts with keys ``qas_id``, ``question_text``,
        ``question_type``, ``doc_tokens``, ``start_position``,
        ``end_position``.
    """
    if max_seq_length is None:
        max_seq_length = args.max_seq_length
    examples = []
    _collect_examples(search_input_file, examples, is_training, max_seq_length)
    _collect_examples(law_input_file, examples, is_training, max_seq_length)
    print("len(examples):", len(examples))
    return examples


def _collect_examples(input_file, examples, is_training, max_seq_length):
    """Append examples parsed from one JSON-lines file to ``examples``.

    Records are skipped when they have no answer span, no answers, a match
    score below 0.8, a span end beyond ``max_seq_length``, or when the
    recorded fake answer does not match the reconstructed span text.
    """
    with open(input_file, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            source = json.loads(line.strip())
            # Skip records that cannot yield a usable answer span.
            if len(source['answer_spans']) == 0:
                continue
            if source['answers'] == []:
                continue
            if source['match_scores'][0] < 0.8:
                continue
            if source['answer_spans'][0][1] > max_seq_length:
                continue
            docs_index = source['answer_docs'][0]
            start_id = source['answer_spans'][0][0]
            end_id = source['answer_spans'][0][1] + 1  # make the end exclusive
            question_type = source['question_type']
            try:
                answer_passage_idx = source['documents'][docs_index]['most_related_para']
            except (IndexError, KeyError, TypeError):
                # Malformed record: answer_docs points outside `documents`.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                continue
            doc_tokens = source['documents'][docs_index]['segmented_paragraphs'][answer_passage_idx]
            # The paragraph is stored as "<title tokens> <sep> <passage>";
            # drop the title prefix and shift the span accordingly.
            ques_len = len(source['documents'][docs_index]['segmented_title']) + 1
            doc_tokens = doc_tokens[ques_len:]
            start_id, end_id = start_id - ques_len, end_id - ques_len
            if start_id < 0:
                # Guard added: a negative start used to silently reuse the
                # previous record's character offset (stale local variable).
                continue
            if start_id >= end_id or end_id > len(doc_tokens) or start_id >= len(doc_tokens):
                continue
            # Convert the token-level start offset into a character offset.
            prefix = ""
            for idx, token in enumerate(doc_tokens):
                if idx == start_id:
                    new_start_id = len(prefix)
                    break
                prefix = prefix + token
            new_doc_tokens = "".join(doc_tokens)
            new_end_id = new_start_id + len(source['fake_answers'][0])
            # Sanity check: the recorded fake answer must match the span text.
            if source['fake_answers'][0] != new_doc_tokens[new_start_id:new_end_id]:
                continue
            if is_training:
                new_end_id = new_end_id - 1  # inclusive end for training
            examples.append({
                "qas_id": source['question_id'],
                "question_text": source['question'].strip(),
                "question_type": question_type,
                "doc_tokens": new_doc_tokens.strip(),
                "start_position": new_start_id,
                "end_position": new_end_id})
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 max_query_length, output_file="./train.data"):
    """Convert examples into BERT input features and dump them to disk.

    Each feature is ``[CLS] question [SEP] document [SEP]`` with segment ids
    0 for the question part and 1 for the document part; the answer span is
    shifted right by the number of prepended tokens.

    Args:
        examples: example dicts produced by ``read_squad_examples``.
        tokenizer: object exposing ``convert_tokens_to_ids(tokens)``.
        max_seq_length: maximum total input length; longer inputs are
            truncated and the final kept token is forced to ``[SEP]``.
        max_query_length: maximum question length (characters).
        output_file: where the JSON-lines feature file is written.
            Previously hard-coded to ``./train.data``; pass ``./dev.data``
            for the dev split instead of editing the source.

    Returns:
        list of feature dicts.
    """
    features = []
    for example in examples:
        query_tokens = list(example['question_text'])
        doc_tokens = example['doc_tokens']
        # Normalise curly quotes so they match the tokenizer vocabulary.
        doc_tokens = doc_tokens.replace(u"\u201c", u"\"")
        doc_tokens = doc_tokens.replace(u"\u201d", u"\"")
        start_position = example['start_position']
        end_position = example['end_position']
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        tokens = ["[CLS]"]
        segment_ids = [0]
        # Every token prepended before the document shifts the answer span.
        start_position += 1
        end_position += 1
        for token in query_tokens:
            tokens.append(token)
            segment_ids.append(0)
            start_position += 1
            end_position += 1
        tokens.append("[SEP]")
        segment_ids.append(0)
        start_position += 1
        end_position += 1
        for ch in doc_tokens:
            tokens.append(ch)
            segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)
        # Drop examples whose answer would fall outside the truncated input.
        if end_position >= max_seq_length:
            continue
        if len(tokens) > max_seq_length:
            # Truncate and force the last kept token to be [SEP].
            tokens[max_seq_length - 1] = "[SEP]"
            input_ids = tokenizer.convert_tokens_to_ids(tokens[:max_seq_length])
            segment_ids = segment_ids[:max_seq_length]
        else:
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        assert len(input_ids) == len(segment_ids)
        features.append({
            "input_ids": input_ids,
            "input_mask": input_mask,
            "segment_ids": segment_ids,
            "start_position": start_position,
            "end_position": end_position})
    with open(output_file, 'w', encoding="utf-8") as fout:
        for feature in features:
            fout.write(json.dumps(feature, ensure_ascii=False) + '\n')
    print("len(features):", len(features))
    return features
if __name__ == "__main__":
    # Character-level BERT tokenizer for Chinese text.
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', do_lower_case=True)
    # Generate the training data, train.data
    examples = read_squad_examples(law_input_file=args.law_input_file,
                                   search_input_file=args.search_input_file)
    features = convert_examples_to_features(examples=examples, tokenizer=tokenizer,
                                            max_seq_length=args.max_seq_length, max_query_length=args.max_query_length)
    # Generate the validation data, dev.data. Remember to comment out the
    # training-data code above, and change train.data to dev.data on line 196.
    # examples = read_squad_examples(zhidao_input_file=args.dev_zhidao_input_file,
    #                                search_input_file=args.dev_search_input_file)
    # features = convert_examples_to_features(examples=examples, tokenizer=tokenizer,
    #                                         max_seq_length=args.max_seq_length, max_query_length=args.max_query_length)
| 37.75463 | 120 | 0.56309 |
ace14d6073754edd3e406d0135b914be86e1955c | 369 | py | Python | Multidimensional lists - Exercise/2x2 squares in matrix.py | DiyanKalaydzhiev23/Advanced---Python | ed2c60bb887c49e5a87624719633e2b8432f6f6b | [
"MIT"
] | null | null | null | Multidimensional lists - Exercise/2x2 squares in matrix.py | DiyanKalaydzhiev23/Advanced---Python | ed2c60bb887c49e5a87624719633e2b8432f6f6b | [
"MIT"
] | null | null | null | Multidimensional lists - Exercise/2x2 squares in matrix.py | DiyanKalaydzhiev23/Advanced---Python | ed2c60bb887c49e5a87624719633e2b8432f6f6b | [
"MIT"
def count_equal_2x2(matrix):
    """Count the 2x2 sub-squares of ``matrix`` whose four cells are equal.

    ``matrix`` is a list of rows (lists of symbols). An empty or single-row
    matrix yields 0. Extracted from the former top-level loop so the logic
    is importable and testable; the script previously ran on import.
    """
    count = 0
    for row in range(len(matrix) - 1):
        for col in range(len(matrix[row]) - 1):
            symbol = matrix[row][col]
            if (matrix[row][col + 1] == symbol
                    and matrix[row + 1][col] == symbol
                    and matrix[row + 1][col + 1] == symbol):
                count += 1
    return count


if __name__ == "__main__":
    # First input line: "<rows> <columns>"; then one line per matrix row.
    rows, columns = input().split()
    matrix = [input().split() for _ in range(int(rows))]
    print(count_equal_2x2(matrix))
| 30.75 | 103 | 0.593496 |
ace14fe6e27cb0dc618b8362b08f58eae62dfbef | 712 | py | Python | lang.py | fanzeyi/Vulpix | 9448e968973073c98231b22663bbebb2a452dcd7 | [
"BSD-3-Clause"
] | 13 | 2015-03-08T11:59:28.000Z | 2021-07-11T11:58:01.000Z | src/tornado/demos/Vulpix-master/lang.py | ptphp/PyLib | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | [
"Apache-2.0"
] | null | null | null | src/tornado/demos/Vulpix-master/lang.py | ptphp/PyLib | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | [
"Apache-2.0"
] | 3 | 2015-05-29T16:14:08.000Z | 2016-04-29T07:25:26.000Z | # -*- coding: utf-8 -*-
# AUTHOR: Zeray Rice <fanzeyi1994@gmail.com>
# FILE: lang.py
# CREATED: 02:44:51 08/03/2012
# MODIFIED: 02:54:55 08/03/2012
# DESCRIPTION: Set language handler
from tornado.web import HTTPError
from config import accept_lang
from judge.base import BaseHandler
class SetLanguageHandler(BaseHandler):
    ''' `/lang/(.*)` - set language.

    Stores the requested language code in the ``LANG`` cookie and sends the
    user back to the page they came from, or to the site root.
    '''
    def get(self, lang):
        # Unknown language codes are a 404, not a silent fallback.
        if lang not in accept_lang:
            raise HTTPError(404)
        self.set_cookie('LANG', accept_lang[lang])
        # Fix: dict.has_key() was removed in Python 3; use the `in` operator,
        # which HTTP header mappings also support.
        if 'Referer' in self.request.headers:
            self.redirect(self.request.headers['Referer'])
            return
        self.redirect('/')
__all__ = ["SetLanguageHandler"]
| 27.384615 | 58 | 0.65309 |
ace1500b891b3b2f99f52ad5f14e563677d91b2b | 320 | py | Python | cajas/inventory/api/views/brand_list.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | cajas/inventory/api/views/brand_list.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | cajas/inventory/api/views/brand_list.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | from rest_framework.generics import ListAPIView
from ...models import Brand
from ..serializer.brand_serializer import BrandSerializer
class BrandList(ListAPIView):
    """Read-only endpoint listing every brand that belongs to one category.

    The category primary key is taken from the URL keyword argument ``pk``.
    """
    serializer_class = BrandSerializer

    def get_queryset(self, *args, **kwargs):
        category_pk = self.kwargs['pk']
        return Brand.objects.filter(category__pk=category_pk)
| 26.666667 | 67 | 0.775 |
ace150a1e21bb30c102258ba36494a51e0b4c871 | 2,608 | py | Python | catalog/models.py | toert/django-shop-template | 22b60f80235735673a486c6d7dc24a5c81c40c01 | [
"MIT"
] | 1 | 2017-08-13T20:19:38.000Z | 2017-08-13T20:19:38.000Z | catalog/models.py | toert/django-shop-template | 22b60f80235735673a486c6d7dc24a5c81c40c01 | [
"MIT"
] | null | null | null | catalog/models.py | toert/django-shop-template | 22b60f80235735673a486c6d7dc24a5c81c40c01 | [
"MIT"
] | null | null | null | from django.db import models
from django.core.urlresolvers import reverse
class Category(models.Model):
    """Product category; the slug identifies it in catalog URLs."""
    name = models.CharField(max_length=80, verbose_name='Название категории')
    slug = models.SlugField(max_length=200, db_index=True)
    description = models.CharField(max_length=200, verbose_name='Краткое описание')
    image = models.ImageField(upload_to='catalog/categories_images/', blank=True, verbose_name='Изображение категории')

    class Meta:
        ordering = ['name']
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'

    def get_absolute_url(self):
        # Category page listing its products, resolved by slug.
        # NOTE(review): `reverse` comes from django.core.urlresolvers, which
        # was removed in Django 2.0 — confirm the target Django version.
        return reverse('catalog:list_products', args=[self.slug])

    def __str__(self):
        return self.name
class Product(models.Model):
    """A catalog item belonging to one Category, with price and stock."""
    category = models.ForeignKey(Category, related_name='products', verbose_name='Категория')
    name = models.CharField(max_length=200, db_index=True, verbose_name='Название')
    slug = models.SlugField(max_length=200, db_index=True)
    image = models.ImageField(upload_to='catalog/products_images/%Y/%m/%d/', blank=True, verbose_name='Изображение товара')
    description = models.TextField(blank=True, verbose_name='Описание')
    price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='Цена')
    stock = models.PositiveIntegerField(verbose_name='На складе')
    available = models.BooleanField(default=True, verbose_name='Доступен')
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = 'Товар'
        verbose_name_plural = 'Товары'
        # Composite index matching the (id, slug) lookup used by product URLs.
        index_together = [
            ['id', 'slug']
        ]

    def get_absolute_url(self):
        # Product detail page, addressed by id plus both slugs.
        return reverse('catalog:render_product', kwargs={'id': self.id,
                                                         'product_slug': self.slug,
                                                         'category_slug': self.category.slug})

    def __str__(self):
        return '{}/{}'.format(self.category, self.name)
class ProductAttribute(models.Model):
    """A single name/value characteristic attached to a Product."""
    product = models.ForeignKey(Product, related_name='attributes', db_index=True, verbose_name='Товар')
    attribute_name = models.CharField(max_length=80, db_index=True, verbose_name='Название характеристики')
    attribute_value = models.CharField(max_length=200, verbose_name='Значение характеристики')

    class Meta:
        verbose_name = 'Характеристика товара'
        verbose_name_plural = 'Характеристики Товаров'

    def __str__(self):
        return '{}-{}-{}'.format(self.product, self.attribute_name, self.attribute_value)
| 42.064516 | 123 | 0.687883 |
ace150bccc5ec153b2ba1d543e7d74838cf12f81 | 3,243 | py | Python | dramatic-dragonflies/the_htvms/settings.py | lavirlifiliol/summer-code-jam-2020 | 982cb20cd1efba6420a48daa56419686dd308cbc | [
"MIT"
] | null | null | null | dramatic-dragonflies/the_htvms/settings.py | lavirlifiliol/summer-code-jam-2020 | 982cb20cd1efba6420a48daa56419686dd308cbc | [
"MIT"
] | null | null | null | dramatic-dragonflies/the_htvms/settings.py | lavirlifiliol/summer-code-jam-2020 | 982cb20cd1efba6420a48daa56419686dd308cbc | [
"MIT"
] | null | null | null | """
Django settings for the_htvms project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; for a real
# deployment it should come from the environment instead.
SECRET_KEY = 'if#@q-v3q9@l3v@dn%3-^u!h7lv%zgjyi&6o#$(j5qp%+3+$9t'
# SECURITY WARNING: don't run with debug turned on in production!
# Fix: os.getenv returns a *string* whenever the variable is set, and any
# non-empty string (even "False" or "0") is truthy, so the raw value must
# be parsed into a real boolean explicitly.
DEBUG = os.getenv('DEBUG', 'False').lower() in ('1', 'true', 'yes')
# NOTE(review): '0.0.0.0' only matches the literal Host header "0.0.0.0";
# real hostnames served by this app must be listed here as well.
ALLOWED_HOSTS = ['0.0.0.0']

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'the_htvms.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'the_htvms.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# NOTE(review): credentials are hard-coded; acceptable for the bundled
# docker-compose postgres service only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'thehtvms',
        'USER': 'thehtvms',
        'PASSWORD': 'thehtvms',
        'HOST': 'postgres',
        'PORT': '5432'
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/app/static'
| 25.738095 | 91 | 0.685168 |
ace150e7c580b5db77903f6f6d95b827ee2a648d | 24 | py | Python | pontoon/translate/__init__.py | udacity/pontoon | e15a03a0c987615385b2a8c537bb18c99567f77e | [
"BSD-3-Clause"
] | 1 | 2018-12-24T11:15:35.000Z | 2018-12-24T11:15:35.000Z | pontoon/translate/__init__.py | udacity/pontoon | e15a03a0c987615385b2a8c537bb18c99567f77e | [
"BSD-3-Clause"
] | 9 | 2020-09-06T05:18:03.000Z | 2022-02-26T14:28:38.000Z | pontoon/translate/__init__.py | udacity/pontoon | e15a03a0c987615385b2a8c537bb18c99567f77e | [
"BSD-3-Clause"
] | 1 | 2019-10-07T17:46:36.000Z | 2019-10-07T17:46:36.000Z | URL_BASE = 'translate/'
| 12 | 23 | 0.708333 |
ace151f9d07e22911ec8de4866e79b4177de2d14 | 3,155 | py | Python | PBP.py | j-friedrich/neuronalGPR | 0a05f0b0a30ac6ecbd5b3ff175d3cbfeb723479b | [
"Apache-2.0"
] | 2 | 2020-11-26T04:23:04.000Z | 2021-06-10T11:32:40.000Z | PBP.py | j-friedrich/neuronalGPR | 0a05f0b0a30ac6ecbd5b3ff175d3cbfeb723479b | [
"Apache-2.0"
] | null | null | null | PBP.py | j-friedrich/neuronalGPR | 0a05f0b0a30ac6ecbd5b3ff175d3cbfeb723479b | [
"Apache-2.0"
] | null | null | null | from GPnet import logpdf
import math
import numpy as np
import os
import sys
from time import time
# Make the bundled PBP implementation importable before importing it.
sys.path.append('Probabilistic-Backpropagation-master/c/PBP_net')
import PBP_net
# pass the name of the UCI Dataset directory as argument
try:
    data_directory = sys.argv[1]
except IndexError:
    # No CLI argument given: fall back to the default dataset. The previous
    # bare `except:` would also have swallowed SystemExit/KeyboardInterrupt.
    data_directory = 'bostonHousing'
_UCI_DIRECTORY_PATH = "DropoutUncertaintyExps-master/UCI_Datasets/"
# Discover the available dataset directories and validate the request.
subfolders = [f.name for f in os.scandir(_UCI_DIRECTORY_PATH) if f.is_dir()]
subfolders.sort()
if data_directory not in subfolders:
    raise ValueError("data directory must be one of the following " +
                     repr(subfolders) + " but was " + data_directory)
_DATA_DIRECTORY_PATH = _UCI_DIRECTORY_PATH + data_directory + "/data/"
# Full design matrix and target vector, selected via the per-dataset
# index files shipped with the UCI experiments layout.
data = np.loadtxt(_DATA_DIRECTORY_PATH + "data.txt")
index_features = np.loadtxt(_DATA_DIRECTORY_PATH + "index_features.txt")
index_target = np.loadtxt(_DATA_DIRECTORY_PATH + "index_target.txt")
X = data[:, [int(i) for i in index_features.tolist()]]
y = data[:, int(index_target.tolist())]
# Experiment hyper-parameters: number of CV splits, hidden units, epochs.
n_splits = int(np.loadtxt(_DATA_DIRECTORY_PATH + 'n_splits.txt'))
n_hidden = int(np.loadtxt(_DATA_DIRECTORY_PATH + "n_hidden.txt"))
n_epochs = int(np.loadtxt(_DATA_DIRECTORY_PATH + 'n_epochs.txt'))
def _get_index_train_test_path(split_num, train=True):
    """Return the path of the index file for one train/test split.

    split_num -- which split to load (generally 1 to 20)
    train     -- True for the training indices, False for the test indices
    """
    role = "train" if train else "test"
    return "{}index_{}_{}.txt".format(_DATA_DIRECTORY_PATH, role, str(split_num))
# perf[split, depth-1, :] holds (test RMSE, test log-likelihood, predict time)
# for networks with 1 and 2 hidden layers; initialised to NaN so missing
# entries are distinguishable from real results.
perf = np.nan * np.zeros((n_splits, 2, 3))
for n_layers in (1, 2):
    np.random.seed(1)  # same seed per depth so splits are comparable
    for split in range(n_splits):
        # We load the indexes of the training and test sets
        print('Loading file: ' + _get_index_train_test_path(split, train=True))
        print('Loading file: ' + _get_index_train_test_path(split, train=False))
        index_train = np.loadtxt(_get_index_train_test_path(split, train=True))
        index_test = np.loadtxt(_get_index_train_test_path(split, train=False))
        X_train = X[[int(i) for i in index_train.tolist()]]
        y_train = y[[int(i) for i in index_train.tolist()]]
        X_test = X[[int(i) for i in index_test.tolist()]]
        y_test = y[[int(i) for i in index_test.tolist()]]
        net = PBP_net.PBP_net(X_train, y_train, [n_hidden] * n_layers,
                              normalize=True, n_epochs=n_epochs)
        # We make predictions for the test set
        # (t = -time(); ...; t += time() measures the prediction wall time)
        t = -time()
        m, v, v_noise = net.predict(X_test)
        t += time()
        # We compute the test RMSE and ll
        perf[split, n_layers - 1, :2] = (np.sqrt(np.mean((y_test - m)**2)),
                                         logpdf(y_test - m, v + v_noise).mean())
        perf[split, n_layers - 1, 2] = t
np.save('results/PBP/' + data_directory, perf)
| 39.4375 | 85 | 0.666878 |
ace15396d972af0e708b040d483237bfad87bb7f | 1,947 | py | Python | kindled/models.py | jinglemansweep/Kindled | ce1964b94e019864bff3bcca75c0a3c96d54eafe | [
"MIT"
] | 3 | 2015-02-12T14:10:45.000Z | 2017-06-13T09:40:33.000Z | kindled/models.py | jinglemansweep/Kindled | ce1964b94e019864bff3bcca75c0a3c96d54eafe | [
"MIT"
] | null | null | null | kindled/models.py | jinglemansweep/Kindled | ce1964b94e019864bff3bcca75c0a3c96d54eafe | [
"MIT"
] | null | null | null |
class Recipe(object):
    """A named recipe backed by a recipe file."""

    # Class-level defaults, overwritten per instance in __init__.
    name = None
    filename = None

    def __init__(self, name, filename):
        """Store the recipe name and the path of its recipe file."""
        self.name, self.filename = name, filename
class Recipient(object):
    """A person that generated documents can be delivered to."""

    # Class-level defaults, overwritten per instance in __init__.
    name = None
    email_address = None

    def __init__(self, name, email_address):
        """Store the recipient's display name and e-mail address."""
        self.name, self.email_address = name, email_address
class Group(object):
    """A named collection of recipients."""

    name = None
    recipients = None

    def __init__(self, name, recipients=None):
        """Create a group; ``recipients`` may be a single item or a list."""
        self.name = name
        if recipients is None:
            self.recipients = []
        elif isinstance(recipients, list):
            self.recipients = recipients
        else:
            self.recipients = [recipients]

    def add_recipients(self, recipients):
        """Add a single recipient or a list of recipients to the group.

        Bug fix: the parameter used to be named ``recipient`` while the body
        referenced the undefined name ``recipients``, so every call raised
        NameError. The parameter is now ``recipients``, matching the sibling
        ``Subscription.add_groups``/``add_recipes`` methods.
        """
        if isinstance(recipients, list):
            self.recipients.extend(recipients)
        else:
            self.recipients.append(recipients)
class Subscription(object):
    """Links groups of recipients to the recipes they should receive."""

    name = None
    groups = None
    recipes = None

    def __init__(self, name, groups=None, recipes=None):
        """Create a subscription; ``groups`` and ``recipes`` each accept a
        single item or a list of items."""
        self.name = name
        if groups is None:
            self.groups = []
        elif isinstance(groups, list):
            self.groups = groups
        else:
            self.groups = [groups]
        # Bug fix: ``recipes`` was accepted but never stored, leaving
        # ``self.recipes`` as the class-level None and making add_recipes
        # fail with AttributeError. Mirror the ``groups`` handling.
        if recipes is None:
            self.recipes = []
        elif isinstance(recipes, list):
            self.recipes = recipes
        else:
            self.recipes = [recipes]

    def add_groups(self, groups):
        """Add a single group or a list of groups to the subscription."""
        if isinstance(groups, list):
            self.groups.extend(groups)
        else:
            self.groups.append(groups)

    def add_recipes(self, recipes):
        """Add a single recipe or a list of recipes to the subscription."""
        if isinstance(recipes, list):
            self.recipes.extend(recipes)
        else:
            self.recipes.append(recipes)
| 18.542857 | 56 | 0.552645 |
ace154dbf5e33546f94fe115ab6ae612fdd7c0f5 | 5,716 | py | Python | Camera/main.py | henrybatt/Morph-2020 | 174a310e373a639904609bd50cdd918257ea0b20 | [
"MIT"
] | null | null | null | Camera/main.py | henrybatt/Morph-2020 | 174a310e373a639904609bd50cdd918257ea0b20 | [
"MIT"
] | null | null | null | Camera/main.py | henrybatt/Morph-2020 | 174a310e373a639904609bd50cdd918257ea0b20 | [
"MIT"
] | null | null | null | # Untitled - By: Henry - Mon Oct 7 2019
##### MUST MAKE CFASTER
# True: attack the yellow goal, defend blue; False: the reverse.
ATTACK_YELLOW = True
ROBOT = 1 # 1=A, 0=B
# When True, auto white balance stays enabled so gains can be read out.
DEBUG_WHITEBAL = False
# Enables on-image debug drawing when passed into Find.init().
DEBUGGING = False
## ======================= BLOB Finder =======================
import image, sensor, time
from math import atan2, sqrt, degrees
class Find():
    """Goal-blob finder for an OpenMV camera on a soccer robot.

    Locates the yellow and blue goals in the camera image and converts the
    best blob of each colour into a (bearing, distance) pair relative to the
    image centre.
    """
    def __init__(self):
        # Sentinel values returned when no qualifying blob is found.
        self.NO_DATA_ANGLE = -1
        self.NO_DATA_DIST = 0
        # Counter used by whiteBal() to rate-limit gain printouts.
        self.debugCount = 0
        self.DEBUG_COUNT_MAX = 30

    def init(self, robot, _ATTACK_YELLOW, _debug, DEBUG_WHITEBAL):
        """Configure per-robot colour thresholds and the camera sensor."""
        # - Setup Sensors and values - #
        self.debug = _debug
        self.attackIsYellow = _ATTACK_YELLOW
        self.robot = robot
        # - Camera Specific Values - #
        # LAB colour thresholds (L_min, L_max, A_min, A_max, B_min, B_max),
        # calibrated per robot.
        if robot:
            self.thresholds = [
                (64, 99, -78, 31, 55, 96),  #Yellow Goal
                (11, 50, -60, -11, -7, 34)]  #Blue Goal
            self.whitebal = (-6.157801, -6.02073, -0.931115)
            self.window = (20, 0, 120, 120)
            self.MAX_RAD = 70   # ignore blobs farther than this from centre
            self.CENTREX = 60   # image centre (mirror axis) in pixels
            self.CENTREY = 60
        else:
            # NOTE(review): robot B's thresholds are placeholder zeros and
            # are nested one list deeper than robot A's — confirm before use.
            self.thresholds = [
                [(0, 0, 0, 0, 0, 0)],  #Yellow Goal
                [(0, 0, 0, 0, 0, 0)]]  #Blue Goal
            self.whitebal = (-6.02073, -5.886325, -1.476391)
            self.window = (20, 0, 120, 120)
            self.MAX_RAD = 70
            self.CENTREX = 60
            self.CENTREY = 60
        # - Sensor Setup - #
        sensor.reset()
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_framesize(sensor.QQVGA)
        sensor.set_windowing(self.window)
        sensor.skip_frames(time = 100)
        # - Balance - #
        if DEBUG_WHITEBAL:
            # Leave auto white balance on so gains can be read and tuned.
            sensor.set_auto_whitebal(True)
        else:
            # Lock exposure/gain/white balance to the calibrated values so
            # colour thresholds stay stable under changing light.
            sensor.set_brightness(0)
            sensor.set_contrast(3)
            sensor.set_saturation(2)
            sensor.set_auto_whitebal(False, rgb_gain_db=self.whitebal)
            sensor.set_auto_exposure(False, exposure_us=10000)
            sensor.set_auto_gain(False, gain_db=15)
        # Let the sensor settle with the new configuration.
        sensor.skip_frames(time=500)

    def screenshot(self):
        """Grab a frame into ``self.img``, with debug overlays if enabled."""
        self.img = sensor.snapshot()
        if (self.debug):
            self.img.draw_cross(self.CENTREX, self.CENTREY)
            self.img.draw_circle(self.CENTREX, self.CENTREY, self.MAX_RAD)
            self.img.draw_string(30, 0, str(self.robot)+" " +str(self.attackIsYellow))

    def calculateAngleDist(self, object):
        """Return (bearing, distance) of a blob relative to the image centre.

        (450 - atan2) % 360 converts the mathematical angle into a clockwise
        bearing with 0 degrees at the top of the image.
        """
        x = object.cx() - (self.CENTREX)
        y = object.cy() - (self.CENTREY)
        angle = (450 - degrees(atan2(y, x))) % 360
        distance = (sqrt(x**2 + y**2))
        return angle, distance

    def whiteBal(self):
        """Print the current RGB gains every DEBUG_COUNT_MAX calls."""
        self.debugCount += 1
        if self.debugCount >= self.DEBUG_COUNT_MAX:
            print(sensor.get_rgb_gain_db())
            self.debugCount = 0

    def sortBlobs(self, blobs):
        """Return (angle, dist) of the largest blob within MAX_RAD.

        Blobs are scanned largest-first by pixel count; the first one inside
        the mirror radius wins. Falls back to the NO_DATA sentinels.
        """
        if len(blobs) > 0:
            for blob in sorted(blobs, key=lambda x: x.pixels(), reverse = True):
                angle, distance = self.calculateAngleDist(blob)
                if distance < self.MAX_RAD:
                    if (self.debug):
                        self.img.draw_cross(blob.cx(), blob.cy())
                        self.img.draw_rectangle(blob.rect())
                        self.img.draw_line((self.CENTREX, self.CENTREY, blob.cx(), blob.cy()),thickness=1)
                    return(angle, distance)
        return(self.NO_DATA_ANGLE, self.NO_DATA_DIST)

    def findBlobs(self):
        # - Find all goal blobs and return their angles and distances - #
        self.screenshot()
        # Both thresholds are passed at once; find_blobs tags each blob with
        # a bit-code identifying which threshold matched.
        goalBlobs = self.img.find_blobs(self.thresholds[0:], x_stride=5, y_stride=5, area_threshold=200, pixel_threshold=200, merge=True, margin=23)
        # Blob Codes #
        # yellow = 1
        # blue = 2
        yellowBlobs = []
        blueBlobs = []
        for blob in goalBlobs:
            if blob.code() == 1:
                yellowBlobs.append(blob)
            elif blob.code() == 2:
                blueBlobs.append(blob)
        # Map colours onto attack/defend according to the configured side.
        if self.attackIsYellow:
            attackAngle, attackDist = self.sortBlobs(yellowBlobs)
            defendAngle, defendDist = self.sortBlobs(blueBlobs)
        else:
            attackAngle, attackDist = self.sortBlobs(blueBlobs)
            defendAngle, defendDist = self.sortBlobs(yellowBlobs)
        return([attackAngle, attackDist, defendAngle, defendDist])
## ======================= SEND DATA =======================
from pyb import UART, LED
class Send():
    """Serialises vision data and streams it over UART to the main controller.

    Packet layout (8 bytes):
    [255, 255, attackAngle_hi, attackAngle_lo, attackDist,
     defendAngle_hi, defendAngle_lo, defendDist]
    """

    def __init__(self, bus=3, baud=115200, tout_c=10):
        # pyb.UART: tout_c is the inter-character timeout in milliseconds.
        self.uart = UART(bus, baud, timeout_char=tout_c)

    def sendData(self, data):
        """Send [attackAngle, attackDist, defendAngle, defendDist].

        Angles can exceed one byte (0-360) and are split into high/low
        bytes; distances are assumed to fit into a single byte.
        """
        data = [round(x) for x in data]
        # - Data to write - #
        # Starting byte
        sendData = [255, 255]
        # Attack Data
        sendData.append((data[0] >> 8) & 0xFF)
        sendData.append(data[0] & 0xFF)
        sendData.append(data[1])
        # Defend Data
        sendData.append((data[2] >> 8) & 0xFF)
        # Fixed: was `data[2] & 0xF`, which kept only the low 4 bits and
        # corrupted every defend angle >= 16 (the attack path uses & 0xFF).
        sendData.append(data[2] & 0xFF)
        sendData.append(data[3])
        # - Ensure no data is the same as starting byte - #
        # NOTE(review): the loop starts at index 1, so the second header
        # byte is always rewritten 255 -> 254; confirm the receiver syncs
        # on the 255,254 pair before changing this to range(2, ...).
        for i in range(1, len(sendData)):
            if(sendData[i] == 255): sendData[i] -= 1
        # - Send Data - #
        for d in sendData:
            self.uart.writechar(d)
        print(', '.join(map(str, data)))
        #print(', '.join(map(str, sendData)))
## ======================= MAIN =======================
# Entry point: initialise hardware, then stream blob data forever.
clock = time.clock()
LED(1).on()   # LED lit while the sensor is being configured
finder = Find()
sender = Send()
finder.init(ROBOT, ATTACK_YELLOW, DEBUGGING, DEBUG_WHITEBAL)
LED(1).off()

while(True):
    clock.tick()
    if DEBUG_WHITEBAL:
        finder.whiteBal()
    # data = [attackAngle, attackDist, defendAngle, defendDist]
    data = finder.findBlobs()
    sender.sendData(data)
    #print(clock.fps())
ace1566696504df7111c7a0437619cb88e46ce89 | 8,702 | py | Python | blacksheep/server/responses.py | Cdayz/BlackSheep | 42c708c90c29299981e4bf0ed466d262f1ade0e5 | [
"MIT"
] | null | null | null | blacksheep/server/responses.py | Cdayz/BlackSheep | 42c708c90c29299981e4bf0ed466d262f1ade0e5 | [
"MIT"
] | null | null | null | blacksheep/server/responses.py | Cdayz/BlackSheep | 42c708c90c29299981e4bf0ed466d262f1ade0e5 | [
"MIT"
] | null | null | null | import ntpath
from enum import Enum
from functools import lru_cache
from io import BytesIO
from typing import Any, AnyStr, AsyncIterable, Callable, Union
from essentials.json import FriendlyEncoder
from essentials.json import dumps as friendly_dumps
from blacksheep import Content, JSONContent, Response, StreamedContent, TextContent
from blacksheep.common.files.asyncfs import FilesHandler
# Type alias: anything accepted as an optional response message
# (a str is sent as plain text, any other value is JSON-serialized).
MessageType = Any


class ContentDispositionType(Enum):
    """Valid values for the Content-Disposition response header."""

    INLINE = "inline"
    ATTACHMENT = "attachment"
def _ensure_bytes(value: AnyStr) -> bytes:
if isinstance(value, str):
return value.encode()
if isinstance(value, bytes):
return value
raise ValueError("Input value must be bytes or str")
def _json_serialize(obj) -> str:
    # Compact separators keep payloads small; FriendlyEncoder handles
    # common non-JSON types that the default encoder rejects.
    return friendly_dumps(obj, cls=FriendlyEncoder, separators=(",", ":"))


def _json_content(obj) -> JSONContent:
    # JSON content body using the compact serializer above.
    return JSONContent(obj, _json_serialize)


def _optional_content(message: Any = None) -> Content:
    # Strings become plain-text bodies; anything else is serialized as JSON.
    # Callers in this module check for a message before calling this.
    if isinstance(message, str):
        return TextContent(message)
    else:
        return _json_content(message)
def status_code(status: int = 200, message: Any = None) -> Response:
    """
    Returns a plain response with given status, with optional message;
    sent as plain text or JSON.
    """
    # NOTE(review): any falsy message ("", 0, [], ...) produces an empty
    # body, not just None — confirm this is intended for falsy payloads.
    if not message:
        return Response(status)
    return Response(status, content=_optional_content(message))
def ok(message: Any = None) -> Response:
    """HTTP 200 OK, optionally with a plain-text or JSON message body."""
    return status_code(200, message)


def created(message: Any = None, location: AnyStr = "") -> Response:
    """HTTP 201 Created, with optional Location header and JSON body."""
    headers = [(b"Location", _ensure_bytes(location))] if location else []
    body = _optional_content(message) if message else None
    return Response(201, headers, content=body)


def accepted(message: Any = None) -> Response:
    """HTTP 202 Accepted, optionally with a plain-text or JSON message body."""
    return status_code(202, message)


def no_content() -> Response:
    """HTTP 204 No Content."""
    return Response(204)


def not_modified() -> Response:
    """HTTP 304 Not Modified."""
    return Response(304)


def unauthorized(message: Any = None) -> Response:
    """HTTP 401 Unauthorized, optionally with a plain-text or JSON body."""
    return status_code(401, message)


def forbidden(message: Any = None) -> Response:
    """HTTP 403 Forbidden, optionally with a plain-text or JSON body."""
    return status_code(403, message)


def bad_request(message: Any = None) -> Response:
    """HTTP 400 Bad Request, optionally with a plain-text or JSON body."""
    return status_code(400, message)


def not_found(message: Any = None) -> Response:
    """HTTP 404 Not Found, optionally with a plain-text or JSON body."""
    return status_code(404, message)
def _location_response(status: int, location: AnyStr) -> Response:
    # Shared builder for every redirect-style response below.
    return Response(status, [(b"Location", _ensure_bytes(location))])


def moved_permanently(location: AnyStr) -> Response:
    """HTTP 301 Moved Permanently redirect to the given location."""
    return _location_response(301, location)


def redirect(location: AnyStr) -> Response:
    """HTTP 302 Found (the common temporary redirect) to the given location."""
    return _location_response(302, location)


def see_other(location: AnyStr) -> Response:
    """HTTP 303 See Other redirect to the given location."""
    return _location_response(303, location)


def temporary_redirect(location: AnyStr) -> Response:
    """HTTP 307 Temporary Redirect to the given location."""
    return _location_response(307, location)


def permanent_redirect(location: AnyStr) -> Response:
    """HTTP 308 Permanent Redirect to the given location."""
    return _location_response(308, location)
def text(value: str, status: int = 200) -> Response:
    """Plain-text (text/plain, UTF-8) response with the given status."""
    body = Content(b"text/plain; charset=utf-8", value.encode("utf8"))
    return Response(status, None, body)


def html(value: str, status: int = 200) -> Response:
    """HTML (text/html, UTF-8) response with the given status."""
    body = Content(b"text/html; charset=utf-8", value.encode("utf8"))
    return Response(status, None, body)
def json(data: Any, status: int = 200, dumps=friendly_dumps) -> Response:
    """
    Returns a response with application/json content,
    and given status (default HTTP 200 OK).

    ``dumps`` must accept the same keyword arguments as ``json.dumps``
    (``cls``, ``separators``). Note: this function intentionally shadows
    the stdlib ``json`` module name inside this module's namespace.
    """
    return Response(
        status,
        None,
        Content(
            b"application/json",
            # Compact separators minimise payload size.
            dumps(data, cls=FriendlyEncoder, separators=(",", ":")).encode("utf8"),
        ),
    )


def pretty_json(
    data: Any, status: int = 200, dumps=friendly_dumps, indent: int = 4
) -> Response:
    """
    Returns a response with indented application/json content,
    and given status (default HTTP 200 OK).
    """
    return Response(
        status,
        None,
        Content(
            b"application/json",
            dumps(data, cls=FriendlyEncoder, indent=indent).encode("utf8"),
        ),
    )
# Accepted inputs for file responses: an async chunk-generator factory,
# a filesystem path (str), raw bytes, a bytearray, or an in-memory stream.
FileInput = Union[Callable[[], AsyncIterable[bytes]], str, bytes, bytearray, BytesIO]


@lru_cache(2000)
def _get_file_provider(file_path: str) -> Callable[[], AsyncIterable[bytes]]:
    """Return a (cached, per-path) factory of async file-chunk generators."""
    async def data_provider():
        # A fresh FilesHandler-backed iterator is created on every call,
        # so the cached factory can safely be reused across responses.
        async for chunk in FilesHandler().chunks(file_path):
            yield chunk

    return data_provider
def _file(
    value: FileInput,
    content_type: str,
    content_disposition_type: ContentDispositionType,
    file_name: str = None,
) -> Response:
    """Build an HTTP 200 response streaming *value* with the given
    Content-Type and Content-Disposition headers.

    :param value: a path, raw bytes/bytearray, a BytesIO, or an async
        generator factory (see FileInput).
    :param file_name: optional exact file name (no path components) used
        in the Content-Disposition header.
    :raises ValueError: for an invalid file name or unsupported value type.
    """
    if file_name:
        # Strip any directory components; reject values that were only path.
        exact_file_name = ntpath.basename(file_name)
        if not exact_file_name:
            raise ValueError(
                "Invalid file name: it should be an exact "
                'file name without path, for example: "foo.txt"'
            )

        content_disposition_value = (
            f'{content_disposition_type.value}; filename="{exact_file_name}"'
        )
    else:
        content_disposition_value = content_disposition_type.value

    content: Content
    content_type_value = _ensure_bytes(content_type)

    if isinstance(value, str):
        # value is treated as a path
        content = StreamedContent(content_type_value, _get_file_provider(value))
    elif isinstance(value, BytesIO):

        async def data_provider():
            # NOTE(review): the stream is closed after the first complete
            # read, so a BytesIO-backed response can be streamed only once.
            try:
                value.seek(0)

                while True:
                    # 64 KiB chunks; empty read signals end of stream.
                    chunk = value.read(1024 * 64)

                    if not chunk:
                        break

                    yield chunk

                yield b""
            finally:
                if not value.closed:
                    value.close()

        content = StreamedContent(content_type_value, data_provider)
    elif callable(value):
        # value is treated as an async generator
        async def data_provider():
            async for chunk in value():
                yield chunk
            yield b""

        content = StreamedContent(content_type_value, data_provider)
    elif isinstance(value, bytes):
        content = Content(content_type_value, value)
    elif isinstance(value, bytearray):
        # Copy into immutable bytes so later caller mutation cannot leak.
        content = Content(content_type_value, bytes(value))
    else:
        raise ValueError(
            "Invalid value, expected one of: Callable, str, "
            "bytes, bytearray, io.BytesIO"
        )

    return Response(
        200, [(b"Content-Disposition", content_disposition_value.encode())], content
    )
def file(
    value: FileInput,
    content_type: str,
    *,
    file_name: str = None,
    content_disposition: ContentDispositionType = ContentDispositionType.ATTACHMENT,
) -> Response:
    """
    Returns a binary file response with given content type and optional
    file name, for download (attachment)
    (default HTTP 200 OK). This method supports both call with bytes,
    or a generator yielding chunks.

    ``file_name`` and ``content_disposition`` are keyword-only; pass
    ``ContentDispositionType.INLINE`` to display instead of download.

    Remarks: this method does not handle cache, ETag and HTTP 304 Not Modified
    responses; when handling files it is recommended to handle cache, ETag and
    Not Modified, according to use case.
    """
    return _file(value, content_type, content_disposition, file_name)
| 27.801917 | 85 | 0.648931 |
ace1568f9179181732a4987b7341c0f2ba2c1fe3 | 10,379 | py | Python | rcnn/io/rpn.py | OpenSLAM/mx-maskrcnn | c35fb765e437ed187b2fb4931d79275721554922 | [
"Apache-2.0"
] | 1,991 | 2017-10-25T06:38:03.000Z | 2022-03-29T13:13:54.000Z | rcnn/io/rpn.py | choetin/mx-maskrcnn | c35fb765e437ed187b2fb4931d79275721554922 | [
"Apache-2.0"
] | 143 | 2017-10-25T15:06:27.000Z | 2020-05-12T01:50:08.000Z | rcnn/io/rpn.py | choetin/mx-maskrcnn | c35fb765e437ed187b2fb4931d79275721554922 | [
"Apache-2.0"
] | 690 | 2017-10-25T06:41:18.000Z | 2021-11-29T03:09:34.000Z | """
RPN:
data =
{'data': [num_images, c, h, w],
'im_info': [num_images, 4] (optional)}
label =
{'gt_boxes': [num_boxes, 5] (optional),
'label': [batch_size, 1] <- [batch_size, num_anchors, feat_height, feat_width],
'bbox_target': [batch_size, num_anchors, feat_height, feat_width],
'bbox_weight': [batch_size, num_anchors, feat_height, feat_width]}
"""
import numpy as np
import numpy.random as npr
from ..config import config
from .image import get_image, tensor_vstack
from ..processing.generate_anchor import generate_anchors, anchors_plane
from ..processing.bbox_transform import bbox_overlaps, nonlinear_transform
bbox_transform = nonlinear_transform
def get_rpn_testbatch(roidb):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped']
    :return: data, label, im_info
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    data = {'data': im_array,
            'im_info': im_info}
    # No ground truth at test time, so the label dict is empty.
    label = {}

    return data, label, im_info
def get_rpn_batch(roidb):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    # support multiple images per device now
    imgs, roidb = get_image(roidb, scale=config.TRAIN.SCALE)
    im_array = tensor_vstack(imgs)
    # im_array = np.vstack(imgs)

    im_info = []
    gt_boxes = []
    for im_i in range(len(roidb)):
        im_info.append(roidb[im_i]['im_info'])
        # gt boxes: (x1, y1, x2, y2, cls)
        if roidb[im_i]['gt_classes'].size > 0:
            # Keep only non-background entries (class 0 is background).
            gt_inds_i = np.where(roidb[im_i]['gt_classes'] != 0)[0]
            # NOTE(review): the array is sized by boxes.shape[0] but filled
            # from gt_inds_i — these differ if any gt_classes entry is 0;
            # confirm roidbs here never contain background rows.
            gt_boxes_i = np.empty((roidb[im_i]['boxes'].shape[0], 5), dtype=np.float32)
            gt_boxes_i[:, 0:4] = roidb[im_i]['boxes'][gt_inds_i, :]
            gt_boxes_i[:, 4] = roidb[im_i]['gt_classes'][gt_inds_i]
        else:
            # No ground truth: a single all -1 row marks the empty case.
            gt_boxes_i = np.ones((1, 5), dtype=np.float32) * -1
        gt_boxes.append(gt_boxes_i[np.newaxis, :, :])
    im_info = np.array(im_info, dtype=np.float32)

    data = {'data': im_array,
            'im_info': im_info}
    # gt_boxes shape : [num_images, num_boxes, 5], type list
    label = {'gt_boxes': gt_boxes}

    return data, label
def assign_anchor_fpn(feat_shape, gt_boxes, im_info, feat_strides=[64,32,16,8,4],
                      scales=(8, 16, 32), ratios=(0.5, 1, 2), allowed_border=0):
    """
    assign ground truth boxes to anchor positions
    :param feat_shape: infer output shape (one entry per FPN level)
    :param gt_boxes: assign ground truth, shape (num_boxes, 5); rows with
        class -1 are placeholders and are dropped first
    :param im_info: filter out anchors overlapped with edges
    :param feat_strides: anchor position step, one per FPN level
    :param scales: used to generate anchors, affects num_anchors (per location)
    :param ratios: aspect ratios of generated anchors
    :param allowed_border: filter out anchors with edge overlap > allowed_border
    :return: tuple
    labels: of shape (batch_size, 1) <- (batch_size, num_anchors, feat_height, feat_width)
    bbox_targets: of shape (batch_size, num_anchors * 4, feat_height, feat_width)
    bbox_weights: mark the assigned anchors
    """
    def _unmap(data, count, inds, fill=0):
        """Unmap a subset inds of data into original data of size count."""
        if len(data.shape) == 1:
            ret = np.empty((count,), dtype=np.float32)
            ret.fill(fill)
            ret[inds] = data
        else:
            ret = np.empty((count,) + data.shape[1:], dtype=np.float32)
            ret.fill(fill)
            ret[inds, :] = data
        return ret

    DEBUG = False
    # clean up boxes (drop placeholder rows with class -1)
    nonneg = np.where(gt_boxes[:, 4] != -1)[0]
    gt_boxes = gt_boxes[nonneg]
    scales = np.array(scales, dtype=np.float32)

    # Per-FPN-level bookkeeping, later concatenated in level order.
    anchors_list = []
    anchors_num_list = []
    inds_inside_list = []
    feat_infos = []
    A_list = []
    for i in range(len(feat_strides)):
        base_anchors = generate_anchors(base_size=feat_strides[i], ratios=list(ratios), scales=scales)
        num_anchors = base_anchors.shape[0]
        feat_height, feat_width = feat_shape[i][-2:]
        feat_stride = feat_strides[i]
        feat_infos.append([feat_height, feat_width])

        A = num_anchors
        A_list.append(A)
        K = feat_height * feat_width

        all_anchors = anchors_plane(feat_height, feat_width, feat_stride, base_anchors)
        all_anchors = all_anchors.reshape((K * A, 4))

        total_anchors = int(K * A)
        anchors_num_list.append(total_anchors)
        # only keep anchors inside the image
        inds_inside = np.where((all_anchors[:, 0] >= -allowed_border) &
                               (all_anchors[:, 1] >= -allowed_border) &
                               (all_anchors[:, 2] < im_info[1] + allowed_border) &
                               (all_anchors[:, 3] < im_info[0] + allowed_border))[0]
        if DEBUG:
            print 'total_anchors', total_anchors
            print 'inds_inside', len(inds_inside)

        # keep only inside anchors
        anchors = all_anchors[inds_inside, :]

        anchors_list.append(anchors)
        inds_inside_list.append(inds_inside)

    # Concat anchors from each level; shift each level's indices by the
    # total anchor count of the preceding levels so they index the full set.
    anchors = np.concatenate(anchors_list)
    for i in range(1, len(inds_inside_list)):
        inds_inside_list[i] = inds_inside_list[i] + sum(anchors_num_list[:i])
    inds_inside = np.concatenate(inds_inside_list)
    total_anchors = sum(anchors_num_list)

    # label: 1 is positive, 0 is negative, -1 is dont care
    labels = np.empty((len(inds_inside),), dtype=np.float32)
    labels.fill(-1)

    if gt_boxes.size > 0:
        # overlap between the anchors and the gt boxes
        # overlaps (ex, gt)
        overlaps = bbox_overlaps(anchors.astype(np.float), gt_boxes.astype(np.float))
        argmax_overlaps = overlaps.argmax(axis=1)
        max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
        gt_argmax_overlaps = overlaps.argmax(axis=0)
        gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])]
        gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]

        if not config.TRAIN.RPN_CLOBBER_POSITIVES:
            # assign bg labels first so that positive labels can clobber them
            labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0

        # fg label: for each gt, anchor with highest overlap
        labels[gt_argmax_overlaps] = 1

        # fg label: above threshold IoU
        labels[max_overlaps >= config.TRAIN.RPN_POSITIVE_OVERLAP] = 1

        if config.TRAIN.RPN_CLOBBER_POSITIVES:
            # assign bg labels last so that negative labels can clobber positives
            labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
    else:
        labels[:] = 0

    # subsample positive labels if we have too many
    num_fg = int(config.TRAIN.RPN_FG_FRACTION * config.TRAIN.RPN_BATCH_SIZE)
    fg_inds = np.where(labels == 1)[0]
    if len(fg_inds) > num_fg:
        disable_inds = npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
        if DEBUG:
            # deterministic subsampling when debugging
            disable_inds = fg_inds[:(len(fg_inds) - num_fg)]
        labels[disable_inds] = -1

    # subsample negative labels if we have too many
    num_bg = config.TRAIN.RPN_BATCH_SIZE - np.sum(labels == 1)
    bg_inds = np.where(labels == 0)[0]
    if len(bg_inds) > num_bg:
        disable_inds = npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
        if DEBUG:
            disable_inds = bg_inds[:(len(bg_inds) - num_bg)]
        labels[disable_inds] = -1

    bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
    if gt_boxes.size > 0:
        bbox_targets[:] = bbox_transform(anchors, gt_boxes[argmax_overlaps, :4])

    bbox_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
    bbox_weights[labels == 1, :] = np.array(config.TRAIN.RPN_BBOX_WEIGHTS)

    if DEBUG:
        _sums = bbox_targets[labels == 1, :].sum(axis=0)
        _squared_sums = (bbox_targets[labels == 1, :] ** 2).sum(axis=0)
        _counts = np.sum(labels == 1)
        means = _sums / (_counts + 1e-14)
        stds = np.sqrt(_squared_sums / _counts - means ** 2)
        print 'means', means
        print 'stdevs', stds

    # map up to original set of anchors
    labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
    bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
    bbox_weights = _unmap(bbox_weights, total_anchors, inds_inside, fill=0)

    if DEBUG:
        if gt_boxes.size > 0:
            print 'rpn: max max_overlaps', np.max(max_overlaps)
        print 'rpn: num_positives', np.sum(labels == 1)
        print 'rpn: num_negatives', np.sum(labels == 0)
        _fg_sum = np.sum(labels == 1)
        _bg_sum = np.sum(labels == 0)
        _count = 1
        print 'rpn: num_positive avg', _fg_sum / _count
        print 'rpn: num_negative avg', _bg_sum / _count

    # reshape: slice the flat per-anchor arrays back into per-level blocks
    # and lay them out as (1, A, H, W)-style tensors expected by the network
    label_list = list()
    bbox_target_list = list()
    bbox_weight_list = list()
    anchors_num_range = [0] + anchors_num_list
    for i in range(len(feat_strides)):
        feat_height, feat_width = feat_infos[i]
        A = A_list[i]
        label = labels[sum(anchors_num_range[:i+1]):sum(anchors_num_range[:i+1])+anchors_num_range[i+1]]
        bbox_target = bbox_targets[sum(anchors_num_range[:i+1]):sum(anchors_num_range[:i+1])+anchors_num_range[i+1]]
        bbox_weight = bbox_weights[sum(anchors_num_range[:i+1]):sum(anchors_num_range[:i+1])+anchors_num_range[i+1]]

        label = label.reshape((1, feat_height, feat_width, A)).transpose(0, 3, 1, 2)
        label = label.reshape((1, A * feat_height * feat_width))
        bbox_target = bbox_target.reshape((1, feat_height*feat_width, A * 4)).transpose(0, 2, 1)
        bbox_weight = bbox_weight.reshape((1, feat_height*feat_width, A * 4)).transpose((0, 2, 1))

        label_list.append(label)
        bbox_target_list.append(bbox_target)
        bbox_weight_list.append(bbox_weight)

    label_concat = np.concatenate(label_list, axis=1)
    bbox_target_concat = np.concatenate(bbox_target_list, axis=2)
    bbox_weight_concat = np.concatenate(bbox_weight_list, axis=2)

    label= {'label': label_concat,
            'bbox_target': bbox_target_concat,
            'bbox_weight': bbox_weight_concat}
    return label
ace1577514b7dacc1ba9e2ed1b48965eb61d67c9 | 14,074 | py | Python | tests/models/test_horovod.py | bayethiernodiop/pytorch-lightning | ac3f1bd0da50b676493877d5d96509c928f17db6 | [
"Apache-2.0"
] | 2 | 2021-06-24T11:46:53.000Z | 2021-06-24T13:29:50.000Z | tests/models/test_horovod.py | bayethiernodiop/pytorch-lightning | ac3f1bd0da50b676493877d5d96509c928f17db6 | [
"Apache-2.0"
] | null | null | null | tests/models/test_horovod.py | bayethiernodiop/pytorch-lightning | ac3f1bd0da50b676493877d5d96509c928f17db6 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import platform
import shlex
import subprocess
import sys
import numpy as np
import pytest
import torch
from sklearn.metrics import accuracy_score
import tests.base.develop_pipelines as tpipes
import tests.base.develop_utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.horovod_accelerator import HorovodAccelerator
from pytorch_lightning.core.step_result import Result, TrainResult, EvalResult
from pytorch_lightning.metrics.classification.accuracy import Accuracy
from pytorch_lightning.utilities import APEX_AVAILABLE, NATIVE_AMP_AVAILABLE
from tests.base import EvalModelTemplate
from tests.base.models import BasicGAN
try:
import horovod
from horovod.common.util import nccl_built
except ImportError:
HOROVOD_AVAILABLE = False
else:
HOROVOD_AVAILABLE = True
# This script will run the actual test model training in parallel
TEST_SCRIPT = os.path.join(os.path.dirname(__file__), 'data', 'horovod', 'train_default_model.py')


def _nccl_available():
    """Return True when the installed Horovod build has NCCL support."""
    if not HOROVOD_AVAILABLE:
        return False

    try:
        return nccl_built()
    except AttributeError:
        # Horovod 0.19.1 nccl_built() does not yet work with Python 3.8:
        # See: https://github.com/horovod/horovod/issues/1891
        return False
def _run_horovod(trainer_options, on_gpu=False):
    """Execute the training script across multiple workers in parallel.

    Note: mutates the caller's ``trainer_options`` dict (the ``gpus`` key
    is rewritten to the per-worker value).
    """
    # `gpus` in the incoming options means the number of workers to launch.
    num_processes = trainer_options.get('gpus', 2)
    # for Horovod, we interpret `gpus` to be set per worker
    trainer_options.update(gpus=1 if on_gpu else None)
    tutils.reset_seed()
    # Options travel through the shell as one argument, hence shlex.quote.
    cmdline = [
        'horovodrun',
        '-np', str(num_processes),
        sys.executable, TEST_SCRIPT,
        '--trainer-options', shlex.quote(json.dumps(trainer_options))
    ]
    if on_gpu:
        cmdline += ['--on-gpu']
    exit_code = subprocess.call(' '.join(cmdline), shell=True, env=os.environ.copy())
    assert exit_code == 0
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
def test_horovod_cpu(tmpdir):
    """Test Horovod running multi-process on CPU."""
    trainer_options = dict(
        default_root_dir=str(tmpdir),
        weights_save_path=str(tmpdir),
        gradient_clip_val=1.0,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        distributed_backend='horovod',
        deterministic=True,
    )
    _run_horovod(trainer_options)


@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
def test_horovod_cpu_implicit(tmpdir):
    """Test Horovod without specifying a backend, inferring from env set by `horovodrun`."""
    # Same as test_horovod_cpu but with no explicit distributed_backend.
    trainer_options = dict(
        default_root_dir=str(tmpdir),
        weights_save_path=str(tmpdir),
        gradient_clip_val=1.0,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        deterministic=True,
    )
    _run_horovod(trainer_options)


@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.skipif(not _nccl_available(), reason="test requires Horovod with NCCL support")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_horovod_multi_gpu(tmpdir):
    """Test Horovod with multi-GPU support."""
    # gpus=2 here means two workers; _run_horovod rewrites it to 1 per worker.
    trainer_options = dict(
        default_root_dir=str(tmpdir),
        weights_save_path=str(tmpdir),
        gradient_clip_val=1.0,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        gpus=2,
        deterministic=True,
        distributed_backend='horovod'
    )
    _run_horovod(trainer_options, on_gpu=True)


@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.skipif(not _nccl_available(), reason="test requires Horovod with NCCL support")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(not APEX_AVAILABLE, reason="test requires apex")
def test_horovod_apex(tmpdir):
    """Test Horovod with multi-GPU support using apex amp."""
    trainer_options = dict(
        default_root_dir=str(tmpdir),
        weights_save_path=str(tmpdir),
        gradient_clip_val=1.0,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        gpus=2,
        deterministic=True,
        distributed_backend='horovod',
        amp_backend='apex',
        precision=16,
    )
    _run_horovod(trainer_options, on_gpu=True)


@pytest.mark.skip(reason="Skip till Horovod fixes integration with Native torch.cuda.amp")
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.skipif(not _nccl_available(), reason="test requires Horovod with NCCL support")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(not NATIVE_AMP_AVAILABLE, reason="test requires torch.cuda.amp")
def test_horovod_amp(tmpdir):
    """Test Horovod with multi-GPU support using native amp."""
    trainer_options = dict(
        default_root_dir=str(tmpdir),
        weights_save_path=str(tmpdir),
        gradient_clip_val=1.0,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        gpus=2,
        deterministic=True,
        distributed_backend='horovod',
        amp_backend='native',
        precision=16,
    )
    _run_horovod(trainer_options, on_gpu=True)
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
@pytest.mark.skipif(not _nccl_available(), reason="test requires Horovod with NCCL support")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_horovod_transfer_batch_to_gpu(tmpdir):
    """Ensure Horovod moves each batch off the CPU before train/val steps."""
    class TestTrainingStepModel(EvalModelTemplate):
        def training_step(self, batch, *args, **kwargs):
            x, y = batch
            # Both tensors must already be on a non-CPU device here.
            assert str(x.device) != 'cpu'
            assert str(y.device) != 'cpu'
            return super(TestTrainingStepModel, self).training_step(batch, *args, **kwargs)

        def validation_step(self, batch, *args, **kwargs):
            x, y = batch
            assert str(x.device) != 'cpu'
            assert str(y.device) != 'cpu'
            return super(TestTrainingStepModel, self).validation_step(batch, *args, **kwargs)

    hparams = EvalModelTemplate.get_default_hparams()
    model = TestTrainingStepModel(**hparams)

    trainer_options = dict(
        default_root_dir=str(tmpdir),
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        gpus=1,
        deterministic=True,
        distributed_backend='horovod'
    )
    tpipes.run_model_test_without_loggers(trainer_options, model)
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
def test_horovod_multi_optimizer(tmpdir):
    """Verify Horovod wraps every optimizer of a multi-optimizer (GAN) model
    and that each wrapped optimizer keeps its own model's parameter groups."""
    model = BasicGAN(**EvalModelTemplate.get_default_hparams())

    # fit model
    trainer = Trainer(
        default_root_dir=str(tmpdir),
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        deterministic=True,
        distributed_backend='horovod',
    )
    result = trainer.fit(model)
    assert result == 1, 'model failed to complete'

    assert len(trainer.optimizers) == 2
    # `synchronize` is added by Horovod's DistributedOptimizer wrapper.
    for optimizer in trainer.optimizers:
        assert hasattr(optimizer, 'synchronize'), 'optimizer has not been wrapped into DistributedOptimizer'

    def get_model_params(model):
        # Parameters of one sub-network, as an identity-keyed set.
        return set(model.parameters())

    def get_optimizer_params(optimizer):
        # All parameters an optimizer actually updates.
        return {p for group in optimizer.param_groups for p in group.get('params', [])}

    assert get_model_params(model.generator) != get_model_params(model.discriminator)
    assert get_model_params(model.generator) == get_optimizer_params(trainer.optimizers[0])
    assert get_model_params(model.discriminator) == get_optimizer_params(trainer.optimizers[1])
@pytest.mark.skipif(not HOROVOD_AVAILABLE, reason="Horovod is unavailable")
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
def test_result_reduce_horovod(tmpdir):
    """Make sure result logging works with Horovod.

    This test mirrors tests/core/test_results.py::_ddp_test_fn
    """
    tutils.reset_seed()
    tutils.set_random_master_port()

    def hvd_test_fn():
        # This body runs in each Horovod worker process, so imports that
        # depend on the initialized Horovod context live inside it.
        path_here = os.path.abspath(os.path.dirname(__file__))
        path_root = os.path.abspath(os.path.join(path_here, '..', '..'))
        sys.path.insert(0, os.path.abspath(path_root))

        from tests.base.boring_model import BoringModel
        import horovod.torch as hvd

        class TestModel(BoringModel):
            def training_step(self, batch, batch_idx):
                self.training_step_called = True

                tensor = torch.tensor([1.0])
                self.log("test_tensor", tensor, sync_dist=True, sync_dist_op='sum',
                         on_step=True, on_epoch=True)

                res = self._results

                # Check that `tensor` is summed across all ranks automatically
                assert res["test_tensor"].item() == hvd.size(), \
                    "Result-Log does not work properly with Horovod and Tensors"

            def training_epoch_end(self, outputs) -> None:
                assert len(outputs) == 0

        model = TestModel()
        model.val_dataloader = None

        trainer = Trainer(
            default_root_dir=tmpdir,
            limit_train_batches=2,
            limit_val_batches=2,
            max_epochs=1,
            log_every_n_steps=1,
            weights_summary=None,
        )

        trainer.fit(model)

    horovod.run(hvd_test_fn, np=2)
@pytest.mark.skipif(not HOROVOD_AVAILABLE, reason="Horovod is unavailable")
@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
def test_accuracy_metric_horovod():
    """Check the Accuracy metric syncs across Horovod ranks and matches sklearn."""
    num_batches = 10
    batch_size = 16
    threshold = 0.5

    def sk_metric(preds, target):
        # sklearn reference implementation for comparison.
        sk_preds = (preds.view(-1).numpy() >= threshold).astype(np.uint8)
        sk_target = target.view(-1).numpy()
        return accuracy_score(y_true=sk_target, y_pred=sk_preds)

    preds = torch.rand(num_batches, batch_size)
    target = torch.randint(high=2, size=(num_batches, batch_size))

    def _compute_batch():
        # Runs in each Horovod worker process (spawned by horovod.run below).
        import horovod.torch as hvd

        trainer = Trainer(
            fast_dev_run=True,
            distributed_backend='horovod',
        )

        accelerator_backend = trainer.accelerator_connector.select_accelerator()
        assert isinstance(accelerator_backend, HorovodAccelerator)

        metric = Accuracy(compute_on_step=True,
                          dist_sync_on_step=True,
                          dist_sync_fn=accelerator_backend.gather_all_tensors,
                          threshold=threshold)

        # Each rank handles a strided subset of batches; rank 0 checks
        # that the per-step result equals accuracy over all ranks' batches.
        for i in range(hvd.rank(), num_batches, hvd.size()):
            batch_result = metric(preds[i], target[i])
            if hvd.rank() == 0:
                dist_preds = torch.stack([preds[i + r] for r in range(hvd.size())])
                dist_target = torch.stack([target[i + r] for r in range(hvd.size())])
                sk_batch_result = sk_metric(dist_preds, dist_target)
                assert np.allclose(batch_result.numpy(), sk_batch_result)

        # check on all batches on all ranks
        result = metric.compute()
        assert isinstance(result, torch.Tensor)

        total_preds = torch.stack([preds[i] for i in range(num_batches)])
        total_target = torch.stack([target[i] for i in range(num_batches)])
        sk_result = sk_metric(total_preds, total_target)

        assert np.allclose(result.numpy(), sk_result)

    horovod.run(_compute_batch, np=2)
# @pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
# def test_horovod_multi_optimizer_with_scheduling_stepping(tmpdir):
# hparams = EvalModelTemplate.get_default_hparams()
# model = EvalModelTemplate(**hparams)
# model.configure_optimizers = model.configure_optimizers__multiple_schedulers
#
# num_workers = 8
# init_lr = hparams.get('learning_rate') * num_workers
#
# with patch('pytorch_lightning.accelerators.horovod_backend.hvd.size') as mock_hvd_size:
# mock_hvd_size.return_value = 8
#
# # fit model
# trainer = Trainer(
# default_root_dir=tmpdir,
# max_epochs=1,
# limit_val_batches=0.5,
# limit_train_batches=0.2,
# distributed_backend='horovod'
# )
# results = trainer.fit(model)
# assert results == 1
#
# adjusted_lr1 = [pg['lr'] for pg in trainer.optimizers[0].param_groups][0]
# adjusted_lr2 = [pg['lr'] for pg in trainer.optimizers[1].param_groups][0]
#
# # Called ones after end of epoch with gamma=0.1
# assert pytest.approx(init_lr * 0.1) == adjusted_lr1
#
# # Called every 3 steps, meaning for 1 epoch of 11 batches, it is called 3 times with gamma=0.1
# assert pytest.approx(init_lr * 0.1) == adjusted_lr2
| 37.036842 | 108 | 0.681185 |
ace1592bdb272b36ac922b90698b4741aa477d10 | 461 | py | Python | example.py | stephantul/orst | 5f4d7a08e5759fa9eeee21eb8ff0316145dd9925 | [
"MIT"
] | 2 | 2016-06-08T12:49:58.000Z | 2019-11-22T08:49:21.000Z | example.py | stephantul/orst | 5f4d7a08e5759fa9eeee21eb8ff0316145dd9925 | [
"MIT"
] | null | null | null | example.py | stephantul/orst | 5f4d7a08e5759fa9eeee21eb8ff0316145dd9925 | [
"MIT"
] | null | null | null | """Example with image."""
import logging
import numpy as np
from PIL import Image
from orst.orst import sort
from orst.reduction import summation
if __name__ == u"__main__":
logging.basicConfig(level=logging.INFO)
path = u"test.jpg"
img = Image.open(path)
o_img = np.array(img.getdata()).reshape((img.size[1], img.size[0], 3)) / 255
def heuristic(x):
return x < 0.9
img = sort(o_img, heuristic, summation, num_rotations=1)
| 21.952381 | 80 | 0.67679 |
ace1592c3867cba2b8a5acefbd52c139eac32fe1 | 11,165 | py | Python | sympy/printing/pretty.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
] | 1 | 2016-05-08T17:54:57.000Z | 2016-05-08T17:54:57.000Z | sympy/printing/pretty.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
] | null | null | null | sympy/printing/pretty.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
] | null | null | null | from sympy.core import Basic
from printer import Printer
from stringpict import *
from pretty_symbology import xstr, hobj, vobj, xobj, xrel, pretty_symbol, pretty_atom, pretty_use_unicode
pprint_use_unicode = pretty_use_unicode
def prettyAtom(s):
return prettyForm(s, binding=prettyForm.ATOM)
class PrettyPrinter(Printer):
"""
A class that prints a prettified expression, one that is not limited
to one dimension like casting the expression to a string would return.
"""
def __init__(self, use_unicode=None):
Printer.__init__(self)
self.emptyPrinter = lambda x : prettyAtom(xstr(x))
def doprint(self, expr):
Printer.doprint.__doc__
return self._print(expr).terminal_string()
def _print_Symbol(self, e):
symb = pretty_symbol(e.name)
return prettyAtom(symb)
def _print_Atom(self, e):
try:
# print atoms like Exp1 or Pi
return prettyAtom(pretty_atom(e.__class__.__name__))
except KeyError:
pass
# Infinity inherits from Rational, so we have to override _print_XXX order
_print_Infinity = _print_Atom
_print_NegativeInfinity = _print_Atom
def _print_Factorial(self, e):
x = e[0]
if (isinstance(x, Basic.Integer) and x.is_nonnegative) or \
isinstance(x, Basic.Symbol):
s = self._print(x)
else:
# XXX parens
s = "(" + self._print(x) + ")"
return s + "!"
def _print_Relational(self, e):
op = prettyForm(' ' + xrel(e.rel_op) + ' ')
l = self._print(e.lhs)
r = self._print(e.rhs)
pform = prettyForm(*stringPict.next(l, op, r))
return pform
def _print_conjugate(self, e):
pform = self._print(e[0])
return prettyForm( *pform.above( hobj('_',pform.width())) )
def _print_abs(self, e):
pform = self._print(e[0])
vbar = vobj('|', pform.height())
vbar = stringPict(vbar, baseline=pform.baseline)
pform = prettyForm(*pform.left (vbar))
pform = prettyForm(*pform.right(vbar))
return pform
def _print_Derivative(self, deriv):
# XXX use U('PARTIAL DIFFERENTIAL') here ?
syms = list(deriv.symbols)
syms.reverse()
x = None
for sym in syms:
if x is None:
x = prettyForm('d' + str(sym))
else:
x = prettyForm(*stringPict.next(x, ' d' + str(sym)))
f = prettyForm(binding=prettyForm.FUNC, *self._print(deriv.expr).parens())
pform = prettyForm('d')
if len(syms) > 1:
pform = pform ** prettyForm(str(len(deriv.symbols)))
pform = prettyForm(*pform.below(stringPict.LINE, x))
pform.baseline = pform.baseline + 1
pform = prettyForm(*stringPict.next(pform, f))
return pform
def _print_Integral(self, integral):
f = integral.function
# Add parentheses if a sum and create pretty form for argument
prettyF = self._print(f)
# XXX generalize parents
if isinstance(f, Basic.Add):
prettyF = prettyForm(*prettyF.parens())
# dx dy dz ...
arg = prettyF
for x,ab in integral.limits:
prettyArg = self._print(x)
# XXX qparens (parens if needs-parens)
if prettyArg.width() > 1:
prettyArg = prettyForm(*prettyArg.parens())
arg = prettyForm(*arg.right(' d', prettyArg))
# \int \int \int ...
firstterm = True
S = None
for x,ab in integral.limits:
# Create bar based on the height of the argument
h = arg.height()
H = h+2
# XXX hack!
ascii_mode = not pretty_use_unicode()
if ascii_mode:
H += 2
vint= vobj('int', H)
# Construct the pretty form with the integral sign and the argument
pform = prettyForm(vint)
#pform.baseline = pform.height()//2 # vcenter
pform.baseline = arg.baseline + (H-h)//2 # covering the whole argument
if ab is not None:
# Create pretty forms for endpoints, if definite integral
prettyA = self._print(ab[0])
prettyB = self._print(ab[1])
if ascii_mode: # XXX hack
# Add spacing so that endpoint can more easily be
# identified with the correct integral sign
spc = max(1, 3 - prettyB.width())
prettyB = prettyForm(*prettyB.left(' ' * spc))
spc = max(1, 4 - prettyA.width())
prettyA = prettyForm(*prettyA.right(' ' * spc))
pform = prettyForm(*pform.above(prettyB))
pform = prettyForm(*pform.below(prettyA))
#if ascii_mode: # XXX hack
# # too much vspace beetween \int and argument
# # but I left it as is
# pform = prettyForm(*pform.right(' '))
if not ascii_mode: # XXX hack
pform = prettyForm(*pform.right(' '))
if firstterm:
S = pform # first term
firstterm = False
else:
S = prettyForm(*S.left(pform))
pform = prettyForm(*arg.left(S))
return pform
def _print_exp(self, e):
base = prettyAtom(pretty_atom('Exp1', 'e'))
return base ** self._print(e[0])
def _print_Function(self, e):
# XXX works only for applied functions
func = e.func
args = e[:]
n = len(args)
func_name = func.__name__
prettyFunc = self._print(Basic.Symbol(func_name));
prettyArgs = self._print(args[0])
for i in xrange(1, n):
pform = self._print(args[i])
prettyArgs = prettyForm(*stringPict.next(prettyArgs, ', '))
prettyArgs = prettyForm(*stringPict.next(prettyArgs, pform))
prettyArgs = prettyForm(*prettyArgs.parens(ifascii_nougly=True))
pform = prettyForm(binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))
# store pform parts so it can be reassembled e.g. when powered
pform.prettyFunc = prettyFunc
pform.prettyArgs = prettyArgs
return pform
def _print_Add(self, sum):
pforms = []
for x in sum:
# Check for negative "things" so that this information can be enforce upon
# the pretty form so that it can be made of use (such as in a sum).
if isinstance(x, Basic.Mul) and x.as_coeff_terms()[0] < 0:
pform1 = self._print(-x)
if len(pforms) == 0:
if pform1.height() > 1:
pform2 = '- '
else:
pform2 = '-'
else:
pform2 = ' - '
pform = stringPict.next(pform2, pform1)
pforms.append(prettyForm(binding=prettyForm.NEG, *pform))
elif isinstance(x, Basic.Number) and x < 0:
pform1 = self._print(-x)
if len(pforms) == 0:
if pform1.height() > 1:
pform2 = '- '
else:
pform2 = '-'
pform = stringPict.next(pform2, pform1)
else:
pform = stringPict.next(' - ', pform1)
pforms.append(prettyForm(binding=prettyForm.NEG, *pform))
else:
pforms.append(self._print(x))
return prettyForm.__add__(*pforms)
def _print_Mul(self, product):
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
# Gather terms for numerator/denominator
for item in product:
if isinstance(item, Basic.Pow) and item.exp == -1:
b.append(item.base)
elif isinstance(item, Basic.Rational):
if item.p != 1:
a.append( Basic.Rational(item.p) )
if item.q != 1:
b.append( Basic.Rational(item.q) )
else:
a.append(item)
# Convert to pretty forms. Add parens to Add instances if there
# is more than one term in the numer/denom
for i in xrange(0, len(a)):
if isinstance(a[i], Basic.Add) and len(a) > 1:
a[i] = prettyForm(*self._print(a[i]).parens())
else:
a[i] = self._print(a[i])
for i in xrange(0, len(b)):
if isinstance(b[i], Basic.Add) and len(b) > 1:
b[i] = prettyForm(*self._print(b[i]).parens())
else:
b[i] = self._print(b[i])
# Construct a pretty form
if len(b) == 0:
return prettyForm.__mul__(*a)
else:
if len(a) == 0:
a.append( self._print(Basic.One()) )
return prettyForm.__mul__(*a) / prettyForm.__mul__(*b)
def _print_Pow(self, power):
if isinstance(power.exp, Basic.Half):
# If it's a square root
bpretty = self._print(power.base)
H = bpretty.height()
_zZ= xobj('/',1)
s2 = stringPict(xobj('\\',1)+_zZ+' '*(H-1))
for x in xrange(1, H):
s3 = stringPict(' '*(x+1) + _zZ + ' '*(H-(x+1)))
s2 = stringPict(*s2.above(s3))
s2.baseline = bpretty.baseline # vertical: each-to-each
s = prettyForm(hobj('_', 2+ bpretty.width()))
s = prettyForm(*bpretty.above(s))
s = prettyForm(*s.left(s2))
return s
elif power.exp == -1:
# Things like 1/x
return prettyForm("1") / self._print(power.base)
# None of the above special forms, do a standard power
b,e = power.as_base_exp()
return self._print(b)**self._print(e)
def _print_Rational(self, r):
if r.q == 1:
return prettyAtom(str(r.p))
elif abs(r.p) >= 10 and abs(r.q) >= 10:
# If more than one digit in numer and denom, print larger fraction
if r.is_negative:
pform = prettyForm(str(-r.p))/prettyForm(str(r.q))
return prettyForm(binding=prettyForm.NEG, *pform.left('- '))
else:
return prettyForm(str(r.p))/prettyForm(str(r.q))
def pretty(expr, use_unicode=None):
"""
Returns a string containing the prettified form of expr. If use_unicode
is set to True then certain expressions will use unicode characters,
such as the greek letter pi for Basic.Pi instances.
"""
uflag = pretty_use_unicode(use_unicode)
try:
pp = PrettyPrinter()
return pp.doprint(expr)
finally:
pretty_use_unicode(uflag)
def pretty_print(expr, use_unicode=None):
"""
Prints expr in pretty form.
pprint is just a shortcut for this function
"""
print pretty(expr, use_unicode)
pprint = pretty_print
| 33.833333 | 106 | 0.539812 |
ace1593999d4ebf9ea05adc29fdde378f3f31735 | 21,065 | py | Python | Lib/test/test_marshal.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 2,441 | 2020-07-31T06:45:53.000Z | 2022-03-30T15:56:49.000Z | Lib/test/test_marshal.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | Lib/test/test_marshal.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 93 | 2020-08-09T12:00:17.000Z | 2022-03-25T07:57:24.000Z | from test import support
import array
import io
import marshal
import sys
import unittest
import os
import types
try:
import _testcapi
except ImportError:
_testcapi = None
class HelperMixin:
def helper(self, sample, *extra):
new = marshal.loads(marshal.dumps(sample, *extra))
self.assertEqual(sample, new)
try:
with open(support.TESTFN, "wb") as f:
marshal.dump(sample, f, *extra)
with open(support.TESTFN, "rb") as f:
new = marshal.load(f)
self.assertEqual(sample, new)
finally:
support.unlink(support.TESTFN)
class IntTestCase(unittest.TestCase, HelperMixin):
def test_ints(self):
# Test a range of Python ints larger than the machine word size.
n = sys.maxsize ** 2
while n:
for expected in (-n, n):
self.helper(expected)
n = n >> 1
def test_int64(self):
# Simulate int marshaling with TYPE_INT64.
maxint64 = (1 << 63) - 1
minint64 = -maxint64-1
for base in maxint64, minint64, -maxint64, -(minint64 >> 1):
while base:
s = b'I' + int.to_bytes(base, 8, 'little', signed=True)
got = marshal.loads(s)
self.assertEqual(base, got)
if base == -1: # a fixed-point for shifting right 1
base = 0
else:
base >>= 1
got = marshal.loads(b'I\xfe\xdc\xba\x98\x76\x54\x32\x10')
self.assertEqual(got, 0x1032547698badcfe)
got = marshal.loads(b'I\x01\x23\x45\x67\x89\xab\xcd\xef')
self.assertEqual(got, -0x1032547698badcff)
got = marshal.loads(b'I\x08\x19\x2a\x3b\x4c\x5d\x6e\x7f')
self.assertEqual(got, 0x7f6e5d4c3b2a1908)
got = marshal.loads(b'I\xf7\xe6\xd5\xc4\xb3\xa2\x91\x80')
self.assertEqual(got, -0x7f6e5d4c3b2a1909)
def test_bool(self):
for b in (True, False):
self.helper(b)
class FloatTestCase(unittest.TestCase, HelperMixin):
def test_floats(self):
# Test a few floats
small = 1e-25
n = sys.maxsize * 3.7e250
while n > small:
for expected in (-n, n):
self.helper(float(expected))
n /= 123.4567
f = 0.0
s = marshal.dumps(f, 2)
got = marshal.loads(s)
self.assertEqual(f, got)
# and with version <= 1 (floats marshalled differently then)
s = marshal.dumps(f, 1)
got = marshal.loads(s)
self.assertEqual(f, got)
n = sys.maxsize * 3.7e-250
while n < small:
for expected in (-n, n):
f = float(expected)
self.helper(f)
self.helper(f, 1)
n *= 123.4567
class StringTestCase(unittest.TestCase, HelperMixin):
def test_unicode(self):
for s in ["", "Andr\xe8 Previn", "abc", " "*10000]:
self.helper(marshal.loads(marshal.dumps(s)))
def test_string(self):
for s in ["", "Andr\xe8 Previn", "abc", " "*10000]:
self.helper(s)
def test_bytes(self):
for s in [b"", b"Andr\xe8 Previn", b"abc", b" "*10000]:
self.helper(s)
class ExceptionTestCase(unittest.TestCase):
def test_exceptions(self):
new = marshal.loads(marshal.dumps(StopIteration))
self.assertEqual(StopIteration, new)
class CodeTestCase(unittest.TestCase):
def test_code(self):
co = ExceptionTestCase.test_exceptions.__code__
new = marshal.loads(marshal.dumps(co))
self.assertEqual(co, new)
def test_many_codeobjects(self):
# Issue2957: bad recursion count on code objects
count = 5000 # more than MAX_MARSHAL_STACK_DEPTH
codes = (ExceptionTestCase.test_exceptions.__code__,) * count
marshal.loads(marshal.dumps(codes))
def test_different_filenames(self):
co1 = compile("x", "f1", "exec")
co2 = compile("y", "f2", "exec")
co1, co2 = marshal.loads(marshal.dumps((co1, co2)))
self.assertEqual(co1.co_filename, "f1")
self.assertEqual(co2.co_filename, "f2")
@support.cpython_only
def test_same_filename_used(self):
s = """def f(): pass\ndef g(): pass"""
co = compile(s, "myfile", "exec")
co = marshal.loads(marshal.dumps(co))
for obj in co.co_consts:
if isinstance(obj, types.CodeType):
self.assertIs(co.co_filename, obj.co_filename)
class ContainerTestCase(unittest.TestCase, HelperMixin):
d = {'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'alist': ['.zyx.41'],
'atuple': ('.zyx.41',)*10,
'aboolean': False,
'aunicode': "Andr\xe8 Previn"
}
def test_dict(self):
self.helper(self.d)
def test_list(self):
self.helper(list(self.d.items()))
def test_tuple(self):
self.helper(tuple(self.d.keys()))
def test_sets(self):
for constructor in (set, frozenset):
self.helper(constructor(self.d.keys()))
@support.cpython_only
def test_empty_frozenset_singleton(self):
# marshal.loads() must reuse the empty frozenset singleton
obj = frozenset()
obj2 = marshal.loads(marshal.dumps(obj))
self.assertIs(obj2, obj)
class BufferTestCase(unittest.TestCase, HelperMixin):
def test_bytearray(self):
b = bytearray(b"abc")
self.helper(b)
new = marshal.loads(marshal.dumps(b))
self.assertEqual(type(new), bytes)
def test_memoryview(self):
b = memoryview(b"abc")
self.helper(b)
new = marshal.loads(marshal.dumps(b))
self.assertEqual(type(new), bytes)
def test_array(self):
a = array.array('B', b"abc")
new = marshal.loads(marshal.dumps(a))
self.assertEqual(new, b"abc")
class BugsTestCase(unittest.TestCase):
def test_bug_5888452(self):
# Simple-minded check for SF 588452: Debug build crashes
marshal.dumps([128] * 1000)
def test_patch_873224(self):
self.assertRaises(Exception, marshal.loads, b'0')
self.assertRaises(Exception, marshal.loads, b'f')
self.assertRaises(Exception, marshal.loads, marshal.dumps(2**65)[:-1])
def test_version_argument(self):
# Python 2.4.0 crashes for any call to marshal.dumps(x, y)
self.assertEqual(marshal.loads(marshal.dumps(5, 0)), 5)
self.assertEqual(marshal.loads(marshal.dumps(5, 1)), 5)
def test_fuzz(self):
# simple test that it's at least not *totally* trivial to
# crash from bad marshal data
for i in range(256):
c = bytes([i])
try:
marshal.loads(c)
except Exception:
pass
def test_loads_recursion(self):
def run_tests(N, check):
# (((...None...),),)
check(b')\x01' * N + b'N')
check(b'(\x01\x00\x00\x00' * N + b'N')
# [[[...None...]]]
check(b'[\x01\x00\x00\x00' * N + b'N')
# {None: {None: {None: ...None...}}}
check(b'{N' * N + b'N' + b'0' * N)
# frozenset([frozenset([frozenset([...None...])])])
check(b'>\x01\x00\x00\x00' * N + b'N')
# Check that the generated marshal data is valid and marshal.loads()
# works for moderately deep nesting
run_tests(100, marshal.loads)
# Very deeply nested structure shouldn't blow the stack
def check(s):
self.assertRaises(ValueError, marshal.loads, s)
run_tests(2**20, check)
def test_recursion_limit(self):
# Create a deeply nested structure.
head = last = []
# The max stack depth should match the value in Python/marshal.c.
# BUG: https://bugs.python.org/issue33720
# Windows always limits the maximum depth on release and debug builds
#if os.name == 'nt' and hasattr(sys, 'gettotalrefcount'):
if os.name == 'nt':
MAX_MARSHAL_STACK_DEPTH = 1000
else:
MAX_MARSHAL_STACK_DEPTH = 2000
for i in range(MAX_MARSHAL_STACK_DEPTH - 2):
last.append([0])
last = last[-1]
# Verify we don't blow out the stack with dumps/load.
data = marshal.dumps(head)
new_head = marshal.loads(data)
# Don't use == to compare objects, it can exceed the recursion limit.
self.assertEqual(len(new_head), len(head))
self.assertEqual(len(new_head[0]), len(head[0]))
self.assertEqual(len(new_head[-1]), len(head[-1]))
last.append([0])
self.assertRaises(ValueError, marshal.dumps, head)
def test_exact_type_match(self):
# Former bug:
# >>> class Int(int): pass
# >>> type(loads(dumps(Int())))
# <type 'int'>
for typ in (int, float, complex, tuple, list, dict, set, frozenset):
# Note: str subclasses are not tested because they get handled
# by marshal's routines for objects supporting the buffer API.
subtyp = type('subtyp', (typ,), {})
self.assertRaises(ValueError, marshal.dumps, subtyp())
# Issue #1792 introduced a change in how marshal increases the size of its
# internal buffer; this test ensures that the new code is exercised.
def test_large_marshal(self):
size = int(1e6)
testString = 'abc' * size
marshal.dumps(testString)
def test_invalid_longs(self):
# Issue #7019: marshal.loads shouldn't produce unnormalized PyLongs
invalid_string = b'l\x02\x00\x00\x00\x00\x00\x00\x00'
self.assertRaises(ValueError, marshal.loads, invalid_string)
def test_multiple_dumps_and_loads(self):
# Issue 12291: marshal.load() should be callable multiple times
# with interleaved data written by non-marshal code
# Adapted from a patch by Engelbert Gruber.
data = (1, 'abc', b'def', 1.0, (2, 'a', ['b', b'c']))
for interleaved in (b'', b'0123'):
ilen = len(interleaved)
positions = []
try:
with open(support.TESTFN, 'wb') as f:
for d in data:
marshal.dump(d, f)
if ilen:
f.write(interleaved)
positions.append(f.tell())
with open(support.TESTFN, 'rb') as f:
for i, d in enumerate(data):
self.assertEqual(d, marshal.load(f))
if ilen:
f.read(ilen)
self.assertEqual(positions[i], f.tell())
finally:
support.unlink(support.TESTFN)
def test_loads_reject_unicode_strings(self):
# Issue #14177: marshal.loads() should not accept unicode strings
unicode_string = 'T'
self.assertRaises(TypeError, marshal.loads, unicode_string)
def test_bad_reader(self):
class BadReader(io.BytesIO):
def readinto(self, buf):
n = super().readinto(buf)
if n is not None and n > 4:
n += 10**6
return n
for value in (1.0, 1j, b'0123456789', '0123456789'):
self.assertRaises(ValueError, marshal.load,
BadReader(marshal.dumps(value)))
def test_eof(self):
data = marshal.dumps(("hello", "dolly", None))
for i in range(len(data)):
self.assertRaises(EOFError, marshal.loads, data[0: i])
LARGE_SIZE = 2**31
pointer_size = 8 if sys.maxsize > 0xFFFFFFFF else 4
class NullWriter:
def write(self, s):
pass
@unittest.skipIf(LARGE_SIZE > sys.maxsize, "test cannot run on 32-bit systems")
class LargeValuesTestCase(unittest.TestCase):
def check_unmarshallable(self, data):
self.assertRaises(ValueError, marshal.dump, data, NullWriter())
@support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
def test_bytes(self, size):
self.check_unmarshallable(b'x' * size)
@support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
def test_str(self, size):
self.check_unmarshallable('x' * size)
@support.bigmemtest(size=LARGE_SIZE, memuse=pointer_size + 1, dry_run=False)
def test_tuple(self, size):
self.check_unmarshallable((None,) * size)
@support.bigmemtest(size=LARGE_SIZE, memuse=pointer_size + 1, dry_run=False)
def test_list(self, size):
self.check_unmarshallable([None] * size)
@support.bigmemtest(size=LARGE_SIZE,
memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1),
dry_run=False)
def test_set(self, size):
self.check_unmarshallable(set(range(size)))
@support.bigmemtest(size=LARGE_SIZE,
memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1),
dry_run=False)
def test_frozenset(self, size):
self.check_unmarshallable(frozenset(range(size)))
@support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
def test_bytearray(self, size):
self.check_unmarshallable(bytearray(size))
def CollectObjectIDs(ids, obj):
"""Collect object ids seen in a structure"""
if id(obj) in ids:
return
ids.add(id(obj))
if isinstance(obj, (list, tuple, set, frozenset)):
for e in obj:
CollectObjectIDs(ids, e)
elif isinstance(obj, dict):
for k, v in obj.items():
CollectObjectIDs(ids, k)
CollectObjectIDs(ids, v)
return len(ids)
class InstancingTestCase(unittest.TestCase, HelperMixin):
keys = (123, 1.2345, 'abc', (123, 'abc'), frozenset({123, 'abc'}))
def helper3(self, rsample, recursive=False, simple=False):
#we have two instances
sample = (rsample, rsample)
n0 = CollectObjectIDs(set(), sample)
for v in range(3, marshal.version + 1):
s3 = marshal.dumps(sample, v)
n3 = CollectObjectIDs(set(), marshal.loads(s3))
#same number of instances generated
self.assertEqual(n3, n0)
if not recursive:
#can compare with version 2
s2 = marshal.dumps(sample, 2)
n2 = CollectObjectIDs(set(), marshal.loads(s2))
#old format generated more instances
self.assertGreater(n2, n0)
#if complex objects are in there, old format is larger
if not simple:
self.assertGreater(len(s2), len(s3))
else:
self.assertGreaterEqual(len(s2), len(s3))
def testInt(self):
intobj = 123321
self.helper(intobj)
self.helper3(intobj, simple=True)
def testFloat(self):
floatobj = 1.2345
self.helper(floatobj)
self.helper3(floatobj)
def testStr(self):
strobj = "abcde"*3
self.helper(strobj)
self.helper3(strobj)
def testBytes(self):
bytesobj = b"abcde"*3
self.helper(bytesobj)
self.helper3(bytesobj)
def testList(self):
for obj in self.keys:
listobj = [obj, obj]
self.helper(listobj)
self.helper3(listobj)
def testTuple(self):
for obj in self.keys:
tupleobj = (obj, obj)
self.helper(tupleobj)
self.helper3(tupleobj)
def testSet(self):
for obj in self.keys:
setobj = {(obj, 1), (obj, 2)}
self.helper(setobj)
self.helper3(setobj)
def testFrozenSet(self):
for obj in self.keys:
frozensetobj = frozenset({(obj, 1), (obj, 2)})
self.helper(frozensetobj)
self.helper3(frozensetobj)
def testDict(self):
for obj in self.keys:
dictobj = {"hello": obj, "goodbye": obj, obj: "hello"}
self.helper(dictobj)
self.helper3(dictobj)
def testModule(self):
with open(__file__, "rb") as f:
code = f.read()
if __file__.endswith(".py"):
code = compile(code, __file__, "exec")
self.helper(code)
self.helper3(code)
def testRecursion(self):
obj = 1.2345
d = {"hello": obj, "goodbye": obj, obj: "hello"}
d["self"] = d
self.helper3(d, recursive=True)
l = [obj, obj]
l.append(l)
self.helper3(l, recursive=True)
class CompatibilityTestCase(unittest.TestCase):
def _test(self, version):
with open(__file__, "rb") as f:
code = f.read()
if __file__.endswith(".py"):
code = compile(code, __file__, "exec")
data = marshal.dumps(code, version)
marshal.loads(data)
def test0To3(self):
self._test(0)
def test1To3(self):
self._test(1)
def test2To3(self):
self._test(2)
def test3To3(self):
self._test(3)
class InterningTestCase(unittest.TestCase, HelperMixin):
strobj = "this is an interned string"
strobj = sys.intern(strobj)
def testIntern(self):
s = marshal.loads(marshal.dumps(self.strobj))
self.assertEqual(s, self.strobj)
self.assertEqual(id(s), id(self.strobj))
s2 = sys.intern(s)
self.assertEqual(id(s2), id(s))
def testNoIntern(self):
s = marshal.loads(marshal.dumps(self.strobj, 2))
self.assertEqual(s, self.strobj)
self.assertNotEqual(id(s), id(self.strobj))
s2 = sys.intern(s)
self.assertNotEqual(id(s2), id(s))
@support.cpython_only
@unittest.skipUnless(_testcapi, 'requires _testcapi')
class CAPI_TestCase(unittest.TestCase, HelperMixin):
def test_write_long_to_file(self):
for v in range(marshal.version + 1):
_testcapi.pymarshal_write_long_to_file(0x12345678, support.TESTFN, v)
with open(support.TESTFN, 'rb') as f:
data = f.read()
support.unlink(support.TESTFN)
self.assertEqual(data, b'\x78\x56\x34\x12')
def test_write_object_to_file(self):
obj = ('\u20ac', b'abc', 123, 45.6, 7+8j, 'long line '*1000)
for v in range(marshal.version + 1):
_testcapi.pymarshal_write_object_to_file(obj, support.TESTFN, v)
with open(support.TESTFN, 'rb') as f:
data = f.read()
support.unlink(support.TESTFN)
self.assertEqual(marshal.loads(data), obj)
def test_read_short_from_file(self):
with open(support.TESTFN, 'wb') as f:
f.write(b'\x34\x12xxxx')
r, p = _testcapi.pymarshal_read_short_from_file(support.TESTFN)
support.unlink(support.TESTFN)
self.assertEqual(r, 0x1234)
self.assertEqual(p, 2)
with open(support.TESTFN, 'wb') as f:
f.write(b'\x12')
with self.assertRaises(EOFError):
_testcapi.pymarshal_read_short_from_file(support.TESTFN)
support.unlink(support.TESTFN)
def test_read_long_from_file(self):
with open(support.TESTFN, 'wb') as f:
f.write(b'\x78\x56\x34\x12xxxx')
r, p = _testcapi.pymarshal_read_long_from_file(support.TESTFN)
support.unlink(support.TESTFN)
self.assertEqual(r, 0x12345678)
self.assertEqual(p, 4)
with open(support.TESTFN, 'wb') as f:
f.write(b'\x56\x34\x12')
with self.assertRaises(EOFError):
_testcapi.pymarshal_read_long_from_file(support.TESTFN)
support.unlink(support.TESTFN)
def test_read_last_object_from_file(self):
obj = ('\u20ac', b'abc', 123, 45.6, 7+8j)
for v in range(marshal.version + 1):
data = marshal.dumps(obj, v)
with open(support.TESTFN, 'wb') as f:
f.write(data + b'xxxx')
r, p = _testcapi.pymarshal_read_last_object_from_file(support.TESTFN)
support.unlink(support.TESTFN)
self.assertEqual(r, obj)
with open(support.TESTFN, 'wb') as f:
f.write(data[:1])
with self.assertRaises(EOFError):
_testcapi.pymarshal_read_last_object_from_file(support.TESTFN)
support.unlink(support.TESTFN)
def test_read_object_from_file(self):
obj = ('\u20ac', b'abc', 123, 45.6, 7+8j)
for v in range(marshal.version + 1):
data = marshal.dumps(obj, v)
with open(support.TESTFN, 'wb') as f:
f.write(data + b'xxxx')
r, p = _testcapi.pymarshal_read_object_from_file(support.TESTFN)
support.unlink(support.TESTFN)
self.assertEqual(r, obj)
self.assertEqual(p, len(data))
with open(support.TESTFN, 'wb') as f:
f.write(data[:1])
with self.assertRaises(EOFError):
_testcapi.pymarshal_read_object_from_file(support.TESTFN)
support.unlink(support.TESTFN)
if __name__ == "__main__":
unittest.main()
| 34.818182 | 81 | 0.585189 |
ace15a8ba394593d2ec12c0747ca3e9984cc3ad4 | 894 | py | Python | tests/test_examples.py | devanshshukla99/pytest-remote-response | e63a7e49385fe32b857a437576de759a1bbc14b7 | [
"BSD-3-Clause"
] | null | null | null | tests/test_examples.py | devanshshukla99/pytest-remote-response | e63a7e49385fe32b857a437576de759a1bbc14b7 | [
"BSD-3-Clause"
] | 17 | 2021-06-23T17:50:25.000Z | 2021-07-02T10:07:59.000Z | tests/test_examples.py | devanshshukla99/pytest-response | e63a7e49385fe32b857a437576de759a1bbc14b7 | [
"BSD-3-Clause"
] | null | null | null | def test_examples(testdir):
testdir.makepyfile(
"""
import pytest
from pytest_response import response
from pytest_response.exceptions import RemoteBlockedError
def test_block_urllib():
with pytest.raises(RemoteBlockedError):
from examples import block_urllib # noqa
return
def test_capture_requests():
from examples import capture_requests # noqa
assert response.db.index() == ["https://www.python.org"]
return
def test_response_urllib3():
from examples import response_urllib3 # noqa
return
def test_insert_get_database():
from examples import insert_get_database # noqa
return
"""
)
result = testdir.runpytest("-q", "-p", "no:warnings")
result.assert_outcomes(passed=4)
| 29.8 | 68 | 0.606264 |
ace15aa3111fbf5fd2d30fd39ef89b9e0abbe9dc | 1,427 | py | Python | features/enable_debugging/python/debugging.py | jingtaoh/USD-Cookbook | 544a149407146f9a3e13c53348818579c679168b | [
"MIT"
] | 332 | 2019-06-11T01:26:05.000Z | 2022-03-29T10:09:46.000Z | features/enable_debugging/python/debugging.py | jingtaoh/USD-Cookbook | 544a149407146f9a3e13c53348818579c679168b | [
"MIT"
] | 5 | 2019-07-30T15:10:51.000Z | 2021-03-14T18:55:18.000Z | features/enable_debugging/python/debugging.py | jingtaoh/USD-Cookbook | 544a149407146f9a3e13c53348818579c679168b | [
"MIT"
] | 63 | 2019-06-24T16:31:19.000Z | 2022-02-13T01:09:56.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test out how to enable debug messages in USD."""
# IMPORT STANDARD LIBRARIES
import sys
# IMPORT THIRD-PARTY LIBRARIES
from pxr import Tf, Usd
# XXX : You can optionally redirec debug output to a custom file
Tf.Debug.SetOutputFile(sys.__stdout__)
def main():
    """Run the main execution of the current script."""
    in_memory_stage = Usd.Stage.CreateInMemory()

    # Debug symbols are declared in C++ across many files (usually in files
    # named "debugCodes.h" that invoke the TF_DEBUG_CODES macro); the full
    # list is only discoverable at runtime.
    for symbol_name in Tf.Debug.GetDebugSymbolNames():
        # On a default build these typically all report False.
        print(Tf.Debug.IsDebugSymbolNameEnabled(symbol_name))

    # Dump the human-readable description of every debug symbol.
    print("Descriptions start")
    print(Tf.Debug.GetDebugSymbolDescriptions())
    print("Descriptions end")

    # Enable change processing so the next authoring call produces debug
    # output. Glob patterns such as "USD_*" also work to enable many flags
    # at once.
    Tf.Debug.SetDebugSymbolsByName("USD_CHANGES", True)
    in_memory_stage.DefinePrim("/SomePrim")  # emits multiple messages to stdout
if __name__ == "__main__":
main()
| 29.729167 | 85 | 0.69096 |
ace15b1f48740b7db321c17c7fc77ee5470f6899 | 626 | py | Python | pyfta/containers.py | gopar/pyfta | 25c6fe669054b5685513035890b105ac56743751 | [
"MIT"
] | 4 | 2017-12-19T16:43:00.000Z | 2018-07-22T20:40:36.000Z | pyfta/containers.py | gopar/pyfta | 25c6fe669054b5685513035890b105ac56743751 | [
"MIT"
] | null | null | null | pyfta/containers.py | gopar/pyfta | 25c6fe669054b5685513035890b105ac56743751 | [
"MIT"
] | null | null | null | import random
from typing import Any
from typing import List
from .randgen import RandomBase
from .randgen import RandomList
from .randgen import RandomGenerator
class ContainerBase:
    """Description of a container whose contents will be generated randomly."""

    def __init__(self, data_type: Any, how_many_to_create: int, allowed_types: List = None) -> None:
        # Concrete container type to build (currently only `list` is mapped).
        self.data_type = data_type
        # Number of random elements the resolved container should hold.
        self.how_many_to_create = how_many_to_create
        # Optional whitelist of element types the generator may emit.
        self.allowed_types = allowed_types

    def resolve(self) -> Any:
        """Instantiate the random-generator class matching ``data_type``."""
        generator_by_type = {list: RandomList}
        generator_class = generator_by_type[self.data_type]
        return generator_class(self.how_many_to_create, self.allowed_types)
| 28.454545 | 98 | 0.714058 |
ace15b8c538d86ddd920d80f13ccb0e3d788db84 | 3,182 | py | Python | squeezeDet/src/nets/vgg16_convDet.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | 2 | 2017-05-25T01:26:41.000Z | 2019-08-16T13:38:57.000Z | squeezeDet/src/nets/vgg16_convDet.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | null | null | null | squeezeDet/src/nets/vgg16_convDet.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | 1 | 2019-03-15T02:19:49.000Z | 2019-03-15T02:19:49.000Z | # Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""VGG16+ConvDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class VGG16ConvDet(ModelSkeleton):
    """VGG-16 feature extractor followed by a ConvDet detection head."""

    def __init__(self, mc, gpu_id):
        # Place the entire model graph on the requested GPU.
        with tf.device('/gpu:{}'.format(gpu_id)):
            ModelSkeleton.__init__(self, mc)

            # Build the graph pieces in dependency order: forward pass,
            # prediction interpretation, loss, training ops, visualization.
            self._add_forward_graph()
            self._add_interpretation_graph()
            self._add_loss_graph()
            self._add_train_graph()
            self._add_viz_graph()

    def _add_forward_graph(self):
        """Build the VGG-16 model."""

        mc = self.mc
        if mc.LOAD_PRETRAINED_MODEL:
            assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
                'Cannot find pretrained model at the given path:' \
                ' {}'.format(mc.PRETRAINED_MODEL_PATH)
            # Weights converted from a Caffe model, keyed by layer name.
            self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)

        # conv1/conv2 use freeze=True: their pretrained weights are not
        # updated during training.
        with tf.variable_scope('conv1') as scope:
            conv1_1 = self._conv_layer(
                'conv1_1', self.image_input, filters=64, size=3, stride=1, freeze=True)
            conv1_2 = self._conv_layer(
                'conv1_2', conv1_1, filters=64, size=3, stride=1, freeze=True)
            pool1 = self._pooling_layer(
                'pool1', conv1_2, size=2, stride=2)

        with tf.variable_scope('conv2') as scope:
            conv2_1 = self._conv_layer(
                'conv2_1', pool1, filters=128, size=3, stride=1, freeze=True)
            conv2_2 = self._conv_layer(
                'conv2_2', conv2_1, filters=128, size=3, stride=1, freeze=True)
            pool2 = self._pooling_layer(
                'pool2', conv2_2, size=2, stride=2)

        with tf.variable_scope('conv3') as scope:
            conv3_1 = self._conv_layer(
                'conv3_1', pool2, filters=256, size=3, stride=1)
            conv3_2 = self._conv_layer(
                'conv3_2', conv3_1, filters=256, size=3, stride=1)
            conv3_3 = self._conv_layer(
                'conv3_3', conv3_2, filters=256, size=3, stride=1)
            pool3 = self._pooling_layer(
                'pool3', conv3_3, size=2, stride=2)

        with tf.variable_scope('conv4') as scope:
            conv4_1 = self._conv_layer(
                'conv4_1', pool3, filters=512, size=3, stride=1)
            conv4_2 = self._conv_layer(
                'conv4_2', conv4_1, filters=512, size=3, stride=1)
            conv4_3 = self._conv_layer(
                'conv4_3', conv4_2, filters=512, size=3, stride=1)
            pool4 = self._pooling_layer(
                'pool4', conv4_3, size=2, stride=2)

        with tf.variable_scope('conv5') as scope:
            conv5_1 = self._conv_layer(
                'conv5_1', pool4, filters=512, size=3, stride=1)
            conv5_2 = self._conv_layer(
                'conv5_2', conv5_1, filters=512, size=3, stride=1)
            conv5_3 = self._conv_layer(
                'conv5_3', conv5_2, filters=512, size=3, stride=1)

        dropout5 = tf.nn.dropout(conv5_3, self.keep_prob, name='drop6')

        # ConvDet head: a single 3x3 conv predicting, for each anchor,
        # class scores (CLASSES), one confidence score (+1) and 4 box deltas.
        num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
        self.preds = self._conv_layer(
            'conv6', dropout5, filters=num_output, size=3, stride=1,
            padding='SAME', xavier=False, relu=False, stddev=0.0001)
| 34.967033 | 81 | 0.657448 |
ace15bea585c92700724329498bcfd4aeeb48036 | 1,508 | py | Python | src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py | samotarnik/grpc | 3278bdceda8030d5aa130f12765e5f07263c860d | [
"Apache-2.0"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py | samotarnik/grpc | 3278bdceda8030d5aa130f12765e5f07263c860d | [
"Apache-2.0"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py | samotarnik/grpc | 3278bdceda8030d5aa130f12765e5f07263c860d | [
"Apache-2.0"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Insecure client-server interoperability as a unit test."""
import unittest
import grpc
from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import _intraop_test_case
from tests.interop import methods
from tests.interop import server
from tests.unit import test_common
class InsecureIntraopTest(_intraop_test_case.IntraopTestCase,
                          unittest.TestCase):
    """Runs the shared interop test cases over an insecure local channel."""

    def setUp(self):
        # Stand up an in-process gRPC test server on an OS-chosen port.
        self.server = test_common.test_server()
        test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
                                                        self.server)
        port = self.server.add_insecure_port('[::]:0')
        self.server.start()
        # Client stub connected to the server without TLS; the inherited
        # test cases exercise this stub.
        self.stub = test_pb2_grpc.TestServiceStub(
            grpc.insecure_channel('localhost:{}'.format(port)))

    def tearDown(self):
        # Stop immediately (no grace period) so tests don't linger.
        self.server.stop(None)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 33.511111 | 78 | 0.706897 |
ace15c351694f0a36f85a8f74e8a8ddc6e89a12c | 261 | py | Python | catalog/bindings/csw/temporal_datum.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/temporal_datum.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/temporal_datum.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.csw.temporal_datum_type import TemporalDatumType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class TemporalDatum(TemporalDatumType):
    """GML TemporalDatum element binding; only pins the XML namespace."""

    class Meta:
        # xsdata uses this namespace when (de)serializing the element.
        namespace = "http://www.opengis.net/gml"
| 23.727273 | 62 | 0.773946 |
ace15c5305e75fac3a82971f4d71b92bcb37bafc | 1,606 | py | Python | src/test/python/apache/thermos/observer/test_task_observer.py | smarth-madan/incubator-aurora | f0d4fbdfe8402d433b1047b483877dca05eb1186 | [
"Apache-2.0"
] | null | null | null | src/test/python/apache/thermos/observer/test_task_observer.py | smarth-madan/incubator-aurora | f0d4fbdfe8402d433b1047b483877dca05eb1186 | [
"Apache-2.0"
] | null | null | null | src/test/python/apache/thermos/observer/test_task_observer.py | smarth-madan/incubator-aurora | f0d4fbdfe8402d433b1047b483877dca05eb1186 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import create_autospec, patch
from twitter.common.quantity import Amount, Time
from apache.thermos.observer.detector import ObserverTaskDetector
from apache.thermos.observer.task_observer import TaskObserver
class TaskObserverTest(unittest.TestCase):
def test_run_loop(self):
"""Test observer run loop."""
mock_task_detector = create_autospec(spec=ObserverTaskDetector)
with patch(
"apache.thermos.observer.task_observer.ObserverTaskDetector",
return_value=mock_task_detector) as mock_detector:
with patch('threading._Event.wait') as mock_wait:
run_count = 3
interval = 15
observer = TaskObserver(mock_detector, interval=Amount(interval, Time.SECONDS))
observer.start()
while len(mock_wait.mock_calls) < run_count:
pass
observer.stop()
assert len(mock_task_detector.mock_calls) >= run_count
assert len(mock_wait.mock_calls) >= run_count
args = mock_wait.mock_calls[1][1]
assert interval == args[0]
| 34.913043 | 87 | 0.733499 |
ace15c54d8aa39f16e9d0ede7a3427ca60b93910 | 2,381 | py | Python | 18b-async-await/charfinder/http_charfinder.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 199 | 2016-06-01T01:48:57.000Z | 2022-03-10T02:29:37.000Z | 18b-async-await/charfinder/http_charfinder.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 3 | 2016-05-31T23:10:26.000Z | 2017-01-22T17:38:31.000Z | 18b-async-await/charfinder/http_charfinder.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 91 | 2016-06-02T12:26:31.000Z | 2022-01-11T06:10:49.000Z | #!/usr/bin/env python3
import sys
import asyncio
from aiohttp import web
from charfinder import UnicodeNameIndex
TEMPLATE_NAME = 'http_charfinder.html'
CONTENT_TYPE = 'text/html; charset=UTF-8'
SAMPLE_WORDS = ('bismillah chess cat circled Malayalam digit'
' Roman face Ethiopic black mark symbol dot'
' operator Braille hexagram').split()
ROW_TPL = '<tr><td>{code_str}</td><th>{char}</th><td>{name}</td></tr>'
LINK_TPL = '<a href="/?query={0}" title="find "{0}"">{0}</a>'
LINKS_HTML = ', '.join(LINK_TPL.format(word) for word in
sorted(SAMPLE_WORDS, key=str.upper))
index = UnicodeNameIndex()
with open(TEMPLATE_NAME) as tpl:
template = tpl.read()
template = template.replace('{links}', LINKS_HTML)
# BEGIN HTTP_CHARFINDER_HOME
def home(request):  # <1>
    """Render the search page; run a character-name query when one is given."""
    query = request.GET.get('query', '').strip()  # <2>
    print('Query: {!r}'.format(query))  # <3>
    if query:  # <4>
        # Look up Unicode characters whose names match all query words.
        descriptions = list(index.find_descriptions(query))
        res = '\n'.join(ROW_TPL.format(**vars(descr))
                        for descr in descriptions)
        msg = index.status(query, len(descriptions))
    else:
        descriptions = []
        res = ''
        msg = 'Enter words describing characters.'

    # Fill the HTML template with the query echo, result rows and status.
    html = template.format(query=query, result=res,  # <5>
                           message=msg)
    print('Sending {} results'.format(len(descriptions)))  # <6>
    return web.Response(content_type=CONTENT_TYPE, text=html)  # <7>
# END HTTP_CHARFINDER_HOME
# BEGIN HTTP_CHARFINDER_SETUP
async def init(loop, address, port):  # <1>
    """Create the aiohttp app and start serving; return the bound address."""
    app = web.Application(loop=loop)  # <2>
    app.router.add_route('GET', '/', home)  # <3>
    handler = app.make_handler()  # <4>
    server = await loop.create_server(handler,
                                      address, port)  # <5>
    # First listening socket's (host, port) pair, for the startup banner.
    return server.sockets[0].getsockname()  # <6>
def main(address="127.0.0.1", port=8888):
    """Run the event loop serving the charfinder app until interrupted."""
    port = int(port)  # port may arrive as a string from sys.argv
    loop = asyncio.get_event_loop()
    host = loop.run_until_complete(init(loop, address, port))  # <7>
    print('Serving on {}. Hit CTRL-C to stop.'.format(host))
    try:
        loop.run_forever()  # <8>
    except KeyboardInterrupt:  # CTRL+C pressed
        pass
    print('Server shutting down.')
    loop.close()  # <9>
if __name__ == '__main__':
main(*sys.argv[1:])
# END HTTP_CHARFINDER_SETUP
| 33.069444 | 71 | 0.614028 |
ace15db7589e4bdec1b413d156e5d6f285826c8b | 3,549 | py | Python | xbox.py | Findarato/personal-influxdb | 1d41f12b1e4a4f46a8cb7552339e09838676b67f | [
"Apache-2.0"
] | 3 | 2019-12-12T17:29:14.000Z | 2019-12-12T21:00:05.000Z | xbox.py | Findarato/personal-influxdb | 1d41f12b1e4a4f46a8cb7552339e09838676b67f | [
"Apache-2.0"
] | null | null | null | xbox.py | Findarato/personal-influxdb | 1d41f12b1e4a4f46a8cb7552339e09838676b67f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# Copyright 2022 Sam Steele
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests, sys, re
from datetime import datetime, date
from bs4 import BeautifulSoup
from config import *
if not TRUE_ACHIEVEMENTS_ID:
logging.error("TRUE_ACHIEVEMENTS_ID not set in config.py")
sys.exit(1)
points = []
connect(XBOX_DATABASE)
try:
response = requests.get(f'https://www.trueachievements.com/gamer/{XBOX_GAMERTAG}/achievements?executeformfunction&function=AjaxList¶ms=oAchievementList%7C%26ddlPlatformIDs%3D%26ddlGenreIDs%3D%26ddlDLCFilter%3DInclude%20DLC%26ddlFlagIDs%3D%26ddlGamerScore%3D-1%26AchievementFilter%3DrdoAchievementsIHave%26chkExcludeDoneWith%3DTrue%26oAchievementList_Order%3DWonTimeStamp%26oAchievementList_Page%3D1%26oAchievementList_ItemsPerPage%3D100%26oAchievementList_ResponsiveMode%3DTrue%26oAchievementList_TimeZone%3DEastern%20Standard%20Time%26oAchievementList_ShowAll%3DFalse%26txtHideUnobtainableAchievement%3DFalse%26txtGamerID%3D{TRUE_ACHIEVEMENTS_ID}%26txtEasy%3DFalse%26txtShowDescriptions%3DTrue%26txtAlwaysShowUnlockedAchievementDescriptions%3DFalse%26txtYearWon%3D0%26txtMinRatio%3D0%26txtMaxRatio%3D0%26txtMaxTrueAchievement%3D0%26txtLastCharAlpha%3DFalse%26txtFirstCharAlpha%3DFalse%26txtOnlySecret%3DFalse%26txtChallenges%3DFalse%26txtContestID%3D0%26txtUseStringSQL%3DTrue%26txtOddGamerScore%3DFalse%26txtAchievementNameCharacters%3D0')
response.raise_for_status()
except requests.exceptions.HTTPError as err:
logging.error("HTTP request failed: %s", err)
sys.exit(1)
html = BeautifulSoup(response.text, 'html.parser')
table = html.find('table', id='oAchievementList')
for row in table.find_all('tr'):
if row['class'][0] == 'odd' or row['class'][0] == 'even':
if row.find('td', class_='date').string != 'Offline':
date = datetime.strptime(row.find('td', class_='date').string, '%d %b %y')
game = row.find('td', class_='gamethumb').find('img')['alt']
icon = 'https://www.trueachievements.com' + row.find('td', class_='achthumb').find('img')['src'].replace('/thumbs/', '/')
achievement = row.find('td', class_='wideachievement').find('a').string
description = list(row.find('td', class_='wideachievement').find('span').stripped_strings)[0]
apiname = re.search('(?<=/)\w+', row.find('td', class_='achthumb').find('a')['href'])[0]
points.append({
"measurement": "achievement",
"time": date.isoformat(),
"tags": {
"player_id": TRUE_ACHIEVEMENTS_ID,
"platform": "Xbox Live",
"player_name": XBOX_GAMERTAG,
"title": game,
"apiname": apiname
},
"fields": {
"name": achievement,
"description": description,
"icon": icon
}
})
write_points(points)
| 53.772727 | 1,049 | 0.682164 |
ace15e3aa4394fdb3c52ea320dacb63f447c5cd4 | 17,215 | py | Python | airflow/providers/google/cloud/sensors/gcs.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-07-30T17:14:05.000Z | 2021-08-03T13:51:25.000Z | airflow/providers/google/cloud/sensors/gcs.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/providers/google/cloud/sensors/gcs.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-03-03T01:44:08.000Z | 2021-03-03T01:44:08.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage sensors."""
import os
from datetime import datetime
from typing import Callable, List, Optional, Sequence, Set, Union
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.sensors.base import BaseSensorOperator, poke_mode_only
from airflow.utils.decorators import apply_defaults
class GCSObjectExistenceSensor(BaseSensorOperator):
    """
    Checks for the existence of a file in Google Cloud Storage.

    :param bucket: The Google Cloud Storage bucket where the object is.
    :type bucket: str
    :param object: The name of the object to check in the Google cloud
        storage bucket.
    :type object: str
    :param google_cloud_conn_id: The connection ID to use when
        connecting to Google Cloud Storage.
    :type google_cloud_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'bucket',
        'object',
        'impersonation_chain',
    )
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
        self,
        *,
        bucket: str,
        object: str,  # pylint: disable=redefined-builtin
        google_cloud_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket = bucket
        self.object = object
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def poke(self, context: dict) -> bool:
        """Return True as soon as the object exists in the bucket."""
        self.log.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
        # A fresh hook per poke keeps each check's credentials current.
        hook = GCSHook(
            gcp_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        return hook.exists(self.bucket, self.object)
def ts_function(context):
    """
    Default callback for the GoogleCloudStorageObjectUpdatedSensor. The default
    behaviour is to check for the object being updated after execution_date +
    schedule_interval, i.e. the DAG's next scheduled run time.
    """
    dag = context['dag']
    execution_date = context['execution_date']
    return dag.following_schedule(execution_date)
class GCSObjectUpdateSensor(BaseSensorOperator):
    """
    Checks if an object is updated in Google Cloud Storage.

    :param bucket: The Google Cloud Storage bucket where the object is.
    :type bucket: str
    :param object: The name of the object to download in the Google cloud
        storage bucket.
    :type object: str
    :param ts_func: Callback for defining the update condition. The default callback
        returns execution_date + schedule_interval. The callback takes the context
        as parameter.
    :type ts_func: function
    :param google_cloud_conn_id: The connection ID to use when
        connecting to Google Cloud Storage.
    :type google_cloud_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'bucket',
        'object',
        'impersonation_chain',
    )
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
        self,
        bucket: str,
        object: str,  # pylint: disable=redefined-builtin
        ts_func: Callable = ts_function,
        google_cloud_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket = bucket
        self.object = object
        self.ts_func = ts_func
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def poke(self, context: dict) -> bool:
        """Return True once the object's update time passes ts_func(context)."""
        self.log.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
        hook = GCSHook(
            gcp_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        # The threshold timestamp is recomputed from the context each poke.
        return hook.is_updated_after(self.bucket, self.object, self.ts_func(context))
# NOTE(review): class name misspells "With" as "Wtih"; kept as-is because it
# is public API referenced by callers.
class GCSObjectsWtihPrefixExistenceSensor(BaseSensorOperator):
    """
    Checks for the existence of GCS objects at a given prefix, passing matches via XCom.

    When files matching the given prefix are found, the poke method's criteria will be
    fulfilled and the matching objects will be returned from the operator and passed
    through XCom for downstream tasks.

    :param bucket: The Google Cloud Storage bucket where the object is.
    :type bucket: str
    :param prefix: The name of the prefix to check in the Google cloud
        storage bucket.
    :type prefix: str
    :param google_cloud_conn_id: The connection ID to use when
        connecting to Google Cloud Storage.
    :type google_cloud_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'bucket',
        'prefix',
        'impersonation_chain',
    )
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
        self,
        bucket: str,
        prefix: str,
        google_cloud_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket = bucket
        self.prefix = prefix
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to
        # Matching object names from the most recent successful poke.
        self._matches: List[str] = []
        self.impersonation_chain = impersonation_chain

    def poke(self, context: dict) -> bool:
        """Return True when at least one object matches the prefix."""
        self.log.info('Sensor checks existence of objects: %s, %s', self.bucket, self.prefix)
        hook = GCSHook(
            gcp_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        self._matches = hook.list(self.bucket, prefix=self.prefix)
        return bool(self._matches)

    def execute(self, context: dict) -> List[str]:
        """Overridden to allow matches to be passed"""
        super().execute(context)
        # Returning here pushes the matched names to XCom for downstream tasks.
        return self._matches
def get_time():
    """Return the current wall-clock time.

    Thin indirection over ``datetime.datetime.now`` so it can be mocked
    easily in the unittests.
    """
    now = datetime.now()
    return now
@poke_mode_only
class GCSUploadSessionCompleteSensor(BaseSensorOperator):
"""
Checks for changes in the number of objects at prefix in Google Cloud Storage
bucket and returns True if the inactivity period has passed with no
increase in the number of objects. Note, this sensor will no behave correctly
in reschedule mode, as the state of the listed objects in the GCS bucket will
be lost between rescheduled invocations.
:param bucket: The Google Cloud Storage bucket where the objects are.
expected.
:type bucket: str
:param prefix: The name of the prefix to check in the Google cloud
storage bucket.
:param inactivity_period: The total seconds of inactivity to designate
an upload session is over. Note, this mechanism is not real time and
this operator may not return until a poke_interval after this period
has passed with no additional objects sensed.
:type inactivity_period: float
:param min_objects: The minimum number of objects needed for upload session
to be considered valid.
:type min_objects: int
:param previous_objects: The set of object ids found during the last poke.
:type previous_objects: set[str]
:param allow_delete: Should this sensor consider objects being deleted
between pokes valid behavior. If true a warning message will be logged
when this happens. If false an error will be raised.
:type allow_delete: bool
:param google_cloud_conn_id: The connection ID to use when connecting
to Google Cloud Storage.
:type google_cloud_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
'bucket',
'prefix',
'impersonation_chain',
)
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
bucket: str,
prefix: str,
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: Optional[Set[str]] = None,
allow_delete: bool = True,
google_cloud_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
if inactivity_period < 0:
raise ValueError("inactivity_period must be non-negative")
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects if previous_objects else set()
self.inactivity_seconds = 0
self.allow_delete = allow_delete
self.google_cloud_conn_id = google_cloud_conn_id
self.delegate_to = delegate_to
self.last_activity_time = None
self.impersonation_chain = impersonation_chain
self.hook: Optional[GCSHook] = None
def _get_gcs_hook(self) -> Optional[GCSHook]:
if not self.hook:
self.hook = GCSHook(
gcp_conn_id=self.google_cloud_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
return self.hook
def is_bucket_updated(self, current_objects: Set[str]) -> bool:
"""
Checks whether new objects have been uploaded and the inactivity_period
has passed and updates the state of the sensor accordingly.
:param current_objects: set of object ids in bucket during last poke.
:type current_objects: set[str]
"""
current_num_objects = len(current_objects)
if current_objects > self.previous_objects:
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next poke.
self.log.info(
"New objects found at %s resetting last_activity_time.",
os.path.join(self.bucket, self.prefix),
)
self.log.debug("New objects: %s", "\n".join(current_objects - self.previous_objects))
self.last_activity_time = get_time()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return False
if self.previous_objects - current_objects:
# During the last poke interval objects were deleted.
if self.allow_delete:
self.previous_objects = current_objects
self.last_activity_time = get_time()
self.log.warning(
"""
Objects were deleted during the last
poke interval. Updating the file counter and
resetting last_activity_time.
%s
""",
self.previous_objects - current_objects,
)
return False
raise AirflowException(
"""
Illegal behavior: objects were deleted in {} between pokes.
""".format(
os.path.join(self.bucket, self.prefix)
)
)
if self.last_activity_time:
self.inactivity_seconds = (get_time() - self.last_activity_time).total_seconds()
else:
# Handles the first poke where last inactivity time is None.
self.last_activity_time = get_time()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket, self.prefix)
if current_num_objects >= self.min_objects:
self.log.info(
"""SUCCESS:
Sensor found %s objects at %s.
Waited at least %s seconds, with no new objects dropped.
""",
current_num_objects,
path,
self.inactivity_period,
)
return True
self.log.error("FAILURE: Inactivity Period passed, not enough objects found in %s", path)
return False
return False
    def poke(self, context: dict) -> bool:
        """Scheduler entry point: list the bucket objects under ``prefix``
        and delegate the decision to :meth:`is_bucket_updated`."""
        return self.is_bucket_updated(
            set(self._get_gcs_hook().list(self.bucket, prefix=self.prefix))  # type: ignore[union-attr]
        )
| 40.890736 | 103 | 0.668138 |
ace15eb0646789532586d2cac1b3dc7f019125e0 | 8,579 | py | Python | nuitka/codegen/templates/CodeTemplatesFrames.py | RESP3CT88/Nuitka | 0fcc25d9f00c4fc78c79a863c4b7987f573962e1 | [
"Apache-2.0"
] | 5,421 | 2018-09-24T08:04:06.000Z | 2022-03-31T20:02:37.000Z | venv/Lib/site-packages/nuitka/codegen/templates/CodeTemplatesFrames.py | matthijsvanvliet/raytracing-python | 73d692b47330ab94eedde579a51063e3a907e92b | [
"MIT"
] | 1,348 | 2018-09-22T13:41:00.000Z | 2022-03-31T22:33:40.000Z | venv/Lib/site-packages/nuitka/codegen/templates/CodeTemplatesFrames.py | matthijsvanvliet/raytracing-python | 73d692b47330ab94eedde579a51063e3a907e92b | [
"MIT"
] | 396 | 2018-09-28T15:37:03.000Z | 2022-03-29T10:52:09.000Z | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Code templates for frames of all kinds.
"""
# Frame in a function
template_frame_guard_full_block = """\
if (isFrameUnusable(%(frame_cache_identifier)s)) {
Py_XDECREF(%(frame_cache_identifier)s);
#if _DEBUG_REFCOUNTS
if (%(frame_cache_identifier)s == NULL) {
count_active_frame_cache_instances += 1;
} else {
count_released_frame_cache_instances += 1;
}
count_allocated_frame_cache_instances += 1;
#endif
%(frame_cache_identifier)s = MAKE_FUNCTION_FRAME(%(code_identifier)s, %(module_identifier)s, %(locals_size)s);
#if _DEBUG_REFCOUNTS
} else {
count_hit_frame_cache_instances += 1;
#endif
}
assert(%(frame_cache_identifier)s->m_type_description == NULL);
%(frame_identifier)s = %(frame_cache_identifier)s;
// Push the new frame as the currently active one.
pushFrameStack(%(frame_identifier)s);
// Mark the frame object as in use, ref count 1 will be up for reuse.
assert(Py_REFCNT(%(frame_identifier)s) == 2); // Frame stack
// Framed code:
%(codes)s
#if %(needs_preserve)d
RESTORE_FRAME_EXCEPTION(%(frame_identifier)s);
#endif
// Put the previous frame back on top.
popFrameStack();
goto %(no_exception_exit)s;
"""
template_frame_guard_full_return_handler = """\
%(frame_return_exit)s:;
#if %(needs_preserve)d
RESTORE_FRAME_EXCEPTION(%(frame_identifier)s);
#endif
// Put the previous frame back on top.
popFrameStack();
goto %(return_exit)s;
"""
template_frame_attach_locals = """\
Nuitka_Frame_AttachLocals(
%(frame_identifier)s,
%(type_description)s%(frame_variable_refs)s
);
"""
template_frame_guard_full_exception_handler = """\
%(frame_exception_exit)s:;
#if %(needs_preserve)d
RESTORE_FRAME_EXCEPTION(%(frame_identifier)s);
#endif
if (%(exception_tb)s == NULL) {
%(exception_tb)s = %(tb_making)s;
} else if (%(exception_tb)s->tb_frame != &%(frame_identifier)s->m_frame) {
%(exception_tb)s = ADD_TRACEBACK(%(exception_tb)s, %(frame_identifier)s, %(exception_lineno)s);
}
// Attaches locals to frame if any.
%(attach_locals)s
// Release cached frame if used for exception.
if (%(frame_identifier)s == %(frame_cache_identifier)s) {
#if _DEBUG_REFCOUNTS
count_active_frame_cache_instances -= 1;
count_released_frame_cache_instances += 1;
#endif
Py_DECREF(%(frame_cache_identifier)s);
%(frame_cache_identifier)s = NULL;
}
assertFrameObject(%(frame_identifier)s);
// Put the previous frame back on top.
popFrameStack();
// Return the error.
goto %(parent_exception_exit)s;
"""
# Frame for a module. TODO: Use it for functions called only once.
# TODO: The once guard need not take a reference count in its frame class.
template_frame_guard_once_block = """\
// Frame without reuse.
%(frame_identifier)s = MAKE_MODULE_FRAME(%(code_identifier)s, %(module_identifier)s);
// Push the new frame as the currently active one, and we should be exclusively
// owning it.
pushFrameStack(%(frame_identifier)s);
assert(Py_REFCNT(%(frame_identifier)s) == 2);
// Framed code:
%(codes)s
// Restore frame exception if necessary.
#if %(needs_preserve)d
RESTORE_FRAME_EXCEPTION(%(frame_identifier)s);
#endif
popFrameStack();
assertFrameObject(%(frame_identifier)s);
goto %(no_exception_exit)s;
"""
template_frame_guard_once_exception_handler = """\
%(frame_exception_exit)s:;
#if %(needs_preserve)d
RESTORE_FRAME_EXCEPTION(%(frame_identifier)s);
#endif
if (%(exception_tb)s == NULL) {
%(exception_tb)s = %(tb_making)s;
} else if (exception_tb->tb_frame != &%(frame_identifier)s->m_frame) {
%(exception_tb)s = ADD_TRACEBACK(%(exception_tb)s, %(frame_identifier)s, %(exception_lineno)s);
}
// Put the previous frame back on top.
popFrameStack();
// Return the error.
goto %(parent_exception_exit)s;
"""
# Frame in a generator, coroutine or asyncgen.
template_frame_guard_generator = """\
if (isFrameUnusable(%(frame_cache_identifier)s)) {
Py_XDECREF(%(frame_cache_identifier)s);
#if _DEBUG_REFCOUNTS
if (%(frame_cache_identifier)s == NULL) {
count_active_frame_cache_instances += 1;
} else {
count_released_frame_cache_instances += 1;
}
count_allocated_frame_cache_instances += 1;
#endif
%(frame_cache_identifier)s = MAKE_FUNCTION_FRAME(%(code_identifier)s, %(module_identifier)s, %(locals_size)s);
#if _DEBUG_REFCOUNTS
} else {
count_hit_frame_cache_instances += 1;
#endif
}
%(context_identifier)s->m_frame = %(frame_cache_identifier)s;
// Mark the frame object as in use, ref count 1 will be up for reuse.
Py_INCREF(%(context_identifier)s->m_frame);
assert(Py_REFCNT(%(context_identifier)s->m_frame) == 2); // Frame stack
#if PYTHON_VERSION >= 0x340
%(context_identifier)s->m_frame->m_frame.f_gen = (PyObject *)%(context_identifier)s;
#endif
assert(%(context_identifier)s->m_frame->m_frame.f_back == NULL);
Py_CLEAR(%(context_identifier)s->m_frame->m_frame.f_back);
%(context_identifier)s->m_frame->m_frame.f_back = PyThreadState_GET()->frame;
Py_INCREF(%(context_identifier)s->m_frame->m_frame.f_back);
PyThreadState_GET()->frame = &%(context_identifier)s->m_frame->m_frame;
Py_INCREF(%(context_identifier)s->m_frame);
Nuitka_Frame_MarkAsExecuting(%(context_identifier)s->m_frame);
#if PYTHON_VERSION >= 0x300
// Accept currently existing exception as the one to publish again when we
// yield or yield from.
{
PyThreadState *thread_state = PyThreadState_GET();
EXC_TYPE_F(%(context_identifier)s) = EXC_TYPE(thread_state);
if (EXC_TYPE_F(%(context_identifier)s) == Py_None) EXC_TYPE_F(%(context_identifier)s) = NULL;
Py_XINCREF(EXC_TYPE_F(%(context_identifier)s));
EXC_VALUE_F(%(context_identifier)s) = EXC_VALUE(thread_state);
Py_XINCREF(EXC_VALUE_F(%(context_identifier)s));
EXC_TRACEBACK_F(%(context_identifier)s) = EXC_TRACEBACK(thread_state);
Py_XINCREF(EXC_TRACEBACK_F(%(context_identifier)s));
}
#endif
// Framed code:
%(codes)s
Nuitka_Frame_MarkAsNotExecuting(%(context_identifier)s->m_frame);
#if PYTHON_VERSION >= 0x300
Py_CLEAR(EXC_TYPE_F(%(context_identifier)s));
Py_CLEAR(EXC_VALUE_F(%(context_identifier)s));
Py_CLEAR(EXC_TRACEBACK_F(%(context_identifier)s));
#endif
// Allow re-use of the frame again.
Py_DECREF(%(context_identifier)s->m_frame);
goto %(no_exception_exit)s;
"""
# TODO: This cannot happen, can it?
template_frame_guard_generator_return_handler = """\
%(frame_return_exit)s:;
#if PYTHON_VERSION >= 0x300
Py_CLEAR(EXC_TYPE_F(%(context_identifier)s));
Py_CLEAR(EXC_VALUE_F(%(context_identifier)s));
Py_CLEAR(EXC_TRACEBACK_F(%(context_identifier)s));
#endif
Py_DECREF(%(frame_identifier)s);
goto %(return_exit)s;
"""
template_frame_guard_generator_exception_handler = """\
%(frame_exception_exit)s:;
// If it's not an exit exception, consider and create a traceback for it.
if (!EXCEPTION_MATCH_GENERATOR(%(exception_type)s)) {
if (%(exception_tb)s == NULL) {
%(exception_tb)s = %(tb_making)s;
} else if (%(exception_tb)s->tb_frame != &%(frame_identifier)s->m_frame) {
%(exception_tb)s = ADD_TRACEBACK(%(exception_tb)s, %(frame_identifier)s, %(exception_lineno)s);
}
%(attach_locals)s
// Release cached frame if used for exception.
if (%(frame_identifier)s == %(frame_cache_identifier)s) {
#if _DEBUG_REFCOUNTS
count_active_frame_cache_instances -= 1;
count_released_frame_cache_instances += 1;
#endif
Py_DECREF(%(frame_cache_identifier)s);
%(frame_cache_identifier)s = NULL;
}
assertFrameObject(%(frame_identifier)s);
}
#if PYTHON_VERSION >= 0x300
Py_CLEAR(EXC_TYPE_F(%(context_identifier)s));
Py_CLEAR(EXC_VALUE_F(%(context_identifier)s));
Py_CLEAR(EXC_TRACEBACK_F(%(context_identifier)s));
#endif
Py_DECREF(%(frame_identifier)s);
// Return the error.
goto %(parent_exception_exit)s;
"""
from . import TemplateDebugWrapper # isort:skip
TemplateDebugWrapper.checkDebug(globals())
| 29.279863 | 114 | 0.737848 |
ace16053cec214d5aac73067163d94d2c87bcc44 | 2,578 | py | Python | detection/get_flops.py | jlqzzz/CAT | 5263c6e93bde8c6b5a2efce48b6593dfb3a7adfb | [
"MIT"
] | 68 | 2021-05-28T10:40:52.000Z | 2022-03-18T05:58:36.000Z | detection/get_flops.py | jlqzzz/CAT | 5263c6e93bde8c6b5a2efce48b6593dfb3a7adfb | [
"MIT"
] | 2 | 2021-06-11T06:17:34.000Z | 2021-06-11T07:14:31.000Z | detection/get_flops.py | jlqzzz/CAT | 5263c6e93bde8c6b5a2efce48b6593dfb3a7adfb | [
"MIT"
] | 15 | 2021-06-11T12:32:32.000Z | 2021-12-09T05:02:39.000Z | import argparse
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
import cat
# get_model_complexity_info lives in mmcv.cnn only from mmcv 0.6.2 onwards;
# fail with an actionable message on older installations.
try:
    from mmcv.cnn import get_model_complexity_info
except ImportError:
    raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
    """Define and evaluate the command line of the FLOPs counter script."""
    arg_parser = argparse.ArgumentParser(description='Train a detector')
    arg_parser.add_argument('config', help='train config file path')
    # Input image size; one value means a square input, two mean (H, W).
    arg_parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 800],
        help='input image size')
    # Ad-hoc config overrides, merged into the config file.
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    return arg_parser.parse_args()
def main():
    """Build the detector described by the config and print its parameter
    count and FLOPs for the requested input shape."""
    args = parse_args()
    # Normalize --shape into a CHW input-shape tuple.
    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    # The complexity tool needs a plain single-tensor forward; detectors
    # expose that as forward_dummy.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not currently supported with {}'.
            format(model.__class__.__name__))
    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
# Script entry point.
if __name__ == '__main__':
    main()
| 30.690476 | 79 | 0.641195 |
ace161892e0a1228e22a3e192739ac67871901a8 | 3,110 | py | Python | floris/utils/miscellaneous/wflo/geatpy_test_3.py | forestriveral/floris | 02c31e121283ad6ccae987cfa3aa1bf1e4b43014 | [
"Apache-2.0"
] | null | null | null | floris/utils/miscellaneous/wflo/geatpy_test_3.py | forestriveral/floris | 02c31e121283ad6ccae987cfa3aa1bf1e4b43014 | [
"Apache-2.0"
] | null | null | null | floris/utils/miscellaneous/wflo/geatpy_test_3.py | forestriveral/floris | 02c31e121283ad6ccae987cfa3aa1bf1e4b43014 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import geatpy as ea
class MyProblem(ea.Problem):  # inherit from the Problem parent class
    def __init__(self):
        name = 'BNH'  # initialize name (the problem's label, can be set freely)
        M = 2  # initialize M (number of objective dimensions)
        maxormins = [1] * M  # initialize maxormins (one flag per objective)
        Dim = 2  # initialize Dim (number of decision variables)
        varTypes = [0] * Dim  # initialize varTypes (variable types, 0: real; 1: integer)
        lb = [0] * Dim  # lower bounds of the decision variables
        ub = [5, 3]  # upper bounds of the decision variables
        lbin = [1] * Dim  # lower-boundary inclusion flags
        ubin = [1] * Dim  # upper-boundary inclusion flags
        # Call the parent constructor to complete the instantiation.
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb,
                            ub, lbin, ubin)
    def aimFunc(self, pop):  # objective function
        Vars = pop.Phen  # fetch the decision-variable matrix
        x1 = Vars[:, [0]]  # note: this yields a column vector holding x1 of every individual
        x2 = Vars[:, [1]]
        f1 = 4*x1**2 + 4*x2**2
        f2 = (x1 - 5)**2 + (x2 - 5)**2
        # Handle the constraints with the feasibility rule.
        pop.CV = np.hstack([(x1 - 5)**2 + x2**2 - 25,
                            -(x1 - 8)**2 - (x2 - 3)**2 + 7.7])
        # Assign the computed objective values to the population's ObjV.
        pop.ObjV = np.hstack([f1, f2])
    def calReferObjV(self):  # compute the global optimum (true Pareto front)
        N = 10000  # want 10000 true Pareto-front points
        x1 = np.linspace(0, 5, N)
        x2 = x1.copy()
        x2[x1 >= 3] = 3
        return np.vstack((4 * x1**2 + 4 * x2**2,
                          (x1 - 5)**2 + (x2 - 5)**2)).T
"""=======================实例化问题对象==========================="""
problem = MyProblem() # 实例化问题对象
"""=========================种群设置=============================="""
Encoding = 'RI' # 编码方式
NIND = 100 # 种群规模
Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges,
problem.borders) # 创建区域描述器
population = ea.Population(Encoding, Field, NIND)
# 实例化种群对象(此时种群还没被真正初始化,仅仅是生成一个种群对象)
"""=========================算法参数设置============================"""
myAlgorithm = ea.moea_NSGA2_templet(problem, population) # 实例化一个算法模板对象
myAlgorithm.mutOper.Pm = 0.2 # 修改变异算子的变异概率
myAlgorithm.recOper.XOVR = 0.9 # 修改交叉算子的交叉概率
myAlgorithm.MAXGEN = 200 # 最大进化代数
myAlgorithm.logTras = 10 # 设置每多少代记录日志,若设置成0则表示不记录日志
myAlgorithm.verbose = False # 设置是否打印输出日志信息
myAlgorithm.drawing = 1
# 设置绘图方式(0:不绘图;1:绘制结果图;2:绘制目标空间过程动画; 3:绘制决策空间过程动画)
"""==========================调用算法模板进行种群进化==============
调用run执行算法模板,得到帕累托最优解集NDSet以及最后一代种群。
NDSet是一个种群类Population的对象。
NDSet.ObjV为最优解个体的目标函数值;NDSet.Phen为对应的决策变量值。
详见Population.py中关于种群类的定义。
"""
[NDSet, population] = myAlgorithm.run() # 执行算法模板,得到非支配种群以及最后一代种群
# NDSet.save() # 把非支配种群的信息保存到文件中
"""===========================输出结果========================"""
print('用时:%s 秒' % myAlgorithm.passTime)
print('非支配个体数:%d 个' % NDSet.sizes) if NDSet.sizes != 0 else print('没有找到可行解!')
if myAlgorithm.log is not None and NDSet.sizes != 0:
print('GD', myAlgorithm.log['gd'][-1])
print('IGD', myAlgorithm.log['igd'][-1])
print('HV', myAlgorithm.log['hv'][-1])
print('Spacing', myAlgorithm.log['spacing'][-1])
"""======================进化过程指标追踪分析=================="""
metricName = [['igd'], ['hv']]
Metrics = np.array([myAlgorithm.log[metricName[i][0]] for i in
range(len(metricName))]).T
# 绘制指标追踪分析图
ea.trcplot(Metrics, labels=metricName, titles=metricName)
| 38.395062 | 77 | 0.538585 |
ace1621b0de79d5a8f87c9f1ecad955e8107d280 | 1,105 | py | Python | proj02/proj02_01.py | coderken1236/savy | 8cc15df71aee4f849c772fa754415161ffe71fa4 | [
"MIT"
] | null | null | null | proj02/proj02_01.py | coderken1236/savy | 8cc15df71aee4f849c772fa754415161ffe71fa4 | [
"MIT"
] | null | null | null | proj02/proj02_01.py | coderken1236/savy | 8cc15df71aee4f849c772fa754415161ffe71fa4 | [
"MIT"
] | null | null | null | # Name:
# Date:
# proj02: sum
# Write a program that prompts the user to enter numbers, one per line,
# ending with a line containing 0, and keep a running sum of the numbers.
# Only print out the sum after all the numbers are entered
# (at least in your final version). Each time you read in a number,
# you can immediately use it for your sum,
# and then be done with the number just entered.
#Example:
# Enter a number to sum, or 0 to indicate you are finished: 4
# Enter a number to sum, or 0 to indicate you are finished: 5
# Enter a number to sum, or 0 to indicate you are finished: 2
# Enter a number to sum, or 0 to indicate you are finished: 10
# Enter a number to sum, or 0 to indicate you are finished: 0
#The sum of your numbers is: 21
# Python 2 exercise script: keep a running sum of user-entered integers,
# stopping (and printing the total) when the sentinel 0 is entered.
loop_control = True
loop_number = 0  # running total of the numbers entered so far
while loop_control == True:
    Number = raw_input("Enter a number for me to sum, or 0 to show that you are finished: ")
    Number = int(Number)
    if Number == 0:
        # Sentinel reached: stop looping and report the accumulated sum.
        loop_control = False
        print "The sum is ", loop_number
    else:
        loop_control = True
        loop_number = loop_number + Number
| 34.53125 | 92 | 0.700452 |
ace1627626b544ae28b5b6ad1ebdb219212be58a | 2,694 | py | Python | tensorpack/dataflow/dataset/caltech101.py | dan-anghel/tensorpack | 86fcffbc167e2b703b9abd17d41388311c90fe7c | [
"Apache-2.0"
] | 2 | 2019-05-21T15:13:34.000Z | 2019-05-24T07:25:41.000Z | tensorpack/dataflow/dataset/caltech101.py | lkn123/tensorpack | d7a13cb74c9066bc791d7aafc3b744b60ee79a9f | [
"Apache-2.0"
] | 7 | 2019-12-16T21:58:30.000Z | 2022-02-10T00:17:01.000Z | tensorpack/dataflow/dataset/caltech101.py | lkn123/tensorpack | d7a13cb74c9066bc791d7aafc3b744b60ee79a9f | [
"Apache-2.0"
] | 2 | 2019-09-04T00:02:29.000Z | 2020-07-06T20:27:04.000Z | # -*- coding: utf-8 -*-
# File: caltech101.py
import os
from six.moves import range
from ...utils import logger
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ["Caltech101Silhouettes"]
def maybe_download(url, work_directory):
    """Fetch ``url`` into ``work_directory`` and return the local path,
    skipping the download when the file is already present."""
    fname = url.split("/")[-1]
    fpath = os.path.join(work_directory, fname)
    if os.path.exists(fpath):
        return fpath
    logger.info("Downloading to {}...".format(fpath))
    download(url, work_directory)
    return fpath
class Caltech101Silhouettes(RNGDataFlow):
    """
    Produces [image, label] in Caltech101 Silhouettes dataset,
    image is 28x28 in the range [0,1], label is an int in the range [0,100].
    """
    # Subdirectory (under the dataset root) where the .mat file is cached.
    _DIR_NAME = "caltech101_data"
    # Marlin's hosting site for the silhouettes .mat files.
    _SOURCE_URL = "https://people.cs.umass.edu/~marlin/data/"
    def __init__(self, name, shuffle=True, dir=None):
        """
        Args:
            name (str): 'train', 'test', 'val'
            shuffle (bool): shuffle the dataset
        """
        if dir is None:
            dir = get_dataset_path(self._DIR_NAME)
        assert name in ['train', 'test', 'val']
        self.name = name
        self.shuffle = shuffle
        def get_images_and_labels(data_file):
            # Download the .mat file if needed and parse it with scipy.
            f = maybe_download(self._SOURCE_URL + data_file, dir)
            data = scipy.io.loadmat(f)
            return data
        self.data = get_images_and_labels("caltech101_silhouettes_28_split1.mat")
        # The split sizes below are fixed by "split1" of the 28x28 dataset;
        # labels are shifted by -1 so that they start at 0.
        if self.name == "train":
            self.images = self.data["train_data"].reshape((4100, 28, 28))
            self.labels = self.data["train_labels"].ravel() - 1
        elif self.name == "test":
            self.images = self.data["test_data"].reshape((2307, 28, 28))
            self.labels = self.data["test_labels"].ravel() - 1
        else:
            self.images = self.data["val_data"].reshape((2264, 28, 28))
            self.labels = self.data["val_labels"].ravel() - 1
    def __len__(self):
        # Number of datapoints in the selected split.
        return self.images.shape[0]
    def __iter__(self):
        # Yield (optionally shuffled) [image, label] pairs.
        idxs = list(range(self.__len__()))
        if self.shuffle:
            self.rng.shuffle(idxs)
        for k in idxs:
            img = self.images[k]
            label = self.labels[k]
            yield [img, label]
# Without scipy installed, expose a placeholder class that raises on use.
try:
    import scipy.io
except ImportError:
    from ...utils.develop import create_dummy_class
    Caltech101Silhouettes = create_dummy_class('Caltech101Silhouettes', 'scipy.io') # noqa
if __name__ == "__main__":
    # Manual smoke test: load the train split and drop into IPython on the
    # first datapoint for inspection.
    ds = Caltech101Silhouettes("train")
    ds.reset_state()
    for (img, label) in ds:
        from IPython import embed
        embed()
        break
| 29.604396 | 90 | 0.610987 |
ace162f28374a549bd3066f32af94911eaeac6ca | 5,255 | py | Python | python/mxnet/gluon/probability/distributions/categorical.py | sandutsar/incubator-mxnet | 3ae7ddfd9c99f40a33c4cb716b3810e5463b810a | [
"Apache-2.0",
"MIT"
] | 1 | 2019-12-20T11:25:06.000Z | 2019-12-20T11:25:06.000Z | python/mxnet/gluon/probability/distributions/categorical.py | sandutsar/incubator-mxnet | 3ae7ddfd9c99f40a33c4cb716b3810e5463b810a | [
"Apache-2.0",
"MIT"
] | 4 | 2021-03-30T13:16:50.000Z | 2021-10-04T08:49:33.000Z | python/mxnet/gluon/probability/distributions/categorical.py | sandutsar/incubator-mxnet | 3ae7ddfd9c99f40a33c4cb716b3810e5463b810a | [
"Apache-2.0",
"MIT"
] | 1 | 2019-12-11T19:22:13.000Z | 2019-12-11T19:22:13.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Categorical class."""
__all__ = ['Categorical']
from .distribution import Distribution
from .utils import prob2logit, logit2prob, cached_property, sample_n_shape_converter
from .constraint import Simplex, Real, IntegerInterval
from .... import np, npx
class Categorical(Distribution):
    """Create a categorical distribution object.
    Parameters
    ----------
    num_events : Int
        Number of events.
    prob : Tensor
        Probabilities of each event.
    logit : Tensor
        The log-odds of each event
    """
    # pylint: disable=abstract-method
    has_enumerate_support = True
    arg_constraints = {'prob': Simplex(),
                       'logit': Real()}
    def __init__(self, num_events, prob=None, logit=None, validate_args=None):
        if (num_events > 0):
            num_events = int(num_events)
            self.num_events = num_events
        else:
            raise ValueError("`num_events` should be greater than zero. " +
                             "Received num_events={}".format(num_events))
        # Exactly one of `prob`/`logit` may be given (XOR on their None-ness);
        # the other is derived lazily via the cached properties below.
        if (prob is None) == (logit is None):
            raise ValueError(
                "Either `prob` or `logit` must be specified, but not both. " +
                "Received prob={}, logit={}".format(prob, logit))
        if prob is not None:
            self.prob = prob
        else:
            self.logit = logit
        super(Categorical, self).__init__(
            event_dim=0, validate_args=validate_args)
    @cached_property
    def prob(self):
        # pylint: disable=method-hidden
        """Get the probability of sampling each class.
        Returns
        -------
        Tensor
            Parameter tensor.
        """
        return logit2prob(self.logit, False)
    @cached_property
    def logit(self):
        # pylint: disable=method-hidden
        """Get the log probability of sampling each class.
        Returns
        -------
        Tensor
            Parameter tensor.
        """
        return prob2logit(self.prob, False)
    @property
    def support(self):
        # NOTE(review): samples take values 0..num_events-1 (argmax over
        # num_events logits); confirm whether IntegerInterval's upper bound
        # is meant to be inclusive here.
        return IntegerInterval(0, self.num_events)
    def log_prob(self, value):
        """Compute the log-likelihood of `value`
        Parameters
        ----------
        value : Tensor
            samples from Categorical distribution
        Returns
        -------
        Tensor
            log-likelihood of `value`
        """
        if self._validate_args:
            self._validate_samples(value)
        logit = self.logit
        indices = np.expand_dims(value, -1).astype('int')
        # Broadcast logit against the indices' batch shape, then pick the
        # logit belonging to each sampled class index.
        expanded_logit = logit * np.ones_like(logit + indices)
        # NOTE(review): squeeze() with no axis will also drop size-1 batch
        # dimensions — confirm that is acceptable for callers.
        return npx.pick(expanded_logit, indices).squeeze()
    def sample(self, size=None):
        """Sample from categorical distribution.
        Given logit/prob of size `(batch_size, num_events)`,
        `batch_size` samples will be drawn.
        If `size` is given, `np.broadcast(size, batch_size)` samples will be drawn.
        Parameters
        ----------
        size : int or tuple of ints
        Returns
        -------
        out : Tensor
            Samples from the categorical distribution.
        """
        if size is None:
            size = ()
            logit = self.logit
        else:
            # -2 means "copy this dimension" in MXNet's shape notation.
            if isinstance(size, int):
                logit = np.broadcast_to(self.logit, (size,) + (-2,))
            else:
                logit = np.broadcast_to(self.logit, size + (-2,))
        # Gumbel-max trick: argmax of (logit + Gumbel noise) is a categorical
        # draw with the given logits.
        gumbel_samples = np.random.gumbel(logit)
        return np.argmax(gumbel_samples, axis=-1)
    def sample_n(self, size=None):
        # Same Gumbel-max trick, drawing `size` independent replicates.
        size = sample_n_shape_converter(size)
        gumbel_samples = np.random.gumbel(self.logit, size=size)
        return np.argmax(gumbel_samples, axis=-1)
    def broadcast_to(self, batch_shape):
        # Build a copy with broadcast parameters; __new__ bypasses __init__
        # since both prob and logit are set directly.
        new_instance = self.__new__(type(self))
        new_instance.prob = np.broadcast_to(self.prob, batch_shape + (-2,))
        new_instance.logit = np.broadcast_to(self.logit, batch_shape + (-2,))
        new_instance.num_events = self.num_events
        super(Categorical, new_instance).__init__(event_dim=self.event_dim,
                                                  validate_args=False)
        new_instance._validate_args = self._validate_args
        return new_instance
    def enumerate_support(self):
        # Enumerate 0..num_events-1 along a new leading axis.
        num_events = self.num_events
        value = npx.arange_like(self.logit) % num_events
        return np.moveaxis(value, -1, 0)
| 32.84375 | 84 | 0.613321 |
ace16331bf8ea4e4ea333899589705fcaf0ac9c6 | 4,367 | py | Python | tests/test_profile.py | brendan-ward/rasterio | 6fc3ab334200920819dc2ae063c7845988bcd9fc | [
"BSD-3-Clause"
] | 1 | 2020-03-17T10:17:57.000Z | 2020-03-17T10:17:57.000Z | tests/test_profile.py | brendan-ward/rasterio | 6fc3ab334200920819dc2ae063c7845988bcd9fc | [
"BSD-3-Clause"
] | null | null | null | tests/test_profile.py | brendan-ward/rasterio | 6fc3ab334200920819dc2ae063c7845988bcd9fc | [
"BSD-3-Clause"
] | 2 | 2019-09-03T12:18:04.000Z | 2020-03-17T10:18:07.000Z | import warnings
import pytest
import rasterio
from rasterio.errors import RasterioDeprecationWarning
from rasterio.profiles import Profile, DefaultGTiffProfile
from rasterio.profiles import default_gtiff_profile
def test_base_profile():
    """A bare Profile carries no driver key."""
    assert 'driver' not in Profile()
def test_base_profile_kwarg():
    """Keyword arguments become items of the profile mapping."""
    assert Profile(foo='bar')['foo'] == 'bar'
def test_gtiff_profile_format():
    """The default GTiff profile selects the GTiff driver (call form is deprecated)."""
    assert DefaultGTiffProfile()['driver'] == 'GTiff'
    with pytest.warns(RasterioDeprecationWarning):
        assert DefaultGTiffProfile()()['driver'] == 'GTiff'
def test_gtiff_profile_interleave():
    """Default interleaving is per band."""
    assert DefaultGTiffProfile()['interleave'] == 'band'
def test_gtiff_profile_tiled():
    """The default profile writes tiled rasters."""
    assert DefaultGTiffProfile()['tiled'] is True
def test_gtiff_profile_blockxsize():
    """Default tile width is 256."""
    assert DefaultGTiffProfile()['blockxsize'] == 256
def test_gtiff_profile_blockysize():
    """Default tile height is 256."""
    assert DefaultGTiffProfile()['blockysize'] == 256
def test_gtiff_profile_compress():
    """Default compression is LZW."""
    assert DefaultGTiffProfile()['compress'] == 'lzw'
def test_gtiff_profile_nodata():
    """Default nodata value is 0."""
    assert DefaultGTiffProfile()['nodata'] == 0
def test_gtiff_profile_dtype():
    """Default dtype is uint8."""
    assert DefaultGTiffProfile()['dtype'] == rasterio.uint8
def test_gtiff_profile_other():
    """Extra keyword arguments extend the defaults."""
    assert DefaultGTiffProfile(count=3)['count'] == 3
def test_gtiff_profile_dtype_override():
    """A dtype keyword replaces the uint8 default."""
    assert DefaultGTiffProfile(dtype='uint16')['dtype'] == rasterio.uint16
def test_open_with_profile(tmpdir):
    """A dataset opened for writing with the default profile is usable."""
    tiffname = str(tmpdir.join('foo.tif'))
    with rasterio.open(tiffname, 'w', **default_gtiff_profile(
            count=1, width=256, height=256)) as dst:
        assert not dst.closed
def test_blockxsize_guard(tmpdir):
    """blockxsize can't be greater than image width."""
    tiffname = str(tmpdir.join('foo.tif'))
    with pytest.raises(ValueError):
        rasterio.open(tiffname, 'w', **default_gtiff_profile(
            count=1, width=128, height=256))
def test_blockysize_guard(tmpdir):
    """blockysize can't be greater than image height."""
    tiffname = str(tmpdir.join('foo.tif'))
    with pytest.raises(ValueError):
        rasterio.open(tiffname, 'w', **default_gtiff_profile(
            count=1, width=256, height=128))
def test_profile_overlay():
    """Updating a dataset profile with the GTiff defaults merges both."""
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        kwds = src.profile
    kwds.update(**default_gtiff_profile())
    assert kwds['tiled']
    assert kwds['compress'] == 'lzw'
    assert kwds['count'] == 3
def test_dataset_profile_property_tiled(data):
    """A tiled dataset's profile has block sizes."""
    with rasterio.open('tests/data/shade.tif') as src:
        assert src.profile['blockxsize'] == 256
        assert src.profile['blockysize'] == 256
        assert src.profile['tiled'] is True
def test_dataset_profile_property_untiled(data):
    """An untiled dataset's profile has no block sizes."""
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        assert 'blockxsize' not in src.profile
        assert 'blockysize' not in src.profile
        assert src.profile['tiled'] is False
def test_dataset_profile_creation_kwds(data):
    """Updated creation keyword tags appear in the profile."""
    tiffile = str(data.join('RGB.byte.tif'))
    with rasterio.open(tiffile, 'r+') as src:
        src.update_tags(ns='rio_creation_kwds', foo='bar')
        assert src.profile['tiled'] is False
        assert src.profile['foo'] == 'bar'
def test_profile_affine_stashing():
    """Passing affine sets transform, with a deprecation warning."""
    with pytest.warns(RasterioDeprecationWarning):
        profile = Profile(affine='foo')
    assert 'affine' not in profile
    assert profile['transform'] == 'foo'
def test_profile_mixed_error():
    """Warn if both affine and transform are passed; transform wins."""
    with pytest.warns(RasterioDeprecationWarning):
        profile = Profile(affine='foo', transform='bar')
    assert 'affine' not in profile
    assert profile['transform'] == 'bar'
def test_profile_affine_alias():
    """affine is a read alias for transform, with a deprecation warning."""
    profile = Profile(transform='foo')
    with pytest.warns(RasterioDeprecationWarning):
        assert profile['affine'] == 'foo'
def test_profile_affine_set():
    """TypeError is raised on set of the affine item."""
    profile = Profile()
    profile['transform'] = 'foo'
    with pytest.raises(TypeError):
        profile['affine'] = 'bar'
| 29.707483 | 74 | 0.694527 |
ace163c9adb57c7441ae91c3e0bf2f67407ef674 | 246 | py | Python | python_practice/split_and_join.py | davidkimolo/hackerrank-solutions | a79964989f6f15b0c306ec06a4af13ab064fa807 | [
"MIT"
] | null | null | null | python_practice/split_and_join.py | davidkimolo/hackerrank-solutions | a79964989f6f15b0c306ec06a4af13ab064fa807 | [
"MIT"
] | null | null | null | python_practice/split_and_join.py | davidkimolo/hackerrank-solutions | a79964989f6f15b0c306ec06a4af13ab064fa807 | [
"MIT"
] | 1 | 2020-07-20T19:21:00.000Z | 2020-07-20T19:21:00.000Z | def split_and_join(line):
# write your code here
list_line = line.split()
result_string = "-".join(list_line)
return result_string
if __name__ == '__main__':
    # Read one line from stdin and compute its hyphen-joined form.
    line = input()
    result = split_and_join(line)
print(result) | 22.363636 | 39 | 0.662602 |
ace163d3414638f14555279fd26168aecaffc7c7 | 28,733 | py | Python | mne/viz/backends/_qt.py | teonbrooks/mne-python | fa643e8dcde1024d21d695e4c788f1a1a8a0e41f | [
"BSD-3-Clause"
] | 1 | 2022-02-26T17:30:58.000Z | 2022-02-26T17:30:58.000Z | mne/viz/backends/_qt.py | teonbrooks/mne-python | fa643e8dcde1024d21d695e4c788f1a1a8a0e41f | [
"BSD-3-Clause"
] | null | null | null | mne/viz/backends/_qt.py | teonbrooks/mne-python | fa643e8dcde1024d21d695e4c788f1a1a8a0e41f | [
"BSD-3-Clause"
] | null | null | null | """Qt implementation of _Renderer and GUI."""
# Authors: Guillaume Favelier <guillaume.favelier@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager
import pyvista
from pyvistaqt.plotting import FileDialog
from PyQt5.QtCore import Qt, pyqtSignal, QLocale
from PyQt5.QtGui import QIcon, QImage, QPixmap, QCursor
from PyQt5.QtWidgets import (QComboBox, QDockWidget, QDoubleSpinBox, QGroupBox,
QHBoxLayout, QLabel, QToolButton, QMenuBar,
QSlider, QSpinBox, QVBoxLayout, QWidget,
QSizePolicy, QScrollArea, QStyle, QProgressBar,
QStyleOptionSlider, QLayout, QCheckBox,
QButtonGroup, QRadioButton, QLineEdit,
QFileDialog, QPushButton)
from ._pyvista import _PyVistaRenderer
from ._pyvista import (_close_all, _close_3d_figure, _check_3d_figure, # noqa: F401,E501 analysis:ignore
_set_3d_view, _set_3d_title, _take_3d_screenshot) # noqa: F401,E501 analysis:ignore
from ._abstract import (_AbstractDock, _AbstractToolBar, _AbstractMenuBar,
_AbstractStatusBar, _AbstractLayout, _AbstractWidget,
_AbstractWindow, _AbstractMplCanvas, _AbstractPlayback,
_AbstractBrainMplCanvas, _AbstractMplInterface,
_AbstractWidgetList, _AbstractAction)
from ._utils import _init_qt_resources, _qt_disable_paint
from ..utils import logger, _check_option
class _QtLayout(_AbstractLayout):
    """Qt implementation of the abstract layout interface."""
    def _layout_initialize(self, max_width):
        # Nothing to prepare for Qt layouts; kept for interface parity.
        pass
    def _layout_add_widget(self, layout, widget, stretch=0):
        """Add a widget to an existing layout."""
        # Qt distinguishes nested layouts from plain widgets.
        if isinstance(widget, QLayout):
            layout.addLayout(widget)
        else:
            layout.addWidget(widget, stretch)
class _QtDock(_AbstractDock, _QtLayout):
    """Qt implementation of the abstract dock (side control panel)."""
    def _dock_initialize(self, window=None, name="Controls",
                         area="left", max_width=None):
        # Create the dock widget and attach it to the requested side.
        window = self._window if window is None else window
        qt_area = Qt.LeftDockWidgetArea if area == "left" \
            else Qt.RightDockWidgetArea
        # NOTE(review): _create_dock_widget receives self._window, ignoring
        # the resolved `window` parameter above — confirm intent.
        self._dock, self._dock_layout = _create_dock_widget(
            self._window, name, qt_area, max_width=max_width
        )
        # Give the dock the bottom corner on its own side.
        if area == "left":
            window.setCorner(Qt.BottomLeftCorner, Qt.LeftDockWidgetArea)
        else:
            window.setCorner(Qt.BottomRightCorner, Qt.RightDockWidgetArea)
    def _dock_finalize(self):
        # Lock the minimum width to the current size hint and pad the bottom.
        self._dock.setMinimumSize(self._dock.sizeHint().width(), 0)
        self._dock_add_stretch(self._dock_layout)
    def _dock_show(self):
        self._dock.show()
    def _dock_hide(self):
        self._dock.hide()
    def _dock_add_stretch(self, layout=None):
        # Expandable spacer, used to push widgets towards the top.
        layout = self._dock_layout if layout is None else layout
        layout.addStretch()
    def _dock_add_layout(self, vertical=True):
        # Fresh sub-layout; vertical by default.
        layout = QVBoxLayout() if vertical else QHBoxLayout()
        return layout
    def _dock_add_label(
        self, value, *, align=False, layout=None, selectable=False
    ):
        # Word-wrapped text label, optionally centered and mouse-selectable.
        layout = self._dock_layout if layout is None else layout
        widget = QLabel()
        if align:
            widget.setAlignment(Qt.AlignCenter)
        widget.setText(value)
        widget.setWordWrap(True)
        if selectable:
            widget.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)
    def _dock_add_button(
        self, name, callback, *, style='pushbutton', tooltip=None, layout=None
    ):
        # Clickable button; `style` picks between QToolButton and QPushButton.
        _check_option(
            parameter='style',
            value=style,
            allowed_values=('toolbutton', 'pushbutton')
        )
        if style == 'toolbutton':
            widget = QToolButton()
            widget.setText(name)
        else:
            widget = QPushButton(name)
            # Don't change text color upon button press
            widget.setStyleSheet(
                'QPushButton:pressed {color: none;}'
            )
        _set_widget_tooltip(widget, tooltip)
        widget.clicked.connect(callback)
        layout = self._dock_layout if layout is None else layout
        self._layout_add_widget(layout, widget)
        return _QtWidget(widget)
def _dock_named_layout(self, name, *, layout=None, compact=True):
layout = self._dock_layout if layout is None else layout
if name is not None:
hlayout = self._dock_add_layout(not compact)
self._dock_add_label(
value=name, align=not compact, layout=hlayout)
self._layout_add_widget(layout, hlayout)
layout = hlayout
return layout
def _dock_add_slider(self, name, value, rng, callback, *,
compact=True, double=False, tooltip=None,
layout=None):
layout = self._dock_named_layout(
name=name, layout=layout, compact=compact)
slider_class = QFloatSlider if double else QSlider
cast = float if double else int
widget = slider_class(Qt.Horizontal)
_set_widget_tooltip(widget, tooltip)
widget.setMinimum(cast(rng[0]))
widget.setMaximum(cast(rng[1]))
widget.setValue(cast(value))
widget.valueChanged.connect(callback)
self._layout_add_widget(layout, widget)
return _QtWidget(widget)
def _dock_add_check_box(self, name, value, callback, *, tooltip=None,
layout=None):
layout = self._dock_layout if layout is None else layout
widget = QCheckBox(name)
_set_widget_tooltip(widget, tooltip)
widget.setChecked(value)
widget.stateChanged.connect(callback)
self._layout_add_widget(layout, widget)
return _QtWidget(widget)
def _dock_add_spin_box(self, name, value, rng, callback, *,
compact=True, double=True, step=None,
tooltip=None, layout=None):
layout = self._dock_named_layout(
name=name, layout=layout, compact=compact)
value = value if double else int(value)
widget = QDoubleSpinBox() if double else QSpinBox()
_set_widget_tooltip(widget, tooltip)
widget.setAlignment(Qt.AlignCenter)
widget.setMinimum(rng[0])
widget.setMaximum(rng[1])
widget.setKeyboardTracking(False)
if step is None:
inc = (rng[1] - rng[0]) / 20.
inc = max(int(round(inc)), 1) if not double else inc
widget.setSingleStep(inc)
else:
widget.setSingleStep(step)
widget.setValue(value)
widget.valueChanged.connect(callback)
self._layout_add_widget(layout, widget)
return _QtWidget(widget)
def _dock_add_combo_box(self, name, value, rng, callback, *, compact=True,
tooltip=None, layout=None):
layout = self._dock_named_layout(
name=name, layout=layout, compact=compact)
widget = QComboBox()
_set_widget_tooltip(widget, tooltip)
widget.addItems(rng)
widget.setCurrentText(value)
widget.currentTextChanged.connect(callback)
widget.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self._layout_add_widget(layout, widget)
return _QtWidget(widget)
def _dock_add_radio_buttons(self, value, rng, callback, *, vertical=True,
layout=None):
layout = self._dock_layout if layout is None else layout
group_layout = QVBoxLayout() if vertical else QHBoxLayout()
group = QButtonGroup()
for val in rng:
button = QRadioButton(val)
if val == value:
button.setChecked(True)
group.addButton(button)
self._layout_add_widget(group_layout, button)
def func(button):
callback(button.text())
group.buttonClicked.connect(func)
self._layout_add_widget(layout, group_layout)
return _QtWidgetList(group)
def _dock_add_group_box(self, name, *, layout=None):
layout = self._dock_layout if layout is None else layout
hlayout = QVBoxLayout()
widget = QGroupBox(name)
widget.setLayout(hlayout)
self._layout_add_widget(layout, widget)
return hlayout
def _dock_add_text(self, name, value, placeholder, *, callback=None,
layout=None):
layout = self._dock_layout if layout is None else layout
widget = QLineEdit(value)
widget.setPlaceholderText(placeholder)
self._layout_add_widget(layout, widget)
if callback is not None:
widget.textChanged.connect(callback)
return _QtWidget(widget)
def _dock_add_file_button(
self, name, desc, func, *, filter=None, initial_directory=None,
value=None, save=False,
is_directory=False, input_text_widget=True,
placeholder="Type a file name", tooltip=None, layout=None
):
layout = self._dock_layout if layout is None else layout
if input_text_widget:
hlayout = self._dock_add_layout(vertical=False)
text_widget = self._dock_add_text(
name=f"{name}_field",
value=value,
placeholder=placeholder,
layout=hlayout,
)
def sync_text_widget(s):
text_widget.set_value(s)
else:
hlayout = layout
def callback():
if is_directory:
name = QFileDialog.getExistingDirectory(
directory=initial_directory
)
elif save:
name = QFileDialog.getSaveFileName(
directory=initial_directory,
filter=filter
)
else:
name = QFileDialog.getOpenFileName(
directory=initial_directory,
filter=filter
)
name = name[0] if isinstance(name, tuple) else name
# handle the cancel button
if len(name) == 0:
return
if input_text_widget:
sync_text_widget(name)
func(name)
button_widget = self._dock_add_button(
name=desc,
callback=callback,
tooltip=tooltip,
layout=hlayout,
)
if input_text_widget:
self._layout_add_widget(layout, hlayout)
return _QtWidgetList([text_widget, button_widget])
else:
return button_widget # It's already a _QtWidget instance
class QFloatSlider(QSlider):
    """Slider that handles float values."""

    # Re-declared so the signal carries a float instead of QSlider's int.
    valueChanged = pyqtSignal(float)

    def __init__(self, ori, parent=None):
        """Initialize the slider."""
        super().__init__(ori, parent)
        self._opt = QStyleOptionSlider()
        self.initStyleOption(self._opt)
        # Groove/handle rects cached here; NOTE(review): these appear unused
        # afterwards (mousePressEvent recomputes them) -- confirm before
        # relying on them.
        self._gr = self.style().subControlRect(
            QStyle.CC_Slider, self._opt, QStyle.SC_SliderGroove, self)
        self._sr = self.style().subControlRect(
            QStyle.CC_Slider, self._opt, QStyle.SC_SliderHandle, self)
        # Floats are stored fixed-point: the int slider position is
        # value * _precision, giving 1e-4 resolution.
        self._precision = 10000
        super().valueChanged.connect(self._convert)

    def _convert(self, value):
        # Re-emit the int signal as the scaled float signal.
        self.valueChanged.emit(value / self._precision)

    def minimum(self):
        """Get the minimum."""
        return super().minimum() / self._precision

    def setMinimum(self, value):
        """Set the minimum."""
        super().setMinimum(int(value * self._precision))

    def maximum(self):
        """Get the maximum."""
        return super().maximum() / self._precision

    def setMaximum(self, value):
        """Set the maximum."""
        super().setMaximum(int(value * self._precision))

    def value(self):
        """Get the current value."""
        return super().value() / self._precision

    def setValue(self, value):
        """Set the current value."""
        super().setValue(int(value * self._precision))

    # Adapted from:
    # https://stackoverflow.com/questions/52689047/moving-qslider-to-mouse-click-position  # noqa: E501
    def mousePressEvent(self, event):
        """Add snap-to-location handling."""
        opt = QStyleOptionSlider()
        self.initStyleOption(opt)
        sr = self.style().subControlRect(
            QStyle.CC_Slider, opt, QStyle.SC_SliderHandle, self)
        # Clicks on the handle itself (or non-left clicks) keep the default
        # drag behavior.
        if (event.button() != Qt.LeftButton or sr.contains(event.pos())):
            super().mousePressEvent(event)
            return
        if self.orientation() == Qt.Vertical:
            half = (0.5 * sr.height()) + 0.5
            max_ = self.height()
            pos = max_ - event.y()
        else:
            half = (0.5 * sr.width()) + 0.5
            max_ = self.width()
            pos = event.x()
        # Map the click position to [0, 1] along the usable groove length
        # (excluding half a handle at each end).
        max_ = max_ - 2 * half
        pos = min(max(pos - half, 0), max_) / max_
        val = self.minimum() + (self.maximum() - self.minimum()) * pos
        val = (self.maximum() - val) if self.invertedAppearance() else val
        self.setValue(val)
        event.accept()
        # Process afterward so it's seen as a drag
        super().mousePressEvent(event)
class _QtToolBar(_AbstractToolBar, _QtLayout):
    """Qt implementation of the abstract tool bar."""

    def _tool_bar_load_icons(self):
        """Load the toolbar icons from the compiled Qt resources."""
        _init_qt_resources()
        icon_keys = ("help", "play", "pause", "reset", "scale", "clear",
                     "movie", "restore", "screenshot", "visibility_on",
                     "visibility_off")
        self.icons = {key: QIcon(f":/{key}.svg") for key in icon_keys}

    def _tool_bar_initialize(self, name="default", window=None):
        """Create the tool bar on ``window`` (default: the main window)."""
        self.actions = dict()
        if window is None:
            window = self._window
        self._tool_bar = window.addToolBar(name)
        self._tool_bar_layout = self._tool_bar.layout()

    def _tool_bar_add_button(self, name, desc, func, *, icon_name=None,
                             shortcut=None):
        """Add an action button; the icon defaults to the one named ``name``."""
        icon = self.icons[name if icon_name is None else icon_name]
        action = self._tool_bar.addAction(icon, desc, func)
        self.actions[name] = action
        if shortcut is not None:
            action.setShortcut(shortcut)

    def _tool_bar_update_button_icon(self, name, icon_name):
        """Swap the icon shown by an existing action."""
        self.actions[name].setIcon(self.icons[icon_name])

    def _tool_bar_add_text(self, name, value, placeholder):
        """Not supported by the Qt tool bar."""
        pass

    def _tool_bar_add_spacer(self):
        """Insert expanding space so later items align to the right."""
        spacer = QWidget()
        spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
        self._tool_bar.addWidget(spacer)

    def _tool_bar_add_file_button(self, name, desc, func, *, shortcut=None):
        """Add a button that opens a pyvistaqt FileDialog and forwards to func."""
        def open_dialog():
            return FileDialog(
                self.plotter.app_window,
                callback=func,
            )
        self._tool_bar_add_button(
            name=name,
            desc=desc,
            func=open_dialog,
            shortcut=shortcut,
        )

    def _tool_bar_add_play_button(self, name, desc, func, *, shortcut=None):
        """Add the playback button (icon looked up by ``name``)."""
        self._tool_bar_add_button(
            name=name, desc=desc, func=func, icon_name=None, shortcut=shortcut)

    def _tool_bar_set_theme(self, theme):
        """Invert the icon colors when a dark theme is active."""
        if theme == 'auto':
            theme = _detect_theme()
        if theme != 'dark':
            return
        for icon_key in list(self.icons):
            image = self.icons[icon_key].pixmap(80).toImage()
            image.invertPixels(mode=QImage.InvertRgb)
            self.icons[icon_key] = QIcon(QPixmap.fromImage(image))
class _QtMenuBar(_AbstractMenuBar):
    """Qt implementation of the abstract menu bar."""

    def _menu_initialize(self, window=None):
        """Attach a non-native menu bar to ``window``."""
        self._menus = {}
        self._menu_actions = {}
        if window is None:
            window = self._window
        menu_bar = QMenuBar()
        # Keep the menu inside the window for consistent cross-platform
        # behavior (e.g. avoid the macOS global menu).
        menu_bar.setNativeMenuBar(False)
        window.setMenuBar(menu_bar)
        self._menu_bar = menu_bar

    def _menu_add_submenu(self, name, desc):
        """Create a new top-level menu labeled ``desc``."""
        self._menus[name] = self._menu_bar.addMenu(desc)
        self._menu_actions[name] = {}

    def _menu_add_button(self, menu_name, name, desc, func):
        """Append an action entry labeled ``desc`` to an existing menu."""
        action = self._menus[menu_name].addAction(desc, func)
        self._menu_actions[menu_name][name] = _QtAction(action)
class _QtStatusBar(_AbstractStatusBar, _QtLayout):
    """Qt implementation of the abstract status bar."""

    def _status_bar_initialize(self, window=None):
        """Grab the status bar of ``window`` and its layout."""
        if window is None:
            window = self._window
        self._status_bar = window.statusBar()
        self._status_bar_layout = self._status_bar.layout()

    def _status_bar_add_label(self, value, *, stretch=0):
        """Add a text label to the status bar and return its wrapper."""
        label = QLabel(value)
        self._layout_add_widget(self._status_bar_layout, label, stretch)
        return _QtWidget(label)

    def _status_bar_add_progress_bar(self, stretch=0):
        """Add a progress bar to the status bar and return its wrapper."""
        bar = QProgressBar()
        self._layout_add_widget(self._status_bar_layout, bar, stretch)
        return _QtWidget(bar)

    def _status_bar_update(self):
        """Force a relayout of the status bar."""
        self._status_bar_layout.update()
class _QtPlayback(_AbstractPlayback):
    def _playback_initialize(self, func, timeout, value, rng,
                             time_widget, play_widget):
        # Register ``func`` with the pyvistaqt plotter's timer; the remaining
        # arguments are unused by the Qt backend.
        self.figure.plotter.add_callback(func, timeout)
class _QtMplInterface(_AbstractMplInterface):
    def _mpl_initialize(self):
        """Create a Qt-backed matplotlib canvas for ``self.fig``."""
        # Local imports so matplotlib's Qt backend is only pulled in when a
        # canvas is actually requested.
        from PyQt5 import QtWidgets
        from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
        self.canvas = FigureCanvasQTAgg(self.fig)
        # Let the canvas grow with its container in both directions.
        FigureCanvasQTAgg.setSizePolicy(
            self.canvas,
            QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Expanding
        )
        FigureCanvasQTAgg.updateGeometry(self.canvas)
class _QtMplCanvas(_AbstractMplCanvas, _QtMplInterface):
    """Standalone Qt matplotlib canvas."""

    def __init__(self, width, height, dpi):
        super().__init__(width, height, dpi)
        self._mpl_initialize()
class _QtBrainMplCanvas(_AbstractBrainMplCanvas, _QtMplInterface):
    """Qt matplotlib canvas attached to (or detached from) a Brain window."""

    def __init__(self, brain, width, height, dpi):
        super().__init__(brain, width, height, dpi)
        self._mpl_initialize()
        if brain.separate_canvas:
            # Standalone window: detach from the Brain's Qt window.
            self.canvas.setParent(None)
        else:
            self.canvas.setParent(brain._renderer._window)
        self._connect()
class _QtWindow(_AbstractWindow):
    def _window_initialize(self):
        """Bind the pyvistaqt interactor/window and hook the close signal."""
        super()._window_initialize()
        self._interactor = self.figure.plotter.interactor
        self._window = self.figure.plotter.app_window
        self._window.setLocale(QLocale(QLocale.Language.English))
        self._window.signal_close.connect(self._window_clean)

    def _window_clean(self):
        # Drop references on close so Qt/VTK objects can be collected.
        self.figure.plotter = None
        self._interactor = None

    def _window_close_connect(self, func):
        """Call ``func`` when the window is closed."""
        self._window.signal_close.connect(func)

    def _window_get_dpi(self):
        """Return the logical DPI of the screen showing the window."""
        return self._window.windowHandle().screen().logicalDotsPerInch()

    def _window_get_size(self):
        """Return (width, height) of the 3D interactor in pixels."""
        w = self._interactor.geometry().width()
        h = self._interactor.geometry().height()
        return (w, h)

    def _window_get_simple_canvas(self, width, height, dpi):
        """Return a standalone matplotlib canvas."""
        return _QtMplCanvas(width, height, dpi)

    def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,
                              separate_canvas):
        """Create (and remember) the matplotlib canvas used for traces."""
        w, h = self._window_get_mplcanvas_size(interactor_fraction)
        # Cached for the resize logic in _window_ensure_minimum_sizes.
        self._interactor_fraction = interactor_fraction
        self._show_traces = show_traces
        self._separate_canvas = separate_canvas
        self._mplcanvas = _QtBrainMplCanvas(
            brain, w, h, self._window_get_dpi())
        return self._mplcanvas

    def _window_adjust_mplcanvas_layout(self):
        """Dock the traces canvas at the bottom of the main window."""
        canvas = self._mplcanvas.canvas
        self._mpl_dock, dock_layout = _create_dock_widget(
            self._window, "Traces", Qt.BottomDockWidgetArea)
        dock_layout.addWidget(canvas)

    def _window_get_cursor(self):
        return self._interactor.cursor()

    def _window_set_cursor(self, cursor):
        self._interactor.setCursor(cursor)

    def _window_new_cursor(self, name):
        # ``name`` is a Qt cursor-shape attribute name, e.g. "WaitCursor".
        return QCursor(getattr(Qt, name))

    @contextmanager
    def _window_ensure_minimum_sizes(self):
        """Pin minimum sizes during show() so the first layout is correct.

        The minimums are lifted again afterwards so the user can shrink the
        window; the statement order below is deliberate.
        """
        sz = self.figure.store['window_size']
        adjust_mpl = (self._show_traces and not self._separate_canvas)
        # plotter: pyvista.plotting.qt_plotting.BackgroundPlotter
        # plotter.interactor: vtk.qt.QVTKRenderWindowInteractor.QVTKRenderWindowInteractor -> QWidget  # noqa
        # plotter.app_window: pyvista.plotting.qt_plotting.MainWindow -> QMainWindow  # noqa
        # plotter.frame: QFrame with QVBoxLayout with plotter.interactor as centralWidget  # noqa
        # plotter.ren_win: vtkXOpenGLRenderWindow
        self._interactor.setMinimumSize(*sz)
        # Lines like this are useful for debugging these issues:
        # print('*' * 80)
        # print(0, self._interactor.app_window.size().height(), self._interactor.size().height(), self._mpl_dock.widget().height(), self._mplcanvas.canvas.size().height())  # noqa
        if adjust_mpl:
            # Height so the traces canvas occupies interactor_fraction of
            # the combined vertical space.
            mpl_h = int(round((sz[1] * self._interactor_fraction) /
                              (1 - self._interactor_fraction)))
            self._mplcanvas.canvas.setMinimumSize(sz[0], mpl_h)
            self._mpl_dock.widget().setMinimumSize(sz[0], mpl_h)
        try:
            yield  # show
        finally:
            # 1. Process events
            self._process_events()
            self._process_events()
            # 2. Get the window and interactor sizes that work
            win_sz = self._window.size()
            ren_sz = self._interactor.size()
            # 3. Undo the min size setting and process events
            self._interactor.setMinimumSize(0, 0)
            if adjust_mpl:
                self._mplcanvas.canvas.setMinimumSize(0, 0)
                self._mpl_dock.widget().setMinimumSize(0, 0)
            self._process_events()
            self._process_events()
            # 4. Compute the extra height required for dock decorations and add
            win_h = win_sz.height()
            if adjust_mpl:
                win_h += max(
                    self._mpl_dock.widget().size().height() - mpl_h, 0)
            # 5. Resize the window and interactor to the correct size
            # (not sure why, but this is required on macOS at least)
            self._interactor.window_size = (win_sz.width(), win_h)
            self._interactor.resize(ren_sz.width(), ren_sz.height())
            self._process_events()
            self._process_events()

    def _window_set_theme(self, theme):
        """Apply 'dark', 'light', 'auto', or a stylesheet file path."""
        if theme == 'auto':
            theme = _detect_theme()
        if theme == 'dark':
            try:
                import qdarkstyle
            except ModuleNotFoundError:
                logger.info('For Dark-Mode "qdarkstyle" has to be installed! '
                            'You can install it with `pip install qdarkstyle`')
                stylesheet = None
            else:
                stylesheet = qdarkstyle.load_stylesheet()
        elif theme != 'light':
            # Anything else is treated as a path to a Qt stylesheet file.
            with open(theme, 'r') as file:
                stylesheet = file.read()
        else:
            stylesheet = None
        self._window.setStyleSheet(stylesheet)
class _QtWidgetList(_AbstractWidgetList):
    """Wrap a list of widgets (or a QButtonGroup) behind the list API."""

    def __init__(self, src):
        self._src = src
        raw = src.buttons() if isinstance(src, QButtonGroup) else src
        self._widgets = [
            item if isinstance(item, _QtWidget) else _QtWidget(item)
            for item in raw
        ]

    def set_enabled(self, state):
        """Enable or disable every wrapped widget."""
        for wrapped in self._widgets:
            wrapped.set_enabled(state)

    def get_value(self, idx):
        """Return the value of the widget at ``idx``."""
        return self._widgets[idx].get_value()

    def set_value(self, idx, value):
        """Set the widget at ``idx``; radio groups just check that button."""
        if isinstance(self._src, QButtonGroup):
            # For a radio-button group the value is implied by which
            # button becomes selected.
            self._widgets[idx].set_value(True)
        else:
            self._widgets[idx].set_value(value)
class _QtWidget(_AbstractWidget):
    """Adapt a single Qt widget to the abstract widget API."""

    def set_value(self, value):
        """Set the widget's value, dispatching on the widget's API."""
        widget = self._widget
        if isinstance(widget, (QRadioButton, QToolButton, QPushButton)):
            # Buttons are "set" by clicking them.
            widget.click()
            return
        # Same precedence as before: spin/slider, combo, check box, text.
        for setter in ("setValue", "setCurrentText", "setChecked"):
            if hasattr(widget, setter):
                getattr(widget, setter)(value)
                return
        assert hasattr(widget, "setText")
        widget.setText(value)

    def get_value(self):
        """Return the widget's value, dispatching on the widget's API."""
        widget = self._widget
        if hasattr(widget, "value"):
            return widget.value()
        if hasattr(widget, "currentText"):
            return widget.currentText()
        if hasattr(widget, "checkState"):
            return bool(widget.checkState())
        assert hasattr(widget, "text")
        return widget.text()

    def set_range(self, rng):
        """Set the allowed (min, max) range."""
        self._widget.setRange(rng[0], rng[1])

    def show(self):
        self._widget.show()

    def hide(self):
        self._widget.hide()

    def set_enabled(self, state):
        self._widget.setEnabled(state)

    def is_enabled(self):
        return self._widget.isEnabled()

    def update(self, repaint=True):
        """Schedule a repaint; force it immediately when ``repaint``."""
        self._widget.update()
        if repaint:
            self._widget.repaint()

    def get_tooltip(self):
        assert hasattr(self._widget, 'toolTip')
        return self._widget.toolTip()

    def set_tooltip(self, tooltip):
        assert hasattr(self._widget, 'setToolTip')
        self._widget.setToolTip(tooltip)
class _QtAction(_AbstractAction):
    def trigger(self):
        """Programmatically fire the wrapped QAction."""
        self._action.trigger()
class _Renderer(_PyVistaRenderer, _QtDock, _QtToolBar, _QtMenuBar,
                _QtStatusBar, _QtWindow, _QtPlayback):
    # Backend identifier used by the renderer registry.
    _kind = 'qt'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._window_initialize()

    def show(self):
        """Show the window, pinning sizes while painting is suppressed."""
        super().show()
        with _qt_disable_paint(self.plotter):
            with self._window_ensure_minimum_sizes():
                self.plotter.app_window.show()
        self._update()
        for plotter in self._all_plotters:
            plotter.updateGeometry()
            plotter._render()
        self._process_events()
def _set_widget_tooltip(widget, tooltip):
if tooltip is not None:
widget.setToolTip(tooltip)
def _create_dock_widget(window, name, area, *, max_width=None):
    """Add a scrollable dock widget to ``window``.

    Returns the QDockWidget and the QVBoxLayout controls should be added to.
    """
    # create dock widget
    dock = QDockWidget(name)
    # add scroll area
    scroll = QScrollArea(dock)
    dock.setWidget(scroll)
    # give the scroll area a child widget
    widget = QWidget(scroll)
    scroll.setWidget(widget)
    scroll.setWidgetResizable(True)
    dock.setAllowedAreas(area)
    # A plain QLabel title bar removes the float/close buttons.
    dock.setTitleBarWidget(QLabel(name))
    window.addDockWidget(area, dock)
    dock_layout = QVBoxLayout()
    widget.setLayout(dock_layout)
    # Fix resize grip size
    # https://stackoverflow.com/a/65050468/2175965
    styles = ['margin: 4px;']
    if max_width is not None:
        styles.append(f'max-width: {max_width};')
    style_sheet = 'QDockWidget { ' + ' \n'.join(styles) + '\n}'
    dock.setStyleSheet(style_sheet)
    return dock, dock_layout
def _detect_theme():
try:
import darkdetect
return darkdetect.theme().lower()
except Exception:
return 'light'
@contextmanager
def _testing_context(interactive):
    """Temporarily flip the renderer/pyvista globals for test runs.

    ``interactive=True`` enables on-screen interactive rendering; otherwise
    rendering happens off-screen. Original values are always restored.
    """
    from . import renderer
    # Snapshot the globals so they can be restored in ``finally``.
    orig_offscreen = pyvista.OFF_SCREEN
    orig_testing = renderer.MNE_3D_BACKEND_TESTING
    orig_interactive = renderer.MNE_3D_BACKEND_INTERACTIVE
    renderer.MNE_3D_BACKEND_TESTING = True
    if interactive:
        pyvista.OFF_SCREEN = False
        renderer.MNE_3D_BACKEND_INTERACTIVE = True
    else:
        pyvista.OFF_SCREEN = True
        renderer.MNE_3D_BACKEND_INTERACTIVE = False
    try:
        yield
    finally:
        pyvista.OFF_SCREEN = orig_offscreen
        renderer.MNE_3D_BACKEND_TESTING = orig_testing
        renderer.MNE_3D_BACKEND_INTERACTIVE = orig_interactive
| 36.602548 | 179 | 0.622351 |
ace16542cd70105fa357af46a6cb3a31e7a5d3b3 | 6,128 | py | Python | test_paralleldomain/decoding/cityscapes/test_decoder.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 10 | 2021-11-17T17:23:49.000Z | 2022-03-18T09:51:23.000Z | test_paralleldomain/decoding/cityscapes/test_decoder.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 3 | 2021-12-02T17:16:20.000Z | 2022-01-07T12:47:13.000Z | test_paralleldomain/decoding/cityscapes/test_decoder.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 2 | 2022-03-09T07:03:54.000Z | 2022-03-23T15:53:48.000Z | import os
import numpy as np
import pytest
from paralleldomain import Dataset
from paralleldomain.decoding.cityscapes.decoder import CityscapesDatasetDecoder
from paralleldomain.model.annotation import AnnotationTypes
from paralleldomain.model.sensor import CameraSensor
from paralleldomain.model.unordered_scene import UnorderedScene
from test_paralleldomain.decoding.constants import CITYSCAPES_DATASET_PATH_ENV
@pytest.fixture
def cityscapes_dataset_path() -> str:
    """Path to the Cityscapes dataset; skip the test when the env var is unset."""
    path = os.environ.get(CITYSCAPES_DATASET_PATH_ENV)
    if path is None:
        pytest.skip()
    return path
@pytest.fixture
def cityscapes_test_dataset(cityscapes_dataset_path: str) -> Dataset:
    """Dataset restricted to the Cityscapes 'test' split."""
    return CityscapesDatasetDecoder(
        dataset_path=cityscapes_dataset_path, splits=["test"]
    ).get_dataset()
@pytest.fixture
def cityscapes_train_dataset(cityscapes_dataset_path: str) -> Dataset:
    """Dataset restricted to the Cityscapes 'train' split."""
    return CityscapesDatasetDecoder(
        dataset_path=cityscapes_dataset_path, splits=["train"]
    ).get_dataset()
@pytest.fixture()
def cityscapes_first_train_scene(cityscapes_train_dataset: Dataset) -> UnorderedScene:
    """First unordered scene of the train split."""
    first_name = cityscapes_train_dataset.unordered_scene_names[0]
    return cityscapes_train_dataset.get_unordered_scene(scene_name=first_name)
def test_decode_test_scene_names(cityscapes_test_dataset: Dataset):
    """The 'test' split yields only unordered scenes, one per test city."""
    assert len(cityscapes_test_dataset.scene_names) == 0
    scene_names = cityscapes_test_dataset.unordered_scene_names
    cities = ("berlin", "bielefeld", "bonn", "leverkusen", "mainz", "munich")
    assert len(scene_names) == len(cities)
    for city in cities:
        assert f"test-{city}" in scene_names
def test_decode_train_scene_names(cityscapes_train_dataset: Dataset):
    """The 'train' split yields only unordered scenes, one per train city."""
    assert len(cityscapes_train_dataset.scene_names) == 0
    scene_names = cityscapes_train_dataset.unordered_scene_names
    cities = (
        "aachen", "bochum", "bremen", "cologne", "darmstadt", "dusseldorf",
        "erfurt", "hamburg", "hanover", "jena", "krefeld", "monchengladbach",
        "strasbourg", "stuttgart", "tubingen", "ulm", "weimar", "zurich",
    )
    assert len(cities) == len(scene_names)
    for city in cities:
        assert f"train-{city}" in scene_names
def test_decode_sensor_names(cityscapes_first_train_scene: UnorderedScene):
    """A Cityscapes scene exposes exactly one camera and no lidars."""
    scene = cityscapes_first_train_scene
    assert len(scene.sensor_names) == 1
    assert len(scene.camera_names) == 1
    assert len(scene.lidar_names) == 0
def test_decode_class_maps(cityscapes_first_train_scene: UnorderedScene):
    """Only semantic segmentation has a class map, with all 35 classes."""
    class_maps = cityscapes_first_train_scene.class_maps
    assert len(class_maps) == 1
    assert AnnotationTypes.SemanticSegmentation2D in class_maps
    semseg_map = class_maps[AnnotationTypes.SemanticSegmentation2D]
    assert len(semseg_map.class_names) == 35
def test_decode_frame_ids(cityscapes_first_train_scene: UnorderedScene):
    # A decoded scene must expose at least one frame id.
    assert len(cityscapes_first_train_scene.frame_ids) > 0
def test_decode_camera_frame_ids(cityscapes_first_train_scene: UnorderedScene):
    """The scene's camera exposes at least one frame id."""
    scene = cityscapes_first_train_scene
    camera = scene.get_camera_sensor(camera_name=scene.camera_names[0])
    assert len(camera.frame_ids) > 0
def test_decode_available_annotation_types(cityscapes_first_train_scene: UnorderedScene):
    """Exactly semantic + instance segmentation are available."""
    available = cityscapes_first_train_scene.available_annotation_types
    assert len(available) == 2
    assert AnnotationTypes.SemanticSegmentation2D in available
    assert AnnotationTypes.InstanceSegmentation2D in available
def test_decode_camera(cityscapes_first_train_scene: UnorderedScene):
    """The scene's camera decodes to a CameraSensor instance."""
    scene = cityscapes_first_train_scene
    camera = scene.get_camera_sensor(camera_name=scene.camera_names[0])
    assert isinstance(camera, CameraSensor)
def test_decode_camera_image(cityscapes_first_train_scene: UnorderedScene):
    """RGB decoding yields a full-resolution (1024, 2048, 3) array."""
    scene = cityscapes_first_train_scene
    camera = scene.get_camera_sensor(camera_name=scene.camera_names[0])
    assert camera is not None
    assert isinstance(camera, CameraSensor)
    frame = camera.get_frame(frame_id=list(camera.frame_ids)[0])
    image = frame.image
    assert image is not None
    rgb = image.rgb
    assert isinstance(rgb, np.ndarray)
    assert rgb.shape == (1024, 2048, 3)
    assert rgb.shape[0] == image.height
    assert rgb.shape[1] == image.width
    assert rgb.shape[2] == image.channels
def test_decode_camera_semseg_2d(cityscapes_first_train_scene: UnorderedScene):
    """Semantic segmentation decodes to a (H, W, 1) map of ids in [0, 33]."""
    scene = cityscapes_first_train_scene
    camera = scene.get_camera_sensor(camera_name=scene.camera_names[0])
    assert camera is not None
    assert isinstance(camera, CameraSensor)
    frame = camera.get_frame(frame_id=list(camera.frame_ids)[0])
    semseg = frame.get_annotations(annotation_type=AnnotationTypes.SemanticSegmentation2D)
    assert semseg is not None
    class_ids = semseg.class_ids
    assert isinstance(class_ids, np.ndarray)
    assert class_ids.shape == (1024, 2048, 1)
    unique_ids = np.unique(class_ids)
    assert np.all((unique_ids <= 33) & (unique_ids > -1))
def test_decode_camera_instance_seg_2d(cityscapes_first_train_scene: UnorderedScene):
    """Instance segmentation decodes to a (H, W, 1) id map with >= 1 id."""
    camera = cityscapes_first_train_scene.get_camera_sensor(camera_name=cityscapes_first_train_scene.camera_names[0])
    assert camera is not None
    assert isinstance(camera, CameraSensor)
    camera_frame = camera.get_frame(frame_id=list(camera.frame_ids)[0])
    instanceseg = camera_frame.get_annotations(annotation_type=AnnotationTypes.InstanceSegmentation2D)
    assert instanceseg is not None
    instance_ids = instanceseg.instance_ids
    assert isinstance(instance_ids, np.ndarray)
    assert instance_ids.shape == (1024, 2048, 1)
    # Fixed: the previous assert was ``len(np.unique(instance_ids) < 19) > 0``,
    # where ``< 19`` bound first and produced a boolean array, so the assert
    # was vacuously true. Assert at least one instance id is present instead.
    assert len(np.unique(instance_ids)) > 0
| 39.535484 | 117 | 0.779047 |
ace1664a49fe20e69683e42e55c33dbaf01ea0f8 | 1,802 | py | Python | esphomeyaml/components/sensor/hx711.py | n0bel/esphome | 6a76a3642ed0b242f5b065a654a4bd4e281c7dae | [
"MIT"
] | null | null | null | esphomeyaml/components/sensor/hx711.py | n0bel/esphome | 6a76a3642ed0b242f5b065a654a4bd4e281c7dae | [
"MIT"
] | 5 | 2019-02-10T22:27:11.000Z | 2019-04-02T02:40:00.000Z | esphomeyaml/components/sensor/hx711.py | n0bel/esphome | 6a76a3642ed0b242f5b065a654a4bd4e281c7dae | [
"MIT"
] | null | null | null | import voluptuous as vol
from esphomeyaml import pins
from esphomeyaml.components import sensor
import esphomeyaml.config_validation as cv
from esphomeyaml.const import CONF_CLK_PIN, CONF_GAIN, CONF_ID, CONF_NAME, CONF_UPDATE_INTERVAL
from esphomeyaml.cpp_generator import Pvariable, add
from esphomeyaml.cpp_helpers import (gpio_input_pin_expression,
                                     gpio_output_pin_expression,
                                     setup_component)
from esphomeyaml.cpp_types import App, Application
# C++ helper types that the code generator references in the firmware source.
MakeHX711Sensor = Application.struct('MakeHX711Sensor')
HX711Sensor = sensor.sensor_ns.class_('HX711Sensor', sensor.PollingSensorComponent)

# Configuration key for the HX711 data-out (DOUT) pin.
CONF_DOUT_PIN = 'dout_pin'

# Map the user-facing gain values to the corresponding C++ enum members.
HX711Gain = sensor.sensor_ns.enum('HX711Gain')
GAINS = {
    128: HX711Gain.HX711_GAIN_128,
    32: HX711Gain.HX711_GAIN_32,
    64: HX711Gain.HX711_GAIN_64,
}

# Validation schema: DOUT is read by the MCU (input pin), CLK is driven by
# the MCU (output pin).
PLATFORM_SCHEMA = cv.nameable(sensor.SENSOR_PLATFORM_SCHEMA.extend({
    cv.GenerateID(): cv.declare_variable_id(HX711Sensor),
    vol.Required(CONF_DOUT_PIN): pins.gpio_input_pin_schema,
    vol.Required(CONF_CLK_PIN): pins.gpio_output_pin_schema,
    vol.Optional(CONF_GAIN): cv.one_of(*GAINS, int=True),
    vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
}).extend(cv.COMPONENT_SCHEMA.schema))
def to_code(config):
    """Generate the C++ setup code for the HX711 sensor platform."""
    for dout_pin in gpio_input_pin_expression(config[CONF_DOUT_PIN]):
        yield
    # Fix: the clock pin is driven by the MCU and is validated with
    # gpio_output_pin_schema above, so it must be built with the
    # output-pin expression (the input-pin helper was used before).
    for sck_pin in gpio_output_pin_expression(config[CONF_CLK_PIN]):
        yield
    rhs = App.make_hx711_sensor(config[CONF_NAME], dout_pin, sck_pin,
                                config.get(CONF_UPDATE_INTERVAL))
    hx711 = Pvariable(config[CONF_ID], rhs)
    if CONF_GAIN in config:
        add(hx711.set_gain(GAINS[config[CONF_GAIN]]))
    sensor.setup_sensor(hx711, config)
    setup_component(hx711, config)
# Compile-time define enabling the HX711 code paths in the firmware build.
BUILD_FLAGS = '-DUSE_HX711'
def to_hass_config(data, config):
    """Build the Home Assistant discovery config for this sensor."""
    return sensor.core_to_hass_config(data, config)
| 33.37037 | 95 | 0.77081 |
ace1668527237aa9abab09f127c4f07777ca154c | 7,789 | py | Python | tiddlyweb/serializations/html.py | angeluseve/tiddlyweb | d24a45d48faa2b014e1c1598ec176c4c1c98fb07 | [
"BSD-3-Clause"
] | 1 | 2016-05-09T15:26:17.000Z | 2016-05-09T15:26:17.000Z | tiddlyweb/serializations/html.py | angeluseve/tiddlyweb | d24a45d48faa2b014e1c1598ec176c4c1c98fb07 | [
"BSD-3-Clause"
] | null | null | null | tiddlyweb/serializations/html.py | angeluseve/tiddlyweb | d24a45d48faa2b014e1c1598ec176c4c1c98fb07 | [
"BSD-3-Clause"
] | null | null | null | """
HTML based serializers.
"""
import urllib
from tiddlyweb.serializations import SerializationInterface
class Serialization(SerializationInterface):
    """
    Serialize entities and collections to and from
    HTML representations. This is primarily used
    to create browser based presentations.

    NOTE(review): Python 2 code — relies on ``urllib.quote`` and
    ``basestring``. Page state (title, links) is stashed in the
    WSGI-style ``environ`` dict under ``tiddlyweb.*`` keys.
    """
    def __init__(self, environ=None):
        # Default to a fresh dict so instances never share mutable state.
        if environ is None:
            environ = {}
        self.environ = environ
        # Title and extra link entries consumed later by the page renderer.
        self.environ['tiddlyweb.title'] = ''
        self.environ['tiddlyweb.links'] = []
    def list_recipes(self, recipes):
        """
        List the recipes on the system as html.
        """
        self.environ['tiddlyweb.title'] = 'Recipes'
        lines = []
        output = '<ul id="recipes" class="listing">\n'
        for recipe in recipes:
            # href is URL-encoded; link text is the raw recipe name.
            line = '<li><a href="recipes/%s">%s</a></li>' % (urllib.quote(recipe.name.encode('utf-8')), recipe.name)
            lines.append(line)
        output += "\n".join(lines)
        return output + '\n</ul>'
    def list_bags(self, bags):
        """
        List the bags on the system as html.
        """
        self.environ['tiddlyweb.title'] = 'Bags'
        lines = []
        output = '<ul id="bags" class="listing">\n'
        for bag in bags:
            # href is URL-encoded; link text is the raw bag name.
            line = '<li><a href="bags/%s">%s</a></li>' % (urllib.quote(bag.name.encode('utf-8')), bag.name)
            lines.append(line)
        output += "\n".join(lines)
        return output + '\n</ul>'
    def recipe_as(self, recipe):
        """
        Recipe as html: one list item per (bag, filter) pair in the recipe,
        plus a link to the recipe's tiddlers.
        """
        self.environ['tiddlyweb.title'] = 'Recipe %s' % recipe.name
        lines = []
        for bag, filter_string in recipe.get_recipe():
            line = '<li><a href="'
            # Recipe entries may hold either a Bag object or a bag name.
            if not isinstance(bag, basestring):
                bag = bag.name
            line += '%s/bags/%s/tiddlers' % (self._server_prefix(), urllib.quote(bag.encode('utf-8')))
            if filter_string:
                line += '?filter=%s' % urllib.quote(filter_string.encode('utf-8'))
            line += '">bag: %s filter:%s</a></li>' % (bag, filter_string)
            lines.append(line)
        output = "\n".join(lines)
        # NOTE(review): `title` is assigned but never used below.
        title = 'Bags in Recipe %s' % recipe.name
        tiddler_link = '%s/tiddlers' % urllib.quote(recipe.name.encode('utf-8'))
        return """
<div id="recipedesc" class="description">%s</div>
<ul id="recipe" class="listing">
%s
</ul>
<div class="tiddlerslink"><a href="%s">Tiddlers in Recipe</a></div>
""" % (recipe.desc, output, tiddler_link)
    def bag_as(self, bag):
        """
        Bag as html: its description plus a link to its tiddlers.
        """
        self.environ['tiddlyweb.title'] = 'Bag %s' % bag.name
        tiddler_link = '%s/tiddlers' % urllib.quote(bag.name.encode('utf-8'))
        return """
<div id="bagdesc" class="description">%s</div>
<div class="tiddlerslink"><a href="%s">Tiddlers in Bag %s</a></div>
""" % (bag.desc, tiddler_link, bag.name)
    def list_tiddlers(self, bag):
        """
        List the tiddlers in a bag as html.

        Handles three presentation modes: tiddlers reached through a
        recipe, through a bag, and (when ``bag.revbag``) per-revision
        links. A search result bag (``bag.searchbag``) suppresses the
        wiki link.
        """
        server_prefix = self._server_prefix()
        lines = []
        # NOTE(review): if the bag is empty and not a searchbag, `title`
        # and `wiki_link` are never bound and the code below raises
        # NameError — confirm callers never hit that case.
        for tiddler in bag.list_tiddlers():
            if tiddler.recipe:
                base = 'recipes'
                base_link = urllib.quote(tiddler.recipe.encode('utf-8'))
                wiki_link = '%s/recipes/%s/tiddlers' % (server_prefix, base_link)
                title = 'Tiddlers in Recipe %s' % tiddler.recipe
            else:
                base = 'bags'
                base_link = urllib.quote(tiddler.bag.encode('utf-8'))
                wiki_link = '%s/bags/%s/tiddlers' % (server_prefix, base_link)
                title = 'Tiddlers in Bag %s' % tiddler.bag
            if bag.revbag:
                # Revision bags link each entry to a specific revision.
                line = '<li><a href="%s/%s/%s/tiddlers/%s/revisions/%s">%s:%s</a></li>' % (
                    server_prefix,
                    base,
                    base_link,
                    urllib.quote(tiddler.title.encode('utf-8')),
                    tiddler.revision,
                    tiddler.title,
                    tiddler.revision)
                wiki_link += '/%s/revisions' % urllib.quote(tiddler.title.encode('utf-8'))
                title = 'Revisions of Tiddler %s' % tiddler.title
            else:
                line = '<li><a href="%s/%s/%s/tiddlers/%s">%s</a></li>' % (
                    server_prefix,
                    base,
                    base_link,
                    urllib.quote(tiddler.title.encode('utf-8')),
                    tiddler.title)
            lines.append(line)
        if bag.searchbag:
            title = 'Found Tiddlers'
            wiki_link = None
        output = "\n".join(lines)
        self.environ['tiddlyweb.title'] = title
        return """
%s
<ul id="tiddlers" class="listing">
%s
</ul>
""" % (self._tiddler_list_header(wiki_link), output)
    def tiddler_as(self, tiddler):
        """
        Transform the provided tiddler into an HTML
        representation of the tiddler packaged in a
        DIV. If wikklytext is available the wikitext
        will be rendered into formatted HTML.
        """
        try:
            return self._tiddler_to_wikklyhtml(tiddler)
        except ImportError:
            # wikklytext not installed: fall back to escaped plain text.
            return self._tiddler_div(tiddler) + '<pre>%s</pre>' % self._html_encode(tiddler.text) + '</div>'
    def _server_prefix(self):
        """
        Return the string that is the server prefix,
        for creating URLs.
        """
        config = self.environ.get('tiddlyweb.config', {})
        return config.get('server_prefix', '')
    def _tiddler_list_header(self, wiki_link):
        """
        The string we present at the top of a list of tiddlers.
        Empty when no wiki link applies (e.g. search results).
        """
        if wiki_link:
            return """
<div id="tiddlersheader"><a href="%s">These Tiddlers as a TiddlyWiki</a></div>
""" % ('%s.wiki' % wiki_link)
        return ''
    def _tiddler_div(self, tiddler):
        """
        The string that starts the div that contains a tiddler.

        NOTE(review): tiddler metadata is interpolated unescaped into
        attribute values; `tags_as` / `_html_encode` come from the base
        SerializationInterface — confirm escaping happens there.
        """
        return u'<div class="tiddler" title="%s" server.page.revision="%s" ' \
               'modifier="%s" modified="%s" created="%s" tags="%s" %s>' % \
               (tiddler.title, tiddler.revision, tiddler.modifier,
                tiddler.modified, tiddler.created,
                self.tags_as(tiddler.tags),
                self._tiddler_fields(tiddler.fields))
    def _tiddler_fields(self, fields):
        """
        Turn tiddler fields into a string suitable for
        _tiddler_div.
        """
        output = []
        for key in fields:
            output.append('%s="%s"' % (key, fields[key]))
        return ' '.join(output)
    def _tiddler_to_wikklyhtml(self, tiddler):
        """
        Render tiddler.text to HTML using wikklytext.
        Raises ImportError when wikklytext is unavailable (caught by
        tiddler_as).
        """
        server_prefix = self._server_prefix()
        if tiddler.recipe:
            list_link = 'recipes/%s/tiddlers' % tiddler.recipe.encode('utf-8')
            list_title = 'Tiddlers in Recipe %s' % tiddler.recipe
        else:
            list_link = 'bags/%s/tiddlers' % tiddler.bag.encode('utf-8')
            list_title = 'Tiddlers in Bag %s' % tiddler.bag
        from tiddlyweb.wikklyhtml import wikitext_to_wikklyhtml
        html = wikitext_to_wikklyhtml('%s/' % server_prefix,
                                      list_link, tiddler.text)
        # Have to be very careful in the following about UTF-8 handling
        # because wikklytext wants to encode its output.
        self.environ['tiddlyweb.title'] = tiddler.title
        return """
<div class="tiddlerslink"><a href="%s" title="tiddler list">%s</a></div>
%s
%s
</div>
""" % (urllib.quote('%s/%s' % (server_prefix, list_link), safe='/'),
       list_title.encode('utf-8'),
       self._tiddler_div(tiddler).encode('utf-8'),
       html)
| 36.568075 | 116 | 0.541918 |
ace167c296002455aeee613c5e53181b6508976c | 10,246 | py | Python | python/utils.py | cmantill/HHbbVV | 51b97949d8976e81f2a6d1806b0d07d946793bdf | [
"MIT"
] | 2 | 2021-07-14T20:37:50.000Z | 2021-07-14T20:38:06.000Z | python/utils.py | cmantill/HHbbVV | 51b97949d8976e81f2a6d1806b0d07d946793bdf | [
"MIT"
] | 1 | 2021-07-02T21:29:07.000Z | 2021-07-02T21:29:07.000Z | python/utils.py | cmantill/HHbbVV | 51b97949d8976e81f2a6d1806b0d07d946793bdf | [
"MIT"
] | 1 | 2021-06-30T17:16:28.000Z | 2021-06-30T17:16:28.000Z | """
Common functions for the analysis.
Author(s): Raghav Kansal
"""
from hist import Hist
import numpy as np
from copy import deepcopy
# Sample categories used throughout the analysis.
background_keys = ["V", "Top", "QCD"]
sig_key = "HHbbVV4q"
data_key = "Data"
all_keys = background_keys + [data_key, sig_key]

# Human-readable legend labels, parallel to the key lists above.
background_labels = ["VV/V+jets", "ST/TT", "QCD"]
sig_label = "HHbbVV4q"
data_label = "Data"
all_labels = background_labels + [data_label, sig_label]
def getAllKeys():
    """Return all sample keys (backgrounds + data + signal)."""
    return all_keys
def getSigKey():
    """Return the signal sample key."""
    return sig_key
def getBackgroundKeys():
    """Return the background sample keys."""
    return background_keys
def getSimKeys():
    """Return keys for simulated samples (backgrounds + signal)."""
    return background_keys + [sig_key]
def getAllLabels():
    """Return plot labels for all samples (backgrounds + data + signal)."""
    return all_labels
def getSigLabel():
    """Return the signal plot label."""
    return sig_label
def getBackgroundLabels():
    """Return the background plot labels."""
    return background_labels
def getSimLabels():
    """Return plot labels for simulated samples (backgrounds + signal)."""
    return background_labels + [sig_label]
def add_bool_arg(parser, name, help, default=False, no_name=None):
    """Register a mutually exclusive --name / --no-name boolean flag pair.

    Args:
        parser: the argparse parser to add the flags to.
        name: flag name (hyphens map to underscores in the dest attribute).
        help: help text for the positive flag.
        default: value when neither flag is given.
        no_name: custom name for the negative flag; defaults to "no-<name>".
    """
    dest = name.replace("-", "_")
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument("--" + name, dest=dest, action="store_true", help=help)
    if no_name is None:
        negated, negated_help = "no-" + name, "don't " + help
    else:
        negated, negated_help = no_name, help
    group.add_argument("--" + negated, dest=dest, action="store_false", help=negated_help)
    parser.set_defaults(**{dest: default})
def load_events(data_path: str = "../../data/2017_combined/", keys: list = all_keys, do_print: bool = True):
    """Load skimmed events for each sample in ``keys``.

    Each sample must live at ``{data_path}{key}.pkl`` as a pickle with a
    "skimmed_events" entry. Optionally prints progress and per-sample
    weighted yields.

    Returns:
        dict mapping sample key -> skimmed-events dict.
    """
    import pickle

    events = {}
    for key in keys:
        if do_print:
            print(f"Loading {key} events")
        with open(f"{data_path}{key}.pkl", "rb") as pkl_file:
            events[key] = pickle.load(pkl_file)["skimmed_events"]

    # Just for checking
    if do_print:
        for key, evts in events.items():
            print(f"{key} events: {np.sum(evts['finalWeight']):.2f}")

    return events
def make_vector(events: dict, name: str, mask=None):
    """
    Creates Lorentz vector from input events and beginning name, assuming events
    contain {name}Pt, {name}Phi, {name}Eta and {name}Msd (or {name}Mass) variables.
    Optional input mask to select certain events.

    Args:
        events (dict): dict of variables and corresponding numpy arrays
        name (str): object string e.g. ak8FatJet
        mask (bool array, optional): array selecting desired events

    Returns:
        a ``vector`` array of (pt, phi, eta, M).
    """
    import vector

    # Single helper instead of the previous duplicated masked/unmasked branches.
    def _field(var):
        arr = events[var]
        return arr if mask is None else arr[mask]

    # Prefer soft-drop mass when present, otherwise the plain mass branch.
    mass_key = f"{name}Msd" if f"{name}Msd" in events else f"{name}Mass"

    return vector.array(
        {
            "pt": _field(f"{name}Pt"),
            "phi": _field(f"{name}Phi"),
            "eta": _field(f"{name}Eta"),
            "M": _field(mass_key),
        }
    )
def getParticles(particle_list, particle_type):
    """Boolean mask selecting particles of ``particle_type`` in ``particle_list``.

    Args:
        particle_list: array of particle pdgIds.
        particle_type: 'b' (bottom quarks) or 'V' (W or Z bosons).
                       TODO: support raw pdgIds or lists of pdgIds.

    Returns:
        boolean array (None for unrecognized types).
    """
    B_PDGID = 5
    Z_PDGID = 23
    W_PDGID = 24

    abs_ids = abs(particle_list)
    if particle_type == "b":
        return abs_ids == B_PDGID
    elif particle_type == "V":
        # element-wise OR of the two boson masks
        return (abs_ids == W_PDGID) + (abs_ids == Z_PDGID)
def singleVarHist(events: dict, var: str, bins: list, label: str, weight_key: str = "finalWeight", blind_region: list = None, selection: dict = None):
    """
    Makes and fills a histogram for variable `var` using data in the `events` dict.

    Args:
        events (dict): a dict of events of format {sample1: {var1: np.array, var2: np.array, ...}, sample2: ...}
        var (str): variable inside the events dict to make a histogram of
        bins (list): bins in Hist format i.e. [num_bins, min_value, max_value]
        label (str): label for variable (shows up when plotting)
        weight_key (str, optional): which weight to use from events, if different from 'weight'
        blind_region (list, optional): region to blind for data, in format [low_cut, high_cut].
            Bins in this region will be set to 0 for data.
        selection (dict, optional): if performing a selection first, dict of boolean arrays for each sample

    Returns:
        a 2D ``hist.Hist`` with a categorical sample axis and a regular axis for `var`.
    """
    keys = list(events.keys())

    # One categorical axis per sample, one regular axis for the variable.
    h = Hist.new.StrCat(keys, name="Sample").Reg(*bins, name=var, label=label).Double()

    for key in keys:
        if selection is None:
            fill_data = {var: events[key][var]}
            weight = events[key][weight_key]
        else:
            fill_data = {var: events[key][var][selection[key]]}
            weight = events[key][weight_key][selection[key]]
        h.fill(Sample=key, **fill_data, weight=weight)

    if blind_region is not None:
        # Zero out data bins overlapping [low_cut, high_cut]; 'right'/'left'
        # plus the +1 make the clearing inclusive of partially covered bins.
        bins = h.axes[1].edges
        lv = int(np.searchsorted(bins, blind_region[0], 'right'))
        rv = int(np.searchsorted(bins, blind_region[1], 'left') + 1)

        # NOTE(review): hard-codes 'Data' rather than the module-level data_key.
        data_key_index = np.where(np.array(list(h.axes[0])) == 'Data')[0][0]
        h.view(flow=True)[data_key_index][lv:rv] = 0

    return h
def getSignalPlotScaleFactor(events: dict, weight_key: str = "finalWeight", selection: dict = None):
    """Get scale factor for signal in histogram plots.

    Ratio of total (optionally selected) data weight to signal weight.
    """
    data_weights = events[data_key][weight_key]
    sig_weights = events[sig_key][weight_key]
    if selection is not None:
        data_weights = data_weights[selection[data_key]]
        sig_weights = sig_weights[selection[sig_key]]
    return np.sum(data_weights) / np.sum(sig_weights)
def add_selection(name, sel, selection, cutflow, events, weight_key):
    """Register cut ``sel`` under ``name`` and record the post-cut weighted yield.

    Mutates ``selection`` (a PackedSelection-like object) and ``cutflow`` in place.
    """
    selection.add(name, sel)
    combined_mask = selection.all(*selection.names)
    cutflow[name] = np.sum(events[weight_key][combined_mask])
def make_selection(
    var_cuts: dict, events: dict, weight_key: str = "finalWeight", cutflow: dict = None, selection: dict = None, MAX_VAL: float = 9999.0
):
    """
    Makes cuts defined in `var_cuts` for each sample in `events`.

    Args:
        var_cuts (dict): a dict of cuts, with each (key, value) pair = (var, [lower cut value, upper cut value]).
            A key of the form "var1+var2" means the cut passes if it passes for var1 OR var2.
        events (dict): a dict of events of format {sample1: {var1: np.array, var2: np.array, ...}, sample2: ...}
        weight_key (str): key to use for weights. Defaults to 'finalWeight'.
        cutflow (dict): cutflow from previous cuts, if any. Defaults to None.
        selection (dict): cutflow from previous selection, if any. Defaults to None.
        MAX_VAL (float): if abs of one of the cuts equals or exceeds this value it will be ignored. Defaults to 9999.

    Returns:
        selection (dict): dict of each sample's cut boolean arrays.
        cutflow (dict): dict of each sample's yields after each cut.
    """
    from coffea.processor import PackedSelection

    if selection is None:
        selection = {}
    else:
        # deep copy so the caller's previous selection dict is not mutated
        selection = deepcopy(selection)

    if cutflow is None:
        cutflow = {}

    for s, evts in events.items():
        if s not in cutflow:
            cutflow[s] = {}

        if s in selection:
            # fold the previous boolean mask in as the first "cut"
            new_selection = PackedSelection()
            new_selection.add("Previous selection", selection[s])
            selection[s] = new_selection
        else:
            selection[s] = PackedSelection()

        for var, brange in var_cuts.items():
            if "+" in var:  # means OR-ing these cuts
                vars = var.split("+")
                if brange[0] > -MAX_VAL:
                    # `+` on boolean arrays acts as element-wise OR
                    cut1 = evts[vars[0]] > brange[0]
                    for tvars in vars[1:]:
                        cut1 = cut1 + (evts[tvars] > brange[0])
                    add_selection(f"{' or '.join(vars[:])} > {brange[0]}", cut1, selection[s], cutflow[s], evts, weight_key)
                if brange[1] < MAX_VAL:
                    cut2 = evts[vars[0]] < brange[1]
                    for tvars in vars[1:]:
                        cut2 = cut2 + (evts[tvars] < brange[1])
                    add_selection(f"{' or '.join(vars[:])} < {brange[1]}", cut2, selection[s], cutflow[s], evts, weight_key)
            else:
                # bounds at +/-MAX_VAL or beyond are treated as "no cut"
                if brange[0] > -MAX_VAL:
                    add_selection(f"{var} > {brange[0]}", evts[var] > brange[0], selection[s], cutflow[s], evts, weight_key)
                if brange[1] < MAX_VAL:
                    add_selection(f"{var} < {brange[1]}", evts[var] < brange[1], selection[s], cutflow[s], evts, weight_key)

        # collapse the PackedSelection into a single boolean mask per sample
        selection[s] = selection[s].all(*selection[s].names)

    return selection, cutflow
def getSigSidebandBGYields(mass_key: str, mass_cuts: list, events: dict, weight_key: str = "finalWeight", selection: dict = None):
    """Signal and background yields in the [mass_cuts[0], mass_cuts[1]] window.

    The background estimate is the data yield in two sidebands, each half the
    window width, directly below and above the window.

    Returns:
        (sig_yield, bg_yield) tuple of weighted sums.
    """
    def _masked(sample_key):
        # mass and weight arrays for one sample, after the optional selection
        mass = events[sample_key][mass_key]
        weight = events[sample_key][weight_key]
        if selection is not None:
            mass = mass[selection[sample_key]]
            weight = weight[selection[sample_key]]
        return mass, weight

    sig_mass, sig_weight = _masked(sig_key)
    data_mass, data_weight = _masked(data_key)

    in_window = (sig_mass > mass_cuts[0]) * (sig_mass < mass_cuts[1])
    sig_yield = np.sum(sig_weight[in_window])

    # sidebands: each half the window width, below and above the window
    half_width = (mass_cuts[1] - mass_cuts[0]) / 2
    low_sideband = (data_mass > mass_cuts[0] - half_width) * (data_mass < mass_cuts[0])
    high_sideband = (data_mass > mass_cuts[1]) * (data_mass < mass_cuts[1] + half_width)
    bg_yield = np.sum(data_weight[low_sideband]) + np.sum(data_weight[high_sideband])

    return sig_yield, bg_yield
| 35.825175 | 157 | 0.622584 |
ace168c7068283a56f300837946417c5c082edc9 | 28,331 | py | Python | pycomm/cip/cip_base.py | leomarssilva/pycomm | 325f80d01ea11da36f85f265e3bf6649416820f1 | [
"MIT"
] | null | null | null | pycomm/cip/cip_base.py | leomarssilva/pycomm | 325f80d01ea11da36f85f265e3bf6649416820f1 | [
"MIT"
] | null | null | null | pycomm/cip/cip_base.py | leomarssilva/pycomm | 325f80d01ea11da36f85f265e3bf6649416820f1 | [
"MIT"
] | 1 | 2021-02-25T21:50:14.000Z | 2021-02-25T21:50:14.000Z | # -*- coding: utf-8 -*-
#
# cip_base.py - A set of classes methods and structures used to implement Ethernet/IP
#
#
# Copyright (c) 2014 Agostino Ruscito <ruscito@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import struct
import socket
import random
from os import getpid
from pycomm.cip.cip_const import *
from pycomm.common import PycommError
import logging
# Compatibility shim: logging.NullHandler exists from Python 2.7+;
# define a no-op fallback for older interpreters.
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        """No-op handler so library logging stays silent unless configured."""
        def emit(self, record):
            pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
class CommError(PycommError):
    """Raised for communication-level failures (socket, encapsulation replies)."""
    pass
class DataError(PycommError):
    """Raised for data encoding/decoding failures."""
    pass
def pack_sint(n):
    """pack signed 8-bit int into 1 byte"""
    return struct.pack('b', n)
def pack_usint(n):
    """pack unsigned 8-bit int into 1 byte"""
    return struct.pack('B', n)
def pack_int(n):
    """pack 16 bit into 2 bytes little endian"""
    return struct.pack('<h', n)
def pack_uint(n):
    """pack 16 bit into 2 bytes little endian"""
    return struct.pack('<H', n)
def pack_dint(n):
    """pack 32 bit into 4 bytes little endian"""
    return struct.pack('<i', n)
def pack_real(r):
    """pack 32-bit float into 4 bytes little endian"""
    return struct.pack('<f', r)
def pack_lint(l):
    """pack 64 bit into 8 bytes little endian"""
    return struct.pack('<q', l)
def unpack_bool(st):
    """Unpack the first byte of `st` as a boolean int: 1 if non-zero, else 0.

    Bug fix: uses the slice st[0:1] (a bytes object) rather than st[0];
    on Python 3 indexing bytes yields an int, which struct.unpack('B', ...)
    rejects with TypeError.
    """
    if struct.unpack('B', st[0:1])[0] != 0:
        return 1
    return 0
def unpack_sint(st):
    """unpack 1 byte to signed 8-bit int"""
    return int(struct.unpack('b', st[0:1])[0])
def unpack_usint(st):
    """unpack 1 byte to unsigned 8-bit int"""
    return int(struct.unpack('B', st[0:1])[0])
def unpack_int(st):
    """unpack 2 bytes little endian to int"""
    return int(struct.unpack('<h', st[0:2])[0])
def unpack_uint(st):
    """unpack 2 bytes little endian to int"""
    return int(struct.unpack('<H', st[0:2])[0])
def unpack_dint(st):
    """unpack 4 bytes little endian to int"""
    return int(struct.unpack('<i', st[0:4])[0])
def unpack_real(st):
    """unpack 4 bytes little endian to float"""
    return float(struct.unpack('<f', st[0:4])[0])
def unpack_lint(st):
    """unpack 8 bytes little endian to int"""
    return int(struct.unpack('<q', st[0:8])[0])
def get_bit(value, idx):
    """Return True if bit `idx` of `value` is set, else False."""
    return bool((value >> idx) & 1)
# CIP elementary type name -> encoder callable (value -> little-endian bytes).
PACK_DATA_FUNCTION = {
    'BOOL': pack_sint,
    'SINT': pack_sint,    # Signed 8-bit integer
    'INT': pack_int,      # Signed 16-bit integer
    'UINT': pack_uint,    # Unsigned 16-bit integer
    'USINT': pack_usint,  # Unsigned Byte Integer
    'DINT': pack_dint,    # Signed 32-bit integer
    'REAL': pack_real,    # 32-bit floating point
    'LINT': pack_lint,
    'BYTE': pack_sint,    # byte string 8-bits
    'WORD': pack_uint,    # byte string 16-bits
    'DWORD': pack_dint,   # byte string 32-bits
    'LWORD': pack_lint    # byte string 64-bits
}

# CIP elementary type name -> decoder callable (bytes -> value).
UNPACK_DATA_FUNCTION = {
    'BOOL': unpack_bool,
    'SINT': unpack_sint,    # Signed 8-bit integer
    'INT': unpack_int,      # Signed 16-bit integer
    'UINT': unpack_uint,    # Unsigned 16-bit integer
    'USINT': unpack_usint,  # Unsigned Byte Integer
    'DINT': unpack_dint,    # Signed 32-bit integer
    'REAL': unpack_real,    # 32-bit floating point,
    'LINT': unpack_lint,
    'BYTE': unpack_sint,    # byte string 8-bits
    'WORD': unpack_uint,    # byte string 16-bits
    'DWORD': unpack_dint,   # byte string 32-bits
    'LWORD': unpack_lint    # byte string 64-bits
}

# CIP elementary type name -> size on the wire, in bytes.
DATA_FUNCTION_SIZE = {
    'BOOL': 1,
    'SINT': 1,   # Signed 8-bit integer
    'USINT': 1,  # Unsigned 8-bit integer
    'INT': 2,    # Signed 16-bit integer
    'UINT': 2,   # Unsigned 16-bit integer
    'DINT': 4,   # Signed 32-bit integer
    'REAL': 4,   # 32-bit floating point
    'LINT': 8,
    'BYTE': 1,   # byte string 8-bits
    'WORD': 2,   # byte string 16-bits
    'DWORD': 4,  # byte string 32-bits
    'LWORD': 8   # byte string 64-bits
}

# PCCC (SLC/PLC-5) file-type letter -> decoder callable.
UNPACK_PCCC_DATA_FUNCTION = {
    'N': unpack_int,
    'B': unpack_int,
    'T': unpack_int,
    'C': unpack_int,
    'S': unpack_int,
    'F': unpack_real,
    'A': unpack_sint,
    'R': unpack_dint,
    'O': unpack_int,
    'I': unpack_int
}

# PCCC (SLC/PLC-5) file-type letter -> encoder callable.
PACK_PCCC_DATA_FUNCTION = {
    'N': pack_int,
    'B': pack_int,
    'T': pack_int,
    'C': pack_int,
    'S': pack_int,
    'F': pack_real,
    'A': pack_sint,
    'R': pack_dint,
    'O': pack_int,
    'I': pack_int
}
def print_bytes_line(msg):
    """Render `msg` as one contiguous lowercase hex string (no separators)."""
    return ''.join("{:0>2x}".format(byte) for byte in msg)
def print_bytes_msg(msg, info=''):
    """Render `msg` as a hex dump: `info`, then 10 bytes per line, each line
    prefixed with its zero-padded starting offset, e.g. "\\n(0010) 0a 0b ...".
    """
    pieces = [info]
    for offset, byte in enumerate(msg):
        if offset % 10 == 0:
            # start a new dump line every 10 bytes, labeled with the offset
            pieces.append("\n({:0>4d}) ".format(offset))
        pieces.append("{:0>2x} ".format(byte))
    return ''.join(pieces)
def get_extended_status(msg, start):
    """Decode the general + extended status of a CIP reply into a message string.

    `start` is the offset of the General Status byte:
      send_rr_data:   42 General Status, 43 size of additional status, 44..n additional status
      send_unit_data: 48 General Status, 49 size of additional status, 50..n additional status
    """
    status = unpack_usint(msg[start:start+1])
    # additional-status size is given in 16-bit words; convert to bytes
    extended_status_size = (unpack_usint(msg[start+1:start+2]))*2
    extended_status = 0
    if extended_status_size != 0:
        # There is an additional status
        # NOTE(review): after the *2 above the size is always even, so the
        # == 1 branch below is unreachable — confirm intended word sizes.
        if extended_status_size == 1:
            extended_status = unpack_usint(msg[start+2:start+3])
        elif extended_status_size == 2:
            extended_status = unpack_uint(msg[start+2:start+4])
        elif extended_status_size == 4:
            extended_status = unpack_dint(msg[start+2:start+6])
        else:
            return 'Extended Status Size Unknown'
    try:
        # EXTEND_CODES maps (general status -> {extended code -> text})
        return '{0}'.format(EXTEND_CODES[status][extended_status])
    except LookupError:
        return "Extended Status info not present"
def create_tag_rp(tag, multi_requests=False):
    """ Create tag Request Packet

    It returns the request path wrapped around the tag passed, handling
    dotted member access ("a.b") and array indices ("a[1]" / "a[1,2]").
    If any error it returns None.

    NOTE(review): builds a str via chr()/''.join while pack_usint() etc.
    return bytes — on Python 3 an indexed tag would make ''.join fail;
    confirm how callers encode this path.
    """
    tags = tag.split('.')
    rp = []
    index = []
    for tag in tags:
        add_index = False
        # Check if is an array tag
        if tag.find('[') != -1:
            # Remove the last square bracket
            tag = tag[:len(tag)-1]
            # Isolate the value inside bracket
            inside_value = tag[tag.find('[')+1:]
            # Now split the inside value in case part of multidimensional array
            index = inside_value.split(',')
            # Flag the existence of one or more index
            add_index = True
            # Get only the tag part
            tag = tag[:tag.find('[')]
        tag_length = len(tag)

        # Create the request path
        rp.append(EXTENDED_SYMBOL)  # ANSI Ext. symbolic segment
        rp.append(chr(tag_length))  # Length of the tag

        # Add the tag to the Request path
        for char in tag:
            rp.append(char)

        # Add pad byte because total length of Request path must be word-aligned
        if tag_length % 2:
            rp.append(PADDING_BYTE)
        # Add any index, choosing the smallest element-id encoding that fits
        if add_index:
            for idx in index:
                val = int(idx)
                if val <= 0xff:
                    rp.append(ELEMENT_ID["8-bit"])
                    rp.append(pack_usint(val))
                elif val <= 0xffff:
                    rp.append(ELEMENT_ID["16-bit"]+PADDING_BYTE)
                    rp.append(pack_uint(val))
                elif val <= 0xfffffffff:
                    # NOTE(review): 0xfffffffff has nine f's (36 bits);
                    # presumably 0xffffffff was intended — confirm.
                    rp.append(ELEMENT_ID["32-bit"]+PADDING_BYTE)
                    rp.append(pack_dint(val))
                else:
                    # Cannot create a valid request packet
                    return None

    # At this point the Request Path is completed,
    if multi_requests:
        # Bug fix: integer division — on Python 3 `len(rp)/2` is a float
        # and chr() would raise TypeError.
        request_path = chr(len(rp) // 2) + ''.join(rp)
    else:
        request_path = ''.join(rp)
    return request_path
def build_common_packet_format(message_type, message, addr_type, addr_data=None, timeout=10):
    """ build_common_packet_format

    It creates the common part for a CIP message. Check Volume 2 (page 2.22)
    of CIP specification for reference.

    :param message_type: Data Item Type ID (e.g. connected/unconnected data item)
    :param message: the encapsulated CIP payload (bytes)
    :param addr_type: Address Item Type ID (e.g. UCMM, connection based)
    :param addr_data: optional address payload (e.g. connection id)
    :param timeout: encapsulation timeout in seconds
    :return: the assembled common packet format bytes
    """
    msg = pack_dint(0)   # Interface Handle: shall be 0 for CIP
    msg += pack_uint(timeout)   # timeout
    msg += pack_uint(2)  # Item count: should be at list 2 (Address and Data)
    msg += addr_type  # Address Item Type ID

    if addr_data is not None:
        msg += pack_uint(len(addr_data))  # Address Item Length
        msg += addr_data
    else:
        msg += pack_uint(0)  # Address Item Length
    msg += message_type  # Data Type ID
    msg += pack_uint(len(message))   # Data Item Length
    msg += message
    return msg
def build_multiple_service(rp_list, sequence=None):
    """Assemble a Multiple Service Packet request from a list of request paths.

    :param rp_list: list of already-built single-service request packets
    :param sequence: optional connected-message sequence number to prepend
    :return: list of message fragments (caller is expected to join them)

    NOTE(review): mixes bytes (pack_uint) and str (chr) fragments in the
    same list — confirm how the caller joins/encodes these on Python 3.
    """
    mr = []
    if sequence is not None:
        mr.append(pack_uint(sequence))
    mr.append(chr(TAG_SERVICES_REQUEST["Multiple Service Packet"]))  # the Request Service
    mr.append(pack_usint(2))  # the Request Path Size length in word
    mr.append(CLASS_ID["8-bit"])
    mr.append(CLASS_CODE["Message Router"])
    mr.append(INSTANCE_ID["8-bit"])
    mr.append(pack_usint(1))  # Instance 1
    mr.append(pack_uint(len(rp_list)))  # Number of service contained in the request

    # Offset calculation: offsets are relative to the start of the service
    # list; the first service starts after the offset table itself.
    offset = (len(rp_list) * 2) + 2
    for index, rp in enumerate(rp_list):
        if index == 0:
            mr.append(pack_uint(offset))  # Starting offset
        else:
            mr.append(pack_uint(offset))
        offset += len(rp)

    for rp in rp_list:
        mr.append(rp)
    return mr
def parse_multiple_request(message, tags, typ):
    """ parse_multi_request

    This function should be used to parse the message replied to a multi
    request service wrapped around the send_unit_data message.

    :param message: the full message returned from the PLC
    :param tags: The list of tags to be read
    :param typ: to specify if multi request service READ or WRITE
    :return: a list of tuple in the format [ (tag name, value, data type), ( tag name, value, data type) ].
             In case of error the tuple will be (tag name, None, None)
    """
    # 50 = offset of the embedded Multiple Service reply within the
    # send_unit_data encapsulation; per-service offsets are relative to it.
    offset = 50
    position = 50
    number_of_service_replies = unpack_uint(message[offset:offset+2])
    tag_list = []
    for index in range(number_of_service_replies):
        position += 2
        start = offset + unpack_uint(message[position:position+2])
        general_status = unpack_usint(message[start+2:start+3])

        if general_status == 0:  # success for this embedded service
            if typ == "READ":
                data_type = unpack_uint(message[start+4:start+6])
                try:
                    value_begin = start + 6
                    value_end = value_begin + DATA_FUNCTION_SIZE[I_DATA_TYPE[data_type]]
                    value = message[value_begin:value_end]
                    tag_list.append((tags[index],
                                     UNPACK_DATA_FUNCTION[I_DATA_TYPE[data_type]](value),
                                     I_DATA_TYPE[data_type]))
                except LookupError:
                    # unknown data type: report the tag as unreadable
                    tag_list.append((tags[index], None, None))
            else:
                # WRITE: tags[index] is itself a tuple; append status flag
                tag_list.append((tags[index] + ('GOOD',)))
        else:
            if typ == "READ":
                tag_list.append((tags[index], None, None))
            else:
                tag_list.append((tags[index] + ('BAD',)))
    return tag_list
class Socket:
    """Blocking TCP socket wrapper used for EtherNet/IP encapsulation framing.

    receive() first reads the 24-byte encapsulation header, extracts the
    Length field (bytes 2:4, little endian) and then reads until
    HEADER_SIZE + Length bytes have arrived.
    """
    def __init__(self, timeout=5.0):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(timeout)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    def connect(self, host, port):
        """Connect to host:port, mapping a socket timeout to CommError."""
        try:
            self.sock.connect((host, port))
        except socket.timeout:
            raise CommError("Socket timeout during connection.")

    def send(self, msg, timeout=0):
        """Send all of `msg` (bytes); returns the number of bytes sent.

        A non-zero `timeout` overrides the socket timeout for this call.
        """
        if timeout != 0:
            self.sock.settimeout(timeout)
        total_sent = 0
        while total_sent < len(msg):
            try:
                sent = self.sock.send(msg[total_sent:])
                if sent == 0:
                    raise CommError("socket connection broken.")
                total_sent += sent
            except socket.error:
                raise CommError("socket connection broken.")
        return total_sent

    def receive(self, timeout=0):
        """Receive one complete encapsulation packet and return it as bytes."""
        if timeout != 0:
            self.sock.settimeout(timeout)
        msg_len = 28
        chunks = []
        bytes_recd = 0
        one_shot = True
        while bytes_recd < msg_len:
            try:
                chunk = self.sock.recv(min(msg_len - bytes_recd, 2048))
                # Bug fix: compare against b'' — the original compared bytes
                # to the str '' which is never equal on Python 3, so a closed
                # connection was not detected here.
                if chunk == b'':
                    raise CommError("socket connection broken.")
                if one_shot:
                    # NOTE(review): assumes the first chunk holds at least
                    # 4 bytes of the header — confirm against short reads.
                    data_size = int(struct.unpack('<H', chunk[2:4])[0])  # Length
                    msg_len = HEADER_SIZE + data_size
                    one_shot = False

                chunks.append(chunk)
                bytes_recd += len(chunk)
            except socket.error as e:
                raise CommError(e)
        return b''.join(chunks)

    def close(self):
        """Close the underlying socket."""
        self.sock.close()
def parse_symbol_type(symbol):
    """ parse_symbol_type

    It parses the symbol to Rockwell Spec.

    :param symbol: the symbol associated to a tag
    :return: None (placeholder: not yet implemented)
    """
    return None
class Base(object):
_sequence = 0
    def __init__(self):
        """Initialize connection bookkeeping state and default CIP attributes.

        The class-level sequence counter is seeded from the process id on
        first construction, then advanced per instance.
        """
        if Base._sequence == 0:
            Base._sequence = getpid()
        else:
            Base._sequence = Base._get_sequence()

        self.__version__ = '0.3'
        self.__sock = None
        self.__direct_connections = False  # True = direct network, False = via backplane
        self._session = 0
        self._connection_opened = False
        self._reply = None
        self._message = None
        self._target_cid = None
        self._target_is_connected = False
        self._tag_list = []
        self._buffer = {}
        self._device_description = "Device Unknown"
        self._last_instance = 0
        self._byte_offset = 0
        self._last_position = 0
        self._more_packets_available = False
        self._last_tag_read = ()
        self._last_tag_write = ()
        self._status = (0, "")  # (error group, error message)
        self._output_raw = False  # indicating value should be output as raw (hex)
        # Default connection attributes (CIP identity, routing and timing).
        self.attribs = {'context': b'_pycomm_', 'protocol version': 1, 'rpi': 5000, 'port': 0xAF12, 'timeout': 10,
                        'backplane': 1, 'cpu slot': 0, 'option': 0, 'cid': b'\x27\x04\x19\x71', 'csn': b'\x27\x04',
                        'vid': b'\x09\x10', 'vsn': b'\x09\x10\x19\x71', 'name': 'Base', 'ip address': None}
    # Mapping-style access delegates to the attribs dict.
    def __len__(self):
        return len(self.attribs)

    def __getitem__(self, key):
        return self.attribs[key]

    def __setitem__(self, key, value):
        self.attribs[key] = value

    def __delitem__(self, key):
        # deleting a missing key is deliberately a no-op
        try:
            del self.attribs[key]
        except LookupError:
            pass

    def __iter__(self):
        return iter(self.attribs)

    def __contains__(self, item):
        return item in self.attribs
def _check_reply(self):
raise Socket.ImplementationError("The method has not been implemented")
    @staticmethod
    def _get_sequence():
        """ Increase and return the sequence used with connected messages

        Wraps within the 16-bit range (reseeded from the pid on overflow).
        :return: The New sequence
        """
        if Base._sequence < 65535:
            Base._sequence += 1
        else:
            Base._sequence = getpid() % 65535
        return Base._sequence
    def nop(self):
        """ No replay command

        A NOP provides a way for either an originator or target to determine
        if the TCP connection is still open. No response is expected.
        """
        self._message = self.build_header(ENCAPSULATION_COMMAND['nop'], 0)
        self._send()
    def __repr__(self):
        # the discovered device description doubles as the repr
        return self._device_description

    def generate_cid(self):
        """Randomize the connection id (not cryptographically secure)."""
        self.attribs['cid'] = bytes([random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)])

    def generate_vsn(self):
        """Randomize the vendor serial number (not cryptographically secure)."""
        self.attribs['vsn'] = bytes([random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)])

    def description(self):
        """Return the device description captured by list_identity()."""
        return self._device_description
    def list_identity(self):
        """ ListIdentity command to locate and identify potential target

        return true if the replay contains the device description

        NOTE(review): slices the raw reply at fixed offset [63:-1]; on
        Python 3 this yields bytes, not str — confirm downstream usage.
        """
        self._message = self.build_header(ENCAPSULATION_COMMAND['list_identity'], 0)
        self._send()
        self._receive()
        if self._check_reply():
            try:
                self._device_description = self._reply[63:-1]
                return True
            except Exception as e:
                raise CommError(e)
        return False
    def send_rr_data(self, msg):
        """ SendRRData transfer an encapsulated request/reply packet between the originator and target

        Used for unconnected messaging.
        :param msg: The message to be send to the target
        :return: the replay received from the target
        """
        self._message = self.build_header(ENCAPSULATION_COMMAND["send_rr_data"], len(msg))
        self._message += msg
        self._send()
        self._receive()
        return self._check_reply()
    def send_unit_data(self, msg):
        """ SendUnitData send encapsulated connected messages.

        Used after a successful forward_open().
        :param msg: The message to be send to the target
        :return: the replay received from the target
        """
        self._message = self.build_header(ENCAPSULATION_COMMAND["send_unit_data"], len(msg))
        self._message += msg
        self._send()
        self._receive()
        return self._check_reply()
    def get_status(self):
        """ Get the last status/error

        This method can be used after any call to get any details in case of error
        :return: A tuple containing (error group, error message)
        """
        return self._status

    def clear(self):
        """ Clear the last status/error

        :return: None (resets the status to an empty tuple)
        """
        self._status = (0, "")
    def build_header(self, command, length):
        """ Build the encapsulate message header

        The header is 24 bytes fixed length, and includes the command and the
        length of the optional data portion.

        :param command: 2-byte encapsulation command (from ENCAPSULATION_COMMAND)
        :param length: length of the data portion that will follow the header
        :return: the header bytes
        """
        try:
            h = command                                 # Command UINT
            h += pack_uint(length)                      # Length UINT
            h += pack_dint(self._session)               # Session Handle UDINT
            h += pack_dint(0)                           # Status UDINT
            h += self.attribs['context']                # Sender Context 8 bytes
            h += pack_dint(self.attribs['option'])      # Option UDINT
            return h
        except Exception as e:
            raise CommError(e)
    def register_session(self):
        """ Register a new session with the communication partner

        Idempotent: returns the existing session handle if already registered.
        :return: None if any error, otherwise return the session number
        """
        if self._session:
            return self._session

        self._session = 0
        self._message = self.build_header(ENCAPSULATION_COMMAND['register_session'], 4)
        self._message += pack_uint(self.attribs['protocol version'])
        self._message += pack_uint(0)  # options flags, shall be 0
        self._send()
        self._receive()
        if self._check_reply():
            # session handle is echoed back at bytes 4:8 of the reply
            self._session = unpack_dint(self._reply[4:8])
            logger.debug("Session ={0} has been registered.".format(print_bytes_line(self._reply[4:8])))
            return self._session

        self._status = 'Warning ! the session has not been registered.'
        logger.warning(self._status)
        return None
    def forward_open(self):
        """ CIP implementation of the forward open message

        Opens a connected session (required before send_unit_data).
        Refer to ODVA documentation Volume 1 3-5.5.2

        :return: False if any error in the replied message
        :raises CommError: if no session has been registered yet
        """
        if self._session == 0:
            self._status = (4, "A session need to be registered before to call forward_open.")
            raise CommError("A session need to be registered before to call forward open")

        forward_open_msg = [
            FORWARD_OPEN,
            pack_usint(2),
            CLASS_ID["8-bit"],
            CLASS_CODE["Connection Manager"],  # Volume 1: 5-1
            INSTANCE_ID["8-bit"],
            CONNECTION_MANAGER_INSTANCE['Open Request'],
            PRIORITY,
            TIMEOUT_TICKS,
            pack_dint(0),
            self.attribs['cid'],
            self.attribs['csn'],
            self.attribs['vid'],
            self.attribs['vsn'],
            TIMEOUT_MULTIPLIER,
            b'\x00\x00\x00',
            pack_dint(self.attribs['rpi'] * 1000),   # O->T RPI in microseconds
            pack_uint(CONNECTION_PARAMETER['Default']),
            pack_dint(self.attribs['rpi'] * 1000),   # T->O RPI in microseconds
            pack_uint(CONNECTION_PARAMETER['Default']),
            TRANSPORT_CLASS,  # Transport Class
            # CONNECTION_SIZE['Backplane'],
            # pack_usint(self.attribs['backplane']),
            # pack_usint(self.attribs['cpu slot']),
            CLASS_ID["8-bit"],
            CLASS_CODE["Message Router"],
            INSTANCE_ID["8-bit"],
            pack_usint(1)
        ]

        # Insert the connection-path segment at position 20: direct network
        # targets need only the size byte, backplane targets also need the
        # backplane/slot route. (The empty slice [20:1] inserts in place.)
        if self.__direct_connections:
            forward_open_msg[20:1] = [
                CONNECTION_SIZE['Direct Network'],
            ]
        else:
            forward_open_msg[20:3] = [
                CONNECTION_SIZE['Backplane'],
                pack_usint(self.attribs['backplane']),
                pack_usint(self.attribs['cpu slot'])
            ]

        if self.send_rr_data(
                build_common_packet_format(DATA_ITEM['Unconnected'], b''.join(forward_open_msg), ADDRESS_ITEM['UCMM'],)):
            # target connection id is echoed at bytes 44:48 of the reply
            self._target_cid = self._reply[44:48]
            self._target_is_connected = True
            return True
        self._status = (4, "forward_open returned False")
        return False
def forward_close(self):
    """ CIP implementation of the forward close message

    Each connection opened with the forward open message needs to be closed.
    Sends a Forward Close request as an unconnected (UCMM) message and, on
    success, marks the target as no longer connected.
    Refer to ODVA documentation Volume 1 3-5.5.3
    :return: False if any error in the replayed message
    :raise CommError: if no session has been registered yet
    """
    if self._session == 0:
        self._status = (5, "A session need to be registered before to call forward_close.")
        raise CommError("A session need to be registered before to call forward_close.")
    forward_close_msg = [
        FORWARD_CLOSE,
        pack_usint(2),
        CLASS_ID["8-bit"],
        CLASS_CODE["Connection Manager"],  # Volume 1: 5-1
        INSTANCE_ID["8-bit"],
        CONNECTION_MANAGER_INSTANCE['Open Request'],
        PRIORITY,
        TIMEOUT_TICKS,
        self.attribs['csn'],
        self.attribs['vid'],
        self.attribs['vsn'],
        # CONNECTION_SIZE['Backplane'],
        # '\x00', # Reserved
        # pack_usint(self.attribs['backplane']),
        # pack_usint(self.attribs['cpu slot']),
        CLASS_ID["8-bit"],
        CLASS_CODE["Message Router"],
        INSTANCE_ID["8-bit"],
        pack_usint(1)
    ]
    # Insert the connection path (size byte, reserved byte and, for
    # backplane routing, the slot path) at index 11.  The "end < start"
    # slices are empty, so these assignments insert rather than replace.
    if self.__direct_connections:
        forward_close_msg[11:2] = [
            CONNECTION_SIZE['Direct Network'],
            b'\x00'
        ]
    else:
        forward_close_msg[11:4] = [
            CONNECTION_SIZE['Backplane'],
            b'\x00',
            pack_usint(self.attribs['backplane']),
            pack_usint(self.attribs['cpu slot'])
        ]
    if self.send_rr_data(
            build_common_packet_format(DATA_ITEM['Unconnected'], b''.join(forward_close_msg), ADDRESS_ITEM['UCMM'])):
        self._target_is_connected = False
        return True
    self._status = (5, "forward_close returned False")
    logger.warning(self._status)
    return False
def un_register_session(self):
    """ Un-register a connection

    Sends the EtherNet/IP UnRegisterSession encapsulation command (no reply
    is read) and forgets the local session handle.
    """
    self._message = self.build_header(ENCAPSULATION_COMMAND['unregister_session'], 0)
    self._send()
    # NOTE(review): the handle is reset to None here, while clean_up() and
    # the "self._session == 0"/"!= 0" checks elsewhere use 0 as the
    # "no session" value -- confirm callers treat both consistently.
    self._session = None
def _send(self):
    """
    socket send

    Writes the prepared ``self._message`` buffer to the socket, logging the
    raw bytes first.
    :return: None; a failure is raised, not returned
    :raise CommError: wrapping any socket-level exception
    """
    try:
        logger.debug(print_bytes_msg(self._message, '-------------- SEND --------------'))
        self.__sock.send(self._message)
    except Exception as e:
        # self.clean_up()
        raise CommError(e)
def _receive(self):
    """
    socket receive

    Reads a reply from the socket into ``self._reply`` and logs the raw
    bytes.
    :return: None; a failure is raised, not returned
    :raise CommError: wrapping any socket-level exception
    """
    try:
        self._reply = self.__sock.receive()
        logger.debug(print_bytes_msg(self._reply, '----------- RECEIVE -----------'))
    except Exception as e:
        # self.clean_up()
        raise CommError(e)
def open(self, ip_address, direct_connection=False):
    """
    Open the socket connection to the target and register a session.

    :param ip_address: address of the target to connect to
    :param direct_connection: True for a direct network connection, False
        (default) to route through the backplane
    :return: True if the connection is (or already was) established,
        False if the session could not be registered
    :raise CommError: on any socket/protocol error during connection
    """
    # set type of connection needed
    self.__direct_connections = direct_connection

    # handle the socket layer
    if not self._connection_opened:
        try:
            if self.__sock is None:
                self.__sock = Socket()
            self.__sock.connect(ip_address, self.attribs['port'])
            self._connection_opened = True
            self.attribs['ip address'] = ip_address
            self.generate_cid()
            self.generate_vsn()
            if self.register_session() is None:
                self._status = (13, "Session not registered")
                return False
            # not sure but maybe I can remove this because is used to clean
            # up any previous unclosed connection
            self.forward_close()
            return True
        except Exception as e:
            # self.clean_up()
            raise CommError(e)
    # Bug fix: this path previously fell through and returned None, which is
    # falsy and made an already-open connection look like a failure to any
    # caller checking the documented boolean result.
    return True
def close(self):
    """
    Close the connection.

    Tears down any open CIP connection, un-registers the session and closes
    the socket.  Errors from each stage are collected and reported together
    after local state has been reset via clean_up().

    :raise CommError: if any stage failed (raised only after cleanup)
    """
    error_string = ''
    try:
        if self._target_is_connected:
            self.forward_close()
        if self._session != 0:
            self.un_register_session()
    except Exception as e:
        # Bug fix: ``e.message`` is Python-2-only (and deprecated there);
        # on Python 3 it raises AttributeError inside the handler.
        # Formatting the exception itself works on both versions.
        error_string += "Error on close() -> session Err: %s" % e
        logger.warning(error_string)

    # %GLA must do a cleanup __sock.close()
    try:
        if self.__sock:
            self.__sock.close()
    except Exception as e:
        error_string += "; close() -> __sock.close Err: %s" % e
        logger.warning(error_string)

    self.clean_up()

    if error_string:
        raise CommError(error_string)
def clean_up(self):
    """Reset all connection-related state to its disconnected defaults."""
    # The assignments are independent of one another; mark every layer as
    # torn down and drop the socket reference.
    self._connection_opened = False
    self._target_is_connected = False
    self._session = 0
    self.__sock = None
def is_connected(self):
    """Return True if the socket connection is currently open."""
    return self._connection_opened
| 31.654749 | 133 | 0.582224 |
ace16a233ccd415e1702c623d0cecfcd66af1520 | 15,261 | py | Python | pytests/ent_backup_restore/enterprise_bkrs_collection.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | pytests/ent_backup_restore/enterprise_bkrs_collection.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | pytests/ent_backup_restore/enterprise_bkrs_collection.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | import re, copy, json, subprocess
from random import randrange, randint, choice
from threading import Thread
from collection.collections_cli_client import CollectionsCLI
from collection.collections_rest_client import CollectionsRest
from collection.collections_n1ql_client import CollectionsN1QL
from lib.couchbase_helper.documentgenerator import SDKDataLoader
from couchbase_helper.cluster import Cluster
from membase.helper.rebalance_helper import RebalanceHelper
from couchbase_helper.documentgenerator import BlobGenerator, DocumentGenerator
from ent_backup_restore.enterprise_bkrs_collection_base import EnterpriseBackupRestoreCollectionBase
from membase.api.rest_client import RestConnection, RestHelper, Bucket
from membase.helper.bucket_helper import BucketOperationHelper
from pytests.query_tests_helper import QueryHelperTests
from remote.remote_util import RemoteUtilHelper, RemoteMachineShellConnection
from security.auditmain import audit
from security.rbac_base import RbacBase
from upgrade.newupgradebasetest import NewUpgradeBaseTest
from couchbase.bucket import Bucket
from couchbase_helper.document import View
from eventing.eventing_base import EventingBaseTest
from tasks.future import Future, TimeoutError
from xdcr.xdcrnewbasetests import NodeHelper
from couchbase_helper.stats_tools import StatsCommon
from testconstants import COUCHBASE_DATA_PATH, WIN_COUCHBASE_DATA_PATH, \
COUCHBASE_FROM_4DOT6, ENT_BKRS, ENT_BKRS_FTS
# Audit event ids emitted for backup and restore operations.
AUDITBACKUPID = 20480
AUDITRESTOREID = 20485

# Default connection/tuning parameters handed to a Couchbase data source
# definition (referenced by INDEX_DEFINITION below).
SOURCE_CB_PARAMS = {
    "authUser": "default",
    "authPassword": "",
    "authSaslUser": "",
    "authSaslPassword": "",
    "clusterManagerBackoffFactor": 0,
    "clusterManagerSleepInitMS": 0,
    "clusterManagerSleepMaxMS": 20000,
    "dataManagerBackoffFactor": 0,
    "dataManagerSleepInitMS": 0,
    "dataManagerSleepMaxMS": 20000,
    "feedBufferSizeBytes": 0,
    "feedBufferAckThreshold": 0
}
# Skeleton full-text-search index definition; tests fill in name/uuid/params.
INDEX_DEFINITION = {
    "type": "fulltext-index",
    "name": "",
    "uuid": "",
    "params": {},
    "sourceType": "couchbase",
    "sourceName": "default",
    "sourceUUID": "",
    "sourceParams": SOURCE_CB_PARAMS,
    "planParams": {}
}
class EnterpriseBackupRestoreCollectionTest(EnterpriseBackupRestoreCollectionBase, NewUpgradeBaseTest):
def setUp(self):
    """Prepare the backup/restore fixture.

    Kills any leftover cbbackupmgr process on the backup and restore hosts
    so a previous run cannot interfere with this one.
    """
    super().setUp()
    # In the ini parameter "ALL" is shorthand for the '*' wildcard.
    self.users_check_restore = \
        self.input.param("users-check-restore", '').replace("ALL", "*").split(";")
    if '' in self.users_check_restore:
        self.users_check_restore.remove('')
    for server in [self.backupset.backup_host, self.backupset.restore_cluster_host]:
        conn = RemoteMachineShellConnection(server)
        conn.extract_remote_info()
        conn.terminate_processes(conn.info, ["cbbackupmgr"])
        conn.disconnect()
    self.bucket_helper = BucketOperationHelper()
    self.bucket_map_collection = ""
def tearDown(self):
    """Delegate cleanup to the base test class.

    Consistency fix: uses the zero-argument ``super()`` form, matching
    ``setUp`` above (this module already relies on Python 3 syntax).
    """
    super().tearDown()
def test_backup_create(self):
    """Smoke test: creating a backup repository succeeds and validates."""
    self.backup_create_validate()
def test_backup_restore_collection_sanity(self):
    """
    1. Create default bucket on the cluster and loads it with given number of items
    2. Perform updates and create backups for specified number of times (test param number_of_backups)
    3. Perform restores for the same number of times with random start and end values
    """
    self.log.info("*** create collection in all buckets")
    self.log.info("*** start to load items to all buckets")
    self.active_resident_threshold = 100
    self.load_all_buckets(self.backupset.cluster_host)
    self.log.info("*** done to load items to all buckets")
    self.ops_type = self.input.param("ops-type", "update")
    self.expected_error = self.input.param("expected_error", None)
    self.create_scope_cluster_host()
    self.create_collection_cluster_host(self.backupset.col_per_scope)
    backup_scopes = self.get_bucket_scope_cluster_host()
    # Collect the ids of all non-default scopes (used to pick a load target).
    scopes_id = []
    for scope in backup_scopes:
        if scope == "_default":
            continue
        scopes_id.append(self.get_scopes_id_cluster_host(scope))
    """ remove null and empty element """
    scopes_id = [i for i in scopes_id if i]
    # NOTE(review): backup_collections, col_stats and bk_scope_id below are
    # computed but never used in this test.
    backup_collections = self.get_bucket_collection_cluster_host()
    col_stats = self.get_collection_stats_cluster_host()
    for backup_scope in backup_scopes:
        bk_scope_id = self.get_scopes_id_cluster_host(backup_scope)
    if self.auto_failover:
        self.log.info("Enabling auto failover on " + str(self.backupset.cluster_host))
        rest_conn = RestConnection(self.backupset.cluster_host)
        rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)
    self.backup_create_validate()
    # Take number_of_backups incremental backups, mutating data in between.
    for i in range(1, self.backupset.number_of_backups + 1):
        if self.ops_type == "update":
            self.log.info("*** start to update items in all buckets")
            col_cmd = ""
            if self.backupset.load_to_collection:
                self.backupset.load_scope_id = choice(scopes_id)
                col_cmd = " -c {0} ".format(self.backupset.load_scope_id)
            self.load_all_buckets(self.backupset.cluster_host, ratio=0.1,
                                  command_options=col_cmd)
            self.log.info("*** done update items in all buckets")
            self.sleep(10)
        self.log.info("*** start to validate backup cluster")
        self.backup_cluster_validate()
    self.targetMaster = True
    # Pick a random (start, end) backup window for each restore pass.
    start = randrange(1, self.backupset.number_of_backups + 1)
    if start == self.backupset.number_of_backups:
        end = start
    else:
        end = randrange(start, self.backupset.number_of_backups + 1)
    self.log.info("*** start to restore cluster")
    restored = {"{0}/{1}".format(start, end): ""}
    for i in range(1, self.backupset.number_of_backups + 1):
        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
            else:
                shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
                shell.enable_diag_eval_on_non_local_hosts()
                shell.disconnect()
                rest = RestConnection(self.backupset.restore_cluster_host)
                rest.force_eject_node()
                rest.init_node()
            self.log.info("Done reset cluster")
        self.sleep(10)
        """ Add built-in user cbadminbucket to second cluster """
        self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])
        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        # Build a "source=destination" scope mapping, skipping default scopes.
        data_map_collection = []
        for scope in backup_scopes:
            if "default" in scope:
                continue
            data_map_collection.append(self.buckets[0].name + "." + scope + "=" + \
                                       self.buckets[0].name + "." + scope)
        self.bucket_map_collection = ",".join(data_map_collection)
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)
        if self.backupset.number_of_backups == 1:
            continue
        # Re-roll until we get a (start, end) window not restored before.
        while "{0}/{1}".format(start, end) in restored:
            start = randrange(1, self.backupset.number_of_backups + 1)
            if start == self.backupset.number_of_backups:
                end = start
            else:
                end = randrange(start, self.backupset.number_of_backups + 1)
        restored["{0}/{1}".format(start, end)] = ""
    # NOTE(review): restore_scopes/restore_collections are unused; the real
    # verification happens inside verify_collections_in_restore_cluster_host.
    restore_scopes = self.get_bucket_scope_restore_cluster_host()
    restore_collections = self.get_bucket_collection_restore_cluster_host()
    self.verify_collections_in_restore_cluster_host()
def test_backup_merge_collection_sanity(self):
    """
    1. Creates specified bucket on the cluster and loads it with given number of items
    2. Takes specified number of backups (param number_of_backups - should be atleast 2 for this test case)
    3. Executes list command and validates if all backups are present
    4. Randomly selects a start and end and merges the backups
    5. Executes info command again and validates if the new merges set of backups are listed
    """
    if self.backupset.number_of_backups < 2:
        self.fail("Need number_of_backups >= 2")
    self.create_scope_cluster_host()
    self.create_collection_cluster_host(self.backupset.col_per_scope)
    scopes = self.get_bucket_scope_cluster_host()
    scopes_id = []
    for scope in scopes:
        if scope == "_default":
            continue
        scopes_id.append(self.get_scopes_id_cluster_host(scope))
    """ remove null and empty element """
    scopes_id = [i for i in scopes_id if i]
    col_cmd = ""
    if self.backupset.load_to_collection:
        self.backupset.load_scope_id = choice(scopes_id)
        col_cmd = " -c {0} ".format(self.backupset.load_scope_id)
    self.load_all_buckets(self.backupset.cluster_host, ratio=0.9,
                          command_options=col_cmd)
    self.backup_create()
    self._take_n_backups(n=self.backupset.number_of_backups)
    status, output, message = self.backup_info()
    if not status:
        self.fail(message)
    backup_count = 0
    """ remove last 6 chars of offset time in backup name"""
    # NOTE(review): strip_backupset is only bound when backup names end in
    # "_00"; the final assertEqual below would raise NameError otherwise.
    if self.backups and self.backups[0][-3:] == "_00":
        strip_backupset = [s[:-6] for s in self.backups]
    if output and output[0]:
        bk_info = json.loads(output[0])
    else:
        # NOTE(review): returning from a test method makes it *pass*
        # silently; this should arguably be self.fail("No output content").
        return False, "No output content"
    if bk_info["backups"]:
        for i in range(0, len(bk_info["backups"])):
            backup_name = bk_info["backups"][i]["date"]
            if self.debug_logs:
                print("backup name ", backup_name)
                print("backup set ", strip_backupset)
            if backup_name in self.backups:
                backup_count += 1
                self.log.info("{0} matched in info command output".format(backup_name))
    self.assertEqual(backup_count, len(self.backups), "Initial number of backups did not match")
    self.log.info("Initial number of backups matched")
    # Merge a random contiguous range of backups.
    self.backupset.start = randrange(1, self.backupset.number_of_backups)
    self.backupset.end = randrange(self.backupset.start + 1, self.backupset.number_of_backups + 1)
    status, output, message = self.backup_merge()
    if not status:
        self.fail(message)
    status, output, message = self.backup_info()
    if not status:
        self.fail(message)
    backup_count = 0
    if output and output[0]:
        bk_info = json.loads(output[0])
    else:
        # NOTE(review): same silent-pass concern as above.
        return False, "No output content"
    """ remove last 6 chars of offset time in backup name"""
    if self.backups and self.backups[0][-3:] == "_00":
        strip_backupset = [s[:-6] for s in self.backups]
    if bk_info["backups"]:
        for i in range(0, len(bk_info["backups"])):
            backup_name = bk_info["backups"][i]["date"]
            if self.debug_logs:
                print("backup name ", backup_name)
                print("backup set ", strip_backupset)
            if backup_name in self.backups:
                backup_count += 1
                self.log.info("{0} matched in info command output".format(backup_name))
    self.assertEqual(backup_count, len(strip_backupset), "Merged number of backups did not match")
    self.log.info("Merged number of backups matched")
def _take_n_backups(self, n=1, validate=False):
    """Take *n* backups, optionally validating each one."""
    # ``validate`` cannot change mid-run, so pick the action once up front.
    take_backup = self.backup_cluster_validate if validate else self.backup_cluster
    for _ in range(n):
        take_backup()
def test_bkrs_collection_info(self):
    """
    1. Creates specified bucket on the cluster and loads it with given number of items
    2. Creates a backup and validates it
    3. Executes list command on the backupset and validates the output
    """
    self.create_scope_cluster_host()
    self.create_collection_cluster_host(self.backupset.col_per_scope)
    scopes = self.get_bucket_scope_cluster_host()
    scopes_id = []
    for scope in scopes:
        if scope == "_default":
            continue
        scopes_id.append(self.get_scopes_id_cluster_host(scope))
    """ remove null and empty element """
    scopes_id = [i for i in scopes_id if i]
    self.backup_create()
    col_cmd = ""
    if self.backupset.load_to_collection:
        self.backupset.load_scope_id = choice(scopes_id)
        col_cmd = " -c {0} ".format(self.backupset.load_scope_id)
    self.load_all_buckets(self.backupset.cluster_host, ratio=0.1,
                          command_options=col_cmd)
    for i in range(1, self.backupset.number_of_backups + 1):
        if self.ops_type == "update":
            self.log.info("*** start to update items in all buckets")
            self.load_all_buckets(self.backupset.cluster_host, ratio=0.1,
                                  command_options=col_cmd)
            self.log.info("*** done update items in all buckets")
            self.sleep(10)
        self.log.info("*** start to validate backup cluster")
        self.backup_cluster_validate()
    # Info output must list the scopes/collections that were backed up.
    scopes = self.get_bucket_scope_cluster_host()
    collections = self.get_bucket_collection_cluster_host()
    self.backup_info_validate(scopes, collections)
def _kill_cbbackupmgr(self):
    """
    kill all cbbackupmgr processes on the backup host (POSIX and Windows).
    """
    self.sleep(1, "times need for cbbackupmgr process run")
    shell = RemoteMachineShellConnection(self.backupset.backup_host)
    if self.os_name != "windows":
        # POSIX: find the pids and kill them in one pipeline.
        cmd = "ps aux | grep cbbackupmgr | gawk '{print $2}' | xargs kill -9"
        output, _ = shell.execute_command(cmd)
    else:
        # Windows: look the pid up first, then taskkill the process tree.
        cmd = "tasklist | grep cbbackupmgr | gawk '{printf$2}'"
        output, _ = shell.execute_command(cmd)
        if output:
            kill_cmd = "taskkill /F /T /pid %d " % int(output[0])
            output, _ = shell.execute_command(kill_cmd)
            if output and "SUCCESS" not in output[0]:
                self.fail("Failed to kill cbbackupmgr on windows")
shell.disconnect() | 47.990566 | 111 | 0.629841 |
ace16a45bfe0653ff713c607bb9e83951d176b5b | 12,289 | py | Python | tests/sentry/api/endpoints/test_organization_dashboard_details.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | tests/sentry/api/endpoints/test_organization_dashboard_details.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/endpoints/test_organization_dashboard_details.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.models import (
Dashboard,
ObjectStatus,
Widget,
WidgetDataSource,
WidgetDataSourceTypes,
WidgetDisplayTypes,
)
from sentry.testutils import APITestCase
class OrganizationDashboardDetailsTestCase(APITestCase):
    """Shared fixture and assertion helpers for the dashboard-details endpoint.

    Creates one dashboard owned by ``self.user`` with two widgets; widget 1
    carries two data sources, widget 2 one.
    """

    def setUp(self):
        super(OrganizationDashboardDetailsTestCase, self).setUp()
        self.login_as(self.user)
        self.dashboard = Dashboard.objects.create(
            title="Dashboard 1", created_by=self.user, organization=self.organization
        )
        self.widget_1 = Widget.objects.create(
            dashboard=self.dashboard,
            order=1,
            title="Widget 1",
            display_type=WidgetDisplayTypes.LINE_CHART,
        )
        self.widget_2 = Widget.objects.create(
            dashboard=self.dashboard,
            order=2,
            title="Widget 2",
            display_type=WidgetDisplayTypes.TABLE,
        )
        # Saved discover queries used as widget data-source payloads below.
        self.anon_users_query = {
            "name": "anonymousUsersAffectedQuery",
            "fields": [],
            "conditions": [["user.email", "IS NULL", None]],
            "aggregations": [["count()", None, "Anonymous Users"]],
            "limit": 1000,
            "orderby": "-time",
            "groupby": ["time"],
            "rollup": 86400,
        }
        self.known_users_query = {
            "name": "knownUsersAffectedQuery",
            "fields": [],
            "conditions": [["user.email", "IS NOT NULL", None]],
            "aggregations": [["uniq", "user.email", "Known Users"]],
            "limit": 1000,
            "orderby": "-time",
            "groupby": ["time"],
            "rollup": 86400,
        }
        # NOTE(review): "geo_erorrs_query" is a typo for "geo_errors_query";
        # kept as-is because subclasses may reference the attribute.
        self.geo_erorrs_query = {
            "name": "errorsByGeo",
            "fields": ["geo.country_code"],
            "conditions": [["geo.country_code", "IS NOT NULL", None]],
            "aggregations": [["count()", None, "count"]],
            "limit": 10,
            "orderby": "-count",
            "groupby": ["geo.country_code"],
        }
        self.widget_1_data_1 = WidgetDataSource.objects.create(
            widget=self.widget_1,
            type=WidgetDataSourceTypes.DISCOVER_SAVED_SEARCH,
            name="anonymousUsersAffectedQuery",
            data=self.anon_users_query,
            order=1,
        )
        self.widget_1_data_2 = WidgetDataSource.objects.create(
            widget=self.widget_1,
            type=WidgetDataSourceTypes.DISCOVER_SAVED_SEARCH,
            name="knownUsersAffectedQuery",
            data=self.known_users_query,
            order=2,
        )
        self.widget_2_data_1 = WidgetDataSource.objects.create(
            widget=self.widget_2,
            type=WidgetDataSourceTypes.DISCOVER_SAVED_SEARCH,
            name="errorsByGeo",
            data=self.geo_erorrs_query,
            order=1,
        )

    def url(self, dashboard_id):
        # Endpoint under test, parameterized by dashboard id.
        return reverse(
            "sentry-api-0-organization-dashboard-details",
            kwargs={"organization_slug": self.organization.slug, "dashboard_id": dashboard_id},
        )

    def sort_by_order(self, widgets):
        # Works for both serialized dicts (x["order"]) and model objects
        # (x.order); indexing a model raises TypeError, hence the fallback.
        def get_order(x):
            try:
                return x["order"]
            except TypeError:
                return x.order
        return sorted(widgets, key=get_order)

    def assert_widget(self, data, expected_widget):
        # Compare a serialized widget payload against a Widget model row.
        assert data["id"] == six.text_type(expected_widget.id)
        assert data["title"] == expected_widget.title
        assert data["displayType"] == WidgetDisplayTypes.get_type_name(expected_widget.display_type)
        assert data["displayOptions"] == expected_widget.display_options

    def assert_dashboard(self, data, dashboard):
        # Compare a serialized dashboard payload against a Dashboard row.
        assert data["id"] == six.text_type(dashboard.id)
        assert data["organization"] == six.text_type(dashboard.organization.id)
        assert data["title"] == dashboard.title
        assert data["createdBy"] == six.text_type(dashboard.created_by.id)

    def assert_widget_data_source(self, data, widget_data_source):
        # Compare a serialized data-source payload against its model row.
        assert data["id"] == six.text_type(widget_data_source.id)
        assert data["type"] == widget_data_source.type
        assert data["name"] == widget_data_source.name
        assert data["data"] == widget_data_source.data
        assert data["order"] == six.text_type(widget_data_source.order)
class OrganizationDashboardDetailsGetTest(OrganizationDashboardDetailsTestCase):
    """GET behaviour of the dashboard-details endpoint."""

    def test_get(self):
        response = self.client.get(self.url(self.dashboard.id))
        assert response.status_code == 200, response.content
        self.assert_dashboard(response.data, self.dashboard)
        # Both widgets come back, each with its data sources attached.
        assert len(response.data["widgets"]) == 2
        widgets = self.sort_by_order(response.data["widgets"])
        self.assert_widget(widgets[0], self.widget_1)
        self.assert_widget(widgets[1], self.widget_2)
        widget_1_data_sources = self.sort_by_order(widgets[0]["dataSources"])
        assert len(widget_1_data_sources) == 2
        self.assert_widget_data_source(widget_1_data_sources[0], self.widget_1_data_1)
        self.assert_widget_data_source(widget_1_data_sources[1], self.widget_1_data_2)
        assert len(widgets[1]["dataSources"]) == 1
        self.assert_widget_data_source(widgets[1]["dataSources"][0], self.widget_2_data_1)

    def test_dashboard_does_not_exist(self):
        # Unknown dashboard ids must 404.
        response = self.client.get(self.url(1234567890))
        assert response.status_code == 404
        assert response.data == {u"detail": "The requested resource does not exist"}
class OrganizationDashboardDetailsDeleteTest(OrganizationDashboardDetailsTestCase):
    """DELETE flags the dashboard for deferred deletion rather than removing it."""

    def test_delete(self):
        response = self.client.delete(self.url(self.dashboard.id))
        assert response.status_code == 204
        # The row still exists; only its status changes.
        assert Dashboard.objects.get(id=self.dashboard.id).status == ObjectStatus.PENDING_DELETION

    def test_dashboard_does_not_exist(self):
        response = self.client.delete(self.url(1234567890))
        assert response.status_code == 404
        assert response.data == {u"detail": "The requested resource does not exist"}
class OrganizationDashboardDetailsPutTest(OrganizationDashboardDetailsTestCase):
    """PUT semantics: retitling, widget reordering, and implicit widget deletion."""

    def setUp(self):
        super(OrganizationDashboardDetailsPutTest, self).setUp()
        # Two extra widgets so reordering tests operate on four in total.
        self.widget_3 = Widget.objects.create(
            dashboard=self.dashboard,
            order=3,
            title="Widget 3",
            display_type=WidgetDisplayTypes.LINE_CHART,
        )
        self.widget_4 = Widget.objects.create(
            dashboard=self.dashboard,
            order=4,
            title="Widget 4",
            display_type=WidgetDisplayTypes.LINE_CHART,
        )
        self.widget_ids = [self.widget_1.id, self.widget_2.id, self.widget_3.id, self.widget_4.id]

    def assert_no_changes(self):
        # The fixture starts with widgets 1-4 in order 1-4.
        self.assert_dashboard_and_widgets(self.widget_ids, [1, 2, 3, 4])

    def assert_dashboard_and_widgets(self, widget_ids, order):
        assert Dashboard.objects.filter(
            organization=self.organization, id=self.dashboard.id
        ).exists()
        widgets = self.sort_by_order(
            Widget.objects.filter(dashboard_id=self.dashboard.id, status=ObjectStatus.VISIBLE)
        )
        assert len(widgets) == len(list(widget_ids))
        # NOTE(review): the loop variables shadow the builtin ``id`` and the
        # ``order`` parameter; harmless here but worth renaming.
        for widget, id, order in zip(widgets, widget_ids, order):
            assert widget.id == id
            assert widget.order == order

    def test_put(self):
        # Title change plus a full reversal of widget order in one request.
        response = self.client.put(
            self.url(self.dashboard.id),
            data={
                "title": "Changed the title",
                "widgets": [
                    {"order": 4, "id": self.widget_1.id},
                    {"order": 3, "id": self.widget_2.id},
                    {"order": 2, "id": self.widget_3.id},
                    {"order": 1, "id": self.widget_4.id},
                ],
            },
        )
        assert response.status_code == 200
        # Reordering assigns fresh order values (5-8) rather than reusing 1-4.
        self.assert_dashboard_and_widgets(reversed(self.widget_ids), [5, 6, 7, 8])

    def test_change_dashboard_title(self):
        response = self.client.put(self.url(self.dashboard.id), data={"title": "Dashboard Hello"})
        assert response.status_code == 200
        assert Dashboard.objects.filter(
            title="Dashboard Hello", organization=self.organization, id=self.dashboard.id
        ).exists()

    def test_reorder_widgets(self):
        response = self.client.put(
            self.url(self.dashboard.id),
            data={
                "widgets": [
                    {"order": 4, "id": self.widget_1.id},
                    {"order": 3, "id": self.widget_2.id},
                    {"order": 2, "id": self.widget_3.id},
                    {"order": 1, "id": self.widget_4.id},
                ]
            },
        )
        assert response.status_code == 200
        self.assert_dashboard_and_widgets(reversed(self.widget_ids), [5, 6, 7, 8])

    def test_dashboard_does_not_exist(self):
        response = self.client.put(self.url(1234567890))
        assert response.status_code == 404
        assert response.data == {u"detail": u"The requested resource does not exist"}

    def test_duplicate_order(self):
        # Duplicate order values must be rejected and leave the data untouched.
        response = self.client.put(
            self.url(self.dashboard.id),
            data={
                "widgets": [
                    {"order": 4, "id": self.widget_1.id},
                    {"order": 4, "id": self.widget_2.id},
                    {"order": 2, "id": self.widget_3.id},
                    {"order": 1, "id": self.widget_4.id},
                ]
            },
        )
        assert response.status_code == 400
        assert response.data == {"widgets": [u"Widgets must not have duplicate order values."]}
        self.assert_no_changes()

    def test_partial_reordering_deletes_widgets(self):
        # Widgets omitted from the payload are removed (with their sources).
        response = self.client.put(
            self.url(self.dashboard.id),
            data={
                "title": "Changed the title",
                "widgets": [
                    {"order": 2, "id": self.widget_3.id},
                    {"order": 1, "id": self.widget_4.id},
                ],
            },
        )
        assert response.status_code == 200
        self.assert_dashboard_and_widgets([self.widget_4.id, self.widget_3.id], [5, 6])
        deleted_widget_ids = [self.widget_1.id, self.widget_2.id]
        assert not Widget.objects.filter(id__in=deleted_widget_ids).exists()
        assert not WidgetDataSource.objects.filter(widget_id__in=deleted_widget_ids).exists()

    def test_widget_does_not_belong_to_dashboard(self):
        # A widget from another dashboard must not be accepted in the payload.
        widget = Widget.objects.create(
            order=5,
            dashboard=Dashboard.objects.create(
                organization=self.organization, title="Dashboard 2", created_by=self.user
            ),
            title="Widget 200",
            display_type=WidgetDisplayTypes.LINE_CHART,
        )
        response = self.client.put(
            self.url(self.dashboard.id),
            data={
                "widgets": [
                    {"order": 5, "id": self.widget_1.id},
                    {"order": 4, "id": self.widget_2.id},
                    {"order": 3, "id": self.widget_3.id},
                    {"order": 2, "id": self.widget_4.id},
                    {"order": 1, "id": widget.id},
                ]
            },
        )
        assert response.status_code == 400
        assert response.data == {
            "widgets": [u"All widgets must exist within this dashboard prior to reordering."]
        }
        self.assert_no_changes()

    def test_widget_does_not_exist(self):
        response = self.client.put(
            self.url(self.dashboard.id),
            data={
                "widgets": [
                    {"order": 5, "id": self.widget_1.id},
                    {"order": 4, "id": self.widget_2.id},
                    {"order": 3, "id": self.widget_3.id},
                    {"order": 2, "id": self.widget_4.id},
                    {"order": 1, "id": 1234567890},
                ]
            },
        )
        assert response.status_code == 400
        assert response.data == {
            "widgets": [u"All widgets must exist within this dashboard prior to reordering."]
        }
        self.assert_no_changes()
| 39.136943 | 100 | 0.584181 |
ace16b485dc4de91c6aa8ff175bc57e0f18d5f09 | 819 | py | Python | com/skillbox/sololearn/examples/variables.py | aruytehno/python-sololearn | 3f20a8f5b9c79c28b22991bd1d8e642ca74ca46b | [
"MIT"
] | null | null | null | com/skillbox/sololearn/examples/variables.py | aruytehno/python-sololearn | 3f20a8f5b9c79c28b22991bd1d8e642ca74ca46b | [
"MIT"
] | null | null | null | com/skillbox/sololearn/examples/variables.py | aruytehno/python-sololearn | 3f20a8f5b9c79c28b22991bd1d8e642ca74ca46b | [
"MIT"
] | null | null | null | # https://www.tutorialspoint.com/python/python_variable_types.htm
# Tutorial script demonstrating basic Python variable types, string
# operations and slicing (prints only; no definitions).
counter = 100  # integer
miles = 1000.0  # floating point
name = "John"  # string
print(counter)
print(miles)
print(name)

string = 'Hello World!'
print(string)  # prints the whole string
print(string[0])  # prints the first character of the string
print(string[2:5])  # prints characters 3 through 5
print(string[2:])  # prints the string starting from the 3rd character
print(string * 2)  # prints the string twice
print(string + "TEST")  # string concatenation

# get type
user_id = "12tomsmith438"
print(type(user_id))  # <class 'str'>
user_id = 234
print(type(user_id))  # <class 'int'>

# Slices
s = 'abcdefg'
print(s[1])  # second character
print(s[-1])  # last character
print(s[1:3])  # characters 2-3
print(s[1:-1])  # everything but the first and last characters
print(s[:3])  # first three characters
print(s[2:])  # from the 3rd character on
print(s[:-1])  # everything but the last character
print(s[::2])  # every second character
print(s[1::2])  # every second character, starting from the 2nd
print(s[::-1])  # the string reversed
| 21.552632 | 69 | 0.678877 |
ace16bd3ac1f1fc6d07801ccba07a3c9ec8d1e8c | 1,709 | py | Python | deployment/deploy.py | cacay/elm-cacay | ed137898e77b375e93374fda0a33c2b54248be8c | [
"MIT"
] | 1 | 2020-09-02T03:44:15.000Z | 2020-09-02T03:44:15.000Z | deployment/deploy.py | cacay/elm-cacay | ed137898e77b375e93374fda0a33c2b54248be8c | [
"MIT"
] | null | null | null | deployment/deploy.py | cacay/elm-cacay | ed137898e77b375e93374fda0a33c2b54248be8c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import os.path
import subprocess
import utility
class InvalidGitHashException(Exception):
    """Raised for a string that is not a valid Git hash."""

    def __init__(self, hash):
        # Build the message inline and hand it straight to Exception.
        super(InvalidGitHashException, self).__init__(
            '"{}" is not a valid Git hash string'.format(hash))
class InvalidCommitException(Exception):
    """Raised when a commit-ish cannot be resolved to a commit."""

    def __init__(self, commit):
        # Build the message inline and hand it straight to Exception.
        super(InvalidCommitException, self).__init__(
            '"{}" is not a valid commit'.format(commit))
def rev_parse_commit(commit):
    """Resolve *commit* to a commit SHA via ``git rev-parse``.

    :raise InvalidCommitException: when git cannot resolve it.
    """
    rev_parse_cmd = ['git', 'rev-parse', '--verify', '--quiet',
                     commit + '^{commit}']
    try:
        return utility.sh(rev_parse_cmd)
    except subprocess.CalledProcessError:
        raise InvalidCommitException(commit)
# Make sure that all changes have been committed
def ensure_clean():
    """Abort when the working tree has uncommitted (tracked) changes."""
    status = utility.sh(['git', 'status', '--porcelain', '--untracked-files=no'])
    if status != "":
        raise Exception("You have uncommitted changes! Commit before deploying.")
def main():
    """Build a deploy commit containing only the runtime artifacts and push
    it to the ``deploy`` branch.

    A temporary GIT_INDEX_FILE is used so the user's real index is never
    touched.
    """
    ensure_clean()
    env = {
        'GIT_INDEX_FILE': utility.tmp_file(),
    }
    # Clear the index file
    utility.sh(['rm', '-f', env['GIT_INDEX_FILE']])

    def git(args):
        # Every git call below runs against the temporary index.
        return utility.sh(['git'] + args, env=env)
    head_hash = rev_parse_commit('HEAD')
    deploy_hash = rev_parse_commit('deploy')
    # Stage the deployable artifacts (-f because dist is normally ignored).
    git(['add', 'Procfile', 'package.json', 'npm-shrinkwrap.json'])
    git(['add', '-f', 'dist'])
    git(['add', 'server.js'])
    # Write a tree from the index, commit it on top of the deploy branch,
    # move the branch and push.
    tree = git(['write-tree'])
    commit = git(['commit-tree', tree, '-p', deploy_hash, '-m', "deploy " + head_hash])
    git(['branch', '-f', 'deploy', commit])
    git(['push', 'origin', 'deploy'])
    print('New commit: ', commit)
# Standard script entry point: only deploy when executed directly.
if __name__ == "__main__":
    main()
| 25.893939 | 87 | 0.623171 |
ace16c2ccfbbea963b2bfe088cc37317fda17507 | 18,052 | py | Python | tests/test_mlabwrap.py | aschmolck/mlabwrap | b42880adb50e7359ab4fc3405827beb34d12b8f4 | [
"MIT"
] | 5 | 2015-05-08T10:56:23.000Z | 2021-04-20T03:25:36.000Z | tests/test_mlabwrap.py | aschmolck/mlabwrap | b42880adb50e7359ab4fc3405827beb34d12b8f4 | [
"MIT"
] | null | null | null | tests/test_mlabwrap.py | aschmolck/mlabwrap | b42880adb50e7359ab4fc3405827beb34d12b8f4 | [
"MIT"
] | 1 | 2019-02-02T10:05:04.000Z | 2019-02-02T10:05:04.000Z | ##############################################################################
################### test_mlabwrap: unittests for mlabwrap ####################
##############################################################################
##
## o authors: Alexander Schmolck, Vivek Rathod
## o created: 2003-07-00 00:00:00+00:00
import sys, os, re
import gc
from pdb import pm # for debugging test failures
# Prefer numpy; fall back to the legacy Numeric/MLab stack with a matching
# scalar-extraction shim (Python 2 era code).
try:
    import numpy
    from numpy.random import rand, randn
    toscalar = lambda a:a.item()
except ImportError:
    import Numeric as numpy
    from MLab import rand, randn
    toscalar = lambda a:a.toscalar()
from tempfile import mktemp
try: # python >= 2.3 has better mktemp
    from tempfile import mkstemp as _mkstemp
    mktemp = lambda *args,**kwargs: _mkstemp(*args, **kwargs)[1]
except ImportError: pass
# Normalize matlab proxy value names so reprs compare across sessions.
degensym_proxy = lambda s, rex=re.compile(r'(PROXY_VAL)\d+'): rex.sub(r'\1',s)
import unittest
TestCase = unittest.TestCase
TestSuite = unittest.TestSuite
# Use the fancier awmstest classes when available.
try:
    import awmstest
    TestCase = awmstest.PermeableTestCase2
    TestSuite = awmstest.RotatingTestSuite
except ImportError: pass
from awmstools import indexme, without
from mlabwrap import *

BUFSIZE=4096 # must be the same as in mlabraw.cpp
#XXX for testing in running session with existing mlab
## mlab
## mlab = MlabWrap()
# Snapshot the matlab workspace at import time (cell proxying temporarily
# disabled so who() returns plain values).
mlab._dont_proxy['cell'] = True
WHO_AT_STARTUP = mlab.who()
mlab._dont_proxy['cell'] = False
# FIXME should do this differently; warn about unexpected workspace entries.
funnies = without(WHO_AT_STARTUP, ['HOME', 'V', 'WLVERBOSE', 'MLABRAW_ERROR_'])
if funnies:
    print >> sys.stderr, "Hmm, got some funny stuff in matlab env: %s" % funnies
#FIXME both below untested
def fitString(s, maxCol=79, newlineReplacement="\\n"):
    """Return *s* fitted onto a single line of at most ``maxCol`` characters.

    Newlines are first replaced with ``newlineReplacement`` (pass a false,
    non-string value to skip the replacement); the result is then truncated
    with a trailing ``...`` if it is longer than ``maxCol`` (``None`` means
    no length limit).  Python 2 only (uses ``basestring``).
    """
    # An empty replacement string is falsy but still a valid replacement,
    # hence the extra isinstance() check.
    if newlineReplacement or isinstance(newlineReplacement, basestring):
        s = s.replace("\n", newlineReplacement)
    if maxCol is not None and len(s) > maxCol:
        # Reserve 3 columns for the "..." ellipsis marker.
        s = "%s..." % s[:maxCol-3]
    return s
class NumericTestCase(TestCase):
    """Simple extension to TestCase to handle array equality tests 'correctly'
    (i.e. work around rich comparisons). Since array repr's can also be
    very large, the printing of large reprs is controlled by
    ``maxReprLength`` (None to print everything) and
    ``reprNewlineReplacement`` (None not to replace newlines in the repr).
    """
    maxReprLength = 30 # max chars of each repr shown in failure messages
    reprNewlineReplacement = "\\n"
    def _reallyEqual(self, first, second, testShape=True):
        # Element-wise equality collapsed to a single bool; shape must also
        # match unless testShape is False.
        #FIXME should this check for identical argument type, too?
        res = first == second
        # find out if are dealing with a sized object; looking for a __len__
        # attr does *NOT* work, because of #$@-C extension crap
        try:
            len(res)
        except TypeError:
            # scalar comparison result -- use it as-is
            return res
        else:
            # HACK
            if len(first) == len(second) == 0:
                return `first` == `second` # deal with empty arrays
            res = ((not testShape or numpy.shape(first) == numpy.shape(second)) and
                   # it is necessary to exclude 0 element arrays, because
                   # identical zero-element arrays don't compare true (``and True`` normalizes)
                   (not len(first) and not len(second)
                    or bool(numpy.alltrue((numpy.ravel(first == second))))))
            return res
    def _smallRepr(self, *args):
        # Shorten each argument's repr for readable failure messages.
        return tuple([fitString(repr(arg), maxCol=self.maxReprLength,
                                newlineReplacement=self.reprNewlineReplacement)
                      for arg in args])
    def assertEqual(self, first, second, msg=None):
        if not self._reallyEqual(first, second):
            raise self.failureException, \
                  (msg or '%s != %s' % self._smallRepr(first, second))
    assertEqual = failUnlessEqual = assertEqual
    def assertNotEqual(self, first, second, msg=None):
        if self._reallyEqual(first, second):
            raise self.failureException, \
                  (msg or '%s == %s' % self._smallRepr(first, second))
    assertNotEquals = failIfEqual = assertNotEqual
    def assertAlmostEqual(self, first, second, places=7, msg=None):
        # Equal shape and element-wise difference that rounds to 0 at
        # ``places`` decimals.
        if not (numpy.shape(first) == numpy.shape(second) and \
                self._reallyEqual(numpy.around(second-first, places), 0, testShape=False)):
            raise self.failureException, \
                  (msg or '%s != %s within %s places' % self._smallRepr(first,second,places))
    assertAlmostEquals = failUnlessAlmostEqual = assertAlmostEqual
    def assertNotAlmostEqual(self, first, second, places=7, msg=None):
        if not (numpy.shape(first) == numpy.shape(second) and \
                not self._reallyEqual(numpy.around(second-first, places), 0, testShape=False)):
            raise self.failureException, \
                  (msg or '%s == %s within %s places' % self._smallRepr(first,second,places))
    failIfAlmostEqual = assertNotAlmostEquals = assertNotAlmostEqual
def _canonicalMShape(a):
"""Matlab arrays are rank-less (rank is specified by indexing), so all
arrays w/ trailing 1s in their shape are equivalent. This returns the
canonical form for comparison purposes: no trailing 1s in the shape,
unless the array would otherwise become a scalar or vector.
"""
s = list(a.shape)
if len(s) < 2: s.append(1)
while s[2:] and s[-1] == 1: s.pop()
return a.reshape(s)
class mlabwrapTC(NumericTestCase):
## def assertEqual(self, first, second):
## res = first == second
## if len(res):
## res = numpy.shape(first) == numpy.shape(second) and \
## bool(numpy.alltrue((numpy.ravel(a1 == a2))))
## super(TestCase, self).assertEquals(res, True)
    def testBasic(self):
        """Test basic behavior.

        Round-trips randomly shaped real and complex arrays (and strided
        slices of them) through the matlab session, then checks the empty,
        0d and error-handling corner cases.
        """
        array = numpy.array
        from random import randrange
        for isComplex in [False, True]:
            for i in range(30):
                #flat vector
                if i % 3:
                    nDims = 1
                    dims = randrange(1, 20)
                #2 - 6 dimensions.
                else:
                    nDims = randrange(2, 7)
                    dims = [randrange(1, 7) for j in range(nDims)]
                a = numpy.random.random(dims)
                if isComplex: a = a + 1j*numpy.random.random(dims)
                a1 = a.copy()
                mlab._set('a', a)
                #### test simple get ####
                self.assertEqual(_canonicalMShape(a), mlab._get('a'))
                self.assertEqual(a,a1)
                ### test sliced arrays (stride handling test) ###
                b = a1.copy()
                for i in range(nDims):
                    # pick a random non-zero stride for each axis
                    z=0
                    while not z: z = randrange(-3,4)
                    b = b[::z]
                mlab._set('b',b)
                self.assertEqual(_canonicalMShape(b),mlab._get('b'))
                self.assertEqual(a1,a)
                ########## test for aliasing problems ##########
                if nDims > 1:
                    # mutating what _get returned must not affect matlab's copy
                    newA = mlab._get('a')
                    newA -= 1e4
                    if len(newA):
                        self.assertNotEqual(newA, mlab._get('a'))
                self.assertEqual(a,a1)
        mlab.clear('a')
        mlab.clear('b')
        # the tricky diversity of empty arrays
        mlab._set('a', [[]])
        self.assertEqual(mlab._get('a'), numpy.zeros((1, 0), 'd'))
        mlab._set('a', numpy.zeros((0,0)))
        self.assertEqual(mlab._get('a'), numpy.zeros((0, 0), 'd'))
        mlab._set('a', [])
        self.assertEqual(mlab._get('a'), numpy.zeros((0, 0), 'd'))
        # complex empty
        mlab._set('a', numpy.zeros((0,0), 'D'))
        self.assertEqual(mlab._get('a'), numpy.zeros((0, 0), 'd')) #XXX
        # 0d
        mlab._set('a', -2)
        self.assertEqual(mlab._get('a'), array([ [-2.]]))
        mlab._set('a', array(-2))
        self.assertEqual(mlab._get('a'), array([ [-2.]]))
        mlab.clear('a')
        # try basic error handling
        self.failUnlessRaises(MlabError, mlab._get, 'dontexist')
        self.failUnlessRaises(MlabError,mlab.round)
        assert toscalar(mlab.round(1.6)) == 2.0
        self.assertEqual(mlab.max([20,10],nout=2), (numpy.array([[20]]), array([[1]])))
        self.assertEqual(mlab.max([20,10]), numpy.array([[20]]))
    def testDoc(self):
        """Test that docstring extraction works OK."""
        # str.index raises ValueError if matlab's help text for `who` was not
        # captured into the wrapper's docstring.
        mlab.who.__doc__.index('WHO lists the variables in the current workspace')
    def setUp(self):
        """Back up options.

        Snapshots the mlab settings that individual tests may tweak; they
        are restored verbatim in tearDown().  Also puts this test directory
        on the matlab path so helper .m files (e.g. proxyTest) are found.
        """
        self.backup = {}
        for opt in """\
       _array_cast
       _autosync_dirs
       _flatten_row_vecs
       _flatten_col_vecs
       _clear_call_args
       _session
       _proxies
       _proxy_count
       _mlabraw_can_convert
       _dont_proxy""".split():
            self.backup[opt] = mlab.__dict__[opt]
        mlab.addpath(os.path.dirname(__file__)) # XXX
        print "ADDPATHed", os.path.dirname(__file__)
    def tearDown(self):
        """Reset options."""
        # Restore every option that setUp() backed up.
        mlab.__dict__.update(self.backup)
    def testCallArgs(self):
        """Matlab-side temporaries for call arguments ('arg0__', ...) must be
        cleaned up exactly when _clear_call_args is enabled."""
        mlab._dont_proxy['cell'] = True
        try:
            mlab._clear_call_args = False
            mlab.sin(1.23)
            # temporary survives when clearing is off
            assert mlab._get('arg0__', True) == 1.23
            mlab._clear_call_args = True
            mlab.sin(1.23)
            # ... and is gone from the workspace when clearing is on
            assert not 'arg0__' in mlab.who()
        finally:
            mlab._clear_call_args = True
            mlab._dont_proxy['cell'] = False
    def testXXXSubtler(self):
        """test more subtle stuff. This must come last, hence the XXX

        Covers strings, struct proxies (get/set/repr/pickling), cell
        conversion, proxy garbage collection and output capture.  Leaves
        the matlab variable 'bar' behind on purpose: the module-level
        asserts after the suite run check for it.
        """
        import os, cPickle
        array = numpy.array
        # simple strings:
        assert (mlab._do("''"), mlab._do("'foobar'")) == ('', 'foobar')
        self.assertEqual(mlab.sort(1), numpy.array([[1.]]))
        self.assertEqual(mlab.sort([3,1,2]), numpy.array([[1.], [2.], [3.]]))
        self.assertEqual(mlab.sort(numpy.array([3,1,2])), numpy.array([[1.], [2.], [3.]]))
        sct = mlab._do("struct('type',{'big','little'},'color','red','x',{3 4})")
        bct = mlab._do("struct('type',{'BIG','little'},'color','red')")
        self.assertEqual(sct[1].x, numpy.array([[4]]))
        self.assertEqual(sct[0].x, numpy.array([[3]]))
        #FIXME sct[:].x wouldn't work, but currently I'm not sure that's my fault
        sct[1].x = 'New Value'
        assert sct[1].x == 'New Value'
        assert bct[0].type == 'BIG' and sct[0].type == 'big'
        mlab._set('foo', 1)
        assert mlab._get('foo') == numpy.array([1.])
        # proxied cells don't compare equal to plain python nested lists...
        assert (mlab._do("{'A', 'b', {3,4, {5,6}}}") !=
                ['A', 'b', [array([[ 3.]]), array([[ 4.]]),
                            [array([[ 5.]]), array([[ 6.]])]]])
        # ...but unproxied ones do
        mlab._dont_proxy['cell'] = True
        self.assertEquals(mlab._do("{'a', 'b', {3,4, {5,6}}}"),
                          ['a', 'b', [array([ 3.]), array([ 4.]),
                                      [array([ 5.]), array([ 6.])]]])
        mlab._dont_proxy['cell'] = False
        mlab.clear('foo')
        self.assertRaises(MlabError, mlab._get, 'foo')
        # XXX: note to self: ``format compact`` in startup.m will cause this
        # test to fail, but should otherwise be harmless.
        self.assertEquals(degensym_proxy(repr(sct)),
                          "<MlabObjectProxy of matlab-class: 'struct'; "
                          "internal name: 'PROXY_VAL__'; has parent: no>\n"
                          "1x2 struct array with fields:\n"
                          " type\n color\n x\n\n")
        #FIXME: add tests for assigning and nesting proxies
        ## ensure proxies work OK as arguments
        self.assertEqual(mlab.size(sct), array([[1., 2.]]))
        self.assertEqual(mlab.size(sct, 1), array([[1]]))
        # test that exceptions on calls with proxy arguments don't result in
        # trouble
        self.assertRaises(MlabError, mlab.svd, sct)
        self.assertEqual(mlab.size(sct, [2]), array([[2]]))
        mlab._dont_proxy['cell'] = True
        gc.collect()
        assert map(degensym_proxy,without(mlab.who(), WHO_AT_STARTUP)) == (
            ['PROXY_VAL__', 'PROXY_VAL__'])
        # test pickling
        pickleFilename = mktemp()
        f = open(pickleFilename, 'wb')
        try:
            cPickle.dump({'sct': sct, 'bct': bct},f,1)
            f.close()
            f = open(pickleFilename, 'rb')
            namespace = cPickle.load(f)
            f.close()
        finally:
            os.remove(pickleFilename)
        gc.collect()
        # unpickling must have created independent copies of both proxies
        assert len(mlab._proxies) == 4, "%d proxies!" % len(mlab._proxies)
        assert namespace['sct'][1].x == 'New Value'
        namespace['sct'][1].x = 'Even Newer Value'
        assert namespace['sct'][1].x == 'Even Newer Value'
        assert sct[1].x == 'New Value'
        del sct
        del bct
        del namespace['sct']
        del namespace['bct']
        mlab._set('bar', '1234')
        # capture printed matlab output via handle_out
        x = []
        mlab._do("disp 'hallo'" ,nout=0, handle_out=x.append)
        assert x[0] == 'hallo\n'
        mlab._dont_proxy['cell'] = False
        # attribute lookups that are not valid matlab names must fail cleanly
        self.assertRaises(ValueError, getattr, mlab, "buggy('ipython lookup')")
    def testSparseArrays(self):
        """Make sure sparse arrays work."""
        # Round-trip: dense zeros -> matlab sparse -> full() -> dense zeros.
        s = mlab.sparse(numpy.zeros([100,100]))
        self.assertEqual(mlab.full(s), numpy.zeros([100,100]))
        # FIXME: add these once we have multi-dimensional proxying
        ## s = mlab.sparse(numpy.zeros([100,100]))
        ## self.assertEqual(s[0,0], 0.0)
        ## self.assertEqual(s[99,99], 0.0)
        ## t = mlab.sparse(numpy.array([[1.,2,3],[0,0,0],[4,5,6]]))
        ## self.assertEqual(t[0,0], 1.0)
        ## self.assertEqual(t[1,1], 0)
        ## self.assertEqual(t[2,2], 6)
def testProxyIndexing(self):
"indexing and co: time for some advanced proxied __getitem__ and __setitem__ etc.."
p=mlab.proxyTest(mlab.struct('a', 1, 'b', '2'))
p.c = [[4,5]]
assert p.a == 1.0
assert p.b == '2'
assert list(p.c.flat) == [4,5]
# test all combinations of 1D indexing
sv = mlab.proxyTest(range(4))
assert sv[0] == 0
sv[0] = -33
assert sv[0] == -33
# test curly indexing; the proxyTest class in matlab arbitrarily uses
# string conversion on ``{}`` indexing to have something to distinguish
# it from "normal" ``()`` indexing
sv._[0] = '0'
assert sv._[0] == '0' == str(int(toscalar(sv[0])))
assert sv["some'string\nwith\\funny\tstuff"] == (
"you ()-indexed with the string <<some'string\nwith\\funny\tstuff>>")
# FIXME this is something to potentially add, but that also raises issues
## assert numpy.ndim(sv) == 2 # FIXME change that to 1?
## assert numpy.shape(sv[:]) == (4,1) # FIXME change that to 1?
assert list(sv[:].flat) == range(4)
# more complicated "open-ended" slices aren't supported (yet)
self.assertEqual(sv[0:], sv[:])
self.assertEqual(sv[:-1], sv[0:-1])
self.assertEqual(sv[0:-1:1], sv[:-1])
self.assertEqual(sv[-4:], sv[:])
self.assertEqual(sv[-4:-3], sv[0:1])
for b in [None] + range(-4,4):
for e in [None] + range(-4,4):
for s in [None,1]:
assert list(sv[b:e:s].flat) == range(4)[b:e:s], (
"sv[b:e:s]: %s (b,e,s): %s" % (sv[b:e:s], (b,e,s)))
sv[:-1] = -numpy.arange(3)
assert list(sv[:].flat) == [-x for x in range(3)] + [3]
sv[:] = numpy.arange(4)
assert list(sv[:].flat) == range(4)
sv[-2:] = numpy.arange(2)+10
assert list(sv[:].flat) == [0,1,10,11]
# FIXME math ops aren't yet implemented
# sv *= 10
# sv[1:3] *= 10 # FIXME
# sv + 3
# FIXME multi-D stuff isn't either
## sm = mlab.proxyTest(arange(6).reshape(3,2))
## assert sm.ndim == 2
## assert sm.shape == (3,2)
## assert len(sm) == 3
## assert len(sm).T
## p.sv = sv
## assert p.sv is sv
## assert p.sv[:]
    def testRawMlabraw(self):
        """A few explicit tests for mlabraw (the low-level C extension):
        bad-argument TypeErrors, eval buffer limits and output capture."""
        import mlabraw
        #print "test mlabraw"
        self.assertRaises(TypeError, mlabraw.put, 33, 'a',1)
        self.assertRaises(TypeError, mlabraw.get, object(), 'a')
        self.assertRaises(TypeError, mlabraw.eval, object(), '1')
        # -100 is picked kinda arbitrarily to account for internal "overhead";
        # I don't want to hardcode the exact value; users can assume 1000
        # chars is safe
        mlabraw.eval(mlab._session, '1'*(BUFSIZE-100))
        # a ~4000-digit integer literal overflows matlab doubles to inf
        assert numpy.inf == mlabraw.get(mlab._session, 'ans');
        # test for buffer overflow detection
        self.assertRaises(Exception, mlabraw.eval, mlab._session, '1'*BUFSIZE)
        self.assertEqual(mlabraw.eval(mlab._session, r"fprintf('1\n')"),'1\n')
        try:
            self.assertEqual(mlabraw.eval(mlab._session, r"1"),'')
        finally:
            mlabraw.eval(mlab._session,'clear ans')
        #print "tested mlabraw"
    def testOrder(self):
        """Testing order flags cause no problems"""
        try: import numpy
        except ImportError: return
        # Fortran (column-major) ordered arrays must round-trip unchanged and
        # index correctly on the matlab side.
        fa=numpy.array([[1,2,3],[4,5,6]],order='F')
        self.assertEqual(mlab.conj(fa),fa)
        self.assertEqual([[2]],mlab.subsref(fa, mlab.struct('type', '()', 'subs',mlab._do('{{1,2}}'))))
# Build a suite out of all test cases and run it immediately at import time
# (this module doubles as a script).
suite = TestSuite(map(unittest.makeSuite,
                      (mlabwrapTC,
                       )))
unittest.TextTestRunner(verbosity=2).run(suite)
#FIXME strangely enough we can't test this in the function!
# Post-run sanity checks on the shared matlab session: after the suite has
# finished only 'bar' (deliberately left behind by testXXXSubtler) should
# survive, and clear() must empty the workspace again.
gc.collect()
mlab._dont_proxy['cell'] = True
# XXX got no idea where HOME comes from, not there under win
assert without(mlab.who(), WHO_AT_STARTUP) == ['bar'], "who is:%r" % mlab.who()
mlab.clear()
assert without(mlab.who(), ['MLABRAW_ERROR_']) == [] == mlab._do('{}'),(
    "who is:%r" % mlab.who())
mlab._dont_proxy['cell'] = False
| 42.079254 | 103 | 0.552903 |
ace16e13bdc46530bcd5ec79d1242be6f5d0cbcf | 2,489 | py | Python | tests/unit/core/providers/aws/s3/test_sync_handler.py | avosper-intellaegis/runway | 757d4e7db269ec16479b044ac82a69f25fa2a450 | [
"Apache-2.0"
] | 134 | 2018-02-26T21:35:23.000Z | 2022-03-03T00:30:27.000Z | tests/unit/core/providers/aws/s3/test_sync_handler.py | asksmruti/runway | 8aca76df9372e3d13eb35e12f81758f618e89e74 | [
"Apache-2.0"
] | 937 | 2018-03-08T22:04:35.000Z | 2022-03-30T12:21:47.000Z | tests/unit/core/providers/aws/s3/test_sync_handler.py | asksmruti/runway | 8aca76df9372e3d13eb35e12f81758f618e89e74 | [
"Apache-2.0"
] | 70 | 2018-02-26T23:48:11.000Z | 2022-03-02T18:44:30.000Z | """Test runway.core.providers.aws.s3._sync_handler."""
# pylint: disable=no-self-use,protected-access
from __future__ import annotations
from typing import TYPE_CHECKING
from mock import Mock
from runway.core.providers.aws.s3._sync_handler import S3SyncHandler
if TYPE_CHECKING:
from pytest_mock import MockerFixture
from .....factories import MockRunwayContext
MODULE = "runway.core.providers.aws.s3._sync_handler"
class TestS3SyncHandler:
    """Test S3SyncHandler."""
    def test_client(self, runway_context: MockRunwayContext) -> None:
        """Test client."""
        # The handler's client must come from the session attached to the
        # runway context.
        runway_context.add_stubber("s3")
        assert S3SyncHandler(
            runway_context, dest="", src=""
        ).client == runway_context.get_session().client("s3")
    def test_run(
        self, mocker: MockerFixture, runway_context: MockRunwayContext
    ) -> None:
        """Test run."""
        # Patch out the awscli sync machinery so run() can be verified purely
        # in terms of the calls it makes.
        mock_register_sync_strategies = mocker.patch(
            f"{MODULE}.register_sync_strategies"
        )
        mock_action = mocker.patch(f"{MODULE}.ActionArchitecture")
        transfer_config = mocker.patch.object(
            S3SyncHandler, "transfer_config", {"key": "val"}
        )
        obj = S3SyncHandler(runway_context, dest="", src="")
        # run() is expected to return a falsy value
        assert not obj.run()
        # strategies are registered against the handler's botocore session...
        mock_register_sync_strategies.assert_called_once_with(obj._botocore_session)
        # ...and ActionArchitecture is built once with the handler's state,
        # then executed.
        mock_action.assert_called_once_with(
            session=obj._session,
            botocore_session=obj._botocore_session,
            action="sync",
            parameters=obj.parameters.data,
            runtime_config=transfer_config,
        )
        mock_action().run.assert_called_once_with()
    def test_transfer_config(
        self, mocker: MockerFixture, runway_context: MockRunwayContext
    ) -> None:
        """Test transfer_config."""
        mock_runtime_config = mocker.patch(
            f"{MODULE}.RuntimeConfig", build_config=Mock(return_value="success")
        )
        config = {"key": "val"}
        scoped_config = Mock(get=Mock(return_value=config))
        obj = S3SyncHandler(runway_context, dest="", src="")
        obj._botocore_session.get_scoped_config = Mock(return_value=scoped_config)
        # transfer_config must read the scoped "s3" config section and feed
        # it to RuntimeConfig.build_config.
        assert obj.transfer_config == mock_runtime_config.build_config.return_value
        obj._botocore_session.get_scoped_config.assert_called_once_with()
        scoped_config.get.assert_called_once_with("s3", {})
        mock_runtime_config.build_config.assert_called_once_with(**config)
| 37.149254 | 84 | 0.6818 |
ace16ea1f45d731f5cbdf31b2626c3db1aa89add | 1,348 | py | Python | django/mysite/polls/models.py | matheuskiser/pdx_code_guild | 49a5c62fb468253eb4d9a1fb11166df79bb10873 | [
"MIT"
] | null | null | null | django/mysite/polls/models.py | matheuskiser/pdx_code_guild | 49a5c62fb468253eb4d9a1fb11166df79bb10873 | [
"MIT"
] | null | null | null | django/mysite/polls/models.py | matheuskiser/pdx_code_guild | 49a5c62fb468253eb4d9a1fb11166df79bb10873 | [
"MIT"
] | null | null | null | from django.utils import timezone
from django.db import models
import datetime
from django.contrib.auth.models import User
class Question(models.Model):
    """A poll question, published at ``pub_date``."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __str__(self):
        return self.question_text
    def was_published_recently(self):
        """Return True if pub_date is within the last day and not in the
        future."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
    # Admin list-display metadata for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer for a Question, with a running vote tally."""
    question = models.ForeignKey(Question)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __str__(self):
        return self.choice_text
class UserProfile(models.Model):
    """Optional per-user profile data, linked one-to-one to ``auth.User``."""
    # This line is required. Links UserProfile to a User model instance.
    user = models.OneToOneField(User, related_name='profile')
    # The additional attributes we wish to include.
    website = models.URLField(blank=True)
    picture = models.ImageField(upload_to='polls/static/polls/profile_images', blank=True)
    def __str__(self):
        # Question and Choice define __str__; mirror that here so this model
        # also renders usefully on Python 3, where __unicode__ is ignored.
        return self.user.username
    # Preserve the original Python 2 entry point (same behavior as before).
    __unicode__ = __str__
| 30.636364 | 90 | 0.737389 |
ace16ef597454f8f3ae66567b74014afad7cc923 | 522 | py | Python | Bio/Phylo/Applications/__init__.py | bioinf-mcb/biopython | 1a1f4a7ee4e0efba517d3d607c56c27e72e399cc | [
"BSD-3-Clause"
] | 2 | 2020-06-25T12:52:03.000Z | 2020-07-11T09:47:34.000Z | Bio/Phylo/Applications/__init__.py | cosign070128/biopython | 2f02e34ba76306e9c27eec9e051809bec2cece9b | [
"BSD-3-Clause"
] | 14 | 2021-03-26T20:54:22.000Z | 2021-04-06T17:18:53.000Z | Bio/Phylo/Applications/__init__.py | cosign070128/biopython | 2f02e34ba76306e9c27eec9e051809bec2cece9b | [
"BSD-3-Clause"
] | 3 | 2020-05-17T19:43:05.000Z | 2020-06-04T20:44:38.000Z | # Copyright 2011 by Eric Talevich. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Phylogenetics command line tool wrappers."""
from ._Phyml import PhymlCommandline
from ._Raxml import RaxmlCommandline
from ._Fasttree import FastTreeCommandline
# Make this explicit, then they show up in the API docs
__all__ = ("PhymlCommandline", "RaxmlCommandline", "FastTreeCommandline")
| 40.153846 | 73 | 0.791188 |
ace16f98aaacdcf7ddabb9052149fd43325c3c91 | 11,996 | py | Python | services/traction/acapy_client/model/keylist_update.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 12 | 2022-01-29T20:30:03.000Z | 2022-03-29T11:46:14.000Z | services/traction/acapy_client/model/keylist_update.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 38 | 2021-11-22T17:52:50.000Z | 2022-03-31T17:52:00.000Z | services/traction/acapy_client/model/keylist_update.py | Open-Earth-Foundation/traction | 908b555a7f408a88541b7692d3730e37a297c919 | [
"Apache-2.0"
] | 9 | 2021-11-22T18:05:48.000Z | 2022-03-29T11:25:08.000Z | """
Aries Cloud Agent
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0.7.2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from acapy_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel,
)
from acapy_client.exceptions import ApiAttributeError
def lazy_import():
from acapy_client.model.keylist_update_rule import KeylistUpdateRule
globals()["KeylistUpdateRule"] = KeylistUpdateRule
class KeylistUpdate(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"id": (str,), # noqa: E501
"type": (str,), # noqa: E501
"updates": ([KeylistUpdateRule],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"id": "@id", # noqa: E501
"type": "@type", # noqa: E501
"updates": "updates", # noqa: E501
}
read_only_vars = {
"type", # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""KeylistUpdate - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): Message identifier. [optional] # noqa: E501
type (str): Message type. [optional] # noqa: E501
updates ([KeylistUpdateRule]): List of update rules. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""KeylistUpdate - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): Message identifier. [optional] # noqa: E501
type (str): Message type. [optional] # noqa: E501
updates ([KeylistUpdateRule]): List of update rules. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(
f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes."
)
| 41.508651 | 124 | 0.56102 |
ace170a2d3fe0b96efe23eb32bd86ff496dea2af | 377 | py | Python | Hcf.py | sanxy/hacktoberfest-1 | 913582b310688d496602e8b1bc9166cb64866e38 | [
"MIT"
] | null | null | null | Hcf.py | sanxy/hacktoberfest-1 | 913582b310688d496602e8b1bc9166cb64866e38 | [
"MIT"
] | null | null | null | Hcf.py | sanxy/hacktoberfest-1 | 913582b310688d496602e8b1bc9166cb64866e38 | [
"MIT"
] | 1 | 2020-09-30T18:53:05.000Z | 2020-09-30T18:53:05.000Z | # Python program to find H.C.F of two numbers
# define a function
def compute_hcf(x, y):
# choose the smaller number
if x > y:
smaller = y
else:
smaller = x
for i in range(1, smaller+1):
if((x % i == 0) and (y % i == 0)):
hcf = i
return hcf
num1 = 54
num2 = 24
print("The H.C.F. is", compute_hcf(num1, num2))
| 12.566667 | 47 | 0.527851 |
ace171563a46090d2e375e9a54aabefe7975a56b | 11,445 | py | Python | sarnet_td3/trainer/comm_trainer_reinforce.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
] | null | null | null | sarnet_td3/trainer/comm_trainer_reinforce.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
] | null | null | null | sarnet_td3/trainer/comm_trainer_reinforce.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
import sarnet_td3.common.buffer_util_td3 as butil
from sarnet_td3 import MAgentTrainer
from sarnet_td3.common.distributions import make_pdtype
def discount_with_dones(rewards, dones, gamma):
    """Compute per-step discounted returns, resetting at episode boundaries.

    Walks the trajectory backwards, accumulating ``reward + gamma * running``.
    Wherever ``dones`` is truthy the running value -- including that step's
    own recorded return -- is multiplied by ``(1 - done)`` (i.e. zeroed for
    done == 1), so earlier steps never bootstrap across an episode end.

    Returns a list of returns in the original (forward) time order.
    """
    returns = []
    running = 0
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running = (reward + gamma * running) * (1. - done)
        returns.append(running)
    returns.reverse()
    return returns
def make_update_exp(vals, target_vals, polyak):
    """Build a callable that Polyak-averages ``vals`` into ``target_vals``.

    Each target variable is assigned
        target <- (1 - polyak) * target + polyak * source
    with source/target variables matched by sorting both lists on their
    names.  All assignments are grouped into one TF op and wrapped in a
    ``U.function`` taking no inputs and producing no outputs.
    """
    keep = 1.0 - polyak  # fraction of the old target value retained
    by_name = lambda v: v.name
    assigns = [
        tgt.assign(keep * tgt + (1.0 - keep) * src)
        for src, tgt in zip(sorted(vals, key=by_name),
                            sorted(target_vals, key=by_name))
    ]
    return U.function([], [], updates=[tf.group(*assigns)])
def create_placeholder_vpg(obs_shape_n, act_space_n, num_agents, args):
    """Create per-agent TF v1 placeholders for the VPG trainer.

    Returns (obs_ph_n, h_ph_n, c_ph_n, memory_ph_n, act_ph_n, act_space_n,
    return_ph_n) -- each (except act_space_n, passed through unchanged) a
    list with one placeholder per agent: observations, GRU hidden/cell
    states, memory, integer action indices and scalar returns.
    """
    # Create placeholders
    with tf.name_scope("placeholders"):
        obs_ph_n = []
        memory_ph_n = []
        h_ph_n = []
        c_ph_n = []
        return_ph_n = []
        for i in range(num_agents):
            # For "mpe" the obs shape entry is passed through as-is;
            # other env types presumably supply a flat size that must be
            # wrapped into a 1-tuple -- TODO confirm against callers.
            if args.env_type == "mpe":
                obs_ph_n.append(U.BatchInput(obs_shape_n[i], name="observation" + str(i), traj=True).get())
            else:
                obs_ph_n.append(U.BatchInput((obs_shape_n[i],), name="observation" + str(i), traj=True).get())
            h_ph_n.append(U.BatchInput((args.gru_units,), name="gru_ph1" + str(i)).get())
            c_ph_n.append(U.BatchInput((args.gru_units,), name="gru_ph2" + str(i)).get())
            memory_ph_n.append(U.BatchInput((args.value_units,), name="memory_ph" + str(i)).get())
            return_ph_n.append(tf.compat.v1.placeholder(tf.float32, [None, None], name="returns" + str(i)))
        # NOTE(review): act_pdtype_n is built but never used or returned --
        # looks like dead code; confirm before removing.
        act_pdtype_n = [make_pdtype(act_space, args.env_type) for act_space in act_space_n]
        act_ph_n = [tf.compat.v1.placeholder(tf.int32, [None, None], name="act_one_hot" + str(i)) for i in range(len(act_space_n))]
    return obs_ph_n, h_ph_n, c_ph_n, memory_ph_n, act_ph_n, act_space_n, return_ph_n
class CommAgentTrainerVPG(MAgentTrainer):
    """Vanilla-policy-gradient trainer for one agent in a communicating team.

    Builds the actor/value TF graph for agent ``p_index`` on top of a
    communication architecture selected by the configured comm type
    (SARNET / TARMAC / COMMNET / DDPG / IC3NET) and exposes three TF
    callables: ``act`` (sample actions + recurrent state), ``p_train``
    (policy update) and ``v_train`` (value update).
    """
    def __init__(self, name, p_model, obs_ph_n, h_ph_n, c_ph_n, memory_ph_n, act_ph_n,
                 action_space_n, return_in_ph, args, p_index, num_env=1, is_train=False):
        # name selects the team ("good_agent" vs adversaries); p_index is
        # this agent's global index across all agents.
        self.name = name
        self.args = args
        self.p_index = p_index
        self.reuse = False
        self.num_adv = self.args.num_adversaries
        self.n = len(obs_ph_n) # Total number of agents
        # [n_start, n_end) is this team's slice of the agent indices;
        # defaults to the adversary slice and is overridden below for
        # the good team.
        self.n_start = 0
        self.n_end = self.num_adv
        self.comm_type = self.args.adv_test
        # Update at these many number of steps
        self.step_update_time = 10
        if self.args.optimizer == "RMSProp":
            self.optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=self.args.actor_lr, decay=0.97, epsilon=1e-6)
        else:
            self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.args.actor_lr)
        # Setup weight sharing for first initialization of adv/good policy
        # (only the first agent of each team creates fresh variables).
        if not(self.p_index == 0 or self.p_index == self.num_adv): self.reuse = True
        # Prepare indexing parameters
        if self.name == "good_agent":
            self.comm_type = self.args.good_test
            self.n_start = self.num_adv
            self.n_end = self.n
        # Batch size and number of agents/environments
        self.num_env = num_env
        # Initialize actor network for communication
        actor_net = p_model(is_train, self.args, reuse=self.reuse)
        pMA_model = self.agent_model(self.comm_type, actor_net)
        self.max_replay_buffer_len = self.args.update_lag
        self.act, self.p_train, self.v_train = self._pMA_VPG_train(
            scope=self.name,
            make_obs_ph_n=obs_ph_n,
            make_memory_ph_n=memory_ph_n,
            make_h_ph_n=h_ph_n,
            make_c_ph_n=c_ph_n,
            make_act_ph_n=act_ph_n,
            action_space_n=action_space_n,
            make_return_ph_n=return_in_ph,
            p_func=pMA_model,
            grad_norm_clipping=0.5,
            reuse=self.reuse,
        )
    def agent_model(self, comm_type, p_model):
        """Select the network-builder method matching ``comm_type``.

        NOTE(review): returns None (implicitly) for an unrecognized
        comm_type, which will fail later at the ``p_func(...)`` call —
        consider raising a ValueError here instead.
        """
        if comm_type == "SARNET":
            return p_model.sarnet
        elif comm_type == "TARMAC":
            return p_model.tarmac
        elif comm_type == "COMMNET":
            return p_model.commnet
        elif comm_type == "DDPG":
            return p_model.ddpg
        elif comm_type == "IC3NET":
            return p_model.ic3net
    def _p_setup_placeholder(self, obs_ph_n, h_ph_n, c_ph_n, memory_ph_n):
        """Flatten the four per-agent placeholder lists into one list.

        Layout: [obs x n, h x n, c x n, memory x n] — ``prep_input`` below
        must produce values in exactly this order.
        """
        p_input = [None] * int(self.n * 4)
        for i in range(self.n):
            p_input[i] = obs_ph_n[i]
            p_input[i + self.n] = h_ph_n[i]
            p_input[i + int(2 * self.n)] = c_ph_n[i]
            p_input[i + int(3 * self.n)] = memory_ph_n[i]
        return p_input
    def _pMA_VPG_train(self, make_obs_ph_n, make_memory_ph_n, make_h_ph_n, make_c_ph_n, make_act_ph_n, action_space_n, make_return_ph_n, p_func, grad_norm_clipping=None, scope="agent", reuse=None):
        """Build the act/train graph; returns (act, train_pi, train_v) callables.

        NOTE(review): ``grad_norm_clipping`` is accepted but never applied —
        the optimizer minimizes the raw losses; confirm whether clipping was
        intended.
        """
        with tf.compat.v1.variable_scope(scope, reuse=reuse):
            # create distributions
            act_pdtype_n = [make_pdtype(act_space, self.args.env_type) for act_space in action_space_n]
            # set up placeholders
            obs_ph_n = make_obs_ph_n
            memory_ph_n = make_memory_ph_n
            h_ph_n = make_h_ph_n
            c_ph_n = make_c_ph_n
            act_onehot_ph = make_act_ph_n[self.p_index]
            return_ph = make_return_ph_n[self.p_index]
            # Feed all inputs. Let the model decide what to choose.
            p_input = self._p_setup_placeholder(obs_ph_n, h_ph_n, c_ph_n, memory_ph_n)
            p, enc_state, memory_state, attention, value = p_func(p_input, int(act_pdtype_n[self.p_index].param_shape()[0]), self.p_index, self.n, self.n_start, self.n_end, scope="p_func", reuse=reuse)
            # wrap parameters in distribution and sample
            act_pd = act_pdtype_n[self.p_index].pdfromflat(p)
            act_soft_sample = act_pd.sample(noise=False)
            # print(act_soft_sample)
            # Sample a discrete action id from the last-timestep logits.
            act_onehot = tf.multinomial(act_soft_sample[-1,:,:], 1)
            # print(act_onehot)
            value_out = tf.squeeze(value, axis=0) # remove the time dimension from the output for storing in the buffer
            return_ph_expd = tf.expand_dims(return_ph, axis=-1)
            # Value Network Optimization
            # value = tf.squeeze(value, axis=-1) # remove the last single out dim, to align with return (#trajlen, #batch)
            # Advantage estimate: (return - baseline); gradient is stopped
            # before it reaches the policy loss below.
            target = return_ph_expd - value
            loss_v = tf.reduce_mean(tf.math.squared_difference(value, return_ph_expd))
            optim_v = self.optimizer.minimize(loss_v, name='adam_optim_v')
            # Policy Network Optimization
            # print(act_soft_sample)
            target_pi = tf.squeeze(target, axis=-1)
            # REINFORCE-with-baseline: advantage-weighted cross-entropy on
            # the taken actions.
            loss_pi = tf.reduce_mean(tf.stop_gradient(target_pi) * tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=p, labels=act_onehot_ph), name='loss_pi')
            optim_pi = self.optimizer.minimize(loss_pi, name='adam_optim_pi')
            # Create callable functions
            # policy network
            # Use sess.run to the feed the dictionary, since we are not calling it anywhere else, simi
            update_pi = optim_pi
            update_v = optim_v
            train_v = U.function(inputs=p_input + [return_ph], outputs=update_v)
            train_pi = U.function(inputs=p_input + [act_onehot_ph] + [return_ph], outputs=update_pi)
            act = U.function(inputs=p_input, outputs=[act_onehot, act_soft_sample, enc_state, memory_state, attention, value_out])
            return act, train_pi, train_v
    def prep_input(self, obs, h, c, memory, is_train=True):
        """Pack per-agent feed values in the same [obs, h, c, memory] layout
        as ``_p_setup_placeholder``."""
        input = [None] * int(self.n * 4)
        for i in range(self.n):
            input[i] = obs[i]
            input[i + self.n] = h[i]
            input[i + int(2 * self.n)] = c[i]
            input[i + int(3 * self.n)] = memory[i]
        return input
    def action(self, input, is_train=False):
        """Run the act callable on an already-packed input list."""
        return self.act(*input)
    def sample_experience(self, bufferop):
        """Pull a sampled batch (data, indices, importance weights) from the buffer op."""
        # Receive all the data for the sampled trajectories
        data, index, importance = bufferop.return_exp()
        return data, index, importance
    def update(self, agents, buffer_data, t):
        """Run one value-network and one policy-network update from ``buffer_data``.

        ``buffer_data`` is a 9-tuple of per-trajectory lists (see the shape
        comments below). Returns the string "update done".
        """
        # Check if an update is needed
        # if not (t % self.step_update_time == 0): # only update every 10 steps for policy, 5 for critic
        # return "no_update"
        # Get mini-batch of trajectories
        # Returns the following indexing scheme
        # Shape of the trajectory is [# numtraj, [agent, trajlen, numenv, dim] or
        # [numtraj [agent, trajlen, num_env]] for rew/done
        obs_n_buffer, h_n_buffer, c_n_buffer, memory_n_buffer, action_n_buffer, action_n_logits_buffer, rew_n_buffer, \
        value_n_buffer, done_n_buffer = buffer_data
        """ Prepare Inputs for network feed """
        # Receives [batch_size, [trajlen, numenv, agent]] -> concat [trajlen, batch x numenv, agent]
        # Reshape to - [agent, trajlen, batchsize x num_env]]
        rew_n_buffer = np.transpose(np.concatenate(rew_n_buffer, axis=1), (2, 0, 1))
        # done_n_buffer = np.transpose(np.concatenate(done_n_buffer, axis=1), (2, 0, 1))
        # Receives [batch_size, [trajlen, agent, numenv]] -> concat [trajlen, agent, batch x numenv]
        # Reshape to - [agent, trajlen, batchsize x num_env]]
        # value_n_buffer = np.transpose(np.concatenate(value_n_buffer, axis=-1), (2, 0, 1))
        # Receives [batch, [traj, agent, numevn, dim]] -> [traj, agent, numenv x batch, dim]
        # Reshape to [agent, trajlen, numenv x batch, dim]
        obs_n_buffer = np.swapaxes(np.concatenate(obs_n_buffer, axis=-2), 1, 0)
        action_n_buffer = np.squeeze(np.swapaxes(np.concatenate(action_n_buffer, axis=-2), 1, 0))
        # For hidden states we only feed the start (i.e. no trajlen)
        h_n_buffer = np.swapaxes(np.concatenate(h_n_buffer, axis=-2), 1, 0)
        h_n_buffer = h_n_buffer[:, 0, :, :]
        c_n_buffer = np.swapaxes(np.concatenate(c_n_buffer, axis=-2), 1, 0)
        c_n_buffer = c_n_buffer[:, 0, :, :]
        memory_n_buffer = np.swapaxes(np.concatenate(memory_n_buffer, axis=-2), 1, 0)
        memory_n_buffer = memory_n_buffer[:, 0, :, :]
        returns = []
        advantages = []
        # Calculate returns
        return_so_far = np.zeros(np.shape(rew_n_buffer[self.p_index, 0, :]))
        # Get trajectory length to compute the returns in reverse
        traj_len, _ = rew_n_buffer[self.p_index].shape
        # Do returns calculation for individual agent
        for traj_idx in reversed(range(traj_len)):
            return_so_far = self.args.gamma * return_so_far + rew_n_buffer[self.p_index, traj_idx, :]
            returns.append(return_so_far)
        # Returns is of the form [trajlen, dim]
        # We need first indexes as agents for easier data manipulation
        # returns = np.stack(returns, axis=0)
        # NOTE(review): ``returns`` is appended while iterating backwards and
        # is never reversed, so index 0 is the LAST timestep when fed to the
        # [traj, batch] returns placeholder — confirm this ordering is
        # intended before relying on it.
        train_input = self.prep_input(obs_n_buffer, h_n_buffer, c_n_buffer, memory_n_buffer)
        _ = self.v_train(*(train_input + [returns]))
        _ = self.p_train(*(train_input + [action_n_buffer[self.p_index]] + [returns]))
        return "update done"
ace1719ad7d69a61cb58219d70a02ba2b5c02d14 | 269 | py | Python | backend/webapp/__init__.py | geometalab/retrieve-height-service | ac010b4fc9d795cc17cfcec953ad1902f7762e17 | [
"Apache-2.0"
] | 5 | 2018-06-05T10:12:09.000Z | 2021-11-19T03:36:18.000Z | backend/webapp/__init__.py | geometalab/retrieve-height-service | ac010b4fc9d795cc17cfcec953ad1902f7762e17 | [
"Apache-2.0"
] | 1 | 2020-10-09T16:52:57.000Z | 2020-10-09T16:52:57.000Z | backend/webapp/__init__.py | geometalab/retrieve-height-service | ac010b4fc9d795cc17cfcec953ad1902f7762e17 | [
"Apache-2.0"
] | 1 | 2015-05-21T06:59:23.000Z | 2015-05-21T06:59:23.000Z | """
Created on 230315
Created by Phua Joon Kai Eugene
Last Modification on 050515
"""
from flask import Flask
from GetHeight import ELE
from GetHighestElevationNearby import HIGH_ELE
# Module-level Flask application instance for the height-retrieval service.
app = Flask(__name__)
# Mount the elevation-lookup endpoints (blueprint from GetHeight).
app.register_blueprint(ELE)
# Mount the highest-elevation-nearby endpoints (blueprint from GetHighestElevationNearby).
app.register_blueprint(HIGH_ELE)
| 17.933333 | 46 | 0.814126 |
ace172be82f54117d7cec89ae3ad0e8ad076fea8 | 10,271 | py | Python | tests/aos_voting_test.py | ArisenIO/arisenx | 366561b7a40d5a0e61cbdd51d862e7860d7c9611 | [
"MIT"
] | 7 | 2019-09-12T05:59:14.000Z | 2021-09-21T10:12:16.000Z | tests/aos_voting_test.py | ArisenIO/arisenx | 366561b7a40d5a0e61cbdd51d862e7860d7c9611 | [
"MIT"
] | null | null | null | tests/aos_voting_test.py | ArisenIO/arisenx | 366561b7a40d5a0e61cbdd51d862e7860d7c9611 | [
"MIT"
] | 1 | 2020-02-17T07:46:54.000Z | 2020-02-17T07:46:54.000Z | #!/usr/bin/env python3
from testUtils import Utils
import testUtils
from Cluster import Cluster
from WalletMgr import WalletMgr
from Node import Node
from TestHelper import TestHelper
import decimal
import math
import re
###############################################################
# aos_voting_test
#
# This test sets up multiple producing nodes, each with multiple producers per node. Different combinations of producers
# are voted into the production schedule and the block production is analyzed to determine if the correct producers are
# producing blocks and in the right number and order.
#
###############################################################
class ProducerToNode:
    """Module-wide registry mapping producer name -> owning node index."""
    # shared across all uses; filled by populate()
    map={}

    @staticmethod
    def populate(node, num):
        """Record every producer of *node* as belonging to node *num*, logging each."""
        for producerName in node.producers:
            ProducerToNode.map[producerName] = num
            Utils.Print("Producer=%s for nodeNum=%s" % (producerName, num))
def isValidBlockProducer(prodsActive, blockNum, node):
    """Return whether the producer of *blockNum* on *node* is currently elected.

    Unknown producers count as not elected (False).
    """
    producerName = node.getBlockProducerByNum(blockNum)
    return prodsActive.get(producerName, False)
def validBlockProducer(prodsActive, prodsSeen, blockNum, node):
    """Assert the producer of *blockNum* is a known, elected producer.

    On violation the test is aborted via Utils.errorExit; otherwise the
    producer is recorded in *prodsSeen*.
    """
    producerName = node.getBlockProducerByNum(blockNum)
    if producerName not in prodsActive:
        Utils.cmdError("unexpected block producer %s at blockNum=%s" % (producerName,blockNum))
        Utils.errorExit("Failed because of invalid block producer")
    if not prodsActive[producerName]:
        Utils.cmdError("block producer %s for blockNum=%s not elected, belongs to node %s" % (producerName, blockNum, ProducerToNode.map[producerName]))
        Utils.errorExit("Failed because of incorrect block producer")
    prodsSeen[producerName] = True
def setActiveProducers(prodsActive, activeProducers):
    """Mark each tracked producer active iff it appears in *activeProducers*."""
    activeSet = set(activeProducers)
    for producerName in prodsActive:
        prodsActive[producerName] = producerName in activeSet
def verifyProductionRounds(trans, node, prodsActive, rounds):
    """Validate the production schedule seen by *node*.

    Starting at the next clean production cycle after *trans*, advances past
    any blocks produced by not-yet-elected producers, then verifies *rounds*
    complete rounds in which each of the 21 elected producers (from
    *prodsActive*) produces one slot of 12 consecutive blocks, with no
    producer producing two slots in a row. Aborts the test on any violation.

    Bug fix: the inner validation previously called
    ``validBlockProducer(..., node1)``, using the module-global node1 instead
    of the *node* parameter, so validation always queried node 1 regardless
    of which node the caller passed.
    """
    blockNum=node.getNextCleanProductionCycle(trans)
    Utils.Print("Validating blockNum=%s" % (blockNum))

    # Silence per-call debug output while scanning many blocks.
    temp=Utils.Debug
    Utils.Debug=False
    Utils.Print("FIND VALID BLOCK PRODUCER")
    blockProducer=node.getBlockProducerByNum(blockNum)
    lastBlockProducer=blockProducer
    adjust=False
    # Skip forward until an elected producer is producing.
    while not isValidBlockProducer(prodsActive, blockNum, node):
        adjust=True
        blockProducer=node.getBlockProducerByNum(blockNum)
        if lastBlockProducer!=blockProducer:
            Utils.Print("blockProducer=%s for blockNum=%s is for node=%s" % (blockProducer, blockNum, ProducerToNode.map[blockProducer]))
        lastBlockProducer=blockProducer
        blockNum+=1

    Utils.Print("VALID BLOCK PRODUCER")
    saw=0
    sawHigh=0
    startingFrom=blockNum
    doPrint=0
    invalidCount=0
    # If we skipped ahead, keep advancing until a producer has completed a
    # full 12-block slot, so verification starts on a slot boundary.
    while adjust:
        invalidCount+=1
        if lastBlockProducer==blockProducer:
            saw+=1
        else:
            if saw>=12:
                startingFrom=blockNum
                if saw>12:
                    Utils.Print("ERROR!!!!!!!!!!!!!! saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
                break
            else:
                if saw > sawHigh:
                    sawHigh = saw
                    Utils.Print("sawHigh=%s" % (sawHigh))
                if doPrint < 5:
                    doPrint+=1
                    Utils.Print("saw=%s, blockProducer=%s, blockNum=%s" % (saw,blockProducer,blockNum))
                lastBlockProducer=blockProducer
                saw=1
        blockProducer=node.getBlockProducerByNum(blockNum)
        blockNum+=1
    if adjust:
        blockNum-=1
        Utils.Print("ADJUSTED %s blocks" % (invalidCount-1))

    prodsSeen=None
    reportFirstMissedBlock=False
    Utils.Print("Verify %s complete rounds of all producers producing" % (rounds))
    for i in range(0, rounds):
        prodsSeen={}
        lastBlockProducer=None
        for j in range(0, 21):
            # each new set of 12 blocks should have a different blockProducer
            if lastBlockProducer is not None and lastBlockProducer==node.getBlockProducerByNum(blockNum):
                Utils.cmdError("expected blockNum %s to be produced by any of the valid producers except %s" % (blockNum, lastBlockProducer))
                Utils.errorExit("Failed because of incorrect block producer order")
            # make sure that the next set of 12 blocks all have the same blockProducer
            lastBlockProducer=node.getBlockProducerByNum(blockNum)
            for k in range(0, 12):
                # fixed: validate against the node passed in, not global node1
                validBlockProducer(prodsActive, prodsSeen, blockNum, node)
                blockProducer=node.getBlockProducerByNum(blockNum)
                if lastBlockProducer!=blockProducer:
                    if not reportFirstMissedBlock:
                        # Dump an 18-block window either side for diagnosis.
                        printStr=""
                        newBlockNum=blockNum-18
                        for l in range(0,36):
                            printStr+="%s" % (newBlockNum)
                            printStr+=":"
                            newBlockProducer=node.getBlockProducerByNum(newBlockNum)
                            printStr+="%s" % (newBlockProducer)
                            printStr+=" "
                            newBlockNum+=1
                        Utils.Print("NOTE: expected blockNum %s (started from %s) to be produced by %s, but produded by %s: round=%s, prod slot=%s, prod num=%s - %s" % (blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr))
                        reportFirstMissedBlock=True
                    break
                blockNum+=1
        # make sure that we have seen all 21 producers
        prodsSeenKeys=prodsSeen.keys()
        if len(prodsSeenKeys)!=21:
            Utils.cmdError("only saw %s producers of expected 21. At blockNum %s only the following producers were seen: %s" % (len(prodsSeenKeys), blockNum, ",".join(prodsSeenKeys)))
            Utils.errorExit("Failed because of missing block producers")

    Utils.Debug=temp
# Convenience aliases for the shared test utilities.
Print=Utils.Print
errorExit=Utils.errorExit
from core_symbol import CORE_SYMBOL
# Parse the standard test-harness command-line flags.
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
                              "--wallet-port"})
Utils.Debug=args.v
totalNodes=4
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
walletPort=args.wallet_port
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killrsnInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.rsnWalletName
ClientName="arisecli"
try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.setWalletMgr(walletMgr)
    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    Print("Stand up cluster")
    # 4 producing nodes, 21 producers each (84 candidate producers total).
    if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, useBiosBootFile=False) is False:
        Utils.cmdError("launcher")
        Utils.errorExit("Failed to stand up rsn cluster.")
    Print("Validating system accounts after bootstrap")
    cluster.validateAccounts(None)
    # Five voter accounts used to swing the election between node groups.
    accounts=cluster.createAccountKeys(5)
    if accounts is None:
        Utils.errorExit("FAILURE - create keys")
    accounts[0].name="tester111111"
    accounts[1].name="tester222222"
    accounts[2].name="tester333333"
    accounts[3].name="tester444444"
    accounts[4].name="tester555555"
    testWalletName="test"
    Print("Creating wallet \"%s\"." % (testWalletName))
    testWallet=walletMgr.create(testWalletName, [cluster.arisenAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]])
    for _, account in cluster.defProducerAccounts.items():
        walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
    Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8")))
    # Register every producer candidate on every node.
    for i in range(0, totalNodes):
        node=cluster.getNode(i)
        node.producers=Cluster.parseProducers(i)
        for prod in node.producers:
            trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True)
    node0=cluster.getNode(0)
    node1=cluster.getNode(1)
    node2=cluster.getNode(2)
    node3=cluster.getNode(3)
    node=node0
    # create accounts via arisen as otherwise a bid is needed
    for account in accounts:
        Print("Create new account %s via %s" % (account.name, cluster.arisenAccount.name))
        trans=node.createInitializeAccount(account, cluster.arisenAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True)
        transferAmount="100000000.0000 {0}".format(CORE_SYMBOL)
        Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.arisenAccount.name, account.name))
        node.transferFunds(cluster.arisenAccount, account, transferAmount, "test transfer")
        # Stake NET/CPU so the accounts carry voting weight.
        trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True)
    # containers for tracking producers
    prodsActive={}
    for i in range(0, 4):
        node=cluster.getNode(i)
        ProducerToNode.populate(node, i)
        for prod in node.producers:
            prodsActive[prod]=False
    #first account will vote for node0 producers, all others will vote for node1 producers
    # NOTE(review): the loop below actually votes every account for node0's
    # producers, yet node1's producers are then marked active — confirm the
    # intended voting split against the upstream test.
    node=node0
    for account in accounts:
        trans=node.vote(account, node.producers, waitForTransBlock=True)
    node=node1
    setActiveProducers(prodsActive, node1.producers)
    verifyProductionRounds(trans, node2, prodsActive, 2)
    # test shifting all 21 away from one node to another
    # first account will vote for node2 producers, all others will vote for node3 producers
    # NOTE(review): the bare `node1` below is a no-op expression — presumably
    # an intended reassignment (e.g. `node=node1`) was lost; confirm.
    node1
    for account in accounts:
        trans=node.vote(account, node.producers, waitForTransBlock=True)
    node=node2
    setActiveProducers(prodsActive, node2.producers)
    verifyProductionRounds(trans, node1, prodsActive, 2)
    testSuccessful=True
finally:
    # Always tear down nodes/wallet and dump diagnostics per the CLI flags.
    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killrsnInstances=killrsnInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)
exit(0)
| 39.810078 | 246 | 0.67345 |
ace17323769fae4682472cce158da509d875a978 | 452 | py | Python | reana_job_controller/version.py | dleehr/reana-job-controller | aa23803db5528ff1e31bb674fd57a28f83fd41ed | [
"MIT"
] | null | null | null | reana_job_controller/version.py | dleehr/reana-job-controller | aa23803db5528ff1e31bb674fd57a28f83fd41ed | [
"MIT"
] | null | null | null | reana_job_controller/version.py | dleehr/reana-job-controller | aa23803db5528ff1e31bb674fd57a28f83fd41ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for REANA-Job-Controller.
This file is imported by ``reana_job_controller.__init__`` and parsed by
``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = "0.4.0"
| 25.111111 | 72 | 0.727876 |
ace17518cdd21d904ee4db365b676d6915286e0e | 846 | py | Python | test/convert_worker/unit/test_tif_to_jpg.py | dainst/cilantro | d9a931d4a8ea4e505ffe7b22ac27ff0fe6cb6fa2 | [
"Apache-2.0"
] | 3 | 2019-09-04T12:40:33.000Z | 2021-12-28T16:33:27.000Z | test/convert_worker/unit/test_tif_to_jpg.py | dainst/cilantro | d9a931d4a8ea4e505ffe7b22ac27ff0fe6cb6fa2 | [
"Apache-2.0"
] | 97 | 2018-05-29T13:27:04.000Z | 2021-11-02T11:03:33.000Z | test/convert_worker/unit/test_tif_to_jpg.py | dainst/cilantro | d9a931d4a8ea4e505ffe7b22ac27ff0fe6cb6fa2 | [
"Apache-2.0"
] | 16 | 2018-04-25T11:39:21.000Z | 2019-12-16T14:37:39.000Z | import os
import logging
from pathlib import Path
from test.convert_worker.unit.convert_test import ConvertTest
from workers.convert.convert_image import convert_tif_to_jpg
log = logging.getLogger(__name__)
class TifToJpgTest(ConvertTest):
    """Unit tests for convert_tif_to_jpg (success and broken-input paths)."""

    def setUp(self):
        super().setUp()
        # Input fixtures live under the test resource dir; output goes to
        # the per-test working dir.
        self.tif_path = f'{self.resource_dir}/files/test.tif'
        self.broken_tif_path = f'{self.resource_dir}/files/broken.tif'
        self.jpg_path = f'{self.working_dir}/test.jpg'

    def test_success(self):
        """A valid TIF converts to a non-empty JPG file."""
        convert_tif_to_jpg(self.tif_path, self.jpg_path)
        output = Path(self.jpg_path)
        self.assertTrue(output.is_file())
        self.assertGreater(os.stat(self.jpg_path).st_size, 0)

    def test_error(self):
        """A corrupt TIF raises OSError."""
        with self.assertRaises(OSError):
            convert_tif_to_jpg(self.broken_tif_path, self.jpg_path)
ace1751bbf46253a0be1fa9f9e1c49c3c8b887f8 | 159,696 | py | Python | pubsub/google/cloud/pubsub_v1/proto/pubsub_pb2.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | 1 | 2019-06-14T10:11:59.000Z | 2019-06-14T10:11:59.000Z | pubsub/google/cloud/pubsub_v1/proto/pubsub_pb2.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | null | null | null | pubsub/google/cloud/pubsub_v1/proto/pubsub_pb2.py | Kami/google-cloud-python | a14ffbaa50f7823c2792e91413a37cbc3ce687f5 | [
"Apache-2.0"
] | 1 | 2020-04-14T10:47:41.000Z | 2020-04-14T10:47:41.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/pubsub_v1/proto/pubsub.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/pubsub_v1/proto/pubsub.proto",
package="google.pubsub.v1",
syntax="proto3",
serialized_options=_b(
"\n\024com.google.pubsub.v1B\013PubsubProtoP\001Z6google.golang.org/genproto/googleapis/pubsub/v1;pubsub\370\001\001\252\002\026Google.Cloud.PubSub.V1\312\002\026Google\\Cloud\\PubSub\\V1\352\002\031Google::Cloud::PubSub::V1"
),
serialized_pb=_b(
'\n)google/cloud/pubsub_v1/proto/pubsub.proto\x12\x10google.pubsub.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto";\n\x14MessageStoragePolicy\x12#\n\x1b\x61llowed_persistence_regions\x18\x01 \x03(\t"\xd7\x01\n\x05Topic\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x33\n\x06labels\x18\x02 \x03(\x0b\x32#.google.pubsub.v1.Topic.LabelsEntry\x12\x46\n\x16message_storage_policy\x18\x03 \x01(\x0b\x32&.google.pubsub.v1.MessageStoragePolicy\x12\x14\n\x0ckms_key_name\x18\x05 \x01(\t\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xf1\x01\n\rPubsubMessage\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x43\n\nattributes\x18\x02 \x03(\x0b\x32/.google.pubsub.v1.PubsubMessage.AttributesEntry\x12\x12\n\nmessage_id\x18\x03 \x01(\t\x12\x30\n\x0cpublish_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cordering_key\x18\x05 \x01(\t\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01" \n\x0fGetTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t"m\n\x12UpdateTopicRequest\x12&\n\x05topic\x18\x01 \x01(\x0b\x32\x17.google.pubsub.v1.Topic\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"R\n\x0ePublishRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x31\n\x08messages\x18\x02 \x03(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage"&\n\x0fPublishResponse\x12\x13\n\x0bmessage_ids\x18\x01 \x03(\t"K\n\x11ListTopicsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"V\n\x12ListTopicsResponse\x12\'\n\x06topics\x18\x01 \x03(\x0b\x32\x17.google.pubsub.v1.Topic\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"U\n\x1dListTopicSubscriptionsRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 
\x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"P\n\x1eListTopicSubscriptionsResponse\x12\x15\n\rsubscriptions\x18\x01 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"Q\n\x19ListTopicSnapshotsRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"H\n\x1aListTopicSnapshotsResponse\x12\x11\n\tsnapshots\x18\x01 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"#\n\x12\x44\x65leteTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t"\xa5\x03\n\x0cSubscription\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x31\n\x0bpush_config\x18\x04 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x05 \x01(\x05\x12\x1d\n\x15retain_acked_messages\x18\x07 \x01(\x08\x12=\n\x1amessage_retention_duration\x18\x08 \x01(\x0b\x32\x19.google.protobuf.Duration\x12:\n\x06labels\x18\t \x03(\x0b\x32*.google.pubsub.v1.Subscription.LabelsEntry\x12\x1f\n\x17\x65nable_message_ordering\x18\n \x01(\x08\x12=\n\x11\x65xpiration_policy\x18\x0b \x01(\x0b\x32".google.pubsub.v1.ExpirationPolicy\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01":\n\x10\x45xpirationPolicy\x12&\n\x03ttl\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration"\xad\x02\n\nPushConfig\x12\x15\n\rpush_endpoint\x18\x01 \x01(\t\x12@\n\nattributes\x18\x02 \x03(\x0b\x32,.google.pubsub.v1.PushConfig.AttributesEntry\x12<\n\noidc_token\x18\x03 \x01(\x0b\x32&.google.pubsub.v1.PushConfig.OidcTokenH\x00\x1a<\n\tOidcToken\x12\x1d\n\x15service_account_email\x18\x01 \x01(\t\x12\x10\n\x08\x61udience\x18\x02 \x01(\t\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x17\n\x15\x61uthentication_method"S\n\x0fReceivedMessage\x12\x0e\n\x06\x61\x63k_id\x18\x01 \x01(\t\x12\x30\n\x07message\x18\x02 \x01(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage".\n\x16GetSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 
\x01(\t"\x82\x01\n\x19UpdateSubscriptionRequest\x12\x34\n\x0csubscription\x18\x01 \x01(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"R\n\x18ListSubscriptionsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"k\n\x19ListSubscriptionsResponse\x12\x35\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"1\n\x19\x44\x65leteSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t"b\n\x17ModifyPushConfigRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x31\n\x0bpush_config\x18\x02 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig"U\n\x0bPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x1a\n\x12return_immediately\x18\x02 \x01(\x08\x12\x14\n\x0cmax_messages\x18\x03 \x01(\x05"L\n\x0cPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage"_\n\x18ModifyAckDeadlineRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x04 \x03(\t\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x03 \x01(\x05";\n\x12\x41\x63knowledgeRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t"\xa4\x01\n\x14StreamingPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\x12\x1f\n\x17modify_deadline_seconds\x18\x03 \x03(\x05\x12\x1f\n\x17modify_deadline_ack_ids\x18\x04 \x03(\t\x12#\n\x1bstream_ack_deadline_seconds\x18\x05 \x01(\x05"U\n\x15StreamingPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage"\xaf\x01\n\x15\x43reateSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csubscription\x18\x02 \x01(\t\x12\x43\n\x06labels\x18\x03 \x03(\x0b\x32\x33.google.pubsub.v1.CreateSnapshotRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01"v\n\x15UpdateSnapshotRequest\x12,\n\x08snapshot\x18\x01 \x01(\x0b\x32\x1a.google.pubsub.v1.Snapshot\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xbf\x01\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12/\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x06labels\x18\x04 \x03(\x0b\x32&.google.pubsub.v1.Snapshot.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"&\n\x12GetSnapshotRequest\x12\x10\n\x08snapshot\x18\x01 \x01(\t"N\n\x14ListSnapshotsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"_\n\x15ListSnapshotsResponse\x12-\n\tsnapshots\x18\x01 \x03(\x0b\x32\x1a.google.pubsub.v1.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t")\n\x15\x44\x65leteSnapshotRequest\x12\x10\n\x08snapshot\x18\x01 \x01(\t"m\n\x0bSeekRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12*\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x12\n\x08snapshot\x18\x03 \x01(\tH\x00\x42\x08\n\x06target"\x0e\n\x0cSeekResponse2\xbf\x08\n\tPublisher\x12j\n\x0b\x43reateTopic\x12\x17.google.pubsub.v1.Topic\x1a\x17.google.pubsub.v1.Topic")\x82\xd3\xe4\x93\x02#\x1a\x1e/v1/{name=projects/*/topics/*}:\x01*\x12}\n\x0bUpdateTopic\x12$.google.pubsub.v1.UpdateTopicRequest\x1a\x17.google.pubsub.v1.Topic"/\x82\xd3\xe4\x93\x02)2$/v1/{topic.name=projects/*/topics/*}:\x01*\x12\x82\x01\n\x07Publish\x12 
.google.pubsub.v1.PublishRequest\x1a!.google.pubsub.v1.PublishResponse"2\x82\xd3\xe4\x93\x02,"\'/v1/{topic=projects/*/topics/*}:publish:\x01*\x12o\n\x08GetTopic\x12!.google.pubsub.v1.GetTopicRequest\x1a\x17.google.pubsub.v1.Topic"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{topic=projects/*/topics/*}\x12\x80\x01\n\nListTopics\x12#.google.pubsub.v1.ListTopicsRequest\x1a$.google.pubsub.v1.ListTopicsResponse"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{project=projects/*}/topics\x12\xb2\x01\n\x16ListTopicSubscriptions\x12/.google.pubsub.v1.ListTopicSubscriptionsRequest\x1a\x30.google.pubsub.v1.ListTopicSubscriptionsResponse"5\x82\xd3\xe4\x93\x02/\x12-/v1/{topic=projects/*/topics/*}/subscriptions\x12\xa2\x01\n\x12ListTopicSnapshots\x12+.google.pubsub.v1.ListTopicSnapshotsRequest\x1a,.google.pubsub.v1.ListTopicSnapshotsResponse"1\x82\xd3\xe4\x93\x02+\x12)/v1/{topic=projects/*/topics/*}/snapshots\x12t\n\x0b\x44\x65leteTopic\x12$.google.pubsub.v1.DeleteTopicRequest\x1a\x16.google.protobuf.Empty"\'\x82\xd3\xe4\x93\x02!*\x1f/v1/{topic=projects/*/topics/*}2\xf9\x11\n\nSubscriber\x12\x86\x01\n\x12\x43reateSubscription\x12\x1e.google.pubsub.v1.Subscription\x1a\x1e.google.pubsub.v1.Subscription"0\x82\xd3\xe4\x93\x02*\x1a%/v1/{name=projects/*/subscriptions/*}:\x01*\x12\x92\x01\n\x0fGetSubscription\x12(.google.pubsub.v1.GetSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription"5\x82\xd3\xe4\x93\x02/\x12-/v1/{subscription=projects/*/subscriptions/*}\x12\xa0\x01\n\x12UpdateSubscription\x12+.google.pubsub.v1.UpdateSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription"=\x82\xd3\xe4\x93\x02\x37\x32\x32/v1/{subscription.name=projects/*/subscriptions/*}:\x01*\x12\x9c\x01\n\x11ListSubscriptions\x12*.google.pubsub.v1.ListSubscriptionsRequest\x1a+.google.pubsub.v1.ListSubscriptionsResponse".\x82\xd3\xe4\x93\x02(\x12&/v1/{project=projects/*}/subscriptions\x12\x90\x01\n\x12\x44\x65leteSubscription\x12+.google.pubsub.v1.DeleteSubscriptionRequest\x1a\x16.google.protobuf.Empty"5\x82\xd3\xe4\x93\x02/*-/v1/{
subscription=projects/*/subscriptions/*}\x12\xa3\x01\n\x11ModifyAckDeadline\x12*.google.pubsub.v1.ModifyAckDeadlineRequest\x1a\x16.google.protobuf.Empty"J\x82\xd3\xe4\x93\x02\x44"?/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline:\x01*\x12\x91\x01\n\x0b\x41\x63knowledge\x12$.google.pubsub.v1.AcknowledgeRequest\x1a\x16.google.protobuf.Empty"D\x82\xd3\xe4\x93\x02>"9/v1/{subscription=projects/*/subscriptions/*}:acknowledge:\x01*\x12\x84\x01\n\x04Pull\x12\x1d.google.pubsub.v1.PullRequest\x1a\x1e.google.pubsub.v1.PullResponse"=\x82\xd3\xe4\x93\x02\x37"2/v1/{subscription=projects/*/subscriptions/*}:pull:\x01*\x12\x66\n\rStreamingPull\x12&.google.pubsub.v1.StreamingPullRequest\x1a\'.google.pubsub.v1.StreamingPullResponse"\x00(\x01\x30\x01\x12\xa0\x01\n\x10ModifyPushConfig\x12).google.pubsub.v1.ModifyPushConfigRequest\x1a\x16.google.protobuf.Empty"I\x82\xd3\xe4\x93\x02\x43">/v1/{subscription=projects/*/subscriptions/*}:modifyPushConfig:\x01*\x12~\n\x0bGetSnapshot\x12$.google.pubsub.v1.GetSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot"-\x82\xd3\xe4\x93\x02\'\x12%/v1/{snapshot=projects/*/snapshots/*}\x12\x8c\x01\n\rListSnapshots\x12&.google.pubsub.v1.ListSnapshotsRequest\x1a\'.google.pubsub.v1.ListSnapshotsResponse"*\x82\xd3\xe4\x93\x02$\x12"/v1/{project=projects/*}/snapshots\x12\x83\x01\n\x0e\x43reateSnapshot\x12\'.google.pubsub.v1.CreateSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot",\x82\xd3\xe4\x93\x02&\x1a!/v1/{name=projects/*/snapshots/*}:\x01*\x12\x8c\x01\n\x0eUpdateSnapshot\x12\'.google.pubsub.v1.UpdateSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot"5\x82\xd3\xe4\x93\x02/2*/v1/{snapshot.name=projects/*/snapshots/*}:\x01*\x12\x80\x01\n\x0e\x44\x65leteSnapshot\x12\'.google.pubsub.v1.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"-\x82\xd3\xe4\x93\x02\'*%/v1/{snapshot=projects/*/snapshots/*}\x12\x84\x01\n\x04Seek\x12\x1d.google.pubsub.v1.SeekRequest\x1a\x1e.google.pubsub.v1.SeekResponse"=\x82\xd3\xe4\x93\x02\x37"2/v1/{subscription=projects/*/su
bscriptions/*}:seek:\x01*B\xae\x01\n\x14\x63om.google.pubsub.v1B\x0bPubsubProtoP\x01Z6google.golang.org/genproto/googleapis/pubsub/v1;pubsub\xf8\x01\x01\xaa\x02\x16Google.Cloud.PubSub.V1\xca\x02\x16Google\\Cloud\\PubSub\\V1\xea\x02\x19Google::Cloud::PubSub::V1b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,
google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_MESSAGESTORAGEPOLICY = _descriptor.Descriptor(
name="MessageStoragePolicy",
full_name="google.pubsub.v1.MessageStoragePolicy",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="allowed_persistence_regions",
full_name="google.pubsub.v1.MessageStoragePolicy.allowed_persistence_regions",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=248,
serialized_end=307,
)
_TOPIC_LABELSENTRY = _descriptor.Descriptor(
name="LabelsEntry",
full_name="google.pubsub.v1.Topic.LabelsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.pubsub.v1.Topic.LabelsEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.pubsub.v1.Topic.LabelsEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=480,
serialized_end=525,
)
_TOPIC = _descriptor.Descriptor(
name="Topic",
full_name="google.pubsub.v1.Topic",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.pubsub.v1.Topic.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="labels",
full_name="google.pubsub.v1.Topic.labels",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="message_storage_policy",
full_name="google.pubsub.v1.Topic.message_storage_policy",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="kms_key_name",
full_name="google.pubsub.v1.Topic.kms_key_name",
index=3,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_TOPIC_LABELSENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=310,
serialized_end=525,
)
_PUBSUBMESSAGE_ATTRIBUTESENTRY = _descriptor.Descriptor(
name="AttributesEntry",
full_name="google.pubsub.v1.PubsubMessage.AttributesEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.pubsub.v1.PubsubMessage.AttributesEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.pubsub.v1.PubsubMessage.AttributesEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=720,
serialized_end=769,
)
_PUBSUBMESSAGE = _descriptor.Descriptor(
name="PubsubMessage",
full_name="google.pubsub.v1.PubsubMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="data",
full_name="google.pubsub.v1.PubsubMessage.data",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="attributes",
full_name="google.pubsub.v1.PubsubMessage.attributes",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="message_id",
full_name="google.pubsub.v1.PubsubMessage.message_id",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="publish_time",
full_name="google.pubsub.v1.PubsubMessage.publish_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="ordering_key",
full_name="google.pubsub.v1.PubsubMessage.ordering_key",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_PUBSUBMESSAGE_ATTRIBUTESENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=528,
serialized_end=769,
)
_GETTOPICREQUEST = _descriptor.Descriptor(
name="GetTopicRequest",
full_name="google.pubsub.v1.GetTopicRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="topic",
full_name="google.pubsub.v1.GetTopicRequest.topic",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=771,
serialized_end=803,
)
_UPDATETOPICREQUEST = _descriptor.Descriptor(
name="UpdateTopicRequest",
full_name="google.pubsub.v1.UpdateTopicRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="topic",
full_name="google.pubsub.v1.UpdateTopicRequest.topic",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.pubsub.v1.UpdateTopicRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=805,
serialized_end=914,
)
_PUBLISHREQUEST = _descriptor.Descriptor(
name="PublishRequest",
full_name="google.pubsub.v1.PublishRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="topic",
full_name="google.pubsub.v1.PublishRequest.topic",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="messages",
full_name="google.pubsub.v1.PublishRequest.messages",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=916,
serialized_end=998,
)
_PUBLISHRESPONSE = _descriptor.Descriptor(
name="PublishResponse",
full_name="google.pubsub.v1.PublishResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="message_ids",
full_name="google.pubsub.v1.PublishResponse.message_ids",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1000,
serialized_end=1038,
)
_LISTTOPICSREQUEST = _descriptor.Descriptor(
name="ListTopicsRequest",
full_name="google.pubsub.v1.ListTopicsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project",
full_name="google.pubsub.v1.ListTopicsRequest.project",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.pubsub.v1.ListTopicsRequest.page_size",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.pubsub.v1.ListTopicsRequest.page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1040,
serialized_end=1115,
)
_LISTTOPICSRESPONSE = _descriptor.Descriptor(
name="ListTopicsResponse",
full_name="google.pubsub.v1.ListTopicsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="topics",
full_name="google.pubsub.v1.ListTopicsResponse.topics",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.pubsub.v1.ListTopicsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1117,
serialized_end=1203,
)
_LISTTOPICSUBSCRIPTIONSREQUEST = _descriptor.Descriptor(
name="ListTopicSubscriptionsRequest",
full_name="google.pubsub.v1.ListTopicSubscriptionsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="topic",
full_name="google.pubsub.v1.ListTopicSubscriptionsRequest.topic",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.pubsub.v1.ListTopicSubscriptionsRequest.page_size",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.pubsub.v1.ListTopicSubscriptionsRequest.page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1205,
serialized_end=1290,
)
_LISTTOPICSUBSCRIPTIONSRESPONSE = _descriptor.Descriptor(
name="ListTopicSubscriptionsResponse",
full_name="google.pubsub.v1.ListTopicSubscriptionsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="subscriptions",
full_name="google.pubsub.v1.ListTopicSubscriptionsResponse.subscriptions",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.pubsub.v1.ListTopicSubscriptionsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1292,
serialized_end=1372,
)
_LISTTOPICSNAPSHOTSREQUEST = _descriptor.Descriptor(
name="ListTopicSnapshotsRequest",
full_name="google.pubsub.v1.ListTopicSnapshotsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="topic",
full_name="google.pubsub.v1.ListTopicSnapshotsRequest.topic",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.pubsub.v1.ListTopicSnapshotsRequest.page_size",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.pubsub.v1.ListTopicSnapshotsRequest.page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1374,
serialized_end=1455,
)
_LISTTOPICSNAPSHOTSRESPONSE = _descriptor.Descriptor(
name="ListTopicSnapshotsResponse",
full_name="google.pubsub.v1.ListTopicSnapshotsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="snapshots",
full_name="google.pubsub.v1.ListTopicSnapshotsResponse.snapshots",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.pubsub.v1.ListTopicSnapshotsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1457,
serialized_end=1529,
)
_DELETETOPICREQUEST = _descriptor.Descriptor(
name="DeleteTopicRequest",
full_name="google.pubsub.v1.DeleteTopicRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="topic",
full_name="google.pubsub.v1.DeleteTopicRequest.topic",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1531,
serialized_end=1566,
)
# Generated map-entry descriptor backing Subscription.labels: string key (1) ->
# string value (2). serialized_options _b("8\001") is the encoded map_entry=true option.
_SUBSCRIPTION_LABELSENTRY = _descriptor.Descriptor(
    name="LabelsEntry",
    full_name="google.pubsub.v1.Subscription.LabelsEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.pubsub.v1.Subscription.LabelsEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.pubsub.v1.Subscription.LabelsEntry.value",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=_b("8\001"),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=480,
    serialized_end=525,
)
# Generated descriptor for google.pubsub.v1.Subscription. Fields: name(1),
# topic(2), push_config(4), ack_deadline_seconds(5), retain_acked_messages(7),
# message_retention_duration(8), labels(9, map via nested LabelsEntry),
# enable_message_ordering(10), expiration_policy(11). Do not edit by hand.
_SUBSCRIPTION = _descriptor.Descriptor(
    name="Subscription",
    full_name="google.pubsub.v1.Subscription",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.pubsub.v1.Subscription.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="topic",
            full_name="google.pubsub.v1.Subscription.topic",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="push_config",
            full_name="google.pubsub.v1.Subscription.push_config",
            index=2,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="ack_deadline_seconds",
            full_name="google.pubsub.v1.Subscription.ack_deadline_seconds",
            index=3,
            number=5,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="retain_acked_messages",
            full_name="google.pubsub.v1.Subscription.retain_acked_messages",
            index=4,
            number=7,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="message_retention_duration",
            full_name="google.pubsub.v1.Subscription.message_retention_duration",
            index=5,
            number=8,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="labels",
            full_name="google.pubsub.v1.Subscription.labels",
            index=6,
            number=9,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="enable_message_ordering",
            full_name="google.pubsub.v1.Subscription.enable_message_ordering",
            index=7,
            number=10,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="expiration_policy",
            full_name="google.pubsub.v1.Subscription.expiration_policy",
            index=8,
            number=11,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[_SUBSCRIPTION_LABELSENTRY],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1569,
    serialized_end=1990,
)
# Generated descriptor for google.pubsub.v1.ExpirationPolicy: a single
# message-typed field `ttl` (field number 1). Do not edit by hand.
_EXPIRATIONPOLICY = _descriptor.Descriptor(
    name="ExpirationPolicy",
    full_name="google.pubsub.v1.ExpirationPolicy",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="ttl",
            full_name="google.pubsub.v1.ExpirationPolicy.ttl",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=1992,
    serialized_end=2050,
)
# Generated descriptor for the nested message PushConfig.OidcToken:
# string fields service_account_email(1) and audience(2).
_PUSHCONFIG_OIDCTOKEN = _descriptor.Descriptor(
    name="OidcToken",
    full_name="google.pubsub.v1.PushConfig.OidcToken",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="service_account_email",
            full_name="google.pubsub.v1.PushConfig.OidcToken.service_account_email",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="audience",
            full_name="google.pubsub.v1.PushConfig.OidcToken.audience",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2218,
    serialized_end=2278,
)
# Generated map-entry descriptor backing PushConfig.attributes: string key (1) ->
# string value (2). serialized_options _b("8\001") is the encoded map_entry=true option.
_PUSHCONFIG_ATTRIBUTESENTRY = _descriptor.Descriptor(
    name="AttributesEntry",
    full_name="google.pubsub.v1.PushConfig.AttributesEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.pubsub.v1.PushConfig.AttributesEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.pubsub.v1.PushConfig.AttributesEntry.value",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=_b("8\001"),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=720,
    serialized_end=769,
)
# Generated descriptor for google.pubsub.v1.PushConfig: push_endpoint(1),
# attributes(2, map via AttributesEntry), oidc_token(3). Declares the
# `authentication_method` oneof (member fields are wired up elsewhere in the
# generated file, so its `fields` list is empty here).
_PUSHCONFIG = _descriptor.Descriptor(
    name="PushConfig",
    full_name="google.pubsub.v1.PushConfig",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="push_endpoint",
            full_name="google.pubsub.v1.PushConfig.push_endpoint",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="attributes",
            full_name="google.pubsub.v1.PushConfig.attributes",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="oidc_token",
            full_name="google.pubsub.v1.PushConfig.oidc_token",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[_PUSHCONFIG_OIDCTOKEN, _PUSHCONFIG_ATTRIBUTESENTRY],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="authentication_method",
            full_name="google.pubsub.v1.PushConfig.authentication_method",
            index=0,
            containing_type=None,
            fields=[],
        )
    ],
    serialized_start=2053,
    serialized_end=2354,
)
# Generated descriptor for google.pubsub.v1.ReceivedMessage:
# string ack_id(1) and message-typed message(2).
_RECEIVEDMESSAGE = _descriptor.Descriptor(
    name="ReceivedMessage",
    full_name="google.pubsub.v1.ReceivedMessage",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="ack_id",
            full_name="google.pubsub.v1.ReceivedMessage.ack_id",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="message",
            full_name="google.pubsub.v1.ReceivedMessage.message",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2356,
    serialized_end=2439,
)
# Generated descriptor for google.pubsub.v1.GetSubscriptionRequest:
# a single string field `subscription` (field number 1).
_GETSUBSCRIPTIONREQUEST = _descriptor.Descriptor(
    name="GetSubscriptionRequest",
    full_name="google.pubsub.v1.GetSubscriptionRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.GetSubscriptionRequest.subscription",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2441,
    serialized_end=2487,
)
# Generated descriptor for google.pubsub.v1.UpdateSubscriptionRequest:
# message-typed subscription(1) and update_mask(2).
_UPDATESUBSCRIPTIONREQUEST = _descriptor.Descriptor(
    name="UpdateSubscriptionRequest",
    full_name="google.pubsub.v1.UpdateSubscriptionRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.UpdateSubscriptionRequest.subscription",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="update_mask",
            full_name="google.pubsub.v1.UpdateSubscriptionRequest.update_mask",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2490,
    serialized_end=2620,
)
# Generated descriptor for google.pubsub.v1.ListSubscriptionsRequest:
# project(1, string), page_size(2, int32), page_token(3, string).
_LISTSUBSCRIPTIONSREQUEST = _descriptor.Descriptor(
    name="ListSubscriptionsRequest",
    full_name="google.pubsub.v1.ListSubscriptionsRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="project",
            full_name="google.pubsub.v1.ListSubscriptionsRequest.project",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="page_size",
            full_name="google.pubsub.v1.ListSubscriptionsRequest.page_size",
            index=1,
            number=2,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="page_token",
            full_name="google.pubsub.v1.ListSubscriptionsRequest.page_token",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2622,
    serialized_end=2704,
)
# Generated descriptor for google.pubsub.v1.ListSubscriptionsResponse:
# repeated message subscriptions(1) and string next_page_token(2).
_LISTSUBSCRIPTIONSRESPONSE = _descriptor.Descriptor(
    name="ListSubscriptionsResponse",
    full_name="google.pubsub.v1.ListSubscriptionsResponse",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscriptions",
            full_name="google.pubsub.v1.ListSubscriptionsResponse.subscriptions",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="next_page_token",
            full_name="google.pubsub.v1.ListSubscriptionsResponse.next_page_token",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2706,
    serialized_end=2813,
)
# Generated descriptor for google.pubsub.v1.DeleteSubscriptionRequest:
# a single string field `subscription` (field number 1).
_DELETESUBSCRIPTIONREQUEST = _descriptor.Descriptor(
    name="DeleteSubscriptionRequest",
    full_name="google.pubsub.v1.DeleteSubscriptionRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.DeleteSubscriptionRequest.subscription",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2815,
    serialized_end=2864,
)
# Generated descriptor for google.pubsub.v1.ModifyPushConfigRequest:
# string subscription(1) and message-typed push_config(2).
_MODIFYPUSHCONFIGREQUEST = _descriptor.Descriptor(
    name="ModifyPushConfigRequest",
    full_name="google.pubsub.v1.ModifyPushConfigRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.ModifyPushConfigRequest.subscription",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="push_config",
            full_name="google.pubsub.v1.ModifyPushConfigRequest.push_config",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2866,
    serialized_end=2964,
)
# Generated descriptor for google.pubsub.v1.PullRequest:
# subscription(1, string), return_immediately(2, bool), max_messages(3, int32).
_PULLREQUEST = _descriptor.Descriptor(
    name="PullRequest",
    full_name="google.pubsub.v1.PullRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.PullRequest.subscription",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="return_immediately",
            full_name="google.pubsub.v1.PullRequest.return_immediately",
            index=1,
            number=2,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="max_messages",
            full_name="google.pubsub.v1.PullRequest.max_messages",
            index=2,
            number=3,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2966,
    serialized_end=3051,
)
# Generated descriptor for google.pubsub.v1.PullResponse:
# a single repeated message field `received_messages` (field number 1).
_PULLRESPONSE = _descriptor.Descriptor(
    name="PullResponse",
    full_name="google.pubsub.v1.PullResponse",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="received_messages",
            full_name="google.pubsub.v1.PullResponse.received_messages",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3053,
    serialized_end=3129,
)
# Generated descriptor for google.pubsub.v1.ModifyAckDeadlineRequest:
# subscription(1, string), ack_ids(4, repeated string),
# ack_deadline_seconds(3, int32). Note field numbers are not contiguous.
_MODIFYACKDEADLINEREQUEST = _descriptor.Descriptor(
    name="ModifyAckDeadlineRequest",
    full_name="google.pubsub.v1.ModifyAckDeadlineRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.ModifyAckDeadlineRequest.subscription",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="ack_ids",
            full_name="google.pubsub.v1.ModifyAckDeadlineRequest.ack_ids",
            index=1,
            number=4,
            type=9,
            cpp_type=9,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="ack_deadline_seconds",
            full_name="google.pubsub.v1.ModifyAckDeadlineRequest.ack_deadline_seconds",
            index=2,
            number=3,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3131,
    serialized_end=3226,
)
# Generated descriptor for google.pubsub.v1.AcknowledgeRequest:
# subscription(1, string) and ack_ids(2, repeated string).
_ACKNOWLEDGEREQUEST = _descriptor.Descriptor(
    name="AcknowledgeRequest",
    full_name="google.pubsub.v1.AcknowledgeRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.AcknowledgeRequest.subscription",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="ack_ids",
            full_name="google.pubsub.v1.AcknowledgeRequest.ack_ids",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3228,
    serialized_end=3287,
)
# Generated descriptor for google.pubsub.v1.StreamingPullRequest:
# subscription(1, string), ack_ids(2, repeated string),
# modify_deadline_seconds(3, repeated int32),
# modify_deadline_ack_ids(4, repeated string),
# stream_ack_deadline_seconds(5, int32).
_STREAMINGPULLREQUEST = _descriptor.Descriptor(
    name="StreamingPullRequest",
    full_name="google.pubsub.v1.StreamingPullRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.StreamingPullRequest.subscription",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="ack_ids",
            full_name="google.pubsub.v1.StreamingPullRequest.ack_ids",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="modify_deadline_seconds",
            full_name="google.pubsub.v1.StreamingPullRequest.modify_deadline_seconds",
            index=2,
            number=3,
            type=5,
            cpp_type=1,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="modify_deadline_ack_ids",
            full_name="google.pubsub.v1.StreamingPullRequest.modify_deadline_ack_ids",
            index=3,
            number=4,
            type=9,
            cpp_type=9,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="stream_ack_deadline_seconds",
            full_name="google.pubsub.v1.StreamingPullRequest.stream_ack_deadline_seconds",
            index=4,
            number=5,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3290,
    serialized_end=3454,
)
# Generated descriptor for google.pubsub.v1.StreamingPullResponse:
# a single repeated message field `received_messages` (field number 1).
_STREAMINGPULLRESPONSE = _descriptor.Descriptor(
    name="StreamingPullResponse",
    full_name="google.pubsub.v1.StreamingPullResponse",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="received_messages",
            full_name="google.pubsub.v1.StreamingPullResponse.received_messages",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3456,
    serialized_end=3541,
)
# Generated map-entry descriptor backing CreateSnapshotRequest.labels:
# string key (1) -> string value (2); serialized_options encodes map_entry=true.
_CREATESNAPSHOTREQUEST_LABELSENTRY = _descriptor.Descriptor(
    name="LabelsEntry",
    full_name="google.pubsub.v1.CreateSnapshotRequest.LabelsEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.pubsub.v1.CreateSnapshotRequest.LabelsEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.pubsub.v1.CreateSnapshotRequest.LabelsEntry.value",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=_b("8\001"),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=480,
    serialized_end=525,
)
# Generated descriptor for google.pubsub.v1.CreateSnapshotRequest:
# name(1, string), subscription(2, string), labels(3, map via LabelsEntry).
_CREATESNAPSHOTREQUEST = _descriptor.Descriptor(
    name="CreateSnapshotRequest",
    full_name="google.pubsub.v1.CreateSnapshotRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.pubsub.v1.CreateSnapshotRequest.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="subscription",
            full_name="google.pubsub.v1.CreateSnapshotRequest.subscription",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="labels",
            full_name="google.pubsub.v1.CreateSnapshotRequest.labels",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[_CREATESNAPSHOTREQUEST_LABELSENTRY],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3544,
    serialized_end=3719,
)
# Generated descriptor for google.pubsub.v1.UpdateSnapshotRequest:
# message-typed snapshot(1) and update_mask(2).
_UPDATESNAPSHOTREQUEST = _descriptor.Descriptor(
    name="UpdateSnapshotRequest",
    full_name="google.pubsub.v1.UpdateSnapshotRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="snapshot",
            full_name="google.pubsub.v1.UpdateSnapshotRequest.snapshot",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="update_mask",
            full_name="google.pubsub.v1.UpdateSnapshotRequest.update_mask",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3721,
    serialized_end=3839,
)
# Generated map-entry descriptor backing Snapshot.labels: string key (1) ->
# string value (2); serialized_options encodes map_entry=true.
_SNAPSHOT_LABELSENTRY = _descriptor.Descriptor(
    name="LabelsEntry",
    full_name="google.pubsub.v1.Snapshot.LabelsEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.pubsub.v1.Snapshot.LabelsEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.pubsub.v1.Snapshot.LabelsEntry.value",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=_b("8\001"),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=480,
    serialized_end=525,
)
# Generated descriptor for google.pubsub.v1.Snapshot: name(1, string),
# topic(2, string), expire_time(3, message), labels(4, map via LabelsEntry).
_SNAPSHOT = _descriptor.Descriptor(
    name="Snapshot",
    full_name="google.pubsub.v1.Snapshot",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.pubsub.v1.Snapshot.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="topic",
            full_name="google.pubsub.v1.Snapshot.topic",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="expire_time",
            full_name="google.pubsub.v1.Snapshot.expire_time",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="labels",
            full_name="google.pubsub.v1.Snapshot.labels",
            index=3,
            number=4,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[_SNAPSHOT_LABELSENTRY],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3842,
    serialized_end=4033,
)
# Generated descriptor for google.pubsub.v1.GetSnapshotRequest:
# a single string field `snapshot` (field number 1).
_GETSNAPSHOTREQUEST = _descriptor.Descriptor(
    name="GetSnapshotRequest",
    full_name="google.pubsub.v1.GetSnapshotRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="snapshot",
            full_name="google.pubsub.v1.GetSnapshotRequest.snapshot",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        )
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=4035,
    serialized_end=4073,
)
# Generated descriptor for google.pubsub.v1.ListSnapshotsRequest:
# project(1, string), page_size(2, int32), page_token(3, string).
_LISTSNAPSHOTSREQUEST = _descriptor.Descriptor(
    name="ListSnapshotsRequest",
    full_name="google.pubsub.v1.ListSnapshotsRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="project",
            full_name="google.pubsub.v1.ListSnapshotsRequest.project",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="page_size",
            full_name="google.pubsub.v1.ListSnapshotsRequest.page_size",
            index=1,
            number=2,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="page_token",
            full_name="google.pubsub.v1.ListSnapshotsRequest.page_token",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=4075,
    serialized_end=4153,
)
_LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor(
name="ListSnapshotsResponse",
full_name="google.pubsub.v1.ListSnapshotsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="snapshots",
full_name="google.pubsub.v1.ListSnapshotsResponse.snapshots",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.pubsub.v1.ListSnapshotsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4155,
serialized_end=4250,
)
_DELETESNAPSHOTREQUEST = _descriptor.Descriptor(
name="DeleteSnapshotRequest",
full_name="google.pubsub.v1.DeleteSnapshotRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="snapshot",
full_name="google.pubsub.v1.DeleteSnapshotRequest.snapshot",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4252,
serialized_end=4293,
)
_SEEKREQUEST = _descriptor.Descriptor(
name="SeekRequest",
full_name="google.pubsub.v1.SeekRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="subscription",
full_name="google.pubsub.v1.SeekRequest.subscription",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="time",
full_name="google.pubsub.v1.SeekRequest.time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="snapshot",
full_name="google.pubsub.v1.SeekRequest.snapshot",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="target",
full_name="google.pubsub.v1.SeekRequest.target",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=4295,
serialized_end=4404,
)
_SEEKRESPONSE = _descriptor.Descriptor(
name="SeekResponse",
full_name="google.pubsub.v1.SeekResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4406,
serialized_end=4420,
)
# ---------------------------------------------------------------------------
# Generated wiring: resolve cross-references between the Descriptor objects
# defined above (message_type links for message-typed fields, containing_type
# for nested map-entry types, and oneof membership), then register every
# message type on the file DESCRIPTOR.  Order matters — links must be in
# place before _sym_db.RegisterFileDescriptor(DESCRIPTOR) is called at the
# end of this section.  Do not edit by hand; regenerate from pubsub.proto.
# ---------------------------------------------------------------------------
_TOPIC_LABELSENTRY.containing_type = _TOPIC
_TOPIC.fields_by_name["labels"].message_type = _TOPIC_LABELSENTRY
_TOPIC.fields_by_name["message_storage_policy"].message_type = _MESSAGESTORAGEPOLICY
_PUBSUBMESSAGE_ATTRIBUTESENTRY.containing_type = _PUBSUBMESSAGE
_PUBSUBMESSAGE.fields_by_name[
    "attributes"
].message_type = _PUBSUBMESSAGE_ATTRIBUTESENTRY
_PUBSUBMESSAGE.fields_by_name[
    "publish_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATETOPICREQUEST.fields_by_name["topic"].message_type = _TOPIC
_UPDATETOPICREQUEST.fields_by_name[
    "update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_PUBLISHREQUEST.fields_by_name["messages"].message_type = _PUBSUBMESSAGE
_LISTTOPICSRESPONSE.fields_by_name["topics"].message_type = _TOPIC
_SUBSCRIPTION_LABELSENTRY.containing_type = _SUBSCRIPTION
_SUBSCRIPTION.fields_by_name["push_config"].message_type = _PUSHCONFIG
_SUBSCRIPTION.fields_by_name[
    "message_retention_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_SUBSCRIPTION.fields_by_name["labels"].message_type = _SUBSCRIPTION_LABELSENTRY
_SUBSCRIPTION.fields_by_name["expiration_policy"].message_type = _EXPIRATIONPOLICY
_EXPIRATIONPOLICY.fields_by_name[
    "ttl"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_PUSHCONFIG_OIDCTOKEN.containing_type = _PUSHCONFIG
_PUSHCONFIG_ATTRIBUTESENTRY.containing_type = _PUSHCONFIG
_PUSHCONFIG.fields_by_name["attributes"].message_type = _PUSHCONFIG_ATTRIBUTESENTRY
_PUSHCONFIG.fields_by_name["oidc_token"].message_type = _PUSHCONFIG_OIDCTOKEN
# PushConfig.oidc_token participates in the `authentication_method` oneof.
_PUSHCONFIG.oneofs_by_name["authentication_method"].fields.append(
    _PUSHCONFIG.fields_by_name["oidc_token"]
)
_PUSHCONFIG.fields_by_name["oidc_token"].containing_oneof = _PUSHCONFIG.oneofs_by_name[
    "authentication_method"
]
_RECEIVEDMESSAGE.fields_by_name["message"].message_type = _PUBSUBMESSAGE
_UPDATESUBSCRIPTIONREQUEST.fields_by_name["subscription"].message_type = _SUBSCRIPTION
_UPDATESUBSCRIPTIONREQUEST.fields_by_name[
    "update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTSUBSCRIPTIONSRESPONSE.fields_by_name["subscriptions"].message_type = _SUBSCRIPTION
_MODIFYPUSHCONFIGREQUEST.fields_by_name["push_config"].message_type = _PUSHCONFIG
_PULLRESPONSE.fields_by_name["received_messages"].message_type = _RECEIVEDMESSAGE
_STREAMINGPULLRESPONSE.fields_by_name[
    "received_messages"
].message_type = _RECEIVEDMESSAGE
_CREATESNAPSHOTREQUEST_LABELSENTRY.containing_type = _CREATESNAPSHOTREQUEST
_CREATESNAPSHOTREQUEST.fields_by_name[
    "labels"
].message_type = _CREATESNAPSHOTREQUEST_LABELSENTRY
_UPDATESNAPSHOTREQUEST.fields_by_name["snapshot"].message_type = _SNAPSHOT
_UPDATESNAPSHOTREQUEST.fields_by_name[
    "update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_SNAPSHOT_LABELSENTRY.containing_type = _SNAPSHOT
_SNAPSHOT.fields_by_name[
    "expire_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SNAPSHOT.fields_by_name["labels"].message_type = _SNAPSHOT_LABELSENTRY
_LISTSNAPSHOTSRESPONSE.fields_by_name["snapshots"].message_type = _SNAPSHOT
_SEEKREQUEST.fields_by_name[
    "time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
# SeekRequest.time and SeekRequest.snapshot are the two members of the
# `target` oneof declared on _SEEKREQUEST.
_SEEKREQUEST.oneofs_by_name["target"].fields.append(_SEEKREQUEST.fields_by_name["time"])
_SEEKREQUEST.fields_by_name["time"].containing_oneof = _SEEKREQUEST.oneofs_by_name[
    "target"
]
_SEEKREQUEST.oneofs_by_name["target"].fields.append(
    _SEEKREQUEST.fields_by_name["snapshot"]
)
_SEEKREQUEST.fields_by_name["snapshot"].containing_oneof = _SEEKREQUEST.oneofs_by_name[
    "target"
]
# Register every top-level message type on the file descriptor, then publish
# the file descriptor to the default symbol database.
DESCRIPTOR.message_types_by_name["MessageStoragePolicy"] = _MESSAGESTORAGEPOLICY
DESCRIPTOR.message_types_by_name["Topic"] = _TOPIC
DESCRIPTOR.message_types_by_name["PubsubMessage"] = _PUBSUBMESSAGE
DESCRIPTOR.message_types_by_name["GetTopicRequest"] = _GETTOPICREQUEST
DESCRIPTOR.message_types_by_name["UpdateTopicRequest"] = _UPDATETOPICREQUEST
DESCRIPTOR.message_types_by_name["PublishRequest"] = _PUBLISHREQUEST
DESCRIPTOR.message_types_by_name["PublishResponse"] = _PUBLISHRESPONSE
DESCRIPTOR.message_types_by_name["ListTopicsRequest"] = _LISTTOPICSREQUEST
DESCRIPTOR.message_types_by_name["ListTopicsResponse"] = _LISTTOPICSRESPONSE
DESCRIPTOR.message_types_by_name[
    "ListTopicSubscriptionsRequest"
] = _LISTTOPICSUBSCRIPTIONSREQUEST
DESCRIPTOR.message_types_by_name[
    "ListTopicSubscriptionsResponse"
] = _LISTTOPICSUBSCRIPTIONSRESPONSE
DESCRIPTOR.message_types_by_name[
    "ListTopicSnapshotsRequest"
] = _LISTTOPICSNAPSHOTSREQUEST
DESCRIPTOR.message_types_by_name[
    "ListTopicSnapshotsResponse"
] = _LISTTOPICSNAPSHOTSRESPONSE
DESCRIPTOR.message_types_by_name["DeleteTopicRequest"] = _DELETETOPICREQUEST
DESCRIPTOR.message_types_by_name["Subscription"] = _SUBSCRIPTION
DESCRIPTOR.message_types_by_name["ExpirationPolicy"] = _EXPIRATIONPOLICY
DESCRIPTOR.message_types_by_name["PushConfig"] = _PUSHCONFIG
DESCRIPTOR.message_types_by_name["ReceivedMessage"] = _RECEIVEDMESSAGE
DESCRIPTOR.message_types_by_name["GetSubscriptionRequest"] = _GETSUBSCRIPTIONREQUEST
DESCRIPTOR.message_types_by_name[
    "UpdateSubscriptionRequest"
] = _UPDATESUBSCRIPTIONREQUEST
DESCRIPTOR.message_types_by_name["ListSubscriptionsRequest"] = _LISTSUBSCRIPTIONSREQUEST
DESCRIPTOR.message_types_by_name[
    "ListSubscriptionsResponse"
] = _LISTSUBSCRIPTIONSRESPONSE
DESCRIPTOR.message_types_by_name[
    "DeleteSubscriptionRequest"
] = _DELETESUBSCRIPTIONREQUEST
DESCRIPTOR.message_types_by_name["ModifyPushConfigRequest"] = _MODIFYPUSHCONFIGREQUEST
DESCRIPTOR.message_types_by_name["PullRequest"] = _PULLREQUEST
DESCRIPTOR.message_types_by_name["PullResponse"] = _PULLRESPONSE
DESCRIPTOR.message_types_by_name["ModifyAckDeadlineRequest"] = _MODIFYACKDEADLINEREQUEST
DESCRIPTOR.message_types_by_name["AcknowledgeRequest"] = _ACKNOWLEDGEREQUEST
DESCRIPTOR.message_types_by_name["StreamingPullRequest"] = _STREAMINGPULLREQUEST
DESCRIPTOR.message_types_by_name["StreamingPullResponse"] = _STREAMINGPULLRESPONSE
DESCRIPTOR.message_types_by_name["CreateSnapshotRequest"] = _CREATESNAPSHOTREQUEST
DESCRIPTOR.message_types_by_name["UpdateSnapshotRequest"] = _UPDATESNAPSHOTREQUEST
DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT
DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST
DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST
DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE
DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST
DESCRIPTOR.message_types_by_name["SeekRequest"] = _SEEKREQUEST
DESCRIPTOR.message_types_by_name["SeekResponse"] = _SEEKRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MessageStoragePolicy = _reflection.GeneratedProtocolMessageType(
"MessageStoragePolicy",
(_message.Message,),
dict(
DESCRIPTOR=_MESSAGESTORAGEPOLICY,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""A message storage policy.
Attributes:
allowed_persistence_regions:
The list of GCP region IDs where messages that are published
to the topic may be persisted in storage. Messages published
by publishers running in non-allowed GCP regions (or running
outside of GCP altogether) will be routed for storage in one
of the allowed regions. An empty list indicates a
misconfiguration at the project or organization level, which
will result in all Publish operations failing.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.MessageStoragePolicy)
),
)
_sym_db.RegisterMessage(MessageStoragePolicy)
Topic = _reflection.GeneratedProtocolMessageType(
"Topic",
(_message.Message,),
dict(
LabelsEntry=_reflection.GeneratedProtocolMessageType(
"LabelsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_TOPIC_LABELSENTRY,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2"
# @@protoc_insertion_point(class_scope:google.pubsub.v1.Topic.LabelsEntry)
),
),
DESCRIPTOR=_TOPIC,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""A topic resource.
Attributes:
name:
The name of the topic. It must have the format
``"projects/{project}/topics/{topic}"``. ``{topic}`` must
start with a letter, and contain only letters (``[A-Za-z]``),
numbers (``[0-9]``), dashes (``-``), underscores (``_``),
periods (``.``), tildes (``~``), plus (``+``) or percent signs
(``%``). It must be between 3 and 255 characters in length,
and it must not start with ``"goog"``.
labels:
See Creating and managing labels.
message_storage_policy:
Policy constraining how messages published to the topic may be
stored. It is determined when the topic is created based on
the policy configured at the project level. It must not be set
by the caller in the request to CreateTopic or to UpdateTopic.
This field will be populated in the responses for GetTopic,
CreateTopic, and UpdateTopic: if not present in the response,
then no constraints are in effect.
kms_key_name:
The resource name of the Cloud KMS CryptoKey to be used to
protect access to messages published on this topic. The
expected format is
``projects/*/locations/*/keyRings/*/cryptoKeys/*``.
EXPERIMENTAL: This feature is part of a closed alpha release.
This API might be changed in backward-incompatible ways and is
not recommended for production use. It is not subject to any
SLA or deprecation policy.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.Topic)
),
)
_sym_db.RegisterMessage(Topic)
_sym_db.RegisterMessage(Topic.LabelsEntry)
PubsubMessage = _reflection.GeneratedProtocolMessageType(
"PubsubMessage",
(_message.Message,),
dict(
AttributesEntry=_reflection.GeneratedProtocolMessageType(
"AttributesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_PUBSUBMESSAGE_ATTRIBUTESENTRY,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2"
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PubsubMessage.AttributesEntry)
),
),
DESCRIPTOR=_PUBSUBMESSAGE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""A message that is published by publishers and consumed by subscribers.
The message must contain either a non-empty data field or at least one
attribute. Note that client libraries represent this object differently
depending on the language. See the corresponding client library
documentation for more information. See Quotas and limits for more
information about message limits.
Attributes:
data:
The message data field. If this field is empty, the message
must contain at least one attribute.
attributes:
Optional attributes for this message.
message_id:
ID of this message, assigned by the server when the message is
published. Guaranteed to be unique within the topic. This
value may be read by a subscriber that receives a
``PubsubMessage`` via a ``Pull`` call or a push delivery. It
must not be populated by the publisher in a ``Publish`` call.
publish_time:
The time at which the message was published, populated by the
server when it receives the ``Publish`` call. It must not be
populated by the publisher in a ``Publish`` call.
ordering_key:
Identifies related messages for which publish order should be
respected. If a ``Subscription`` has
``enable_message_ordering`` set to ``true``, messages
published with the same ``ordering_key`` value will be
delivered to subscribers in the order in which they are
received by the Pub/Sub system. EXPERIMENTAL: This feature is
part of a closed alpha release. This API might be changed in
backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation
policy.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PubsubMessage)
),
)
_sym_db.RegisterMessage(PubsubMessage)
_sym_db.RegisterMessage(PubsubMessage.AttributesEntry)
GetTopicRequest = _reflection.GeneratedProtocolMessageType(
"GetTopicRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETTOPICREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the GetTopic method.
Attributes:
topic:
The name of the topic to get. Format is
``projects/{project}/topics/{topic}``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.GetTopicRequest)
),
)
_sym_db.RegisterMessage(GetTopicRequest)
UpdateTopicRequest = _reflection.GeneratedProtocolMessageType(
"UpdateTopicRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATETOPICREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the UpdateTopic method.
Attributes:
topic:
The updated topic object.
update_mask:
Indicates which fields in the provided topic to update. Must
be specified and non-empty. Note that if ``update_mask``
contains "message\_storage\_policy" then the new value will be
determined based on the policy configured at the project or
organization level. The ``message_storage_policy`` must not be
set in the ``topic`` provided above.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateTopicRequest)
),
)
_sym_db.RegisterMessage(UpdateTopicRequest)
PublishRequest = _reflection.GeneratedProtocolMessageType(
"PublishRequest",
(_message.Message,),
dict(
DESCRIPTOR=_PUBLISHREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the Publish method.
Attributes:
topic:
The messages in the request will be published on this topic.
Format is ``projects/{project}/topics/{topic}``.
messages:
The messages to publish.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PublishRequest)
),
)
_sym_db.RegisterMessage(PublishRequest)
PublishResponse = _reflection.GeneratedProtocolMessageType(
"PublishResponse",
(_message.Message,),
dict(
DESCRIPTOR=_PUBLISHRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``Publish`` method.
Attributes:
message_ids:
The server-assigned ID of each published message, in the same
order as the messages in the request. IDs are guaranteed to be
unique within the topic.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PublishResponse)
),
)
_sym_db.RegisterMessage(PublishResponse)
ListTopicsRequest = _reflection.GeneratedProtocolMessageType(
"ListTopicsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTTOPICSREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``ListTopics`` method.
Attributes:
project:
The name of the project in which to list topics. Format is
``projects/{project-id}``.
page_size:
Maximum number of topics to return.
page_token:
The value returned by the last ``ListTopicsResponse``;
indicates that this is a continuation of a prior
``ListTopics`` call, and that the system should return the
next page of data.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicsRequest)
),
)
_sym_db.RegisterMessage(ListTopicsRequest)
ListTopicsResponse = _reflection.GeneratedProtocolMessageType(
"ListTopicsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTTOPICSRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``ListTopics`` method.
Attributes:
topics:
The resulting topics.
next_page_token:
If not empty, indicates that there may be more topics that
match the request; this value should be passed in a new
``ListTopicsRequest``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicsResponse)
),
)
_sym_db.RegisterMessage(ListTopicsResponse)
ListTopicSubscriptionsRequest = _reflection.GeneratedProtocolMessageType(
"ListTopicSubscriptionsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTTOPICSUBSCRIPTIONSREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``ListTopicSubscriptions`` method.
Attributes:
topic:
The name of the topic that subscriptions are attached to.
Format is ``projects/{project}/topics/{topic}``.
page_size:
Maximum number of subscription names to return.
page_token:
The value returned by the last
``ListTopicSubscriptionsResponse``; indicates that this is a
continuation of a prior ``ListTopicSubscriptions`` call, and
that the system should return the next page of data.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicSubscriptionsRequest)
),
)
_sym_db.RegisterMessage(ListTopicSubscriptionsRequest)
ListTopicSubscriptionsResponse = _reflection.GeneratedProtocolMessageType(
"ListTopicSubscriptionsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTTOPICSUBSCRIPTIONSRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``ListTopicSubscriptions`` method.
Attributes:
subscriptions:
The names of the subscriptions that match the request.
next_page_token:
If not empty, indicates that there may be more subscriptions
that match the request; this value should be passed in a new
``ListTopicSubscriptionsRequest`` to get more subscriptions.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicSubscriptionsResponse)
),
)
_sym_db.RegisterMessage(ListTopicSubscriptionsResponse)
ListTopicSnapshotsRequest = _reflection.GeneratedProtocolMessageType(
"ListTopicSnapshotsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTTOPICSNAPSHOTSREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``ListTopicSnapshots`` method.
Attributes:
topic:
The name of the topic that snapshots are attached to. Format
is ``projects/{project}/topics/{topic}``.
page_size:
Maximum number of snapshot names to return.
page_token:
The value returned by the last ``ListTopicSnapshotsResponse``;
indicates that this is a continuation of a prior
``ListTopicSnapshots`` call, and that the system should return
the next page of data.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicSnapshotsRequest)
),
)
_sym_db.RegisterMessage(ListTopicSnapshotsRequest)
ListTopicSnapshotsResponse = _reflection.GeneratedProtocolMessageType(
"ListTopicSnapshotsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTTOPICSNAPSHOTSRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``ListTopicSnapshots`` method.
Attributes:
snapshots:
The names of the snapshots that match the request.
next_page_token:
If not empty, indicates that there may be more snapshots that
match the request; this value should be passed in a new
``ListTopicSnapshotsRequest`` to get more snapshots.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicSnapshotsResponse)
),
)
_sym_db.RegisterMessage(ListTopicSnapshotsResponse)
DeleteTopicRequest = _reflection.GeneratedProtocolMessageType(
"DeleteTopicRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DELETETOPICREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``DeleteTopic`` method.
Attributes:
topic:
Name of the topic to delete. Format is
``projects/{project}/topics/{topic}``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteTopicRequest)
),
)
_sym_db.RegisterMessage(DeleteTopicRequest)
Subscription = _reflection.GeneratedProtocolMessageType(
"Subscription",
(_message.Message,),
dict(
LabelsEntry=_reflection.GeneratedProtocolMessageType(
"LabelsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_SUBSCRIPTION_LABELSENTRY,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2"
# @@protoc_insertion_point(class_scope:google.pubsub.v1.Subscription.LabelsEntry)
),
),
DESCRIPTOR=_SUBSCRIPTION,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""A subscription resource.
Attributes:
name:
The name of the subscription. It must have the format
``"projects/{project}/subscriptions/{subscription}"``.
``{subscription}`` must start with a letter, and contain only
letters (``[A-Za-z]``), numbers (``[0-9]``), dashes (``-``),
underscores (``_``), periods (``.``), tildes (``~``), plus
(``+``) or percent signs (``%``). It must be between 3 and 255
characters in length, and it must not start with ``"goog"``.
topic:
The name of the topic from which this subscription is
receiving messages. Format is
``projects/{project}/topics/{topic}``. The value of this field
will be ``_deleted-topic_`` if the topic has been deleted.
push_config:
If push delivery is used with this subscription, this field is
used to configure it. An empty ``pushConfig`` signifies that
the subscriber will pull and ack messages using API methods.
ack_deadline_seconds:
The approximate amount of time (on a best-effort basis)
Pub/Sub waits for the subscriber to acknowledge receipt before
resending the message. In the interval after the message is
delivered and before it is acknowledged, it is considered to
be outstanding. During that time period, the message will not
be redelivered (on a best-effort basis). For pull
subscriptions, this value is used as the initial value for the
ack deadline. To override this value for a given message, call
``ModifyAckDeadline`` with the corresponding ``ack_id`` if
using non-streaming pull or send the ``ack_id`` in a
``StreamingModifyAckDeadlineRequest`` if using streaming pull.
The minimum custom deadline you can specify is 10 seconds. The
maximum custom deadline you can specify is 600 seconds (10
minutes). If this parameter is 0, a default value of 10
seconds is used. For push delivery, this value is also used
to set the request timeout for the call to the push endpoint.
If the subscriber never acknowledges the message, the Pub/Sub
system will eventually redeliver the message.
retain_acked_messages:
Indicates whether to retain acknowledged messages. If true,
then messages are not expunged from the subscription's
backlog, even if they are acknowledged, until they fall out of
the ``message_retention_duration`` window. This must be true
if you would like to Seek to a timestamp.
message_retention_duration:
How long to retain unacknowledged messages in the
subscription's backlog, from the moment a message is
published. If ``retain_acked_messages`` is true, then this
also configures the retention of acknowledged messages, and
thus configures how far back in time a ``Seek`` can be done.
Defaults to 7 days. Cannot be more than 7 days or less than 10
minutes.
labels:
See Creating and managing labels.
enable_message_ordering:
If true, messages published with the same ``ordering_key`` in
``PubsubMessage`` will be delivered to the subscribers in the
order in which they are received by the Pub/Sub system.
Otherwise, they may be delivered in any order. EXPERIMENTAL:
This feature is part of a closed alpha release. This API might
be changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any SLA
or deprecation policy.
expiration_policy:
A policy that specifies the conditions for this subscription's
expiration. A subscription is considered active as long as any
connected subscriber is successfully consuming messages from
the subscription or is issuing operations on the subscription.
If ``expiration_policy`` is not set, a *default policy* with
``ttl`` of 31 days will be used. The minimum allowed value for
``expiration_policy.ttl`` is 1 day.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.Subscription)
),
)
_sym_db.RegisterMessage(Subscription)
_sym_db.RegisterMessage(Subscription.LabelsEntry)
ExpirationPolicy = _reflection.GeneratedProtocolMessageType(
"ExpirationPolicy",
(_message.Message,),
dict(
DESCRIPTOR=_EXPIRATIONPOLICY,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""A policy that specifies the conditions for resource expiration (i.e.,
automatic resource deletion).
Attributes:
ttl:
Specifies the "time-to-live" duration for an associated
resource. The resource expires if it is not active for a
period of ``ttl``. The definition of "activity" depends on the
type of the associated resource. The minimum and maximum
allowed values for ``ttl`` depend on the type of the
associated resource, as well. If ``ttl`` is not set, the
associated resource never expires.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ExpirationPolicy)
),
)
_sym_db.RegisterMessage(ExpirationPolicy)
PushConfig = _reflection.GeneratedProtocolMessageType(
"PushConfig",
(_message.Message,),
dict(
OidcToken=_reflection.GeneratedProtocolMessageType(
"OidcToken",
(_message.Message,),
dict(
DESCRIPTOR=_PUSHCONFIG_OIDCTOKEN,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Contains information needed for generating an `OpenID Connect
token <https://developers.google.com/identity/protocols/OpenIDConnect>`__.
Attributes:
service_account_email:
`Service account email
<https://cloud.google.com/iam/docs/service-accounts>`__ to be
used for generating the OIDC token. The caller (for
CreateSubscription, UpdateSubscription, and ModifyPushConfig
RPCs) must have the iam.serviceAccounts.actAs permission for
the service account.
audience:
Audience to be used when generating OIDC token. The audience
claim identifies the recipients that the JWT is intended for.
The audience value is a single case-sensitive string. Having
multiple values (array) for the audience field is not
supported. More info about the OIDC JWT token audience here:
https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not
specified, the Push endpoint URL will be used.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PushConfig.OidcToken)
),
),
AttributesEntry=_reflection.GeneratedProtocolMessageType(
"AttributesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_PUSHCONFIG_ATTRIBUTESENTRY,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2"
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PushConfig.AttributesEntry)
),
),
DESCRIPTOR=_PUSHCONFIG,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Configuration for a push delivery endpoint.
Attributes:
push_endpoint:
A URL locating the endpoint to which messages should be
pushed. For example, a Webhook endpoint might use
"https://example.com/push".
attributes:
Endpoint configuration attributes. Every endpoint has a set
of API supported attributes that can be used to control
different aspects of the message delivery. The currently
supported attribute is ``x-goog-version``, which you can use
to change the format of the pushed message. This attribute
indicates the version of the data expected by the endpoint.
This controls the shape of the pushed message (i.e., its
fields and metadata). The endpoint version is based on the
version of the Pub/Sub API. If not present during the
``CreateSubscription`` call, it will default to the version of
the API used to make such call. If not present during a
``ModifyPushConfig`` call, its value will not be changed.
``GetSubscription`` calls will always return a valid version,
even if the subscription was created without this attribute.
The possible values for this attribute are: - ``v1beta1``:
uses the push format defined in the v1beta1 Pub/Sub API. -
``v1`` or ``v1beta2``: uses the push format defined in the v1
Pub/Sub API.
authentication_method:
An authentication method used by push endpoints to verify the
source of push requests. This can be used with push endpoints
that are private by default to allow requests only from the
Cloud Pub/Sub system, for example. This field is optional and
should be set only by users interested in authenticated push.
oidc_token:
If specified, Pub/Sub will generate and attach an OIDC JWT
token as an ``Authorization`` header in the HTTP request for
every pushed message.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PushConfig)
),
)
_sym_db.RegisterMessage(PushConfig)
_sym_db.RegisterMessage(PushConfig.OidcToken)
_sym_db.RegisterMessage(PushConfig.AttributesEntry)
ReceivedMessage = _reflection.GeneratedProtocolMessageType(
"ReceivedMessage",
(_message.Message,),
dict(
DESCRIPTOR=_RECEIVEDMESSAGE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""A message and its corresponding acknowledgment ID.
Attributes:
ack_id:
This ID can be used to acknowledge the received message.
message:
The message.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ReceivedMessage)
),
)
_sym_db.RegisterMessage(ReceivedMessage)
GetSubscriptionRequest = _reflection.GeneratedProtocolMessageType(
"GetSubscriptionRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETSUBSCRIPTIONREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the GetSubscription method.
Attributes:
subscription:
The name of the subscription to get. Format is
``projects/{project}/subscriptions/{sub}``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.GetSubscriptionRequest)
),
)
_sym_db.RegisterMessage(GetSubscriptionRequest)
UpdateSubscriptionRequest = _reflection.GeneratedProtocolMessageType(
"UpdateSubscriptionRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATESUBSCRIPTIONREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the UpdateSubscription method.
Attributes:
subscription:
The updated subscription object.
update_mask:
Indicates which fields in the provided subscription to update.
Must be specified and non-empty.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateSubscriptionRequest)
),
)
_sym_db.RegisterMessage(UpdateSubscriptionRequest)
ListSubscriptionsRequest = _reflection.GeneratedProtocolMessageType(
"ListSubscriptionsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSUBSCRIPTIONSREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``ListSubscriptions`` method.
Attributes:
project:
The name of the project in which to list subscriptions. Format
is ``projects/{project-id}``.
page_size:
Maximum number of subscriptions to return.
page_token:
The value returned by the last ``ListSubscriptionsResponse``;
indicates that this is a continuation of a prior
``ListSubscriptions`` call, and that the system should return
the next page of data.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSubscriptionsRequest)
),
)
_sym_db.RegisterMessage(ListSubscriptionsRequest)
ListSubscriptionsResponse = _reflection.GeneratedProtocolMessageType(
"ListSubscriptionsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSUBSCRIPTIONSRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``ListSubscriptions`` method.
Attributes:
subscriptions:
The subscriptions that match the request.
next_page_token:
If not empty, indicates that there may be more subscriptions
that match the request; this value should be passed in a new
``ListSubscriptionsRequest`` to get more subscriptions.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSubscriptionsResponse)
),
)
_sym_db.RegisterMessage(ListSubscriptionsResponse)
DeleteSubscriptionRequest = _reflection.GeneratedProtocolMessageType(
"DeleteSubscriptionRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DELETESUBSCRIPTIONREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the DeleteSubscription method.
Attributes:
subscription:
The subscription to delete. Format is
``projects/{project}/subscriptions/{sub}``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteSubscriptionRequest)
),
)
_sym_db.RegisterMessage(DeleteSubscriptionRequest)
ModifyPushConfigRequest = _reflection.GeneratedProtocolMessageType(
"ModifyPushConfigRequest",
(_message.Message,),
dict(
DESCRIPTOR=_MODIFYPUSHCONFIGREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ModifyPushConfig method.
Attributes:
subscription:
The name of the subscription. Format is
``projects/{project}/subscriptions/{sub}``.
push_config:
The push configuration for future deliveries. An empty
``pushConfig`` indicates that the Pub/Sub system should stop
pushing messages from the given subscription and allow
messages to be pulled and acknowledged - effectively pausing
the subscription if ``Pull`` or ``StreamingPull`` is not
called.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ModifyPushConfigRequest)
),
)
_sym_db.RegisterMessage(ModifyPushConfigRequest)
PullRequest = _reflection.GeneratedProtocolMessageType(
"PullRequest",
(_message.Message,),
dict(
DESCRIPTOR=_PULLREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``Pull`` method.
Attributes:
subscription:
The subscription from which messages should be pulled. Format
is ``projects/{project}/subscriptions/{sub}``.
return_immediately:
If this field set to true, the system will respond immediately
even if it there are no messages available to return in the
``Pull`` response. Otherwise, the system may wait (for a
bounded amount of time) until at least one message is
available, rather than returning no messages.
max_messages:
The maximum number of messages returned for this request. The
Pub/Sub system may return fewer than the number specified.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PullRequest)
),
)
_sym_db.RegisterMessage(PullRequest)
PullResponse = _reflection.GeneratedProtocolMessageType(
"PullResponse",
(_message.Message,),
dict(
DESCRIPTOR=_PULLRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``Pull`` method.
Attributes:
received_messages:
Received Pub/Sub messages. The list will be empty if there are
no more messages available in the backlog. For JSON, the
response can be entirely empty. The Pub/Sub system may return
fewer than the ``maxMessages`` requested even if there are
more messages available in the backlog.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.PullResponse)
),
)
_sym_db.RegisterMessage(PullResponse)
ModifyAckDeadlineRequest = _reflection.GeneratedProtocolMessageType(
"ModifyAckDeadlineRequest",
(_message.Message,),
dict(
DESCRIPTOR=_MODIFYACKDEADLINEREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ModifyAckDeadline method.
Attributes:
subscription:
The name of the subscription. Format is
``projects/{project}/subscriptions/{sub}``.
ack_ids:
List of acknowledgment IDs.
ack_deadline_seconds:
The new ack deadline with respect to the time this request was
sent to the Pub/Sub system. For example, if the value is 10,
the new ack deadline will expire 10 seconds after the
``ModifyAckDeadline`` call was made. Specifying zero might
immediately make the message available for delivery to another
subscriber client. This typically results in an increase in
the rate of message redeliveries (that is, duplicates). The
minimum deadline you can specify is 0 seconds. The maximum
deadline you can specify is 600 seconds (10 minutes).
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ModifyAckDeadlineRequest)
),
)
_sym_db.RegisterMessage(ModifyAckDeadlineRequest)
AcknowledgeRequest = _reflection.GeneratedProtocolMessageType(
"AcknowledgeRequest",
(_message.Message,),
dict(
DESCRIPTOR=_ACKNOWLEDGEREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the Acknowledge method.
Attributes:
subscription:
The subscription whose message is being acknowledged. Format
is ``projects/{project}/subscriptions/{sub}``.
ack_ids:
The acknowledgment ID for the messages being acknowledged that
was returned by the Pub/Sub system in the ``Pull`` response.
Must not be empty.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.AcknowledgeRequest)
),
)
_sym_db.RegisterMessage(AcknowledgeRequest)
StreamingPullRequest = _reflection.GeneratedProtocolMessageType(
"StreamingPullRequest",
(_message.Message,),
dict(
DESCRIPTOR=_STREAMINGPULLREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``StreamingPull`` streaming RPC method. This request is
used to establish the initial stream as well as to stream
acknowledgements and ack deadline modifications from the client to the
server.
Attributes:
subscription:
The subscription for which to initialize the new stream. This
must be provided in the first request on the stream, and must
not be set in subsequent requests from client to server.
Format is ``projects/{project}/subscriptions/{sub}``.
ack_ids:
List of acknowledgement IDs for acknowledging previously
received messages (received on this stream or a different
stream). If an ack ID has expired, the corresponding message
may be redelivered later. Acknowledging a message more than
once will not result in an error. If the acknowledgement ID is
malformed, the stream will be aborted with status
``INVALID_ARGUMENT``.
modify_deadline_seconds:
The list of new ack deadlines for the IDs listed in
``modify_deadline_ack_ids``. The size of this list must be the
same as the size of ``modify_deadline_ack_ids``. If it differs
the stream will be aborted with ``INVALID_ARGUMENT``. Each
element in this list is applied to the element in the same
position in ``modify_deadline_ack_ids``. The new ack deadline
is with respect to the time this request was sent to the
Pub/Sub system. Must be >= 0. For example, if the value is 10,
the new ack deadline will expire 10 seconds after this request
is received. If the value is 0, the message is immediately
made available for another streaming or non-streaming pull
request. If the value is < 0 (an error), the stream will be
aborted with status ``INVALID_ARGUMENT``.
modify_deadline_ack_ids:
List of acknowledgement IDs whose deadline will be modified
based on the corresponding element in
``modify_deadline_seconds``. This field can be used to
indicate that more time is needed to process a message by the
subscriber, or to make the message available for redelivery if
the processing was interrupted.
stream_ack_deadline_seconds:
The ack deadline to use for the stream. This must be provided
in the first request on the stream, but it can also be updated
on subsequent requests from client to server. The minimum
deadline you can specify is 10 seconds. The maximum deadline
you can specify is 600 seconds (10 minutes).
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.StreamingPullRequest)
),
)
_sym_db.RegisterMessage(StreamingPullRequest)
StreamingPullResponse = _reflection.GeneratedProtocolMessageType(
"StreamingPullResponse",
(_message.Message,),
dict(
DESCRIPTOR=_STREAMINGPULLRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``StreamingPull`` method. This response is used to
stream messages from the server to the client.
Attributes:
received_messages:
Received Pub/Sub messages. This will not be empty.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.StreamingPullResponse)
),
)
_sym_db.RegisterMessage(StreamingPullResponse)
CreateSnapshotRequest = _reflection.GeneratedProtocolMessageType(
"CreateSnapshotRequest",
(_message.Message,),
dict(
LabelsEntry=_reflection.GeneratedProtocolMessageType(
"LabelsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_CREATESNAPSHOTREQUEST_LABELSENTRY,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2"
# @@protoc_insertion_point(class_scope:google.pubsub.v1.CreateSnapshotRequest.LabelsEntry)
),
),
DESCRIPTOR=_CREATESNAPSHOTREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``CreateSnapshot`` method.
Attributes:
name:
Optional user-provided name for this snapshot. If the name is
not provided in the request, the server will assign a random
name for this snapshot on the same project as the
subscription. Note that for REST API requests, you must
specify a name. See the resource name rules. Format is
``projects/{project}/snapshots/{snap}``.
subscription:
The subscription whose backlog the snapshot retains.
Specifically, the created snapshot is guaranteed to retain:
(a) The existing backlog on the subscription. More precisely,
this is defined as the messages in the subscription's backlog
that are unacknowledged upon the successful completion of the
``CreateSnapshot`` request; as well as: (b) Any messages
published to the subscription's topic following the successful
completion of the CreateSnapshot request. Format is
``projects/{project}/subscriptions/{sub}``.
labels:
See Creating and managing labels.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.CreateSnapshotRequest)
),
)
_sym_db.RegisterMessage(CreateSnapshotRequest)
_sym_db.RegisterMessage(CreateSnapshotRequest.LabelsEntry)
UpdateSnapshotRequest = _reflection.GeneratedProtocolMessageType(
"UpdateSnapshotRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATESNAPSHOTREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the UpdateSnapshot method.
Attributes:
snapshot:
The updated snapshot object.
update_mask:
Indicates which fields in the provided snapshot to update.
Must be specified and non-empty.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateSnapshotRequest)
),
)
_sym_db.RegisterMessage(UpdateSnapshotRequest)
Snapshot = _reflection.GeneratedProtocolMessageType(
"Snapshot",
(_message.Message,),
dict(
LabelsEntry=_reflection.GeneratedProtocolMessageType(
"LabelsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_SNAPSHOT_LABELSENTRY,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2"
# @@protoc_insertion_point(class_scope:google.pubsub.v1.Snapshot.LabelsEntry)
),
),
DESCRIPTOR=_SNAPSHOT,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""A snapshot resource. Snapshots are used in Seek operations, which allow
you to manage message acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing subscription to the
state captured by a snapshot.
Attributes:
name:
The name of the snapshot.
topic:
The name of the topic from which this snapshot is retaining
messages.
expire_time:
The snapshot is guaranteed to exist up until this time. A
newly-created snapshot expires no later than 7 days from the
time of its creation. Its exact lifetime is determined at
creation by the existing backlog in the source subscription.
Specifically, the lifetime of the snapshot is ``7 days - (age
of oldest unacked message in the subscription)``. For example,
consider a subscription whose oldest unacked message is 3 days
old. If a snapshot is created from this subscription, the
snapshot -- which will always capture this 3-day-old backlog
as long as the snapshot exists -- will expire in 4 days. The
service will refuse to create a snapshot that would expire in
less than 1 hour after creation.
labels:
See Creating and managing labels.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.Snapshot)
),
)
_sym_db.RegisterMessage(Snapshot)
_sym_db.RegisterMessage(Snapshot.LabelsEntry)
GetSnapshotRequest = _reflection.GeneratedProtocolMessageType(
"GetSnapshotRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETSNAPSHOTREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the GetSnapshot method.
Attributes:
snapshot:
The name of the snapshot to get. Format is
``projects/{project}/snapshots/{snap}``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.GetSnapshotRequest)
),
)
_sym_db.RegisterMessage(GetSnapshotRequest)
ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType(
"ListSnapshotsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSNAPSHOTSREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``ListSnapshots`` method.
Attributes:
project:
The name of the project in which to list snapshots. Format is
``projects/{project-id}``.
page_size:
Maximum number of snapshots to return.
page_token:
The value returned by the last ``ListSnapshotsResponse``;
indicates that this is a continuation of a prior
``ListSnapshots`` call, and that the system should return the
next page of data.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSnapshotsRequest)
),
)
_sym_db.RegisterMessage(ListSnapshotsRequest)
ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType(
"ListSnapshotsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSNAPSHOTSRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``ListSnapshots`` method.
Attributes:
snapshots:
The resulting snapshots.
next_page_token:
If not empty, indicates that there may be more snapshot that
match the request; this value should be passed in a new
``ListSnapshotsRequest``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSnapshotsResponse)
),
)
_sym_db.RegisterMessage(ListSnapshotsResponse)
DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType(
"DeleteSnapshotRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DELETESNAPSHOTREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``DeleteSnapshot`` method.
Attributes:
snapshot:
The name of the snapshot to delete. Format is
``projects/{project}/snapshots/{snap}``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteSnapshotRequest)
),
)
_sym_db.RegisterMessage(DeleteSnapshotRequest)
SeekRequest = _reflection.GeneratedProtocolMessageType(
"SeekRequest",
(_message.Message,),
dict(
DESCRIPTOR=_SEEKREQUEST,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Request for the ``Seek`` method.
Attributes:
subscription:
The subscription to affect.
time:
The time to seek to. Messages retained in the subscription
that were published before this time are marked as
acknowledged, and messages retained in the subscription that
were published after this time are marked as unacknowledged.
Note that this operation affects only those messages retained
in the subscription (configured by the combination of
``message_retention_duration`` and ``retain_acked_messages``).
For example, if ``time`` corresponds to a point before the
message retention window (or to a point before the system's
notion of the subscription creation time), only retained
messages will be marked as unacknowledged, and already-
expunged messages will not be restored.
snapshot:
The snapshot to seek to. The snapshot's topic must be the same
as that of the provided subscription. Format is
``projects/{project}/snapshots/{snap}``.
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.SeekRequest)
),
)
_sym_db.RegisterMessage(SeekRequest)
SeekResponse = _reflection.GeneratedProtocolMessageType(
"SeekResponse",
(_message.Message,),
dict(
DESCRIPTOR=_SEEKRESPONSE,
__module__="google.cloud.pubsub_v1.proto.pubsub_pb2",
__doc__="""Response for the ``Seek`` method (this response is empty).
""",
# @@protoc_insertion_point(class_scope:google.pubsub.v1.SeekResponse)
),
)
_sym_db.RegisterMessage(SeekResponse)
# Generated by protoc — do not edit by hand.
# Clearing `_options` on the file descriptor and on the synthesized map-entry
# message types (LabelsEntry / AttributesEntry) prevents the serialized
# options from being re-parsed lazily at access time.
DESCRIPTOR._options = None
_TOPIC_LABELSENTRY._options = None
_PUBSUBMESSAGE_ATTRIBUTESENTRY._options = None
_SUBSCRIPTION_LABELSENTRY._options = None
_PUSHCONFIG_ATTRIBUTESENTRY._options = None
_CREATESNAPSHOTREQUEST_LABELSENTRY._options = None
_SNAPSHOT_LABELSENTRY._options = None
# Generated by protoc — do not edit by hand; regenerate from pubsub.proto.
# ServiceDescriptor for google.pubsub.v1.Publisher. Each method's
# serialized_options blob is the wire-encoded google.api.http annotation
# (HTTP verb + URL template) for the RPC's REST mapping.
_PUBLISHER = _descriptor.ServiceDescriptor(
    name="Publisher",
    full_name="google.pubsub.v1.Publisher",
    file=DESCRIPTOR,
    index=0,
    serialized_options=None,
    serialized_start=4423,
    serialized_end=5510,
    methods=[
        _descriptor.MethodDescriptor(
            name="CreateTopic",
            full_name="google.pubsub.v1.Publisher.CreateTopic",
            index=0,
            containing_service=None,
            input_type=_TOPIC,
            output_type=_TOPIC,
            serialized_options=_b(
                "\202\323\344\223\002#\032\036/v1/{name=projects/*/topics/*}:\001*"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="UpdateTopic",
            full_name="google.pubsub.v1.Publisher.UpdateTopic",
            index=1,
            containing_service=None,
            input_type=_UPDATETOPICREQUEST,
            output_type=_TOPIC,
            serialized_options=_b(
                "\202\323\344\223\002)2$/v1/{topic.name=projects/*/topics/*}:\001*"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="Publish",
            full_name="google.pubsub.v1.Publisher.Publish",
            index=2,
            containing_service=None,
            input_type=_PUBLISHREQUEST,
            output_type=_PUBLISHRESPONSE,
            serialized_options=_b(
                "\202\323\344\223\002,\"'/v1/{topic=projects/*/topics/*}:publish:\001*"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="GetTopic",
            full_name="google.pubsub.v1.Publisher.GetTopic",
            index=3,
            containing_service=None,
            input_type=_GETTOPICREQUEST,
            output_type=_TOPIC,
            serialized_options=_b(
                "\202\323\344\223\002!\022\037/v1/{topic=projects/*/topics/*}"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="ListTopics",
            full_name="google.pubsub.v1.Publisher.ListTopics",
            index=4,
            containing_service=None,
            input_type=_LISTTOPICSREQUEST,
            output_type=_LISTTOPICSRESPONSE,
            serialized_options=_b(
                "\202\323\344\223\002!\022\037/v1/{project=projects/*}/topics"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="ListTopicSubscriptions",
            full_name="google.pubsub.v1.Publisher.ListTopicSubscriptions",
            index=5,
            containing_service=None,
            input_type=_LISTTOPICSUBSCRIPTIONSREQUEST,
            output_type=_LISTTOPICSUBSCRIPTIONSRESPONSE,
            serialized_options=_b(
                "\202\323\344\223\002/\022-/v1/{topic=projects/*/topics/*}/subscriptions"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="ListTopicSnapshots",
            full_name="google.pubsub.v1.Publisher.ListTopicSnapshots",
            index=6,
            containing_service=None,
            input_type=_LISTTOPICSNAPSHOTSREQUEST,
            output_type=_LISTTOPICSNAPSHOTSRESPONSE,
            serialized_options=_b(
                "\202\323\344\223\002+\022)/v1/{topic=projects/*/topics/*}/snapshots"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="DeleteTopic",
            full_name="google.pubsub.v1.Publisher.DeleteTopic",
            index=7,
            containing_service=None,
            input_type=_DELETETOPICREQUEST,
            output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
            serialized_options=_b(
                "\202\323\344\223\002!*\037/v1/{topic=projects/*/topics/*}"
            ),
        ),
    ],
)
# Register the service and expose it on the file descriptor by name.
_sym_db.RegisterServiceDescriptor(_PUBLISHER)
DESCRIPTOR.services_by_name["Publisher"] = _PUBLISHER
# Generated by protoc — do not edit by hand; regenerate from pubsub.proto.
# ServiceDescriptor for google.pubsub.v1.Subscriber. As with _PUBLISHER,
# each method's serialized_options blob is the wire-encoded google.api.http
# annotation. StreamingPull has no HTTP binding (serialized_options=None)
# because it is a bidirectional streaming RPC.
_SUBSCRIBER = _descriptor.ServiceDescriptor(
    name="Subscriber",
    full_name="google.pubsub.v1.Subscriber",
    file=DESCRIPTOR,
    index=1,
    serialized_options=None,
    serialized_start=5513,
    serialized_end=7810,
    methods=[
        _descriptor.MethodDescriptor(
            name="CreateSubscription",
            full_name="google.pubsub.v1.Subscriber.CreateSubscription",
            index=0,
            containing_service=None,
            input_type=_SUBSCRIPTION,
            output_type=_SUBSCRIPTION,
            serialized_options=_b(
                "\202\323\344\223\002*\032%/v1/{name=projects/*/subscriptions/*}:\001*"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="GetSubscription",
            full_name="google.pubsub.v1.Subscriber.GetSubscription",
            index=1,
            containing_service=None,
            input_type=_GETSUBSCRIPTIONREQUEST,
            output_type=_SUBSCRIPTION,
            serialized_options=_b(
                "\202\323\344\223\002/\022-/v1/{subscription=projects/*/subscriptions/*}"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="UpdateSubscription",
            full_name="google.pubsub.v1.Subscriber.UpdateSubscription",
            index=2,
            containing_service=None,
            input_type=_UPDATESUBSCRIPTIONREQUEST,
            output_type=_SUBSCRIPTION,
            serialized_options=_b(
                "\202\323\344\223\002722/v1/{subscription.name=projects/*/subscriptions/*}:\001*"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="ListSubscriptions",
            full_name="google.pubsub.v1.Subscriber.ListSubscriptions",
            index=3,
            containing_service=None,
            input_type=_LISTSUBSCRIPTIONSREQUEST,
            output_type=_LISTSUBSCRIPTIONSRESPONSE,
            serialized_options=_b(
                "\202\323\344\223\002(\022&/v1/{project=projects/*}/subscriptions"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="DeleteSubscription",
            full_name="google.pubsub.v1.Subscriber.DeleteSubscription",
            index=4,
            containing_service=None,
            input_type=_DELETESUBSCRIPTIONREQUEST,
            output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
            serialized_options=_b(
                "\202\323\344\223\002/*-/v1/{subscription=projects/*/subscriptions/*}"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="ModifyAckDeadline",
            full_name="google.pubsub.v1.Subscriber.ModifyAckDeadline",
            index=5,
            containing_service=None,
            input_type=_MODIFYACKDEADLINEREQUEST,
            output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
            serialized_options=_b(
                '\202\323\344\223\002D"?/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline:\001*'
            ),
        ),
        _descriptor.MethodDescriptor(
            name="Acknowledge",
            full_name="google.pubsub.v1.Subscriber.Acknowledge",
            index=6,
            containing_service=None,
            input_type=_ACKNOWLEDGEREQUEST,
            output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
            serialized_options=_b(
                '\202\323\344\223\002>"9/v1/{subscription=projects/*/subscriptions/*}:acknowledge:\001*'
            ),
        ),
        _descriptor.MethodDescriptor(
            name="Pull",
            full_name="google.pubsub.v1.Subscriber.Pull",
            index=7,
            containing_service=None,
            input_type=_PULLREQUEST,
            output_type=_PULLRESPONSE,
            serialized_options=_b(
                '\202\323\344\223\0027"2/v1/{subscription=projects/*/subscriptions/*}:pull:\001*'
            ),
        ),
        _descriptor.MethodDescriptor(
            name="StreamingPull",
            full_name="google.pubsub.v1.Subscriber.StreamingPull",
            index=8,
            containing_service=None,
            input_type=_STREAMINGPULLREQUEST,
            output_type=_STREAMINGPULLRESPONSE,
            serialized_options=None,
        ),
        _descriptor.MethodDescriptor(
            name="ModifyPushConfig",
            full_name="google.pubsub.v1.Subscriber.ModifyPushConfig",
            index=9,
            containing_service=None,
            input_type=_MODIFYPUSHCONFIGREQUEST,
            output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
            serialized_options=_b(
                '\202\323\344\223\002C">/v1/{subscription=projects/*/subscriptions/*}:modifyPushConfig:\001*'
            ),
        ),
        _descriptor.MethodDescriptor(
            name="GetSnapshot",
            full_name="google.pubsub.v1.Subscriber.GetSnapshot",
            index=10,
            containing_service=None,
            input_type=_GETSNAPSHOTREQUEST,
            output_type=_SNAPSHOT,
            serialized_options=_b(
                "\202\323\344\223\002'\022%/v1/{snapshot=projects/*/snapshots/*}"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="ListSnapshots",
            full_name="google.pubsub.v1.Subscriber.ListSnapshots",
            index=11,
            containing_service=None,
            input_type=_LISTSNAPSHOTSREQUEST,
            output_type=_LISTSNAPSHOTSRESPONSE,
            serialized_options=_b(
                '\202\323\344\223\002$\022"/v1/{project=projects/*}/snapshots'
            ),
        ),
        _descriptor.MethodDescriptor(
            name="CreateSnapshot",
            full_name="google.pubsub.v1.Subscriber.CreateSnapshot",
            index=12,
            containing_service=None,
            input_type=_CREATESNAPSHOTREQUEST,
            output_type=_SNAPSHOT,
            serialized_options=_b(
                "\202\323\344\223\002&\032!/v1/{name=projects/*/snapshots/*}:\001*"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="UpdateSnapshot",
            full_name="google.pubsub.v1.Subscriber.UpdateSnapshot",
            index=13,
            containing_service=None,
            input_type=_UPDATESNAPSHOTREQUEST,
            output_type=_SNAPSHOT,
            serialized_options=_b(
                "\202\323\344\223\002/2*/v1/{snapshot.name=projects/*/snapshots/*}:\001*"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="DeleteSnapshot",
            full_name="google.pubsub.v1.Subscriber.DeleteSnapshot",
            index=14,
            containing_service=None,
            input_type=_DELETESNAPSHOTREQUEST,
            output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
            serialized_options=_b(
                "\202\323\344\223\002'*%/v1/{snapshot=projects/*/snapshots/*}"
            ),
        ),
        _descriptor.MethodDescriptor(
            name="Seek",
            full_name="google.pubsub.v1.Subscriber.Seek",
            index=15,
            containing_service=None,
            input_type=_SEEKREQUEST,
            output_type=_SEEKRESPONSE,
            serialized_options=_b(
                '\202\323\344\223\0027"2/v1/{subscription=projects/*/subscriptions/*}:seek:\001*'
            ),
        ),
    ],
)
# Register the service and expose it on the file descriptor by name.
_sym_db.RegisterServiceDescriptor(_SUBSCRIBER)
DESCRIPTOR.services_by_name["Subscriber"] = _SUBSCRIBER
# @@protoc_insertion_point(module_scope)
| 35.214112 | 11,769 | 0.62572 |
ace1759556a62cd1723b65f5f504ac7b51361e49 | 10,024 | py | Python | test_4_delay_dropout_mask_change.py | LinghengMeng/Dropout-as-a-Bayesian-Approximation | 9565d30d4904496f1ee300fba6b792cafdd93d9e | [
"MIT"
] | null | null | null | test_4_delay_dropout_mask_change.py | LinghengMeng/Dropout-as-a-Bayesian-Approximation | 9565d30d4904496f1ee300fba6b792cafdd93d9e | [
"MIT"
] | null | null | null | test_4_delay_dropout_mask_change.py | LinghengMeng/Dropout-as-a-Bayesian-Approximation | 9565d30d4904496f1ee300fba6b792cafdd93d9e | [
"MIT"
] | null | null | null | """
Created on 2019-03-09 2:54 PM
@author: jack.lingheng.meng
"""
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions import Bernoulli
import matplotlib.pyplot as plt
def get_vars(scope):
    """Return every TF global variable whose name contains ``scope``.

    NOTE(review): this is a *substring* match, so a scope named
    ``"hidden"`` also picks up variables under e.g. ``"hidden2"`` —
    confirm callers rely on that before tightening it.
    """
    matching = []
    for variable in tf.global_variables():
        if scope in variable.name:
            matching.append(variable)
    return matching
class VariationalDense:
    """Dense layer with an externally supplied, fixed dropout mask.

    Instead of sampling a fresh Bernoulli mask per forward pass, the caller
    provides ``dropout_mask`` (length ``n_in``); it is applied to the *rows*
    of the weight matrix via ``tf.diag``, so the same mask is reused for as
    long as the caller keeps feeding it.  ``regularization`` exposes an
    L2-style penalty on the variational parameters, in the spirit of
    dropout-as-a-Bayesian-approximation (per the comment ``model_lam`` plays
    the role of l^2 / 2*tau).
    """
    def __init__(self, n_in, n_out, dropout_mask, model_prob=0.9, model_lam=1e-2, activation=None, name="hidden"):
        # model_prob: keep-probability; here it only scales the weight term
        # of `regularization` — it does NOT generate the mask.
        # NOTE(review): confirm it matches the rate used to draw dropout_mask.
        self.model_prob = model_prob # probability to keep units
        self.model_lam = model_lam # l^2 / 2*tau (regularization strength)
        self.dropout_mask = dropout_mask  # 1-D mask over the n_in inputs
        if activation is None:
            self.activation = tf.identity  # default to a linear layer
        else:
            self.activation = activation
        kernel_initializer = tf.initializers.truncated_normal(mean=0.0, stddev=0.01)
        # model_M / model_m: variational weight matrix and bias.
        self.model_M = tf.get_variable("{}_M".format(name), initializer=kernel_initializer([n_in, n_out])) # variational parameters
        self.model_m = tf.get_variable("{}_b".format(name), initializer=tf.zeros([n_out]))
        # Masked weights: diag(mask) @ M zeroes out the rows of dropped inputs.
        self.model_W = tf.matmul(tf.diag(self.dropout_mask), self.model_M)
    def __call__(self, X):
        # Affine transform with the masked weights, then the activation.
        output = self.activation(tf.matmul(X, self.model_W) + self.model_m)
        if self.model_M.shape[1] == 1:
            # Single-output layer: drop the trailing unit dimension.
            output = tf.squeeze(output)
        return output
    @property
    def regularization(self):
        # L2 penalty: weights are scaled by the keep-probability, bias is not
        # (the bias is never dropped).
        return self.model_lam * (
            self.model_prob * tf.reduce_sum(tf.square(self.model_M)) +
            tf.reduce_sum(tf.square(self.model_m))
        )
def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
    """Plain feed-forward MLP.

    Hidden layers (hidden_sizes[:-1]) use `activation`; the final layer of
    width hidden_sizes[-1] uses `output_activation` (None = linear).
    """
    for h in hidden_sizes[:-1]:
        x = tf.layers.dense(x, units=h, activation=activation,
                            kernel_initializer=tf.initializers.truncated_normal(mean=0.0, stddev=0.01))
    return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation,
                           kernel_initializer=tf.initializers.truncated_normal(mean=0.0, stddev=0.01))
def mlp_dropout(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None, seed = 0, training=False):
    """MLP with always-on dropout (training=True) before every layer.

    Returns (output, regularization): the network output plus an L2 penalty
    accumulated over the hidden-layer weights.

    NOTE(review): tf.layers.Dropout's `rate` is the DROP probability, yet
    model_prob (0.1) is named like a keep-probability elsewhere in this
    file — confirm which semantics were intended.
    """
    regularization = 0
    model_lam = 1e-2
    model_prob = 0.1
    kernel_initializer = tf.initializers.truncated_normal(mean=0.0, stddev=0.01)
    # Hidden Layer
    for l, h in enumerate(hidden_sizes[:-1]):
        # training=True keeps dropout active even at inference (MC dropout).
        x = tf.layers.Dropout(rate=model_prob, seed=seed)(x, training=True)
        # import pdb; pdb.set_trace()
        hidden_layer = tf.layers.Dense(units=h,
                                       activation=activation,
                                       kernel_initializer=kernel_initializer)
        x = hidden_layer(x)
        # hidden_layer.get_weights()[0]
        regularization += model_lam * (
                model_prob * tf.reduce_sum(tf.square(hidden_layer.weights[0])) +
                tf.reduce_sum(tf.square(hidden_layer.weights[1]))
        )
    # Output Layer (its weights are deliberately left unregularized below).
    x = tf.layers.Dropout(rate=model_prob, seed=seed)(x, training=True)
    output_layer = tf.layers.Dense(units=hidden_sizes[-1], activation=output_activation,
                                   kernel_initializer=kernel_initializer)
    x = output_layer(x)
    # regularization += model_lam * (
    #         model_prob * tf.reduce_sum(tf.square(output_layer.get_weights()[0])) +
    #         tf.reduce_sum(tf.square(output_layer.get_weights()[1]))
    # )
    return x, regularization
def generate_dropout_mask_placeholders(x_dim, hidden_sizes=(32,)):
    """Create one float32 placeholder per layer input (x plus each hidden layer)."""
    layer_sizes = (x_dim, *hidden_sizes)
    return [
        tf.placeholder(dtype=tf.float32, name='dropout_mask_{}'.format(idx))
        for idx, _ in enumerate(layer_sizes)
    ]
def update_dropout_masks(x_dim, hidden_sizes=(32,), model_prob=0.9):
    """Sample a fresh Bernoulli(keep-prob) mask tensor for every layer input."""
    model_bern = Bernoulli(probs=model_prob, dtype=tf.float32)
    return [model_bern.sample((size,)) for size in (x_dim, *hidden_sizes)]
def mlp_variational(x, dropout_mask_phs, hidden_sizes=(32,), activation=tf.tanh, output_activation=None, dropout_rate=0.1):
    """MLP built from VariationalDense layers with externally fed masks.

    dropout_mask_phs supplies one mask placeholder per layer input (the
    last one feeds the output layer). Returns (output, regularization),
    where regularization sums every layer's penalty.
    """
    # Hidden layers
    regularization = 0
    for l, h in enumerate(hidden_sizes[:-1]):
        hidden_layer = VariationalDense(n_in=x.shape.as_list()[1],
                                        n_out=h,
                                        dropout_mask = dropout_mask_phs[l],
                                        model_prob=1.0-dropout_rate,
                                        model_lam=1e-2,
                                        activation=activation,
                                        name="h{}".format(l+1))
        x = hidden_layer(x)
        regularization += hidden_layer.regularization
    # Output layer
    out_layer = VariationalDense(n_in=x.shape.as_list()[1],
                                 n_out=hidden_sizes[-1],
                                 dropout_mask=dropout_mask_phs[-1],
                                 model_prob=1.0-dropout_rate,
                                 model_lam=1e-2,
                                 activation=output_activation,
                                 name="Out")
    x = out_layer(x)
    regularization += out_layer.regularization
    return x, regularization
# Create sample data: noisy observations of cos(5x)/(|x|+1).
seed=0
np.random.seed(seed)
tf.set_random_seed(seed)
n_samples = 200
X = np.random.normal(size=(n_samples, 1))
y = np.random.normal(np.cos(5.*X) / (np.abs(X) + 1.), 0.1).ravel()
# Dense prediction grid over [-6, 6] for sampling/plotting the posterior.
X_pred = np.atleast_2d(np.linspace(-6., 6., num=10000)).T
y_pred = np.random.normal(np.cos(5.*X_pred) / (np.abs(X_pred) + 1.), 0.1).ravel()
# Polynomial feature expansion: columns [x, x^2, x^3].
X = np.hstack((X, X**2, X**3))
X_pred = np.hstack((X_pred, X_pred**2, X_pred**3))
# Create the TensorFlow model: a 'main' network and a 'target' copy that
# share the same dropout-mask placeholders.
obs_dim = X.shape[1]
# hidden_sizes = (300, 300, 300)
hidden_sizes = (100, 100)
model_X = tf.placeholder(tf.float32, [None, obs_dim])
model_X_targ = tf.placeholder(tf.float32, [None, obs_dim])
dropout_rate = 0.1 #0.1
# Ops that resample the masks, and the placeholders the networks read them from.
new_dropout_masks = update_dropout_masks(obs_dim, hidden_sizes, model_prob=1.0-dropout_rate)
dropout_mask_phs = generate_dropout_mask_placeholders(obs_dim, hidden_sizes)
with tf.variable_scope('main'):
    q, q_reg = mlp_variational(model_X, dropout_mask_phs, list(hidden_sizes)+[1], tf.nn.relu, None, dropout_rate)
    # q, q_reg = mlp_dropout(model_X, list(hidden_sizes)+[1], tf.nn.relu, None)
    # q = mlp(model_X, list(hidden_sizes)+[1], tf.nn.relu, None)
with tf.variable_scope('target'):
    q_targ, q_reg_targ = mlp_variational(model_X_targ, dropout_mask_phs, list(hidden_sizes) + [1], tf.nn.relu, None, dropout_rate)
    # q_targ, q_reg_targ = mlp_dropout(model_X_targ, list(hidden_sizes)+[1], tf.nn.relu, None)
    # q_targ = mlp(model_X_targ, list(hidden_sizes)+[1], tf.nn.relu, None)
# Hard copy of main-network variables into the target network.
target_update = tf.group([tf.assign(v_targ, v_main) for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
# q = tf.squeeze(q)
# q_targ = tf.squeeze(q_targ)
# q_reg = tf.squeeze(q_reg)
model_y = tf.placeholder(tf.float32, [None])
# q_loss = tf.reduce_mean((q-model_y)**2) + q_reg / n_samples
q_loss = (tf.reduce_mean((q-model_y)**2))
model_mse = tf.reduce_mean((q-model_y)**2)
train_step = tf.train.AdamOptimizer(1e-3).minimize(q_loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tf_summary_dir = os.path.join('./', 'tf_summary')
    writer = tf.summary.FileWriter(tf_summary_dir, sess.graph)
    training_epoches = 8000#10000
    for i in range(training_epoches):
        # Create feed_dictionary; the dropout masks are resampled only every
        # `delay_dropout` iterations and reused in between (delayed masks).
        delay_dropout = 100 # 100
        if i % delay_dropout == 0:
            dropout_masks = sess.run(new_dropout_masks)
        feed_dictionary = {model_X: X, model_y: y}
        for mask_i in range(len(dropout_mask_phs)):
            # import pdb;
            # pdb.set_trace()
            feed_dictionary[dropout_mask_phs[mask_i]] = dropout_masks[mask_i] # np.ones(new_dropout_masks[mask_i].shape.as_list()) # dropout_masks[mask_i]
        sess.run([train_step], feed_dictionary)
        # Every 100 steps: report training MSE and sync the target network.
        if i % 100 == 0:
            mse = sess.run(model_mse, feed_dictionary)
            print("Iteration {}. Mean squared error: {}.".format(i, mse))
            sess.run(target_update)

    # Sample from the posterior: one fresh mask per draw over X_pred.
    n_post = 200#1000
    Y_post = np.zeros((n_post, X_pred.shape[0]))
    Y_post_targ = np.zeros((n_post, X_pred.shape[0]))
    for i in range(n_post):
        dropout_masks = sess.run(new_dropout_masks)
        feed_dictionary = {model_X: X_pred, model_X_targ: X_pred}
        for mask_i in range(len(dropout_mask_phs)):
            feed_dictionary[dropout_mask_phs[mask_i]] = dropout_masks[mask_i]
        Y_post[i] = sess.run(q, feed_dictionary)
        Y_post_targ[i] = sess.run(q_targ, feed_dictionary)
    # import pdb;
    # pdb.set_trace()
    # Plot posterior samples, training data, and the posterior median.
    if True:
        plt.figure(figsize=(8, 6))
        alpha = 1. / 10 #1. / 200
        for i in range(len(Y_post)):
            handle_0, = plt.plot(X_pred[:, 0], Y_post[i], "b-", alpha=alpha)
        handle_1, = plt.plot(X[:, 0], y, "r.", markersize=2)
        handle_2, = plt.plot(X_pred[:, 0], np.median(Y_post, axis=0), "g-")
        plt.title('traning_epoches={}, traning_samples={}, post_samples={}'.format(training_epoches, n_samples, n_post))
        plt.ylim([-6, 6])
        plt.legend(handles=[handle_0, handle_1, handle_2],
                   labels=['post sample', 'training samples', 'median of post sample'])
        plt.grid()
        plt.show()
    # plt.figure(figsize=(8, 6))
    # alpha = 1. / 10 # 1. / 200
    # for i in range(len(Y_post)):
    #     handle_0, = plt.plot(X_pred[:, 0], Y_post_targ[i], "b-", alpha=alpha)
    # handle_1, = plt.plot(X[:, 0], y, "r.", markersize=2)
    # handle_2, = plt.plot(X_pred[:, 0], np.median(Y_post_targ, axis=0), "g-")
    # plt.title('traning_epoches={}, traning_samples={}, post_samples={}'.format(training_epoches, n_samples, n_post))
    # plt.ylim([-6, 6])
    # plt.legend(handles=[handle_0, handle_1, handle_2],
    #            labels=['post sample', 'training samples', 'median of post sample'])
    # plt.grid()
# plt.show() | 40.914286 | 154 | 0.635375 |
ace175f92f3da04bd21cccb2659efdb7f37c46aa | 497 | py | Python | main.py | TechGuyKUNAL/Random_Password_generator | e0b6c27f26891441f6cab15ffdaaf8bf2011afa1 | [
"MIT"
] | null | null | null | main.py | TechGuyKUNAL/Random_Password_generator | e0b6c27f26891441f6cab15ffdaaf8bf2011afa1 | [
"MIT"
] | null | null | null | main.py | TechGuyKUNAL/Random_Password_generator | e0b6c27f26891441f6cab15ffdaaf8bf2011afa1 | [
"MIT"
] | null | null | null | import random
uppercase_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
lowercase_letters = uppercase_letters.lower()
digits = "0123456789"
symbols = "@#[+=$%_^-]&*!"

# Feature toggles controlling which character classes enter the pool.
upper, lower, nums, syms = True, True, True, True

# Candidate character pool. Renamed from `all`, which shadowed the builtin.
pool = ""
if upper:
    pool += uppercase_letters
if lower:
    pool += lowercase_letters
if nums:
    pool += digits
if syms:
    pool += symbols


def generate_password(pool, length):
    """Return a password of *length* distinct characters drawn from *pool*.

    Uses random.sample, so each character appears at most once and
    *length* must not exceed len(pool).
    """
    return "".join(random.sample(pool, length))


length = 40
amount = 30
for _ in range(amount):
    password = generate_password(pool, length)
    print(password)
print(password) | 19.88 | 51 | 0.641851 |
ace17689fcdf861d9d50c162bcf3b73337da7140 | 4,297 | py | Python | equivalence/main.py | Shurik412/equivalence_rastr_win3 | c37119f9cc59dcf96d236cd8f3ec090dc65a5db6 | [
"MIT"
] | null | null | null | equivalence/main.py | Shurik412/equivalence_rastr_win3 | c37119f9cc59dcf96d236cd8f3ec090dc65a5db6 | [
"MIT"
] | null | null | null | equivalence/main.py | Shurik412/equivalence_rastr_win3 | c37119f9cc59dcf96d236cd8f3ec090dc65a5db6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
from equivalence.AstraRastr import RASTR
from equivalence.Load import LoadFile
from equivalence.Save import save_file
from equivalence.actions.zeroing import Zeroing
from equivalence.calculation.equivalent import Equivalent
from equivalence.calculation.regime import Regime
from equivalence.correction.generators import delete_Generator_without_nodes, off_the_generator_if_the_node_off
from equivalence.correction.node import removing_nodes_without_branches
from equivalence.correction.reactors import reactors_change
from equivalence.correction.vetv import remove_line, off_the_line_from_two_side, del_vetv
from equivalence.delete_switches.swiches import delete_area, delete_UKR, del_swiches, del_switches_gen, vzd_node
from equivalence.equivalence_node.equalization_full import equivalent_full_to_nodes
from equivalence.making_settings.equivalence import set_com_ekviv
from equivalence.settings import PATH_FILE_RASTR_LOAD, PATH_FILE_RASTR_SAVE
from equivalence.settings import area_json
from equivalence.tools.tool import changing_number_of_semicolons
def main():
    """Run the full RastrWin equivalencing pipeline via the COM API.

    The call order is significant: each cleanup step re-zeroes selection
    flags and recomputes the power-flow regime before the next step.
    """
    regime_obj = Regime(rastr_win=RASTR, switch_command_line=True)
    equivalent_obj = Equivalent(rastr_win=RASTR)
    zeroing_object = Zeroing(rastr_win=RASTR)
    print("Запуск:\n")
    # Apply the equivalencing settings.
    set_com_ekviv(
        rastr_win=RASTR,
        zmax=1000,
        otm_n=0,
        smart=0,
        tip_ekv=0,
        ekvgen=0,
        tip_gen=1,
        ek_sh=0
    )
    # Reset selection flags (sel=0) on nodes and branches.
    zeroing_object.node()
    zeroing_object.vetv()
    # Compute the power-flow regime.
    regime_obj.rgm(par='p')
    # Switch off lines that are connected on one side only.
    off_the_line_from_two_side(rastr_win=RASTR)
    # Switch off generators whose node is switched off.
    off_the_generator_if_the_node_off(rastr_win=RASTR)
    regime_obj.rgm(par='p')
    # Mark each configured area for equivalencing.
    for area in area_json:
        if area['equivalent']:
            equivalent_full_to_nodes(
                rastr_win=RASTR,
                name_area=area['area_name'],
                uhom=area['u_min_TR'],
                v_ip_input=area['u_min_AREA'],
                v_iq_input=area['u_max_AREA']
            )
    equivalent_obj.ekv()
    # Zero out selection flags again after equivalencing.
    zeroing_object.node()
    zeroing_object.vetv()
    regime_obj.rgm(par='p')
    del_swiches(rastr_win=RASTR)
    print('1. del_swiches')
    zeroing_object.node()
    zeroing_object.vetv()
    regime_obj.rgm(par='p')
    del_switches_gen(rastr_win=RASTR)
    print('2. del_switches_gen')
    zeroing_object.node()
    zeroing_object.vetv()
    regime_obj.rgm(par='p')
    delete_UKR(rastr_win=RASTR)
    print('3. delete_UKR')
    zeroing_object.node()
    zeroing_object.vetv()
    regime_obj.rgm(par='p')
    delete_area(rastr_win=RASTR)
    print('4. delete_area')
    zeroing_object.node()
    zeroing_object.vetv()
    regime_obj.rgm(par='p')
    vzd_node(rastr_win=RASTR)
    print('6. vzd_node')
    zeroing_object.node()
    zeroing_object.vetv()
    regime_obj.rgm(par='p')
    off_the_generator_if_the_node_off(rastr_win=RASTR)
    zeroing_object.node()
    zeroing_object.vetv()
    delete_Generator_without_nodes(rastr_win=RASTR)
    zeroing_object.node()
    zeroing_object.vetv()
    reactors_change(rastr_win=RASTR)
    zeroing_object.node()
    zeroing_object.vetv()
    # remove_line(rastr_win=RASTR)
    # zeroing_object.node()
    # zeroing_object.vetv()
    regime_obj.rgm(par='p')
    del_vetv(rastr_win=RASTR)
    zeroing_object.node()
    zeroing_object.vetv()
    regime_obj.rgm(par='p')
    removing_nodes_without_branches(rastr_win=RASTR)
    zeroing_object.node()
    zeroing_object.vetv()
    delete_Generator_without_nodes(rastr_win=RASTR)
    regime_obj.rgm(par='p')
if __name__ == '__main__':
    # Load the source regime file, run the equivalencing pipeline,
    # report elapsed time in minutes, then save the result.
    LoadFile(rastr_win=RASTR).load(path_file=PATH_FILE_RASTR_LOAD, name_shabl_russian='режим')
    start = time.time()
    main()
    end = time.time()
    print(
        f'Время работы: '
        f'{changing_number_of_semicolons(number=(end - start) / 60, digits=2)} мин.'
    )
    save_file(rastr_win=RASTR, path_file=PATH_FILE_RASTR_SAVE, name_shabl_russian='режим')
save_file(rastr_win=RASTR, path_file=f'{PATH_FILE_RASTR_SAVE}.rst', name_shabl_russian='динамика') | 26.20122 | 112 | 0.717012 |
ace177c19ee634e2ed121954d3dc4a47fbb81629 | 4,119 | py | Python | nipyapi/nifi/models/resource_dto.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/resource_dto.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | 1 | 2018-11-13T21:01:33.000Z | 2018-11-13T21:01:33.000Z | nipyapi/nifi/models/resource_dto.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ResourceDTO(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'identifier': 'str',
        'name': 'str'
    }

    attribute_map = {
        'identifier': 'identifier',
        'name': 'name'
    }

    def __init__(self, identifier=None, name=None):
        """ResourceDTO - a model defined in Swagger."""
        self._identifier = None
        self._name = None

        if identifier is not None:
            self.identifier = identifier
        if name is not None:
            self.name = name

    @property
    def identifier(self):
        """The identifier of the resource.

        :rtype: str
        """
        return self._identifier

    @identifier.setter
    def identifier(self, identifier):
        """Set the identifier of the resource.

        :type identifier: str
        """
        self._identifier = identifier

    @property
    def name(self):
        """The name of the resource.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of the resource.

        :type name: str
        """
        self._name = name

    def to_dict(self):
        """Return the model's properties as a dict, recursing into nested
        models, lists, and dicts."""
        result = {}

        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two ResourceDTO instances are equal when all attributes match."""
        if not isinstance(other, ResourceDTO):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 26.746753 | 479 | 0.538237 |
ace178cf6164450793ee064cd88ba13d4b9f425f | 2,185 | py | Python | zunzun/http_kernel/router.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | zunzun/http_kernel/router.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | zunzun/http_kernel/router.py | aprezcuba24/zunzun | cc294d9dfb84695be0ed1425cf946a0f4ea644a9 | [
"MIT"
] | null | null | null | from .. import inspect
from werkzeug.routing import Map, Rule
from injector import Injector, inject, singleton
@singleton
class Router:
    """HTTP router that maps werkzeug URL rules onto controller actions,
    resolving each action's annotated parameters through the injector."""
    @inject
    def __init__(self, injector: Injector):
        self.routers_map = Map()
        self.injector = injector
    def _get_parameters(self, action, parameters):
        """Merge request parameters with injector-resolved annotated ones.

        If the action accepts *args/**kwargs, everything is passed through;
        otherwise only its declared argument names are kept (missing ones
        become None).
        """
        args_spec = inspect.getfullargspec(action)
        parameters = {
            **parameters,
            **self._load_parameter_by_type(args_spec.annotations),
        }
        if args_spec.varkw or args_spec.varargs:
            return parameters
        parameters_result = dict()
        for item in args_spec.args:
            parameters_result[item] = parameters.get(item)
        return parameters_result
    def _load_parameter_by_type(self, parameters):
        """Resolve each {name: annotation} entry into an instance via the injector."""
        values = dict()
        for k, v in parameters.items():
            values[k] = self._create_object(v)
        return values
    def _run_action(self, action, **kwargs):
        """Invoke *action* with resolved parameters; when the action is a
        controller method, build the controller and pass it as `self`.

        NOTE(review): `inspect` here is the project's wrapper (from ..),
        which provides findclass — not the stdlib module.
        """
        parameters = self._get_parameters(action, kwargs)
        cls = inspect.findclass(action)
        if cls:
            parameters["self"] = self._create_controller(cls)
        return action(**parameters)
    def _create_controller(self, cls):
        """Instantiate a controller, injecting its annotated constructor args."""
        args_spec = inspect.getfullargspec(cls)
        parameters = self._load_parameter_by_type(args_spec.annotations)
        return cls(**parameters)
    def _create_object(self, cls):
        """Ask the injector for an instance of *cls*."""
        return self.injector.get(cls)
    def _add_route(self, path, **kwargs):
        """Register a werkzeug Rule for *path* in the routing map."""
        self.routers_map.add(Rule(path, **kwargs))
    def any(self, path, **kwargs):
        """Decorator: route *path* (any configured methods) to the action."""
        def wrapper(action):
            self._add_route(
                path,
                endpoint=lambda **parameters: self._run_action(action, **parameters),
                **kwargs
            )
        return wrapper
    def get(self, path, **kwargs):
        """Decorator: route GET requests for *path*."""
        return self.any(path, methods=["GET"], **kwargs)
    def post(self, path, **kwargs):
        """Decorator: route POST requests for *path*."""
        return self.any(path, methods=["POST"], **kwargs)
    def patch(self, path, **kwargs):
        """Decorator: route PATCH requests for *path*."""
        return self.any(path, methods=["PATCH"], **kwargs)
    def delete(self, path, **kwargs):
        """Decorator: route DELETE requests for *path*."""
        return self.any(path, methods=["DELETE"], **kwargs)
| 30.774648 | 85 | 0.616934 |
ace179118b82bd16e9161d6c7fc3ec454061ed32 | 905 | py | Python | ArduinoController/urls.py | dmdhrumilmistry/DeviceControllerWebInterface | 0c170a697f21511de9e4bb1c13a0da8bb1243636 | [
"MIT"
] | 1 | 2021-11-15T14:55:36.000Z | 2021-11-15T14:55:36.000Z | ArduinoController/urls.py | dmdhrumilmistry/DeviceControllerWebInterface | 0c170a697f21511de9e4bb1c13a0da8bb1243636 | [
"MIT"
] | null | null | null | ArduinoController/urls.py | dmdhrumilmistry/DeviceControllerWebInterface | 0c170a697f21511de9e4bb1c13a0da8bb1243636 | [
"MIT"
] | null | null | null | """ArduinoController URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Branding for the Django admin site.
admin.site.site_header = "Devices Controller Admin Page"
admin.site.site_title = "Admin Page"
# Root URL configuration: the admin plus the controller app's routes.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('controller.urls'))
]
| 33.518519 | 77 | 0.714917 |
ace17957900de68e63400f0ad03913fd4abce1ed | 454 | py | Python | bulk_action/urls.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 22 | 2015-01-16T01:36:32.000Z | 2020-06-08T00:46:18.000Z | bulk_action/urls.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 9 | 2019-03-15T11:39:32.000Z | 2019-04-30T00:59:50.000Z | bulk_action/urls.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 13 | 2015-01-13T20:56:22.000Z | 2022-02-23T06:01:17.000Z | from django.conf.urls.defaults import patterns, url
from bulk_action.views import (
bulk_action_export, bulk_action_import, bulk_gather_vlan_pools
)
urlpatterns = patterns(
'bulk_action',
url(r'^export/', bulk_action_export, name='bulk-action-export'),
url(r'^import/', bulk_action_import, name='bulk-action-import'),
url(
r'^gather_vlan_pools/', bulk_gather_vlan_pools,
name='bulk-action-gather-vlan-pools'
),
)
| 30.266667 | 68 | 0.715859 |
ace1798f7162b19bf13a54994796d1b2b75c4bbb | 729 | py | Python | electrum/scripts/servers.py | ShadowMyst/electrum | 4c6379a9365dfaeefe09ee2d52ee9332b9bf6129 | [
"MIT"
] | 5 | 2018-10-31T18:47:54.000Z | 2021-09-20T02:04:42.000Z | electrum/scripts/servers.py | project-mynt/electrum-mynt | ca1548e008854f2a3eff900a69365307cc20bd57 | [
"MIT"
] | 7 | 2018-01-12T12:01:39.000Z | 2018-11-13T05:31:22.000Z | electrum/scripts/servers.py | project-mynt/electrum-mynt | ca1548e008854f2a3eff900a69365307cc20bd57 | [
"MIT"
] | 11 | 2018-10-31T19:46:05.000Z | 2019-09-25T20:18:37.000Z | #!/usr/bin/env python3
import json
import asyncio
from electrum.simple_config import SimpleConfig
from electrum.network import filter_version, Network
from electrum.util import create_and_start_event_loop, log_exceptions
from electrum import constants
import util
# testnet? Uncomment the next line to run against testnet instead.
#constants.set_testnet()
# Minimal Electrum bootstrap: config, event loop, and network thread.
config = SimpleConfig({'testnet': False})
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network(config)
network.start()
@log_exceptions
async def f():
    """Fetch the peer list, filter by protocol version, print it as JSON,
    then signal the event loop to stop."""
    try:
        peers = await util.get_peers(network)
        peers = filter_version(peers)
        print(json.dumps(peers, sort_keys=True, indent=4))
    finally:
        # Always release the waiting main thread, even on failure.
        stopping_fut.set_result(1)
# Schedule the coroutine on the network's event loop thread.
asyncio.run_coroutine_threadsafe(f(), loop)
| 24.3 | 69 | 0.762689 |
ace1799ae6e7d2484fc2600cef7909a456399403 | 444 | py | Python | portfolio/models.py | benjaminbills/portfolio | 8c017a9028f306a916f5d56d32a3cd2e1f97bacc | [
"MIT"
] | null | null | null | portfolio/models.py | benjaminbills/portfolio | 8c017a9028f306a916f5d56d32a3cd2e1f97bacc | [
"MIT"
] | null | null | null | portfolio/models.py | benjaminbills/portfolio | 8c017a9028f306a916f5d56d32a3cd2e1f97bacc | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Project(models.Model):
  """A single portfolio project displayed on the site."""
  # Display name of the project.
  title=models.CharField(max_length=100)
  # One-line teaser. NOTE(review): default='Blank' looks like a placeholder
  # rather than an intentional default — confirm.
  intro=models.CharField(max_length=100, blank=True, default='Blank')
  # Preview image, stored under MEDIA_ROOT/project_photos/.
  image=models.ImageField(upload_to='project_photos/')
  description=models.TextField()
  # Live/demo URL of the project.
  link=models.URLField()
  # NOTE(review): default='Blank' is not a valid URL — confirm intent.
  githublink=models.URLField(blank=True, default='Blank')
  # Free-text list of technologies used.
  techUsed=models.CharField(max_length=500, blank=True, default='Tech used')
ace179e2be52128c9f630440b2b5eee606b4e192 | 3,573 | py | Python | Algorithm.Python/DropboxUniverseSelectionAlgorithm.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 4 | 2020-03-30T06:00:05.000Z | 2020-06-29T02:51:25.000Z | Algorithm.Python/DropboxUniverseSelectionAlgorithm.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 1 | 2021-02-22T21:03:27.000Z | 2021-02-23T19:01:04.000Z | Algorithm.Python/DropboxUniverseSelectionAlgorithm.py | szymanskilukasz/Lean | fe2ac131af2d0614494e5c970a57d4b7c89d5f88 | [
"Apache-2.0"
] | 1 | 2021-01-04T18:03:14.000Z | 2021-01-04T18:03:14.000Z | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
import base64
### <summary>
### In this algortihm we show how you can easily use the universe selection feature to fetch symbols
### to be traded using the BaseData custom data system in combination with the AddUniverse{T} method.
### AddUniverse{T} requires a function that will return the symbols to be traded.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="custom universes" />
class DropboxUniverseSelectionAlgorithm(QCAlgorithm):
    """Universe-selection demo: the traded symbols come from CSV files
    hosted on Dropbox (one live file, one cached backtest file)."""

    def Initialize(self):
        """Configure dates, state, universe resolution and the selector."""
        self.SetStartDate(2017, 7, 4)
        self.SetEndDate(2018, 7, 4)
        self.backtestSymbolsPerDay = {}
        self.current_universe = []
        # Fix: initialize `changes` here so OnData does not raise
        # AttributeError if it fires before the first OnSecuritiesChanged.
        self.changes = None
        self.UniverseSettings.Resolution = Resolution.Daily
        self.AddUniverse("my-dropbox-universe", self.selector)

    def selector(self, date):
        """Return the symbols to trade for *date*.

        Live mode: re-download the live CSV on every call; keep the
        previous universe when the file is empty. Backtest: download and
        cache the whole CSV once, then look up the row keyed by YYYYMMDD,
        keeping the previous universe when no row exists.
        """
        # handle live mode file format
        if self.LiveMode:
            # fetch the file from dropbox (renamed from `str`, which
            # shadowed the builtin)
            content = self.Download("https://www.dropbox.com/s/2l73mu97gcehmh7/daily-stock-picker-live.csv?dl=1")
            # if we have a file for today, return symbols, else leave universe unchanged
            self.current_universe = content.split(',') if len(content) > 0 else self.current_universe
            return self.current_universe

        # backtest - first cache the entire file
        if len(self.backtestSymbolsPerDay) == 0:
            # No need for headers for authorization with dropbox, these two lines are for example purposes
            byteKey = base64.b64encode("UserName:Password".encode('ASCII'))
            # The headers must be passed to the Download method as dictionary
            headers = { 'Authorization' : f'Basic ({byteKey.decode("ASCII")})' }
            content = self.Download("https://www.dropbox.com/s/ae1couew5ir3z9y/daily-stock-picker-backtest.csv?dl=1", headers)
            for line in content.splitlines():
                data = line.split(',')
                # Row format: date,SYM1,SYM2,...
                self.backtestSymbolsPerDay[data[0]] = data[1:]

        index = date.strftime("%Y%m%d")
        self.current_universe = self.backtestSymbolsPerDay.get(index, self.current_universe)
        return self.current_universe

    def OnData(self, slice):
        """Rebalance equally across all bars after a universe change."""
        if slice.Bars.Count == 0: return
        if self.changes is None: return
        # start fresh
        self.Liquidate()
        percentage = 1 / slice.Bars.Count
        for tradeBar in slice.Bars.Values:
            self.SetHoldings(tradeBar.Symbol, percentage)
        # reset changes so we only rebalance once per universe update
        self.changes = None

    def OnSecuritiesChanged(self, changes):
        """Record the latest universe changes; consumed by OnData."""
        self.changes = changes
ace17a4933cd4d41ec0226d29a12a3bbaac691fd | 5,937 | py | Python | roadsearch/utils/catmull.py | ERATOMMSD/roadsearch | e5b32b70835a51d56d10547720d90e34ade08564 | [
"MIT"
] | null | null | null | roadsearch/utils/catmull.py | ERATOMMSD/roadsearch | e5b32b70835a51d56d10547720d90e34ade08564 | [
"MIT"
] | null | null | null | roadsearch/utils/catmull.py | ERATOMMSD/roadsearch | e5b32b70835a51d56d10547720d90e34ade08564 | [
"MIT"
] | null | null | null | # This code is used in the paper
# "Model-based exploration of the frontier of behaviours for deep learning system testing"
# by V. Riccio and P. Tonella
# https://doi.org/10.1145/3368089.3409730
import numpy as np
from random import randint
from typing import List, Tuple
from shapely.geometry import Point
import math
def catmull_rom_spline(p0, p1, p2, p3, num_points=20, alpha=0.5):
    """Sample the Catmull-Rom segment between p1 and p2.

    p0..p3 are (x, y) control points; only the span p1->p2 is evaluated,
    with p0 and p3 shaping the tangents. `alpha` selects the knot
    parameterization (0 = uniform, 0.5 = centripetal — the previous
    hard-coded behavior and the default, 1 = chordal). Coincident
    consecutive control points make knots collide and produce divisions
    by zero, so control points must be distinct.

    Returns an array of `num_points` (x, y) samples from p1 to p2.
    """
    # Convert the points to numpy so that we can do array multiplication
    p0, p1, p2, p3 = map(np.array, [p0, p1, p2, p3])

    def tj(ti, p_i, p_j):
        # Next knot value: previous knot plus |p_j - p_i| ** alpha.
        xi, yi = p_i
        xj, yj = p_j
        return (((xj - xi) ** 2 + (yj - yi) ** 2) ** 0.5) ** alpha + ti

    # Knot sequence
    t0 = 0
    t1 = tj(t0, p0, p1)
    t2 = tj(t1, p1, p2)
    t3 = tj(t2, p2, p3)

    # Only calculate points between p1 and p2
    t = np.linspace(t1, t2, num_points)

    # Reshape so that we can multiply by the points p0 to p3
    # and get a point for each value of t.
    t = t.reshape(len(t), 1)

    # Barry-Goldman pyramidal evaluation of the spline.
    a1 = (t1 - t) / (t1 - t0) * p0 + (t - t0) / (t1 - t0) * p1
    a2 = (t2 - t) / (t2 - t1) * p1 + (t - t1) / (t2 - t1) * p2
    a3 = (t3 - t) / (t3 - t2) * p2 + (t - t2) / (t3 - t2) * p3

    b1 = (t2 - t) / (t2 - t0) * a1 + (t - t0) / (t2 - t0) * a2
    b2 = (t3 - t) / (t3 - t1) * a2 + (t - t1) / (t3 - t1) * a3

    c = (t2 - t) / (t2 - t1) * b1 + (t - t1) / (t2 - t1) * b2
    return c
def catmull_rom_chain(points: List[tuple], num_spline_points=20) -> List:
    """Concatenate the Catmull-Rom segments of *points* into one curve.

    Every consecutive window of 4 control points contributes one segment;
    the first sample of each segment after the first is dropped so shared
    endpoints are not duplicated.
    """
    curve = []
    for start in range(len(points) - 3):
        window = points[start:start + 4]
        segment = catmull_rom_spline(*window, num_spline_points)
        if start > 0:
            segment = np.delete(segment, [0], axis=0)
        curve.extend(segment)
    return curve
def catmull_rom_2d(points: List[tuple], num_points=20) -> List[tuple]:
    """Interpolate *points* with a Catmull-Rom chain and return (x, y) pairs."""
    if len(points) < 4:
        raise ValueError("points should have at least 4 points")
    spline = catmull_rom_chain(points, num_points)
    return [tuple(sample[:2]) for sample in spline]
def catmull_rom(points: List[tuple], num_spline_points=20) -> List[tuple]:
    """Interpolate 4D road nodes (x, y, z, width).

    The spline is computed on the (x, y) projection; z and width are taken
    from the first node and held constant along the whole curve.
    """
    if len(points) < 4:
        raise ValueError("points should have at least 4 points")
    # Every node must share the same road width.
    assert all(node[3] == points[0][3] for node in points)
    xy_nodes = [(node[0], node[1]) for node in points]
    sampled = catmull_rom_chain(xy_nodes, num_spline_points)
    z0, width = points[0][2], points[0][3]
    return [(x, y, z0, width) for x, y in sampled]
# Type aliases for road geometry: a 4-tuple node (x, y, z, width) and a 2-D point.
Tuple4F = Tuple[float, float, float, float]
Tuple2F = Tuple[float, float]
class ControlNodesGenerator:
    """Generate random roads given the configuration parameters"""

    NUM_INITIAL_SEGMENTS_THRESHOLD = 2
    # NOTE(review): NUM_UNDO_ATTEMPTS is unused in this class — confirm
    # whether it is read elsewhere before removing.
    NUM_UNDO_ATTEMPTS = 20

    def __init__(self, num_control_nodes=15, max_angle=None, seg_length=None,
                 num_spline_nodes=None, initial_node=(10.0, 0.0, -28.0, 8.0)):
        # initial_node is a 4-tuple (x, y, z, width); max_angle is the
        # largest turn (degrees) allowed between consecutive segments.
        assert num_control_nodes > 1 and num_spline_nodes > 0
        assert 0 <= max_angle <= 360
        assert seg_length > 0
        assert len(initial_node) == 4

        self.num_control_nodes = num_control_nodes
        self.num_spline_nodes = num_spline_nodes
        self.initial_node = initial_node
        self.max_angle = max_angle
        self.seg_length = seg_length

    def generate_control_nodes(self, num_control_nodes=None) -> List[Tuple4F]:
        """Randomly walk `num_control_nodes` nodes from the initial node."""
        if not num_control_nodes:
            num_control_nodes = self.num_control_nodes
        nodes = [self._get_initial_control_node(), self.initial_node]

        # +2 is added to reflect the two initial nodes that are necessary for catmull_rom
        while len(nodes) < num_control_nodes + 2:
            nodes.append(self._get_next_node(nodes[-2], nodes[-1], self._get_next_max_angle(len(nodes) - 2)))

        return nodes

    def generate(self, num_control_nodes=None):
        """Generate key control nodes and interpolate them into a road."""
        control_nodes = self.generate_key_control_nodes(num_control_nodes)
        return self.control_nodes_to_road(control_nodes)

    def generate_key_control_nodes(self, num_control_nodes):
        """Generate control nodes and drop the two bootstrap nodes."""
        # original call to is_valid and loop was removed since the pipeline is in charge of testing that
        control_nodes = self.generate_control_nodes(num_control_nodes=num_control_nodes)
        control_nodes = control_nodes[2:]
        return control_nodes

    def control_nodes_to_road(self, control_nodes):
        """Spline-interpolate control nodes and return (x, y) road points."""
        nodes = [self.initial_node] + control_nodes
        sample_nodes = catmull_rom(nodes, self.num_spline_nodes)
        road = [(node[0], node[1]) for node in sample_nodes]
        return road

    def _get_initial_point(self) -> Point:
        """Shapely Point at the initial node's (x, y)."""
        return Point(self.initial_node[0], self.initial_node[1])

    def _get_initial_control_node(self) -> Tuple4F:
        """Bootstrap node one segment below (angle 270°) the initial node."""
        x0, y0, z, width = self.initial_node
        x, y = self._get_next_xy(x0, y0, 270)
        return x, y, z, width

    def _get_next_node(self, first_node, second_node: Tuple4F, max_angle) -> Tuple4F:
        """Extend the road by one segment, turning at most ±max_angle degrees."""
        v = np.subtract(second_node, first_node)
        start_angle = int(np.degrees(np.arctan2(v[1], v[0])))
        angle = randint(start_angle - max_angle, start_angle + max_angle)
        x0, y0, z0, width0 = second_node
        x1, y1 = self._get_next_xy(x0, y0, angle)
        return x1, y1, z0, width0

    def _get_next_xy(self, x0: float, y0: float, angle: float) -> Tuple2F:
        """Step seg_length from (x0, y0) in direction `angle` (degrees)."""
        angle_rad = math.radians(angle)
        return x0 + self.seg_length * math.cos(angle_rad), y0 + self.seg_length * math.sin(angle_rad)

    def _get_next_max_angle(self, i: int, threshold=NUM_INITIAL_SEGMENTS_THRESHOLD) -> float:
        """Force straight segments at the start and for the final node."""
        if i < threshold or i == self.num_control_nodes - 1:
            return 0
        else:
            return self.max_angle
| 37.815287 | 109 | 0.640054 |
ace17bde255357a5634f06ebe0b0351d1787c981 | 2,054 | py | Python | kyc_aml/forms.py | James-spiff/p2p_lend | 0c2bab307c6e86c11d3f79667fd943be089b7ebf | [
"BSD-3-Clause"
] | null | null | null | kyc_aml/forms.py | James-spiff/p2p_lend | 0c2bab307c6e86c11d3f79667fd943be089b7ebf | [
"BSD-3-Clause"
] | 1 | 2022-03-30T10:23:26.000Z | 2022-03-30T10:23:26.000Z | kyc_aml/forms.py | James-spiff/p2p_lend | 0c2bab307c6e86c11d3f79667fd943be089b7ebf | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from ipware import get_client_ip #get's the clients ip address
from crispy_forms.helper import FormHelper
from django import forms
from django.forms import ValidationError
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from .models import KYCApplication
from locations.models import Country
User = settings.AUTH_USER_MODEL
class DateInput(forms.DateInput):
    # Render with the HTML5 native date picker, i.e. <input type="date">.
    input_type = 'date'
class KYCApplicationForm(forms.ModelForm):
    """ModelForm for submitting a KYC (know-your-customer) application.

    Identity fields that were already captured at signup are shown but
    disabled so the applicant cannot alter them here.
    """
    citizenship = forms.ModelChoiceField(
        queryset=Country.objects.filter(accept_signup=True))  # Only display countries we can accept signup from

    class Meta:
        model = KYCApplication
        # FIX: the original tuple was missing commas after 'reviewer' and 'user',
        # so implicit string concatenation produced 'reviewerkyc_submitted_ip_address'
        # and 'userreference', silently leaving 'reviewer', 'kyc_submitted_ip_address',
        # 'user' and 'reference' exposed on the form.
        exclude = (
            'kyc_status', 'created_date', 'modified_date', 'reviewer',
            'kyc_submitted_ip_address', 'selfie_with_id', 'user',
            'reference', 'date_of_birth', 'number_of_kyc_tries',
        )

    def __init__(self, *args, **kwargs):
        super(KYCApplicationForm, self).__init__(*args, **kwargs)
        self.fields['legal_first_name'].help_text = _("As shown in your documents")
        self.fields['legal_last_name'].help_text = _("As shown in your documents")
        self.fields['politically_exposed_person'].help_text = _("""A politically exposed person(PEP) is one who has been entrusted with
        a prominent public function. A PEP generally present a higher risk for potential involvement in bribery
        and corruption by virtue of their position and the influence that they may hold. 'not_pep'
        implies the user is not politically exposed and 'pep' implies the user is. """)
        self.fields['country_residence'].help_text = _("The country of residence of the user as shown in documents")
        self.fields['citizenship'].help_text = _("The citizenship of the user as shown in documents")
        # These fields were disabled because they were already provided during signup
        self.fields['country_residence'].disabled = True
        self.fields['legal_first_name'].disabled = True
        self.fields['legal_last_name'].disabled = True
        self.helper = FormHelper()
        self.helper.form_show_labels = False
| 41.918367 | 130 | 0.773612 |
ace17cac96c1eb0d094aa39515afa36b0bf43773 | 6,199 | py | Python | src/cloudwatch/modules/configuration/whitelist.py | jsotogaviard/collectd-cloudwatch | 01c2498be917e6e3903cf4518f998ee1b1cc91e5 | [
"MIT"
] | null | null | null | src/cloudwatch/modules/configuration/whitelist.py | jsotogaviard/collectd-cloudwatch | 01c2498be917e6e3903cf4518f998ee1b1cc91e5 | [
"MIT"
] | null | null | null | src/cloudwatch/modules/configuration/whitelist.py | jsotogaviard/collectd-cloudwatch | 01c2498be917e6e3903cf4518f998ee1b1cc91e5 | [
"MIT"
] | null | null | null | import re
from os import path
from string import strip
from threading import Lock
from .configreader import ConfigReader
from ..logger.logger import get_logger
class WhitelistConfigReader(object):
    """
    The WhitelistReader is responsible for parsing the whitelist.conf file into a whitelist regex list
    used by the Whitelist class. During this process the syntax of each line from whitelist.conf is validated.
    Any line that is not a valid regex will be logged and ignored.
    """
    _LOGGER = get_logger(__name__)
    NO_SUCH_FILE = 2  # errno.ENOENT
    START_STRING = "^"
    END_STRING = "$"
    EMPTY_REGEX = START_STRING + END_STRING  # matches nothing but the empty string
    PASS_THROUGH_REGEX_STRING = "^\.[\*\+]?\s.*$|^.*?\s\.[\*\+]|^\.[\*\+]$"  # matches single .*, .+ strings
    # as well as strings with .* or .+ preceded or followed by whitespace.
    def __init__(self, whitelist_config_path, pass_through_allowed):
        self.whitelist_config_path = whitelist_config_path
        self.pass_through_allowed = pass_through_allowed
        self.pass_through_regex = re.compile(self.PASS_THROUGH_REGEX_STRING)
    def get_regex_list(self):
        """
        Reads whitelist configuration file and returns a single string with compound regex.
        :return: regex string used to test if metric is whitelisted
        """
        try:
            return self._get_whitelisted_names_from_file(self.whitelist_config_path)
        except IOError as e:
            # FIX: compare errno with '==' instead of 'is'; identity comparison of
            # integers only works by accident of CPython small-int interning.
            if e.errno == self.NO_SUCH_FILE:
                self._create_whitelist_file(self.whitelist_config_path)
            else:
                self._LOGGER.warning("Could not open whitelist file '" + self.whitelist_config_path + "'. Reason: " + str(e))
            return [self.EMPTY_REGEX]
    def _get_whitelisted_names_from_file(self, whitelist_path):
        # strip whitespace from each raw line before validation
        with open(whitelist_path) as whitelist_file:
            return self._filter_valid_regexes(map(strip, whitelist_file))
    def _create_whitelist_file(self, whitelist_path):
        # create an empty whitelist so subsequent runs do not warn again
        if not path.exists(whitelist_path):
            self._LOGGER.warning("The whitelist configuration file was not detected at " +
                                 whitelist_path + ". Creating new file.")
            with open(whitelist_path, 'w') as whitelist_file:
                whitelist_file.write("")
    def _filter_valid_regexes(self, regex_list):
        # drop invalid/disallowed rules; fall back to a match-nothing regex
        valid_regexes = [self._decorate_regex_line(line) for line in regex_list if self._is_valid_regex(line)]
        return valid_regexes or [self.EMPTY_REGEX]
    def _is_valid_regex(self, regex_string):
        try:
            if self._is_allowed_regex(regex_string):
                re.compile(self._decorate_regex_line(regex_string))
                return True
            return False
        except Exception as e:
            # FIX: use str(e) instead of the deprecated Python-2-only 'e.message'
            self._LOGGER.warning("The whitelist rule: '{}' is invalid, reason: {}".format(str(regex_string), str(e)))
            return False
    def _is_allowed_regex(self, regex_string):
        # reject catch-all rules (".*", ".+") unless pass-through is explicitly enabled
        if self.pass_through_allowed:
            return True
        if self.pass_through_regex.match(regex_string):
            self._LOGGER.warning("The unsafe whitelist rule: '{}' was disabled. "
                                 "Revisit the rule or change {} option in the plugin configuration.".format(regex_string, ConfigReader.PASS_THROUGH_CONFIG_KEY))
            return False
        return True
    def _decorate_regex_line(self, line):
        # anchor each rule so it must match the full metric name
        return self.START_STRING + str(line).strip() + self.END_STRING
class BlockedMetricLogger(object):
    """
    Maintains a separate log of metrics that were rejected by the whitelist.
    The log is recreated on plugin startup so it only contains recent metrics.
    """
    _LOGGER = get_logger(__name__)
    BLOCKED_LOG_HEADER = "# This file is automatically generated - do not modify this file.\
    \n# Use this file to find metrics to be added to the whitelist file instead.\n"
    def __init__(self, log_path):
        # the lock serializes writers so concurrent log_metric calls cannot interleave
        self._log_path = log_path
        self._lock = Lock()
        self._create_log()
    def _create_log(self):
        # truncate the file and write the header; invoked once at startup
        try:
            with self._lock, open(self._log_path, 'w') as log_file:
                log_file.write(self.BLOCKED_LOG_HEADER)
        except IOError as error:
            self._LOGGER.warning("Could not create list of blocked metrics '" + self._log_path +
                                 "'. Reason: " + str(error))
    def log_metric(self, metric_name):
        # append one blocked metric name per line
        try:
            with self._lock, open(self._log_path, 'a') as log_file:
                log_file.write(metric_name + "\n")
        except IOError as error:
            self._LOGGER.warning("Could not update list of blocked metrics '" + self._log_path +
                                 "' with metric: '" + metric_name + "'. Reason: " + str(error))
class Whitelist(object):
    """
    Decides whether a metric should be published or not.
    Each unique metric key is matched against the compiled whitelist regex at
    most once; the verdict is memoized. Rejected metrics are also appended to
    a dedicated log file the first time they are encountered.
    """
    _LOGGER = get_logger(__name__)
    def __init__(self, whitelist_regex_list, blocked_metric_log_path):
        self.blocked_metric_log = BlockedMetricLogger(blocked_metric_log_path)
        self._whitelist_regex = re.compile("|".join(whitelist_regex_list))
        self._allowed_metrics = {}
    def is_whitelisted(self, metric_key):
        """
        Checks whether metric should be emitted or not; unique blocked metrics are logged.
        :param metric_key: string describing all parts that make the actual name of a collectd metric
        :return: True if test is positive, False otherwise.
        """
        try:
            return self._allowed_metrics[metric_key]
        except KeyError:
            pass
        allowed = self._whitelist_regex.match(metric_key) is not None
        self._allowed_metrics[metric_key] = allowed
        if not allowed:
            self.blocked_metric_log.log_metric(metric_key)
        return allowed
| 43.048611 | 160 | 0.659461 |
ace17cc8e712271c355325bb492b2056fa4aca1f | 46,261 | py | Python | drf_spectacular/plumbing.py | quodlibetor/drf-spectacular | fffae4dc07294293decdf1e40463af47a194fa05 | [
"BSD-3-Clause"
] | null | null | null | drf_spectacular/plumbing.py | quodlibetor/drf-spectacular | fffae4dc07294293decdf1e40463af47a194fa05 | [
"BSD-3-Clause"
] | null | null | null | drf_spectacular/plumbing.py | quodlibetor/drf-spectacular | fffae4dc07294293decdf1e40463af47a194fa05 | [
"BSD-3-Clause"
] | null | null | null | import collections
import functools
import hashlib
import inspect
import json
import re
import sys
import types
import typing
import urllib.parse
from abc import ABCMeta
from collections import OrderedDict, defaultdict
from decimal import Decimal
from enum import Enum
from typing import Any, DefaultDict, Generic, List, Optional, Tuple, Type, TypeVar, Union
import inflection
import uritemplate
from django.apps import apps
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields.related_descriptors import (
ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor,
ReverseOneToOneDescriptor,
)
from django.db.models.fields.reverse_related import ForeignObjectRel
from django.db.models.sql.query import Query
from django.urls.converters import get_converters
from django.urls.resolvers import ( # type: ignore[attr-defined]
_PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver,
get_resolver,
)
from django.utils.functional import Promise, cached_property
from django.utils.module_loading import import_string
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions, fields, mixins, serializers, versioning
from rest_framework.compat import unicode_http_header
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory
from rest_framework.utils.mediatypes import _MediaType
from uritemplate import URITemplate
from drf_spectacular.drainage import Literal, _TypedDictMeta, cache, error, warn
from drf_spectacular.settings import spectacular_settings
from drf_spectacular.types import (
DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes,
)
from drf_spectacular.utils import OpenApiParameter
try:
    from django.db.models.enums import Choices  # only available in Django>3
except ImportError:
    # Django 2.x fallback: empty stand-in so issubclass checks still work
    class Choices:  # type: ignore
        pass
# types.UnionType was added in Python 3.10 for new PEP 604 pipe union syntax
if hasattr(types, 'UnionType'):
    UNION_TYPES: Tuple[Any, ...] = (typing.Union, types.UnionType)  # type: ignore
else:
    UNION_TYPES = (typing.Union,)
# functools.cached_property only exists on Python >= 3.8; Django's own
# cached_property is always accepted.
if sys.version_info >= (3, 8):
    CACHED_PROPERTY_FUNCS = (functools.cached_property, cached_property)  # type: ignore
else:
    CACHED_PROPERTY_FUNCS = (cached_property,)  # type: ignore
# generic type variable used by OpenApiGeneratorExtension below
T = TypeVar('T')
class UnableToProceedError(Exception):
    """ Raised internally when model/field traversal cannot continue; callers emit a warning instead of failing. """
    pass
def get_class(obj) -> type:
    """ Return *obj* itself when it already is a class, otherwise its class. """
    if inspect.isclass(obj):
        return obj
    return obj.__class__
def force_instance(serializer_or_field):
    """ Instantiate bare serializer/field classes; pass everything else through unchanged. """
    needs_instantiation = (
        inspect.isclass(serializer_or_field)
        and issubclass(serializer_or_field, (serializers.BaseSerializer, fields.Field))
    )
    if needs_instantiation:
        return serializer_or_field()
    return serializer_or_field
def is_serializer(obj) -> bool:
    """ True if obj is (or instantiates to) a DRF serializer, or a serializer extension matches it. """
    from drf_spectacular.serializers import OpenApiSerializerExtension
    if isinstance(force_instance(obj), serializers.BaseSerializer):
        return True
    return bool(OpenApiSerializerExtension.get_match(obj))
def is_list_serializer(obj) -> bool:
    """ True if obj is (or instantiates to) a DRF ListSerializer (i.e. many=True wrapper). """
    return isinstance(force_instance(obj), serializers.ListSerializer)
def is_basic_serializer(obj) -> bool:
    """ True for a serializer that is not a ListSerializer wrapper. """
    return is_serializer(obj) and not is_list_serializer(obj)
def is_field(obj):
    """ True if obj is a plain serializer field, explicitly excluding serializers. """
    # make sure obj is a serializer field and nothing else.
    # guard against serializers because BaseSerializer(Field)
    return isinstance(force_instance(obj), fields.Field) and not is_serializer(obj)
def is_basic_type(obj, allow_none=True):
    """ True if obj maps to a plain OpenAPI type (an OpenApiTypes member or a basic python type). """
    if not isinstance(obj, collections.abc.Hashable):
        return False
    is_none_value = obj is None or obj is OpenApiTypes.NONE
    if is_none_value and not allow_none:
        return False
    if obj in get_openapi_type_mapping():
        return True
    return obj in PYTHON_TYPE_MAPPING
def is_patched_serializer(serializer, direction) -> bool:
    """ True if this serializer should receive a separate "Patched" component variant. """
    if not spectacular_settings.COMPONENT_SPLIT_PATCH:
        return False
    if not serializer.partial or serializer.read_only:
        return False
    # with COMPONENT_SPLIT_REQUEST, responses reuse the unpatched component
    if spectacular_settings.COMPONENT_SPLIT_REQUEST and direction == 'response':
        return False
    return True
def is_trivial_string_variation(a: str, b: str):
    """ True if both strings are equal up to case, surrounding whitespace and -/_/space separators. """
    def canonical(value):
        return (value or '').strip().lower().replace(' ', '_').replace('-', '_')
    return canonical(a) == canonical(b)
def assert_basic_serializer(serializer):
    """ Fail loudly with a descriptive message if the given object is not a plain serializer. """
    assert is_basic_serializer(serializer), (
        f'internal assumption violated because we expected a basic serializer here and '
        f'instead got a "{serializer}". This may be the result of another app doing '
        f'some unexpected magic or an invalid internal call. Feel free to report this '
        f'as a bug at https://github.com/tfranzel/drf-spectacular/issues'
    )
@cache
def get_lib_doc_excludes():
    """ Build the list of library base classes whose docstrings must not leak into schema descriptions. """
    # do not import on package level due to potential import recursion when loading
    # extensions as recommended:  USER's settings.py -> USER EXTENSIONS -> extensions.py
    # -> plumbing.py -> DRF views -> DRF DefaultSchema -> openapi.py - plumbing.py -> Loop
    from rest_framework import generics, views, viewsets
    return [
        object,
        dict,
        views.APIView,
        *[getattr(serializers, c) for c in dir(serializers) if c.endswith('Serializer')],
        *[getattr(viewsets, c) for c in dir(viewsets) if c.endswith('ViewSet')],
        *[getattr(generics, c) for c in dir(generics) if c.endswith('APIView')],
        *[getattr(mixins, c) for c in dir(mixins) if c.endswith('Mixin')],
    ]
def get_view_model(view, emit_warnings=True):
    """
    obtain model from view via view's queryset. try safer view attribute first
    before going through get_queryset(), which may perform arbitrary operations.
    """
    # safe path: reading the class attribute executes no user code
    model = getattr(getattr(view, 'queryset', None), 'model', None)
    if model is not None:
        return model
    try:
        return view.get_queryset().model
    except Exception as exc:
        # returns None implicitly when the queryset cannot be inspected
        if emit_warnings:
            warn(
                f'Failed to obtain model through view\'s queryset due to raised exception. '
                f'Prevent this either by setting "queryset = Model.objects.none()" on the '
                f'view, checking for "getattr(self, "swagger_fake_view", False)" in '
                f'get_queryset() or by simply using @extend_schema. (Exception: {exc})'
            )
def get_doc(obj):
    """ get doc string with fallback on obj's base classes (ignoring DRF documentation). """
    def post_cleanup(doc: str):
        # also clean up trailing whitespace for each line
        return '\n'.join(line.rstrip() for line in doc.rstrip().split('\n'))
    if not inspect.isclass(obj):
        return post_cleanup(inspect.getdoc(obj) or '')
    def safe_index(lst, item):
        # like list.index() but returns +inf for missing items so min() ignores them
        try:
            return lst.index(item)
        except ValueError:
            return float("inf")
    # position of the first excluded library base class in the MRO; everything
    # at or beyond this index is library code whose docstrings we must not use
    lib_barrier = min(
        safe_index(obj.__mro__, c) for c in spectacular_settings.GET_LIB_DOC_EXCLUDES()
    )
    # walk the user-defined part of the MRO and take the first docstring found
    for cls in obj.__mro__[:lib_barrier]:
        if cls.__doc__:
            return post_cleanup(inspect.cleandoc(cls.__doc__))
    return ''
def get_type_hints(obj):
    """ unpack wrapped partial object and use actual func object """
    target = obj.func if isinstance(obj, functools.partial) else obj
    return typing.get_type_hints(target)
@cache
def get_openapi_type_mapping():
    """ Static OpenAPI type mapping plus the settings-dependent schema for OpenApiTypes.OBJECT. """
    return {
        **OPENAPI_TYPE_MAPPING,
        OpenApiTypes.OBJECT: build_generic_type(),
    }
def build_generic_type():
    """ Schema for a free-form object, honoring the GENERIC_ADDITIONAL_PROPERTIES setting. """
    mode = spectacular_settings.GENERIC_ADDITIONAL_PROPERTIES
    if mode is None:
        return {'type': 'object'}
    if mode == 'bool':
        return {'type': 'object', 'additionalProperties': True}
    return {'type': 'object', 'additionalProperties': {}}
def build_basic_type(obj):
    """
    resolve either enum or actual type and yield schema template for modification
    """
    openapi_type_mapping = get_openapi_type_mapping()
    # NOTE(review): 'type(obj) is None' can never be true (type() never returns
    # None); presumably a historical guard — confirm before removing.
    if obj is None or type(obj) is None or obj is OpenApiTypes.NONE:
        return None
    elif obj in openapi_type_mapping:
        # dict() copy so callers can safely mutate the returned template
        return dict(openapi_type_mapping[obj])
    elif obj in PYTHON_TYPE_MAPPING:
        # translate basic python types (str, int, ...) to their OpenApiTypes equivalent
        return dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[obj]])
    else:
        warn(f'could not resolve type for "{obj}". defaulting to "string"')
        return dict(openapi_type_mapping[OpenApiTypes.STR])
def build_array_type(schema, min_length=None, max_length=None):
    """
    Wrap *schema* in an OpenAPI array schema with optional item-count bounds.

    FIX: array length constraints are expressed with ``minItems``/``maxItems``
    in JSON Schema / OpenAPI; the previously emitted ``minLength``/``maxLength``
    keywords only apply to strings and were silently ignored by validators.
    """
    schema = {'type': 'array', 'items': schema}
    if min_length is not None:
        schema['minItems'] = min_length
    if max_length is not None:
        schema['maxItems'] = max_length
    return schema
def build_object_type(
        properties=None,
        required=None,
        description=None,
        **kwargs
):
    """ Assemble an OpenAPI object schema; extra kwargs are merged in verbatim. """
    schema = {'type': 'object'}
    if description:
        schema['description'] = description.strip()
    if properties:
        schema['properties'] = properties
    # additionalProperties must be placed before 'required' for stable key order
    try:
        schema['additionalProperties'] = kwargs.pop('additionalProperties')
    except KeyError:
        pass
    if required:
        schema['required'] = sorted(required)
    schema.update(kwargs)
    return schema
def build_media_type_object(schema, examples=None):
    """ Wrap a schema (plus optional examples) into an OpenAPI Media Type Object. """
    if examples:
        return {'schema': schema, 'examples': examples}
    return {'schema': schema}
def build_examples_list(examples):
    """ Convert OpenApiExample objects into an OpenAPI examples dict keyed by camelized name. """
    def serialize(example, key):
        entry = {}
        if example.value:
            entry['value'] = example.value
        if example.external_value:
            entry['externalValue'] = example.external_value
        if example.summary:
            entry['summary'] = example.summary
        elif key != example.name:
            # fall back to the human-readable name so it is not lost through camelization
            entry['summary'] = example.name
        if example.description:
            entry['description'] = example.description
        return entry

    result = {}
    for example in examples:
        key = inflection.camelize(example.name.replace(' ', '_'))
        result[key] = serialize(example, key)
    return result
def build_parameter_type(
        name,
        schema,
        location,
        required=False,
        description=None,
        enum=None,
        deprecated=False,
        explode=None,
        style=None,
        default=None,
        allow_blank=True,
        examples=None,
        extensions=None,
):
    """ Assemble an OpenAPI Parameter Object for the given location (query/path/header/cookie). """
    # readOnly/writeOnly are request/response schema concepts and never valid on parameters
    irrelevant_field_meta = ['readOnly', 'writeOnly']
    if location == OpenApiParameter.PATH:
        # path parameters are always required and can be neither nullable nor defaulted
        irrelevant_field_meta += ['nullable', 'default']
    schema = {
        'in': location,
        'name': name,
        'schema': {k: v for k, v in schema.items() if k not in irrelevant_field_meta},
    }
    if description:
        schema['description'] = description
    if required or location == 'path':
        schema['required'] = True
    if deprecated:
        schema['deprecated'] = True
    if explode is not None:
        schema['explode'] = explode
    if style is not None:
        schema['style'] = style
    if enum:
        # sorted for deterministic schema output across runs
        schema['schema']['enum'] = sorted(enum)
    if default is not None and 'default' not in irrelevant_field_meta:
        schema['schema']['default'] = default
    if not allow_blank and schema['schema'].get('type') == 'string':
        # enforce non-empty string without clobbering an explicit minLength
        schema['schema']['minLength'] = schema['schema'].get('minLength', 1)
    if examples:
        schema['examples'] = examples
    if extensions:
        schema.update(sanitize_specification_extensions(extensions))
    return schema
def build_choice_field(field):
    """ Build an enum schema from a ChoiceField, inferring a uniform `type` when possible. """
    # preserve order and remove duplicates
    choices = list(OrderedDict.fromkeys(field.choices))
    # probe from narrowest to widest type: bool must come before int (bool is a
    # subclass of int) and 'number' includes 'integer' per JSON Schema.
    # Ref: https://tools.ietf.org/html/draft-wright-json-schema-validation-00#section-5.21
    type_candidates = (
        ('boolean', bool),
        ('integer', int),
        ('number', (int, float, Decimal)),
        ('string', str),
    )
    schema_type = None
    for candidate_name, candidate_classes in type_candidates:
        if all(isinstance(choice, candidate_classes) for choice in choices):
            schema_type = candidate_name
            break
    if field.allow_blank:
        choices.append('')
    if field.allow_null:
        choices.append(None)
    # The value of `enum` keyword MUST be an array and SHOULD be unique.
    # Ref: https://tools.ietf.org/html/draft-wright-json-schema-validation-00#section-5.20
    schema = {'enum': choices}
    # `type` is optional and only emitted when all choices share one type;
    # mixed-type enums are legal and simply omit it.
    # Ref: https://swagger.io/docs/specification/data-models/data-types/#mixed-type
    if schema_type:
        schema['type'] = schema_type
    return schema
def build_bearer_security_scheme_object(header_name, token_prefix, bearer_format=None):
    """ Either build a bearer scheme or a fallback due to OpenAPI 3.0.3 limitations """
    # normalize Django header quirks (HTTP_X_FOO -> X-foo style)
    if header_name.startswith('HTTP_'):
        header_name = header_name[5:]
    header_name = header_name.replace('_', '-').capitalize()
    if token_prefix == 'Bearer' and header_name == 'Authorization':
        # canonical http bearer scheme
        scheme = {'type': 'http', 'scheme': 'bearer'}
        if bearer_format:
            scheme['bearerFormat'] = bearer_format
        return scheme
    # non-standard prefix/header: fall back to an apiKey scheme with a hint
    return {
        'type': 'apiKey',
        'in': 'header',
        'name': header_name,
        'description': _(
            'Token-based authentication with required prefix "%s"'
        ) % token_prefix
    }
def build_root_object(paths, components, version):
    """ Assemble the top-level OpenAPI 3.0.3 document from paths, components and settings. """
    settings = spectacular_settings
    # combine configured and request-derived versions, e.g. "1.0 (v2)"
    if settings.VERSION and version:
        version = f'{settings.VERSION} ({version})'
    else:
        version = settings.VERSION or version or ''
    root = {
        'openapi': '3.0.3',
        'info': {
            'title': settings.TITLE,
            'version': version,
            **sanitize_specification_extensions(settings.EXTENSIONS_INFO),
        },
        'paths': {**paths, **settings.APPEND_PATHS},
        'components': components,
        **sanitize_specification_extensions(settings.EXTENSIONS_ROOT),
    }
    # optional top-level fields are only emitted when configured
    if settings.DESCRIPTION:
        root['info']['description'] = settings.DESCRIPTION
    if settings.TOS:
        root['info']['termsOfService'] = settings.TOS
    if settings.CONTACT:
        root['info']['contact'] = settings.CONTACT
    if settings.LICENSE:
        root['info']['license'] = settings.LICENSE
    if settings.SERVERS:
        root['servers'] = settings.SERVERS
    if settings.TAGS:
        root['tags'] = settings.TAGS
    if settings.EXTERNAL_DOCS:
        root['externalDocs'] = settings.EXTERNAL_DOCS
    return root
def safe_ref(schema):
    """
    ensure that $ref has its own context and does not remove potential sibling
    entries when $ref is substituted. Note: pops '$ref' from the given dict.
    """
    if '$ref' not in schema or len(schema) == 1:
        return schema
    ref = schema.pop('$ref')
    return {'allOf': [{'$ref': ref}], **schema}
def append_meta(schema, meta):
    """ Merge field metadata into a schema, wrapping $ref in allOf when siblings are present. """
    return safe_ref({**schema, **meta})
def _follow_field_source(model, path: List[str]):
    """
    navigate through root model via given navigation path. supports forward/reverse relations.
    Recurses on intermediate segments; the final segment resolves to a model
    field, property getter, or plain function.
    """
    field_or_property = getattr(model, path[0], None)
    if len(path) == 1:
        # end of traversal
        if isinstance(field_or_property, property):
            return field_or_property.fget
        elif isinstance(field_or_property, CACHED_PROPERTY_FUNCS):
            return field_or_property.func
        elif callable(field_or_property):
            return field_or_property
        elif isinstance(field_or_property, ManyToManyDescriptor):
            if field_or_property.reverse:
                return field_or_property.rel.target_field  # m2m reverse
            else:
                return field_or_property.field.target_field  # m2m forward
        elif isinstance(field_or_property, ReverseOneToOneDescriptor):
            return field_or_property.related.target_field  # o2o reverse
        elif isinstance(field_or_property, ReverseManyToOneDescriptor):
            return field_or_property.rel.target_field  # type: ignore # foreign reverse
        elif isinstance(field_or_property, ForwardManyToOneDescriptor):
            return field_or_property.field.target_field  # type: ignore # o2o & foreign forward
        else:
            field = model._meta.get_field(path[0])
            if isinstance(field, ForeignObjectRel):
                # case only occurs when relations are traversed in reverse and
                # not via the related_name (default: X_set) but the model name.
                return field.target_field
            else:
                return field
    else:
        # intermediate segment: figure out the next model to recurse into
        if (
            isinstance(field_or_property, (property,) + CACHED_PROPERTY_FUNCS)
            or callable(field_or_property)
        ):
            # traversal through a property/function requires a return type hint
            if isinstance(field_or_property, property):
                target_model = _follow_return_type(field_or_property.fget)
            elif isinstance(field_or_property, CACHED_PROPERTY_FUNCS):
                target_model = _follow_return_type(field_or_property.func)
            else:
                target_model = _follow_return_type(field_or_property)
            if not target_model:
                raise UnableToProceedError(
                    f'could not follow field source through intermediate property "{path[0]}" '
                    f'on model {model}. Please add a type hint on the model\'s property/function '
                    f'to enable traversal of the source path "{".".join(path)}".'
                )
            return _follow_field_source(target_model, path[1:])
        else:
            target_model = model._meta.get_field(path[0]).related_model
            return _follow_field_source(target_model, path[1:])
return _follow_field_source(target_model, path[1:])
def _follow_return_type(a_callable):
target_type = get_type_hints(a_callable).get('return')
if target_type is None:
return target_type
origin, args = _get_type_hint_origin(target_type)
if origin in UNION_TYPES:
type_args = [arg for arg in args if arg is not type(None)] # noqa: E721
if len(type_args) > 1:
warn(
f'could not traverse Union type, because we don\'t know which type to choose '
f'from {type_args}. Consider terminating "source" on a custom property '
f'that indicates the expected Optional/Union type. Defaulting to "string"'
)
return target_type
# Optional:
return type_args[0]
return target_type
def follow_field_source(model, path, emit_warnings=True):
    """
    a model traversal chain "foreignkey.foreignkey.value" can either end with an actual model field
    instance "value" or a model property function named "value". differentiate the cases.
    :return: models.Field or function object
    """
    try:
        return _follow_field_source(model, path)
    except UnableToProceedError as e:
        if emit_warnings:
            warn(e)
    except Exception as exc:
        if emit_warnings:
            warn(
                f'could not resolve field on model {model} with path "{".".join(path)}". '
                f'This is likely a custom field that does some unknown magic. Maybe '
                f'consider annotating the field/property? Defaulting to "string". (Exception: {exc})'
            )

    # fallback on failure: a string-annotated placeholder so schema generation continues
    def dummy_property(obj) -> str:
        pass  # pragma: no cover
    return dummy_property
def follow_model_field_lookup(model, lookup):
    """
    Follow a model lookup `foreignkey__foreignkey__field` in the same
    way that Django QuerySet.filter() does, returning the final models.Field.
    """
    query = Query(model)
    lookup_splitted = lookup.split(LOOKUP_SEP)
    # names_to_path resolves the lookup chain; the second element is the final field
    _, field, _, _ = query.names_to_path(lookup_splitted, query.get_meta())
    return field
def alpha_operation_sorter(endpoint):
    """ Sort key: path (alphanumeric with tweaks) first, then canonical HTTP method order. """
    path, _path_regex, method, _callback = endpoint
    method_order = ('GET', 'POST', 'PUT', 'PATCH', 'DELETE')
    method_priority = method_order.index(method) if method in method_order else 5
    # Sort foo{arg} after foo/, but before foo/bar:
    # a trailing slash becomes a space (sorts before printable chars) and '{'
    # maps to '!' so parameterized segments sort before literal ones.
    if path.endswith('/'):
        path = path[:-1] + ' '
    return path.replace('{', '!'), method_priority
class ResolvedComponent:
    """ A named, typed schema component together with the object it was derived from. """
    SCHEMA = 'schemas'
    SECURITY_SCHEMA = 'securitySchemes'

    def __init__(self, name, type, schema=None, object=None):
        self.name = name
        self.type = type
        self.schema = schema
        self.object = object

    def __bool__(self):
        # a component is only usable once name, type and source object are all set
        return all((self.name, self.type, self.object))

    @property
    def key(self):
        """ registry key: components are unique per (name, type) pair """
        return self.name, self.type

    @property
    def ref(self) -> dict:
        """ OpenAPI reference object pointing into the component tree """
        assert self.__bool__()
        return {'$ref': f'#/components/{self.type}/{self.name}'}
class ComponentRegistry:
    """ Flat registry of ResolvedComponent objects, keyed by (name, type). """
    def __init__(self):
        self._components = {}

    def register(self, component: ResolvedComponent):
        """ Unconditionally store the component, warning on re-registration. """
        if component in self:
            warn(
                f'trying to re-register a {component.type} component with name '
                f'{self._components[component.key].name}. this might lead to '
                f'a incorrect schema. Look out for reused names'
            )
        self._components[component.key] = component

    def register_on_missing(self, component: ResolvedComponent):
        """ Store the component only if no component with the same key exists yet. """
        if component not in self:
            self._components[component.key] = component

    def __contains__(self, component):
        # membership is by key; additionally warn when the same name is backed
        # by a different class, which indicates a naming collision.
        if component.key not in self._components:
            return False
        query_obj = component.object
        registry_obj = self._components[component.key].object
        query_class = query_obj if inspect.isclass(query_obj) else query_obj.__class__
        # FIX: the original read 'query_obj if inspect.isclass(registry_obj) ...',
        # which compared the wrong object whenever the registered object was a
        # class and thereby broke the collision warning below.
        registry_class = registry_obj if inspect.isclass(registry_obj) else registry_obj.__class__
        if query_class != registry_class:
            warn(
                f'Encountered 2 components with identical names "{component.name}" and '
                f'different classes {query_class} and {registry_class}. This will very '
                f'likely result in an incorrect schema. Try renaming one.'
            )
        return True

    def __getitem__(self, key):
        if isinstance(key, ResolvedComponent):
            key = key.key
        return self._components[key]

    def __delitem__(self, key):
        if isinstance(key, ResolvedComponent):
            key = key.key
        del self._components[key]

    def build(self, extra_components) -> dict:
        """ Materialize the OpenAPI components tree, sorted by type and then name. """
        output: DefaultDict[str, dict] = defaultdict(dict)
        # build tree from flat registry
        for component in self._components.values():
            output[component.type][component.name] = component.schema
        # add/override extra components
        for extra_type, extra_component_dict in extra_components.items():
            for component_name, component_schema in extra_component_dict.items():
                output[extra_type][component_name] = component_schema
        # sort by component type then by name
        return {
            type: {name: output[type][name] for name in sorted(output[type].keys())}
            for type in sorted(output.keys())
        }
class OpenApiGeneratorExtension(Generic[T], metaclass=ABCMeta):
    """ Base class for extension mechanisms: subclasses auto-register and are matched against targets. """
    # every subclass appends itself here via __init_subclass__
    _registry: List[T] = []
    # class (or lazy import string) this extension applies to
    target_class: Union[None, str, Type[object]] = None
    match_subclasses = False
    # higher priority wins when multiple extensions match
    priority = 0
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        cls._registry.append(cls)
    def __init__(self, target):
        self.target = target
    @classmethod
    def _load_class(cls):
        # resolve a string target_class lazily; None-out on failure so the
        # extension is treated as "app not installed"
        try:
            cls.target_class = import_string(cls.target_class)
        except ImportError:
            installed_apps = apps.app_configs.keys()
            if any(cls.target_class.startswith(app + '.') for app in installed_apps):
                warn(
                    f'registered extensions {cls.__name__} for "{cls.target_class}" '
                    f'has an installed app but target class was not found.'
                )
            cls.target_class = None
    @classmethod
    def _matches(cls, target) -> bool:
        if isinstance(cls.target_class, str):
            cls._load_class()
        if cls.target_class is None:
            return False  # app not installed
        elif cls.match_subclasses:
            return issubclass(get_class(target), cls.target_class)  # type: ignore
        else:
            return get_class(target) == cls.target_class
    @classmethod
    def get_match(cls, target) -> Optional[T]:
        """ Return an instance of the highest-priority matching extension, or None. """
        for extension in sorted(cls._registry, key=lambda e: e.priority, reverse=True):
            if extension._matches(target):
                return extension(target)
        return None
def deep_import_string(string):
    """ augmented import from string, e.g. MODULE.CLASS/OBJECT.ATTRIBUTE; returns None on failure """
    # first attempt: the string is a plain module/class path
    try:
        return import_string(string)
    except ImportError:
        pass
    # second attempt: the last segment is an attribute of an importable object
    module_path, _sep, attr = string.rpartition('.')
    try:
        return getattr(import_string(module_path), attr)
    except (ImportError, AttributeError):
        return None
@cache
def load_enum_name_overrides():
    """ Build a lookup table mapping a hash of each choice set to its configured enum name. """
    overrides = {}
    for name, choices in spectacular_settings.ENUM_NAME_OVERRIDES.items():
        # choices may be given as an import string, a Django Choices class,
        # an Enum class, a plain value list, or a (value, label) choice list
        if isinstance(choices, str):
            choices = deep_import_string(choices)
        if not choices:
            warn(
                f'unable to load choice override for {name} from ENUM_NAME_OVERRIDES. '
                f'please check module path string.'
            )
            continue
        if inspect.isclass(choices) and issubclass(choices, Choices):
            choices = choices.choices
        if inspect.isclass(choices) and issubclass(choices, Enum):
            choices = [c.value for c in choices]
        normalized_choices = []
        for choice in choices:
            if isinstance(choice, str):
                normalized_choices.append((choice, choice))  # simple choice list
            elif isinstance(choice[1], (list, tuple)):
                normalized_choices.extend(choice[1])  # categorized nested choices
            else:
                normalized_choices.append(choice)  # normal 2-tuple form
        # key on the hash of the deduplicated value list
        overrides[list_hash(list(dict(normalized_choices).keys()))] = name
    # two overrides collapsing to the same hash means duplicate choice sets
    if len(spectacular_settings.ENUM_NAME_OVERRIDES) != len(overrides):
        error(
            'ENUM_NAME_OVERRIDES has duplication issues. Encountered multiple names '
            'for the same choice set. Enum naming might be unexpected.'
        )
    return overrides
def list_hash(lst):
    """Return a stable SHA-256 hex digest identifying the given sequence."""
    serialized = json.dumps(list(lst), sort_keys=True)
    digest = hashlib.sha256(serialized.encode())
    return digest.hexdigest()
def anchor_pattern(pattern: str) -> str:
    """Return *pattern* anchored at both ends (``^`` prefix, ``$`` suffix)."""
    prefix = '' if pattern.startswith('^') else '^'
    suffix = '' if pattern.endswith('$') else '$'
    return f'{prefix}{pattern}{suffix}'
def resolve_django_path_parameter(path_regex, variable, available_formats):
    """
    convert django style path parameters to OpenAPI parameters.

    Returns the parameter dict for *variable* if it appears as a typed
    ``<converter:name>`` component in *path_regex*, otherwise ``None``.
    """
    registered_converters = get_converters()
    for match in _PATH_PARAMETER_COMPONENT_RE.finditer(path_regex):
        converter, parameter = match.group('converter'), match.group('parameter')
        enum_values = None
        # mirror DRF's coercion of "pk" to "id" (and optionally "*_pk" to "*_id")
        if api_settings.SCHEMA_COERCE_PATH_PK and parameter == 'pk':
            parameter = 'id'
        elif spectacular_settings.SCHEMA_COERCE_PATH_PK_SUFFIX and parameter.endswith('_pk'):
            parameter = f'{parameter[:-3]}_id'
        if parameter != variable:
            continue
        # RE also matches untyped patterns (e.g. "<id>")
        if not converter:
            return None
        # special handling for drf_format_suffix
        if converter.startswith('drf_format_suffix_'):
            explicit_formats = converter[len('drf_format_suffix_'):].split('_')
            enum_values = [
                f'.{suffix}' for suffix in explicit_formats if suffix in available_formats
            ]
            converter = 'drf_format_suffix'
        elif converter == 'drf_format_suffix':
            enum_values = [f'.{suffix}' for suffix in available_formats]
        # user overrides take precedence over built-in converter mappings
        if converter in spectacular_settings.PATH_CONVERTER_OVERRIDES:
            override = spectacular_settings.PATH_CONVERTER_OVERRIDES[converter]
            if is_basic_type(override):
                schema = build_basic_type(override)
            elif isinstance(override, dict):
                schema = dict(override)
            else:
                warn(
                    f'Unable to use path converter override for "{converter}". '
                    f'Please refer to the documentation on how to use this.'
                )
                return None
        elif converter in DJANGO_PATH_CONVERTER_MAPPING:
            schema = build_basic_type(DJANGO_PATH_CONVERTER_MAPPING[converter])
        elif converter in registered_converters:
            # gracious fallback for custom converters that have no override specified.
            schema = build_basic_type(OpenApiTypes.STR)
            schema['pattern'] = anchor_pattern(registered_converters[converter].regex)
        else:
            error(f'Encountered path converter "{converter}" that is unknown to Django.')
            return None
        return build_parameter_type(
            name=variable,
            schema=schema,
            location=OpenApiParameter.PATH,
            enum=enum_values,
        )
    return None
def resolve_regex_path_parameter(path_regex, variable):
    """
    convert regex path parameter to OpenAPI parameter, if pattern is
    explicitly chosen and not the generic non-empty default '[^/.]+'.

    Returns ``None`` when *variable* is absent or uses the default pattern.
    """
    for parameter, pattern in analyze_named_regex_pattern(path_regex).items():
        # mirror DRF's coercion of "pk" to "id" (and optionally "*_pk" to "*_id")
        if api_settings.SCHEMA_COERCE_PATH_PK and parameter == 'pk':
            parameter = 'id'
        elif spectacular_settings.SCHEMA_COERCE_PATH_PK_SUFFIX and parameter.endswith('_pk'):
            parameter = f'{parameter[:-3]}_id'
        if parameter != variable:
            continue
        # do not use default catch-all pattern and defer to model resolution
        if pattern == '[^/.]+':
            return None
        return build_parameter_type(
            name=variable,
            schema={
                **build_basic_type(OpenApiTypes.STR),
                'pattern': anchor_pattern(pattern),
            },
            location=OpenApiParameter.PATH,
        )
    return None
def is_versioning_supported(versioning_class):
    """Whether the given DRF versioning scheme can be reflected in the schema."""
    supported_schemes = (
        versioning.URLPathVersioning,
        versioning.NamespaceVersioning,
        versioning.AcceptHeaderVersioning,
    )
    return issubclass(versioning_class, supported_schemes)
def operation_matches_version(view, requested_version):
    """Check whether the view negotiates to *requested_version* for its request."""
    try:
        negotiated_version, _ = view.determine_version(view.request, **view.kwargs)
    except exceptions.NotAcceptable:
        # version negotiation rejected the request outright
        return False
    return str(negotiated_version) == str(requested_version)
def modify_for_versioning(patterns, method, path, view, requested_version):
    """Mutate *view*/*path* so schema generation sees the requested API version.

    Emulates what DRF routing/negotiation would have done for a real request
    under the view's versioning scheme. Returns the (possibly substituted)
    path.
    """
    assert view.versioning_class and view.request
    assert requested_version
    view.request.version = requested_version
    if issubclass(view.versioning_class, versioning.URLPathVersioning):
        version_param = view.versioning_class.version_param
        # substitute version variable to emulate request
        path = uritemplate.partial(path, var_dict={version_param: requested_version})
        if isinstance(path, URITemplate):
            path = path.uri
        # emulate router behaviour by injecting substituted variable into view
        view.kwargs[version_param] = requested_version
    elif issubclass(view.versioning_class, versioning.NamespaceVersioning):
        try:
            view.request.resolver_match = get_resolver(
                urlconf=detype_patterns(tuple(patterns)),
            ).resolve(path)
        except Resolver404:
            error(f"namespace versioning path resolution failed for {path}. Path will be ignored.")
    elif issubclass(view.versioning_class, versioning.AcceptHeaderVersioning):
        # Append the version into request accepted_media_type.
        # e.g "application/json; version=1.0"
        # To allow the AcceptHeaderVersioning negotiator going through.
        if not hasattr(view.request, 'accepted_renderer'):
            # Probably a mock request, content negotiation was not performed, so, we do it now.
            negotiated = view.perform_content_negotiation(view.request)
            view.request.accepted_renderer, view.request.accepted_media_type = negotiated
        media_type = _MediaType(view.request.accepted_media_type)
        view.request.accepted_media_type = (
            f'{media_type.full_type}; {view.versioning_class.version_param}={requested_version}'
        )
    return path
def modify_media_types_for_versioning(view, media_types: List[str]) -> List[str]:
    """Append the negotiated version parameter to the given media types.

    Only applies to AcceptHeaderVersioning, and only when a non-default
    version was actually requested; otherwise the list passes through.
    """
    if (
        not view.versioning_class
        or not issubclass(view.versioning_class, versioning.AcceptHeaderVersioning)
    ):
        return media_types
    media_type = _MediaType(view.request.accepted_media_type)
    version = media_type.params.get(view.versioning_class.version_param)  # type: ignore
    version = unicode_http_header(version)
    if not version or version == view.versioning_class.default_version:
        return media_types
    return [
        f'{media_type}; {view.versioning_class.version_param}={version}'
        for media_type in media_types
    ]
def analyze_named_regex_pattern(path):
    """Safely extract named groups and their sub-patterns from a regex string.

    Returns a mapping of group name to the raw regex fragment it captures,
    stepping correctly over escaped characters and nested parentheses.
    """
    groups = {}
    open_parens = 0
    in_name, group_name = False, ''
    in_regex, group_regex = False, ''
    pos = 0
    end = len(path)
    while pos < end:
        # determine how to treat the character(s) at this position
        consume_silently = False
        head = path[pos]
        if head == '\\':
            # escaped character: take it together with the char it escapes
            step = 2
        elif path[pos:pos + 4] == '(?P<':
            # start of a named group: switch to collecting the group name
            consume_silently = True
            in_name = True
            step = 4
        elif head == '(':
            open_parens += 1
            step = 1
        elif head == '>' and in_name:
            # name finished; everything up to the matching ')' is the pattern
            assert group_name
            in_name = False
            in_regex = True
            consume_silently = True
            step = 1
        elif head == ')':
            if in_regex and not open_parens:
                # closing the named group itself: record and reset buffers
                in_regex = False
                groups[group_name] = group_regex
                group_name, group_regex = '', ''
            else:
                # closing a nested (or unrelated) group
                open_parens -= 1
            step = 1
        else:
            step = 1
        # route the consumed characters into the appropriate buffer
        if in_name and not consume_silently:
            group_name += path[pos:pos + step]
        elif in_regex and not consume_silently:
            group_regex += path[pos:pos + step]
        pos += step
    assert not open_parens
    return groups
@cache
def detype_patterns(patterns):
    """Cache detyped patterns due to the expensive nature of rebuilding URLResolver."""
    # patterns must arrive as a (hashable) tuple for @cache to work
    return tuple(detype_pattern(pattern) for pattern in patterns)
def detype_pattern(pattern):
    """
    return an equivalent pattern that accepts arbitrary values for path parameters.
    de-typing the path will ease determining a matching route without having properly
    formatted dummy values for all path parameters.
    """
    if isinstance(pattern, URLResolver):
        # recurse into includes, rebuilding the resolver over detyped children
        return URLResolver(
            pattern=detype_pattern(pattern.pattern),
            urlconf_name=[detype_pattern(p) for p in pattern.url_patterns],
            default_kwargs=pattern.default_kwargs,
            app_name=pattern.app_name,
            namespace=pattern.namespace,
        )
    elif isinstance(pattern, URLPattern):
        return URLPattern(
            pattern=detype_pattern(pattern.pattern),
            callback=pattern.callback,
            default_args=pattern.default_args,
            name=pattern.name,
        )
    elif isinstance(pattern, RoutePattern):
        # strip converter annotations: "<int:pk>" -> "<pk>"
        return RoutePattern(
            route=re.sub(r'<\w+:(\w+)>', r'<\1>', pattern._route),
            name=pattern.name,
            is_endpoint=pattern._is_endpoint
        )
    elif isinstance(pattern, RegexPattern):
        # replace each named group's sub-pattern with a catch-all
        detyped_regex = pattern._regex
        for name, regex in analyze_named_regex_pattern(pattern._regex).items():
            detyped_regex = detyped_regex.replace(
                f'(?P<{name}>{regex})',
                f'(?P<{name}>[^/]+)',
            )
        return RegexPattern(
            regex=detyped_regex,
            name=pattern.name,
            is_endpoint=pattern._is_endpoint
        )
    else:
        warn(f'unexpected pattern "{pattern}" encountered while simplifying urlpatterns.')
        return pattern
def normalize_result_object(result):
    """Recursively resolve non-serializable objects in a result structure.

    Converts lazy translation strings (``Promise``) to plain ``str``,
    mappings (incl. ``OrderedDict``) to plain ``dict``, sequences to plain
    ``list``, and coerces subclasses of the basic scalar types down to the
    base type itself. Anything else is returned unchanged.
    """
    # OrderedDict is a dict subclass, so a single isinstance check suffices
    if isinstance(result, dict):
        return {k: normalize_result_object(v) for k, v in result.items()}
    if isinstance(result, (list, tuple)):
        return [normalize_result_object(v) for v in result]
    if isinstance(result, Promise):
        return str(result)
    # order matters: bool must be tested before int (bool is an int subclass)
    for base_type in (bool, int, float, str):
        if isinstance(result, base_type):
            return base_type(result)  # coerce basic sub types
    return result
def sanitize_result_object(result):
    """Resolve operationId collisions in a generated schema in-place."""
    # warn about and resolve operationId collisions with suffixes
    operations = defaultdict(list)
    for path, methods in result['paths'].items():
        for method, operation in methods.items():
            operations[operation['operationId']].append((path, method))
    for operation_id, paths in operations.items():
        if len(paths) == 1:
            continue
        warn(f'operationId "{operation_id}" has collisions {paths}. resolving with numeral suffixes.')
        # first (sorted) occurrence keeps the plain id; the rest get 2, 3, ...
        for idx, (path, method) in enumerate(sorted(paths)[1:], start=2):
            suffix = str(idx) if spectacular_settings.CAMELIZE_NAMES else f'_{idx}'
            result['paths'][path][method]['operationId'] += suffix
    return result
def sanitize_specification_extensions(extensions):
    """Filter a mapping down to valid OpenAPI specification extensions.

    Per https://spec.openapis.org/oas/v3.0.3#specification-extensions only
    keys prefixed with ``x-`` are allowed; offending keys are dropped with
    a warning.
    """
    output = {}
    for key, value in extensions.items():
        # a plain prefix check is clearer (and cheaper) than the regex ^x-
        if key.startswith('x-'):
            output[key] = value
        else:
            warn(f'invalid extension {key!r}. vendor extensions must start with "x-"')
    return output
def camelize_operation(path, operation):
    """Camel-case the path variables, path parameters and operationId in-place.

    Returns the rewritten ``(path, operation)`` pair.
    """
    for path_variable in re.findall(r'\{(\w+)\}', path):
        path = path.replace(
            f'{{{path_variable}}}',
            f'{{{inflection.camelize(path_variable, False)}}}'
        )
    for parameter in operation.get('parameters', []):
        # only path parameters mirror the path variable names
        if parameter['in'] == OpenApiParameter.PATH:
            parameter['name'] = inflection.camelize(parameter['name'], False)
    operation['operationId'] = inflection.camelize(operation['operationId'], False)
    return path, operation
def build_mock_request(method, path, view, original_request, **kwargs):
    """ build a mocked request and use original request as reference if available """
    request = getattr(APIRequestFactory(), method.lower())(path=path)
    request = view.initialize_request(request)
    if original_request:
        # carry over authentication state from the triggering request
        request.user = original_request.user
        request.auth = original_request.auth
        # ignore headers related to authorization as it has been handled above.
        # also ignore ACCEPT as the MIME type refers to SpectacularAPIView and the
        # version (if available) has already been processed by SpectacularAPIView.
        for name, value in original_request.META.items():
            if not name.startswith('HTTP_'):
                continue
            if name in ['HTTP_ACCEPT', 'HTTP_COOKIE', 'HTTP_AUTHORIZATION']:
                continue
            request.META[name] = value
    return request
def set_query_parameters(url, **kwargs) -> str:
    """Deconstruct *url*, merge in the non-None query parameters, rebuild it."""
    parts = urllib.parse.urlparse(url)
    query_dict = urllib.parse.parse_qs(parts.query)
    for key, value in kwargs.items():
        if value is not None:
            query_dict[key] = value
    new_query = urllib.parse.urlencode(query_dict, doseq=True)
    return urllib.parse.urlunparse(parts._replace(query=new_query))
def get_relative_url(url: str) -> str:
    """Strip scheme and host from *url*, keeping path, params, query, fragment."""
    parsed = urllib.parse.urlparse(url)
    relative = parsed._replace(scheme='', netloc='')
    return urllib.parse.urlunparse(relative)
def _get_type_hint_origin(hint):
""" graceful fallback for py 3.8 typing functionality """
if sys.version_info >= (3, 8):
return typing.get_origin(hint), typing.get_args(hint)
else:
origin = getattr(hint, '__origin__', None)
args = getattr(hint, '__args__', None)
origin = {
typing.List: list,
typing.Dict: dict,
typing.Tuple: tuple,
typing.Set: set,
typing.FrozenSet: frozenset
}.get(origin, origin)
return origin, args
def _resolve_typeddict(hint):
    """Build an object schema for a TypedDict hint.

    Required keys are only reliably exposed via ``__required_keys__`` on
    Python 3.9+; on older versions they are omitted (``None``).
    """
    required = None
    if sys.version_info >= (3, 9):
        # a plain list() copy replaces the needless identity comprehension
        required = list(hint.__required_keys__)
    return build_object_type(
        properties={
            k: resolve_type_hint(v) for k, v in get_type_hints(hint).items()
        },
        required=required,
        description=get_doc(hint),
    )
def resolve_type_hint(hint):
    """ resolve return value type hints to schema

    Dispatches on the hint's typing origin. Raises UnableToProceedError
    for hints that cannot be mapped to a schema.
    """
    origin, args = _get_type_hint_origin(hint)
    if origin is None and is_basic_type(hint, allow_none=False):
        return build_basic_type(hint)
    elif origin is None and inspect.isclass(hint) and issubclass(hint, tuple):
        # a convoluted way to catch NamedTuple. suggestions welcome.
        if get_type_hints(hint):
            properties = {k: resolve_type_hint(v) for k, v in get_type_hints(hint).items()}
        else:
            properties = {k: build_basic_type(OpenApiTypes.ANY) for k in hint._fields}
        return build_object_type(properties=properties, required=properties.keys())
    elif origin is list or hint is list:
        return build_array_type(
            resolve_type_hint(args[0]) if args else build_basic_type(OpenApiTypes.ANY)
        )
    elif origin is tuple:
        # fixed-length tuple: array constrained to the exact length
        return build_array_type(
            schema=build_basic_type(args[0]),
            max_length=len(args),
            min_length=len(args),
        )
    elif origin is dict or origin is defaultdict or origin is OrderedDict:
        schema = build_basic_type(OpenApiTypes.OBJECT)
        if args and args[1] is not typing.Any:
            schema['additionalProperties'] = resolve_type_hint(args[1])
        return schema
    elif origin is set:
        return build_array_type(resolve_type_hint(args[0]))
    elif origin is frozenset:
        return build_array_type(resolve_type_hint(args[0]))
    elif origin is Literal:
        # Literal only works for python >= 3.8 despite typing_extensions, because it
        # behaves slightly different w.r.t. __origin__
        schema = {'enum': list(args)}
        if all(type(args[0]) is type(choice) for choice in args):
            schema.update(build_basic_type(type(args[0])))
        return schema
    elif inspect.isclass(hint) and issubclass(hint, Enum):
        schema = {'enum': [item.value for item in hint]}
        # mixin enums (e.g. IntEnum/StrEnum) also carry the base scalar type
        mixin_base_types = [t for t in hint.__mro__ if is_basic_type(t)]
        if mixin_base_types:
            schema.update(build_basic_type(mixin_base_types[0]))
        return schema
    elif isinstance(hint, _TypedDictMeta):
        return _resolve_typeddict(hint)
    elif origin in UNION_TYPES:
        type_args = [arg for arg in args if arg is not type(None)]  # noqa: E721
        if len(type_args) > 1:
            schema = {'oneOf': [resolve_type_hint(arg) for arg in type_args]}
        else:
            schema = resolve_type_hint(type_args[0])
        if type(None) in args:
            schema['nullable'] = True
        return schema
    elif origin is collections.abc.Iterable:
        return build_array_type(resolve_type_hint(args[0]))
    else:
        raise UnableToProceedError()
def whitelisted(obj: object, classes: List[Type[object]], exact=False):
    """Check whether *obj* is admitted by the class whitelist.

    An empty whitelist admits everything. With ``exact=True`` only the
    object's concrete class counts; otherwise subclass instances match too.
    """
    if not classes:
        return True
    if exact:
        return any(obj.__class__ == candidate for candidate in classes)
    return isinstance(obj, tuple(classes))
def build_mocked_view(method: str, path: str, extend_schema_decorator, registry):
    """Create a throwaway APIView carrying the given schema decoration.

    Used to produce schema operations for endpoints that have no real DRF
    view backing them (e.g. externally documented callbacks/webhooks).
    """
    from rest_framework import parsers, views
    @extend_schema_decorator
    class TmpView(views.APIView):
        parser_classes = [parsers.JSONParser]
    # emulate what Generator would do to setup schema generation.
    view_callable = TmpView.as_view()
    view = view_callable.cls()  # type: ignore
    view.request = spectacular_settings.GET_MOCK_REQUEST(
        method.upper(), path, view, None
    )
    view.kwargs = {}
    # prepare AutoSchema with "init" values as if get_operation() was called
    view.schema.registry = registry
    view.schema.path = path
    view.schema.path_regex = path
    view.schema.path_prefix = ''
    view.schema.method = method.upper()
    return view
def build_listed_example_value(value: Any, paginator, direction):
    """Wrap an example value the way the listed (possibly paginated) body would.

    Without a paginator, or on the request side, the value is simply put in
    a list. Otherwise the paginator's response schema is probed with a
    sentinel to locate the results field, while sibling fields are filled
    from their own declared examples.
    """
    if not paginator or direction == 'request':
        return [value]
    marker = object()
    paginated_schema = paginator.get_paginated_response_schema(marker)
    try:
        example = {}
        for field_name, field_schema in paginated_schema['properties'].items():
            if field_schema is marker:
                example[field_name] = value
            else:
                example[field_name] = field_schema['example']
        return example
    except (AttributeError, KeyError):
        warn(
            f"OpenApiExample could not be paginated because {paginator.__class__} either "
            f"has an unknown schema structure or the individual pagination fields did not "
            f"provide example values themselves. Using the plain example value as fallback."
        )
        return value
| 36.56996 | 109 | 0.651759 |
ace17df2abedabec38b72a0de077539dd3341634 | 72 | py | Python | coding_test_1.py | chae-heechan/Programmers_Python_Algorithm_Study | c61af0b1b97d790e2332581eb0b7da42c3e510fa | [
"MIT"
] | null | null | null | coding_test_1.py | chae-heechan/Programmers_Python_Algorithm_Study | c61af0b1b97d790e2332581eb0b7da42c3e510fa | [
"MIT"
] | null | null | null | coding_test_1.py | chae-heechan/Programmers_Python_Algorithm_Study | c61af0b1b97d790e2332581eb0b7da42c3e510fa | [
"MIT"
] | null | null | null | def solution():
a = 1
if 0 < a < 2:
print(a)
solution() | 12 | 17 | 0.444444 |
ace17e3aef9f2456555971bce17999a7f4b0a783 | 1,724 | py | Python | source/polarity_scorer_nltk.py | johnnytorres/cwcontribution | eb574fcc9de5007936064ec6235af65e6470f8d7 | [
"MIT"
] | null | null | null | source/polarity_scorer_nltk.py | johnnytorres/cwcontribution | eb574fcc9de5007936064ec6235af65e6470f8d7 | [
"MIT"
] | null | null | null | source/polarity_scorer_nltk.py | johnnytorres/cwcontribution | eb574fcc9de5007936064ec6235af65e6470f8d7 | [
"MIT"
] | null | null | null |
import csv
import os
from datetime import datetime
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk import tokenize
def analyze_sentiment(opinions_file):
    """Score every sentence of each opinion in *opinions_file* with VADER.

    Writes one row per sentence to a sibling ``*_sentiment.csv`` file.
    NOTE(review): Python 2 idioms in use (``csvreader.next()``,
    ``str.decode``) — will not run unmodified on Python 3.
    """
    sid = SentimentIntensityAnalyzer()
    sent_file = os.path.splitext(opinions_file)[0] + '_sentiment.csv'
    # write the header first; sentence rows are appended per input row below
    with open(sent_file, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(["page_id","page_title","contributor","timestamp", 'topic', 'compound', 'neg','neu','pos', 'sentence'])
    with open(opinions_file, 'r') as f:
        csvreader = csv.reader(f)
        csvreader.next()
        for row in csvreader:
            page_id = row[0]
            page_title = row[1]
            contributor = row[2]
            timestamp = datetime.fromtimestamp(float(row[3]))
            comment = row[4]
            topic = row[5]
            opinion = row[6].decode('utf-8')
            sentences = tokenize.sent_tokenize(opinion)
            output_rows = []
            for sentence in sentences:
                ss = sid.polarity_scores(sentence)
                # scores in sorted key order: compound, neg, neu, pos
                stats = ['{0}'.format(ss[k]) for k in sorted(ss)]
                output = [page_id, page_title, contributor, timestamp, topic]
                output.extend(stats)
                output.append(sentence.encode('utf-8'))
                output_rows.append(output)
                #message = ','.join(stats)
                #print(message)
            with open(sent_file, 'a') as f:
                writer = csv.writer(f,quoting=csv.QUOTE_ALL)
                writer.writerows(output_rows)
if __name__ == '__main__':
    # hard-coded input path for ad-hoc runs
    opinions_file = '/Users/john/projects/source/dataset/enwiki-discussions/rafael_correa_opinions.csv'
    analyze_sentiment(opinions_file)
ace17e77a1d1316cf5cb5829eeda6a043ddbacf3 | 6,405 | py | Python | contrib/seeds/makeseeds.py | bluejaytodd/bitcoincandy_CVE | 98ba0e2697ff9de280c52c06048443b90aa4ec33 | [
"MIT"
] | 32 | 2018-02-11T13:24:51.000Z | 2021-10-01T17:46:38.000Z | contrib/seeds/makeseeds.py | bluejaytodd/bitcoincandy_CVE | 98ba0e2697ff9de280c52c06048443b90aa4ec33 | [
"MIT"
] | 12 | 2018-02-16T07:29:38.000Z | 2021-11-17T16:52:15.000Z | contrib/seeds/makeseeds.py | bluejaytodd/bitcoincandy_CVE | 98ba0e2697ff9de280c52c06048443b90aa4ec33 | [
"MIT"
] | 11 | 2018-02-19T08:37:42.000Z | 2019-03-03T14:02:33.000Z | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Target number of seeds to emit and the per-ASN diversity cap.
NSEEDS = 512
MAX_SEEDS_PER_ASN = 2
# Minimum chain height a node must report to be considered in sync.
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
    "130.211.129.106", "178.63.107.226",
    "83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
    "54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
    "54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
    "54.94.195.96", "54.94.200.247"
}
import re
import sys
import dns.resolver
import collections
# Address formats emitted by the seeder: "ip:port", "[ipv6]:port", "x.onion:port".
PATTERN_IPV4 = re.compile(
    r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(
    r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
# Used to only select nodes with a user agent string compatible with the
# BCH/UAHF specification.
PATTERN_AGENT = re.compile(
    r"^(/BitcoinABC:0.16.(\d+)\(\S+\)/|/BitcoinXT:0.11\(\S+\)/|/BUCash:1.1.(\d+)\(\S+\)/)")
def parseline(line):
    """Parse one line of seeder dump output into an info dict (or None).

    Returns None for malformed lines or addresses failing basic sanity
    checks. The result carries address, uptime, version, user agent,
    service flags, block height and a network-specific sort key.
    """
    sline = line.split()
    if len(sline) < 11:
        return None
    # All BCH clients apart BU and Classic has a space in the useragent string
    if len(sline) == 13:
        sline[11] = sline[11] + sline[12]
    if len(sline) == 14:
        sline[11] = sline[11] + sline[12] + sline[13]
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']:  # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr  # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0, 4):
            if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255:
                return None
            ip = ip + (int(m.group(i + 2)) << (8 * (3 - i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results.
    # NOTE(review): sline[1] is a string here, so `sline[1] == 0` can never be
    # true — this looks intended to be `int(sline[1]) == 0`; confirm against
    # the seeder dump format before changing.
    if sline[1] == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent.
    agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    """Filter out hosts that appear multiple times (several ports per IP)."""
    hist = collections.defaultdict(list)
    for ip in ips:
        hist[ip['sortkey']].append(ip)
    # keep only addresses that occur exactly once; no need to materialize
    # items() into a list or to unpack the unused key
    return [addrs[0] for addrs in hist.values() if len(addrs) == 1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit IPv4 seeds per ASN and in total; pass IPv6/onion through.

    ASN lookups go through the Team Cymru origin-ASN DNS zone; failures
    are reported to stderr and the address is skipped.
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            # reverse the octets to query the cymru origin-ASN DNS zone
            asn = int([x.to_text()
                       for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # narrowed from a bare `except:` so Ctrl-C still aborts the run
            sys.stderr.write(
                'ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read seeder dump from stdin, filter/sort candidates, print seeds."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    # TODO set it back to 50% once nodes will have enough uptime.
    ips = [ip for ip in ips if ip['uptime'] > 0]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x:
             (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    # TODO during this bootstrap phase we need any BCH full nodes
    # active on the network, uncomment the following line once the
    # BCH chain will be consolidated.
    # ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
    main()
| 32.51269 | 174 | 0.568931 |
ace181f563efe7c6b257115a33b4ee7167c3b76e | 245 | py | Python | examples/test.py | possoumous/Watchers | 51f22853f7bd4b3021f318107aa29501a3f45e5f | [
"MIT"
] | null | null | null | examples/test.py | possoumous/Watchers | 51f22853f7bd4b3021f318107aa29501a3f45e5f | [
"MIT"
] | null | null | null | examples/test.py | possoumous/Watchers | 51f22853f7bd4b3021f318107aa29501a3f45e5f | [
"MIT"
] | null | null | null | from pyvirtualdisplay import Display
from selenium import webdriver
display = Display(visible=0, size=(800, 600))
display.start()
driver = webdriver.Chrome()
driver.get('https://stocktwits.com/symbol/CYTR?q=cytr')
print driver.title
| 20.416667 | 55 | 0.746939 |
ace18241aafef53c9995dbd70d253f960fff064e | 8,512 | py | Python | payu/models/fms.py | marshallward/payu | bb72250baa6411cfc48053a26dec5a2f1c4c9630 | [
"Apache-2.0"
] | 5 | 2016-05-12T03:17:32.000Z | 2018-06-08T23:50:41.000Z | payu/models/fms.py | marshallward/payu | bb72250baa6411cfc48053a26dec5a2f1c4c9630 | [
"Apache-2.0"
] | 117 | 2015-01-13T00:51:48.000Z | 2021-04-07T15:53:20.000Z | payu/models/fms.py | marshallward/payu | bb72250baa6411cfc48053a26dec5a2f1c4c9630 | [
"Apache-2.0"
] | 5 | 2015-02-27T05:20:38.000Z | 2019-01-29T05:16:45.000Z | """Driver interface to the FMS model framework.
:copyright: Copyright 2011 Marshall Ward, see AUTHORS for details
:license: Apache License, Version 2.0, see LICENSE for details
"""
from __future__ import print_function
from collections import defaultdict
import multiprocessing
import os
import resource as res
import shlex
import shutil
import subprocess as sp
import sys
from itertools import count
import fnmatch
from payu.models.model import Model
from payu import envmod
# There is a limit on the number of command line arguments in a forked
# MPI process. This applies only to mppnccombine-fast. The limit is higher
# than this, but mppnccombine-fast is very slow with large numbers of files
MPI_FORK_MAX_FILE_LIMIT = 1000
def cmdthread(cmd, cwd):
    """Run *cmd* in *cwd*, returning ``(returncode, output)``.

    ``returncode`` is ``None`` on success. Output (stdout and stderr
    combined) is captured rather than streamed so that lines from collates
    running in parallel worker threads do not get interleaved on the
    console while the GIL shuffles between them.
    """
    argv = shlex.split(cmd)
    try:
        captured = sp.check_output(argv, cwd=cwd, stderr=sp.STDOUT)
    except sp.CalledProcessError as failure:
        return failure.returncode, failure.output
    return None, captured
class Fms(Model):
    """Driver for models built on the FMS framework (e.g. MOM, SIS).

    Adds FMS-specific working directories (INPUT/RESTART), restart
    archiving, and parallel collation of distributed-IO netCDF tiles via
    mppnccombine / mppnccombine-fast.
    """
    def __init__(self, expt, name, config):
        # payu initialisation
        super(Fms, self).__init__(expt, name, config)
    def set_model_pathnames(self):
        """Extend base pathnames with the FMS INPUT/RESTART conventions."""
        super(Fms, self).set_model_pathnames()
        # Define local FMS directories
        self.work_restart_path = os.path.join(self.work_path, 'RESTART')
        self.work_input_path = os.path.join(self.work_path, 'INPUT')
        self.work_init_path = self.work_input_path
    @staticmethod
    def get_uncollated_files(dir):
        """Return sorted tile filenames (``*.nc.NNNN``) in *dir*, or []."""
        if not os.path.isdir(dir):
            return []
        # Generate collated file list and identify the first tile
        tile_fnames = [f for f in os.listdir(dir)
                       if f[-4:].isdigit() and f[-8:-4] == '.nc.']
        # print("dir: ",tile_fnames)
        tile_fnames.sort()
        return tile_fnames
    def archive(self, **kwargs):
        """Archive run output; drop INPUT and move RESTART into place."""
        super(Fms, self).archive()
        # Remove the 'INPUT' path
        shutil.rmtree(self.work_input_path, ignore_errors=True)
        # Archive restart files before processing model output
        if os.path.isdir(self.restart_path):
            os.rmdir(self.restart_path)
        shutil.move(self.work_restart_path, self.restart_path)
    def collate(self):
        """Collate distributed netCDF tiles into single files in parallel.

        Configuration comes from the ``collate`` section (exe, flags, mpi,
        ignore, ncpus, threads, restart, glob).
        """
        # Set the stacksize to be unlimited
        res.setrlimit(res.RLIMIT_STACK, (res.RLIM_INFINITY, res.RLIM_INFINITY))
        collate_config = self.expt.config.get('collate', {})
        # The mpi flag implies using mppnccombine-fast
        mpi = collate_config.get('mpi', False)
        if mpi:
            # Must use envmod to be able to load mpi modules for collation
            envmod.setup()
            self.expt.load_modules()
            default_exe = 'mppnccombine-fast'
        else:
            default_exe = 'mppnccombine'
        # Locate the FMS collation tool
        # Check config for collate executable
        mppnc_path = collate_config.get('exe')
        if mppnc_path is None:
            for f in os.listdir(self.expt.lab.bin_path):
                if f == default_exe:
                    mppnc_path = os.path.join(self.expt.lab.bin_path, f)
                    break
        else:
            if not os.path.isabs(mppnc_path):
                mppnc_path = os.path.join(self.expt.lab.bin_path, mppnc_path)
        assert mppnc_path, 'No mppnccombine program found'
        # Check config for collate command line options
        collate_flags = collate_config.get('flags')
        if collate_flags is None:
            if mpi:
                collate_flags = '-r'
            else:
                collate_flags = '-n4 -z -m -r'
        if mpi:
            # The output file is the first argument after the flags
            # and mppnccombine-fast uses an explicit -o flag to specify
            # the output
            collate_flags = " ".join([collate_flags, '-o'])
            envmod.lib_update(mppnc_path, 'libmpi.so')
        # Import list of collated files to ignore
        collate_ignore = collate_config.get('ignore')
        if collate_ignore is None:
            collate_ignore = []
        elif type(collate_ignore) != list:
            collate_ignore = [collate_ignore]
        # Generate collated file list and identify the first tile
        tile_fnames = {}
        fnames = Fms.get_uncollated_files(self.output_path)
        tile_fnames[self.output_path] = fnames
        print(tile_fnames)
        if (collate_config.get('restart', False) and
                self.prior_restart_path is not None):
            # Add uncollated restart files
            fnames = Fms.get_uncollated_files(self.prior_restart_path)
            tile_fnames[self.prior_restart_path] = fnames
        # mnc_tiles = defaultdict(list)
        # nested mapping: directory -> file base name -> list of tile files
        mnc_tiles = defaultdict(defaultdict(list).copy)
        for t_dir in tile_fnames:
            for t_fname in tile_fnames[t_dir]:
                t_base, t_ext = os.path.splitext(t_fname)
                t_ext = t_ext.lstrip('.')
                # Skip any files listed in the ignore list
                if t_base in collate_ignore:
                    continue
                mnc_tiles[t_dir][t_base].append(t_fname)
        # print(mnc_tiles)
        if mpi and collate_config.get('glob', True):
            # NOTE(review): since mnc_tiles became directory-keyed, iterating
            # it here yields directory paths, not file base names — the glob
            # comparison below likely never matches anymore; confirm whether
            # this loop should walk mnc_tiles[dir] per directory instead.
            for t_base in mnc_tiles:
                globstr = "{}.*".format(t_base)
                # Try an equivalent glob and check the same files are returned
                mnc_glob = fnmatch.filter(os.listdir(self.output_path),
                                          globstr)
                if mnc_tiles[t_base] == sorted(mnc_glob):
                    mnc_tiles[t_base] = [globstr, ]
                    print("Note: using globstr ({}) for collating {}"
                          .format(globstr, t_base))
                else:
                    print("Warning: cannot use globstr {} to collate {}"
                          .format(globstr, t_base))
                    if len(mnc_tiles[t_base]) > MPI_FORK_MAX_FILE_LIMIT:
                        print("Warning: large number of tiles: {} "
                              .format(len(mnc_tiles[t_base])))
                        print("Warning: collation will be slow and may fail")
        cpucount = int(collate_config.get('ncpus',
                                          multiprocessing.cpu_count()))
        if mpi:
            # Default to one for mpi
            nprocesses = int(collate_config.get('threads', 1))
        else:
            nprocesses = int(collate_config.get('threads', cpucount))
        ncpusperprocess = int(cpucount/nprocesses)
        if ncpusperprocess == 1 and mpi:
            print("Warning: running collate with mpirun on a single processor")
        pool = multiprocessing.Pool(processes=nprocesses)
        # Collate each tileset into a single file
        results = []
        codes = []
        outputs = []
        for output_path in mnc_tiles:
            for nc_fname in mnc_tiles[output_path]:
                nc_path = os.path.join(output_path, nc_fname)
                # Remove the collated file if it already exists, since it is
                # probably from a failed collation attempt
                # TODO: Validate this somehow
                if os.path.isfile(nc_path):
                    os.remove(nc_path)
                cmd = ' '.join([mppnc_path, collate_flags, nc_fname,
                                ' '.join(mnc_tiles[output_path][nc_fname])])
                if mpi:
                    cmd = "mpirun -n {} {}".format(ncpusperprocess, cmd)
                print(cmd)
                results.append(
                    pool.apply_async(cmdthread, args=(cmd, output_path)))
        pool.close()
        pool.join()
        for result in results:
            rc, op = result.get()
            codes.append(rc)
            outputs.append(op)
        # TODO: Categorise the return codes
        if any(rc is not None for rc in codes):
            for p, rc, op in zip(count(), codes, outputs):
                if rc is not None:
                    print('payu: error: Thread {p} crashed with error code '
                          '{rc}.'.format(p=p, rc=rc), file=sys.stderr)
                    print('    Error message:', file=sys.stderr)
                    print(op.decode(), file=sys.stderr)
            sys.exit(-1)
| 35.466667 | 79 | 0.586584 |
ace1831172389276fa81f115d54232d028149a22 | 69,880 | py | Python | tensorflow_hub/native_module_test.py | stephanieyang/gabby | 677bf3bacde7cc10891f150cd83b03ebe36ef171 | [
"Apache-2.0"
] | 1 | 2019-04-25T09:29:35.000Z | 2019-04-25T09:29:35.000Z | tensorflow_hub/native_module_test.py | AnalystSubranjit/hub | c2ff9d4ae06fe600633694f81b859aead189754c | [
"Apache-2.0"
] | null | null | null | tensorflow_hub/native_module_test.py | AnalystSubranjit/hub | c2ff9d4ae06fe600633694f81b859aead189754c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_hub.native_module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_hub import module_def_pb2
from tensorflow_hub import native_module
from tensorflow_hub import tf_v1
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import function as function_eager
from tensorflow.python.framework import function
from tensorflow.python.ops.control_flow_ops import ControlFlowContext
from tensorflow.python.ops.lookup_ops import HashTable
from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
# pylint: enable=g-direct-tensorflow-import
def load_module_spec(spec):
  """Resolve `spec` directly through the native_module Loader implementation."""
  loader = native_module.Loader()
  return loader(spec)
def multi_signature_module():
  """Module with two signatures: "double" (y = 2x) and "square" (z_out = z*z)."""
  double_in = tf_v1.placeholder(tf.float32, shape=[None])
  native_module.add_signature("double", {"x": double_in},
                              {"y": 2 * double_in})
  square_in = tf_v1.placeholder(tf.float32, shape=[None])
  native_module.add_signature("square", {"z": square_in},
                              {"z_out": square_in * square_in})
def batch_norm_module(training):
  """Single-signature module applying batch normalization to a [None, 3] input."""
  features = tf_v1.placeholder(tf.float32, shape=[None, 3])
  normalized = tf_v1.layers.batch_normalization(features, training=training)
  native_module.add_signature(inputs=features, outputs=normalized)
def module_with_variables():
  """Module holding one plain and one 3-way partitioned zero-initialized variable."""
  tf_v1.get_variable(name="weights", shape=[3],
                     initializer=tf_v1.zeros_initializer())
  tf_v1.get_variable(name="partition", shape=[4],
                     initializer=tf_v1.zeros_initializer(),
                     partitioner=tf_v1.fixed_size_partitioner(3))
  # The signature output is a constant; the variables are state only.
  hub.add_signature(outputs=tf.constant(1.0))
class NativeModuleTest(tf.test.TestCase):
  """Tests ModuleSpec creation/loading via the native_module implementation."""

  def testModuleWithMissingRequiredFeature(self):
    """Loading a module requiring an unknown feature raises a naming error."""
    path = os.path.join(self.get_temp_dir(), "required-feature")
    tf_v1.gfile.MakeDirs(path)
    proto_path = native_module.get_module_proto_path(path)
    with tf_v1.gfile.Open(proto_path, mode="wb") as f:
      module_def_proto = module_def_pb2.ModuleDef()
      module_def_proto.format = module_def_pb2.ModuleDef.FORMAT_V3
      module_def_proto.required_features.extend(["foo-test-missing"])
      f.write(module_def_proto.SerializeToString())
    # The error message must mention the unsupported feature by name.
    with self.assertRaisesRegexp(ValueError, "foo-test-missing"):
      load_module_spec(path)

  def testMultiSignatureSpec(self):
    """Both signatures and their input/output names are exposed by the spec."""
    spec = native_module.create_module_spec(multi_signature_module)
    self.assertAllEqual(sorted(spec.get_signature_names()),
                        ["double", "square"])
    self.assertAllEqual(list(spec.get_input_info_dict("double").keys()), ["x"])
    self.assertAllEqual(list(spec.get_output_info_dict("double").keys()), ["y"])
    self.assertAllEqual(list(spec.get_input_info_dict("square").keys()), ["z"])
    self.assertAllEqual(list(spec.get_output_info_dict("square").keys()),
                        ["z_out"])

  def testDefaultTagSpec(self):
    """Without explicit tags, the spec carries exactly one empty tag set."""
    spec = native_module.create_module_spec(multi_signature_module)
    self.assertAllEqual(sorted(spec.get_tags()), [set()])

  def testMultiTagSpec(self):
    """Each (tags, kwargs) pair produces its own graph variant in the spec."""
    spec = native_module.create_module_spec(
        batch_norm_module,
        [({"training"}, {"training": True}),
         ({"inference"}, {"training": False})])
    self.assertAllEqual(sorted(spec.get_tags()),
                        [set(["training"]), set(["inference"])])

  def testModuleWithVariablesAndNoCheckpoint(self):
    """Instantiating a spec without a checkpoint creates fresh variables."""
    with tf.Graph().as_default():
      spec = native_module.create_module_spec(module_with_variables)
      spec._create_impl(name="module", trainable=False, tags=None)
      # One plain variable plus three partition shards.
      self.assertAllEqual(
          [x.op.name for x in tf_v1.global_variables()],
          [
              "module/weights",
              "module/partition/part_0",
              "module/partition/part_1",
              "module/partition/part_2",
          ])

      with tf_v1.Session() as session:
        session.run(tf_v1.initializers.global_variables())
        expected_values = [
            [0.0, 0.0, 0.0],
            [0.0, 0.0],
            [0.0],
            [0.0],
        ]
        for a, b in zip(session.run(tf_v1.global_variables()), expected_values):
          self.assertAllEqual(a, b)

  def testNoSignaturesPresent(self):
    """A module_fn that never calls add_signature is rejected."""

    def wrong_module_fn():
      x = tf_v1.placeholder(tf.float32, shape=[None, 3])
      return tf.identity(x)

    with self.assertRaises(ValueError) as cm:
      # Return value intentionally discarded; only the error matters.
      native_module.create_module_spec(wrong_module_fn)
    self.assertIn("No signatures present", str(cm.exception))

  def testUnsupportedCollections(self):
    """Custom collections are rejected unless explicitly dropped."""

    def module_fn():
      scale = tf_v1.get_variable("x", (), collections=["my_scope"])
      x = tf_v1.placeholder(tf.float32, shape=[None, 3])
      native_module.add_signature("my_func", {"x": x}, {"y": x*scale})

    with self.assertRaises(ValueError) as cm:
      _ = native_module.create_module_spec(module_fn)
    # Bug fix: assert against the exception text. The previous code passed
    # the context manager object `cm` itself to assertIn, which is not a
    # container and would make the membership test raise TypeError.
    self.assertIn("Unsupported collections in graph", str(cm.exception))

    with tf.Graph().as_default() as tmp_graph:
      module_fn()
      unsupported_collections = native_module.get_unsupported_collections(
          tmp_graph.get_all_collection_keys())
      self.assertEqual(["my_scope"], unsupported_collections)

    # Dropping the offending collection makes spec creation succeed.
    _ = native_module.create_module_spec(
        module_fn, drop_collections=unsupported_collections)
class RecoverPartitionedVariableMapTest(tf.test.TestCase):
  """Tests native_module.recover_partitioned_variable_map."""

  def testRecoverPartitionedVariableMap(self):
    """Shards named var/part_k are regrouped under the base variable name."""
    with tf.Graph().as_default():
      with tf_v1.variable_scope("test"):
        partitioner = tf_v1.fixed_size_partitioner(3)
        tf_v1.get_variable(
            initializer=tf.ones([11, 5]),
            name="partitioned_variable",
            partitioner=partitioner)
        tf_v1.get_variable(
            initializer=tf.ones([11, 5]),
            name="normal_variable")

      all_vars = tf_v1.global_variables()
      # Keys drop the leading "test/" scope prefix (5 characters).
      all_vars_dict = {var.op.name[5:]: var for var in all_vars}
      self.assertEqual(set(all_vars_dict.keys()), set([
          "partitioned_variable/part_0",
          "partitioned_variable/part_1",
          "partitioned_variable/part_2",
          "normal_variable"]))

      self.assertEqual(len(all_vars_dict), 4)
      var_map = native_module.recover_partitioned_variable_map(all_vars_dict)
      self.assertEqual(set(var_map.keys()), set([
          "partitioned_variable", "normal_variable"]))

      # Verify order of the partitioned variable list
      self.assertAllEqual(
          [v.op.name for v in var_map["partitioned_variable"]],
          [
              "test/partitioned_variable/part_0",
              "test/partitioned_variable/part_1",
              "test/partitioned_variable/part_2",
          ])
def stateless_module_fn():
  """Variable-free module computing y = x*x for an int64 input."""
  inp = tf_v1.placeholder(tf.int64)
  squared = inp * inp
  hub.add_signature(inputs=inp, outputs=squared)
def unused_input_module_fn():
  """Module squaring "x"; the "unused" input is declared but never consumed."""
  main_in = tf_v1.placeholder(tf.int64)
  ignored_in = tf_v1.placeholder(tf.int64)
  hub.add_signature(
      inputs={"x": main_in, "unused": ignored_in},
      outputs=main_in * main_in)
def double_module_fn():
  """Module computing x*w with variable w initialized to 2.0."""
  multiplier = tf.Variable(2.0)
  inp = tf_v1.placeholder(dtype=tf.float32)
  hub.add_signature(inputs=inp, outputs=inp * multiplier)
def create_partitioned_variable_module_fn(partitions, shape):
  """Returns a module_fn summing one normal and one partitioned variable.

  Args:
    partitions: number of fixed-size partitions for the partitioned variable.
    shape: common shape of both ones-initialized variables.
  """
  def module_fn():
    """Sums a `partitions`-way partitioned variable with a normal one."""
    part_var = tf_v1.get_variable(
        name="partitioned_variable",
        initializer=tf.ones(shape),
        partitioner=tf_v1.fixed_size_partitioner(partitions))
    plain_var = tf_v1.get_variable(
        name="normal_variable", initializer=tf.ones(shape))
    hub.add_signature(outputs=part_var + plain_var)

  return module_fn
class TFHubStatelessModuleTest(tf.test.TestCase):
  """End-to-end tests of hub.Module built from variable-free module_fns."""

  def testLoadModuleFromFuncDef(self):
    """A module applied to a fed placeholder computes its signature."""
    with tf_v1.Session() as sess:
      v = tf_v1.placeholder(tf.int64)
      spec = hub.create_module_spec(stateless_module_fn)
      m = hub.Module(spec)
      y = m(v)
      self.assertEqual(sess.run(y, feed_dict={v: 10}), 100)

  def testUnusedInputModule(self):
    """Feeding an input the module ignores does not affect the output."""
    with tf_v1.Session() as sess:
      v1 = tf_v1.placeholder(tf.int64)
      v2 = tf_v1.placeholder(tf.int64)
      spec = hub.create_module_spec(unused_input_module_fn)
      m = hub.Module(spec)
      out = m({"x": v1, "unused": v2})
      self.assertEqual(sess.run(out, feed_dict={v1: 10, v2: 4}), 100)

  def testConvertToTensor(self):
    """Plain Python inputs are converted to tensors; bad dtypes raise."""
    spec = hub.create_module_spec(stateless_module_fn)
    with tf_v1.Session() as sess:
      m = hub.Module(spec)
      y = m([10, 2])
      self.assertAllEqual(sess.run(y), [100, 4])
    with tf_v1.Session() as sess:
      m = hub.Module(spec)
      with self.assertRaises(TypeError):
        m("hello")

  def testArgErrors(self):
    """Calling a module without its required input raises TypeError."""
    spec = hub.create_module_spec(stateless_module_fn)
    with tf_v1.Session():
      m = hub.Module(spec)
      with self.assertRaisesRegexp(TypeError, "missing"):
        m()

  def testUseWithinWhileLoop(self):
    """A module (and its gradients) works inside tf.while_loop bodies."""
    with tf.Graph().as_default():
      spec = hub.create_module_spec(double_module_fn)
      m = hub.Module(spec)
      i = tf.constant(0)
      x = tf.constant(10.0)
      p = tf_v1.placeholder(dtype=tf.int32)
      c = lambda i, x: tf.less(i, p)
      b = lambda i, x: (tf.add(i, 1), m(x))
      oi, ox = tf.while_loop(c, b, [i, x])  # ox = x * 2^p
      dox = tf.gradients(ox, x)[0]  # should be 2^p
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 1}), [1, 20])
        self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 2}), [2, 40])
        self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 4}), [4, 160])
        # Gradients also use the control flow structures setup earlier.
        # Also check they are working properly.
        self.assertAllEqual(sess.run([dox], feed_dict={p: 1}), [2])
        self.assertAllEqual(sess.run([dox], feed_dict={p: 2}), [4])
        self.assertAllEqual(sess.run([dox], feed_dict={p: 4}), [16])

  def testClearControlDependenciesForModuleStateButNotApplyGraphs(self):
    """Module state ignores outer control deps; apply graphs honor them."""
    module_spec = hub.create_module_spec(stateless_module_fn)

    # g1/g2: control deps around Module *construction* are cleared, so the
    # resulting graphs must be identical.
    with tf.Graph().as_default() as g1:
      v = tf_v1.placeholder(dtype=tf.int64, name="v")
      m = hub.Module(module_spec)
      m(v)

    with tf.Graph().as_default() as g2:
      v = tf_v1.placeholder(dtype=tf.int64, name="v")
      with tf.control_dependencies([v]):
        m = hub.Module(module_spec)
      m(v)

    self.assertEqual(g1.as_graph_def(), g2.as_graph_def())

    # g3/g4: control deps around the module *application* are kept, so the
    # resulting graphs must differ.
    with tf.Graph().as_default() as g3:
      v = tf_v1.placeholder(dtype=tf.int64, name="v")
      m = hub.Module(module_spec)
      m(v)

    with tf.Graph().as_default() as g4:
      v = tf_v1.placeholder(dtype=tf.int64, name="v")
      m = hub.Module(module_spec)
      with tf.control_dependencies([v]):
        m(v)

    self.assertNotEqual(g3.as_graph_def(), g4.as_graph_def())
def sparse_square_module_fn():
  """Module squaring the values of a sparse int64 input, keeping its layout."""
  sp_in = tf_v1.sparse_placeholder(dtype=tf.int64, name="x")
  squared = tf.SparseTensor(
      sp_in.indices, sp_in.values * sp_in.values, sp_in.dense_shape)
  hub.add_signature(inputs=sp_in, outputs=squared)
class TFHubSparseTensorModuleTest(tf.test.TestCase):
  """Tests feeding and fetching SparseTensor values through a module."""

  def testSparseTensors(self):
    """Applying the squaring module twice raises values to the fourth power."""
    square_spec = hub.create_module_spec(sparse_square_module_fn)

    with tf.Graph().as_default():
      square = hub.Module(square_spec)
      v = tf_v1.sparse_placeholder(dtype=tf.int64, name="v")
      y = square(v)

      with tf_v1.Session().as_default():
        indices = [[0, 0], [0, 1], [1, 1]]
        values = [10, 2, 1]
        shape = [2, 2]
        v1 = tf_v1.SparseTensorValue(indices, values, shape)
        v2 = y.eval(feed_dict={v: v1})
        # Feed the first result back in: values get squared again.
        v4 = y.eval(feed_dict={v: v2})

        self.assertAllEqual(v4.indices, indices)  # Unchanged.
        self.assertAllEqual(v4.values, [t**4 for t in values])  # Squared twice.
        self.assertAllEqual(v4.dense_shape, shape)  # Unchanged.
def stateful_module_fn():
  """Module exposing a 3-element variable "var123" initialized to [1, 2, 3]."""
  state = tf_v1.get_variable(
      name="var123", shape=[3],
      initializer=tf_v1.constant_initializer([1.0, 2.0, 3.0]))
  hub.add_signature(outputs=state.value())
def stateful_rv_module_fn():
  """Module exposing scalar resource variable "rv_var123" initialized to 10."""
  rv = tf_v1.get_variable(
      name="rv_var123", shape=[],
      use_resource=True,
      initializer=tf_v1.constant_initializer(10.0))
  hub.add_signature(outputs=rv.value())
class TPUReplicateContext(ControlFlowContext):
  """Minimal stand-in for TPU's replication control-flow context.

  Used by the TPU-related tests below to enter a context that mimics TPU
  replication without real TPU machinery: ops and values pass through
  unmodified.
  """

  def __init__(self):
    super(TPUReplicateContext, self).__init__()
    self._name = "TPUReplicateContext"

  def AddOp(self, _):
    # Accept any op without rewriting it.
    pass

  def AddValue(self, x):
    # Values pass through unchanged.
    return x

  def to_control_flow_context_def(self, context_def, export_scope=None):
    # Delegate serialization to the base class.
    super(TPUReplicateContext, self).to_control_flow_context_def(
        context_def, export_scope)
def stateful_random_rv_module_fn():
  """Module exposing a randomly initialized scalar resource variable."""
  rv = tf_v1.get_variable(
      name="rv_var123",
      shape=[],
      use_resource=True,
      initializer=tf_v1.random_uniform_initializer())
  hub.add_signature(outputs=rv.value())
def stateful_rv_with_input_module_fn():
  """Module computing x + rv_var123, plus graph elements unreachable from it.

  The extra placeholder "y", variable "rv_var456" and the assign op exist in
  the graph but are not connected to the signature output; the pruning tests
  below rely on their presence.
  """
  x = tf_v1.placeholder(dtype=tf.float32, name="x")
  # Add a placeholder/variable that doesn't go to an output.
  y = tf_v1.placeholder(dtype=tf.float32, name="y")
  r = tf_v1.get_variable(
      "rv_var123",
      shape=[],
      initializer=tf_v1.constant_initializer(10.0),
      use_resource=True)
  t = tf_v1.get_variable(
      "rv_var456",
      shape=[],
      initializer=tf_v1.constant_initializer(10.0),
      use_resource=True)
  # The assign op is created but its result is discarded; it is never part
  # of the signature.
  t.assign(y)
  res = x + r
  hub.add_signature(inputs={"x": x}, outputs=res)
def control_dependency_module_fn():
  """Module whose output carries a control edge from op "dependency_op"."""
  anchor = tf.constant(1.0, name="dependency_op")
  with tf.control_dependencies([anchor]):
    total = tf.constant(3.0) + tf.constant(2.0)
  hub.add_signature(inputs={}, outputs=total)
def stateful_non_rv_module_fn():
  """Module exposing scalar non-resource (ref) variable "var123" set to 10."""
  ref_var = tf_v1.get_variable(
      name="var123", shape=[],
      use_resource=False,
      initializer=tf_v1.constant_initializer(10.0))
  hub.add_signature(outputs=ref_var.value())
def stateful_module_fn_with_colocation():
  """Module computing y = v + x where y is colocated with both v and x.

  Used to test that colocation constraints referencing module state and
  module inputs are rewritten correctly at instantiation time.
  """
  v = tf_v1.get_variable(
      "var123", shape=[],
      initializer=tf_v1.constant_initializer(1.0),
      use_resource=False)
  v_value = v.value()
  x = tf_v1.placeholder(dtype=tf.float32, name="x")
  # y is colocated with a state node (v) and an input node (x).
  with tf_v1.colocate_with(v), tf_v1.colocate_with(x):
    y = tf.add(v_value, x, name="y")
  hub.add_signature(inputs=x, outputs=y)
class TFHubStatefulModuleTest(tf.test.TestCase):
  """Tests modules with state: variables, export/restore, colocation, and
  behavior inside Defun/TPU-like function contexts."""

  def testVariables(self):
    """Module variables appear in variable_map under their in-module names."""
    with tf.Graph().as_default():
      spec = hub.create_module_spec(stateful_module_fn)
      m = hub.Module(spec, name="test")
      out = m()
      self.assertEqual(list(m.variable_map.keys()), ["var123"])
      self.assertEqual(m.variable_map["var123"].name, "test/var123:0")
      self.assertEqual([v.name for v in m.variables], ["test/var123:0"])
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(out), [1.0, 2.0, 3.0])

  def testResourceVariables(self):
    """Resource variables keep shared_name, colocation, and checkpoint names."""
    with tf.Graph().as_default():
      spec = hub.create_module_spec(stateful_rv_module_fn)
      m = hub.Module(spec, name="test_rv")
      out = m()
      self.assertEqual(list(m.variable_map.keys()), ["rv_var123"])
      self.assertEqual(m.variable_map["rv_var123"].name, "test_rv/rv_var123:0")
      self.assertEqual([v.name for v in m.variables], ["test_rv/rv_var123:0"])

      # Check that "shared_name" attributes are adapted correctly:
      var_handle_op_name = "test_rv/rv_var123"
      var_handle_op = tf_v1.get_default_graph().get_operation_by_name(
          var_handle_op_name)
      self.assertEqual(
          var_handle_op.get_attr("shared_name"),
          tf.compat.as_bytes(var_handle_op_name))

      export_path = os.path.join(self.get_temp_dir(), "resource-variables")
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(out), 10.0)
        m.export(export_path, sess)

    with tf.Graph().as_default():
      f = hub.Module(export_path)
      out = f()

      # Test colocation constraints on the read op in the apply graph.
      # It has two legal values:
      # - Colocation with the VarHandleOp in the state graph.
      # - No constraint, in which case it reports its own colocation_group.
      #   This appears to happen at the time of this writing (March 2018)
      #   because the Python code relies on the TensorFlow core to handle
      #   VariableReadOps as a special case and colocate them with their
      #   VarHandleOp input, which is mapped to the state graph.
      # In any case, the point is to *not* colocate with the stillborn copy
      # of the VarHandleOp in the apply graph scope.
      if out.op.colocation_groups() != [
          tf.compat.as_bytes("loc:@" + out.op.name)]:
        self.assertItemsEqual(out.op.colocation_groups(),
                              [tf.compat.as_bytes("loc:@module/rv_var123")])

      # Check that "shared_name" attributes are adapted correctly:
      var_handle_op_name = "module/rv_var123"
      var_handle_op = tf_v1.get_default_graph().get_operation_by_name(
          var_handle_op_name)
      self.assertEqual(
          var_handle_op.get_attr("shared_name"),
          tf.compat.as_bytes(var_handle_op_name))

      # Create a saver for the whole graph.
      saver = tf_v1.train.Saver()

      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(out), 10.0)

        # Make sure that the variable names stored in a checkpoint of the graph
        # are as expected.
        variables_path = os.path.join(self.get_temp_dir(), "variables")
        saver.save(
            sess, variables_path, write_meta_graph=False, write_state=False)
        variable_names_and_shapes = tf_v1.train.list_variables(
            ckpt_dir_or_file=variables_path)
        variable_names = set(name for name, _ in variable_names_and_shapes)
        self.assertEqual(variable_names, {"module/rv_var123"})

  def testNonResourceVariables(self):
    """Non-resource variables keep colocation and checkpoint names."""
    with tf.Graph().as_default():
      spec = hub.create_module_spec(stateful_non_rv_module_fn)
      m = hub.Module(spec, name="test_non_rv")
      out = m()
      self.assertEqual(list(m.variable_map.keys()), ["var123"])
      self.assertEqual(m.variable_map["var123"].name, "test_non_rv/var123:0")
      self.assertEqual([v.name for v in m.variables], ["test_non_rv/var123:0"])

      export_path = os.path.join(self.get_temp_dir(), "non-resource-variables")
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(out), 10.0)
        m.export(export_path, sess)

    with tf.Graph().as_default():
      f = hub.Module(export_path)
      out = f()

      # Test that the read op in the apply graph gets colocated with the
      # variable in the state graph scope "module/" (and not the stillborn
      # copy in the apply graph scope).
      self.assertItemsEqual(out.op.colocation_groups(),
                            [tf.compat.as_bytes("loc:@module/var123")])

      # Create a saver for the whole graph.
      saver = tf_v1.train.Saver()

      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(out), 10.0)

        # Make sure that the variable names stored in a checkpoint of the
        # graph are as expected.
        variables_path = os.path.join(self.get_temp_dir(), "variables")
        saver.save(
            sess, variables_path, write_meta_graph=False, write_state=False)
        variable_names_and_shapes = tf_v1.train.list_variables(
            ckpt_dir_or_file=variables_path)
        variable_names = set(name for name, _ in variable_names_and_shapes)
        self.assertEqual(variable_names, {"module/var123"})

  def testNonResourceVariableInWhileLoop(self):
    """Colocation to module state propagates into while_loop bodies."""
    with tf.Graph().as_default():
      # This test uses non-Resource variables to see an actual colocation
      # constraint propagated to the context Enter op. The long comment on
      # colocation in testResourceVariables explains why they may not offer
      # that.
      spec = hub.create_module_spec(stateful_non_rv_module_fn)
      m = hub.Module(spec)
      cond = lambda i, x: tf.less(i, 4)
      def body(i, x):
        v = m()
        self.assertItemsEqual(v.op.colocation_groups(),
                              [tf.compat.as_bytes("loc:@module/var123")])
        return (tf.add(i, 1), 2*x)
      oi, ox = tf.while_loop(cond, body, [0, 10.0])
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllEqual(sess.run([oi, ox]), [4, 160.0])

  def testNonResourceVariableInCond(self):
    """Colocation to module state propagates into tf.cond branches."""
    with tf.Graph().as_default():
      spec = hub.create_module_spec(stateful_non_rv_module_fn)
      m = hub.Module(spec)
      pred = tf_v1.placeholder(tf.bool)
      def true_fn():
        v = m()
        self.assertItemsEqual(v.op.colocation_groups(),
                              [tf.compat.as_bytes("loc:@module/var123")])
        return v
      def false_fn():
        return tf.constant(9.0)
      out = tf.cond(pred, true_fn, false_fn)
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertEqual(sess.run(out, feed_dict={pred: True}), 10.0)
        self.assertEqual(sess.run(out, feed_dict={pred: False}), 9.0)

  def testVariableColocationPropagation(self):
    """Colocation groups from both state and caller inputs merge on outputs."""
    with tf.Graph().as_default():
      spec = hub.create_module_spec(stateful_module_fn_with_colocation)
      m = hub.Module(spec)
      u1 = tf.constant(1, name="u1")
      u2 = tf.constant(2, name="u2")
      with tf_v1.colocate_with(u1), tf_v1.colocate_with(u2):
        x = tf.constant(100.0, name="x")
      y = m(x)
      self.assertItemsEqual(y.op.colocation_groups(),
                            [tf.compat.as_bytes("loc:@module/var123"),
                             tf.compat.as_bytes("loc:@u1"),
                             tf.compat.as_bytes("loc:@u2")])
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertEqual(sess.run(y), 101.0)

  def testPartitionedVariables(self):
    """variable_map lists partition shards in deterministic order."""
    with tf.Graph().as_default():
      spec = hub.create_module_spec(
          create_partitioned_variable_module_fn(partitions=3, shape=[7, 3]))
      m = hub.Module(spec, name="test")
      out = m()
      self.assertEqual(len(m.variable_map), 2)
      self.assertEqual(m.variable_map["normal_variable"].name,
                       "test/normal_variable:0")
      self.assertAllEqual([
          variable.name for variable in m.variable_map["partitioned_variable"]
      ], [
          "test/partitioned_variable/part_0:0",
          "test/partitioned_variable/part_1:0",
          "test/partitioned_variable/part_2:0"
      ])
      self.assertAllEqual(  # Check deterministric order (by variable_map key).
          [variable.name for variable in m.variables],
          ["test/normal_variable:0",
           "test/partitioned_variable/part_0:0",
           "test/partitioned_variable/part_1:0",
           "test/partitioned_variable/part_2:0"])
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(out), 2 * np.ones([7, 3]))

  def testLargePartitionedVariables(self):
    """A 25-way partitioned variable yields 26 variables in total."""
    with tf.Graph().as_default():
      spec = hub.create_module_spec(
          create_partitioned_variable_module_fn(partitions=25, shape=[600, 3]))
      m = hub.Module(spec, name="test")
      out = m()
      self.assertEqual(len(m.variable_map), 2)
      self.assertEqual(len(m.variable_map["partitioned_variable"]), 25)
      self.assertEqual(len(m.variables), 26)
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(out), 2 * np.ones([600, 3]))

  def testLoadTrainableModuleFromFuncDef(self):
    """A trainable module's variables can be optimized towards a target."""
    with tf_v1.Session() as sess:
      spec = hub.create_module_spec(stateful_module_fn)
      m = hub.Module(spec, trainable=True)
      x = m()
      step = tf.Variable(0, trainable=False, name="global_step")
      train_op = tf_v1.train.GradientDescentOptimizer(0.40).minimize(
          loss=tf_v1.losses.mean_squared_error(x, [3.1, 3.2, 3.3]),
          global_step=step)
      sess.run(tf_v1.global_variables_initializer())
      for _ in range(50):
        sess.run(train_op)
      got = sess.run(x)
      self.assertAllClose(got, [3.1, 3.2, 3.3])

  # TODO(b/112575006): The following tests verify functionality of function call
  # within a TPU context. Work to generalize this for all function calls is
  # ongoing.
  def testTPUModuleInitializeOnceWithDefun(self):
    """Within a TPU-like Defun, module variables are initialized only once."""
    spec = hub.create_module_spec(stateful_random_rv_module_fn)

    @function.Defun()
    def import_computation():
      context = TPUReplicateContext()
      context.Enter()
      m = hub.Module(spec, name="module_", trainable=True)
      return [m(), m()]

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
      x = import_computation()
      sess.run(tf_v1.global_variables_initializer())
      got = sess.run(x)
      # Check the values are equal. If the initializer ran on each call,
      # the values would be different.
      self.assertEqual(got[0], got[1])

  def testTPUPruneWithUnusedInput(self):
    """Unused signature inputs are pruned without breaking the Defun call."""
    spec = hub.create_module_spec(unused_input_module_fn)

    @function.Defun()
    def import_computation(x):
      context = TPUReplicateContext()
      context.Enter()
      m = hub.Module(spec, name="module_", trainable=True)
      return m({
          "x": tf.cast(x, dtype=tf.int64),
          "unused": tf.constant(2, dtype=tf.int64)
      })

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
      x = import_computation(5)
      got = sess.run(x)
      self.assertEqual(got, 25)

  def testTPUModuleDoesntPruneControlDependencies(self):
    """Ops reachable only via control edges survive pruning."""
    spec = hub.create_module_spec(control_dependency_module_fn)

    @function.Defun()
    def import_computation():
      context = TPUReplicateContext()
      context.Enter()
      m = hub.Module(spec, name="module_", trainable=True)
      return m()

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
      x = import_computation()
      got = sess.run(x)
      self.assertEqual(got, 5.0)
      # If the op got pruned, the following get_operation_by_name should fail
      # with a dependency error.
      tf_v1.get_default_graph().get_operation_by_name("module_/dependency_op")

  def testTPUModuleWithDefun(self):
    """A stateful module can be applied twice inside one Defun."""
    spec = hub.create_module_spec(stateful_rv_with_input_module_fn)

    @function.Defun()
    def import_computation(first, second):
      context = TPUReplicateContext()
      context.Enter()
      m = hub.Module(spec, name="module_", trainable=True)
      return [m(first), m(second)]

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
      x = import_computation(9.0, 6.0)
      sess.run(tf_v1.global_variables_initializer())
      got = sess.run(x)
      self.assertEqual(got, (19.0, 16.0))

  def testTPUModuleWithTFEDefun(self):
    """Same as above but with the eager-style defun decorator."""
    with tf_v1.Graph().as_default() as graph, tf_v1.Session() as sess:
      # NOTE(review): `graph` is bound but unused.
      spec = hub.create_module_spec(stateful_rv_with_input_module_fn)

      @function_eager.defun()
      def import_computation(first, second):
        context = TPUReplicateContext()
        context.Enter()
        m = hub.Module(spec, trainable=True)
        return [m(first), m(second)]

      x = import_computation(9.0, 6.0)
      sess.run(tf_v1.global_variables_initializer())
      got = sess.run(x)
      self.assertEqual(got, [19.0, 16.0])

  def testTPUModuleWithWrapFunc(self):
    """Same as above but through tf_v1.wrap_function."""
    spec = hub.create_module_spec(stateful_rv_with_input_module_fn)

    def import_computation(first, second):
      context = TPUReplicateContext()
      context.Enter()
      m = hub.Module(spec, trainable=True)
      return [m(first), m(second)]

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
      x = tf_v1.wrap_function(
          import_computation,
          [tf.TensorSpec((), tf.float32),
           tf.TensorSpec((), tf.float32)])
      sess.run(tf_v1.global_variables_initializer())
      got = sess.run(x(9.0, 6.0))
      self.assertEqual(got, [19.0, 16.0])

  # The following tests should all fail until b/112575006 is resolved.
  def testModuleWithDefun(self):
    """Outside a TPU context, a stateful module inside Defun must raise."""
    spec = hub.create_module_spec(stateful_rv_with_input_module_fn)

    @function.Defun()
    def import_computation(first, second):
      m = hub.Module(spec, name="module_", trainable=True)
      return [m(first), m(second)]

    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
      # In the case where we don't handle the variables, they will not be
      # hoisted so they are not handled properly.
      with self.assertRaisesRegexp(
          NotImplementedError,
          "Using TF-Hub module within a TensorFlow defined function "
          "is currently not supported."):
        import_computation(9.0, 6.0)

  def testModuleWithEagerDefun(self):
    """Same expected failure for the eager-style defun."""
    spec = hub.create_module_spec(stateful_rv_with_input_module_fn)

    def import_computation(first, second):
      # In the case where we don't handle the variables, they will not be
      # hoisted so they are not handled properly.
      with self.assertRaisesRegexp(
          NotImplementedError,
          "Using TF-Hub module within a TensorFlow defined function "
          "is currently not supported."):
        m = hub.Module(spec, trainable=True)
      return [m(first), m(second)]

    x = function_eager.defun(import_computation)
    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
      sess.run(x(9.0, 6.0))

  def testModuleWithWrapFunc(self):
    """Same expected failure for wrap_function."""
    spec = hub.create_module_spec(stateful_rv_with_input_module_fn)

    def import_computation(first, second):
      m = hub.Module(spec, trainable=True)
      return [m(first), m(second)]

    # In the case where we don't handle the variables, they will not be
    # hoisted so they are not handled properly.
    with tf_v1.Graph().as_default(), tf_v1.Session() as sess:
      with self.assertRaisesRegexp(
          NotImplementedError,
          "Using TF-Hub module within a TensorFlow defined function "
          "is currently not supported."):
        tf_v1.wrap_function(
            import_computation,
            [tf.TensorSpec((), tf.float32),
             tf.TensorSpec((), tf.float32)])

  def _exportModulewithTrainedVariable(self):
    """Exports stateful_module_fn with var123 overwritten to [9, 9, 9]."""
    export_path = os.path.join(self.get_temp_dir(), "var-module")
    with tf.Graph().as_default():
      spec = hub.create_module_spec(stateful_module_fn)
      m = hub.Module(spec, trainable=True)
      assign_op = tf_v1.assign(m.variable_map["var123"],
                               tf.constant([9.0, 9.0, 9.0]))
      with tf_v1.Session() as sess:
        sess.run(assign_op)
        m.export(export_path, sess)
    return export_path

  def testModuleWithTrainedVariable(self):
    """Reimporting the exported module restores the trained value."""
    with tf.Graph().as_default():
      f = hub.Module(self._exportModulewithTrainedVariable())
      out = f()
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        got = sess.run(out)
        self.assertAllClose(got, [9.0, 9.0, 9.0])

  def testModuleEvalWithTrainedVariable(self):
    """eval_function_for_module also sees the trained value."""
    export_path = self._exportModulewithTrainedVariable()
    with hub.eval_function_for_module(export_path) as f:
      self.assertAllClose(f(), [9.0, 9.0, 9.0])
def table_lookup_module_fn():
  """Module mapping int64 ids 0..2 to strings via a HashTable ("UNK" default)."""
  ids = tf_v1.placeholder(dtype=tf.int64, name="x")
  init = KeyValueTensorInitializer(
      tf.constant([0, 1, 2], dtype=tf.int64),
      tf.constant(["index0", "hello", "world"]))
  lookup_table = HashTable(init, "UNK")
  hub.add_signature(inputs=ids, outputs=lookup_table.lookup(ids))
class TFHubTableLookupModuleTest(tf.test.TestCase):
  """Tests export and reuse of a module containing a lookup table."""

  def _exportModuleWithTable(self):
    """Exports table_lookup_module_fn to a temp dir and returns its path."""
    export_path = os.path.join(self.get_temp_dir(), "table-module")
    with tf.Graph().as_default():
      spec = hub.create_module_spec(table_lookup_module_fn)
      m = hub.Module(spec)
      # Export requires a session to work regardless of the module having no
      # variables to export.
      with tf_v1.Session() as sess:
        m.export(export_path, sess)
    return export_path

  def testModuleWithTable(self):
    """Out-of-vocabulary ids map to the "UNK" default value."""
    with tf.Graph().as_default():
      v = tf_v1.placeholder(dtype=tf.int64)
      f = hub.Module(self._exportModuleWithTable())
      y = f(v)
      with tf_v1.Session() as sess:
        # Tables need their own initializer, separate from variables.
        sess.run(tf_v1.tables_initializer())
        got = sess.run(y, feed_dict={v: [0, 1, 2, 3]})
        self.assertAllEqual(list(got), [b"index0", b"hello", b"world", b"UNK"])

  def testModuleEvalWithTable(self):
    """eval_function_for_module initializes the table transparently."""
    with hub.eval_function_for_module(self._exportModuleWithTable()) as f:
      got = f([0, 1, 2, 3])
      self.assertAllEqual(list(got), [b"index0", b"hello", b"world", b"UNK"])
def do_table_lookup(indices, vocabulary_file):
  """Looks up `indices` in an index-to-string table built from a vocab file."""
  lookup_table = index_to_string_table_from_file(
      vocabulary_file=vocabulary_file,
      default_value="UNKNOWN")
  return lookup_table.lookup(indices)
def layers_module_fn():
  """Module that exercises the use of layers."""
  # This is a plain linear map Mx+b regularized by the sum of the squares
  # of the coefficients in M and b.
  x = tf_v1.placeholder(dtype=tf.float32, shape=[None, 2], name="x")

  def l2(weights):
    """Applies l2 regularization to weights."""
    # The control dependency keeps the regularizer tied to its weights op.
    with tf.control_dependencies([weights]):
      return 2.0 * tf_v1.nn.l2_loss(weights)

  # Dense layer with no activation: h = Mx + b, both M and b regularized.
  h = tf_v1.layers.dense(
      x, 2,
      activation=None,
      kernel_regularizer=l2,
      bias_regularizer=l2)
  hub.add_signature(inputs=x, outputs=h)
class TFHubLayersModuleTest(tf.test.TestCase):
  """Tests modules built with tf.layers, including regularization losses."""

  def testModuleWithLayers(self):
    """Export/reimport of a layers-based module reproduces its outputs."""
    export_path = os.path.join(self.get_temp_dir(), "layers-module")

    sample_input = [[1.0, 2.0], [3.1, 10.0]]

    spec = hub.create_module_spec(layers_module_fn)
    with tf.Graph().as_default():
      m = hub.Module(spec, trainable=False)
      x = tf_v1.placeholder(dtype=tf.float32)
      y = m(x)
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        sample_output = sess.run(y, feed_dict={x: sample_input})
        m.export(export_path, sess)

    with tf.Graph().as_default():
      x = tf_v1.placeholder(dtype=tf.float32)
      y = hub.Module(export_path)(x)

      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        got = sess.run(y, feed_dict={x: sample_input})
        self.assertAllEqual(got, sample_output)

  def testModuleWithRegularizedLayers(self):
    """REGULARIZATION_LOSSES from inside the module reach the trainer."""
    # The linear map y = Mx + b with L2 regularization on M and b
    # when trained at x = [1,1] with L2 loss towards the target y' = [4,4]
    # learns M = [[1,1],[1,1]], b = [1,1], y = [3,3], with eight balanced
    # loss terms: the elements of M, b, and y' - y are all distance 1 from zero.
    train_input = [[1.0, 1.0]]
    target = [[4.0, 4.0]]

    spec = hub.create_module_spec(layers_module_fn)
    with tf.Graph().as_default():
      m = hub.Module(spec, trainable=True)
      x = tf_v1.placeholder(dtype=tf.float32)
      y = m(x)
      squared_loss = tf_v1.losses.mean_squared_error(y, target, weights=2.0)
      # Recover REGULARIZATION_LOSSES from the module.
      total_loss = squared_loss + tf_v1.losses.get_regularization_loss()
      step = tf.Variable(0, trainable=False, name="global_step")
      train = tf_v1.train.GradientDescentOptimizer(0.1).minimize(
          loss=total_loss, global_step=step)

      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        for _ in range(50):
          sess.run(train, feed_dict={x: train_input})

        # Verify M = [[1,1],[1,1]], b = [1,1] by evaluating at three points.
        # Without regularization, the result would be an underdetermined mess.
        out = sess.run(y, feed_dict={x: [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]})
        self.assertAllClose(
            out, [[1.0, 1.0], [2.0, 2.0], [2.0, 2.0]], atol=0.001)
def valid_colocation_module_fn():
  """Module using every *legal* kind of colocation reference.

  The inline asserts document the colocation groups at graph-construction
  time, inside the module_fn.
  """
  w = tf.Variable(42 + 69, name="w")
  # w.op has the same name on resource and non-resource variables
  with tf_v1.colocate_with(w.op):
    # A colocation reference among state nodes is ok.
    v = tf.Variable(1.0, name="v")
    assert v.op.colocation_groups() == [tf.compat.as_bytes("loc:@w")]
    # A colocation reference from other nodes to state nodes is ok.
    y = tf.add(v, 1, name="y")
    assert y.op.colocation_groups() == [tf.compat.as_bytes("loc:@w")]
  x = tf_v1.placeholder(dtype=tf.float32, name="x")
  with tf_v1.colocate_with(x):
    # A colocation reference from other nodes to input nodes is ok.
    z = tf.add(x, 1, name="z")
    assert z.op.colocation_groups() == [tf.compat.as_bytes("loc:@x")]
  hub.add_signature(inputs=dict(x=x), outputs=dict(y=y, z=z))
def bad_input_colocation_module_fn():
  """Module whose input placeholder is (illegally) colocated with another op."""
  anchor = tf.add(42, 69, name="u")
  with tf_v1.colocate_with(anchor):
    # Inputs must not reference other nodes for colocation.
    inp = tf_v1.placeholder(tf.float32, name="x")
  hub.add_signature(inputs=inp, outputs=inp + 1.0)
def bad_state_colocation_module_fn():
  """Module whose state variable is badly colocated with an ordinary node."""
  anchor = tf.add(42, 69, name="u")
  with tf_v1.colocate_with(anchor):
    # State-holding nodes must not reference other nodes for colocation.
    v = tf.Variable(1.0, name="v")
  x = tf_v1.placeholder(dtype=tf.float32)
  y = x + v
  hub.add_signature(inputs=x, outputs=y)
def brittle_multivalued_colocation_module_fn():
  """Module whose inputs come from a multi-valued op, making rewrites brittle.

  x and y are both outputs of the same "split" op, so colocating with either
  of them resolves to the single group "loc:@split". At apply time this
  group can only be rewritten unambiguously if all of split's outputs map to
  ops with the same colocation group (exercised by the tests below).
  """
  x, y = tf.split([1, 2], 2, name="split")
  with tf_v1.colocate_with(x), tf_v1.colocate_with(y):
    z = tf.add(x, y, name="add")
    assert z.op.colocation_groups() == [tf.compat.as_bytes("loc:@split")]
  # "both" takes both split outputs; "partial" takes only one of them.
  hub.add_signature(inputs=dict(x=x, y=y), outputs=z, name="both")
  hub.add_signature(inputs=dict(x=x), outputs=z, name="partial")
class ColocationRewritingTest(tf.test.TestCase):
  """Tests how Module rewrites colocation constraints when applied."""

  def testValidCase(self):
    """Tests a complex, valid case end-to-end."""
    spec = hub.create_module_spec(valid_colocation_module_fn)
    with tf.Graph().as_default():
      u = tf.constant(7.0, name="u")
      m = hub.Module(spec, name="m")
      outputs = m(dict(x=u), as_dict=True)
      # "y" was colocated with module state "w", which now lives under "m/".
      self.assertItemsEqual(outputs["y"].op.colocation_groups(),
                            [tf.compat.as_bytes("loc:@m/w")])
      # "z" was colocated with input "x", which is rewritten to caller's "u".
      self.assertItemsEqual(outputs["z"].op.colocation_groups(),
                            [tf.compat.as_bytes("loc:@u")])

  def testBadInputColocation(self):
    """Tests catching bad colocation of inputs during create_module_spec."""
    with self.assertRaisesRegexp(ValueError, "(?s)input.*colocate.*loc:@u"):
      _ = hub.create_module_spec(bad_input_colocation_module_fn)

  def testBadStateColocation(self):
    """Tests catching bad colocation of states during create_module_spec."""
    with self.assertRaisesRegexp(ValueError, "(?s)state.*colocate.*loc:@u"):
      _ = hub.create_module_spec(bad_state_colocation_module_fn)

  def testInputsFromMultivaluedOp(self):
    """Tests warning for inputs from multivalued ops in create_module_spec."""
    # Ideally, one would be able to write
    #   with self.assertLogs("blah"): hub.create_module_spec(module_fn)
    # but in the absence of assertions on logs, we test the underlying helper
    # in the environment seen from within a module_fn.
    with tf.Graph().as_default():
      first, _ = tf.split([[1, 2], [3, 4]], 2, name="split1")
      _, second = tf.split([[5, 6], [7, 8]], 2, name="split2")
      third = tf.constant(105, name="const")
      message = native_module.find_signature_inputs_from_multivalued_ops(
          dict(first=first, second=second, third=third))
      self.assertRegexpMatches(
          message,
          ".*single output.*\n"
          "Affected inputs: first='split1:0', second='split2:1'$")
    # Also test the case of no errors.
    with tf.Graph().as_default():
      first = tf.constant(101)
      second = tf.constant(102)
      third = tf.constant(103)
      message = native_module.find_signature_inputs_from_multivalued_ops(
          dict(first=first, second=second, third=third))
      self.assertIsNone(message)

  def testSparseInputsFromMultivaluedOp(self):
    """Tests warning for SparseTensor inputs from multivalued ops."""
    with tf.Graph().as_default():
      one, _ = tf_v1.sparse_split(
          sp_input=tf.SparseTensor(indices=[[0, 1], [1, 2]], values=[1, 2],
                                   dense_shape=[2, 3]),
          num_split=2, axis=0, name="op1")
      _, two = tf_v1.sparse_split(
          sp_input=tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[3, 4],
                                   dense_shape=[2, 3]),
          num_split=2, axis=0, name="op2")
      three = tf.SparseTensor(indices=[[0]], values=[5], dense_shape=[2])
      message = native_module.find_signature_inputs_from_multivalued_ops(
          dict(one=one, two=two, three=three))
      self.assertRegexpMatches(
          message,
          ".*single output.*\nAffected inputs: "
          "one.indices='op1:0', one.values='op1:2', one.dense_shape='op1:4', "
          "two.indices='op2:1', two.values='op2:3', two.dense_shape='op2:5'$")
    # Also test the case of no errors.
    with tf.Graph().as_default():
      one = tf.SparseTensor(indices=[[0]], values=[1], dense_shape=[2])
      two = tf.SparseTensor(indices=[[1]], values=[2], dense_shape=[2])
      # Bug fix: build `three` inside this graph instead of reusing the
      # SparseTensor constructed in the previous (now inactive) graph;
      # tensors must not be used across tf.Graph boundaries.
      three = tf.SparseTensor(indices=[[0]], values=[5], dense_shape=[2])
      message = native_module.find_signature_inputs_from_multivalued_ops(
          dict(one=one, two=two, three=three))
      self.assertIsNone(message)

  def testBrittleColocationWithInputsFromMultivaluedOp(self):
    """Tests handling of ambiguous rewrites during module.__call__."""
    spec = hub.create_module_spec(brittle_multivalued_colocation_module_fn)
    with tf.Graph().as_default():
      u = tf.constant([1], name="u")
      with tf_v1.colocate_with(u):
        v = tf.constant([2], name="v")
      w = tf.constant([3], name="w")
      m = hub.Module(spec, name="m")
      # It works if both inputs are mapped to ops with equal colocation groups.
      assert u.op.colocation_groups() == v.op.colocation_groups()
      z = m(dict(x=u, y=v), signature="both")
      self.assertItemsEqual(z.op.colocation_groups(),
                            [tf.compat.as_bytes("loc:@u")])
      # It crashes in the general case.
      assert u.op.colocation_groups() != w.op.colocation_groups()
      with self.assertRaisesRegexp(
          ValueError,
          # In Python 3 (but not 2), colocation groups are lists of bytes,
          # which are formatted with a leading "b" just before the quotes.
          # (Raw strings used throughout to avoid invalid escape sequences.)
          r"(?s)Failed to rewrite .*b?'loc:@m_apply_both_1/split' .*"
          r"\[b?'loc:@[uw]'\] vs \[b?'loc:@[wu]'\]"):
        z = m(dict(x=u, y=w), signature="both")

  def testBadColocationWithPartialInputsFromMultivaluedOp(self):
    spec = hub.create_module_spec(brittle_multivalued_colocation_module_fn)
    with tf.Graph().as_default():
      u = tf.constant([1], name="u")
      m = hub.Module(spec, name="m")
      with self.assertRaisesRegexp(
          ValueError,
          r"(?s)Failed to rewrite .*b?'loc:@m_apply_partial/split' .*"
          r"\[b?'loc:@u'\] vs \[b?'loc:@m_apply_partial/split'\]"):
        z = m(dict(x=u), signature="partial")
def update_ops_module_fn():
  """Module that registers a counter increment in the UPDATE_OPS collection."""
  step_counter = tf.Variable(0, trainable=False)
  tf_v1.add_to_collection(tf_v1.GraphKeys.UPDATE_OPS,
                          step_counter.assign_add(1))
  hub.add_signature(inputs=None, outputs=step_counter.value())
class TFHubUpdateOpsTest(tf.test.TestCase):
  """Tests that a module's UPDATE_OPS run only when the module is trainable."""

  def testUpdateOps(self):
    spec = hub.create_module_spec(update_ops_module_fn)
    with tf_v1.Session() as sess:
      trainable_module = hub.Module(spec, trainable=True)
      fixed_module = hub.Module(spec, trainable=False)
      # TODO(b/62433105): Understand what is the desired behaviour of UPDATE_OPS
      # and applying a Module multiple times. For now UPDATE_OPS probably only
      # do something reasonable if each Module is applied exactly one time.
      trainable_module()
      fixed_module()
      variable = tf.Variable(0.0)
      step = tf.Variable(0, trainable=False, name="global_step")
      # Attach the collected UPDATE_OPS to the train op so they run with it.
      update_ops = tf_v1.get_collection(tf_v1.GraphKeys.UPDATE_OPS)
      with tf.control_dependencies(update_ops):
        train_op = tf_v1.train.GradientDescentOptimizer(0.1).minimize(
            loss=variable,
            global_step=step)
      sess.run(tf_v1.global_variables_initializer())
      sess.run(train_op)

      # The trainable module's counter was incremented by the train step...
      trainable_module_vars = list(trainable_module.variable_map.values())
      self.assertEqual(len(trainable_module_vars), 1)
      self.assertEqual(sess.run(trainable_module_vars[0]), 1)
      # ...while the non-trainable module's counter stayed at zero.
      fixed_module_vars = list(fixed_module.variable_map.values())
      self.assertEqual(len(fixed_module_vars), 1)
      self.assertEqual(sess.run(fixed_module_vars[0]), 0)
def batch_norm_module_fn(is_training):
  """Module that exercises batch normalization, incl. UPDATE_OPS."""
  inputs = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1], name="x")
  normalized = tf_v1.layers.batch_normalization(
      inputs=inputs,
      momentum=0.4,
      fused=False,
      training=is_training)
  hub.add_signature(inputs=inputs, outputs=normalized)
class TFHubBatchNormModuleTest(tf.test.TestCase):
  # This test is intended to verify the following:
  # 1) A module_fn that uses batch normalization through tf.layers.contrib
  #    (and its underlying utilities from tf.nn) can be used to create,
  #    export, load and use the Module.
  # 2) Batch normalization learns the scale and offset parameters for its
  #    output as it should.
  # 3) The UPDATE_OPS added internally for the moving_mean and moving_variance
  #    over the training data are properly executed at training time, and their
  #    results are used at serving time, without further change.

  def testModuleWithBatchNorm(self):
    export_path = os.path.join(self.get_temp_dir(), "batch-norm-module")
    # This test resorts to lookup by name to retrieve the moving mean,
    # because tf.contrib.layers.batch_norm() does not return it, and even if,
    # module_fn() has no way to return it next to the result for training.
    # NOTE(review): the exact name depends on how tf_v1.layers names its ops;
    # if the layer implementation changes, this lookup breaks.
    moving_mean_name = (
        "module/batch_normalization/moving_mean/Read/ReadVariableOp:0")

    batch_norm_train_tags = ["batch_norm_trains"]
    batch_norm_fixed_tags = ["batch_norm_fixed"]
    # Two graph variants of the same module: one training, one frozen.
    spec = hub.create_module_spec(
        batch_norm_module_fn,
        [(batch_norm_train_tags, {"is_training": True}),
         (batch_norm_fixed_tags, {"is_training": False})])
    # Test Module creation and training.
    with tf.Graph().as_default() as g:
      m = hub.Module(spec, trainable=True, tags=batch_norm_train_tags)
      # The module is trained on a fixed batch of inputs, which has a mean
      # of 12.0 and some sample variance of a less obvious value. The module
      # learns scale and offset parameters that achieve the mapping x --> 2*x
      # for the observed mean and variance.
      x = tf.constant([[11.0], [12.0], [13.0]])
      training_mean = [12.0]
      y_target = tf.constant([[22.0], [24.0], [26.0]])
      y = m(x)
      step = tf.Variable(0, trainable=False, name="global_step")
      moving_mean = g.get_tensor_by_name(moving_mean_name)
      # Run the batch-norm UPDATE_OPS with each optimizer step so the moving
      # statistics track the training data.
      update_ops = tf_v1.get_collection(tf_v1.GraphKeys.UPDATE_OPS)
      with tf.control_dependencies(update_ops):
        train = tf_v1.train.GradientDescentOptimizer(0.1).minimize(
            loss=tf_v1.losses.mean_squared_error(y, y_target), global_step=step)
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(moving_mean), [0.0])
        for _ in range(100):
          sess.run([train])
        trained_moving_mean, trained_y = sess.run([moving_mean, y])
        self.assertAllClose(trained_moving_mean, training_mean)
        self.assertAllClose(trained_y, [[22.0], [24.0], [26.0]])
        # Test export.
        m.export(export_path, sess)

    # Test import and use.
    spec = load_module_spec(export_path)
    with tf.Graph().as_default() as g:
      # The module gets run for inference on inputs with different mean and
      # variance. However, both mean and variance as well as offset and scale
      # are now frozen to the values from learning, so the same mapping
      # x --> 2*x is recovered.
      x = tf.constant([[10.0], [20.0], [30.0]])
      y = hub.Module(
          spec, tags=batch_norm_fixed_tags)(x)
      moving_mean = g.get_tensor_by_name(moving_mean_name)
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        for _ in range(100):
          served_moving_mean, served_y = sess.run([moving_mean, y])
        # No update occurs to the moving_mean from training time.
        self.assertAllClose(served_moving_mean, training_mean)
        # Prediction results are correct.
        self.assertAllClose(served_y, [[20.0], [40.0], [60.0]])
def multiple_outputs_module_fn():
  """Module returning two outputs derived from one input and one variable."""
  x = tf_v1.placeholder(dtype=tf.float32)
  v = tf.Variable([3.0])
  scaled = v * x
  scaled_again = v * v * x
  hub.add_signature(
      inputs={"x": x},
      outputs={"y": scaled, "z": scaled_again})
class TFHubMultipleOutputsTest(tf.test.TestCase):
  """Tests modules exposing several named outputs."""

  def testMultipleOutputs(self):
    with tf_v1.Session() as sess:
      spec = hub.create_module_spec(multiple_outputs_module_fn)
      module = hub.Module(spec)
      outputs = module(tf.constant([2.0]), as_dict=True)
      sess.run(tf_v1.global_variables_initializer())
      # v initializes to 3, so y = 3*2 and z = 3*3*2.
      self.assertAllClose(sess.run(outputs["y"]), [6.0])
      self.assertAllClose(sess.run(outputs["z"]), [18.0])
def create_assets_module_fn(vocabulary_file):
  """Returns a module_fn that looks up indices in `vocabulary_file`."""

  def assets_module_fn():
    indices = tf_v1.placeholder(dtype=tf.int64, name="indices")
    hub.add_signature(inputs=indices,
                      outputs=do_table_lookup(indices, vocabulary_file))

  return assets_module_fn
def create_consumer_module_fn(exported_hub_module):
  """Returns a module_fn that wraps an already-exported hub module."""

  def consumer_module_fn():
    indices = tf_v1.placeholder(dtype=tf.int64, name="indices")
    wrapped = hub.Module(exported_hub_module)
    hub.add_signature(inputs=indices,
                      outputs=tf.identity(wrapped(indices)))

  return consumer_module_fn
class TFHubAssetsTest(tf.test.TestCase):
  """Tests modules that carry asset files (here: vocabulary files)."""

  def create_vocab_file(self, path, vocab):
    """Writes the tokens in `vocab`, one per line, under the temp dir.

    Args:
      path: file name to create inside the test's temp directory.
      vocab: iterable of token strings.

    Returns:
      The full path of the written vocabulary file.
    """
    # Bug fix: honor the `path` argument instead of the hard-coded
    # "tokens.txt", which silently ignored the caller's requested name
    # (e.g. "tokens2.txt" in testDuplicateAssetCopy).
    vocabulary_file = os.path.join(self.get_temp_dir(), path)
    with open(vocabulary_file, "w+") as vocab_file:
      for line in vocab:
        vocab_file.write(line)
        vocab_file.write(os.linesep)
    return vocabulary_file

  def testAssets(self):
    export_path = os.path.join(self.get_temp_dir(), "assets-module")
    vocabulary_file = self.create_vocab_file("tokens.txt",
                                             ["emerson", "lake", "palmer"])
    with tf.Graph().as_default():
      assets_module_fn = create_assets_module_fn(vocabulary_file)
      spec = hub.create_module_spec(assets_module_fn)
      embedding_module = hub.Module(spec)
      output = embedding_module(tf.constant([1, 2], dtype=tf.int64))
      with tf_v1.Session() as sess:
        sess.run(tf_v1.tables_initializer())
        self.assertAllEqual(list(sess.run(output)), [b"lake", b"palmer"])
        embedding_module.export(export_path, sess)

    asset_file = os.path.join(*[export_path, "assets", "tokens.txt"])
    # Check that asset file got written to the expected place:
    self.assertTrue(tf_v1.gfile.Exists(asset_file))
    # Assets should be hermetic, so we can delete the original vocab file:
    tf_v1.gfile.Remove(vocabulary_file)

    with tf.Graph().as_default():
      spec = load_module_spec(export_path)
      embedding_module = hub.Module(spec)
      output = embedding_module(tf.constant([1, 2], dtype=tf.int64))
      with tf_v1.Session() as sess:
        sess.run(tf_v1.tables_initializer())
        # Check functionality:
        self.assertAllEqual(list(sess.run(output)), [b"lake", b"palmer"])
        # Check that the ASSET_FILEPATHS collection was restored properly:
        asset_filepaths = [
            sess.run(tensor)
            for tensor in tf_v1.get_collection(tf_v1.GraphKeys.ASSET_FILEPATHS)
        ]
        # ASSET_FILEPATHS are added for the state graph and for the apply graph:
        self.assertAllEqual(asset_filepaths,
                            [tf.compat.as_bytes(asset_file)] * 2)

  def testDuplicateAssetCopy(self):
    export_path = os.path.join(self.get_temp_dir(), "assets-module")

    def module_with_duplicate_asset():
      vocabulary_file = self.create_vocab_file("tokens2.txt", ["1", "2", "3"])
      indices1 = tf_v1.placeholder(dtype=tf.int64, name="indices1")
      indices2 = tf_v1.placeholder(dtype=tf.int64, name="indices2")
      hub.add_signature(
          inputs={
              "indices_1": indices1,
              "indices_2": indices2,
          },
          outputs={
              "x": do_table_lookup(indices1, vocabulary_file),
              "y": do_table_lookup(indices2, vocabulary_file),
          })

    with tf.Graph().as_default():
      spec = hub.create_module_spec(module_with_duplicate_asset)
      module_a = hub.Module(spec)
      module_a({"indices_1": tf.constant([1, 2], dtype=tf.int64),
                "indices_2": tf.constant([1, 2], dtype=tf.int64)}, as_dict=True)
      with tf_v1.Session() as sess:
        sess.run(tf_v1.tables_initializer())
        # Export must tolerate the same asset being referenced twice.
        module_a.export(export_path, sess)

  def testExportedConsumerModelWorksIfItUsesHubModuleWithAssets(self):
    # 1. Create and export a module with assets.
    module_export_path = os.path.join(self.get_temp_dir(), "small-module")
    vocabulary_file = self.create_vocab_file("tokens.txt",
                                             ["emerson", "lake", "palmer"])
    assets_module_fn = create_assets_module_fn(vocabulary_file)
    spec = hub.create_module_spec(assets_module_fn)
    with tf.Graph().as_default():
      small_module = hub.Module(spec)
      with tf_v1.Session() as sess:
        small_module.export(module_export_path, sess)
    # 2. Remove the original vocab file and move the module to another location.
    tf_v1.gfile.Remove(vocabulary_file)
    inner_module_path = os.path.join(self.get_temp_dir(), "inner-module")
    tf_v1.gfile.Rename(module_export_path, inner_module_path)
    del module_export_path
    # 3. Use the module in a consumer model (which is another module here).
    module_export_path = os.path.join(self.get_temp_dir(), "consumer-module")
    consumer_module_fn = create_consumer_module_fn(inner_module_path)
    spec = hub.create_module_spec(consumer_module_fn)
    with tf.Graph().as_default():
      consumer_module = hub.Module(spec)
      with tf_v1.Session() as sess:
        consumer_module.export(module_export_path, sess)
    # 4. Delete the inner module on disk and move the consumer model to a final
    #    location for serving.
    tf_v1.gfile.DeleteRecursively(inner_module_path)
    module_serving_path = os.path.join(self.get_temp_dir(), "serving-module")
    tf_v1.gfile.Rename(module_export_path, module_serving_path)
    # 5. Make sure the model can be served successfully.
    with tf.Graph().as_default():
      serving_module = hub.Module(module_serving_path)
      output = serving_module(tf.constant([1, 2], dtype=tf.int64))
      with tf_v1.Session() as sess:
        sess.run(tf_v1.tables_initializer())
        self.assertAllEqual(list(sess.run(output)), [b"lake", b"palmer"])
def another_stateful_module_fn():
  """Stateful module with inputs."""
  module_input = tf_v1.placeholder(dtype=tf.float32)
  hidden_weight = tf.Variable([3.0], name="iamtheoneandonly")
  hub.add_signature(inputs=module_input,
                    outputs=module_input * hidden_weight)
class TFHubApplyStatefulModuleMultipleTimesTest(tf.test.TestCase):
  """Tests applying one stateful module several times within a graph."""

  def testApplyStatefulModuleMultipleTimes(self):
    export_path = os.path.join(self.get_temp_dir(), "another-module")

    with tf_v1.Session() as sess:
      spec = hub.create_module_spec(another_stateful_module_fn)
      stateful_module = hub.Module(spec, trainable=True)
      times2 = stateful_module(tf.constant([2.0]))
      times3 = stateful_module(tf.constant([3.0]))
      step = tf.Variable(0, trainable=False, name="global_step")
      # Training will adapt the hidden variable to be approximately 2:
      train = tf_v1.train.GradientDescentOptimizer(0.05).minimize(
          loss=tf_v1.losses.mean_squared_error(times2, [4.0]),
          global_step=step)

      sess.run(tf_v1.global_variables_initializer())
      for _ in range(50):
        sess.run(train)
      # Both applications share the single trained variable.
      self.assertAllClose(sess.run(times2), [4.0])
      self.assertAllClose(sess.run(times3), [6.0])
      stateful_module.export(export_path, sess)

    with tf_v1.Session() as sess:
      stateful_module = hub.Module(export_path)
      times4 = stateful_module(tf.constant([4.0]))
      times5 = stateful_module(tf.constant([5.0]))
      sess.run(tf_v1.global_variables_initializer())
      # The exported variable (~2.0) is reused by both new applications.
      self.assertAllClose(sess.run(times4), [8.0])
      self.assertAllClose(sess.run(times5), [10.0])

  def testMultipleApplicationsInDifferentScopes(self):
    with tf.Graph().as_default():
      export_path = os.path.join(self.get_temp_dir(), "module-applied-in-scope")
      spec = hub.create_module_spec(another_stateful_module_fn)
      stateful_module = hub.Module(spec, name="moduleA")
      with tf.name_scope("foo"):
        with tf_v1.variable_scope("bar"):
          times2 = stateful_module(tf.constant([2.0]))
      with tf.name_scope("baz"):
        times3 = stateful_module(tf.constant([3.0]))

      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(times2), [6.0])
        self.assertAllClose(sess.run(times3), [9.0])
        # A single shared variable exists despite applications in two scopes.
        self.assertEqual(len(stateful_module.variable_map), 1)
        self.assertEqual(
            stateful_module.variable_map["iamtheoneandonly"].name,
            "moduleA/iamtheoneandonly:0")
        stateful_module.export(export_path, sess)

    # Check minimal functionality of the exported module.
    with tf.Graph().as_default():
      stateful_module = hub.Module(export_path, name="moduleB")
      times2 = stateful_module(tf.constant([2.0]))
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllClose(sess.run(times2), [6.0])
def multiple_signature_module_fn():
  """Stateful module with multiple signatures."""
  weight = tf.Variable([3.0])
  mul_input = tf_v1.placeholder(dtype=tf.float32)
  hub.add_signature("mul", inputs=mul_input,
                    outputs=tf.multiply(mul_input, weight))
  div_input = tf_v1.placeholder(dtype=tf.float32)
  hub.add_signature("div", inputs=div_input,
                    outputs=tf.divide(div_input, weight))
class TFHubModuleWithMultipleSignatures(tf.test.TestCase):
  """Tests modules that expose more than one signature."""

  def testGetSignatures(self):
    spec = hub.create_module_spec(multiple_signature_module_fn)
    self.assertEqual(sorted(spec.get_signature_names()), ["div", "mul"])

  def testModuleWithMultipleSignatures(self):
    with tf.Graph().as_default():
      spec = hub.create_module_spec(multiple_signature_module_fn)
      module = hub.Module(spec, name="moduleA")
      in_tensor = tf_v1.placeholder(dtype=tf.float32)
      # Applying "mul" followed by "div" with the same weight is the identity.
      round_trip = module(module(in_tensor, signature="mul"), signature="div")
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        in_values = [6, 3, 1]
        self.assertAllClose(
            sess.run(round_trip, feed_dict={in_tensor: in_values}), in_values)
def cond_module_fn():
  """Computes relu(x) with a conditional."""
  x = tf_v1.placeholder(dtype=tf.float32, name="x", shape=[])
  relu = tf.cond(0 < x, lambda: tf.identity(x), lambda: tf.constant(0.0))
  hub.add_signature(inputs=x, outputs=relu)
def nested_cond_module_fn():
  """Computes relu(x) with nested conditionals."""
  x = tf_v1.placeholder(dtype=tf.float32, name="x", shape=[])
  # pylint: disable=g-long-lambda
  # Positive branch: both arms yield x (identity vs. x*1.0).
  # Non-positive branch: both arms yield 0.0 (constant vs. 0.0*1.0).
  # The nesting exists only to exercise nested cond handling, not the math.
  result = tf.cond(
      0 < x,
      lambda: tf.cond(3 < x,
                      lambda: tf.identity(x),
                      lambda: tf.multiply(x, 1.0)),
      lambda: tf.cond(x < -3,
                      lambda: tf.constant(0.0),
                      lambda: tf.multiply(0.0, 1.0)))
  # pylint: enable=g-long-lambda
  hub.add_signature(inputs=x, outputs=result)
def while_module_fn():
  """Compute x^n with while_loop."""
  x = tf_v1.placeholder(dtype=tf.float32, name="x", shape=[])
  n = tf_v1.placeholder(dtype=tf.int32, name="n")

  def keep_going(i, _):
    return i < n

  def multiply_once(i, acc):
    return [tf.add(i, 1), acc * x]

  _, pow_x = tf.while_loop(
      keep_going, multiply_once, [tf.constant(0), tf.constant(1.0)])
  hub.add_signature(inputs={"x": x, "n": n}, outputs=pow_x)
def nested_control_flow_module_fn():
  """Compute the sum of elements greater than 'a' with nested control flow."""
  elems = tf_v1.placeholder(dtype=tf.float32, name="elems", shape=[None])
  a = tf_v1.placeholder(dtype=tf.float32, name="a")

  def accumulate(total, value):
    # Contribute `value` only when it exceeds the threshold `a`.
    return total + tf.cond(value > a, lambda: value, lambda: 0.0)

  hub.add_signature(
      inputs={"elems": elems, "a": a},
      outputs=tf.foldl(accumulate, elems, initializer=tf.constant(0.0)))
class TFHubModulesWithControlFlow(tf.test.TestCase):
  """Tests modules whose graphs contain cond/while control flow."""

  def testCondModule(self):
    self._testReluModule(cond_module_fn)

  def testModuleWithNestedConds(self):
    self._testReluModule(nested_cond_module_fn)

  def _testReluModule(self, module_fn):
    """Checks forward values and gradients of a relu-like module."""
    spec = hub.create_module_spec(module_fn)
    with tf.Graph().as_default():
      with tf_v1.Session() as sess:
        x = tf_v1.placeholder(dtype=tf.float32, name="x")
        relu_module = hub.Module(spec)
        y = relu_module(x)
        self.assertAllClose(sess.run(y, {x: 9.1}), 9.1)
        self.assertAllClose(sess.run(y, {x: -2.4}), 0.0)
        # Gradient through the cond: 1 on the positive side, 0 otherwise.
        grad = tf.gradients([y], [x])
        self.assertAllClose(sess.run(grad, {x: 2}), [1.0])
        self.assertAllClose(sess.run(grad, {x: -2}), [0.0])

  def testWhileModule(self):
    spec = hub.create_module_spec(while_module_fn)
    with tf.Graph().as_default():
      with tf_v1.Session() as sess:
        x = tf_v1.placeholder(tf.float32)
        n = tf_v1.placeholder(tf.int32)
        pow_module = hub.Module(spec)
        y = pow_module({"x": x, "n": n})
        self.assertAllClose(sess.run(y, {x: 9.1, n: 1}), 9.1)
        self.assertAllClose(sess.run(y, {x: 2.4, n: 2}), 5.76)
        # d(x^3)/dx at x=2 is 3*2^2 = 12.
        grad = tf.gradients([y], [x])
        self.assertAllClose(sess.run(grad, {x: 2, n: 3}), [12.0])

  def testUseModuleWithWhileLoopInsideCond(self):
    spec = hub.create_module_spec(while_module_fn)
    with tf.Graph().as_default():
      m = hub.Module(spec)
      # The true branch embeds the module's while_loop inside a cond.
      cond = tf.cond(
          tf.equal(tf.constant(0), tf.constant(0)),
          lambda: m({"x": tf.constant(3.0), "n": tf.constant(2)}),
          lambda: tf.constant(4.0))
      with tf_v1.Session() as sess:
        self.assertEqual(sess.run(cond), 9.0)

  def testNestedControlFlowModule(self):
    spec = hub.create_module_spec(nested_control_flow_module_fn)
    with tf.Graph().as_default():
      with tf_v1.Session() as sess:
        elems = tf_v1.placeholder(tf.float32, shape=[None])
        a = tf_v1.placeholder(tf.float32)
        m = hub.Module(spec)
        out = m({"elems": elems, "a": a})
        # Elements above 1.1 are 10 and 1.2; their sum is 11.2.
        self.assertAllClose(
            sess.run(out, {
                a: 1.1,
                elems: [10, 0, 0.5, 1.2]
            }), 11.2)
        # Gradient w.r.t. elems is 1 for contributing elements, else 0.
        grad = tf.gradients([out], [elems])
        self.assertAllClose(sess.run(grad, {a: 1, elems: [10, 0, 0.5, 1.2]}),
                            [[1.0, 0.0, 0.0, 1.0]])
def attached_messages_module_fn(tagged=0):
  """Module that doubles its input and carries several attached messages."""
  values = tf_v1.placeholder(tf.float32, shape=[None])
  hub.add_signature(inputs={"x": values}, outputs={"y": 2 * values})
  # For brevity, this test borrows two well-known, stable message types
  # from TensorFlow. They are not likely choices for actual uses.
  hub.attach_message("numbers",
                     tf_v1.train.Int64List(value=[-3]))  # Overwritten.
  hub.attach_message("numbers", tf_v1.train.Int64List(value=[42, 69]))
  hub.attach_message("letters", tf_v1.train.BytesList(value=[
      tf.compat.as_bytes("abc"), tf.compat.as_bytes("xyz")]))
  hub.attach_message("tagged", tf_v1.train.Int64List(value=[tagged]))
class TFHubModuleWithAttachedMessages(tf.test.TestCase):
  """Tests attach_message()/get_attached_message() round trips."""

  def testModuleSpec(self):
    """This is the general test for ModuleSpec and native_module._ModuleSpec."""
    spec = hub.create_module_spec(attached_messages_module_fn)
    attached_letters = spec.get_attached_message("letters",
                                                 tf_v1.train.BytesList)
    self.assertSequenceEqual(
        attached_letters.value,
        [tf.compat.as_bytes("abc"),
         tf.compat.as_bytes("xyz")])
    attached_numbers = spec.get_attached_message("numbers",
                                                 tf_v1.train.Int64List)
    # The second attach_message("numbers", ...) overwrote the first.
    self.assertSequenceEqual(attached_numbers.value, [42, 69])
    attached_train = spec.get_attached_message("tagged", tf_v1.train.Int64List)
    self.assertSequenceEqual(attached_train.value, [0])
    # Missing keys return None, unless required=True makes them raise.
    self.assertIsNone(spec.get_attached_message("bad", tf_v1.train.BytesList))
    with self.assertRaises(KeyError):
      spec.get_attached_message("bad", tf_v1.train.BytesList, required=True)

  def testModule(self):
    """Tests forwarding from Module to ModuleSpec."""
    spec = hub.create_module_spec(attached_messages_module_fn)
    with tf.Graph().as_default():
      module = hub.Module(spec)
      attached = module.get_attached_message("numbers", tf_v1.train.Int64List)
      self.assertSequenceEqual(attached.value, [42, 69])

  def testGraphVersions(self):
    """Tests native_module._ModuleSpec for explicit tags arguments."""
    tags_and_args = [(set(), {"tagged": 1}),
                     ({"double", "the", "value"}, {"tagged": 2})]
    spec = hub.create_module_spec(attached_messages_module_fn,
                                  tags_and_args=tags_and_args)
    for tags, args in tags_and_args:
      # Each tag set sees the message attached by its own graph variant.
      attached_to_spec = spec.get_attached_message(
          "tagged", tf_v1.train.Int64List, tags=tags)
      self.assertSequenceEqual(attached_to_spec.value, [args["tagged"]])
      with tf.Graph().as_default():
        module = hub.Module(spec, tags=tags)
        attached_to_module = module.get_attached_message(
            "tagged", tf_v1.train.Int64List)
        self.assertSequenceEqual(attached_to_module.value, [args["tagged"]])

  def testSeparateCopies(self):
    """Mutating returned objects does not affect future returned values."""
    spec = hub.create_module_spec(attached_messages_module_fn)
    attached_numbers = spec.get_attached_message("numbers",
                                                 tf_v1.train.Int64List)
    self.assertSequenceEqual(attached_numbers.value, [42, 69])
    attached_numbers.Clear()
    self.assertSequenceEqual(attached_numbers.value, [])
    attached_numbers = spec.get_attached_message("numbers",
                                                 tf_v1.train.Int64List)
    self.assertSequenceEqual(attached_numbers.value, [42, 69])
class TFHubOpsTest(tf.test.TestCase):
  """Tests op registration helpers."""

  def testRegisterLinkedOpsError(self):
    # Registering an op that is not linked into the binary must fail loudly.
    missing_op = "non-existent-op"
    with self.assertRaisesRegexp(RuntimeError, missing_op):
      native_module.register_ops_if_needed({missing_op})
class TFHubExportSpecTest(tf.test.TestCase):
  """Tests ModuleSpec.export() initialized from training checkpoints."""

  def f(self, x, dim=10):
    # Single dense layer shared by the module and the checkpointed model.
    return tf_v1.layers.dense(x, dim)

  def module_fn(self, dim=10):
    x = tf_v1.placeholder(dtype=tf.float32, shape=[None, dim])
    y = self.f(x, dim=dim)
    hub.add_signature(inputs=x, outputs=y)

  def createCheckpoint(self, scope=None):
    """Builds a small model, saves it, and returns the checkpoint path."""
    checkpoint_path = os.path.join(self.get_temp_dir(), "model")
    with tf.Graph().as_default():
      x = tf_v1.get_variable(
          "x", [32, 10], initializer=tf_v1.initializers.random_normal())
      if scope:
        with tf_v1.variable_scope(scope):
          y = self.f(x)
      else:
        y = self.f(x)
      # Extra layer whose variables exist only in the checkpoint, not the
      # module.
      tf_v1.layers.dense(y, 20)

      saver = tf_v1.train.Saver()
      init_op = tf_v1.initializers.global_variables()

      with tf_v1.Session() as session:
        session.run(init_op)
        saver.save(session, checkpoint_path)

    return checkpoint_path

  def testExportModuleSpec(self):
    checkpoint_path = self.createCheckpoint()
    export_path = os.path.join(self.get_temp_dir(), "module1")

    spec = hub.create_module_spec(self.module_fn)
    spec.export(export_path,
                checkpoint_path=checkpoint_path)

  def testExportModuleSpec_withWrongShape(self):
    # dim=20 in the module does not match the dim=10 checkpoint variables.
    checkpoint_path = self.createCheckpoint(scope="block")
    export_path = os.path.join(self.get_temp_dir(), "module2")

    spec = hub.create_module_spec(lambda: self.module_fn(dim=20))
    with self.assertRaisesRegexp(ValueError, "doesn't match with shape of"):
      spec.export(export_path,
                  checkpoint_path=checkpoint_path,
                  name_transform_fn=lambda x: "block/" + x)

  def testExportModuleSpec_withWrongScope(self):
    # Checkpoint was saved under scope "block2", but names are transformed
    # to "block/", so the module's variables cannot be found.
    checkpoint_path = self.createCheckpoint("block2")
    export_path = os.path.join(self.get_temp_dir(), "module3")

    spec = hub.create_module_spec(self.module_fn)
    with self.assertRaisesRegexp(ValueError, "bias is not found in"):
      spec.export(export_path,
                  checkpoint_path=checkpoint_path,
                  name_transform_fn=lambda x: "block/" + x)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| 39.480226 | 80 | 0.666514 |
ace1837f78d5fee64a882e76035a4aa2300460f3 | 55,289 | py | Python | src/pyjion/dis.py | AlexWaygood/Pyjion | 974bd3cf434fad23fbfa1ea9acf43e3387a5c21f | [
"MIT"
] | null | null | null | src/pyjion/dis.py | AlexWaygood/Pyjion | 974bd3cf434fad23fbfa1ea9acf43e3387a5c21f | [
"MIT"
] | null | null | null | src/pyjion/dis.py | AlexWaygood/Pyjion | 974bd3cf434fad23fbfa1ea9acf43e3387a5c21f | [
"MIT"
] | null | null | null | from dis import get_instructions
from pyjion import dump_il, dump_native, get_offsets, symbols
from collections import namedtuple
from warnings import warn
import struct
__all__ = [
"dis",
"dis_native",
"print_il"
]
# Stack-transition flags for the OPDEF table below, mirroring the categories
# in the CLR's opcode.def.  NOTE(review): the values are powers of two but
# are *summed* per-opcode in the table (e.g. Pop1+Pop1), so a combined value
# is not a clean bitmask -- Pop1+Pop1 (2) equals PopI, for example.
# Pre stack effect
Pop0 = 0      # pops nothing
Pop1 = 1      # pops one value
PopI = 2      # pops a native int
VarPop = 4    # pop count depends on the call signature
PopI4 = 8     # pops an int32
PopI8 = 16    # pops an int64
PopR4 = 32    # pops a float32
PopR8 = 64    # pops a float64
PopRef = 128  # pops an object reference
# Post stack effect
Push0 = 0      # pushes nothing
Push1 = 1      # pushes one value
PushI = 2      # pushes a native int
VarPush = 4    # push count depends on the call signature
PushI4 = 8     # pushes an int32
PushI8 = 16    # pushes an int64
PushR4 = 32    # pushes a float32
PushR8 = 64    # pushes a float64
PushRef = 128  # pushes an object reference
# Operand ("inline") size classes: what follows the opcode byte(s) and how
# print_il decodes it.
# Size
InlineNone = 0            # no operand
ShortInlineVar = 1        # 1-byte variable/argument index
ShortInlineI = 2          # 1-byte signed immediate
ShortInlineR = 3          # float immediate (ldc.r4)
InlineI = 4               # 4-byte signed immediate
InlineI8 = 5              # 8-byte signed immediate
InlineR = 6               # float immediate (ldc.r8)
InlineR8 = 7              # not referenced in this module's visible code
InlineMethod = 8          # 4-byte method token (resolved via the symbol table)
InlineSig = 9             # 4-byte signature token
InlineBrTarget = 10       # 4-byte branch displacement
InlineVar = 11            # 2-byte variable/argument index
InlineType = 12           # type token
InlineField = 13          # 4-byte field token
ShortInlineBrTarget = 14  # 1-byte branch displacement
InlineSwitch = 15         # jump table (not decoded by print_il)
InlineString = 16         # string token
InlineTok = 17            # 4-byte metadata token
# Opcode category (informational; not used by print_il).
# Type
IPrimitive = 1  # basic IL instruction
IMacro = 2      # shorthand encoding of another instruction
IObjModel = 3   # object-model instruction
IInternal = 4   # reserved/prefix encoding
IPrefix = 5     # instruction prefix
# Control-flow behaviour of each opcode (informational; not used by print_il).
NEXT = 1         # falls through
BREAK = 2        # debugger break
CALL = 3         # method call
RETURN = 4       # method return
BRANCH = 5       # unconditional branch
COND_BRANCH = 6  # conditional branch
THROW = 7        # raises an exception
META = 8         # prefix/meta instruction
MOOT = None      # placeholder; not referenced in this module's visible code
# One row of the opcode table: mirrors the OPDEF macro arguments in the
# CLR's opcode.def.
OPDEF = namedtuple("OPDEF", "cee_code name es_effect_pre es_effect_post size type n_bytes first_byte second_byte flow_arg")
# Opcode table transcribed from the CLR's opcode.def: copy + paste the rows and wrap each CEE_* code in quotes.
opcodes = [
OPDEF("CEE_NOP", "nop", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x00, NEXT),
OPDEF("CEE_BREAK", "break", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x01, BREAK),
OPDEF("CEE_LDARG_0", "ldarg.0", Pop0, Push1, InlineNone, IMacro, 1, 0xFF, 0x02, NEXT),
OPDEF("CEE_LDARG_1", "ldarg.1", Pop0, Push1, InlineNone, IMacro, 1, 0xFF, 0x03, NEXT),
OPDEF("CEE_LDARG_2", "ldarg.2", Pop0, Push1, InlineNone, IMacro, 1, 0xFF, 0x04, NEXT),
OPDEF("CEE_LDARG_3", "ldarg.3", Pop0, Push1, InlineNone, IMacro, 1, 0xFF, 0x05, NEXT),
OPDEF("CEE_LDLOC_0", "ldloc.0", Pop0, Push1, InlineNone, IMacro, 1, 0xFF, 0x06, NEXT),
OPDEF("CEE_LDLOC_1", "ldloc.1", Pop0, Push1, InlineNone, IMacro, 1, 0xFF, 0x07, NEXT),
OPDEF("CEE_LDLOC_2", "ldloc.2", Pop0, Push1, InlineNone, IMacro, 1, 0xFF, 0x08, NEXT),
OPDEF("CEE_LDLOC_3", "ldloc.3", Pop0, Push1, InlineNone, IMacro, 1, 0xFF, 0x09, NEXT),
OPDEF("CEE_STLOC_0", "stloc.0", Pop1, Push0, InlineNone, IMacro, 1, 0xFF, 0x0A, NEXT),
OPDEF("CEE_STLOC_1", "stloc.1", Pop1, Push0, InlineNone, IMacro, 1, 0xFF, 0x0B, NEXT),
OPDEF("CEE_STLOC_2", "stloc.2", Pop1, Push0, InlineNone, IMacro, 1, 0xFF, 0x0C, NEXT),
OPDEF("CEE_STLOC_3", "stloc.3", Pop1, Push0, InlineNone, IMacro, 1, 0xFF, 0x0D, NEXT),
OPDEF("CEE_LDARG_S", "ldarg.s", Pop0, Push1, ShortInlineVar, IMacro, 1, 0xFF, 0x0E, NEXT),
OPDEF("CEE_LDARGA_S", "ldarga.s", Pop0, PushI, ShortInlineVar, IMacro, 1, 0xFF, 0x0F, NEXT),
OPDEF("CEE_STARG_S", "starg.s", Pop1, Push0, ShortInlineVar, IMacro, 1, 0xFF, 0x10, NEXT),
OPDEF("CEE_LDLOC_S", "ldloc.s", Pop0, Push1, ShortInlineVar, IMacro, 1, 0xFF, 0x11, NEXT),
OPDEF("CEE_LDLOCA_S", "ldloca.s", Pop0, PushI, ShortInlineVar, IMacro, 1, 0xFF, 0x12, NEXT),
OPDEF("CEE_STLOC_S", "stloc.s", Pop1, Push0, ShortInlineVar, IMacro, 1, 0xFF, 0x13, NEXT),
OPDEF("CEE_LDNULL", "ldnull", Pop0, PushRef, InlineNone, IPrimitive, 1, 0xFF, 0x14, NEXT),
OPDEF("CEE_LDC_I4_M1", "ldc.i4.m1", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x15, NEXT),
OPDEF("CEE_LDC_I4_0", "ldc.i4.0", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x16, NEXT),
OPDEF("CEE_LDC_I4_1", "ldc.i4.1", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x17, NEXT),
OPDEF("CEE_LDC_I4_2", "ldc.i4.2", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x18, NEXT),
OPDEF("CEE_LDC_I4_3", "ldc.i4.3", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x19, NEXT),
OPDEF("CEE_LDC_I4_4", "ldc.i4.4", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x1A, NEXT),
OPDEF("CEE_LDC_I4_5", "ldc.i4.5", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x1B, NEXT),
OPDEF("CEE_LDC_I4_6", "ldc.i4.6", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x1C, NEXT),
OPDEF("CEE_LDC_I4_7", "ldc.i4.7", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x1D, NEXT),
OPDEF("CEE_LDC_I4_8", "ldc.i4.8", Pop0, PushI, InlineNone, IMacro, 1, 0xFF, 0x1E, NEXT),
OPDEF("CEE_LDC_I4_S", "ldc.i4.s", Pop0, PushI, ShortInlineI, IMacro, 1, 0xFF, 0x1F, NEXT),
OPDEF("CEE_LDC_I4", "ldc.i4", Pop0, PushI, InlineI, IPrimitive, 1, 0xFF, 0x20, NEXT),
OPDEF("CEE_LDC_I8", "ldc.i8", Pop0, PushI8, InlineI8, IPrimitive, 1, 0xFF, 0x21, NEXT),
OPDEF("CEE_LDC_R4", "ldc.r4", Pop0, PushR4, ShortInlineR, IPrimitive, 1, 0xFF, 0x22, NEXT),
OPDEF("CEE_LDC_R8", "ldc.r8", Pop0, PushR8, InlineR, IPrimitive, 1, 0xFF, 0x23, NEXT),
OPDEF("CEE_UNUSED49", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x24, NEXT),
OPDEF("CEE_DUP", "dup", Pop1, Push1+Push1, InlineNone, IPrimitive, 1, 0xFF, 0x25, NEXT),
OPDEF("CEE_POP", "pop", Pop1, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x26, NEXT),
OPDEF("CEE_JMP", "jmp", Pop0, Push0, InlineMethod, IPrimitive, 1, 0xFF, 0x27, CALL),
OPDEF("CEE_CALL", "call", VarPop, VarPush, InlineMethod, IPrimitive, 1, 0xFF, 0x28, CALL),
OPDEF("CEE_CALLI", "calli", VarPop, VarPush, InlineSig, IPrimitive, 1, 0xFF, 0x29, CALL),
OPDEF("CEE_RET", "ret", VarPop, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x2A, RETURN),
OPDEF("CEE_BR_S", "br.s", Pop0, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x2B, BRANCH),
OPDEF("CEE_BRFALSE_S", "brfalse.s", PopI, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x2C, COND_BRANCH),
OPDEF("CEE_BRTRUE_S", "brtrue.s", PopI, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x2D, COND_BRANCH),
OPDEF("CEE_BEQ_S", "beq.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x2E, COND_BRANCH),
OPDEF("CEE_BGE_S", "bge.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x2F, COND_BRANCH),
OPDEF("CEE_BGT_S", "bgt.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x30, COND_BRANCH),
OPDEF("CEE_BLE_S", "ble.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x31, COND_BRANCH),
OPDEF("CEE_BLT_S", "blt.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x32, COND_BRANCH),
OPDEF("CEE_BNE_UN_S", "bne.un.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x33, COND_BRANCH),
OPDEF("CEE_BGE_UN_S", "bge.un.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x34, COND_BRANCH),
OPDEF("CEE_BGT_UN_S", "bgt.un.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x35, COND_BRANCH),
OPDEF("CEE_BLE_UN_S", "ble.un.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x36, COND_BRANCH),
OPDEF("CEE_BLT_UN_S", "blt.un.s", Pop1+Pop1, Push0, ShortInlineBrTarget,IMacro, 1, 0xFF, 0x37, COND_BRANCH),
OPDEF("CEE_BR", "br", Pop0, Push0, InlineBrTarget, IPrimitive, 1, 0xFF, 0x38, BRANCH),
OPDEF("CEE_BRFALSE", "brfalse", PopI, Push0, InlineBrTarget, IPrimitive, 1, 0xFF, 0x39, COND_BRANCH),
OPDEF("CEE_BRTRUE", "brtrue", PopI, Push0, InlineBrTarget, IPrimitive, 1, 0xFF, 0x3A, COND_BRANCH),
OPDEF("CEE_BEQ", "beq", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x3B, COND_BRANCH),
OPDEF("CEE_BGE", "bge", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x3C, COND_BRANCH),
OPDEF("CEE_BGT", "bgt", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x3D, COND_BRANCH),
OPDEF("CEE_BLE", "ble", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x3E, COND_BRANCH),
OPDEF("CEE_BLT", "blt", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x3F, COND_BRANCH),
OPDEF("CEE_BNE_UN", "bne.un", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x40, COND_BRANCH),
OPDEF("CEE_BGE_UN", "bge.un", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x41, COND_BRANCH),
OPDEF("CEE_BGT_UN", "bgt.un", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x42, COND_BRANCH),
OPDEF("CEE_BLE_UN", "ble.un", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x43, COND_BRANCH),
OPDEF("CEE_BLT_UN", "blt.un", Pop1+Pop1, Push0, InlineBrTarget, IMacro, 1, 0xFF, 0x44, COND_BRANCH),
OPDEF("CEE_SWITCH", "switch", PopI, Push0, InlineSwitch, IPrimitive, 1, 0xFF, 0x45, COND_BRANCH),
OPDEF("CEE_LDIND_I1", "ldind.i1", PopI, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x46, NEXT),
OPDEF("CEE_LDIND_U1", "ldind.u1", PopI, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x47, NEXT),
OPDEF("CEE_LDIND_I2", "ldind.i2", PopI, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x48, NEXT),
OPDEF("CEE_LDIND_U2", "ldind.u2", PopI, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x49, NEXT),
OPDEF("CEE_LDIND_I4", "ldind.i4", PopI, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x4A, NEXT),
OPDEF("CEE_LDIND_U4", "ldind.u4", PopI, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x4B, NEXT),
OPDEF("CEE_LDIND_I8", "ldind.i8", PopI, PushI8, InlineNone, IPrimitive, 1, 0xFF, 0x4C, NEXT),
OPDEF("CEE_LDIND_I", "ldind.i", PopI, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x4D, NEXT),
OPDEF("CEE_LDIND_R4", "ldind.r4", PopI, PushR4, InlineNone, IPrimitive, 1, 0xFF, 0x4E, NEXT),
OPDEF("CEE_LDIND_R8", "ldind.r8", PopI, PushR8, InlineNone, IPrimitive, 1, 0xFF, 0x4F, NEXT),
OPDEF("CEE_LDIND_REF", "ldind.ref", PopI, PushRef, InlineNone, IPrimitive, 1, 0xFF, 0x50, NEXT),
OPDEF("CEE_STIND_REF", "stind.ref", PopI+PopI, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x51, NEXT),
OPDEF("CEE_STIND_I1", "stind.i1", PopI+PopI, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x52, NEXT),
OPDEF("CEE_STIND_I2", "stind.i2", PopI+PopI, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x53, NEXT),
OPDEF("CEE_STIND_I4", "stind.i4", PopI+PopI, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x54, NEXT),
OPDEF("CEE_STIND_I8", "stind.i8", PopI+PopI8, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x55, NEXT),
OPDEF("CEE_STIND_R4", "stind.r4", PopI+PopR4, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x56, NEXT),
OPDEF("CEE_STIND_R8", "stind.r8", PopI+PopR8, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x57, NEXT),
OPDEF("CEE_ADD", "add", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x58, NEXT),
OPDEF("CEE_SUB", "sub", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x59, NEXT),
OPDEF("CEE_MUL", "mul", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x5A, NEXT),
OPDEF("CEE_DIV", "div", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x5B, NEXT),
OPDEF("CEE_DIV_UN", "div.un", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x5C, NEXT),
OPDEF("CEE_REM", "rem", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x5D, NEXT),
OPDEF("CEE_REM_UN", "rem.un", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x5E, NEXT),
OPDEF("CEE_AND", "and", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x5F, NEXT),
OPDEF("CEE_OR", "or", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x60, NEXT),
OPDEF("CEE_XOR", "xor", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x61, NEXT),
OPDEF("CEE_SHL", "shl", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x62, NEXT),
OPDEF("CEE_SHR", "shr", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x63, NEXT),
OPDEF("CEE_SHR_UN", "shr.un", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x64, NEXT),
OPDEF("CEE_NEG", "neg", Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x65, NEXT),
OPDEF("CEE_NOT", "not", Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x66, NEXT),
OPDEF("CEE_CONV_I1", "conv.i1", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x67, NEXT),
OPDEF("CEE_CONV_I2", "conv.i2", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x68, NEXT),
OPDEF("CEE_CONV_I4", "conv.i4", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x69, NEXT),
OPDEF("CEE_CONV_I8", "conv.i8", Pop1, PushI8, InlineNone, IPrimitive, 1, 0xFF, 0x6A, NEXT),
OPDEF("CEE_CONV_R4", "conv.r4", Pop1, PushR4, InlineNone, IPrimitive, 1, 0xFF, 0x6B, NEXT),
OPDEF("CEE_CONV_R8", "conv.r8", Pop1, PushR8, InlineNone, IPrimitive, 1, 0xFF, 0x6C, NEXT),
OPDEF("CEE_CONV_U4", "conv.u4", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x6D, NEXT),
OPDEF("CEE_CONV_U8", "conv.u8", Pop1, PushI8, InlineNone, IPrimitive, 1, 0xFF, 0x6E, NEXT),
OPDEF("CEE_CALLVIRT", "callvirt", VarPop, VarPush, InlineMethod, IObjModel, 1, 0xFF, 0x6F, CALL),
OPDEF("CEE_CPOBJ", "cpobj", PopI+PopI, Push0, InlineType, IObjModel, 1, 0xFF, 0x70, NEXT),
OPDEF("CEE_LDOBJ", "ldobj", PopI, Push1, InlineType, IObjModel, 1, 0xFF, 0x71, NEXT),
OPDEF("CEE_LDSTR", "ldstr", Pop0, PushRef, InlineString, IObjModel, 1, 0xFF, 0x72, NEXT),
OPDEF("CEE_NEWOBJ", "newobj", VarPop, PushRef, InlineMethod, IObjModel, 1, 0xFF, 0x73, CALL),
OPDEF("CEE_CASTCLASS", "castclass", PopRef, PushRef, InlineType, IObjModel, 1, 0xFF, 0x74, NEXT),
OPDEF("CEE_ISINST", "isinst", PopRef, PushI, InlineType, IObjModel, 1, 0xFF, 0x75, NEXT),
OPDEF("CEE_CONV_R_UN", "conv.r.un", Pop1, PushR8, InlineNone, IPrimitive, 1, 0xFF, 0x76, NEXT),
OPDEF("CEE_UNUSED58", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x77, NEXT),
OPDEF("CEE_UNUSED1", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0x78, NEXT),
OPDEF("CEE_UNBOX", "unbox", PopRef, PushI, InlineType, IPrimitive, 1, 0xFF, 0x79, NEXT),
OPDEF("CEE_THROW", "throw", PopRef, Push0, InlineNone, IObjModel, 1, 0xFF, 0x7A, THROW),
OPDEF("CEE_LDFLD", "ldfld", PopRef, Push1, InlineField, IObjModel, 1, 0xFF, 0x7B, NEXT),
OPDEF("CEE_LDFLDA", "ldflda", PopRef, PushI, InlineField, IObjModel, 1, 0xFF, 0x7C, NEXT),
OPDEF("CEE_STFLD", "stfld", PopRef+Pop1, Push0, InlineField, IObjModel, 1, 0xFF, 0x7D, NEXT),
OPDEF("CEE_LDSFLD", "ldsfld", Pop0, Push1, InlineField, IObjModel, 1, 0xFF, 0x7E, NEXT),
OPDEF("CEE_LDSFLDA", "ldsflda", Pop0, PushI, InlineField, IObjModel, 1, 0xFF, 0x7F, NEXT),
OPDEF("CEE_STSFLD", "stsfld", Pop1, Push0, InlineField, IObjModel, 1, 0xFF, 0x80, NEXT),
OPDEF("CEE_STOBJ", "stobj", PopI+Pop1, Push0, InlineType, IPrimitive, 1, 0xFF, 0x81, NEXT),
OPDEF("CEE_CONV_OVF_I1_UN", "conv.ovf.i1.un", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x82, NEXT),
OPDEF("CEE_CONV_OVF_I2_UN", "conv.ovf.i2.un", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x83, NEXT),
OPDEF("CEE_CONV_OVF_I4_UN", "conv.ovf.i4.un", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x84, NEXT),
OPDEF("CEE_CONV_OVF_I8_UN", "conv.ovf.i8.un", Pop1, PushI8, InlineNone, IPrimitive, 1, 0xFF, 0x85, NEXT),
OPDEF("CEE_CONV_OVF_U1_UN", "conv.ovf.u1.un", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x86, NEXT),
OPDEF("CEE_CONV_OVF_U2_UN", "conv.ovf.u2.un", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x87, NEXT),
OPDEF("CEE_CONV_OVF_U4_UN", "conv.ovf.u4.un", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x88, NEXT),
OPDEF("CEE_CONV_OVF_U8_UN", "conv.ovf.u8.un", Pop1, PushI8, InlineNone, IPrimitive, 1, 0xFF, 0x89, NEXT),
OPDEF("CEE_CONV_OVF_I_UN", "conv.ovf.i.un", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x8A, NEXT),
OPDEF("CEE_CONV_OVF_U_UN", "conv.ovf.u.un", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0x8B, NEXT),
OPDEF("CEE_BOX", "box", Pop1, PushRef, InlineType, IPrimitive, 1, 0xFF, 0x8C, NEXT),
OPDEF("CEE_NEWARR", "newarr", PopI, PushRef, InlineType, IObjModel, 1, 0xFF, 0x8D, NEXT),
OPDEF("CEE_LDLEN", "ldlen", PopRef, PushI, InlineNone, IObjModel, 1, 0xFF, 0x8E, NEXT),
OPDEF("CEE_LDELEMA", "ldelema", PopRef+PopI, PushI, InlineType, IObjModel, 1, 0xFF, 0x8F, NEXT),
OPDEF("CEE_LDELEM_I1", "ldelem.i1", PopRef+PopI, PushI, InlineNone, IObjModel, 1, 0xFF, 0x90, NEXT),
OPDEF("CEE_LDELEM_U1", "ldelem.u1", PopRef+PopI, PushI, InlineNone, IObjModel, 1, 0xFF, 0x91, NEXT),
OPDEF("CEE_LDELEM_I2", "ldelem.i2", PopRef+PopI, PushI, InlineNone, IObjModel, 1, 0xFF, 0x92, NEXT),
OPDEF("CEE_LDELEM_U2", "ldelem.u2", PopRef+PopI, PushI, InlineNone, IObjModel, 1, 0xFF, 0x93, NEXT),
OPDEF("CEE_LDELEM_I4", "ldelem.i4", PopRef+PopI, PushI, InlineNone, IObjModel, 1, 0xFF, 0x94, NEXT),
OPDEF("CEE_LDELEM_U4", "ldelem.u4", PopRef+PopI, PushI, InlineNone, IObjModel, 1, 0xFF, 0x95, NEXT),
OPDEF("CEE_LDELEM_I8", "ldelem.i8", PopRef+PopI, PushI8, InlineNone, IObjModel, 1, 0xFF, 0x96, NEXT),
OPDEF("CEE_LDELEM_I", "ldelem.i", PopRef+PopI, PushI, InlineNone, IObjModel, 1, 0xFF, 0x97, NEXT),
OPDEF("CEE_LDELEM_R4", "ldelem.r4", PopRef+PopI, PushR4, InlineNone, IObjModel, 1, 0xFF, 0x98, NEXT),
OPDEF("CEE_LDELEM_R8", "ldelem.r8", PopRef+PopI, PushR8, InlineNone, IObjModel, 1, 0xFF, 0x99, NEXT),
OPDEF("CEE_LDELEM_REF", "ldelem.ref", PopRef+PopI, PushRef, InlineNone, IObjModel, 1, 0xFF, 0x9A, NEXT),
OPDEF("CEE_STELEM_I", "stelem.i", PopRef+PopI+PopI, Push0, InlineNone, IObjModel, 1, 0xFF, 0x9B, NEXT),
OPDEF("CEE_STELEM_I1", "stelem.i1", PopRef+PopI+PopI, Push0, InlineNone, IObjModel, 1, 0xFF, 0x9C, NEXT),
OPDEF("CEE_STELEM_I2", "stelem.i2", PopRef+PopI+PopI, Push0, InlineNone, IObjModel, 1, 0xFF, 0x9D, NEXT),
OPDEF("CEE_STELEM_I4", "stelem.i4", PopRef+PopI+PopI, Push0, InlineNone, IObjModel, 1, 0xFF, 0x9E, NEXT),
OPDEF("CEE_STELEM_I8", "stelem.i8", PopRef+PopI+PopI8, Push0, InlineNone, IObjModel, 1, 0xFF, 0x9F, NEXT),
OPDEF("CEE_STELEM_R4", "stelem.r4", PopRef+PopI+PopR4, Push0, InlineNone, IObjModel, 1, 0xFF, 0xA0, NEXT),
OPDEF("CEE_STELEM_R8", "stelem.r8", PopRef+PopI+PopR8, Push0, InlineNone, IObjModel, 1, 0xFF, 0xA1, NEXT),
OPDEF("CEE_STELEM_REF", "stelem.ref", PopRef+PopI+PopRef, Push0, InlineNone, IObjModel, 1, 0xFF, 0xA2, NEXT),
OPDEF("CEE_UNUSED2", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xA3, NEXT),
OPDEF("CEE_UNUSED3", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xA4, NEXT),
OPDEF("CEE_UNUSED4", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xA5, NEXT),
OPDEF("CEE_UNUSED5", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xA6, NEXT),
OPDEF("CEE_UNUSED6", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xA7, NEXT),
OPDEF("CEE_UNUSED7", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xA8, NEXT),
OPDEF("CEE_UNUSED8", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xA9, NEXT),
OPDEF("CEE_UNUSED9", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xAA, NEXT),
OPDEF("CEE_UNUSED10", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xAB, NEXT),
OPDEF("CEE_UNUSED11", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xAC, NEXT),
OPDEF("CEE_UNUSED12", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xAD, NEXT),
OPDEF("CEE_UNUSED13", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xAE, NEXT),
OPDEF("CEE_UNUSED14", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xAF, NEXT),
OPDEF("CEE_UNUSED15", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xB0, NEXT),
OPDEF("CEE_UNUSED16", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xB1, NEXT),
OPDEF("CEE_UNUSED17", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xB2, NEXT),
OPDEF("CEE_CONV_OVF_I1", "conv.ovf.i1", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xB3, NEXT),
OPDEF("CEE_CONV_OVF_U1", "conv.ovf.u1", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xB4, NEXT),
OPDEF("CEE_CONV_OVF_I2", "conv.ovf.i2", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xB5, NEXT),
OPDEF("CEE_CONV_OVF_U2", "conv.ovf.u2", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xB6, NEXT),
OPDEF("CEE_CONV_OVF_I4", "conv.ovf.i4", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xB7, NEXT),
OPDEF("CEE_CONV_OVF_U4", "conv.ovf.u4", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xB8, NEXT),
OPDEF("CEE_CONV_OVF_I8", "conv.ovf.i8", Pop1, PushI8, InlineNone, IPrimitive, 1, 0xFF, 0xB9, NEXT),
OPDEF("CEE_CONV_OVF_U8", "conv.ovf.u8", Pop1, PushI8, InlineNone, IPrimitive, 1, 0xFF, 0xBA, NEXT),
OPDEF("CEE_UNUSED50", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xBB, NEXT),
OPDEF("CEE_UNUSED18", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xBC, NEXT),
OPDEF("CEE_UNUSED19", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xBD, NEXT),
OPDEF("CEE_UNUSED20", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xBE, NEXT),
OPDEF("CEE_UNUSED21", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xBF, NEXT),
OPDEF("CEE_UNUSED22", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xC0, NEXT),
OPDEF("CEE_UNUSED23", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xC1, NEXT),
OPDEF("CEE_REFANYVAL", "refanyval", Pop1, PushI, InlineType, IPrimitive, 1, 0xFF, 0xC2, NEXT),
OPDEF("CEE_CKFINITE", "ckfinite", Pop1, PushR8, InlineNone, IPrimitive, 1, 0xFF, 0xC3, NEXT),
OPDEF("CEE_UNUSED24", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xC4, NEXT),
OPDEF("CEE_UNUSED25", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xC5, NEXT),
OPDEF("CEE_MKREFANY", "mkrefany", PopI, Push1, InlineType, IPrimitive, 1, 0xFF, 0xC6, NEXT),
OPDEF("CEE_UNUSED59", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xC7, NEXT),
OPDEF("CEE_UNUSED60", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xC8, NEXT),
OPDEF("CEE_UNUSED61", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xC9, NEXT),
OPDEF("CEE_UNUSED62", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xCA, NEXT),
OPDEF("CEE_UNUSED63", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xCB, NEXT),
OPDEF("CEE_UNUSED64", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xCC, NEXT),
OPDEF("CEE_UNUSED65", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xCD, NEXT),
OPDEF("CEE_UNUSED66", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xCE, NEXT),
OPDEF("CEE_UNUSED67", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xCF, NEXT),
OPDEF("CEE_LDTOKEN", "ldtoken", Pop0, PushI, InlineTok, IPrimitive, 1, 0xFF, 0xD0, NEXT),
OPDEF("CEE_CONV_U2", "conv.u2", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xD1, NEXT),
OPDEF("CEE_CONV_U1", "conv.u1", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xD2, NEXT),
OPDEF("CEE_CONV_I", "conv.i", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xD3, NEXT),
OPDEF("CEE_CONV_OVF_I", "conv.ovf.i", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xD4, NEXT),
OPDEF("CEE_CONV_OVF_U", "conv.ovf.u", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xD5, NEXT),
OPDEF("CEE_ADD_OVF", "add.ovf", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0xD6, NEXT),
OPDEF("CEE_ADD_OVF_UN", "add.ovf.un", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0xD7, NEXT),
OPDEF("CEE_MUL_OVF", "mul.ovf", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0xD8, NEXT),
OPDEF("CEE_MUL_OVF_UN", "mul.ovf.un", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0xD9, NEXT),
OPDEF("CEE_SUB_OVF", "sub.ovf", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0xDA, NEXT),
OPDEF("CEE_SUB_OVF_UN", "sub.ovf.un", Pop1+Pop1, Push1, InlineNone, IPrimitive, 1, 0xFF, 0xDB, NEXT),
OPDEF("CEE_ENDFINALLY", "endfinally", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xDC, RETURN),
OPDEF("CEE_LEAVE", "leave", Pop0, Push0, InlineBrTarget, IPrimitive, 1, 0xFF, 0xDD, BRANCH),
OPDEF("CEE_LEAVE_S", "leave.s", Pop0, Push0, ShortInlineBrTarget,IPrimitive, 1, 0xFF, 0xDE, BRANCH),
OPDEF("CEE_STIND_I", "stind.i", PopI+PopI, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xDF, NEXT),
OPDEF("CEE_CONV_U", "conv.u", Pop1, PushI, InlineNone, IPrimitive, 1, 0xFF, 0xE0, NEXT),
OPDEF("CEE_UNUSED26", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE1, NEXT),
OPDEF("CEE_UNUSED27", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE2, NEXT),
OPDEF("CEE_UNUSED28", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE3, NEXT),
OPDEF("CEE_UNUSED29", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE4, NEXT),
OPDEF("CEE_UNUSED30", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE5, NEXT),
OPDEF("CEE_UNUSED31", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE6, NEXT),
OPDEF("CEE_UNUSED32", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE7, NEXT),
OPDEF("CEE_UNUSED33", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE8, NEXT),
OPDEF("CEE_UNUSED34", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xE9, NEXT),
OPDEF("CEE_UNUSED35", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xEA, NEXT),
OPDEF("CEE_UNUSED36", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xEB, NEXT),
OPDEF("CEE_UNUSED37", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xEC, NEXT),
OPDEF("CEE_UNUSED38", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xED, NEXT),
OPDEF("CEE_UNUSED39", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xEE, NEXT),
OPDEF("CEE_UNUSED40", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xEF, NEXT),
OPDEF("CEE_UNUSED41", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xF0, NEXT),
OPDEF("CEE_UNUSED42", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xF1, NEXT),
OPDEF("CEE_UNUSED43", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xF2, NEXT),
OPDEF("CEE_UNUSED44", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xF3, NEXT),
OPDEF("CEE_UNUSED45", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xF4, NEXT),
OPDEF("CEE_UNUSED46", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xF5, NEXT),
OPDEF("CEE_UNUSED47", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xF6, NEXT),
OPDEF("CEE_UNUSED48", "unused", Pop0, Push0, InlineNone, IPrimitive, 1, 0xFF, 0xF7, NEXT),
OPDEF("CEE_PREFIX7", "prefix7", Pop0, Push0, InlineNone, IInternal, 1, 0xFF, 0xF8, META),
OPDEF("CEE_PREFIX6", "prefix6", Pop0, Push0, InlineNone, IInternal, 1, 0xFF, 0xF9, META),
OPDEF("CEE_PREFIX5", "prefix5", Pop0, Push0, InlineNone, IInternal, 1, 0xFF, 0xFA, META),
OPDEF("CEE_PREFIX4", "prefix4", Pop0, Push0, InlineNone, IInternal, 1, 0xFF, 0xFB, META),
OPDEF("CEE_PREFIX3", "prefix3", Pop0, Push0, InlineNone, IInternal, 1, 0xFF, 0xFC, META),
OPDEF("CEE_PREFIX2", "prefix2", Pop0, Push0, InlineNone, IInternal, 1, 0xFF, 0xFD, META),
OPDEF("CEE_PREFIX1", "prefix1", Pop0, Push0, InlineNone, IInternal, 1, 0xFF, 0xFE, META),
OPDEF("CEE_PREFIXREF", "prefixref", Pop0, Push0, InlineNone, IInternal, 1, 0xFF, 0xFF, META),
OPDEF("CEE_ARGLIST", "arglist", Pop0, PushI, InlineNone, IPrimitive, 2, 0xFE, 0x00, NEXT),
OPDEF("CEE_CEQ", "ceq", Pop1+Pop1, PushI, InlineNone, IPrimitive, 2, 0xFE, 0x01, NEXT),
OPDEF("CEE_CGT", "cgt", Pop1+Pop1, PushI, InlineNone, IPrimitive, 2, 0xFE, 0x02, NEXT),
OPDEF("CEE_CGT_UN", "cgt.un", Pop1+Pop1, PushI, InlineNone, IPrimitive, 2, 0xFE, 0x03, NEXT),
OPDEF("CEE_CLT", "clt", Pop1+Pop1, PushI, InlineNone, IPrimitive, 2, 0xFE, 0x04, NEXT),
OPDEF("CEE_CLT_UN", "clt.un", Pop1+Pop1, PushI, InlineNone, IPrimitive, 2, 0xFE, 0x05, NEXT),
OPDEF("CEE_LDFTN", "ldftn", Pop0, PushI, InlineMethod, IPrimitive, 2, 0xFE, 0x06, NEXT),
OPDEF("CEE_LDVIRTFTN", "ldvirtftn", PopRef, PushI, InlineMethod, IPrimitive, 2, 0xFE, 0x07, NEXT),
OPDEF("CEE_UNUSED56", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x08, NEXT),
OPDEF("CEE_LDARG", "ldarg", Pop0, Push1, InlineVar, IPrimitive, 2, 0xFE, 0x09, NEXT),
OPDEF("CEE_LDARGA", "ldarga", Pop0, PushI, InlineVar, IPrimitive, 2, 0xFE, 0x0A, NEXT),
OPDEF("CEE_STARG", "starg", Pop1, Push0, InlineVar, IPrimitive, 2, 0xFE, 0x0B, NEXT),
OPDEF("CEE_LDLOC", "ldloc", Pop0, Push1, InlineVar, IPrimitive, 2, 0xFE, 0x0C, NEXT),
OPDEF("CEE_LDLOCA", "ldloca", Pop0, PushI, InlineVar, IPrimitive, 2, 0xFE, 0x0D, NEXT),
OPDEF("CEE_STLOC", "stloc", Pop1, Push0, InlineVar, IPrimitive, 2, 0xFE, 0x0E, NEXT),
OPDEF("CEE_LOCALLOC", "localloc", PopI, PushI, InlineNone, IPrimitive, 2, 0xFE, 0x0F, NEXT),
OPDEF("CEE_UNUSED57", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x10, NEXT),
OPDEF("CEE_ENDFILTER", "endfilter", PopI, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x11, RETURN),
OPDEF("CEE_UNALIGNED", "unaligned.", Pop0, Push0, ShortInlineI, IPrefix, 2, 0xFE, 0x12, META),
OPDEF("CEE_VOLATILE", "volatile.", Pop0, Push0, InlineNone, IPrefix, 2, 0xFE, 0x13, META),
OPDEF("CEE_TAILCALL", "tail.", Pop0, Push0, InlineNone, IPrefix, 2, 0xFE, 0x14, META),
OPDEF("CEE_INITOBJ", "initobj", PopI, Push0, InlineType, IObjModel, 2, 0xFE, 0x15, NEXT),
OPDEF("CEE_UNUSED68", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x16, NEXT),
OPDEF("CEE_CPBLK", "cpblk", PopI+PopI+PopI, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x17, NEXT),
OPDEF("CEE_INITBLK", "initblk", PopI+PopI+PopI, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x18, NEXT),
OPDEF("CEE_UNUSED69", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x19, NEXT),
OPDEF("CEE_RETHROW", "rethrow", Pop0, Push0, InlineNone, IObjModel, 2, 0xFE, 0x1A, THROW),
OPDEF("CEE_UNUSED51", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x1B, NEXT),
OPDEF("CEE_SIZEOF", "sizeof", Pop0, PushI, InlineType, IPrimitive, 2, 0xFE, 0x1C, NEXT),
OPDEF("CEE_REFANYTYPE", "refanytype", Pop1, PushI, InlineNone, IPrimitive, 2, 0xFE, 0x1D, NEXT),
OPDEF("CEE_UNUSED52", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x1E, NEXT),
OPDEF("CEE_UNUSED53", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x1F, NEXT),
OPDEF("CEE_UNUSED54", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x20, NEXT),
OPDEF("CEE_UNUSED55", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x21, NEXT),
OPDEF("CEE_UNUSED70", "unused", Pop0, Push0, InlineNone, IPrimitive, 2, 0xFE, 0x22, NEXT),
]
# Dispatch table keyed on an instruction's leading byte, as looked up by
# print_il.  Single-byte opcodes (first_byte == 0xFF) are keyed on their
# opcode byte; two-byte opcodes on the sum of their two bytes.
# NOTE(review): the summed keys of 0xFE-prefixed opcodes overlap the
# single-byte range (0xFE + 0x00 == 0xFE, the key of CEE_PREFIX1), so later
# rows shadow earlier ones exactly as the original loop did -- confirm that
# only single-byte opcodes are ever emitted.
opcode_map = {
    (op.second_byte if op.first_byte == 0xFF else op.first_byte + op.second_byte): op
    for op in opcodes
}
def print_il(il: bytearray, symbols, offsets=None, bytecodes=None, print_pc=True) -> None:
    """
    Print a sequence of ECMA-335 CIL instructions to stdout.

    :param il: A bytearray of ECMA 335 CIL
    :param symbols: Mapping of method tokens to names, used to resolve call targets
    :param offsets: A dictionary of Python bytecode offsets
    :param bytecodes: The dictionary of Python bytecode instructions
    :param print_pc: Flag to include the PC offsets in the print
    """
    i = iter(il)

    def take(n):
        # Consume n operand bytes.  A StopIteration raised by next() inside a
        # list comprehension propagates (unlike in a generator expression), so
        # truncated input still terminates the dump via the handler below,
        # exactly like the bare next() calls this replaces.
        return bytes([next(i) for _ in range(n)])

    def take_int(n):
        # Little-endian signed integer operand of n bytes.
        return int.from_bytes(take(n), byteorder='little', signed=True)

    try:
        pc = 0
        while True:
            # Emit the originating Python instruction as a comment when this
            # IL offset maps back to one.
            if offsets and bytecodes:
                for py_offset, il_offset, native_offset, offset_type in offsets:
                    if il_offset == pc and offset_type == 'instruction':
                        try:
                            instruction = bytecodes[py_offset]
                            print(f'// {instruction.offset} {instruction.opname} - {instruction.arg} ({instruction.argval})')
                        except KeyError:
                            # Report the specific Python offset that had no
                            # matching instruction (previously the whole
                            # offsets table was dumped, which was unreadable).
                            warn("Invalid offset {0}".format(py_offset))
            first = next(i)
            if first == 0 and pc == 0:
                # NOTE(review): a leading 0x00 is treated as a fat method
                # header, but 0x00 is also CEE_NOP -- confirm pyjion never
                # emits nop as the first instruction.
                raise NotImplementedError(f"CorILMethod_FatFormat not yet supported")
            # NOTE(review): lookup is by a single byte, so two-byte
            # (0xFE-prefixed) opcodes collide with single-byte entries in
            # opcode_map -- assumes only single-byte opcodes are emitted.
            op = opcode_map[first]
            pc_label = f"IL_{pc:04x}: " if print_pc else ""
            if op.size == InlineNone:
                print(f"{pc_label}{op.name}")
                pc += 1
            elif op.size in (ShortInlineBrTarget, ShortInlineVar, ShortInlineI):
                # One-byte signed operand.
                print(f"{pc_label}{op.name} {take_int(1)}")
                pc += 2
            elif op.size in (InlineBrTarget, InlineField, InlineI, InlineSig, InlineTok):
                # Four-byte signed operand (branch target, token or immediate).
                print(f"{pc_label}{op.name} {take_int(4)}")
                pc += 5
            elif op.size == ShortInlineR:
                # ldc.r4 carries a float32 operand (ECMA-335 Partition III);
                # previously the raw bits were printed as a signed integer.
                [target] = struct.unpack('<f', take(4))
                print(f"{pc_label}{op.name} {target}")
                pc += 5
            elif op.size == InlineR:
                # ldc.r8 carries a float64 operand (ECMA-335 Partition III);
                # previously only 4 bytes were read, desyncing the stream.
                [target] = struct.unpack('<d', take(8))
                print(f"{pc_label}{op.name} {target}")
                pc += 9
            elif op.size == InlineI8:
                print(f"{pc_label}{op.name} {take_int(8)}")
                pc += 9
            elif op.size == InlineMethod:
                # Method token: resolve to a name through the symbol table
                # when known, otherwise print the raw token.
                token = take_int(4)
                print(f"{pc_label}{op.name} {symbols.get(token, token)}")
                pc += 5
            elif op.size == InlineString:
                # NOTE(review): decodes the 4 operand bytes directly as UTF-8;
                # in standard CIL this is a string *token* -- confirm pyjion
                # inlines the text here.
                print(f"{pc_label}{op.name} {take(4).decode('utf-8')}")
                pc += 5
            elif op.size == InlineVar:
                # Two-byte signed operand.
                print(f"{pc_label}{op.name} {take_int(2)}")
                pc += 3
            else:
                raise NotImplementedError(f"Haven't implemented IL Opcode with size {op.size}")
    except StopIteration:
        # End of the IL stream (possibly mid-operand for truncated input).
        pass
def dis(f, include_offsets=False, print_pc=True):
    """
    Disassemble a code object into IL.

    :param f: The compiled function or code object
    :param include_offsets: Flag to print python bytecode offsets as comments
    :param print_pc: Flag to print the memory address of each instruction
    """
    il = dump_il(f)
    if not il:
        print("No IL for this function, it may not have compiled correctly.")
        return
    extra_kwargs = {}
    if include_offsets:
        # Map each bytecode offset to its Instruction so print_il can
        # interleave the Python bytecodes with the IL stream as comments.
        extra_kwargs["bytecodes"] = {ins.offset: ins for ins in get_instructions(f)}
        extra_kwargs["offsets"] = get_offsets(f)
    print_il(il, print_pc=print_pc, symbols=symbols(f), **extra_kwargs)
def dis_native(f, include_offsets=False, print_pc=True) -> None:
    """
    Disassemble and print the JITed code object's native machine code.

    :param f: The compiled function or code object
    :param include_offsets: Flag to print python bytecode offsets as comments
    :param print_pc: Flag to print the memory address of each instruction
    :raises ModuleNotFoundError: if distorm3 or rich are not installed
    """
    try:
        import distorm3
        from rich.console import Console
        from rich.syntax import Syntax
    except ImportError as err:
        # Chain the original ImportError so the failing package is visible.
        raise ModuleNotFoundError("Install distorm3 and rich before disassembling native functions") from err
    native = dump_native(f)
    if not native:
        print("No native code for this function, it may not have compiled correctly")
        return
    symbol_table = symbols(f)
    if include_offsets:
        python_instructions = {i.offset: i for i in get_instructions(f)}
        jit_offsets = get_offsets(f)
    else:
        python_instructions = {}
        jit_offsets = []
    code, _, position = native
    # distorm3 yields (offset, size, instruction-text, hex) tuples; only the
    # offset and the textual instruction are needed here.
    iterable = distorm3.DecodeGenerator(position, bytes(code), distorm3.Decode64Bits)
    disassembled = [(offset, instruction) for (offset, _, instruction, _) in iterable]
    console = Console()
    offsets = [offset for (offset, _) in disassembled]
    instructions = [instruction for (_, instruction) in disassembled]
    # Highlight the whole listing in one pass, then re-split it per line so
    # each line can be interleaved with PC/offset annotations.
    syntax = Syntax("", lexer_name="nasm", theme="ansi_dark")
    highlighted_lines = syntax.highlight("\n".join(instructions)).split("\n")
    for (offset, line) in zip(offsets, highlighted_lines):
        # See if this is the offset of a matching Python instruction
        if include_offsets:
            for py_offset, _, native_offset, offset_type in jit_offsets:
                if native_offset > 0 and (position + native_offset) == offset and offset_type == "instruction":
                    try:
                        instruction = python_instructions[py_offset]
                        console.print(f'; {instruction.offset} {instruction.opname} - {instruction.arg} ({instruction.argval})', style="dim")
                    except KeyError:
                        # Fix: report the single unmatched bytecode offset,
                        # not the entire native-offset list.
                        warn("Invalid offset {0}".format(py_offset))
        if print_pc:
            console.print("[grey]%.8x" % offset, style="dim", end=" ")
        console.print(line, end="")
        if include_offsets:
            for py_offset, _, native_offset, offset_type in jit_offsets:
                if native_offset > 0 and (position + native_offset) == offset and offset_type == "call":
                    try:
                        console.print(" [grey]; %s" % symbol_table[py_offset], style="dim", end="")
                    except KeyError:
                        # Fix: same as above — name the offending offset.
                        warn("Invalid offset {0}".format(py_offset))
    console.print('')  # force line-sep
| 96.490401 | 161 | 0.45481 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.