hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f41cabd57be3a375096fd256d70cfe5f66f6b2 | 2,615 | py | Python | pyroms_toolbox/pyroms_toolbox/__init__.py | ChuningWang/pyroms | 4f3773d39683ce78e76b30d0e41955f47d56edc2 | [
"BSD-3-Clause"
] | null | null | null | pyroms_toolbox/pyroms_toolbox/__init__.py | ChuningWang/pyroms | 4f3773d39683ce78e76b30d0e41955f47d56edc2 | [
"BSD-3-Clause"
] | null | null | null | pyroms_toolbox/pyroms_toolbox/__init__.py | ChuningWang/pyroms | 4f3773d39683ce78e76b30d0e41955f47d56edc2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''
PYROMS_TOOLBOX is a toolbox for working with ROMS
ocean models input/output files based on PYROMS
pyroms and pyroms_toolbox are based on the
python/numpy/matplotlib scientific python suite.
NetCDF I/O is based on the NetCDF4-python package.
'''
from .iview import iview
from .jview import jview
from .lonview import lonview
from .latview import latview
from .sview import sview
from .zview import zview
from .isoview import isoview
from .twoDview import twoDview
from .transectview import transectview
from .quiver import quiver
from . import seawater
from .N2 import N2
from .O2_saturation import O2_saturation
from . import shapiro_filter
from .rx0 import rx0
from .rx1 import rx1
from .rvalue import rvalue
from .get_coast_line import get_coast_line
from .get_coast_line_from_mask import get_coast_line_from_mask
from .get_ijcoast_line import get_ijcoast_line
from .plot_coast_line import plot_coast_line
from .plot_coast_line_from_mask import plot_coast_line_from_mask
from .plot_ijcoast_line import plot_ijcoast_line
from .lsq_phase_amplitude import lsq_phase_amplitude
from .remapping import remapping
from .remapping_bound import remapping_bound
from .remapping_bound_sig import remapping_bound_sig
from .remapping_tensor import remapping_tensor
from .nc_create_roms_file import nc_create_roms_file
from .nc_create_roms_bdry_file import nc_create_roms_bdry_file
from .average import average
from .plot_mask import plot_mask
from . import BGrid_GFDL
from .smooth_1D import smooth_1D
from . import BGrid_SODA
from .get_littoral import get_littoral
from .get_littoral2 import get_littoral2
from ._move_runoff import move_runoff
from ._move_river_t import move_river_t
from .TS_diagram import TS_diagram
from .date2jday import date2jday
from .jday2date import jday2date
from .iso2gregorian import iso2gregorian
from .gregorian2iso import gregorian2iso
from . import BGrid_POP
from .low_pass_filter import low_pass_filter
from .PCA import PCA, center, standardize
from .compute_eke import compute_eke
from .compute_moc import compute_moc
# from plot_Robinson_pyngl import plot_Robinson_pyngl
from .get_cell_area import get_cell_area
from .laplacian import laplacian
from .vorticity import vorticity
from .strain_norm import strain_norm
from .strain_norm_old import strain_norm_old
from .shift_SODA_data import shift_SODA_data
from . import Grid_HYCOM
from . import CGrid_GLORYS
from .mld_from_temp import mld_from_temp
from .mld_from_dens import mld_from_dens
from .ocean_in import ocean_in
__authors__ = ['Frederic Castruccio (frederic@marine.rutgers.edu)']
__version__ = '0.1.0'
| 33.525641 | 67 | 0.848948 |
from .iview import iview
from .jview import jview
from .lonview import lonview
from .latview import latview
from .sview import sview
from .zview import zview
from .isoview import isoview
from .twoDview import twoDview
from .transectview import transectview
from .quiver import quiver
from . import seawater
from .N2 import N2
from .O2_saturation import O2_saturation
from . import shapiro_filter
from .rx0 import rx0
from .rx1 import rx1
from .rvalue import rvalue
from .get_coast_line import get_coast_line
from .get_coast_line_from_mask import get_coast_line_from_mask
from .get_ijcoast_line import get_ijcoast_line
from .plot_coast_line import plot_coast_line
from .plot_coast_line_from_mask import plot_coast_line_from_mask
from .plot_ijcoast_line import plot_ijcoast_line
from .lsq_phase_amplitude import lsq_phase_amplitude
from .remapping import remapping
from .remapping_bound import remapping_bound
from .remapping_bound_sig import remapping_bound_sig
from .remapping_tensor import remapping_tensor
from .nc_create_roms_file import nc_create_roms_file
from .nc_create_roms_bdry_file import nc_create_roms_bdry_file
from .average import average
from .plot_mask import plot_mask
from . import BGrid_GFDL
from .smooth_1D import smooth_1D
from . import BGrid_SODA
from .get_littoral import get_littoral
from .get_littoral2 import get_littoral2
from ._move_runoff import move_runoff
from ._move_river_t import move_river_t
from .TS_diagram import TS_diagram
from .date2jday import date2jday
from .jday2date import jday2date
from .iso2gregorian import iso2gregorian
from .gregorian2iso import gregorian2iso
from . import BGrid_POP
from .low_pass_filter import low_pass_filter
from .PCA import PCA, center, standardize
from .compute_eke import compute_eke
from .compute_moc import compute_moc
from .get_cell_area import get_cell_area
from .laplacian import laplacian
from .vorticity import vorticity
from .strain_norm import strain_norm
from .strain_norm_old import strain_norm_old
from .shift_SODA_data import shift_SODA_data
from . import Grid_HYCOM
from . import CGrid_GLORYS
from .mld_from_temp import mld_from_temp
from .mld_from_dens import mld_from_dens
from .ocean_in import ocean_in
__authors__ = ['Frederic Castruccio (frederic@marine.rutgers.edu)']
__version__ = '0.1.0'
| true | true |
f7f41cc55d3a216279e6c67770f8086fd5003b53 | 335 | py | Python | server/migrations/0073_remove_machine_report_format.py | nathandarnell/sal | 464414a2666e39bdf5b4b0033a84d5129c93c053 | [
"Apache-2.0"
] | 215 | 2015-05-04T16:57:56.000Z | 2022-03-07T12:58:12.000Z | server/migrations/0073_remove_machine_report_format.py | nathandarnell/sal | 464414a2666e39bdf5b4b0033a84d5129c93c053 | [
"Apache-2.0"
] | 243 | 2015-07-04T18:10:56.000Z | 2022-02-27T18:52:40.000Z | server/migrations/0073_remove_machine_report_format.py | nathandarnell/sal | 464414a2666e39bdf5b4b0033a84d5129c93c053 | [
"Apache-2.0"
] | 90 | 2015-06-29T19:26:58.000Z | 2022-02-17T19:03:00.000Z | # Generated by Django 1.11 on 2018-04-30 14:28
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the obsolete ``report_format`` field from the ``machine`` model."""

    # Must run after the previous server migration so the column still
    # exists when this one executes.
    dependencies = [
        ('server', '0072_auto_20180430_0920'),
    ]

    operations = [
        # Drops Machine.report_format from both model state and DB schema.
        migrations.RemoveField(
            model_name='machine',
            name='report_format',
        ),
    ]
| 17.631579 | 46 | 0.6 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('server', '0072_auto_20180430_0920'),
]
operations = [
migrations.RemoveField(
model_name='machine',
name='report_format',
),
]
| true | true |
f7f41cdc68cfdb893cbae18dfb73372c8e60c551 | 3,645 | py | Python | HouseSpider/HouseSpider/settings.py | wangzihan424/HouseSpider | a3592d4fe4e8bc04a3972dabdbb1edeca3fee036 | [
"MIT"
] | null | null | null | HouseSpider/HouseSpider/settings.py | wangzihan424/HouseSpider | a3592d4fe4e8bc04a3972dabdbb1edeca3fee036 | [
"MIT"
] | null | null | null | HouseSpider/HouseSpider/settings.py | wangzihan424/HouseSpider | a3592d4fe4e8bc04a3972dabdbb1edeca3fee036 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for HouseSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'HouseSpider'
SPIDER_MODULES = ['HouseSpider.spiders']
NEWSPIDER_MODULE = 'HouseSpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'HouseSpider (+http://www.yourdomain.com)'
# robots.txt is deliberately ignored so listing pages stay reachable.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# 0.1s between requests to the same site to reduce the chance of a ban.
DOWNLOAD_DELAY = 0.1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Cookies disabled so requests look stateless to the target site.
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'HouseSpider.middlewares.HousespiderSpiderMiddleware': 543,
#}
# The built-in user-agent middleware is disabled and replaced by a
# random-user-agent middleware defined in this project.
# NOTE(review): the 'scrapy.contrib.*' path is deprecated and removed in
# Scrapy >= 1.6; modern versions use
# 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware' --
# confirm the pinned Scrapy version before upgrading.
DOWNLOADER_MIDDLEWARES = {
    # 'HouseSpider.middlewares.MyCustomDownloaderMiddleware': 543,
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
    'HouseSpider.middlewares.RandomUserAgentMiddleware': 543,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Item pipelines: images first (priority 1), then MySQL insert, then
# Excel export.
ITEM_PIPELINES = {
    # 'HouseSpider.pipelines.HousespiderPipeline': 300,
    'HouseSpider.pipelines.MySQLAsynPipeline': 299,
    'HouseSpider.pipelines.ExcelPipeline': 300,
    'scrapy.pipelines.images.ImagesPipeline':1,
}
# Item field holding image URLs, and the local directory images go to.
IMAGES_URLS_FIELD = "imgs"
IMAGES_STORE = "img"
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# MySQL connection settings consumed by MySQLAsynPipeline.
# SECURITY: credentials are hard-coded; move them to environment
# variables or an untracked local settings file before publishing.
MYSQL_USERNAME = "root"
MYSQL_PASSWORD = "123456"
MYSQL_PORT = 3306
MYSQL_CHARSET = "utf8"
MYSQL_DB = "housedb"
MYSQL_HOST = "localhost"
BOT_NAME = 'HouseSpider'
SPIDER_MODULES = ['HouseSpider.spiders']
NEWSPIDER_MODULE = 'HouseSpider.spiders'
ROBOTSTXT_OBEY = False
Y = 0.1
COOKIES_ENABLED = False
DOWNLOADER_MIDDLEWARES = {
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
'HouseSpider.middlewares.RandomUserAgentMiddleware': 543,
}
ITEM_PIPELINES = {
'HouseSpider.pipelines.MySQLAsynPipeline': 299,
'HouseSpider.pipelines.ExcelPipeline': 300,
'scrapy.pipelines.images.ImagesPipeline':1,
}
IMAGES_URLS_FIELD = "imgs"
IMAGES_STORE = "img"
MYSQL_PASSWORD = "123456"
MYSQL_PORT = 3306
MYSQL_CHARSET = "utf8"
MYSQL_DB = "housedb"
MYSQL_HOST = "localhost"
| true | true |
f7f41cfdbb038190bb99272991c95bf9a226ed5d | 5,358 | py | Python | catkin_ws/src/mrta/src/DataGenerator.py | wyyfkim/MRTA | fab515569d3434cae01733c702fc0e1afc73b552 | [
"MIT"
] | 1 | 2020-03-10T04:43:48.000Z | 2020-03-10T04:43:48.000Z | catkin_ws/src/mrta/src/DataGenerator.py | wyyfkim/MRTA | fab515569d3434cae01733c702fc0e1afc73b552 | [
"MIT"
] | 1 | 2020-03-10T05:38:16.000Z | 2020-03-10T05:38:16.000Z | catkin_ws/src/mrta/src/DataGenerator.py | wyyfkim/MRTA | fab515569d3434cae01733c702fc0e1afc73b552 | [
"MIT"
] | 1 | 2019-04-10T02:49:23.000Z | 2019-04-10T02:49:23.000Z | import os, sys
import argparse
import pickle
from numpy import random
from Task import Task
from PrecedenceGraph import PrecedenceGraph, Node
from Robot import Robot
from Logger import Logger, LogLevel
class DataSet:
    """Bundle of generated problem inputs plus algorithm tuning parameters."""

    def __init__(self, p_graphs, robots, beta, bid_alpha, cost_alpha):
        """Store precedence graphs, robot teams and the beta/alpha weights."""
        self.p_graphs = p_graphs
        self.robots = robots
        self.beta = beta
        self.bid_alpha = bid_alpha
        self.cost_alpha = cost_alpha
        # Populated later by the scheduler; starts out empty.
        self.schedules = []
class DataGenerator:
    """Random problem-instance generator: tasks, precedence graphs and
    robots placed on a 2D grid map (uses numpy.random throughout)."""

    def __init__(self, map_size_x, map_size_y, logger):
        # Map bounds used when sampling random (x, y) locations.
        self._map_size = (map_size_x, map_size_y)
        self._logger = logger
        # Closed set of task types a robot may be capable of handling.
        self.task_types = [1, 2]

    def generate_tasks(self, num_of_tasks, task_locations=None):
        """Create ``num_of_tasks`` Task objects with random time windows.

        If ``task_locations`` is given it should hold one (x, y) pair per
        task; otherwise locations are sampled uniformly on the map.
        """
        if task_locations is not None:
            if len(task_locations) != num_of_tasks:
                # NOTE(review): only logs the mismatch and keeps going; a
                # too-short location list will raise IndexError below.
                self._logger.error("generate_tasks: The number of task locations is not same as the number of tasks.")
        tasks = []
        # NOTE(review): duration is drawn once, outside the loop, so every
        # task in this batch shares the same duration -- confirm intended.
        duration = random.randint(20, 40)
        for i in range(num_of_tasks):
            task_id = i + 1
            # Earliest start time and latest finish time bound the window.
            est = random.randint(25, 400)
            lft = est + random.randint(100, 1200)
            # Equal-probability draw over the two task types.
            task_type = random.choice(self.task_types, 1, p=[0.5, 0.5])[0]
            if task_locations is not None:
                pos_x = task_locations[i][0]
                pos_y = task_locations[i][1]
            else:
                pos_x, pos_y = self.generate_locations(1)[0]
            tasks.append(Task(est, lft, duration, task_id, pos_x, pos_y, task_type))
        return tasks

    def generate_locations(self, num_of_locations):
        """Return uniformly random (x, y) grid positions within the map."""
        locations = []
        for i in range(num_of_locations):
            pos_x = random.randint(0, self._map_size[0])
            pos_y = random.randint(0, self._map_size[1])
            locations.append((pos_x, pos_y))
        return locations

    def generate_pgraph(self, tasks, max_num_of_edges):
        """Build one random PrecedenceGraph over ``tasks``.

        Edges always point from the task with the earlier lft to the one
        with the later lft, which keeps the graph acyclic.
        """
        p_graph = PrecedenceGraph(tasks)
        # On Python 3 this is a float; numpy's randint accepts float bounds.
        min_num_of_edges = len(tasks) / 2
        num_of_edges = min_num_of_edges
        if max_num_of_edges > min_num_of_edges:
            num_of_edges = random.randint(min_num_of_edges, max_num_of_edges)
        i = 0
        while i < num_of_edges:
            from_task = random.choice(tasks)
            to_task = random.choice(tasks)
            if from_task.lft < to_task.lft:
                if p_graph.are_connected(from_task, to_task):
                    # NOTE(review): re-picking an already-connected pair
                    # removes the edge without decrementing the counter,
                    # so the final edge count can fall below num_of_edges.
                    p_graph.remove_edge(from_task, to_task)
                else:
                    # Counter advances only when the edge is accepted.
                    if p_graph.add_edge(from_task, to_task):
                        i += 1
        p_graph.build_graph()
        return p_graph

    def generate_pgraphs(self, tasks, num_of_pgraphs, max_num_of_edges):
        """Generate ``num_of_pgraphs`` independent random graphs over the same tasks."""
        p_graphs = []
        for i in range(num_of_pgraphs):
            p_graph = self.generate_pgraph(tasks, max_num_of_edges)
            p_graphs.append(p_graph)
        return p_graphs

    def generate_robots(self, num_of_robots, robot_speed):
        """Create robots at random positions with random capability sets.

        Roughly one third of robots can do both task types and one third
        each can do only type 1 or only type 2; the first robot is always
        fully capable so every task stays feasible for the team.
        """
        locations = self.generate_locations(num_of_robots)
        robots = []
        task_types = [1,2]
        for i in range(num_of_robots):
            robot_id = i + 1
            capability = set()
            ran = random.uniform()
            #first robot capable of doing all tasks
            if i == 0 or ran > 0.66:
                capability = set(task_types)
            elif ran > 0.33:
                capability.add(task_types[0])
            else:
                capability.add(task_types[1])
            robot = Robot(robot_id, locations[i][0], locations[i][1], capability, robot_speed, self._logger)
            robots.append(robot)
        return robots
if __name__ == "__main__":
    # NOTE(review): CLI validation of a dataset index was disabled; the
    # output filename is currently hard-coded below instead.
    data_dir = "../data/"
    dsfile_name = '../data/dataset1.pickle'

    parser = argparse.ArgumentParser(description="MRTA Data Generator")
    parser.add_argument('--x',
                        help='X Dimention of Map',
                        dest='map_x',
                        type=int,
                        default=100,
                        action='store')
    parser.add_argument('--y',
                        help='Y Dimention of Map',
                        dest='map_y',
                        type=int,
                        default=100,
                        action='store')
    args = parser.parse_args()

    logger = Logger(LogLevel.OFF[0])
    map_x = args.map_x
    map_y = args.map_y

    # Generation parameters; larger sweeps such as [2, 4, 8] robots and
    # [5, 10, 20, 30] tasks were used previously.
    num_of_pgraphs = 50
    robot_count_arr = [1]
    task_count_arr = [5]

    dg = DataGenerator(map_x, map_y, logger)

    # One robot team per configured team size.
    robots = {}
    for robot_count in robot_count_arr:
        robots[robot_count] = dg.generate_robots(robot_count, 1)

    # num_of_pgraphs random precedence graphs per configured task count.
    p_graphs = {}
    for task_count in task_count_arr:
        tasks = dg.generate_tasks(task_count)
        print(tasks)

        # Cap edge count at 3*n but never above the DAG maximum n*(n-1)/2.
        max_possible_edges = (task_count * (task_count - 1)) / 2
        max_num_of_edges = min(3 * task_count, max_possible_edges)
        p_graphs[task_count] = dg.generate_pgraphs(tasks, num_of_pgraphs, max_num_of_edges)

    ds = DataSet(p_graphs, robots, 0.25, 0.75, 0.75)

    # BUGFIX: pickle streams are binary; text mode ('w') raises TypeError
    # on Python 3 (binary mode is also correct on Python 2).  Context
    # managers guarantee the files are flushed and closed.
    with open('./robots.pickle', 'wb') as f:
        pickle.dump(robots, f)
    with open('./pgraphs.pickle', 'wb') as f:
        pickle.dump(p_graphs, f)
    with open(dsfile_name, 'wb') as f:
        pickle.dump(ds, f)
| 30.443182 | 120 | 0.594625 | import os, sys
import argparse
import pickle
from numpy import random
from Task import Task
from PrecedenceGraph import PrecedenceGraph, Node
from Robot import Robot
from Logger import Logger, LogLevel
class DataSet:
def __init__(self, p_graphs, robots, beta, bid_alpha, cost_alpha):
self.p_graphs = p_graphs
self.robots = robots
self.beta = beta
self.bid_alpha = bid_alpha
self.cost_alpha = cost_alpha
self.schedules = []
class DataGenerator:
def __init__(self, map_size_x, map_size_y, logger):
self._map_size = (map_size_x, map_size_y)
self._logger = logger
self.task_types = [1, 2]
def generate_tasks(self, num_of_tasks, task_locations=None):
if task_locations is not None:
if len(task_locations) != num_of_tasks:
self._logger.error("generate_tasks: The number of task locations is not same as the number of tasks.")
tasks = []
duration = random.randint(20, 40)
for i in range(num_of_tasks):
task_id = i + 1
est = random.randint(25, 400)
lft = est + random.randint(100, 1200)
task_type = random.choice(self.task_types, 1, p=[0.5, 0.5])[0]
if task_locations is not None:
pos_x = task_locations[i][0]
pos_y = task_locations[i][1]
else:
pos_x, pos_y = self.generate_locations(1)[0]
tasks.append(Task(est, lft, duration, task_id, pos_x, pos_y, task_type))
return tasks
def generate_locations(self, num_of_locations):
locations = []
for i in range(num_of_locations):
pos_x = random.randint(0, self._map_size[0])
pos_y = random.randint(0, self._map_size[1])
locations.append((pos_x, pos_y))
return locations
def generate_pgraph(self, tasks, max_num_of_edges):
p_graph = PrecedenceGraph(tasks)
min_num_of_edges = len(tasks) / 2
num_of_edges = min_num_of_edges
if max_num_of_edges > min_num_of_edges:
num_of_edges = random.randint(min_num_of_edges, max_num_of_edges)
i = 0
while i < num_of_edges:
from_task = random.choice(tasks)
to_task = random.choice(tasks)
if from_task.lft < to_task.lft:
if p_graph.are_connected(from_task, to_task):
p_graph.remove_edge(from_task, to_task)
else:
if p_graph.add_edge(from_task, to_task):
i += 1
p_graph.build_graph()
return p_graph
def generate_pgraphs(self, tasks, num_of_pgraphs, max_num_of_edges):
p_graphs = []
for i in range(num_of_pgraphs):
p_graph = self.generate_pgraph(tasks, max_num_of_edges)
p_graphs.append(p_graph)
return p_graphs
def generate_robots(self, num_of_robots, robot_speed):
locations = self.generate_locations(num_of_robots)
robots = []
task_types = [1,2]
for i in range(num_of_robots):
robot_id = i + 1
capability = set()
ran = random.uniform()
if i == 0 or ran > 0.66:
capability = set(task_types)
elif ran > 0.33:
capability.add(task_types[0])
else:
capability.add(task_types[1])
robot = Robot(robot_id, locations[i][0], locations[i][1], capability, robot_speed, self._logger)
robots.append(robot)
return robots
if __name__ == "__main__":
data_dir = "../data/"
parser = argparse.ArgumentParser(description="MRTA Data Generator")
parser.add_argument('--x',
help='X Dimention of Map',
dest='map_x',
type=int,
default=100,
action='store')
parser.add_argument('--y',
help='Y Dimention of Map',
dest='map_y',
type=int,
default=100,
action='store')
args = parser.parse_args()
logger = Logger(LogLevel.OFF[0])
map_x = args.map_x
map_y = args.map_y
num_of_pgraphs = 50
dg = DataGenerator(map_x, map_y, logger)
robots = { }
for robot_count in robot_count_arr:
robots[robot_count] = dg.generate_robots(robot_count, 1)
p_graphs = { }
for task_count in task_count_arr:
p_graphs[task_count] = {}
tasks = dg.generate_tasks(task_count)
print(tasks)
max_possible_edges = (task_count * (task_count - 1))/2
max_num_of_edges = min(3 * task_count, max_possible_edges)
p_graphs[task_count] = dg.generate_pgraphs(tasks, num_of_pgraphs, max_num_of_edges)
ds = DataSet(p_graphs, robots, 0.25, 0.75, 0.75)
pickle.dump(robots, open('./robots.pickle', 'w'))
pickle.dump(p_graphs, open('./pgraphs.pickle', 'w'))
pickle.dump(ds, open(dsfile_name, 'w'))
| true | true |
f7f41d3bfaf7babd616b4330fe7dbdbe20916963 | 33,661 | py | Python | vnpy_optionmaster/ui/manager.py | noranhe/vnpy_optionmaster | 180c85f92004d1092bc45032dc31585539de9768 | [
"MIT"
] | null | null | null | vnpy_optionmaster/ui/manager.py | noranhe/vnpy_optionmaster | 180c85f92004d1092bc45032dc31585539de9768 | [
"MIT"
] | null | null | null | vnpy_optionmaster/ui/manager.py | noranhe/vnpy_optionmaster | 180c85f92004d1092bc45032dc31585539de9768 | [
"MIT"
] | null | null | null | from typing import Dict, List, Tuple, Optional
from copy import copy
from functools import partial
from scipy import interpolate
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtWidgets, QtCore, QtGui
from vnpy.trader.event import EVENT_TICK, EVENT_TIMER, EVENT_TRADE
from vnpy.trader.object import TickData, TradeData, LogData
from vnpy.trader.utility import save_json, load_json
from ..engine import OptionEngine, OptionAlgoEngine
from ..base import (
EVENT_OPTION_ALGO_PRICING,
EVENT_OPTION_ALGO_STATUS,
EVENT_OPTION_ALGO_LOG,
PortfolioData,
ChainData,
OptionData,
InstrumentData
)
from .monitor import (
MonitorCell, IndexCell, BidCell, AskCell, PosCell,
COLOR_WHITE, COLOR_BLACK
)
from ..algo import ElectronicEyeAlgo
class AlgoSpinBox(QtWidgets.QSpinBox):
    """Integer input cell used inside the electronic eye parameter table."""

    def __init__(self) -> None:
        """Configure the value range and center the displayed text."""
        super().__init__()

        self.setMinimum(-999999)
        self.setMaximum(999999)
        self.setAlignment(QtCore.Qt.AlignCenter)

    def get_value(self) -> int:
        """Return the current integer value."""
        return self.value()

    def set_value(self, value: int) -> None:
        """Overwrite the current integer value."""
        self.setValue(value)

    def update_status(self, active: bool) -> None:
        """Lock editing while the related algo is active."""
        self.setEnabled(not active)
class AlgoPositiveSpinBox(AlgoSpinBox):
    """AlgoSpinBox variant that only accepts non-negative values."""

    def __init__(self) -> None:
        """Raise the lower bound of the inherited spin box to zero."""
        super().__init__()
        self.setMinimum(0)
class AlgoDoubleSpinBox(QtWidgets.QDoubleSpinBox):
    """Non-negative float input cell (one decimal place) for the table."""

    def __init__(self) -> None:
        """Configure precision, range and centered alignment."""
        super().__init__()

        self.setDecimals(1)
        self.setMinimum(0)
        self.setMaximum(9999.9)
        self.setAlignment(QtCore.Qt.AlignCenter)

    def get_value(self) -> float:
        """Return the current float value."""
        return self.value()

    def set_value(self, value: float) -> None:
        """Overwrite the current float value."""
        self.setValue(value)

    def update_status(self, active: bool) -> None:
        """Lock editing while the related algo is active."""
        self.setEnabled(not active)
class AlgoDirectionCombo(QtWidgets.QComboBox):
    """Drop-down choosing which sides the algo may quote: both, long only, short only."""

    def __init__(self) -> None:
        """Populate the three direction choices."""
        super().__init__()
        self.addItems([
            "双向",
            "做多",
            "做空"
        ])

    def get_value(self) -> Dict[str, bool]:
        """Translate the selected item into long/short permission flags."""
        text: str = self.currentText()
        return {
            "long_allowed": text in ("双向", "做多"),
            "short_allowed": text != "做多"
        }

    def set_value(self, value: dict) -> None:
        """Select the item matching the given permission flags."""
        if value["long_allowed"]:
            index: int = 0 if value["short_allowed"] else 1
        else:
            index = 2
        self.setCurrentIndex(index)

    def update_status(self, active: bool) -> None:
        """Lock the selector while the algo is trading."""
        self.setEnabled(not active)
class AlgoPricingButton(QtWidgets.QPushButton):
    """Toggle button that starts/stops the pricing loop for one contract."""

    def __init__(self, vt_symbol: str, manager: "ElectronicEyeManager") -> None:
        """Bind the button to a contract symbol and its manager widget."""
        super().__init__()

        self.vt_symbol: str = vt_symbol
        self.manager: ElectronicEyeManager = manager

        self.active: bool = False
        self.setText("N")
        self.clicked.connect(self.on_clicked)

    def on_clicked(self) -> None:
        """Flip pricing on or off depending on the current state."""
        toggle = (
            self.manager.stop_algo_pricing
            if self.active
            else self.manager.start_algo_pricing
        )
        toggle(self.vt_symbol)

    def update_status(self, active: bool) -> None:
        """Reflect the pricing state pushed back by the engine."""
        self.active = active
        self.setText("Y" if active else "N")
class AlgoTradingButton(QtWidgets.QPushButton):
    """Toggle button that starts/stops order execution for one contract."""

    def __init__(self, vt_symbol: str, manager: "ElectronicEyeManager") -> None:
        """Bind the button to a contract symbol and its manager widget."""
        super().__init__()

        self.vt_symbol: str = vt_symbol
        self.manager: ElectronicEyeManager = manager

        self.active: bool = False
        self.setText("N")
        self.clicked.connect(self.on_clicked)

    def on_clicked(self) -> None:
        """Flip trading on or off depending on the current state."""
        toggle = (
            self.manager.stop_algo_trading
            if self.active
            else self.manager.start_algo_trading
        )
        toggle(self.vt_symbol)

    def update_status(self, active: bool) -> None:
        """Reflect the trading state pushed back by the engine."""
        self.active = active
        self.setText("Y" if active else "N")
class ElectronicEyeMonitor(QtWidgets.QTableWidget):
    """Table showing one row per option strike, with call cells on the left,
    the strike in the middle and mirrored put cells on the right."""

    # Qt signals used to hop engine-thread events onto the GUI thread.
    signal_tick: QtCore.Signal = QtCore.Signal(Event)
    signal_pricing: QtCore.Signal = QtCore.Signal(Event)
    signal_status: QtCore.Signal = QtCore.Signal(Event)
    signal_trade: QtCore.Signal = QtCore.Signal(Event)

    # Column layout for the call side; the put side reuses it in reverse
    # order around the central strike column (see init_ui).  "name" keys
    # index into self.cells, "cell" is the widget/item class to create.
    headers: List[Dict] = [
        {"name": "bid_volume", "display": "买量", "cell": BidCell},
        {"name": "bid_price", "display": "买价", "cell": BidCell},
        {"name": "ask_price", "display": "卖价", "cell": AskCell},
        {"name": "ask_volume", "display": "卖量", "cell": AskCell},
        {"name": "algo_bid_price", "display": "目标\n买价", "cell": BidCell},
        {"name": "algo_ask_price", "display": "目标\n卖价", "cell": AskCell},
        {"name": "algo_spread", "display": "价差", "cell": MonitorCell},
        {"name": "ref_price", "display": "理论价", "cell": MonitorCell},
        {"name": "pricing_impv", "display": "定价\n隐波", "cell": MonitorCell},
        {"name": "net_pos", "display": "净持仓", "cell": PosCell},
        {"name": "price_spread", "display": "价格\n价差", "cell": AlgoDoubleSpinBox},
        {"name": "volatility_spread", "display": "隐波\n价差", "cell": AlgoDoubleSpinBox},
        {"name": "max_pos", "display": "持仓\n范围", "cell": AlgoPositiveSpinBox},
        {"name": "target_pos", "display": "目标\n持仓", "cell": AlgoSpinBox},
        {"name": "max_order_size", "display": "最大\n委托", "cell": AlgoPositiveSpinBox},
        {"name": "direction", "display": "方向", "cell": AlgoDirectionCombo},
        {"name": "pricing_active", "display": "定价", "cell": AlgoPricingButton},
        {"name": "trading_active", "display": "交易", "cell": AlgoTradingButton},
    ]
def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
""""""
super().__init__()
self.option_engine: OptionEngine = option_engine
self.event_engine: EventEngine = option_engine.event_engine
self.main_engine: MainEngine = option_engine.main_engine
self.algo_engine: OptionAlgoEngine = option_engine.algo_engine
self.portfolio_name: str = portfolio_name
self.setting_filename: str = f"{portfolio_name}_electronic_eye.json"
self.cells: Dict[str, Dict] = {}
self.init_ui()
self.register_event()
self.load_setting()
    def init_ui(self) -> None:
        """Build the full table: one row per strike plus a chain-header row,
        call columns on the left, mirrored put columns on the right and the
        strike index in the middle."""
        self.setWindowTitle("电子眼")
        self.verticalHeader().setVisible(False)
        self.setEditTriggers(self.NoEditTriggers)

        # Set table row and column numbers:
        # one header row per chain plus one row per strike index.
        portfolio: PortfolioData = self.option_engine.get_portfolio(self.portfolio_name)

        row_count: int = 0
        for chain in portfolio.chains.values():
            row_count += (1 + len(chain.indexes))
        self.setRowCount(row_count)

        # Call columns + strike column + mirrored put columns.
        column_count: int = len(self.headers) * 2 + 1
        self.setColumnCount(column_count)

        call_labels: list = [d["display"] for d in self.headers]
        put_labels: list = copy(call_labels)
        put_labels.reverse()
        labels: list = call_labels + ["行权价"] + put_labels
        self.setHorizontalHeaderLabels(labels)

        # Init cells
        strike_column: int = len(self.headers)
        current_row: int = 0

        chain_symbols: list = list(portfolio.chains.keys())
        chain_symbols.sort()

        for chain_symbol in chain_symbols:
            chain: ChainData = portfolio.get_chain(chain_symbol)

            # Chain header row: symbol shown in the strike column.
            self.setItem(
                current_row,
                strike_column,
                IndexCell(chain.chain_symbol.split(".")[0])
            )

            for index in chain.indexes:
                call: OptionData = chain.calls[index]
                put: OptionData = chain.puts[index]

                current_row += 1

                # Call cells (buttons need the contract symbol and a back
                # reference to this widget; plain cells take no arguments).
                call_cells: dict = {}

                for column, d in enumerate(self.headers):
                    cell_type = d["cell"]

                    if issubclass(cell_type, QtWidgets.QPushButton):
                        cell = cell_type(call.vt_symbol, self)
                    else:
                        cell = cell_type()

                    call_cells[d["name"]] = cell

                    # Items vs widgets use different insertion APIs.
                    if isinstance(cell, QtWidgets.QTableWidgetItem):
                        self.setItem(current_row, column, cell)
                    else:
                        self.setCellWidget(current_row, column, cell)

                self.cells[call.vt_symbol] = call_cells

                # Put cells: same columns mirrored right of the strike.
                put_cells: dict = {}
                put_headers: list = copy(self.headers)
                put_headers.reverse()

                for column, d in enumerate(put_headers):
                    column += (strike_column + 1)

                    cell_type = d["cell"]

                    if issubclass(cell_type, QtWidgets.QPushButton):
                        cell = cell_type(put.vt_symbol, self)
                    else:
                        cell = cell_type()

                    put_cells[d["name"]] = cell

                    if isinstance(cell, QtWidgets.QTableWidgetItem):
                        self.setItem(current_row, column, cell)
                    else:
                        self.setCellWidget(current_row, column, cell)

                self.cells[put.vt_symbol] = put_cells

                # Strike cell
                index_cell: IndexCell = IndexCell(str(call.chain_index))
                self.setItem(current_row, strike_column, index_cell)

            # Move to next row
            current_row += 1

        self.resizeColumnsToContents()

        # Update all net pos and tick cells with whatever data is cached.
        for vt_symbol in self.cells.keys():
            self.update_net_pos(vt_symbol)

            tick: Optional[TickData] = self.main_engine.get_tick(vt_symbol)
            if tick:
                self.update_tick(tick)
def load_setting(self) -> None:
""""""
fields: list = [
"price_spread",
"volatility_spread",
"max_pos",
"target_pos",
"max_order_size",
"direction"
]
setting: dict = load_json(self.setting_filename)
for vt_symbol, cells in self.cells.items():
buf: Optional[dict] = setting.get(vt_symbol, None)
if buf:
for field in fields:
cells[field].set_value(buf[field])
def save_setting(self) -> None:
""""""
fields: list = [
"price_spread",
"volatility_spread",
"max_pos",
"target_pos",
"max_order_size",
"direction"
]
setting: dict = {}
for vt_symbol, cells in self.cells.items():
buf: dict = {}
for field in fields:
buf[field] = cells[field].get_value()
setting[vt_symbol] = buf
save_json(self.setting_filename, setting)
def register_event(self) -> None:
""""""
self.signal_pricing.connect(self.process_pricing_event)
self.signal_status.connect(self.process_status_event)
self.signal_tick.connect(self.process_tick_event)
self.signal_trade.connect(self.process_trade_event)
self.event_engine.register(
EVENT_OPTION_ALGO_PRICING,
self.signal_pricing.emit
)
self.event_engine.register(
EVENT_OPTION_ALGO_STATUS,
self.signal_status.emit
)
self.event_engine.register(
EVENT_TICK,
self.signal_tick.emit
)
self.event_engine.register(
EVENT_TRADE,
self.signal_trade.emit
)
def process_tick_event(self, event: Event) -> None:
""""""
tick: TickData = event.data
self.update_tick(tick)
def update_tick(self, tick: TickData) -> None:
""""""
cells: Optional[dict] = self.cells.get(tick.vt_symbol, None)
if not cells:
return
cells["bid_price"].setText(str(tick.bid_price_1))
cells["ask_price"].setText(str(tick.ask_price_1))
cells["bid_volume"].setText(str(tick.bid_volume_1))
cells["ask_volume"].setText(str(tick.ask_volume_1))
def process_status_event(self, event: Event) -> None:
""""""
algo: ElectronicEyeAlgo = event.data
cells: dict = self.cells[algo.vt_symbol]
cells["price_spread"].update_status(algo.pricing_active)
cells["volatility_spread"].update_status(algo.pricing_active)
cells["pricing_active"].update_status(algo.pricing_active)
cells["max_pos"].update_status(algo.trading_active)
cells["target_pos"].update_status(algo.trading_active)
cells["max_order_size"].update_status(algo.trading_active)
cells["direction"].update_status(algo.trading_active)
cells["trading_active"].update_status(algo.trading_active)
def process_pricing_event(self, event: Event) -> None:
""""""
algo: ElectronicEyeAlgo = event.data
cells: dict = self.cells[algo.vt_symbol]
if algo.ref_price:
cells["algo_bid_price"].setText(str(algo.algo_bid_price))
cells["algo_ask_price"].setText(str(algo.algo_ask_price))
cells["algo_spread"].setText(str(algo.algo_spread))
cells["ref_price"].setText(str(algo.ref_price))
cells["pricing_impv"].setText(f"{algo.pricing_impv * 100:.2f}")
else:
cells["algo_bid_price"].setText("")
cells["algo_ask_price"].setText("")
cells["algo_spread"].setText("")
cells["ref_price"].setText("")
cells["pricing_impv"].setText("")
def process_trade_event(self, event: Event) -> None:
""""""
trade: TradeData = event.data
self.update_net_pos(trade.vt_symbol)
def update_net_pos(self, vt_symbol: str) -> None:
""""""
cells: Optional[dict] = self.cells.get(vt_symbol, None)
if not cells:
return
option: InstrumentData = self.option_engine.get_instrument(vt_symbol)
cells["net_pos"].setText(str(option.net_pos))
def start_algo_pricing(self, vt_symbol: str) -> None:
""""""
cells: dict = self.cells[vt_symbol]
params: dict = {}
params["price_spread"] = cells["price_spread"].get_value()
params["volatility_spread"] = cells["volatility_spread"].get_value()
self.algo_engine.start_algo_pricing(vt_symbol, params)
    def stop_algo_pricing(self, vt_symbol: str) -> None:
        """Stop the pricing task of the given contract's algo."""
        self.algo_engine.stop_algo_pricing(vt_symbol)
def start_algo_trading(self, vt_symbol: str) -> None:
""""""
cells: dict = self.cells[vt_symbol]
params = cells["direction"].get_value()
for name in [
"max_pos",
"target_pos",
"max_order_size"
]:
params[name] = cells[name].get_value()
self.algo_engine.start_algo_trading(vt_symbol, params)
    def stop_algo_trading(self, vt_symbol: str) -> None:
        """Stop the auto-trading task of the given contract's algo."""
        self.algo_engine.stop_algo_trading(vt_symbol)
class ElectronicEyeManager(QtWidgets.QWidget):
    """Main widget of the option electronic-eye application.

    Combines the per-contract algo monitor table with a log view and a
    panel of batch controls that push one parameter value to every
    currently-editable row at once.
    """

    # Qt signal used to marshal log events onto the GUI thread.
    signal_log: QtCore.Signal = QtCore.Signal(Event)

    def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
        """Store engine references, build the UI and subscribe to log events."""
        super().__init__()

        self.option_engine: OptionEngine = option_engine
        # Fixed the misspelled "event_Engine" attribute for consistency with
        # the other widgets in this module; the old name is kept as an alias
        # so any external code reading it keeps working.
        self.event_engine: EventEngine = option_engine.event_engine
        self.event_Engine: EventEngine = self.event_engine
        self.algo_engine: OptionAlgoEngine = option_engine.algo_engine
        self.portfolio_name: str = portfolio_name

        self.init_ui()
        self.register_event()

    def init_ui(self) -> None:
        """Create the monitor table, log view and batch-setting controls."""
        self.setWindowTitle("期权电子眼")

        self.algo_monitor: ElectronicEyeMonitor = ElectronicEyeMonitor(self.option_engine, self.portfolio_name)

        self.log_monitor: QtWidgets.QTextEdit = QtWidgets.QTextEdit()
        self.log_monitor.setReadOnly(True)
        self.log_monitor.setMaximumWidth(400)

        stop_pricing_button: QtWidgets.QPushButton = QtWidgets.QPushButton("停止定价")
        stop_pricing_button.clicked.connect(self.stop_pricing_for_all)

        stop_trading_button: QtWidgets.QPushButton = QtWidgets.QPushButton("停止交易")
        stop_trading_button.clicked.connect(self.stop_trading_for_all)

        # Input widgets for the batch-setting panel.
        self.price_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()
        self.volatility_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()
        self.direction_combo: AlgoDirectionCombo = AlgoDirectionCombo()
        self.max_order_size_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()
        self.target_pos_spin: AlgoSpinBox = AlgoSpinBox()
        self.max_pos_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()

        price_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        price_spread_button.clicked.connect(self.set_price_spread_for_all)

        volatility_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        volatility_spread_button.clicked.connect(self.set_volatility_spread_for_all)

        direction_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        direction_button.clicked.connect(self.set_direction_for_all)

        max_order_size_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        max_order_size_button.clicked.connect(self.set_max_order_size_for_all)

        target_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        target_pos_button.clicked.connect(self.set_target_pos_for_all)

        max_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
        max_pos_button.clicked.connect(self.set_max_pos_for_all)

        QLabel = QtWidgets.QLabel

        grid: QtWidgets.QGridLayout = QtWidgets.QGridLayout()
        grid.addWidget(QLabel("价格价差"), 0, 0)
        grid.addWidget(self.price_spread_spin, 0, 1)
        grid.addWidget(price_spread_button, 0, 2)
        grid.addWidget(QLabel("隐波价差"), 1, 0)
        grid.addWidget(self.volatility_spread_spin, 1, 1)
        grid.addWidget(volatility_spread_button, 1, 2)
        grid.addWidget(QLabel("持仓范围"), 2, 0)
        grid.addWidget(self.max_pos_spin, 2, 1)
        grid.addWidget(max_pos_button, 2, 2)
        grid.addWidget(QLabel("目标持仓"), 3, 0)
        grid.addWidget(self.target_pos_spin, 3, 1)
        grid.addWidget(target_pos_button, 3, 2)
        grid.addWidget(QLabel("最大委托"), 4, 0)
        grid.addWidget(self.max_order_size_spin, 4, 1)
        grid.addWidget(max_order_size_button, 4, 2)
        grid.addWidget(QLabel("方向"), 5, 0)
        grid.addWidget(self.direction_combo, 5, 1)
        grid.addWidget(direction_button, 5, 2)

        hbox1: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
        hbox1.addWidget(stop_pricing_button)
        hbox1.addWidget(stop_trading_button)

        vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addLayout(grid)
        vbox.addWidget(self.log_monitor)

        hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.algo_monitor)
        hbox.addLayout(vbox)

        self.setLayout(hbox)

    def register_event(self) -> None:
        """Route algo log events through a Qt signal onto the GUI thread."""
        self.signal_log.connect(self.process_log_event)
        self.event_engine.register(EVENT_OPTION_ALGO_LOG, self.signal_log.emit)

    def process_log_event(self, event: Event) -> None:
        """Append a timestamped algo log line to the log view."""
        log: LogData = event.data
        timestr: str = log.time.strftime("%H:%M:%S")
        msg: str = f"{timestr} {log.msg}"
        self.log_monitor.append(msg)

    def show(self) -> None:
        """Initialize the algo engine for this portfolio and show maximized."""
        self.algo_engine.init_engine(self.portfolio_name)
        self.algo_monitor.resizeColumnsToContents()
        super().showMaximized()

    def set_price_spread_for_all(self) -> None:
        """Push the panel's price spread to every editable row."""
        price_spread: float = self.price_spread_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            # Disabled cells belong to running algos and must not be changed.
            if cells["price_spread"].isEnabled():
                cells["price_spread"].setValue(price_spread)

    def set_volatility_spread_for_all(self) -> None:
        """Push the panel's volatility spread to every editable row."""
        volatility_spread: float = self.volatility_spread_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            if cells["volatility_spread"].isEnabled():
                cells["volatility_spread"].setValue(volatility_spread)

    def set_direction_for_all(self) -> None:
        """Push the panel's direction selection to every editable row."""
        ix: int = self.direction_combo.currentIndex()
        for cells in self.algo_monitor.cells.values():
            if cells["direction"].isEnabled():
                cells["direction"].setCurrentIndex(ix)

    def set_max_order_size_for_all(self) -> None:
        """Push the panel's max order size to every editable row."""
        size: int = self.max_order_size_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            if cells["max_order_size"].isEnabled():
                cells["max_order_size"].setValue(size)

    def set_target_pos_for_all(self) -> None:
        """Push the panel's target position to every editable row."""
        pos: int = self.target_pos_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            if cells["target_pos"].isEnabled():
                cells["target_pos"].setValue(pos)

    def set_max_pos_for_all(self) -> None:
        """Push the panel's position limit to every editable row."""
        pos: int = self.max_pos_spin.get_value()
        for cells in self.algo_monitor.cells.values():
            if cells["max_pos"].isEnabled():
                cells["max_pos"].setValue(pos)

    def stop_pricing_for_all(self) -> None:
        """Stop the pricing task of every contract in the monitor."""
        for vt_symbol in self.algo_monitor.cells.keys():
            self.algo_monitor.stop_algo_pricing(vt_symbol)

    def stop_trading_for_all(self) -> None:
        """Stop the auto-trading task of every contract in the monitor."""
        for vt_symbol in self.algo_monitor.cells.keys():
            self.algo_monitor.stop_algo_trading(vt_symbol)

    def closeEvent(self, event: QtGui.QCloseEvent) -> None:
        """Persist the monitor's settings before the window closes."""
        self.algo_monitor.save_setting()
        event.accept()
class VolatilityDoubleSpinBox(QtWidgets.QDoubleSpinBox):
    """Spin box for editing implied volatility as a percentage (0–200%)."""

    def __init__(self) -> None:
        """Configure the value range, precision and percent suffix."""
        super().__init__()

        self.setMinimum(0)
        self.setMaximum(200.0)
        self.setDecimals(1)
        self.setSuffix("%")

    def get_value(self) -> float:
        """Return the current value as a plain float."""
        return self.value()
class PricingVolatilityManager(QtWidgets.QWidget):
    """Widget for viewing market implied volatility and managing the
    pricing volatility curve of each option chain (one tab per chain)."""
    # Qt signal used to marshal timer events onto the GUI thread.
    signal_timer: QtCore.Signal = QtCore.Signal(Event)
    def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
        """Store engine/portfolio references, build the UI and subscribe to timer events."""
        super().__init__()
        self.option_engine: OptionEngine = option_engine
        self.event_engine: EventEngine = option_engine.event_engine
        self.portfolio: PortfolioData = option_engine.get_portfolio(portfolio_name)
        # (chain_symbol, strike index) -> dict of that row's cells/widgets.
        self.cells: Dict[Tuple, Dict] = {}
        self.chain_symbols: List[str] = []
        # Last known ATM index per chain, used to move the row highlight.
        self.chain_atm_index: Dict[str, str] = {}
        self.init_ui()
        self.register_event()
    def init_ui(self) -> None:
        """Build one tab per chain: a volatility table plus curve-adjustment buttons."""
        self.setWindowTitle("波动率管理")
        tab: QtWidgets.QTabWidget = QtWidgets.QTabWidget()
        vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
        vbox.addWidget(tab)
        self.setLayout(vbox)
        self.chain_symbols: list = list(self.portfolio.chains.keys())
        self.chain_symbols.sort()
        for chain_symbol in self.chain_symbols:
            chain: ChainData = self.portfolio.get_chain(chain_symbol)
            table: QtWidgets.QTableWidget = QtWidgets.QTableWidget()
            table.setEditTriggers(table.NoEditTriggers)
            table.verticalHeader().setVisible(False)
            table.setRowCount(len(chain.indexes))
            table.horizontalHeader().setSectionResizeMode(
                QtWidgets.QHeaderView.Stretch
            )
            labels: list = [
                "行权价",
                "OTM隐波",
                "CALL隐波",
                "PUT隐波",
                "定价隐波",
                "执行拟合"
            ]
            table.setColumnCount(len(labels))
            table.setHorizontalHeaderLabels(labels)
            # One row per strike: read-only impv cells, an editable pricing
            # impv spin box, and a checkbox excluding the strike from fitting.
            for row, index in enumerate(chain.indexes):
                index_cell: IndexCell = IndexCell(index)
                otm_impv_cell: MonitorCell = MonitorCell("")
                call_impv_cell: MonitorCell = MonitorCell("")
                put_impv_cell: MonitorCell = MonitorCell("")
                # Bind chain/index so the spin box edits exactly this strike.
                set_func = partial(
                    self.set_pricing_impv,
                    chain_symbol=chain_symbol,
                    index=index
                )
                pricing_impv_spin: VolatilityDoubleSpinBox = VolatilityDoubleSpinBox()
                pricing_impv_spin.setAlignment(QtCore.Qt.AlignCenter)
                pricing_impv_spin.valueChanged.connect(set_func)
                # Wrap the checkbox in a layout so it renders centered.
                check: QtWidgets.QCheckBox = QtWidgets.QCheckBox()
                check_hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
                check_hbox.setAlignment(QtCore.Qt.AlignCenter)
                check_hbox.addWidget(check)
                check_widget: QtWidgets.QWidget = QtWidgets.QWidget()
                check_widget.setLayout(check_hbox)
                table.setItem(row, 0, index_cell)
                table.setItem(row, 1, otm_impv_cell)
                table.setItem(row, 2, call_impv_cell)
                table.setItem(row, 3, put_impv_cell)
                table.setCellWidget(row, 4, pricing_impv_spin)
                table.setCellWidget(row, 5, check_widget)
                cells: dict = {
                    "otm_impv": otm_impv_cell,
                    "call_impv": call_impv_cell,
                    "put_impv": put_impv_cell,
                    "pricing_impv": pricing_impv_spin,
                    "check": check
                }
                self.cells[(chain_symbol, index)] = cells
            reset_func = partial(self.reset_pricing_impv, chain_symbol=chain_symbol)
            button_reset: QtWidgets.QPushButton = QtWidgets.QPushButton("重置")
            button_reset.clicked.connect(reset_func)
            fit_func = partial(self.fit_pricing_impv, chain_symbol=chain_symbol)
            button_fit: QtWidgets.QPushButton = QtWidgets.QPushButton("拟合")
            button_fit.clicked.connect(fit_func)
            increase_func = partial(self.increase_pricing_impv, chain_symbol=chain_symbol)
            button_increase: QtWidgets.QPushButton = QtWidgets.QPushButton("+0.1%")
            button_increase.clicked.connect(increase_func)
            decrease_func = partial(self.decrease_pricing_impv, chain_symbol=chain_symbol)
            button_decrease: QtWidgets.QPushButton = QtWidgets.QPushButton("-0.1%")
            button_decrease.clicked.connect(decrease_func)
            hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
            hbox.addWidget(button_reset)
            hbox.addWidget(button_fit)
            hbox.addWidget(button_increase)
            hbox.addWidget(button_decrease)
            vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
            vbox.addLayout(hbox)
            vbox.addWidget(table)
            chain_widget: QtWidgets.QWidget = QtWidgets.QWidget()
            chain_widget.setLayout(vbox)
            tab.addTab(chain_widget, chain_symbol)
            self.update_pricing_impv(chain_symbol)
            # Captured from the last created cell; used by update_chain_impv to
            # restore the default look when the ATM highlight moves.
            # NOTE(review): default_foreground is assigned but never read below;
            # the un-highlight path uses COLOR_WHITE instead — confirm intended.
            self.default_foreground = otm_impv_cell.foreground()
            self.default_background = otm_impv_cell.background()
            table.resizeRowsToContents()
    def register_event(self) -> None:
        """Forward engine timer events through a Qt signal onto the GUI thread."""
        self.signal_timer.connect(self.process_timer_event)
        self.event_engine.register(EVENT_TIMER, self.signal_timer.emit)
    def process_timer_event(self, event: Event) -> None:
        """Periodically refresh the displayed implied volatility of every chain."""
        for chain_symbol in self.chain_symbols:
            self.update_chain_impv(chain_symbol)
    def reset_pricing_impv(self, chain_symbol: str) -> None:
        """
        Set pricing impv to the otm mid impv of each strike price.
        """
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        atm_index: str = chain.atm_index
        for index in chain.indexes:
            call: OptionData = chain.calls[index]
            put: OptionData = chain.puts[index]
            # Strikes at/above ATM use the call as the OTM leg, below use the put.
            if index >= atm_index:
                otm: OptionData = call
            else:
                otm: OptionData = put
            call.pricing_impv = otm.mid_impv
            put.pricing_impv = otm.mid_impv
        self.update_pricing_impv(chain_symbol)
    def fit_pricing_impv(self, chain_symbol: str) -> None:
        """
        Fit pricing impv with cubic spline algo.

        Strikes whose checkbox is ticked are excluded from the sample points
        but still receive the fitted value.
        """
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        atm_index: str = chain.atm_index
        strike_prices: list = []
        pricing_impvs: list = []
        for index in chain.indexes:
            call: OptionData = chain.calls[index]
            put: OptionData = chain.puts[index]
            cells: dict = self.cells[(chain_symbol, index)]
            if not cells["check"].isChecked():
                if index >= atm_index:
                    otm: OptionData = call
                else:
                    otm: OptionData = put
                strike_prices.append(otm.strike_price)
                pricing_impvs.append(otm.pricing_impv)
        cs: interpolate.CubicSpline = interpolate.CubicSpline(strike_prices, pricing_impvs)
        # Apply the fitted curve to every strike, including excluded ones.
        for index in chain.indexes:
            call: OptionData = chain.calls[index]
            put: OptionData = chain.puts[index]
            new_impv: float = float(cs(call.strike_price))
            call.pricing_impv = new_impv
            put.pricing_impv = new_impv
        self.update_pricing_impv(chain_symbol)
    def increase_pricing_impv(self, chain_symbol: str) -> None:
        """
        Increase pricing impv of all options within a chain by 0.1%.
        """
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        for option in chain.options.values():
            option.pricing_impv += 0.001
        self.update_pricing_impv(chain_symbol)
    def decrease_pricing_impv(self, chain_symbol: str) -> None:
        """
        Decrease pricing impv of all options within a chain by 0.1%.
        """
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        for option in chain.options.values():
            option.pricing_impv -= 0.001
        self.update_pricing_impv(chain_symbol)
    def set_pricing_impv(self, value: float, chain_symbol: str, index: str) -> None:
        """Spin-box callback: store the edited percentage (as a fraction) on
        both the call and the put of the strike."""
        new_impv: float = value / 100
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        call: OptionData = chain.calls[index]
        call.pricing_impv = new_impv
        put: OptionData = chain.puts[index]
        put.pricing_impv = new_impv
    def update_pricing_impv(self, chain_symbol: str) -> None:
        """Write each strike's OTM pricing impv back into its spin box."""
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        atm_index: str = chain.atm_index
        for index in chain.indexes:
            if index >= atm_index:
                otm: OptionData = chain.calls[index]
            else:
                otm: OptionData = chain.puts[index]
            # round(x, 1) returns a float (percentage with one decimal).
            value: float = round(otm.pricing_impv * 100, 1)
            key: tuple = (chain_symbol, index)
            cells: Optional[dict] = self.cells.get(key, None)
            if cells:
                cells["pricing_impv"].setValue(value)
    def update_chain_impv(self, chain_symbol: str) -> None:
        """Refresh the market impv cells and keep the ATM row highlighted."""
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        atm_index: str = chain.atm_index
        for index in chain.indexes:
            call: OptionData = chain.calls[index]
            put: OptionData = chain.puts[index]
            if index >= atm_index:
                otm: OptionData = call
            else:
                otm: OptionData = put
            cells: dict = self.cells[(chain_symbol, index)]
            cells["otm_impv"].setText(f"{otm.mid_impv:.1%}")
            cells["call_impv"].setText(f"{call.mid_impv:.1%}")
            cells["put_impv"].setText(f"{put.mid_impv:.1%}")
        # Move the highlight only when the ATM strike actually changed.
        current_atm_index: str = self.chain_atm_index.get(chain_symbol, "")
        if current_atm_index == atm_index:
            return
        self.chain_atm_index[chain_symbol] = atm_index
        if current_atm_index:
            old_cells: dict = self.cells[(chain_symbol, current_atm_index)]
            for field in ["otm_impv", "call_impv", "put_impv"]:
                old_cells[field].setForeground(COLOR_WHITE)
                old_cells[field].setBackground(self.default_background)
        if atm_index:
            new_cells: dict = self.cells[(chain_symbol, atm_index)]
            for field in ["otm_impv", "call_impv", "put_impv"]:
                new_cells[field].setForeground(COLOR_BLACK)
                new_cells[field].setBackground(COLOR_WHITE)
from typing import Dict, List, Tuple, Optional
from copy import copy
from functools import partial
from scipy import interpolate
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtWidgets, QtCore, QtGui
from vnpy.trader.event import EVENT_TICK, EVENT_TIMER, EVENT_TRADE
from vnpy.trader.object import TickData, TradeData, LogData
from vnpy.trader.utility import save_json, load_json
from ..engine import OptionEngine, OptionAlgoEngine
from ..base import (
EVENT_OPTION_ALGO_PRICING,
EVENT_OPTION_ALGO_STATUS,
EVENT_OPTION_ALGO_LOG,
PortfolioData,
ChainData,
OptionData,
InstrumentData
)
from .monitor import (
MonitorCell, IndexCell, BidCell, AskCell, PosCell,
COLOR_WHITE, COLOR_BLACK
)
from ..algo import ElectronicEyeAlgo
class AlgoSpinBox(QtWidgets.QSpinBox):
    """Integer spin box used for editing algo parameters in the monitor table."""
    def __init__(self) -> None:
        """Allow large positive and negative values and center the text."""
        super().__init__()
        self.setMaximum(999999)
        self.setMinimum(-999999)
        self.setAlignment(QtCore.Qt.AlignCenter)
    def get_value(self) -> int:
        """Return the current integer value."""
        return self.value()
    def set_value(self, value: int) -> None:
        """Programmatically set the displayed value."""
        self.setValue(value)
    def update_status(self, active: bool) -> None:
        """Lock editing while the related algo function is active."""
        self.setEnabled(not active)
class AlgoPositiveSpinBox(AlgoSpinBox):
    """AlgoSpinBox variant that only accepts non-negative values."""
    def __init__(self) -> None:
        """Restrict the inherited range to values >= 0."""
        super().__init__()
        self.setMinimum(0)
class AlgoDoubleSpinBox(QtWidgets.QDoubleSpinBox):
    """Float spin box (one decimal, non-negative) for algo spread parameters."""
    def __init__(self) -> None:
        """Configure precision, range and centered text."""
        super().__init__()
        self.setDecimals(1)
        self.setMaximum(9999.9)
        self.setMinimum(0)
        self.setAlignment(QtCore.Qt.AlignCenter)
    def get_value(self) -> float:
        """Return the current float value."""
        return self.value()
    def set_value(self, value: float) -> None:
        """Programmatically set the displayed value."""
        self.setValue(value)
    def update_status(self, active: bool) -> None:
        """Lock editing while the related algo function is active."""
        self.setEnabled(not active)
class AlgoDirectionCombo(QtWidgets.QComboBox):
    """Combo box selecting which direction(s) the trading algo may take."""

    def __init__(self) -> None:
        """Populate the three direction choices (both / long only / short only)."""
        super().__init__()

        self.addItems([
            "双向",
            "做多",
            "做空"
        ])

    def get_value(self) -> Dict[str, bool]:
        """Map the current selection to long/short permission flags."""
        text: str = self.currentText()
        return {
            "long_allowed": text in ("双向", "做多"),
            "short_allowed": text != "做多"
        }

    def set_value(self, value: dict) -> None:
        """Restore the selection from saved permission flags."""
        if value["long_allowed"]:
            ix: int = 0 if value["short_allowed"] else 1
        else:
            ix = 2
        self.setCurrentIndex(ix)

    def update_status(self, active: bool) -> None:
        """Lock the combo while the trading algo is running."""
        self.setEnabled(not active)
class AlgoPricingButton(QtWidgets.QPushButton):
    """Toggle button that starts/stops the pricing task of one contract's algo.

    Shows "Y" while pricing is active and "N" while it is not.
    """
    def __init__(self, vt_symbol: str, manager: "ElectronicEyeManager") -> None:
        """Bind the button to a contract and the manager that runs the algos."""
        super().__init__()
        self.vt_symbol: str = vt_symbol
        self.manager: ElectronicEyeManager = manager
        # Mirrors the algo's pricing state; updated via update_status().
        self.active: bool = False
        self.setText("N")
        self.clicked.connect(self.on_clicked)
    def on_clicked(self) -> None:
        """Toggle pricing: stop it when active, start it otherwise."""
        if self.active:
            self.manager.stop_algo_pricing(self.vt_symbol)
        else:
            self.manager.start_algo_pricing(self.vt_symbol)
    def update_status(self, active: bool) -> None:
        """Sync the button's flag and label with the algo's pricing state."""
        self.active = active
        if active:
            self.setText("Y")
        else:
            self.setText("N")
class AlgoTradingButton(QtWidgets.QPushButton):
    """Toggle button that starts/stops the auto-trading task of one contract's algo.

    Shows "Y" while trading is active and "N" while it is not.
    """
    def __init__(self, vt_symbol: str, manager: "ElectronicEyeManager") -> None:
        """Bind the button to a contract and the manager that runs the algos."""
        super().__init__()
        self.vt_symbol: str = vt_symbol
        self.manager: ElectronicEyeManager = manager
        # Mirrors the algo's trading state; updated via update_status().
        self.active: bool = False
        self.setText("N")
        self.clicked.connect(self.on_clicked)
    def on_clicked(self) -> None:
        """Toggle trading: stop it when active, start it otherwise."""
        if self.active:
            self.manager.stop_algo_trading(self.vt_symbol)
        else:
            self.manager.start_algo_trading(self.vt_symbol)
    def update_status(self, active: bool) -> None:
        """Sync the button's flag and label with the algo's trading state."""
        self.active = active
        if active:
            self.setText("Y")
        else:
            self.setText("N")
class ElectronicEyeMonitor(QtWidgets.QTableWidget):
    """T-shaped quote table for the electronic-eye algos.

    One row per strike: call columns on the left, the strike price in the
    middle, and mirrored put columns on the right. Each side mixes read-only
    market/algo cells with editable parameter widgets.
    """
    # Qt signals used to marshal engine events onto the GUI thread.
    signal_tick: QtCore.Signal = QtCore.Signal(Event)
    signal_pricing: QtCore.Signal = QtCore.Signal(Event)
    signal_status: QtCore.Signal = QtCore.Signal(Event)
    signal_trade: QtCore.Signal = QtCore.Signal(Event)
    # Column layout of one side of the table; "cell" is the item/widget class.
    headers: List[Dict] = [
        {"name": "bid_volume", "display": "买量", "cell": BidCell},
        {"name": "bid_price", "display": "买价", "cell": BidCell},
        {"name": "ask_price", "display": "卖价", "cell": AskCell},
        {"name": "ask_volume", "display": "卖量", "cell": AskCell},
        {"name": "algo_bid_price", "display": "目标\n买价", "cell": BidCell},
        {"name": "algo_ask_price", "display": "目标\n卖价", "cell": AskCell},
        {"name": "algo_spread", "display": "价差", "cell": MonitorCell},
        {"name": "ref_price", "display": "理论价", "cell": MonitorCell},
        {"name": "pricing_impv", "display": "定价\n隐波", "cell": MonitorCell},
        {"name": "net_pos", "display": "净持仓", "cell": PosCell},
        {"name": "price_spread", "display": "价格\n价差", "cell": AlgoDoubleSpinBox},
        {"name": "volatility_spread", "display": "隐波\n价差", "cell": AlgoDoubleSpinBox},
        {"name": "max_pos", "display": "持仓\n范围", "cell": AlgoPositiveSpinBox},
        {"name": "target_pos", "display": "目标\n持仓", "cell": AlgoSpinBox},
        {"name": "max_order_size", "display": "最大\n委托", "cell": AlgoPositiveSpinBox},
        {"name": "direction", "display": "方向", "cell": AlgoDirectionCombo},
        {"name": "pricing_active", "display": "定价", "cell": AlgoPricingButton},
        {"name": "trading_active", "display": "交易", "cell": AlgoTradingButton},
    ]
    def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
        """Store engine references, build the table and restore saved settings."""
        super().__init__()
        self.option_engine: OptionEngine = option_engine
        self.event_engine: EventEngine = option_engine.event_engine
        self.main_engine: MainEngine = option_engine.main_engine
        self.algo_engine: OptionAlgoEngine = option_engine.algo_engine
        self.portfolio_name: str = portfolio_name
        # Settings are persisted per portfolio.
        self.setting_filename: str = f"{portfolio_name}_electronic_eye.json"
        # vt_symbol -> dict of that contract's cells/widgets (keyed by header name).
        self.cells: Dict[str, Dict] = {}
        self.init_ui()
        self.register_event()
        self.load_setting()
    def init_ui(self) -> None:
        """Build the T-shaped table: call columns, strike column, mirrored put columns."""
        self.setWindowTitle("电子眼")
        self.verticalHeader().setVisible(False)
        self.setEditTriggers(self.NoEditTriggers)
        portfolio: PortfolioData = self.option_engine.get_portfolio(self.portfolio_name)
        # One header row per chain plus one row per strike.
        row_count: int = 0
        for chain in portfolio.chains.values():
            row_count += (1 + len(chain.indexes))
        self.setRowCount(row_count)
        column_count: int = len(self.headers) * 2 + 1
        self.setColumnCount(column_count)
        call_labels: list = [d["display"] for d in self.headers]
        put_labels: list = copy(call_labels)
        put_labels.reverse()
        labels: list = call_labels + ["行权价"] + put_labels
        self.setHorizontalHeaderLabels(labels)
        strike_column: int = len(self.headers)
        current_row: int = 0
        chain_symbols: list = list(portfolio.chains.keys())
        chain_symbols.sort()
        for chain_symbol in chain_symbols:
            chain: ChainData = portfolio.get_chain(chain_symbol)
            # Chain header row shows the chain symbol in the strike column.
            self.setItem(
                current_row,
                strike_column,
                IndexCell(chain.chain_symbol.split(".")[0])
            )
            for index in chain.indexes:
                call: OptionData = chain.calls[index]
                put: OptionData = chain.puts[index]
                current_row += 1
                # Call side: columns 0 .. strike_column-1 in header order.
                call_cells: dict = {}
                for column, d in enumerate(self.headers):
                    cell_type = d["cell"]
                    # Buttons need the contract symbol and a back-reference.
                    if issubclass(cell_type, QtWidgets.QPushButton):
                        cell = cell_type(call.vt_symbol, self)
                    else:
                        cell = cell_type()
                    call_cells[d["name"]] = cell
                    # Items go through setItem, widgets through setCellWidget.
                    if isinstance(cell, QtWidgets.QTableWidgetItem):
                        self.setItem(current_row, column, cell)
                    else:
                        self.setCellWidget(current_row, column, cell)
                self.cells[call.vt_symbol] = call_cells
                # Put side: mirrored column order to the right of the strike.
                put_cells: dict = {}
                put_headers: list = copy(self.headers)
                put_headers.reverse()
                for column, d in enumerate(put_headers):
                    column += (strike_column + 1)
                    cell_type = d["cell"]
                    if issubclass(cell_type, QtWidgets.QPushButton):
                        cell = cell_type(put.vt_symbol, self)
                    else:
                        cell = cell_type()
                    put_cells[d["name"]] = cell
                    if isinstance(cell, QtWidgets.QTableWidgetItem):
                        self.setItem(current_row, column, cell)
                    else:
                        self.setCellWidget(current_row, column, cell)
                self.cells[put.vt_symbol] = put_cells
                index_cell: IndexCell = IndexCell(str(call.chain_index))
                self.setItem(current_row, strike_column, index_cell)
            current_row += 1
        self.resizeColumnsToContents()
        # Seed the display with current positions and latest ticks.
        for vt_symbol in self.cells.keys():
            self.update_net_pos(vt_symbol)
            tick: Optional[TickData] = self.main_engine.get_tick(vt_symbol)
            if tick:
                self.update_tick(tick)
    def load_setting(self) -> None:
        """Restore each contract's algo parameters from the portfolio's JSON file."""
        fields: list = [
            "price_spread",
            "volatility_spread",
            "max_pos",
            "target_pos",
            "max_order_size",
            "direction"
        ]
        setting: dict = load_json(self.setting_filename)
        for vt_symbol, cells in self.cells.items():
            buf: Optional[dict] = setting.get(vt_symbol, None)
            if buf:
                for field in fields:
                    cells[field].set_value(buf[field])
    def save_setting(self) -> None:
        """Persist every contract's current algo parameters to the JSON file."""
        fields: list = [
            "price_spread",
            "volatility_spread",
            "max_pos",
            "target_pos",
            "max_order_size",
            "direction"
        ]
        setting: dict = {}
        for vt_symbol, cells in self.cells.items():
            buf: dict = {}
            for field in fields:
                buf[field] = cells[field].get_value()
            setting[vt_symbol] = buf
        save_json(self.setting_filename, setting)
    def register_event(self) -> None:
        """Connect Qt signals to handlers and subscribe them to engine events."""
        self.signal_pricing.connect(self.process_pricing_event)
        self.signal_status.connect(self.process_status_event)
        self.signal_tick.connect(self.process_tick_event)
        self.signal_trade.connect(self.process_trade_event)
        self.event_engine.register(
            EVENT_OPTION_ALGO_PRICING,
            self.signal_pricing.emit
        )
        self.event_engine.register(
            EVENT_OPTION_ALGO_STATUS,
            self.signal_status.emit
        )
        self.event_engine.register(
            EVENT_TICK,
            self.signal_tick.emit
        )
        self.event_engine.register(
            EVENT_TRADE,
            self.signal_trade.emit
        )
    def process_tick_event(self, event: Event) -> None:
        """Forward a tick event's payload to the tick display logic."""
        tick: TickData = event.data
        self.update_tick(tick)
    def update_tick(self, tick: TickData) -> None:
        """Refresh the bid/ask price and volume cells for one contract."""
        cells: Optional[dict] = self.cells.get(tick.vt_symbol, None)
        if not cells:
            return
        cells["bid_price"].setText(str(tick.bid_price_1))
        cells["ask_price"].setText(str(tick.ask_price_1))
        cells["bid_volume"].setText(str(tick.bid_volume_1))
        cells["ask_volume"].setText(str(tick.ask_volume_1))
    def process_status_event(self, event: Event) -> None:
        """Enable or disable parameter widgets to match the algo's state."""
        algo: ElectronicEyeAlgo = event.data
        cells: dict = self.cells[algo.vt_symbol]
        cells["price_spread"].update_status(algo.pricing_active)
        cells["volatility_spread"].update_status(algo.pricing_active)
        cells["pricing_active"].update_status(algo.pricing_active)
        cells["max_pos"].update_status(algo.trading_active)
        cells["target_pos"].update_status(algo.trading_active)
        cells["max_order_size"].update_status(algo.trading_active)
        cells["direction"].update_status(algo.trading_active)
        cells["trading_active"].update_status(algo.trading_active)
    def process_pricing_event(self, event: Event) -> None:
        """Show the algo's latest quoted prices, or blank the cells when it has none."""
        algo: ElectronicEyeAlgo = event.data
        cells: dict = self.cells[algo.vt_symbol]
        if algo.ref_price:
            cells["algo_bid_price"].setText(str(algo.algo_bid_price))
            cells["algo_ask_price"].setText(str(algo.algo_ask_price))
            cells["algo_spread"].setText(str(algo.algo_spread))
            cells["ref_price"].setText(str(algo.ref_price))
            cells["pricing_impv"].setText(f"{algo.pricing_impv * 100:.2f}")
        else:
            cells["algo_bid_price"].setText("")
            cells["algo_ask_price"].setText("")
            cells["algo_spread"].setText("")
            cells["ref_price"].setText("")
            cells["pricing_impv"].setText("")
    def process_trade_event(self, event: Event) -> None:
        """Refresh the net position display of the contract that just traded."""
        trade: TradeData = event.data
        self.update_net_pos(trade.vt_symbol)
    def update_net_pos(self, vt_symbol: str) -> None:
        """Query the instrument's net position and show it in the table."""
        cells: Optional[dict] = self.cells.get(vt_symbol, None)
        if not cells:
            return
        option: InstrumentData = self.option_engine.get_instrument(vt_symbol)
        cells["net_pos"].setText(str(option.net_pos))
    def start_algo_pricing(self, vt_symbol: str) -> None:
        """Read the spread settings from the table row and start pricing."""
        cells: dict = self.cells[vt_symbol]
        params: dict = {}
        params["price_spread"] = cells["price_spread"].get_value()
        params["volatility_spread"] = cells["volatility_spread"].get_value()
        self.algo_engine.start_algo_pricing(vt_symbol, params)
    def stop_algo_pricing(self, vt_symbol: str) -> None:
        """Stop the pricing task of the given contract's algo."""
        self.algo_engine.stop_algo_pricing(vt_symbol)
    def start_algo_trading(self, vt_symbol: str) -> None:
        """Collect trading parameters from the table row and start auto trading."""
        cells: dict = self.cells[vt_symbol]
        # The direction combo yields the long/short permission flags.
        params = cells["direction"].get_value()
        for name in [
            "max_pos",
            "target_pos",
            "max_order_size"
        ]:
            params[name] = cells[name].get_value()
        self.algo_engine.start_algo_trading(vt_symbol, params)
    def stop_algo_trading(self, vt_symbol: str) -> None:
        """Stop the auto-trading task of the given contract's algo."""
        self.algo_engine.stop_algo_trading(vt_symbol)
class ElectronicEyeManager(QtWidgets.QWidget):
signal_log = QtCore.Signal(Event)
def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
super().__init__()
self.option_engine: OptionEngine = option_engine
self.event_Engine: EventEngine = option_engine.event_engine
self.algo_engine: OptionAlgoEngine = option_engine.algo_engine
self.portfolio_name: str = portfolio_name
self.init_ui()
self.register_event()
def init_ui(self) -> None:
self.setWindowTitle("期权电子眼")
self.algo_monitor: ElectronicEyeMonitor = ElectronicEyeMonitor(self.option_engine, self.portfolio_name)
self.log_monitor: QtWidgets.QTextEdit = QtWidgets.QTextEdit()
self.log_monitor.setReadOnly(True)
self.log_monitor.setMaximumWidth(400)
stop_pricing_button: QtWidgets.QPushButton = QtWidgets.QPushButton("停止定价")
stop_pricing_button.clicked.connect(self.stop_pricing_for_all)
stop_trading_button: QtWidgets.QPushButton = QtWidgets.QPushButton("停止交易")
stop_trading_button.clicked.connect(self.stop_trading_for_all)
self.price_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()
self.volatility_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()
self.direction_combo: AlgoDirectionCombo = AlgoDirectionCombo()
self.max_order_size_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()
self.target_pos_spin: AlgoSpinBox = AlgoSpinBox()
self.max_pos_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()
price_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
price_spread_button.clicked.connect(self.set_price_spread_for_all)
volatility_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
volatility_spread_button.clicked.connect(self.set_volatility_spread_for_all)
direction_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
direction_button.clicked.connect(self.set_direction_for_all)
max_order_size_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
max_order_size_button.clicked.connect(self.set_max_order_size_for_all)
target_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
target_pos_button.clicked.connect(self.set_target_pos_for_all)
max_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton("设置")
max_pos_button.clicked.connect(self.set_max_pos_for_all)
QLabel = QtWidgets.QLabel
grid: QtWidgets.QGridLayout = QtWidgets.QGridLayout()
grid.addWidget(QLabel("价格价差"), 0, 0)
grid.addWidget(self.price_spread_spin, 0, 1)
grid.addWidget(price_spread_button, 0, 2)
grid.addWidget(QLabel("隐波价差"), 1, 0)
grid.addWidget(self.volatility_spread_spin, 1, 1)
grid.addWidget(volatility_spread_button, 1, 2)
grid.addWidget(QLabel("持仓范围"), 2, 0)
grid.addWidget(self.max_pos_spin, 2, 1)
grid.addWidget(max_pos_button, 2, 2)
grid.addWidget(QLabel("目标持仓"), 3, 0)
grid.addWidget(self.target_pos_spin, 3, 1)
grid.addWidget(target_pos_button, 3, 2)
grid.addWidget(QLabel("最大委托"), 4, 0)
grid.addWidget(self.max_order_size_spin, 4, 1)
grid.addWidget(max_order_size_button, 4, 2)
grid.addWidget(QLabel("方向"), 5, 0)
grid.addWidget(self.direction_combo, 5, 1)
grid.addWidget(direction_button, 5, 2)
hbox1: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox1.addWidget(stop_pricing_button)
hbox1.addWidget(stop_trading_button)
vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
vbox.addLayout(hbox1)
vbox.addLayout(grid)
vbox.addWidget(self.log_monitor)
hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox.addWidget(self.algo_monitor)
hbox.addLayout(vbox)
self.setLayout(hbox)
def register_event(self) -> None:
self.signal_log.connect(self.process_log_event)
self.event_Engine.register(EVENT_OPTION_ALGO_LOG, self.signal_log.emit)
def process_log_event(self, event: Event) -> None:
log: LogData = event.data
timestr: str = log.time.strftime("%H:%M:%S")
msg: str = f"{timestr} {log.msg}"
self.log_monitor.append(msg)
    def show(self) -> None:
        """Initialize the algo engine for this portfolio, then display maximized."""
        self.algo_engine.init_engine(self.portfolio_name)
        # Size columns once after the monitor is populated by init_engine.
        self.algo_monitor.resizeColumnsToContents()
        super().showMaximized()
def set_price_spread_for_all(self) -> None:
price_spread: float = self.price_spread_spin.get_value()
for cells in self.algo_monitor.cells.values():
if cells["price_spread"].isEnabled():
cells["price_spread"].setValue(price_spread)
def set_volatility_spread_for_all(self) -> None:
volatility_spread: float = self.volatility_spread_spin.get_value()
for cells in self.algo_monitor.cells.values():
if cells["volatility_spread"].isEnabled():
cells["volatility_spread"].setValue(volatility_spread)
def set_direction_for_all(self) -> None:
ix: int = self.direction_combo.currentIndex()
for cells in self.algo_monitor.cells.values():
if cells["direction"].isEnabled():
cells["direction"].setCurrentIndex(ix)
def set_max_order_size_for_all(self) -> None:
size: int = self.max_order_size_spin.get_value()
for cells in self.algo_monitor.cells.values():
if cells["max_order_size"].isEnabled():
cells["max_order_size"].setValue(size)
def set_target_pos_for_all(self) -> None:
pos: int = self.target_pos_spin.get_value()
for cells in self.algo_monitor.cells.values():
if cells["target_pos"].isEnabled():
cells["target_pos"].setValue(pos)
def set_max_pos_for_all(self) -> None:
pos: int = self.max_pos_spin.get_value()
for cells in self.algo_monitor.cells.values():
if cells["max_pos"].isEnabled():
cells["max_pos"].setValue(pos)
def stop_pricing_for_all(self) -> None:
for vt_symbol in self.algo_monitor.cells.keys():
self.algo_monitor.stop_algo_pricing(vt_symbol)
def stop_trading_for_all(self) -> None:
for vt_symbol in self.algo_monitor.cells.keys():
self.algo_monitor.stop_algo_trading(vt_symbol)
    def closeEvent(self, event: QtGui.QCloseEvent) -> None:
        """Persist per-algo settings before allowing the window to close."""
        self.algo_monitor.save_setting()
        event.accept()
class VolatilityDoubleSpinBox(QtWidgets.QDoubleSpinBox):
    """Spin box for editing implied volatility as a percentage.

    Values are shown with one decimal place and a "%" suffix, and are
    clamped to the [0, 200] range.
    """

    def __init__(self) -> None:
        super().__init__()
        # Display format: one decimal place with a percent suffix.
        self.setDecimals(1)
        self.setSuffix("%")
        # Volatility percentage is bounded to [0, 200].
        self.setMinimum(0)
        self.setMaximum(200.0)

    def get_value(self) -> float:
        """Return the current value (in percent units)."""
        return self.value()
class PricingVolatilityManager(QtWidgets.QWidget):
    """Tab widget for managing pricing implied volatility.

    For every option chain in the portfolio it shows one table with the
    market implied volatilities (OTM / CALL / PUT) of each strike, and
    lets the user edit, reset, cubic-spline fit, or shift the pricing
    volatility curve of the chain.
    """
    signal_timer: QtCore.Signal = QtCore.Signal(Event)
    def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:
        """Build the UI for the given portfolio and hook up timer updates."""
        super().__init__()
        self.option_engine: OptionEngine = option_engine
        self.event_engine: EventEngine = option_engine.event_engine
        self.portfolio: PortfolioData = option_engine.get_portfolio(portfolio_name)
        # (chain_symbol, strike_index) -> dict of the widgets in that table row.
        self.cells: Dict[Tuple, Dict] = {}
        self.chain_symbols: List[str] = []
        # chain_symbol -> strike index currently highlighted as at-the-money.
        self.chain_atm_index: Dict[str, str] = {}
        self.init_ui()
        self.register_event()
    def init_ui(self) -> None:
        """Create one table tab per option chain (sorted by chain symbol)."""
        self.setWindowTitle("波动率管理")
        tab: QtWidgets.QTabWidget = QtWidgets.QTabWidget()
        vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
        vbox.addWidget(tab)
        self.setLayout(vbox)
        self.chain_symbols: list = list(self.portfolio.chains.keys())
        self.chain_symbols.sort()
        for chain_symbol in self.chain_symbols:
            chain: ChainData = self.portfolio.get_chain(chain_symbol)
            # One read-only table per chain; one row per strike index.
            table: QtWidgets.QTableWidget = QtWidgets.QTableWidget()
            table.setEditTriggers(table.NoEditTriggers)
            table.verticalHeader().setVisible(False)
            table.setRowCount(len(chain.indexes))
            table.horizontalHeader().setSectionResizeMode(
                QtWidgets.QHeaderView.Stretch
            )
            labels: list = [
                "行权价",
                "OTM隐波",
                "CALL隐波",
                "PUT隐波",
                "定价隐波",
                "执行拟合"
            ]
            table.setColumnCount(len(labels))
            table.setHorizontalHeaderLabels(labels)
            for row, index in enumerate(chain.indexes):
                index_cell: IndexCell = IndexCell(index)
                otm_impv_cell: MonitorCell = MonitorCell("")
                call_impv_cell: MonitorCell = MonitorCell("")
                put_impv_cell: MonitorCell = MonitorCell("")
                # Bind chain/strike into the spin-box callback via partial.
                set_func = partial(
                    self.set_pricing_impv,
                    chain_symbol=chain_symbol,
                    index=index
                )
                pricing_impv_spin: VolatilityDoubleSpinBox = VolatilityDoubleSpinBox()
                pricing_impv_spin.setAlignment(QtCore.Qt.AlignCenter)
                pricing_impv_spin.valueChanged.connect(set_func)
                # Checkbox marks a strike to be EXCLUDED from curve fitting
                # (see fit_pricing_impv); wrapped in a layout to center it.
                check: QtWidgets.QCheckBox = QtWidgets.QCheckBox()
                check_hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
                check_hbox.setAlignment(QtCore.Qt.AlignCenter)
                check_hbox.addWidget(check)
                check_widget: QtWidgets.QWidget = QtWidgets.QWidget()
                check_widget.setLayout(check_hbox)
                table.setItem(row, 0, index_cell)
                table.setItem(row, 1, otm_impv_cell)
                table.setItem(row, 2, call_impv_cell)
                table.setItem(row, 3, put_impv_cell)
                table.setCellWidget(row, 4, pricing_impv_spin)
                table.setCellWidget(row, 5, check_widget)
                cells: dict = {
                    "otm_impv": otm_impv_cell,
                    "call_impv": call_impv_cell,
                    "put_impv": put_impv_cell,
                    "pricing_impv": pricing_impv_spin,
                    "check": check
                }
                self.cells[(chain_symbol, index)] = cells
            # Per-chain action buttons, each bound to its chain via partial.
            reset_func = partial(self.reset_pricing_impv, chain_symbol=chain_symbol)
            button_reset: QtWidgets.QPushButton = QtWidgets.QPushButton("重置")
            button_reset.clicked.connect(reset_func)
            fit_func = partial(self.fit_pricing_impv, chain_symbol=chain_symbol)
            button_fit: QtWidgets.QPushButton = QtWidgets.QPushButton("拟合")
            button_fit.clicked.connect(fit_func)
            increase_func = partial(self.increase_pricing_impv, chain_symbol=chain_symbol)
            button_increase: QtWidgets.QPushButton = QtWidgets.QPushButton("+0.1%")
            button_increase.clicked.connect(increase_func)
            decrease_func = partial(self.decrease_pricing_impv, chain_symbol=chain_symbol)
            button_decrease: QtWidgets.QPushButton = QtWidgets.QPushButton("-0.1%")
            button_decrease.clicked.connect(decrease_func)
            hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
            hbox.addWidget(button_reset)
            hbox.addWidget(button_fit)
            hbox.addWidget(button_increase)
            hbox.addWidget(button_decrease)
            vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
            vbox.addLayout(hbox)
            vbox.addWidget(table)
            chain_widget: QtWidgets.QWidget = QtWidgets.QWidget()
            chain_widget.setLayout(vbox)
            tab.addTab(chain_widget, chain_symbol)
            self.update_pricing_impv(chain_symbol)
            # Capture a cell's default colors (from the last row created) so
            # the ATM highlight can be undone later.  NOTE(review):
            # default_foreground is stored but never read in this class.
            self.default_foreground = otm_impv_cell.foreground()
            self.default_background = otm_impv_cell.background()
            table.resizeRowsToContents()
    def register_event(self) -> None:
        """Forward timer events onto the GUI thread via the Qt signal."""
        self.signal_timer.connect(self.process_timer_event)
        self.event_engine.register(EVENT_TIMER, self.signal_timer.emit)
    def process_timer_event(self, event: Event) -> None:
        """Refresh the displayed market implied volatility of every chain."""
        for chain_symbol in self.chain_symbols:
            self.update_chain_impv(chain_symbol)
    def reset_pricing_impv(self, chain_symbol: str) -> None:
        """Reset the pricing curve to the OTM mid implied volatility.

        For each strike, both call and put pricing impv are overwritten
        with the mid impv of the out-of-the-money side (call at or above
        ATM, put below).
        """
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        atm_index: str = chain.atm_index
        for index in chain.indexes:
            call: OptionData = chain.calls[index]
            put: OptionData = chain.puts[index]
            if index >= atm_index:
                otm: OptionData = call
            else:
                otm: OptionData = put
            call.pricing_impv = otm.mid_impv
            put.pricing_impv = otm.mid_impv
        self.update_pricing_impv(chain_symbol)
    def fit_pricing_impv(self, chain_symbol: str) -> None:
        """Smooth the pricing curve with a cubic spline over the strikes.

        Strikes whose "执行拟合" checkbox is checked are excluded from the
        fit sample but still receive the interpolated value.
        """
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        atm_index: str = chain.atm_index
        strike_prices: list = []
        pricing_impvs: list = []
        for index in chain.indexes:
            call: OptionData = chain.calls[index]
            put: OptionData = chain.puts[index]
            cells: dict = self.cells[(chain_symbol, index)]
            if not cells["check"].isChecked():
                if index >= atm_index:
                    otm: OptionData = call
                else:
                    otm: OptionData = put
                strike_prices.append(otm.strike_price)
                pricing_impvs.append(otm.pricing_impv)
        # NOTE(review): CubicSpline requires strictly increasing x values;
        # assumes chain.indexes yields strikes in ascending order — confirm.
        cs: interpolate.CubicSpline = interpolate.CubicSpline(strike_prices, pricing_impvs)
        for index in chain.indexes:
            call: OptionData = chain.calls[index]
            put: OptionData = chain.puts[index]
            new_impv: float = float(cs(call.strike_price))
            call.pricing_impv = new_impv
            put.pricing_impv = new_impv
        self.update_pricing_impv(chain_symbol)
    def increase_pricing_impv(self, chain_symbol: str) -> None:
        """Shift the whole pricing curve up by 0.1 percentage point."""
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        for option in chain.options.values():
            option.pricing_impv += 0.001
        self.update_pricing_impv(chain_symbol)
    def decrease_pricing_impv(self, chain_symbol: str) -> None:
        """Shift the whole pricing curve down by 0.1 percentage point."""
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        for option in chain.options.values():
            option.pricing_impv -= 0.001
        self.update_pricing_impv(chain_symbol)
    def set_pricing_impv(self, value: float, chain_symbol: str, index: str) -> None:
        """Spin-box callback: set one strike's pricing impv (``value`` is in %)."""
        new_impv: float = value / 100
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        call: OptionData = chain.calls[index]
        call.pricing_impv = new_impv
        put: OptionData = chain.puts[index]
        put.pricing_impv = new_impv
    def update_pricing_impv(self, chain_symbol: str) -> None:
        """Push the chain's pricing impv values into the spin boxes (as %)."""
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        atm_index: str = chain.atm_index
        for index in chain.indexes:
            if index >= atm_index:
                otm: OptionData = chain.calls[index]
            else:
                otm: OptionData = chain.puts[index]
            # round(x, 1) returns a float, not an int (annotation corrected).
            value: float = round(otm.pricing_impv * 100, 1)
            key: tuple = (chain_symbol, index)
            cells: Optional[dict] = self.cells.get(key, None)
            if cells:
                cells["pricing_impv"].setValue(value)
    def update_chain_impv(self, chain_symbol: str) -> None:
        """Refresh the market impv columns and the ATM-row highlight."""
        chain: ChainData = self.portfolio.get_chain(chain_symbol)
        atm_index: str = chain.atm_index
        for index in chain.indexes:
            call: OptionData = chain.calls[index]
            put: OptionData = chain.puts[index]
            if index >= atm_index:
                otm: OptionData = call
            else:
                otm: OptionData = put
            cells: dict = self.cells[(chain_symbol, index)]
            cells["otm_impv"].setText(f"{otm.mid_impv:.1%}")
            cells["call_impv"].setText(f"{call.mid_impv:.1%}")
            cells["put_impv"].setText(f"{put.mid_impv:.1%}")
        # Only repaint the highlight when the ATM strike actually moved.
        current_atm_index: str = self.chain_atm_index.get(chain_symbol, "")
        if current_atm_index == atm_index:
            return
        self.chain_atm_index[chain_symbol] = atm_index
        if current_atm_index:
            # Restore the previously highlighted row's colors.
            old_cells: dict = self.cells[(chain_symbol, current_atm_index)]
            for field in ["otm_impv", "call_impv", "put_impv"]:
                old_cells[field].setForeground(COLOR_WHITE)
                old_cells[field].setBackground(self.default_background)
        if atm_index:
            # Highlight the new ATM row: black text on a white background.
            new_cells: dict = self.cells[(chain_symbol, atm_index)]
            for field in ["otm_impv", "call_impv", "put_impv"]:
                new_cells[field].setForeground(COLOR_BLACK)
                new_cells[field].setBackground(COLOR_WHITE)
| true | true |
f7f41e22289e3b364c15513be0edf12543f5b886 | 4,040 | py | Python | src/lib/phdi-building-blocks/tests/test_linkage.py | CDCgov/prime-data-ingestion | d0eb9c25999a9280a91b11583c3e131783b13c17 | [
"Apache-2.0"
] | 1 | 2021-12-20T17:37:52.000Z | 2021-12-20T17:37:52.000Z | src/lib/phdi-building-blocks/tests/test_linkage.py | CDCgov/prime-data-ingestion | d0eb9c25999a9280a91b11583c3e131783b13c17 | [
"Apache-2.0"
] | 7 | 2022-01-26T17:57:13.000Z | 2022-02-02T18:51:58.000Z | src/lib/phdi-building-blocks/tests/test_linkage.py | CDCgov/prime-data-ingestion | d0eb9c25999a9280a91b11583c3e131783b13c17 | [
"Apache-2.0"
] | 1 | 2021-12-21T15:55:02.000Z | 2021-12-21T15:55:02.000Z | from phdi_building_blocks.linkage import generate_hash_str, add_patient_identifier
def test_generate_hash():
    """generate_hash_str returns the expected stable digest for known inputs."""
    salt = "super-legit-salt"
    expected = {
        "John-Shepard-2153/11/07-1234 Silversun Strip Zakera Ward Citadel 99999":
            "0aa5aa1f6183a24670b2e1848864514e119ae6ca63bb35246ef215e7a0746a35",
        "Tali-Zora-Vas-Normandy-2160/05/14-PO Box 1 Rock Rannoch":
            "102818c623290c24069beb721c6eb465d281b3b67ecfb6aef924d14affa117b9",
    }
    for plaintext, digest in expected.items():
        assert generate_hash_str(plaintext, salt) == digest
def test_missing_address():
    """A patient without an address still gets a hashed identifier.

    The hash input ends with "-" because the address component is empty.
    """
    patient_resource = {
        "resourceType": "Patient",
        "name": [{"family": "doe"}],
        "birthDate": "19900101",
    }
    bundle = {"entry": [{"resource": patient_resource}]}
    add_patient_identifier(bundle, "some-salt")
    assert patient_resource["identifier"][0]["value"] == generate_hash_str(
        "doe-19900101-", "some-salt"
    )
def test_add_patient_identifier():
    """add_patient_identifier appends a salted-hash identifier to Patient
    resources only, leaving non-Patient bundle entries untouched."""
    salt_str = "super-legit-salt"
    # A minimal FHIR batch bundle: a MessageHeader, one Patient with a
    # pre-existing identifier, and a Provenance resource.
    incoming_bundle = {
        "resourceType": "Bundle",
        "type": "batch",
        "timestamp": "2022-01-01T00:00:30",
        "identifier": {"value": "a-totally-legit-id"},
        "id": "45bdc851-2fe5-cf8a-2fd7-dd24b23409e4",
        "entry": [
            {
                "fullUrl": "asdfasdfu2189u812",
                "resource": {
                    "resourceType": "MessageHeader",
                    "resourceBody": "some-FHIR-stuff",
                },
            },
            {
                "fullUrl": "ajshdfo8ashf8191hf",
                "resource": {
                    "resourceType": "Patient",
                    "id": "65489-asdf5-6d8w2-zz5g8",
                    "identifier": [
                        {
                            "value": "99999",
                            "type": {
                                "coding": [
                                    {
                                        "code": "real-code",
                                        "system": "a-real-url",
                                    }
                                ]
                            },
                            "system": "urn:oid:1.2.840.114350.1.13.163.3.7.2.696570",
                        }
                    ],
                    "name": [
                        {
                            "family": "Shepard",
                            "given": ["John", "Tiberius"],
                            "use": "official",
                        }
                    ],
                    "birthDate": "2053-11-07",
                    "gender": "male",
                    "address": [
                        {
                            "line": ["1234 Silversun Strip"],
                            "city": "Zakera Ward",
                            "state": "Citadel",
                            "postalCode": "99999",
                        }
                    ],
                },
            },
            {
                "fullUrl": "64a6s87df98a46e8a52d",
                "resource": {
                    "resourceType": "Provenance",
                    "resourceBody": "moar-FHIR-stuff",
                },
            },
        ],
    }
    # Expected hash input: given names + family name + birth date + address.
    plaintext = (
        "John-Tiberius-Shepard-2053-11-07-"
        + "1234 Silversun Strip Zakera Ward, Citadel 99999"
    )
    expected_new_identifier = {
        "value": generate_hash_str(plaintext, salt_str),
        "system": "urn:ietf:rfc:3986",
        "use": "temp",
    }
    add_patient_identifier(incoming_bundle, salt_str)
    # No entries added or removed; the Patient gains exactly one identifier,
    # appended after the pre-existing one.
    assert len(incoming_bundle["entry"]) == 3
    for resource in incoming_bundle["entry"]:
        if resource["resource"]["resourceType"] == "Patient":
            assert len(resource["resource"]["identifier"]) == 2
            assert resource["resource"]["identifier"][-1] == expected_new_identifier
| 34.237288 | 88 | 0.437624 | from phdi_building_blocks.linkage import generate_hash_str, add_patient_identifier
def test_generate_hash():
salt_str = "super-legit-salt"
patient_1 = "John-Shepard-2153/11/07-1234 Silversun Strip Zakera Ward Citadel 99999"
patient_2 = "Tali-Zora-Vas-Normandy-2160/05/14-PO Box 1 Rock Rannoch"
hash_1 = generate_hash_str(patient_1, salt_str)
hash_2 = generate_hash_str(patient_2, salt_str)
assert hash_1 == "0aa5aa1f6183a24670b2e1848864514e119ae6ca63bb35246ef215e7a0746a35"
assert hash_2 == "102818c623290c24069beb721c6eb465d281b3b67ecfb6aef924d14affa117b9"
def test_missing_address():
bundle = {
"entry": [
{
"resource": {
"resourceType": "Patient",
"name": [{"family": "doe"}],
"birthDate": "19900101",
}
}
]
}
add_patient_identifier(bundle, "some-salt")
expected = generate_hash_str("doe-19900101-", "some-salt")
actual = bundle["entry"][0]["resource"]["identifier"][0]["value"]
assert actual == expected
def test_add_patient_identifier():
salt_str = "super-legit-salt"
incoming_bundle = {
"resourceType": "Bundle",
"type": "batch",
"timestamp": "2022-01-01T00:00:30",
"identifier": {"value": "a-totally-legit-id"},
"id": "45bdc851-2fe5-cf8a-2fd7-dd24b23409e4",
"entry": [
{
"fullUrl": "asdfasdfu2189u812",
"resource": {
"resourceType": "MessageHeader",
"resourceBody": "some-FHIR-stuff",
},
},
{
"fullUrl": "ajshdfo8ashf8191hf",
"resource": {
"resourceType": "Patient",
"id": "65489-asdf5-6d8w2-zz5g8",
"identifier": [
{
"value": "99999",
"type": {
"coding": [
{
"code": "real-code",
"system": "a-real-url",
}
]
},
"system": "urn:oid:1.2.840.114350.1.13.163.3.7.2.696570",
}
],
"name": [
{
"family": "Shepard",
"given": ["John", "Tiberius"],
"use": "official",
}
],
"birthDate": "2053-11-07",
"gender": "male",
"address": [
{
"line": ["1234 Silversun Strip"],
"city": "Zakera Ward",
"state": "Citadel",
"postalCode": "99999",
}
],
},
},
{
"fullUrl": "64a6s87df98a46e8a52d",
"resource": {
"resourceType": "Provenance",
"resourceBody": "moar-FHIR-stuff",
},
},
],
}
plaintext = (
"John-Tiberius-Shepard-2053-11-07-"
+ "1234 Silversun Strip Zakera Ward, Citadel 99999"
)
expected_new_identifier = {
"value": generate_hash_str(plaintext, salt_str),
"system": "urn:ietf:rfc:3986",
"use": "temp",
}
add_patient_identifier(incoming_bundle, salt_str)
assert len(incoming_bundle["entry"]) == 3
for resource in incoming_bundle["entry"]:
if resource["resource"]["resourceType"] == "Patient":
assert len(resource["resource"]["identifier"]) == 2
assert resource["resource"]["identifier"][-1] == expected_new_identifier
| true | true |
f7f41e78ab51f3fa6a9276832b59ea545804484d | 40,353 | py | Python | core/domain/topic_domain_test.py | OsamaAdam98/oppia | 9c9c3d0fc6710f58394c5e28d414043340ed2a62 | [
"Apache-2.0"
] | 2 | 2019-03-31T07:03:32.000Z | 2019-04-24T18:12:53.000Z | core/domain/topic_domain_test.py | OsamaAdam98/oppia | 9c9c3d0fc6710f58394c5e28d414043340ed2a62 | [
"Apache-2.0"
] | 3 | 2019-08-01T18:38:54.000Z | 2019-08-12T03:02:59.000Z | core/domain/topic_domain_test.py | RafayGhafoor/oppia | 855d02414ca00d0807d841e48be7ce7b94d68c29 | [
"Apache-2.0"
] | 1 | 2020-03-15T14:29:55.000Z | 2020-03-15T14:29:55.000Z | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for topic domain objects."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from constants import constants
from core.domain import topic_domain
from core.domain import user_services
from core.tests import test_utils
import feconf
import utils
class TopicDomainUnitTests(test_utils.GenericTestBase):
    """Tests for topic domain objects."""
    # Fixed topic id shared by every test in this suite.
    topic_id = 'topic_id'
    def setUp(self):
        """Creates a default topic with one subtopic and two signed-up users."""
        super(TopicDomainUnitTests, self).setUp()
        self.signup('a@example.com', 'A')
        self.signup('b@example.com', 'B')
        self.topic = topic_domain.Topic.create_default_topic(
            self.topic_id, 'Name', 'abbrev')
        self.topic.subtopics = [
            topic_domain.Subtopic(1, 'Title', ['skill_id_1'])]
        self.topic.next_subtopic_id = 2
        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_b = self.get_user_id_from_email('b@example.com')
        self.user_a = user_services.UserActionsInfo(self.user_id_a)
        self.user_b = user_services.UserActionsInfo(self.user_id_b)
    def test_create_default_topic(self):
        """Tests the create_default_topic() function."""
        topic = topic_domain.Topic.create_default_topic(
            self.topic_id, 'Name', 'abbrev')
        expected_topic_dict = {
            'id': self.topic_id,
            'name': 'Name',
            'abbreviated_name': 'abbrev',
            'thumbnail_filename': None,
            'description': feconf.DEFAULT_TOPIC_DESCRIPTION,
            'canonical_story_references': [],
            'additional_story_references': [],
            'uncategorized_skill_ids': [],
            'subtopics': [],
            'next_subtopic_id': 1,
            'language_code': constants.DEFAULT_LANGUAGE_CODE,
            'subtopic_schema_version': feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION,
            'story_reference_schema_version': (
                feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
            'version': 0
        }
        self.assertEqual(topic.to_dict(), expected_topic_dict)
    def test_get_all_skill_ids(self):
        """Uncategorized skill ids come before subtopic skill ids."""
        self.topic.uncategorized_skill_ids = ['skill_id_2', 'skill_id_3']
        self.assertEqual(
            self.topic.get_all_skill_ids(),
            ['skill_id_2', 'skill_id_3', 'skill_id_1'])
    def test_get_all_uncategorized_skill_ids(self):
        self.topic.uncategorized_skill_ids = ['skill_id_1', 'skill_id_2']
        self.assertEqual(
            self.topic.get_all_uncategorized_skill_ids(),
            ['skill_id_1', 'skill_id_2'])
    def test_get_all_subtopics(self):
        self.topic.subtopics = [topic_domain.Subtopic(
            1, 'Title', ['skill_id_1'])]
        subtopics = self.topic.get_all_subtopics()
        self.assertEqual(
            subtopics, [{
                'skill_ids': ['skill_id_1'],
                'id': 1,
                'title': 'Title'}])
    def test_delete_canonical_story(self):
        self.topic.canonical_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_2')
        ]
        self.topic.delete_canonical_story('story_id_1')
        canonical_story_ids = self.topic.get_canonical_story_ids()
        self.assertEqual(
            canonical_story_ids, ['story_id', 'story_id_2'])
        # Deleting an id that is not present should raise.
        with self.assertRaisesRegexp(
            Exception, 'The story_id story_id_5 is not present in the canonical'
            ' story references list of the topic.'):
            self.topic.delete_canonical_story('story_id_5')
    def test_get_all_story_references(self):
        """Canonical references are listed before additional references."""
        self.topic.canonical_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1')
        ]
        self.topic.additional_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_2'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_3')
        ]
        all_story_references = self.topic.get_all_story_references()
        self.assertEqual(len(all_story_references), 4)
        self.assertEqual(all_story_references[0].story_id, 'story_id')
        self.assertEqual(all_story_references[1].story_id, 'story_id_1')
        self.assertEqual(all_story_references[2].story_id, 'story_id_2')
        self.assertEqual(all_story_references[3].story_id, 'story_id_3')
    def test_add_canonical_story(self):
        self.topic.canonical_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1')
        ]
        self.topic.add_canonical_story('story_id_2')
        canonical_story_ids = self.topic.get_canonical_story_ids()
        self.assertEqual(
            canonical_story_ids,
            ['story_id', 'story_id_1', 'story_id_2'])
        # Adding a duplicate id should raise.
        with self.assertRaisesRegexp(
            Exception, 'The story_id story_id_2 is already present in the '
            'canonical story references list of the topic.'):
            self.topic.add_canonical_story('story_id_2')
    def test_delete_additional_story(self):
        self.topic.additional_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_2')
        ]
        self.topic.delete_additional_story('story_id_1')
        additional_story_ids = self.topic.get_additional_story_ids()
        self.assertEqual(
            additional_story_ids, ['story_id', 'story_id_2'])
        with self.assertRaisesRegexp(
            Exception,
            'The story_id story_id_5 is not present in the additional'
            ' story references list of the topic.'):
            self.topic.delete_additional_story('story_id_5')
    def test_add_additional_story(self):
        self.topic.additional_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1')
        ]
        self.topic.add_additional_story('story_id_2')
        additional_story_ids = self.topic.get_additional_story_ids()
        self.assertEqual(
            additional_story_ids,
            ['story_id', 'story_id_1', 'story_id_2'])
        with self.assertRaisesRegexp(
            Exception, 'The story_id story_id_2 is already present in the '
            'additional story references list of the topic.'):
            self.topic.add_additional_story('story_id_2')
    def _assert_validation_error(self, expected_error_substring):
        """Checks that validating self.topic raises a ValidationError whose
        message contains the given substring."""
        with self.assertRaisesRegexp(
            utils.ValidationError, expected_error_substring):
            self.topic.validate()
    def _assert_valid_topic_id(self, expected_error_substring, topic_id):
        """Checks that validating the given topic id raises a ValidationError
        whose message contains the given substring."""
        with self.assertRaisesRegexp(
            utils.ValidationError, expected_error_substring):
            topic_domain.Topic.require_valid_topic_id(topic_id)
    def _assert_valid_abbreviated_name(
            self, expected_error_substring, name):
        """Checks that validating the given abbreviated name raises a
        ValidationError whose message contains the given substring."""
        with self.assertRaisesRegexp(
            utils.ValidationError, expected_error_substring):
            topic_domain.Topic.require_valid_abbreviated_name(name)
    def test_valid_topic_id(self):
        self._assert_valid_topic_id('Topic id should be a string', 10)
        self._assert_valid_topic_id('Topic id abc is invalid', 'abc')
    def test_valid_abbreviated_name(self):
        self._assert_valid_abbreviated_name(
            'Abbreviated name should be a string.', 10)
        self._assert_valid_abbreviated_name(
            'Abbreviated name field should not be empty.', '')
        self._assert_valid_abbreviated_name(
            'Abbreviated name field should not exceed 12 characters.',
            'this is a lengthy name.')
    def test_thumbnail_filename_validation(self):
        self.topic.thumbnail_filename = 1
        self._assert_validation_error(
            'Expected thumbnail filename to be a string, received 1')
    def test_subtopic_title_validation(self):
        self.topic.subtopics[0].title = 1
        self._assert_validation_error('Expected subtopic title to be a string')
    def test_story_id_validation(self):
        self.topic.canonical_story_references = [
            topic_domain.StoryReference(123, True)
        ]
        self._assert_validation_error('Expected story id to be a string')
    def test_story_is_published_validation(self):
        self.topic.canonical_story_references = [
            topic_domain.StoryReference('story_id', 'published')
        ]
        self._assert_validation_error(
            'Expected story_is_published to be a boolean')
    def test_subtopic_id_validation(self):
        self.topic.subtopics[0].id = 'invalid_id'
        self._assert_validation_error('Expected subtopic id to be an int')
    def test_subtopic_skill_ids_validation(self):
        self.topic.subtopics[0].skill_ids = 'abc'
        self._assert_validation_error('Expected skill ids to be a list')
        self.topic.subtopics[0].skill_ids = ['skill_id', 'skill_id']
        self._assert_validation_error(
            'Expected all skill ids to be distinct.')
        self.topic.subtopics[0].skill_ids = [1, 2]
        self._assert_validation_error('Expected each skill id to be a string')
    def test_subtopics_validation(self):
        self.topic.subtopics = 'abc'
        self._assert_validation_error('Expected subtopics to be a list')
    def test_name_validation(self):
        self.topic.name = 1
        self._assert_validation_error('Name should be a string')
        self.topic.name = ''
        self._assert_validation_error('Name field should not be empty')
    def test_subtopic_schema_version_type_validation(self):
        self.topic.subtopic_schema_version = 'invalid_version'
        self._assert_validation_error(
            'Expected subtopic schema version to be an integer')
    def test_story_reference_schema_version_type_validation(self):
        self.topic.story_reference_schema_version = 'invalid_version'
        self._assert_validation_error(
            'Expected story reference schema version to be an integer')
    def test_subtopic_schema_version_validation(self):
        self.topic.subtopic_schema_version = 0
        self._assert_validation_error(
            'Expected subtopic schema version to be %s'
            % (feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION))
    def test_subtopic_type_validation(self):
        self.topic.subtopics = ['subtopic']
        self._assert_validation_error(
            'Expected each subtopic to be a Subtopic object')
    def test_description_validation(self):
        self.topic.description = 1
        self._assert_validation_error('Expected description to be a string')
    def test_next_subtopic_id_validation(self):
        self.topic.next_subtopic_id = '1'
        self._assert_validation_error('Expected next_subtopic_id to be an int')
        self.topic.next_subtopic_id = 1
        self._assert_validation_error(
            'The id for subtopic 1 is greater than or equal to '
            'next_subtopic_id 1')
    def test_language_code_validation(self):
        self.topic.language_code = 0
        self._assert_validation_error('Expected language code to be a string')
        self.topic.language_code = 'xz'
        self._assert_validation_error('Invalid language code')
    def test_canonical_story_references_validation(self):
        self.topic.canonical_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1')
        ]
        self._assert_validation_error(
            'Expected all canonical story ids to be distinct.')
        self.topic.canonical_story_references = 'story_id'
        self._assert_validation_error(
            'Expected canonical story references to be a list')
    def test_additional_story_references_validation(self):
        self.topic.additional_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1')
        ]
        self._assert_validation_error(
            'Expected all additional story ids to be distinct.')
        self.topic.additional_story_references = 'story_id'
        self._assert_validation_error(
            'Expected additional story references to be a list')
    def test_additional_canonical_story_intersection_validation(self):
        """A story id must not appear in both reference lists."""
        self.topic.additional_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_1'),
        ]
        self.topic.canonical_story_references = [
            topic_domain.StoryReference.create_default_story_reference(
                'story_id'),
            topic_domain.StoryReference.create_default_story_reference(
                'story_id_2')
        ]
        self._assert_validation_error(
            'Expected additional story ids list and canonical story '
            'ids list to be mutually exclusive.')
    def test_uncategorized_skill_ids_validation(self):
        self.topic.uncategorized_skill_ids = 'uncategorized_skill_id'
        self._assert_validation_error(
            'Expected uncategorized skill ids to be a list')
    def test_add_uncategorized_skill_id(self):
        self.topic.subtopics.append(
            topic_domain.Subtopic('id_2', 'Title2', ['skill_id_2']))
        # A skill already assigned to a subtopic cannot become uncategorized.
        with self.assertRaisesRegexp(
            Exception,
            'The skill id skill_id_1 already exists in subtopic with id 1'):
            self.topic.add_uncategorized_skill_id('skill_id_1')
        self.topic.add_uncategorized_skill_id('skill_id_3')
        self.assertEqual(self.topic.uncategorized_skill_ids, ['skill_id_3'])
    def test_remove_uncategorized_skill_id(self):
        self.topic.uncategorized_skill_ids = ['skill_id_5']
        with self.assertRaisesRegexp(
            Exception,
            'The skill id skill_id_3 is not present in the topic'):
            self.topic.remove_uncategorized_skill_id('skill_id_3')
        self.topic.remove_uncategorized_skill_id('skill_id_5')
        self.assertEqual(self.topic.uncategorized_skill_ids, [])
    def test_move_skill_id_to_subtopic(self):
        self.topic.uncategorized_skill_ids = ['skill_id_1']
        self.topic.subtopics[0].skill_ids = ['skill_id_2']
        # old_subtopic_id=None means the skill comes from the
        # uncategorized list.
        self.topic.move_skill_id_to_subtopic(None, 1, 'skill_id_1')
        self.assertEqual(self.topic.uncategorized_skill_ids, [])
        self.assertEqual(
            self.topic.subtopics[0].skill_ids, ['skill_id_2', 'skill_id_1'])
        self.topic.uncategorized_skill_ids = ['skill_id_1']
        self.topic.subtopics[0].skill_ids = ['skill_id_2']
        with self.assertRaisesRegexp(
            Exception,
            'Skill id skill_id_3 is not an uncategorized skill id'):
            self.topic.move_skill_id_to_subtopic(None, 'id_1', 'skill_id_3')
    def test_get_subtopic_index(self):
        self.assertIsNone(self.topic.get_subtopic_index(2))
        self.assertEqual(self.topic.get_subtopic_index(1), 0)
    def test_to_dict(self):
        user_ids = [self.user_id_a, self.user_id_b]
        topic_rights = topic_domain.TopicRights(self.topic_id, user_ids, False)
        expected_dict = {
            'topic_id': self.topic_id,
            'manager_names': ['A', 'B'],
            'topic_is_published': False
        }
        self.assertEqual(expected_dict, topic_rights.to_dict())
    def test_is_manager(self):
        user_ids = [self.user_id_a, self.user_id_b]
        topic_rights = topic_domain.TopicRights(self.topic_id, user_ids, False)
        self.assertTrue(topic_rights.is_manager(self.user_id_a))
        self.assertTrue(topic_rights.is_manager(self.user_id_b))
        self.assertFalse(topic_rights.is_manager('fakeuser'))
    def test_cannot_create_topic_rights_change_class_with_invalid_cmd(self):
        with self.assertRaisesRegexp(
            Exception, 'Command invalid cmd is not allowed'):
            topic_domain.TopicRightsChange({
                'cmd': 'invalid cmd'
            })
    def test_cannot_create_topic_rights_change_class_with_invalid_changelist(
            self):
        with self.assertRaisesRegexp(
            Exception, 'Missing cmd key in change dict'):
            topic_domain.TopicRightsChange({})
    def test_create_new_topic_rights_change_class(self):
        topic_rights = topic_domain.TopicRightsChange({
            'cmd': 'create_new'
        })
        self.assertEqual(topic_rights.to_dict(), {'cmd': 'create_new'})
    def test_update_language_code(self):
        self.assertEqual(self.topic.language_code, 'en')
        self.topic.update_language_code('bn')
        self.assertEqual(self.topic.language_code, 'bn')
    def test_update_abbreviated_name(self):
        self.assertEqual(self.topic.abbreviated_name, 'abbrev')
        self.topic.update_abbreviated_name('name')
        self.assertEqual(self.topic.abbreviated_name, 'name')
    def test_update_thumbnail_filename(self):
        self.assertEqual(self.topic.thumbnail_filename, None)
        self.topic.update_thumbnail_filename('img.png')
        self.assertEqual(self.topic.thumbnail_filename, 'img.png')
    def test_cannot_add_uncategorized_skill_with_existing_uncategorized_skill(
            self):
        self.assertEqual(self.topic.uncategorized_skill_ids, [])
        self.topic.uncategorized_skill_ids = ['skill_id1']
        with self.assertRaisesRegexp(
            Exception,
            'The skill id skill_id1 is already an uncategorized skill.'):
            self.topic.add_uncategorized_skill_id('skill_id1')
    def test_cannot_delete_subtopic_with_invalid_subtopic_id(self):
        with self.assertRaisesRegexp(
            Exception, 'A subtopic with id invalid_id doesn\'t exist.'):
            self.topic.delete_subtopic('invalid_id')
    def test_cannot_update_subtopic_title_with_invalid_subtopic_id(self):
        with self.assertRaisesRegexp(
            Exception, 'The subtopic with id invalid_id does not exist.'):
            self.topic.update_subtopic_title('invalid_id', 'new title')
    def test_update_subtopic_title(self):
        self.assertEqual(len(self.topic.subtopics), 1)
        self.assertEqual(self.topic.subtopics[0].title, 'Title')
        self.topic.update_subtopic_title(1, 'new title')
        self.assertEqual(self.topic.subtopics[0].title, 'new title')
    def test_cannot_remove_skill_id_from_subtopic_with_invalid_subtopic_id(
            self):
        with self.assertRaisesRegexp(
            Exception, 'The subtopic with id invalid_id does not exist.'):
            self.topic.remove_skill_id_from_subtopic('invalid_id', 'skill_id1')
    def test_cannot_move_skill_id_to_subtopic_with_invalid_subtopic_id(self):
        with self.assertRaisesRegexp(
            Exception, 'The subtopic with id old_subtopic_id does not exist.'):
            self.topic.move_skill_id_to_subtopic(
                'old_subtopic_id', 'new_subtopic_id', 'skill_id1')
    def test_cannot_move_existing_skill_to_subtopic(self):
        self.topic.subtopics = [
            topic_domain.Subtopic(1, 'Title', ['skill_id_1']),
            topic_domain.Subtopic(2, 'Another title', ['skill_id_1'])]
        with self.assertRaisesRegexp(
            Exception,
            'Skill id skill_id_1 is already present in the target subtopic'):
            self.topic.move_skill_id_to_subtopic(1, 2, 'skill_id_1')
class TopicChangeTests(test_utils.GenericTestBase):
    """Tests for topic_domain.TopicChange.

    Covers rejection of malformed change dicts (missing/unknown cmd,
    missing/extra/invalid attributes) and, for each allowed cmd, that the
    payload keys are exposed as attributes on the change object.
    """
    def test_topic_change_object_with_missing_cmd(self):
        """A change dict with no cmd key is rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Missing cmd key in change dict'):
            topic_domain.TopicChange({'invalid': 'data'})
    def test_topic_change_object_with_invalid_cmd(self):
        """An unrecognized cmd value is rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Command invalid is not allowed'):
            topic_domain.TopicChange({'cmd': 'invalid'})
    def test_topic_change_object_with_missing_attribute_in_cmd(self):
        """update_topic_property requires new_value and old_value."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following required attributes are missing: '
                'new_value, old_value')):
            topic_domain.TopicChange({
                'cmd': 'update_topic_property',
                'property_name': 'name',
            })
    def test_topic_change_object_with_extra_attribute_in_cmd(self):
        """Keys not declared for the cmd are rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following extra attributes are present: invalid')):
            topic_domain.TopicChange({
                'cmd': 'add_subtopic',
                'title': 'title',
                'subtopic_id': 'subtopic_id',
                'invalid': 'invalid'
            })
    def test_topic_change_object_with_invalid_topic_property(self):
        """property_name must be one of the allowed topic properties."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Value for property_name in cmd update_topic_property: '
                'invalid is not allowed')):
            topic_domain.TopicChange({
                'cmd': 'update_topic_property',
                'property_name': 'invalid',
                'old_value': 'old_value',
                'new_value': 'new_value',
            })
    def test_topic_change_object_with_invalid_subtopic_property(self):
        """property_name must be one of the allowed subtopic properties."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Value for property_name in cmd update_subtopic_property: '
                'invalid is not allowed')):
            topic_domain.TopicChange({
                'cmd': 'update_subtopic_property',
                'subtopic_id': 'subtopic_id',
                'property_name': 'invalid',
                'old_value': 'old_value',
                'new_value': 'new_value',
            })
    # The remaining tests verify that each valid cmd exposes its payload
    # values as same-named attributes on the constructed change object.
    def test_topic_change_object_with_add_subtopic(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'add_subtopic',
            'subtopic_id': 'subtopic_id',
            'title': 'title'
        })
        self.assertEqual(topic_change_object.cmd, 'add_subtopic')
        self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
        self.assertEqual(topic_change_object.title, 'title')
    def test_topic_change_object_with_delete_subtopic(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'delete_subtopic',
            'subtopic_id': 'subtopic_id'
        })
        self.assertEqual(topic_change_object.cmd, 'delete_subtopic')
        self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
    def test_topic_change_object_with_add_uncategorized_skill_id(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'add_uncategorized_skill_id',
            'new_uncategorized_skill_id': 'new_uncategorized_skill_id'
        })
        self.assertEqual(topic_change_object.cmd, 'add_uncategorized_skill_id')
        self.assertEqual(
            topic_change_object.new_uncategorized_skill_id,
            'new_uncategorized_skill_id')
    def test_topic_change_object_with_remove_uncategorized_skill_id(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'remove_uncategorized_skill_id',
            'uncategorized_skill_id': 'uncategorized_skill_id'
        })
        self.assertEqual(
            topic_change_object.cmd, 'remove_uncategorized_skill_id')
        self.assertEqual(
            topic_change_object.uncategorized_skill_id,
            'uncategorized_skill_id')
    def test_topic_change_object_with_move_skill_id_to_subtopic(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'move_skill_id_to_subtopic',
            'skill_id': 'skill_id',
            'old_subtopic_id': 'old_subtopic_id',
            'new_subtopic_id': 'new_subtopic_id'
        })
        self.assertEqual(topic_change_object.cmd, 'move_skill_id_to_subtopic')
        self.assertEqual(topic_change_object.skill_id, 'skill_id')
        self.assertEqual(topic_change_object.old_subtopic_id, 'old_subtopic_id')
        self.assertEqual(topic_change_object.new_subtopic_id, 'new_subtopic_id')
    def test_topic_change_object_with_remove_skill_id_from_subtopic(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'remove_skill_id_from_subtopic',
            'skill_id': 'skill_id',
            'subtopic_id': 'subtopic_id'
        })
        self.assertEqual(
            topic_change_object.cmd, 'remove_skill_id_from_subtopic')
        self.assertEqual(topic_change_object.skill_id, 'skill_id')
        self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
    def test_topic_change_object_with_update_subtopic_property(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'update_subtopic_property',
            'subtopic_id': 'subtopic_id',
            'property_name': 'title',
            'new_value': 'new_value',
            'old_value': 'old_value'
        })
        self.assertEqual(topic_change_object.cmd, 'update_subtopic_property')
        self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
        self.assertEqual(topic_change_object.property_name, 'title')
        self.assertEqual(topic_change_object.new_value, 'new_value')
        self.assertEqual(topic_change_object.old_value, 'old_value')
    def test_topic_change_object_with_update_topic_property(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'update_topic_property',
            'property_name': 'name',
            'new_value': 'new_value',
            'old_value': 'old_value'
        })
        self.assertEqual(topic_change_object.cmd, 'update_topic_property')
        self.assertEqual(topic_change_object.property_name, 'name')
        self.assertEqual(topic_change_object.new_value, 'new_value')
        self.assertEqual(topic_change_object.old_value, 'old_value')
    def test_topic_change_object_with_create_new(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'create_new',
            'name': 'name',
        })
        self.assertEqual(topic_change_object.cmd, 'create_new')
        self.assertEqual(topic_change_object.name, 'name')
    def test_topic_change_object_with_migrate_subtopic_schema_to_latest_version(
            self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'migrate_subtopic_schema_to_latest_version',
            'from_version': 'from_version',
            'to_version': 'to_version',
        })
        self.assertEqual(
            topic_change_object.cmd,
            'migrate_subtopic_schema_to_latest_version')
        self.assertEqual(topic_change_object.from_version, 'from_version')
        self.assertEqual(topic_change_object.to_version, 'to_version')
    def test_to_dict(self):
        """A change dict round-trips through TopicChange.to_dict."""
        topic_change_dict = {
            'cmd': 'create_new',
            'name': 'name'
        }
        topic_change_object = topic_domain.TopicChange(topic_change_dict)
        self.assertEqual(topic_change_object.to_dict(), topic_change_dict)
class TopicRightsChangeTests(test_utils.GenericTestBase):
    """Tests for topic_domain.TopicRightsChange.

    Covers rejection of malformed rights-change dicts and, for each valid
    cmd, that the payload is exposed on the change object.
    """
    def test_topic_rights_change_object_with_missing_cmd(self):
        """A change dict with no cmd key is rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Missing cmd key in change dict'):
            topic_domain.TopicRightsChange({'invalid': 'data'})
    # NOTE(review): 'topic_change_rights' in the name below looks like a
    # typo for 'topic_rights_change'; left as-is since renaming would
    # change the test id reported by the runner.
    def test_topic_change_rights_object_with_invalid_cmd(self):
        """An unrecognized cmd value is rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Command invalid is not allowed'):
            topic_domain.TopicRightsChange({'cmd': 'invalid'})
    def test_topic_rights_change_object_with_missing_attribute_in_cmd(self):
        """change_role requires new_role and old_role."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following required attributes are missing: '
                'new_role, old_role')):
            topic_domain.TopicRightsChange({
                'cmd': 'change_role',
                'assignee_id': 'assignee_id',
            })
    def test_topic_rights_change_object_with_extra_attribute_in_cmd(self):
        """Keys not declared for the cmd are rejected."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following extra attributes are present: invalid')):
            topic_domain.TopicRightsChange({
                'cmd': 'publish_topic',
                'invalid': 'invalid'
            })
    def test_topic_rights_change_object_with_invalid_role(self):
        """Role values must come from the allowed role constants."""
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Value for old_role in cmd change_role: '
                'invalid is not allowed')):
            topic_domain.TopicRightsChange({
                'cmd': 'change_role',
                'assignee_id': 'assignee_id',
                'old_role': 'invalid',
                'new_role': topic_domain.ROLE_MANAGER
            })
    def test_topic_rights_change_object_with_create_new(self):
        topic_rights_change_object = topic_domain.TopicRightsChange({
            'cmd': 'create_new'
        })
        self.assertEqual(topic_rights_change_object.cmd, 'create_new')
    def test_topic_rights_change_object_with_change_role(self):
        topic_rights_change_object = topic_domain.TopicRightsChange({
            'cmd': 'change_role',
            'assignee_id': 'assignee_id',
            'old_role': topic_domain.ROLE_NONE,
            'new_role': topic_domain.ROLE_MANAGER
        })
        self.assertEqual(topic_rights_change_object.cmd, 'change_role')
        self.assertEqual(topic_rights_change_object.assignee_id, 'assignee_id')
        self.assertEqual(
            topic_rights_change_object.old_role, topic_domain.ROLE_NONE)
        self.assertEqual(
            topic_rights_change_object.new_role, topic_domain.ROLE_MANAGER)
    def test_topic_rights_change_object_with_publish_topic(self):
        topic_rights_change_object = topic_domain.TopicRightsChange({
            'cmd': 'publish_topic'
        })
        self.assertEqual(topic_rights_change_object.cmd, 'publish_topic')
    def test_topic_rights_change_object_with_unpublish_topic(self):
        topic_rights_change_object = topic_domain.TopicRightsChange({
            'cmd': 'unpublish_topic'
        })
        self.assertEqual(topic_rights_change_object.cmd, 'unpublish_topic')
    def test_to_dict(self):
        """A rights-change dict round-trips through to_dict."""
        topic_rights_change_dict = {
            'cmd': 'change_role',
            'assignee_id': 'assignee_id',
            'old_role': topic_domain.ROLE_NONE,
            'new_role': topic_domain.ROLE_MANAGER
        }
        topic_rights_change_object = topic_domain.TopicRightsChange(
            topic_rights_change_dict)
        self.assertEqual(
            topic_rights_change_object.to_dict(), topic_rights_change_dict)
class TopicSummaryTests(test_utils.GenericTestBase):
    """Tests for topic_domain.TopicSummary.

    setUp builds one fully-valid summary; each validation test then breaks
    a single field and asserts that validate() raises the matching error.
    """
    def setUp(self):
        super(TopicSummaryTests, self).setUp()
        current_time = datetime.datetime.utcnow()
        time_in_millisecs = utils.get_time_in_millisecs(current_time)
        # Expected serialized form of the summary constructed below.
        self.topic_summary_dict = {
            'id': 'topic_id',
            'name': 'name',
            'language_code': 'en',
            'version': 1,
            'canonical_story_count': 1,
            'additional_story_count': 1,
            'uncategorized_skill_count': 1,
            'subtopic_count': 1,
            'total_skill_count': 1,
            'topic_model_created_on': time_in_millisecs,
            'topic_model_last_updated': time_in_millisecs
        }
        # NOTE(review): 'name' is passed twice — presumably name and
        # canonical_name positional args; confirm against the TopicSummary
        # constructor signature.
        self.topic_summary = topic_domain.TopicSummary(
            'topic_id', 'name', 'name', 'en', 1, 1, 1, 1, 1, 1,
            current_time, current_time)
    def test_topic_summary_gets_created(self):
        """to_dict() matches the expected serialized form."""
        self.assertEqual(
            self.topic_summary.to_dict(), self.topic_summary_dict)
    def test_validation_passes_with_valid_properties(self):
        """The unmodified fixture summary validates cleanly."""
        self.topic_summary.validate()
    def test_validation_fails_with_invalid_name(self):
        self.topic_summary.name = 0
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Name should be a string.'):
            self.topic_summary.validate()
    def test_validation_fails_with_empty_name(self):
        self.topic_summary.name = ''
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Name field should not be empty'):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_canonical_name(self):
        self.topic_summary.canonical_name = 0
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Canonical name should be a string.'):
            self.topic_summary.validate()
    def test_validation_fails_with_empty_canonical_name(self):
        self.topic_summary.canonical_name = ''
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Canonical name field should not be empty'):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_language_code(self):
        self.topic_summary.language_code = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected language code to be a string, received 0'):
            self.topic_summary.validate()
    def test_validation_fails_with_unallowed_language_code(self):
        self.topic_summary.language_code = 'invalid'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid language code: invalid'):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_canonical_story_count(self):
        self.topic_summary.canonical_story_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected canonical story count to be an integer, received \'10\''):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_canonical_story_count(self):
        self.topic_summary.canonical_story_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected canonical_story_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_additional_story_count(self):
        self.topic_summary.additional_story_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected additional story count to be an '
                'integer, received \'10\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_additional_story_count(self):
        self.topic_summary.additional_story_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected additional_story_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_uncategorized_skill_count(self):
        self.topic_summary.uncategorized_skill_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected uncategorized skill count to be an integer, '
                'received \'10\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_uncategorized_skill_count(self):
        self.topic_summary.uncategorized_skill_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected uncategorized_skill_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_total_skill_count(self):
        self.topic_summary.total_skill_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected total skill count to be an integer, received \'10\''):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_total_skill_count(self):
        self.topic_summary.total_skill_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected total_skill_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_total_skill_count_value(self):
        # total must be at least as large as the uncategorized count.
        self.topic_summary.total_skill_count = 5
        self.topic_summary.uncategorized_skill_count = 10
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected total_skill_count to be greater than or equal to '
                'uncategorized_skill_count 10, received \'5\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_subtopic_count(self):
        self.topic_summary.subtopic_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected subtopic count to be an integer, received \'10\''):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_subtopic_count(self):
        self.topic_summary.subtopic_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected subtopic_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
class TopicRightsTests(test_utils.GenericTestBase):
    """Tests for the topic_domain.TopicRights domain object.

    The fixture attributes were originally named ``topic_summary`` /
    ``topic_summary_dict`` (copy-pasted from TopicSummaryTests) even though
    they hold a TopicRights object; they are renamed here to match their
    contents. Test method names are kept unchanged so reported test ids
    stay stable.
    """
    def setUp(self):
        super(TopicRightsTests, self).setUp()
        self.signup('a@example.com', 'A')
        self.signup('b@example.com', 'B')
        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_b = self.get_user_id_from_email('b@example.com')
        # Expected serialized form of the rights object constructed below;
        # only user A is a manager.
        self.topic_rights_dict = {
            'topic_id': 'topic_id',
            'manager_names': ['A'],
            'topic_is_published': False,
        }
        self.topic_rights = topic_domain.TopicRights(
            'topic_id', [self.user_id_a], False)
    def test_topic_summary_gets_created(self):
        """to_dict() serializes id, manager names and published flag."""
        self.assertEqual(
            self.topic_rights.to_dict(), self.topic_rights_dict)
    def test_is_manager(self):
        """Only ids in the manager list are reported as managers."""
        self.assertTrue(self.topic_rights.is_manager(self.user_id_a))
        self.assertFalse(self.topic_rights.is_manager(self.user_id_b))
# NOTE: dataset-extraction residue (column statistics) removed from here to
# keep the file syntactically valid.
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from constants import constants
from core.domain import topic_domain
from core.domain import user_services
from core.tests import test_utils
import feconf
import utils
class TopicDomainUnitTests(test_utils.GenericTestBase):
topic_id = 'topic_id'
def setUp(self):
super(TopicDomainUnitTests, self).setUp()
self.signup('a@example.com', 'A')
self.signup('b@example.com', 'B')
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'Name', 'abbrev')
self.topic.subtopics = [
topic_domain.Subtopic(1, 'Title', ['skill_id_1'])]
self.topic.next_subtopic_id = 2
self.user_id_a = self.get_user_id_from_email('a@example.com')
self.user_id_b = self.get_user_id_from_email('b@example.com')
self.user_a = user_services.UserActionsInfo(self.user_id_a)
self.user_b = user_services.UserActionsInfo(self.user_id_b)
def test_create_default_topic(self):
topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'Name', 'abbrev')
expected_topic_dict = {
'id': self.topic_id,
'name': 'Name',
'abbreviated_name': 'abbrev',
'thumbnail_filename': None,
'description': feconf.DEFAULT_TOPIC_DESCRIPTION,
'canonical_story_references': [],
'additional_story_references': [],
'uncategorized_skill_ids': [],
'subtopics': [],
'next_subtopic_id': 1,
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'subtopic_schema_version': feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION,
'story_reference_schema_version': (
feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
'version': 0
}
self.assertEqual(topic.to_dict(), expected_topic_dict)
def test_get_all_skill_ids(self):
self.topic.uncategorized_skill_ids = ['skill_id_2', 'skill_id_3']
self.assertEqual(
self.topic.get_all_skill_ids(),
['skill_id_2', 'skill_id_3', 'skill_id_1'])
def test_get_all_uncategorized_skill_ids(self):
self.topic.uncategorized_skill_ids = ['skill_id_1', 'skill_id_2']
self.assertEqual(
self.topic.get_all_uncategorized_skill_ids(),
['skill_id_1', 'skill_id_2'])
def test_get_all_subtopics(self):
self.topic.subtopics = [topic_domain.Subtopic(
1, 'Title', ['skill_id_1'])]
subtopics = self.topic.get_all_subtopics()
self.assertEqual(
subtopics, [{
'skill_ids': ['skill_id_1'],
'id': 1,
'title': 'Title'}])
def test_delete_canonical_story(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_2')
]
self.topic.delete_canonical_story('story_id_1')
canonical_story_ids = self.topic.get_canonical_story_ids()
self.assertEqual(
canonical_story_ids, ['story_id', 'story_id_2'])
with self.assertRaisesRegexp(
Exception, 'The story_id story_id_5 is not present in the canonical'
' story references list of the topic.'):
self.topic.delete_canonical_story('story_id_5')
def test_get_all_story_references(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id_2'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_3')
]
all_story_references = self.topic.get_all_story_references()
self.assertEqual(len(all_story_references), 4)
self.assertEqual(all_story_references[0].story_id, 'story_id')
self.assertEqual(all_story_references[1].story_id, 'story_id_1')
self.assertEqual(all_story_references[2].story_id, 'story_id_2')
self.assertEqual(all_story_references[3].story_id, 'story_id_3')
def test_add_canonical_story(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self.topic.add_canonical_story('story_id_2')
canonical_story_ids = self.topic.get_canonical_story_ids()
self.assertEqual(
canonical_story_ids,
['story_id', 'story_id_1', 'story_id_2'])
with self.assertRaisesRegexp(
Exception, 'The story_id story_id_2 is already present in the '
'canonical story references list of the topic.'):
self.topic.add_canonical_story('story_id_2')
def test_delete_additional_story(self):
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_2')
]
self.topic.delete_additional_story('story_id_1')
additional_story_ids = self.topic.get_additional_story_ids()
self.assertEqual(
additional_story_ids, ['story_id', 'story_id_2'])
with self.assertRaisesRegexp(
Exception,
'The story_id story_id_5 is not present in the additional'
' story references list of the topic.'):
self.topic.delete_additional_story('story_id_5')
def test_add_additional_story(self):
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self.topic.add_additional_story('story_id_2')
additional_story_ids = self.topic.get_additional_story_ids()
self.assertEqual(
additional_story_ids,
['story_id', 'story_id_1', 'story_id_2'])
with self.assertRaisesRegexp(
Exception, 'The story_id story_id_2 is already present in the '
'additional story references list of the topic.'):
self.topic.add_additional_story('story_id_2')
def _assert_validation_error(self, expected_error_substring):
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.topic.validate()
def _assert_valid_topic_id(self, expected_error_substring, topic_id):
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
topic_domain.Topic.require_valid_topic_id(topic_id)
def _assert_valid_abbreviated_name(
self, expected_error_substring, name):
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
topic_domain.Topic.require_valid_abbreviated_name(name)
def test_valid_topic_id(self):
self._assert_valid_topic_id('Topic id should be a string', 10)
self._assert_valid_topic_id('Topic id abc is invalid', 'abc')
def test_valid_abbreviated_name(self):
self._assert_valid_abbreviated_name(
'Abbreviated name should be a string.', 10)
self._assert_valid_abbreviated_name(
'Abbreviated name field should not be empty.', '')
self._assert_valid_abbreviated_name(
'Abbreviated name field should not exceed 12 characters.',
'this is a lengthy name.')
def test_thumbnail_filename_validation(self):
self.topic.thumbnail_filename = 1
self._assert_validation_error(
'Expected thumbnail filename to be a string, received 1')
def test_subtopic_title_validation(self):
self.topic.subtopics[0].title = 1
self._assert_validation_error('Expected subtopic title to be a string')
def test_story_id_validation(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference(123, True)
]
self._assert_validation_error('Expected story id to be a string')
def test_story_is_published_validation(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference('story_id', 'published')
]
self._assert_validation_error(
'Expected story_is_published to be a boolean')
def test_subtopic_id_validation(self):
self.topic.subtopics[0].id = 'invalid_id'
self._assert_validation_error('Expected subtopic id to be an int')
def test_subtopic_skill_ids_validation(self):
self.topic.subtopics[0].skill_ids = 'abc'
self._assert_validation_error('Expected skill ids to be a list')
self.topic.subtopics[0].skill_ids = ['skill_id', 'skill_id']
self._assert_validation_error(
'Expected all skill ids to be distinct.')
self.topic.subtopics[0].skill_ids = [1, 2]
self._assert_validation_error('Expected each skill id to be a string')
def test_subtopics_validation(self):
self.topic.subtopics = 'abc'
self._assert_validation_error('Expected subtopics to be a list')
def test_name_validation(self):
self.topic.name = 1
self._assert_validation_error('Name should be a string')
self.topic.name = ''
self._assert_validation_error('Name field should not be empty')
def test_subtopic_schema_version_type_validation(self):
self.topic.subtopic_schema_version = 'invalid_version'
self._assert_validation_error(
'Expected subtopic schema version to be an integer')
def test_story_reference_schema_version_type_validation(self):
self.topic.story_reference_schema_version = 'invalid_version'
self._assert_validation_error(
'Expected story reference schema version to be an integer')
def test_subtopic_schema_version_validation(self):
self.topic.subtopic_schema_version = 0
self._assert_validation_error(
'Expected subtopic schema version to be %s'
% (feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION))
def test_subtopic_type_validation(self):
self.topic.subtopics = ['subtopic']
self._assert_validation_error(
'Expected each subtopic to be a Subtopic object')
def test_description_validation(self):
self.topic.description = 1
self._assert_validation_error('Expected description to be a string')
def test_next_subtopic_id_validation(self):
self.topic.next_subtopic_id = '1'
self._assert_validation_error('Expected next_subtopic_id to be an int')
self.topic.next_subtopic_id = 1
self._assert_validation_error(
'The id for subtopic 1 is greater than or equal to '
'next_subtopic_id 1')
def test_language_code_validation(self):
self.topic.language_code = 0
self._assert_validation_error('Expected language code to be a string')
self.topic.language_code = 'xz'
self._assert_validation_error('Invalid language code')
def test_canonical_story_references_validation(self):
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self._assert_validation_error(
'Expected all canonical story ids to be distinct.')
self.topic.canonical_story_references = 'story_id'
self._assert_validation_error(
'Expected canonical story references to be a list')
def test_additional_story_references_validation(self):
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1')
]
self._assert_validation_error(
'Expected all additional story ids to be distinct.')
self.topic.additional_story_references = 'story_id'
self._assert_validation_error(
'Expected additional story references to be a list')
def test_additional_canonical_story_intersection_validation(self):
self.topic.additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_1'),
]
self.topic.canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(
'story_id'),
topic_domain.StoryReference.create_default_story_reference(
'story_id_2')
]
self._assert_validation_error(
'Expected additional story ids list and canonical story '
'ids list to be mutually exclusive.')
def test_uncategorized_skill_ids_validation(self):
self.topic.uncategorized_skill_ids = 'uncategorized_skill_id'
self._assert_validation_error(
'Expected uncategorized skill ids to be a list')
def test_add_uncategorized_skill_id(self):
self.topic.subtopics.append(
topic_domain.Subtopic('id_2', 'Title2', ['skill_id_2']))
with self.assertRaisesRegexp(
Exception,
'The skill id skill_id_1 already exists in subtopic with id 1'):
self.topic.add_uncategorized_skill_id('skill_id_1')
self.topic.add_uncategorized_skill_id('skill_id_3')
self.assertEqual(self.topic.uncategorized_skill_ids, ['skill_id_3'])
def test_remove_uncategorized_skill_id(self):
self.topic.uncategorized_skill_ids = ['skill_id_5']
with self.assertRaisesRegexp(
Exception,
'The skill id skill_id_3 is not present in the topic'):
self.topic.remove_uncategorized_skill_id('skill_id_3')
self.topic.remove_uncategorized_skill_id('skill_id_5')
self.assertEqual(self.topic.uncategorized_skill_ids, [])
def test_move_skill_id_to_subtopic(self):
self.topic.uncategorized_skill_ids = ['skill_id_1']
self.topic.subtopics[0].skill_ids = ['skill_id_2']
self.topic.move_skill_id_to_subtopic(None, 1, 'skill_id_1')
self.assertEqual(self.topic.uncategorized_skill_ids, [])
self.assertEqual(
self.topic.subtopics[0].skill_ids, ['skill_id_2', 'skill_id_1'])
self.topic.uncategorized_skill_ids = ['skill_id_1']
self.topic.subtopics[0].skill_ids = ['skill_id_2']
with self.assertRaisesRegexp(
Exception,
'Skill id skill_id_3 is not an uncategorized skill id'):
self.topic.move_skill_id_to_subtopic(None, 'id_1', 'skill_id_3')
def test_get_subtopic_index(self):
self.assertIsNone(self.topic.get_subtopic_index(2))
self.assertEqual(self.topic.get_subtopic_index(1), 0)
def test_to_dict(self):
user_ids = [self.user_id_a, self.user_id_b]
topic_rights = topic_domain.TopicRights(self.topic_id, user_ids, False)
expected_dict = {
'topic_id': self.topic_id,
'manager_names': ['A', 'B'],
'topic_is_published': False
}
self.assertEqual(expected_dict, topic_rights.to_dict())
def test_is_manager(self):
user_ids = [self.user_id_a, self.user_id_b]
topic_rights = topic_domain.TopicRights(self.topic_id, user_ids, False)
self.assertTrue(topic_rights.is_manager(self.user_id_a))
self.assertTrue(topic_rights.is_manager(self.user_id_b))
self.assertFalse(topic_rights.is_manager('fakeuser'))
def test_cannot_create_topic_rights_change_class_with_invalid_cmd(self):
with self.assertRaisesRegexp(
Exception, 'Command invalid cmd is not allowed'):
topic_domain.TopicRightsChange({
'cmd': 'invalid cmd'
})
def test_cannot_create_topic_rights_change_class_with_invalid_changelist(
self):
with self.assertRaisesRegexp(
Exception, 'Missing cmd key in change dict'):
topic_domain.TopicRightsChange({})
def test_create_new_topic_rights_change_class(self):
topic_rights = topic_domain.TopicRightsChange({
'cmd': 'create_new'
})
self.assertEqual(topic_rights.to_dict(), {'cmd': 'create_new'})
def test_update_language_code(self):
self.assertEqual(self.topic.language_code, 'en')
self.topic.update_language_code('bn')
self.assertEqual(self.topic.language_code, 'bn')
def test_update_abbreviated_name(self):
self.assertEqual(self.topic.abbreviated_name, 'abbrev')
self.topic.update_abbreviated_name('name')
self.assertEqual(self.topic.abbreviated_name, 'name')
def test_update_thumbnail_filename(self):
self.assertEqual(self.topic.thumbnail_filename, None)
self.topic.update_thumbnail_filename('img.png')
self.assertEqual(self.topic.thumbnail_filename, 'img.png')
def test_cannot_add_uncategorized_skill_with_existing_uncategorized_skill(
self):
self.assertEqual(self.topic.uncategorized_skill_ids, [])
self.topic.uncategorized_skill_ids = ['skill_id1']
with self.assertRaisesRegexp(
Exception,
'The skill id skill_id1 is already an uncategorized skill.'):
self.topic.add_uncategorized_skill_id('skill_id1')
    def test_cannot_delete_subtopic_with_invalid_subtopic_id(self):
        """Deleting a nonexistent subtopic id fails."""
        with self.assertRaisesRegexp(
            Exception, 'A subtopic with id invalid_id doesn\'t exist.'):
            self.topic.delete_subtopic('invalid_id')
    def test_cannot_update_subtopic_title_with_invalid_subtopic_id(self):
        """Retitling a nonexistent subtopic id fails."""
        with self.assertRaisesRegexp(
            Exception, 'The subtopic with id invalid_id does not exist.'):
            self.topic.update_subtopic_title('invalid_id', 'new title')
    def test_update_subtopic_title(self):
        """update_subtopic_title replaces the title of the given subtopic."""
        self.assertEqual(len(self.topic.subtopics), 1)
        self.assertEqual(self.topic.subtopics[0].title, 'Title')
        self.topic.update_subtopic_title(1, 'new title')
        self.assertEqual(self.topic.subtopics[0].title, 'new title')
    def test_cannot_remove_skill_id_from_subtopic_with_invalid_subtopic_id(
            self):
        """Removing a skill from a nonexistent subtopic fails."""
        with self.assertRaisesRegexp(
            Exception, 'The subtopic with id invalid_id does not exist.'):
            self.topic.remove_skill_id_from_subtopic('invalid_id', 'skill_id1')
    def test_cannot_move_skill_id_to_subtopic_with_invalid_subtopic_id(self):
        """Moving a skill out of a nonexistent subtopic fails."""
        with self.assertRaisesRegexp(
            Exception, 'The subtopic with id old_subtopic_id does not exist.'):
            self.topic.move_skill_id_to_subtopic(
                'old_subtopic_id', 'new_subtopic_id', 'skill_id1')
    def test_cannot_move_existing_skill_to_subtopic(self):
        """Moving a skill into a subtopic that already has it fails."""
        self.topic.subtopics = [
            topic_domain.Subtopic(1, 'Title', ['skill_id_1']),
            topic_domain.Subtopic(2, 'Another title', ['skill_id_1'])]
        with self.assertRaisesRegexp(
            Exception,
            'Skill id skill_id_1 is already present in the target subtopic'):
            self.topic.move_skill_id_to_subtopic(1, 2, 'skill_id_1')
class TopicChangeTests(test_utils.GenericTestBase):
    """Tests for TopicChange: cmd/attribute validation and per-cmd
    attribute extraction for every supported topic change command."""
    def test_topic_change_object_with_missing_cmd(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Missing cmd key in change dict'):
            topic_domain.TopicChange({'invalid': 'data'})
    def test_topic_change_object_with_invalid_cmd(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Command invalid is not allowed'):
            topic_domain.TopicChange({'cmd': 'invalid'})
    def test_topic_change_object_with_missing_attribute_in_cmd(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following required attributes are missing: '
                'new_value, old_value')):
            topic_domain.TopicChange({
                'cmd': 'update_topic_property',
                'property_name': 'name',
            })
    def test_topic_change_object_with_extra_attribute_in_cmd(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following extra attributes are present: invalid')):
            topic_domain.TopicChange({
                'cmd': 'add_subtopic',
                'title': 'title',
                'subtopic_id': 'subtopic_id',
                'invalid': 'invalid'
            })
    def test_topic_change_object_with_invalid_topic_property(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Value for property_name in cmd update_topic_property: '
                'invalid is not allowed')):
            topic_domain.TopicChange({
                'cmd': 'update_topic_property',
                'property_name': 'invalid',
                'old_value': 'old_value',
                'new_value': 'new_value',
            })
    def test_topic_change_object_with_invalid_subtopic_property(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Value for property_name in cmd update_subtopic_property: '
                'invalid is not allowed')):
            topic_domain.TopicChange({
                'cmd': 'update_subtopic_property',
                'subtopic_id': 'subtopic_id',
                'property_name': 'invalid',
                'old_value': 'old_value',
                'new_value': 'new_value',
            })
    def test_topic_change_object_with_add_subtopic(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'add_subtopic',
            'subtopic_id': 'subtopic_id',
            'title': 'title'
        })
        self.assertEqual(topic_change_object.cmd, 'add_subtopic')
        self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
        self.assertEqual(topic_change_object.title, 'title')
    def test_topic_change_object_with_delete_subtopic(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'delete_subtopic',
            'subtopic_id': 'subtopic_id'
        })
        self.assertEqual(topic_change_object.cmd, 'delete_subtopic')
        self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
    def test_topic_change_object_with_add_uncategorized_skill_id(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'add_uncategorized_skill_id',
            'new_uncategorized_skill_id': 'new_uncategorized_skill_id'
        })
        self.assertEqual(topic_change_object.cmd, 'add_uncategorized_skill_id')
        self.assertEqual(
            topic_change_object.new_uncategorized_skill_id,
            'new_uncategorized_skill_id')
    def test_topic_change_object_with_remove_uncategorized_skill_id(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'remove_uncategorized_skill_id',
            'uncategorized_skill_id': 'uncategorized_skill_id'
        })
        self.assertEqual(
            topic_change_object.cmd, 'remove_uncategorized_skill_id')
        self.assertEqual(
            topic_change_object.uncategorized_skill_id,
            'uncategorized_skill_id')
    def test_topic_change_object_with_move_skill_id_to_subtopic(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'move_skill_id_to_subtopic',
            'skill_id': 'skill_id',
            'old_subtopic_id': 'old_subtopic_id',
            'new_subtopic_id': 'new_subtopic_id'
        })
        self.assertEqual(topic_change_object.cmd, 'move_skill_id_to_subtopic')
        self.assertEqual(topic_change_object.skill_id, 'skill_id')
        self.assertEqual(topic_change_object.old_subtopic_id, 'old_subtopic_id')
        self.assertEqual(topic_change_object.new_subtopic_id, 'new_subtopic_id')
    def test_topic_change_object_with_remove_skill_id_from_subtopic(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'remove_skill_id_from_subtopic',
            'skill_id': 'skill_id',
            'subtopic_id': 'subtopic_id'
        })
        self.assertEqual(
            topic_change_object.cmd, 'remove_skill_id_from_subtopic')
        self.assertEqual(topic_change_object.skill_id, 'skill_id')
        self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
    def test_topic_change_object_with_update_subtopic_property(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'update_subtopic_property',
            'subtopic_id': 'subtopic_id',
            'property_name': 'title',
            'new_value': 'new_value',
            'old_value': 'old_value'
        })
        self.assertEqual(topic_change_object.cmd, 'update_subtopic_property')
        self.assertEqual(topic_change_object.subtopic_id, 'subtopic_id')
        self.assertEqual(topic_change_object.property_name, 'title')
        self.assertEqual(topic_change_object.new_value, 'new_value')
        self.assertEqual(topic_change_object.old_value, 'old_value')
    def test_topic_change_object_with_update_topic_property(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'update_topic_property',
            'property_name': 'name',
            'new_value': 'new_value',
            'old_value': 'old_value'
        })
        self.assertEqual(topic_change_object.cmd, 'update_topic_property')
        self.assertEqual(topic_change_object.property_name, 'name')
        self.assertEqual(topic_change_object.new_value, 'new_value')
        self.assertEqual(topic_change_object.old_value, 'old_value')
    def test_topic_change_object_with_create_new(self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'create_new',
            'name': 'name',
        })
        self.assertEqual(topic_change_object.cmd, 'create_new')
        self.assertEqual(topic_change_object.name, 'name')
    def test_topic_change_object_with_migrate_subtopic_schema_to_latest_version(
            self):
        topic_change_object = topic_domain.TopicChange({
            'cmd': 'migrate_subtopic_schema_to_latest_version',
            'from_version': 'from_version',
            'to_version': 'to_version',
        })
        self.assertEqual(
            topic_change_object.cmd,
            'migrate_subtopic_schema_to_latest_version')
        self.assertEqual(topic_change_object.from_version, 'from_version')
        self.assertEqual(topic_change_object.to_version, 'to_version')
    def test_to_dict(self):
        """to_dict() reproduces the change dict it was built from."""
        topic_change_dict = {
            'cmd': 'create_new',
            'name': 'name'
        }
        topic_change_object = topic_domain.TopicChange(topic_change_dict)
        self.assertEqual(topic_change_object.to_dict(), topic_change_dict)
class TopicRightsChangeTests(test_utils.GenericTestBase):
    """Tests for TopicRightsChange: cmd validation, role validation and
    per-cmd attribute extraction."""
    def test_topic_rights_change_object_with_missing_cmd(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Missing cmd key in change dict'):
            topic_domain.TopicRightsChange({'invalid': 'data'})
    def test_topic_change_rights_object_with_invalid_cmd(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Command invalid is not allowed'):
            topic_domain.TopicRightsChange({'cmd': 'invalid'})
    def test_topic_rights_change_object_with_missing_attribute_in_cmd(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following required attributes are missing: '
                'new_role, old_role')):
            topic_domain.TopicRightsChange({
                'cmd': 'change_role',
                'assignee_id': 'assignee_id',
            })
    def test_topic_rights_change_object_with_extra_attribute_in_cmd(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'The following extra attributes are present: invalid')):
            topic_domain.TopicRightsChange({
                'cmd': 'publish_topic',
                'invalid': 'invalid'
            })
    def test_topic_rights_change_object_with_invalid_role(self):
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Value for old_role in cmd change_role: '
                'invalid is not allowed')):
            topic_domain.TopicRightsChange({
                'cmd': 'change_role',
                'assignee_id': 'assignee_id',
                'old_role': 'invalid',
                'new_role': topic_domain.ROLE_MANAGER
            })
    def test_topic_rights_change_object_with_create_new(self):
        topic_rights_change_object = topic_domain.TopicRightsChange({
            'cmd': 'create_new'
        })
        self.assertEqual(topic_rights_change_object.cmd, 'create_new')
    def test_topic_rights_change_object_with_change_role(self):
        topic_rights_change_object = topic_domain.TopicRightsChange({
            'cmd': 'change_role',
            'assignee_id': 'assignee_id',
            'old_role': topic_domain.ROLE_NONE,
            'new_role': topic_domain.ROLE_MANAGER
        })
        self.assertEqual(topic_rights_change_object.cmd, 'change_role')
        self.assertEqual(topic_rights_change_object.assignee_id, 'assignee_id')
        self.assertEqual(
            topic_rights_change_object.old_role, topic_domain.ROLE_NONE)
        self.assertEqual(
            topic_rights_change_object.new_role, topic_domain.ROLE_MANAGER)
    def test_topic_rights_change_object_with_publish_topic(self):
        topic_rights_change_object = topic_domain.TopicRightsChange({
            'cmd': 'publish_topic'
        })
        self.assertEqual(topic_rights_change_object.cmd, 'publish_topic')
    def test_topic_rights_change_object_with_unpublish_topic(self):
        topic_rights_change_object = topic_domain.TopicRightsChange({
            'cmd': 'unpublish_topic'
        })
        self.assertEqual(topic_rights_change_object.cmd, 'unpublish_topic')
    def test_to_dict(self):
        """to_dict() reproduces the change dict it was built from."""
        topic_rights_change_dict = {
            'cmd': 'change_role',
            'assignee_id': 'assignee_id',
            'old_role': topic_domain.ROLE_NONE,
            'new_role': topic_domain.ROLE_MANAGER
        }
        topic_rights_change_object = topic_domain.TopicRightsChange(
            topic_rights_change_dict)
        self.assertEqual(
            topic_rights_change_object.to_dict(), topic_rights_change_dict)
class TopicSummaryTests(test_utils.GenericTestBase):
    """Tests for TopicSummary: construction, to_dict() and the
    per-field validation errors raised by validate()."""
    def setUp(self):
        super(TopicSummaryTests, self).setUp()
        current_time = datetime.datetime.utcnow()
        time_in_millisecs = utils.get_time_in_millisecs(current_time)
        # Expected to_dict() output for the summary built below.
        self.topic_summary_dict = {
            'id': 'topic_id',
            'name': 'name',
            'language_code': 'en',
            'version': 1,
            'canonical_story_count': 1,
            'additional_story_count': 1,
            'uncategorized_skill_count': 1,
            'subtopic_count': 1,
            'total_skill_count': 1,
            'topic_model_created_on': time_in_millisecs,
            'topic_model_last_updated': time_in_millisecs
        }
        self.topic_summary = topic_domain.TopicSummary(
            'topic_id', 'name', 'name', 'en', 1, 1, 1, 1, 1, 1,
            current_time, current_time)
    def test_topic_summary_gets_created(self):
        self.assertEqual(
            self.topic_summary.to_dict(), self.topic_summary_dict)
    def test_validation_passes_with_valid_properties(self):
        self.topic_summary.validate()
    def test_validation_fails_with_invalid_name(self):
        self.topic_summary.name = 0
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Name should be a string.'):
            self.topic_summary.validate()
    def test_validation_fails_with_empty_name(self):
        self.topic_summary.name = ''
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Name field should not be empty'):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_canonical_name(self):
        self.topic_summary.canonical_name = 0
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Canonical name should be a string.'):
            self.topic_summary.validate()
    def test_validation_fails_with_empty_canonical_name(self):
        self.topic_summary.canonical_name = ''
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Canonical name field should not be empty'):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_language_code(self):
        self.topic_summary.language_code = 0
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected language code to be a string, received 0'):
            self.topic_summary.validate()
    def test_validation_fails_with_unallowed_language_code(self):
        self.topic_summary.language_code = 'invalid'
        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid language code: invalid'):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_canonical_story_count(self):
        self.topic_summary.canonical_story_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected canonical story count to be an integer, received \'10\''):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_canonical_story_count(self):
        self.topic_summary.canonical_story_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected canonical_story_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_additional_story_count(self):
        self.topic_summary.additional_story_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected additional story count to be an '
                'integer, received \'10\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_additional_story_count(self):
        self.topic_summary.additional_story_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected additional_story_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_uncategorized_skill_count(self):
        self.topic_summary.uncategorized_skill_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected uncategorized skill count to be an integer, '
                'received \'10\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_uncategorized_skill_count(self):
        self.topic_summary.uncategorized_skill_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected uncategorized_skill_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_total_skill_count(self):
        self.topic_summary.total_skill_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected total skill count to be an integer, received \'10\''):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_total_skill_count(self):
        self.topic_summary.total_skill_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected total_skill_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_total_skill_count_value(self):
        # total_skill_count must be at least uncategorized_skill_count.
        self.topic_summary.total_skill_count = 5
        self.topic_summary.uncategorized_skill_count = 10
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected total_skill_count to be greater than or equal to '
                'uncategorized_skill_count 10, received \'5\'')):
            self.topic_summary.validate()
    def test_validation_fails_with_invalid_subtopic_count(self):
        self.topic_summary.subtopic_count = '10'
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Expected subtopic count to be an integer, received \'10\''):
            self.topic_summary.validate()
    def test_validation_fails_with_negative_subtopic_count(self):
        self.topic_summary.subtopic_count = -1
        with self.assertRaisesRegexp(
            utils.ValidationError, (
                'Expected subtopic_count to be non-negative, '
                'received \'-1\'')):
            self.topic_summary.validate()
class TopicRightsTests(test_utils.GenericTestBase):
    """Tests for the TopicRights domain object."""

    def setUp(self):
        super(TopicRightsTests, self).setUp()
        self.signup('a@example.com', 'A')
        self.signup('b@example.com', 'B')
        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_b = self.get_user_id_from_email('b@example.com')
        # CONSISTENCY FIX: these fixtures were previously named
        # topic_summary_dict/topic_summary even though they hold a
        # TopicRights object, not a TopicSummary; renamed for clarity.
        self.topic_rights_dict = {
            'topic_id': 'topic_id',
            'manager_names': ['A'],
            'topic_is_published': False,
        }
        self.topic_rights = topic_domain.TopicRights(
            'topic_id', [self.user_id_a], False
        )
    def test_topic_rights_gets_created(self):
        """to_dict() reports id, manager display names and publish state."""
        self.assertEqual(
            self.topic_rights.to_dict(), self.topic_rights_dict)
    def test_is_manager(self):
        """Only the user id passed as a manager is reported as one."""
        self.assertTrue(self.topic_rights.is_manager(self.user_id_a))
        self.assertFalse(self.topic_rights.is_manager(self.user_id_b))
| true | true |
f7f41edaa8db8511784f4d9a2ac59a50dabbc388 | 6,491 | py | Python | nematus/data_iterator.py | JeffreyJosanne/nematus_tf | 582be1eeba2920bfa8cc064fa642c429f5eddd6d | [
"BSD-3-Clause"
] | null | null | null | nematus/data_iterator.py | JeffreyJosanne/nematus_tf | 582be1eeba2920bfa8cc064fa642c429f5eddd6d | [
"BSD-3-Clause"
] | null | null | null | nematus/data_iterator.py | JeffreyJosanne/nematus_tf | 582be1eeba2920bfa8cc064fa642c429f5eddd6d | [
"BSD-3-Clause"
] | null | null | null | import numpy
import gzip
import shuffle
from util import load_dict
def fopen(filename, mode='r'):
    """Open *filename*, transparently using gzip for '.gz' files."""
    opener = gzip.open if filename.endswith('.gz') else open
    return opener(filename, mode)
class FileWrapper(object):
    """In-memory view of a text file that mimics the file iterator API.

    The lines are held in a numpy object array so that an epoch-level
    shuffle can be applied as a single fancy-indexing operation
    (see shuffle_lines).
    """

    def __init__(self, fname):
        # Index of the next line to be returned.
        self.pos = 0
        self.lines = fopen(fname).readlines()
        # BUG FIX: `numpy.object` was deprecated in NumPy 1.20 and
        # removed in 1.24; the builtin `object` is the documented
        # replacement and behaves identically here.
        self.lines = numpy.array(self.lines, dtype=object)

    def __iter__(self):
        return self

    def next(self):
        """Return the next line, raising StopIteration at end of data."""
        if self.pos >= len(self.lines):
            raise StopIteration
        l = self.lines[self.pos]
        self.pos += 1
        return l

    # Python 3 iterator-protocol alias (the rest of the codebase is
    # Python 2 style and calls .next() directly).
    __next__ = next

    def reset(self):
        """Rewind to the beginning of the file."""
        self.pos = 0

    def seek(self, pos):
        """File-API stub; only seeking back to the start is supported."""
        assert pos == 0
        self.pos = 0

    def readline(self):
        """File-API alias for next()."""
        return self.next()

    def shuffle_lines(self, perm):
        """Reorder the buffered lines by permutation *perm* and rewind."""
        self.lines = self.lines[perm]
        self.pos = 0

    def __len__(self):
        return len(self.lines)
class TextIterator:
    """Simple Bitext iterator.

    Yields (source_batch, target_batch) pairs of word-id sequences from
    a parallel corpus. Sentence pairs are read into a maxibatch buffer
    of k = batch_size * maxibatch_size pairs, optionally sorted by
    target length so that minibatches contain similarly long sentences.
    """
    def __init__(self, source, target,
                 source_dicts, target_dict,
                 batch_size=128,
                 maxlen=100,
                 n_words_source=-1,
                 n_words_target=-1,
                 skip_empty=False,
                 shuffle_each_epoch=False,
                 sort_by_length=True,
                 use_factor=False,
                 maxibatch_size=20,
                 keep_data_in_memory=False):
        # Three data-access modes: fully in memory (shuffle via index
        # permutation), on-disk shuffle through temporary files, or
        # plain streaming from the original files.
        if keep_data_in_memory:
            self.source, self.target = FileWrapper(source), FileWrapper(target)
            if shuffle_each_epoch:
                r = numpy.random.permutation(len(self.source))
                self.source.shuffle_lines(r)
                self.target.shuffle_lines(r)
        elif shuffle_each_epoch:
            self.source_orig = source
            self.target_orig = target
            self.source, self.target = shuffle.main([self.source_orig, self.target_orig], temporary=True)
        else:
            self.source = fopen(source, 'r')
            self.target = fopen(target, 'r')
        self.source_dicts = []
        for source_dict in source_dicts:
            self.source_dicts.append(load_dict(source_dict))
        self.target_dict = load_dict(target_dict)
        self.keep_data_in_memory = keep_data_in_memory
        self.batch_size = batch_size
        self.maxlen = maxlen
        self.skip_empty = skip_empty
        self.use_factor = use_factor
        self.n_words_source = n_words_source
        self.n_words_target = n_words_target
        # Truncate the vocabularies to the first n words.
        # NOTE(review): deleting from a dict while iterating .items() is
        # only safe on Python 2, where items() returns a list; under
        # Python 3 this raises RuntimeError — confirm target interpreter.
        if self.n_words_source > 0:
            for d in self.source_dicts:
                for key, idx in d.items():
                    if idx >= self.n_words_source:
                        del d[key]
        if self.n_words_target > 0:
            for key, idx in self.target_dict.items():
                if idx >= self.n_words_target:
                    del self.target_dict[key]
        self.shuffle = shuffle_each_epoch
        self.sort_by_length = sort_by_length
        # Maxibatch buffers of tokenized sentence pairs (kept in sync).
        self.source_buffer = []
        self.target_buffer = []
        # Number of sentence pairs read per buffer fill.
        self.k = batch_size * maxibatch_size
        self.end_of_data = False
    def __iter__(self):
        return self
    def reset(self):
        """Rewind to the start of the corpus, reshuffling if enabled."""
        if self.shuffle:
            if self.keep_data_in_memory:
                r = numpy.random.permutation(len(self.source))
                self.source.shuffle_lines(r)
                self.target.shuffle_lines(r)
            else:
                self.source, self.target = shuffle.main([self.source_orig, self.target_orig], temporary=True)
        else:
            self.source.seek(0)
            self.target.seek(0)
    def next(self):
        """Return the next (source, target) batch of word-id sequences."""
        if self.end_of_data:
            self.end_of_data = False
            self.reset()
            raise StopIteration
        source = []
        target = []
        # fill buffer, if it's empty
        assert len(self.source_buffer) == len(self.target_buffer), 'Buffer size mismatch!'
        if len(self.source_buffer) == 0:
            for ss in self.source:
                ss = ss.split()
                tt = self.target.readline().split()
                # Drop pairs that are empty (optionally) or too long.
                if self.skip_empty and (len(ss) == 0 or len(tt) == 0):
                    continue
                if len(ss) > self.maxlen or len(tt) > self.maxlen:
                    continue
                self.source_buffer.append(ss)
                self.target_buffer.append(tt)
                if len(self.source_buffer) == self.k:
                    break
            if len(self.source_buffer) == 0 or len(self.target_buffer) == 0:
                # Corpus exhausted: rewind and signal end of epoch.
                self.end_of_data = False
                self.reset()
                raise StopIteration
        # sort by target buffer
        # (batches are popped from the end, so sorting ascending yields
        # the longest sentences first; without sorting, reversing
        # preserves the original corpus order when popping.)
        if self.sort_by_length:
            tlen = numpy.array([len(t) for t in self.target_buffer])
            tidx = tlen.argsort()
            _sbuf = [self.source_buffer[i] for i in tidx]
            _tbuf = [self.target_buffer[i] for i in tidx]
            self.source_buffer = _sbuf
            self.target_buffer = _tbuf
        else:
            self.source_buffer.reverse()
            self.target_buffer.reverse()
        try:
            # actual work here
            while True:
                # read from source file and map to word index
                try:
                    ss = self.source_buffer.pop()
                except IndexError:
                    break
                tmp = []
                for w in ss:
                    if self.use_factor:
                        # Factored input: token fields are separated by
                        # '|', one dictionary per factor. Out-of-vocab
                        # words map to id 1 (presumably UNK; confirm
                        # against the dictionary files).
                        w = [self.source_dicts[i][f] if f in self.source_dicts[i] else 1 for (i,f) in enumerate(w.split('|'))]
                    else:
                        w = [self.source_dicts[0][w] if w in self.source_dicts[0] else 1]
                    tmp.append(w)
                ss = tmp
                # read from target buffer and map to word index
                tt = self.target_buffer.pop()
                tt = [self.target_dict[w] if w in self.target_dict else 1
                      for w in tt]
                if self.n_words_target > 0:
                    tt = [w if w < self.n_words_target else 1 for w in tt]
                source.append(ss)
                target.append(tt)
                if len(source) >= self.batch_size or \
                        len(target) >= self.batch_size:
                    break
        except IOError:
            self.end_of_data = True
        return source, target
| 32.61809 | 126 | 0.523494 | import numpy
import gzip
import shuffle
from util import load_dict
def fopen(filename, mode='r'):
if filename.endswith('.gz'):
return gzip.open(filename, mode)
return open(filename, mode)
class FileWrapper(object):
def __init__(self, fname):
self.pos = 0
self.lines = fopen(fname).readlines()
self.lines = numpy.array(self.lines, dtype=numpy.object)
def __iter__(self):
return self
def next(self):
if self.pos >= len(self.lines):
raise StopIteration
l = self.lines[self.pos]
self.pos += 1
return l
def reset(self):
self.pos = 0
def seek(self, pos):
assert pos == 0
self.pos = 0
def readline(self):
return self.next()
def shuffle_lines(self, perm):
self.lines = self.lines[perm]
self.pos = 0
def __len__(self):
return len(self.lines)
class TextIterator:
def __init__(self, source, target,
source_dicts, target_dict,
batch_size=128,
maxlen=100,
n_words_source=-1,
n_words_target=-1,
skip_empty=False,
shuffle_each_epoch=False,
sort_by_length=True,
use_factor=False,
maxibatch_size=20,
keep_data_in_memory=False):
if keep_data_in_memory:
self.source, self.target = FileWrapper(source), FileWrapper(target)
if shuffle_each_epoch:
r = numpy.random.permutation(len(self.source))
self.source.shuffle_lines(r)
self.target.shuffle_lines(r)
elif shuffle_each_epoch:
self.source_orig = source
self.target_orig = target
self.source, self.target = shuffle.main([self.source_orig, self.target_orig], temporary=True)
else:
self.source = fopen(source, 'r')
self.target = fopen(target, 'r')
self.source_dicts = []
for source_dict in source_dicts:
self.source_dicts.append(load_dict(source_dict))
self.target_dict = load_dict(target_dict)
self.keep_data_in_memory = keep_data_in_memory
self.batch_size = batch_size
self.maxlen = maxlen
self.skip_empty = skip_empty
self.use_factor = use_factor
self.n_words_source = n_words_source
self.n_words_target = n_words_target
if self.n_words_source > 0:
for d in self.source_dicts:
for key, idx in d.items():
if idx >= self.n_words_source:
del d[key]
if self.n_words_target > 0:
for key, idx in self.target_dict.items():
if idx >= self.n_words_target:
del self.target_dict[key]
self.shuffle = shuffle_each_epoch
self.sort_by_length = sort_by_length
self.source_buffer = []
self.target_buffer = []
self.k = batch_size * maxibatch_size
self.end_of_data = False
def __iter__(self):
return self
def reset(self):
if self.shuffle:
if self.keep_data_in_memory:
r = numpy.random.permutation(len(self.source))
self.source.shuffle_lines(r)
self.target.shuffle_lines(r)
else:
self.source, self.target = shuffle.main([self.source_orig, self.target_orig], temporary=True)
else:
self.source.seek(0)
self.target.seek(0)
def next(self):
if self.end_of_data:
self.end_of_data = False
self.reset()
raise StopIteration
source = []
target = []
assert len(self.source_buffer) == len(self.target_buffer), 'Buffer size mismatch!'
if len(self.source_buffer) == 0:
for ss in self.source:
ss = ss.split()
tt = self.target.readline().split()
if self.skip_empty and (len(ss) == 0 or len(tt) == 0):
continue
if len(ss) > self.maxlen or len(tt) > self.maxlen:
continue
self.source_buffer.append(ss)
self.target_buffer.append(tt)
if len(self.source_buffer) == self.k:
break
if len(self.source_buffer) == 0 or len(self.target_buffer) == 0:
self.end_of_data = False
self.reset()
raise StopIteration
# sort by target buffer
if self.sort_by_length:
tlen = numpy.array([len(t) for t in self.target_buffer])
tidx = tlen.argsort()
_sbuf = [self.source_buffer[i] for i in tidx]
_tbuf = [self.target_buffer[i] for i in tidx]
self.source_buffer = _sbuf
self.target_buffer = _tbuf
else:
self.source_buffer.reverse()
self.target_buffer.reverse()
try:
# actual work here
while True:
# read from source file and map to word index
try:
ss = self.source_buffer.pop()
except IndexError:
break
tmp = []
for w in ss:
if self.use_factor:
w = [self.source_dicts[i][f] if f in self.source_dicts[i] else 1 for (i,f) in enumerate(w.split('|'))]
else:
w = [self.source_dicts[0][w] if w in self.source_dicts[0] else 1]
tmp.append(w)
ss = tmp
# read from source file and map to word index
tt = self.target_buffer.pop()
tt = [self.target_dict[w] if w in self.target_dict else 1
for w in tt]
if self.n_words_target > 0:
tt = [w if w < self.n_words_target else 1 for w in tt]
source.append(ss)
target.append(tt)
if len(source) >= self.batch_size or \
len(target) >= self.batch_size:
break
except IOError:
self.end_of_data = True
return source, target
| true | true |
f7f41f4f5584dc3356a4004f8bef0c63adcd12ef | 558 | py | Python | Mundo 1/desafios/desafio018-Seno Cosseno Tangente.py | marioarl/python | 0bd677cdbec87e9603866c09cb8009fef295b89b | [
"MIT"
] | 1 | 2021-11-21T21:04:04.000Z | 2021-11-21T21:04:04.000Z | Mundo 1/desafios/desafio018-Seno Cosseno Tangente.py | marioarl/python | 0bd677cdbec87e9603866c09cb8009fef295b89b | [
"MIT"
] | null | null | null | Mundo 1/desafios/desafio018-Seno Cosseno Tangente.py | marioarl/python | 0bd677cdbec87e9603866c09cb8009fef295b89b | [
"MIT"
] | null | null | null | # faça um programa que leia um angulo qualquer e mostre na tela o valor do seno, cosseno \
# e tangente desse angulo
# minha resposta 1 (as funções sin, cos e tan funcionam com radianos)
from math import sin, cos, tan, radians

# Read the angle in degrees; math's trig functions expect radians,
# so convert once up front and reuse the converted value.
ang = float(input('Digite o angulo que voce deseja: '))
rad = radians(ang)
# BUG FIX: the original stored the results in variables named
# sin/cos/tan, shadowing the imported functions; use distinct names.
seno = sin(rad)
cosseno = cos(rad)
tangente = tan(rad)
print('O ângulo de {:.2f} tem o SENO de {:.2f}'.format(ang, seno))
print('O ângulo de {:.2f} tem o COSSENO de {:.2f}'.format(ang, cosseno))
print('O ângulo de {:.2f} tem a TANGENTE de {:.2f}'.format(ang, tangente))
from math import sin, cos, tan, radians
ang = float(input('Digite o angulo que voce deseja: '))
sin = sin(radians(ang))
cos = cos(radians(ang))
tan = tan(radians(ang))
print('O ângulo de {:.2f} tem o SENO de {:.2f}'.format(ang, sin))
print('O ângulo de {:.2f} tem o COSSENO de {:.2f}'.format(ang, cos))
print('O ângulo de {:.2f} tem a TANGENTE de {:.2f}'.format(ang, tan)) | true | true |
f7f41fe9e2f27eaacd2434723e96dbcc82fffe85 | 982 | py | Python | behind/users/urls.py | SilverNine/behind | 739b866713a2f2cb7154ac8197c256ee14fd1a0d | [
"MIT"
] | null | null | null | behind/users/urls.py | SilverNine/behind | 739b866713a2f2cb7154ac8197c256ee14fd1a0d | [
"MIT"
] | 6 | 2020-06-05T18:26:15.000Z | 2021-06-10T20:22:47.000Z | behind/users/urls.py | SilverNine/behind | 739b866713a2f2cb7154ac8197c256ee14fd1a0d | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
app_name = "users"

# URL routes for the users app; every route is named so templates and
# views can reverse() it.
urlpatterns = [
    url(
        regex=r'^explore/$',
        view=views.ExploreUsers.as_view(),
        name='explore_users'
    ),
    url(
        regex=r'^search/$',
        view=views.Search.as_view(),
        name='search'
    ),
    url(
        regex=r'^(?P<user_id>[0-9]+)/follow/$',
        view=views.FollowUser.as_view(),
        name='follow_user'
    ),
    # BUG FIX: this route previously reused name='follow_user', which
    # shadowed the follow route when reversing URLs and left the
    # unfollow endpoint unreachable by its own name.
    url(
        regex=r'^(?P<user_id>[0-9]+)/unfollow/$',
        view=views.UnFollowUser.as_view(),
        name='unfollow_user'
    ),
    url(
        regex=r'^(?P<username>\w+)/$',
        view=views.UserProfile.as_view(),
        name='user_profile'
    ),
    url(
        regex=r'^(?P<username>\w+)/followers/$',
        view=views.UserFollowers.as_view(),
        name='user_followers'
    ),
    url(
        regex=r'^(?P<username>\w+)/following/$',
        view=views.UserFollowing.as_view(),
        name='user_following'
    ),
]
| 23.380952 | 49 | 0.531568 | from django.conf.urls import url
from . import views
app_name = "users"
urlpatterns = [
url(
regex=r'^explore/$',
view=views.ExploreUsers.as_view(),
name='explore_users'
),
url(
regex=r'^search/$',
view=views.Search.as_view(),
name='search'
),
url(
regex=r'^(?P<user_id>[0-9]+)/follow/$',
view=views.FollowUser.as_view(),
name='follow_user'
),
url(
regex=r'^(?P<user_id>[0-9]+)/unfollow/$',
view=views.UnFollowUser.as_view(),
name='follow_user'
),
url(
regex=r'^(?P<username>\w+)/$',
view=views.UserProfile.as_view(),
name='user_profile'
),
url(
regex=r'^(?P<username>\w+)/followers/$',
view=views.UserFollowers.as_view(),
name='user_followers'
),
url(
regex=r'^(?P<username>\w+)/following/$',
view=views.UserFollowing.as_view(),
name='user_following'
),
]
| true | true |
f7f4208ae359bc2e507162385cdc793dbb205419 | 1,796 | py | Python | drimg/functions.py | hojjat-faryabi/drawable_image | 3a5f3a2c11a5fc176e035f2306f8077dc9b0277e | [
"MIT"
] | 1 | 2021-09-19T15:23:10.000Z | 2021-09-19T15:23:10.000Z | drimg/functions.py | hojjat-faryabi/drawable_image | 3a5f3a2c11a5fc176e035f2306f8077dc9b0277e | [
"MIT"
] | null | null | null | drimg/functions.py | hojjat-faryabi/drawable_image | 3a5f3a2c11a5fc176e035f2306f8077dc9b0277e | [
"MIT"
] | null | null | null | from PIL import Image
import os
import pathlib
from .constants import *
def calcDP(px):
    """Convert a pixel measure to density-independent pixels (dp).

    The density bucket is the SIZES_PX entry equal to *px*, otherwise
    the first entry larger than *px*; values beyond every bucket fall
    back to the last (densest) entry.
    """
    try:
        bucket = SIZES_PX.index(px)
    except ValueError:
        bucket = next(
            (i for i, size in enumerate(SIZES_PX) if size > px),
            len(SIZES_PX) - 1)
    return px / (SIZES_DPI[bucket] / 160)
def calcNewSizes(dp):
    """Scale *dp* by every factor in SCALING, truncating each to int."""
    return [int(factor * dp) for factor in SCALING]
def getImageFilesList():
    """Return the image files found in the current working directory."""
    return [entry for entry in os.listdir(".") if isImage(entry)]
def isImage(name):
    """Return True if *name* is an existing file with a supported image suffix."""
    if not os.path.isfile(name):
        return False
    ext = pathlib.Path(name).suffix.removeprefix(".")
    return ext in SUPPORTED_FORMATS
def checkImageNameForSave(name: str) -> str:
    """Normalise an image file name for use as an Android drawable.

    Android drawable resource names cannot start with a digit and
    should be lower case, so the name is lower-cased and a leading
    underscore is prepended when it starts with a numeric character.

    Args:
        name: the file name to normalise.

    Returns:
        The normalised name; an empty string is returned unchanged
        (the previous implementation raised IndexError on '').
    """
    name = name.lower()
    # Guard the subscript so empty input does not crash.
    if name and name[0].isnumeric():
        name = '_' + name
    return name
def createNewImage(img_path):
    """Create the Android density variants of the image at *img_path*.

    The image is resized once per SCALING factor (aspect ratio
    preserved) and written to Drawable/drawable-<density>/<name>.
    A failure for one density is printed and skipped so the remaining
    densities are still produced.
    """
    img = Image.open(img_path)
    x, y = img.size
    ratio = x / y
    # Target widths per density bucket, derived from the dp size.
    new_xs = calcNewSizes(calcDP(x))
    new_ys = [int(width / ratio) for width in new_xs]
    # Sanitise the file name once; the directory parts must keep their
    # original case.
    name = checkImageNameForSave(img_path)
    root = "Drawable"
    for i in range(len(SCALING)):
        folder = "drawable-" + DRAWABLE_SIZE_NAMES[i]
        path = os.path.join(root, folder, name)
        try:
            pathlib.Path(os.path.join(root, folder)).mkdir(parents=True, exist_ok=True)
            new_img = img.resize((new_xs[i], new_ys[i]))
            # BUG FIX: the original saved to checkImageNameForSave(path),
            # which lower-cased the whole path — including the "Drawable"
            # root that was just created — so on case-sensitive file
            # systems the save targeted a nonexistent directory.
            new_img.save(path)
        except Exception as ex:
            # Best effort: report and continue with the next density.
            print(ex)
| 22.734177 | 87 | 0.589644 | from PIL import Image
import os
import pathlib
from .constants import *
def calcDP(px):
index = -1
if px in SIZES_PX:
index = SIZES_PX.index(px)
else:
for item in SIZES_PX:
if item > px:
index = SIZES_PX.index(item)
break
if index == -1:
index = len(SIZES_PX) - 1
return px / (SIZES_DPI[index] / 160)
def calcNewSizes(dp):
res = []
for item in SCALING:
res.append(int(item * dp))
return res
def getImageFilesList():
files = []
for file in os.listdir("."):
if isImage(file):
files.append(file)
return files
def isImage(name):
if os.path.isfile(name):
file_parts = pathlib.Path(name)
if file_parts.suffix.removeprefix(".") in SUPPORTED_FORMATS:
return True
return False
def checkImageNameForSave(name: str):
# and is better they haven't capitalize words
name = name.lower()
if name[0].isnumeric():
name = '_' + name
return name
def createNewImage(img_path):
img = Image.open(img_path)
x, y = img.size
ratio = x / y
new_xs = calcNewSizes(calcDP(x))
new_ys = []
for item in new_xs:
new_ys.append(int(item / ratio))
for i in range(len(SCALING)):
root = "Drawable"
folder = "drawable-" + DRAWABLE_SIZE_NAMES[i]
name = checkImageNameForSave(img_path)
path = os.path.join(root, folder, name)
try:
pathlib.Path(os.path.join(root, folder)).mkdir(parents=True, exist_ok=True)
new_img = img.resize((new_xs[i], new_ys[i]))
new_img.save(checkImageNameForSave(path))
except Exception as ex:
print(ex)
| true | true |
f7f420cbcc6938382bc7e97572c4600d98244384 | 1,351 | py | Python | setup.py | ErinMorelli/em-slack-tableflip | 2450f3a71baa4a43a0f879e644b0847600303380 | [
"MIT"
] | 51 | 2015-08-06T19:41:47.000Z | 2022-03-11T20:59:19.000Z | setup.py | ErinMorelli/em-slack-tableflip | 2450f3a71baa4a43a0f879e644b0847600303380 | [
"MIT"
] | 5 | 2016-10-13T18:42:54.000Z | 2021-12-02T16:07:02.000Z | setup.py | ErinMorelli/em-slack-tableflip | 2450f3a71baa4a43a0f879e644b0847600303380 | [
"MIT"
] | 9 | 2016-01-22T22:53:36.000Z | 2022-01-12T17:45:45.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2015-2021 Erin Morelli.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
"""
from setuptools import setup, find_packages
setup(
    name='em-slack-tableflip',
    # Version string is tracked in the VERSION file at the project root.
    version=open('VERSION').read(),
    author='Erin Morelli',
    author_email='me@erin.dev',
    url='https://slack-tableflip.herokuapp.com',
    license='MIT',
    platforms='Linux, OSX',
    description='Flip some tables on Slack.',
    # Long description is the project README, shown on the package index page.
    long_description=open('README.md').read(),
    packages=find_packages(),
    include_package_data=True,
    # Runtime dependencies: Flask web app + DB drivers + Slack/analytics clients.
    install_requires=[
        'cryptography',
        'Flask',
        'Flask-SQLAlchemy',
        'keen',
        'pkginfo',
        'requests',
        'itsdangerous',
        'mysqlclient',
        'newrelic',
        'psycopg2-binary',
        'slacker'
    ]
)
| 28.744681 | 69 | 0.680977 |
from setuptools import setup, find_packages
setup(
name='em-slack-tableflip',
version=open('VERSION').read(),
author='Erin Morelli',
author_email='me@erin.dev',
url='https://slack-tableflip.herokuapp.com',
license='MIT',
platforms='Linux, OSX',
description='Flip some tables on Slack.',
long_description=open('README.md').read(),
packages=find_packages(),
include_package_data=True,
install_requires=[
'cryptography',
'Flask',
'Flask-SQLAlchemy',
'keen',
'pkginfo',
'requests',
'itsdangerous',
'mysqlclient',
'newrelic',
'psycopg2-binary',
'slacker'
]
)
| true | true |
f7f420dcd731d6cf8a90325a837e613c53a233d7 | 5,972 | py | Python | sdk/python/pulumi_azure/lb/nat_pool.py | stack72/pulumi-azure | 18245b4e74abbd3f768f9eda67adb1df609ff32e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/lb/nat_pool.py | stack72/pulumi-azure | 18245b4e74abbd3f768f9eda67adb1df609ff32e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/lb/nat_pool.py | stack72/pulumi-azure | 18245b4e74abbd3f768f9eda67adb1df609ff32e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class NatPool(pulumi.CustomResource):
    backend_port: pulumi.Output[float]
    """
    The port used for the internal endpoint. Possible values range between 1 and 65535, inclusive.
    """
    frontend_ip_configuration_id: pulumi.Output[str]
    frontend_ip_configuration_name: pulumi.Output[str]
    """
    The name of the frontend IP configuration exposing this rule.
    """
    frontend_port_end: pulumi.Output[float]
    """
    The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with this Load Balancer. Possible values range between 1 and 65534, inclusive.
    """
    frontend_port_start: pulumi.Output[float]
    """
    The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with this Load Balancer. Possible values range between 1 and 65534, inclusive.
    """
    loadbalancer_id: pulumi.Output[str]
    """
    The ID of the Load Balancer in which to create the NAT pool.
    """
    location: pulumi.Output[str]
    name: pulumi.Output[str]
    """
    Specifies the name of the NAT pool.
    """
    protocol: pulumi.Output[str]
    """
    The transport protocol for the external endpoint. Possible values are `Udp` or `Tcp`.
    """
    resource_group_name: pulumi.Output[str]
    """
    The name of the resource group in which to create the resource.
    """
    def __init__(__self__, resource_name, opts=None, backend_port=None, frontend_ip_configuration_name=None, frontend_port_end=None, frontend_port_start=None, loadbalancer_id=None, location=None, name=None, protocol=None, resource_group_name=None, __name__=None, __opts__=None):
        """
        Manages a Load Balancer NAT pool.

        > **NOTE** the Load Balancer must already have a FrontEnd IP
        Configuration attached before this resource can be used.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[float] backend_port: The port used for the internal endpoint (1-65535).
        :param pulumi.Input[str] frontend_ip_configuration_name: The name of the frontend IP configuration exposing this rule.
        :param pulumi.Input[float] frontend_port_end: The last external port used to provide Inbound NAT (1-65534).
        :param pulumi.Input[float] frontend_port_start: The first external port used to provide Inbound NAT (1-65534).
        :param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the NAT pool.
        :param pulumi.Input[str] name: Specifies the name of the NAT pool.
        :param pulumi.Input[str] protocol: The transport protocol for the external endpoint (`Udp` or `Tcp`).
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource.
        """
        # Honor the deprecated __name__/__opts__ aliases exactly as before.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        __props__ = dict()
        # (property name, value, required?) in the exact order the provider expects;
        # a missing required property raises before any later one is inspected.
        prop_specs = [
            ('backend_port', backend_port, True),
            ('frontend_ip_configuration_name', frontend_ip_configuration_name, True),
            ('frontend_port_end', frontend_port_end, True),
            ('frontend_port_start', frontend_port_start, True),
            ('loadbalancer_id', loadbalancer_id, True),
            ('location', location, False),
            ('name', name, False),
            ('protocol', protocol, True),
            ('resource_group_name', resource_group_name, True),
        ]
        for prop, value, required in prop_specs:
            if required and value is None:
                raise TypeError("Missing required property '{}'".format(prop))
            __props__[prop] = value
        __props__['frontend_ip_configuration_id'] = None
        super(NatPool, __self__).__init__(
            'azure:lb/natPool:NatPool',
            resource_name,
            __props__,
            opts)
    def translate_output_property(self, prop):
        """Map a provider (camelCase) property name to its Python (snake_case) form."""
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        """Map a Python (snake_case) property name to its provider (camelCase) form."""
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 47.776 | 278 | 0.704789 |
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class NatPool(pulumi.CustomResource):
backend_port: pulumi.Output[float]
frontend_ip_configuration_id: pulumi.Output[str]
frontend_ip_configuration_name: pulumi.Output[str]
frontend_port_end: pulumi.Output[float]
frontend_port_start: pulumi.Output[float]
loadbalancer_id: pulumi.Output[str]
location: pulumi.Output[str]
name: pulumi.Output[str]
protocol: pulumi.Output[str]
resource_group_name: pulumi.Output[str]
def __init__(__self__, resource_name, opts=None, backend_port=None, frontend_ip_configuration_name=None, frontend_port_end=None, frontend_port_start=None, loadbalancer_id=None, location=None, name=None, protocol=None, resource_group_name=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if backend_port is None:
raise TypeError("Missing required property 'backend_port'")
__props__['backend_port'] = backend_port
if frontend_ip_configuration_name is None:
raise TypeError("Missing required property 'frontend_ip_configuration_name'")
__props__['frontend_ip_configuration_name'] = frontend_ip_configuration_name
if frontend_port_end is None:
raise TypeError("Missing required property 'frontend_port_end'")
__props__['frontend_port_end'] = frontend_port_end
if frontend_port_start is None:
raise TypeError("Missing required property 'frontend_port_start'")
__props__['frontend_port_start'] = frontend_port_start
if loadbalancer_id is None:
raise TypeError("Missing required property 'loadbalancer_id'")
__props__['loadbalancer_id'] = loadbalancer_id
__props__['location'] = location
__props__['name'] = name
if protocol is None:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['frontend_ip_configuration_id'] = None
super(NatPool, __self__).__init__(
'azure:lb/natPool:NatPool',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7f4235517be0fb1c8e7c4491ad2825000579759 | 21,701 | py | Python | pokemongo_bot/cell_workers/move_to_map_pokemon.py | timgates42/PokemonGo-Bot | 5e80f20760f32478a84a8f0ced7ca24cdf41fe03 | [
"MIT"
] | 5,362 | 2016-07-21T02:38:46.000Z | 2022-03-23T13:34:51.000Z | pokemongo_bot/cell_workers/move_to_map_pokemon.py | timgates42/PokemonGo-Bot | 5e80f20760f32478a84a8f0ced7ca24cdf41fe03 | [
"MIT"
] | 5,897 | 2016-07-21T05:05:49.000Z | 2022-03-17T09:21:35.000Z | pokemongo_bot/cell_workers/move_to_map_pokemon.py | timgates42/PokemonGo-Bot | 5e80f20760f32478a84a8f0ced7ca24cdf41fe03 | [
"MIT"
] | 3,379 | 2016-07-21T02:38:48.000Z | 2022-03-30T02:46:57.000Z | # -*- coding: utf-8 -*-
"""
Moves a trainer to a Pokemon.
Events:
move_to_map_pokemon
When a generic message is logged
Returns:
message: Log message.
move_to_map_pokemon_fail
When the worker fails.
Returns:
message: Failure message.
move_to_map_pokemon_updated_map
When worker updates the PokemonGo-Map.
Returns:
lat: Latitude
lon: Longitude
move_to_map_pokemon_teleport_to
When trainer is teleported to a Pokemon.
Returns:
poke_name: Pokemon's name
poke_dist: Distance from the trainer
poke_lat: Latitude of the Pokemon
poke_lon: Longitude of the Pokemon
disappears_in: Number of seconds before the Pokemon disappears
move_to_map_pokemon_encounter
When a trainer encounters a Pokemon by teleporting or walking.
Returns:
poke_name: Pokemon's name
poke_dist: Distance from the trainer
poke_lat: Latitude of the Pokemon
poke_lon: Longitude of the Pokemon
disappears_in: Number of seconds before the Pokemon disappears
move_to_map_pokemon_move_towards
When a trainer moves toward a Pokemon.
Returns:
poke_name: Pokemon's name
poke_dist: Distance from the trainer
poke_lat: Latitude of the Pokemon
poke_lon: Longitude of the Pokemon
disappears_in: Number of seconds before the Pokemon disappears
move_to_map_pokemon_teleport_back
When a trainer teleports back to thier previous location.
Returns:
last_lat: Trainer's last known latitude
last_lon: Trainer's last known longitude
"""
from __future__ import unicode_literals
import os
import time
import json
import requests
from pokemongo_bot import inventory
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.cell_workers.utils import distance, format_dist, format_time, fort_details
from pokemongo_bot.walkers.walker_factory import walker_factory
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.cell_workers.pokemon_catch_worker import PokemonCatchWorker
from random import uniform
from pokemongo_bot.constants import Constants
from datetime import datetime
ULTRABALL_ID = 3
GREATBALL_ID = 2
POKEBALL_ID = 1
class MoveToMapPokemon(BaseTask):
"""Task for moving a trainer to a Pokemon."""
SUPPORTED_TASK_API_VERSION = 1
def initialize(self):
self.last_map_update = 0
self.pokemon_data = self.bot.pokemon_list
self.unit = self.bot.config.distance_unit
self.cache = []
self.min_ball = self.config.get('min_ball', 1)
self.map_path = self.config.get('map_path', 'raw_data')
self.walker = self.config.get('walker', 'StepWalker')
self.snip_enabled = self.config.get('snipe', False)
self.snipe_high_prio_only = self.config.get('snipe_high_prio_only', False)
self.snipe_high_prio_threshold = self.config.get('snipe_high_prio_threshold', 400)
self.by_pass_times = 0
data_file = os.path.join(_base_dir, 'map-caught-{}.json'.format(self.bot.config.username))
if os.path.isfile(data_file):
self.cache = json.load(
open(data_file)
)
self.alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)
self.debug = self.config.get('debug', False)
    def pokemons_parser(self, pokemon_list):
        """Normalize and filter raw map/social pokemon records.

        Each surviving record gains 'name', 'is_vip', 'priority', 'dist' and a
        normalized 'disappear_time' (epoch seconds). Records are dropped when
        they are expired, not configured for catching, already handled, or too
        far away for the current mode (walking vs sniping).
        """
        pokemons = []
        if not pokemon_list:
            return pokemons
        now = int(time.time())
        for pokemon in pokemon_list:
            try:
                # Feeds disagree on field names; accept both spellings and
                # both ms-timestamp keys, falling back to 0/empty.
                disappear = int(pokemon.get('expiration_timestamp_ms', 0) / 1000) or int(pokemon.get('disappear_time', 0) / 1000)
                pokemon['encounter_id'] = pokemon.get('encounter_id', '')
                pokemon['spawn_point_id'] = pokemon.get('spawn_point_id', '') or pokemon.get('spawnpoint_id', '')
                pokemon['iv'] = pokemon.get('iv', 0)
                pokemon['disappear_time'] = disappear
                pokemon['name'] = self.pokemon_data[pokemon['pokemon_id'] - 1]['Name']
                pokemon['is_vip'] = pokemon['name'] in self.bot.config.vips
            except TypeError:
                # Malformed record (e.g. non-numeric timestamp); skip it.
                continue
            except KeyError:
                # Record missing a mandatory field such as pokemon_id; skip it.
                continue
            if now > pokemon['disappear_time']:
                continue
            if pokemon['name'] not in self.config['catch'] and not pokemon['is_vip']:
                if self.debug:
                    self._emit_failure("Not catching {}".format(pokemon['name']))
                continue
            if self.is_inspected(pokemon):
                if self.debug:
                    self._emit_log('Skipped {} because it was already catch or does not exist'.format(pokemon['name']))
                continue
            pokemon['priority'] = self.config['catch'].get(pokemon['name'], 0)
            pokemon['dist'] = distance(
                self.bot.position[0],
                self.bot.position[1],
                pokemon['latitude'],
                pokemon['longitude']
            )
            # If distance to pokemon greater than the max_sniping_distance, then ignore regardless of "snipe" setting
            if pokemon['dist'] > self.config.get('max_sniping_distance', 10000):
                continue
            # If distance bigger than walking distance, ignore if sniping is not active
            if pokemon['dist'] > self.config.get('max_walking_distance', 1000) and not self.snip_enabled:
                continue
            # if pokemon not reachable with mean walking speed (by config)
            mean_walk_speed = (self.bot.config.walk_max + self.bot.config.walk_min) / 2
            if pokemon['dist'] > ((pokemon['disappear_time'] - now) * mean_walk_speed) and not self.snip_enabled:
                continue
            pokemons.append(pokemon)
        return pokemons
def get_pokemon_from_social(self):
if not hasattr(self.bot, 'mqtt_pokemon_list') or not self.bot.mqtt_pokemon_list:
return []
tmp_pokemon_list, self.bot.mqtt_pokemon_list = self.bot.mqtt_pokemon_list, []
return self.pokemons_parser(tmp_pokemon_list)
def get_pokemon_from_url(self):
try:
request = requests.get(self.config['address'])
response = request.json()
except requests.exceptions.ConnectionError:
self._emit_failure('Could not get data from {}'.format(self.config['address']))
return []
except ValueError:
self._emit_failure('JSON format is not valid')
return []
return self.pokemons_parser(response.get('pokemons', []))
# TODO: refactor
def is_inspected(self, pokemon):
for caught_pokemon in self.cache:
# Since IDs might be invalid (null/blank) by this time, compare by approximate location
# TODO: make a better comparision
same_latitude = "{0:.4f}".format(pokemon['latitude']) == "{0:.4f}".format(caught_pokemon['latitude'])
same_longitude = "{0:.4f}".format(pokemon['longitude']) == "{0:.4f}".format(caught_pokemon['longitude'])
if same_latitude and same_longitude:
return True
return False
# Stores a target so that
# TODO: refactor
def inspect(self, pokemon):
# Make sure it was not caught!
for caught_pokemon in self.cache:
same_latitude = "{0:.4f}".format(pokemon['latitude']) == "{0:.4f}".format(caught_pokemon['latitude'])
same_longitude = "{0:.4f}".format(pokemon['longitude']) == "{0:.4f}".format(caught_pokemon['longitude'])
if same_latitude and same_longitude:
return
if len(self.cache) >= 200:
self.cache.pop(0)
self.cache.append(pokemon)
    def snipe(self, pokemon):
        """Teleport to *pokemon*, attempt the catch, and teleport back.

        The heartbeat is locked for the whole round trip so the server does
        not see the temporary position. When the record's encounter/spawn IDs
        are missing (non-social feed), the target's existence is re-verified
        from the live cell data before catching. The pokemon is cached as
        handled either way.
        """
        # Backup position before anything
        last_position = self.bot.position[0:2]
        # Teleport, so that we can see nearby stuff
        # self.bot.heartbeat() was moved to thread, if you do want to call it, you need sleep 10s.
        self.bot.hb_locked = True
        self._teleport_to(pokemon)
        # Simulate kind of a lag after teleporting/moving to a long distance
        time.sleep(2)
        # If social is enabled and if no verification is needed, trust it. Otherwise, update IDs!
        verify = not pokemon.get('encounter_id') or not pokemon.get('spawn_point_id')
        exists = not verify and self.bot.config.enable_social
        # If social is disabled, we will have to make sure the target still exists
        if verify:
            nearby_pokemons = []
            nearby_stuff = self.bot.get_meta_cell()
            # Sleep some time, so that we have accurate results (successfull cell data request)
            time.sleep(2)
            # Retrieve nearby pokemons for validation
            if 'wild_pokemons' in nearby_stuff:
                nearby_pokemons.extend(nearby_stuff['wild_pokemons'])
            if 'catchable_pokemons' in nearby_stuff:
                nearby_pokemons.extend(nearby_stuff['catchable_pokemons'])
            # Make sure the target still/really exists (TODO: validate expiration)
            for nearby_pokemon in nearby_pokemons:
                # Wild and catchable records nest the pokemon id differently.
                is_wild = 'pokemon_data' in nearby_pokemon
                nearby_pokemon_id = nearby_pokemon['pokemon_data']['pokemon_id'] if is_wild else nearby_pokemon['pokemon_id']
                if nearby_pokemon_id == pokemon['pokemon_id']:
                    exists = True
                    # Also, if the IDs arent valid, update them!
                    if not pokemon['encounter_id'] or not pokemon['spawn_point_id']:
                        pokemon['encounter_id'] = nearby_pokemon['encounter_id']
                        pokemon['spawn_point_id'] = nearby_pokemon['spawn_point_id']
                        pokemon['disappear_time'] = nearby_pokemon['last_modified_timestamp_ms'] if is_wild else nearby_pokemon['expiration_timestamp_ms']
                    break
        # If target exists, catch it, otherwise ignore
        if exists:
            self._encountered(pokemon)
            catch_worker = PokemonCatchWorker(pokemon, self.bot)
            # Start the encounter while still at the sniped position, then
            # teleport back before actually working the catch.
            api_encounter_response = catch_worker.create_encounter_api_call()
            time.sleep(self.config.get('snipe_sleep_sec', 2))
            self._teleport_back(last_position)
            self.bot.api.set_position(last_position[0], last_position[1], self.alt, False)
            time.sleep(self.config.get('snipe_sleep_sec', 2))
            catch_worker.work(api_encounter_response)
        else:
            self._emit_failure('{} doesnt exist anymore. Skipping...'.format(pokemon['name']))
            time.sleep(self.config.get('snipe_sleep_sec', 2))
            self._teleport_back(last_position)
            self.bot.api.set_position(last_position[0], last_position[1], self.alt, False)
            time.sleep(self.config.get('snipe_sleep_sec', 2))
        # Cache the target (caught or vanished) and release the heartbeat.
        self.inspect(pokemon)
        self.bot.hb_locked = False
        return WorkerResult.SUCCESS
def dump_caught_pokemon(self):
user_data_map_caught = os.path.join(_base_dir, 'data', 'map-caught-{}.json'.format(self.bot.config.username))
with open(user_data_map_caught, 'w') as outfile:
json.dump(self.cache, outfile)
def work(self):
# check for pokeballs (excluding masterball)
pokeballs_quantity = inventory.items().get(POKEBALL_ID).count
superballs_quantity = inventory.items().get(GREATBALL_ID).count
ultraballs_quantity = inventory.items().get(ULTRABALL_ID).count
# Validate the balls quantity
if (pokeballs_quantity + superballs_quantity + ultraballs_quantity) < self.min_ball:
if self.debug:
self._emit_log("Not enough balls to start sniping (have {}, {} needed)".format(
pokeballs_quantity + superballs_quantity + ultraballs_quantity, self.min_ball))
return WorkerResult.SUCCESS
if self.bot.catch_disabled:
if not hasattr(self.bot,"mtmp_disabled_global_warning") or \
(hasattr(self.bot,"mtmp_disabled_global_warning") and not self.bot.mtmp_disabled_global_warning):
self._emit_log("All catching tasks are currently disabled until {}. Sniping will resume when catching tasks are re-enabled".format(self.bot.catch_resume_at.strftime("%H:%M:%S")))
self.bot.mtmp_disabled_global_warning = True
return WorkerResult.SUCCESS
else:
self.bot.mtmp_disabled_global_warning = False
if self.bot.softban:
if not hasattr(self.bot, "softban_global_warning") or \
(hasattr(self.bot, "softban_global_warning") and not self.bot.softban_global_warning):
self.logger.info("Possible softban! Not trying to catch Pokemon.")
self.bot.softban_global_warning = True
return WorkerResult.SUCCESS
else:
self.bot.softban_global_warning = False
# Retrieve pokemos
self.dump_caught_pokemon()
if self.bot.config.enable_social:
if self.snip_enabled:
self.by_pass_times += 1
if self.by_pass_times < self.config.get('skip_rounds', 30):
if self.debug:
self._emit_log("Skipping pass {}".format(self.by_pass_times))
return WorkerResult.SUCCESS
self.by_pass_times = 0
pokemon_list = self.get_pokemon_from_social()
else:
pokemon_list = self.get_pokemon_from_url()
pokemon_list.sort(key=lambda x: x['dist'])
if self.config['mode'] == 'priority':
pokemon_list.sort(key=lambda x: x['priority'], reverse=True)
if self.config['prioritize_vips']:
pokemon_list.sort(key=lambda x: x['is_vip'], reverse=True)
if not len(pokemon_list):
if self.debug:
self._emit_log("No pokemons in list to snipe")
return WorkerResult.SUCCESS
pokemon = pokemon_list[0]
if self.debug:
self._emit_log('How many pokemon in list: {}'.format(len(pokemon_list)))
if self.snip_enabled:
if self.snipe_high_prio_only:
count = 0
for pokemon in pokemon_list:
if self.snipe_high_prio_threshold < pokemon['priority']:
self.snipe(pokemon)
count += 1
if count >= self.config.get('snipe_max_in_chain', 2):
return WorkerResult.SUCCESS
if count is not 1:
time.sleep(self.config.get('snipe_sleep_sec', 2) * 5)
else:
if self.debug:
self._emit_log('this pokemon is not good enough to snipe {}'.format(pokemon))
return WorkerResult.SUCCESS
else:
return self.snipe(pokemon)
# check for pokeballs (excluding masterball)
# checking again as we may have lost some if we sniped
pokeballs_quantity = inventory.items().get(POKEBALL_ID).count
superballs_quantity = inventory.items().get(GREATBALL_ID).count
ultraballs_quantity = inventory.items().get(ULTRABALL_ID).count
if pokeballs_quantity + superballs_quantity + ultraballs_quantity < self.min_ball:
return WorkerResult.SUCCESS
nearest_fort = self.get_nearest_fort_on_the_way(pokemon)
if pokemon['is_vip'] or nearest_fort is None:
# lock catching(with pokemon_id specified) while moving to vip pokemon or no fort around
self.bot.capture_locked = pokemon['pokemon_id']
step_walker = self._move_to(pokemon)
if not step_walker.step():
if pokemon['dist'] < Constants.MAX_DISTANCE_POKEMON_IS_REACHABLE:
self._encountered(pokemon)
self.bot.capture_locked = False # unlock catch_worker
self.inspect(pokemon)
return WorkerResult.SUCCESS
else:
return WorkerResult.RUNNING
else:
step_walker = self._move_to_pokemon_througt_fort(nearest_fort, pokemon)
if not step_walker or not step_walker.step():
return WorkerResult.RUNNING
def _emit_failure(self, msg):
self.emit_event(
'move_to_map_pokemon_fail',
formatted='Failure! {message}',
data={'message': msg}
)
def _emit_log(self, msg):
self.emit_event(
'move_to_map_pokemon',
formatted='{message}',
data={'message': msg}
)
def _pokemon_event_data(self, pokemon):
"""Generates parameters used for the Bot's event manager.
Args:
pokemon: Pokemon object
Returns:
Dictionary with Pokemon's info.
"""
now = int(time.time())
return {
'poke_name': pokemon['name'],
'poke_dist': (format_dist(pokemon['dist'], self.unit)),
'poke_lat': pokemon['latitude'],
'poke_lon': pokemon['longitude'],
'disappears_in': (format_time(pokemon['disappear_time'] - now))
}
def _teleport_to(self, pokemon):
self.emit_event(
'move_to_map_pokemon_teleport_to',
formatted='Teleporting to {poke_name}. ({poke_dist})',
data=self._pokemon_event_data(pokemon)
)
self.bot.api.set_position(pokemon['latitude'], pokemon['longitude'], self.alt, True)
def _encountered(self, pokemon):
self.emit_event(
'move_to_map_pokemon_encounter',
formatted='Encountered Pokemon: {poke_name}',
data=self._pokemon_event_data(pokemon)
)
def _teleport_back(self, last_position):
self.emit_event(
'move_to_map_pokemon_teleport_back',
formatted='Teleporting back to previous location ({last_lat}, {last_lon})...',
data={'last_lat': last_position[0], 'last_lon': last_position[1]}
)
def _move_to(self, pokemon):
"""Moves trainer towards a Pokemon.
Args:
pokemon: Pokemon to move to.
Returns:
Walker
"""
self.emit_event(
'move_to_map_pokemon_move_towards',
formatted=('Moving towards {poke_name}, {poke_dist}, left ('
'{disappears_in})'),
data=self._pokemon_event_data(pokemon)
)
return walker_factory(self.walker, self.bot, pokemon['latitude'], pokemon['longitude'])
    def _move_to_pokemon_througt_fort(self, fort, pokemon):
        """Move the trainer towards *pokemon* via the given *fort* (pokestop).

        Emits either a "moving through pokestop" event (while the fort is
        still out of reach) or an "arrived at fort" event, then returns a
        walker aimed at the fort's coordinates.
        """
        nearest_fort = fort
        lat = nearest_fort['latitude']
        lng = nearest_fort['longitude']
        fortID = nearest_fort['id']
        # Look up the fort's display name for the event message.
        details = fort_details(self.bot, fortID, lat, lng)
        fort_name = details.get('name', 'Unknown')
        unit = self.bot.config.distance_unit # Unit to use when printing formatted distance
        dist = distance(
            self.bot.position[0],
            self.bot.position[1],
            lat,
            lng
        )
        if dist > Constants.MAX_DISTANCE_FORT_IS_REACHABLE:
            pokemon_throught_fort_event_data = {
                'fort_name': u"{}".format(fort_name),
                'distance': format_dist(dist, unit),
                'poke_name': pokemon['name'],
                'poke_dist': (format_dist(pokemon['dist'], self.unit))
            }
            self.emit_event(
                'moving_to_pokemon_throught_fort',
                formatted="Moving towards {poke_name} - {poke_dist} through pokestop {fort_name} - {distance}",
                data=pokemon_throught_fort_event_data
            )
        else:
            self.emit_event(
                'arrived_at_fort',
                formatted='Arrived at fort.'
            )
        return walker_factory(self.walker, self.bot, lat, lng)
def get_nearest_fort_on_the_way(self, pokemon):
forts = self.bot.get_forts(order_by_distance=True)
# Remove stops that are still on timeout
forts = filter(lambda x: x["id"] not in self.bot.fort_timeouts, forts)
i = 0
while i < len(forts):
ratio = float(self.config.get('max_extra_dist_fort', 20))
dist_self_to_fort = distance(self.bot.position[0], self.bot.position[1], forts[i]['latitude'],
forts[i]['longitude'])
dist_fort_to_pokemon = distance(pokemon['latitude'], pokemon['longitude'], forts[i]['latitude'],
forts[i]['longitude'])
total_dist = dist_self_to_fort + dist_fort_to_pokemon
dist_self_to_pokemon = distance(self.bot.position[0], self.bot.position[1], pokemon['latitude'],
pokemon['longitude'])
if total_dist < (1 + (ratio / 100)) * dist_self_to_pokemon:
i += 1
else:
del forts[i]
# Return nearest fort if there are remaining
if len(forts):
return forts[0]
else:
return None
| 40.791353 | 194 | 0.606332 |
from __future__ import unicode_literals
import os
import time
import json
import requests
from pokemongo_bot import inventory
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.cell_workers.utils import distance, format_dist, format_time, fort_details
from pokemongo_bot.walkers.walker_factory import walker_factory
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.cell_workers.pokemon_catch_worker import PokemonCatchWorker
from random import uniform
from pokemongo_bot.constants import Constants
from datetime import datetime
ULTRABALL_ID = 3
GREATBALL_ID = 2
POKEBALL_ID = 1
class MoveToMapPokemon(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def initialize(self):
self.last_map_update = 0
self.pokemon_data = self.bot.pokemon_list
self.unit = self.bot.config.distance_unit
self.cache = []
self.min_ball = self.config.get('min_ball', 1)
self.map_path = self.config.get('map_path', 'raw_data')
self.walker = self.config.get('walker', 'StepWalker')
self.snip_enabled = self.config.get('snipe', False)
self.snipe_high_prio_only = self.config.get('snipe_high_prio_only', False)
self.snipe_high_prio_threshold = self.config.get('snipe_high_prio_threshold', 400)
self.by_pass_times = 0
data_file = os.path.join(_base_dir, 'map-caught-{}.json'.format(self.bot.config.username))
if os.path.isfile(data_file):
self.cache = json.load(
open(data_file)
)
self.alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)
self.debug = self.config.get('debug', False)
def pokemons_parser(self, pokemon_list):
pokemons = []
if not pokemon_list:
return pokemons
now = int(time.time())
for pokemon in pokemon_list:
try:
disappear = int(pokemon.get('expiration_timestamp_ms', 0) / 1000) or int(pokemon.get('disappear_time', 0) / 1000)
pokemon['encounter_id'] = pokemon.get('encounter_id', '')
pokemon['spawn_point_id'] = pokemon.get('spawn_point_id', '') or pokemon.get('spawnpoint_id', '')
pokemon['iv'] = pokemon.get('iv', 0)
pokemon['disappear_time'] = disappear
pokemon['name'] = self.pokemon_data[pokemon['pokemon_id'] - 1]['Name']
pokemon['is_vip'] = pokemon['name'] in self.bot.config.vips
except TypeError:
continue
except KeyError:
continue
if now > pokemon['disappear_time']:
continue
if pokemon['name'] not in self.config['catch'] and not pokemon['is_vip']:
if self.debug:
self._emit_failure("Not catching {}".format(pokemon['name']))
continue
if self.is_inspected(pokemon):
if self.debug:
self._emit_log('Skipped {} because it was already catch or does not exist'.format(pokemon['name']))
continue
pokemon['priority'] = self.config['catch'].get(pokemon['name'], 0)
pokemon['dist'] = distance(
self.bot.position[0],
self.bot.position[1],
pokemon['latitude'],
pokemon['longitude']
)
if pokemon['dist'] > self.config.get('max_sniping_distance', 10000):
continue
if pokemon['dist'] > self.config.get('max_walking_distance', 1000) and not self.snip_enabled:
continue
mean_walk_speed = (self.bot.config.walk_max + self.bot.config.walk_min) / 2
if pokemon['dist'] > ((pokemon['disappear_time'] - now) * mean_walk_speed) and not self.snip_enabled:
continue
pokemons.append(pokemon)
return pokemons
def get_pokemon_from_social(self):
    """Drain the queued MQTT/social pokemon list and return the parsed entries."""
    queued = getattr(self.bot, 'mqtt_pokemon_list', None)
    if not queued:
        return []
    # Take ownership of the queue before parsing so new pushes are not lost.
    self.bot.mqtt_pokemon_list = []
    return self.pokemons_parser(queued)
def get_pokemon_from_url(self):
    """Fetch and parse the pokemon list from the configured map endpoint.

    Returns the parsed list (see pokemons_parser), or [] when the endpoint
    is unreachable or returns invalid JSON.
    """
    endpoint = self.config['address']
    try:
        payload = requests.get(endpoint).json()
    except requests.exceptions.ConnectionError:
        self._emit_failure('Could not get data from {}'.format(endpoint))
        return []
    except ValueError:
        # Body was not valid JSON.
        self._emit_failure('JSON format is not valid')
        return []
    return self.pokemons_parser(payload.get('pokemons', []))
def is_inspected(self, pokemon):
    """Return True if a cached entry sits at (almost) the same coordinates.

    Coordinates are compared after rounding to 4 decimal places (~11 m),
    matching how entries were recorded by inspect().
    """
    target = ("{0:.4f}".format(pokemon['latitude']),
              "{0:.4f}".format(pokemon['longitude']))
    return any(
        target == ("{0:.4f}".format(seen['latitude']),
                   "{0:.4f}".format(seen['longitude']))
        for seen in self.cache
    )
def inspect(self, pokemon):
    """Record *pokemon* as handled unless an entry at the same spot exists.

    Delegates the duplicate check to is_inspected() so both methods share
    one coordinate comparison (the original duplicated the loop inline).
    The cache is capped at 200 entries, evicting the oldest first (FIFO).
    """
    if self.is_inspected(pokemon):
        return
    if len(self.cache) >= 200:
        self.cache.pop(0)  # evict the oldest entry
    self.cache.append(pokemon)
def snipe(self, pokemon):
    """Teleport to *pokemon*, start an encounter, teleport back and catch it.

    The heartbeat is locked for the whole round-trip so the server does not
    see the spoofed position.  Returns WorkerResult.SUCCESS.
    """
    last_position = self.bot.position[0:2]
    self.bot.hb_locked = True  # suspend heartbeat while the position is spoofed
    self._teleport_to(pokemon)
    time.sleep(2)
    # Social-feed entries may lack encounter/spawn ids; in that case we must
    # verify the pokemon really exists in the nearby cell after teleporting.
    verify = not pokemon.get('encounter_id') or not pokemon.get('spawn_point_id')
    exists = not verify and self.bot.config.enable_social
    if verify:
        nearby_pokemons = []
        nearby_stuff = self.bot.get_meta_cell()
        time.sleep(2)
        if 'wild_pokemons' in nearby_stuff:
            nearby_pokemons.extend(nearby_stuff['wild_pokemons'])
        if 'catchable_pokemons' in nearby_stuff:
            nearby_pokemons.extend(nearby_stuff['catchable_pokemons'])
        for nearby_pokemon in nearby_pokemons:
            # Wild entries nest their data one level deeper than catchable ones.
            is_wild = 'pokemon_data' in nearby_pokemon
            nearby_pokemon_id = nearby_pokemon['pokemon_data']['pokemon_id'] if is_wild else nearby_pokemon['pokemon_id']
            if nearby_pokemon_id == pokemon['pokemon_id']:
                exists = True
                # Backfill the ids the feed did not provide.
                if not pokemon['encounter_id'] or not pokemon['spawn_point_id']:
                    pokemon['encounter_id'] = nearby_pokemon['encounter_id']
                    pokemon['spawn_point_id'] = nearby_pokemon['spawn_point_id']
                    pokemon['disappear_time'] = nearby_pokemon['last_modified_timestamp_ms'] if is_wild else nearby_pokemon['expiration_timestamp_ms']
                break
    if exists:
        self._encountered(pokemon)
        catch_worker = PokemonCatchWorker(pokemon, self.bot)
        # Encounter while still at the spoofed position, then move back
        # before actually working the catch.
        api_encounter_response = catch_worker.create_encounter_api_call()
        time.sleep(self.config.get('snipe_sleep_sec', 2))
        self._teleport_back(last_position)
        self.bot.api.set_position(last_position[0], last_position[1], self.alt, False)
        time.sleep(self.config.get('snipe_sleep_sec', 2))
        catch_worker.work(api_encounter_response)
    else:
        self._emit_failure('{} doesnt exist anymore. Skipping...'.format(pokemon['name']))
        time.sleep(self.config.get('snipe_sleep_sec', 2))
        self._teleport_back(last_position)
        self.bot.api.set_position(last_position[0], last_position[1], self.alt, False)
        time.sleep(self.config.get('snipe_sleep_sec', 2))
    # Remember this target either way so we do not re-snipe the same spot.
    self.inspect(pokemon)
    self.bot.hb_locked = False
    return WorkerResult.SUCCESS
def dump_caught_pokemon(self):
    """Persist the handled-pokemon cache to the per-user map-caught file."""
    file_name = 'map-caught-{}.json'.format(self.bot.config.username)
    dump_path = os.path.join(_base_dir, 'data', file_name)
    with open(dump_path, 'w') as handle:
        json.dump(self.cache, handle)
def work(self):
    """Run one scheduling pass: pick the best map pokemon and snipe or walk to it.

    Returns a WorkerResult: RUNNING while still walking toward a target,
    SUCCESS otherwise.
    """
    pokeballs_quantity = inventory.items().get(POKEBALL_ID).count
    superballs_quantity = inventory.items().get(GREATBALL_ID).count
    ultraballs_quantity = inventory.items().get(ULTRABALL_ID).count
    total_balls = pokeballs_quantity + superballs_quantity + ultraballs_quantity
    if total_balls < self.min_ball:
        if self.debug:
            self._emit_log("Not enough balls to start sniping (have {}, {} needed)".format(
                total_balls, self.min_ball))
        return WorkerResult.SUCCESS
    if self.bot.catch_disabled:
        # Warn only once per disable window (getattr replaces the original
        # hasattr/flag double-check).
        if not getattr(self.bot, "mtmp_disabled_global_warning", False):
            self._emit_log("All catching tasks are currently disabled until {}. Sniping will resume when catching tasks are re-enabled".format(self.bot.catch_resume_at.strftime("%H:%M:%S")))
        self.bot.mtmp_disabled_global_warning = True
        return WorkerResult.SUCCESS
    else:
        self.bot.mtmp_disabled_global_warning = False
    if self.bot.softban:
        # Same warn-once pattern for softban detection.
        if not getattr(self.bot, "softban_global_warning", False):
            self.logger.info("Possible softban! Not trying to catch Pokemon.")
        self.bot.softban_global_warning = True
        return WorkerResult.SUCCESS
    else:
        self.bot.softban_global_warning = False
    self.dump_caught_pokemon()
    if self.bot.config.enable_social:
        if self.snip_enabled:
            # Only act every skip_rounds passes to rate-limit sniping.
            self.by_pass_times += 1
            if self.by_pass_times < self.config.get('skip_rounds', 30):
                if self.debug:
                    self._emit_log("Skipping pass {}".format(self.by_pass_times))
                return WorkerResult.SUCCESS
            self.by_pass_times = 0
        pokemon_list = self.get_pokemon_from_social()
    else:
        pokemon_list = self.get_pokemon_from_url()
    # Stable multi-key ordering: nearest first, then by priority/VIP when
    # configured (later sorts dominate because Python's sort is stable).
    pokemon_list.sort(key=lambda x: x['dist'])
    if self.config['mode'] == 'priority':
        pokemon_list.sort(key=lambda x: x['priority'], reverse=True)
    if self.config['prioritize_vips']:
        pokemon_list.sort(key=lambda x: x['is_vip'], reverse=True)
    if not pokemon_list:
        if self.debug:
            self._emit_log("No pokemons in list to snipe")
        return WorkerResult.SUCCESS
    pokemon = pokemon_list[0]
    if self.debug:
        self._emit_log('How many pokemon in list: {}'.format(len(pokemon_list)))
    if self.snip_enabled:
        if self.snipe_high_prio_only:
            count = 0
            for pokemon in pokemon_list:
                if self.snipe_high_prio_threshold < pokemon['priority']:
                    self.snipe(pokemon)
                    count += 1
                    if count >= self.config.get('snipe_max_in_chain', 2):
                        return WorkerResult.SUCCESS
                    # Fixed: original used 'count is not 1', which compares
                    # identity rather than value (SyntaxWarning on 3.8+).
                    if count != 1:
                        time.sleep(self.config.get('snipe_sleep_sec', 2) * 5)
                else:
                    # List is priority-ordered, so the first miss ends the chain.
                    if self.debug:
                        self._emit_log('this pokemon is not good enough to snipe {}'.format(pokemon))
                    return WorkerResult.SUCCESS
        else:
            return self.snipe(pokemon)
    # Re-count balls: sniping above may have consumed some.
    pokeballs_quantity = inventory.items().get(POKEBALL_ID).count
    superballs_quantity = inventory.items().get(GREATBALL_ID).count
    ultraballs_quantity = inventory.items().get(ULTRABALL_ID).count
    if pokeballs_quantity + superballs_quantity + ultraballs_quantity < self.min_ball:
        return WorkerResult.SUCCESS
    nearest_fort = self.get_nearest_fort_on_the_way(pokemon)
    if pokemon['is_vip'] or nearest_fort is None:
        # Walk straight at the target; lock catching to this pokemon id.
        self.bot.capture_locked = pokemon['pokemon_id']
        step_walker = self._move_to(pokemon)
        if not step_walker.step():
            if pokemon['dist'] < Constants.MAX_DISTANCE_POKEMON_IS_REACHABLE:
                self._encountered(pokemon)
                self.bot.capture_locked = False
                self.inspect(pokemon)
                return WorkerResult.SUCCESS
            else:
                return WorkerResult.RUNNING
    else:
        # Detour through a nearby pokestop on the way to the pokemon.
        step_walker = self._move_to_pokemon_througt_fort(nearest_fort, pokemon)
        if not step_walker or not step_walker.step():
            return WorkerResult.RUNNING
def _emit_failure(self, msg):
    """Emit a 'move_to_map_pokemon_fail' event carrying *msg*."""
    payload = {'message': msg}
    self.emit_event('move_to_map_pokemon_fail',
                    formatted='Failure! {message}',
                    data=payload)
def _emit_log(self, msg):
    """Emit a plain 'move_to_map_pokemon' log event carrying *msg*."""
    payload = {'message': msg}
    self.emit_event('move_to_map_pokemon',
                    formatted='{message}',
                    data=payload)
def _pokemon_event_data(self, pokemon):
    """Build the common event payload describing *pokemon*."""
    remaining = pokemon['disappear_time'] - int(time.time())
    return {
        'poke_name': pokemon['name'],
        'poke_dist': format_dist(pokemon['dist'], self.unit),
        'poke_lat': pokemon['latitude'],
        'poke_lon': pokemon['longitude'],
        'disappears_in': format_time(remaining),
    }
def _teleport_to(self, pokemon):
    """Announce and perform a teleport to *pokemon*'s coordinates."""
    self.emit_event('move_to_map_pokemon_teleport_to',
                    formatted='Teleporting to {poke_name}. ({poke_dist})',
                    data=self._pokemon_event_data(pokemon))
    self.bot.api.set_position(pokemon['latitude'], pokemon['longitude'],
                              self.alt, True)
def _encountered(self, pokemon):
    """Emit the encounter event for *pokemon*."""
    self.emit_event('move_to_map_pokemon_encounter',
                    formatted='Encountered Pokemon: {poke_name}',
                    data=self._pokemon_event_data(pokemon))
def _teleport_back(self, last_position):
    """Announce the teleport back; the caller performs set_position itself."""
    lat, lon = last_position[0], last_position[1]
    self.emit_event('move_to_map_pokemon_teleport_back',
                    formatted='Teleporting back to previous location ({last_lat}, {last_lon})...',
                    data={'last_lat': lat, 'last_lon': lon})
def _move_to(self, pokemon):
    """Announce the approach and return a walker toward *pokemon*."""
    self.emit_event(
        'move_to_map_pokemon_move_towards',
        formatted=('Moving towards {poke_name}, {poke_dist}, left ('
                   '{disappears_in})'),
        data=self._pokemon_event_data(pokemon)
    )
    return walker_factory(self.walker, self.bot,
                          pokemon['latitude'], pokemon['longitude'])
def _move_to_pokemon_througt_fort(self, fort, pokemon):
    """Walk toward *pokemon* via the pokestop *fort*; return a walker to the fort.

    (The historical misspelling 'througt' is kept because callers reference
    this name.)  Emits a progress event while en route and an arrival event
    once within reach of the fort.
    """
    nearest_fort = fort
    lat = nearest_fort['latitude']
    lng = nearest_fort['longitude']
    fortID = nearest_fort['id']
    # Resolve the human-readable fort name for the progress event.
    details = fort_details(self.bot, fortID, lat, lng)
    fort_name = details.get('name', 'Unknown')
    unit = self.bot.config.distance_unit
    dist = distance(
        self.bot.position[0],
        self.bot.position[1],
        lat,
        lng
    )
    if dist > Constants.MAX_DISTANCE_FORT_IS_REACHABLE:
        # Still en route: report progress toward the pokemon via this stop.
        pokemon_throught_fort_event_data = {
            'fort_name': u"{}".format(fort_name),
            'distance': format_dist(dist, unit),
            'poke_name': pokemon['name'],
            'poke_dist': (format_dist(pokemon['dist'], self.unit))
        }
        self.emit_event(
            'moving_to_pokemon_throught_fort',
            formatted="Moving towards {poke_name} - {poke_dist} through pokestop {fort_name} - {distance}",
            data=pokemon_throught_fort_event_data
        )
    else:
        self.emit_event(
            'arrived_at_fort',
            formatted='Arrived at fort.'
        )
    return walker_factory(self.walker, self.bot, lat, lng)
def get_nearest_fort_on_the_way(self, pokemon):
    """Return the nearest usable fort that is not too big a detour, or None.

    A fort qualifies when walking self -> fort -> pokemon is less than
    (1 + max_extra_dist_fort/100) times the direct self -> pokemon distance.

    Fix: the original did `forts = filter(...)` and then called len()/del on
    it -- on Python 3 filter() returns an iterator, so that raised TypeError.
    A list comprehension restores list semantics.  The ratio and the direct
    distance are also hoisted out of the loop (they are loop-invariant).
    """
    forts = self.bot.get_forts(order_by_distance=True)
    forts = [fort for fort in forts if fort["id"] not in self.bot.fort_timeouts]
    ratio = float(self.config.get('max_extra_dist_fort', 20))
    dist_self_to_pokemon = distance(self.bot.position[0], self.bot.position[1],
                                    pokemon['latitude'], pokemon['longitude'])
    max_total_dist = (1 + (ratio / 100)) * dist_self_to_pokemon
    for fort in forts:
        dist_self_to_fort = distance(self.bot.position[0], self.bot.position[1],
                                     fort['latitude'], fort['longitude'])
        dist_fort_to_pokemon = distance(pokemon['latitude'], pokemon['longitude'],
                                        fort['latitude'], fort['longitude'])
        # Forts are already ordered by distance, so the first qualifying
        # fort is the nearest one.
        if dist_self_to_fort + dist_fort_to_pokemon < max_total_dist:
            return fort
    return None
| true | true |
f7f424ccd90f69485342448ad5bf957dcf9b6570 | 8,591 | py | Python | pytorch/utils.py | joeranbosma/ModelsGenesis | 24ae94cbc1b2ae15d95771f249d6443369ba0742 | [
"MIT"
] | 574 | 2019-10-01T02:30:56.000Z | 2022-03-29T07:34:08.000Z | pytorch/utils.py | luisfilipeap/ModelsGenesis | 5b18ea88d662e5250523434d02cfdcb6b527e634 | [
"MIT"
] | 52 | 2019-10-11T02:43:21.000Z | 2022-03-14T03:10:57.000Z | pytorch/utils.py | luisfilipeap/ModelsGenesis | 5b18ea88d662e5250523434d02cfdcb6b527e634 | [
"MIT"
] | 143 | 2019-10-01T13:38:10.000Z | 2022-03-29T02:26:04.000Z | from __future__ import print_function
import math
import os
import random
import copy
import scipy
import imageio
import string
import numpy as np
from skimage.transform import resize
try: # SciPy >= 0.19
from scipy.special import comb
except ImportError:
from scipy.misc import comb
def bernstein_poly(i, n, t):
    """Evaluate the i-th Bernstein basis polynomial of degree n at t.

    Note: this pairs t**(n-i) with (1-t)**i, i.e. the textbook basis indexed
    in reverse order; the family still sums to 1 over i = 0..n.
    """
    coefficient = comb(n, i)
    return coefficient * (t ** (n - i)) * ((1 - t) ** i)


def bezier_curve(points, nTimes=1000):
    """Sample the Bezier curve defined by the control *points*.

    points: sequence of (x, y) pairs, e.g. [[1, 1], [2, 3], [4, 5]].
    nTimes: number of parameter values sampled over [0, 1].
    Returns (xvals, yvals), each an array of length nTimes.

    See http://processingjs.nihongoresources.com/bezierinfo/
    """
    degree = len(points) - 1
    x_coords = np.array([point[0] for point in points])
    y_coords = np.array([point[1] for point in points])
    t = np.linspace(0.0, 1.0, nTimes)
    # One row per control point: its Bernstein basis sampled at every t.
    basis = np.array([bernstein_poly(i, degree, t) for i in range(degree + 1)])
    return np.dot(x_coords, basis), np.dot(y_coords, basis)
def data_augmentation(x, y, prob=0.5):
    """Randomly flip x and y together along up to three axes.

    Each of at most three flips happens with probability *prob*; x and y
    receive identical flips so paired samples stay aligned.  Returns the
    (possibly flipped views of the) inputs.
    """
    remaining = 3
    # Keep the original evaluation order (random first) so seeded RNG
    # streams stay byte-compatible with the previous implementation.
    while random.random() < prob and remaining > 0:
        axis = random.choice([0, 1, 2])
        x = np.flip(x, axis=axis)
        y = np.flip(y, axis=axis)
        remaining -= 1
    return x, y
def nonlinear_transformation(x, prob=0.5):
    """Remap intensities of x through a random Bezier-derived lookup curve.

    With probability 1-prob, x is returned unchanged.  Otherwise a Bezier
    curve through (0,0), two random control points and (1,1) is densely
    sampled and used as a lookup table via np.interp.  Assumes x values lie
    in [0, 1] (np.interp clamps values outside the table) -- TODO confirm
    inputs are normalized by the caller.

    Fix: removed the dead locals `xpoints`/`ypoints`, which were computed
    and never used.  RNG call order is unchanged.
    """
    if random.random() >= prob:
        return x
    points = [[0, 0], [random.random(), random.random()], [random.random(), random.random()], [1, 1]]
    xvals, yvals = bezier_curve(points, nTimes=100000)
    if random.random() < 0.5:
        # Half chance to "flip": sorting only xvals pairs ascending x with
        # the untouched y samples, reversing the mapping's direction.
        xvals = np.sort(xvals)
    else:
        xvals, yvals = np.sort(xvals), np.sort(yvals)
    return np.interp(x, xvals, yvals)
def local_pixel_shuffling(x, prob=0.5):
    """Shuffle voxels inside many small random sub-blocks of x.

    x: 4-D array (channel, rows, cols, deps); only channel 0 is shuffled.
    With probability 1-prob the input is returned untouched.  Otherwise
    10000 random blocks (each at most 1/10 of every dimension) have their
    voxels permuted in place within the block.  Returns a modified copy;
    the input array itself is not mutated (deepcopy below).
    """
    if random.random() >= prob:
        return x
    image_temp = copy.deepcopy(x)
    orig_image = copy.deepcopy(x)
    _, img_rows, img_cols, img_deps = x.shape
    num_block = 10000
    for _ in range(num_block):
        # Random block extent, capped at 1/10 of each dimension.
        block_noise_size_x = random.randint(1, img_rows//10)
        block_noise_size_y = random.randint(1, img_cols//10)
        block_noise_size_z = random.randint(1, img_deps//10)
        # Random block origin, kept fully inside the volume.
        noise_x = random.randint(0, img_rows-block_noise_size_x)
        noise_y = random.randint(0, img_cols-block_noise_size_y)
        noise_z = random.randint(0, img_deps-block_noise_size_z)
        # Read from the pristine copy so overlapping blocks do not compound.
        window = orig_image[0, noise_x:noise_x+block_noise_size_x,
                               noise_y:noise_y+block_noise_size_y,
                               noise_z:noise_z+block_noise_size_z,
                           ]
        window = window.flatten()
        np.random.shuffle(window)
        window = window.reshape((block_noise_size_x,
                                 block_noise_size_y,
                                 block_noise_size_z))
        image_temp[0, noise_x:noise_x+block_noise_size_x,
                      noise_y:noise_y+block_noise_size_y,
                      noise_z:noise_z+block_noise_size_z] = window
    local_shuffling_x = image_temp
    return local_shuffling_x
def image_in_painting(x):
    """Overwrite up to 5 random inner blocks of x with uniform [0,1) noise.

    x: 4-D array (channel, rows, cols, deps); mutated in place and returned.
    Block sides span 1/6..1/3 of the matching dimension and keep a 3-voxel
    margin to every border.  After the first block, each additional one is
    drawn with probability 0.95.  Assumes voxel intensities are normalized
    to [0, 1] so the noise blends in -- TODO confirm at the call site.
    """
    _, img_rows, img_cols, img_deps = x.shape
    cnt = 5
    while cnt > 0 and random.random() < 0.95:
        block_noise_size_x = random.randint(img_rows//6, img_rows//3)
        block_noise_size_y = random.randint(img_cols//6, img_cols//3)
        block_noise_size_z = random.randint(img_deps//6, img_deps//3)
        noise_x = random.randint(3, img_rows-block_noise_size_x-3)
        noise_y = random.randint(3, img_cols-block_noise_size_y-3)
        noise_z = random.randint(3, img_deps-block_noise_size_z-3)
        x[:,
          noise_x:noise_x+block_noise_size_x,
          noise_y:noise_y+block_noise_size_y,
          noise_z:noise_z+block_noise_size_z] = np.random.rand(block_noise_size_x,
                                                               block_noise_size_y,
                                                               block_noise_size_z, ) * 1.0
        cnt -= 1
    return x
def image_out_painting(x):
    """Replace x with noise except for a few random blocks of the original.

    The inverse of in-painting: the whole volume becomes uniform noise, then
    one guaranteed block plus up to 4 more (each with probability 0.95) are
    restored from the original content.  Restored block sides span roughly
    3/7..4/7 of each dimension with a 3-voxel border margin.  Returns a new
    array; the caller's binding is rebound, not mutated.
    """
    _, img_rows, img_cols, img_deps = x.shape
    image_temp = copy.deepcopy(x)
    x = np.random.rand(x.shape[0], x.shape[1], x.shape[2], x.shape[3], ) * 1.0
    # First restored block (always applied).
    block_noise_size_x = img_rows - random.randint(3*img_rows//7, 4*img_rows//7)
    block_noise_size_y = img_cols - random.randint(3*img_cols//7, 4*img_cols//7)
    block_noise_size_z = img_deps - random.randint(3*img_deps//7, 4*img_deps//7)
    noise_x = random.randint(3, img_rows-block_noise_size_x-3)
    noise_y = random.randint(3, img_cols-block_noise_size_y-3)
    noise_z = random.randint(3, img_deps-block_noise_size_z-3)
    x[:,
      noise_x:noise_x+block_noise_size_x,
      noise_y:noise_y+block_noise_size_y,
      noise_z:noise_z+block_noise_size_z] = image_temp[:, noise_x:noise_x+block_noise_size_x,
                                                       noise_y:noise_y+block_noise_size_y,
                                                       noise_z:noise_z+block_noise_size_z]
    # Up to 4 additional restored blocks.
    cnt = 4
    while cnt > 0 and random.random() < 0.95:
        block_noise_size_x = img_rows - random.randint(3*img_rows//7, 4*img_rows//7)
        block_noise_size_y = img_cols - random.randint(3*img_cols//7, 4*img_cols//7)
        block_noise_size_z = img_deps - random.randint(3*img_deps//7, 4*img_deps//7)
        noise_x = random.randint(3, img_rows-block_noise_size_x-3)
        noise_y = random.randint(3, img_cols-block_noise_size_y-3)
        noise_z = random.randint(3, img_deps-block_noise_size_z-3)
        x[:,
          noise_x:noise_x+block_noise_size_x,
          noise_y:noise_y+block_noise_size_y,
          noise_z:noise_z+block_noise_size_z] = image_temp[:, noise_x:noise_x+block_noise_size_x,
                                                           noise_y:noise_y+block_noise_size_y,
                                                           noise_z:noise_z+block_noise_size_z]
        cnt -= 1
    return x
def generate_pair(img, batch_size, config, status="test"):
    """Infinite generator of (distorted, original) batches for self-supervision.

    img: 5-D array (N, channel, rows, cols, deps).  Each iteration draws a
    random batch and yields (x, y) where y is the clean batch and x is y
    after flip, local pixel shuffling, optional non-linear intensity mapping
    and in-/out-painting, with rates taken from *config*.
    """
    img_rows, img_cols, img_deps = img.shape[2], img.shape[3], img.shape[4]
    while True:
        index = [i for i in range(img.shape[0])]
        random.shuffle(index)
        y = img[index[:batch_size]]
        x = copy.deepcopy(y)
        for n in range(batch_size):
            # Autoencoder
            x[n] = copy.deepcopy(y[n])
            # Flip
            x[n], y[n] = data_augmentation(x[n], y[n], config.flip_rate)
            # Local Shuffle Pixel
            x[n] = local_pixel_shuffling(x[n], prob=config.local_rate)
            # Apply non-Linear transformation with an assigned probability
            x[n] = nonlinear_transformation(x[n], config.nonlinear_rate)
            # Inpainting & Outpainting
            if random.random() < config.paint_rate:
                if random.random() < config.inpaint_rate:
                    # Inpainting
                    x[n] = image_in_painting(x[n])
                else:
                    # Outpainting
                    x[n] = image_out_painting(x[n])
        # Save sample images module: ~1% of training passes dump a side-by-side
        # (distorted | original) montage of four depth slices for inspection.
        # NOTE(review): n_sample is drawn from config.batch_size while the
        # arrays are sized by the batch_size argument -- confirm these always
        # match, otherwise this can index out of range.
        if config.save_samples is not None and status == "train" and random.random() < 0.01:
            n_sample = random.choice( [i for i in range(config.batch_size)] )
            sample_1 = np.concatenate((x[n_sample,0,:,:,2*img_deps//6], y[n_sample,0,:,:,2*img_deps//6]), axis=1)
            sample_2 = np.concatenate((x[n_sample,0,:,:,3*img_deps//6], y[n_sample,0,:,:,3*img_deps//6]), axis=1)
            sample_3 = np.concatenate((x[n_sample,0,:,:,4*img_deps//6], y[n_sample,0,:,:,4*img_deps//6]), axis=1)
            sample_4 = np.concatenate((x[n_sample,0,:,:,5*img_deps//6], y[n_sample,0,:,:,5*img_deps//6]), axis=1)
            final_sample = np.concatenate((sample_1, sample_2, sample_3, sample_4), axis=0)
            final_sample = final_sample * 255.0
            final_sample = final_sample.astype(np.uint8)
            file_name = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(10)])+'.'+config.save_samples
            imageio.imwrite(os.path.join(config.sample_path, config.exp_name, file_name), final_sample)
        yield (x, y)
| 42.112745 | 129 | 0.600047 | from __future__ import print_function
import math
import os
import random
import copy
import scipy
import imageio
import string
import numpy as np
from skimage.transform import resize
try:
from scipy.special import comb
except ImportError:
from scipy.misc import comb
def bernstein_poly(i, n, t):
return comb(n, i) * ( t**(n-i) ) * (1 - t)**i
def bezier_curve(points, nTimes=1000):
nPoints = len(points)
xPoints = np.array([p[0] for p in points])
yPoints = np.array([p[1] for p in points])
t = np.linspace(0.0, 1.0, nTimes)
polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])
xvals = np.dot(xPoints, polynomial_array)
yvals = np.dot(yPoints, polynomial_array)
return xvals, yvals
def data_augmentation(x, y, prob=0.5):
cnt = 3
while random.random() < prob and cnt > 0:
degree = random.choice([0, 1, 2])
x = np.flip(x, axis=degree)
y = np.flip(y, axis=degree)
cnt = cnt - 1
return x, y
def nonlinear_transformation(x, prob=0.5):
if random.random() >= prob:
return x
points = [[0, 0], [random.random(), random.random()], [random.random(), random.random()], [1, 1]]
xpoints = [p[0] for p in points]
ypoints = [p[1] for p in points]
xvals, yvals = bezier_curve(points, nTimes=100000)
if random.random() < 0.5:
xvals = np.sort(xvals)
else:
xvals, yvals = np.sort(xvals), np.sort(yvals)
nonlinear_x = np.interp(x, xvals, yvals)
return nonlinear_x
def local_pixel_shuffling(x, prob=0.5):
if random.random() >= prob:
return x
image_temp = copy.deepcopy(x)
orig_image = copy.deepcopy(x)
_, img_rows, img_cols, img_deps = x.shape
num_block = 10000
for _ in range(num_block):
block_noise_size_x = random.randint(1, img_rows//10)
block_noise_size_y = random.randint(1, img_cols//10)
block_noise_size_z = random.randint(1, img_deps//10)
noise_x = random.randint(0, img_rows-block_noise_size_x)
noise_y = random.randint(0, img_cols-block_noise_size_y)
noise_z = random.randint(0, img_deps-block_noise_size_z)
window = orig_image[0, noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y,
noise_z:noise_z+block_noise_size_z,
]
window = window.flatten()
np.random.shuffle(window)
window = window.reshape((block_noise_size_x,
block_noise_size_y,
block_noise_size_z))
image_temp[0, noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y,
noise_z:noise_z+block_noise_size_z] = window
local_shuffling_x = image_temp
return local_shuffling_x
def image_in_painting(x):
_, img_rows, img_cols, img_deps = x.shape
cnt = 5
while cnt > 0 and random.random() < 0.95:
block_noise_size_x = random.randint(img_rows//6, img_rows//3)
block_noise_size_y = random.randint(img_cols//6, img_cols//3)
block_noise_size_z = random.randint(img_deps//6, img_deps//3)
noise_x = random.randint(3, img_rows-block_noise_size_x-3)
noise_y = random.randint(3, img_cols-block_noise_size_y-3)
noise_z = random.randint(3, img_deps-block_noise_size_z-3)
x[:,
noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y,
noise_z:noise_z+block_noise_size_z] = np.random.rand(block_noise_size_x,
block_noise_size_y,
block_noise_size_z, ) * 1.0
cnt -= 1
return x
def image_out_painting(x):
_, img_rows, img_cols, img_deps = x.shape
image_temp = copy.deepcopy(x)
x = np.random.rand(x.shape[0], x.shape[1], x.shape[2], x.shape[3], ) * 1.0
block_noise_size_x = img_rows - random.randint(3*img_rows//7, 4*img_rows//7)
block_noise_size_y = img_cols - random.randint(3*img_cols//7, 4*img_cols//7)
block_noise_size_z = img_deps - random.randint(3*img_deps//7, 4*img_deps//7)
noise_x = random.randint(3, img_rows-block_noise_size_x-3)
noise_y = random.randint(3, img_cols-block_noise_size_y-3)
noise_z = random.randint(3, img_deps-block_noise_size_z-3)
x[:,
noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y,
noise_z:noise_z+block_noise_size_z] = image_temp[:, noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y,
noise_z:noise_z+block_noise_size_z]
cnt = 4
while cnt > 0 and random.random() < 0.95:
block_noise_size_x = img_rows - random.randint(3*img_rows//7, 4*img_rows//7)
block_noise_size_y = img_cols - random.randint(3*img_cols//7, 4*img_cols//7)
block_noise_size_z = img_deps - random.randint(3*img_deps//7, 4*img_deps//7)
noise_x = random.randint(3, img_rows-block_noise_size_x-3)
noise_y = random.randint(3, img_cols-block_noise_size_y-3)
noise_z = random.randint(3, img_deps-block_noise_size_z-3)
x[:,
noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y,
noise_z:noise_z+block_noise_size_z] = image_temp[:, noise_x:noise_x+block_noise_size_x,
noise_y:noise_y+block_noise_size_y,
noise_z:noise_z+block_noise_size_z]
cnt -= 1
return x
def generate_pair(img, batch_size, config, status="test"):
img_rows, img_cols, img_deps = img.shape[2], img.shape[3], img.shape[4]
while True:
index = [i for i in range(img.shape[0])]
random.shuffle(index)
y = img[index[:batch_size]]
x = copy.deepcopy(y)
for n in range(batch_size):
x[n] = copy.deepcopy(y[n])
x[n], y[n] = data_augmentation(x[n], y[n], config.flip_rate)
x[n] = local_pixel_shuffling(x[n], prob=config.local_rate)
x[n] = nonlinear_transformation(x[n], config.nonlinear_rate)
if random.random() < config.paint_rate:
if random.random() < config.inpaint_rate:
x[n] = image_in_painting(x[n])
else:
x[n] = image_out_painting(x[n])
if config.save_samples is not None and status == "train" and random.random() < 0.01:
n_sample = random.choice( [i for i in range(config.batch_size)] )
sample_1 = np.concatenate((x[n_sample,0,:,:,2*img_deps//6], y[n_sample,0,:,:,2*img_deps//6]), axis=1)
sample_2 = np.concatenate((x[n_sample,0,:,:,3*img_deps//6], y[n_sample,0,:,:,3*img_deps//6]), axis=1)
sample_3 = np.concatenate((x[n_sample,0,:,:,4*img_deps//6], y[n_sample,0,:,:,4*img_deps//6]), axis=1)
sample_4 = np.concatenate((x[n_sample,0,:,:,5*img_deps//6], y[n_sample,0,:,:,5*img_deps//6]), axis=1)
final_sample = np.concatenate((sample_1, sample_2, sample_3, sample_4), axis=0)
final_sample = final_sample * 255.0
final_sample = final_sample.astype(np.uint8)
file_name = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(10)])+'.'+config.save_samples
imageio.imwrite(os.path.join(config.sample_path, config.exp_name, file_name), final_sample)
yield (x, y)
| true | true |
f7f428befe91cd3b1559cbd4451c77288a712c2a | 6,731 | py | Python | sdk/python/pulumi_azure_native/apimanagement/v20170301/api_policy.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20170301/api_policy.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20170301/api_policy.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['ApiPolicy']
class ApiPolicy(pulumi.CustomResource):
    # NOTE(review): this class is emitted by the Pulumi SDK generator; keep
    # hand edits to comments only so a regeneration does not drop fixes.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 api_id: Optional[pulumi.Input[str]] = None,
                 policy_content: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 service_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Policy Contract details.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
        :param pulumi.Input[str] policy_content: Json escaped Xml Encoded contents of the Policy.
        :param pulumi.Input[str] policy_id: The identifier of the Policy.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] service_name: The name of the API Management service.
        """
        # Back-compat shims for the deprecated __name__/__opts__ arguments.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate the required inputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if api_id is None and not opts.urn:
                raise TypeError("Missing required property 'api_id'")
            __props__['api_id'] = api_id
            if policy_content is None and not opts.urn:
                raise TypeError("Missing required property 'policy_content'")
            __props__['policy_content'] = policy_content
            __props__['policy_id'] = policy_id
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if service_name is None and not opts.urn:
                raise TypeError("Missing required property 'service_name'")
            __props__['service_name'] = service_name
            # Output-only properties start unresolved.
            __props__['name'] = None
            __props__['type'] = None
        # Aliases map every other API version of this resource type to this one.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/latest:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:ApiPolicy")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ApiPolicy, __self__).__init__(
            'azure-native:apimanagement/v20170301:ApiPolicy',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ApiPolicy':
        """
        Get an existing ApiPolicy resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties come back from the provider; none are settable here.
        __props__ = dict()
        __props__["name"] = None
        __props__["policy_content"] = None
        __props__["type"] = None
        return ApiPolicy(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="policyContent")
    def policy_content(self) -> pulumi.Output[str]:
        """
        Json escaped Xml Encoded contents of the Policy.
        """
        return pulumi.get(self, "policy_content")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 52.585938 | 1,409 | 0.676274 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['ApiPolicy']
class ApiPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
policy_content: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__['api_id'] = api_id
if policy_content is None and not opts.urn:
raise TypeError("Missing required property 'policy_content'")
__props__['policy_content'] = policy_content
__props__['policy_id'] = policy_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/latest:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:ApiPolicy"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:ApiPolicy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:ApiPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApiPolicy, __self__).__init__(
'azure-native:apimanagement/v20170301:ApiPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApiPolicy':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["name"] = None
__props__["policy_content"] = None
__props__["type"] = None
return ApiPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyContent")
def policy_content(self) -> pulumi.Output[str]:
return pulumi.get(self, "policy_content")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7f428e72ba7540ca0e54872eae615627aa04762 | 2,370 | py | Python | lib/ingest/tests/test_py_hdfs.py | shivaathreya/ibis | f99e3b7a677652a8a1c00a069e645d97682e839c | [
"Apache-2.0"
] | 50 | 2018-09-27T13:03:45.000Z | 2021-04-06T15:36:59.000Z | lib/ingest/tests/test_py_hdfs.py | shivaathreya/ibis | f99e3b7a677652a8a1c00a069e645d97682e839c | [
"Apache-2.0"
] | null | null | null | lib/ingest/tests/test_py_hdfs.py | shivaathreya/ibis | f99e3b7a677652a8a1c00a069e645d97682e839c | [
"Apache-2.0"
] | 14 | 2018-10-03T20:36:15.000Z | 2021-05-18T07:08:57.000Z | import unittest
from mock import patch
from lib.ingest.py_hdfs import PyHDFS
BASE_DIR = '/user/dev/data/checks_balances/'
class PopenClose(object):
def close(self):
pass
class PopenOut(object):
def __init__(self, std_out):
self.stdout = std_out
self.returncode = 0
def communicate(self):
return (self.stdout, 'No Error')
class PopenOne(object):
def __init__(self, std_out):
self.stdout = PopenClose()
self.stdin = std_out
self.returncode = 0
def communicate(self):
return (self.stdout, 'No Error')
class PyHDFSTest(unittest.TestCase):
"""Test PyHDFS class"""
def setUp(self):
self.pyhdfs = PyHDFS(BASE_DIR)
def tearDown(self):
self.pyhdfs = None
@patch('lib.ingest.py_hdfs.PyHDFS.execute',
return_value=('Ok Response', 0, None))
def test_get_file(self, mock_execute):
res = self.pyhdfs.get_file('/user', )
self.assertEqual(res[0], 'Ok Response')
@patch('lib.ingest.py_hdfs.PyHDFS.execute_with_echo',
return_value=('Execute Response', 0, None))
@patch('lib.ingest.py_hdfs.PyHDFS.execute',
return_value=('Get File Response', 0, None))
def test_insert_update(self, mock_execute_with_echo, mock_execute):
res = self.pyhdfs.insert_update('/user', 'sample data')
self.assertEqual(res[0], 'Execute Response')
def test_tuple_to_string(self):
tuple_val = ('ls', '-R')
res = self.pyhdfs.tuple_to_string(tuple_val)
self.assertEqual(res, 'ls -R')
@patch(
'subprocess.Popen',
return_value=PopenOut('2017-03-28 07:41 /user/data/checks_balances/'))
def test_execute(self, mock_subproc_popen):
result = self.pyhdfs.execute('-ls')
expected = '2017-03-28 07:41 /user/data/checks_balances/'
self.assertEquals(result, (expected, 0, 'No Error'))
@patch(
'subprocess.Popen',
return_value=PopenOne(''), autospec=True)
@patch(
'subprocess.Popen',
return_value=PopenOut(
['2017-03-28 07:41 /user/data/checks_balances/']))
def test_execute_with_echo(self, mock_popen_close, mock_popen):
result = self.pyhdfs.execute_with_echo('-ls', '2017-03-28 07:41')
self.assertEquals(0, result[1])
if __name__ == "__main__":
unittest.main()
| 29.259259 | 78 | 0.638819 | import unittest
from mock import patch
from lib.ingest.py_hdfs import PyHDFS
BASE_DIR = '/user/dev/data/checks_balances/'
class PopenClose(object):
def close(self):
pass
class PopenOut(object):
def __init__(self, std_out):
self.stdout = std_out
self.returncode = 0
def communicate(self):
return (self.stdout, 'No Error')
class PopenOne(object):
def __init__(self, std_out):
self.stdout = PopenClose()
self.stdin = std_out
self.returncode = 0
def communicate(self):
return (self.stdout, 'No Error')
class PyHDFSTest(unittest.TestCase):
def setUp(self):
self.pyhdfs = PyHDFS(BASE_DIR)
def tearDown(self):
self.pyhdfs = None
@patch('lib.ingest.py_hdfs.PyHDFS.execute',
return_value=('Ok Response', 0, None))
def test_get_file(self, mock_execute):
res = self.pyhdfs.get_file('/user', )
self.assertEqual(res[0], 'Ok Response')
@patch('lib.ingest.py_hdfs.PyHDFS.execute_with_echo',
return_value=('Execute Response', 0, None))
@patch('lib.ingest.py_hdfs.PyHDFS.execute',
return_value=('Get File Response', 0, None))
def test_insert_update(self, mock_execute_with_echo, mock_execute):
res = self.pyhdfs.insert_update('/user', 'sample data')
self.assertEqual(res[0], 'Execute Response')
def test_tuple_to_string(self):
tuple_val = ('ls', '-R')
res = self.pyhdfs.tuple_to_string(tuple_val)
self.assertEqual(res, 'ls -R')
@patch(
'subprocess.Popen',
return_value=PopenOut('2017-03-28 07:41 /user/data/checks_balances/'))
def test_execute(self, mock_subproc_popen):
result = self.pyhdfs.execute('-ls')
expected = '2017-03-28 07:41 /user/data/checks_balances/'
self.assertEquals(result, (expected, 0, 'No Error'))
@patch(
'subprocess.Popen',
return_value=PopenOne(''), autospec=True)
@patch(
'subprocess.Popen',
return_value=PopenOut(
['2017-03-28 07:41 /user/data/checks_balances/']))
def test_execute_with_echo(self, mock_popen_close, mock_popen):
result = self.pyhdfs.execute_with_echo('-ls', '2017-03-28 07:41')
self.assertEquals(0, result[1])
if __name__ == "__main__":
unittest.main()
| true | true |
f7f42909824e13101e2b6790665ab96a7152e004 | 1,107 | py | Python | app/artworks/urls/user_urls.py | Vadee-art/backend | 9b068d6ed11c1ffeccc13c4be67f1bb87a12d6ad | [
"MIT"
] | null | null | null | app/artworks/urls/user_urls.py | Vadee-art/backend | 9b068d6ed11c1ffeccc13c4be67f1bb87a12d6ad | [
"MIT"
] | null | null | null | app/artworks/urls/user_urls.py | Vadee-art/backend | 9b068d6ed11c1ffeccc13c4be67f1bb87a12d6ad | [
"MIT"
] | null | null | null | from django.urls import path
from artworks.views import user_views as views
urlpatterns = [
path('login/', views.MyTokenObtainPairView.as_view(),
name='token_obtain_pair'),
path('register/', views.registerUser, name='register'),
path('profile/', views.fetchUserProfile, name='users_profile'),
path('profile/update/', views.updateUserProfile, name='users_profile_update'),
path('profile/artworks/mine', views.fetchMyArtworks, name='users_profile_artworks'),
path('profile/artworks/favorites/', views.fetchFavoriteArtworkList, name='favorite_artworks'),
path('profile/artists/favorites/', views.fetchFavoriteArtistList, name='favorite_artists'),
path('artwork/favorite/<int:pk>/', views.addFavoriteArtwork, name='favorite_add'),
path('artist/favorite/<int:pk>/', views.addFavoriteArtwork, name='favorite_add'),
# path('update/<int:pk>/', views.updateUserById, name='user_update_by_id'),
path('', views.fetchUsers, name='users'),
path('delete/', views.deleteUser, name='user_delete'),
path('<int:pk>/', views.fetchUsersById, name='user_by_id'),
]
| 52.714286 | 98 | 0.725384 | from django.urls import path
from artworks.views import user_views as views
urlpatterns = [
path('login/', views.MyTokenObtainPairView.as_view(),
name='token_obtain_pair'),
path('register/', views.registerUser, name='register'),
path('profile/', views.fetchUserProfile, name='users_profile'),
path('profile/update/', views.updateUserProfile, name='users_profile_update'),
path('profile/artworks/mine', views.fetchMyArtworks, name='users_profile_artworks'),
path('profile/artworks/favorites/', views.fetchFavoriteArtworkList, name='favorite_artworks'),
path('profile/artists/favorites/', views.fetchFavoriteArtistList, name='favorite_artists'),
path('artwork/favorite/<int:pk>/', views.addFavoriteArtwork, name='favorite_add'),
path('artist/favorite/<int:pk>/', views.addFavoriteArtwork, name='favorite_add'),
path('', views.fetchUsers, name='users'),
path('delete/', views.deleteUser, name='user_delete'),
path('<int:pk>/', views.fetchUsersById, name='user_by_id'),
]
| true | true |
f7f4298ed360b057a74c12a0b99ed8d232254930 | 1,343 | py | Python | src/pretix/api/pagination.py | andynd/pretix | edef9f1b23a775b50c0bacacd40a4bbd5d77dcae | [
"Apache-2.0"
] | 1,248 | 2015-04-24T13:32:06.000Z | 2022-03-29T07:01:36.000Z | src/pretix/api/pagination.py | andynd/pretix | edef9f1b23a775b50c0bacacd40a4bbd5d77dcae | [
"Apache-2.0"
] | 2,113 | 2015-02-18T18:58:16.000Z | 2022-03-31T11:12:32.000Z | src/pretix/api/pagination.py | thegcat/pretix | 451d3fce0575d85a0ea93fd64aa0631feaced967 | [
"Apache-2.0"
] | 453 | 2015-05-13T09:29:06.000Z | 2022-03-24T13:39:16.000Z | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from rest_framework.pagination import PageNumberPagination
class Pagination(PageNumberPagination):
page_size_query_param = 'page_size'
max_page_size = 50
| 47.964286 | 118 | 0.782576 |
from rest_framework.pagination import PageNumberPagination
class Pagination(PageNumberPagination):
page_size_query_param = 'page_size'
max_page_size = 50
| true | true |
f7f429e1b801066b217f93e9623c06124e3c5857 | 76,086 | py | Python | src/transformers/modeling_mobilebert.py | mattiaguerri/transformers | ebc36108dc1c20985905c79f7d6a00f57f3cd3ae | [
"Apache-2.0"
] | 647 | 2020-10-27T01:35:35.000Z | 2022-03-29T12:59:11.000Z | src/transformers/modeling_mobilebert.py | mattiaguerri/transformers | ebc36108dc1c20985905c79f7d6a00f57f3cd3ae | [
"Apache-2.0"
] | 18 | 2020-11-01T17:57:59.000Z | 2022-03-16T06:33:40.000Z | src/transformers/modeling_mobilebert.py | mattiaguerri/transformers | ebc36108dc1c20985905c79f7d6a00f57f3cd3ae | [
"Apache-2.0"
] | 38 | 2020-10-29T12:35:30.000Z | 2022-03-24T20:48:07.000Z | # MIT License
#
# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import math
import os
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.modeling_bert import BertIntermediate
from .activations import gelu, gelu_new, swish
from .configuration_mobilebert import MobileBertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
logger = logging.getLogger(__name__)
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["mobilebert-uncased"]
def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.replace("ffn_layer", "ffn")
name = name.replace("FakeLayerNorm", "LayerNorm")
name = name.replace("extra_output_weights", "dense/kernel")
name = name.replace("bert", "mobilebert")
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def mish(x):
return x * torch.tanh(nn.functional.softplus(x))
class NoNorm(nn.Module):
def __init__(self, feat_size, eps=None):
super().__init__()
self.bias = nn.Parameter(torch.zeros(feat_size))
self.weight = nn.Parameter(torch.ones(feat_size))
def forward(self, input_tensor):
return input_tensor * self.weight + self.bias
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish}
NORM2FN = {"layer_norm": torch.nn.LayerNorm, "no_norm": NoNorm}
class MobileBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.trigram_input = config.trigram_input
self.embedding_size = config.embedding_size
self.hidden_size = config.hidden_size
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
embed_dim_multiplier = 3 if self.trigram_input else 1
embedded_input_size = self.embedding_size * embed_dim_multiplier
self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.trigram_input:
# From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
# Devices (https://arxiv.org/abs/2004.02984)
#
# The embedding table in BERT models accounts for a substantial proportion of model size. To compress
# the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
# Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
# dimensional output.
inputs_embeds = torch.cat(
[
F.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0),
inputs_embeds,
F.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0),
],
dim=2,
)
if self.trigram_input or self.embedding_size != self.hidden_size:
inputs_embeds = self.embedding_transformation(inputs_embeds)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class MobileBertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
self.value = nn.Linear(
config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
query_tensor,
key_tensor,
value_tensor,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
):
mixed_query_layer = self.query(query_tensor)
mixed_key_layer = self.key(key_tensor)
mixed_value_layer = self.value(value_tensor)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class MobileBertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
if not self.use_bottleneck:
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
if not self.use_bottleneck:
layer_outputs = self.dropout(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
class MobileBertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = MobileBertSelfAttention(config)
self.output = MobileBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
):
self_outputs = self.self(
query_tensor,
key_tensor,
value_tensor,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
attention_output = self.output(self_outputs[0], layer_input)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class MobileBertIntermediate(BertIntermediate):
def __init__(self, config):
super().__init__(config)
self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
class OutputBottleneck(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
layer_outputs = self.dropout(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
class MobileBertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
if not self.use_bottleneck:
self.dropout = nn.Dropout(config.hidden_dropout_prob)
else:
self.bottleneck = OutputBottleneck(config)
def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2):
layer_output = self.dense(intermediate_states)
if not self.use_bottleneck:
layer_output = self.dropout(layer_output)
layer_output = self.LayerNorm(layer_output + residual_tensor_1)
else:
layer_output = self.LayerNorm(layer_output + residual_tensor_1)
layer_output = self.bottleneck(layer_output, residual_tensor_2)
return layer_output
class BottleneckLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
layer_input = self.dense(hidden_states)
layer_input = self.LayerNorm(layer_input)
return layer_input
class Bottleneck(nn.Module):
def __init__(self, config):
super().__init__()
self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
self.use_bottleneck_attention = config.use_bottleneck_attention
self.input = BottleneckLayer(config)
if self.key_query_shared_bottleneck:
self.attention = BottleneckLayer(config)
def forward(self, hidden_states):
# This method can return three different tuples of values. These different values make use of bottlenecks,
# which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
# usage. These linear layer have weights that are learned during training.
#
# If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
# key, query, value, and "layer input" to be used by the attention layer.
# This bottleneck is used to project the hidden. This last layer input will be used as a residual tensor
# in the attention self output, after the attention scores have been computed.
#
# If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
# four values, three of which have been passed through a bottleneck: the query and key, passed through the same
# bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
#
# Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
# and the residual layer will be this value passed through a bottleneck.
bottlenecked_hidden_states = self.input(hidden_states)
if self.use_bottleneck_attention:
return (bottlenecked_hidden_states,) * 4
elif self.key_query_shared_bottleneck:
shared_attention_input = self.attention(hidden_states)
return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
else:
return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
class FFNOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
class FFNLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate = MobileBertIntermediate(config)
self.output = FFNOutput(config)
def forward(self, hidden_states):
intermediate_output = self.intermediate(hidden_states)
layer_outputs = self.output(intermediate_output, hidden_states)
return layer_outputs
class MobileBertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.num_feedforward_networks = config.num_feedforward_networks
self.attention = MobileBertAttention(config)
self.intermediate = MobileBertIntermediate(config)
self.output = MobileBertOutput(config)
if self.use_bottleneck:
self.bottleneck = Bottleneck(config)
if config.num_feedforward_networks > 1:
self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
):
if self.use_bottleneck:
query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
else:
query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
self_attention_outputs = self.attention(
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_mask,
head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
s = (attention_output,)
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.num_feedforward_networks != 1:
for i, ffn_module in enumerate(self.ffn):
attention_output = ffn_module(attention_output)
s += (attention_output,)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output, hidden_states)
outputs = (
(layer_output,)
+ outputs
+ (
torch.tensor(1000),
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_output,
intermediate_output,
)
+ s
)
return outputs
class MobileBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if output_hidden_states:
outputs = outputs + (all_hidden_states,)
if output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class MobileBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.do_activate = config.classifier_activation
if self.do_activate:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
if not self.do_activate:
return first_token_tensor
else:
pooled_output = self.dense(first_token_tensor)
pooled_output = F.tanh(pooled_output)
return pooled_output
class MobileBertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class MobileBertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = MobileBertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
hidden_states += self.bias
return hidden_states
class MobileBertOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the masked-LM prediction head."""
    def __init__(self, config):
        super().__init__()
        self.predictions = MobileBertLMPredictionHead(config)
    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class MobileBertPreTrainingHeads(nn.Module):
    """Pre-training heads: masked-LM scores plus next-sentence classification."""
    def __init__(self, config):
        super().__init__()
        self.predictions = MobileBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, sequence_output, pooled_output):
        lm_scores = self.predictions(sequence_output)
        nsp_scores = self.seq_relationship(pooled_output)
        return lm_scores, nsp_scores
class MobileBertPreTrainedModel(PreTrainedModel):
    """Abstract base class handling weight initialization and providing a simple
    interface for downloading and loading pretrained MobileBert models.
    """
    config_class = MobileBertConfig
    pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST
    load_tf_weights = load_tf_weights_in_mobilebert
    base_model_prefix = "mobilebert"
    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, (nn.LayerNorm, NoNorm)):
            # Normalization layers start out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
            return
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
MOBILEBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
MOBILEBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.MobileBertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
@add_start_docstrings(
    "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertModel(MobileBertPreTrainedModel):
    """
    https://arxiv.org/pdf/2004.02984.pdf
    """
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Pipeline: embedding lookup -> stacked MobileBert layers -> first-token pooler.
        self.embeddings = MobileBertEmbeddings(config)
        self.encoder = MobileBertEncoder(config)
        self.pooler = MobileBertPooler(config)
        self.init_weights()
    def get_input_embeddings(self):
        # Word-embedding table; also the tying target for the LM head decoder.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_hidden_states=None,
        output_attentions=None,
    ):
        r"""
        Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during pre-training.
            This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        Examples::
            from transformers import MobileBertModel, MobileBertTokenizer
            import torch
            tokenizer = MobileBertTokenizer.from_pretrained(model_name_or_path)
            model = MobileBertModel.from_pretrained(model_name_or_path)
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids)
            last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        # Fall back to config defaults when output flags are not given explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        # Infer the device from whichever input tensor was provided.
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default masks/segments: attend everywhere, all tokens in segment 0.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, self.device
        )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (sequence_output, pooled_output,) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings(
    """MobileBert Model with two heads on top as done during the pre-training: a `masked language modeling` head and
    a `next sentence prediction (classification)` head. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForPreTraining(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertPreTrainingHeads(config)
        self.init_weights()
    def get_output_embeddings(self):
        # Decoder of the factorized LM head; tied to the input embeddings.
        return self.cls.predictions.decoder
    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.
        If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
        the weights instead.
        """
        output_embeddings = self.get_output_embeddings()
        input_embeddings = self.get_input_embeddings()
        # The LM head's `dense` holds the extra (hidden_size - embedding_size)
        # decoder columns; rebuild it at the (possibly resized) vocabulary size
        # and copy over the overlapping slice of the previously trained weights.
        resized_dense = nn.Linear(
            input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False
        )
        kept_data = self.cls.predictions.dense.weight.data[
            ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1])
        ]
        resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data
        self.cls.predictions.dense = resized_dense
        self.cls.predictions.dense.to(self.device)
        if output_embeddings is not None:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
            Indices should be in ``[0, 1]``.
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.
        Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:
        loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False
            continuation before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        Examples::
            from transformers import MobileBertTokenizer, MobileBertForPreTraining
            import torch
            tokenizer = MobileBertTokenizer.from_pretrained(model_name_or_path)
            model = MobileBertForPreTraining.from_pretrained(model_name_or_path)
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids)
            prediction_scores, seq_relationship_scores = outputs[:2]
        """
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        outputs = (prediction_scores, seq_relationship_score,) + outputs[
            2:
        ]  # add hidden states and attention if they are here
        # Total loss = MLM cross-entropy + NSP cross-entropy (only when both labels given).
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""MobileBert Model with a `language modeling` head on top. """, MOBILEBERT_START_DOCSTRING)
class MobileBertForMaskedLM(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertOnlyMLMHead(config)
        self.config = config
        self.init_weights()
    def get_output_embeddings(self):
        # Decoder of the factorized LM head; tied to the input embeddings.
        return self.cls.predictions.decoder
    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.
        If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
        the weights instead.
        """
        output_embeddings = self.get_output_embeddings()
        input_embeddings = self.get_input_embeddings()
        # Rebuild the LM head's `dense` (the extra hidden_size - embedding_size
        # decoder columns) at the current vocabulary size, keeping the
        # overlapping slice of the previously trained weights.
        resized_dense = nn.Linear(
            input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False
        )
        kept_data = self.cls.predictions.dense.weight.data[
            ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1])
        ]
        resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data
        self.cls.predictions.dense = resized_dense
        self.cls.predictions.dense.to(self.device)
        if output_embeddings is not None:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        **kwargs
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:
        masked_lm_loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        Examples::
            from transformers import MobileBertTokenizer, MobileBertForMaskedLM
            import torch
            tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')
            model = MobileBertForMaskedLM.from_pretrained('mobilebert-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, labels=input_ids)
            loss, prediction_scores = outputs[:2]
        """
        # Legacy keyword support: `masked_lm_labels` was renamed to `labels`.
        if "masked_lm_labels" in kwargs:
            warnings.warn(
                "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("masked_lm_labels")
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            outputs = (masked_lm_loss,) + outputs
        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class MobileBertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a single binary classification layer."""
    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
@add_start_docstrings(
    """MobileBert Model with a `next sentence prediction (classification)` head on top. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertOnlyNSPHead(config)
        self.init_weights()
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
            Indices should be in ``[0, 1]``.
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.
        Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):
            Next sequence prediction (classification) loss.
        seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        Examples::
            from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction
            import torch
            tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')
            model = MobileBertForNextSentencePrediction.from_pretrained('mobilebert-uncased')
            prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            encoding = tokenizer.encode_plus(prompt, next_sentence, return_tensors='pt')
            loss, logits = model(**encoding, next_sentence_label=torch.LongTensor([1]))
            assert logits[0, 0] < logits[0, 1]  # next sentence was random
        """
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # Only the pooled (first-token) representation feeds the NSP classifier.
        pooled_output = outputs[1]
        seq_relationship_score = self.cls(pooled_output)
        outputs = (seq_relationship_score,) + outputs[2:]  # add hidden states and attention if they are here
        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            outputs = (next_sentence_loss,) + outputs
        return outputs  # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings(
    """MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilebert = MobileBertModel(config)
        # Classifier head on the pooled output: dropout then a linear projection.
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.init_weights()
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        Examples::
            from transformers import MobileBertTokenizer, MobileBertForSequenceClassification
            import torch
            tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')
            model = MobileBertForSequenceClassification.from_pretrained('mobilebert-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, labels=labels)
            loss, logits = outputs[:2]
        """
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
    """MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilebert = MobileBertModel(config)
        # Per-token projection to (start, end) span logits.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-start scores (before SoftMax).
        end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-end scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        Examples::
            from transformers import MobileBertTokenizer, MobileBertForQuestionAnswering
            import torch
            tokenizer = MobileBertTokenizer.from_pretrained(model_name_or_path)
            model = MobileBertForQuestionAnswering.from_pretrained(model_name_or_path)
            question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
            encoding = tokenizer.encode_plus(question, text)
            input_ids, token_type_ids = encoding["input_ids"], encoding["token_type_ids"]
            start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
            all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
            answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])
            assert answer == "a nice puppet"
        """
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        # Split the 2-wide per-token projection into start and end logit vectors.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
@add_start_docstrings(
    """MobileBert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
    """Scored-choice head: every candidate sequence is encoded independently,
    pooled, and mapped to a single logit; logits are then compared across the
    ``num_choices`` dimension."""

    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One score per choice; the softmax over choices happens in the loss.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the
            second dimension of the input tensors (see `input_ids` above).

    Returns:
        :obj:`tuple(torch.FloatTensor)`: ``(loss?, classification_scores, hidden_states?, attentions?)``
        where ``classification_scores`` has shape :obj:`(batch_size, num_choices)`; ``loss`` is present
        only when :obj:`labels` is given, and the trailing optional elements follow the usual
        ``output_hidden_states`` / ``output_attentions`` conventions.
        """
        # Inputs arrive as (batch, num_choices, ...); fold the choice axis into
        # the batch axis so the encoder sees ordinary 2-D token inputs.
        if input_ids is not None:
            num_choices = input_ids.shape[1]
        else:
            num_choices = inputs_embeds.shape[1]

        def _merge_choice_dim(tensor):
            # (batch, num_choices, seq) -> (batch * num_choices, seq)
            return tensor.view(-1, tensor.size(-1)) if tensor is not None else None

        input_ids = _merge_choice_dim(input_ids)
        attention_mask = _merge_choice_dim(attention_mask)
        token_type_ids = _merge_choice_dim(token_type_ids)
        position_ids = _merge_choice_dim(position_ids)
        if inputs_embeds is not None:
            inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        # Score every flattened choice, then restore the choice axis.
        pooled_output = self.dropout(outputs[1])
        reshaped_logits = self.classifier(pooled_output).view(-1, num_choices)

        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)
            outputs = (loss,) + outputs
        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings(
    """MobileBert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForTokenClassification(MobileBertPreTrainedModel):
    """Per-token classifier (e.g. NER): dropout + a linear layer applied to
    every position of the encoder's last hidden state.

    Fix: the decorator doc string previously read "MoibleBert" (typo).
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilebert = MobileBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

    Returns:
        :obj:`tuple(torch.FloatTensor)`: ``(loss?, scores, hidden_states?, attentions?)`` where
        ``scores`` has shape :obj:`(batch_size, sequence_length, config.num_labels)`; ``loss`` is
        present only when :obj:`labels` is given, and the trailing optional elements follow the usual
        ``output_hidden_states`` / ``output_attentions`` conventions.
        """
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss: positions masked out by
            # attention_mask get the loss's ignore_index instead of a label.
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), scores, (hidden_states), (attentions)
| 46.507335 | 163 | 0.673724 |
import logging
import math
import os
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.modeling_bert import BertIntermediate
from .activations import gelu, gelu_new, swish
from .configuration_mobilebert import MobileBertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
logger = logging.getLogger(__name__)  # module-level logger named after this module

# Shortcut names of the pretrained MobileBERT checkpoints this module knows about.
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["mobilebert-uncased"]
def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
    """Load weights from a TensorFlow checkpoint into a PyTorch MobileBERT model.

    TF variable paths are rewritten (``ffn_layer`` -> ``ffn``, ``FakeLayerNorm``
    -> ``LayerNorm``, ``bert`` -> ``mobilebert``, ...) and then walked through
    the PyTorch module tree attribute by attribute.

    Fix: previously, when a path component had no matching PyTorch attribute,
    only that single component was skipped (``continue`` in the inner loop);
    the walk then kept drilling with a stale pointer and could crash on the
    final shape assert. Unknown variables are now skipped as a whole.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load all variables from the TF checkpoint.
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        # Map TF naming onto the PyTorch module tree.
        name = name.replace("ffn_layer", "ffn")
        name = name.replace("FakeLayerNorm", "LayerNorm")
        name = name.replace("extra_output_weights", "dense/kernel")
        name = name.replace("bert", "mobilebert")
        name = name.split("/")
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            # Optimizer slots and step counters have no PyTorch counterpart.
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        unresolved = False  # set when a path component has no PyTorch attribute
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_3" -> ["layer", "3", ""]: attribute plus index.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # Skip the entire variable, not just this component.
                    logger.info("Skipping {}".format("/".join(name)))
                    unresolved = True
                    break
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if unresolved:
            continue
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to nn.Linear.weight.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def mish(x):
    """Mish activation: ``x * tanh(softplus(x))``."""
    softplus_x = nn.functional.softplus(x)
    return torch.tanh(softplus_x) * x
class NoNorm(nn.Module):
    """Drop-in replacement for LayerNorm that applies only a learned
    element-wise affine transform — no normalization at all.

    The unused ``eps`` argument keeps the constructor signature compatible
    with ``nn.LayerNorm`` so both can be built through ``NORM2FN``.
    """

    def __init__(self, feat_size, eps=None):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(feat_size))
        self.weight = nn.Parameter(torch.ones(feat_size))

    def forward(self, input_tensor):
        scaled = input_tensor * self.weight
        return scaled + self.bias
# Name -> activation callable lookup, used when config.hidden_act is a string.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish}
# Name -> normalization-layer class lookup (selected by config.normalization_type).
NORM2FN = {"layer_norm": torch.nn.LayerNorm, "no_norm": NoNorm}
class MobileBertEmbeddings(nn.Module):
    """Construct embeddings from word, position and token-type embeddings.

    Word embeddings use the (possibly narrower) ``embedding_size``; a linear
    ``embedding_transformation`` projects them up to ``hidden_size`` before
    the position/token-type embeddings are added.
    """

    def __init__(self, config):
        super().__init__()
        self.trigram_input = config.trigram_input
        self.embedding_size = config.embedding_size
        self.hidden_size = config.hidden_size
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # With trigram input, three embedding vectors are concatenated per
        # position, so the projection consumes 3 * embedding_size features.
        embed_dim_multiplier = 3 if self.trigram_input else 1
        embedded_input_size = self.embedding_size * embed_dim_multiplier
        self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        """Return embeddings of shape (batch, seq_len, hidden_size)."""
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if position_ids is None:
            # Default to positions 0..seq_length-1, broadcast over the batch.
            position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if self.trigram_input:
            # For each position i, concatenate the embeddings of tokens
            # (i+1, i, i-1) along the feature axis; sequence edges are
            # zero-padded (F.pad pads dims last-to-first: here the seq dim).
            inputs_embeds = torch.cat(
                [
                    F.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0),  # token i+1
                    inputs_embeds,  # token i
                    F.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0),  # token i-1
                ],
                dim=2,
            )
        if self.trigram_input or self.embedding_size != self.hidden_size:
            # Project up to hidden_size (needed whenever widths differ).
            inputs_embeds = self.embedding_transformation(inputs_embeds)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class MobileBertSelfAttention(nn.Module):
    """Multi-head scaled dot-product attention over the bottleneck width."""

    def __init__(self, config):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
        self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
        # Values come from the full hidden width unless bottleneck attention
        # is enabled, in which case they also use the narrow width.
        self.value = nn.Linear(
            config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
        )
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        head_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*head_shape).permute(0, 2, 1, 3)

    def forward(
        self,
        query_tensor,
        key_tensor,
        value_tensor,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
    ):
        # Project inputs and split them into heads.
        q = self.transpose_for_scores(self.query(query_tensor))
        k = self.transpose_for_scores(self.key(key_tensor))
        v = self.transpose_for_scores(self.value(value_tensor))

        # Scaled dot-product scores; attention_mask is additive.
        scores = torch.matmul(q, k.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            scores = scores + attention_mask

        # Dropping entire attention weights is how the original Transformer
        # regularizes attention.
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        if head_mask is not None:
            probs = probs * head_mask

        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        context = context.view(*(context.size()[:-2] + (self.all_head_size,)))
        if output_attentions:
            return (context, probs)
        return (context,)
class MobileBertSelfOutput(nn.Module):
    """Projects attention output, adds the residual, then normalizes."""

    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
        if not self.use_bottleneck:
            # Dropout only exists in the non-bottleneck configuration.
            self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, residual_tensor):
        projected = self.dense(hidden_states)
        if not self.use_bottleneck:
            projected = self.dropout(projected)
        return self.LayerNorm(projected + residual_tensor)
class MobileBertAttention(nn.Module):
    """Self-attention plus its output-projection/residual sub-layer."""

    def __init__(self, config):
        super().__init__()
        self.self = MobileBertSelfAttention(config)
        self.output = MobileBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from this layer, in place."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Shrink every projection along the concatenated-heads dimension.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Book-keeping: fewer heads, smaller concatenated width.
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        query_tensor,
        key_tensor,
        value_tensor,
        layer_input,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
    ):
        self_outputs = self.self(
            query_tensor,
            key_tensor,
            value_tensor,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            output_attentions,
        )
        # The residual connection uses layer_input, not the raw hidden states.
        attention_output = self.output(self_outputs[0], layer_input)
        # Append attention probabilities when they were requested.
        return (attention_output,) + self_outputs[1:]
class MobileBertIntermediate(BertIntermediate):
    # Same as BertIntermediate except the input width: the dense layer reads
    # the (narrow) true_hidden_size instead of hidden_size.
    def __init__(self, config):
        super().__init__(config)
        # Override the parent's Linear(hidden_size, intermediate_size).
        self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
class OutputBottleneck(nn.Module):
    """Expands the narrow (true_hidden_size) features back to hidden_size,
    with dropout, a residual add and normalization."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, residual_tensor):
        expanded = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(expanded + residual_tensor)
class MobileBertOutput(nn.Module):
    """Feed-forward output projection with residual; in the bottleneck
    configuration it is followed by the expansion back to hidden_size."""

    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
        if not self.use_bottleneck:
            self.dropout = nn.Dropout(config.hidden_dropout_prob)
        else:
            self.bottleneck = OutputBottleneck(config)

    def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2):
        layer_output = self.dense(intermediate_states)
        if not self.use_bottleneck:
            # Non-bottleneck variant: dropout before the residual add.
            layer_output = self.dropout(layer_output)
            return self.LayerNorm(layer_output + residual_tensor_1)
        # Bottleneck variant: normalize at the narrow width, then expand and
        # add the second (full-width) residual.
        layer_output = self.LayerNorm(layer_output + residual_tensor_1)
        return self.bottleneck(layer_output, residual_tensor_2)
class BottleneckLayer(nn.Module):
    """Linear down-projection from hidden_size to the intra-bottleneck
    width, followed by normalization."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.dense(hidden_states))
class Bottleneck(nn.Module):
    """Produces the (query, key, value, layer-residual) inputs to attention.

    Depending on the configuration, Q/K/V use the narrowed features, a
    second shared bottleneck, or the original full-width hidden states.
    """

    def __init__(self, config):
        super().__init__()
        self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
        self.use_bottleneck_attention = config.use_bottleneck_attention
        self.input = BottleneckLayer(config)
        if self.key_query_shared_bottleneck:
            self.attention = BottleneckLayer(config)

    def forward(self, hidden_states):
        # The residual (4th element) is always the narrowed features.
        narrowed = self.input(hidden_states)
        if self.use_bottleneck_attention:
            # Q, K, V and the residual all use the narrowed features.
            return (narrowed, narrowed, narrowed, narrowed)
        if self.key_query_shared_bottleneck:
            # Q and K share a second bottleneck; V stays full-width.
            shared = self.attention(hidden_states)
            return (shared, shared, hidden_states, narrowed)
        return (hidden_states, hidden_states, hidden_states, narrowed)
class FFNOutput(nn.Module):
    """Output projection + residual + norm for the extra FFN sub-layers."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, residual_tensor):
        projected = self.dense(hidden_states)
        return self.LayerNorm(projected + residual_tensor)
class FFNLayer(nn.Module):
    """One extra feed-forward sub-layer: intermediate expansion followed by
    an output projection with residual connection."""

    def __init__(self, config):
        super().__init__()
        self.intermediate = MobileBertIntermediate(config)
        self.output = FFNOutput(config)

    def forward(self, hidden_states):
        expanded = self.intermediate(hidden_states)
        return self.output(expanded, hidden_states)
class MobileBertLayer(nn.Module):
    """One MobileBERT transformer block: (optional) bottleneck ->
    self-attention -> extra FFN sub-layers -> intermediate/output."""

    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.num_feedforward_networks = config.num_feedforward_networks
        self.attention = MobileBertAttention(config)
        self.intermediate = MobileBertIntermediate(config)
        self.output = MobileBertOutput(config)
        if self.use_bottleneck:
            self.bottleneck = Bottleneck(config)
        if config.num_feedforward_networks > 1:
            # num_feedforward_networks - 1 extra FFN sub-layers; the final FFN
            # is the intermediate/output pair above.
            self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
    ):
        if self.use_bottleneck:
            query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
        else:
            # Without a bottleneck, Q/K/V and the residual all use the input.
            query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
        self_attention_outputs = self.attention(
            query_tensor,
            key_tensor,
            value_tensor,
            layer_input,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        # `s` accumulates intermediate activations that get appended to the
        # returned tuple below.
        s = (attention_output,)
        outputs = self_attention_outputs[1:]  # attention probs, if requested
        if self.num_feedforward_networks != 1:
            for i, ffn_module in enumerate(self.ffn):
                attention_output = ffn_module(attention_output)
                s += (attention_output,)
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output, hidden_states)
        # NOTE(review): the constant torch.tensor(1000) and the trailing
        # activations look like debugging output; the encoder only consumes
        # layer_outputs[0] (and [1] for attentions) — confirm before relying
        # on anything past index 1.
        outputs = (
            (layer_output,)
            + outputs
            + (
                torch.tensor(1000),
                query_tensor,
                key_tensor,
                value_tensor,
                layer_input,
                attention_output,
                intermediate_output,
            )
            + s
        )
        return outputs
class MobileBertEncoder(nn.Module):
    """Stack of ``config.num_hidden_layers`` MobileBertLayer blocks."""

    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
    ):
        all_hidden_states = ()
        all_attentions = ()
        for idx, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the input to this layer (the embeddings for idx 0).
                all_hidden_states += (hidden_states,)
            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                head_mask[idx],
                encoder_hidden_states,
                encoder_attention_mask,
                output_attentions,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions += (layer_outputs[1],)
        if output_hidden_states:
            # Also record the final layer's output.
            all_hidden_states += (hidden_states,)
        outputs = (hidden_states,)
        if output_hidden_states:
            outputs += (all_hidden_states,)
        if output_attentions:
            outputs += (all_attentions,)
        return outputs  # last hidden state, (all hidden states), (all attentions)
class MobileBertPooler(nn.Module):
    """Pools the sequence by taking the first ([CLS]) token's hidden state.

    When ``config.classifier_activation`` is true, the pooled vector is
    additionally passed through a dense layer with tanh; otherwise the raw
    first-token hidden state is returned unchanged.

    Fix: uses ``torch.tanh`` instead of the deprecated ``F.tanh``
    (numerically identical).
    """

    def __init__(self, config):
        super().__init__()
        self.do_activate = config.classifier_activation
        if self.do_activate:
            self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        # "Pooling" = hidden state of the first token of the sequence.
        first_token_tensor = hidden_states[:, 0]
        if not self.do_activate:
            return first_token_tensor
        else:
            pooled_output = self.dense(first_token_tensor)
            pooled_output = torch.tanh(pooled_output)
            return pooled_output
class MobileBertPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the MLM
    decoder. Always uses a true LayerNorm here, regardless of
    ``config.normalization_type``."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            # config.hidden_act may already be a callable.
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.dense(hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)
class MobileBertLMPredictionHead(nn.Module):
    """Masked-LM output head.

    The decoder consumes only ``embedding_size`` input features (so its
    weight can be tied to the narrow word-embedding matrix), while hidden
    states are ``hidden_size`` wide; the extra ``dense`` weight supplies the
    remaining ``hidden_size - embedding_size`` rows of the full projection.
    """

    def __init__(self, config):
        super().__init__()
        self.transform = MobileBertPredictionHeadTransform(config)
        # nn.Linear(in=vocab, out=hidden-embedding) -> weight shape
        # (hidden_size - embedding_size, vocab_size); used directly below.
        self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
        self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Make decoder.bias and self.bias one and the same Parameter.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        # Stack decoder.weight.t() (embedding_size, vocab) on top of
        # dense.weight (hidden_size - embedding_size, vocab) to form the
        # full (hidden_size, vocab) projection matrix.
        hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
        hidden_states += self.bias
        return hidden_states
class MobileBertOnlyMLMHead(nn.Module):
    """Thin wrapper exposing just the LM prediction head (no NSP head)."""

    def __init__(self, config):
        super().__init__()
        self.predictions = MobileBertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class MobileBertPreTrainingHeads(nn.Module):
    """MLM head plus the 2-way next-sentence-prediction head used during
    pretraining."""

    def __init__(self, config):
        super().__init__()
        self.predictions = MobileBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # Token-level vocabulary logits and sentence-pair relationship logits.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class MobileBertPreTrainedModel(PreTrainedModel):
    """Base class for all MobileBERT models: handles weight initialization
    and the interface for loading pretrained checkpoints."""

    config_class = MobileBertConfig
    pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST
    load_tf_weights = load_tf_weights_in_mobilebert
    base_model_prefix = "mobilebert"

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Plain normal init with the configured std.
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, (nn.LayerNorm, NoNorm)):
            # Identity affine transform to start with.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
MOBILEBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
MOBILEBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.MobileBertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
@add_start_docstrings(
    "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertModel(MobileBertPreTrainedModel):
    # Backbone only: embeddings -> encoder stack -> pooler; task-specific heads live in subclasses.
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = MobileBertEmbeddings(config)
        self.encoder = MobileBertEncoder(config)
        self.pooler = MobileBertPooler(config)
        self.init_weights()
    def get_input_embeddings(self):
        """Return the token (word) embedding table."""
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        """Replace the token (word) embedding table."""
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; ``heads_to_prune`` maps layer index -> iterable of head indices."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_hidden_states=None,
        output_attentions=None,
    ):
        """Run the MobileBERT backbone.

        Returns the tuple ``(sequence_output, pooled_output, (hidden_states), (attentions))``;
        the last two entries are only present when the corresponding ``output_*``
        flag resolves to True.
        """
        # Per-call flags override the config-level defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default masks: attend everywhere, and treat every token as segment 0.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, self.device
        )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = encoder_outputs[0]
        # Pooled representation derived from the final hidden states.
        pooled_output = self.pooler(sequence_output)
        outputs = (sequence_output, pooled_output,) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings(
    """MobileBert Model with two heads on top as done during the pre-training: a `masked language modeling` head and
    a `next sentence prediction (classification)` head. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForPreTraining(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertPreTrainingHeads(config)
        self.init_weights()
    def get_output_embeddings(self):
        # MLM decoder; tied to the input embeddings in tie_weights().
        return self.cls.predictions.decoder
    def tie_weights(self):
        """Tie the MLM decoder to the input embeddings and rebuild the MLM
        head's dense projection to match the embedding table.

        NOTE(review): the rebuilt layer is
        ``Linear(num_embeddings, hidden_size - embedding_size, bias=False)`` and
        the overlapping slice of the previous weights is copied over —
        presumably to keep resize_token_embeddings working; confirm against
        callers.
        """
        output_embeddings = self.get_output_embeddings()
        input_embeddings = self.get_input_embeddings()
        resized_dense = nn.Linear(
            input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False
        )
        # Copy as much of the previously learned projection as fits in the new shape.
        kept_data = self.cls.predictions.dense.weight.data[
            ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1])
        ]
        resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data
        self.cls.predictions.dense = resized_dense
        self.cls.predictions.dense.to(self.device)
        if output_embeddings is not None:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Returns ``((total_loss), prediction_scores, seq_relationship_score,
        (hidden_states), (attentions))``; the loss is only computed when both
        ``labels`` and ``next_sentence_label`` are provided.
        """
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        outputs = (prediction_scores, seq_relationship_score,) + outputs[
            2:
        ]  # add hidden states and attention if they are here
        if labels is not None and next_sentence_label is not None:
            # Pre-training loss = MLM cross-entropy + NSP cross-entropy.
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""MobileBert Model with a `language modeling` head on top. """, MOBILEBERT_START_DOCSTRING)
class MobileBertForMaskedLM(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertOnlyMLMHead(config)
        self.config = config
        self.init_weights()
    def get_output_embeddings(self):
        # MLM decoder; tied to the input embeddings in tie_weights().
        return self.cls.predictions.decoder
    def tie_weights(self):
        """Tie the MLM decoder to the input embeddings and rebuild the MLM
        head's dense projection to match the embedding table.

        NOTE(review): identical to MobileBertForPreTraining.tie_weights — the
        rebuilt layer is ``Linear(num_embeddings, hidden_size - embedding_size,
        bias=False)`` with the overlapping slice of the old weights copied over;
        presumably needed for resize_token_embeddings — confirm against callers.
        """
        output_embeddings = self.get_output_embeddings()
        input_embeddings = self.get_input_embeddings()
        resized_dense = nn.Linear(
            input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False
        )
        # Copy as much of the previously learned projection as fits in the new shape.
        kept_data = self.cls.predictions.dense.weight.data[
            ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1])
        ]
        resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data
        self.cls.predictions.dense = resized_dense
        self.cls.predictions.dense.to(self.device)
        if output_embeddings is not None:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        **kwargs
    ):
        """Returns ``((masked_lm_loss), prediction_scores, (hidden_states), (attentions))``."""
        # Backward compatibility: accept the deprecated `masked_lm_labels` keyword.
        if "masked_lm_labels" in kwargs:
            warnings.warn(
                "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("masked_lm_labels")
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        # Per-token vocabulary scores from the MLM head.
        prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            outputs = (masked_lm_loss,) + outputs
        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class MobileBertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: one binary classifier over the pooled output."""
    def __init__(self, config):
        super().__init__()
        # Maps the pooled representation to two NSP logits.
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, pooled_output):
        """Return NSP logits of shape (batch_size, 2)."""
        return self.seq_relationship(pooled_output)
@add_start_docstrings(
    """MobileBert Model with a `next sentence prediction (classification)` head on top. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
    """MobileBERT backbone plus a binary next-sentence-prediction classifier."""
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertOnlyNSPHead(config)
        self.init_weights()
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Returns ``((next_sentence_loss), seq_relationship_score, (hidden_states), (attentions))``."""
        backbone_outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # Score the NSP decision from the pooled representation.
        nsp_logits = self.cls(backbone_outputs[1])
        outputs = (nsp_logits,) + backbone_outputs[2:]  # keep optional hidden states / attentions
        if next_sentence_label is not None:
            nsp_loss = CrossEntropyLoss()(nsp_logits.view(-1, 2), next_sentence_label.view(-1))
            outputs = (nsp_loss,) + outputs
        return outputs
@add_start_docstrings(
    """MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
    """MobileBERT backbone plus dropout and a linear classification head on the pooled output."""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilebert = MobileBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.init_weights()
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Returns ``((loss), logits, (hidden_states), (attentions))``.

        With ``num_labels == 1`` the head is treated as a regressor (MSE loss);
        otherwise as a classifier (cross-entropy loss).
        """
        backbone_outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # Classify from the pooled representation, with dropout for regularization.
        logits = self.classifier(self.dropout(backbone_outputs[1]))
        outputs = (logits,) + backbone_outputs[2:]  # keep optional hidden states / attentions
        if labels is not None:
            if self.num_labels == 1:
                # Single output -> regression objective.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs
@add_start_docstrings(
    """MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
    """MobileBERT backbone plus a linear layer producing span start/end logits."""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilebert = MobileBertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Returns ``((total_loss), start_logits, end_logits, (hidden_states), (attentions))``."""
        backbone_outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # Project each token to (start, end) scores and split the last dimension.
        span_logits = self.qa_outputs(backbone_outputs[0])
        start_logits, end_logits = span_logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,) + backbone_outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
            outputs = (total_loss,) + outputs
        return outputs
@add_start_docstrings(
    """MobileBert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
    """MobileBERT backbone plus a per-choice scoring head over the pooled output."""
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Returns ``((loss), reshaped_logits, (hidden_states), (attentions))`` where
        ``reshaped_logits`` has shape (batch_size, num_choices)."""
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        def _flatten(tensor):
            # Merge the batch and choice dims: (batch, choices, seq) -> (batch*choices, seq).
            return tensor.view(-1, tensor.size(-1)) if tensor is not None else None

        input_ids = _flatten(input_ids)
        attention_mask = _flatten(attention_mask)
        token_type_ids = _flatten(token_type_ids)
        position_ids = _flatten(position_ids)
        if inputs_embeds is not None:
            # Embeddings carry a trailing hidden dim, so flatten to 3D instead of 2D.
            inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
        backbone_outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # One scalar score per flattened example, regrouped per original batch row.
        choice_scores = self.classifier(self.dropout(backbone_outputs[1]))
        reshaped_logits = choice_scores.view(-1, num_choices)
        outputs = (reshaped_logits,) + backbone_outputs[2:]  # keep optional hidden states / attentions
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)
            outputs = (loss,) + outputs
        return outputs
@add_start_docstrings(
    """MobileBert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForTokenClassification(MobileBertPreTrainedModel):
    """MobileBERT backbone plus dropout and a per-token linear classification head.

    Fixes the "MoibleBert" typo in the user-facing model docstring above.
    """
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilebert = MobileBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Returns ``((loss), logits, (hidden_states), (attentions))`` with
        ``logits`` of shape (batch_size, sequence_length, num_labels)."""
        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss: positions masked out by the
            # attention mask get their label replaced by the loss ignore_index.
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), scores, (hidden_states), (attentions)
| true | true |
f7f42aa5cd7079bb3433259255e9267c54538edd | 3,300 | py | Python | fbpcp/service/mpc_game.py | zehuali/fbpcp | a543ffcdb64eaa20d9b6bcd870b453705cc22993 | [
"MIT"
] | null | null | null | fbpcp/service/mpc_game.py | zehuali/fbpcp | a543ffcdb64eaa20d9b6bcd870b453705cc22993 | [
"MIT"
] | null | null | null | fbpcp/service/mpc_game.py | zehuali/fbpcp | a543ffcdb64eaa20d9b6bcd870b453705cc22993 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import logging
from typing import Any, Dict, Optional, Tuple
from fbpcp.entity.mpc_game_config import MPCGameConfig
from fbpcp.entity.mpc_instance import MPCParty
from fbpcp.repository.mpc_game_repository import MPCGameRepository
from fbpcp.util.arg_builder import build_cmd_args
# Well-known MPC game identifiers.
LIFT_GAME_NAME = "lift"
LIFT_AGGREGATOR_GAME_NAME = "aggregator"
class MPCGameService:
    """Builds OneDocker package names and command-line argument strings for MPC games."""
    def __init__(self, mpc_game_repository: MPCGameRepository) -> None:
        self.logger: logging.Logger = logging.getLogger(__name__)
        self.mpc_game_repository: MPCGameRepository = mpc_game_repository
    def build_onedocker_args(
        self,
        game_name: str,
        mpc_party: MPCParty,
        server_ip: Optional[str] = None,
        port: Optional[int] = None,
        **kwargs: object,
    ) -> Tuple[str, str]:
        """Look up the game config and return ``(onedocker_package_name, cmd)``.

        The returned ``cmd`` contains only the arguments, not the executable.
        """
        game_config = self.mpc_game_repository.get_game(game_name)
        cmd = self._build_cmd(
            mpc_game_config=game_config,
            mpc_party=mpc_party,
            server_ip=server_ip,
            port=port,
            **kwargs,
        )
        return game_config.onedocker_package_name, cmd
    def _build_cmd(
        self,
        mpc_game_config: MPCGameConfig,
        mpc_party: MPCParty,
        server_ip: Optional[str] = None,
        port: Optional[int] = None,
        **kwargs: object,
    ) -> str:
        """Render the prepared argument dict into an argument-only command string."""
        prepared = self._prepare_args(
            mpc_game_config=mpc_game_config,
            mpc_party=mpc_party,
            server_ip=server_ip,
            port=port,
            **kwargs,
        )
        return build_cmd_args(**prepared)
    def _prepare_args(
        self,
        mpc_game_config: MPCGameConfig,
        mpc_party: MPCParty,
        server_ip: Optional[str] = None,
        port: Optional[int] = None,
        **kwargs: object,
    ) -> Dict[str, Any]:
        """Collect framework-level and game-specific arguments into one dict.

        Raises:
            ValueError: if a client has no server ip, or a required game
                argument (other than ``game_name``) is missing.
        """
        all_arguments: Dict[str, Any] = {}
        # Framework arguments: party id, plus the server address for clients.
        all_arguments["party"] = 1 if mpc_party == MPCParty.SERVER else 2
        if mpc_party == MPCParty.CLIENT:
            if server_ip is None:
                raise ValueError("Client must provide a server ip address.")
            all_arguments["server_ip"] = server_ip
        if port is not None:
            all_arguments["port"] = port
        # Game-specific arguments: taken from kwargs, validated against the game config.
        for arg_spec in mpc_game_config.arguments:
            key = arg_spec.name
            value = kwargs.get(key)
            if value is not None:
                all_arguments[key] = value
            elif arg_spec.required:
                # game_name is a special case (for PL-Worker): default it from the config.
                if key == "game_name":
                    all_arguments[key] = mpc_game_config.game_name
                else:
                    raise ValueError(f"Missing required argument {key}!")
        return all_arguments
| 33.333333 | 80 | 0.621818 |
import logging
from typing import Any, Dict, Optional, Tuple
from fbpcp.entity.mpc_game_config import MPCGameConfig
from fbpcp.entity.mpc_instance import MPCParty
from fbpcp.repository.mpc_game_repository import MPCGameRepository
from fbpcp.util.arg_builder import build_cmd_args
# Well-known MPC game identifiers.
LIFT_GAME_NAME = "lift"
LIFT_AGGREGATOR_GAME_NAME = "aggregator"
class MPCGameService:
    """Builds OneDocker package names and command-line argument strings for MPC games.

    NOTE(review): this appears to be a duplicate of the MPCGameService
    definition earlier in the file — confirm whether both copies are intended.
    """
    def __init__(self, mpc_game_repository: MPCGameRepository) -> None:
        self.logger: logging.Logger = logging.getLogger(__name__)
        self.mpc_game_repository: MPCGameRepository = mpc_game_repository
    def build_onedocker_args(
        self,
        game_name: str,
        mpc_party: MPCParty,
        server_ip: Optional[str] = None,
        port: Optional[int] = None,
        **kwargs: object,
    ) -> Tuple[str, str]:
        """Look up the game config and return (package_name, argument-only cmd string)."""
        mpc_game_config = self.mpc_game_repository.get_game(game_name)
        return (
            mpc_game_config.onedocker_package_name,
            self._build_cmd(
                mpc_game_config=mpc_game_config,
                mpc_party=mpc_party,
                server_ip=server_ip,
                port=port,
                **kwargs,
            ),
        )
    def _build_cmd(
        self,
        mpc_game_config: MPCGameConfig,
        mpc_party: MPCParty,
        server_ip: Optional[str] = None,
        port: Optional[int] = None,
        **kwargs: object,
    ) -> str:
        """Render the prepared argument dict into an argument-only command string."""
        args = self._prepare_args(
            mpc_game_config=mpc_game_config,
            mpc_party=mpc_party,
            server_ip=server_ip,
            port=port,
            **kwargs,
        )
        return build_cmd_args(**args)
    def _prepare_args(
        self,
        mpc_game_config: MPCGameConfig,
        mpc_party: MPCParty,
        server_ip: Optional[str] = None,
        port: Optional[int] = None,
        **kwargs: object,
    ) -> Dict[str, Any]:
        """Collect framework-level and game-specific arguments into one dict;
        raises ValueError for a client without a server ip or for a missing
        required game argument (other than game_name)."""
        all_arguments: Dict[str, Any] = {}
        # Framework arguments: party id, plus server address for clients.
        all_arguments["party"] = 1 if mpc_party == MPCParty.SERVER else 2
        if mpc_party == MPCParty.CLIENT:
            if server_ip is None:
                raise ValueError("Client must provide a server ip address.")
            all_arguments["server_ip"] = server_ip
        if port is not None:
            all_arguments["port"] = port
        # Game-specific arguments, validated against the game config.
        for argument in mpc_game_config.arguments:
            key = argument.name
            value = kwargs.get(key)
            if value is None and argument.required:
                # game_name is a special case: default it from the config.
                if key == "game_name":
                    all_arguments[key] = mpc_game_config.game_name
                else:
                    raise ValueError(f"Missing required argument {key}!")
            if value is not None:
                all_arguments[key] = value
        return all_arguments
| true | true |
f7f42ac48629705b065f589cb94395e189677c57 | 1,238 | py | Python | ensembling/make_submit.py | sergeyshilin/kaggle-statoil-iceberg-classifier-challenge | fa5c7e721297d9e1478593951b4d9cf16a0cd66d | [
"MIT"
] | 15 | 2018-01-24T04:48:38.000Z | 2020-08-11T18:53:08.000Z | ensembling/make_submit.py | sergeyshilin/kaggle-statoil-iceberg-classifier-challenge | fa5c7e721297d9e1478593951b4d9cf16a0cd66d | [
"MIT"
] | null | null | null | ensembling/make_submit.py | sergeyshilin/kaggle-statoil-iceberg-classifier-challenge | fa5c7e721297d9e1478593951b4d9cf16a0cd66d | [
"MIT"
] | 7 | 2018-01-24T03:58:08.000Z | 2021-02-09T14:56:58.000Z | import sys
import pandas as pd
import numpy as np
from sklearn.metrics import log_loss
# Calibration exponent, taken from the first command-line argument.
power = float(sys.argv[1])
def transform(preds, exponent=None):
    """Power-law probability calibration: p**a / (p**a + (1 - p)**a).

    a > 1 sharpens predictions toward 0/1, a < 1 softens them, and a == 1 is
    the identity. Works elementwise on scalars and numpy arrays.

    Args:
        preds: probability (or array of probabilities) in [0, 1].
        exponent: calibration power ``a``; defaults to the module-level
            ``power`` parsed from the command line (original behavior).

    Returns:
        Calibrated probability/probabilities, same shape as ``preds``.
    """
    if exponent is None:
        exponent = power  # fall back to the CLI-supplied module-level value
    return preds ** exponent / (preds ** exponent + (1.0 - preds) ** exponent)
# The current submission id is tracked in a plain-text counter file.
with open('submit_id', 'r') as submit_id:
    last_submit_id = int(submit_id.read())
last_submit_id = str(last_submit_id).zfill(3)
# Test-set predictions and the matching CV frame (presumably one column per model
# plus the `is_iceberg` target in the CV file — verify against the writer).
ensemble = pd.read_csv('ensembles/ensemble_{}.csv'.format(last_submit_id))
ensemble_cv = pd.read_csv('ensembles_cv/ensemble_cv_{}.csv'.format(last_submit_id))
y_cv = ensemble_cv.is_iceberg
# Mean blend across prediction columns.
x_cv = ensemble_cv.drop('is_iceberg', axis=1).values.mean(axis=1)
print ('cv log_loss before: {}'.format(log_loss(y_cv, x_cv)))
# Compare plain blend vs. power calibration vs. clipping vs. both on CV data.
x_cv_calib = transform(x_cv)
print ('cv log_loss calibration: {}'.format(log_loss(y_cv, x_cv_calib)))
x_cv_clip = np.clip(x_cv, 0.001, 0.999)
print ('cv log_loss clip: {}'.format(log_loss(y_cv, x_cv_clip)))
x_cv_calib_clip = np.clip(transform(x_cv), 0.001, 0.999)
print ('cv log_loss calib+clip: {}'.format(log_loss(y_cv, x_cv_calib_clip)))
# Write the test-set submission using the calibrated + clipped blend.
submit = pd.read_csv('../data/sample_submission.csv')
submit.is_iceberg = np.clip(transform(ensemble.values.mean(axis=1)), 0.001, 0.999)
submit.to_csv('submits/submission_{}_calib_clip_1_4.csv'.format(last_submit_id), index=False)
| 33.459459 | 93 | 0.738288 | import sys
import pandas as pd
import numpy as np
from sklearn.metrics import log_loss
# NOTE(review): this looks like a duplicate of the make_submit script earlier
# in the file — confirm whether both copies are intended.
# Calibration exponent, taken from the first command-line argument.
power = float(sys.argv[1])
def transform(preds):
    # Power-law calibration: p**a / (p**a + (1-p)**a); a == 1 is the identity.
    return preds ** power / (preds ** power + (1.0 - preds) ** power)
# The current submission id is tracked in a plain-text counter file.
with open('submit_id', 'r') as submit_id:
    last_submit_id = int(submit_id.read())
last_submit_id = str(last_submit_id).zfill(3)
ensemble = pd.read_csv('ensembles/ensemble_{}.csv'.format(last_submit_id))
ensemble_cv = pd.read_csv('ensembles_cv/ensemble_cv_{}.csv'.format(last_submit_id))
y_cv = ensemble_cv.is_iceberg
# Mean blend across prediction columns.
x_cv = ensemble_cv.drop('is_iceberg', axis=1).values.mean(axis=1)
print ('cv log_loss before: {}'.format(log_loss(y_cv, x_cv)))
# Compare plain blend vs. power calibration vs. clipping vs. both on CV data.
x_cv_calib = transform(x_cv)
print ('cv log_loss calibration: {}'.format(log_loss(y_cv, x_cv_calib)))
x_cv_clip = np.clip(x_cv, 0.001, 0.999)
print ('cv log_loss clip: {}'.format(log_loss(y_cv, x_cv_clip)))
x_cv_calib_clip = np.clip(transform(x_cv), 0.001, 0.999)
print ('cv log_loss calib+clip: {}'.format(log_loss(y_cv, x_cv_calib_clip)))
# Write the test-set submission using the calibrated + clipped blend.
submit = pd.read_csv('../data/sample_submission.csv')
submit.is_iceberg = np.clip(transform(ensemble.values.mean(axis=1)), 0.001, 0.999)
submit.to_csv('submits/submission_{}_calib_clip_1_4.csv'.format(last_submit_id), index=False)
| true | true |
f7f42b1c16c4d37dcb80e47143ca6327a15119e7 | 12,129 | py | Python | tools/benchmark_tool/openvino/tools/benchmark/parameters.py | chccc1994/openvino | 41f7893ae81d186d15c1754b179bf32a66d03bcf | [
"Apache-2.0"
] | 1 | 2020-11-19T15:53:18.000Z | 2020-11-19T15:53:18.000Z | tools/benchmark_tool/openvino/tools/benchmark/parameters.py | chccc1994/openvino | 41f7893ae81d186d15c1754b179bf32a66d03bcf | [
"Apache-2.0"
] | 44 | 2020-12-09T12:38:22.000Z | 2022-03-28T13:18:29.000Z | tools/benchmark_tool/openvino/tools/benchmark/parameters.py | PoliOwl/openvino | 27fa64031b4f46fdb05ae64ca0f425072808cdd7 | [
"Apache-2.0"
] | 4 | 2021-09-29T20:44:49.000Z | 2021-10-20T13:02:12.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import sys,argparse
from fnmatch import fnmatch
from openvino.tools.benchmark.utils.utils import show_available_devices
def str2bool(v):
    """argparse type helper: parse a textual boolean flag (case-insensitive).

    Accepts yes/true/t/y/1 as True and no/false/f/n/0 as False; anything else
    raises argparse.ArgumentTypeError.
    """
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def check_positive(value):
    """argparse type helper: convert to int and reject values <= 0."""
    parsed = int(value)
    if parsed > 0:
        return parsed
    raise argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
class print_help(argparse.Action):
    """Custom -h/--help action: print usage, then the available devices, then exit."""
    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_help()
        # Append the device listing after the standard argparse help text.
        show_available_devices()
        sys.exit()
def parse_args() -> argparse.Namespace:
    """Build the benchmark_app command-line parser and parse ``sys.argv``.

    Help (``-h``) is routed through the custom ``print_help`` action so the
    list of available devices can be appended to the usage text; built-in
    help is therefore disabled (``add_help=False``).
    """
    parser = argparse.ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action=print_help, nargs='?', default=argparse.SUPPRESS,
                      help='Show this help message and exit.')
    # --- model, inputs and target device -----------------------------------
    args.add_argument('-i', '--paths_to_input', action='append', nargs='+', type=str, required=False,
                      help='Optional. '
                           'Path to a folder with images and/or binaries or to specific image or binary file.'
                           'It is also allowed to map files to network inputs: '
                           'input_1:file_1,file_2,input_4:file_4 input_2:file_3')
    args.add_argument('-m', '--path_to_model', type=str, required=True,
                      help='Required. Path to an .xml/.onnx file with a trained model or '
                           'to a .blob file with a trained compiled model.')
    args.add_argument('-d', '--target_device', type=str, required=False, default='CPU',
                      help='Optional. Specify a target device to infer on (the list of available devices is shown below). '
                           'Default value is CPU. Use \'-d HETERO:<comma separated devices list>\' format to specify HETERO plugin. '
                           'Use \'-d MULTI:<comma separated devices list>\' format to specify MULTI plugin. '
                           'The application looks for a suitable plugin for the specified device.')
    args.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
                      help='Optional. Required for CPU custom layers. '
                           'Absolute path to a shared library with the kernels implementations.')
    args.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
                      help='Optional. Required for GPU custom kernels. Absolute path to an .xml file with the '
                           'kernels description.')
    # --- execution mode and iteration control ------------------------------
    args.add_argument('-hint', '--perf_hint', type=str, required=False, default='', choices=['throughput', 'latency'],
                      help='Optional. Performance hint (optimize for latency or throughput). '
                           'The hint allows the OpenVINO device to select the right network-specific settings, '
                           'as opposite to accepting specific values like \'nstreams\' from the command line. '
                           'So you can specify just the hint without adding explicit device-specific options')
    args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
                      help='Optional. Enable using sync/async API. Default value is async.')
    args.add_argument('-niter', '--number_iterations', type=check_positive, required=False, default=None,
                      help='Optional. Number of iterations. '
                           'If not specified, the number of iterations is calculated depending on a device.')
    args.add_argument('-nireq', '--number_infer_requests', type=check_positive, required=False, default=None,
                      help='Optional. Number of infer requests. Default value is determined automatically for device.')
    args.add_argument('-b', '--batch_size', type=int, required=False, default=0,
                      help='Optional. ' +
                           'Batch size value. ' +
                           'If not specified, the batch size value is determined from Intermediate Representation')
    args.add_argument('-stream_output', type=str2bool, required=False, default=False, nargs='?', const=True,
                      help='Optional. '
                           'Print progress as a plain text. '
                           'When specified, an interactive progress bar is replaced with a multi-line output.')
    args.add_argument('-t', '--time', type=int, required=False, default=None,
                      help='Optional. Time in seconds to execute topology.')
    args.add_argument('-progress', type=str2bool, required=False, default=False, nargs='?', const=True,
                      help='Optional. '
                           'Show progress bar (can affect performance measurement). Default values is \'False\'.')
    # --- input shapes/layouts and threading knobs --------------------------
    args.add_argument('-shape', type=str, required=False, default='',
                      help='Optional. '
                           'Set shape for input. For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size.')
    args.add_argument('-layout', type=str, required=False, default='',
                      help='Optional. '
                           'Prompts how network layouts should be treated by application. '
                           'For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size.')
    args.add_argument('-nstreams', '--number_streams', type=str, required=False, default=None,
                      help='Optional. Number of streams to use for inference on the CPU/GPU/MYRIAD '
                           '(for HETERO and MULTI device cases use format <device1>:<nstreams1>,<device2>:<nstreams2> '
                           'or just <nstreams>). '
                           'Default value is determined automatically for a device. Please note that although the automatic selection '
                           'usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very small networks. '
                           'Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency '
                           'estimations the number of streams should be set to 1. '
                           'See samples README for more details.')
    args.add_argument('--latency_percentile', type=int, required=False, default=50, choices=range(1,101),
                      help='Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median).')
    args.add_argument('-enforcebf16', '--enforce_bfloat16', type=str2bool, required=False, default=False, nargs='?', const=True, choices=[True, False],
                      help='Optional. By default floating point operations execution in bfloat16 precision are enforced if supported by platform. '
                           '\'true\' - enable bfloat16 regardless of platform support. '
                           '\'false\' - disable bfloat16 regardless of platform support.')
    args.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
                      help='Number of threads to use for inference on the CPU, GNA '
                           '(including HETERO and MULTI cases).')
    args.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, choices=['YES', 'NO', 'NUMA', 'HYBRID_AWARE'],
                      help='Optional. Enable threads->cores (\'YES\' which is OpenVINO runtime\'s default for conventional CPUs), '
                           'threads->(NUMA)nodes (\'NUMA\'), '
                           'threads->appropriate core types (\'HYBRID_AWARE\', which is OpenVINO runtime\'s default for Hybrid CPUs) '
                           'or completely disable (\'NO\') '
                           'CPU threads pinning for CPU-involved inference.')
    # --- reporting, statistics and configuration dump/load -----------------
    args.add_argument('-exec_graph_path', '--exec_graph_path', type=str, required=False,
                      help='Optional. Path to a file where to store executable graph information serialized.')
    args.add_argument('-pc', '--perf_counts', type=str2bool, required=False, default=False, nargs='?', const=True,
                      help='Optional. Report performance counters.', )
    args.add_argument('-report_type', '--report_type', type=str, required=False,
                      choices=['no_counters', 'average_counters', 'detailed_counters'],
                      help="Optional. Enable collecting statistics report. \"no_counters\" report contains "
                           "configuration options specified, resulting FPS and latency. \"average_counters\" "
                           "report extends \"no_counters\" report and additionally includes average PM "
                           "counters values for each layer from the network. \"detailed_counters\" report "
                           "extends \"average_counters\" report and additionally includes per-layer PM "
                           "counters and latency for each executed infer request.")
    args.add_argument('-report_folder', '--report_folder', type=str, required=False, default='',
                      help="Optional. Path to a folder where statistics report is stored.")
    args.add_argument('-dump_config', type=str, required=False, default='',
                      help="Optional. Path to JSON file to dump IE parameters, which were set by application.")
    args.add_argument('-load_config', type=str, required=False, default='',
                      help="Optional. Path to JSON file to load custom IE parameters."
                           " Please note, command line parameters have higher priority then parameters from configuration file.")
    # --- precision, caching and per-channel preprocessing ------------------
    args.add_argument('-qb', '--quantization_bits', type=int, required=False, default=None, choices=[8, 16],
                      help="Optional. Weight bits for quantization: 8 (I8) or 16 (I16) ")
    args.add_argument('-ip', '--input_precision', type=str, required=False, choices=['U8', 'FP16', 'FP32'],
                      help='Optional. Specifies precision for all input layers of the network.')
    args.add_argument('-op', '--output_precision', type=str, required=False, choices=['U8', 'FP16', 'FP32'],
                      help='Optional. Specifies precision for all output layers of the network.')
    args.add_argument('-iop', '--input_output_precision', type=str, required=False,
                      help='Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required. Overwrites precision from ip and op options for specified layers.')
    args.add_argument('-cdir', '--cache_dir', type=str, required=False, default='',
                      help="Optional. Enable model caching to specified directory")
    args.add_argument('-lfile', '--load_from_file', required=False, nargs='?', default=argparse.SUPPRESS,
                      help="Optional. Loads model from file directly without read_network.")
    args.add_argument('-iscale', '--input_scale', type=str, required=False, default='',
                      help="Optional. Scale values to be used for the input image per channel.\n Values to be provided in the [R, G, B] format. Can be defined for desired input of the model.\n"
                           "Example: -iscale data[255,255,255],info[255,255,255]\n")
    args.add_argument('-imean', '--input_mean', type=str, required=False, default='',
                      help="Optional. Mean values to be used for the input image per channel.\n Values to be provided in the [R, G, B] format. Can be defined for desired input of the model.\n"
                           "Example: -imean data[255,255,255],info[255,255,255]\n")
    parsed_args = parser.parse_args()
    return parsed_args
| 81.402685 | 236 | 0.615055 |
import sys,argparse
from fnmatch import fnmatch
from openvino.tools.benchmark.utils.utils import show_available_devices
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
return ivalue
class print_help(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
show_available_devices()
sys.exit()
def parse_args():
parser = argparse.ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action=print_help, nargs='?', default=argparse.SUPPRESS,
help='Show this help message and exit.')
args.add_argument('-i', '--paths_to_input', action='append', nargs='+', type=str, required=False,
help='Optional. '
'Path to a folder with images and/or binaries or to specific image or binary file.'
'It is also allowed to map files to network inputs: '
'input_1:file_1,file_2,input_4:file_4 input_2:file_3')
args.add_argument('-m', '--path_to_model', type=str, required=True,
help='Required. Path to an .xml/.onnx file with a trained model or '
'to a .blob file with a trained compiled model.')
args.add_argument('-d', '--target_device', type=str, required=False, default='CPU',
help='Optional. Specify a target device to infer on (the list of available devices is shown below). '
'Default value is CPU. Use \'-d HETERO:<comma separated devices list>\' format to specify HETERO plugin. '
'Use \'-d MULTI:<comma separated devices list>\' format to specify MULTI plugin. '
'The application looks for a suitable plugin for the specified device.')
args.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
help='Optional. Required for CPU custom layers. '
'Absolute path to a shared library with the kernels implementations.')
args.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
help='Optional. Required for GPU custom kernels. Absolute path to an .xml file with the '
'kernels description.')
args.add_argument('-hint', '--perf_hint', type=str, required=False, default='', choices=['throughput', 'latency'],
help='Optional. Performance hint (optimize for latency or throughput). '
'The hint allows the OpenVINO device to select the right network-specific settings, '
'as opposite to accepting specific values like \'nstreams\' from the command line. '
'So you can specify just the hint without adding explicit device-specific options')
args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
help='Optional. Enable using sync/async API. Default value is async.')
args.add_argument('-niter', '--number_iterations', type=check_positive, required=False, default=None,
help='Optional. Number of iterations. '
'If not specified, the number of iterations is calculated depending on a device.')
args.add_argument('-nireq', '--number_infer_requests', type=check_positive, required=False, default=None,
help='Optional. Number of infer requests. Default value is determined automatically for device.')
args.add_argument('-b', '--batch_size', type=int, required=False, default=0,
help='Optional. ' +
'Batch size value. ' +
'If not specified, the batch size value is determined from Intermediate Representation')
args.add_argument('-stream_output', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. '
'Print progress as a plain text. '
'When specified, an interactive progress bar is replaced with a multi-line output.')
args.add_argument('-t', '--time', type=int, required=False, default=None,
help='Optional. Time in seconds to execute topology.')
args.add_argument('-progress', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. '
'Show progress bar (can affect performance measurement). Default values is \'False\'.')
args.add_argument('-shape', type=str, required=False, default='',
help='Optional. '
'Set shape for input. For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size.')
args.add_argument('-layout', type=str, required=False, default='',
help='Optional. '
'Prompts how network layouts should be treated by application. '
'For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size.')
args.add_argument('-nstreams', '--number_streams', type=str, required=False, default=None,
help='Optional. Number of streams to use for inference on the CPU/GPU/MYRIAD '
'(for HETERO and MULTI device cases use format <device1>:<nstreams1>,<device2>:<nstreams2> '
'or just <nstreams>). '
'Default value is determined automatically for a device. Please note that although the automatic selection '
'usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very small networks. '
'Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency '
'estimations the number of streams should be set to 1. '
'See samples README for more details.')
args.add_argument('--latency_percentile', type=int, required=False, default=50, choices=range(1,101),
help='Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median).')
args.add_argument('-enforcebf16', '--enforce_bfloat16', type=str2bool, required=False, default=False, nargs='?', const=True, choices=[True, False],
help='Optional. By default floating point operations execution in bfloat16 precision are enforced if supported by platform. '
'\'true\' - enable bfloat16 regardless of platform support. '
'\'false\' - disable bfloat16 regardless of platform support.')
args.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
help='Number of threads to use for inference on the CPU, GNA '
'(including HETERO and MULTI cases).')
args.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, choices=['YES', 'NO', 'NUMA', 'HYBRID_AWARE'],
help='Optional. Enable threads->cores (\'YES\' which is OpenVINO runtime\'s default for conventional CPUs), '
'threads->(NUMA)nodes (\'NUMA\'), '
'threads->appropriate core types (\'HYBRID_AWARE\', which is OpenVINO runtime\'s default for Hybrid CPUs) '
'or completely disable (\'NO\') '
'CPU threads pinning for CPU-involved inference.')
args.add_argument('-exec_graph_path', '--exec_graph_path', type=str, required=False,
help='Optional. Path to a file where to store executable graph information serialized.')
args.add_argument('-pc', '--perf_counts', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. Report performance counters.', )
args.add_argument('-report_type', '--report_type', type=str, required=False,
choices=['no_counters', 'average_counters', 'detailed_counters'],
help="Optional. Enable collecting statistics report. \"no_counters\" report contains "
"configuration options specified, resulting FPS and latency. \"average_counters\" "
"report extends \"no_counters\" report and additionally includes average PM "
"counters values for each layer from the network. \"detailed_counters\" report "
"extends \"average_counters\" report and additionally includes per-layer PM "
"counters and latency for each executed infer request.")
args.add_argument('-report_folder', '--report_folder', type=str, required=False, default='',
help="Optional. Path to a folder where statistics report is stored.")
args.add_argument('-dump_config', type=str, required=False, default='',
help="Optional. Path to JSON file to dump IE parameters, which were set by application.")
args.add_argument('-load_config', type=str, required=False, default='',
help="Optional. Path to JSON file to load custom IE parameters."
" Please note, command line parameters have higher priority then parameters from configuration file.")
args.add_argument('-qb', '--quantization_bits', type=int, required=False, default=None, choices=[8, 16],
help="Optional. Weight bits for quantization: 8 (I8) or 16 (I16) ")
args.add_argument('-ip', '--input_precision', type=str, required=False, choices=['U8', 'FP16', 'FP32'],
help='Optional. Specifies precision for all input layers of the network.')
args.add_argument('-op', '--output_precision', type=str, required=False, choices=['U8', 'FP16', 'FP32'],
help='Optional. Specifies precision for all output layers of the network.')
args.add_argument('-iop', '--input_output_precision', type=str, required=False,
help='Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required. Overwrites precision from ip and op options for specified layers.')
args.add_argument('-cdir', '--cache_dir', type=str, required=False, default='',
help="Optional. Enable model caching to specified directory")
args.add_argument('-lfile', '--load_from_file', required=False, nargs='?', default=argparse.SUPPRESS,
help="Optional. Loads model from file directly without read_network.")
args.add_argument('-iscale', '--input_scale', type=str, required=False, default='',
help="Optional. Scale values to be used for the input image per channel.\n Values to be provided in the [R, G, B] format. Can be defined for desired input of the model.\n"
"Example: -iscale data[255,255,255],info[255,255,255]\n")
args.add_argument('-imean', '--input_mean', type=str, required=False, default='',
help="Optional. Mean values to be used for the input image per channel.\n Values to be provided in the [R, G, B] format. Can be defined for desired input of the model.\n"
"Example: -imean data[255,255,255],info[255,255,255]\n")
parsed_args = parser.parse_args()
return parsed_args
| true | true |
f7f42cbd6cfe5e9e47ec198146eb106e73af5d56 | 4,252 | py | Python | descriptastorus/cli/storus.py | sharmavaruns/descriptastorus | 7a3e457bc64e480e44f0ce624052da68d2a27bad | [
"BSD-3-Clause"
] | 118 | 2019-01-15T23:04:29.000Z | 2022-03-25T01:31:06.000Z | descriptastorus/cli/storus.py | sharmavaruns/descriptastorus | 7a3e457bc64e480e44f0ce624052da68d2a27bad | [
"BSD-3-Clause"
] | 14 | 2019-02-15T17:15:46.000Z | 2022-03-10T14:18:56.000Z | descriptastorus/cli/storus.py | sharmavaruns/descriptastorus | 7a3e457bc64e480e44f0ce624052da68d2a27bad | [
"BSD-3-Clause"
] | 40 | 2018-12-18T11:39:38.000Z | 2022-03-23T09:45:45.000Z | # Copyright (c) 2018, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from descriptastorus import append_store, make_store
import argparse, logging, os, shutil, time
import sys
from rdkit import rdBase
rdBase.DisableLog("rdApp.*")
# Command-line interface for building or appending to a DescriptaStore.
parser = argparse.ArgumentParser()
parser.add_argument("smilesfile",
                    help="file containing smiles strings")
parser.add_argument("storage",
                    help="directory in which to store the descriptors")
parser.add_argument("--append", action="store_true",
                    help="Append new compounds to the smiles file (rejecting compounds with the same name)")
# NOTE(review): help text below looks truncated ("Append specified storage to the") — confirm intended wording.
parser.add_argument("--append-store", action="store_true",
                    help="Append specified storage to the")
parser.add_argument("--descriptors", default="Morgan3Counts,RDKit2D")
parser.add_argument("--hasHeader", action="store_true",
                    help="Indicate whether the smiles file has a header row")
parser.add_argument("--index-inchikey", action="store_true",
                    help="Optionally index the descriptors with inchi keys")
#parser.add_argument("--index-smiles", action="store_true",
#                    help="Indicate whether the smiles file has a header column")
parser.add_argument("--smilesColumn", default=0,
                    help="Row index (or header name if the file has a header) for the smiles column")
parser.add_argument("--nameColumn", default=None,
                    help="Row index (or header name if the file has a header) for the name column")
# NOTE(review): "--seperator" is misspelled and its help duplicates --nameColumn's; renaming would break callers.
parser.add_argument("--seperator", default="\t",
                    help="Row index (or header name if the file has a header) for the name column")
parser.add_argument("--batchsize", default=1000, type=int,
                    help="Batchsize for chunking up the data for processing")
parser.add_argument("--numprocs", default=-1, type=int,
                    help="Number of processers to use (-1 means use all available")
parser.add_argument("--verbose", action="store_true",
                    help="Verbose logging")
def main():
    """CLI entry point: build a new descriptor store, or append to one.

    Dispatch: --append appends smiles to an existing store, --append-store
    merges another store, otherwise a fresh store is created.
    """
    opts = parser.parse_args()
    if opts.verbose:
        logging.getLogger().setLevel(logging.INFO)
    if opts.append and opts.append_store:
        # The two append modes are mutually exclusive.  Abort here: the
        # original only logged the error and then fell through into the
        # --append branch, silently running the wrong operation.
        logging.error("Use one of --append --append-store")
        sys.exit(1)
    if opts.append:
        append_store.append_smiles(append_store.AppendStorageOptions(**vars(opts)))
    elif opts.append_store:
        append_store.append_store(append_store.AppendStorageOptions(**vars(opts)))
    else:
        # MakeStorageOptions does not take the 'append' flag; strip it.
        d = vars(opts)
        del d['append']
        make_store.make_store(make_store.MakeStorageOptions(**d))
| 45.72043 | 108 | 0.706256 |
from __future__ import print_function
from descriptastorus import append_store, make_store
import argparse, logging, os, shutil, time
import sys
from rdkit import rdBase
rdBase.DisableLog("rdApp.*")
parser = argparse.ArgumentParser()
parser.add_argument("smilesfile",
help="file containing smiles strings")
parser.add_argument("storage",
help="directory in which to store the descriptors")
parser.add_argument("--append", action="store_true",
help="Append new compounds to the smiles file (rejecting compounds with the same name)")
parser.add_argument("--append-store", action="store_true",
help="Append specified storage to the")
parser.add_argument("--descriptors", default="Morgan3Counts,RDKit2D")
parser.add_argument("--hasHeader", action="store_true",
help="Indicate whether the smiles file has a header row")
parser.add_argument("--index-inchikey", action="store_true",
help="Optionally index the descriptors with inchi keys")
parser.add_argument("--smilesColumn", default=0,
help="Row index (or header name if the file has a header) for the smiles column")
parser.add_argument("--nameColumn", default=None,
help="Row index (or header name if the file has a header) for the name column")
parser.add_argument("--seperator", default="\t",
help="Row index (or header name if the file has a header) for the name column")
parser.add_argument("--batchsize", default=1000, type=int,
help="Batchsize for chunking up the data for processing")
parser.add_argument("--numprocs", default=-1, type=int,
help="Number of processers to use (-1 means use all available")
parser.add_argument("--verbose", action="store_true",
help="Verbose logging")
def main():
opts = parser.parse_args()
if opts.verbose:
logging.getLogger().setLevel(logging.INFO)
if opts.append and opts.append_store:
logging.error("Use one of --append --append-store")
if opts.append:
append_store.append_smiles(append_store.AppendStorageOptions(**vars(opts)))
elif opts.append_store:
append_store.append_store(append_store.AppendStorageOptions(**vars(opts)))
else:
d = vars(opts)
del d['append']
make_store.make_store(make_store.MakeStorageOptions(**d))
| true | true |
f7f42d0aa0279672f3a0885035d959ce97fd03eb | 2,278 | py | Python | main/hw2_1_threads.py | v-v-d/Python_client-server_apps | 5741c92dc5324ae8af2c7102d95f63c57e71b4c7 | [
"MIT"
] | null | null | null | main/hw2_1_threads.py | v-v-d/Python_client-server_apps | 5741c92dc5324ae8af2c7102d95f63c57e71b4c7 | [
"MIT"
] | null | null | null | main/hw2_1_threads.py | v-v-d/Python_client-server_apps | 5741c92dc5324ae8af2c7102d95f63c57e71b4c7 | [
"MIT"
] | 1 | 2020-02-27T08:08:26.000Z | 2020-02-27T08:08:26.000Z | import subprocess
import threading
import ipaddress
from tabulate import tabulate
class ParallelHostRangePing:
    """Ping every address in an inclusive IP range concurrently and tabulate results.

    Endpoints may be given in either order; they must be ipaddress.IPv4Address
    or ipaddress.IPv6Address instances.
    """

    def __init__(self, from_ip_addr, to_ip_addr):
        self.from_ip_addr = from_ip_addr
        self.to_ip_addr = to_ip_addr
        self._ping_status_list = []
        self._ping_threads = []
        # Guards _ping_status_list: it is mutated from many worker threads.
        self._status_lock = threading.Lock()

    def is_ip_addrs_valid(self):
        """Return True iff both endpoints are IPv4Address/IPv6Address objects."""
        valid_types = (ipaddress.IPv4Address, ipaddress.IPv6Address)
        return all(type(ip_addr) in valid_types
                   for ip_addr in (self.from_ip_addr, self.to_ip_addr))

    def _add_to_ping_status_list(self, ping_status):
        # Lock around the check-then-append: the original's unsynchronized
        # membership test raced between threads.
        with self._status_lock:
            if ping_status not in self._ping_status_list:
                self._ping_status_list.append(ping_status)

    def _host_ping(self, ip_addr):
        """Send one echo request to *ip_addr* and record its reachability."""
        import platform
        # '-n' is the echo-count flag on Windows, '-c' elsewhere.  A bounded
        # count is essential: Linux ping runs forever without one.  The
        # original shell=True list call also dropped the address argument on
        # POSIX (sh -c 'ping' 'addr' runs ping with no destination).
        count_flag = '-n' if platform.system().lower() == 'windows' else '-c'
        is_unreachable = subprocess.call(['ping', count_flag, '1', str(ip_addr)])
        ping_status = {'Unreachable': ip_addr} if is_unreachable else {'Reachable': ip_addr}
        self._add_to_ping_status_list(ping_status)

    def _parallel_ping(self, ip_addrs):
        # Fan out one daemon-less worker thread per address.
        for idx, ip_addr in enumerate(ip_addrs):
            ping_thread = threading.Thread(target=self._host_ping, args=(ip_addr,),
                                           name=f'ping_thread_{idx}')
            ping_thread.start()
            self._ping_threads.append(ping_thread)

    def _get_valid_ip_addrs_positions(self):
        # Normalize the endpoints so from_ip_addr <= to_ip_addr.
        if self.to_ip_addr < self.from_ip_addr:
            self.from_ip_addr, self.to_ip_addr = self.to_ip_addr, self.from_ip_addr

    def get_ip_addrs_list(self):
        """Return every address in the inclusive range, or [] for invalid endpoints.

        The original implicitly returned None on invalid input, which made
        _parallel_ping crash with a TypeError when iterating it.
        """
        if not self.is_ip_addrs_valid():
            return []
        self._get_valid_ip_addrs_positions()
        return [self.from_ip_addr + i
                for i in range(int(self.to_ip_addr) - int(self.from_ip_addr) + 1)]

    def _host_range_ping(self):
        self._parallel_ping(self.get_ip_addrs_list())

    def _wait_for_threads_complete(self):
        for ping_thread in self._ping_threads:
            ping_thread.join()

    def print_host_range_ping_tab(self):
        """Ping the whole range, wait for all workers, and print the table."""
        self._host_range_ping()
        self._wait_for_threads_complete()
        print(tabulate(self._ping_status_list, headers='keys'))
if __name__ == '__main__':
    # Demo: ping the inclusive range 10.0.0.1 .. 10.0.0.5 (endpoints given
    # in reverse order on purpose — the class normalizes them) and print
    # the reachability table.
    ip_1 = ipaddress.ip_address('10.0.0.5')
    ip_2 = ipaddress.ip_address('10.0.0.1')
    ping_tab = ParallelHostRangePing(ip_1, ip_2)
    ping_tab.print_host_range_ping_tab()
| 35.59375 | 110 | 0.68964 | import subprocess
import threading
import ipaddress
from tabulate import tabulate
class ParallelHostRangePing:
def __init__(self, from_ip_addr, to_ip_addr):
self.from_ip_addr = from_ip_addr
self.to_ip_addr = to_ip_addr
self._ping_status_list = []
self._ping_threads = []
def is_ip_addrs_valid(self):
valid_types = (ipaddress.IPv4Address, ipaddress.IPv6Address)
for ip_addr in (self.from_ip_addr, self.to_ip_addr):
if type(ip_addr) not in valid_types:
return False
return True
def _add_to_ping_status_list(self, ping_status):
if ping_status not in self._ping_status_list:
self._ping_status_list.append(ping_status)
def _host_ping(self, ip_addr):
is_unreachable = subprocess.call(['ping', str(ip_addr)], shell=True)
ping_status = {'Unreachable': ip_addr} if is_unreachable else {'Reachable': ip_addr}
self._add_to_ping_status_list(ping_status)
def _parallel_ping(self, ip_addrs):
for idx, ip_addr in enumerate(ip_addrs):
ping_thread = threading.Thread(target=self._host_ping, args=(ip_addr,), name=f'ping_thread_{idx}')
ping_thread.start()
self._ping_threads.append(ping_thread)
def _get_valid_ip_addrs_positions(self):
if self.to_ip_addr < self.from_ip_addr:
self.from_ip_addr, self.to_ip_addr = self.to_ip_addr, self.from_ip_addr
def get_ip_addrs_list(self):
if self.is_ip_addrs_valid():
self._get_valid_ip_addrs_positions()
return [self.from_ip_addr + i for i in range(int(self.to_ip_addr) - int(self.from_ip_addr) + 1)]
def _host_range_ping(self):
self._parallel_ping(self.get_ip_addrs_list())
def _wait_for_threads_complete(self):
for ping_thread in self._ping_threads:
ping_thread.join()
def print_host_range_ping_tab(self):
self._host_range_ping()
self._wait_for_threads_complete()
print(tabulate(self._ping_status_list, headers='keys'))
if __name__ == '__main__':
ip_1 = ipaddress.ip_address('10.0.0.5')
ip_2 = ipaddress.ip_address('10.0.0.1')
ping_tab = ParallelHostRangePing(ip_1, ip_2)
ping_tab.print_host_range_ping_tab()
| true | true |
f7f42db9b9ea51f742fe8a5eb21c857d1e024694 | 84 | py | Python | app/apps.py | impressai/cloudfront-django-setup | 0758fddc6986e999421ea4cb7713ce257cacad26 | [
"MIT"
] | 1 | 2020-09-18T16:31:57.000Z | 2020-09-18T16:31:57.000Z | app/apps.py | impressai/cloudfront-django-setup | 0758fddc6986e999421ea4cb7713ce257cacad26 | [
"MIT"
] | 10 | 2019-12-05T04:34:23.000Z | 2022-03-11T23:56:27.000Z | app/apps.py | impressai/cloudfront-django-setup | 0758fddc6986e999421ea4cb7713ce257cacad26 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ApiAppConfig(AppConfig):
    """Django application configuration for the ``app`` package."""

    # Dotted module path Django uses to register this application.
    name = 'app'
| 14 | 33 | 0.738095 | from django.apps import AppConfig
class ApiAppConfig(AppConfig):
name = 'app'
| true | true |
f7f42e708e954a7c2ac87a4c357b059c27f8fd08 | 618 | py | Python | rfcomm_client_led.py | chenphilip888/rpi3b-bluetooth | 6b149b14f630092ca0abfbb99034b82fafa3a44e | [
"MIT"
] | null | null | null | rfcomm_client_led.py | chenphilip888/rpi3b-bluetooth | 6b149b14f630092ca0abfbb99034b82fafa3a44e | [
"MIT"
] | null | null | null | rfcomm_client_led.py | chenphilip888/rpi3b-bluetooth | 6b149b14f630092ca0abfbb99034b82fafa3a44e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import bluetooth
import sys
import time

# UUID of the RFCOMM service advertised by the LED server.
uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee"
service_matches = bluetooth.find_service( uuid = uuid )

if len(service_matches) == 0:
    print("couldn't find the FooBar service")
    sys.exit(0)

# Use the first advertised match for the connection parameters.
first_match = service_matches[0]
port = first_match["port"]
name = first_match["name"]
host = first_match["host"]

sock = bluetooth.BluetoothSocket( bluetooth.RFCOMM )
try:
    sock.connect((host, port))
    # Toggle the remote LED five times: "1" = on, "0" = off, one second apart;
    # each send is acknowledged by the server (printed here).
    for i in range( 5 ):
        sock.send("1")
        print(sock.recv(1024))
        time.sleep( 1 )
        sock.send("0")
        print(sock.recv(1024))
        time.sleep( 1 )
finally:
    # Always release the socket — the original leaked it whenever
    # connect/send/recv raised before reaching close().
    sock.close()
import bluetooth
import sys
import time
uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee"
service_matches = bluetooth.find_service( uuid = uuid )
if len(service_matches) == 0:
print("couldn't find the FooBar service")
sys.exit(0)
first_match = service_matches[0]
port = first_match["port"]
name = first_match["name"]
host = first_match["host"]
sock=bluetooth.BluetoothSocket( bluetooth.RFCOMM )
sock.connect((host, port))
for i in range( 5 ):
sock.send("1")
print(sock.recv(1024))
time.sleep( 1 )
sock.send("0")
print(sock.recv(1024))
time.sleep( 1 )
sock.close()
| true | true |
f7f42f0ea5da15bfbe7816400c336946ad55b119 | 3,153 | py | Python | validation/new/bare_model.py | FoVNull/MFDSL | 8c6fc99260c1c02f4f45cfb14a111028d2a96ded | [
"MIT"
] | 2 | 2021-12-08T05:45:19.000Z | 2022-03-04T01:00:32.000Z | validation/new/bare_model.py | FoVNull/MFDSL | 8c6fc99260c1c02f4f45cfb14a111028d2a96ded | [
"MIT"
] | null | null | null | validation/new/bare_model.py | FoVNull/MFDSL | 8c6fc99260c1c02f4f45cfb14a111028d2a96ded | [
"MIT"
] | null | null | null | from typing import Dict, Any
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from kashgari_local.abc_feature_model import ABCClassificationModel
from kashgari.layers import L
class Bare_Model(ABCClassificationModel):
    """Text classifier combining a BiLSTM encoder with an attention head.

    The token embedding comes from the kashgari embedding object; an extra
    per-token feature tensor of width ``feature_D`` is concatenated with the
    BiLSTM output before the attention and pooling stages.
    """

    def __init__(self, embedding, **params):
        super().__init__(embedding)
        # Dimensionality of the auxiliary per-token feature input.
        self.feature_D = params["feature_D"]

    @classmethod
    def default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
        """
        Get hyper parameters of model
        Returns:
            hyper parameters dict

        activation_function list:
           {softmax, elu, selu, softplus, softsign, swish,
           relu, gelu, tanh, sigmoid, exponential,
           hard_sigmoid, linear, serialize, deserialize, get}

        NOTE(review): only 'layer_bilstm1' is consumed by build_model_arc();
        'conv_layer1', 'layer_time_distributed' and 'layer_output1' appear
        unused in this model — confirm before relying on them.
        """
        return {
            'layer_bilstm1': {
                'units': 128,
                'return_sequences': True
            },
            'layer_time_distributed': {},
            'conv_layer1': {
                'filters': 128,
                'kernel_size': 4,
                'padding': 'valid',
                'activation': 'relu'
            },
            'layer_output1': {
                'activation': 'softmax'
            },
        }

    def build_model_arc(self):
        """
        Build the model architecture: BiLSTM + feature fusion + Attention.

        Sets ``self.tf_model``; inputs are the embedding model's inputs plus
        the (batch, seq, feature_D) ``features`` tensor.
        """
        features = tf.keras.Input(shape=(None, self.feature_D), name="features")
        l1_reg = tf.keras.regularizers.l1(0.01)
        l2_reg = tf.keras.regularizers.L2(0.01)
        output_dim = self.label_processor.vocab_size
        config = self.hyper_parameters
        embed_model = self.embedding.embed_model

        # BiLSTM encoder over the embedded token sequence.
        layer_stack = [
            L.Bidirectional(L.LSTM(**config['layer_bilstm1'])),
            L.Dropout(rate=0.2),
        ]

        # Thread the tensor through the stack: tensor := layer(tensor).
        tensor = embed_model.output
        for layer in layer_stack:
            tensor = layer(tensor)

        # Project the auxiliary features, then build attention query/key by
        # concatenating encoder output and features in both orders.
        features_tensor = L.Dense(64, kernel_regularizer=l1_reg)(features)
        # tensor = L.Concatenate(axis=-1)([features_tensor, tensor])
        query = L.Concatenate(axis=-1)([tensor, features_tensor])
        key = L.Concatenate(axis=-1)([features_tensor, tensor])
        query_value_attention_seq = L.Attention()([query, key])
        # Alternative attention head kept for reference:
        # query_value_attention_seq = L.MultiHeadAttention(
        #     num_heads=4, key_dim=2, dropout=0.5
        # )(tensor, tensor)
        # Pool both the raw query and the attended sequence over time.
        query_encoding = L.GlobalMaxPool1D()(query)
        query_value_attention = L.GlobalMaxPool1D()(query_value_attention_seq)
        input_tensor = L.Concatenate(axis=1)([query_encoding, query_value_attention])

        # Classification head (sigmoid — NOTE(review): suggests multi-label
        # use; confirm against the loss used in ABCClassificationModel).
        input_tensor = L.Dropout(rate=0.1)(input_tensor)
        output_tensor = L.Dense(
            output_dim, activation='sigmoid', name="output0",
            kernel_regularizer=l2_reg
        )(input_tensor)

        self.tf_model = tf.keras.Model(inputs=[embed_model.inputs, features], outputs=output_tensor)
        # plot_model(self.tf_model, to_file="D:/PycProject/TripleC/reference/model.png")
| 33.903226 | 100 | 0.607358 | from typing import Dict, Any
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from kashgari_local.abc_feature_model import ABCClassificationModel
from kashgari.layers import L
class Bare_Model(ABCClassificationModel):
def __init__(self, embedding, **params):
super().__init__(embedding)
self.feature_D = params["feature_D"]
@classmethod
def default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bilstm1': {
'units': 128,
'return_sequences': True
},
'layer_time_distributed': {},
'conv_layer1': {
'filters': 128,
'kernel_size': 4,
'padding': 'valid',
'activation': 'relu'
},
'layer_output1': {
'activation': 'softmax'
},
}
def build_model_arc(self):
features = tf.keras.Input(shape=(None, self.feature_D), name="features")
l1_reg = tf.keras.regularizers.l1(0.01)
l2_reg = tf.keras.regularizers.L2(0.01)
output_dim = self.label_processor.vocab_size
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_stack = [
L.Bidirectional(L.LSTM(**config['layer_bilstm1'])),
L.Dropout(rate=0.2),
]
tensor = embed_model.output
for layer in layer_stack:
tensor = layer(tensor)
features_tensor = L.Dense(64, kernel_regularizer=l1_reg)(features)
query = L.Concatenate(axis=-1)([tensor, features_tensor])
key = L.Concatenate(axis=-1)([features_tensor, tensor])
query_value_attention_seq = L.Attention()([query, key])
query_encoding = L.GlobalMaxPool1D()(query)
query_value_attention = L.GlobalMaxPool1D()(query_value_attention_seq)
input_tensor = L.Concatenate(axis=1)([query_encoding, query_value_attention])
input_tensor = L.Dropout(rate=0.1)(input_tensor)
output_tensor = L.Dense(
output_dim, activation='sigmoid', name="output0",
kernel_regularizer=l2_reg
)(input_tensor)
self.tf_model = tf.keras.Model(inputs=[embed_model.inputs, features], outputs=output_tensor)
| true | true |
f7f42f4357cda576a0f16c469b9be8109b7b75bf | 1,790 | py | Python | builder/template_app/build.py | ufopilot/QT-App-Builder | af9c455b4122669d5f200728d467f5afe4f3ee87 | [
"MIT"
] | null | null | null | builder/template_app/build.py | ufopilot/QT-App-Builder | af9c455b4122669d5f200728d467f5afe4f3ee87 | [
"MIT"
] | null | null | null | builder/template_app/build.py | ufopilot/QT-App-Builder | af9c455b4122669d5f200728d467f5afe4f3ee87 | [
"MIT"
] | null | null | null | import json, os
import subprocess
import locale
import shutil
locale.setlocale(locale.LC_ALL,'en-US')
#copy settings folder to dist
#shutil.copytree('gui/settings', 'dist/', dirs_exist_ok=True)
if not os.path.exists("dist/settings"):
os.makedirs("dist/settings")
for __file in ("ui_settings.json", "theme_settings.json"):
print(f"copy {__file}")
shutil.copy(f"gui/settings\\{__file}", f"dist/settings/{__file}")
with open("gui/settings/ui_settings.json", "r", encoding='utf-8') as reader:
settings = json.loads(reader.read())
app_name = settings['window']['app_name']
icon = settings['window']['icon']
command = ('pyinstaller.exe',
'--onefile',
'--windowed',
'--icon', icon,
'--name', app_name,
'--hidden-import', 'requests',
'--hidden-import', 'bs4',
'--hidden-import', 'pyperclip',
'--hidden-import', 'cloudscraper',
'--hidden-import', 'webbrowser',
'--hidden-import', 'webcolors',
'--add-data', 'resources_rc.py;.',
'--add-data', 'qt_core.py;.',
'--add-data', 'gui;gui/',
'main.py')
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
universal_newlines=True)
while True:
output = process.stdout.readline()
print(output.strip())
# Do something else
return_code = process.poll()
if return_code is not None:
print('RETURN CODE', return_code)
# Process has finished, read rest of the output
for output in process.stdout.readlines():
print(output.strip())
break
# test app
print("starting App ....")
os.chdir('dist')
os.system(f"{app_name}.exe") | 29.833333 | 76 | 0.577095 | import json, os
import subprocess
import locale
import shutil
locale.setlocale(locale.LC_ALL,'en-US')
if not os.path.exists("dist/settings"):
os.makedirs("dist/settings")
for __file in ("ui_settings.json", "theme_settings.json"):
print(f"copy {__file}")
shutil.copy(f"gui/settings\\{__file}", f"dist/settings/{__file}")
with open("gui/settings/ui_settings.json", "r", encoding='utf-8') as reader:
settings = json.loads(reader.read())
app_name = settings['window']['app_name']
icon = settings['window']['icon']
command = ('pyinstaller.exe',
'--onefile',
'--windowed',
'--icon', icon,
'--name', app_name,
'--hidden-import', 'requests',
'--hidden-import', 'bs4',
'--hidden-import', 'pyperclip',
'--hidden-import', 'cloudscraper',
'--hidden-import', 'webbrowser',
'--hidden-import', 'webcolors',
'--add-data', 'resources_rc.py;.',
'--add-data', 'qt_core.py;.',
'--add-data', 'gui;gui/',
'main.py')
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
universal_newlines=True)
while True:
output = process.stdout.readline()
print(output.strip())
return_code = process.poll()
if return_code is not None:
print('RETURN CODE', return_code)
for output in process.stdout.readlines():
print(output.strip())
break
print("starting App ....")
os.chdir('dist')
os.system(f"{app_name}.exe") | true | true |
f7f42fe3783833eb76cfe814dc981f8d40edb2da | 4,972 | py | Python | resources/draw1.py | sandeepsn1997/civilapp | 749027e904924575f60883c5d44688101f7e9864 | [
"MIT"
] | null | null | null | resources/draw1.py | sandeepsn1997/civilapp | 749027e904924575f60883c5d44688101f7e9864 | [
"MIT"
] | null | null | null | resources/draw1.py | sandeepsn1997/civilapp | 749027e904924575f60883c5d44688101f7e9864 | [
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
def draw_line_x(ox, oy, x, y, length):
    """Append the integer points of a horizontal segment to ``ox``/``oy``.

    Points run from (x, y) to (x + length, y) inclusive. The lists are
    mutated in place and also returned for chaining.
    """
    n_points = length + 1
    ox.extend(x + step for step in range(n_points))
    oy.extend([y] * n_points)
    return ox, oy
def draw_line_y(ox, oy, x, y, length):
    """Append the integer points of a vertical segment to ``ox``/``oy``.

    Points run from (x, y) to (x, y + length) inclusive. The lists are
    mutated in place and also returned for chaining.
    """
    n_points = length + 1
    ox.extend([x] * n_points)
    oy.extend(y + step for step in range(n_points))
    return ox, oy
def draw_sqr(ox, oy, x, y, length):
    """Append the outline of an axis-aligned square with corner (x, y).

    Edges are emitted in the same order as the original implementation:
    bottom, top, left, right. Returns the mutated coordinate lists.
    """
    edges = (
        (draw_line_x, x, y),           # bottom edge
        (draw_line_x, x, y + length),  # top edge
        (draw_line_y, x, y),           # left edge
        (draw_line_y, x + length, y),  # right edge
    )
    for edge_fn, edge_x, edge_y in edges:
        edge_fn(ox, oy, edge_x, edge_y, length)
    return ox, oy
def draw_rect(ox, oy, x, y, length, breadth):
    """Append the outline of an axis-aligned rectangle with corner (x, y).

    ``length`` is the x extent, ``breadth`` the y extent. Edge order matches
    the original implementation: bottom, top, left, right. Returns the
    mutated coordinate lists.
    """
    edges = (
        (draw_line_x, x, y, length),            # bottom edge
        (draw_line_x, x, y + breadth, length),  # top edge
        (draw_line_y, x, y, breadth),           # left edge
        (draw_line_y, x + length, y, breadth),  # right edge
    )
    for edge_fn, edge_x, edge_y, extent in edges:
        edge_fn(ox, oy, edge_x, edge_y, extent)
    return ox, oy
def draw_layout():
    """Build the point lists (black) for the site boundary and plot grid.

    Returns (ox, oy): x/y coordinate lists of every outline point — the
    outer site boundary plus four rows of building plots, each plot drawn
    as an inner rectangle inside its own boundary rectangle.
    """
    ox, oy = [], []
    # Outer site boundary
    ox, oy = draw_rect(ox, oy, -60, 0, 470,300)
    # Sites, row 1 (inner plot rectangles)
    ox, oy = draw_rect(ox, oy, 40, 240,25, 30)
    ox, oy = draw_rect(ox, oy, 85, 240, 25, 30)
    ox, oy = draw_rect(ox, oy, 130, 240, 25, 30)
    ox, oy = draw_rect(ox, oy, 265, 240, 25, 30)
    ox, oy = draw_rect(ox, oy, 310, 240, 25, 30)
    # Outer boundary for row 1
    ox, oy = draw_rect(ox, oy, 30, 225, 45, 55)
    ox, oy = draw_rect(ox, oy, 75, 225, 45, 55)
    ox, oy = draw_rect(ox, oy, 120, 225, 45, 55)
    ox, oy = draw_rect(ox, oy, 255, 225, 45, 55)
    ox, oy = draw_rect(ox, oy, 300, 225, 45, 55)
    # Sites, row 2
    ox, oy = draw_rect(ox, oy, 40, 150, 25, 30)
    ox, oy = draw_rect(ox, oy, 85, 150, 25, 30)
    ox, oy = draw_rect(ox, oy, 130, 150, 25, 30)
    ox, oy = draw_rect(ox, oy, 310, 150, 25, 30)
    ox, oy = draw_rect(ox, oy, 355, 150, 25, 30)
    # Outer boundary for row 2
    ox, oy = draw_rect(ox, oy, 30, 140, 45, 55)
    ox, oy = draw_rect(ox, oy, 75, 140, 45, 55)
    ox, oy = draw_rect(ox, oy, 120, 140, 45, 55)
    ox, oy = draw_rect(ox, oy, 300, 140, 45, 55)
    ox, oy = draw_rect(ox, oy, 345, 140, 45, 55)
    # Sites, row 3
    ox, oy = draw_rect(ox, oy, 40,100, 25, 30)
    ox, oy = draw_rect(ox, oy, 85, 100, 25, 30)
    ox, oy = draw_rect(ox, oy, 130, 100, 25, 30)
    ox, oy = draw_rect(ox, oy, 310, 100, 25, 30)
    ox, oy = draw_rect(ox, oy,355 , 100, 25, 30)
    # Outer boundary for row 3
    ox, oy = draw_rect(ox, oy, 30, 85, 45, 55)
    ox, oy = draw_rect(ox, oy, 75, 85, 45, 55)
    ox, oy = draw_rect(ox, oy, 120, 85, 45, 55)
    ox, oy = draw_rect(ox, oy, 300, 85, 45, 55)
    ox, oy = draw_rect(ox, oy, 345, 85, 45, 55)
    # Sites, row 4
    ox, oy = draw_rect(ox, oy, 40, 10,25, 30)
    ox, oy = draw_rect(ox, oy, 85, 10, 25, 30)
    ox, oy = draw_rect(ox, oy, 130, 10, 25, 30)
    ox, oy = draw_rect(ox, oy, 310, 10, 25, 30)
    ox, oy = draw_rect(ox, oy, 355, 10, 25, 30)
    # Outer boundary for row 4
    ox, oy = draw_rect(ox, oy, 30, 0, 45, 55)
    ox, oy = draw_rect(ox, oy, 75, 0, 45, 55)
    ox, oy = draw_rect(ox, oy, 120,0, 45, 55)
    ox, oy = draw_rect(ox, oy, 300, 0, 45, 55)
    ox, oy = draw_rect(ox, oy, 345, 0, 45, 55)
    return ox, oy
def draw_empty_space():
    """Build the point lists (green) for the site facility areas.

    Returns (ox, oy): coordinate lists of all facility outlines, drawn in
    the original order. The number in each comment is the legend id used by
    the plot annotations (legend names taken from the figtext legend).
    """
    ox, oy = [], []
    shapes = (
        (draw_sqr, (-50, 265, 25)),       # 1  security shed
        (draw_rect, (-50, 65, 25, 135)),  # 2  parking
        (draw_sqr, (190, 240, 35)),       # 4  canteen
        (draw_sqr, (225, 150, 20)),       # 6  toilet
        (draw_rect, (190, 150, 25, 35)),  # 5  labour shed
        (draw_rect, (-50, 5, 40, 50)),    # 3  site office (annotation "3" falls here)
        (draw_rect, (360, 240, 45, 55)),  # 7  ware house
        (draw_rect, (190, 90, 30, 45)),   # 8  power house
        (draw_sqr, (240, 5, 25)),         # 10 Q/C lab
        (draw_rect, (230, 105, 40, 30)),  # 9  water tank
        (draw_sqr, (190, 5, 40)),         # 11 batching plant
    )
    for shape_fn, args in shapes:
        shape_fn(ox, oy, *args)
    return ox, oy
# Render the construction-site layout: buildings/boundaries in black,
# facility areas in green, with numbered annotations and a side legend.
plt.figure(figsize=(10, 8))
ox, oy = draw_layout()
plt.plot(ox, oy, "sk")
ox, oy = draw_empty_space()
plt.plot(ox, oy, "sg")
plt.axis("equal")
plt.grid(True)
# Number each facility area (ids match the legend below).
plt.annotate("1",xy=(-40,275))
plt.annotate("2",xy=(-40,135))
plt.annotate("3",xy=(-35,30))
plt.annotate("4",xy=(205,255))
plt.annotate("5",xy=(195,165))
plt.annotate("6",xy=(230,155))
plt.annotate("7",xy=(375,265))
plt.annotate("8",xy=(200,112))
plt.annotate("9",xy=(245,115))
plt.annotate("10",xy=(245,15))
plt.annotate("11",xy=(200,25))
plt.xlabel('X-Coordinates')
plt.ylabel('Y-Coordinates')
plt.title('Construction Site Layout Plan',fontsize=15,color="red")
# Legend mapping the annotation ids to facility names.
plt.figtext(0.905,0.8,"1=Security shed",fontsize=10,color="blue")
plt.figtext(0.905,0.77,"2=Parking",fontsize=10,color="blue")
plt.figtext(0.905,0.74,"3=Site office",fontsize=10,color="blue")
plt.figtext(0.905,0.71,"4=Canteen",fontsize=10,color="blue")
plt.figtext(0.905,0.68,"5=Labour Shed",fontsize=10,color="blue")
plt.figtext(0.905,0.65,"6=Toilet",fontsize=10,color="blue")
plt.figtext(0.905,0.62,"7=Ware House",fontsize=10,color="blue")
plt.figtext(0.905,0.59,"8=Power House",fontsize=10,color="blue")
plt.figtext(0.905,0.56,"9=Water tank",fontsize=10,color="blue")
plt.figtext(0.905,0.53,"10=Q/C Lab",fontsize=10,color="blue")
plt.figtext(0.905,0.50,"11=Batching Plant",fontsize=10,color="blue")
plt.show()
#plt.axis('scaled')
#plt.axis("square")
| 28.25 | 68 | 0.592518 |
import matplotlib.pyplot as plt
def draw_line_x(ox, oy, x, y, length):
for i in range(length+1):
ox.append(x+i)
oy.append(y)
return ox, oy
def draw_line_y(ox, oy, x, y, length):
for i in range(length+1):
ox.append(x)
oy.append(y+i)
return ox, oy
def draw_sqr(ox, oy, x, y, length):
draw_line_x(ox, oy, x, y, length)
draw_line_x(ox, oy, x, y+length, length)
draw_line_y(ox, oy, x, y, length)
draw_line_y(ox, oy, x + length, y, length)
return ox, oy
def draw_rect(ox, oy, x, y, length, breadth):
draw_line_x(ox, oy, x, y, length)
draw_line_x(ox, oy, x, y+breadth, length)
draw_line_y(ox, oy, x, y, breadth)
draw_line_y(ox, oy, x + length, y, breadth)
return ox, oy
def draw_layout():
ox, oy = [], []
ox, oy = draw_rect(ox, oy, -60, 0, 470,300)
ox, oy = draw_rect(ox, oy, 40, 240,25, 30)
ox, oy = draw_rect(ox, oy, 85, 240, 25, 30)
ox, oy = draw_rect(ox, oy, 130, 240, 25, 30)
ox, oy = draw_rect(ox, oy, 265, 240, 25, 30)
ox, oy = draw_rect(ox, oy, 310, 240, 25, 30)
ox, oy = draw_rect(ox, oy, 30, 225, 45, 55)
ox, oy = draw_rect(ox, oy, 75, 225, 45, 55)
ox, oy = draw_rect(ox, oy, 120, 225, 45, 55)
ox, oy = draw_rect(ox, oy, 255, 225, 45, 55)
ox, oy = draw_rect(ox, oy, 300, 225, 45, 55)
ox, oy = draw_rect(ox, oy, 40, 150, 25, 30)
ox, oy = draw_rect(ox, oy, 85, 150, 25, 30)
ox, oy = draw_rect(ox, oy, 130, 150, 25, 30)
ox, oy = draw_rect(ox, oy, 310, 150, 25, 30)
ox, oy = draw_rect(ox, oy, 355, 150, 25, 30)
ox, oy = draw_rect(ox, oy, 30, 140, 45, 55)
ox, oy = draw_rect(ox, oy, 75, 140, 45, 55)
ox, oy = draw_rect(ox, oy, 120, 140, 45, 55)
ox, oy = draw_rect(ox, oy, 300, 140, 45, 55)
ox, oy = draw_rect(ox, oy, 345, 140, 45, 55)
ox, oy = draw_rect(ox, oy, 40,100, 25, 30)
ox, oy = draw_rect(ox, oy, 85, 100, 25, 30)
ox, oy = draw_rect(ox, oy, 130, 100, 25, 30)
ox, oy = draw_rect(ox, oy, 310, 100, 25, 30)
ox, oy = draw_rect(ox, oy,355 , 100, 25, 30)
ox, oy = draw_rect(ox, oy, 30, 85, 45, 55)
ox, oy = draw_rect(ox, oy, 75, 85, 45, 55)
ox, oy = draw_rect(ox, oy, 120, 85, 45, 55)
ox, oy = draw_rect(ox, oy, 300, 85, 45, 55)
ox, oy = draw_rect(ox, oy, 345, 85, 45, 55)
ox, oy = draw_rect(ox, oy, 40, 10,25, 30)
ox, oy = draw_rect(ox, oy, 85, 10, 25, 30)
ox, oy = draw_rect(ox, oy, 130, 10, 25, 30)
ox, oy = draw_rect(ox, oy, 310, 10, 25, 30)
ox, oy = draw_rect(ox, oy, 355, 10, 25, 30)
ox, oy = draw_rect(ox, oy, 30, 0, 45, 55)
ox, oy = draw_rect(ox, oy, 75, 0, 45, 55)
ox, oy = draw_rect(ox, oy, 120,0, 45, 55)
ox, oy = draw_rect(ox, oy, 300, 0, 45, 55)
ox, oy = draw_rect(ox, oy, 345, 0, 45, 55)
return ox, oy
def draw_empty_space():
ox, oy = [], []
ox, oy = draw_sqr(ox, oy, -50, 265, 25)
ox, oy = draw_rect(ox, oy, -50,65,25,135)
ox, oy = draw_sqr(ox, oy,190,240,35)
ox, oy = draw_sqr(ox, oy, 225, 150, 20)
ox, oy = draw_rect(ox, oy, 190,150, 25,35)
ox, oy = draw_rect(ox, oy, -50, 5,40,50 )
ox, oy = draw_rect(ox, oy, 360, 240, 45,55)
ox, oy = draw_rect(ox, oy, 190,90,30,45)
ox, oy = draw_sqr(ox, oy, 240,5, 25)
ox, oy = draw_rect(ox, oy,230,105,40,30)
ox, oy = draw_sqr(ox, oy,190 , 5, 40)
return ox, oy
plt.figure(figsize=(10, 8))
ox, oy = draw_layout()
plt.plot(ox, oy, "sk")
ox, oy = draw_empty_space()
plt.plot(ox, oy, "sg")
plt.axis("equal")
plt.grid(True)
plt.annotate("1",xy=(-40,275))
plt.annotate("2",xy=(-40,135))
plt.annotate("3",xy=(-35,30))
plt.annotate("4",xy=(205,255))
plt.annotate("5",xy=(195,165))
plt.annotate("6",xy=(230,155))
plt.annotate("7",xy=(375,265))
plt.annotate("8",xy=(200,112))
plt.annotate("9",xy=(245,115))
plt.annotate("10",xy=(245,15))
plt.annotate("11",xy=(200,25))
plt.xlabel('X-Coordinates')
plt.ylabel('Y-Coordinates')
plt.title('Construction Site Layout Plan',fontsize=15,color="red")
plt.figtext(0.905,0.8,"1=Security shed",fontsize=10,color="blue")
plt.figtext(0.905,0.77,"2=Parking",fontsize=10,color="blue")
plt.figtext(0.905,0.74,"3=Site office",fontsize=10,color="blue")
plt.figtext(0.905,0.71,"4=Canteen",fontsize=10,color="blue")
plt.figtext(0.905,0.68,"5=Labour Shed",fontsize=10,color="blue")
plt.figtext(0.905,0.65,"6=Toilet",fontsize=10,color="blue")
plt.figtext(0.905,0.62,"7=Ware House",fontsize=10,color="blue")
plt.figtext(0.905,0.59,"8=Power House",fontsize=10,color="blue")
plt.figtext(0.905,0.56,"9=Water tank",fontsize=10,color="blue")
plt.figtext(0.905,0.53,"10=Q/C Lab",fontsize=10,color="blue")
plt.figtext(0.905,0.50,"11=Batching Plant",fontsize=10,color="blue")
plt.show()
| true | true |
f7f430b487c5929488c96ff1e3fae158917d68cc | 29 | py | Python | asymmetric_uncertainties/__init__.py | muryelgp/asymmetric_uncertainties | 7d16e07d46a14603d595778ec6deb509bd11743d | [
"MIT"
] | null | null | null | asymmetric_uncertainties/__init__.py | muryelgp/asymmetric_uncertainties | 7d16e07d46a14603d595778ec6deb509bd11743d | [
"MIT"
] | null | null | null | asymmetric_uncertainties/__init__.py | muryelgp/asymmetric_uncertainties | 7d16e07d46a14603d595778ec6deb509bd11743d | [
"MIT"
] | null | null | null | from .core import aufloat
| 9.666667 | 26 | 0.724138 | from .core import aufloat
| true | true |
f7f430cf71e1aecb7d9abaadcff32cf7704793f0 | 454 | py | Python | watchmen/raw_data/model_field.py | Insurance-Metrics-Measure-Advisory/watchman-data-connector | 8ecab0c5b28174f1611e51deba8d94a42f53d51d | [
"MIT"
] | 125 | 2021-03-13T07:39:46.000Z | 2022-02-28T03:14:17.000Z | watchmen/raw_data/model_field.py | Insurance-Metrics-Measure-Advisory/watchman-data-connector | 8ecab0c5b28174f1611e51deba8d94a42f53d51d | [
"MIT"
] | null | null | null | watchmen/raw_data/model_field.py | Insurance-Metrics-Measure-Advisory/watchman-data-connector | 8ecab0c5b28174f1611e51deba8d94a42f53d51d | [
"MIT"
] | 17 | 2021-03-13T07:31:58.000Z | 2021-05-20T09:38:02.000Z | from enum import Enum
from typing import Optional
from model.model.common.watchmen_model import WatchmenModel
class FieldType(str, Enum):
    """Closed set of logical data types a raw-data model field may take.

    Inherits ``str`` so members compare equal to (and serialize as) their
    plain string values, e.g. ``FieldType.NUM == "num"``.
    """

    NUM = "num"
    STR = "str"
    DATE = "date"
    # Kept lowercase for backward compatibility with existing callers;
    # prefer the uppercase TIME alias below in new code.
    time = "time"
    TIME = "time"  # alias of `time`, consistent with the other member names
    EMAIL = "email"
    ADDR = "address"
    PHONE = "phone"
    IdCard = "IDCard"
class ModelField(WatchmenModel):
    """Schema description of a single field of a raw-data model."""

    field_id: int = None  # unique identifier of the field
    name: str = None  # field name
    description: Optional[str] = None  # human-readable description
    type: str = None  # logical type — presumably a FieldType value; TODO confirm
    values: list = []  # NOTE(review): mutable class default — safe only if WatchmenModel is a pydantic-style model that copies defaults; verify
| 18.916667 | 59 | 0.634361 | from enum import Enum
from typing import Optional
from model.model.common.watchmen_model import WatchmenModel
class FieldType(str, Enum):
NUM = "num"
STR = "str"
DATE = "date"
time = "time"
EMAIL = "email"
ADDR = "address"
PHONE = "phone"
IdCard = "IDCard"
class ModelField(WatchmenModel):
field_id: int = None
name: str = None
description: Optional[str] = None
type: str = None
values: list = []
| true | true |
f7f4323e0853370a98fd843282c7e94c90bf80b5 | 4,857 | py | Python | uuv_control/uuv_control_utils/scripts/start_circular_trajectory.py | jpliquid/testActions2 | 6f314fa1430f654e5943e47ac278101b9c24f938 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | uuv_control/uuv_control_utils/scripts/start_circular_trajectory.py | jpliquid/testActions2 | 6f314fa1430f654e5943e47ac278101b9c24f938 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | uuv_control/uuv_control_utils/scripts/start_circular_trajectory.py | jpliquid/testActions2 | 6f314fa1430f654e5943e47ac278101b9c24f938 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
import sys
from numpy import pi
from uuv_control_msgs.srv import InitCircularTrajectory
from geometry_msgs.msg import Point
from std_msgs.msg import Time
from plankton_utils.time import time_in_float_sec
from plankton_utils import float_sec_to_int_sec_nano
from plankton_utils.time import is_sim_time
def main():
    """Request a circular trajectory from the ``start_circular_trajectory`` service.

    Reads the trajectory parameters from ROS parameters, validates them,
    builds the service request and reports success or failure.
    """
    rclpy.init()

    sim_time_param = is_sim_time()

    node = rclpy.create_node(
        'start_circular_trajectory',
        allow_undeclared_parameters=True,
        automatically_declare_parameters_from_overrides=True,
        parameter_overrides=[sim_time_param])

    node.get_logger().info('Starting the circular trajectory creator')

    # Important: when using sim time, wait until the clock has actually been
    # updated (a zero Time means no /clock message has arrived yet).
    while node.get_clock().now() == rclpy.time.Time():
        rclpy.spin_once(node)

    # If no start time is provided: start *now*.
    start_time = time_in_float_sec(node.get_clock().now())
    start_now = False
    if node.has_parameter('start_time'):
        start_time = node.get_parameter('start_time').value
        if start_time < 0.0:
            node.get_logger().warn('Negative start time, setting it to 0.0')
            start_time = 0.0
            start_now = True
    else:
        start_now = True

    # Required trajectory parameters; abort if any is missing.
    param_labels = ['radius', 'center', 'n_points', 'heading_offset',
                    'duration', 'max_forward_speed']
    params = dict()

    for label in param_labels:
        if not node.has_parameter(label):
            node.get_logger().error('{} must be provided for the trajectory generation!'.format(label))
            sys.exit(-1)

        params[label] = node.get_parameter(label).value

    if len(params['center']) != 3:
        node.get_logger().error('Center of circle must have 3 components (x, y, z)')
        sys.exit(-1)

    if params['n_points'] <= 2:
        node.get_logger().error('Number of points must be at least 2')
        sys.exit(-1)

    if params['max_forward_speed'] <= 0:
        node.get_logger().error('Velocity limit must be positive')
        sys.exit(-1)

    srv_name = 'start_circular_trajectory'
    traj_gen = node.create_client(InitCircularTrajectory, srv_name)

    if not traj_gen.wait_for_service(timeout_sec=20):
        node.get_logger().error('Service %s not available! Closing node...' %(traj_gen.srv_name))
        sys.exit(-1)

    node.get_logger().info('Generating trajectory that starts at t={} s'.format(start_time))

    # Convert the float seconds into the (sec, nanosec) pair the ROS message
    # stamp expects.
    (sec, nsec) = float_sec_to_int_sec_nano(start_time)

    req = InitCircularTrajectory.Request()
    req.start_time = rclpy.time.Time(seconds=sec, nanoseconds=nsec).to_msg()
    req.start_now = start_now
    # BUGFIX: the original line had a trailing comma, assigning a one-element
    # tuple instead of a scalar to the float field.
    req.radius = float(params['radius'])
    # BUGFIX: ROS 2 (rclpy) message constructors only accept keyword
    # arguments for their fields; positional args raise at runtime.
    req.center = Point(x=float(params['center'][0]),
                       y=float(params['center'][1]),
                       z=float(params['center'][2]))
    req.is_clockwise = False
    req.angle_offset = 0.0
    req.n_points = params['n_points']
    req.heading_offset = params['heading_offset'] * pi / 180
    req.max_forward_speed = params['max_forward_speed']
    req.duration = params['duration']

    future = traj_gen.call_async(req)
    # BUGFIX: the original passed the undefined name `self`; the node that
    # must spin while waiting for the service reply is `node`.
    rclpy.spin_until_future_complete(node, future)
    try:
        response = future.result()
    except Exception as e:
        # BUGFIX: the original line ended with a stray ':' (SyntaxError).
        node.get_logger().error('Service call ' + srv_name + ' failed, error=' + str(e))
    else:
        node.get_logger().info('Trajectory successfully generated!')
#==============================================================================
if __name__ == '__main__':
    # Run the trajectory request; report any error instead of crashing, and
    # always shut rclpy down cleanly if it is still initialized.
    try:
        main()
    except Exception as e:
        print('Something went wrong: ' + str(e))
    finally:
        if rclpy.ok():
            rclpy.shutdown()
| 34.446809 | 103 | 0.670167 |
import rclpy
import sys
from numpy import pi
from uuv_control_msgs.srv import InitCircularTrajectory
from geometry_msgs.msg import Point
from std_msgs.msg import Time
from plankton_utils.time import time_in_float_sec
from plankton_utils import float_sec_to_int_sec_nano
from plankton_utils.time import is_sim_time
def main():
rclpy.init()
sim_time_param = is_sim_time()
node = rclpy.create_node(
'start_circular_trajectory',
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
parameter_overrides=[sim_time_param])
node.get_logger().info('Starting the circular trajectory creator')
while node.get_clock().now() == rclpy.time.Time():
rclpy.spin_once(node)
start_time = time_in_float_sec(node.get_clock().now())
start_now = False
if node.has_parameter('start_time'):
start_time = node.get_parameter('start_time').value
if start_time < 0.0:
node.get_logger().warn('Negative start time, setting it to 0.0')
start_time = 0.0
start_now = True
else:
start_now = True
param_labels = ['radius', 'center', 'n_points', 'heading_offset',
'duration', 'max_forward_speed']
params = dict()
for label in param_labels:
if not node.has_parameter(label):
node.get_logger().error('{} must be provided for the trajectory generation!'.format(label))
sys.exit(-1)
params[label] = node.get_parameter(label).value
if len(params['center']) != 3:
node.get_logger().error('Center of circle must have 3 components (x, y, z)')
sys.exit(-1)
if params['n_points'] <= 2:
node.get_logger().error('Number of points must be at least 2')
sys.exit(-1)
if params['max_forward_speed'] <= 0:
node.get_logger().error('Velocity limit must be positive')
sys.exit(-1)
srv_name = 'start_circular_trajectory'
traj_gen = node.create_client(InitCircularTrajectory, srv_name)
if not traj_gen.wait_for_service(timeout_sec=20):
node.get_logger().error('Service %s not available! Closing node...' %(traj_gen.srv_name))
sys.exit(-1)
node.get_logger().info('Generating trajectory that starts at t={} s'.format(start_time))
(sec, nsec) = float_sec_to_int_sec_nano(start_time)
req = InitCircularTrajectory.Request()
req.start_time = rclpy.time.Time(seconds=sec, nanoseconds=nsec).to_msg()
req.start_now = start_now
req.radius = params['radius'],
req.center = Point(params['center'][0], params['center'][1], params['center'][2])
req.is_clockwise = False
req.angle_offset = 0.0
req.n_points = params['n_points']
req.heading_offset = params['heading_offset'] * pi / 180
req.max_forward_speed = params['max_forward_speed']
req.duration = params['duration']
future = traj_gen.call_async(req)
rclpy.spin_until_future_complete(self, future)
try:
response = future.result()
except Exception as e:
node.get_logger().error('Service call ' + srv_name + ' failed, error=' + str(e)):
else:
node.get_logger().info('Trajectory successfully generated!')
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Something went wrong: ' + str(e))
finally:
if rclpy.ok():
rclpy.shutdown()
| false | true |
f7f4328ed7300513d61a0658b55ff43d4f7c9c03 | 49 | py | Python | envisage/plugins/ipython_shell/view/api.py | robmcmullen/envisage | 57338fcb0ea69c75bc3c86de18a5967d8e78c6c1 | [
"BSD-3-Clause"
] | null | null | null | envisage/plugins/ipython_shell/view/api.py | robmcmullen/envisage | 57338fcb0ea69c75bc3c86de18a5967d8e78c6c1 | [
"BSD-3-Clause"
] | 1 | 2017-05-22T21:15:22.000Z | 2017-05-22T21:15:22.000Z | envisage/plugins/ipython_shell/view/api.py | robmcmullen/envisage | 57338fcb0ea69c75bc3c86de18a5967d8e78c6c1 | [
"BSD-3-Clause"
] | 1 | 2019-10-01T07:03:58.000Z | 2019-10-01T07:03:58.000Z | from .ipython_shell_view import IPythonShellView
| 24.5 | 48 | 0.897959 | from .ipython_shell_view import IPythonShellView
| true | true |
f7f432e2884c71fa52ceeef4531935fc810b8632 | 6,083 | py | Python | sim_004_complex_001/module/plot.py | dego1985/wave_simulation | 05f5119aab158e0958170d90066c2b87b998e658 | [
"MIT"
] | 1 | 2022-01-23T14:42:10.000Z | 2022-01-23T14:42:10.000Z | sim_004_complex_001/module/plot.py | dego1985/wave_simulation | 05f5119aab158e0958170d90066c2b87b998e658 | [
"MIT"
] | null | null | null | sim_004_complex_001/module/plot.py | dego1985/wave_simulation | 05f5119aab158e0958170d90066c2b87b998e658 | [
"MIT"
] | null | null | null | import numpy as np
from glumpy import app, gl, glm, gloo
import torch
import module.gpu_work as gw
class mesh():
    """Deformable surface mesh driven by a ``motion`` simulation object.

    Builds a regular N[0] x N[1] vertex grid in the x/y plane, glumpy
    vertex/index buffers for rendering, and CUDA torch tensors mirroring the
    vertex and color data so the simulation can update them on the GPU.
    """

    def __init__(self, motion):
        # Grid resolution (first two axes of the simulation) and spacing.
        self.motion = motion
        self.N = N = motion.N[:2]
        self.dx = dx = motion.dx

        # Vertices: grid centred on the origin; z is the simulation field
        # state returned by update_numpy().
        X = [dx * (np.arange(N[i]) - N[i] * 0.5) for i in range(2)]
        x, y = X
        x, y = np.meshgrid(x, y)
        z = motion.update_numpy()
        vertices = np.transpose([x, y, z], (1, 2, 0)).reshape(-1, 3)

        # Colors: random initial RGBA, overwritten every frame in update().
        colors = np.random.randn(len(vertices), 4).astype(np.float32)

        # Cell indices: for each grid cell, four quad vertices followed by
        # the four edge index pairs (used for GL_QUADS/GL_LINES drawing).
        idx = []
        for i in np.arange(N[1]-1):
            for j in np.arange(N[0]-1):
                offset = i * N[0] + j
                idx.append([offset, offset+1, offset+1+N[0], offset+N[0]] +
                           [offset, offset+N[0], offset+1, offset+1+N[0]])
        outline = np.array(idx).reshape(-1)

        # Point indices: one entry per vertex, for GL_POINTS drawing.
        idx = np.arange(N[0]*N[1])
        point_idx = np.array(idx).reshape(-1)

        ############################################################
        # glumpy vertex buffer: interleaved position (vec3) + color (vec4).
        dtype = [("position", np.float32, 3),
                 ("color", np.float32, 4)]
        VertexBuffer = np.zeros(len(vertices), dtype)
        VertexBuffer["position"] = vertices
        VertexBuffer["color"] = colors
        VertexBuffer = VertexBuffer.view(gloo.VertexBuffer)

        # glumpy index buffer for the quads/edges.
        outline = outline.astype(np.uint32).view(gloo.IndexBuffer)

        # glumpy index buffer for the points.
        point_idx = point_idx.astype(np.uint32).view(gloo.IndexBuffer)

        ############################################################
        # Expose the GL-side buffers.
        self.VertexBuffer = VertexBuffer
        self.outline = outline
        self.point_idx = point_idx

        ############################################################
        # CUDA torch mirrors, channels-first (1, C, N[0], N[1]): v holds
        # positions (C=3), c holds colors (C=4).
        v = torch.from_numpy(np.transpose(vertices, (1, 0)).reshape(1, 3, N[0], N[1]).astype(np.float32)).cuda()
        c = torch.from_numpy(np.transpose(colors, (1, 0)).reshape(1, 4, N[0], N[1]).astype(np.float32)).cuda()
        self.v = v
        self.c = c

    def update(self, dt=0):
        """Advance the simulation by ``dt`` and refresh the GPU tensors.

        Maps the new field value z to the RGBA color channels and to the
        (scaled) vertex height channel.
        """
        motion = self.motion
        v = self.v
        c = self.c

        z = motion.update(dt)
        zc = 0.5 * z

        # Field-amplitude-dependent coloring (R, G, B, A channels).
        c[0, 0] = 0 + 2*zc
        c[0, 1] = 0.5 - zc
        c[0, 2] = 1.0 + 2*zc
        c[0, 3] = 1
        # Vertex z channel, scaled down for display.
        v[0, 2] = z*0.3
class plot3d():
    """Glumpy/OpenGL viewer that renders and animates a ``mesh`` object.

    Owns the window, the GLSL shader program and the CUDA-registered vertex
    buffer; each frame pulls fresh data from ``obj`` via torch and redraws a
    slowly rotating surface. Constructing an instance blocks in ``app.run()``.
    """

    def __init__(self, obj):
        self.obj = obj
        # Rotation state in degrees, advanced every frame in on_draw().
        self.phi, self.theta = 0, 0

        # Set up window, shaders/buffers, first upload, then enter the
        # glumpy event loop (blocks until the window closes).
        self.init_window()
        self.bind_obj(obj)
        self.update_VertexBuffer()
        app.run()

    def init_window(self):
        """Create the app window and register the GL event handlers."""
        window = app.Window(width=1920, height=1080,
                            color=(0.30, 0.30, 0.35, 1.00))

        @window.event
        def on_init():
            gl.glEnable(gl.GL_DEPTH_TEST)
            gl.glPolygonOffset(1, 1)
            gl.glEnable(gl.GL_LINE_SMOOTH)
            gl.glLineWidth(0.55)

        @window.event
        def on_draw(dt):
            window.clear()
            self.on_draw(dt)

        @window.event
        def on_resize(width, height):
            # Keep the perspective projection matched to the aspect ratio.
            program = self.program
            program['projection'] = glm.perspective(
                45.0, width / float(height), 0.1, 100.0)

        self.window = window

    def bind_obj(self, obj):
        """Compile the shader program and bind the object's GL buffers."""
        # Simple MVP vertex shader; per-vertex color modulated by a uniform.
        vertex = """
        uniform vec4 ucolor;
        uniform mat4 model;
        uniform mat4 view;
        uniform mat4 projection;

        attribute vec3 position;
        attribute vec4 color;

        varying vec4 v_color;
        void main()
        {
            v_color = ucolor * color;
            gl_Position = projection * view * model * vec4(position,1.0);
        }
        """

        fragment = """
        varying vec4 v_color;

        void main()
        {
            gl_FragColor = v_color;
        }
        """
        VertexBuffer = obj.VertexBuffer
        outline = obj.outline
        point_idx = obj.point_idx

        program = gloo.Program(vertex, fragment)
        program.bind(VertexBuffer)
        program['model'] = np.eye(4, dtype=np.float32)
        program['view'] = glm.translation(0, 0, -5)

        # Activate once so the GL buffer object exists, then register it
        # with CUDA for direct torch->GL copies (via gpu_work helper).
        VertexBuffer.activate()
        VertexBuffer.deactivate()
        self.RegisteredBuffer = gw.make_RegisteredBuffer(VertexBuffer)

        self.program = program
        self.outline = outline
        self.point_idx = point_idx

    def update_VertexBuffer(self, dt=0):
        """Step the simulation and copy fresh vertex+color data into GL."""
        # Concatenate position (3) and color (4) channels to match the
        # interleaved vertex-buffer layout, channels-last in memory.
        self.obj.update(dt)
        v = self.obj.v
        c = self.obj.c
        V_ = torch.cat((v, c), dim=1)
        V_ = V_.contiguous(memory_format=torch.channels_last)

        # Device-to-device copy into the CUDA-registered GL buffer.
        gw.copy_torch2RegisteredBuffer(self.RegisteredBuffer, V_[0])

    def on_draw(self, dt):
        """Per-frame callback: update data, draw the surface, rotate it."""
        program = self.program
        window = self.window

        # Show the current FPS in the window title.
        window.set_title(str(
            window.fps).encode("ascii"))

        self.update_VertexBuffer(dt)

        # # Point rendering variant, kept for reference:
        # gl.glDisable(gl.GL_BLEND)
        # gl.glEnable(gl.GL_DEPTH_TEST)
        # gl.glPointSize(5)
        # program['ucolor'] = 1, 1, 1, 1
        # program.draw(gl.GL_POINTS, self.point_idx)

        # Filled quads.
        gl.glDisable(gl.GL_BLEND)
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_POLYGON_OFFSET_FILL)
        program['ucolor'] = 1, 1, 1, 1
        program.draw(gl.GL_QUADS, self.outline)

        # Outline overlay variant, kept for reference:
        # gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)
        # gl.glEnable(gl.GL_BLEND)
        # gl.glDepthMask(gl.GL_FALSE)
        # program['ucolor'] = 0, 0, 0, 1
        # program.draw(gl.GL_LINES, self.outline)
        # gl.glDepthMask(gl.GL_TRUE)

        # Advance the rotation (degrees) and rebuild the model matrix.
        self.theta += 0*dt  # degrees
        self.phi += 2*dt  # degrees
        model = np.eye(4, dtype=np.float32)
        glm.rotate(model, -90, 1, 0, 0)
        glm.rotate(model, self.theta, 0, 0, 1)
        glm.rotate(model, self.phi, 0, 1, 0)
        glm.rotate(model, 45, 1, 0, 0)
        program['model'] = model
| 28.558685 | 112 | 0.515864 | import numpy as np
from glumpy import app, gl, glm, gloo
import torch
import module.gpu_work as gw
class mesh():
def __init__(self, motion):
self.motion = motion
self.N = N = motion.N[:2]
self.dx = dx = motion.dx
X = [dx * (np.arange(N[i]) - N[i] * 0.5) for i in range(2)]
x, y = X
x, y = np.meshgrid(x, y)
z = motion.update_numpy()
vertices = np.transpose([x, y, z], (1, 2, 0)).reshape(-1, 3)
colors = np.random.randn(len(vertices), 4).astype(np.float32)
idx = []
for i in np.arange(N[1]-1):
for j in np.arange(N[0]-1):
offset = i * N[0] + j
idx.append([offset, offset+1, offset+1+N[0], offset+N[0]] +
[offset, offset+N[0], offset+1, offset+1+N[0]])
outline = np.array(idx).reshape(-1)
idx = np.arange(N[0]*N[1])
point_idx = np.array(idx).reshape(-1)
| true | true |
f7f4338df08dffce7d0e0fd30efc8a72dfde0455 | 8,031 | py | Python | model.py | WeiChengTseng/Basic_Peptides_Model | 0b2bb8f157ec4c9752382eca8ffcbaca94fcaa45 | [
"MIT"
] | 4 | 2018-03-26T00:14:48.000Z | 2022-01-11T06:00:21.000Z | model.py | WeiChengTseng/Basic_Peptides_Model | 0b2bb8f157ec4c9752382eca8ffcbaca94fcaa45 | [
"MIT"
] | null | null | null | model.py | WeiChengTseng/Basic_Peptides_Model | 0b2bb8f157ec4c9752382eca8ffcbaca94fcaa45 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import math
import os
class Model():
    """1-D CNN multi-label classifier for peptide sequences (TF1 graph).

    Architecture: six conv1d blocks (leaky ReLU + max-pool + batch norm)
    followed by two fully connected layers. Trained with sigmoid
    cross-entropy (one independent sigmoid per label) plus L2 weight
    decay. NOTE: graph node names (e.g. the 'bm_7' batch-norm scope,
    presumably a typo for 'bn_7') are kept as-is because renaming them
    would invalidate existing checkpoints.
    """
    def __init__(self, num_label, word_dim=10, batch_size=32):
        # num_label: number of output labels (one sigmoid unit each)
        # word_dim: embedding dimension of each sequence position
        # batch_size: fixed batch size; baked into the flatten/reshape below
        self.num_label = num_label
        self.word_dim = word_dim
        self.batch_size = batch_size
        return

    def build(self, x, y, reg, keep_prob):
        """
        Build the model.
        Input:
        - x: the input data, that is, the peptide sequences.
        - y: the ground truth of the peptides.
        - reg: the weight of the regression.
        Output:
        - loss: the loss of the model.
        - logits: the result of the logit regression.
        - predict: the prediction of the peptides.
        """
        logits, params = self.sub_model(x, keep_prob)
        loss = self.loss(y, logits, params, reg)
        predict = self.predict(logits)
        return loss, logits, predict

    def sub_model(self, x, keep_prob):
        """
        Define the architecture of the model.
        Input:
        - x: the input data, that is, the peptide sequences.
        - keep_prob: the keep probability of dropout.
        Output:
        - logits: the result of the logit regression.
        - params: some weights and filters used in the model.
        """
        params = []
        with tf.name_scope('filters'):
            # conv kernels: [filter_width, in_channels, out_channels]
            Filter1 = tf.Variable(tf.truncated_normal([6, self.word_dim, 128], stddev = 0.1), name = 'Filter_1')
            Filter2 = tf.Variable(tf.truncated_normal([6, 128, 128], stddev = 0.1), name = 'Filter_2')
            Filter3 = tf.Variable(tf.truncated_normal([5, 128, 256], stddev = 0.1), name = 'Filter_3')
            Filter4 = tf.Variable(tf.truncated_normal([5, 256, 256], stddev = 0.1), name = 'Filter_4')
            Filter5 = tf.Variable(tf.truncated_normal([5, 256, 512], stddev = 0.1), name = 'Filter_5')
            Filter6 = tf.Variable(tf.truncated_normal([5, 512, 512], stddev = 0.1), name = 'Filter_6')
            self.variable_summaries(Filter1)
            self.variable_summaries(Filter2)
            self.variable_summaries(Filter3)
            self.variable_summaries(Filter4)
            self.variable_summaries(Filter5)
            self.variable_summaries(Filter6)
        with tf.name_scope('weights'):
            # 7168 = flattened conv-stack output size; this is tied to a
            # fixed input sequence length -- TODO confirm against the
            # data pipeline before changing any conv/pool parameters.
            W7 = tf.Variable(tf.truncated_normal([7168, 1024], stddev = 0.1), name = 'W7')
            W8 = tf.Variable(tf.truncated_normal([1024, self.num_label], stddev = 0.1), name = 'W8')
            self.variable_summaries(W7)
            self.variable_summaries(W8)
        with tf.name_scope('bias'):
            b1 = tf.Variable(tf.zeros([128]), name = 'b1')
            b2 = tf.Variable(tf.zeros([128]), name = 'b2')
            b3 = tf.Variable(tf.zeros([256]), name = 'b3')
            b4 = tf.Variable(tf.zeros([256]), name = 'b4')
            b5 = tf.Variable(tf.zeros([512]), name = 'b5')
            b6 = tf.Variable(tf.zeros([512]), name = 'b6')
            b7 = tf.Variable(tf.zeros([1024]), name = 'b7')
            b8 = tf.Variable(tf.zeros([self.num_label]), name = 'b8')
            self.variable_summaries(b1)
            self.variable_summaries(b2)
            self.variable_summaries(b3)
            self.variable_summaries(b4)
            self.variable_summaries(b5)
            self.variable_summaries(b6)
            self.variable_summaries(b7)
            self.variable_summaries(b8)
        alpha = 0.2  # negative slope for all leaky ReLU activations
        with tf.name_scope('Conv_1'):
            L1 = tf.nn.conv1d(x, Filter1, stride = 1, padding = 'VALID', data_format='NHWC') + b1
        with tf.name_scope('leaky_relu_1'):
            L1_act = tf.nn.leaky_relu(L1, alpha)
        # block 1 has no pooling; blocks 2-6 each halve the length
        L1_bn = tf.layers.batch_normalization(L1_act, scale = False, name = 'bn_1')
        with tf.name_scope('Conv_2'):
            L2 = tf.nn.conv1d(L1_bn, Filter2, stride = 1, padding = 'VALID') + b2
        with tf.name_scope('leaky_relu_2'):
            L2_act = tf.nn.leaky_relu(L2, alpha)
        L2_pooled = tf.layers.max_pooling1d(L2_act, pool_size = 2, strides = 2, name = 'max_pool_2')
        L2_bn = tf.layers.batch_normalization(L2_pooled, scale = False, name = 'bn_2')
        with tf.name_scope('Conv_3'):
            L3 = tf.nn.conv1d(L2_bn, Filter3, stride = 1, padding = 'VALID') + b3
        with tf.name_scope('leaky_relu_3'):
            L3_act = tf.nn.leaky_relu(L3, alpha)
        L3_pooled = tf.layers.max_pooling1d(L3_act, pool_size = 2, strides = 2, name = 'max_pool_3')
        L3_bn = tf.layers.batch_normalization(L3_pooled, scale = False, name = 'bn_3')
        with tf.name_scope('Conv_4'):
            L4 = tf.nn.conv1d(L3_bn, Filter4, stride = 1, padding = 'VALID') + b4
        with tf.name_scope('leaky_relu_4'):
            L4_act = tf.nn.leaky_relu(L4, alpha)
        L4_pooled = tf.layers.max_pooling1d(L4_act, pool_size = 2, strides = 2, name = 'max_pool_4')
        L4_bn = tf.layers.batch_normalization(L4_pooled, scale = False, name = 'bn_4')
        with tf.name_scope('Conv_5'):
            L5 = tf.nn.conv1d(L4_bn, Filter5, stride = 1, padding = 'VALID') + b5
        with tf.name_scope('leaky_relu_5'):
            L5_act = tf.nn.leaky_relu(L5, alpha)
        L5_pooled = tf.layers.max_pooling1d(L5_act, pool_size = 2, strides = 2, name = 'max_pool_5')
        L5_bn = tf.layers.batch_normalization(L5_pooled, scale = False, name = 'bn_5')
        with tf.name_scope('Conv_6'):
            L6 = tf.nn.conv1d(L5_bn, Filter6, stride = 1, padding = 'VALID') + b6
        with tf.name_scope('leaky_relu_6'):
            L6_act = tf.nn.leaky_relu(L6, alpha)
        L6_pooled = tf.layers.max_pooling1d(L6_act, pool_size = 2, strides = 2, name = 'max_pool_6')
        L6_bn = tf.layers.batch_normalization(L6_pooled, scale = False, name = 'bn_6')
        # flatten to [batch_size, 7168] for the fully connected head
        reshaped_data = tf.reshape(L6_bn, shape = (self.batch_size, -1), name = 'reshape')
        with tf.name_scope('full_connected_7'):
            L7 = tf.matmul(reshaped_data, W7) + b7
        with tf.name_scope('leaky_relu_7'):
            L7_act = tf.nn.leaky_relu(L7, alpha)
        L7_dropout = tf.nn.dropout(L7_act, keep_prob=keep_prob, name = 'dropout')
        L7_bn = tf.layers.batch_normalization(L7_dropout, scale = True, name = 'bm_7')
        with tf.name_scope('full_connected_8'):
            L8 = tf.matmul(L7_bn, W8) + b8
        logits = L8
        # collect the weights subject to L2 regularization (biases excluded)
        params += [Filter1, Filter2, Filter3, Filter4, Filter5, Filter6]
        params += [W7, W8]
        return logits, params

    def predict(self, logits):
        """
        Predict the labels according to the model.
        Input:
        - logits: the result of the logit regression.
        Output:
        - x: the result of the prediction (per-label probabilities in [0, 1])
        """
        x = tf.nn.sigmoid(logits)
        return x

    def loss(self, labels, logits, params, reg):
        """
        Define the loss of the model.
        Input:
        - labels: the ground truth of the prediction.
        - logits: the result of the logit regression.
        - params: some weights and filters used in the model.
        - reg: the weight of the L2 loss
        Output:
        - loss: the loss of the model.
        """
        L2_loss_list = list(map(tf.nn.l2_loss, params))
        L2_loss = tf.add_n(L2_loss_list)
        # multi-label objective: independent sigmoid cross-entropy per label
        loss = tf.losses.sigmoid_cross_entropy(labels, logits) + L2_loss * reg
        tf.summary.scalar('loss', loss)
        return loss

    def variable_summaries(self, var):
        """
        Define the tensorboard scalar and histogram summary.
        Input:
        - var: the variable we want to summarize in tensorboard.
        """
        with tf.name_scope("summaries"):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))
            tf.summary.scalar('stddev',stddev)
            tf.summary.scalar('max',tf.reduce_max(var))
            tf.summary.scalar('min',tf.reduce_min(var))
            tf.summary.histogram('histogram',var)
        return
import numpy as np
import math
import os
class Model():
def __init__(self, num_label, word_dim=10, batch_size=32):
self.num_label = num_label
self.word_dim = word_dim
self.batch_size = batch_size
return
def build(self, x, y, reg, keep_prob):
logits, params = self.sub_model(x, keep_prob)
loss = self.loss(y, logits, params, reg)
predict = self.predict(logits)
return loss, logits, predict
def sub_model(self, x, keep_prob):
params = []
with tf.name_scope('filters'):
Filter1 = tf.Variable(tf.truncated_normal([6, self.word_dim, 128], stddev = 0.1), name = 'Filter_1')
Filter2 = tf.Variable(tf.truncated_normal([6, 128, 128], stddev = 0.1), name = 'Filter_2')
Filter3 = tf.Variable(tf.truncated_normal([5, 128, 256], stddev = 0.1), name = 'Filter_3')
Filter4 = tf.Variable(tf.truncated_normal([5, 256, 256], stddev = 0.1), name = 'Filter_4')
Filter5 = tf.Variable(tf.truncated_normal([5, 256, 512], stddev = 0.1), name = 'Filter_5')
Filter6 = tf.Variable(tf.truncated_normal([5, 512, 512], stddev = 0.1), name = 'Filter_6')
self.variable_summaries(Filter1)
self.variable_summaries(Filter2)
self.variable_summaries(Filter3)
self.variable_summaries(Filter4)
self.variable_summaries(Filter5)
self.variable_summaries(Filter6)
with tf.name_scope('weights'):
W7 = tf.Variable(tf.truncated_normal([7168, 1024], stddev = 0.1), name = 'W7')
W8 = tf.Variable(tf.truncated_normal([1024, self.num_label], stddev = 0.1), name = 'W8')
self.variable_summaries(W7)
self.variable_summaries(W8)
with tf.name_scope('bias'):
b1 = tf.Variable(tf.zeros([128]), name = 'b1')
b2 = tf.Variable(tf.zeros([128]), name = 'b2')
b3 = tf.Variable(tf.zeros([256]), name = 'b3')
b4 = tf.Variable(tf.zeros([256]), name = 'b4')
b5 = tf.Variable(tf.zeros([512]), name = 'b5')
b6 = tf.Variable(tf.zeros([512]), name = 'b6')
b7 = tf.Variable(tf.zeros([1024]), name = 'b7')
b8 = tf.Variable(tf.zeros([self.num_label]), name = 'b8')
self.variable_summaries(b1)
self.variable_summaries(b2)
self.variable_summaries(b3)
self.variable_summaries(b4)
self.variable_summaries(b5)
self.variable_summaries(b6)
self.variable_summaries(b7)
self.variable_summaries(b8)
alpha = 0.2
with tf.name_scope('Conv_1'):
L1 = tf.nn.conv1d(x, Filter1, stride = 1, padding = 'VALID', data_format='NHWC') + b1
with tf.name_scope('leaky_relu_1'):
L1_act = tf.nn.leaky_relu(L1, alpha)
L1_bn = tf.layers.batch_normalization(L1_act, scale = False, name = 'bn_1')
with tf.name_scope('Conv_2'):
L2 = tf.nn.conv1d(L1_bn, Filter2, stride = 1, padding = 'VALID') + b2
with tf.name_scope('leaky_relu_2'):
L2_act = tf.nn.leaky_relu(L2, alpha)
L2_pooled = tf.layers.max_pooling1d(L2_act, pool_size = 2, strides = 2, name = 'max_pool_2')
L2_bn = tf.layers.batch_normalization(L2_pooled, scale = False, name = 'bn_2')
with tf.name_scope('Conv_3'):
L3 = tf.nn.conv1d(L2_bn, Filter3, stride = 1, padding = 'VALID') + b3
with tf.name_scope('leaky_relu_3'):
L3_act = tf.nn.leaky_relu(L3, alpha)
L3_pooled = tf.layers.max_pooling1d(L3_act, pool_size = 2, strides = 2, name = 'max_pool_3')
L3_bn = tf.layers.batch_normalization(L3_pooled, scale = False, name = 'bn_3')
with tf.name_scope('Conv_4'):
L4 = tf.nn.conv1d(L3_bn, Filter4, stride = 1, padding = 'VALID') + b4
with tf.name_scope('leaky_relu_4'):
L4_act = tf.nn.leaky_relu(L4, alpha)
L4_pooled = tf.layers.max_pooling1d(L4_act, pool_size = 2, strides = 2, name = 'max_pool_4')
L4_bn = tf.layers.batch_normalization(L4_pooled, scale = False, name = 'bn_4')
with tf.name_scope('Conv_5'):
L5 = tf.nn.conv1d(L4_bn, Filter5, stride = 1, padding = 'VALID') + b5
with tf.name_scope('leaky_relu_5'):
L5_act = tf.nn.leaky_relu(L5, alpha)
L5_pooled = tf.layers.max_pooling1d(L5_act, pool_size = 2, strides = 2, name = 'max_pool_5')
L5_bn = tf.layers.batch_normalization(L5_pooled, scale = False, name = 'bn_5')
with tf.name_scope('Conv_6'):
L6 = tf.nn.conv1d(L5_bn, Filter6, stride = 1, padding = 'VALID') + b6
with tf.name_scope('leaky_relu_6'):
L6_act = tf.nn.leaky_relu(L6, alpha)
L6_pooled = tf.layers.max_pooling1d(L6_act, pool_size = 2, strides = 2, name = 'max_pool_6')
L6_bn = tf.layers.batch_normalization(L6_pooled, scale = False, name = 'bn_6')
reshaped_data = tf.reshape(L6_bn, shape = (self.batch_size, -1), name = 'reshape')
with tf.name_scope('full_connected_7'):
L7 = tf.matmul(reshaped_data, W7) + b7
with tf.name_scope('leaky_relu_7'):
L7_act = tf.nn.leaky_relu(L7, alpha)
L7_dropout = tf.nn.dropout(L7_act, keep_prob=keep_prob, name = 'dropout')
L7_bn = tf.layers.batch_normalization(L7_dropout, scale = True, name = 'bm_7')
with tf.name_scope('full_connected_8'):
L8 = tf.matmul(L7_bn, W8) + b8
logits = L8
params += [Filter1, Filter2, Filter3, Filter4, Filter5, Filter6]
params += [W7, W8]
return logits, params
def predict(self, logits):
x = tf.nn.sigmoid(logits)
return x
def loss(self, labels, logits, params, reg):
L2_loss_list = list(map(tf.nn.l2_loss, params))
L2_loss = tf.add_n(L2_loss_list)
loss = tf.losses.sigmoid_cross_entropy(labels, logits) + L2_loss * reg
tf.summary.scalar('loss', loss)
return loss
def variable_summaries(self, var):
with tf.name_scope("summaries"):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))
tf.summary.scalar('stddev',stddev)
tf.summary.scalar('max',tf.reduce_max(var))
tf.summary.scalar('min',tf.reduce_min(var))
tf.summary.histogram('histogram',var)
return
| true | true |
f7f4363e814bf5134b315da52f9bbcdd6299fc1e | 6,210 | py | Python | tensorflow/python/kernel_tests/accumulate_n_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/kernel_tests/accumulate_n_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/python/kernel_tests/accumulate_n_test.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for new version of accumulate_n op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.control_flow_ops import while_loop as while_loop_v1
from tensorflow.python.platform import googletest
class AccumulateNV2Test(test_util.TensorFlowTestCase):
  """Tests of the new, differentiable version of accumulate_n."""

  @test_util.run_deprecated_v1
  def testFloat(self):
    """accumulate_n over float tensors equals element-wise sum."""
    np.random.seed(12345)
    x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
    tf_x = ops.convert_n_to_tensor(x)
    with self.session(use_gpu=True):
      self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
      # repeating the same tensor must behave like scalar multiplication
      self.assertAllClose(x[0] * 5,
                          math_ops.accumulate_n([tf_x[0]] * 5).eval())

  @test_util.run_deprecated_v1
  def testInt(self):
    """accumulate_n over integer tensors equals element-wise sum."""
    np.random.seed(54321)
    x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
    tf_x = ops.convert_n_to_tensor(x)
    with self.session(use_gpu=True):
      self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
      self.assertAllEqual(x[0] * 6,
                          math_ops.accumulate_n([tf_x[0]] * 6).eval())

  @test_util.run_deprecated_v1
  def testUnknownShape(self):
    """Works with placeholders whose static shape is partially unknown."""
    with self.session(use_gpu=True):
      x0 = array_ops.placeholder(dtype=dtypes_lib.int32, shape=[None])
      acc = math_ops.accumulate_n([x0, x0], shape=[None])
      self.assertAllEqual([2, 4], acc.eval(feed_dict={x0: [1, 2]}))

  @test_util.run_deprecated_v1
  def testGrad(self):
    """Gradient of a sum w.r.t. each input is 1 (the op is differentiable)."""
    np.random.seed(42)
    for num_inputs in range(1, 10):
      with self.cached_session(use_gpu=True) as sess:
        input_vars = [
            variables.Variable(10.0 * np.random.random())
            for _ in range(0, num_inputs)
        ]
        accum_n = math_ops.accumulate_n(input_vars)
        self.evaluate(variables.global_variables_initializer())
        accum_n_grad = gradients.gradients(accum_n, input_vars)
        self.assertAllEqual(
            np.repeat(1.0, num_inputs),  # d/dx (x + y + ...) = 1
            [g.eval() for g in accum_n_grad])

  # The tests below used to be in a separate class under cwise_ops_test.py,
  # which did not run in the default test target.
  # Putting them here so that everything that exercises AccumulateNV2 is in
  # one place and the default build runs all unit tests.

  def testSimple(self):
    """Sum of 20 random float tensors matches the numpy reference."""
    with self.cached_session():
      random_arrays = [
          np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
      ]
      random_tensors = [
          ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
          for x in random_arrays
      ]
      tf_val = math_ops.accumulate_n(random_tensors)
      np_val = random_arrays[0]
      for random_array in random_arrays[1:]:
        np_val += random_array
      self.assertAllClose(np_val, self.evaluate(tf_val))

  # Test that AccumulateNV2 rewrite correctly add edges necessary to propagate
  # while loop execution frame to all nodes.
  def testAccumulateInsideWhileLoop(self):
    """accumulate_n inside a while_loop body produces the correct sum."""
    with self.cached_session():
      random_arrays = [
          np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
      ]
      random_tensors = [
          ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
          for x in random_arrays
      ]

      def cond_fn(i, acc, tensors):
        del acc, tensors  # unused
        return i < 1  # do just one iteration

      def body_fn(i, acc, tensors):
        return i + 1, acc + math_ops.accumulate_n(tensors), tensors

      zeros = np.zeros((16, 16, 16, 16)).astype(np.float32)
      _, tf_val, _ = while_loop_v1(cond_fn, body_fn, (0, zeros, random_tensors))

      np_val = random_arrays[0]
      for random_array in random_arrays[1:]:
        np_val += random_array
      self.assertAllClose(np_val, self.evaluate(tf_val))

  def testZeroArgs(self):
    """An empty input list is rejected with ValueError."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        tf_val = math_ops.accumulate_n([])
        self.evaluate(tf_val)

  def testWrongShape(self):
    """A `shape` argument inconsistent with the inputs raises ValueError."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        a = variables.Variable(0.2)
        b = variables.Variable(0.1)
        math_ops.accumulate_n([a, b], shape=[2, 2])  # Should be shape=[]

  def testIncompatibleShapes(self):
    """Inputs whose shapes cannot be unified raise ValueError."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        a = variables.Variable(np.array([0.1, 0.2]))
        b = variables.Variable(np.array([[0.3], [0.4]]))
        math_ops.accumulate_n([a, b])

  def testWrongType(self):
    """A tensor_dtype that disagrees with the inputs raises TypeError."""
    with self.cached_session():
      with self.assertRaises(TypeError):
        a = variables.Variable(0.2, dtype=np.float32)
        b = variables.Variable(0.1, dtype=np.float32)
        math_ops.accumulate_n([a, b], tensor_dtype=np.int32)

  def testWrongTypeOneInput(self):
    # Scenario that used to trigger a bug, even when testWrongType() worked
    with self.cached_session():
      with self.assertRaises(TypeError):
        a = variables.Variable(0.2, dtype=np.float32)
        math_ops.accumulate_n([a], tensor_dtype=np.int32)
if __name__ == "__main__":
  googletest.main()  # discover and run all test cases in this module
| 38.571429 | 80 | 0.675201 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.control_flow_ops import while_loop as while_loop_v1
from tensorflow.python.platform import googletest
class AccumulateNV2Test(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
with self.session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllClose(x[0] * 5,
math_ops.accumulate_n([tf_x[0]] * 5).eval())
@test_util.run_deprecated_v1
def testInt(self):
np.random.seed(54321)
x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
tf_x = ops.convert_n_to_tensor(x)
with self.session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllEqual(x[0] * 6,
math_ops.accumulate_n([tf_x[0]] * 6).eval())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.session(use_gpu=True):
x0 = array_ops.placeholder(dtype=dtypes_lib.int32, shape=[None])
acc = math_ops.accumulate_n([x0, x0], shape=[None])
self.assertAllEqual([2, 4], acc.eval(feed_dict={x0: [1, 2]}))
@test_util.run_deprecated_v1
def testGrad(self):
np.random.seed(42)
for num_inputs in range(1, 10):
with self.cached_session(use_gpu=True) as sess:
input_vars = [
variables.Variable(10.0 * np.random.random())
for _ in range(0, num_inputs)
]
accum_n = math_ops.accumulate_n(input_vars)
self.evaluate(variables.global_variables_initializer())
accum_n_grad = gradients.gradients(accum_n, input_vars)
self.assertAllEqual(
np.repeat(1.0, num_inputs),
[g.eval() for g in accum_n_grad])
def testSimple(self):
with self.cached_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
tf_val = math_ops.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, self.evaluate(tf_val))
def testAccumulateInsideWhileLoop(self):
with self.cached_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
def cond_fn(i, acc, tensors):
del acc, tensors
return i < 1
def body_fn(i, acc, tensors):
return i + 1, acc + math_ops.accumulate_n(tensors), tensors
zeros = np.zeros((16, 16, 16, 16)).astype(np.float32)
_, tf_val, _ = while_loop_v1(cond_fn, body_fn, (0, zeros, random_tensors))
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, self.evaluate(tf_val))
def testZeroArgs(self):
with self.cached_session():
with self.assertRaises(ValueError):
tf_val = math_ops.accumulate_n([])
self.evaluate(tf_val)
def testWrongShape(self):
with self.cached_session():
with self.assertRaises(ValueError):
a = variables.Variable(0.2)
b = variables.Variable(0.1)
math_ops.accumulate_n([a, b], shape=[2, 2])
def testIncompatibleShapes(self):
with self.cached_session():
with self.assertRaises(ValueError):
a = variables.Variable(np.array([0.1, 0.2]))
b = variables.Variable(np.array([[0.3], [0.4]]))
math_ops.accumulate_n([a, b])
def testWrongType(self):
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
b = variables.Variable(0.1, dtype=np.float32)
math_ops.accumulate_n([a, b], tensor_dtype=np.int32)
def testWrongTypeOneInput(self):
with self.cached_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
math_ops.accumulate_n([a], tensor_dtype=np.int32)
if __name__ == "__main__":
googletest.main()
| true | true |
f7f4381783b3ce17d0526956a59b9ffbe4ffe14c | 437 | py | Python | tensorflowonspark/util.py | jimdowling/tfspark | 0efca1001ec21f01b7948ac1518936aeb130eb65 | [
"Apache-2.0"
] | null | null | null | tensorflowonspark/util.py | jimdowling/tfspark | 0efca1001ec21f01b7948ac1518936aeb130eb65 | [
"Apache-2.0"
] | null | null | null | tensorflowonspark/util.py | jimdowling/tfspark | 0efca1001ec21f01b7948ac1518936aeb130eb65 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import socket
def get_ip_address():
  """Return this host's primary outbound IPv4 address as a string.

  "Connecting" a UDP socket does not send any packets; it only asks the
  kernel to select the local interface/address that would be used to
  reach the given destination (8.8.8.8), which is then read back with
  getsockname().

  Raises:
    OSError: if no route to the destination exists (e.g. no network).
  """
  s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  try:
    s.connect(("8.8.8.8", 80))
    return s.getsockname()[0]
  finally:
    # the original implementation leaked the socket; always release the fd
    s.close()
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import socket
def get_ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
| true | true |
f7f4388df0a168d57409b15e2f24a950535c00bd | 395 | py | Python | backend/inventory/management/commands/initadmin.py | BENBRIKMouad/cid | 3f242553771c60a0fe6400e25f80d8bd86f5c6fb | [
"CC0-1.0"
] | 1 | 2021-08-29T12:09:30.000Z | 2021-08-29T12:09:30.000Z | backend/inventory/management/commands/initadmin.py | BENBRIKMouad/cid | 3f242553771c60a0fe6400e25f80d8bd86f5c6fb | [
"CC0-1.0"
] | null | null | null | backend/inventory/management/commands/initadmin.py | BENBRIKMouad/cid | 3f242553771c60a0fe6400e25f80d8bd86f5c6fb | [
"CC0-1.0"
] | null | null | null | from django.conf import settings
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
class Command(BaseCommand):
    """Bootstrap a default 'admin' superuser on an empty database.

    Intended for first-time setup of a development instance: it creates
    a superuser with username/password ``admin``/``admin`` (blank email)
    only when no user accounts exist at all, and is a no-op otherwise.
    """

    help = "Create a default 'admin' superuser if no accounts exist yet."

    def handle(self, *args, **options):
        # exists() issues a cheap EXISTS query instead of a full COUNT(*)
        if User.objects.exists():
            # management commands should write through self.stdout, not
            # print(), so output can be captured/redirected by callers
            self.stdout.write(
                'Admin accounts can only be initialized if no Accounts exist')
        else:
            User.objects.create_superuser('admin', '', 'admin')
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
class Command(BaseCommand):
def handle(self, *args, **options):
if User.objects.count() == 0:
User.objects.create_superuser('admin', '', 'admin')
else:
print('Admin accounts can only be initialized if no Accounts exist') | true | true |
f7f438af7706484a61f56a8be5e44874fa559d8b | 628 | py | Python | colour_detect.py | riddhigupta1318/menu_driven | 1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6 | [
"Apache-2.0"
] | null | null | null | colour_detect.py | riddhigupta1318/menu_driven | 1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6 | [
"Apache-2.0"
] | null | null | null | colour_detect.py | riddhigupta1318/menu_driven | 1a3e4a8d3ff3dbcd9cffaa87ab9fbc66868d9eb6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import cv2
# capture camera start: live green-colour segmentation from the default webcam.
# Note: the original labelled this "red", but the HSV hue range 40-80 used
# below selects GREEN (OpenCV hue runs 0-179); names/labels fixed accordingly.
cap = cv2.VideoCapture(0)
while cap.isOpened():
    status, frame = cap.read()
    if not status:
        # frame grab failed (camera unplugged / end of stream); the original
        # would have crashed in cvtColor on a None frame -- stop cleanly
        break
    # converting to HSV
    hsvimg = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # MASKING IMAGE for green color: binary mask of pixels in the green hue band
    imgmask = cv2.inRange(hsvimg, (40, 50, 50), (80, 255, 255))
    # bitwise mask and original frame: keep only the green parts
    greenpart = cv2.bitwise_and(frame, frame, mask=imgmask)
    # showing images
    cv2.imshow('original', frame)
    cv2.imshow('green', greenpart)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
cap.release()
| 21.655172 | 55 | 0.667197 |
import cv2
cap=cv2.VideoCapture(0)
while cap.isOpened():
status,frame=cap.read()
hsvimg=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
imgmask=cv2.inRange(hsvimg,(40,50,50),(80,255,255))
redpart=cv2.bitwise_and(frame,frame,mask=imgmask)
cv2.imshow('original',frame)
cv2.imshow('red',redpart)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
cap.release()
| true | true |
f7f438ec0fd6787713af918cb466f5e1b5c8e3bf | 375 | py | Python | setup.py | Bidski/calc | 79cd4eecf4c7dac91d10d0b4d3797500d1230419 | [
"MIT"
] | 5 | 2018-05-28T18:55:35.000Z | 2021-03-16T01:38:21.000Z | setup.py | Bidski/calc | 79cd4eecf4c7dac91d10d0b4d3797500d1230419 | [
"MIT"
] | null | null | null | setup.py | Bidski/calc | 79cd4eecf4c7dac91d10d0b4d3797500d1230419 | [
"MIT"
] | 2 | 2019-09-25T02:16:46.000Z | 2021-02-17T06:21:12.000Z | from setuptools import setup
setup(
    # distribution metadata for CALC (Convolutional Architecture Like Cortex)
    name = "CALC",
    version = "0.1",
    author = "Bryan Tripp",
    author_email = "bptripp@uwaterloo.ca",
    description = ("Convolutional architecture like cortex."),
    license = "BSD",
    keywords = "primate vision convolutional network",
    url = "https://github.com/bptripp/calc",
    # ship the core package and its bundled examples
    packages=['calc', 'calc.examples'],
)
| 25 | 62 | 0.645333 | from setuptools import setup
setup(
name = "CALC",
version = "0.1",
author = "Bryan Tripp",
author_email = "bptripp@uwaterloo.ca",
description = ("Convolutional architecture like cortex."),
license = "BSD",
keywords = "primate vision convolutional network",
url = "https://github.com/bptripp/calc",
packages=['calc', 'calc.examples'],
)
| true | true |
f7f439235b35cf4ea29c0e1fe315e04017238520 | 3,127 | py | Python | coupons/admin.py | rsalmaso/django-coupons | 27e15403b6aa99997a9e5239949b4c462c0ed2c2 | [
"BSD-3-Clause"
] | 83 | 2015-03-09T12:42:38.000Z | 2022-03-20T10:48:24.000Z | coupons/admin.py | rsalmaso/django-coupons | 27e15403b6aa99997a9e5239949b4c462c0ed2c2 | [
"BSD-3-Clause"
] | 20 | 2015-03-23T06:06:48.000Z | 2020-09-28T13:03:20.000Z | coupons/admin.py | rsalmaso/django-coupons | 27e15403b6aa99997a9e5239949b4c462c0ed2c2 | [
"BSD-3-Clause"
] | 72 | 2015-03-23T10:13:08.000Z | 2022-02-08T06:34:46.000Z | from django.conf.urls import url
from django.contrib import admin
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateView
from .forms import CouponGenerationForm
from .models import Coupon, CouponUser, Campaign
class CouponUserInline(admin.TabularInline):
    """Tabular inline for editing the users bound to a coupon."""

    model = CouponUser
    extra = 0

    def get_max_num(self, request, obj=None, **kwargs):
        """Cap the inline row count at the coupon's user limit.

        When no coupon instance exists yet (the admin "add" page), return
        None so Django applies no limit.
        """
        return obj.user_limit if obj else None
class CouponAdmin(admin.ModelAdmin):
    """Admin for individual coupons, with an extra bulk-generation view."""

    list_display = [
        'created_at', 'code', 'type', 'value', 'user_count', 'user_limit', 'is_redeemed', 'valid_until', 'campaign'
    ]
    list_filter = ['type', 'campaign', 'created_at', 'valid_until']
    raw_id_fields = ()
    search_fields = ('code', 'value')
    inlines = (CouponUserInline,)
    # the m2m users field is managed through the inline, not the default widget
    exclude = ('users',)

    def user_count(self, inst):
        # number of users this coupon is bound to (shown in list_display)
        return inst.users.count()

    def get_urls(self):
        # prepend the custom "generate coupons" view so it matches before
        # the default ModelAdmin URL patterns
        urls = super(CouponAdmin, self).get_urls()
        my_urls = [
            url(r'generate-coupons', self.admin_site.admin_view(GenerateCouponsAdminView.as_view()),
                name='generate_coupons'),
        ]
        return my_urls + urls
class GenerateCouponsAdminView(TemplateView):
    """Admin view that renders the generation form and creates coupons on POST."""
    template_name = 'admin/generate_coupons.html'

    def get_context_data(self, **kwargs):
        """Bind the form; on a valid POST, generate coupons and flash a message."""
        context = super(GenerateCouponsAdminView, self).get_context_data(**kwargs)
        if self.request.method != 'POST':
            # Plain GET: show an unbound form.
            context['form'] = CouponGenerationForm()
            return context
        form = CouponGenerationForm(self.request.POST)
        if form.is_valid():
            cleaned = form.cleaned_data
            context['coupons'] = Coupon.objects.create_coupons(
                cleaned['quantity'],
                cleaned['type'],
                cleaned['value'],
                cleaned['valid_until'],
                cleaned['prefix'],
                cleaned['campaign'],
            )
            messages.success(self.request, _("Your coupons have been generated."))
        context['form'] = form
        return context

    def post(self, request, *args, **kwargs):
        """Handle POST the same way as GET: rebuild the context and render."""
        return self.render_to_response(self.get_context_data(**kwargs))
class CampaignAdmin(admin.ModelAdmin):
    """Admin list view for campaigns with per-campaign coupon statistics."""
    list_display = ['name', 'num_coupons', 'num_coupons_used', 'num_coupons_unused', 'num_coupons_expired']

    def num_coupons(self, obj):
        """Total number of coupons attached to this campaign."""
        return obj.coupons.count()
    num_coupons.short_description = _("coupons")

    def num_coupons_used(self, obj):
        """Number of redeemed coupons in this campaign."""
        return obj.coupons.used().count()
    num_coupons_used.short_description = _("used")

    def num_coupons_unused(self, obj):
        """Number of coupons not yet redeemed.

        Bug fix: this previously queried ``used()``, so the "unused" column
        duplicated the used count instead of showing the unused one.
        """
        return obj.coupons.unused().count()
    num_coupons_unused.short_description = _("unused")

    def num_coupons_expired(self, obj):
        """Number of expired coupons in this campaign."""
        return obj.coupons.expired().count()
    num_coupons_expired.short_description = _("expired")
# Hook the customized admin classes into the default admin site.
admin.site.register(Coupon, CouponAdmin)
admin.site.register(Campaign, CampaignAdmin)
| 33.623656 | 115 | 0.651103 | from django.conf.urls import url
from django.contrib import admin
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import TemplateView
from .forms import CouponGenerationForm
from .models import Coupon, CouponUser, Campaign
class CouponUserInline(admin.TabularInline):
model = CouponUser
extra = 0
def get_max_num(self, request, obj=None, **kwargs):
if obj:
return obj.user_limit
return None
class CouponAdmin(admin.ModelAdmin):
list_display = [
'created_at', 'code', 'type', 'value', 'user_count', 'user_limit', 'is_redeemed', 'valid_until', 'campaign'
]
list_filter = ['type', 'campaign', 'created_at', 'valid_until']
raw_id_fields = ()
search_fields = ('code', 'value')
inlines = (CouponUserInline,)
exclude = ('users',)
def user_count(self, inst):
return inst.users.count()
def get_urls(self):
urls = super(CouponAdmin, self).get_urls()
my_urls = [
url(r'generate-coupons', self.admin_site.admin_view(GenerateCouponsAdminView.as_view()),
name='generate_coupons'),
]
return my_urls + urls
class GenerateCouponsAdminView(TemplateView):
template_name = 'admin/generate_coupons.html'
def get_context_data(self, **kwargs):
context = super(GenerateCouponsAdminView, self).get_context_data(**kwargs)
if self.request.method == 'POST':
form = CouponGenerationForm(self.request.POST)
if form.is_valid():
context['coupons'] = Coupon.objects.create_coupons(
form.cleaned_data['quantity'],
form.cleaned_data['type'],
form.cleaned_data['value'],
form.cleaned_data['valid_until'],
form.cleaned_data['prefix'],
form.cleaned_data['campaign'],
)
messages.success(self.request, _("Your coupons have been generated."))
else:
form = CouponGenerationForm()
context['form'] = form
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class CampaignAdmin(admin.ModelAdmin):
    """Admin list view for campaigns with per-campaign coupon statistics."""
    list_display = ['name', 'num_coupons', 'num_coupons_used', 'num_coupons_unused', 'num_coupons_expired']

    def num_coupons(self, obj):
        """Total number of coupons attached to this campaign."""
        return obj.coupons.count()
    num_coupons.short_description = _("coupons")

    def num_coupons_used(self, obj):
        """Number of redeemed coupons in this campaign."""
        return obj.coupons.used().count()
    num_coupons_used.short_description = _("used")

    def num_coupons_unused(self, obj):
        """Number of coupons not yet redeemed.

        Bug fix: this previously queried ``used()``, so the "unused" column
        duplicated the used count instead of showing the unused one.
        """
        return obj.coupons.unused().count()
    num_coupons_unused.short_description = _("unused")

    def num_coupons_expired(self, obj):
        """Number of expired coupons in this campaign."""
        return obj.coupons.expired().count()
    num_coupons_expired.short_description = _("expired")
admin.site.register(Coupon, CouponAdmin)
admin.site.register(Campaign, CampaignAdmin)
| true | true |
f7f4399010c2aabffddb9bf2c7a97efe2ba59bdb | 4,199 | py | Python | download_tn_contents_updated.py | kumarks1122/sushi-chef-khan-academy | 38a5b41902cda97b70614ecd7412b5ae53b0b026 | [
"MIT"
] | null | null | null | download_tn_contents_updated.py | kumarks1122/sushi-chef-khan-academy | 38a5b41902cda97b70614ecd7412b5ae53b0b026 | [
"MIT"
] | null | null | null | download_tn_contents_updated.py | kumarks1122/sushi-chef-khan-academy | 38a5b41902cda97b70614ecd7412b5ae53b0b026 | [
"MIT"
] | null | null | null | from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import tempfile
import pdb
import json
import csv
import os
import youtube_dl
import shutil
import requests
class DownloadTn():
    """Download Khan Academy Tamil Nadu videos and thumbnails from YouTube
    and upload them to Google Drive, persisting a JSON manifest of progress.
    """

    def __init__(self):
        # Manifest of already-processed videos (persisted to TAMILNADU_contents.json).
        self.contents = []
        # Google API clients; populated in main() after OAuth.
        self.youtube = None
        self.service = None  # fix: previously only set in main(), never declared here
        self.MP4 = "mp4"
        self.videos_path = "./tn_videos"
        self.thumbnails_path = "./tn_images"
        # YouTube ids already uploaded, and the full id list from tn_ids.json.
        self.uploaded_contents = []
        self.content_ids = []

    def process_content(self, video_data):
        """Download one video and its thumbnail, upload both to Drive and
        append the resulting metadata record to the manifest.

        :param video_data: one item from the YouTube Data API ``videos.list``
            response; must contain ``id`` and ``snippet``.
        """
        video_id = video_data["id"]
        snippet = video_data["snippet"]
        content = {
            'title': snippet['title'],
            'description': snippet['description'],
            'youtube_id': video_id,
            'published_at': snippet['publishedAt'],
        }
        # Download the mp4 into a temp file first, then copy it locally.
        destination_path = os.path.join(tempfile.gettempdir(), "{}.mp4".format(video_id))
        download_settings = {
            "outtmpl": destination_path,
            "format": "[ext=mp4]",
        }
        thumbnail_url = snippet["thumbnails"]["high"]["url"]
        with youtube_dl.YoutubeDL(download_settings) as ydl:
            ydl.download(["https://www.youtube.com/watch?v={}".format(video_id)])
        video_file_path = "{}/{}.{}".format(self.videos_path, video_id, self.MP4)
        thumbnail_file_path = "{}/{}.jpg".format(self.thumbnails_path, video_id)
        with open(destination_path, "rb") as dlf, open(video_file_path, 'wb') as destf:
            shutil.copyfileobj(dlf, destf)
        # Fix: use a context manager so the handle is closed even if the
        # HTTP request or the write fails (was open()/write()/close()).
        with open(thumbnail_file_path, 'wb') as thumb:
            thumb.write(requests.get(thumbnail_url).content)
        content["video_name"] = "{}.mp4".format(video_id)
        drive_file = self.service.files().create(
            body={'name': content["video_name"], "parents": ["17VgIHddGW24Yd0hWIoALnDt0srOc1KZ9"]},
            media_body=video_file_path).execute()
        content['drive_file_path'] = "https://drive.google.com/open?id=" + drive_file['id']
        content["thumbnail_file_name"] = "{}.jpg".format(video_id)
        drive_file = self.service.files().create(
            body={'name': content["thumbnail_file_name"], "parents": ["1bh6IszPGB2_c-TEVhoOwdRfYpTVG4Hnh"]},
            media_body=thumbnail_file_path).execute()
        content['thumbnail_file_path'] = "https://drive.google.com/open?id=" + drive_file['id']
        self.contents.append(content)
        # Persist the full manifest after every video so progress survives crashes.
        with open('TAMILNADU_contents.json', 'w') as outfile:
            json.dump(self.contents, outfile)

    def list_all_contents(self):
        """Load the manifest and pending id list, then process every video
        id that has not been uploaded yet."""
        with open('TAMILNADU_contents.json', 'r') as outfile:
            self.contents = json.load(outfile)
        with open('tn_ids.json', 'r') as outfile:
            self.content_ids = json.load(outfile)
        for content in self.contents:
            self.uploaded_contents.append(content['youtube_id'])
        page_token = ""
        # videos.list accepts at most 50 ids per call; request in chunks of 40.
        content_id_list = [self.content_ids[i:i + 40] for i in range(0, len(self.content_ids), 40)]
        for content_id_taken in content_id_list:
            response = self.youtube.videos().list(
                id=",".join(content_id_taken),
                part='id,snippet',
                maxResults=50,
                pageToken=page_token
            ).execute()
            for search_result in response.get('items', []):
                print(search_result["id"])
                if search_result["id"] not in self.uploaded_contents:
                    self.process_content(search_result)

    def main(self):
        """Authenticate with Google (OAuth), build the API clients, run the sync."""
        SCOPES = ["https://www.googleapis.com/auth/drive", "https://www.googleapis.com/auth/youtube"]
        store = file.Storage('credentials.json')
        creds = store.get()
        if not creds or creds.invalid:
            flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
            creds = tools.run_flow(flow, store)
        self.youtube = build('youtube', 'v3', http=creds.authorize(Http()))
        self.service = build('drive', 'v3', http=creds.authorize(Http()))
        self.list_all_contents()
if __name__ == '__main__':
chef = DownloadTn()
chef.main() | 37.828829 | 187 | 0.67135 | from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import tempfile
import pdb
import json
import csv
import os
import youtube_dl
import shutil
import requests
class DownloadTn():
def __init__(self):
self.contents = []
self.youtube = None
self.MP4 = "mp4"
self.videos_path = "./tn_videos"
self.thumbnails_path = "./tn_images"
self.uploaded_contents = []
self.content_ids = []
def process_content(self, video_data):
video_id = video_data["id"]
content = {}
content['title'] = video_data['snippet']['title']
content['description'] = video_data['snippet']['description']
content['youtube_id'] = video_id
content['published_at'] = video_data['snippet']['publishedAt']
download_settings = {}
destination_path = os.path.join(tempfile.gettempdir(), "{}.mp4".format(video_id))
download_settings["outtmpl"] = destination_path
download_settings["format"] = "[ext=mp4]"
thumbnail_url = video_data["snippet"]["thumbnails"]["high"]["url"]
with youtube_dl.YoutubeDL(download_settings) as ydl:
ydl.download(["https://www.youtube.com/watch?v={}".format(video_id)])
video_file_path = "{}/{}.{}".format(self.videos_path, video_id, self.MP4)
thumbnail_file_path = "{}/{}.jpg".format(self.thumbnails_path, video_id)
with open(destination_path, "rb") as dlf, open(video_file_path, 'wb') as destf:
shutil.copyfileobj(dlf, destf)
f = open(thumbnail_file_path,'wb')
f.write(requests.get(thumbnail_url).content)
f.close()
content["video_name"] = "{}.mp4".format(video_id)
drive_file = self.service.files().create(body={'name': content["video_name"], "parents": ["17VgIHddGW24Yd0hWIoALnDt0srOc1KZ9"]}, media_body=video_file_path).execute()
content['drive_file_path'] = "https://drive.google.com/open?id=" + drive_file['id']
content["thumbnail_file_name"] = "{}.jpg".format(video_id)
drive_file = self.service.files().create(body={'name': content["thumbnail_file_name"], "parents": ["1bh6IszPGB2_c-TEVhoOwdRfYpTVG4Hnh"]}, media_body=thumbnail_file_path).execute()
content['thumbnail_file_path'] = "https://drive.google.com/open?id=" + drive_file['id']
self.contents.append(content)
with open('TAMILNADU_contents.json', 'w') as outfile:
json.dump(self.contents, outfile)
def list_all_contents(self):
with open('TAMILNADU_contents.json', 'r') as outfile:
self.contents = json.load(outfile)
with open('tn_ids.json', 'r') as outfile:
self.content_ids = json.load(outfile)
for content in self.contents:
self.uploaded_contents.append(content['youtube_id'])
page_token = ""
content_id_list = [self.content_ids[i:i+40] for i in range(0, len(self.content_ids),40)]
for content_id_taken in content_id_list:
response = self.youtube.videos().list(
id=",".join(content_id_taken),
part='id,snippet',
maxResults=50,
pageToken=page_token
).execute()
for search_result in response.get('items', []):
print(search_result["id"])
if search_result["id"] not in self.uploaded_contents:
self.process_content(search_result)
def main(self):
SCOPES = ["https://www.googleapis.com/auth/drive", "https://www.googleapis.com/auth/youtube"]
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
creds = tools.run_flow(flow, store)
self.youtube = build('youtube', 'v3', http=creds.authorize(Http()))
self.service = build('drive', 'v3', http=creds.authorize(Http()))
self.list_all_contents()
if __name__ == '__main__':
chef = DownloadTn()
chef.main() | true | true |
f7f439dc261267394a4fc43e8322ad4ba6ab1561 | 3,884 | py | Python | benchmark/startQiskit_Class2071.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class2071.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class2071.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=26
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two bit strings position-wise and return the result reversed.

    ``s`` drives the length; ``t`` must be at least as long as ``s``.
    """
    xored = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(xored[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product (mod 2) of two bit strings, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f for a boolean function f on n bits.

    For each basis state |x> with f(x) == "1", the target qubit is flipped
    by a multi-controlled Toffoli; X gates map the control pattern x onto
    the all-ones pattern and are undone afterwards.

    :param n: number of control qubits.
    :param f: callable taking an n-char bit string and returning "0" or "1".
    :returns: a QuantumCircuit named "Of" over n controls + 1 target.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Turn the control pattern |rep> into |11...1> so mct fires on it.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the basis-mapping X gates.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit around the oracle for f.

    The gate sequence is auto-generated; the ``# number=`` comments are
    generator markers, and the exact ordering is significant — do not
    reorder or deduplicate gates.

    :param n: total number of qubits (the oracle acts on n-1 controls).
    :param f: boolean function passed through to build_oracle().
    :returns: the assembled QuantumCircuit (classical register unused here).
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=14
    prog.x(input_qubit[3]) # number=15
    prog.rx(1.8001325905069514,input_qubit[3]) # number=18
    prog.cx(input_qubit[0],input_qubit[3]) # number=16
    prog.h(input_qubit[1]) # number=22
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    prog.x(input_qubit[3]) # number=24
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.x(input_qubit[1]) # number=25
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.z(input_qubit[1]) # number=21
    prog.h(input_qubit[0]) # number=9
    prog.cx(input_qubit[2],input_qubit[0]) # number=10
    prog.x(input_qubit[1]) # number=17
    prog.cx(input_qubit[2],input_qubit[0]) # number=11
    prog.y(input_qubit[0]) # number=12
    prog.y(input_qubit[0]) # number=13
    prog.cx(input_qubit[2],input_qubit[1]) # number=23
    prog.x(input_qubit[0]) # number=19
    prog.x(input_qubit[0]) # number=20
    # circuit end
    return prog
if __name__ == '__main__':
    # Hidden bit string and bias defining the oracle f(rep) = (a . rep) xor b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4, f)
    # Exact statevector simulation; amplitudes -> probabilities per basis state.
    backend = BasicAer.get_backend('statevector_simulator')
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    info = {
        np.binary_repr(i, qubits): round((info[i] * (info[i].conjugate())).real, 3)
        for i in range(2 ** qubits)
    }
    # Transpile against a mock device to record the compiled circuit size.
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)
    # Fix: context manager guarantees the CSV is closed even if printing fails
    # (was a bare open()/close() pair); also dropped the unused sample_shot.
    with open("../data/startQiskit_Class2071.csv", "w") as writefile:
        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.__len__(), file=writefile)
        print(circuit1, file=writefile)
| 33.773913 | 140 | 0.645726 |
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.rx(1.8001325905069514,input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.x(input_qubit[3])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.x(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.z(input_qubit[1])
prog.h(input_qubit[0])
prog.cx(input_qubit[2],input_qubit[0])
prog.x(input_qubit[1])
prog.cx(input_qubit[2],input_qubit[0])
prog.y(input_qubit[0])
prog.y(input_qubit[0])
prog.cx(input_qubit[2],input_qubit[1])
prog.x(input_qubit[0])
prog.x(input_qubit[0])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2071.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f7f43a0f9454a0ca4303359fb33025fc7f168461 | 2,709 | py | Python | python/setup.py | VoVAllen/tf-dlpack | cac0e981888a7b1ec213ce28985e5cb1ada87f6b | [
"Apache-2.0"
] | 39 | 2019-11-08T18:41:24.000Z | 2022-03-27T18:12:50.000Z | python/setup.py | VoVAllen/tf-dlpack | cac0e981888a7b1ec213ce28985e5cb1ada87f6b | [
"Apache-2.0"
] | 12 | 2019-11-08T17:21:50.000Z | 2021-09-21T05:39:31.000Z | python/setup.py | VoVAllen/tf-dlpack | cac0e981888a7b1ec213ce28985e5cb1ada87f6b | [
"Apache-2.0"
] | 4 | 2020-01-19T20:22:51.000Z | 2020-05-20T18:21:40.000Z | import os
import re
import sys
import shutil
import platform
import subprocess
from setuptools import find_packages
from setuptools import setup, Extension
from setuptools.dist import Distribution
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class BinaryDistribution(Distribution):
    """Distribution that always reports binary (platform-specific) content,
    so setuptools builds a platform wheel instead of a pure-Python one."""
    def has_ext_modules(self):
        # Claim extension modules exist: forces the platform-specific wheel tag.
        return True
# Directory containing this setup.py; used to locate libinfo.py and the libraries.
CURRENT_DIR = os.path.dirname(__file__)
def get_lib_path():
    """Get library paths and the package version.

    Executes ``tfdlpack/libinfo.py`` in an isolated namespace instead of
    importing it, because importing ``tfdlpack`` from setup.py would pull
    in its runtime dependencies.

    :returns: tuple ``(libs, version)`` — list of shared-library paths and
        the version string.
    """
    libinfo_py = os.path.join(CURRENT_DIR, 'tfdlpack', 'libinfo.py')
    libinfo = {'__file__': libinfo_py}
    # Fix: close the source file deterministically (the handle was previously
    # left to the garbage collector).
    with open(libinfo_py, "rb") as src:
        exec(compile(src.read(), libinfo_py, 'exec'), libinfo, libinfo)
    version = libinfo['__version__']
    libs = []
    # Library presence is mandatory for wheel/conda builds, optional otherwise.
    optional = not ("bdist_wheel" in sys.argv or os.getenv('CONDA_BUILD'))
    for libname in libinfo['LIBNAMES']:
        lib_path = libinfo['find_lib_path'](libname, optional=optional)
        if lib_path is not None:
            libs.append(lib_path[0])
    return libs, version
# Resolve the prebuilt shared libraries and the package version once, up front.
LIBS, VERSION = get_lib_path()
# Exactly one of these is set: wheel/conda builds bundle the binaries inside
# the package; a source-tree install ships them as data_files instead.
include_libs = False
wheel_include_libs = False
if "bdist_wheel" in sys.argv or os.getenv('CONDA_BUILD'):
    wheel_include_libs = True
else:
    include_libs = True
setup_kwargs = {}
# For bdist_wheel only: copy the libraries into the package directory and
# list them in a generated MANIFEST.in so they end up inside the wheel.
if wheel_include_libs:
    with open("MANIFEST.in", "w") as fo:
        for path in LIBS:
            shutil.copy(path, os.path.join(CURRENT_DIR, 'tfdlpack'))
            _, libname = os.path.split(path)
            fo.write("include tfdlpack/%s\n" % libname)
    setup_kwargs = {
        "include_package_data": True
    }
# For source tree setup
# Conda build also includes the binary library
if include_libs:
    rpath = [os.path.relpath(path, CURRENT_DIR) for path in LIBS]
    setup_kwargs = {
        "include_package_data": True,
        "data_files": [('tfdlpack', rpath)]
    }
setup(
    name='tfdlpack' + os.getenv('TFDLPACK_PACKAGE_SUFFIX', ''),
    version=VERSION,
    author='Jinjing Zhou',
    author_email='allen.zhou@nyu.edu',
    description='Tensorflow plugin for DLPack',
    packages=find_packages(),
    long_description="""
The package adds interoperability of DLPack to Tensorflow. It contains straightforward
and easy-to-use APIs to convert Tensorflow tensors from/to DLPack format.
""",
    distclass=BinaryDistribution,
    zip_safe=False,
    license='APACHE',
    **setup_kwargs
)
if wheel_include_libs:
    # Wheel cleanup: remove the generated MANIFEST.in and the copied
    # libraries now that the wheel has been built.
    os.remove("MANIFEST.in")
    for path in LIBS:
        _, libname = os.path.split(path)
        os.remove(os.path.join(CURRENT_DIR, 'tfdlpack', libname))
| 29.769231 | 86 | 0.696198 | import os
import re
import sys
import shutil
import platform
import subprocess
from setuptools import find_packages
from setuptools import setup, Extension
from setuptools.dist import Distribution
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
CURRENT_DIR = os.path.dirname(__file__)
def get_lib_path():
libinfo_py = os.path.join(CURRENT_DIR, 'tfdlpack', 'libinfo.py')
libinfo = {'__file__': libinfo_py}
exec(compile(open(libinfo_py, "rb").read(), libinfo_py, 'exec'), libinfo, libinfo)
version = libinfo['__version__']
libs = []
optional = not ("bdist_wheel" in sys.argv or os.getenv('CONDA_BUILD'))
for libname in libinfo['LIBNAMES']:
lib_path = libinfo['find_lib_path'](libname, optional=optional)
if lib_path is not None:
libs.append(lib_path[0])
return libs, version
LIBS, VERSION = get_lib_path()
include_libs = False
wheel_include_libs = False
if "bdist_wheel" in sys.argv or os.getenv('CONDA_BUILD'):
wheel_include_libs = True
else:
include_libs = True
setup_kwargs = {}
if wheel_include_libs:
with open("MANIFEST.in", "w") as fo:
for path in LIBS:
shutil.copy(path, os.path.join(CURRENT_DIR, 'tfdlpack'))
_, libname = os.path.split(path)
fo.write("include tfdlpack/%s\n" % libname)
setup_kwargs = {
"include_package_data": True
}
if include_libs:
rpath = [os.path.relpath(path, CURRENT_DIR) for path in LIBS]
setup_kwargs = {
"include_package_data": True,
"data_files": [('tfdlpack', rpath)]
}
setup(
name='tfdlpack' + os.getenv('TFDLPACK_PACKAGE_SUFFIX', ''),
version=VERSION,
author='Jinjing Zhou',
author_email='allen.zhou@nyu.edu',
description='Tensorflow plugin for DLPack',
packages=find_packages(),
long_description="""
The package adds interoperability of DLPack to Tensorflow. It contains straightforward
and easy-to-use APIs to convert Tensorflow tensors from/to DLPack format.
""",
distclass=BinaryDistribution,
zip_safe=False,
license='APACHE',
**setup_kwargs
)
if wheel_include_libs:
os.remove("MANIFEST.in")
for path in LIBS:
_, libname = os.path.split(path)
os.remove(os.path.join(CURRENT_DIR, 'tfdlpack', libname))
| true | true |
f7f43a52d580a244a3c620710d75e82cd89b1f5c | 1,730 | py | Python | app/routers/forex_router.py | chanmoto/eurusd_pix2pix | 956d5d2025aee265f38e0b0ccfe42dcdb9165ca3 | [
"MIT"
] | null | null | null | app/routers/forex_router.py | chanmoto/eurusd_pix2pix | 956d5d2025aee265f38e0b0ccfe42dcdb9165ca3 | [
"MIT"
] | null | null | null | app/routers/forex_router.py | chanmoto/eurusd_pix2pix | 956d5d2025aee265f38e0b0ccfe42dcdb9165ca3 | [
"MIT"
] | null | null | null | from fastapi import Depends, Body, APIRouter
from sqlalchemy.orm import Session
from database import get_db
from models.forex_model import Forex2_m5,Forex2_m30,Forex2_m240
from datetime import datetime as dt
from crud.forex_crud import get_last_time, add_forex
from schemas import forex_schema as schema
import pdb
# When modularizing routes, create an APIRouter instance; by convention it is named `router`.
router = APIRouter(
    responses={404: {"forex": "Not found"}},
)
def session_clear(exception, db=None):
    """Roll back (on error) and close a SQLAlchemy session after a request.

    Bug fix: the original operated on the imported ``Session`` *class*
    (``Session.is_active`` / ``Session.rollback()``), which raises a
    TypeError at runtime because those need a session instance. The session
    to clean up is now passed in; the parameter defaults to None so existing
    one-argument callers keep working (they get a safe no-op instead of a
    crash).

    :param exception: the exception that triggered cleanup; truthy => rollback.
    :param db: the active session instance, or None to skip cleanup entirely.
    """
    if db is None:
        return
    if exception and db.is_active:
        db.rollback()
    db.close()
@router.get("/getlasttime/")
async def gettime(db: Session = Depends(get_db),):
return {
"m5": get_last_time(db=db,model=Forex2_m5),
"m30": get_last_time(db=db,model=Forex2_m30),
"m240": get_last_time(db=db,model=Forex2_m240)
}
@router.post("/gettick/")
async def gettick(
db: Session = Depends(get_db),
body=Body(...)):
time, peristr, open, high, low, close, volume = body["content"].split(",")
obj =schema.Forex(
id = dt.strptime(time, "%Y.%m.%d %H:%M"),
open = open,
high= high,
low= low,
close= close,
volume= volume
)
if peristr == "forex_f1":
repo = Forex2_m5
elif peristr == "forex_f2":
repo = Forex2_m30
elif peristr == "forex_f3":
repo = Forex2_m240
else:
return {"error": "invalid peristr"}
try:
r = add_forex(
db=db,
schema = obj,
model = repo,
commit=True,
)
except Exception as e:
session_clear(e)
return {"error": "invalid data"}
return {"msg": "data posting comleted" }
| 25.820896 | 78 | 0.606936 | from fastapi import Depends, Body, APIRouter
from sqlalchemy.orm import Session
from database import get_db
from models.forex_model import Forex2_m5,Forex2_m30,Forex2_m240
from datetime import datetime as dt
from crud.forex_crud import get_last_time, add_forex
from schemas import forex_schema as schema
import pdb
router = APIRouter(
responses={404: {"forex": "Not found"}},
)
def session_clear(exception, db=None):
    """Roll back (on error) and close a SQLAlchemy session after a request.

    Bug fix: the original operated on the imported ``Session`` *class*
    (``Session.is_active`` / ``Session.rollback()``), which raises a
    TypeError at runtime because those need a session instance. The session
    is now passed in; the parameter defaults to None so existing
    one-argument callers keep working (safe no-op instead of a crash).

    :param exception: the exception that triggered cleanup; truthy => rollback.
    :param db: the active session instance, or None to skip cleanup entirely.
    """
    if db is None:
        return
    if exception and db.is_active:
        db.rollback()
    db.close()
@router.get("/getlasttime/")
async def gettime(db: Session = Depends(get_db),):
return {
"m5": get_last_time(db=db,model=Forex2_m5),
"m30": get_last_time(db=db,model=Forex2_m30),
"m240": get_last_time(db=db,model=Forex2_m240)
}
@router.post("/gettick/")
async def gettick(
db: Session = Depends(get_db),
body=Body(...)):
time, peristr, open, high, low, close, volume = body["content"].split(",")
obj =schema.Forex(
id = dt.strptime(time, "%Y.%m.%d %H:%M"),
open = open,
high= high,
low= low,
close= close,
volume= volume
)
if peristr == "forex_f1":
repo = Forex2_m5
elif peristr == "forex_f2":
repo = Forex2_m30
elif peristr == "forex_f3":
repo = Forex2_m240
else:
return {"error": "invalid peristr"}
try:
r = add_forex(
db=db,
schema = obj,
model = repo,
commit=True,
)
except Exception as e:
session_clear(e)
return {"error": "invalid data"}
return {"msg": "data posting comleted" }
| true | true |
f7f43a97a86d016a0c710f9c25c5020a733ecd59 | 44,397 | py | Python | opentok/opentok.py | Nyceane/fitstream-jetson-nano | cd4fe9210670669dd683912f1f7ac1539037726d | [
"Apache-2.0"
] | null | null | null | opentok/opentok.py | Nyceane/fitstream-jetson-nano | cd4fe9210670669dd683912f1f7ac1539037726d | [
"Apache-2.0"
] | null | null | null | opentok/opentok.py | Nyceane/fitstream-jetson-nano | cd4fe9210670669dd683912f1f7ac1539037726d | [
"Apache-2.0"
] | null | null | null | from datetime import datetime # generate_token
import calendar # generate_token
import base64 # generate_token
import random # generate_token
import time # generate_token
import hmac # _sign_string
import hashlib # _sign_string
import requests # create_session, archiving
import json # archiving
import platform # user-agent
from socket import inet_aton # create_session
import xml.dom.minidom as xmldom # create_session
from jose import jwt # _create_jwt_auth_header
import random # _create_jwt_auth_header
# compat
from six.moves.urllib.parse import urlencode
from six import text_type, u, b, PY3
from enum import Enum
from .version import __version__
from .endpoints import Endpoints
from .session import Session
from .archives import Archive, ArchiveList, OutputModes
from .stream import Stream
from .streamlist import StreamList
from .sip_call import SipCall
from .broadcast import Broadcast
from .exceptions import (
OpenTokException,
RequestError,
AuthError,
NotFoundError,
ArchiveError,
SignalingError,
GetStreamError,
ForceDisconnectError,
SipDialError,
SetStreamClassError,
BroadcastError
)
class Roles(Enum):
    """List of valid roles for a token (pass as the ``role`` argument of
    ``OpenTok.generate_token()``). Values are ``six`` unicode strings for
    Python 2/3 compatibility."""
    subscriber = u('subscriber')
    """A subscriber can only subscribe to streams."""
    publisher = u('publisher')
    """A publisher can publish streams, subscribe to streams, and signal"""
    moderator = u('moderator')
    """In addition to the privileges granted to a publisher, in clients using the OpenTok.js 2.2
    library, a moderator can call the `forceUnpublish()` and `forceDisconnect()` method of the
    Session object.
    """
class MediaModes(Enum):
    """List of valid settings for the mediaMode parameter of the OpenTok.create_session() method.

    Note that the wire values intentionally differ from the member names:
    ``routed`` serializes as ``'disabled'`` and ``relayed`` as ``'enabled'``.
    """
    routed = u('disabled')
    """The session will transmit streams using the OpenTok Media Server."""
    relayed = u('enabled')
    """The session will attempt to transmit streams directly between clients. If two clients
    cannot send and receive each others' streams, due to firewalls on the clients' networks,
    their streams will be relayed using the OpenTok TURN Server."""
class ArchiveModes(Enum):
    """List of valid settings for the archive_mode parameter of the OpenTok.create_session()
    method."""
    manual = u('manual')
    """The session will be manually archived."""
    always = u('always')
    """The session will be automatically archived."""
class OpenTok(object):
"""Use this SDK to create tokens and interface with the server-side portion
of the Opentok API.
"""
TOKEN_SENTINEL = 'T1=='
"""For internal use."""
def __init__(self, api_key, api_secret, api_url='https://api.opentok.com', timeout=None):
self.api_key = str(api_key)
self.api_secret = api_secret
self.timeout = timeout
self._proxies = None
self.endpoints = Endpoints(api_url, self.api_key)
@property
def proxies(self):
return self._proxies
@proxies.setter
def proxies(self, proxies):
self._proxies = proxies
def generate_token(self, session_id, role=Roles.publisher, expire_time=None, data=None,
initial_layout_class_list=[]):
"""
Generates a token for a given session.
:param String session_id: The session ID of the session to be accessed by the client using
the token.
:param String role: The role for the token. Valid values are defined in the Role
class:
* `Roles.subscriber` -- A subscriber can only subscribe to streams.
* `Roles.publisher` -- A publisher can publish streams, subscribe to
streams, and signal. (This is the default value if you do not specify a role.)
* `Roles.moderator` -- In addition to the privileges granted to a
publisher, in clients using the OpenTok.js 2.2 library, a moderator can call the
`forceUnpublish()` and `forceDisconnect()` method of the
Session object.
:param int expire_time: The expiration time of the token, in seconds since the UNIX epoch.
The maximum expiration time is 30 days after the creation time. The default expiration
time is 24 hours after the token creation time.
:param String data: A string containing connection metadata describing the
end-user. For example, you can pass the user ID, name, or other data describing the
end-user. The length of the string is limited to 1000 characters. This data cannot be
updated once it is set.
:param list initial_layout_class_list: An array of class names (strings)
to be used as the initial layout classes for streams published by the client. Layout
classes are used in customizing the layout of videos in
`live streaming broadcasts <https://tokbox.com/developer/guides/broadcast/#live-streaming>`_ and
`composed archives <https://tokbox.com/developer/guides/archiving/layout-control.html>`_
:rtype:
The token string.
"""
# normalize
# expire_time can be an integer, a datetime object, or anything else that can be coerced into an integer
# after this block it will only be an integer
if expire_time is not None:
if isinstance(expire_time, datetime):
expire_time = calendar.timegm(expire_time.utctimetuple())
else:
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise OpenTokException(u('Cannot generate token, invalid expire time {0}').format(expire_time))
else:
expire_time = int(time.time()) + (60*60*24) # 1 day
# validations
if not text_type(session_id):
raise OpenTokException(u('Cannot generate token, session_id was not valid {0}').format(session_id))
if not isinstance(role, Roles):
raise OpenTokException(u('Cannot generate token, {0} is not a valid role').format(role))
now = int(time.time())
if expire_time < now:
raise OpenTokException(u('Cannot generate token, expire_time is not in the future {0}').format(expire_time))
if expire_time > now + (60*60*24*30): # 30 days
raise OpenTokException(u('Cannot generate token, expire_time is not in the next 30 days {0}').format(expire_time))
if data and len(data) > 1000:
raise OpenTokException(u('Cannot generate token, data must be less than 1000 characters'))
if initial_layout_class_list and not all(text_type(c) for c in initial_layout_class_list):
raise OpenTokException(u('Cannot generate token, all items in initial_layout_class_list must be strings'))
initial_layout_class_list_serialized = u(' ').join(initial_layout_class_list)
if len(initial_layout_class_list_serialized) > 1000:
raise OpenTokException(u('Cannot generate token, initial_layout_class_list must be less than 1000 characters'))
# decode session id to verify api_key
sub_session_id = session_id[2:]
sub_session_id_bytes = sub_session_id.encode('utf-8')
sub_session_id_bytes_padded = sub_session_id_bytes + (b('=') * (-len(sub_session_id_bytes) % 4))
try:
decoded_session_id = base64.b64decode(sub_session_id_bytes_padded, b('-_'))
parts = decoded_session_id.decode('utf-8').split(u('~'))
except Exception as e:
raise OpenTokException(u('Cannot generate token, the session_id {0} was not valid').format(session_id))
if self.api_key not in parts:
raise OpenTokException(u('Cannot generate token, the session_id {0} does not belong to the api_key {1}').format(session_id, self.api_key))
data_params = dict(
session_id = session_id,
create_time = now,
expire_time = expire_time,
role = role.value,
nonce = random.randint(0,999999),
initial_layout_class_list = initial_layout_class_list_serialized
)
if data:
data_params['connection_data'] = data
data_string = urlencode(data_params, True)
sig = self._sign_string(data_string, self.api_secret)
decoded_base64_bytes = u('partner_id={api_key}&sig={sig}:{payload}').format(
api_key = self.api_key,
sig = sig,
payload = data_string
)
if PY3:
decoded_base64_bytes = decoded_base64_bytes.encode('utf-8')
token = u('{sentinal}{base64_data}').format(
sentinal = self.TOKEN_SENTINEL,
base64_data = base64.b64encode(decoded_base64_bytes).decode()
)
return token
def create_session(self, location=None, media_mode=MediaModes.relayed, archive_mode=ArchiveModes.manual):
"""
Creates a new OpenTok session and returns the session ID, which uniquely identifies
the session.
For example, when using the OpenTok JavaScript library, use the session ID when calling the
OT.initSession() method (to initialize an OpenTok session).
OpenTok sessions do not expire. However, authentication tokens do expire (see the
generateToken() method). Also note that sessions cannot explicitly be destroyed.
A session ID string can be up to 255 characters long.
Calling this method results in an OpenTokException in the event of an error.
Check the error message for details.
You can also create a session using the OpenTok
`REST API <https://tokbox.com/opentok/api/#session_id_production>`_ or
`the OpenTok dashboard <https://dashboard.tokbox.com/projects>`_.
:param String media_mode: Determines whether the session will transmit streams using the
OpenTok Media Router (MediaMode.routed) or not (MediaMode.relayed). By default,
the setting is MediaMode.relayed.
With the media_mode property set to MediaMode.relayed, the session
will attempt to transmit streams directly between clients. If clients cannot connect
due to firewall restrictions, the session uses the OpenTok TURN server to relay
audio-video streams.
The `OpenTok Media
Router <https://tokbox.com/opentok/tutorials/create-session/#media-mode>`_
provides the following benefits:
* The OpenTok Media Router can decrease bandwidth usage in multiparty sessions.
(When the mediaMode property is set to MediaMode.relayed, each client must send
a separate audio-video stream to each client subscribing to it.)
* The OpenTok Media Router can improve the quality of the user experience through
audio fallback and video recovery (see https://tokbox.com/platform/fallback). With
these features, if a client's connectivity degrades to a degree that
it does not support video for a stream it's subscribing to, the video is dropped on
that client (without affecting other clients), and the client receives audio only.
If the client's connectivity improves, the video returns.
* The OpenTok Media Router supports the archiving feature, which lets
you record, save, and retrieve OpenTok sessions (see http://tokbox.com/platform/archiving).
:param String archive_mode: Whether the session is automatically archived
(ArchiveModes.always) or not (ArchiveModes.manual). By default,
the setting is ArchiveModes.manual, and you must call the
start_archive() method of the OpenTok object to start archiving. To archive the session
(either automatically or not), you must set the media_mode parameter to
MediaModes.routed.
:param String location: An IP address that the OpenTok servers will use to
situate the session in its global network. If you do not set a location hint,
the OpenTok servers will be based on the first client connecting to the session.
:rtype: The Session object. The session_id property of the object is the session ID.
"""
# build options
options = {}
if not isinstance(media_mode, MediaModes):
raise OpenTokException(u('Cannot create session, {0} is not a valid media mode').format(media_mode))
if not isinstance(archive_mode, ArchiveModes):
raise OpenTokException(u('Cannot create session, {0} is not a valid archive mode').format(archive_mode))
if archive_mode == ArchiveModes.always and media_mode != MediaModes.routed:
raise OpenTokException(u('A session with always archive mode must also have the routed media mode.'))
options[u('p2p.preference')] = media_mode.value
options[u('archiveMode')] = archive_mode.value
if location:
# validate IP address
try:
inet_aton(location)
except:
raise OpenTokException(u('Cannot create session. Location must be either None or a valid IPv4 address {0}').format(location))
options[u('location')] = location
try:
response = requests.post(self.endpoints.session_url(), data=options, headers=self.headers(), proxies=self.proxies, timeout=self.timeout)
response.encoding = 'utf-8'
if response.status_code == 403:
raise AuthError('Failed to create session, invalid credentials')
if not response.content:
raise RequestError()
dom = xmldom.parseString(response.content)
except Exception as e:
raise RequestError('Failed to create session: %s' % str(e))
try:
error = dom.getElementsByTagName('error')
if error:
error = error[0]
raise AuthError('Failed to create session (code=%s): %s' % (error.attributes['code'].value, error.firstChild.attributes['message'].value))
session_id = dom.getElementsByTagName('session_id')[0].childNodes[0].nodeValue
return Session(self, session_id, location=location, media_mode=media_mode, archive_mode=archive_mode)
except Exception as e:
raise OpenTokException('Failed to generate session: %s' % str(e))
def headers(self):
"""For internal use."""
return {
'User-Agent': 'OpenTok-Python-SDK/' + __version__ + ' ' + platform.python_version(),
'X-OPENTOK-AUTH': self._create_jwt_auth_header()
}
def json_headers(self):
"""For internal use."""
result = self.headers()
result['Content-Type'] = 'application/json'
return result
def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None):
"""
Starts archiving an OpenTok session.
Clients must be actively connected to the OpenTok session for you to successfully start
recording an archive.
You can only record one archive at a time for a given session. You can only record archives
of sessions that use the OpenTok Media Router (sessions with the media mode set to routed);
you cannot archive sessions with the media mode set to relayed.
For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide.
:param String session_id: The session ID of the OpenTok session to archive.
:param String name: This is the name of the archive. You can use this name
to identify the archive. It is a property of the Archive object, and it is a property
of archive-related events in the OpenTok.js library.
:param Boolean has_audio: if set to True, an audio track will be inserted to the archive.
has_audio is an optional parameter that is set to True by default. If you set both
has_audio and has_video to False, the call to the start_archive() method results in
an error.
:param Boolean has_video: if set to True, a video track will be inserted to the archive.
has_video is an optional parameter that is set to True by default.
:param OutputModes output_mode: Whether all streams in the archive are recorded
to a single file (OutputModes.composed, the default) or to individual files
(OutputModes.individual).
:param String resolution (Optional): The resolution of the archive, either "640x480" (the default)
or "1280x720". This parameter only applies to composed archives. If you set this
parameter and set the output_mode parameter to OutputModes.individual, the call to the
start_archive() method results in an error.
:rtype: The Archive object, which includes properties defining the archive,
including the archive ID.
"""
if not isinstance(output_mode, OutputModes):
raise OpenTokException(u('Cannot start archive, {0} is not a valid output mode').format(output_mode))
if resolution and output_mode == OutputModes.individual:
raise OpenTokException(u('Invalid parameters: Resolution cannot be supplied for individual output mode.'))
payload = {'name': name,
'sessionId': session_id,
'hasAudio': has_audio,
'hasVideo': has_video,
'outputMode': output_mode.value,
'resolution': resolution,
}
response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 400:
"""
The HTTP response has a 400 status code in the following cases:
You do not pass in a session ID or you pass in an invalid session ID.
No clients are actively connected to the OpenTok session.
You specify an invalid resolution value.
The outputMode property is set to "individual" and you set the resolution property and (which is not supported in individual stream archives).
"""
raise RequestError(response.json().get("message"))
elif response.status_code == 404:
raise NotFoundError("Session not found")
elif response.status_code == 409:
raise ArchiveError(response.json().get("message"))
else:
raise RequestError("An unexpected error occurred", response.status_code)
def stop_archive(self, archive_id):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 90 minutes or when all clients have disconnected
from the session being archived.
@param [String] archive_id The archive ID of the archive you want to stop recording.
:rtype: The Archive object corresponding to the archive being stopped.
"""
response = requests.post(self.endpoints.archive_url(archive_id) + '/stop', headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
elif response.status_code == 409:
raise ArchiveError("Archive is not in started state")
else:
raise RequestError("An unexpected error occurred", response.status_code)
def delete_archive(self, archive_id):
"""
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted.
"""
response = requests.delete(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
pass
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code)
def get_archive(self, archive_id):
"""Gets an Archive object for the given archive ID.
:param String archive_id: The archive ID.
:rtype: The Archive object.
"""
response = requests.get(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code)
def get_archives(self, offset=None, count=None, session_id=None):
"""Returns an ArchiveList, which is an array of archives that are completed and in-progress,
for your API key.
:param int: offset Optional. The index offset of the first archive. 0 is offset
of the most recently started archive. 1 is the offset of the archive that started prior to
the most recent archive. If you do not specify an offset, 0 is used.
:param int: count Optional. The number of archives to be returned. The maximum
number of archives returned is 1000.
:param string: session_id Optional. Used to list archives for a specific session ID.
:rtype: An ArchiveList object, which is an array of Archive objects.
"""
params = {}
if offset is not None:
params['offset'] = offset
if count is not None:
params['count'] = count
if session_id is not None:
params['sessionId'] = session_id
endpoint = self.endpoints.archive_url() + "?" + urlencode(params)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code < 300:
return ArchiveList(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code)
def list_archives(self, offset=None, count=None, session_id=None):
"""
New method to get archive list, it's alternative to 'get_archives()',
both methods exist to have backwards compatible
"""
return self.get_archives(offset, count, session_id)
def signal(self, session_id, payload, connection_id=None):
"""
Send signals to all participants in an active OpenTok session or to a specific client
connected to that session.
:param String session_id: The session ID of the OpenTok session that receives the signal
:param Dictionary payload: Structure that contains both the type and data fields. These
correspond to the type and data parameters passed in the client signal received handlers
:param String connection_id: The connection_id parameter is an optional string used to
specify the connection ID of a client connected to the session. If you specify this value,
the signal is sent to the specified client. Otherwise, the signal is sent to all clients
connected to the session
"""
response = requests.post(
self.endpoints.signaling_url(session_id, connection_id),
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise SignalingError('One of the signal properties - data, type, sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to send the signal. Check your authentication credentials.')
elif response.status_code == 404:
raise SignalingError('The client specified by the connectionId property is not connected to the session.')
elif response.status_code == 413:
raise SignalingError('The type string exceeds the maximum length (128 bytes), or the data string exceeds the maximum size (8 kB).')
else:
raise RequestError('An unexpected error occurred', response.status_code)
def get_stream(self, session_id, stream_id):
"""
Returns an Stream object that contains information of an OpenTok stream:
-id: The stream ID
-videoType: "camera" or "screen"
-name: The stream name (if one was set when the client published the stream)
-layoutClassList: It's an array of the layout classes for the stream
"""
endpoint = self.endpoints.get_stream_url(session_id, stream_id)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 200:
return Stream(response.json())
elif response.status_code == 400:
raise GetStreamError('Invalid request. This response may indicate that data in your request data is invalid JSON. Or it may indicate that you do not pass in a session ID or you passed in an invalid stream ID.')
elif response.status_code == 403:
raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
elif response.status_code == 408:
raise GetStreamError('You passed in an invalid stream ID.')
else:
raise RequestError('An unexpected error occurred', response.status_code)
def list_streams(self, session_id):
"""
Returns a list of Stream objects that contains information of all
the streams in a OpenTok session, with the following attributes:
-count: An integer that indicates the number of streams in the session
-items: List of the Stream objects
"""
endpoint = self.endpoints.get_stream_url(session_id)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 200:
return StreamList(response.json())
elif response.status_code == 400:
raise GetStreamError('Invalid request. This response may indicate that data in your request data is invalid JSON. Or it may indicate that you do not pass in a session ID or you passed in an invalid stream ID.')
elif response.status_code == 403:
raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
else:
raise RequestError('An unexpected error occurred', response.status_code)
def force_disconnect(self, session_id, connection_id):
"""
Sends a request to disconnect a client from an OpenTok session
:param String session_id: The session ID of the OpenTok session from which the
client will be disconnected
:param String connection_id: The connection ID of the client that will be disconnected
"""
endpoint = self.endpoints.force_disconnect_url(session_id, connection_id)
response = requests.delete(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise ForceDisconnectError('One of the arguments - sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to forceDisconnect, check your authentication credentials.')
elif response.status_code == 404:
raise ForceDisconnectError('The client specified by the connectionId property is not connected to the session.')
else:
raise RequestError('An unexpected error occurred', response.status_code)
def set_archive_layout(self, archive_id, layout_type, stylesheet=None):
"""
Use this method to change the layout of videos in an OpenTok archive
:param String archive_id: The ID of the archive that will be updated
:param String layout_type: The layout type for the archive. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom'
"""
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.set_archive_layout_url(archive_id)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise ArchiveError('Invalid request. This response may indicate that data in your request data is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def dial(self, session_id, token, sip_uri, options=[]):
"""
Use this method to connect a SIP platform to an OpenTok session. The audio from the end
of the SIP call is added to the OpenTok session as an audio-only stream. The OpenTok Media
Router mixes audio from other streams in the session and sends the mixed audio to the SIP
endpoint
:param String session_id: The OpenTok session ID for the SIP call to join
:param String token: The OpenTok token to be used for the participant being called
:param String sip_uri: The SIP URI to be used as destination of the SIP call initiated from
OpenTok to the SIP platform
:param Dictionary options optional: Aditional options with the following properties:
String 'from': The number or string that will be sent to the final SIP number
as the caller
Dictionary 'headers': Defines custom headers to be added to the SIP INVITE request
initiated from OpenTok to the SIP platform. Each of the custom headers must
start with the "X-" prefix, or the call will result in a Bad Request (400) response
Dictionary 'auth': Contains the username and password to be used in the the SIP
INVITE request for HTTP digest authentication, if it is required by the SIP platform
For example:
'auth': {
'username': 'username',
'password': 'password'
}
Boolean 'secure': A Boolean flag that indicates whether the media must be transmitted
encrypted (true) or not (false, the default)
:rtype: A SipCall object, which contains data of the SIP call: id, connectionId and streamId
"""
payload = {
'sessionId': session_id,
'token': token,
'sip': {
'uri': sip_uri
}
}
if 'from' in options:
payload['sip']['from'] = options['from']
if 'headers' in options:
payload['sip']['headers'] = options['headers']
if 'auth' in options:
payload['sip']['auth'] = options['auth']
if 'secure' in options:
payload['sip']['secure'] = options['secure']
endpoint = self.endpoints.dial_url()
response = requests.post(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return SipCall(response.json())
elif response.status_code == 400:
raise SipDialError('Invalid request. Invalid session ID.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 404:
raise SipDialError('The session does not exist.')
elif response.status_code == 409:
raise SipDialError(
'You attempted to start a SIP call for a session that '
'does not use the OpenTok Media Router.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def set_stream_class_lists(self, session_id, payload):
"""
Use this method to change layout classes for OpenTok streams. The layout classes
define how the streams are displayed in the layout of a composed OpenTok archive
:param String session_id: The ID of the session of the streams that will be updated
:param List payload: A list defining the class lists to apply to the streams.
Each element in the list is a dictionary with two properties: 'id' and 'layoutClassList'.
The 'id' property is the stream ID (a String), and the 'layoutClassList' is an array of
class names (Strings) to apply to the stream. For example:
payload = [
{'id': '7b09ec3c-26f9-43d7-8197-f608f13d4fb6', 'layoutClassList': ['focus']},
{'id': '567bc941-6ea0-4c69-97fc-70a740b68976', 'layoutClassList': ['top']},
{'id': '307dc941-0450-4c09-975c-705740d08970', 'layoutClassList': ['bottom']}
]
"""
items_payload = {'items': payload}
endpoint = self.endpoints.set_stream_class_lists_url(session_id)
response = requests.put(
endpoint,
data=json.dumps(items_payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise SetStreamClassError(
'Invalid request. This response may indicate that data in your request data '
'is invalid JSON. It may also indicate that you passed in invalid layout options.'
)
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def start_broadcast(self, session_id, options):
"""
Use this method to start a live streaming for an OpenTok session. This broadcasts the
session to an HLS (HTTP live streaming) or to RTMP streams. To successfully start
broadcasting a session, at least one client must be connected to the session. You can only
start live streaming for sessions that use the OpenTok Media Router (with the media mode set
to routed); you cannot use live streaming with sessions that have the media mode set to
relayed
:param String session_id: The session ID of the OpenTok session you want to broadcast
:param Dictionary options, with the following properties:
Dictionary 'layout' optional: Specify this to assign the initial layout type for the
broadcast. Valid values for the layout property are "bestFit", "custom",
"horizontalPresentation", "pip" and "verticalPresentation". If you specify a "custom"
layout type, set the stylesheet property of the layout object to the stylesheet.
If you do not specify an initial layout type, the broadcast stream uses the Best Fit
layout type
Integer 'maxDuration' optional: The maximum duration for the broadcast, in seconds.
The broadcast will automatically stop when the maximum duration is reached. You can
set the maximum duration to a value from 60 (60 seconds) to 36000 (10 hours). The
default maximum duration is 2 hours (7200 seconds)
Dictionary 'outputs': This object defines the types of broadcast streams you want to
start (both HLS and RTMP). You can include HLS, RTMP, or both as broadcast streams.
If you include RTMP streaming, you can specify up to five target RTMP streams. For
each RTMP stream, specify 'serverUrl' (the RTMP server URL), 'streamName' (the stream
name, such as the YouTube Live stream name or the Facebook stream key), and
(optionally) 'id' (a unique ID for the stream)
String 'resolution' optional: The resolution of the broadcast, either "640x480"
(SD, the default) or "1280x720" (HD)
:rtype A Broadcast object, which contains information of the broadcast: id, sessionId
projectId, createdAt, updatedAt, resolution, status and broadcastUrls
"""
payload = {
'sessionId': session_id
}
payload.update(options)
endpoint = self.endpoints.broadcast_url()
response = requests.post(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request data is '
'invalid JSON. It may also indicate that you passed in invalid layout options. '
'Or you have exceeded the limit of five simultaneous RTMP streams for an OpenTok '
'session. Or you specified and invalid resolution.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError('The broadcast has already started for the session.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def stop_broadcast(self, broadcast_id):
"""
Use this method to stop a live broadcast of an OpenTok session
:param String broadcast_id: The ID of the broadcast you want to stop
:rtype A Broadcast object, which contains information of the broadcast: id, sessionId
projectId, createdAt, updatedAt and resolution
"""
endpoint = self.endpoints.broadcast_url(broadcast_id, stop=True)
response = requests.post(
endpoint,
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request '
'data is invalid JSON.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError(
'The broadcast (with the specified ID) was not found or it has already '
'stopped.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def get_broadcast(self, broadcast_id):
"""
Use this method to get details on a broadcast that is in-progress.
:param String broadcast_id: The ID of the broadcast you want to stop
:rtype A Broadcast object, which contains information of the broadcast: id, sessionId
projectId, createdAt, updatedAt, resolution, broadcastUrls and status
"""
endpoint = self.endpoints.broadcast_url(broadcast_id)
response = requests.get(
endpoint,
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request '
'data is invalid JSON.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError('No matching broadcast found (with the specified ID).')
else:
raise RequestError('OpenTok server error.', response.status_code)
def set_broadcast_layout(self, broadcast_id, layout_type, stylesheet=None):
"""
Use this method to change the layout type of a live streaming broadcast
:param String broadcast_id: The ID of the broadcast that will be updated
:param String layout_type: The layout type for the broadcast. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom'
"""
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.broadcast_url(broadcast_id, layout=True)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request data is '
'invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def _sign_string(self, string, secret):
return hmac.new(secret.encode('utf-8'), string.encode('utf-8'), hashlib.sha1).hexdigest()
def _create_jwt_auth_header(self):
payload = {
'ist': 'project',
'iss': self.api_key,
'iat': int(time.time()), # current time in unix time (seconds)
'exp': int(time.time()) + (60*3), # 3 minutes in the future (seconds)
'jti': '{0}'.format(0, random.random())
}
return jwt.encode(payload, self.api_secret, algorithm='HS256')
| 46.782929 | 222 | 0.646282 | from datetime import datetime
import calendar
import base64
import random
import time
import hmac
import hashlib
import requests
import json
import platform
from socket import inet_aton
import xml.dom.minidom as xmldom
from jose import jwt
import random
from six.moves.urllib.parse import urlencode
from six import text_type, u, b, PY3
from enum import Enum
from .version import __version__
from .endpoints import Endpoints
from .session import Session
from .archives import Archive, ArchiveList, OutputModes
from .stream import Stream
from .streamlist import StreamList
from .sip_call import SipCall
from .broadcast import Broadcast
from .exceptions import (
OpenTokException,
RequestError,
AuthError,
NotFoundError,
ArchiveError,
SignalingError,
GetStreamError,
ForceDisconnectError,
SipDialError,
SetStreamClassError,
BroadcastError
)
class Roles(Enum):
subscriber = u('subscriber')
publisher = u('publisher')
moderator = u('moderator')
class MediaModes(Enum):
routed = u('disabled')
relayed = u('enabled')
class ArchiveModes(Enum):
manual = u('manual')
always = u('always')
class OpenTok(object):
TOKEN_SENTINEL = 'T1=='
def __init__(self, api_key, api_secret, api_url='https://api.opentok.com', timeout=None):
self.api_key = str(api_key)
self.api_secret = api_secret
self.timeout = timeout
self._proxies = None
self.endpoints = Endpoints(api_url, self.api_key)
@property
def proxies(self):
return self._proxies
@proxies.setter
def proxies(self, proxies):
self._proxies = proxies
def generate_token(self, session_id, role=Roles.publisher, expire_time=None, data=None,
initial_layout_class_list=[]):
if expire_time is not None:
if isinstance(expire_time, datetime):
expire_time = calendar.timegm(expire_time.utctimetuple())
else:
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise OpenTokException(u('Cannot generate token, invalid expire time {0}').format(expire_time))
else:
expire_time = int(time.time()) + (60*60*24)
if not text_type(session_id):
raise OpenTokException(u('Cannot generate token, session_id was not valid {0}').format(session_id))
if not isinstance(role, Roles):
raise OpenTokException(u('Cannot generate token, {0} is not a valid role').format(role))
now = int(time.time())
if expire_time < now:
raise OpenTokException(u('Cannot generate token, expire_time is not in the future {0}').format(expire_time))
if expire_time > now + (60*60*24*30):
raise OpenTokException(u('Cannot generate token, expire_time is not in the next 30 days {0}').format(expire_time))
if data and len(data) > 1000:
raise OpenTokException(u('Cannot generate token, data must be less than 1000 characters'))
if initial_layout_class_list and not all(text_type(c) for c in initial_layout_class_list):
raise OpenTokException(u('Cannot generate token, all items in initial_layout_class_list must be strings'))
initial_layout_class_list_serialized = u(' ').join(initial_layout_class_list)
if len(initial_layout_class_list_serialized) > 1000:
raise OpenTokException(u('Cannot generate token, initial_layout_class_list must be less than 1000 characters'))
sub_session_id = session_id[2:]
sub_session_id_bytes = sub_session_id.encode('utf-8')
sub_session_id_bytes_padded = sub_session_id_bytes + (b('=') * (-len(sub_session_id_bytes) % 4))
try:
decoded_session_id = base64.b64decode(sub_session_id_bytes_padded, b('-_'))
parts = decoded_session_id.decode('utf-8').split(u('~'))
except Exception as e:
raise OpenTokException(u('Cannot generate token, the session_id {0} was not valid').format(session_id))
if self.api_key not in parts:
raise OpenTokException(u('Cannot generate token, the session_id {0} does not belong to the api_key {1}').format(session_id, self.api_key))
data_params = dict(
session_id = session_id,
create_time = now,
expire_time = expire_time,
role = role.value,
nonce = random.randint(0,999999),
initial_layout_class_list = initial_layout_class_list_serialized
)
if data:
data_params['connection_data'] = data
data_string = urlencode(data_params, True)
sig = self._sign_string(data_string, self.api_secret)
decoded_base64_bytes = u('partner_id={api_key}&sig={sig}:{payload}').format(
api_key = self.api_key,
sig = sig,
payload = data_string
)
if PY3:
decoded_base64_bytes = decoded_base64_bytes.encode('utf-8')
token = u('{sentinal}{base64_data}').format(
sentinal = self.TOKEN_SENTINEL,
base64_data = base64.b64encode(decoded_base64_bytes).decode()
)
return token
def create_session(self, location=None, media_mode=MediaModes.relayed, archive_mode=ArchiveModes.manual):
options = {}
if not isinstance(media_mode, MediaModes):
raise OpenTokException(u('Cannot create session, {0} is not a valid media mode').format(media_mode))
if not isinstance(archive_mode, ArchiveModes):
raise OpenTokException(u('Cannot create session, {0} is not a valid archive mode').format(archive_mode))
if archive_mode == ArchiveModes.always and media_mode != MediaModes.routed:
raise OpenTokException(u('A session with always archive mode must also have the routed media mode.'))
options[u('p2p.preference')] = media_mode.value
options[u('archiveMode')] = archive_mode.value
if location:
try:
inet_aton(location)
except:
raise OpenTokException(u('Cannot create session. Location must be either None or a valid IPv4 address {0}').format(location))
options[u('location')] = location
try:
response = requests.post(self.endpoints.session_url(), data=options, headers=self.headers(), proxies=self.proxies, timeout=self.timeout)
response.encoding = 'utf-8'
if response.status_code == 403:
raise AuthError('Failed to create session, invalid credentials')
if not response.content:
raise RequestError()
dom = xmldom.parseString(response.content)
except Exception as e:
raise RequestError('Failed to create session: %s' % str(e))
try:
error = dom.getElementsByTagName('error')
if error:
error = error[0]
raise AuthError('Failed to create session (code=%s): %s' % (error.attributes['code'].value, error.firstChild.attributes['message'].value))
session_id = dom.getElementsByTagName('session_id')[0].childNodes[0].nodeValue
return Session(self, session_id, location=location, media_mode=media_mode, archive_mode=archive_mode)
except Exception as e:
raise OpenTokException('Failed to generate session: %s' % str(e))
def headers(self):
return {
'User-Agent': 'OpenTok-Python-SDK/' + __version__ + ' ' + platform.python_version(),
'X-OPENTOK-AUTH': self._create_jwt_auth_header()
}
def json_headers(self):
result = self.headers()
result['Content-Type'] = 'application/json'
return result
def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None):
if not isinstance(output_mode, OutputModes):
raise OpenTokException(u('Cannot start archive, {0} is not a valid output mode').format(output_mode))
if resolution and output_mode == OutputModes.individual:
raise OpenTokException(u('Invalid parameters: Resolution cannot be supplied for individual output mode.'))
payload = {'name': name,
'sessionId': session_id,
'hasAudio': has_audio,
'hasVideo': has_video,
'outputMode': output_mode.value,
'resolution': resolution,
}
response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 400:
"""
The HTTP response has a 400 status code in the following cases:
You do not pass in a session ID or you pass in an invalid session ID.
No clients are actively connected to the OpenTok session.
You specify an invalid resolution value.
The outputMode property is set to "individual" and you set the resolution property and (which is not supported in individual stream archives).
"""
raise RequestError(response.json().get("message"))
elif response.status_code == 404:
raise NotFoundError("Session not found")
elif response.status_code == 409:
raise ArchiveError(response.json().get("message"))
else:
raise RequestError("An unexpected error occurred", response.status_code)
def stop_archive(self, archive_id):
response = requests.post(self.endpoints.archive_url(archive_id) + '/stop', headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
elif response.status_code == 409:
raise ArchiveError("Archive is not in started state")
else:
raise RequestError("An unexpected error occurred", response.status_code)
def delete_archive(self, archive_id):
response = requests.delete(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
pass
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code)
def get_archive(self, archive_id):
response = requests.get(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code)
def get_archives(self, offset=None, count=None, session_id=None):
params = {}
if offset is not None:
params['offset'] = offset
if count is not None:
params['count'] = count
if session_id is not None:
params['sessionId'] = session_id
endpoint = self.endpoints.archive_url() + "?" + urlencode(params)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code < 300:
return ArchiveList(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code)
def list_archives(self, offset=None, count=None, session_id=None):
return self.get_archives(offset, count, session_id)
def signal(self, session_id, payload, connection_id=None):
response = requests.post(
self.endpoints.signaling_url(session_id, connection_id),
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise SignalingError('One of the signal properties - data, type, sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to send the signal. Check your authentication credentials.')
elif response.status_code == 404:
raise SignalingError('The client specified by the connectionId property is not connected to the session.')
elif response.status_code == 413:
raise SignalingError('The type string exceeds the maximum length (128 bytes), or the data string exceeds the maximum size (8 kB).')
else:
raise RequestError('An unexpected error occurred', response.status_code)
def get_stream(self, session_id, stream_id):
endpoint = self.endpoints.get_stream_url(session_id, stream_id)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 200:
return Stream(response.json())
elif response.status_code == 400:
raise GetStreamError('Invalid request. This response may indicate that data in your request data is invalid JSON. Or it may indicate that you do not pass in a session ID or you passed in an invalid stream ID.')
elif response.status_code == 403:
raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
elif response.status_code == 408:
raise GetStreamError('You passed in an invalid stream ID.')
else:
raise RequestError('An unexpected error occurred', response.status_code)
def list_streams(self, session_id):
endpoint = self.endpoints.get_stream_url(session_id)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 200:
return StreamList(response.json())
elif response.status_code == 400:
raise GetStreamError('Invalid request. This response may indicate that data in your request data is invalid JSON. Or it may indicate that you do not pass in a session ID or you passed in an invalid stream ID.')
elif response.status_code == 403:
raise AuthError('You passed in an invalid OpenTok API key or JWT token.')
else:
raise RequestError('An unexpected error occurred', response.status_code)
def force_disconnect(self, session_id, connection_id):
endpoint = self.endpoints.force_disconnect_url(session_id, connection_id)
response = requests.delete(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise ForceDisconnectError('One of the arguments - sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to forceDisconnect, check your authentication credentials.')
elif response.status_code == 404:
raise ForceDisconnectError('The client specified by the connectionId property is not connected to the session.')
else:
raise RequestError('An unexpected error occurred', response.status_code)
def set_archive_layout(self, archive_id, layout_type, stylesheet=None):
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.set_archive_layout_url(archive_id)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise ArchiveError('Invalid request. This response may indicate that data in your request data is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def dial(self, session_id, token, sip_uri, options=[]):
payload = {
'sessionId': session_id,
'token': token,
'sip': {
'uri': sip_uri
}
}
if 'from' in options:
payload['sip']['from'] = options['from']
if 'headers' in options:
payload['sip']['headers'] = options['headers']
if 'auth' in options:
payload['sip']['auth'] = options['auth']
if 'secure' in options:
payload['sip']['secure'] = options['secure']
endpoint = self.endpoints.dial_url()
response = requests.post(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return SipCall(response.json())
elif response.status_code == 400:
raise SipDialError('Invalid request. Invalid session ID.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 404:
raise SipDialError('The session does not exist.')
elif response.status_code == 409:
raise SipDialError(
'You attempted to start a SIP call for a session that '
'does not use the OpenTok Media Router.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def set_stream_class_lists(self, session_id, payload):
items_payload = {'items': payload}
endpoint = self.endpoints.set_stream_class_lists_url(session_id)
response = requests.put(
endpoint,
data=json.dumps(items_payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise SetStreamClassError(
'Invalid request. This response may indicate that data in your request data '
'is invalid JSON. It may also indicate that you passed in invalid layout options.'
)
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def start_broadcast(self, session_id, options):
payload = {
'sessionId': session_id
}
payload.update(options)
endpoint = self.endpoints.broadcast_url()
response = requests.post(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request data is '
'invalid JSON. It may also indicate that you passed in invalid layout options. '
'Or you have exceeded the limit of five simultaneous RTMP streams for an OpenTok '
'session. Or you specified and invalid resolution.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError('The broadcast has already started for the session.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def stop_broadcast(self, broadcast_id):
endpoint = self.endpoints.broadcast_url(broadcast_id, stop=True)
response = requests.post(
endpoint,
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request '
'data is invalid JSON.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError(
'The broadcast (with the specified ID) was not found or it has already '
'stopped.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def get_broadcast(self, broadcast_id):
endpoint = self.endpoints.broadcast_url(broadcast_id)
response = requests.get(
endpoint,
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
return Broadcast(response.json())
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request '
'data is invalid JSON.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
elif response.status_code == 409:
raise BroadcastError('No matching broadcast found (with the specified ID).')
else:
raise RequestError('OpenTok server error.', response.status_code)
def set_broadcast_layout(self, broadcast_id, layout_type, stylesheet=None):
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.broadcast_url(broadcast_id, layout=True)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request data is '
'invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code)
def _sign_string(self, string, secret):
return hmac.new(secret.encode('utf-8'), string.encode('utf-8'), hashlib.sha1).hexdigest()
def _create_jwt_auth_header(self):
payload = {
'ist': 'project',
'iss': self.api_key,
'iat': int(time.time()),
'exp': int(time.time()) + (60*3),
'jti': '{0}'.format(0, random.random())
}
return jwt.encode(payload, self.api_secret, algorithm='HS256')
| true | true |
f7f43b792511979f542f9828feaef9f068c4990c | 1,103 | py | Python | src/api/tasks.py | watxaut-alpha/joke-app | da039d1ecb6bb557a8d26b9094bcb047b8cfb249 | [
"MIT"
] | null | null | null | src/api/tasks.py | watxaut-alpha/joke-app | da039d1ecb6bb557a8d26b9094bcb047b8cfb249 | [
"MIT"
] | 13 | 2020-03-31T10:55:16.000Z | 2022-03-29T22:28:16.000Z | src/api/tasks.py | watxaut/joke-app | da039d1ecb6bb557a8d26b9094bcb047b8cfb249 | [
"MIT"
] | null | null | null | import argparse
import logging
import os
import src.tasks.send as tasks
import src.tasks.validate as validate
if __name__ == "__main__":
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--action",
type=str,
choices=["send_joke_mail", "validate_jokes", "tweet_joke"],
help="Type of action to run. Leave empty to run the bot",
)
parser.add_argument("-d", "--debug", action="store_true", help="Does whatever with debug params")
args = parser.parse_args()
# change dir to current main.py (when executed in cron)
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
if args.action == "send_joke_mail":
tasks.send_mail(args.debug)
elif args.action == "validate_jokes":
validate.put_validated_jokes_in_joke_db()
else:
raise Exception("Option for action not recognized: '{}'".format(args.action))
| 29.810811 | 106 | 0.664551 | import argparse
import logging
import os
import src.tasks.send as tasks
import src.tasks.validate as validate
if __name__ == "__main__":
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--action",
type=str,
choices=["send_joke_mail", "validate_jokes", "tweet_joke"],
help="Type of action to run. Leave empty to run the bot",
)
parser.add_argument("-d", "--debug", action="store_true", help="Does whatever with debug params")
args = parser.parse_args()
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
if args.action == "send_joke_mail":
tasks.send_mail(args.debug)
elif args.action == "validate_jokes":
validate.put_validated_jokes_in_joke_db()
else:
raise Exception("Option for action not recognized: '{}'".format(args.action))
| true | true |
f7f43bfb7320b81fc53e2170613bd85f44c5ad34 | 2,056 | py | Python | tests/perf/multiprocessing_write.py | yaal-coop/sheraf | 774e3781bc6ff2e16c6cc39f268d475b5e64fcea | [
"MIT"
] | null | null | null | tests/perf/multiprocessing_write.py | yaal-coop/sheraf | 774e3781bc6ff2e16c6cc39f268d475b5e64fcea | [
"MIT"
] | null | null | null | tests/perf/multiprocessing_write.py | yaal-coop/sheraf | 774e3781bc6ff2e16c6cc39f268d475b5e64fcea | [
"MIT"
] | null | null | null | import multiprocessing
import sheraf
from BTrees.OOBTree import OOBTree
class PerfConcurrentDB:
NUM_PROCESS = 55
DATABASE_URI = "zeo://localhost:9999"
PROVOKE_CONFLICTS = True
@classmethod
def concurrent_creation(cls):
class Model(sheraf.Model):
table = "Model"
status = sheraf.SimpleAttribute()
first_id = None
# Create the table if needed:
database = sheraf.Database(cls.DATABASE_URI)
try:
with sheraf.connection(commit=True) as conn:
if conn.root().get(Model.table) is None:
first_id = Model.create(status=1)
def process(uri, barrier):
sheraf.Database(uri)
with sheraf.connection(commit=True):
Model.read(first_id.id)
barrier.wait()
Model.create(status=1)
processes = []
barrier = multiprocessing.Barrier(cls.NUM_PROCESS)
for i in range(0, cls.NUM_PROCESS):
processes.append(
multiprocessing.Process(
target=process, args=(cls.DATABASE_URI, barrier)
)
)
for i in range(0, cls.NUM_PROCESS):
processes[i].start()
for i in reversed(range(0, cls.NUM_PROCESS)):
processes[i].join(timeout=10)
for i in range(0, cls.NUM_PROCESS):
assert 0 == processes[i].exitcode
finally:
if cls.PROVOKE_CONFLICTS:
pass
else:
with sheraf.databases.connection(commit=True) as conn:
for key in list(conn.root().keys()):
del conn.root()[key]
database.close()
class BTreePerf:
@classmethod
def overbalancing(cls):
t = OOBTree()
t.update({1: "red", 2: "blue"})
if __name__ == "__main__":
PerfConcurrentDB.concurrent_creation()
# BTreePerf.overbalancing()
| 27.783784 | 72 | 0.534533 | import multiprocessing
import sheraf
from BTrees.OOBTree import OOBTree
class PerfConcurrentDB:
NUM_PROCESS = 55
DATABASE_URI = "zeo://localhost:9999"
PROVOKE_CONFLICTS = True
@classmethod
def concurrent_creation(cls):
class Model(sheraf.Model):
table = "Model"
status = sheraf.SimpleAttribute()
first_id = None
database = sheraf.Database(cls.DATABASE_URI)
try:
with sheraf.connection(commit=True) as conn:
if conn.root().get(Model.table) is None:
first_id = Model.create(status=1)
def process(uri, barrier):
sheraf.Database(uri)
with sheraf.connection(commit=True):
Model.read(first_id.id)
barrier.wait()
Model.create(status=1)
processes = []
barrier = multiprocessing.Barrier(cls.NUM_PROCESS)
for i in range(0, cls.NUM_PROCESS):
processes.append(
multiprocessing.Process(
target=process, args=(cls.DATABASE_URI, barrier)
)
)
for i in range(0, cls.NUM_PROCESS):
processes[i].start()
for i in reversed(range(0, cls.NUM_PROCESS)):
processes[i].join(timeout=10)
for i in range(0, cls.NUM_PROCESS):
assert 0 == processes[i].exitcode
finally:
if cls.PROVOKE_CONFLICTS:
pass
else:
with sheraf.databases.connection(commit=True) as conn:
for key in list(conn.root().keys()):
del conn.root()[key]
database.close()
class BTreePerf:
@classmethod
def overbalancing(cls):
t = OOBTree()
t.update({1: "red", 2: "blue"})
if __name__ == "__main__":
PerfConcurrentDB.concurrent_creation()
| true | true |
f7f43c8f61b06429021e90fb6eb1f90dc2608b12 | 22,003 | py | Python | sympy/polys/tests/test_ring_series.py | kumarkrishna/sympy | a91ba26c6d639980a4b3351a443548c629447cf9 | [
"BSD-3-Clause"
] | null | null | null | sympy/polys/tests/test_ring_series.py | kumarkrishna/sympy | a91ba26c6d639980a4b3351a443548c629447cf9 | [
"BSD-3-Clause"
] | null | null | null | sympy/polys/tests/test_ring_series.py | kumarkrishna/sympy | a91ba26c6d639980a4b3351a443548c629447cf9 | [
"BSD-3-Clause"
] | null | null | null | from sympy.polys.domains import QQ, EX, RR
from sympy.polys.rings import ring
from sympy.polys.ring_series import (_invert_monoms, rs_integrate,
rs_trunc, rs_mul, rs_square, rs_pow, _has_constant_term, rs_hadamard_exp,
rs_series_from_list, rs_exp, rs_log, rs_newton, rs_series_inversion,
rs_compose_add, rs_asin, rs_atan, rs_atanh, rs_tan, rs_cot, rs_sin, rs_cos,
rs_cos_sin, rs_sinh, rs_cosh, rs_tanh, _tan1, rs_fun, rs_nth_root,
rs_LambertW, rs_series_reversion, rs_is_puiseux)
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.core.symbol import symbols
from sympy.functions import (sin, cos, exp, tan, cot, atan, asin, atanh,
tanh, log, sqrt)
from sympy.core.numbers import Rational
def is_close(a, b):
tol = 10**(-10)
assert abs(a - b) < tol
def test_ring_series1():
R, x = ring('x', QQ)
p = x**4 + 2*x**3 + 3*x + 4
assert _invert_monoms(p) == 4*x**4 + 3*x**3 + 2*x + 1
assert rs_hadamard_exp(p) == x**4/24 + x**3/3 + 3*x + 4
R, x = ring('x', QQ)
p = x**4 + 2*x**3 + 3*x + 4
assert rs_integrate(p, x) == x**5/5 + x**4/2 + 3*x**2/2 + 4*x
R, x, y = ring('x, y', QQ)
p = x**2*y**2 + x + 1
assert rs_integrate(p, x) == x**3*y**2/3 + x**2/2 + x
assert rs_integrate(p, y) == x**2*y**3/3 + x*y + y
def test_trunc():
R, x, y, t = ring('x, y, t', QQ)
p = (y + t*x)**4
p1 = rs_trunc(p, x, 3)
assert p1 == y**4 + 4*y**3*t*x + 6*y**2*t**2*x**2
def test_mul_trunc():
R, x, y, t = ring('x, y, t', QQ)
p = 1 + t*x + t*y
for i in range(2):
p = rs_mul(p, p, t, 3)
assert p == 6*x**2*t**2 + 12*x*y*t**2 + 6*y**2*t**2 + 4*x*t + 4*y*t + 1
p = 1 + t*x + t*y + t**2*x*y
p1 = rs_mul(p, p, t, 2)
assert p1 == 1 + 2*t*x + 2*t*y
R1, z = ring('z', QQ)
def test1(p):
p2 = rs_mul(p, z, x, 2)
raises(ValueError, lambda: test1(p))
p1 = 2 + 2*x + 3*x**2
p2 = 3 + x**2
assert rs_mul(p1, p2, x, 4) == 2*x**3 + 11*x**2 + 6*x + 6
def test_square_trunc():
R, x, y, t = ring('x, y, t', QQ)
p = (1 + t*x + t*y)*2
p1 = rs_mul(p, p, x, 3)
p2 = rs_square(p, x, 3)
assert p1 == p2
p = 1 + x + x**2 + x**3
assert rs_square(p, x, 4) == 4*x**3 + 3*x**2 + 2*x + 1
def test_pow_trunc():
R, x, y, z = ring('x, y, z', QQ)
p0 = y + x*z
p = p0**16
for xx in (x, y, z):
p1 = rs_trunc(p, xx, 8)
p2 = rs_pow(p0, 16, xx, 8)
assert p1 == p2
p = 1 + x
p1 = rs_pow(p, 3, x, 2)
assert p1 == 1 + 3*x
assert rs_pow(p, 0, x, 2) == 1
assert rs_pow(p, -2, x, 2) == 1 - 2*x
p = x + y
assert rs_pow(p, 3, y, 3) == x**3 + 3*x**2*y + 3*x*y**2
assert rs_pow(1 + x, Rational(2, 3), x, 4) == 4*x**3/81 - x**2/9 + 2*x/3 + 1
def test_has_constant_term():
R, x, y, z = ring('x, y, z', QQ)
p = y + x*z
assert _has_constant_term(p, x)
p = x + x**4
assert not _has_constant_term(p, x)
p = 1 + x + x**4
assert _has_constant_term(p, x)
p = x + y + x*z
def test_inversion():
R, x = ring('x', QQ)
p = 2 + x + 2*x**2
n = 5
p1 = rs_series_inversion(p, x, n)
assert rs_trunc(p*p1, x, n) == 1
R, x, y = ring('x, y', QQ)
p = 2 + x + 2*x**2 + y*x + x**2*y
p1 = rs_series_inversion(p, x, n)
assert rs_trunc(p*p1, x, n) == 1
R, x, y = ring('x, y', QQ)
p = 1 + x + y
def test2(p):
p1 = rs_series_inversion(p, x, 4)
raises(NotImplementedError, lambda: test2(p))
p = R.zero
def test3(p):
p1 = rs_series_inversion(p, x, 3)
raises(ZeroDivisionError, lambda: test3(p))
def test_series_reversion():
R, x, y = ring('x, y', QQ)
p = rs_tan(x, x, 10)
r1 = rs_series_reversion(p, x, 8, y)
r2 = rs_atan(y, y, 8)
assert rs_series_reversion(p, x, 8, y) == rs_atan(y, y, 8)
p = rs_sin(x, x, 10)
assert rs_series_reversion(p, x, 8, y) == 5*y**7/112 + 3*y**5/40 + \
y**3/6 + y
def test_series_from_list():
R, x = ring('x', QQ)
p = 1 + 2*x + x**2 + 3*x**3
c = [1, 2, 0, 4, 4]
r = rs_series_from_list(p, c, x, 5)
pc = R.from_list(list(reversed(c)))
r1 = rs_trunc(pc.compose(x, p), x, 5)
assert r == r1
R, x, y = ring('x, y', QQ)
c = [1, 3, 5, 7]
p1 = rs_series_from_list(x + y, c, x, 3, concur=0)
p2 = rs_trunc((1 + 3*(x+y) + 5*(x+y)**2 + 7*(x+y)**3), x, 3)
assert p1 == p2
R, x = ring('x', QQ)
h = 25
p = rs_exp(x, x, h) - 1
p1 = rs_series_from_list(p, c, x, h)
p2 = 0
for i, cx in enumerate(c):
p2 += cx*rs_pow(p, i, x, h)
assert p1 == p2
def test_log():
    """Truncated logarithm series, including symbolic constant terms."""
    R, x = ring('x', QQ)
    r = rs_log(1 + x, x, 4)
    assert r == x - x**2/2 + x**3/3
    p = 1 + x + 2*x**2/3
    r = rs_log(p, x, 9)
    assert r == -17*x**8/648 + 13*x**7/189 - 11*x**6/162 - x**5/45 + \
        7*x**4/36 - x**3/3 + x**2/6 + x
    # log of the inverse series is the negated series.
    assert rs_log(rs_series_inversion(p, x, 9), x, 9) == -r
    R, x, y = ring('x, y', QQ)
    r = rs_log(1 + x + 2*y*x**2, x, 6)
    assert r == (4*x**5*y**2 - 2*x**5*y - 2*x**4*y**2 + x**5/5 + 2*x**4*y -
                 x**4/4 - 2*x**3*y + x**3/3 + 2*x**2*y - x**2/2 + x)
    # Expansion about a symbolic constant term requires the EX domain.
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_log(x + a, x, 5) == -EX(1/(4*a**4))*x**4 + EX(1/(3*a**3))*x**3 \
        - EX(1/(2*a**2))*x**2 + EX(1/a)*x + EX(log(a))
    assert rs_log(x + x**2*y + a, x, 4) == -EX(a**(-2))*x**3*y + \
        EX(1/(3*a**3))*x**3 + EX(1/a)*x**2*y - EX(1/(2*a**2))*x**2 + \
        EX(1/a)*x + EX(log(a))
    p = x + x**2 + 3
    assert rs_log(p, x, 10).compose(x, 5) == EX(log(3) + 19281291595/9920232)
def test_exp():
    """Truncated exponential series; exp/log round trips and symbolic
    constant terms."""
    R, x = ring('x', QQ)
    p = x + x**4
    # exp and log are mutually inverse on series with zero constant term.
    for h in [10, 30]:
        q = rs_series_inversion(1 + p, x, h) - 1
        assert rs_log(rs_exp(q, x, h), x, h) == q
    r = rs_exp(p, x, 30)
    assert r.coeff(x**29) == QQ(74274246775059676726972369, 353670479749588078181744640000)
    prec = 21
    assert rs_exp(rs_log(1 + x, x, prec), x, prec) == x + 1
    # Expansion about a symbolic constant term.
    a = symbols('a')
    R, x, y = ring('x, y', QQ[exp(a), a])
    assert rs_exp(x + a, x, 5) == exp(a)*x**4/24 + exp(a)*x**3/6 + \
        exp(a)*x**2/2 + exp(a)*x + exp(a)
    assert rs_exp(x + x**2*y + a, x, 5) == exp(a)*x**4*y**2/2 + \
        exp(a)*x**4*y/2 + exp(a)*x**4/24 + exp(a)*x**3*y + \
        exp(a)*x**3/6 + exp(a)*x**2*y + exp(a)*x**2/2 + exp(a)*x + exp(a)
    R, x, y = ring('x, y', EX)
    assert rs_exp(x + a, x, 5) == EX(exp(a)/24)*x**4 + EX(exp(a)/6)*x**3 + \
        EX(exp(a)/2)*x**2 + EX(exp(a))*x + EX(exp(a))
    assert rs_exp(x + x**2*y + a, x, 5) == EX(exp(a)/2)*x**4*y**2 + \
        EX(exp(a)/2)*x**4*y + EX(exp(a)/24)*x**4 + EX(exp(a))*x**3*y + \
        EX(exp(a)/6)*x**3 + EX(exp(a))*x**2*y + EX(exp(a)/2)*x**2 + \
        EX(exp(a))*x + EX(exp(a))
def test_newton():
    """rs_newton computes the truncated Newton (power-sum) series of p."""
    R, x = ring('x', QQ)
    p = x**2 - 2
    r = rs_newton(p, x, 4)
    # Note: the original also built an unused coefficient list f = [1, 0, -2];
    # it was dead code and has been removed.
    assert r == 8*x**4 + 4*x**2 + 2
def test_compose_add():
    """rs_compose_add: resultant-style additive composition of two polys."""
    R, x = ring('x', QQ)
    result = rs_compose_add(x**3 - 1, x**2 - 2)
    assert result == x**6 - 6*x**4 - 2*x**3 + 12*x**2 - 12*x - 7
def test_fun():
    """rs_fun with a series function matches calling that function directly."""
    R, x, y = ring('x, y', QQ)
    arg = x*y + x**2*y**3 + x**5*y
    assert rs_fun(arg, rs_tan, x, 10) == rs_tan(arg, x, 10)
    assert rs_fun(arg, _tan1, x, 10) == _tan1(arg, x, 10)
def test_nth_root():
    """Truncated n-th root series, including Puiseux exponents and symbolic
    constant terms over EX."""
    R, x, y = ring('x, y', QQ)
    # The original bound this result to an unused local (r1) and then
    # recomputed it inside the assert; the duplicate call is removed.
    assert rs_nth_root(1 + x**2*y, 4, x, 10) == -77*x**8*y**4/2048 + \
        7*x**6*y**3/128 - 3*x**4*y**2/32 + x**2*y/4 + 1
    assert rs_nth_root(1 + x*y + x**2*y**3, 3, x, 5) == -x**4*y**6/9 + \
        5*x**4*y**5/27 - 10*x**4*y**4/243 - 2*x**3*y**4/9 + 5*x**3*y**3/81 + \
        x**2*y**3/3 - x**2*y**2/9 + x*y/3 + 1
    assert rs_nth_root(8*x, 3, x, 3) == 2*x**QQ(1, 3)
    assert rs_nth_root(8*x + x**2 + x**3, 3, x, 3) == x**QQ(4, 3)/12 + 2*x**QQ(1, 3)
    r = rs_nth_root(8*x + x**2*y + x**3, 3, x, 4)
    assert r == -x**QQ(7, 3)*y**2/288 + x**QQ(7, 3)/12 + x**QQ(4, 3)*y/12 + 2*x**QQ(1, 3)
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_nth_root(x + a, 3, x, 4) == EX(5/(81*a**QQ(8, 3)))*x**3 - \
        EX(1/(9*a**QQ(5, 3)))*x**2 + EX(1/(3*a**QQ(2, 3)))*x + EX(a**QQ(1, 3))
    assert rs_nth_root(x**QQ(2, 3) + x**2*y + 5, 2, x, 3) == -EX(sqrt(5)/100)*\
        x**QQ(8, 3)*y - EX(sqrt(5)/16000)*x**QQ(8, 3) + EX(sqrt(5)/10)*x**2*y + \
        EX(sqrt(5)/2000)*x**2 - EX(sqrt(5)/200)*x**QQ(4, 3) + \
        EX(sqrt(5)/10)*x**QQ(2, 3) + EX(sqrt(5))
def test_atan():
    """Truncated arctangent series, with symbolic constant terms over EX."""
    R, x, y = ring('x, y', QQ)
    assert rs_atan(x, x, 9) == -x**7/7 + x**5/5 - x**3/3 + x
    assert rs_atan(x*y + x**2*y**3, x, 9) == 2*x**8*y**11 - x**8*y**9 + \
        2*x**7*y**9 - x**7*y**7/7 - x**6*y**9/3 + x**6*y**7 - x**5*y**7 + \
        x**5*y**5/5 - x**4*y**5 - x**3*y**3/3 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_atan(x + a, x, 5) == -EX((a**3 - a)/(a**8 + 4*a**6 + 6*a**4 +
        4*a**2 + 1))*x**4 + EX((3*a**2 - 1)/(3*a**6 + 9*a**4 +
        9*a**2 + 3))*x**3 - EX(a/(a**4 + 2*a**2 + 1))*x**2 + \
        EX(1/(a**2 + 1))*x + EX(atan(a))
    assert rs_atan(x + x**2*y + a, x, 4) == -EX(2*a/(a**4 + 2*a**2 + 1)) \
        *x**3*y + EX((3*a**2 - 1)/(3*a**6 + 9*a**4 + 9*a**2 + 3))*x**3 + \
        EX(1/(a**2 + 1))*x**2*y - EX(a/(a**4 + 2*a**2 + 1))*x**2 + \
        EX(1/(a**2 + 1))*x + EX(atan(a))
def test_asin():
    """Truncated arcsine series for composite arguments."""
    R, x, y = ring('x, y', QQ)
    assert rs_asin(x + x*y, x, 5) == x**3*y**3/6 + x**3*y**2/2 + x**3*y/2 + \
        x**3/6 + x*y + x
    assert rs_asin(x*y + x**2*y**3, x, 6) == x**5*y**7/2 + 3*x**5*y**5/40 + \
        x**4*y**5/2 + x**3*y**3/6 + x**2*y**3 + x*y
def test_tan():
    """Truncated tangent series, with symbolic constant terms."""
    R, x, y = ring('x, y', QQ)
    assert rs_tan(x, x, 9) == \
        x + x**3/3 + 2*x**5/15 + 17*x**7/315
    assert rs_tan(x*y + x**2*y**3, x, 9) == 4*x**8*y**11/3 + 17*x**8*y**9/45 + \
        4*x**7*y**9/3 + 17*x**7*y**7/315 + x**6*y**9/3 + 2*x**6*y**7/3 + \
        x**5*y**7 + 2*x**5*y**5/15 + x**4*y**5 + x**3*y**3/3 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', QQ[tan(a), a])
    assert rs_tan(x + a, x, 5) == (tan(a)**5 + 5*tan(a)**3/3 + \
        2*tan(a)/3)*x**4 + (tan(a)**4 + 4*tan(a)**2/3 + 1/3)*x**3 + \
        (tan(a)**3 + tan(a))*x**2 + (tan(a)**2 + 1)*x + tan(a)
    assert rs_tan(x + x**2*y + a, x, 4) == (2*tan(a)**3 + 2*tan(a))*x**3*y + \
        (tan(a)**4 + 4/3*tan(a)**2 + 1/3)*x**3 + (tan(a)**2 + 1)*x**2*y + \
        (tan(a)**3 + tan(a))*x**2 + (tan(a)**2 + 1)*x + tan(a)
    R, x, y = ring('x, y', EX)
    assert rs_tan(x + a, x, 5) == EX(tan(a)**5 + 5*tan(a)**3/3 + \
        2*tan(a)/3)*x**4 + EX(tan(a)**4 + 4*tan(a)**2/3 + EX(1)/3)*x**3 + \
        EX(tan(a)**3 + tan(a))*x**2 + EX(tan(a)**2 + 1)*x + EX(tan(a))
    assert rs_tan(x + x**2*y + a, x, 4) == EX(2*tan(a)**3 + \
        2*tan(a))*x**3*y + EX(tan(a)**4 + 4*tan(a)**2/3 + EX(1)/3)*x**3 + \
        EX(tan(a)**2 + 1)*x**2*y + EX(tan(a)**3 + tan(a))*x**2 + \
        EX(tan(a)**2 + 1)*x + EX(tan(a))
    # NOTE(review): the final check exercises rs_atan, not rs_tan — it looks
    # misplaced in this test (test_atan has analogous checks); confirm intent.
    p = x + x**2 + 5
    assert rs_atan(p, x, 10).compose(x, 10) == EX(atan(5) + 67701870330562640/ \
        668083460499)
def test_cot():
    """Truncated cotangent series, including the negative-exponent part."""
    R, x, y = ring('x, y', QQ)
    assert rs_cot(x**6 + x**7, x, 8) == x**-6 - x**-5 + x**-4 - x**-3 + \
        x**-2 - x**-1 + 1 - x + x**2 - x**3 + x**4 - x**5 + 2*x**6/3 - 4*x**7/3
    assert rs_cot(x + x**2*y, x, 5) == -x**4*y**5 - x**4*y/15 + x**3*y**4 - \
        x**3/45 - x**2*y**3 - x**2*y/3 + x*y**2 - x/3 - y + x**-1
def test_sin():
    """Truncated sine series, with symbolic constant terms."""
    R, x, y = ring('x, y', QQ)
    assert rs_sin(x, x, 9) == \
        x - x**3/6 + x**5/120 - x**7/5040
    assert rs_sin(x*y + x**2*y**3, x, 9) == x**8*y**11/12 - \
        x**8*y**9/720 + x**7*y**9/12 - x**7*y**7/5040 - x**6*y**9/6 + \
        x**6*y**7/24 - x**5*y**7/2 + x**5*y**5/120 - x**4*y**5/2 - \
        x**3*y**3/6 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', QQ[sin(a), cos(a), a])
    assert rs_sin(x + a, x, 5) == sin(a)*x**4/24 - cos(a)*x**3/6 - \
        sin(a)*x**2/2 + cos(a)*x + sin(a)
    assert rs_sin(x + x**2*y + a, x, 5) == -sin(a)*x**4*y**2/2 - \
        cos(a)*x**4*y/2 + sin(a)*x**4/24 - sin(a)*x**3*y - cos(a)*x**3/6 + \
        cos(a)*x**2*y - sin(a)*x**2/2 + cos(a)*x + sin(a)
    R, x, y = ring('x, y', EX)
    assert rs_sin(x + a, x, 5) == EX(sin(a)/24)*x**4 - EX(cos(a)/6)*x**3 - \
        EX(sin(a)/2)*x**2 + EX(cos(a))*x + EX(sin(a))
    assert rs_sin(x + x**2*y + a, x, 5) == -EX(sin(a)/2)*x**4*y**2 - \
        EX(cos(a)/2)*x**4*y + EX(sin(a)/24)*x**4 - EX(sin(a))*x**3*y - \
        EX(cos(a)/6)*x**3 + EX(cos(a))*x**2*y - EX(sin(a)/2)*x**2 + \
        EX(cos(a))*x + EX(sin(a))
def test_cos():
    """Truncated cosine series, with symbolic constant terms."""
    R, x, y = ring('x, y', QQ)
    assert rs_cos(x, x, 9) == \
        x**8/40320 - x**6/720 + x**4/24 - x**2/2 + 1
    assert rs_cos(x*y + x**2*y**3, x, 9) == x**8*y**12/24 - \
        x**8*y**10/48 + x**8*y**8/40320 + x**7*y**10/6 - \
        x**7*y**8/120 + x**6*y**8/4 - x**6*y**6/720 + x**5*y**6/6 - \
        x**4*y**6/2 + x**4*y**4/24 - x**3*y**4 - x**2*y**2/2 + 1
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', QQ[sin(a), cos(a), a])
    assert rs_cos(x + a, x, 5) == cos(a)*x**4/24 + sin(a)*x**3/6 - \
        cos(a)*x**2/2 - sin(a)*x + cos(a)
    assert rs_cos(x + x**2*y + a, x, 5) == -cos(a)*x**4*y**2/2 + \
        sin(a)*x**4*y/2 + cos(a)*x**4/24 - cos(a)*x**3*y + sin(a)*x**3/6 - \
        sin(a)*x**2*y - cos(a)*x**2/2 - sin(a)*x + cos(a)
    R, x, y = ring('x, y', EX)
    assert rs_cos(x + a, x, 5) == EX(cos(a)/24)*x**4 + EX(sin(a)/6)*x**3 - \
        EX(cos(a)/2)*x**2 - EX(sin(a))*x + EX(cos(a))
    assert rs_cos(x + x**2*y + a, x, 5) == -EX(cos(a)/2)*x**4*y**2 + \
        EX(sin(a)/2)*x**4*y + EX(cos(a)/24)*x**4 - EX(cos(a))*x**3*y + \
        EX(sin(a)/6)*x**3 - EX(sin(a))*x**2*y - EX(cos(a)/2)*x**2 - \
        EX(sin(a))*x + EX(cos(a))
def test_cos_sin():
    """rs_cos_sin returns the (cos, sin) series pair computed together."""
    R, x, y = ring('x, y', QQ)
    # Renamed the unpacked locals: the originals were named `cos`/`sin`,
    # shadowing the SymPy functions imported at module level.
    c, s = rs_cos_sin(x, x, 9)
    assert c == rs_cos(x, x, 9)
    assert s == rs_sin(x, x, 9)
    c, s = rs_cos_sin(x + x*y, x, 5)
    assert c == rs_cos(x + x*y, x, 5)
    assert s == rs_sin(x + x*y, x, 5)
def test_atanh():
    """Truncated inverse hyperbolic tangent series."""
    R, x, y = ring('x, y', QQ)
    assert rs_atanh(x, x, 9) == x**7/7 + x**5/5 + x**3/3 + x
    assert rs_atanh(x*y + x**2*y**3, x, 9) == 2*x**8*y**11 + x**8*y**9 + \
        2*x**7*y**9 + x**7*y**7/7 + x**6*y**9/3 + x**6*y**7 + x**5*y**7 + \
        x**5*y**5/5 + x**4*y**5 + x**3*y**3/3 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_atanh(x + a, x, 5) == EX((a**3 + a)/(a**8 - 4*a**6 + 6*a**4 -
        4*a**2 + 1))*x**4 - EX((3*a**2 + 1)/(3*a**6 - 9*a**4 +
        9*a**2 - 3))*x**3 + EX(a/(a**4 - 2*a**2 + 1))*x**2 - \
        EX(1/(a**2 - 1))*x + EX(atanh(a))
    assert rs_atanh(x + x**2*y + a, x, 4) == EX(2*a/(a**4 - 2*a**2 +
        1))*x**3*y - EX((3*a**2 + 1)/(3*a**6 - 9*a**4 + 9*a**2 - 3))*x**3 - \
        EX(1/(a**2 - 1))*x**2*y + EX(a/(a**4 - 2*a**2 + 1))*x**2 - \
        EX(1/(a**2 - 1))*x + EX(atanh(a))
    p = x + x**2 + 5
    assert rs_atanh(p, x, 10).compose(x, 10) == EX(-733442653682135/5079158784 \
        + atanh(5))
def test_sinh():
    """Truncated hyperbolic sine series."""
    R, x, y = ring('x, y', QQ)
    assert rs_sinh(x, x, 9) == x**7/5040 + x**5/120 + x**3/6 + x
    assert rs_sinh(x*y + x**2*y**3, x, 9) == x**8*y**11/12 + \
        x**8*y**9/720 + x**7*y**9/12 + x**7*y**7/5040 + x**6*y**9/6 + \
        x**6*y**7/24 + x**5*y**7/2 + x**5*y**5/120 + x**4*y**5/2 + \
        x**3*y**3/6 + x**2*y**3 + x*y
def test_cosh():
    """Truncated hyperbolic cosine series."""
    R, x, y = ring('x, y', QQ)
    assert rs_cosh(x, x, 9) == x**8/40320 + x**6/720 + x**4/24 + \
        x**2/2 + 1
    assert rs_cosh(x*y + x**2*y**3, x, 9) == x**8*y**12/24 + \
        x**8*y**10/48 + x**8*y**8/40320 + x**7*y**10/6 + \
        x**7*y**8/120 + x**6*y**8/4 + x**6*y**6/720 + x**5*y**6/6 + \
        x**4*y**6/2 + x**4*y**4/24 + x**3*y**4 + x**2*y**2/2 + 1
def test_tanh():
    """Truncated hyperbolic tangent series, with a symbolic constant term."""
    R, x, y = ring('x, y', QQ)
    assert rs_tanh(x, x, 9) == -17*x**7/315 + 2*x**5/15 - x**3/3 + x
    assert rs_tanh(x*y + x**2*y**3, x, 9) == 4*x**8*y**11/3 - \
        17*x**8*y**9/45 + 4*x**7*y**9/3 - 17*x**7*y**7/315 - x**6*y**9/3 + \
        2*x**6*y**7/3 - x**5*y**7 + 2*x**5*y**5/15 - x**4*y**5 - \
        x**3*y**3/3 + x**2*y**3 + x*y
    # Constant term in series
    a = symbols('a')
    R, x, y = ring('x, y', EX)
    assert rs_tanh(x + a, x, 5) == EX(tanh(a)**5 - 5*tanh(a)**3/3 + \
        2*tanh(a)/3)*x**4 + EX(-tanh(a)**4 + 4*tanh(a)**2/3 - QQ(1, 3))*x**3 + \
        EX(tanh(a)**3 - tanh(a))*x**2 + EX(-tanh(a)**2 + 1)*x + EX(tanh(a))
    p = rs_tanh(x + x**2*y + a, x, 4)
    assert (p.compose(x, 10)).compose(y, 5) == EX(-1000*tanh(a)**4 + \
        10100*tanh(a)**3 + 2470*tanh(a)**2/3 - 10099*tanh(a) + QQ(530, 3))
def test_RR():
    """Elementary series over RR agree numerically with SymPy's series()."""
    series_pairs = [(rs_sin, sin), (rs_cos, cos), (rs_tan, tan),
                    (rs_cot, cot), (rs_atan, atan), (rs_tanh, tanh)]
    R, x, y = ring('x, y', RR)
    a = symbols('a')
    for rs_f, sym_f in series_pairs:
        got = rs_f(2 + x, x, 5).compose(x, 5)
        expected = sym_f(2 + a).series(a, 0, 5).removeO()
        is_close(got.as_expr(), expected.subs(a, 5).n())
    got = rs_nth_root(2 + x, 5, x, 5).compose(x, 5)
    expected = ((2 + a)**QQ(1, 5)).series(a, 0, 5).removeO()
    is_close(got.as_expr(), expected.subs(a, 5).n())
def test_is_regular():
    """rs_is_puiseux detects fractional exponents in the chosen generator."""
    R, x, y = ring('x, y', QQ)
    assert not rs_is_puiseux(1 + 2*x + x**2 + 3*x**3, x)
    p = x + x**QQ(1, 5)*y
    assert rs_is_puiseux(p, x)
    assert not rs_is_puiseux(p, y)
    # Fractional exponents in y only do not make the series Puiseux in x.
    assert not rs_is_puiseux(x + x**2*y**QQ(1, 5)*y, x)
def test_puiseux():
    """Series operations on arguments with fractional (Puiseux) exponents."""
    R, x, y = ring('x, y', QQ)
    p = x**QQ(2, 5) + x**QQ(2, 3) + x
    r = rs_series_inversion(p, x, 1)
    r1 = -x**QQ(14, 15) + x**QQ(4, 5) - 3*x**QQ(11, 15) + x**QQ(2, 3) + \
        2*x**QQ(7, 15) - x**QQ(2, 5) - x**QQ(1, 5) + x**QQ(2, 15) - \
        x**QQ(-2, 15) + x**QQ(-2, 5)
    assert r == r1
    r = rs_nth_root(1 + p, 3, x, 1)
    assert r == -x**QQ(4, 5)/9 + x**QQ(2, 3)/3 + x**QQ(2, 5)/3 + 1
    r = rs_log(1 + p, x, 1)
    assert r == -x**QQ(4, 5)/2 + x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_LambertW(p, x, 1)
    assert r == -x**QQ(4, 5) + x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_exp(p, x, 1)
    assert r == x**QQ(4, 5)/2 + x**QQ(2, 3) + x**QQ(2, 5) + 1
    p1 = x + x**QQ(1, 5)*y
    r = rs_exp(p1, x, 1)
    assert r == x**QQ(4, 5)*y**4/24 + x**QQ(3, 5)*y**3/6 + x**QQ(2, 5)*y**2/2 + \
        x**QQ(1, 5)*y + 1
    r = rs_atan(p, x, 2)
    assert r == -x**QQ(9, 5) - x**QQ(26, 15) - x**QQ(22, 15) - x**QQ(6, 5)/3 + \
        x + x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_atan(p1, x, 2)
    assert r == x**QQ(9, 5)*y**9/9 + x**QQ(9, 5)*y**4 - x**QQ(7, 5)*y**7/7 - \
        x**QQ(7, 5)*y**2 + x*y**5/5 + x - x**QQ(3, 5)*y**3/3 + x**QQ(1, 5)*y
    r = rs_asin(p, x, 2)
    assert r == x**QQ(9, 5)/2 + x**QQ(26, 15)/2 + x**QQ(22, 15)/2 + \
        x**QQ(6, 5)/6 + x + x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_tan(p, x, 2)
    assert r == x**QQ(9, 5) + x**QQ(26, 15) + x**QQ(22, 15) + x**QQ(6, 5)/3 + \
        x + x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_cot(p, x, 1)
    assert r == -x**QQ(14, 15) + x**QQ(4, 5) - 3*x**QQ(11, 15) + \
        2*x**QQ(2, 3)/3 + 2*x**QQ(7, 15) - 4*x**QQ(2, 5)/3 - x**QQ(1, 5) + \
        x**QQ(2, 15) - x**QQ(-2, 15) + x**QQ(-2, 5)
    r = rs_sin(p, x, 2)
    assert r == -x**QQ(9, 5)/2 - x**QQ(26, 15)/2 - x**QQ(22, 15)/2 - \
        x**QQ(6, 5)/6 + x + x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_cos(p, x, 2)
    assert r == x**QQ(28, 15)/6 - x**QQ(5, 3) + x**QQ(8, 5)/24 - x**QQ(7, 5) - \
        x**QQ(4, 3)/2 - x**QQ(16, 15) - x**QQ(4, 5)/2 + 1
    # rs_cos_sin returns both series at once; they must match the
    # individually computed cos and sin expansions above.
    r = rs_cos_sin(p, x, 2)
    assert r[0] == x**QQ(28, 15)/6 - x**QQ(5, 3) + x**QQ(8, 5)/24 - x**QQ(7, 5) - \
        x**QQ(4, 3)/2 - x**QQ(16, 15) - x**QQ(4, 5)/2 + 1
    assert r[1] == -x**QQ(9, 5)/2 - x**QQ(26, 15)/2 - x**QQ(22, 15)/2 - \
        x**QQ(6, 5)/6 + x + x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_atanh(p, x, 2)
    assert r == x**QQ(9, 5) + x**QQ(26, 15) + x**QQ(22, 15) + x**QQ(6, 5)/3 + x + \
        x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_sinh(p, x, 2)
    assert r == x**QQ(9, 5)/2 + x**QQ(26, 15)/2 + x**QQ(22, 15)/2 + \
        x**QQ(6, 5)/6 + x + x**QQ(2, 3) + x**QQ(2, 5)
    r = rs_cosh(p, x, 2)
    assert r == x**QQ(28, 15)/6 + x**QQ(5, 3) + x**QQ(8, 5)/24 + x**QQ(7, 5) + \
        x**QQ(4, 3)/2 + x**QQ(16, 15) + x**QQ(4, 5)/2 + 1
    r = rs_tanh(p, x, 2)
    assert r == -x**QQ(9, 5) - x**QQ(26, 15) - x**QQ(22, 15) - x**QQ(6, 5)/3 + \
        x + x**QQ(2, 3) + x**QQ(2, 5)
def test1():
    """Puiseux results from division, fractional roots and exp(sqrt(x))."""
    R, x = ring('x', QQ)
    r = rs_sin(x, x, 15)*x**(-5)
    assert r == x**8/6227020800 - x**6/39916800 + x**4/362880 - x**2/5040 + \
        QQ(1, 120) - x**-2/6 + x**-4
    p = rs_sin(x, x, 10)
    r = rs_nth_root(p, 2, x, 10)
    assert r == -67*x**QQ(17, 2)/29030400 - x**QQ(13, 2)/24192 + \
        x**QQ(9, 2)/1440 - x**QQ(5, 2)/12 + x**QQ(1, 2)
    p = rs_sin(x, x, 10)
    # sin(x)**(5/7) via a 7th root followed by a 5th power.
    r = rs_pow(rs_nth_root(p, 7, x, 10), 5, x, 10)
    assert r == -97*x**QQ(61, 7)/124467840 - x**QQ(47, 7)/16464 + \
        11*x**QQ(33, 7)/3528 - 5*x**QQ(19, 7)/42 + x**QQ(5, 7)
    r = rs_exp(x**QQ(1, 2), x, 10)
    assert r == x**QQ(19, 2)/121645100408832000 + x**9/6402373705728000 + \
        x**QQ(17, 2)/355687428096000 + x**8/20922789888000 + \
        x**QQ(15, 2)/1307674368000 + x**7/87178291200 + \
        x**QQ(13, 2)/6227020800 + x**6/479001600 + x**QQ(11, 2)/39916800 + \
        x**5/3628800 + x**QQ(9, 2)/362880 + x**4/40320 + x**QQ(7, 2)/5040 + \
        x**3/720 + x**QQ(5, 2)/120 + x**2/24 + x**QQ(3, 2)/6 + x/2 + \
        x**QQ(1, 2) + 1
def test_puiseux2():
R, y = ring('y', QQ)
S, x = ring('x', R)
p = x + x**QQ(1,5)*y
r = rs_atan(p, x, 3)
assert r == (y**13/13 + y**8 + 2*y**3)*x**QQ(13,5) - (y**11/11 + y**6 +
y)*x**QQ(11,5) + (y**9/9 + y**4)*x**QQ(9,5) - (y**7/7 +
y**2)*x**QQ(7,5) + (y**5/5 + 1)*x - y**3*x**QQ(3,5)/3 + y*x**QQ(1,5)
| 37.870912 | 92 | 0.440667 | from sympy.polys.domains import QQ, EX, RR
from sympy.polys.rings import ring
from sympy.polys.ring_series import (_invert_monoms, rs_integrate,
rs_trunc, rs_mul, rs_square, rs_pow, _has_constant_term, rs_hadamard_exp,
rs_series_from_list, rs_exp, rs_log, rs_newton, rs_series_inversion,
rs_compose_add, rs_asin, rs_atan, rs_atanh, rs_tan, rs_cot, rs_sin, rs_cos,
rs_cos_sin, rs_sinh, rs_cosh, rs_tanh, _tan1, rs_fun, rs_nth_root,
rs_LambertW, rs_series_reversion, rs_is_puiseux)
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.core.symbol import symbols
from sympy.functions import (sin, cos, exp, tan, cot, atan, asin, atanh,
tanh, log, sqrt)
from sympy.core.numbers import Rational
def is_close(a, b):
    """Assert that a and b agree to within an absolute tolerance of 1e-10."""
    assert abs(a - b) < 1e-10
def test_ring_series1():
    """Basic helpers: monomial inversion, Hadamard exp and integration."""
    R, x = ring('x', QQ)
    p = x**4 + 2*x**3 + 3*x + 4
    assert _invert_monoms(p) == 4*x**4 + 3*x**3 + 2*x + 1
    assert rs_hadamard_exp(p) == x**4/24 + x**3/3 + 3*x + 4
    R, x = ring('x', QQ)
    p = x**4 + 2*x**3 + 3*x + 4
    assert rs_integrate(p, x) == x**5/5 + x**4/2 + 3*x**2/2 + 4*x
    R, x, y = ring('x, y', QQ)
    p = x**2*y**2 + x + 1
    assert rs_integrate(p, x) == x**3*y**2/3 + x**2/2 + x
    assert rs_integrate(p, y) == x**2*y**3/3 + x*y + y
def test_trunc():
    """rs_trunc drops terms of degree >= prec in the chosen generator."""
    R, x, y, t = ring('x, y, t', QQ)
    expanded = (y + t*x)**4
    assert rs_trunc(expanded, x, 3) == y**4 + 4*y**3*t*x + 6*y**2*t**2*x**2
def test_mul_trunc():
    """Truncated multiplication, including a cross-ring error case."""
    R, x, y, t = ring('x, y, t', QQ)
    p = 1 + t*x + t*y
    for _ in range(2):
        p = rs_mul(p, p, t, 3)
    assert p == 6*x**2*t**2 + 12*x*y*t**2 + 6*y**2*t**2 + 4*x*t + 4*y*t + 1
    p = 1 + t*x + t*y + t**2*x*y
    assert rs_mul(p, p, t, 2) == 1 + 2*t*x + 2*t*y
    R1, z = ring('z', QQ)
    # Multiplying elements from different rings is rejected.
    raises(ValueError, lambda: rs_mul(p, z, x, 2))
    assert rs_mul(2 + 2*x + 3*x**2, 3 + x**2, x, 4) == \
        2*x**3 + 11*x**2 + 6*x + 6
def test_square_trunc():
    """rs_square agrees with rs_mul(p, p) under truncation."""
    R, x, y, t = ring('x, y, t', QQ)
    p = (1 + t*x + t*y)*2
    assert rs_square(p, x, 3) == rs_mul(p, p, x, 3)
    p = 1 + x + x**2 + x**3
    assert rs_square(p, x, 4) == 4*x**3 + 3*x**2 + 2*x + 1
def test_pow_trunc():
R, x, y, z = ring('x, y, z', QQ)
p0 = y + x*z
p = p0**16
for xx in (x, y, z):
p1 = rs_trunc(p, xx, 8)
p2 = rs_pow(p0, 16, xx, 8)
assert p1 == p2
p = 1 + x
p1 = rs_pow(p, 3, x, 2)
assert p1 == 1 + 3*x
assert rs_pow(p, 0, x, 2) == 1
assert rs_pow(p, -2, x, 2) == 1 - 2*x
p = x + y
assert rs_pow(p, 3, y, 3) == x**3 + 3*x**2*y + 3*x*y**2
assert rs_pow(1 + x, Rational(2, 3), x, 4) == 4*x**3/81 - x**2/9 + 2*x/3 + 1
def test_has_constant_term():
R, x, y, z = ring('x, y, z', QQ)
p = y + x*z
assert _has_constant_term(p, x)
p = x + x**4
assert not _has_constant_term(p, x)
p = 1 + x + x**4
assert _has_constant_term(p, x)
p = x + y + x*z
def test_inversion():
R, x = ring('x', QQ)
p = 2 + x + 2*x**2
n = 5
p1 = rs_series_inversion(p, x, n)
assert rs_trunc(p*p1, x, n) == 1
R, x, y = ring('x, y', QQ)
p = 2 + x + 2*x**2 + y*x + x**2*y
p1 = rs_series_inversion(p, x, n)
assert rs_trunc(p*p1, x, n) == 1
R, x, y = ring('x, y', QQ)
p = 1 + x + y
def test2(p):
p1 = rs_series_inversion(p, x, 4)
raises(NotImplementedError, lambda: test2(p))
p = R.zero
def test3(p):
p1 = rs_series_inversion(p, x, 3)
raises(ZeroDivisionError, lambda: test3(p))
def test_series_reversion():
R, x, y = ring('x, y', QQ)
p = rs_tan(x, x, 10)
r1 = rs_series_reversion(p, x, 8, y)
r2 = rs_atan(y, y, 8)
assert rs_series_reversion(p, x, 8, y) == rs_atan(y, y, 8)
p = rs_sin(x, x, 10)
assert rs_series_reversion(p, x, 8, y) == 5*y**7/112 + 3*y**5/40 + \
y**3/6 + y
def test_series_from_list():
R, x = ring('x', QQ)
p = 1 + 2*x + x**2 + 3*x**3
c = [1, 2, 0, 4, 4]
r = rs_series_from_list(p, c, x, 5)
pc = R.from_list(list(reversed(c)))
r1 = rs_trunc(pc.compose(x, p), x, 5)
assert r == r1
R, x, y = ring('x, y', QQ)
c = [1, 3, 5, 7]
p1 = rs_series_from_list(x + y, c, x, 3, concur=0)
p2 = rs_trunc((1 + 3*(x+y) + 5*(x+y)**2 + 7*(x+y)**3), x, 3)
assert p1 == p2
R, x = ring('x', QQ)
h = 25
p = rs_exp(x, x, h) - 1
p1 = rs_series_from_list(p, c, x, h)
p2 = 0
for i, cx in enumerate(c):
p2 += cx*rs_pow(p, i, x, h)
assert p1 == p2
def test_log():
R, x = ring('x', QQ)
p = 1 + x
p1 = rs_log(p, x, 4)
assert p1 == x - x**2/2 + x**3/3
p = 1 + x +2*x**2/3
p1 = rs_log(p, x, 9)
assert p1 == -17*x**8/648 + 13*x**7/189 - 11*x**6/162 - x**5/45 + \
7*x**4/36 - x**3/3 + x**2/6 + x
p2 = rs_series_inversion(p, x, 9)
p3 = rs_log(p2, x, 9)
assert p3 == -p1
R, x, y = ring('x, y', QQ)
p = 1 + x + 2*y*x**2
p1 = rs_log(p, x, 6)
assert p1 == (4*x**5*y**2 - 2*x**5*y - 2*x**4*y**2 + x**5/5 + 2*x**4*y -
x**4/4 - 2*x**3*y + x**3/3 + 2*x**2*y - x**2/2 + x)
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_log(x + a, x, 5) == -EX(1/(4*a**4))*x**4 + EX(1/(3*a**3))*x**3 \
- EX(1/(2*a**2))*x**2 + EX(1/a)*x + EX(log(a))
assert rs_log(x + x**2*y + a, x, 4) == -EX(a**(-2))*x**3*y + \
EX(1/(3*a**3))*x**3 + EX(1/a)*x**2*y - EX(1/(2*a**2))*x**2 + \
EX(1/a)*x + EX(log(a))
p = x + x**2 + 3
assert rs_log(p, x, 10).compose(x, 5) == EX(log(3) + 19281291595/9920232)
def test_exp():
R, x = ring('x', QQ)
p = x + x**4
for h in [10, 30]:
q = rs_series_inversion(1 + p, x, h) - 1
p1 = rs_exp(q, x, h)
q1 = rs_log(p1, x, h)
assert q1 == q
p1 = rs_exp(p, x, 30)
assert p1.coeff(x**29) == QQ(74274246775059676726972369, 353670479749588078181744640000)
prec = 21
p = rs_log(1 + x, x, prec)
p1 = rs_exp(p, x, prec)
assert p1 == x + 1
a = symbols('a')
R, x, y = ring('x, y', QQ[exp(a), a])
assert rs_exp(x + a, x, 5) == exp(a)*x**4/24 + exp(a)*x**3/6 + \
exp(a)*x**2/2 + exp(a)*x + exp(a)
assert rs_exp(x + x**2*y + a, x, 5) == exp(a)*x**4*y**2/2 + \
exp(a)*x**4*y/2 + exp(a)*x**4/24 + exp(a)*x**3*y + \
exp(a)*x**3/6 + exp(a)*x**2*y + exp(a)*x**2/2 + exp(a)*x + exp(a)
R, x, y = ring('x, y', EX)
assert rs_exp(x + a, x, 5) == EX(exp(a)/24)*x**4 + EX(exp(a)/6)*x**3 + \
EX(exp(a)/2)*x**2 + EX(exp(a))*x + EX(exp(a))
assert rs_exp(x + x**2*y + a, x, 5) == EX(exp(a)/2)*x**4*y**2 + \
EX(exp(a)/2)*x**4*y + EX(exp(a)/24)*x**4 + EX(exp(a))*x**3*y + \
EX(exp(a)/6)*x**3 + EX(exp(a))*x**2*y + EX(exp(a)/2)*x**2 + \
EX(exp(a))*x + EX(exp(a))
def test_newton():
R, x = ring('x', QQ)
p = x**2 - 2
r = rs_newton(p, x, 4)
f = [1, 0, -2]
assert r == 8*x**4 + 4*x**2 + 2
def test_compose_add():
R, x = ring('x', QQ)
p1 = x**3 - 1
p2 = x**2 - 2
assert rs_compose_add(p1, p2) == x**6 - 6*x**4 - 2*x**3 + 12*x**2 - 12*x - 7
def test_fun():
R, x, y = ring('x, y', QQ)
p = x*y + x**2*y**3 + x**5*y
assert rs_fun(p, rs_tan, x, 10) == rs_tan(p, x, 10)
assert rs_fun(p, _tan1, x, 10) == _tan1(p, x, 10)
def test_nth_root():
R, x, y = ring('x, y', QQ)
r1 = rs_nth_root(1 + x**2*y, 4, x, 10)
assert rs_nth_root(1 + x**2*y, 4, x, 10) == -77*x**8*y**4/2048 + \
7*x**6*y**3/128 - 3*x**4*y**2/32 + x**2*y/4 + 1
assert rs_nth_root(1 + x*y + x**2*y**3, 3, x, 5) == -x**4*y**6/9 + \
5*x**4*y**5/27 - 10*x**4*y**4/243 - 2*x**3*y**4/9 + 5*x**3*y**3/81 + \
x**2*y**3/3 - x**2*y**2/9 + x*y/3 + 1
assert rs_nth_root(8*x, 3, x, 3) == 2*x**QQ(1, 3)
assert rs_nth_root(8*x + x**2 + x**3, 3, x, 3) == x**QQ(4,3)/12 + 2*x**QQ(1,3)
r = rs_nth_root(8*x + x**2*y + x**3, 3, x, 4)
assert r == -x**QQ(7,3)*y**2/288 + x**QQ(7,3)/12 + x**QQ(4,3)*y/12 + 2*x**QQ(1,3)
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_nth_root(x + a, 3, x, 4) == EX(5/(81*a**QQ(8, 3)))*x**3 - \
EX(1/(9*a**QQ(5, 3)))*x**2 + EX(1/(3*a**QQ(2, 3)))*x + EX(a**QQ(1, 3))
assert rs_nth_root(x**QQ(2, 3) + x**2*y + 5, 2, x, 3) == -EX(sqrt(5)/100)*\
x**QQ(8, 3)*y - EX(sqrt(5)/16000)*x**QQ(8, 3) + EX(sqrt(5)/10)*x**2*y + \
EX(sqrt(5)/2000)*x**2 - EX(sqrt(5)/200)*x**QQ(4, 3) + \
EX(sqrt(5)/10)*x**QQ(2, 3) + EX(sqrt(5))
def test_atan():
R, x, y = ring('x, y', QQ)
assert rs_atan(x, x, 9) == -x**7/7 + x**5/5 - x**3/3 + x
assert rs_atan(x*y + x**2*y**3, x, 9) == 2*x**8*y**11 - x**8*y**9 + \
2*x**7*y**9 - x**7*y**7/7 - x**6*y**9/3 + x**6*y**7 - x**5*y**7 + \
x**5*y**5/5 - x**4*y**5 - x**3*y**3/3 + x**2*y**3 + x*y
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_atan(x + a, x, 5) == -EX((a**3 - a)/(a**8 + 4*a**6 + 6*a**4 + \
4*a**2 + 1))*x**4 + EX((3*a**2 - 1)/(3*a**6 + 9*a**4 + \
9*a**2 + 3))*x**3 - EX(a/(a**4 + 2*a**2 + 1))*x**2 + \
EX(1/(a**2 + 1))*x + EX(atan(a))
assert rs_atan(x + x**2*y + a, x, 4) == -EX(2*a/(a**4 + 2*a**2 + 1)) \
*x**3*y + EX((3*a**2 - 1)/(3*a**6 + 9*a**4 + 9*a**2 + 3))*x**3 + \
EX(1/(a**2 + 1))*x**2*y - EX(a/(a**4 + 2*a**2 + 1))*x**2 + EX(1/(a**2 \
+ 1))*x + EX(atan(a))
def test_asin():
R, x, y = ring('x, y', QQ)
assert rs_asin(x + x*y, x, 5) == x**3*y**3/6 + x**3*y**2/2 + x**3*y/2 + \
x**3/6 + x*y + x
assert rs_asin(x*y + x**2*y**3, x, 6) == x**5*y**7/2 + 3*x**5*y**5/40 + \
x**4*y**5/2 + x**3*y**3/6 + x**2*y**3 + x*y
def test_tan():
R, x, y = ring('x, y', QQ)
assert rs_tan(x, x, 9) == \
x + x**3/3 + 2*x**5/15 + 17*x**7/315
assert rs_tan(x*y + x**2*y**3, x, 9) == 4*x**8*y**11/3 + 17*x**8*y**9/45 + \
4*x**7*y**9/3 + 17*x**7*y**7/315 + x**6*y**9/3 + 2*x**6*y**7/3 + \
x**5*y**7 + 2*x**5*y**5/15 + x**4*y**5 + x**3*y**3/3 + x**2*y**3 + x*y
a = symbols('a')
R, x, y = ring('x, y', QQ[tan(a), a])
assert rs_tan(x + a, x, 5) == (tan(a)**5 + 5*tan(a)**3/3 + \
2*tan(a)/3)*x**4 + (tan(a)**4 + 4*tan(a)**2/3 + 1/3)*x**3 + \
(tan(a)**3 + tan(a))*x**2 + (tan(a)**2 + 1)*x + tan(a)
assert rs_tan(x + x**2*y + a, x, 4) == (2*tan(a)**3 + 2*tan(a))*x**3*y + \
(tan(a)**4 + 4/3*tan(a)**2 + 1/3)*x**3 + (tan(a)**2 + 1)*x**2*y + \
(tan(a)**3 + tan(a))*x**2 + (tan(a)**2 + 1)*x + tan(a)
R, x, y = ring('x, y', EX)
assert rs_tan(x + a, x, 5) == EX(tan(a)**5 + 5*tan(a)**3/3 + \
2*tan(a)/3)*x**4 + EX(tan(a)**4 + 4*tan(a)**2/3 + EX(1)/3)*x**3 + \
EX(tan(a)**3 + tan(a))*x**2 + EX(tan(a)**2 + 1)*x + EX(tan(a))
assert rs_tan(x + x**2*y + a, x, 4) == EX(2*tan(a)**3 + \
2*tan(a))*x**3*y + EX(tan(a)**4 + 4*tan(a)**2/3 + EX(1)/3)*x**3 + \
EX(tan(a)**2 + 1)*x**2*y + EX(tan(a)**3 + tan(a))*x**2 + \
EX(tan(a)**2 + 1)*x + EX(tan(a))
p = x + x**2 + 5
assert rs_atan(p, x, 10).compose(x, 10) == EX(atan(5) + 67701870330562640/ \
668083460499)
def test_cot():
R, x, y = ring('x, y', QQ)
assert rs_cot(x**6 + x**7, x, 8) == x**-6 - x**-5 + x**-4 - x**-3 + \
x**-2 - x**-1 + 1 - x + x**2 - x**3 + x**4 - x**5 + 2*x**6/3 - 4*x**7/3
assert rs_cot(x + x**2*y, x, 5) == -x**4*y**5 - x**4*y/15 + x**3*y**4 - \
x**3/45 - x**2*y**3 - x**2*y/3 + x*y**2 - x/3 - y + x**-1
def test_sin():
R, x, y = ring('x, y', QQ)
assert rs_sin(x, x, 9) == \
x - x**3/6 + x**5/120 - x**7/5040
assert rs_sin(x*y + x**2*y**3, x, 9) == x**8*y**11/12 - \
x**8*y**9/720 + x**7*y**9/12 - x**7*y**7/5040 - x**6*y**9/6 + \
x**6*y**7/24 - x**5*y**7/2 + x**5*y**5/120 - x**4*y**5/2 - \
x**3*y**3/6 + x**2*y**3 + x*y
a = symbols('a')
R, x, y = ring('x, y', QQ[sin(a), cos(a), a])
assert rs_sin(x + a, x, 5) == sin(a)*x**4/24 - cos(a)*x**3/6 - \
sin(a)*x**2/2 + cos(a)*x + sin(a)
assert rs_sin(x + x**2*y + a, x, 5) == -sin(a)*x**4*y**2/2 - \
cos(a)*x**4*y/2 + sin(a)*x**4/24 - sin(a)*x**3*y - cos(a)*x**3/6 + \
cos(a)*x**2*y - sin(a)*x**2/2 + cos(a)*x + sin(a)
R, x, y = ring('x, y', EX)
assert rs_sin(x + a, x, 5) == EX(sin(a)/24)*x**4 - EX(cos(a)/6)*x**3 - \
EX(sin(a)/2)*x**2 + EX(cos(a))*x + EX(sin(a))
assert rs_sin(x + x**2*y + a, x, 5) == -EX(sin(a)/2)*x**4*y**2 - \
EX(cos(a)/2)*x**4*y + EX(sin(a)/24)*x**4 - EX(sin(a))*x**3*y - \
EX(cos(a)/6)*x**3 + EX(cos(a))*x**2*y - EX(sin(a)/2)*x**2 + \
EX(cos(a))*x + EX(sin(a))
def test_cos():
R, x, y = ring('x, y', QQ)
assert rs_cos(x, x, 9) == \
x**8/40320 - x**6/720 + x**4/24 - x**2/2 + 1
assert rs_cos(x*y + x**2*y**3, x, 9) == x**8*y**12/24 - \
x**8*y**10/48 + x**8*y**8/40320 + x**7*y**10/6 - \
x**7*y**8/120 + x**6*y**8/4 - x**6*y**6/720 + x**5*y**6/6 - \
x**4*y**6/2 + x**4*y**4/24 - x**3*y**4 - x**2*y**2/2 + 1
a = symbols('a')
R, x, y = ring('x, y', QQ[sin(a), cos(a), a])
assert rs_cos(x + a, x, 5) == cos(a)*x**4/24 + sin(a)*x**3/6 - \
cos(a)*x**2/2 - sin(a)*x + cos(a)
assert rs_cos(x + x**2*y + a, x, 5) == -cos(a)*x**4*y**2/2 + \
sin(a)*x**4*y/2 + cos(a)*x**4/24 - cos(a)*x**3*y + sin(a)*x**3/6 - \
sin(a)*x**2*y - cos(a)*x**2/2 - sin(a)*x + cos(a)
R, x, y = ring('x, y', EX)
assert rs_cos(x + a, x, 5) == EX(cos(a)/24)*x**4 + EX(sin(a)/6)*x**3 - \
EX(cos(a)/2)*x**2 - EX(sin(a))*x + EX(cos(a))
assert rs_cos(x + x**2*y + a, x, 5) == -EX(cos(a)/2)*x**4*y**2 + \
EX(sin(a)/2)*x**4*y + EX(cos(a)/24)*x**4 - EX(cos(a))*x**3*y + \
EX(sin(a)/6)*x**3 - EX(sin(a))*x**2*y - EX(cos(a)/2)*x**2 - \
EX(sin(a))*x + EX(cos(a))
def test_cos_sin():
R, x, y = ring('x, y', QQ)
cos, sin = rs_cos_sin(x, x, 9)
assert cos == rs_cos(x, x, 9)
assert sin == rs_sin(x, x, 9)
cos, sin = rs_cos_sin(x + x*y, x, 5)
assert cos == rs_cos(x + x*y, x, 5)
assert sin == rs_sin(x + x*y, x, 5)
def test_atanh():
R, x, y = ring('x, y', QQ)
assert rs_atanh(x, x, 9) == x**7/7 + x**5/5 + x**3/3 + x
assert rs_atanh(x*y + x**2*y**3, x, 9) == 2*x**8*y**11 + x**8*y**9 + \
2*x**7*y**9 + x**7*y**7/7 + x**6*y**9/3 + x**6*y**7 + x**5*y**7 + \
x**5*y**5/5 + x**4*y**5 + x**3*y**3/3 + x**2*y**3 + x*y
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_atanh(x + a, x, 5) == EX((a**3 + a)/(a**8 - 4*a**6 + 6*a**4 - \
4*a**2 + 1))*x**4 - EX((3*a**2 + 1)/(3*a**6 - 9*a**4 + \
9*a**2 - 3))*x**3 + EX(a/(a**4 - 2*a**2 + 1))*x**2 - EX(1/(a**2 - \
1))*x + EX(atanh(a))
assert rs_atanh(x + x**2*y + a, x, 4) == EX(2*a/(a**4 - 2*a**2 + \
1))*x**3*y - EX((3*a**2 + 1)/(3*a**6 - 9*a**4 + 9*a**2 - 3))*x**3 - \
EX(1/(a**2 - 1))*x**2*y + EX(a/(a**4 - 2*a**2 + 1))*x**2 - \
EX(1/(a**2 - 1))*x + EX(atanh(a))
p = x + x**2 + 5
assert rs_atanh(p, x, 10).compose(x, 10) == EX(-733442653682135/5079158784 \
+ atanh(5))
def test_sinh():
R, x, y = ring('x, y', QQ)
assert rs_sinh(x, x, 9) == x**7/5040 + x**5/120 + x**3/6 + x
assert rs_sinh(x*y + x**2*y**3, x, 9) == x**8*y**11/12 + \
x**8*y**9/720 + x**7*y**9/12 + x**7*y**7/5040 + x**6*y**9/6 + \
x**6*y**7/24 + x**5*y**7/2 + x**5*y**5/120 + x**4*y**5/2 + \
x**3*y**3/6 + x**2*y**3 + x*y
def test_cosh():
R, x, y = ring('x, y', QQ)
assert rs_cosh(x, x, 9) == x**8/40320 + x**6/720 + x**4/24 + \
x**2/2 + 1
assert rs_cosh(x*y + x**2*y**3, x, 9) == x**8*y**12/24 + \
x**8*y**10/48 + x**8*y**8/40320 + x**7*y**10/6 + \
x**7*y**8/120 + x**6*y**8/4 + x**6*y**6/720 + x**5*y**6/6 + \
x**4*y**6/2 + x**4*y**4/24 + x**3*y**4 + x**2*y**2/2 + 1
def test_tanh():
R, x, y = ring('x, y', QQ)
assert rs_tanh(x, x, 9) == -17*x**7/315 + 2*x**5/15 - x**3/3 + x
assert rs_tanh(x*y + x**2*y**3 , x, 9) == 4*x**8*y**11/3 - \
17*x**8*y**9/45 + 4*x**7*y**9/3 - 17*x**7*y**7/315 - x**6*y**9/3 + \
2*x**6*y**7/3 - x**5*y**7 + 2*x**5*y**5/15 - x**4*y**5 - \
x**3*y**3/3 + x**2*y**3 + x*y
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_tanh(x + a, x, 5) == EX(tanh(a)**5 - 5*tanh(a)**3/3 + \
2*tanh(a)/3)*x**4 + EX(-tanh(a)**4 + 4*tanh(a)**2/3 - QQ(1, 3))*x**3 + \
EX(tanh(a)**3 - tanh(a))*x**2 + EX(-tanh(a)**2 + 1)*x + EX(tanh(a))
p = rs_tanh(x + x**2*y + a, x, 4)
assert (p.compose(x, 10)).compose(y, 5) == EX(-1000*tanh(a)**4 + \
10100*tanh(a)**3 + 2470*tanh(a)**2/3 - 10099*tanh(a) + QQ(530, 3))
def test_RR():
rs_funcs = [rs_sin, rs_cos, rs_tan, rs_cot, rs_atan, rs_tanh]
sympy_funcs = [sin, cos, tan, cot, atan, tanh]
R, x, y = ring('x, y', RR)
a = symbols('a')
for rs_func, sympy_func in zip(rs_funcs, sympy_funcs):
p = rs_func(2 + x, x, 5).compose(x, 5)
q = sympy_func(2 + a).series(a, 0, 5).removeO()
is_close(p.as_expr(), q.subs(a, 5).n())
p = rs_nth_root(2 + x, 5, x, 5).compose(x, 5)
q = ((2 + a)**QQ(1, 5)).series(a, 0, 5).removeO()
is_close(p.as_expr(), q.subs(a, 5).n())
def test_is_regular():
R, x, y = ring('x, y', QQ)
p = 1 + 2*x + x**2 + 3*x**3
assert not rs_is_puiseux(p, x)
p = x + x**QQ(1,5)*y
assert rs_is_puiseux(p, x)
assert not rs_is_puiseux(p, y)
p = x + x**2*y**QQ(1,5)*y
assert not rs_is_puiseux(p, x)
def test_puiseux():
    # Exercise the elementary series functions (inversion, nth root, log,
    # LambertW, exp, trig and hyperbolic) on arguments with fractional
    # (Puiseux) exponents: p in x alone, and p1 mixing x**(1/5) with y.
    R, x, y = ring('x, y', QQ)
    p = x**QQ(2,5) + x**QQ(2,3) + x
    r = rs_series_inversion(p, x, 1)
    r1 = -x**QQ(14,15) + x**QQ(4,5) - 3*x**QQ(11,15) + x**QQ(2,3) + \
        2*x**QQ(7,15) - x**QQ(2,5) - x**QQ(1,5) + x**QQ(2,15) - x**QQ(-2,15) \
        + x**QQ(-2,5)
    assert r == r1
    r = rs_nth_root(1 + p, 3, x, 1)
    assert r == -x**QQ(4,5)/9 + x**QQ(2,3)/3 + x**QQ(2,5)/3 + 1
    r = rs_log(1 + p, x, 1)
    assert r == -x**QQ(4,5)/2 + x**QQ(2,3) + x**QQ(2,5)
    r = rs_LambertW(p, x, 1)
    assert r == -x**QQ(4,5) + x**QQ(2,3) + x**QQ(2,5)
    r = rs_exp(p, x, 1)
    assert r == x**QQ(4,5)/2 + x**QQ(2,3) + x**QQ(2,5) + 1
    p1 = x + x**QQ(1,5)*y
    r = rs_exp(p1, x, 1)
    assert r == x**QQ(4,5)*y**4/24 + x**QQ(3,5)*y**3/6 + x**QQ(2,5)*y**2/2 + \
        x**QQ(1,5)*y + 1
    r = rs_atan(p, x, 2)
    assert r == -x**QQ(9,5) - x**QQ(26,15) - x**QQ(22,15) - x**QQ(6,5)/3 + \
        x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_atan(p1, x, 2)
    assert r == x**QQ(9,5)*y**9/9 + x**QQ(9,5)*y**4 - x**QQ(7,5)*y**7/7 - \
        x**QQ(7,5)*y**2 + x*y**5/5 + x - x**QQ(3,5)*y**3/3 + x**QQ(1,5)*y
    r = rs_asin(p, x, 2)
    assert r == x**QQ(9,5)/2 + x**QQ(26,15)/2 + x**QQ(22,15)/2 + \
        x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_tan(p, x, 2)
    assert r == x**QQ(9,5) + x**QQ(26,15) + x**QQ(22,15) + x**QQ(6,5)/3 + \
        x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_cot(p, x, 1)
    assert r == -x**QQ(14,15) + x**QQ(4,5) - 3*x**QQ(11,15) + \
        2*x**QQ(2,3)/3 + 2*x**QQ(7,15) - 4*x**QQ(2,5)/3 - x**QQ(1,5) + \
        x**QQ(2,15) - x**QQ(-2,15) + x**QQ(-2,5)
    r = rs_sin(p, x, 2)
    assert r == -x**QQ(9,5)/2 - x**QQ(26,15)/2 - x**QQ(22,15)/2 - \
        x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_cos(p, x, 2)
    assert r == x**QQ(28,15)/6 - x**QQ(5,3) + x**QQ(8,5)/24 - x**QQ(7,5) - \
        x**QQ(4,3)/2 - x**QQ(16,15) - x**QQ(4,5)/2 + 1
    # rs_cos_sin returns the (cos, sin) pair; both halves must match the
    # individual expansions above.
    r = rs_cos_sin(p, x, 2)
    assert r[0] == x**QQ(28,15)/6 - x**QQ(5,3) + x**QQ(8,5)/24 - x**QQ(7,5) - \
        x**QQ(4,3)/2 - x**QQ(16,15) - x**QQ(4,5)/2 + 1
    assert r[1] == -x**QQ(9,5)/2 - x**QQ(26,15)/2 - x**QQ(22,15)/2 - \
        x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_atanh(p, x, 2)
    assert r == x**QQ(9,5) + x**QQ(26,15) + x**QQ(22,15) + x**QQ(6,5)/3 + x + \
        x**QQ(2,3) + x**QQ(2,5)
    r = rs_sinh(p, x, 2)
    assert r == x**QQ(9,5)/2 + x**QQ(26,15)/2 + x**QQ(22,15)/2 + \
        x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
    r = rs_cosh(p, x, 2)
    assert r == x**QQ(28,15)/6 + x**QQ(5,3) + x**QQ(8,5)/24 + x**QQ(7,5) + \
        x**QQ(4,3)/2 + x**QQ(16,15) + x**QQ(4,5)/2 + 1
    r = rs_tanh(p, x, 2)
    assert r == -x**QQ(9,5) - x**QQ(26,15) - x**QQ(22,15) - x**QQ(6,5)/3 + \
        x + x**QQ(2,3) + x**QQ(2,5)
def test1():
    # Miscellaneous univariate checks: Laurent-style multiplication of the
    # sin series by x**(-5), fractional nth roots of the sin series (and a
    # power of such a root), and exp evaluated at sqrt(x).
    R, x = ring('x', QQ)
    r = rs_sin(x, x, 15)*x**(-5)
    assert r == x**8/6227020800 - x**6/39916800 + x**4/362880 - x**2/5040 + \
        QQ(1,120) - x**-2/6 + x**-4
    p = rs_sin(x, x, 10)
    r = rs_nth_root(p, 2, x, 10)
    assert r == -67*x**QQ(17,2)/29030400 - x**QQ(13,2)/24192 + \
        x**QQ(9,2)/1440 - x**QQ(5,2)/12 + x**QQ(1,2)
    p = rs_sin(x, x, 10)
    r = rs_nth_root(p, 7, x, 10)
    r = rs_pow(r, 5, x, 10)
    assert r == -97*x**QQ(61,7)/124467840 - x**QQ(47,7)/16464 + \
        11*x**QQ(33,7)/3528 - 5*x**QQ(19,7)/42 + x**QQ(5,7)
    r = rs_exp(x**QQ(1,2), x, 10)
    assert r == x**QQ(19,2)/121645100408832000 + x**9/6402373705728000 + \
        x**QQ(17,2)/355687428096000 + x**8/20922789888000 + \
        x**QQ(15,2)/1307674368000 + x**7/87178291200 + \
        x**QQ(13,2)/6227020800 + x**6/479001600 + x**QQ(11,2)/39916800 + \
        x**5/3628800 + x**QQ(9,2)/362880 + x**4/40320 + x**QQ(7,2)/5040 + \
        x**3/720 + x**QQ(5,2)/120 + x**2/24 + x**QQ(3,2)/6 + x/2 + \
        x**QQ(1,2) + 1
def test_puiseux2():
    # Puiseux series where the coefficient domain is itself a polynomial
    # ring: x ranges over QQ[y], so coefficients are polynomials in y.
    R, y = ring('y', QQ)
    S, x = ring('x', R)
    p = x + x**QQ(1,5)*y
    r = rs_atan(p, x, 3)
    assert r == (y**13/13 + y**8 + 2*y**3)*x**QQ(13,5) - (y**11/11 + y**6 +
        y)*x**QQ(11,5) + (y**9/9 + y**4)*x**QQ(9,5) - (y**7/7 +
        y**2)*x**QQ(7,5) + (y**5/5 + 1)*x - y**3*x**QQ(3,5)/3 + y*x**QQ(1,5)
| true | true |
f7f43d3dbe815e422dee5f434812cd2cbacb19e4 | 1,255 | py | Python | back/fs/subtitler.py | rkohser/gustaf2 | b9f4dc0a9b5adca94161f9c59fa9907e1842b091 | [
"MIT"
] | null | null | null | back/fs/subtitler.py | rkohser/gustaf2 | b9f4dc0a9b5adca94161f9c59fa9907e1842b091 | [
"MIT"
] | null | null | null | back/fs/subtitler.py | rkohser/gustaf2 | b9f4dc0a9b5adca94161f9c59fa9907e1842b091 | [
"MIT"
] | null | null | null | import os
from subliminal import download_best_subtitles, save_subtitles
from subliminal.video import Episode
from subliminal.core import search_external_subtitles
from babelfish.language import Language
class Subtitler:
    """Downloads and saves subtitles for episode files via subliminal."""

    def __init__(self, languages, providers):
        # languages: iterable of IETF language tags (parsed with
        #            babelfish Language.fromietf, e.g. 'en')
        # providers: subliminal provider names to query
        self.languages = languages
        self.providers = providers

    def subtitle(self, episodes):
        """Fetch the best available subtitles for each episode.

        Each episode dict must contain at least 'dir' and 'filename' keys
        plus whatever guess metadata ``Episode.fromguess`` consumes.
        """
        # Parse babelfish languages
        bb_lang = {Language.fromietf(l) for l in self.languages}
        # Create subliminal episode set
        sub_episodes = set()
        for episode in episodes:
            ep_path = os.path.join(episode['dir'], episode['filename'])
            sub_episode = Episode.fromguess(ep_path, episode)
            # Look for external subtitles (not done automatically, apparently)
            sub_episode.subtitle_languages |= set(search_external_subtitles(sub_episode.name).values())
            sub_episodes.add(sub_episode)
        # download subtitles in the specified language
        subl_subtitles = download_best_subtitles(sub_episodes, bb_lang, providers=self.providers)
        for video, subtitles in subl_subtitles.items():
            save_subtitles(video, subtitles)
# save subtitle languages in episode dict | 31.375 | 103 | 0.701195 | import os
from subliminal import download_best_subtitles, save_subtitles
from subliminal.video import Episode
from subliminal.core import search_external_subtitles
from babelfish.language import Language
class Subtitler:
def __init__(self, languages, providers):
self.languages = languages
self.providers = providers
def subtitle(self, episodes):
bb_lang = {Language.fromietf(l) for l in self.languages}
sub_episodes = set()
for episode in episodes:
ep_path = os.path.join(episode['dir'], episode['filename'])
sub_episode = Episode.fromguess(ep_path, episode)
sub_episode.subtitle_languages |= set(search_external_subtitles(sub_episode.name).values())
sub_episodes.add(sub_episode)
subl_subtitles = download_best_subtitles(sub_episodes, bb_lang, providers=self.providers)
for video, subtitles in subl_subtitles.items():
save_subtitles(video, subtitles)
| true | true |
f7f43d801ece20fe1e29343f5116b7a4268dc10d | 3,610 | py | Python | src/trader/management/commands/ticker.py | edse/bl3ptrader | 40c83751f2b854e9a5d0f915dce7fd84dc9d7233 | [
"MIT"
] | 1 | 2017-11-19T13:35:34.000Z | 2017-11-19T13:35:34.000Z | src/trader/management/commands/ticker.py | edse/bl3ptrader | 40c83751f2b854e9a5d0f915dce7fd84dc9d7233 | [
"MIT"
] | 3 | 2020-02-11T23:39:00.000Z | 2021-06-10T19:12:20.000Z | src/trader/management/commands/ticker.py | edse/bl3ptrader | 40c83751f2b854e9a5d0f915dce7fd84dc9d7233 | [
"MIT"
] | null | null | null | import ipdb; ipdb.set_trace()
from django.core.management.base import BaseCommand
from .trader import Trader
# import logging
# from django.conf import settings
# import json
# import requests
# from time import sleep
import websocket
# from .storage import Storage
from .base import * # noqa
websocket.enableTrace(True)
class Command(BaseCommand):
ws = None
trade = None
def handle(self, *args, **options):
self.trader = Trader()
# self.run_ticker()
# trader = None
# ws_trades = None
# ws_ticker = None
# def get_ticker_path():
# return settings.EXCHANGES['BL3P']['public']['http'] + \
# settings.EXCHANGES['BL3P']['public']['paths']['ticker']
# def get_trades_path():
# return settings.EXCHANGES['BL3P']['public']['wss'] + \
# settings.EXCHANGES['BL3P']['public']['paths']['trades']
# def parse_trade(message):
# data = json.loads(message)
# price = float(data['price_int']) / NORM_PRICE
# amount = float(data['amount_int']) / NORM_AMOUNT
# return Storage.store([{
# 'measurement': 'BTC_EUR',
# 'tags': {
# 'asset': 'BTC',
# 'currency': 'EUR'
# },
# 'fields': {
# 'timestamp': data['date'],
# 'price': price,
# 'amount': amount
# }
# }])
# def on_trade_message(ws, message):
# logger.setLevel(logging.WARNING)
# data = parse_trade(message)[0]
# if settings.DEBUG:
# logger.debug('parsed: %s', data)
# trader.analyse(data)
# def on_error(ws, error):
# if settings.DEBUG:
# logger.exception('Websocket error... %s', error)
# def run_ticker(trader):
# logger.setLevel(logging.DEBUG)
# last = 0
# while True:
# print 'asdf'
# response = requests.get(get_ticker_path())
# if response.status_code != 200:
# print('unexpected response: %s%s' % response.status_code, response.content)
# return False
# data = json.loads(response.content)
# if settings.DEBUG:
# logger.debug(data['last'])
# if float(data['last']) != last:
# last = float(data['last'])
# stored = Storage.store([{
# 'measurement': 'BTC_EUR',
# 'tags': {
# 'asset': 'BTC',
# 'currency': 'EUR'
# },
# 'fields': {
# 'timestamp': data['timestamp'],
# 'price': last,
# }
# }])
# # if settings.DEBUG:
# # logger.debug(stored[0])
# trader.analyse(stored[0])
# sleep(float(settings.BOT_TICKER_INTERVAL))
# def run_trader(trader):
# trader = trader # noqa
# ws_trades = websocket.WebSocketApp(
# get_trades_path(),
# on_message=on_trade_message,
# on_error=on_error
# )
# ws_trades.run_forever()
# def run():
# from subprocess import Popen
# from sys import stdout, stdin, stderr
# import time
# import os
# import signal
# commands = [
# './src/manage.py trader',
# './src/manage.py ticker'
# ]
# proc_list = []
# for command in commands:
# print "$ " + command
# proc = Popen(command, shell=True, stdin=stdin, stdout=stdout, stderr=stderr)
# proc_list.append(proc)
# try:
# while True:
# time.sleep(10)
# except KeyboardInterrupt:
# for proc in proc_list:
# os.kill(proc.pid, signal.SIGKILL)
| 22.283951 | 89 | 0.539335 | import ipdb; ipdb.set_trace()
from django.core.management.base import BaseCommand
from .trader import Trader
import websocket
from .base import *
websocket.enableTrace(True)
class Command(BaseCommand):
ws = None
trade = None
def handle(self, *args, **options):
self.trader = Trader()
| true | true |
f7f43e2635350e67a54d981786abf7cabfd64847 | 3,818 | py | Python | pysat/instruments/de2_lang.py | scivision/pysat | 2916ff7c77ad4201c537acca91b7c0b46b542e82 | [
"BSD-3-Clause"
] | null | null | null | pysat/instruments/de2_lang.py | scivision/pysat | 2916ff7c77ad4201c537acca91b7c0b46b542e82 | [
"BSD-3-Clause"
] | null | null | null | pysat/instruments/de2_lang.py | scivision/pysat | 2916ff7c77ad4201c537acca91b7c0b46b542e82 | [
"BSD-3-Clause"
] | 1 | 2018-10-26T02:42:50.000Z | 2018-10-26T02:42:50.000Z | # -*- coding: utf-8 -*-
"""Supports the Langmuir Probe (LANG) instrument on Dynamics Explorer 2 (DE2).
From CDAWeb:
The Langmuir Probe Instrument (LANG) was a cylindrical electrostatic probe that
obtained measurements of electron temperature, Te, and electron or ion
concentration, Ne or Ni, respectively, and spacecraft potential. Data from
this investigation were used to provide temperature and density measurements
along magnetic field lines related to thermal energy and particle flows within
the magnetosphere-ionosphere system, to provide thermal plasma conditions for
wave-particle interactions, and to measure large-scale and fine-structure
ionospheric effects of energy deposition in the ionosphere. The Langmuir Probe
instrument was identical to that used on the AE satellites and the Pioneer
Venus Orbiter. Two independent sensors were connected to individual adaptive
sweep voltage circuits which continuously tracked the changing electron
temperature and spacecraft potential, while autoranging electrometers adjusted
their gain in response to the changing plasma density. The control signals used
to achieve this automatic tracking provided a continuous monitor of the
ionospheric parameters without telemetering each volt-ampere (V-I) curve.
Furthermore, internal data storage circuits permitted high resolution, high
data rate sampling of selected V-I curves for transmission to ground to verify
or correct the inflight processed data. Time resolution was 0.5 seconds.
References
----------
J. P. Krehbiel, L. H. Brace, R. F. Theis, W. H. Pinkus, and R. B. Kaplan,
The Dynamics Explorer 2 Langmuir Probe (LANG), Space Sci. Instrum., v. 5, n. 4,
p. 493, 1981.
Properties
----------
platform
'de2'
name
'lang'
sat_id
None Supported
tag
None Supported
Authors
-------
J. Klenzing
"""
from __future__ import print_function
from __future__ import absolute_import
import functools
import pysat
from pysat.instruments.methods import nasa_cdaweb as cdw
from pysat.instruments.methods import general as mm_gen
# pysat instrument module attributes for DE2 LANG.
platform = 'de2'
name = 'lang'
tags = {'': '500 ms cadence Langmuir Probe data'}
sat_ids = {'': ['']}
_test_dates = {'': {'': pysat.datetime(1983, 1, 1)}}
# CDF filename template used both for local listing and remote download.
fname = 'de2_plasma500ms_lang_{year:04d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'': {'': fname}}
# use the CDAWeb methods list files routine
# (the partial captures the filename-template version of supported_tags
# by keyword here, before the rebinding below)
list_files = functools.partial(mm_gen.list_files,
                               supported_tags=supported_tags)
# use the default CDAWeb method
load = cdw.load
# support download routine
# NOTE: `supported_tags` is deliberately rebound to the download-style
# mapping from this point on; `list_files` above is unaffected.
basic_tag = {'dir': '/pub/data/de/de2/plasma_lang/plasma500ms_lang_cdaweb',
             'remote_fname': '{year:4d}/' + fname,
             'local_fname': fname}
supported_tags = {'': {'': basic_tag}}
download = functools.partial(cdw.download, supported_tags)
# support listing files currently on CDAWeb
list_remote_files = functools.partial(cdw.list_remote_files,
                                      supported_tags=supported_tags)
def clean(inst):
    """Routine to return PLATFORM/NAME data cleaned to the specified level

    Cleaning level is specified in inst.clean_level and pysat
    will accept user input for several strings. The clean_level is
    specified at instantiation of the Instrument object.

    'clean' All parameters should be good, suitable for statistical and
            case studies
    'dusty' All parameters should generally be good though some may
            not be great
    'dirty' There are data areas that have issues, data should be used
            with caution
    'none'  No cleaning applied, routine not called in this case.

    Parameters
    -----------
    inst : pysat.Instrument
        Instrument class object, whose attribute clean_level is used to return
        the desired level of data selectivity.

    """
    # No instrument-specific cleaning is applied for this data set.
    return
| 34.089286 | 79 | 0.736773 |
from __future__ import print_function
from __future__ import absolute_import
import functools
import pysat
from pysat.instruments.methods import nasa_cdaweb as cdw
from pysat.instruments.methods import general as mm_gen
platform = 'de2'
name = 'lang'
tags = {'': '500 ms cadence Langmuir Probe data'}
sat_ids = {'': ['']}
_test_dates = {'': {'': pysat.datetime(1983, 1, 1)}}
fname = 'de2_plasma500ms_lang_{year:04d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'': {'': fname}}
list_files = functools.partial(mm_gen.list_files,
supported_tags=supported_tags)
load = cdw.load
basic_tag = {'dir': '/pub/data/de/de2/plasma_lang/plasma500ms_lang_cdaweb',
'remote_fname': '{year:4d}/' + fname,
'local_fname': fname}
supported_tags = {'': {'': basic_tag}}
download = functools.partial(cdw.download, supported_tags)
list_remote_files = functools.partial(cdw.list_remote_files,
supported_tags=supported_tags)
def clean(inst):
return
| true | true |
f7f43ee4f2308325f1d94bb3259b97233d268597 | 1,156 | py | Python | 2016/10/runbots.py | Apreche/advent2016 | 81e72efe670a62065297d1db4a2eef544bc36cb6 | [
"MIT"
] | 3 | 2020-12-12T22:05:06.000Z | 2021-12-11T17:05:48.000Z | 2016/10/runbots.py | Apreche/advent-of-code | 81e72efe670a62065297d1db4a2eef544bc36cb6 | [
"MIT"
] | null | null | null | 2016/10/runbots.py | Apreche/advent-of-code | 81e72efe670a62065297d1db4a2eef544bc36cb6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import re

from bots import Factory

# "value V goes to bot B" seeds bot B with chip value V.
input_pattern = re.compile(r'^value (\d+) goes to bot (\d+)$')
# "bot B gives low|high to bot|output T1 and high|low to bot|output T2".
p2 = r'^bot (\d+) gives (low|high) to (bot|output) (\d+) and (high|low) to (bot|output) (\d+)$'
instruction_pattern = re.compile(p2)

instructions = []
factory = Factory()

with open('input.txt', 'r') as input_file:
    for line in input_file:
        line = line.strip()
        match = input_pattern.search(line)
        if match is not None:
            value, bot_id = match.groups()
            factory.input(int(bot_id), int(value))
            continue
        match = instruction_pattern.search(line)
        if match is not None:
            instructions.append(match.groups())
            continue

# Replay the pending instructions until all have been applied: a bot can
# only act once it holds two chips, so some instructions must wait for
# earlier ones to complete.
while instructions:
    # Iterate over a snapshot: the original code removed items from
    # `instructions` while looping over it, which silently skips the
    # element following each removal.
    for instruction in list(instructions):
        from_id, from1, to1, to1_id, from2, to2, to2_id = instruction
        from_id = int(from_id)
        to1_id = int(to1_id)
        to2_id = int(to2_id)
        result = factory.instruct(from_id, from1, to1, to1_id)
        if result:
            result = factory.instruct(from_id, from2, to2, to2_id)
            instructions.remove(instruction)

# print(...) works identically under Python 2 and 3 for a single argument.
print(factory)
| 30.421053 | 95 | 0.617647 |
import re
from bots import Factory
input_pattern = re.compile(r'^value (\d+) goes to bot (\d+)$')
p2 = r'^bot (\d+) gives (low|high) to (bot|output) (\d+) and (high|low) to (bot|output) (\d+)$'
instruction_pattern = re.compile(p2)
instructions = []
factory = Factory()
with open('input.txt', 'r') as input_file:
for line in input_file:
line = line.strip()
match = input_pattern.search(line)
if match is not None:
value, bot_id = match.groups()
factory.input(int(bot_id), int(value))
continue
match = instruction_pattern.search(line)
if match is not None:
instructions.append(match.groups())
continue
while instructions:
for instruction in instructions:
from_id, from1, to1, to1_id, from2, to2, to2_id = instruction
from_id = int(from_id)
to1_id = int(to1_id)
to2_id = int(to2_id)
result = factory.instruct(from_id, from1, to1, to1_id)
if result:
result = factory.instruct(from_id, from2, to2, to2_id)
instructions.remove(instruction)
print factory
| false | true |
f7f44155bc8ab764412d569a9d816f0921c064ff | 5,559 | py | Python | tensorflow/python/eager/tape_test.py | DHsLc/test | f286c78b619b81ca95ba9f738cc0de4e14440e44 | [
"Apache-2.0"
] | 5 | 2021-01-11T01:51:57.000Z | 2021-12-11T17:19:08.000Z | tensorflow/python/eager/tape_test.py | radi2015/tensorflow | 4b2fb49fd7578afe7e289936f347af581b5bdab1 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/eager/tape_test.py | radi2015/tensorflow | 4b2fb49fd7578afe7e289936f347af581b5bdab1 | [
"Apache-2.0"
] | 3 | 2020-07-02T13:46:32.000Z | 2021-01-11T01:52:01.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for autograd-based gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import custom_gradient
from tensorflow.python.eager import tensor
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Importing nn_grad for the registration functions.
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
@custom_gradient.custom_gradient
def two_outputs(a, b):
  """Op with two outputs (a matmul and its reduced sum) and a custom grad.

  Used by the tests below to exercise gradients through a multi-output
  custom-gradient function.
  """
  mm = math_ops.matmul(a, b)
  r = math_ops.reduce_sum(mm)

  def grad(dmm, dr):
    # Combine the gradient contributions flowing back through both
    # outputs: dmm for the matmul output, dr for the scalar sum.
    return [
        math_ops.matmul(dmm, b, transpose_b=True) +
        math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True),
        math_ops.matmul(a, dmm, transpose_b=True) +
        math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True)
    ]

  return [mm, r], grad
class TapeTest(test.TestCase):
  """Checks eager gradients (backprop.gradients_function) against
  graph-mode tf.gradients results and hand-computed values."""

  def testMultiOutput(self):

    def fn(x, y):
      c = x + y
      # Multiple outputs from split.
      d, f = array_ops.split(c, 2)
      return d + f

    a = tensor.Tensor([[1., 0.], [0., 1.]])
    b = tensor.Tensor([[1., 2.], [3., 4.]])
    da, db = backprop.gradients_function(fn, [0, 1])(a, b)
    # Compare the eager gradients with graph-mode tf.gradients.
    with context.graph_mode(), self.test_session():
      tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)
      tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
      tf_c = tf_a + tf_b
      tf_d, tf_f = array_ops.split(tf_c, 2, axis=1)
      tf_e = tf_d + tf_f
      tf_da, tf_db = gradients_impl.gradients(tf_e, [tf_a, tf_b])
      self.assertAllEqual(da.numpy(), tf_da.eval())
      self.assertAllEqual(db.numpy(), tf_db.eval())

  def testBasicFunctional(self):

    def forward(a, b):
      mm = math_ops.matmul(a, b)
      return math_ops.reduce_sum(mm)

    aa = tensor.Tensor([[1., 0.], [0., 1.]])
    bb = tensor.Tensor([[1., 2.], [3., 4.]])
    # Differentiate with respect to 'a' selected by parameter name.
    da, = backprop.gradients_function(forward, ['a'])(aa, bb)
    self.assertAllEqual(da.numpy(),
                        math_ops.matmul(
                            array_ops.ones_like(aa),
                            array_ops.transpose(bb)).numpy())

  def testBasicFunctionalPositionalArg(self):

    def forward(a, b):
      mm = math_ops.matmul(a, b)
      return math_ops.reduce_sum(mm)

    aa = tensor.Tensor([[1., 0.], [0., 1.]])
    bb = tensor.Tensor([[1., 2.], [3., 4.]])
    # Same as above but selecting the argument by position.
    da, = backprop.gradients_function(forward, [0])(aa, bb)
    self.assertAllEqual(da.numpy(),
                        math_ops.matmul(
                            array_ops.ones_like(aa),
                            array_ops.transpose(bb)).numpy())

  def testBasicFunctionalWithValue(self):

    def forward(a, b):
      mm = math_ops.matmul(a, b)
      return math_ops.reduce_sum(mm)

    aa = tensor.Tensor([[1., 0.], [0., 1.]])
    bb = tensor.Tensor([[1., 2.], [3., 4.]])
    # val_and_grad_function returns the forward value and the gradients.
    val, (da,) = backprop.val_and_grad_function(forward, ['a'])(aa, bb)
    self.assertAllEqual(da.numpy(),
                        math_ops.matmul(
                            array_ops.ones_like(aa),
                            array_ops.transpose(bb)).numpy())
    self.assertAllEqual(val.numpy(), forward(aa, bb).numpy())

  def testTwoOutputs(self):

    def fn(x, y):
      mm, r = two_outputs(x, y)
      return r + math_ops.reduce_sum(mm)

    a = tensor.Tensor([[1., 0.], [0., 1.]])
    b = tensor.Tensor([[1., 2.], [3., 4.]])
    da, db = backprop.gradients_function(fn, [0, 1])(a, b)
    with context.graph_mode(), self.test_session():
      tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)
      tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
      tf_mm = math_ops.matmul(tf_a, tf_b)
      # r + reduce_sum(mm) equals 2 * reduce_sum(matmul) for this op.
      tf_rr = 2 * math_ops.reduce_sum(tf_mm)
      tf_da, tf_db = gradients_impl.gradients(tf_rr, [tf_a, tf_b])
      self.assertAllEqual(da.numpy(), tf_da.eval())
      self.assertAllEqual(db.numpy(), tf_db.eval())

  def testGcTwoOutputs(self):

    def fn(x, y):
      return nn_ops.sparse_softmax_cross_entropy_with_logits(logits=x,
                                                             labels=y)[0]

    labels = tensor.Tensor([0])
    logits = tensor.Tensor([[0.0]])
    grad, = backprop.gradients_function(fn, [0])(logits, labels)
    self.assertAllEqual(grad.numpy(), [[0.0]])

  def testTfTensor(self):

    def fn(x):
      return x

    # Gradients also work when called with a graph constant_op tensor.
    t = constant_op.constant(1.0)
    g, = backprop.gradients_function(fn, [0])(t)
    self.assertEqual(g.numpy(), 1.0)
if __name__ == '__main__':
test.main()
| 34.314815 | 80 | 0.624573 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import custom_gradient
from tensorflow.python.eager import tensor
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_ops
@custom_gradient.custom_gradient
def two_outputs(a, b):
mm = math_ops.matmul(a, b)
r = math_ops.reduce_sum(mm)
def grad(dmm, dr):
return [
math_ops.matmul(dmm, b, transpose_b=True) +
math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True),
math_ops.matmul(a, dmm, transpose_b=True) +
math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True)
]
return [mm, r], grad
class TapeTest(test.TestCase):
def testMultiOutput(self):
def fn(x, y):
c = x + y
d, f = array_ops.split(c, 2)
return d + f
a = tensor.Tensor([[1., 0.], [0., 1.]])
b = tensor.Tensor([[1., 2.], [3., 4.]])
da, db = backprop.gradients_function(fn, [0, 1])(a, b)
with context.graph_mode(), self.test_session():
tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)
tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
tf_c = tf_a + tf_b
tf_d, tf_f = array_ops.split(tf_c, 2, axis=1)
tf_e = tf_d + tf_f
tf_da, tf_db = gradients_impl.gradients(tf_e, [tf_a, tf_b])
self.assertAllEqual(da.numpy(), tf_da.eval())
self.assertAllEqual(db.numpy(), tf_db.eval())
def testBasicFunctional(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = tensor.Tensor([[1., 0.], [0., 1.]])
bb = tensor.Tensor([[1., 2.], [3., 4.]])
da, = backprop.gradients_function(forward, ['a'])(aa, bb)
self.assertAllEqual(da.numpy(),
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)).numpy())
def testBasicFunctionalPositionalArg(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = tensor.Tensor([[1., 0.], [0., 1.]])
bb = tensor.Tensor([[1., 2.], [3., 4.]])
da, = backprop.gradients_function(forward, [0])(aa, bb)
self.assertAllEqual(da.numpy(),
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)).numpy())
def testBasicFunctionalWithValue(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = tensor.Tensor([[1., 0.], [0., 1.]])
bb = tensor.Tensor([[1., 2.], [3., 4.]])
val, (da,) = backprop.val_and_grad_function(forward, ['a'])(aa, bb)
self.assertAllEqual(da.numpy(),
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)).numpy())
self.assertAllEqual(val.numpy(), forward(aa, bb).numpy())
def testTwoOutputs(self):
def fn(x, y):
mm, r = two_outputs(x, y)
return r + math_ops.reduce_sum(mm)
a = tensor.Tensor([[1., 0.], [0., 1.]])
b = tensor.Tensor([[1., 2.], [3., 4.]])
da, db = backprop.gradients_function(fn, [0, 1])(a, b)
with context.graph_mode(), self.test_session():
tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)
tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
tf_mm = math_ops.matmul(tf_a, tf_b)
tf_rr = 2 * math_ops.reduce_sum(tf_mm)
tf_da, tf_db = gradients_impl.gradients(tf_rr, [tf_a, tf_b])
self.assertAllEqual(da.numpy(), tf_da.eval())
self.assertAllEqual(db.numpy(), tf_db.eval())
def testGcTwoOutputs(self):
def fn(x, y):
return nn_ops.sparse_softmax_cross_entropy_with_logits(logits=x,
labels=y)[0]
labels = tensor.Tensor([0])
logits = tensor.Tensor([[0.0]])
grad, = backprop.gradients_function(fn, [0])(logits, labels)
self.assertAllEqual(grad.numpy(), [[0.0]])
def testTfTensor(self):
def fn(x):
return x
t = constant_op.constant(1.0)
g, = backprop.gradients_function(fn, [0])(t)
self.assertEqual(g.numpy(), 1.0)
if __name__ == '__main__':
test.main()
| true | true |
f7f441ca738c6897f579eb809505199c66722cbd | 8,974 | py | Python | indico/modules/attachments/models/folders.py | UNOG-Indico/UNOG-Indico-v2 | 4fa4393cc1f3b453a69f5e0ea3b52c18337831a5 | [
"MIT"
] | null | null | null | indico/modules/attachments/models/folders.py | UNOG-Indico/UNOG-Indico-v2 | 4fa4393cc1f3b453a69f5e0ea3b52c18337831a5 | [
"MIT"
] | null | null | null | indico/modules/attachments/models/folders.py | UNOG-Indico/UNOG-Indico-v2 | 4fa4393cc1f3b453a69f5e0ea3b52c18337831a5 | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from collections import defaultdict
from flask import g
from sqlalchemy.event import listens_for
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import joinedload
from indico.core.db import db
from indico.core.db.sqlalchemy.links import LinkMixin, LinkType
from indico.core.db.sqlalchemy.protection import ProtectionMixin, ProtectionMode
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.modules.attachments.models.attachments import Attachment
from indico.modules.attachments.models.principals import AttachmentFolderPrincipal
from indico.modules.attachments.util import can_manage_attachments
from indico.util.decorators import strict_classproperty
from indico.util.locators import locator_property
from indico.util.string import return_ascii
class AttachmentFolder(LinkMixin, ProtectionMixin, db.Model):
__tablename__ = 'folders'
allowed_link_types = LinkMixin.allowed_link_types - {LinkType.session_block}
unique_links = 'is_default'
events_backref_name = 'all_attachment_folders'
link_backref_name = 'attachment_folders'
link_backref_lazy = 'dynamic'
    @strict_classproperty
    @staticmethod
    def __auto_table_args():
        # DB-level invariants for folders:
        # - the default folder must inherit protection from its parent object
        default_inheriting = 'not (is_default and protection_mode != {})'.format(ProtectionMode.inheriting.value)
        return (db.CheckConstraint(default_inheriting, 'default_inheriting'),
                # - exactly the default folder is untitled
                db.CheckConstraint('is_default = (title IS NULL)', 'default_or_title'),
                # - the default folder can never be deleted
                db.CheckConstraint('not (is_default and is_deleted)', 'default_not_deleted'),
                # - hidden and always-visible are mutually exclusive
                db.CheckConstraint('not (is_hidden and is_always_visible)', 'is_hidden_not_is_always_visible'),
                {'schema': 'attachments'})
    @declared_attr
    def __table_args__(cls):
        # Merge the mixins' table args with the class-specific constraints
        # declared in __auto_table_args above.
        return auto_table_args(cls)
#: The ID of the folder
id = db.Column(
db.Integer,
primary_key=True
)
#: The name of the folder (``None`` for the default folder)
title = db.Column(
db.String,
nullable=True
)
#: The description of the folder
description = db.Column(
db.Text,
nullable=False,
default=''
)
#: If the folder has been deleted
is_deleted = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: If the folder is the default folder (used for "folder-less" files)
is_default = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: If the folder is always visible (even if you cannot access it)
is_always_visible = db.Column(
db.Boolean,
nullable=False,
default=True
)
#: If the folder is never shown in the frontend (even if you can access it)
is_hidden = db.Column(
db.Boolean,
nullable=False,
default=False
)
acl_entries = db.relationship(
'AttachmentFolderPrincipal',
backref='folder',
cascade='all, delete-orphan',
collection_class=set
)
#: The ACL of the folder (used for ProtectionMode.protected)
acl = association_proxy('acl_entries', 'principal', creator=lambda v: AttachmentFolderPrincipal(principal=v))
#: The list of attachments that are not deleted, ordered by name
attachments = db.relationship(
'Attachment',
primaryjoin=lambda: (Attachment.folder_id == AttachmentFolder.id) & ~Attachment.is_deleted,
order_by=lambda: db.func.lower(Attachment.title),
viewonly=True,
lazy=True
)
# relationship backrefs:
# - all_attachments (Attachment.folder)
# - legacy_mapping (LegacyAttachmentFolderMapping.folder)
    @property
    def protection_parent(self):
        # Protection settings are inherited from the object (event, session,
        # contribution, ...) this folder is linked to.
        return self.object
@classmethod
def get_or_create_default(cls, linked_object):
"""Get the default folder for the given object or creates it."""
folder = cls.find_first(is_default=True, object=linked_object)
if folder is None:
folder = cls(is_default=True, object=linked_object)
return folder
@classmethod
def get_or_create(cls, linked_object, title=None):
"""Get a folder for the given object or create it.
If no folder title is specified, the default folder will be
used. It is the caller's responsibility to add the folder
or an object (such as an attachment) associated with it
to the SQLAlchemy session using ``db.session.add(...)``.
"""
if title is None:
return AttachmentFolder.get_or_create_default(linked_object)
else:
folder = AttachmentFolder.find_first(object=linked_object, is_default=False, is_deleted=False, title=title)
return folder or AttachmentFolder(object=linked_object, title=title)
    @locator_property
    def locator(self):
        # Extend the linked object's locator with this folder's id.
        return dict(self.object.locator, folder_id=self.id)
def can_access(self, user, *args, **kwargs):
"""Check if the user is allowed to access the folder.
This is the case if the user has access the folder or if the
user can manage attachments for the linked object.
"""
return (super(AttachmentFolder, self).can_access(user, *args, **kwargs) or
can_manage_attachments(self.object, user))
def can_view(self, user):
"""Check if the user can see the folder.
This does not mean the user can actually access its contents.
It just determines if it is visible to him or not.
"""
if self.is_hidden:
return False
if not self.object.can_access(user):
return False
return self.is_always_visible or super(AttachmentFolder, self).can_access(user)
@classmethod
def get_for_linked_object(cls, linked_object, preload_event=False):
"""Get the attachments for the given object.
This only returns attachments that haven't been deleted.
:param linked_object: A category, event, session, contribution or
subcontribution.
:param preload_event: If all attachments for the same event should
be pre-loaded and cached in the app context.
This must not be used when ``linked_object``
is a category.
"""
from indico.modules.attachments.api.util import get_event
event = get_event(linked_object)
if event and event in g.get('event_attachments', {}):
return g.event_attachments[event].get(linked_object, [])
elif not preload_event or not event:
return (linked_object.attachment_folders.filter_by(is_deleted=False)
.order_by(AttachmentFolder.is_default.desc(), db.func.lower(AttachmentFolder.title))
.options(joinedload(AttachmentFolder.attachments))
.all())
else:
if 'event_attachments' not in g:
g.event_attachments = {}
g.event_attachments[event] = defaultdict(list)
query = (event.all_attachment_folders
.filter_by(is_deleted=False)
.order_by(AttachmentFolder.is_default.desc(), db.func.lower(AttachmentFolder.title))
.options(joinedload(AttachmentFolder.attachments),
joinedload(AttachmentFolder.linked_event),
joinedload(AttachmentFolder.session),
joinedload(AttachmentFolder.contribution),
joinedload(AttachmentFolder.subcontribution)))
# populate cache
for obj in query:
g.event_attachments[event][obj.object].append(obj)
return g.event_attachments[event].get(linked_object, [])
@return_ascii
def __repr__(self):
return '<AttachmentFolder({}, {}{}{}{}, {}, {})>'.format(
self.id,
self.title,
', is_default=True' if self.is_default else '',
', is_always_visible=False' if not self.is_always_visible else '',
', is_hidden=True' if self.is_hidden else '',
', is_deleted=True' if self.is_deleted else '',
self.protection_repr
)
@listens_for(AttachmentFolder.attachments, 'append')
@listens_for(AttachmentFolder.attachments, 'remove')
def _wrong_attachments_modified(target, value, *unused):
raise Exception('AttachmentFolder.attachments is view-only. Use all_attachments for write operations!')
AttachmentFolder.register_link_events()
AttachmentFolder.register_protection_events()
| 38.848485 | 119 | 0.662804 |
from __future__ import unicode_literals
from collections import defaultdict
from flask import g
from sqlalchemy.event import listens_for
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import joinedload
from indico.core.db import db
from indico.core.db.sqlalchemy.links import LinkMixin, LinkType
from indico.core.db.sqlalchemy.protection import ProtectionMixin, ProtectionMode
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.modules.attachments.models.attachments import Attachment
from indico.modules.attachments.models.principals import AttachmentFolderPrincipal
from indico.modules.attachments.util import can_manage_attachments
from indico.util.decorators import strict_classproperty
from indico.util.locators import locator_property
from indico.util.string import return_ascii
class AttachmentFolder(LinkMixin, ProtectionMixin, db.Model):
__tablename__ = 'folders'
allowed_link_types = LinkMixin.allowed_link_types - {LinkType.session_block}
unique_links = 'is_default'
events_backref_name = 'all_attachment_folders'
link_backref_name = 'attachment_folders'
link_backref_lazy = 'dynamic'
@strict_classproperty
@staticmethod
def __auto_table_args():
default_inheriting = 'not (is_default and protection_mode != {})'.format(ProtectionMode.inheriting.value)
return (db.CheckConstraint(default_inheriting, 'default_inheriting'),
db.CheckConstraint('is_default = (title IS NULL)', 'default_or_title'),
db.CheckConstraint('not (is_default and is_deleted)', 'default_not_deleted'),
db.CheckConstraint('not (is_hidden and is_always_visible)', 'is_hidden_not_is_always_visible'),
{'schema': 'attachments'})
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
id = db.Column(
db.Integer,
primary_key=True
)
title = db.Column(
db.String,
nullable=True
)
description = db.Column(
db.Text,
nullable=False,
default=''
)
is_deleted = db.Column(
db.Boolean,
nullable=False,
default=False
)
is_default = db.Column(
db.Boolean,
nullable=False,
default=False
)
is_always_visible = db.Column(
db.Boolean,
nullable=False,
default=True
)
is_hidden = db.Column(
db.Boolean,
nullable=False,
default=False
)
acl_entries = db.relationship(
'AttachmentFolderPrincipal',
backref='folder',
cascade='all, delete-orphan',
collection_class=set
)
acl = association_proxy('acl_entries', 'principal', creator=lambda v: AttachmentFolderPrincipal(principal=v))
attachments = db.relationship(
'Attachment',
primaryjoin=lambda: (Attachment.folder_id == AttachmentFolder.id) & ~Attachment.is_deleted,
order_by=lambda: db.func.lower(Attachment.title),
viewonly=True,
lazy=True
)
@property
def protection_parent(self):
return self.object
@classmethod
def get_or_create_default(cls, linked_object):
folder = cls.find_first(is_default=True, object=linked_object)
if folder is None:
folder = cls(is_default=True, object=linked_object)
return folder
@classmethod
def get_or_create(cls, linked_object, title=None):
if title is None:
return AttachmentFolder.get_or_create_default(linked_object)
else:
folder = AttachmentFolder.find_first(object=linked_object, is_default=False, is_deleted=False, title=title)
return folder or AttachmentFolder(object=linked_object, title=title)
@locator_property
def locator(self):
return dict(self.object.locator, folder_id=self.id)
def can_access(self, user, *args, **kwargs):
return (super(AttachmentFolder, self).can_access(user, *args, **kwargs) or
can_manage_attachments(self.object, user))
def can_view(self, user):
if self.is_hidden:
return False
if not self.object.can_access(user):
return False
return self.is_always_visible or super(AttachmentFolder, self).can_access(user)
@classmethod
def get_for_linked_object(cls, linked_object, preload_event=False):
from indico.modules.attachments.api.util import get_event
event = get_event(linked_object)
if event and event in g.get('event_attachments', {}):
return g.event_attachments[event].get(linked_object, [])
elif not preload_event or not event:
return (linked_object.attachment_folders.filter_by(is_deleted=False)
.order_by(AttachmentFolder.is_default.desc(), db.func.lower(AttachmentFolder.title))
.options(joinedload(AttachmentFolder.attachments))
.all())
else:
if 'event_attachments' not in g:
g.event_attachments = {}
g.event_attachments[event] = defaultdict(list)
query = (event.all_attachment_folders
.filter_by(is_deleted=False)
.order_by(AttachmentFolder.is_default.desc(), db.func.lower(AttachmentFolder.title))
.options(joinedload(AttachmentFolder.attachments),
joinedload(AttachmentFolder.linked_event),
joinedload(AttachmentFolder.session),
joinedload(AttachmentFolder.contribution),
joinedload(AttachmentFolder.subcontribution)))
for obj in query:
g.event_attachments[event][obj.object].append(obj)
return g.event_attachments[event].get(linked_object, [])
@return_ascii
def __repr__(self):
return '<AttachmentFolder({}, {}{}{}{}, {}, {})>'.format(
self.id,
self.title,
', is_default=True' if self.is_default else '',
', is_always_visible=False' if not self.is_always_visible else '',
', is_hidden=True' if self.is_hidden else '',
', is_deleted=True' if self.is_deleted else '',
self.protection_repr
)
@listens_for(AttachmentFolder.attachments, 'append')
@listens_for(AttachmentFolder.attachments, 'remove')
def _wrong_attachments_modified(target, value, *unused):
raise Exception('AttachmentFolder.attachments is view-only. Use all_attachments for write operations!')
AttachmentFolder.register_link_events()
AttachmentFolder.register_protection_events()
| true | true |
f7f44278ba1c7f06ac9585ef939552f95caad8e7 | 189 | py | Python | doc/_i18n/en/tutorials/introduction/ef-controller/update_ef_task.py | rohanpsingh/mc_rtc | 63918efc54f561650a8e93f449c40ec156a6feb4 | [
"BSD-2-Clause"
] | null | null | null | doc/_i18n/en/tutorials/introduction/ef-controller/update_ef_task.py | rohanpsingh/mc_rtc | 63918efc54f561650a8e93f449c40ec156a6feb4 | [
"BSD-2-Clause"
] | null | null | null | doc/_i18n/en/tutorials/introduction/ef-controller/update_ef_task.py | rohanpsingh/mc_rtc | 63918efc54f561650a8e93f449c40ec156a6feb4 | [
"BSD-2-Clause"
] | null | null | null | # Get the current objective
pt = efTask.get_ef_pose()
# Update the rotation and position objective
efTask.set_ef_pose(sva.PTransformd(sva.RotY(-math.pi/2), eigen.Vector3d(0.5, -0.5, 1.2)))
| 37.8 | 89 | 0.751323 |
pt = efTask.get_ef_pose()
efTask.set_ef_pose(sva.PTransformd(sva.RotY(-math.pi/2), eigen.Vector3d(0.5, -0.5, 1.2)))
| true | true |
f7f442ab4f125c2b1ef5939d99eb61204aa66aef | 1,926 | py | Python | packages/python/plotly/plotly/validators/layout/legend/__init__.py | eisenlohr/plotly.py | 3b0e3df45036cf48f772b13bcc10ce347964aefc | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | packages/python/plotly/plotly/validators/layout/legend/__init__.py | jiangrongbo/plotly.py | df19fc702b309586cc24e25373b87e8bdbb3ff60 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | packages/python/plotly/plotly/validators/layout/legend/__init__.py | jiangrongbo/plotly.py | df19fc702b309586cc24e25373b87e8bdbb3ff60 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import sys
if sys.version_info < (3, 7):
from ._yanchor import YanchorValidator
from ._y import YValidator
from ._xanchor import XanchorValidator
from ._x import XValidator
from ._valign import ValignValidator
from ._uirevision import UirevisionValidator
from ._traceorder import TraceorderValidator
from ._tracegroupgap import TracegroupgapValidator
from ._title import TitleValidator
from ._orientation import OrientationValidator
from ._itemwidth import ItemwidthValidator
from ._itemsizing import ItemsizingValidator
from ._itemdoubleclick import ItemdoubleclickValidator
from ._itemclick import ItemclickValidator
from ._groupclick import GroupclickValidator
from ._font import FontValidator
from ._borderwidth import BorderwidthValidator
from ._bordercolor import BordercolorValidator
from ._bgcolor import BgcolorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._yanchor.YanchorValidator",
"._y.YValidator",
"._xanchor.XanchorValidator",
"._x.XValidator",
"._valign.ValignValidator",
"._uirevision.UirevisionValidator",
"._traceorder.TraceorderValidator",
"._tracegroupgap.TracegroupgapValidator",
"._title.TitleValidator",
"._orientation.OrientationValidator",
"._itemwidth.ItemwidthValidator",
"._itemsizing.ItemsizingValidator",
"._itemdoubleclick.ItemdoubleclickValidator",
"._itemclick.ItemclickValidator",
"._groupclick.GroupclickValidator",
"._font.FontValidator",
"._borderwidth.BorderwidthValidator",
"._bordercolor.BordercolorValidator",
"._bgcolor.BgcolorValidator",
],
)
| 37.764706 | 58 | 0.687954 | import sys
if sys.version_info < (3, 7):
from ._yanchor import YanchorValidator
from ._y import YValidator
from ._xanchor import XanchorValidator
from ._x import XValidator
from ._valign import ValignValidator
from ._uirevision import UirevisionValidator
from ._traceorder import TraceorderValidator
from ._tracegroupgap import TracegroupgapValidator
from ._title import TitleValidator
from ._orientation import OrientationValidator
from ._itemwidth import ItemwidthValidator
from ._itemsizing import ItemsizingValidator
from ._itemdoubleclick import ItemdoubleclickValidator
from ._itemclick import ItemclickValidator
from ._groupclick import GroupclickValidator
from ._font import FontValidator
from ._borderwidth import BorderwidthValidator
from ._bordercolor import BordercolorValidator
from ._bgcolor import BgcolorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._yanchor.YanchorValidator",
"._y.YValidator",
"._xanchor.XanchorValidator",
"._x.XValidator",
"._valign.ValignValidator",
"._uirevision.UirevisionValidator",
"._traceorder.TraceorderValidator",
"._tracegroupgap.TracegroupgapValidator",
"._title.TitleValidator",
"._orientation.OrientationValidator",
"._itemwidth.ItemwidthValidator",
"._itemsizing.ItemsizingValidator",
"._itemdoubleclick.ItemdoubleclickValidator",
"._itemclick.ItemclickValidator",
"._groupclick.GroupclickValidator",
"._font.FontValidator",
"._borderwidth.BorderwidthValidator",
"._bordercolor.BordercolorValidator",
"._bgcolor.BgcolorValidator",
],
)
| true | true |
f7f44464bbf65e99311a2ec55bb21b29e3211d83 | 713 | py | Python | src/utils/hardware_stats.py | LuizPitaAlmeida/image_caption_generator | e368b9f23ef283856a42f78b724d3181245b27de | [
"Apache-2.0"
] | null | null | null | src/utils/hardware_stats.py | LuizPitaAlmeida/image_caption_generator | e368b9f23ef283856a42f78b724d3181245b27de | [
"Apache-2.0"
] | null | null | null | src/utils/hardware_stats.py | LuizPitaAlmeida/image_caption_generator | e368b9f23ef283856a42f78b724d3181245b27de | [
"Apache-2.0"
] | null | null | null | import torch
import nvidia_smi
import psutil
class HardwareStats():
def __init__(self):
super().__init__()
self.device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")
nvidia_smi.nvmlInit()
self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
def hardware_stats(self):
"""
Returns a dict containing some hardware related stats
"""
res = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)
return {"cpu": f"{str(psutil.cpu_percent())}%",
"mem": f"{str(psutil.virtual_memory().percent)}%",
"gpu": f"{str(res.gpu)}%",
"gpu_mem": f"{str(res.memory)}%"}
| 31 | 67 | 0.591865 | import torch
import nvidia_smi
import psutil
class HardwareStats():
def __init__(self):
super().__init__()
self.device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")
nvidia_smi.nvmlInit()
self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
def hardware_stats(self):
res = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)
return {"cpu": f"{str(psutil.cpu_percent())}%",
"mem": f"{str(psutil.virtual_memory().percent)}%",
"gpu": f"{str(res.gpu)}%",
"gpu_mem": f"{str(res.memory)}%"}
| true | true |
f7f44625a306293210abae169174b6e3435d1607 | 40,419 | py | Python | pytorch_lightning/trainer/training_loop.py | javierlorenzod/pytorch-lightning | 6dba26666aa564db414eb238d99a4213006d8220 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/training_loop.py | javierlorenzod/pytorch-lightning | 6dba26666aa564db414eb238d99a4213006d8220 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/training_loop.py | javierlorenzod/pytorch-lightning | 6dba26666aa564db414eb238d99a4213006d8220 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
import numpy as np
import torch
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import RunningStage, TrainerState
from pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(self, trainer, multiple_trainloader_mode):
self.trainer = trainer
self.early_stopping_accumulator = None
self.checkpoint_accumulator = None
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self.automatic_optimization = True
self._curr_step_result = None
self._cur_grad_norm_dict = None
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
def on_trainer_init(
self,
max_epochs,
min_epochs,
max_steps,
min_steps,
num_sanity_val_steps,
automatic_optimization,
weights_summary,
):
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.interrupted = False
self.trainer.should_stop = False
self.trainer._state = TrainerState.INITIALIZING
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
self.automatic_optimization = automatic_optimization
# If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000
self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
# If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1
self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
self.trainer.weights_summary = weights_summary
if weights_summary is not None and weights_summary not in ModelSummary.MODES:
raise MisconfigurationException(
f"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}"
)
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
# provide rank to profiler
self.trainer.profile_connector.on_train_start(self.trainer)
def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# links data to the trainer
self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)
# check that model is configured correctly
self.trainer.config_validator.verify_loop_configurations(model)
# attach model log function to callback
self.trainer.callback_connector.attach_model_logging_functions(model)
    def on_train_end(self):
        """Run end-of-training teardown: final checkpoint, hooks, logger/profiler/accelerator shutdown.

        Safe to call more than once; only the first call does any work.
        """
        # guard: teardown can be reached from several code paths; only run it once
        if self._teardown_already_run:
            return
        self._teardown_already_run = True
        # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
        # when a checkpoint was saved at the last step
        self.trainer.global_step -= 1
        self.check_checkpoint_callback(should_update=True, is_last=True)
        self.trainer.global_step += 1
        # hook
        self.trainer.call_hook("on_train_end")
        # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
        # It might be related to xla tensors blocked when moving the cpu
        # kill loggers
        if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:
            self.trainer.logger.finalize("success")
        # summarize profile results (rank 0 only)
        if self.trainer.global_rank == 0:
            self.trainer.profiler.describe()
        # give accelerators a chance to finish
        self.trainer.accelerator.on_train_end()
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def check_early_stopping_callback(self, should_update):
# TODO bake this logic into the EarlyStopping callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
    def on_train_epoch_start(self, epoch):
        """Prepare trainer/loop state for a new training epoch and fire epoch-start hooks.

        Args:
            epoch: zero-based index of the epoch that is about to run.
        """
        # update training progress in trainer
        self.trainer.current_epoch = epoch
        model = self.trainer.get_model()
        # reset train dataloader
        if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
            self.trainer.reset_train_dataloader(model)
        # todo: specify the possible exception
        # best-effort: dataloaders without a ``set_epoch``-capable sampler are silently skipped
        with suppress(Exception):
            # set seed for distributed sampler (enables shuffling for each epoch)
            self.trainer.train_dataloader.sampler.set_epoch(epoch)
        # changing gradient according accumulation_scheduler
        self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())
        # stores accumulated grad fractions per batch
        self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
        # structured result accumulators for callbacks
        self.early_stopping_accumulator = Accumulator()
        self.checkpoint_accumulator = Accumulator()
        # hook
        self.trainer.call_hook("on_epoch_start")
        self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
# hook
self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model):
if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_val_dataloader(model)
    def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
        """Append per-optimizer batch outputs to ``epoch_output`` when they are needed at epoch end.

        Outputs are kept only when they auto-reduce (``Result`` objects flagged for
        epoch-end reduction) or when the user overrode ``training_epoch_end`` /
        ``on_train_epoch_end`` to reduce them manually.

        Args:
            epoch_output: list (indexed by optimizer) collecting outputs for the whole epoch.
            batch_end_outputs: per-optimizer outputs produced by the batch that just ended.
        """
        # track the outputs to reduce at the end of the epoch
        for opt_idx, opt_outputs in enumerate(batch_end_outputs):
            # the last entry is representative of the batch (e.g. the final tbptt split)
            sample_output = opt_outputs[-1]
            # decide if we need to reduce at the end of the epoch automatically
            auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
            hook_overridden = (
                is_overridden("training_epoch_end", model=self.trainer.get_model())
                or is_overridden("on_train_epoch_end", model=self.trainer.get_model())
            )
            # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
            if not (hook_overridden or auto_reduce_tng_result):
                continue
            # with 1 step (no tbptt) don't use a sequence at epoch end
            if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
                opt_outputs = opt_outputs[0]
            epoch_output[opt_idx].append(opt_outputs)
def get_optimizers_iterable(self):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
optimizers_loop_length = optimizer_freq_cumsum[-1]
current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length
# find optimzier index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
is_result_obj = isinstance(training_step_output, Result)
if is_result_obj:
training_step_output.detach()
else:
training_step_output.batch_loss = training_step_output.batch_loss.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
    def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
        """Run one forward ``training_step`` (plus ``training_step_end``) and package its result.

        Args:
            split_batch: the (possibly tbptt-split) batch to feed the model.
            batch_idx: index of the batch within the current epoch.
            opt_idx: index of the optimizer in use for this step.
            hiddens: hidden state carried over between tbptt splits.

        Returns:
            An ``AttributeDict`` with the accumulation-scaled ``closure_loss``, the
            detached ``loss``, the raw step output, the epoch-end copy of the output
            and the new hiddens; ``None`` when ``training_step`` returned nothing.
            Under manual optimization ``closure_loss``/``loss`` are ``None``.
        """
        # give the PL module a result for logging
        model_ref = self.trainer.get_model()
        with self.trainer.profiler.profile("model_forward"):
            args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
            # manually capture logged metrics
            model_ref._current_fx_name = 'training_step'
            model_ref._results = Result()
            with self.trainer.profiler.profile("training_step"):
                training_step_output = self.trainer.accelerator_backend.training_step(args)
                self.trainer.accelerator_backend.post_training_step()
            self.trainer.logger_connector.cache_logged_metrics()
            self._check_training_step_output(training_step_output)
            training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
            # normalize the raw return into (epoch-end copy, live step output)
            training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
                training_step_output, split_batch
            )
            is_result_obj = isinstance(training_step_output, Result)
            if training_step_output_for_epoch_end is None:
                return None
        # enable empty loss when using manual opt
        closure_loss = None
        untouched_loss = None
        if self.trainer.train_loop.automatic_optimization:
            # accumulate loss
            # (if accumulate_grad_batches = 1 no effect)
            if is_result_obj:
                closure_loss = training_step_output.minimize
            else:
                closure_loss = training_step_output.batch_loss
            closure_loss = closure_loss / self.trainer.accumulate_grad_batches
            # the loss will get scaled for amp. avoid any modifications to it
            untouched_loss = closure_loss.detach().clone()
        # result
        result = AttributeDict(
            closure_loss=closure_loss,
            loss=untouched_loss,
            training_step_output=training_step_output,
            training_step_output_for_epoch_end=training_step_output_for_epoch_end,
            hiddens=training_step_output.hiddens,
        )
        return result
    def _process_training_step_output(self, training_step_output, split_batch):
        """Normalize the raw ``training_step`` return into ``(epoch_end_output, step_output)``.

        Dispatches on three historical return formats: ``Result`` objects (deprecated),
        the 1.0-style scalar/dict return, and the old dict carrying ``log`` /
        ``progress_bar`` keys. Returns ``(None, None)`` when ``training_step``
        returned nothing.
        """
        training_step_output_for_epoch_end = training_step_output
        # enable validation_step return None
        if training_step_output_for_epoch_end is None:
            return None, None
        # -----------------------------------------
        # process result return (DEPRECATE in 1.0)
        # -----------------------------------------
        if isinstance(training_step_output, Result):
            training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
            return training_step_output_for_epoch_end, training_step_output
        # -----------------------------------------
        # process hybrid (1.0)
        # -----------------------------------------
        # no need for these checks in 1.0.0
        # TODO: remove checks in 1.0.0
        is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
        # a plain tensor, or a dict without the legacy log/progress_bar keys, is the 1.0 format
        is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
        if is_1_0_output:
            return self._process_training_step_output_1_0(training_step_output, split_batch)
        # -----------------------------------------
        # process old dict (deprecate 1.0)
        # -----------------------------------------
        training_step_output = self.trainer.process_dict_result(training_step_output, train=True)
        training_step_output = AttributeDict(
            batch_loss=training_step_output[0],
            pbar_on_batch_end=training_step_output[1],
            log_metrics=training_step_output[2],
            callback_metrics=training_step_output[3],
            hiddens=training_step_output[4],
        )
        # if the user decides to finally reduce things in epoch_end, save raw output without graphs
        if isinstance(training_step_output_for_epoch_end, torch.Tensor):
            training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
        else:
            training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)
        return training_step_output_for_epoch_end, training_step_output
    def _process_training_step_output_1_0(self, training_step_output, split_batch):
        """Map a 1.0-style ``training_step`` return (tensor or dict) onto the model's ``Result``.

        Mutates the model's cached ``_results`` object and returns a
        ``(detached epoch-end copy, live step output)`` pair.
        """
        result = self.trainer.get_model()._results
        loss = None
        hiddens = None
        # handle dict return
        if isinstance(training_step_output, dict):
            loss = training_step_output.pop("loss", None)
            hiddens = training_step_output.pop("hiddens", None)
            # everything left over is carried along as "extra"
            result["extra"] = training_step_output
        # handle scalar return
        elif isinstance(training_step_output, torch.Tensor):
            loss = training_step_output
            result["extra"] = {}
        # map to results under the hood
        result.minimize = loss
        result.hiddens = hiddens
        # track batch for manual reduction with result
        result.track_batch_size(len(split_batch))
        # track metrics without grads for epoch reduction
        training_step_output_for_epoch_end = copy(result)
        training_step_output_for_epoch_end.detach()
        if self.trainer.move_metrics_to_cpu:
            training_step_output_for_epoch_end.cpu()
        # what flows back into the system
        training_step_output = result
        return training_step_output_for_epoch_end, training_step_output
    def _process_result(self, training_step_output, split_batch):
        """Handle a deprecated ``TrainResult`` return: warn once and build a detached epoch-end copy.

        Args:
            training_step_output: the ``Result`` object returned by ``training_step``.
            split_batch: the batch split that produced it (used for batch-size tracking).

        Returns:
            A detached copy of ``training_step_output`` suitable for epoch-end reduction.
        """
        training_step_output.track_batch_size(len(split_batch))
        m = """
            TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
            Use self.log and .write from the LightningModule to log metrics and write predictions.
            training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
            Option 1:
            return loss
            Option 2:
            return {'loss': loss, 'anything_else': ...}
            Option 3:
            return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
            """
        rank_zero_warn(m)
        training_step_output_for_epoch_end = copy(training_step_output)
        training_step_output_for_epoch_end.detach()
        return training_step_output_for_epoch_end
    def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
        """Perform one optimizer step through the LightningModule's ``optimizer_step`` hook.

        Args:
            optimizer: the optimizer to step (wrapped into a ``LightningOptimizer`` first).
            opt_idx: index of the optimizer.
            batch_idx: index of the current batch.
            train_step_and_backward_closure: closure running training_step + backward;
                re-evaluated by closure-based optimizers such as LBFGS.

        Raises:
            MisconfigurationException: when native AMP is combined with LBFGS.
        """
        model_ref = self.trainer.get_model()
        is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
        using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
        # native amp + lbfgs is a no go right now
        if using_native_amp and is_lbfgs:
            raise MisconfigurationException(
                'native PyTorch amp and lbfgs are not compatible.'
                ' To request, please file a Github issue in PyTorch and tag @mcarilli'
            )
        # wraps into LightningOptimizer only for running step
        optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
        # model hook
        model_ref.optimizer_step(
            self.trainer.current_epoch,
            batch_idx,
            optimizer,
            opt_idx,
            train_step_and_backward_closure,
            on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
            using_native_amp=using_native_amp,
            using_lbfgs=is_lbfgs,
        )
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.get_model()
grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)
return grad_norm_dict
    def process_hiddens(self, opt_closure_result):
        """Extract the TBPTT hidden state from a closure result.

        For deprecated ``Result`` outputs, also drops the hiddens from the
        epoch-end copy so no graph references are carried across steps.
        """
        hiddens = opt_closure_result.hiddens
        if isinstance(opt_closure_result.training_step_output, Result):
            opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()
        return hiddens
def tbptt_split_batch(self, batch):
splits = [batch]
if self.trainer.truncated_bptt_steps is not None:
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)
return splits
    def run_training_epoch(self):
        """Run one full training epoch.

        Iterates the (possibly profiled/distributed) train dataloader, runs each
        batch, logs step metrics, interleaves validation/checkpointing when due,
        steps LR schedulers, and finally fires the epoch-end hooks and logging.
        """
        # modify dataloader if needed (ddp, etc...)
        train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)
        # track epoch output
        epoch_output = [[] for _ in range(self.num_optimizers)]
        train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
        dataloader_idx = 0
        should_check_val = False
        for batch_idx, (batch, is_last_batch) in train_dataloader:
            self.trainer.batch_idx = batch_idx
            # ------------------------------------
            # TRAINING_STEP + TRAINING_STEP_END
            # ------------------------------------
            with self.trainer.profiler.profile("run_training_batch"):
                batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
            # when returning -1 from train_step, we end epoch early
            if batch_output.signal == -1:
                break
            batch_end_outputs = self.process_train_step_outputs(
                batch_output.training_step_output_for_epoch_end,
                self.early_stopping_accumulator,
                self.checkpoint_accumulator,
            )
            # hook
            # TODO: add outputs to batches
            self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)
            # -----------------------------------------
            # SAVE METRICS TO LOGGERS
            # -----------------------------------------
            self.trainer.logger_connector.log_train_step_metrics(batch_output)
            # -----------------------------------------
            # VALIDATE IF NEEDED + CHECKPOINT CALLBACK
            # -----------------------------------------
            should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)
            if should_check_val:
                self.trainer.run_evaluation()
                # reset stage to train
                self.trainer._set_wide_running_stage(RunningStage.TRAINING)
            # -----------------------------------------
            # SAVE LOGGERS (ie: Tensorboard, etc...)
            # -----------------------------------------
            self.save_loggers_on_train_batch_end()
            # update LR schedulers
            monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
            self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
            self.trainer.checkpoint_connector.has_trained = True
            # max steps reached, end training
            if (
                self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1
                and self._accumulated_batches_reached()
            ):
                break
            # end epoch early
            # stop when the flag is changed or we've gone past the amount
            # requested in the batches
            if self.trainer.should_stop:
                break
            self.trainer.total_batch_idx += 1
            # stop epoch if we limited the number of training batches
            if self._num_training_batches_reached(is_last_batch):
                break
            # progress global step according to grads progress
            self.increment_accumulated_grad_global_step()
        # epoch end hook
        self.run_on_epoch_end_hook(epoch_output)
        # log epoch metrics
        self.trainer.logger_connector.log_train_epoch_end_metrics(
            epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers
        )
        should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
        if should_check_val:
            self.trainer.run_evaluation(on_epoch=True)
            # reset stage to train
            self.trainer._set_wide_running_stage(RunningStage.TRAINING)
        should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
        should_train_only = self.trainer.disable_validation or should_skip_eval
        if should_train_only:
            # update epoch level lr_schedulers
            self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
            self.check_checkpoint_callback(True)
            self.check_early_stopping_callback(True)
        # increment the global step once
        # progress global step according to grads progress
        self.increment_accumulated_grad_global_step()
    def run_training_batch(self, batch, batch_idx, dataloader_idx):
        """Run one training batch: all TBPTT splits across all active optimizers.

        Accumulates gradients (blocking DDP sync while doing so) or performs the
        optimizer step via a closure, then updates the running loss.

        Returns:
            AttributeDict with ``signal`` (-1 aborts the epoch), ``grad_norm_dic``
            and ``training_step_output_for_epoch_end`` (per-optimizer outputs).
        """
        # track grad norms
        grad_norm_dic = {}
        # bookkeeping
        self.trainer.hiddens = None
        # track all outputs across time and num of optimizers
        batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]
        if batch is None:
            return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)
        # hook
        response = self.trainer.call_hook("on_batch_start")
        if response == -1:
            return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
        # hook
        response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
        if response == -1:
            return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
        # lightning module hook
        splits = self.tbptt_split_batch(batch)
        for split_idx, split_batch in enumerate(splits):
            # create an iterable for optimizers and loop over them
            for opt_idx, optimizer in self.prepare_optimizers():
                # toggle model params + set info to logger_connector
                self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
                if self.should_accumulate():
                    # For gradient accumulation
                    # -------------------
                    # calculate loss (train step + train step end)
                    # -------------------
                    # automatic_optimization=True: perform dpp sync only when performing optimizer_step
                    # automatic_optimization=False: don't block synchronization here
                    with self.block_ddp_sync_behaviour():
                        self.training_step_and_backward(
                            split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
                        )
                    batch_outputs = self._process_closure_result(
                        batch_outputs=batch_outputs,
                        opt_idx=opt_idx,
                    )
                # ------------------------------
                # BACKWARD PASS
                # ------------------------------
                # gradient update with accumulated gradients
                else:
                    if self.automatic_optimization:
                        def train_step_and_backward_closure():
                            result = self.training_step_and_backward(
                                split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
                            )
                            return None if result is None else result.loss
                        # optimizer step
                        self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
                    else:
                        self._curr_step_result = self.training_step(
                            split_batch, batch_idx, opt_idx, self.trainer.hiddens
                        )
                    if self._curr_step_result is None:
                        # user decided to skip optimization
                        # make sure to zero grad.
                        continue
                    batch_outputs = self._process_closure_result(
                        batch_outputs=batch_outputs,
                        opt_idx=opt_idx,
                    )
                    # todo: Properly aggregate grad_norm accros opt_idx and split_idx
                    grad_norm_dic = self._cur_grad_norm_dict
                    self._cur_grad_norm_dict = None
        # update running loss + reset accumulated loss
        self.update_running_loss()
        result = AttributeDict(
            signal=0,
            grad_norm_dic=grad_norm_dic,
            training_step_output_for_epoch_end=batch_outputs,
        )
        return result
    @contextmanager
    def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
        """
        automatic_optimization = True
        Blocks ddp sync gradients behaviour on backwards pass.
        This is useful for skipping sync when accumulating gradients, reducing communication overhead

        automatic_optimization = False
        do not block ddp gradient sync when using manual optimization
        as gradients are needed within the training step

        Returns:
            context manager with sync behaviour off
        """
        # only parallel plugins can (and need to) suppress inter-process grad sync
        if (
            isinstance(self.trainer.training_type_plugin, ParallelPlugin)
            and (self.automatic_optimization or should_block_sync)
        ):
            with self.trainer.training_type_plugin.block_backward_sync():
                yield None
        else:
            # nothing to block: plain no-op context
            yield None
    def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
        """Consume ``self._curr_step_result``: cache step metrics, track TBPTT
        hiddens, NaN-check the loss, and append the epoch-end output for the
        given optimizer index. Resets ``_curr_step_result`` afterwards.
        """
        opt_closure_result = self._curr_step_result
        if opt_closure_result is not None:
            # cache metrics
            self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
            # track hiddens
            self.trainer.hiddens = self.process_hiddens(opt_closure_result)
            # check if loss or model weights are nan
            if self.trainer.terminate_on_nan:
                self.trainer.detect_nan_tensors(opt_closure_result.loss)
            # track all the outputs across all steps
            batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
            batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
            if self.automatic_optimization:
                # track total loss for logging (avoid mem leaks)
                self.accumulated_loss.append(opt_closure_result.loss)
        # consumed: the next closure run starts fresh
        self._curr_step_result = None
        return batch_outputs
    def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
        """
        wrap the forward step in a closure so second order methods work

        Runs ``training_step`` and, under automatic optimization, the backward
        pass plus the ``on_after_backward`` hook once accumulation completes.
        Returns the step result (``None`` when the step produced nothing).
        """
        with self.trainer.profiler.profile("training_step_and_backward"):
            # lightning module hook
            result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
            self._curr_step_result = result
            if result is None:
                if self.automatic_optimization:
                    self.warning_cache.warn("training_step returned None if it was on purpose, ignore this warning...")
                return None
            if not self._skip_backward and self.trainer.train_loop.automatic_optimization:
                # backward pass
                with self.trainer.profiler.profile("model_backward"):
                    self.backward(result, optimizer, opt_idx)
                # hook - call this hook only
                # when gradients have finished to accumulate
                if not self.should_accumulate():
                    self.on_after_backward(result.training_step_output, batch_idx, result.loss)
                # check if loss or model weights are nan
                if self.trainer.terminate_on_nan:
                    self.trainer.detect_nan_tensors(result.loss)
            if len(self.trainer.optimizers) > 1:
                # revert back to previous state
                self.trainer.get_model().untoggle_optimizer(opt_idx)
        return result
    def backward(self, result, optimizer, opt_idx, *args, **kwargs):
        """Run the backward pass through the accelerator backend.

        ``result`` may be a raw tensor (manual backward) or a closure result
        whose ``closure_loss`` is replaced by the backend's return value. When
        a real optimizer step follows (no more accumulation), gradient norms
        are tracked and gradients clipped.
        """
        self.trainer.dev_debugger.track_event("backward_call")
        should_accumulate = self.should_accumulate()
        # backward can be called manually in the training loop
        if isinstance(result, torch.Tensor):
            self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
        else:
            result.closure_loss = self.trainer.accelerator_backend.backward(
                result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
            )
        if not self.should_accumulate():
            # track gradients
            self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(interval="step", monitor_metrics=monitor_metrics)
def run_on_epoch_end_hook(self, epoch_output):
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
self.trainer.call_hook('on_train_epoch_end', epoch_output)
self.trainer.call_hook('on_epoch_end')
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step += 1
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):
# decide if we should run validation
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
can_check_val = self.trainer.enable_validation and is_val_check_epoch
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches
should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop
or is_last_batch_for_infinite_dataset
) if on_epoch else (is_val_check_batch and not epoch_end_val_check)
return should_check_val and can_check_val
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
# enable not needing to add opt_idx to training_step
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
args.append(opt_idx)
else:
num_opts = len(self.trainer.optimizers)
raise ValueError(
f"Your LightningModule defines {num_opts} optimizers but "
f'training_step is missing the "optimizer_idx" argument.'
)
# pass hiddens if using tbptt
if self.trainer.truncated_bptt_steps is not None:
args.append(hiddens)
return args
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):
"""
Figure out what needs to be tracked/logged at the end of the epoch
"""
# the training step outputs a list per optimizer. The list contains the outputs at each time step
# when no TBPTT is used, then the list has 1 item per batch
# when TBPTT IS used, then the list has n items (1 per time step)
batch_end_outputs = []
for optimizer_idx_outputs in all_train_step_outputs:
# extract one representative sample from each time step (1 if no tbptt) and 0th optimizer
if len(optimizer_idx_outputs) == 0:
continue
sample_output = optimizer_idx_outputs[-1]
# pull out callback info if available (ie: Results object)
if isinstance(sample_output, dict) and "early_stop_on" in sample_output:
early_stopping_accumulator.accumulate(sample_output["early_stop_on"])
if isinstance(sample_output, dict) and "checkpoint_on" in sample_output:
checkpoint_accumulator.accumulate(sample_output["checkpoint_on"])
batch_end_outputs.append(optimizer_idx_outputs)
return batch_end_outputs
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.get_model()
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
| 42.816737 | 119 | 0.661718 |
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
import numpy as np
import torch
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import RunningStage, TrainerState
from pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
    def __init__(self, trainer, multiple_trainloader_mode):
        """Create the training loop bound to ``trainer``.

        Args:
            trainer: the owning Trainer; all shared loop state lives on it.
            multiple_trainloader_mode: how multiple train dataloaders are cycled.
        """
        self.trainer = trainer
        # accumulators filled from training_step outputs (deprecated Result path)
        self.early_stopping_accumulator = None
        self.checkpoint_accumulator = None
        # running mean of the loss across the current grad-accumulation window
        self.accumulated_loss = None
        self.warning_cache = WarningCache()
        # guards on_train_end against running twice
        self._teardown_already_run = False
        # windowed loss used for display (see update_running_loss)
        self.running_loss = TensorRunningAccum(window_length=20)
        self.automatic_optimization = True
        # result of the most recent training_step closure (consumed per batch)
        self._curr_step_result = None
        self._cur_grad_norm_dict = None
        self._multiple_trainloader_mode = multiple_trainloader_mode
        self._skip_backward = False
        self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
    def on_trainer_init(
        self,
        max_epochs,
        min_epochs,
        max_steps,
        min_steps,
        num_sanity_val_steps,
        automatic_optimization,
        weights_summary,
    ):
        """Initialize trainer-side loop counters and validate loop arguments.

        Raises:
            MisconfigurationException: if ``weights_summary`` is not a known mode.
        """
        self.trainer.global_step = 0
        self.trainer.current_epoch = 0
        self.trainer.interrupted = False
        self.trainer.should_stop = False
        self.trainer._state = TrainerState.INITIALIZING
        self.trainer.total_batch_idx = 0
        self.trainer.batch_idx = 0
        self.trainer.num_training_batches = 0
        self.trainer.train_dataloader = None
        self.automatic_optimization = automatic_optimization
        # defaults: 1000 max epochs / 1 min epoch unless step limits are given
        self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
        self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
        self.trainer.max_steps = max_steps
        self.trainer.min_steps = min_steps
        # -1 means run sanity validation without a step cap
        if num_sanity_val_steps == -1:
            self.trainer.num_sanity_val_steps = float("inf")
        else:
            self.trainer.num_sanity_val_steps = num_sanity_val_steps
        self.trainer.weights_summary = weights_summary
        if weights_summary is not None and weights_summary not in ModelSummary.MODES:
            raise MisconfigurationException(
                f"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}"
            )
    @property
    def num_optimizers(self):
        """Number of optimizers active for the current batch."""
        num_optimizers = len(self.get_optimizers_iterable())
        return num_optimizers
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
self.trainer.call_hook("on_train_start")
self.trainer.profile_connector.on_train_start(self.trainer)
def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)
self.trainer.config_validator.verify_loop_configurations(model)
self.trainer.callback_connector.attach_model_logging_functions(model)
    def on_train_end(self):
        """Finalize training: last checkpoint, end-of-train hooks, logger, profiler."""
        # idempotent — teardown can be triggered from several code paths
        if self._teardown_already_run:
            return
        self._teardown_already_run = True
        # NOTE(review): the step is decremented around checkpointing — presumably
        # global_step is one ahead at this point so the final checkpoint must be
        # recorded under the step that actually ran; confirm against the trainer.
        self.trainer.global_step -= 1
        self.check_checkpoint_callback(should_update=True, is_last=True)
        self.trainer.global_step += 1
        self.trainer.call_hook("on_train_end")
        # the training-type plugin decides whether this process finalizes the logger
        if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:
            self.trainer.logger.finalize("success")
        # only rank zero prints the profiler summary
        if self.trainer.global_rank == 0:
            self.trainer.profiler.describe()
        self.trainer.accelerator.on_train_end()
    def check_checkpoint_callback(self, should_update, is_last=False):
        """Drive checkpoint callbacks outside the regular validation flow.

        Each checkpoint callback is invoked via its ``on_validation_end`` entry
        point. ``is_last`` marks the final save at the end of training.
        """
        # only save when something was actually trained
        if should_update and self.trainer.checkpoint_connector.has_trained:
            callbacks = self.trainer.checkpoint_callbacks
            if is_last and any(cb.save_last for cb in callbacks):
                rank_zero_info("Saving latest checkpoint...")
            model = self.trainer.get_model()
            for cb in callbacks:
                cb.on_validation_end(self.trainer, model)
    def check_early_stopping_callback(self, should_update):
        """Drive EarlyStopping callbacks outside the regular validation flow.

        Invoked via their ``on_validation_end`` entry point, mirroring
        ``check_checkpoint_callback``.
        """
        if should_update and self.trainer.checkpoint_connector.has_trained:
            callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]
            model = self.trainer.get_model()
            for cb in callbacks:
                cb.on_validation_end(self.trainer, model)
    def on_train_epoch_start(self, epoch):
        """Prepare per-epoch state and fire the epoch-start hooks."""
        self.trainer.current_epoch = epoch
        model = self.trainer.get_model()
        # reload the dataloader each epoch when requested (epoch 0 is set up earlier)
        if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
            self.trainer.reset_train_dataloader(model)
        # best-effort: not every sampler exposes set_epoch (presumably used for
        # distributed samplers to reshuffle per epoch)
        with suppress(Exception):
            self.trainer.train_dataloader.sampler.set_epoch(epoch)
        # let the accumulation scheduler adjust accumulate_grad_batches for this epoch
        self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())
        # fresh accumulators for this epoch
        self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
        self.early_stopping_accumulator = Accumulator()
        self.checkpoint_accumulator = Accumulator()
        self.trainer.call_hook("on_epoch_start")
        self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model):
if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_val_dataloader(model)
    def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
        """Append batch outputs to ``epoch_output`` only when they will be
        consumed at epoch end (auto-reducible ``Result`` objects or a
        user-overridden epoch-end hook)."""
        for opt_idx, opt_outputs in enumerate(batch_end_outputs):
            sample_output = opt_outputs[-1]
            # Result objects may request automatic reduction at epoch end
            auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
            hook_overridden = (
                is_overridden("training_epoch_end", model=self.trainer.get_model())
                or is_overridden("on_train_epoch_end", model=self.trainer.get_model())
            )
            # nobody consumes these at epoch end: drop them (saves memory)
            if not (hook_overridden or auto_reduce_tng_result):
                continue
            # unwrap a lone non-Result output from its time-step list
            if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
                opt_outputs = opt_outputs[0]
            epoch_output[opt_idx].append(opt_outputs)
def get_optimizers_iterable(self):
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
optimizers_loop_length = optimizer_freq_cumsum[-1]
current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length
# find optimzier index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
    def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
        """Detach the step output after backward and fire ``on_after_backward``."""
        is_result_obj = isinstance(training_step_output, Result)
        # drop graph references now that backward has run
        if is_result_obj:
            training_step_output.detach()
        else:
            training_step_output.batch_loss = training_step_output.batch_loss.detach()
        # insert after step hook
        self.trainer.call_hook("on_after_backward")
        # when in dev debugging track the losses
        self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
    def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
        """Run forward (``training_step`` + ``training_step_end``) for one split.

        Returns:
            AttributeDict with the accumulation-scaled ``closure_loss``, a
            detached ``loss``, the processed step outputs and TBPTT ``hiddens``;
            ``None`` when the step produced no output.
        """
        # give the PL module a result for logging
        model_ref = self.trainer.get_model()
        with self.trainer.profiler.profile("model_forward"):
            args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
            # manually capture logged metrics
            model_ref._current_fx_name = 'training_step'
            model_ref._results = Result()
            with self.trainer.profiler.profile("training_step"):
                training_step_output = self.trainer.accelerator_backend.training_step(args)
                self.trainer.accelerator_backend.post_training_step()
            self.trainer.logger_connector.cache_logged_metrics()
            self._check_training_step_output(training_step_output)
            training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
            training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
                training_step_output, split_batch
            )
            is_result_obj = isinstance(training_step_output, Result)
            if training_step_output_for_epoch_end is None:
                return None
        # enable empty loss when using manual opt
        closure_loss = None
        untouched_loss = None
        if self.trainer.train_loop.automatic_optimization:
            # accumulate loss
            # (if accumulate_grad_batches = 1 no effect)
            if is_result_obj:
                closure_loss = training_step_output.minimize
            else:
                closure_loss = training_step_output.batch_loss
            closure_loss = closure_loss / self.trainer.accumulate_grad_batches
            # the loss will get scaled for amp. avoid any modifications to it
            untouched_loss = closure_loss.detach().clone()
        # result
        result = AttributeDict(
            closure_loss=closure_loss,
            loss=untouched_loss,
            training_step_output=training_step_output,
            training_step_output_for_epoch_end=training_step_output_for_epoch_end,
            hiddens=training_step_output.hiddens,
        )
        return result
    def _process_training_step_output(self, training_step_output, split_batch):
        """Normalize the ``training_step`` return into
        ``(output_for_epoch_end, output_for_loss)``.

        Dispatches between the deprecated ``Result`` path, the 1.0 scalar/dict
        path, and the legacy dict-with-log/progress_bar path. Returns
        ``(None, None)`` when the step returned nothing.
        """
        training_step_output_for_epoch_end = training_step_output
        # enable validation_step return None
        if training_step_output_for_epoch_end is None:
            return None, None
        # -----------------------------------------
        # process result return (DEPRECATE in 1.0)
        # -----------------------------------------
        if isinstance(training_step_output, Result):
            training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
            return training_step_output_for_epoch_end, training_step_output
        # -----------------------------------------
        # process hybrid (1.0)
        # -----------------------------------------
        # no need for these checks in 1.0.0
        # TODO: remove checks in 1.0.0
        is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
        is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
        if is_1_0_output:
            return self._process_training_step_output_1_0(training_step_output, split_batch)
        # -----------------------------------------
        # process old dict (deprecate 1.0)
        # -----------------------------------------
        training_step_output = self.trainer.process_dict_result(training_step_output, train=True)
        training_step_output = AttributeDict(
            batch_loss=training_step_output[0],
            pbar_on_batch_end=training_step_output[1],
            log_metrics=training_step_output[2],
            callback_metrics=training_step_output[3],
            hiddens=training_step_output[4],
        )
        # if the user decides to finally reduce things in epoch_end, save raw output without graphs
        if isinstance(training_step_output_for_epoch_end, torch.Tensor):
            training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
        else:
            training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)
        return training_step_output_for_epoch_end, training_step_output
    def _process_training_step_output_1_0(self, training_step_output, split_batch):
        """Map a 1.0-style ``training_step`` return (scalar loss or dict) onto
        the module's ``Result`` and produce a detached copy for epoch end."""
        result = self.trainer.get_model()._results
        loss = None
        hiddens = None
        # handle dict return
        if isinstance(training_step_output, dict):
            loss = training_step_output.pop("loss", None)
            hiddens = training_step_output.pop("hiddens", None)
            result["extra"] = training_step_output
        # handle scalar return
        elif isinstance(training_step_output, torch.Tensor):
            loss = training_step_output
            result["extra"] = {}
        # map to results under the hood
        result.minimize = loss
        result.hiddens = hiddens
        # track batch for manual reduction with result
        result.track_batch_size(len(split_batch))
        # track metrics without grads for epoch reduction
        training_step_output_for_epoch_end = copy(result)
        training_step_output_for_epoch_end.detach()
        if self.trainer.move_metrics_to_cpu:
            training_step_output_for_epoch_end.cpu()
        # what flows back into the system
        training_step_output = result
        return training_step_output_for_epoch_end, training_step_output
    def _process_result(self, training_step_output, split_batch):
        """Handle a deprecated ``Result`` return from ``training_step``.

        Records the batch size on the result, warns about the 0.9.1
        deprecation, and returns a detached copy for epoch-end aggregation.
        """
        # record the split size so epoch-end reduction can weight this batch
        training_step_output.track_batch_size(len(split_batch))
        m = """
            TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
            Use self.log and .write from the LightningModule to log metrics and write predictions.
            training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
            Option 1:
                return loss
            Option 2:
                return {'loss': loss, 'anything_else': ...}
            Option 3:
                return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
            """
        rank_zero_warn(m)
        # detach a copy so the epoch-end aggregate does not keep the graph alive
        training_step_output_for_epoch_end = copy(training_step_output)
        training_step_output_for_epoch_end.detach()
        return training_step_output_for_epoch_end
    def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
        """Run one optimizer step via the LightningModule's ``optimizer_step`` hook.

        The forward/backward work is passed as ``train_step_and_backward_closure``
        so closure-based optimizers (e.g. LBFGS) can re-evaluate the loss.

        Raises:
            MisconfigurationException: if native AMP is combined with LBFGS.
        """
        model_ref = self.trainer.get_model()
        is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
        using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
        # native amp + lbfgs is a no go right now
        if using_native_amp and is_lbfgs:
            raise MisconfigurationException(
                'native PyTorch amp and lbfgs are not compatible.'
                ' To request, please file a Github issue in PyTorch and tag @mcarilli'
            )
        # wraps into LightningOptimizer only for running step
        optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
        # model hook
        model_ref.optimizer_step(
            self.trainer.current_epoch,
            batch_idx,
            optimizer,
            opt_idx,
            train_step_and_backward_closure,
            on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
            using_native_amp=using_native_amp,
            using_lbfgs=is_lbfgs,
        )
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.get_model()
grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)
return grad_norm_dict
    def process_hiddens(self, opt_closure_result):
        """Extract the TBPTT hidden state from a closure result.

        For deprecated ``Result`` outputs, also drops the hiddens from the
        epoch-end copy so no graph references are carried across steps.
        """
        hiddens = opt_closure_result.hiddens
        if isinstance(opt_closure_result.training_step_output, Result):
            opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()
        return hiddens
def tbptt_split_batch(self, batch):
splits = [batch]
if self.trainer.truncated_bptt_steps is not None:
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)
return splits
    def run_training_epoch(self):
        """Run the training loop for one epoch.

        Drives the batch loop (training step, logging, mid-epoch validation,
        LR scheduling, early exits) and then the epoch-end hooks, epoch-level
        logging, epoch-end validation and, when validation is disabled, the
        checkpoint/early-stopping callbacks.
        """
        # modify dataloader if needed (ddp, etc...)
        train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)

        # track epoch output: one list per optimizer
        epoch_output = [[] for _ in range(self.num_optimizers)]

        train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
        dataloader_idx = 0
        should_check_val = False
        for batch_idx, (batch, is_last_batch) in train_dataloader:
            self.trainer.batch_idx = batch_idx

            # ------------------------------------
            # TRAINING_STEP + TRAINING_STEP_END
            # ------------------------------------
            with self.trainer.profiler.profile("run_training_batch"):
                batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)

            # when returning -1 from train_step, we end epoch early
            if batch_output.signal == -1:
                break

            batch_end_outputs = self.process_train_step_outputs(
                batch_output.training_step_output_for_epoch_end,
                self.early_stopping_accumulator,
                self.checkpoint_accumulator,
            )
            # hook
            # TODO: add outputs to batches
            self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)

            # -----------------------------------------
            # SAVE METRICS TO LOGGERS
            # -----------------------------------------
            self.trainer.logger_connector.log_train_step_metrics(batch_output)

            # -----------------------------------------
            # VALIDATE IF NEEDED + CHECKPOINT CALLBACK
            # -----------------------------------------
            should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)
            if should_check_val:
                self.trainer.run_evaluation()
                # reset stage to train
                self.trainer._set_wide_running_stage(RunningStage.TRAINING)

            # -----------------------------------------
            # SAVE LOGGERS (ie: Tensorboard, etc...)
            # -----------------------------------------
            self.save_loggers_on_train_batch_end()

            # update LR schedulers
            # deepcopy so later in-place metric mutations cannot leak into scheduling
            monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
            self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
            self.trainer.checkpoint_connector.has_trained = True

            # max steps reached, end training
            if (
                self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1
                and self._accumulated_batches_reached()
            ):
                break

            # end epoch early
            # stop when the flag is changed or we've gone past the amount
            if self.trainer.should_stop:
                break

            self.trainer.total_batch_idx += 1

            # stop the epoch once all training batches are consumed
            if self._num_training_batches_reached(is_last_batch):
                break

            # progress global step according to grads progress
            self.increment_accumulated_grad_global_step()

        # NOTE(review): batch_idx / is_last_batch below are undefined if the
        # dataloader yielded no batches at all — TODO confirm upstream guard.
        self.run_on_epoch_end_hook(epoch_output)

        self.trainer.logger_connector.log_train_epoch_end_metrics(
            epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers
        )

        should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
        if should_check_val:
            self.trainer.run_evaluation(on_epoch=True)
            # reset stage to train
            self.trainer._set_wide_running_stage(RunningStage.TRAINING)

        should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
        should_train_only = self.trainer.disable_validation or should_skip_eval

        if should_train_only:
            # no validation ran, so drive epoch-level schedulers and callbacks here
            self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
            self.check_checkpoint_callback(True)
            self.check_early_stopping_callback(True)

        self.increment_accumulated_grad_global_step()
    def run_training_batch(self, batch, batch_idx, dataloader_idx):
        """Run the training step (and optimizer step) for one batch.

        Returns an AttributeDict with ``signal`` (-1 ends the epoch early),
        the gradient norm dict, and per-optimizer outputs for epoch end.
        """
        # track grad norms
        grad_norm_dic = {}

        # bookkeeping: reset TBPTT hidden state for this batch
        self.trainer.hiddens = None

        # track all outputs across time and number of optimizers
        batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]

        if batch is None:
            return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)

        # hook
        response = self.trainer.call_hook("on_batch_start")
        if response == -1:
            return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)

        # hook
        response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
        if response == -1:
            return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)

        # lightning module hook: split for truncated BPTT
        splits = self.tbptt_split_batch(batch)

        for split_idx, split_batch in enumerate(splits):
            for opt_idx, optimizer in self.prepare_optimizers():
                self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)

                if self.should_accumulate():
                    # accumulation-only step: backward without an optimizer step,
                    # blocking DDP gradient sync to avoid wasted communication
                    with self.block_ddp_sync_behaviour():
                        self.training_step_and_backward(
                            split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
                        )

                    batch_outputs = self._process_closure_result(
                        batch_outputs=batch_outputs,
                        opt_idx=opt_idx,
                    )

                # ------------------------------
                # BACKWARD PASS
                # ------------------------------
                # gradient update with accumulated gradients
                else:
                    if self.automatic_optimization:

                        def train_step_and_backward_closure():
                            result = self.training_step_and_backward(
                                split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
                            )
                            return None if result is None else result.loss

                        # optimizer step (closure re-runs forward/backward for LBFGS etc.)
                        self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)

                    else:
                        self._curr_step_result = self.training_step(
                            split_batch, batch_idx, opt_idx, self.trainer.hiddens
                        )

                    if self._curr_step_result is None:
                        # user decided to skip optimization
                        # make sure to zero grad.
                        continue

                    batch_outputs = self._process_closure_result(
                        batch_outputs=batch_outputs,
                        opt_idx=opt_idx,
                    )

                    # todo: Properly aggregate grad_norm accros opt_idx and split_idx
                    grad_norm_dic = self._cur_grad_norm_dict
                    self._cur_grad_norm_dict = None

                # update running loss + reset accumulated loss
                self.update_running_loss()

        result = AttributeDict(
            signal=0,
            grad_norm_dic=grad_norm_dic,
            training_step_output_for_epoch_end=batch_outputs,
        )
        return result
    @contextmanager
    def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
        """Context manager that suppresses DDP gradient sync while accumulating.

        Sync is blocked only for parallel plugins, and only when using
        automatic optimization (or when explicitly requested via
        ``should_block_sync``); otherwise this is a no-op wrapper.
        """
        if (
            isinstance(self.trainer.training_type_plugin, ParallelPlugin)
            and (self.automatic_optimization or should_block_sync)
        ):
            with self.trainer.training_type_plugin.block_backward_sync():
                yield None
        else:
            yield None
    def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
        """Fold ``self._curr_step_result`` into ``batch_outputs`` and caches.

        Consumes (and clears) the cached step result: logs its metrics, tracks
        TBPTT hiddens, runs NaN detection, and appends the epoch-end output to
        the slot for ``opt_idx``.
        """
        opt_closure_result = self._curr_step_result
        if opt_closure_result is not None:
            # cache metrics
            self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
            # track hiddens
            self.trainer.hiddens = self.process_hiddens(opt_closure_result)
            # check if loss or model weights are nan
            if self.trainer.terminate_on_nan:
                self.trainer.detect_nan_tensors(opt_closure_result.loss)
            # track all the outputs across all steps
            batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
            batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
            if self.automatic_optimization:
                # track total loss for logging (avoid mem leaks)
                self.accumulated_loss.append(opt_closure_result.loss)
        # clear the cache so the next step starts fresh
        self._curr_step_result = None
        return batch_outputs
    def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
        """Run the forward/loss and (in automatic optimization) the backward pass.

        Wrapped into a closure by the caller so second-order optimizers can
        re-evaluate it. Returns the step result, or None if the user skipped
        the step.
        """
        with self.trainer.profiler.profile("training_step_and_backward"):
            # lightning module hook
            result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
            self._curr_step_result = result

            if result is None:
                if self.automatic_optimization:
                    self.warning_cache.warn("training_step returned None if it was on purpose, ignore this warning...")
                return None

            if not self._skip_backward and self.trainer.train_loop.automatic_optimization:
                # backward pass
                with self.trainer.profiler.profile("model_backward"):
                    self.backward(result, optimizer, opt_idx)

                # hook - call this hook only
                # when gradients have finished to accumulate
                if not self.should_accumulate():
                    self.on_after_backward(result.training_step_output, batch_idx, result.loss)

                # check if loss or model weights are nan
                if self.trainer.terminate_on_nan:
                    self.trainer.detect_nan_tensors(result.loss)

            if len(self.trainer.optimizers) > 1:
                # revert back to previous state
                self.trainer.get_model().untoggle_optimizer(opt_idx)

        return result
    def backward(self, result, optimizer, opt_idx, *args, **kwargs):
        """Backpropagate through the accelerator backend.

        ``result`` is either a raw loss tensor (manual optimization) or a step
        result whose ``closure_loss`` is backpropagated and replaced. Gradient
        norms are tracked/clipped only when this step will also run the
        optimizer (i.e. not while accumulating).
        """
        self.trainer.dev_debugger.track_event("backward_call")

        should_accumulate = self.should_accumulate()

        # backward can be called manually in the training loop
        if isinstance(result, torch.Tensor):
            self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
        else:
            result.closure_loss = self.trainer.accelerator_backend.backward(
                result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
            )

        if not self.should_accumulate():
            # track gradients
            self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(interval="step", monitor_metrics=monitor_metrics)
def run_on_epoch_end_hook(self, epoch_output):
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
self.trainer.call_hook('on_train_epoch_end', epoch_output)
self.trainer.call_hook('on_epoch_end')
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step += 1
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):
# decide if we should run validation
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
can_check_val = self.trainer.enable_validation and is_val_check_epoch
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches
should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop
or is_last_batch_for_infinite_dataset
) if on_epoch else (is_val_check_batch and not epoch_end_val_check)
return should_check_val and can_check_val
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
# enable not needing to add opt_idx to training_step
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
args.append(opt_idx)
else:
num_opts = len(self.trainer.optimizers)
raise ValueError(
f"Your LightningModule defines {num_opts} optimizers but "
f'training_step is missing the "optimizer_idx" argument.'
)
# pass hiddens if using tbptt
if self.trainer.truncated_bptt_steps is not None:
args.append(hiddens)
return args
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):
# the training step outputs a list per optimizer. The list contains the outputs at each time step
# when no TBPTT is used, then the list has 1 item per batch
# when TBPTT IS used, then the list has n items (1 per time step)
batch_end_outputs = []
for optimizer_idx_outputs in all_train_step_outputs:
# extract one representative sample from each time step (1 if no tbptt) and 0th optimizer
if len(optimizer_idx_outputs) == 0:
continue
sample_output = optimizer_idx_outputs[-1]
# pull out callback info if available (ie: Results object)
if isinstance(sample_output, dict) and "early_stop_on" in sample_output:
early_stopping_accumulator.accumulate(sample_output["early_stop_on"])
if isinstance(sample_output, dict) and "checkpoint_on" in sample_output:
checkpoint_accumulator.accumulate(sample_output["checkpoint_on"])
batch_end_outputs.append(optimizer_idx_outputs)
return batch_end_outputs
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
if self.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.get_model()
model.toggle_optimizer(optimizer, opt_idx)
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
self.accumulated_loss.reset()
| true | true |
f7f446a45ea9d26f2c54009938dc4d576c808ed1 | 1,394 | py | Python | fiwareglancesync/utils/checkpath.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | null | null | null | fiwareglancesync/utils/checkpath.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 88 | 2015-07-21T22:13:23.000Z | 2016-11-15T21:28:56.000Z | fiwareglancesync/utils/checkpath.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 2 | 2015-08-12T11:19:55.000Z | 2018-05-25T19:04:43.000Z | # -- encoding: utf-8 --
#
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
# Import the database object (db) from the main application module
# We will define this inside /app/__init__.py in the next sections.
import os
def check_path(url, filename):
    """Validate that *url* is an absolute path to an existing file.

    The original implementation juggled a ``result`` flag through nested
    branches, letting the ``os.path.isfile`` outcome overwrite the earlier
    filename check, and raised IndexError on an empty ``url``.

    Args:
        url: candidate path; must be absolute (start with '/').
        filename: file name expected to appear somewhere in *url*.

    Returns:
        True when *url* is absolute, contains *filename* and points to an
        existing regular file; False otherwise (including empty *url*).
    """
    # guard the empty string before inspecting the first character
    if not url:
        return False
    # absolute path, expected file name present, and the file really exists
    return url.startswith('/') and filename in url and os.path.isfile(url)
| 30.977778 | 75 | 0.681492 |
import os
def check_path(url, filename):
if url[0] != '/':
result = False
else:
if filename not in url:
result = False
else:
result = True
if os.path.isfile(url):
result = True
else:
result = False
return result
| true | true |
f7f446af625168a91ac6fef0fb8e208f2a6cbfd4 | 16,119 | py | Python | efficientdet/keras/infer_lib.py | adrianbouza/automl | 46dbd753efc8efc73ced146fe8b3bb694709dcff | [
"Apache-2.0"
] | null | null | null | efficientdet/keras/infer_lib.py | adrianbouza/automl | 46dbd753efc8efc73ced146fe8b3bb694709dcff | [
"Apache-2.0"
] | null | null | null | efficientdet/keras/infer_lib.py | adrianbouza/automl | 46dbd753efc8efc73ced146fe8b3bb694709dcff | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Inference related utilities."""
import copy
import os
import time
from typing import Text, Dict, Any, Optional
from absl import logging
import numpy as np
import tensorflow as tf
from .. import dataloader
from .. import hparams_config
from .. import utils
from . import efficientdet_keras
from . import label_util
from . import util_keras
from ..visualize import vis_utils
def visualize_image(image,
                    boxes,
                    classes,
                    scores,
                    label_map=None,
                    min_score_thresh=0.01,
                    max_boxes_to_draw=1000,
                    line_thickness=2,
                    **kwargs):
  """Draw detection boxes and class labels onto a copy of ``image``.

  Args:
    image: an image with shape [H, W, C].
    boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].
    classes: a class prediction with shape [N].
    scores: a list of float values with shape [N].
    label_map: a dictionary from class id to name (defaults to COCO).
    min_score_thresh: minimal score for showing; detections below this
      threshold are not drawn.
    max_boxes_to_draw: maximum number of bounding boxes to draw.
    line_thickness: thickness of the bounding box outline.
    **kwargs: extra parameters forwarded to the visualization utility.

  Returns:
    An output image (numpy array) with annotated boxes and classes.
  """
  id_to_name = label_util.get_label_map(label_map or 'coco')
  category_index = {
      cls_id: {'id': cls_id, 'name': name}
      for cls_id, name in id_to_name.items()
  }
  annotated = np.array(image)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      annotated,
      boxes,
      classes,
      scores,
      category_index,
      min_score_thresh=min_score_thresh,
      max_boxes_to_draw=max_boxes_to_draw,
      line_thickness=line_thickness,
      **kwargs)
  return annotated
class ExportNetwork(tf.Module):
  """Wraps the bare network for export, without pre/post processing."""

  def __init__(self, model):
    super().__init__()
    self.model = model

  @tf.function
  def __call__(self, imgs):
    # Flatten the (possibly nested) network outputs into a flat list of
    # tensors so the exported signature has a stable, simple structure.
    return tf.nest.flatten(self.model(imgs, training=False))
class ExportModel(tf.Module):
  """Model to be exported as SavedModel/TFLite format."""

  def __init__(self, model, pre_mode='infer', post_mode='global'):
    # pre_mode/post_mode select the pre- and post-processing variants the
    # exported model uses when called (e.g. post_mode='tflite' for TFLite).
    super().__init__()
    self.model = model
    self.pre_mode = pre_mode
    self.post_mode = post_mode

  @tf.function
  def __call__(self, imgs):
    return self.model(
        imgs, training=False, pre_mode=self.pre_mode, post_mode=self.post_mode)
class ServingDriver:
  """A driver for serving single or batch images.

  This driver supports serving with image files or arrays, with configurable
  batch size.

  Example 1. Serving streaming image contents:

    driver = inference.ServingDriver(
      'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=1)
    driver.build()
    for m in image_iterator():
      predictions = driver.serve_files([m])
      boxes, scores, classes, _ = tf.nest.map_structure(np.array, predictions)
      driver.visualize(m, boxes[0], scores[0], classes[0])
      # m is the new image with annotated boxes.

  Example 2. Serving batch image contents:

    imgs = []
    for f in ['/tmp/1.jpg', '/tmp/2.jpg']:
      imgs.append(np.array(Image.open(f)))
    driver = inference.ServingDriver(
      'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=len(imgs))
    driver.build()
    predictions = driver.serve(imgs)
    boxes, scores, classes, _ = tf.nest.map_structure(np.array, predictions)
    for i in range(len(imgs)):
      driver.visualize(imgs[i], boxes[i], scores[i], classes[i])

  Example 3: another way is to use SavedModel:

    # step1: export a model.
    driver = inference.ServingDriver('efficientdet-d0', '/tmp/efficientdet-d0')
    driver.build()
    driver.export('/tmp/saved_model_path')

    # step2: Serve a model.
    driver.load(self.saved_model_dir)
    raw_images = []
    for f in tf.io.gfile.glob('/tmp/images/*.jpg'):
      raw_images.append(np.array(PIL.Image.open(f)))
    detections = driver.serve(raw_images)
    boxes, scores, classes, _ = tf.nest.map_structure(np.array, detections)
    for i in range(len(imgs)):
      driver.visualize(imgs[i], boxes[i], scores[i], classes[i])
  """

  def __init__(self,
               model_name: Text,
               ckpt_path: Optional[Text] = None,
               batch_size: int = 1,
               only_network: bool = False,
               model_params: Optional[Dict[Text, Any]] = None,
               debug: bool = False):
    """Initialize the inference driver.

    Args:
      model_name: target model name, such as efficientdet-d0.
      ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.
      batch_size: batch size for inference.
      only_network: only use the network without pre/post processing.
      model_params: model parameters for overriding the config.
      debug: bool, if true, run in debug mode.
    """
    super().__init__()
    self.model_name = model_name
    self.ckpt_path = ckpt_path
    self.batch_size = batch_size
    self.only_network = only_network
    self.debug = debug

    self.params = hparams_config.get_detection_config(model_name).as_dict()
    if model_params:
      self.params.update(model_params)
    self.params.update(dict(is_training_bn=False))
    self.label_map = self.params.get('label_map', None)

    # the model is built lazily on first access via the `model` property
    self._model = None

    # install the global precision policy before the model is built
    mixed_precision = self.params.get('mixed_precision', None)
    precision = utils.get_precision(
        self.params.get('strategy', None), mixed_precision)
    policy = tf.keras.mixed_precision.Policy(precision)
    tf.keras.mixed_precision.set_global_policy(policy)

  @property
  def model(self):
    """The underlying model; built and checkpoint-restored on first access."""
    if not self._model:
      self.build()
    return self._model

  @model.setter
  def model(self, model):
    self._model = model

  def build(self, params_override=None):
    """Build model and restore checkpoints."""
    params = copy.deepcopy(self.params)
    if params_override:
      params.update(params_override)
    config = hparams_config.get_efficientdet_config(self.model_name)
    config.override(params)
    # only_network skips the pre/post-processing wrapper around the network
    if self.only_network:
      self.model = efficientdet_keras.EfficientDetNet(config=config)
    else:
      self.model = efficientdet_keras.EfficientDetModel(config=config)
    image_size = utils.parse_image_size(params['image_size'])
    self.model.build((self.batch_size, *image_size, 3))
    util_keras.restore_ckpt(self.model, self.ckpt_path,
                            self.params['moving_average_decay'],
                            skip_mismatch=False)
    if self.debug:
      tf.config.run_functions_eagerly(self.debug)

  def visualize(self, image, boxes, classes, scores, **kwargs):
    """Visualize prediction on image."""
    return visualize_image(image, boxes, classes.astype(int), scores,
                           self.label_map, **kwargs)

  def benchmark(self, image_arrays, bm_runs=10, trace_filename=None):
    """Benchmark inference latency/throughput.

    Args:
      image_arrays: a list of images in numpy array format.
      bm_runs: Number of benchmark runs.
      trace_filename: if not None, the filename for saving a profiler trace.
    """
    _, spec = self._get_model_and_spec()

    @tf.function(input_signature=[spec])
    def test_func(image_arrays):
      return self.model(image_arrays)  # pylint: disable=not-callable

    for _ in range(3):  # warmup 3 runs.
      test_func(image_arrays)

    start = time.perf_counter()
    for _ in range(bm_runs):
      test_func(image_arrays)
    end = time.perf_counter()
    inference_time = (end - start) / bm_runs

    print('Per batch inference time: ', inference_time)
    print('FPS: ', self.batch_size / inference_time)

    if trace_filename:
      options = tf.profiler.experimental.ProfilerOptions()
      tf.profiler.experimental.start(trace_filename, options)
      test_func(image_arrays)
      tf.profiler.experimental.stop()

  def serve(self, image_arrays):
    """Serve a list of image arrays.

    Args:
      image_arrays: A list of image content with each image has shape [height,
        width, 3] and uint8 type.

    Returns:
      A list of detections.
    """
    # A TFLite interpreter (set up by `load`) is driven via its tensor API;
    # any other built/loaded model is simply called.
    if isinstance(self.model, tf.lite.Interpreter):
      input_details = self.model.get_input_details()
      output_details = self.model.get_output_details()
      self.model.set_tensor(input_details[0]['index'], np.array(image_arrays))
      self.model.invoke()
      return [self.model.get_tensor(x['index']) for x in output_details]
    return self.model(image_arrays)  # pylint: disable=not-callable

  def load(self, saved_model_dir_or_frozen_graph: Text):
    """Load the model using saved model or a frozen graph."""
    # Load saved model if it is a folder.
    if tf.saved_model.contains_saved_model(saved_model_dir_or_frozen_graph):
      self.model = tf.saved_model.load(saved_model_dir_or_frozen_graph)
      return

    if saved_model_dir_or_frozen_graph.endswith('.tflite'):
      self.model = tf.lite.Interpreter(saved_model_dir_or_frozen_graph)
      self.model.allocate_tensors()
      return

    # Load a frozen graph.
    def wrap_frozen_graph(graph_def, inputs, outputs):
      # https://www.tensorflow.org/guide/migrate
      imports_graph_def_fn = lambda: tf.import_graph_def(graph_def, name='')
      wrapped_import = tf.compat.v1.wrap_function(imports_graph_def_fn, [])
      import_graph = wrapped_import.graph
      return wrapped_import.prune(
          tf.nest.map_structure(import_graph.as_graph_element, inputs),
          tf.nest.map_structure(import_graph.as_graph_element, outputs))

    graph_def = tf.Graph().as_graph_def()
    with tf.io.gfile.GFile(saved_model_dir_or_frozen_graph, 'rb') as f:
      graph_def.ParseFromString(f.read())
    self.model = wrap_frozen_graph(
        graph_def,
        inputs='images:0',
        outputs=['Identity:0', 'Identity_1:0', 'Identity_2:0', 'Identity_3:0'])

  def freeze(self, func):
    """Freeze the graph (convert variables to constants)."""
    # pylint: disable=g-import-not-at-top,disable=g-direct-tensorflow-import
    from tensorflow.python.framework.convert_to_constants \
        import convert_variables_to_constants_v2_as_graph
    _, graphdef = convert_variables_to_constants_v2_as_graph(func)
    return graphdef

  def _get_model_and_spec(self, tflite=None):
    """Get model instance and export spec."""
    if self.only_network or tflite:
      image_size = utils.parse_image_size(self.params['image_size'])
      spec = tf.TensorSpec(
          shape=[self.batch_size, *image_size, 3],
          dtype=tf.float32,
          name='images')
      if self.only_network:
        export_model = ExportNetwork(self.model)
      else:
        # If export tflite, we should remove preprocessing since TFLite doesn't
        # support dynamic shape.
        logging.info('Export model without preprocessing.')
        # This section is only used for TFLite, so we use the applicable
        # pre_ & post_ modes.
        export_model = ExportModel(
            self.model, pre_mode=None, post_mode='tflite')
      return export_model, spec
    else:
      # full model: variable-size uint8 image input with preprocessing
      spec = tf.TensorSpec(
          shape=[self.batch_size, None, None, 3], dtype=tf.uint8, name='images')
      export_model = ExportModel(self.model)
      return export_model, spec

  def export(self,
             output_dir: Optional[Text] = None,
             tensorrt: Optional[Text] = None,
             tflite: Optional[Text] = None,
             file_pattern: Optional[Text] = None,
             num_calibration_steps: int = 2000):
    """Export a saved model, frozen graph, and potential tflite/tensorrt model.

    Args:
      output_dir: the output folder for saved model.
      tensorrt: If not None, must be {'FP32', 'FP16', 'INT8'}.
      tflite: Type for post-training quantization.
      file_pattern: Glob for tfrecords, e.g. coco/val-*.tfrecord.
      num_calibration_steps: Number of post-training quantization calibration
        steps to run.
    """
    export_model, input_spec = self._get_model_and_spec(tflite)
    image_size = utils.parse_image_size(self.params['image_size'])
    if output_dir:
      tf.saved_model.save(
          export_model,
          output_dir,
          signatures=export_model.__call__.get_concrete_function(input_spec))
      logging.info('Model saved at %s', output_dir)

      # also save freeze pb file.
      graphdef = self.freeze(
          export_model.__call__.get_concrete_function(input_spec))
      proto_path = tf.io.write_graph(
          graphdef, output_dir, self.model_name + '_frozen.pb', as_text=False)
      logging.info('Frozen graph saved at %s', proto_path)

    if tflite:
      shape = (self.batch_size, *image_size, 3)
      input_spec = tf.TensorSpec(
          shape=shape, dtype=input_spec.dtype, name=input_spec.name)
      # from_saved_model supports advanced converter features like op fusing.
      converter = tf.lite.TFLiteConverter.from_saved_model(output_dir)
      if tflite == 'FP32':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float32]
      elif tflite == 'FP16':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float16]
      elif tflite == 'INT8':
        # Enables MLIR-based post-training quantization.
        converter.experimental_new_quantizer = True
        if file_pattern:
          config = hparams_config.get_efficientdet_config(self.model_name)
          config.override(self.params)
          ds = dataloader.InputReader(
              file_pattern,
              is_training=False,
              max_instances_per_image=config.max_instances_per_image)(
                  config, batch_size=self.batch_size)

          def representative_dataset_gen():
            for image, _ in ds.take(num_calibration_steps):
              yield [image]
        else:  # Used for debugging, can remove later.
          logging.warn('Use real representative dataset instead of fake ones.')
          num_calibration_steps = 10

          def representative_dataset_gen():  # rewrite this for real data.
            for _ in range(num_calibration_steps):
              yield [tf.ones(shape, dtype=input_spec.dtype)]

        converter.representative_dataset = representative_dataset_gen
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.inference_input_type = tf.uint8
        # TFLite's custom NMS op isn't supported by post-training quant,
        # so we add TFLITE_BUILTINS as well.
        supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.TFLITE_BUILTINS
        ]
        converter.target_spec.supported_ops = supported_ops
      else:
        raise ValueError(f'Invalid tflite {tflite}: must be FP32, FP16, INT8.')

      tflite_path = os.path.join(output_dir, tflite.lower() + '.tflite')
      tflite_model = converter.convert()
      tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)
      logging.info('TFLite is saved at %s', tflite_path)

    if tensorrt:
      trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())
      conversion_params = tf.experimental.tensorrt.ConversionParams(
          max_workspace_size_bytes=(2 << 20),
          maximum_cached_engines=1,
          precision_mode=tensorrt.upper())
      converter = tf.experimental.tensorrt.Converter(
          output_dir, conversion_params=conversion_params)
      converter.convert()
      converter.save(trt_path)
      logging.info('TensorRT model is saved at %s', trt_path)
| 37.3125 | 80 | 0.674794 |
import copy
import os
import time
from typing import Text, Dict, Any, Optional
from absl import logging
import numpy as np
import tensorflow as tf
from .. import dataloader
from .. import hparams_config
from .. import utils
from . import efficientdet_keras
from . import label_util
from . import util_keras
from ..visualize import vis_utils
def visualize_image(image,
                    boxes,
                    classes,
                    scores,
                    label_map=None,
                    min_score_thresh=0.01,
                    max_boxes_to_draw=1000,
                    line_thickness=2,
                    **kwargs):
  """Draw detection boxes and class labels onto a copy of ``image``.

  Args:
    image: an image with shape [H, W, C].
    boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].
    classes: a class prediction with shape [N].
    scores: a list of float values with shape [N].
    label_map: a dictionary from class id to name (defaults to COCO).
    min_score_thresh: detections scoring below this are not drawn.
    max_boxes_to_draw: maximum number of bounding boxes to draw.
    line_thickness: thickness of the bounding box outline.
    **kwargs: extra parameters forwarded to the visualization utility.

  Returns:
    An output image (numpy array) with annotated boxes and classes.
  """
  label_map = label_util.get_label_map(label_map or 'coco')
  category_index = {k: {'id': k, 'name': label_map[k]} for k in label_map}
  img = np.array(image)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      img,
      boxes,
      classes,
      scores,
      category_index,
      min_score_thresh=min_score_thresh,
      max_boxes_to_draw=max_boxes_to_draw,
      line_thickness=line_thickness,
      **kwargs)
  return img
class ExportNetwork(tf.Module):
  """Wraps the bare network for export, without pre/post processing."""

  def __init__(self, model):
    super().__init__()
    self.model = model

  @tf.function
  def __call__(self, imgs):
    # Flatten the (possibly nested) network outputs into a flat list of
    # tensors so the exported signature has a stable, simple structure.
    return tf.nest.flatten(self.model(imgs, training=False))
class ExportModel(tf.Module):
  """Model to be exported as SavedModel/TFLite format."""

  def __init__(self, model, pre_mode='infer', post_mode='global'):
    # pre_mode/post_mode select the pre- and post-processing variants the
    # exported model uses when called (e.g. post_mode='tflite' for TFLite).
    super().__init__()
    self.model = model
    self.pre_mode = pre_mode
    self.post_mode = post_mode

  @tf.function
  def __call__(self, imgs):
    return self.model(
        imgs, training=False, pre_mode=self.pre_mode, post_mode=self.post_mode)
class ServingDriver:
  """Driver for an EfficientDet model: inference, benchmark, and export.

  Restores a Keras EfficientDet (full model or bare network) from a
  checkpoint and provides helpers to run inference, visualize detections,
  benchmark latency, and export SavedModel / frozen graph / TFLite /
  TensorRT artifacts.
  """

  def __init__(self,
               model_name: Text,
               ckpt_path: Optional[Text] = None,
               batch_size: int = 1,
               only_network: bool = False,
               model_params: Optional[Dict[Text, Any]] = None,
               debug: bool = False):
    """Initialize the driver.

    Args:
      model_name: e.g. 'efficientdet-d0'; selects the base detection config.
      ckpt_path: checkpoint to restore from; None keeps the fresh init.
      batch_size: fixed serving batch size.
      only_network: if True, serve the bare network without pre/post
        processing.
      model_params: dict of overrides applied on top of the base config.
      debug: if True, run tf.functions eagerly for easier debugging.
    """
    super().__init__()
    self.model_name = model_name
    self.ckpt_path = ckpt_path
    self.batch_size = batch_size
    self.only_network = only_network
    self.debug = debug
    self.params = hparams_config.get_detection_config(model_name).as_dict()
    if model_params:
      self.params.update(model_params)
    # Serving always uses inference-mode batch norm.
    self.params.update(dict(is_training_bn=False))
    self.label_map = self.params.get('label_map', None)
    self._model = None
    # Apply the configured mixed-precision policy globally before build.
    mixed_precision = self.params.get('mixed_precision', None)
    precision = utils.get_precision(
        self.params.get('strategy', None), mixed_precision)
    policy = tf.keras.mixed_precision.Policy(precision)
    tf.keras.mixed_precision.set_global_policy(policy)

  @property
  def model(self):
    # Lazily build and restore the model on first access.
    if not self._model:
      self.build()
    return self._model

  @model.setter
  def model(self, model):
    self._model = model

  def build(self, params_override=None):
    """Construct the Keras model and restore weights from the checkpoint."""
    params = copy.deepcopy(self.params)
    if params_override:
      params.update(params_override)
    config = hparams_config.get_efficientdet_config(self.model_name)
    config.override(params)
    if self.only_network:
      self.model = efficientdet_keras.EfficientDetNet(config=config)
    else:
      self.model = efficientdet_keras.EfficientDetModel(config=config)
    image_size = utils.parse_image_size(params['image_size'])
    self.model.build((self.batch_size, *image_size, 3))
    util_keras.restore_ckpt(self.model, self.ckpt_path,
                            self.params['moving_average_decay'],
                            skip_mismatch=False)
    if self.debug:
      tf.config.run_functions_eagerly(self.debug)

  def visualize(self, image, boxes, classes, scores, **kwargs):
    """Draw detections onto `image` using the driver's label map."""
    return visualize_image(image, boxes, classes.astype(int), scores,
                           self.label_map, **kwargs)

  def benchmark(self, image_arrays, bm_runs=10, trace_filename=None):
    """Report average per-batch latency/FPS; optionally capture a profile."""
    _, spec = self._get_model_and_spec()

    @tf.function(input_signature=[spec])
    def test_func(image_arrays):
      return self.model(image_arrays)

    # Warm-up runs: trigger tf.function tracing/compilation before timing.
    for _ in range(3):
      test_func(image_arrays)
    start = time.perf_counter()
    for _ in range(bm_runs):
      test_func(image_arrays)
    end = time.perf_counter()
    inference_time = (end - start) / bm_runs
    print('Per batch inference time: ', inference_time)
    print('FPS: ', self.batch_size / inference_time)
    if trace_filename:
      options = tf.profiler.experimental.ProfilerOptions()
      tf.profiler.experimental.start(trace_filename, options)
      test_func(image_arrays)
      tf.profiler.experimental.stop()

  def serve(self, image_arrays):
    """Run inference; dispatches to the TFLite interpreter when one is loaded."""
    if isinstance(self.model, tf.lite.Interpreter):
      input_details = self.model.get_input_details()
      output_details = self.model.get_output_details()
      self.model.set_tensor(input_details[0]['index'], np.array(image_arrays))
      self.model.invoke()
      return [self.model.get_tensor(x['index']) for x in output_details]
    return self.model(image_arrays)

  def load(self, saved_model_dir_or_frozen_graph: Text):
    """Load a SavedModel directory, a .tflite file, or a frozen graph."""
    if tf.saved_model.contains_saved_model(saved_model_dir_or_frozen_graph):
      self.model = tf.saved_model.load(saved_model_dir_or_frozen_graph)
      return
    if saved_model_dir_or_frozen_graph.endswith('.tflite'):
      self.model = tf.lite.Interpreter(saved_model_dir_or_frozen_graph)
      self.model.allocate_tensors()
      return

    # Otherwise treat the path as a frozen GraphDef and wrap it as a callable.
    def wrap_frozen_graph(graph_def, inputs, outputs):
      # Re-import the GraphDef into a wrapped function, then prune it down
      # to the requested input/output tensors.
      imports_graph_def_fn = lambda: tf.import_graph_def(graph_def, name='')
      wrapped_import = tf.compat.v1.wrap_function(imports_graph_def_fn, [])
      import_graph = wrapped_import.graph
      return wrapped_import.prune(
          tf.nest.map_structure(import_graph.as_graph_element, inputs),
          tf.nest.map_structure(import_graph.as_graph_element, outputs))

    graph_def = tf.Graph().as_graph_def()
    with tf.io.gfile.GFile(saved_model_dir_or_frozen_graph, 'rb') as f:
      graph_def.ParseFromString(f.read())
    self.model = wrap_frozen_graph(
        graph_def,
        inputs='images:0',
        outputs=['Identity:0', 'Identity_1:0', 'Identity_2:0', 'Identity_3:0'])

  def freeze(self, func):
    """Freeze a concrete function's variables into constants; return GraphDef."""
    # Local import: this converter lives in TF's private python namespace.
    from tensorflow.python.framework.convert_to_constants \
        import convert_variables_to_constants_v2_as_graph
    _, graphdef = convert_variables_to_constants_v2_as_graph(func)
    return graphdef

  def _get_model_and_spec(self, tflite=None):
    """Return (export wrapper module, input TensorSpec) for export/benchmark."""
    if self.only_network or tflite:
      # Fixed-size float input: required for the bare network and for TFLite.
      image_size = utils.parse_image_size(self.params['image_size'])
      spec = tf.TensorSpec(
          shape=[self.batch_size, *image_size, 3],
          dtype=tf.float32,
          name='images')
      if self.only_network:
        export_model = ExportNetwork(self.model)
      else:
        # support dynamic shape.
        logging.info('Export model without preprocessing.')
        # This section is only used for TFLite, so we use the applicable
        # pre_ & post_ modes.
        export_model = ExportModel(
            self.model, pre_mode=None, post_mode='tflite')
      return export_model, spec
    else:
      # Full model: accepts dynamically-sized uint8 images.
      spec = tf.TensorSpec(
          shape=[self.batch_size, None, None, 3], dtype=tf.uint8, name='images')
      export_model = ExportModel(self.model)
      return export_model, spec

  def export(self,
             output_dir: Optional[Text] = None,
             tensorrt: Optional[Text] = None,
             tflite: Optional[Text] = None,
             file_pattern: Optional[Text] = None,
             num_calibration_steps: int = 2000):
    """Export SavedModel + frozen pb, and optionally TFLite and/or TensorRT.

    Args:
      output_dir: destination directory for all exported artifacts.
      tensorrt: TensorRT precision mode name, or None to skip.
      tflite: TFLite precision ('FP32'/'FP16'/'INT8'), or None to skip.
      file_pattern: TFRecord pattern providing INT8 calibration data.
      num_calibration_steps: number of calibration batches for INT8.
    """
    export_model, input_spec = self._get_model_and_spec(tflite)
    image_size = utils.parse_image_size(self.params['image_size'])
    if output_dir:
      tf.saved_model.save(
          export_model,
          output_dir,
          signatures=export_model.__call__.get_concrete_function(input_spec))
      logging.info('Model saved at %s', output_dir)
      # also save freeze pb file.
      graphdef = self.freeze(
          export_model.__call__.get_concrete_function(input_spec))
      proto_path = tf.io.write_graph(
          graphdef, output_dir, self.model_name + '_frozen.pb', as_text=False)
      logging.info('Frozen graph saved at %s', proto_path)
    if tflite:
      shape = (self.batch_size, *image_size, 3)
      input_spec = tf.TensorSpec(
          shape=shape, dtype=input_spec.dtype, name=input_spec.name)
      # from_saved_model supports advanced converter features like op fusing.
      converter = tf.lite.TFLiteConverter.from_saved_model(output_dir)
      if tflite == 'FP32':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float32]
      elif tflite == 'FP16':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float16]
      elif tflite == 'INT8':
        # Enables MLIR-based post-training quantization.
        converter.experimental_new_quantizer = True
        if file_pattern:
          config = hparams_config.get_efficientdet_config(self.model_name)
          config.override(self.params)
          ds = dataloader.InputReader(
              file_pattern,
              is_training=False,
              max_instances_per_image=config.max_instances_per_image)(
                  config, batch_size=self.batch_size)

          def representative_dataset_gen():
            for image, _ in ds.take(num_calibration_steps):
              yield [image]
        else:  # Used for debugging, can remove later.
          logging.warn('Use real representative dataset instead of fake ones.')
          num_calibration_steps = 10

          def representative_dataset_gen():  # rewrite this for real data.
            for _ in range(num_calibration_steps):
              yield [tf.ones(shape, dtype=input_spec.dtype)]

        converter.representative_dataset = representative_dataset_gen
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.inference_input_type = tf.uint8
        # TFLite's custom NMS op isn't supported by post-training quant,
        # so we add TFLITE_BUILTINS as well.
        supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.TFLITE_BUILTINS
        ]
        converter.target_spec.supported_ops = supported_ops
      else:
        raise ValueError(f'Invalid tflite {tflite}: must be FP32, FP16, INT8.')
      tflite_path = os.path.join(output_dir, tflite.lower() + '.tflite')
      tflite_model = converter.convert()
      tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)
      logging.info('TFLite is saved at %s', tflite_path)
    if tensorrt:
      trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())
      conversion_params = tf.experimental.tensorrt.ConversionParams(
          max_workspace_size_bytes=(2 << 20),
          maximum_cached_engines=1,
          precision_mode=tensorrt.upper())
      converter = tf.experimental.tensorrt.Converter(
          output_dir, conversion_params=conversion_params)
      converter.convert()
      converter.save(trt_path)
      logging.info('TensorRT model is saved at %s', trt_path)
| true | true |
f7f446ed3801d6e97bbd0823b35c16eb100c1f11 | 1,530 | py | Python | ament_pep257/setup.py | ament/ament_lint | d502e7bc45ba4ed9dc23c198299899f97ebd0d7e | [
"Apache-2.0"
] | 23 | 2015-07-08T05:42:24.000Z | 2022-03-14T02:13:01.000Z | ament_pep257/setup.py | ament/ament_lint | d502e7bc45ba4ed9dc23c198299899f97ebd0d7e | [
"Apache-2.0"
] | 292 | 2015-03-06T20:11:45.000Z | 2022-03-31T22:30:41.000Z | ament_pep257/setup.py | ament/ament_lint | d502e7bc45ba4ed9dc23c198299899f97ebd0d7e | [
"Apache-2.0"
] | 71 | 2016-05-24T01:24:54.000Z | 2022-03-23T07:42:41.000Z | from setuptools import find_packages
from setuptools import setup
package_name = 'ament_pep257'

setup(
    name=package_name,
    version='0.11.3',
    # Ship the package itself but not its test helpers.
    packages=find_packages(exclude=['test']),
    data_files=[
        # Standard ament resource-index registration so ROS tooling can
        # discover the package.
        ('share/' + package_name, ['package.xml']),
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
    ],
    install_requires=['setuptools'],
    # Bundle the default pep257 configuration alongside the code.
    package_data={'': [
        'configuration/ament_pep257.ini',
    ]},
    zip_safe=True,
    author='Dirk Thomas',
    author_email='dthomas@osrfoundation.org',
    maintainer='Michael Jeronimo, Michel Hidalgo',
    maintainer_email='michael.jeronimo@openrobotics.org, michel@ekumenlabs.com',
    url='https://github.com/ament/ament_lint',
    download_url='https://github.com/ament/ament_lint/releases',
    keywords=['ROS'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Topic :: Software Development',
    ],
    description='Check Python code style using pep257.',
    long_description="""\
The ability to check code against the docstring conventions in PEP 257
and generate xUnit test result files.""",
    license='Apache License, Version 2.0',
    tests_require=['pytest'],
    entry_points={
        # Command-line entry point and pytest plugin registration.
        'console_scripts': [
            'ament_pep257 = ament_pep257.main:main',
        ],
        'pytest11': [
            'ament_pep257 = ament_pep257.pytest_marker',
        ],
    },
)
| 31.875 | 80 | 0.647059 | from setuptools import find_packages
from setuptools import setup
package_name = 'ament_pep257'
setup(
name=package_name,
version='0.11.3',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['setuptools'],
package_data={'': [
'configuration/ament_pep257.ini',
]},
zip_safe=True,
author='Dirk Thomas',
author_email='dthomas@osrfoundation.org',
maintainer='Michael Jeronimo, Michel Hidalgo',
maintainer_email='michael.jeronimo@openrobotics.org, michel@ekumenlabs.com',
url='https://github.com/ament/ament_lint',
download_url='https://github.com/ament/ament_lint/releases',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Check Python code style using pep257.',
long_description="""\
The ability to check code against the docstring conventions in PEP 257
and generate xUnit test result files.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'ament_pep257 = ament_pep257.main:main',
],
'pytest11': [
'ament_pep257 = ament_pep257.pytest_marker',
],
},
)
| true | true |
f7f4478acba8b118bbf50b23cfc29c33b1bb0468 | 979 | py | Python | isi_sdk_9_0_0/test/test_cluster_node_drive_firmware.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_9_0_0/test/test_cluster_node_drive_firmware.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_9_0_0/test/test_cluster_node_drive_firmware.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.cluster_node_drive_firmware import ClusterNodeDriveFirmware # noqa: E501
from isi_sdk_9_0_0.rest import ApiException
class TestClusterNodeDriveFirmware(unittest.TestCase):
    """ClusterNodeDriveFirmware unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # No fixtures needed for the generated stubs.
        pass

    def tearDown(self):
        pass

    def testClusterNodeDriveFirmware(self):
        """Test ClusterNodeDriveFirmware"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_9_0_0.models.cluster_node_drive_firmware.ClusterNodeDriveFirmware()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| 23.878049 | 107 | 0.728294 |
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.cluster_node_drive_firmware import ClusterNodeDriveFirmware
from isi_sdk_9_0_0.rest import ApiException
class TestClusterNodeDriveFirmware(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testClusterNodeDriveFirmware(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f7f447ca9355581efd544c13d9ba730d1d3ec9cb | 3,957 | py | Python | testModes.py | rockset/rockset-load | f093089295ed28fdd0b4aaac2d6dd4b9abce548e | [
"Apache-2.0"
] | null | null | null | testModes.py | rockset/rockset-load | f093089295ed28fdd0b4aaac2d6dd4b9abce548e | [
"Apache-2.0"
] | null | null | null | testModes.py | rockset/rockset-load | f093089295ed28fdd0b4aaac2d6dd4b9abce548e | [
"Apache-2.0"
] | null | null | null | from output import log_qs_summary, log_query_results
from display import display_qs_summary, display_qs_results
from executors import ParallelQSExecutor, SerialQSExecutor
class TestMode():
    """Base class for a load-test run.

    Holds the parsed configuration and output options, and provides the
    shared machinery for executing a query set and summarizing its results.
    """

    # Fixed warning messages for statuses that need no per-result formatting.
    _STATUS_MESSAGES = {
        'timeout': 'Query timed out',
        'exhausted': 'Resources exhausted',
    }

    def __init__(self, config, options):
        self.config = config
        self.options = options
        self.verbose = options['verbose']
        self.log_output = options['log_output']

    def run_queryset(self, target, query_set):
        """Run `query_set` against `target` using its configured mode.

        Returns the executor's result, or None for an unknown mode.
        """
        mode = target.get('execution_mode', 'serial')
        if mode == 'parallel':
            executor = ParallelQSExecutor(target, query_set)
        elif mode == 'serial':
            executor = SerialQSExecutor(target, query_set)
        else:
            print(f"Unexpected query set execution mode {mode}")
            return None
        return executor.run()

    def obfuscate_apikey(self, config):
        """Mask the API key in-place, keeping only its last 4 characters.

        Called once query execution is finished so the key cannot leak into
        any generated report.
        """
        last4 = config['target']['api_key'][-4:]
        config['target']['api_key'] = '******' + last4

    @staticmethod
    def _make_warning(result, message):
        """Build one warning entry for the summary report."""
        return {
            'query_num': result['query_num'],
            'name': result['name'],
            'message': message,
        }

    def summarize_qs_results(self, config, results):
        """Aggregate per-query timings and collect warnings for bad queries.

        Successful queries contribute their timings; a success with zero rows,
        an error, a timeout, or resource exhaustion each produce a warning.
        """
        total, query, queued, network = 0, 0, 0, 0
        warnings = []
        for result in results['query_results']:
            status = result['status']
            if status == 'success':
                total += result['round_trip_ms']
                query += result['query_ms']
                queued += round(result['queued_ns'] / 1000)
                network += result['network_ms']
                if result['row_count'] == 0:
                    warnings.append(
                        self._make_warning(result, 'Returned no rows'))
            elif status == 'error':
                warnings.append(self._make_warning(
                    result, f"Errored with message: {result['message']}"))
            elif status in self._STATUS_MESSAGES:
                warnings.append(self._make_warning(
                    result, self._STATUS_MESSAGES[status]))
        return {
            'total_ms': total,
            'query_ms': query,
            'queued_ms': queued,
            'network_ms': network,
            'warnings': warnings,
            # The run is clean iff no query produced a warning.
            'clean': not warnings,
        }
class QPSTestMode(TestMode):
    # Placeholder: QPS-driven execution is not implemented yet; construction
    # is simply forwarded to TestMode.
    def __init__(self,config, options):
        super().__init__(config,options)
class IterationsTestMode(TestMode):
    """Test mode that runs the configured query set once, then reports."""

    def __init__(self, config, options):
        super().__init__(config, options)

    def run(self):
        """Execute the query set, mask the key, then display/log the results."""
        results = self.run_queryset(self.config['target'], self.config['queries'])
        self.obfuscate_apikey(self.config)
        summary = self.summarize_qs_results(self.config, results)
        if self.verbose:
            display_qs_results(self.config, results)
            display_qs_summary(self.config, summary)
        if self.log_output:
            log_query_results(self.options, self.config, results)
            log_qs_summary(self.options, self.config, summary)
| 39.178218 | 119 | 0.550164 | from output import log_qs_summary, log_query_results
from display import display_qs_summary, display_qs_results
from executors import ParallelQSExecutor, SerialQSExecutor
class TestMode():
def __init__(self,config, options):
self.config = config
self.options = options
self.verbose = options['verbose']
self.log_output = options['log_output']
def run_queryset(self, target, query_set):
if 'execution_mode' in target:
mode = target['execution_mode']
else:
mode = 'serial'
if mode == 'parallel':
executor = ParallelQSExecutor(target, query_set)
elif mode == 'serial':
executor = SerialQSExecutor(target, query_set)
else:
print(f"Unexpected query set execution mode {mode}")
return None
return executor.run()
def obfuscate_apikey(self, config):
last4 = config['target']['api_key'][-4:]
config['target']['api_key'] = '******' + last4
def summarize_qs_results(self, config, results):
total, query, queued, network = 0,0,0,0
warnings = []
clean = True
for result in results['query_results']:
if result['status'] == 'success':
total += result['round_trip_ms']
query += result['query_ms']
queued += round(result['queued_ns']/1000)
network += result['network_ms']
if result['row_count'] == 0:
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Returned no rows'
warnings.append(warning)
elif result['status'] == 'error':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = f"Errored with message: {result['message']}"
warnings.append(warning)
elif result['status'] == 'timeout':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Query timed out'
warnings.append(warning)
elif result['status'] == 'exhausted':
clean = False
warning = {}
warning['query_num'] = result['query_num']
warning['name'] = result['name']
warning['message'] = 'Resources exhausted'
warnings.append(warning)
return {
'total_ms': total,
'query_ms': query,
'queued_ms': queued,
'network_ms': network,
'warnings': warnings,
'clean': clean
}
class QPSTestMode(TestMode):
def __init__(self,config, options):
super().__init__(config,options)
class IterationsTestMode(TestMode):
def __init__(self,config, options):
super().__init__(config,options)
def run(self):
query_results = self.run_queryset(self.config['target'], self.config['queries'])
self.obfuscate_apikey(self.config)
query_set_summary = self.summarize_qs_results(self.config, query_results)
if self.verbose:
display_qs_results(self.config, query_results)
display_qs_summary(self.config, query_set_summary)
if self.log_output:
log_query_results(self.options, self.config, query_results)
log_qs_summary(self.options, self.config, query_set_summary)
| true | true |
f7f44837819617114e612521998f86b0efe44c0a | 2,227 | py | Python | magi/instruction.py | bcho/magi | ec5c936ad48c1db1fef042768da728b229b28210 | [
"MIT"
] | 1 | 2019-08-24T20:36:41.000Z | 2019-08-24T20:36:41.000Z | magi/instruction.py | bcho/magi | ec5c936ad48c1db1fef042768da728b229b28210 | [
"MIT"
] | null | null | null | magi/instruction.py | bcho/magi | ec5c936ad48c1db1fef042768da728b229b28210 | [
"MIT"
] | null | null | null | # coding: utf-8
'''
magi.instruction
~~~~~~~~~~~~~~~~
CPU instructions.
'''
from magi import const
class MalformatInstruction(ValueError):
    '''Raised when an instruction cannot be decoded or fails validation.'''
class OperationSet(object):
    '''Registry mapping opcodes to their implementing callables.'''

    # Operations, keyed by opcode.  Class-level, so the registry is shared
    # across the whole interpreter.
    ops = {}

    @classmethod
    def add(cls, opcode, func):
        '''Add an operation implement.  Returns the class for chaining.'''
        cls.ops[opcode] = func
        return cls

    @classmethod
    def has(cls, opcode):
        '''Do we have this operation?'''
        return opcode in cls.ops

    @classmethod
    def get(cls, opcode):
        '''Get an operation.  Raises KeyError for unregistered opcodes.'''
        return cls.ops[opcode]
class Instruction(object):
    '''A decoded CPU instruction: an opcode plus optional register operands.'''

    def __init__(self, opcode, dest_register=None, src_register=None):
        if opcode not in const.OP or not OperationSet.has(opcode):
            raise MalformatInstruction('Unknown opcode: {0}'.format(opcode))
        # Source is checked before destination, preserving the error order.
        for register in (src_register, dest_register):
            if register and register not in const.Registers:
                raise MalformatInstruction(
                    'Unknown register: {0}'.format(register)
                )
        self.opcode = opcode
        self.src = src_register
        self.dest = dest_register

    def execute(self, machine):
        '''Execute instruction.

        :param machine: machine state.
        '''
        operation = OperationSet.get(self.opcode)
        operation(self.src, self.dest, machine)

    @property
    def bytecode(self):
        '''Encode this instruction back into its integer bytecode form.'''
        encoded = const.FROM_OP(self.opcode)
        encoded += const.FROM_SRC(self.src)
        encoded += const.FROM_DEST(self.dest)
        return encoded

    def __str__(self):
        return '<Inst: {r.opcode}: {r.dest}, {r.src}>'.format(r=self)

    def __repr__(self):
        return str(self)

    @classmethod
    def from_bytecode(cls, bytecode):
        '''Decode an instruction from bytecode.

        :param bytecode: input bytecode (must be an int).
        '''
        if not isinstance(bytecode, int):
            raise MalformatInstruction
        return Instruction(
            const.GET_OP(bytecode),
            const.GET_DEST(bytecode),
            const.GET_SRC(bytecode)
        )
from magi import const
class MalformatInstruction(ValueError):
class OperationSet(object):
ops = {}
@classmethod
def add(cls, opcode, func):
cls.ops[opcode] = func
return cls
@classmethod
def has(cls, opcode):
return opcode in cls.ops
@classmethod
def get(cls, opcode):
return cls.ops[opcode]
class Instruction(object):
def __init__(self, opcode, dest_register=None, src_register=None):
if opcode not in const.OP or not OperationSet.has(opcode):
raise MalformatInstruction('Unknown opcode: {0}'.format(opcode))
if src_register and src_register not in const.Registers:
raise MalformatInstruction(
'Unknown register: {0}'.format(src_register)
)
if dest_register and dest_register not in const.Registers:
raise MalformatInstruction(
'Unknown register: {0}'.format(dest_register)
)
self.opcode = opcode
self.src = src_register
self.dest = dest_register
def execute(self, machine):
OperationSet.get(self.opcode)(self.src, self.dest, machine)
@property
def bytecode(self):
return (const.FROM_OP(self.opcode)
+ const.FROM_SRC(self.src)
+ const.FROM_DEST(self.dest))
def __str__(self):
return '<Inst: {r.opcode}: {r.dest}, {r.src}>'.format(r=self)
def __repr__(self):
return self.__str__()
@classmethod
def from_bytecode(cls, bytecode):
if not isinstance(bytecode, int):
raise MalformatInstruction
return Instruction(
const.GET_OP(bytecode),
const.GET_DEST(bytecode),
const.GET_SRC(bytecode)
)
| true | true |
f7f4486c97b5232a412476415acf4cf39fa304da | 2,011 | py | Python | nablapps/interactive/migrations/0013_colorchoice.py | Amund211/nablaweb | 8105c34615d4b67637e982545fbc6489a131c1f3 | [
"MIT"
] | 17 | 2019-10-07T15:10:58.000Z | 2022-01-21T14:18:07.000Z | nablapps/interactive/migrations/0013_colorchoice.py | Amund211/nablaweb | 8105c34615d4b67637e982545fbc6489a131c1f3 | [
"MIT"
] | 222 | 2019-10-07T15:04:51.000Z | 2022-03-24T12:14:16.000Z | nablapps/interactive/migrations/0013_colorchoice.py | Amund211/nablaweb | 8105c34615d4b67637e982545fbc6489a131c1f3 | [
"MIT"
] | 7 | 2019-10-10T18:53:42.000Z | 2021-10-18T02:13:09.000Z | # Generated by Django 2.1.13 on 2019-11-07 20:36
import re
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the ColorChoice model, which stores a
    # user-submitted CSS color with its submission time.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("interactive", "0012_remove_empty_quizreplies"),
    ]

    operations = [
        migrations.CreateModel(
            name="ColorChoice",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "color",
                    models.CharField(
                        max_length=20,
                        # Accepts hex (#rgb..#rrggbb), rgb(...) and hsl(...)
                        # CSS color syntax; the flags value 2 == re.IGNORECASE.
                        validators=[
                            django.core.validators.RegexValidator(
                                re.compile(
                                    "(^#[a-f0-9]{3,6}$)|(^rgb\\s*\\(\\s*((2[0-4][0-9]|25[0-5]|1?[0-9]{1,2}|100%|[0-9]{1,2}%)\\s*,\\s*){2}((2[0-4][0-9]|25[0-5]|1?[0-9]{1,2}|100%|[0-9]{1,2}%)\\s*)\\))|(^hsl\\s*\\(\\s*(360|3[0-5][0-9]|[0-2]?[0-9]{1,2})\\s*,\\s*(100%|[0-9]{1,2}%)\\s*,\\s*(100%|[0-9]{1,2}%)\\s*\\)$)",
                                    2,
                                ),
                                "Enter a valid color in CSS format.",
                                "invalid",
                            )
                        ],
                    ),
                ),
                ("time", models.DateTimeField(auto_now_add=True)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
    ]
| 34.672414 | 314 | 0.378916 |
import re
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("interactive", "0012_remove_empty_quizreplies"),
]
operations = [
migrations.CreateModel(
name="ColorChoice",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"color",
models.CharField(
max_length=20,
validators=[
django.core.validators.RegexValidator(
re.compile(
"(^#[a-f0-9]{3,6}$)|(^rgb\\s*\\(\\s*((2[0-4][0-9]|25[0-5]|1?[0-9]{1,2}|100%|[0-9]{1,2}%)\\s*,\\s*){2}((2[0-4][0-9]|25[0-5]|1?[0-9]{1,2}|100%|[0-9]{1,2}%)\\s*)\\))|(^hsl\\s*\\(\\s*(360|3[0-5][0-9]|[0-2]?[0-9]{1,2})\\s*,\\s*(100%|[0-9]{1,2}%)\\s*,\\s*(100%|[0-9]{1,2}%)\\s*\\)$)",
2,
),
"Enter a valid color in CSS format.",
"invalid",
)
],
),
),
("time", models.DateTimeField(auto_now_add=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
| true | true |
f7f4496dd52cd13894061a13b5116481aeac5395 | 22,793 | py | Python | conchat.py | lepers/conchat | c63798426b5ac86707d543cbad07880b0de22351 | [
"MIT"
] | 1 | 2022-02-28T14:10:02.000Z | 2022-02-28T14:10:02.000Z | conchat.py | lepers/conchat | c63798426b5ac86707d543cbad07880b0de22351 | [
"MIT"
] | null | null | null | conchat.py | lepers/conchat | c63798426b5ac86707d543cbad07880b0de22351 | [
"MIT"
] | null | null | null | import requests
import time
import logging
from datetime import datetime
import _thread
import os
import sys
import configparser
import string
from cryptography.fernet import Fernet
import platform
from requests.api import request
import torrequest
import enchant
import re
import random
import pyttsx3
from builtins import print
from bs4 import BeautifulSoup
from console.printers import print
# Client version shown in the startup banner.
version = '1.0.18b'
# Platform name ('Windows'/'Linux'/...); selects the TTS/COM and Tor paths.
localsystem = platform.system()
if localsystem == 'Windows':
    # pywin32's COM bindings are only needed (and only exist) on Windows.
    import pythoncom
# Module-level default log file name.  NOTE(review): renew() separately reads
# the instance log path from chat.ini's 'logFile' key — confirm which is used.
logfile = 'chat.log'
class _TTS:
    """Disposable text-to-speech wrapper around pyttsx3."""
    # pyttsx3 engine instance, created in __init__.
    engine = None
    # NOTE(review): `rate` is never read or written in the visible code —
    # presumably a leftover; confirm before removing.
    rate = None
    def __init__(self):
        # On Windows, COM is initialised first (presumably required so the
        # SAPI5 backend works when called from a worker thread — confirm).
        if localsystem == 'Windows':
            pythoncom.CoInitializeEx(0)
        self.engine = pyttsx3.init()
        # Select the Russian voice where the platform provides one.
        self.engine.setProperty('voice', 'russian')
    def start(self, text_):
        """Speak `text_` synchronously (blocks until playback finishes)."""
        self.engine.say(text_)
        self.engine.runAndWait()
class _Request:
    """One-shot HTTP POST client routed through a local Tor proxy.

    Used on Linux in place of a bare TorRequest; the Tor session is closed
    immediately after the response is received.
    """
    # TorRequest session, created in __init__.
    tor = None

    def __init__(self):
        self.tor = torrequest.TorRequest(proxy_port=7050, ctrl_port=7051, password=None)

    def post(self, url, headers, json, data):
        """POST to `url` through Tor and return the response.

        Bug fix: the arguments are now forwarded as keywords.  Previously
        they were passed positionally, which slotted the headers dict into
        the requests-style `data` parameter and broke the request.
        """
        res = self.tor.post(url, headers=headers, json=json, data=data)
        self.tor.close()
        return res
class conchat:
    def __init__(self):
        # All state setup lives in renew() so the client can be fully
        # re-initialised at runtime (the '#lepra' command calls renew again).
        self.renew()
    def myprint(self, text):
        # Thin wrapper over print (the console.printers drop-in imported at
        # module top) — a single place to change output handling later.
        print(text)
    def renew(self):
        """(Re)load configuration from chat.ini and rebuild all client state.

        Prints the banner, reads credentials/flags, derives the chat URLs and
        request headers for the selected sub-lepra, and prints the command
        help.  Safe to call again at runtime (used by the '#lepra' command).
        """
        print("""
╭━━━╮╱╱╱╱╱╱╱╱╭╮╱╱╱╱╭╮
┃╭━╮┃╱╱╱╱╱╱╱╱┃┃╱╱╱╭╯╰╮
┃┃╱╰╋━━┳━╮╭━━┫╰━┳━┻╮╭╯
┃┃╱╭┫╭╮┃╭╮┫╭━┫╭╮┃╭╮┃┃
┃╰━╯┃╰╯┃┃┃┃╰━┫┃┃┃╭╮┃╰╮
╰━━━┻━━┻╯╰┻━━┻╯╰┻╯╰┻━╯
        """)
        print('conchat - ' + localsystem + ' ver.' + version)
        # Credentials and flags come from the [chat] section of chat.ini.
        self.conf = configparser.ConfigParser()
        self.conf.read('chat.ini')
        self.uid = self.conf['chat']['uid']
        self.sid = self.conf['chat']['sid']
        self.name = self.conf['chat']['name']
        self.session = self.conf['chat']['session']
        self.csrf_token = self.conf['chat']['csrf_token']
        self.useTor = int(self.conf['chat']['useTor'])
        self.spellCheck = int(self.conf['chat']['spellCheck'])
        self.logfile = self.conf['chat']['logFile']
        self.useLog = int(self.conf['chat']['log'])
        self.logOnlyMode = int(self.conf['chat']['logOnlyMode'])
        self.subLepra = self.conf['chat']['subLepra']
        self.say = int(self.conf['chat']['say'])
        # A non-empty sub-lepra becomes the 'name.' host prefix.
        if self.subLepra != '':
            self.subLepra = str(self.subLepra) + '.'
        print('https://' + str(self.subLepra) + 'leprosorium.ru/')
        self.encr = False
        self.pauseChat = False
        # Optional flags: default to 0 when missing from an older chat.ini.
        if 'plaintext' in self.conf['chat']:
            self.plaintext = int(self.conf['chat']['plaintext'])
        else:
            self.plaintext = 0
        if 'yinfo' in self.conf['chat']:
            self.yinfo = int(self.conf['chat']['yinfo'])
        else:
            self.yinfo = 0
        if 'ydownload' in self.conf['chat']:
            self.ydownload = int(self.conf['chat']['ydownload'])
        else:
            self.ydownload = 0
        if 'silent' in self.conf['chat']:
            self.silent = int(self.conf['chat']['silent'])
        else:
            self.silent = 0
        # Seconds between chat polls in mess_loop().
        self.intervalGetMess = 11
        self.getUrl = 'https://' + str(self.subLepra) + 'leprosorium.ru/ajax/chat/load/'
        self.addUrl = 'https://' + str(self.subLepra) + 'leprosorium.ru/ajax/chat/add/'
        self.apiUrl = 'https://leprosorium.ru/api/'
        # Browser-like headers; the session cookie carries the credentials.
        self.headers = {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'en-US,en;q=0.9,ru;q=0.8', 'Connection': 'keep-alive', 'Content-Length': '0', 'Host': str(self.subLepra) + 'leprosorium.ru', 'Origin': 'https://' + str(self.subLepra) + 'leprosorium.ru', 'Referer': 'https://' + str(self.subLepra) + 'leprosorium.ru/', 'Sec-Fetch-Dest': 'empty', 'Sec-Fetch-Mode': 'cors', 'Sec-Fetch-Site': 'same-origin', 'Cookie': 'wikilepro_session=' + self.session + '; uid=' + self.uid + '; sid=' + self.sid, 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'}
        # NOTE(review): `lm` is a module-level last-message id defined outside
        # the visible section — confirm it is initialised before renew() runs.
        self.form = {'last_message_id': lm, 'csrf_token': self.csrf_token}
        # ANSI foreground color codes used by printPosts().
        self.Red = 31
        self.Green = 32
        self.Yellow = 33
        self.Blue = 34
        self.Purple = 35
        self.tor = None
        # Command help (the '#spel' spelling below is a typo kept as-is:
        # sendMessage matches the substring '#spell').
        self.myprint('команды приложения:')
        self.myprint('#spel - проверка правописания, левописания')
        self.myprint('#lepra - указать подлепру')
        self.myprint('#exit - выход')
        self.myprint('#enc - шифрование сообщения общим ключом')
        self.myprint('ENTER - пауза вывода сообщений')
        self.myprint('#say - читать сообщения в слух')
        self.myprint('#plaintext - текстовый формат консоли')
        self.myprint('#tor - использование тора')
        self.myprint('#yinfo - информация о youtube ссылах')
        self.myprint('#ydownload - скачиавание youtube ссылок')
        self.myprint('#silent - не отсылать сообщения в чат')
    def get_logger(self, name):
        """Return an INFO-level logger wired to the shared handler.

        NOTE(review): relies on `self.log_handler` being assigned somewhere
        outside the visible code — confirm before calling.
        """
        logger = logging.getLogger(name)
        logger.setLevel(logging.INFO)
        logger.addHandler(self.log_handler)
        return logger
    def mess_loop(self, lm):
        """Background loop: poll the chat forever for messages newer than `lm`.

        `lm` is the id of the last message already displayed.
        """
        while True:
            # While the user is typing (pauseChat), add a small extra delay.
            if self.pauseChat == True:
                time.sleep(2)
            lms = self.AddMessages(lm)
            if lms > lm:
                # Persist progress so a restart can resume near the last
                # message.  NOTE(review): this writes the *previous* id (lm,
                # not the new lms) and never closes the file handle — confirm
                # whether both are intended.
                file = open('lastMessageNum.txt', 'w')
                file.write(str(lm) + '\n')
                lm = lms
            time.sleep(self.intervalGetMess)
    def sendMessage_loop(self):
        """Foreground loop: read lines from stdin and send each as a message.

        The bare except presumably exists to turn Ctrl-C/EOF from input()
        into a clean shutdown — note it also hides any error raised by
        sendMessage itself.
        """
        while True:
            try:
                msg = input()
                self.sendMessage(msg)
            except:
                self.exit_chat()
def html_special_chars(self, text):
return text.replace('&', '&').replace('"', '"').replace(''', "'").replace('<', '<').replace('>', '>').replace('–', '-')
def sendMessage(self, msg):
send = True
if '/python.exe' in msg:
send = False
if 'nyok' in msg:
send = False
if '#enc' in str(msg):
key = self.load_key()
f = Fernet(key)
msg = str(f.encrypt(msg.encode()))
if str(msg) == '':
self.myprint('Вывод сообщений на паузе....пока ты набираешь текст.')
self.pauseChat = True
send = False
if 'posts' in str(msg):
if 'mixed' in str(msg):
self.printPosts('mixed')
elif 'main' in str(msg):
self.printPosts('main')
elif 'personal' in str(msg):
self.printPosts('personal')
else:
self.myprint('Ошибка в выражении posts [mixed,main,personal]')
self.myprint('posts mixed')
send = False
if '#spell' in str(msg):
if self.spellCheck == 1:
self.conf.set('chat', 'spellCheck', '0')
self.spellCheck = 0
self.myprint('spellCheck=0')
else:
self.conf.set('chat', 'spellCheck', '1')
self.spellCheck = 1
self.myprint('spellCheck=1')
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#lepra' in str(msg):
self.subLepra = input('Подлепра:')
self.conf.set('chat', 'subLepra', self.subLepra)
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
self.renew()
send = False
if '#say' in str(msg):
if self.say == 1:
self.myprint('say=0')
self.conf.set('chat', 'say', '0')
self.say = 0
else:
self.myprint('say=1')
self.conf.set('chat', 'say', '1')
self.say = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#tor' in str(msg):
if self.useTor == 1:
self.myprint('useTor=0')
self.conf.set('chat', 'useTor', '0')
self.useTor = 0
else:
self.myprint('useTor=1')
self.conf.set('chat', 'useTor', '1')
self.useTor = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#exit' in str(msg):
self.exit_chat()
if '#plaintext' in str(msg):
if self.plaintext == 1:
self.myprint('plaintext=0')
self.conf.set('chat', 'plaintext', '0')
self.plaintext = 0
else:
self.myprint('plaintext=1')
self.conf.set('chat', 'plaintext', '1')
self.plaintext = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#yinfo' in str(msg):
if self.yinfo == 1:
self.myprint('yinfo=0')
self.conf.set('chat', 'yinfo', '0')
self.yinfo = 0
else:
self.myprint('yinfo=1')
self.conf.set('chat', 'yinfo', '1')
self.yinfo = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#silent' in str(msg):
if self.silent == 1:
self.myprint('silent=0')
self.conf.set('chat', 'silent', '0')
self.silent = 0
else:
self.myprint('silent=1')
self.conf.set('chat', 'silent', '1')
self.silent = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#ydownload' in str(msg):
if self.ydownload == 1:
self.myprint('ydownload=0')
self.conf.set('chat', 'ydownload', '0')
self.ydownload = 0
else:
self.myprint('ydownload=1')
self.conf.set('chat', 'ydownload', '1')
self.ydownload = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if self.spellCheck == 1:
c = enchant.Dict('ru_RU')
msg = str(msg)
for word in re.findall('[А-Яа-я]+', str(msg)):
if c.check(word) == False:
lst = c.suggest(word)
if len(lst) > 0:
sugid = random.randint(0, len(lst) - 1)
wrd = lst[sugid]
msg = msg.replace(word, wrd, 1)
if self.silent == 1:
send = False
if send == True:
form2 = {'last': lm, 'csrf_token': self.csrf_token, 'body': msg}
tc = self.req()
tc.post(self.addUrl, headers=self.headers, json=form2, data=form2)
self.pauseChat = False
def req(self):
if self.useTor == 1:
if localsystem == 'Linux':
rq = _Request()
else:
rq = torrequest.TorRequest(proxy_port=7050, ctrl_port=7051, password=None)
return rq
r = requests
return r
    def getMessages(self):
        """POST the current form to the chat "load" endpoint and return the decoded JSON.

        Returns None (after a 5 second back-off) when the request fails for
        any reason, so the polling loop can simply retry.
        """
        try:
            tc = self.req()
            r = tc.post(self.getUrl, headers=self.headers, json=self.form, data=self.form)
            return r.json()
        except Exception:
            # "something's wrong with the internet" -- back off before the caller retries
            self.myprint('чёта с инетом... \n ')
            time.sleep(5)
            return
    def l(self, text):
        """Write *text* to the chat transcript logger at INFO level."""
        self.log.info(text)
def getposts(self, feed_type):
url = self.apiUrl + 'feeds/' + feed_type + '/'
tc = self.req()
r = tc.get(url, headers={'X-Futuware-UID': self.uid, 'X-Futuware-SID': self.id})
result = r.json()
return result
def printPosts(self, feed_type):
posts = self.getposts(feed_type)
for post in posts['posts']:
link = str('\x1b[1;' + str(self.Yellow) + ';40m' + post['_links'][0]['href'] + '\x1b[0m ')
rating = str('\x1b[1;' + str(self.Red) + ';40m' + str(post['rating']) + '\x1b[0m ')
login = str('\x1b[1;' + str(self.Green) + ';40m' + str(post['user']['login']) + '\x1b[0m ')
id = str('\x1b[1;' + str(self.Purple) + ';40m' + str(post['id']) + '\x1b[0m ')
comments_count = str('\x1b[1;' + str(Blue) + ';40m' + str(post['comments_count']) + '\x1b[0m ')
created = datetime.fromtimestamp(post['created'])
today = datetime.today()
d1 = today.strftime('%m/%Y')
d2 = created.strftime('%m/%Y')
if not d1 == d2:
continue
self.myprint('л, ' + link + ' р, ' + rating + ' %, ' + login + ' д, ' + created.strftime('%Y-%m-%d-%H.%M') + ' к,' + comments_count + ' id:' + id)
self.myprint(post['body'])
def load_key(self):
'''
Loads the key from the current directory named `key.key`
'''
defaultKey = b'jkE4yxD4azCxKL3_R1-kRy6RbZGf0pwxJGAZOtiPg8E='
return defaultKey
def ClearLog(self):
try:
if not os.path.isfile(self.logfile):
return
if os.path.isfile(self.logfile + '.old'):
os.remove(self.logfile + '.old')
os.rename(self.logfile, self.logfile + '.old')
lines_seen = set()
outfile = open(self.logfile, 'w')
for line in open(self.logfile + '.old', 'r'):
if line not in lines_seen:
outfile.write(line)
lines_seen.add(line)
outfile.close()
except:
pass
def InfoYoutube(self, url):
try:
res = pafy.new(url)
print(f'Title: {res.title}')
print(f'Viewcount {res.viewcount}')
print(f'Author: {res.author}')
print(f'Video Length: {res.length}')
print(f'Likes: {res.likes}')
print(f'Dislikes: {res.dislikes}')
print(f'Description: {res.description}')
if 'Artist' in res:
print(f'Artist: {res.artist}')
if 'Artist' in res:
print(f'Song: {res.song}')
except:
pass
return {'title': f'{res.title}', 'length': f'{res.length}'}
    def DownloadYoutube(self, url):
        """Download the best-quality audio stream of the YouTube video at *url* into the working directory.

        NOTE(review): `pafy` is never imported in this file, so this raises
        NameError until that import is added -- confirm.
        """
        result = pafy.new(url)
        best_quality_audio = result.getbestaudio()
        # "Starting audio download ...." (user-facing message, kept in Russian)
        print('Начинаю загрузку аудио ....')
        best_quality_audio.download()
def AddMessages(self, lm):
if self.pauseChat == True:
return lm
result = self.getMessages()
if result == None:
return lm
if 'messages' in result:
mess = result['messages']
else:
mess = []
self.myprint('Ошибка, возможно узел закрыт или Вас забанили...')
return lm
id = 0
for message in mess:
id = message['id']
if int(id) <= int(lm):
continue
login = str(message['user']['login'])
messageText = str(message['body'])
messageText = self.html_special_chars(messageText)
created = datetime.fromtimestamp(message['created'])
if "b'" in str(messageText):
if ' ' not in str(messageText):
messageText = messageText.replace("b'", '').replace("'", '')
try:
self.myprint('Расшифровка - ' + messageText)
key = self.load_key()
f = Fernet(key.decode())
messageText = str(f.decrypt(messageText.encode()).decode())
except:
self.myprint('ХХХХ')
elif self.say == 1:
tts = _TTS()
tts.start(messageText)
del tts
if self.plaintext == 1 and '<' in messageText and '>' in messageText:
soup = BeautifulSoup(messageText, features='lxml')
messageText = soup.getText()
messageText = self.html_special_chars(messageText)
urls = re.findall('((https?):((//)|(\\\\))+([\\w\\d:#@%/;$()~_?\\+-=\\\\.&](#!)?)*)', messageText)
for url in urls:
if 'youtube.com' or 'youtu.be' in url[0]:
title = ''
if self.yinfo == 1:
try:
rs = self.InfoYoutube(url[0])
title = rs['title']
except:
pass
if self.ydownload == 1 and title != '' and int(rs['length']) < 500:
try:
if not os.path.isfile(title + '.webm') and not (not os.path.isfile(title + '.m4a') and not os.path.isfile(title + '.mp3')):
_thread.start_new_thread(self.DownloadYoutube, (url[0],))
except:
pass
if login != self.name and self.name in messageText:
tts = _TTS()
tts.start(messageText)
log_msg = str(created.strftime('%m-%d-%H.%M') + '|' + login + '|' + messageText + '|' + str(id) + '|' + str(message['created']))
mynick = str('\x1b[1;' + str(self.Red) + ';40m' + self.name + ':\x1b[0m ')
messageText = messageText.replace(self.name, mynick)
if login != self.name:
msg = str('\x1b[1;' + str(self.Yellow) + ';40m' + created.strftime('%H.%M') + ':\x1b[0m ' + '\x1b[1;' + str(self.Green) + ';40m' + login + ':\x1b[0m ' + messageText)
else:
msg = str('\x1b[1;' + str(self.Yellow) + ';40m' + created.strftime('%H.%M') + ':\x1b[0m ' + '\x1b[1;' + str(self.Red) + ';40m' + login + ':\x1b[0m ' + messageText)
self.myprint(msg)
log_msg = str(log_msg)
if self.useLog == 1:
self.l(log_msg)
return id
    def exit_chat(self):
        """Print a farewell banner, flush and close every log handler, then terminate the process."""
        try:
            print('conchat ' + version)
            print('by')
            print("""
─╔╗───╔╦╗
─║╠╦╦═╬╣╚╦═╦╦╗
╔╣║║║╬║║╔╣╩╣╔╝
╚═╩═╣╔╩╩═╩═╩╝
────╚╝""")
            # Detach every handler so the log file is flushed before exiting.
            handlers = self.log.handlers.copy()
            for handler in handlers:
                try:
                    handler.acquire()
                    handler.flush()
                    handler.close()
                except (OSError, ValueError):
                    pass
                finally:
                    handler.release()
                self.log.removeHandler(handler)
            sys.exit(0)
        except SystemExit:
            # sys.exit raises SystemExit; force a hard exit so worker threads die too.
            os._exit(0)
    def startChat(self, lm, log_handler):
        """Start the receive and send loops in background threads and block the main thread forever.

        lm -- id of the last message already displayed.
        log_handler -- logging handler used for the chat transcript.
        """
        self.log_handler = log_handler
        self.log = self.get_logger(__name__)
        try:
            _thread.start_new_thread(self.mess_loop, (lm,))
        except:
            self.myprint('Ошибка в потоке сообщений, закрытие приложения')
        try:
            _thread.start_new_thread(self.sendMessage_loop, ())
        except:
            self.myprint('Ошибка в потоке отправки, выход....')
        # Keep the main thread alive; the worker threads do all the work.
        while True:
            time.sleep(1)
def write_key():
    """Generate a fresh Fernet key and persist it to `key.key` in the working directory."""
    with open('key.key', 'wb') as key_file:
        key_file.write(Fernet.generate_key())
def load_key():
    """Return the shared Fernet key.

    Despite the historical name, `key.key` on disk is ignored; a fixed
    default key string is returned.
    """
    return 'jkE4yxD4azCxKL3_R1-kRy6RbZGf0pwxJGAZOtiPg8E='
def loginLzd(uname, pas):
    """Log in to leprosorium.ru with *uname*/*pas* and, on success, write a fresh chat.ini.

    Returns True when authentication succeeded and the config was written,
    False otherwise.
    """
    # NOTE(review): the trailing space in this URL looks accidental -- confirm.
    lurl = 'https://leprosorium.ru/ajax/auth/login/ '
    hdlg = {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'en-US,en;q=0.9,ru;q=0.8', 'Connection': 'keep-alive', 'Content-Length': '0', 'Host': 'leprosorium.ru', 'Referer': 'https://leprosorium.ru/login/', 'Sec-Fetch-Dest': 'empty', 'Sec-Fetch-Mode': 'cors', 'Sec-Fetch-Site': 'same-origin', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'}
    formlg = {'username': uname, 'password': pas, 'g-recaptcha-response': ''}
    r = requests.post(lurl, headers=hdlg, json=formlg, data=formlg)
    d = r.json()
    if d['status'] == 'OK':
        # Persist the session cookies plus defaults for every supported setting.
        config = configparser.ConfigParser()
        config.add_section('chat')
        config.set('chat', 'uid', r.cookies['uid'])
        config.set('chat', 'sid', r.cookies['sid'])
        config.set('chat', 'session', '')
        config.set('chat', 'csrf_token', d['csrf_token'])
        config.set('chat', 'logFile', 'chat.log')
        config.set('chat', 'spellCheck', '0')
        config.set('chat', 'log', '1')
        config.set('chat', 'name', uname)
        config.set('chat', 'useTor', '0')
        config.set('chat', 'logOnlyMode', '0')
        config.set('chat', 'subLepra', '')
        config.set('chat', 'say', '0')
        config.set('chat', 'yinfo', '0')
        config.set('chat', 'ydownload', '0')
        with open('chat.ini', 'w') as config_file:
            config.write(config_file)
        return True
    return False
def exit_chat():
    """Terminate the process; fall back to a hard exit if SystemExit is intercepted."""
    try:
        sys.exit(0)
    except SystemExit:
        # os._exit also kills lingering worker threads.
        os._exit(0)
def setup():
    """Interactively create `chat.ini` by logging in with username/password; exit if the user declines."""
    print('отсутствует chat.ini')
    print("""Зайти через логин / пароль?
    """)
    answer = input('Yes|No\n')
    if answer == 'Yes':
        log = input('login:')
        pas = input('password:')
        if loginLzd(log, pas) == True:
            print('Записано на будущеее... ')
    else:
        # Without credentials there is no config to run with; quit.
        print('нету chat.ini\n')
        exit_chat()
def get_file_handler():
    """Create an INFO-level, UTF-8 file handler writing to a timestamped `<logfile>.<yymmddHHMM>.log`."""
    stamp = datetime.today().strftime('%y%m%d%H%M')
    handler = logging.FileHandler(logfile + '.' + stamp + '.log', 'w', 'utf-8')
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter('%(message)s'))
    return handler
if __name__ == '__main__':
    tx = """conchat
    Скачать новую версию:
    https://gofile.io/d/KheQAd
    """
    print(tx)
    # Fallback starting message id when no saved position is available.
    lm = 1400000
    log_handler = get_file_handler()
    if not os.path.isfile('chat.ini'):
        setup()
    # Open (or create) the file that remembers the last displayed message id.
    try:
        file = open('lastMessageNum.txt', 'r+')
        write = False
    except:
        file = open('lastMessageNum.txt', 'w')
        write = True
    try:
        if not write:
            lines = file.readlines()
            # Rewind by 50 so the last few messages are re-shown on restart.
            lm = int(lines[0][:-1]) - 50
    except:
        print('Ошибка чтения последнего сообщения...ничего страшного, просто для информации')
        lm = 1
    chat = conchat()
    try:
        chat.startChat(lm, log_handler)
    except KeyboardInterrupt:
        print(' выход из приложения ctl+c KeyboardInterrupt')
        exit_chat()
| 38.051753 | 649 | 0.508709 | import requests
import time
import logging
from datetime import datetime
import _thread
import os
import sys
import configparser
import string
from cryptography.fernet import Fernet
import platform
from requests.api import request
import torrequest
import enchant
import re
import random
import pyttsx3
from builtins import print
from bs4 import BeautifulSoup
from console.printers import print
version = '1.0.18b'
localsystem = platform.system()
if localsystem == 'Windows':
import pythoncom
logfile = 'chat.log'
class _TTS:
engine = None
rate = None
def __init__(self):
if localsystem == 'Windows':
pythoncom.CoInitializeEx(0)
self.engine = pyttsx3.init()
self.engine.setProperty('voice', 'russian')
def start(self, text_):
self.engine.say(text_)
self.engine.runAndWait()
class _Request:
tor = None
def __init__(self):
self.tor = torrequest.TorRequest(proxy_port=7050, ctrl_port=7051, password=None)
def post(self, url, headers, json, data):
res = self.tor.post(url, headers, json, data)
self.tor.close()
return res
class conchat:
def __init__(self):
self.renew()
def myprint(self, text):
print(text)
def renew(self):
print("""
╭━━━╮╱╱╱╱╱╱╱╱╭╮╱╱╱╱╭╮
┃╭━╮┃╱╱╱╱╱╱╱╱┃┃╱╱╱╭╯╰╮
┃┃╱╰╋━━┳━╮╭━━┫╰━┳━┻╮╭╯
┃┃╱╭┫╭╮┃╭╮┫╭━┫╭╮┃╭╮┃┃
┃╰━╯┃╰╯┃┃┃┃╰━┫┃┃┃╭╮┃╰╮
╰━━━┻━━┻╯╰┻━━┻╯╰┻╯╰┻━╯
""")
print('conchat - ' + localsystem + ' ver.' + version)
self.conf = configparser.ConfigParser()
self.conf.read('chat.ini')
self.uid = self.conf['chat']['uid']
self.sid = self.conf['chat']['sid']
self.name = self.conf['chat']['name']
self.session = self.conf['chat']['session']
self.csrf_token = self.conf['chat']['csrf_token']
self.useTor = int(self.conf['chat']['useTor'])
self.spellCheck = int(self.conf['chat']['spellCheck'])
self.logfile = self.conf['chat']['logFile']
self.useLog = int(self.conf['chat']['log'])
self.logOnlyMode = int(self.conf['chat']['logOnlyMode'])
self.subLepra = self.conf['chat']['subLepra']
self.say = int(self.conf['chat']['say'])
if self.subLepra != '':
self.subLepra = str(self.subLepra) + '.'
print('https://' + str(self.subLepra) + 'leprosorium.ru/')
self.encr = False
self.pauseChat = False
if 'plaintext' in self.conf['chat']:
self.plaintext = int(self.conf['chat']['plaintext'])
else:
self.plaintext = 0
if 'yinfo' in self.conf['chat']:
self.yinfo = int(self.conf['chat']['yinfo'])
else:
self.yinfo = 0
if 'ydownload' in self.conf['chat']:
self.ydownload = int(self.conf['chat']['ydownload'])
else:
self.ydownload = 0
if 'silent' in self.conf['chat']:
self.silent = int(self.conf['chat']['silent'])
else:
self.silent = 0
self.intervalGetMess = 11
self.getUrl = 'https://' + str(self.subLepra) + 'leprosorium.ru/ajax/chat/load/'
self.addUrl = 'https://' + str(self.subLepra) + 'leprosorium.ru/ajax/chat/add/'
self.apiUrl = 'https://leprosorium.ru/api/'
self.headers = {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'en-US,en;q=0.9,ru;q=0.8', 'Connection': 'keep-alive', 'Content-Length': '0', 'Host': str(self.subLepra) + 'leprosorium.ru', 'Origin': 'https://' + str(self.subLepra) + 'leprosorium.ru', 'Referer': 'https://' + str(self.subLepra) + 'leprosorium.ru/', 'Sec-Fetch-Dest': 'empty', 'Sec-Fetch-Mode': 'cors', 'Sec-Fetch-Site': 'same-origin', 'Cookie': 'wikilepro_session=' + self.session + '; uid=' + self.uid + '; sid=' + self.sid, 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'}
self.form = {'last_message_id': lm, 'csrf_token': self.csrf_token}
self.Red = 31
self.Green = 32
self.Yellow = 33
self.Blue = 34
self.Purple = 35
self.tor = None
self.myprint('команды приложения:')
self.myprint('#spel - проверка правописания, левописания')
self.myprint('#lepra - указать подлепру')
self.myprint('#exit - выход')
self.myprint('#enc - шифрование сообщения общим ключом')
self.myprint('ENTER - пауза вывода сообщений')
self.myprint('#say - читать сообщения в слух')
self.myprint('#plaintext - текстовый формат консоли')
self.myprint('#tor - использование тора')
self.myprint('#yinfo - информация о youtube ссылах')
self.myprint('#ydownload - скачиавание youtube ссылок')
self.myprint('#silent - не отсылать сообщения в чат')
def get_logger(self, name):
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
logger.addHandler(self.log_handler)
return logger
def mess_loop(self, lm):
while True:
if self.pauseChat == True:
time.sleep(2)
lms = self.AddMessages(lm)
if lms > lm:
file = open('lastMessageNum.txt', 'w')
file.write(str(lm) + '\n')
lm = lms
time.sleep(self.intervalGetMess)
def sendMessage_loop(self):
while True:
try:
msg = input()
self.sendMessage(msg)
except:
self.exit_chat()
def html_special_chars(self, text):
return text.replace('&', '&').replace('"', '"').replace(''', "'").replace('<', '<').replace('>', '>').replace('–', '-')
def sendMessage(self, msg):
send = True
if '/python.exe' in msg:
send = False
if 'nyok' in msg:
send = False
if '#enc' in str(msg):
key = self.load_key()
f = Fernet(key)
msg = str(f.encrypt(msg.encode()))
if str(msg) == '':
self.myprint('Вывод сообщений на паузе....пока ты набираешь текст.')
self.pauseChat = True
send = False
if 'posts' in str(msg):
if 'mixed' in str(msg):
self.printPosts('mixed')
elif 'main' in str(msg):
self.printPosts('main')
elif 'personal' in str(msg):
self.printPosts('personal')
else:
self.myprint('Ошибка в выражении posts [mixed,main,personal]')
self.myprint('posts mixed')
send = False
if '#spell' in str(msg):
if self.spellCheck == 1:
self.conf.set('chat', 'spellCheck', '0')
self.spellCheck = 0
self.myprint('spellCheck=0')
else:
self.conf.set('chat', 'spellCheck', '1')
self.spellCheck = 1
self.myprint('spellCheck=1')
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#lepra' in str(msg):
self.subLepra = input('Подлепра:')
self.conf.set('chat', 'subLepra', self.subLepra)
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
self.renew()
send = False
if '#say' in str(msg):
if self.say == 1:
self.myprint('say=0')
self.conf.set('chat', 'say', '0')
self.say = 0
else:
self.myprint('say=1')
self.conf.set('chat', 'say', '1')
self.say = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#tor' in str(msg):
if self.useTor == 1:
self.myprint('useTor=0')
self.conf.set('chat', 'useTor', '0')
self.useTor = 0
else:
self.myprint('useTor=1')
self.conf.set('chat', 'useTor', '1')
self.useTor = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#exit' in str(msg):
self.exit_chat()
if '#plaintext' in str(msg):
if self.plaintext == 1:
self.myprint('plaintext=0')
self.conf.set('chat', 'plaintext', '0')
self.plaintext = 0
else:
self.myprint('plaintext=1')
self.conf.set('chat', 'plaintext', '1')
self.plaintext = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#yinfo' in str(msg):
if self.yinfo == 1:
self.myprint('yinfo=0')
self.conf.set('chat', 'yinfo', '0')
self.yinfo = 0
else:
self.myprint('yinfo=1')
self.conf.set('chat', 'yinfo', '1')
self.yinfo = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#silent' in str(msg):
if self.silent == 1:
self.myprint('silent=0')
self.conf.set('chat', 'silent', '0')
self.silent = 0
else:
self.myprint('silent=1')
self.conf.set('chat', 'silent', '1')
self.silent = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if '#ydownload' in str(msg):
if self.ydownload == 1:
self.myprint('ydownload=0')
self.conf.set('chat', 'ydownload', '0')
self.ydownload = 0
else:
self.myprint('ydownload=1')
self.conf.set('chat', 'ydownload', '1')
self.ydownload = 1
with open('chat.ini', 'w') as config_file:
self.conf.write(config_file)
send = False
if self.spellCheck == 1:
c = enchant.Dict('ru_RU')
msg = str(msg)
for word in re.findall('[А-Яа-я]+', str(msg)):
if c.check(word) == False:
lst = c.suggest(word)
if len(lst) > 0:
sugid = random.randint(0, len(lst) - 1)
wrd = lst[sugid]
msg = msg.replace(word, wrd, 1)
if self.silent == 1:
send = False
if send == True:
form2 = {'last': lm, 'csrf_token': self.csrf_token, 'body': msg}
tc = self.req()
tc.post(self.addUrl, headers=self.headers, json=form2, data=form2)
self.pauseChat = False
def req(self):
if self.useTor == 1:
if localsystem == 'Linux':
rq = _Request()
else:
rq = torrequest.TorRequest(proxy_port=7050, ctrl_port=7051, password=None)
return rq
r = requests
return r
def getMessages(self):
try:
tc = self.req()
r = tc.post(self.getUrl, headers=self.headers, json=self.form, data=self.form)
return r.json()
except Exception:
self.myprint('чёта с инетом... \n ')
time.sleep(5)
return
def l(self, text):
self.log.info(text)
def getposts(self, feed_type):
url = self.apiUrl + 'feeds/' + feed_type + '/'
tc = self.req()
r = tc.get(url, headers={'X-Futuware-UID': self.uid, 'X-Futuware-SID': self.id})
result = r.json()
return result
def printPosts(self, feed_type):
posts = self.getposts(feed_type)
for post in posts['posts']:
link = str('\x1b[1;' + str(self.Yellow) + ';40m' + post['_links'][0]['href'] + '\x1b[0m ')
rating = str('\x1b[1;' + str(self.Red) + ';40m' + str(post['rating']) + '\x1b[0m ')
login = str('\x1b[1;' + str(self.Green) + ';40m' + str(post['user']['login']) + '\x1b[0m ')
id = str('\x1b[1;' + str(self.Purple) + ';40m' + str(post['id']) + '\x1b[0m ')
comments_count = str('\x1b[1;' + str(Blue) + ';40m' + str(post['comments_count']) + '\x1b[0m ')
created = datetime.fromtimestamp(post['created'])
today = datetime.today()
d1 = today.strftime('%m/%Y')
d2 = created.strftime('%m/%Y')
if not d1 == d2:
continue
self.myprint('л, ' + link + ' р, ' + rating + ' %, ' + login + ' д, ' + created.strftime('%Y-%m-%d-%H.%M') + ' к,' + comments_count + ' id:' + id)
self.myprint(post['body'])
def load_key(self):
defaultKey = b'jkE4yxD4azCxKL3_R1-kRy6RbZGf0pwxJGAZOtiPg8E='
return defaultKey
def ClearLog(self):
try:
if not os.path.isfile(self.logfile):
return
if os.path.isfile(self.logfile + '.old'):
os.remove(self.logfile + '.old')
os.rename(self.logfile, self.logfile + '.old')
lines_seen = set()
outfile = open(self.logfile, 'w')
for line in open(self.logfile + '.old', 'r'):
if line not in lines_seen:
outfile.write(line)
lines_seen.add(line)
outfile.close()
except:
pass
def InfoYoutube(self, url):
try:
res = pafy.new(url)
print(f'Title: {res.title}')
print(f'Viewcount {res.viewcount}')
print(f'Author: {res.author}')
print(f'Video Length: {res.length}')
print(f'Likes: {res.likes}')
print(f'Dislikes: {res.dislikes}')
print(f'Description: {res.description}')
if 'Artist' in res:
print(f'Artist: {res.artist}')
if 'Artist' in res:
print(f'Song: {res.song}')
except:
pass
return {'title': f'{res.title}', 'length': f'{res.length}'}
def DownloadYoutube(self, url):
result = pafy.new(url)
best_quality_audio = result.getbestaudio()
print('Начинаю загрузку аудио ....')
best_quality_audio.download()
def AddMessages(self, lm):
if self.pauseChat == True:
return lm
result = self.getMessages()
if result == None:
return lm
if 'messages' in result:
mess = result['messages']
else:
mess = []
self.myprint('Ошибка, возможно узел закрыт или Вас забанили...')
return lm
id = 0
for message in mess:
id = message['id']
if int(id) <= int(lm):
continue
login = str(message['user']['login'])
messageText = str(message['body'])
messageText = self.html_special_chars(messageText)
created = datetime.fromtimestamp(message['created'])
if "b'" in str(messageText):
if ' ' not in str(messageText):
messageText = messageText.replace("b'", '').replace("'", '')
try:
self.myprint('Расшифровка - ' + messageText)
key = self.load_key()
f = Fernet(key.decode())
messageText = str(f.decrypt(messageText.encode()).decode())
except:
self.myprint('ХХХХ')
elif self.say == 1:
tts = _TTS()
tts.start(messageText)
del tts
if self.plaintext == 1 and '<' in messageText and '>' in messageText:
soup = BeautifulSoup(messageText, features='lxml')
messageText = soup.getText()
messageText = self.html_special_chars(messageText)
urls = re.findall('((https?):((//)|(\\\\))+([\\w\\d:#@%/;$()~_?\\+-=\\\\.&](#!)?)*)', messageText)
for url in urls:
if 'youtube.com' or 'youtu.be' in url[0]:
title = ''
if self.yinfo == 1:
try:
rs = self.InfoYoutube(url[0])
title = rs['title']
except:
pass
if self.ydownload == 1 and title != '' and int(rs['length']) < 500:
try:
if not os.path.isfile(title + '.webm') and not (not os.path.isfile(title + '.m4a') and not os.path.isfile(title + '.mp3')):
_thread.start_new_thread(self.DownloadYoutube, (url[0],))
except:
pass
if login != self.name and self.name in messageText:
tts = _TTS()
tts.start(messageText)
log_msg = str(created.strftime('%m-%d-%H.%M') + '|' + login + '|' + messageText + '|' + str(id) + '|' + str(message['created']))
mynick = str('\x1b[1;' + str(self.Red) + ';40m' + self.name + ':\x1b[0m ')
messageText = messageText.replace(self.name, mynick)
if login != self.name:
msg = str('\x1b[1;' + str(self.Yellow) + ';40m' + created.strftime('%H.%M') + ':\x1b[0m ' + '\x1b[1;' + str(self.Green) + ';40m' + login + ':\x1b[0m ' + messageText)
else:
msg = str('\x1b[1;' + str(self.Yellow) + ';40m' + created.strftime('%H.%M') + ':\x1b[0m ' + '\x1b[1;' + str(self.Red) + ';40m' + login + ':\x1b[0m ' + messageText)
self.myprint(msg)
log_msg = str(log_msg)
if self.useLog == 1:
self.l(log_msg)
return id
def exit_chat(self):
try:
print('conchat ' + version)
print('by')
print("""
─╔╗───╔╦╗
─║╠╦╦═╬╣╚╦═╦╦╗
╔╣║║║╬║║╔╣╩╣╔╝
╚═╩═╣╔╩╩═╩═╩╝
────╚╝""")
handlers = self.log.handlers.copy()
for handler in handlers:
try:
handler.acquire()
handler.flush()
handler.close()
except (OSError, ValueError):
pass
finally:
handler.release()
self.log.removeHandler(handler)
sys.exit(0)
except SystemExit:
os._exit(0)
def startChat(self, lm, log_handler):
self.log_handler = log_handler
self.log = self.get_logger(__name__)
try:
_thread.start_new_thread(self.mess_loop, (lm,))
except:
self.myprint('Ошибка в потоке сообщений, закрытие приложения')
try:
_thread.start_new_thread(self.sendMessage_loop, ())
except:
self.myprint('Ошибка в потоке отправки, выход....')
while True:
time.sleep(1)
def write_key():
key = Fernet.generate_key()
with open('key.key', 'wb') as key_file:
key_file.write(key)
def load_key():
defaultKey = 'jkE4yxD4azCxKL3_R1-kRy6RbZGf0pwxJGAZOtiPg8E='
return defaultKey
def loginLzd(uname, pas):
lurl = 'https://leprosorium.ru/ajax/auth/login/ '
hdlg = {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'en-US,en;q=0.9,ru;q=0.8', 'Connection': 'keep-alive', 'Content-Length': '0', 'Host': 'leprosorium.ru', 'Referer': 'https://leprosorium.ru/login/', 'Sec-Fetch-Dest': 'empty', 'Sec-Fetch-Mode': 'cors', 'Sec-Fetch-Site': 'same-origin', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'}
formlg = {'username': uname, 'password': pas, 'g-recaptcha-response': ''}
r = requests.post(lurl, headers=hdlg, json=formlg, data=formlg)
d = r.json()
if d['status'] == 'OK':
config = configparser.ConfigParser()
config.add_section('chat')
config.set('chat', 'uid', r.cookies['uid'])
config.set('chat', 'sid', r.cookies['sid'])
config.set('chat', 'session', '')
config.set('chat', 'csrf_token', d['csrf_token'])
config.set('chat', 'logFile', 'chat.log')
config.set('chat', 'spellCheck', '0')
config.set('chat', 'log', '1')
config.set('chat', 'name', uname)
config.set('chat', 'useTor', '0')
config.set('chat', 'logOnlyMode', '0')
config.set('chat', 'subLepra', '')
config.set('chat', 'say', '0')
config.set('chat', 'yinfo', '0')
config.set('chat', 'ydownload', '0')
with open('chat.ini', 'w') as config_file:
config.write(config_file)
return True
return False
def exit_chat():
try:
sys.exit(0)
except SystemExit:
os._exit(0)
def setup():
print('отсутствует chat.ini')
print("""Зайти через логин / пароль?
""")
answer = input('Yes|No\n')
if answer == 'Yes':
log = input('login:')
pas = input('password:')
if loginLzd(log, pas) == True:
print('Записано на будущеее... ')
else:
print('нету chat.ini\n')
exit_chat()
def get_file_handler():
_log_format = '%(message)s'
today = datetime.today()
file_handler = logging.FileHandler(logfile + '.' + today.strftime('%y%m%d%H%M') + '.log', 'w', 'utf-8')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(logging.Formatter(_log_format))
return file_handler
if __name__ == '__main__':
tx = """conchat
Скачать новую версию:
https://gofile.io/d/KheQAd
"""
print(tx)
lm = 1400000
log_handler = get_file_handler()
if not os.path.isfile('chat.ini'):
setup()
try:
file = open('lastMessageNum.txt', 'r+')
write = False
except:
file = open('lastMessageNum.txt', 'w')
write = True
try:
if not write:
lines = file.readlines()
lm = int(lines[0][:-1]) - 50
except:
print('Ошибка чтения последнего сообщения...ничего страшного, просто для информации')
lm = 1
chat = conchat()
try:
chat.startChat(lm, log_handler)
except KeyboardInterrupt:
print(' выход из приложения ctl+c KeyboardInterrupt')
exit_chat()
| true | true |
f7f449717bdd7c8be5f81f65ddd254aa7850f146 | 2,468 | py | Python | juriscraper/opinions/united_states_backscrapers/federal_special/cit_2004.py | umeboshi2/juriscraper | 16abceb3747947593841b1c2708de84dcc85c59d | [
"BSD-2-Clause"
] | null | null | null | juriscraper/opinions/united_states_backscrapers/federal_special/cit_2004.py | umeboshi2/juriscraper | 16abceb3747947593841b1c2708de84dcc85c59d | [
"BSD-2-Clause"
] | null | null | null | juriscraper/opinions/united_states_backscrapers/federal_special/cit_2004.py | umeboshi2/juriscraper | 16abceb3747947593841b1c2708de84dcc85c59d | [
"BSD-2-Clause"
] | 1 | 2021-03-03T00:03:16.000Z | 2021-03-03T00:03:16.000Z | # Scraper for the United States Court of International Trade
# CourtID: cit
# Court Short Name: Ct. Int'l Trade
# Neutral Citation Format: Ct. Int'l Trade No. 12-1
from juriscraper.OpinionSite import OpinionSite
import re
import time
from datetime import date
from lxml import html
class Site(OpinionSite):
    """Backscraper for the U.S. Court of International Trade 2004 slip-opinion page.

    Handles quirks specific to the 2004 listing: one malformed date and
    docket cells that span several lines.
    """

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        # Special backscraper for the problematic 2004 page.
        self.url = 'http://www.cit.uscourts.gov/SlipOpinions/SlipOps-2004.html'
        self.court_id = self.__module__

    def _get_download_urls(self):
        return list(self.html.xpath('//table[3]//tr[position() > 1]/td/font//a/@href'))

    def _get_neutral_citations(self):
        anchors = self.html.xpath('//table[3]//tr[position() > 1]/td[1]/font//a')
        return [html.tostring(a, method='text', encoding='unicode').strip() for a in anchors]

    def _get_case_names(self):
        names = []
        for cell in self.html.xpath('//table[3]//tr[position() > 1]/td[2]/*'):
            text = html.tostring(cell, method='text', encoding='unicode').strip()
            if 'erratum' in text:
                # Drop the trailing "erratum: mm/dd/yyyy" suffix (18 characters).
                names.append(text[:-18])
            else:
                names.append(text)
        return names

    def _get_precedential_statuses(self):
        return ['Published'] * len(self.case_names)

    def _get_case_dates(self):
        dates = []
        for cell in self.html.xpath('//table[3]//tr[position() > 1]/td[3]/font'):
            text = html.tostring(cell, method='text', encoding='unicode').strip()
            if text == '06/012004':
                # The 2004 page contains one malformed date; correct it by hand.
                text = '06/01/2004'
            dates.append(date.fromtimestamp(time.mktime(time.strptime(text.strip(), '%m/%d/%Y'))))
        return dates

    def _get_docket_numbers(self):
        # A row may list several dockets; join the extra lines with ' &'.
        cells = self.html.xpath('//table[3]//tr[position() > 1]/td[4]/font')
        return [html.tostring(c, method='text', encoding='unicode').strip().replace('\r\n', ' &') for c in cells]
| 41.830508 | 115 | 0.62034 |
# Neutral Citation Format: Ct. Int'l Trade No. 12-1
from juriscraper.OpinionSite import OpinionSite
import re
import time
from datetime import date
from lxml import html
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.url = 'http://www.cit.uscourts.gov/SlipOpinions/SlipOps-2004.html'
self.court_id = self.__module__
def _get_download_urls(self):
return [t for t in self.html.xpath('//table[3]//tr[position() > 1]/td/font//a/@href')]
def _get_neutral_citations(self):
neutral_citations = []
for e in self.html.xpath('//table[3]//tr[position() > 1]/td[1]/font//a'):
s = html.tostring(e, method='text', encoding='unicode').strip()
neutral_citations.append(s)
return neutral_citations
def _get_case_names(self):
case_names = []
for e in self.html.xpath('//table[3]//tr[position() > 1]/td[2]/*'):
s = html.tostring (e, method='text', encoding='unicode').strip()
if "erratum" in s:
case_names.append(s.strip()[:-18])
else:
case_names.append(s.strip())
return case_names
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
def _get_case_dates(self):
case_dates = []
for e in self.html.xpath('//table[3]//tr[position() > 1]/td[3]/font'):
s = html.tostring (e, method='text', encoding='unicode').strip()
if s == '06/012004':
case_dates.append(date.fromtimestamp(time.mktime(time.strptime('06/01/2004'.strip(), '%m/%d/%Y'))))
else:
case_dates.append(date.fromtimestamp(time.mktime(time.strptime(s.strip(), '%m/%d/%Y'))))
return case_dates
def _get_docket_numbers(self):
docket_numbers = []
for e in self.html.xpath('//table[3]//tr[position() > 1]/td[4]/font'):
s = html.tostring (e, method='text', encoding='unicode').strip()
docket_numbers.append(s.replace('\r\n', ' &'))
return docket_numbers
| true | true |
f7f4497f7f244d0627ff3814f0818c0ab826af39 | 425 | py | Python | services/authService/setup.py | anaquin135/modularCPQ | af8575a407813c6ef3e3c0ca3258266f0bc6a4e7 | [
"MIT"
] | null | null | null | services/authService/setup.py | anaquin135/modularCPQ | af8575a407813c6ef3e3c0ca3258266f0bc6a4e7 | [
"MIT"
] | null | null | null | services/authService/setup.py | anaquin135/modularCPQ | af8575a407813c6ef3e3c0ca3258266f0bc6a4e7 | [
"MIT"
] | null | null | null | from app import db, bcrypt
from app.models import USER
db.drop_all()
db.create_all()
sampleUser = USER()
sampleUser.firstName = "Bob"
sampleUser.lastName = "Dylan"
sampleUser.jobTitle = "CIO"
sampleUser.email = "admin@email.com"
sampleUser.password = bcrypt.generate_password_hash('wasspord')
sampleUser.isActive = True
sampleUser.accessLevel = 2
sampleUser.isPwdExp = False
db.session.add(sampleUser)
db.session.commit()
| 22.368421 | 63 | 0.781176 | from app import db, bcrypt
from app.models import USER
db.drop_all()
db.create_all()
sampleUser = USER()
sampleUser.firstName = "Bob"
sampleUser.lastName = "Dylan"
sampleUser.jobTitle = "CIO"
sampleUser.email = "admin@email.com"
sampleUser.password = bcrypt.generate_password_hash('wasspord')
sampleUser.isActive = True
sampleUser.accessLevel = 2
sampleUser.isPwdExp = False
db.session.add(sampleUser)
db.session.commit()
| true | true |
f7f44a1730d5677097e10a6715951c37e8e5460b | 4,305 | py | Python | cythonize/difference.py | RubenPants/RobotSimulator2D | 334d7b9cab0edb22d4670cfaf39fbed76c351758 | [
"MIT"
] | null | null | null | cythonize/difference.py | RubenPants/RobotSimulator2D | 334d7b9cab0edb22d4670cfaf39fbed76c351758 | [
"MIT"
] | null | null | null | cythonize/difference.py | RubenPants/RobotSimulator2D | 334d7b9cab0edb22d4670cfaf39fbed76c351758 | [
"MIT"
] | null | null | null | """
difference.py
Visualize the difference between the python and the cython file.
"""
import difflib
import os
# name, path to python file, path to cython file
files = [
('multi environment', 'environment/env_multi.py', 'environment/cy/env_multi_cy.pyx'),
('game', 'environment/entities/game.py', 'environment/entities/cy/game_cy.pyx'),
('robots', 'environment/entities/robots.py', 'environment/entities/cy/robots_cy.pyx'),
('sensors', 'environment/entities/sensors.py', 'environment/entities/cy/sensors_cy.pyx'),
('intersection', 'utils/intersection.py', 'utils/cy/intersection_cy.pyx'),
('line2d', 'utils/line2d.py', 'utils/cy/line2d_cy.pyx'),
('vec2d', 'utils/vec2d.py', 'utils/cy/vec2d_cy.pyx'),
('test drive', 'tests/drive_test.py', 'tests/cy/drive_test_cy.py'),
('test intersection', 'tests/intersection_test.py', 'tests/cy/intersection_test_cy.py'),
('test sensors', 'tests/sensors_test.py', 'tests/cy/sensors_test_cy.py'),
]
def match(python_file, cython_file):
"""
Match the cython-file to the (original) python file.
:param python_file: String representing the python-file
:param cython_file: String representing the cython-file
:return: Difference-lists
"""
# Get git-wise diff file
diff = difflib.unified_diff(python_file, cython_file, fromfile='py', tofile='cy', lineterm='')
lines = [l for l in diff][2:]
# Python-code (minus)
py = []
concat = False
for l in lines:
if not concat and l[0] in ['+', '-']:
py.append("")
concat = True
elif l[0] not in ['+', '-']:
concat = False
# Add if necessary
if l[0] == '-': py[-1] += f'\n{l[1:]}' if len(py[-1]) > 0 else f'{l[1:]}'
# Cython-code (plus)
cy = []
concat = False
for l in lines:
if not concat and l[0] in ['+', '-']:
cy.append("")
concat = True
elif l[0] not in ['+', '-']:
concat = False
# Add if necessary
if l[0] == '+': cy[-1] += f'\n{l[1:]}' if len(cy[-1]) > 0 else f'{l[1:]}'
# Both lists must be equally long
assert len(py) == len(cy)
# Remove empty segments
to_remove = []
for i_block in range(len(py)):
if (py[i_block].replace(" ", "") == "") and (cy[i_block].replace(" ", "") == ""): to_remove.append(i_block)
for rm in reversed(to_remove):
del py[rm]
del cy[rm]
return py, cy
def pretty_print(py, cy):
"""Pretty print the two lists."""
# Enroll the diff-blocks
py_unrolled = [line.split("\n") for line in py]
cy_unrolled = [line.split("\n") for line in cy]
# Define the maximum length of a single line for both the py and cy segments
max_py = max({len(line) for block in py_unrolled for line in block})
max_cy = max({len(line) for block in cy_unrolled for line in block})
# Enlarge the blocks such that they contain an equal amount of lines
for i_block in range(len(py_unrolled)):
while len(py_unrolled[i_block]) > len(cy_unrolled[i_block]):
cy_unrolled[i_block].append("")
while len(py_unrolled[i_block]) < len(cy_unrolled[i_block]):
py_unrolled[i_block].append("")
assert len(py_unrolled[i_block]) == len(cy_unrolled[i_block])
# Print out the differences
print(f"{'PYTHON':^{max_py}} | {'CYTHON':^{max_cy}}")
print("-" * (max_py + 3 + max_cy))
for i_block in range(len(py_unrolled)):
for i_line in range(len(py_unrolled[i_block])):
print(f"{py_unrolled[i_block][i_line]:{max_py}} | {cy_unrolled[i_block][i_line]:{max_cy}}")
print("-" * (max_py + 3 + max_cy))
if __name__ == '__main__':
os.chdir("..")
for name, f_py, f_cy in files:
print(f"\n\n\n==> ANALYZING: {name}\n")
# Load in the files as a list, split on the new-line symbol
with open(f_py, 'r') as f:
contents_py = f.read().split('\n')
with open(f_cy, 'r') as f:
contents_cy = f.read().split('\n')
# Match the two files with each other
diff_py, diff_cy = match(contents_py, contents_cy)
# Pretty print the difference of the two files
pretty_print(diff_py, diff_cy)
| 36.794872 | 115 | 0.595122 | import difflib
import os
files = [
('multi environment', 'environment/env_multi.py', 'environment/cy/env_multi_cy.pyx'),
('game', 'environment/entities/game.py', 'environment/entities/cy/game_cy.pyx'),
('robots', 'environment/entities/robots.py', 'environment/entities/cy/robots_cy.pyx'),
('sensors', 'environment/entities/sensors.py', 'environment/entities/cy/sensors_cy.pyx'),
('intersection', 'utils/intersection.py', 'utils/cy/intersection_cy.pyx'),
('line2d', 'utils/line2d.py', 'utils/cy/line2d_cy.pyx'),
('vec2d', 'utils/vec2d.py', 'utils/cy/vec2d_cy.pyx'),
('test drive', 'tests/drive_test.py', 'tests/cy/drive_test_cy.py'),
('test intersection', 'tests/intersection_test.py', 'tests/cy/intersection_test_cy.py'),
('test sensors', 'tests/sensors_test.py', 'tests/cy/sensors_test_cy.py'),
]
def match(python_file, cython_file):
diff = difflib.unified_diff(python_file, cython_file, fromfile='py', tofile='cy', lineterm='')
lines = [l for l in diff][2:]
py = []
concat = False
for l in lines:
if not concat and l[0] in ['+', '-']:
py.append("")
concat = True
elif l[0] not in ['+', '-']:
concat = False
if l[0] == '-': py[-1] += f'\n{l[1:]}' if len(py[-1]) > 0 else f'{l[1:]}'
cy = []
concat = False
for l in lines:
if not concat and l[0] in ['+', '-']:
cy.append("")
concat = True
elif l[0] not in ['+', '-']:
concat = False
if l[0] == '+': cy[-1] += f'\n{l[1:]}' if len(cy[-1]) > 0 else f'{l[1:]}'
assert len(py) == len(cy)
to_remove = []
for i_block in range(len(py)):
if (py[i_block].replace(" ", "") == "") and (cy[i_block].replace(" ", "") == ""): to_remove.append(i_block)
for rm in reversed(to_remove):
del py[rm]
del cy[rm]
return py, cy
def pretty_print(py, cy):
py_unrolled = [line.split("\n") for line in py]
cy_unrolled = [line.split("\n") for line in cy]
max_py = max({len(line) for block in py_unrolled for line in block})
max_cy = max({len(line) for block in cy_unrolled for line in block})
for i_block in range(len(py_unrolled)):
while len(py_unrolled[i_block]) > len(cy_unrolled[i_block]):
cy_unrolled[i_block].append("")
while len(py_unrolled[i_block]) < len(cy_unrolled[i_block]):
py_unrolled[i_block].append("")
assert len(py_unrolled[i_block]) == len(cy_unrolled[i_block])
print(f"{'PYTHON':^{max_py}} | {'CYTHON':^{max_cy}}")
print("-" * (max_py + 3 + max_cy))
for i_block in range(len(py_unrolled)):
for i_line in range(len(py_unrolled[i_block])):
print(f"{py_unrolled[i_block][i_line]:{max_py}} | {cy_unrolled[i_block][i_line]:{max_cy}}")
print("-" * (max_py + 3 + max_cy))
if __name__ == '__main__':
os.chdir("..")
for name, f_py, f_cy in files:
print(f"\n\n\n==> ANALYZING: {name}\n")
with open(f_py, 'r') as f:
contents_py = f.read().split('\n')
with open(f_cy, 'r') as f:
contents_cy = f.read().split('\n')
diff_py, diff_cy = match(contents_py, contents_cy)
pretty_print(diff_py, diff_cy)
| true | true |
f7f44b24cd750618b84b8aa36ad3b59bbd84b42d | 653 | py | Python | utils/shuffler.py | Luke-zhang-04/Sorting_Algorithms | dc89ce0f1651252b30509062fddff72d423689a3 | [
"Unlicense"
] | 1 | 2020-01-03T10:09:33.000Z | 2020-01-03T10:09:33.000Z | utils/shuffler.py | Luke-zhang-04/Sorting_Algorithms | dc89ce0f1651252b30509062fddff72d423689a3 | [
"Unlicense"
] | 1 | 2020-06-11T13:46:57.000Z | 2020-06-11T13:46:57.000Z | utils/shuffler.py | Luke-zhang-04/Sorting_Algorithms | dc89ce0f1651252b30509062fddff72d423689a3 | [
"Unlicense"
] | 2 | 2020-01-03T10:17:39.000Z | 2020-06-13T06:15:17.000Z | from random import shuffle
from typing import List, Union
def randomSequence(*args: Union[List[int], range]) -> List[int]:
"""Returns a shuffled array\n
args can be either stop, start stop, and start stop step, or a range object
"""
if isinstance(args[0], int):
if (len(args)) == 1:
array = [i for i in range(0, args[0])]
elif len(args) == 2:
array = [i for i in range(args[0], args[1])]
elif len(args) == 3:
array = [i for i in range(args[0], args[1], args[2])]
elif isinstance(args[0], range):
array = [i for i in args[0]]
shuffle(array)
return array
| 29.681818 | 79 | 0.57121 | from random import shuffle
from typing import List, Union
def randomSequence(*args: Union[List[int], range]) -> List[int]:
if isinstance(args[0], int):
if (len(args)) == 1:
array = [i for i in range(0, args[0])]
elif len(args) == 2:
array = [i for i in range(args[0], args[1])]
elif len(args) == 3:
array = [i for i in range(args[0], args[1], args[2])]
elif isinstance(args[0], range):
array = [i for i in args[0]]
shuffle(array)
return array
| true | true |
f7f44cf6afa95c6dd3cb5da02fde0db50f00aa7a | 974 | py | Python | backend/appengine/routes/andris/categoria.py | andris210296/andris-projeto | 41c0af031152d06ef0af4ac8031bbded86533528 | [
"MIT"
] | 3 | 2015-05-04T16:04:08.000Z | 2018-07-18T17:15:16.000Z | backend/appengine/routes/andris/categoria.py | andris210296/andris-projeto | 41c0af031152d06ef0af4ac8031bbded86533528 | [
"MIT"
] | null | null | null | backend/appengine/routes/andris/categoria.py | andris210296/andris-projeto | 41c0af031152d06ef0af4ac8031bbded86533528 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from config.template_middleware import TemplateResponse
from routes.andris.categoria_produto.categoria_produtoM import Produto,Categoria
@login_not_required
@no_csrf
def index(categoria):
categoria = Categoria.get_by_id(int(categoria))
categoria_query = Categoria.query_ordenada_por_nome()
categorias = categoria_query.fetch()
produto_query = Produto.query_por_categoria_ordenada_por_nome(categoria)
produtos = produto_query.fetch()
for cat in categorias:
cat.QtdProd = len(Produto.query_por_categoria_ordenada_por_nome(Categoria.get_by_id(int(cat.key.id()))).fetch())
contexto = {'categoria_lista':categorias,'produto_lista':produtos,'categoria_produto':categoria}
return TemplateResponse(contexto,template_path='/andris/categoria_produto.html')
| 32.466667 | 120 | 0.799795 |
from __future__ import absolute_import, unicode_literals
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from config.template_middleware import TemplateResponse
from routes.andris.categoria_produto.categoria_produtoM import Produto,Categoria
@login_not_required
@no_csrf
def index(categoria):
categoria = Categoria.get_by_id(int(categoria))
categoria_query = Categoria.query_ordenada_por_nome()
categorias = categoria_query.fetch()
produto_query = Produto.query_por_categoria_ordenada_por_nome(categoria)
produtos = produto_query.fetch()
for cat in categorias:
cat.QtdProd = len(Produto.query_por_categoria_ordenada_por_nome(Categoria.get_by_id(int(cat.key.id()))).fetch())
contexto = {'categoria_lista':categorias,'produto_lista':produtos,'categoria_produto':categoria}
return TemplateResponse(contexto,template_path='/andris/categoria_produto.html')
| true | true |
f7f44e070df47b8fe2d9439588e1657eee4c7139 | 708 | py | Python | t5-base/model.py | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon | 410c549488b0e2ceece067a9e1581e182a11e885 | [
"MIT"
] | 6 | 2020-08-26T13:00:11.000Z | 2021-12-28T18:58:43.000Z | t5-base/model.py | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon | 410c549488b0e2ceece067a9e1581e182a11e885 | [
"MIT"
] | null | null | null | t5-base/model.py | shanayghag/AV-Janatahack-Independence-Day-2020-ML-Hackathon | 410c549488b0e2ceece067a9e1581e182a11e885 | [
"MIT"
] | 1 | 2020-08-24T08:34:19.000Z | 2020-08-24T08:34:19.000Z | from torch import nn
from transformers import T5ForConditionalGeneration
from config import config
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.t5_model = T5ForConditionalGeneration.from_pretrained(config.MODEL_PATH)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
lm_labels=None
):
return self.t5_model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
lm_labels=lm_labels,
) | 26.222222 | 85 | 0.653955 | from torch import nn
from transformers import T5ForConditionalGeneration
from config import config
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.t5_model = T5ForConditionalGeneration.from_pretrained(config.MODEL_PATH)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
lm_labels=None
):
return self.t5_model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
lm_labels=lm_labels,
) | true | true |
f7f44ffe6e059afe2080c921ca43a2789718a4b3 | 21,632 | py | Python | utils/gen_doc.py | fossabot/onnx-mlir | ed1377c26b1be69b9b0ed6942025197491ca6c7e | [
"Apache-2.0"
] | null | null | null | utils/gen_doc.py | fossabot/onnx-mlir | ed1377c26b1be69b9b0ed6942025197491ca6c7e | [
"Apache-2.0"
] | null | null | null | utils/gen_doc.py | fossabot/onnx-mlir | ed1377c26b1be69b9b0ed6942025197491ca6c7e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from io import StringIO
import io
import os
import sys
import datetime
import argparse
import numpy as np # type: ignore
from onnx import defs, FunctionProto, helper, OperatorStatus
from onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN
from onnx.backend.test.case import collect_snippets
from onnx.backend.sample.ops import collect_sample_implementations
from typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run-onnx-ops",
help="Output ONNXOps.td.inc content to stdout.",
action="store_true",
default=False)
parser.add_argument("--dry-run-op-build-table",
help="Output OpBuildTable.inc content to stdout.",
action="store_true",
default=False)
args = parser.parse_args()
# Manual specification of attribute defaults.
special_attr_defaults = dict([
# ("AveragePool.kernel_shape", ('ints', '{}')),
# ("MaxPool.kernel_shape", ('ints', '{}')),
# ("Cast.to", ('int', '0')),
# ("Concat.axis", ('int', '0')),
# ("Conv.group", ('int', '1')),
# ("Unsqueeze.axes", ('ints', '{}')),
# ("RNN.activation_alpha", ('floats', '{}')),
# ("RNN.activation_beta", ('floats', '{}')),
])
# Special operation importing handlers.
special_op_handler = dict([
("MaxPool", "ImportNodeMaxPool"),
("BatchNormalization", "ImportNodeBatchNormalization"),
("Pad", "ImportNodePad"),
("Reshape", "ImportNodeReshape"),
#("Transpose", "ImportNodeTranspose")
])
# Operations supporting shape inference.
OpsWithShapeInference = [
'Exp', 'Tanh', 'Sinh', 'Cosh', 'Sigmoid', 'Relu', 'Add', 'Mul', 'Div',
'Sub', 'And', 'Or', 'Xor', 'Sum', 'Max', 'Min', 'MatMul', 'Gemm',
'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',
'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',
'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',
'Sign', 'Constant', 'AveragePool', 'Abs', 'Conv', 'Concat', 'Neg'
]
# Operations supporting canonicalization.
OpsWithCanonicalizer = ['Add', 'Identity', 'Gemm', 'Conv']
# Operations who have operands that, if produced by constant operations, should
# be promoted to become an attribute (via attribute promotion).
#
# For each operation, a key/value pair is used to specify how attribute promotion
# should proceed. The key is the operation's name and the value is a list of
# tuples, whose first item is the attribute/operand name, and the second item is
# the index at which such operand occurs in the list of the operation's inputs.
OpsWithPromotableConstOperands = {"Reshape": [("shape", 1)]}
# Add an Op in this list if the Op needs result type deduction which is required
# when writing declarative rewriting rules. Deduced type is always
# an UnrankedTensorType whose element type is the same as the first operand's
# element type.
#
# Currenlty, there are only two build methods generated:
# - one with operands and attributes having a separate parameter, and
# - one with operands and attributes having aggregated parameters.
custom_builder_ops_list = ['Abs', 'Mul', 'Exp', 'ReduceSum', 'ReduceSumSquare']
SNIPPETS = collect_snippets()
SAMPLE_IMPLEMENTATIONS = collect_sample_implementations()
ONNX_ML = not bool(os.getenv('ONNX_ML') == '0')
ONNX_ML = False
sys.stderr.write("ONNX_ML {}\n".format(ONNX_ML))
if ONNX_ML:
ext = '-ml.md'
else:
ext = '.md'
def should_render_domain(domain): # type: (Text) -> bool
if domain == ONNX_ML_DOMAIN and not ONNX_ML:
return False
elif ONNX_ML and domain != ONNX_ML_DOMAIN:
return False
return True
def display_attr_type(v): # type: (OpSchema.AttrType) -> Text
assert isinstance(v, OpSchema.AttrType)
s = Text(v)
s = s[s.rfind('.') + 1:].lower()
if s[-1] == 's':
s = 'list of ' + s
return s
def get_unique_output_name(schema, name):
for input in schema.inputs:
if input.name == name:
return 'out_' + name
return name
def onnx_attr_type_to_mlir_attr_type(t):
onnx_attr_type = Text(t)
onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()
if onnx_attr_type == 'int':
mlir_attr_type = 'I64Attr'
elif onnx_attr_type == 'float':
mlir_attr_type = 'F32Attr'
elif onnx_attr_type == 'ints':
mlir_attr_type = 'I64ArrayAttr'
elif onnx_attr_type == 'floats':
mlir_attr_type = 'F32ArrayAttr'
elif onnx_attr_type == "string":
mlir_attr_type = 'StrAttr'
elif onnx_attr_type == "strings":
mlir_attr_type = 'StrArrayAttr'
else:
mlir_attr_type = 'AnyAttr'
#TODO: tensor and sparse tensor
return mlir_attr_type
#TODO: any better way to do this.
def tblgen_attr_type_to_cpp_type(t):
if 'I64Attr' in t:
cpp_type = 'IntegerAttr'
elif 'F32Attr' in t:
cpp_type = 'FloatAttr'
elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:
cpp_type = 'ArrayAttr'
elif 'StrAttr' in t:
cpp_type = 'StringAttr'
elif 'strings' in t:
cpp_type = 'ArrayAttr'
else:
cpp_type = 'Attribute'
return cpp_type
def tblgen_operand_type_to_cpp_type(op_type):
if op_type.startswith('Variadic'):
mytype = 'ValueRange'
else:
mytype = 'Value'
return mytype
def np_type_to_tblgen_attr_type(tstr):
tfrom = np.array([
'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',
'float', 'double'
])
tto = np.array(
['I1', 'I8', 'I16', 'I32', 'I64', 'BF16', 'F16', 'F32', 'F64'])
index = -1
for i in range(len(tfrom)):
if tfrom[i] in tstr:
index = i
break
if index == -1:
print("error", tstr)
return ''
else:
return tto[i]
def get_allowed_elem_types(schema, input):
allowed_types_str = None
return allowed_types_str
# TODO: enable type constraints.
# if input.typeStr :
# tstr = input.typeStr
# else :
# return allwedTypeStr
# if schema.type_constraints:
# for type_constraint in schema.type_constraints:
# if type_constraint.type_param_str != tstr :
# continue
# allowedTypes = type_constraint.allowed_type_strs
# allowedTypeStr=''
# if (len(allowedTypes) > 0):
# t = convert_type(allowedTypes[0])
# if t == '' :
# return ''
# allowedTypeStr += t
# for allowedType in allowedTypes[1:]:
# t = convert_type(allowedType)
# if t == '' :
# return ''
# if not t in allowedTypeStr :
# allowedTypeStr += ', '+t
#
# return allowedTypeStr
#
# return allowedTypeStr
def inc_indent(indent=None):
return "" if indent is None else indent + ' ' * 2
def dec_indent(indent):
return indent[:-2]
def join_args(args):
return ", ".join(args)
def get_operands_or_results(schema, is_input):
value_list = schema.inputs if is_input else schema.outputs
if not value_list:
return OrderedDict()
def any_type_of(types):
assert isinstance(types, list)
if len(types) == 1:
return types[0]
else:
return "AnyTypeOf<[{}]>".format(", ".join(types))
name_to_types = OrderedDict()
for i, value in enumerate(value_list):
elem_types = get_allowed_elem_types(schema, value)
if elem_types is None:
types = ["AnyMemRef", "AnyTensor"]
else:
types = ["TensorOf<[{}]>", "MemRefOf<[{}]>"]
types = list(map(lambda x: x.format(elem_types), types))
# If operand is promotable to an attribute, then it must be
# nullable in case it migrates to be an attribute.
if schema.name in OpsWithPromotableConstOperands:
idxs = dict(OpsWithPromotableConstOperands[schema.name]).values()
if i in idxs:
types.append("NoneType")
if OpSchema.FormalParameterOption.Optional == value.option:
types.append("NoneType")
elif OpSchema.FormalParameterOption.Variadic == value.option:
if value.isHomogeneous:
types = ["Variadic<{}>".format(any_type_of(types))]
else:
#TODO handle(variadic, heterogeneous) "
sys.stderr.write("warning: (variadic, heterogeneous) for" + schema.name +
' ' + value.name + "\n")
# Since output name can coincide with that of an input, we explicitly
# append a suffix "_out" to such names for disambiguation.
if is_input:
value_name = value.name
else:
value_name = get_unique_output_name(schema, value.name)
name_to_types[value_name] = any_type_of(types)
return name_to_types
def get_attrs(schema):
def get_attr_type_optional(attr_type):
return 'OptionalAttr<{}>'.format(
onnx_attr_type_to_mlir_attr_type(attr_type))
def get_attr_type_with_default(attr_type, attr_default):
return 'DefaultValuedAttr<{}, "{}">'.format(
onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)
if not schema.attributes:
return OrderedDict()
name_to_type = OrderedDict()
for _, attr in sorted(schema.attributes.items()):
qualified_attr_name = "{}.{}".format(schema.name, attr.name)
if qualified_attr_name in special_attr_defaults:
name_to_type[attr.name] = get_attr_type_with_default(
*special_attr_defaults[qualified_attr_name])
# option holds either required or default value
elif attr.required:
name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(
attr.type)
elif attr.default_value.name:
def format_value(value): # type: (Any) -> Text
if isinstance(value, float):
formatted = str(np.round(value, 5))
# use default formatting, unless too long.
if (len(formatted) > 10):
formatted = str("({:e})".format(value))
return formatted
elif isinstance(
value,
(bytes, bytearray)) and sys.version_info[0] == 3:
return str(value.decode('utf-8'))
return str(value)
default_value = helper.get_attribute_value(attr.default_value)
if isinstance(default_value, list):
default_value = [format_value(val) for val in default_value]
default_value_str = '{}'.format(default_value)
default_value_str = default_value_str.replace('[', '{', 1)
default_value_str = default_value_str.replace(']', '}', 1)
if Text(attr.type) == "AttrType.STRINGS":
default_value_str = default_value_str.replace("'", '\\"')
else:
default_value_str = default_value_str.replace("'", '')
else:
default_value = format_value(default_value)
default_value_str = default_value
name_to_type[attr.name] = get_attr_type_with_default(
attr.type, default_value_str)
else:
name_to_type[attr.name] = get_attr_type_optional(attr.type)
return name_to_type
def get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):
cpp_name_to_idx_literal = "{" + ", ".join([
"{{\"{}\", {}}}".format(*name_to_idx)
for name_to_idx in const_operands_name_to_idx
]) + "}"
s += indent + "let extraClassDeclaration = [{\n"
indent = inc_indent(indent)
s += indent + "std::map<std::string, size_t> promotableConstOperands() {\n"
indent = inc_indent(indent)
s += indent + "return {};\n".format(cpp_name_to_idx_literal)
indent = dec_indent(indent)
s += indent + "}\n"
indent = dec_indent(indent)
s += indent + "}];\n"
return s
def gen_op_def(schema):
indent = inc_indent()
s = 'def ONNX{0}Op:ONNX_Op<"{0}",\n'.format(schema.name)
# Generate decl for op traits.
traits = ["NoSideEffect"]
if schema.name in OpsWithShapeInference:
traits.append("DeclareOpInterfaceMethods<ShapeInferenceOpInterface>")
if schema.name in OpsWithPromotableConstOperands.keys():
traits.append("OpInterface<\"PromotableConstOperandsOpInterface\">")
s += inc_indent(indent) + '[{}]> {{\n'.format(join_args(traits))
# Generate decl for canonicalizer.
indent = inc_indent(indent)
if schema.name in OpsWithCanonicalizer:
s += indent + 'let hasCanonicalizer = 1;\n'
# Generate decl for summary.
s += indent + 'let summary = "ONNX {} operation";\n'.format(schema.name)
# Generate description.
s += indent + 'let description = [{\n'
if schema.doc:
lines = schema.doc.lstrip().splitlines()
for line in lines:
escaped_line = line.replace('"', '\\"')\
.replace('}]', '\\}\\]')
s += indent + '"{}"\n'.format(escaped_line)
s += indent + '}];\n'
# Generate ins (consisting of operands and attributes).
ins = get_operands_or_results(schema, is_input=True)
ins.update(get_attrs(schema))
ins_strs = ["{1}:${0}".format(*i) for i in ins.items()]
s += indent + 'let arguments = (ins {});\n'.format(
(',\n' + inc_indent(indent)).join(ins_strs))
# Generate outs (operation results).
outs = get_operands_or_results(schema, is_input=False)
outs_strs = ["{1}:${0}".format(*i) for i in outs.items()]
s += indent + 'let results = (outs {});\n'.format(
(',\n' + inc_indent(indent)).join(outs_strs))
# add custom builders
# use element type of the first operand to construct an UnrankedTensorType for the output.
if schema.name in custom_builder_ops_list:
if len(ins) == 0:
raise RuntimeWarning(
"warning: not generate custom build methods for " +
schema.name + " since it does not have operands.")
else:
s += indent + 'let builders = [\n'
# Custom builders with operands and attributes having a seperate parameter.
# E.g. OpBuilder<"Builder *builder, OperationState &state, Value X, Value, Y, Attribute A", [{}]>
indent = inc_indent(indent)
s += indent + 'OpBuilder<"Builder *builder, OperationState &state'
operands_dict = get_operands_or_results(schema, is_input=True)
for name, ty in operands_dict.items():
s += ', {} {}'.format(tblgen_operand_type_to_cpp_type(ty),
name)
for name, ty in get_attrs(schema).items():
s += ', {} {}'.format(tblgen_attr_type_to_cpp_type(ty), name)
s += '", [{\n'
indent = inc_indent(indent)
# Get output type from first operand's type.
first_operand_name = list(ins.items())[0][0]
s += indent + 'auto elementType = {}.getType().cast<TensorType>().getElementType();\n'.format(
first_operand_name)
s += indent + 'build(builder, state, UnrankedTensorType::get(elementType)'
for name, _ in ins.items():
s += ', ' + name
s += ');\n'
indent = dec_indent(indent)
s += indent + '}]>,\n'
# Custom builders with all operands and attributes having aggregate parameters.
# E.g. OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{}]>'
s += indent + 'OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{\n'
indent = inc_indent(indent)
s += indent + 'auto elementType = operands[0].getType().cast<TensorType>().getElementType();\n'
s += indent + 'std::vector<mlir::Type> outputTypes;\n'
s += indent + 'outputTypes.emplace_back(UnrankedTensorType::get(elementType));\n'
s += indent + 'build(builder, state, outputTypes, operands, attributes);\n'
indent = dec_indent(indent)
s += indent + '}]>'
s += '\n' + indent + '];\n'
if schema.name in OpsWithPromotableConstOperands:
s = get_promotable_const_operands_func(
s, indent, OpsWithPromotableConstOperands[schema.name])
s += '}\n\n'
return s
"""
special cases:
* Split: attr split default value: sizeof(output1) namely 1
* Conv: attr dilations default value is {num_dim of first input - 2, 1}
* Conv: attr kernel_shape type is ints
* Transpose: attr perm default value is {} empty int list
"""
def gen_op_importer(schema, file):
indent = inc_indent()
s = indent + 'if (opName == "' + schema.name + '")\n'
expected_num_operands = len(schema.inputs)
expected_num_results = len(schema.outputs)
for input in schema.inputs:
if OpSchema.FormalParameterOption.Variadic == input.option:
expected_num_operands = -1
for output in schema.outputs:
if OpSchema.FormalParameterOption.Variadic == output.option:
expected_num_results = -1
handler_func = special_op_handler.get(
schema.name, "buildOperation<mlir::ONNX{}Op>".format(schema.name))
# Special handlers currently require expected num operands/results to be specified.
# TODO: remove special handlers.
args = ["node"]
if expected_num_operands != -1 or expected_num_results != -1 or "buildOperation" not in handler_func:
args.append(
"/* expected_num_operands = */ {}".format(expected_num_operands))
args.append(
'/* expected_num_results = */ {}'.format(expected_num_results))
s += inc_indent(indent) + "return {}({});\n".format(
handler_func, ", ".join(args))
file.write(s)
def build_operator_schemas():
# domain -> support level -> name -> [schema]
index = defaultdict(lambda: defaultdict(lambda: defaultdict(
list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]
for schema in defs.get_all_schemas_with_history():
index[schema.domain][int(
schema.support_level)][schema.name].append(schema)
# Preprocess the Operator Schemas
# [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]
operator_schemas = list(
) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]
exsting_ops = set() # type: Set[Text]
for domain, _supportmap in sorted(index.items()):
if not should_render_domain(domain):
continue
processed_supportmap = list()
for _support, _namemap in sorted(_supportmap.items()):
processed_namemap = list()
for n, unsorted_versions in sorted(_namemap.items()):
versions = sorted(unsorted_versions,
key=lambda s: s.since_version)
schema = versions[-1]
if schema.name in exsting_ops:
continue
exsting_ops.add(schema.name)
processed_namemap.append((n, schema, versions))
processed_supportmap.append((_support, processed_namemap))
operator_schemas.append((domain, processed_supportmap))
return operator_schemas
def main(args): # type: (Type[Args]) -> None
curr_utc_time = datetime.datetime.now(
datetime.timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")
autogen_warning = (
'//********************************************************\n'
'// Do not modify this file directly.\n'
'// This file is automatically generated via script.\n'
'// Details can be found in docs/readonnxdefs.md .\n'
'//********************************************************\n\n')
autogen_warning = autogen_warning.format(curr_utc_time)
op_def = args.op_def
op_def.write(autogen_warning)
op_importer = args.op_importer
op_importer.write(autogen_warning)
for domain, supportmap in build_operator_schemas():
for _, namemap in supportmap:
for op_type, schema, versions in namemap:
gen_op_importer(schema, op_importer)
r = gen_op_def(schema)
op_def.write(r)
if __name__ == '__main__':
curr_dir = os.path.dirname(os.path.realpath(__file__))
class Args(object):
if args.dry_run_onnx_ops:
op_def = StringIO()
else:
op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')
op_def = io.open(op_def_file_path, 'w', newline='')
if args.dry_run_op_build_table:
op_importer = StringIO()
else:
op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')
op_importer = io.open(op_importer_file_path, 'w', newline='')
main(Args)
if args.dry_run_onnx_ops:
sys.stdout.write(Args.op_def.getvalue())
if args.dry_run_op_build_table:
sys.stdout.write(Args.op_importer.getvalue())
| 37.62087 | 143 | 0.611964 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from io import StringIO
import io
import os
import sys
import datetime
import argparse
import numpy as np
from onnx import defs, FunctionProto, helper, OperatorStatus
from onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN
from onnx.backend.test.case import collect_snippets
from onnx.backend.sample.ops import collect_sample_implementations
from typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple
# Command-line interface: each flag redirects the corresponding generated
# file to stdout instead of writing it next to this script.
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run-onnx-ops",
                    help="Output ONNXOps.td.inc content to stdout.",
                    action="store_true",
                    default=False)
parser.add_argument("--dry-run-op-build-table",
                    help="Output OpBuildTable.inc content to stdout.",
                    action="store_true",
                    default=False)
args = parser.parse_args()
# Per-attribute default overrides, keyed by "<OpName>.<attr name>" and mapped
# to an (attr type, default value) pair consumed by get_attrs(); currently
# empty.
special_attr_defaults = dict([
])
# Ops whose importer entry calls a hand-written handler instead of the
# generic buildOperation<...> template (see gen_op_importer).
special_op_handler = dict([
    ("MaxPool", "ImportNodeMaxPool"),
    ("BatchNormalization", "ImportNodeBatchNormalization"),
    ("Pad", "ImportNodePad"),
    ("Reshape", "ImportNodeReshape"),
])
# Ops that declare the ShapeInference op interface in their TableGen record.
OpsWithShapeInference = [
    'Exp', 'Tanh', 'Sinh', 'Cosh', 'Sigmoid', 'Relu', 'Add', 'Mul', 'Div',
    'Sub', 'And', 'Or', 'Xor', 'Sum', 'Max', 'Min', 'MatMul', 'Gemm',
    'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',
    'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',
    'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',
    'Sign', 'Constant', 'AveragePool', 'Abs', 'Conv', 'Concat', 'Neg'
]
# Ops that set `let hasCanonicalizer = 1;` in their TableGen record.
OpsWithCanonicalizer = ['Add', 'Identity', 'Gemm', 'Conv']
# tuples, whose first item is the attribute/operand name, and the second item is
# the index at which such operand occurs in the list of the operation's inputs.
OpsWithPromotableConstOperands = {"Reshape": [("shape", 1)]}
# Ops that receive custom build methods deriving the result from the first
# operand's element type.
#
# Currently, there are only two build methods generated:
#  - one with operands and attributes having a separate parameter, and
#  - one with operands and attributes having aggregated parameters.
custom_builder_ops_list = ['Abs', 'Mul', 'Exp', 'ReduceSum', 'ReduceSumSquare']
# NOTE(review): SNIPPETS / SAMPLE_IMPLEMENTATIONS are collected but never
# referenced in this file -- presumably kept for parity with onnx's
# gen_doc script; confirm before removing.
SNIPPETS = collect_snippets()
SAMPLE_IMPLEMENTATIONS = collect_sample_implementations()
ONNX_ML = not bool(os.getenv('ONNX_ML') == '0')
# NOTE(review): the next line unconditionally forces ONNX_ML off, discarding
# the ONNX_ML environment check above -- confirm this is intentional.
ONNX_ML = False
sys.stderr.write("ONNX_ML {}\n".format(ONNX_ML))
# Markdown suffix selection; NOTE(review): `ext` is not referenced elsewhere
# in this file -- looks like a leftover from the doc-generation script.
if ONNX_ML:
    ext = '-ml.md'
else:
    ext = '.md'
def should_render_domain(domain):  # type: (Text) -> bool
    """Return True when operators from `domain` should be emitted.

    Exactly one domain family is rendered per run: the ONNX-ML domain when
    ONNX_ML is enabled, every other (standard ONNX) domain otherwise.
    """
    if ONNX_ML:
        return domain == ONNX_ML_DOMAIN
    return domain != ONNX_ML_DOMAIN
def display_attr_type(v):  # type: (OpSchema.AttrType) -> Text
    """Render an OpSchema.AttrType as readable text, e.g. INTS -> 'list of ints'."""
    assert isinstance(v, OpSchema.AttrType)
    # Keep only the enum member name (text after the final dot), lower-cased.
    name = Text(v).rpartition('.')[2].lower()
    if name[-1] == 's':
        name = 'list of ' + name
    return name
def get_unique_output_name(schema, name):
    """Disambiguate an output name that collides with one of the op's inputs.

    Returns 'out_' + name when `name` matches any formal input of `schema`;
    otherwise returns `name` unchanged.
    """
    input_names = (formal_input.name for formal_input in schema.inputs)
    return 'out_' + name if name in input_names else name
def onnx_attr_type_to_mlir_attr_type(t):
    """Translate an ONNX attribute type into its MLIR/TableGen attribute type.

    `t` is anything whose text form ends in the attribute kind (e.g.
    "AttrType.INT"); the lower-cased last dot-component selects the mapping.
    """
    mapping = {
        'int': 'I64Attr',
        'float': 'F32Attr',
        'ints': 'I64ArrayAttr',
        'floats': 'F32ArrayAttr',
        'string': 'StrAttr',
        'strings': 'StrArrayAttr',
    }
    kind = Text(t)
    kind = kind[kind.rfind('.') + 1:].lower()
    # TODO: tensor and sparse tensor kinds are not modeled yet; they (and
    # anything else unrecognized) fall back to AnyAttr.
    return mapping.get(kind, 'AnyAttr')
#TODO: any better way to do this.
def tblgen_attr_type_to_cpp_type(t):
    """Map a TableGen attribute type string onto the MLIR C++ attribute class.

    Matching is by substring, and the probe order is significant (note that
    'I64Attr' is not a substring of 'I64ArrayAttr', so the scalar probes do
    not shadow the array ones).
    """
    if 'I64Attr' in t:
        return 'IntegerAttr'
    if 'F32Attr' in t:
        return 'FloatAttr'
    if 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:
        return 'ArrayAttr'
    if 'StrAttr' in t:
        return 'StringAttr'
    if 'strings' in t:
        return 'ArrayAttr'
    # Anything unrecognized degrades to the generic base class.
    return 'Attribute'
def tblgen_operand_type_to_cpp_type(op_type):
    """C++ value type for a TableGen operand: ValueRange for variadics, else Value."""
    return 'ValueRange' if op_type.startswith('Variadic') else 'Value'
def np_type_to_tblgen_attr_type(tstr):
    """Map a numpy-style element-type string onto a TableGen type name.

    Matching is by substring, so probe order matters ('float16' must be
    tested before 'float').  On an unrecognized type a diagnostic is
    printed and '' is returned, matching the original behavior.

    Fix: the original returned ``tto[i]`` using the *leaked* loop variable
    rather than the ``index`` it carefully computed -- correct only by
    accident (after ``break``, ``i == index``) and a NameError if the table
    were empty.  The parallel numpy arrays were also unnecessary; a plain
    list of pairs removes both hazards without changing results.
    """
    # ('unkown' [sic] is preserved verbatim: the original mapped that typo
    # to BF16, and callers may rely on it.)
    numpy_to_tblgen = [
        ('bool', 'I1'),
        ('int8', 'I8'),
        ('int16', 'I16'),
        ('int32', 'I32'),
        ('int64', 'I64'),
        ('unkown', 'BF16'),
        ('float16', 'F16'),
        ('float', 'F32'),
        ('double', 'F64'),
    ]
    for np_name, tblgen_name in numpy_to_tblgen:
        if np_name in tstr:
            return tblgen_name
    print("error", tstr)
    return ''
def get_allowed_elem_types(schema, input):
    """Element-type constraint string for `input` of `schema`.

    Type constraints are currently disabled, so this always returns None and
    callers fall back to the fully generic AnyMemRef/AnyTensor types.

    TODO: re-enable type constraints by resolving input.typeStr against
    schema.type_constraints and converting each allowed ONNX type string
    with np_type_to_tblgen_attr_type into a comma-separated list (returning
    '' as soon as any type fails to convert).
    """
    return None
def inc_indent(indent=None):
    """Return `indent` deepened by one level (two spaces); None yields level zero ("")."""
    if indent is None:
        return ""
    return indent + "  "
def dec_indent(indent):
    """Return `indent` shallowed by one level (drops the final two spaces)."""
    return indent[:len(indent) - 2]
def join_args(args):
    """Comma-separate a sequence of argument strings for TableGen output."""
    separator = ", "
    return separator.join(args)
def get_operands_or_results(schema, is_input):
    """Map each formal input (or output) of `schema` to its TableGen type.

    Returns an OrderedDict from value name to a TableGen type expression
    (combinations of AnyTypeOf/Variadic/NoneType).  Output names colliding
    with an input name are disambiguated via get_unique_output_name.
    """
    value_list = schema.inputs if is_input else schema.outputs
    if not value_list:
        return OrderedDict()
    # Render a single type directly; multiple alternatives become AnyTypeOf.
    def any_type_of(types):
        assert isinstance(types, list)
        if len(types) == 1:
            return types[0]
        else:
            return "AnyTypeOf<[{}]>".format(", ".join(types))
    name_to_types = OrderedDict()
    for i, value in enumerate(value_list):
        elem_types = get_allowed_elem_types(schema, value)
        if elem_types is None:
            # No element-type constraint: accept any tensor or memref.
            types = ["AnyMemRef", "AnyTensor"]
        else:
            types = ["TensorOf<[{}]>", "MemRefOf<[{}]>"]
            types = list(map(lambda x: x.format(elem_types), types))
        # If operand is promotable to an attribute, then it must be
        # nullable in case it migrates to be an attribute.
        if schema.name in OpsWithPromotableConstOperands:
            idxs = dict(OpsWithPromotableConstOperands[schema.name]).values()
            if i in idxs:
                types.append("NoneType")
        if OpSchema.FormalParameterOption.Optional == value.option:
            types.append("NoneType")
        elif OpSchema.FormalParameterOption.Variadic == value.option:
            if value.isHomogeneous:
                types = ["Variadic<{}>".format(any_type_of(types))]
            else:
                #TODO handle(variadic, heterogeneous) "
                sys.stderr.write("warning: (variadic, heterogeneous) for" + schema.name +
                                 ' ' + value.name + "\n")
        # Since an output name can coincide with that of an input, we
        # explicitly prepend an "out_" prefix to such names (via
        # get_unique_output_name) for disambiguation.
        if is_input:
            value_name = value.name
        else:
            value_name = get_unique_output_name(schema, value.name)
        name_to_types[value_name] = any_type_of(types)
    return name_to_types
def get_attrs(schema):
    """Map each attribute of `schema` to its TableGen attribute declaration.

    Returns an OrderedDict (sorted by attribute name) whose values are plain
    type names for required attributes, DefaultValuedAttr<...> for attributes
    with a default, and OptionalAttr<...> otherwise.
    """
    # Wrap a type as optional (no default available).
    def get_attr_type_optional(attr_type):
        return 'OptionalAttr<{}>'.format(
            onnx_attr_type_to_mlir_attr_type(attr_type))
    # Wrap a type with an embedded TableGen default-value string.
    def get_attr_type_with_default(attr_type, attr_default):
        return 'DefaultValuedAttr<{}, "{}">'.format(
            onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)
    if not schema.attributes:
        return OrderedDict()
    name_to_type = OrderedDict()
    for _, attr in sorted(schema.attributes.items()):
        qualified_attr_name = "{}.{}".format(schema.name, attr.name)
        # Hand-maintained overrides take precedence over the schema default.
        if qualified_attr_name in special_attr_defaults:
            name_to_type[attr.name] = get_attr_type_with_default(
                *special_attr_defaults[qualified_attr_name])
        # option holds either required or default value
        elif attr.required:
            name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(
                attr.type)
        elif attr.default_value.name:
            # Render one scalar default as text suitable for TableGen.
            def format_value(value):  # type: (Any) -> Text
                if isinstance(value, float):
                    formatted = str(np.round(value, 5))
                    # use default formatting, unless too long.
                    if (len(formatted) > 10):
                        formatted = str("({:e})".format(value))
                    return formatted
                elif isinstance(
                        value,
                        (bytes, bytearray)) and sys.version_info[0] == 3:
                    return str(value.decode('utf-8'))
                return str(value)
            default_value = helper.get_attribute_value(attr.default_value)
            if isinstance(default_value, list):
                default_value = [format_value(val) for val in default_value]
                # Turn the Python list repr into a C++-style brace literal;
                # only the outermost brackets are replaced.
                default_value_str = '{}'.format(default_value)
                default_value_str = default_value_str.replace('[', '{', 1)
                default_value_str = default_value_str.replace(']', '}', 1)
                if Text(attr.type) == "AttrType.STRINGS":
                    default_value_str = default_value_str.replace("'", '\\"')
                else:
                    default_value_str = default_value_str.replace("'", '')
            else:
                default_value = format_value(default_value)
                default_value_str = default_value
            name_to_type[attr.name] = get_attr_type_with_default(
                attr.type, default_value_str)
        else:
            name_to_type[attr.name] = get_attr_type_optional(attr.type)
    return name_to_type
def get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):
    """Append a promotableConstOperands() extraClassDeclaration to `s`.

    `const_operands_name_to_idx` is a list of (operand name, operand index)
    pairs; they are rendered as a C++ std::map<std::string, size_t> literal.
    Returns the extended TableGen string.
    """
    cpp_name_to_idx_literal = "{" + ", ".join([
        "{{\"{}\", {}}}".format(*name_to_idx)
        for name_to_idx in const_operands_name_to_idx
    ]) + "}"
    s += indent + "let extraClassDeclaration = [{\n"
    indent = inc_indent(indent)
    s += indent + "std::map<std::string, size_t> promotableConstOperands() {\n"
    indent = inc_indent(indent)
    s += indent + "return {};\n".format(cpp_name_to_idx_literal)
    indent = dec_indent(indent)
    s += indent + "}\n"
    indent = dec_indent(indent)
    s += indent + "}];\n"
    return s
def gen_op_def(schema):
    """Render the TableGen (ODS) definition for one ONNX operator schema.

    Returns the complete `def ONNX<Name>Op : ONNX_Op<...>` record as a
    string: op traits, canonicalizer flag, summary, escaped description,
    arguments (operands followed by attributes), results, optional custom
    builders and, for ops with promotable constant operands, the
    promotableConstOperands() accessor.
    """
    indent = inc_indent()
    s = 'def ONNX{0}Op:ONNX_Op<"{0}",\n'.format(schema.name)
    # Generate decl for op traits.
    traits = ["NoSideEffect"]
    if schema.name in OpsWithShapeInference:
        traits.append("DeclareOpInterfaceMethods<ShapeInferenceOpInterface>")
    if schema.name in OpsWithPromotableConstOperands.keys():
        traits.append("OpInterface<\"PromotableConstOperandsOpInterface\">")
    s += inc_indent(indent) + '[{}]> {{\n'.format(join_args(traits))
    # Generate decl for canonicalizer.
    indent = inc_indent(indent)
    if schema.name in OpsWithCanonicalizer:
        s += indent + 'let hasCanonicalizer = 1;\n'
    # Generate decl for summary.
    s += indent + 'let summary = "ONNX {} operation";\n'.format(schema.name)
    # Generate description; each doc line is emitted as a quoted string with
    # '"' and the TableGen code-block terminator '}]' escaped.
    s += indent + 'let description = [{\n'
    if schema.doc:
        lines = schema.doc.lstrip().splitlines()
        for line in lines:
            escaped_line = line.replace('"', '\\"')\
                               .replace('}]', '\\}\\]')
            s += indent + '"{}"\n'.format(escaped_line)
    s += indent + '}];\n'
    # Generate ins (consisting of operands and attributes).
    ins = get_operands_or_results(schema, is_input=True)
    ins.update(get_attrs(schema))
    ins_strs = ["{1}:${0}".format(*i) for i in ins.items()]
    s += indent + 'let arguments = (ins {});\n'.format(
        (',\n' + inc_indent(indent)).join(ins_strs))
    # Generate outs (operation results).
    outs = get_operands_or_results(schema, is_input=False)
    outs_strs = ["{1}:${0}".format(*i) for i in outs.items()]
    s += indent + 'let results = (outs {});\n'.format(
        (',\n' + inc_indent(indent)).join(outs_strs))
    # add custom builders
    # use element type of the first operand to construct an UnrankedTensorType for the output.
    if schema.name in custom_builder_ops_list:
        if len(ins) == 0:
            raise RuntimeWarning(
                "warning: not generate custom build methods for " +
                schema.name + " since it does not have operands.")
        else:
            s += indent + 'let builders = [\n'
            # Custom builders with operands and attributes having a separate parameter.
            # E.g. OpBuilder<"Builder *builder, OperationState &state, Value X, Value, Y, Attribute A", [{}]>
            indent = inc_indent(indent)
            s += indent + 'OpBuilder<"Builder *builder, OperationState &state'
            operands_dict = get_operands_or_results(schema, is_input=True)
            for name, ty in operands_dict.items():
                s += ', {} {}'.format(tblgen_operand_type_to_cpp_type(ty),
                                      name)
            for name, ty in get_attrs(schema).items():
                s += ', {} {}'.format(tblgen_attr_type_to_cpp_type(ty), name)
            s += '", [{\n'
            indent = inc_indent(indent)
            # Get output type from first operand's type.
            first_operand_name = list(ins.items())[0][0]
            s += indent + 'auto elementType = {}.getType().cast<TensorType>().getElementType();\n'.format(
                first_operand_name)
            s += indent + 'build(builder, state, UnrankedTensorType::get(elementType)'
            for name, _ in ins.items():
                s += ', ' + name
            s += ');\n'
            indent = dec_indent(indent)
            s += indent + '}]>,\n'
            # Second builder: aggregated operands + attributes parameters.
            s += indent + 'OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{\n'
            indent = inc_indent(indent)
            s += indent + 'auto elementType = operands[0].getType().cast<TensorType>().getElementType();\n'
            s += indent + 'std::vector<mlir::Type> outputTypes;\n'
            s += indent + 'outputTypes.emplace_back(UnrankedTensorType::get(elementType));\n'
            s += indent + 'build(builder, state, outputTypes, operands, attributes);\n'
            indent = dec_indent(indent)
            s += indent + '}]>'
            s += '\n' + indent + '];\n'
    if schema.name in OpsWithPromotableConstOperands:
        s = get_promotable_const_operands_func(
            s, indent, OpsWithPromotableConstOperands[schema.name])
    s += '}\n\n'
    return s
def gen_op_importer(schema, file):
    """Write the importer dispatch entry for `schema` to `file`.

    Emits an `if (opName == "<name>") return <handler>(node, ...);` fragment
    for OpBuildTable.inc.  Variadic inputs/outputs are encoded as an
    expected count of -1 (meaning "any number").
    """
    indent = inc_indent()
    s = indent + 'if (opName == "' + schema.name + '")\n'
    expected_num_operands = len(schema.inputs)
    expected_num_results = len(schema.outputs)
    for input in schema.inputs:
        if OpSchema.FormalParameterOption.Variadic == input.option:
            expected_num_operands = -1
    for output in schema.outputs:
        if OpSchema.FormalParameterOption.Variadic == output.option:
            expected_num_results = -1
    handler_func = special_op_handler.get(
        schema.name, "buildOperation<mlir::ONNX{}Op>".format(schema.name))
    # Special handlers currently require expected num operands/results to be specified.
    # TODO: remove special handlers.
    args = ["node"]
    if expected_num_operands != -1 or expected_num_results != -1 or "buildOperation" not in handler_func:
        args.append(
            "/* expected_num_operands = */ {}".format(expected_num_operands))
        args.append(
            '/* expected_num_results = */ {}'.format(expected_num_results))
    s += inc_indent(indent) + "return {}({});\n".format(
        handler_func, ", ".join(args))
    file.write(s)
def build_operator_schemas():
    """Collect, sort and de-duplicate all ONNX operator schemas.

    Returns the structure
    [(domain, [(support_level, [(op name, current schema, all versions)])])],
    where each op appears once (under its newest since_version) and domains,
    support levels and names are emitted in sorted order.  Domains rejected
    by should_render_domain are skipped entirely.
    """
    # domain -> support level -> op name -> [schema versions]
    index = defaultdict(lambda: defaultdict(lambda: defaultdict(
        list)))  # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]
    for schema in defs.get_all_schemas_with_history():
        index[schema.domain][int(
            schema.support_level)][schema.name].append(schema)
    operator_schemas = list(
    )  # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]
    seen_op_names = set()  # type: Set[Text]
    for domain, support_map in sorted(index.items()):
        if not should_render_domain(domain):
            continue
        rendered_support_levels = list()
        for support_level, name_map in sorted(support_map.items()):
            rendered_names = list()
            for op_name, unsorted_versions in sorted(name_map.items()):
                versions = sorted(unsorted_versions,
                                  key=lambda s: s.since_version)
                # The newest version is the one we generate code for.
                current = versions[-1]
                if current.name in seen_op_names:
                    continue
                seen_op_names.add(current.name)
                rendered_names.append((op_name, current, versions))
            rendered_support_levels.append((support_level, rendered_names))
        operator_schemas.append((domain, rendered_support_levels))
    return operator_schemas
def main(args):  # type: (Type[Args]) -> None
    """Generate the TableGen op definitions and the importer build table.

    Writes an auto-generation warning header to both output streams, then
    emits one TableGen record (via gen_op_def) and one importer dispatch
    entry (via gen_op_importer) per ONNX operator schema.

    :param args: object exposing writable ``op_def`` and ``op_importer``
        file-like attributes (see the ``Args`` class under ``__main__``).
    """
    # NOTE: a previous revision formatted a build timestamp into this
    # header, but the text contains no '{}' placeholder, so the
    # .format(curr_utc_time) call was a no-op and the timestamp
    # computation was dead code; both have been removed.  This also keeps
    # the generated files byte-stable across runs.
    autogen_warning = (
        '//********************************************************\n'
        '// Do not modify this file directly.\n'
        '// This file is automatically generated via script.\n'
        '// Details can be found in docs/readonnxdefs.md .\n'
        '//********************************************************\n\n')
    op_def = args.op_def
    op_def.write(autogen_warning)
    op_importer = args.op_importer
    op_importer.write(autogen_warning)
    # One importer entry and one TableGen definition per operator.
    for domain, supportmap in build_operator_schemas():
        for _, namemap in supportmap:
            for op_type, schema, versions in namemap:
                gen_op_importer(schema, op_importer)
                op_def.write(gen_op_def(schema))
# Script entry point: regenerate ONNXOps.td.inc and OpBuildTable.inc next to
# this script, or print them to stdout when the dry-run flags are given.
if __name__ == '__main__':
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    # Args is used as a plain namespace: the class body executes once at
    # class-creation time and binds op_def/op_importer as *class* attributes,
    # choosing an in-memory buffer for dry runs or a real file otherwise.
    # NOTE(review): files opened here are never explicitly closed; they are
    # only flushed/closed at interpreter exit -- confirm this is acceptable.
    class Args(object):
        if args.dry_run_onnx_ops:
            op_def = StringIO()
        else:
            op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')
            op_def = io.open(op_def_file_path, 'w', newline='')
        if args.dry_run_op_build_table:
            op_importer = StringIO()
        else:
            op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')
            op_importer = io.open(op_importer_file_path, 'w', newline='')
    main(Args)
    # For dry runs, dump the captured in-memory buffers to stdout.
    if args.dry_run_onnx_ops:
        sys.stdout.write(Args.op_def.getvalue())
    if args.dry_run_op_build_table:
        sys.stdout.write(Args.op_importer.getvalue())
| true | true |
f7f450171b08146bcb183cf274cedacce9fdca6b | 4,907 | py | Python | crypten/mpc/primitives/beaver.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | [
"MIT"
] | null | null | null | crypten/mpc/primitives/beaver.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | [
"MIT"
] | null | null | null | crypten/mpc/primitives/beaver.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import crypten.communicator as comm
import torch
from crypten.common.util import count_wraps
def __beaver_protocol(op, x, y, *args, **kwargs):
    """Performs Beaver protocol for additively secret-shared tensors x and y

    1. Obtain uniformly random sharings [a],[b] and [c] = [a * b]
    2. Additively hide [x] and [y] with appropriately sized [a] and [b]
    3. Open ([epsilon] = [x] - [a]) and ([delta] = [y] - [b])
    4. Return [z] = [c] + (epsilon * [b]) + ([a] * delta) + (epsilon * delta)

    `op` names the bilinear operation the triple was generated for and must
    be one of the values asserted below; extra *args/**kwargs are forwarded
    to the underlying torch op (e.g. conv stride/padding).
    """
    assert op in {
        "mul",
        "matmul",
        "conv1d",
        "conv2d",
        "conv_transpose1d",
        "conv_transpose2d",
    }
    provider = crypten.mpc.get_default_provider()
    a, b, c = provider.generate_additive_triple(x.size(), y.size(), op, *args, **kwargs)
    # Vectorized reveal to reduce rounds of communication
    # (imported locally, presumably to avoid a circular import -- confirm).
    from .arithmetic import ArithmeticSharedTensor
    eps_del = ArithmeticSharedTensor.reveal_batch([x - a, y - b])
    epsilon = eps_del[0]
    delta = eps_del[1]
    # z = c + (a * delta) + (epsilon * b) + epsilon * delta
    # epsilon and delta are public after the reveal, so the three updates
    # below are purely local (no further communication rounds).
    c._tensor += getattr(torch, op)(epsilon, b._tensor, *args, **kwargs)
    c += getattr(a, op)(delta, *args, **kwargs)
    c += getattr(torch, op)(epsilon, delta, *args, **kwargs)
    return c
def mul(x, y):
    """Elementwise product of additively secret-shared tensors via Beaver triples."""
    return __beaver_protocol("mul", x, y)
def matmul(x, y):
    """Matrix product of additively secret-shared tensors via Beaver triples."""
    return __beaver_protocol("matmul", x, y)
def conv1d(x, y, **kwargs):
    """1-D convolution of secret-shared tensors; kwargs forwarded to torch.conv1d."""
    return __beaver_protocol("conv1d", x, y, **kwargs)
def conv2d(x, y, **kwargs):
    """2-D convolution of secret-shared tensors; kwargs forwarded to torch.conv2d."""
    return __beaver_protocol("conv2d", x, y, **kwargs)
def conv_transpose1d(x, y, **kwargs):
    """1-D transposed convolution of secret-shared tensors (torch.conv_transpose1d)."""
    return __beaver_protocol("conv_transpose1d", x, y, **kwargs)
def conv_transpose2d(x, y, **kwargs):
    """2-D transposed convolution of secret-shared tensors (torch.conv_transpose2d)."""
    return __beaver_protocol("conv_transpose2d", x, y, **kwargs)
def square(x):
    """Computes the square of `x` for additively secret-shared tensor `x`

    1. Obtain uniformly random sharings [r] and [r2] = [r * r]
    2. Additively hide [x] with appropriately sized [r]
    3. Open ([epsilon] = [x] - [r])
    4. Return z = [r2] + 2 * epsilon * [r] + epsilon ** 2
    """
    provider = crypten.mpc.get_default_provider()
    r, r2 = provider.square(x.size())
    epsilon = (x - r).reveal()
    # (r + epsilon)^2 expanded; epsilon is public after the reveal, so this
    # is a local computation.
    return r2 + 2 * r * epsilon + epsilon * epsilon
def wraps(x):
    """Privately computes the number of wraparounds for a set a shares

    To do so, we note that:
        [theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]

    Where [theta_i] is the wraps for a variable i
          [beta_ij] is the differential wraps for variables i and j
          [eta_ij]  is the plaintext wraps for variables i and j

    Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
    can make the assumption that [eta_xr] = 0 with high probability.
    """
    provider = crypten.mpc.get_default_provider()
    r, theta_r = provider.wrap_rng(x.size())
    beta_xr = theta_r.clone()
    # Local count of wraps between this party's shares of x and r.
    beta_xr._tensor = count_wraps([x._tensor, r._tensor])
    z = x + r
    # Rank 0 gathers every party's share of z to count its plaintext wraps.
    theta_z = comm.get().gather(z._tensor, 0)
    theta_x = beta_xr - theta_r
    # TODO: Incorporate eta_xr
    if x.rank == 0:
        # Only rank 0 holds the gathered shares; it adds theta_z to its own
        # share of the result (valid since theta_z is added exactly once).
        theta_z = count_wraps(theta_z)
        theta_x._tensor += theta_z
    return theta_x
def AND(x, y):
    """
    Performs Beaver protocol for binary secret-shared tensors x and y

    1. Obtain uniformly random sharings [a],[b] and [c] = [a & b]
    2. XOR hide [x] and [y] with appropriately sized [a] and [b]
    3. Open ([epsilon] = [x] ^ [a]) and ([delta] = [y] ^ [b])
    4. Return [c] ^ (epsilon & [b]) ^ ([a] & delta) ^ (epsilon & delta)
    """
    from .binary import BinarySharedTensor
    provider = crypten.mpc.get_default_provider()
    a, b, c = provider.generate_binary_triple(x.size(), y.size())
    # Stack to vectorize reveal
    eps_del = BinarySharedTensor.reveal_batch([x ^ a, y ^ b])
    epsilon = eps_del[0]
    delta = eps_del[1]
    # epsilon/delta are public after the reveal; the combination below is
    # therefore local (single round of communication total).
    return (b & epsilon) ^ (a & delta) ^ (epsilon & delta) ^ c
def B2A_single_bit(xB):
    """Converts a single-bit BinarySharedTensor xB into an
    ArithmeticSharedTensor. This is done by:

    1. Generate ArithmeticSharedTensor [rA] and BinarySharedTensor =rB= with
       a common 1-bit value r.
    2. Hide xB with rB and open xB ^ rB
    3. If xB ^ rB = 0, then return [rA], otherwise return 1 - [rA]
       Note: This is an arithmetic xor of a single bit.
    """
    # With a single party there is nothing secret to convert: the "share"
    # is the plaintext, so wrap it directly (precision=0: integer bits).
    if comm.get().get_world_size() < 2:
        from .arithmetic import ArithmeticSharedTensor
        return ArithmeticSharedTensor(xB._tensor, precision=0, src=0)
    provider = crypten.mpc.get_default_provider()
    rA, rB = provider.B2A_rng(xB.size())
    z = (xB ^ rB).reveal()
    # Arithmetic XOR with the public bit z:  rA ^ z == rA * (1 - 2z) + z.
    rA = rA * (1 - 2 * z) + z
    return rA
| 30.66875 | 88 | 0.627879 |
import crypten
import crypten.communicator as comm
import torch
from crypten.common.util import count_wraps
def __beaver_protocol(op, x, y, *args, **kwargs):
assert op in {
"mul",
"matmul",
"conv1d",
"conv2d",
"conv_transpose1d",
"conv_transpose2d",
}
provider = crypten.mpc.get_default_provider()
a, b, c = provider.generate_additive_triple(x.size(), y.size(), op, *args, **kwargs)
from .arithmetic import ArithmeticSharedTensor
eps_del = ArithmeticSharedTensor.reveal_batch([x - a, y - b])
epsilon = eps_del[0]
delta = eps_del[1]
c._tensor += getattr(torch, op)(epsilon, b._tensor, *args, **kwargs)
c += getattr(a, op)(delta, *args, **kwargs)
c += getattr(torch, op)(epsilon, delta, *args, **kwargs)
return c
def mul(x, y):
return __beaver_protocol("mul", x, y)
def matmul(x, y):
return __beaver_protocol("matmul", x, y)
def conv1d(x, y, **kwargs):
return __beaver_protocol("conv1d", x, y, **kwargs)
def conv2d(x, y, **kwargs):
return __beaver_protocol("conv2d", x, y, **kwargs)
def conv_transpose1d(x, y, **kwargs):
return __beaver_protocol("conv_transpose1d", x, y, **kwargs)
def conv_transpose2d(x, y, **kwargs):
return __beaver_protocol("conv_transpose2d", x, y, **kwargs)
def square(x):
provider = crypten.mpc.get_default_provider()
r, r2 = provider.square(x.size())
epsilon = (x - r).reveal()
return r2 + 2 * r * epsilon + epsilon * epsilon
def wraps(x):
provider = crypten.mpc.get_default_provider()
r, theta_r = provider.wrap_rng(x.size())
beta_xr = theta_r.clone()
beta_xr._tensor = count_wraps([x._tensor, r._tensor])
z = x + r
theta_z = comm.get().gather(z._tensor, 0)
theta_x = beta_xr - theta_r
if x.rank == 0:
theta_z = count_wraps(theta_z)
theta_x._tensor += theta_z
return theta_x
def AND(x, y):
from .binary import BinarySharedTensor
provider = crypten.mpc.get_default_provider()
a, b, c = provider.generate_binary_triple(x.size(), y.size())
eps_del = BinarySharedTensor.reveal_batch([x ^ a, y ^ b])
epsilon = eps_del[0]
delta = eps_del[1]
return (b & epsilon) ^ (a & delta) ^ (epsilon & delta) ^ c
def B2A_single_bit(xB):
if comm.get().get_world_size() < 2:
from .arithmetic import ArithmeticSharedTensor
return ArithmeticSharedTensor(xB._tensor, precision=0, src=0)
provider = crypten.mpc.get_default_provider()
rA, rB = provider.B2A_rng(xB.size())
z = (xB ^ rB).reveal()
rA = rA * (1 - 2 * z) + z
return rA
| true | true |
f7f450fac5b414ab47b05e898dd715a1a2e6704d | 8,708 | py | Python | sdk/python/pulumi_azure_native/batch/latest/application.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/batch/latest/application.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/batch/latest/application.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Application']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:batch:Application'.""", DeprecationWarning)
class Application(pulumi.CustomResource):
    # NOTE(review): this file is auto-generated by the Pulumi SDK generator
    # (see file header); prefer regenerating over hand edits.
    warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:batch:Application'.""", DeprecationWarning)
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 allow_updates: Optional[pulumi.Input[bool]] = None,
                 application_name: Optional[pulumi.Input[str]] = None,
                 default_version: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Contains information about an application in a Batch account.
        Latest API Version: 2021-01-01.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The name of the Batch account.
        :param pulumi.Input[bool] allow_updates: A value indicating whether packages within the application may be overwritten using the same version string.
        :param pulumi.Input[str] application_name: The name of the application. This must be unique within the account.
        :param pulumi.Input[str] default_version: The package to use if a client requests the application but does not specify a version. This property can only be set to the name of an existing package.
        :param pulumi.Input[str] display_name: The display name for the application.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the Batch account.
        """
        pulumi.log.warn("""Application is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:batch:Application'.""")
        # Legacy positional-style arguments are accepted but deprecated.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means we are adopting an existing resource; in that
        # case __props__ is supplied by get() and inputs are not validated.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__['account_name'] = account_name
            __props__['allow_updates'] = allow_updates
            __props__['application_name'] = application_name
            __props__['default_version'] = default_version
            __props__['display_name'] = display_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            # Output-only properties start as None and are filled in by the
            # engine once the resource is created.
            __props__['etag'] = None
            __props__['name'] = None
            __props__['type'] = None
        # Aliases let the engine treat resources created under older
        # API-versioned type tokens as the same resource (avoids replacement
        # on upgrade).
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:batch/latest:Application"), pulumi.Alias(type_="azure-native:batch:Application"), pulumi.Alias(type_="azure-nextgen:batch:Application"), pulumi.Alias(type_="azure-native:batch/v20151201:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20151201:Application"), pulumi.Alias(type_="azure-native:batch/v20170101:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20170101:Application"), pulumi.Alias(type_="azure-native:batch/v20170501:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20170501:Application"), pulumi.Alias(type_="azure-native:batch/v20170901:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20170901:Application"), pulumi.Alias(type_="azure-native:batch/v20181201:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20181201:Application"), pulumi.Alias(type_="azure-native:batch/v20190401:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20190401:Application"), pulumi.Alias(type_="azure-native:batch/v20190801:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20190801:Application"), pulumi.Alias(type_="azure-native:batch/v20200301:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20200301:Application"), pulumi.Alias(type_="azure-native:batch/v20200501:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20200501:Application"), pulumi.Alias(type_="azure-native:batch/v20200901:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20200901:Application"), pulumi.Alias(type_="azure-native:batch/v20210101:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20210101:Application")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Application, __self__).__init__(
            'azure-native:batch/latest:Application',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Application':
        """
        Get an existing Application resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Only output properties are rehydrated on lookup; inputs such as
        # account_name / resource_group_name are not recoverable here.
        __props__ = dict()
        __props__["allow_updates"] = None
        __props__["default_version"] = None
        __props__["display_name"] = None
        __props__["etag"] = None
        __props__["name"] = None
        __props__["type"] = None
        return Application(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="allowUpdates")
    def allow_updates(self) -> pulumi.Output[Optional[bool]]:
        """
        A value indicating whether packages within the application may be overwritten using the same version string.
        """
        return pulumi.get(self, "allow_updates")
    @property
    @pulumi.getter(name="defaultVersion")
    def default_version(self) -> pulumi.Output[Optional[str]]:
        """
        The package to use if a client requests the application but does not specify a version. This property can only be set to the name of an existing package.
        """
        return pulumi.get(self, "default_version")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[Optional[str]]:
        """
        The display name for the application.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        The ETag of the resource, used for concurrency statements.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
    def translate_output_property(self, prop):
        # Engine hook: map provider camelCase names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Engine hook: map Python snake_case names to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 54.086957 | 1,643 | 0.681213 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Application']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:batch:Application'.""", DeprecationWarning)
class Application(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:batch:Application'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
allow_updates: Optional[pulumi.Input[bool]] = None,
application_name: Optional[pulumi.Input[str]] = None,
default_version: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
pulumi.log.warn("""Application is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:batch:Application'.""")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
__props__['allow_updates'] = allow_updates
__props__['application_name'] = application_name
__props__['default_version'] = default_version
__props__['display_name'] = display_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['etag'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:batch/latest:Application"), pulumi.Alias(type_="azure-native:batch:Application"), pulumi.Alias(type_="azure-nextgen:batch:Application"), pulumi.Alias(type_="azure-native:batch/v20151201:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20151201:Application"), pulumi.Alias(type_="azure-native:batch/v20170101:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20170101:Application"), pulumi.Alias(type_="azure-native:batch/v20170501:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20170501:Application"), pulumi.Alias(type_="azure-native:batch/v20170901:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20170901:Application"), pulumi.Alias(type_="azure-native:batch/v20181201:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20181201:Application"), pulumi.Alias(type_="azure-native:batch/v20190401:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20190401:Application"), pulumi.Alias(type_="azure-native:batch/v20190801:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20190801:Application"), pulumi.Alias(type_="azure-native:batch/v20200301:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20200301:Application"), pulumi.Alias(type_="azure-native:batch/v20200501:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20200501:Application"), pulumi.Alias(type_="azure-native:batch/v20200901:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20200901:Application"), pulumi.Alias(type_="azure-native:batch/v20210101:Application"), pulumi.Alias(type_="azure-nextgen:batch/v20210101:Application")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Application, __self__).__init__(
'azure-native:batch/latest:Application',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Application':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allow_updates"] = None
__props__["default_version"] = None
__props__["display_name"] = None
__props__["etag"] = None
__props__["name"] = None
__props__["type"] = None
return Application(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowUpdates")
def allow_updates(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "allow_updates")
@property
@pulumi.getter(name="defaultVersion")
def default_version(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "default_version")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7f452b8b0bc1e604c5b8c914d90d7fc7184a803 | 231 | py | Python | apps/empresas/urls.py | andreFSilva/gestao_rh | 9e0bfce3c594186ed8b9acea12e4dff13337148e | [
"MIT"
] | null | null | null | apps/empresas/urls.py | andreFSilva/gestao_rh | 9e0bfce3c594186ed8b9acea12e4dff13337148e | [
"MIT"
] | 5 | 2022-02-18T13:43:34.000Z | 2022-03-31T13:38:47.000Z | apps/empresas/urls.py | andreFSilva/gestao_rh | 9e0bfce3c594186ed8b9acea12e4dff13337148e | [
"MIT"
] | null | null | null | from django.urls import path
from .views import EmpresasView, EmpresasListView
urlpatterns = [
path('', EmpresasView.as_view(), name='empresas'),
path('empresas_list/', EmpresasListView.as_view(), name='empresas_list'),
] | 28.875 | 77 | 0.735931 | from django.urls import path
from .views import EmpresasView, EmpresasListView
urlpatterns = [
path('', EmpresasView.as_view(), name='empresas'),
path('empresas_list/', EmpresasListView.as_view(), name='empresas_list'),
] | true | true |
f7f452edd8d2874e0c8220a022b5689f03907fb2 | 2,800 | py | Python | natural_bm/datasets/fast.py | alexhunterlang/natural_bm | b2a1cb15694f4f3a80a3a1cc6f8423892563806d | [
"MIT"
] | 1 | 2018-06-07T00:54:17.000Z | 2018-06-07T00:54:17.000Z | natural_bm/datasets/fast.py | alexhunterlang/natural_bm | b2a1cb15694f4f3a80a3a1cc6f8423892563806d | [
"MIT"
] | null | null | null | natural_bm/datasets/fast.py | alexhunterlang/natural_bm | b2a1cb15694f4f3a80a3a1cc6f8423892563806d | [
"MIT"
] | null | null | null | """Simplified version of MNIST that is useful for demos and testing """
#%%
import numpy as np
try:
import PIL.Image as Image
except ImportError:
import Image
from natural_bm.datasets.common import Dataset, sample_data, threshold_data, convert2uint8
from natural_bm.datasets import mnist
#%%
class Fast(Dataset):
    """Tiny two-class subset of MNIST (digits 0 and 1 only) for demos/tests.

    Images are downsampled from 28x28 to 14x14 and then cropped to 10x10
    by dropping the mostly-empty 2-pixel border, giving 100-dim vectors.
    """

    def __init__(self, datatype):
        # datatype selects the variant ('probability', 'sampled' or
        # 'threshold'); presumably the Dataset base dispatches to the
        # matching _create_* method when no cached file exists -- confirm.
        super().__init__('fast', datatype)

    def _create_probability(self):
        """Build the real-valued (probability) variant from full MNIST."""
        # Start from the MNIST probabilities
        prob = mnist.MNIST('probability')
        mnist_dataset = prob.dataset_dict

        def shrink_data(data, lbl, n_sample):
            # only keep 0's and 1's
            # subsample to 14 by 14
            # then just drop first 2, last 2 rows/cols since mainly zero
            new_data = np.zeros((2*n_sample, 10**2), dtype='float32')
            # labels: n_sample zeros followed by n_sample ones
            new_lbl = np.concatenate((np.zeros((n_sample, )),
                                      np.ones((n_sample, )))).astype('int32')
            index0 = np.where(lbl == 0)[0][0:n_sample]
            index1 = np.where(lbl == 1)[0][0:n_sample]
            index = np.concatenate((index0, index1))
            for i in range(new_data.shape[0]):
                img = Image.fromarray(data[index[i]].reshape((28, 28)))
                img_down = img.resize((14, 14))
                temp = np.asarray(img_down)
                # crop 2-pixel border: columns first, then rows
                temp = temp[:, 2:-2]
                temp = temp[2:-2]
                new_data[i] = temp.flatten()
            return new_data, new_lbl

        dataset = {}
        for dset in ['train', 'valid', 'test']:
            # 500 examples per class for training, 50 for valid/test
            if dset == 'train':
                num_samples = 500
            else:
                num_samples = 50
            data, lbl = shrink_data(mnist_dataset[dset+'.data'],
                                    mnist_dataset[dset+'.lbl'],
                                    num_samples)
            dataset[dset+'.data'] = data
            dataset[dset+'.lbl'] = lbl
        # save the dataset
        np.savez_compressed(self.savename, **dataset)

    def _create_sampled(self):
        """Build the stochastically-binarized variant."""
        # Start from the probabilities
        prob = Fast('probability')
        datasets = prob.dataset_dict
        # do the sampling
        datasets = sample_data(datasets)
        # reduce precision, only need uint8
        datasets = convert2uint8(datasets)
        # Save the dataset
        np.savez_compressed(self.savename, **datasets)

    def _create_threshold(self):
        """Build the deterministically-thresholded binary variant."""
        # Start from the probabilities
        prob = Fast('probability')
        datasets = prob.dataset_dict
        # threshold the data
        datasets = threshold_data(datasets)
        # reduce precision, only need uint8
        datasets = convert2uint8(datasets)
        # Save the dataset
        np.savez_compressed(self.savename, **datasets)
| 31.111111 | 90 | 0.563571 |
import numpy as np
try:
import PIL.Image as Image
except ImportError:
import Image
from natural_bm.datasets.common import Dataset, sample_data, threshold_data, convert2uint8
from natural_bm.datasets import mnist
class Fast(Dataset):
    """Tiny two-class (digits 0 and 1) MNIST subset for demos and tests.

    Each 28x28 MNIST image is resized to 14x14 and cropped to the central
    10x10 region, yielding 100-dimensional vectors.
    """

    def __init__(self, datatype):
        # presumably the Dataset base routes to the matching _create_*
        # method based on datatype -- confirm against Dataset.
        super().__init__('fast', datatype)

    def _create_probability(self):
        """Derive the real-valued variant from the full MNIST dataset."""
        source = mnist.MNIST('probability').dataset_dict

        def downsample(data, lbl, n_per_class):
            # indices of the first n_per_class zeros, then ones
            keep = np.concatenate(
                (np.where(lbl == 0)[0][:n_per_class],
                 np.where(lbl == 1)[0][:n_per_class]))
            shrunk = np.zeros((2 * n_per_class, 10 ** 2), dtype='float32')
            for row, idx in enumerate(keep):
                img = Image.fromarray(data[idx].reshape((28, 28)))
                small = np.asarray(img.resize((14, 14)))
                # drop the mostly-empty 2-pixel border
                shrunk[row] = small[2:-2, 2:-2].flatten()
            labels = np.concatenate(
                (np.zeros((n_per_class, )),
                 np.ones((n_per_class, )))).astype('int32')
            return shrunk, labels

        dataset = {}
        for split in ['train', 'valid', 'test']:
            count = 500 if split == 'train' else 50
            data, lbl = downsample(source[split + '.data'],
                                   source[split + '.lbl'],
                                   count)
            dataset[split + '.data'] = data
            dataset[split + '.lbl'] = lbl
        np.savez_compressed(self.savename, **dataset)

    def _create_sampled(self):
        """Derive the stochastically-binarized variant from probabilities."""
        datasets = Fast('probability').dataset_dict
        datasets = sample_data(datasets)
        datasets = convert2uint8(datasets)
        np.savez_compressed(self.savename, **datasets)

    def _create_threshold(self):
        """Derive the thresholded binary variant from probabilities."""
        datasets = Fast('probability').dataset_dict
        datasets = threshold_data(datasets)
        datasets = convert2uint8(datasets)
        np.savez_compressed(self.savename, **datasets)
| true | true |
f7f4535f06fc137c900ae9eaa837bf15c073f00e | 49 | py | Python | b_hello/hello.py | cclai999/pytest-0706 | 1707a3b4fefee2d97a1f9fbccda80f859e2933cb | [
"MIT"
] | null | null | null | b_hello/hello.py | cclai999/pytest-0706 | 1707a3b4fefee2d97a1f9fbccda80f859e2933cb | [
"MIT"
] | null | null | null | b_hello/hello.py | cclai999/pytest-0706 | 1707a3b4fefee2d97a1f9fbccda80f859e2933cb | [
"MIT"
] | null | null | null | def hello_name(name):
return f'Hello {name}'
| 16.333333 | 26 | 0.673469 | def hello_name(name):
return f'Hello {name}'
| true | true |
f7f454cba0758d1f05815cdfcffd0cde50042d13 | 15,504 | py | Python | kotti/views/util.py | mete0r/Kotti | e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | kotti/views/util.py | mete0r/Kotti | e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | kotti/views/util.py | mete0r/Kotti | e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | import hashlib
import urllib
from collections import defaultdict
from datetime import datetime
from babel.dates import format_date
from babel.dates import format_datetime
from babel.dates import format_time
from babel.numbers import format_currency
from pyramid.decorator import reify
from pyramid.i18n import get_locale_name
from pyramid.location import inside
from pyramid.location import lineage
from pyramid.renderers import get_renderer
from pyramid.renderers import render
from pyramid.settings import asbool
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy import or_
from zope.deprecation.deprecation import deprecate
from zope.deprecation import deprecated
from kotti import DBSession
from kotti import get_settings
from kotti.events import objectevent_listeners
from kotti.interfaces import INavigationRoot
from kotti.resources import Content
from kotti.resources import Document
from kotti.resources import Tag
from kotti.resources import TagsToContents
from kotti.security import has_permission
from kotti.security import view_permitted
from kotti.util import render_view
from kotti.util import TemplateStructure
from kotti.views.site_setup import CONTROL_PANEL_LINKS
from kotti.views.slots import slot_events
class SettingHasValuePredicate(object):
    """View predicate matching when a boolean registry setting equals ``value``.

    ``val`` is a ``(setting_name, expected_bool)`` pair; non-boolean
    expectations are rejected up front.
    """

    def __init__(self, val, config):
        name, value = val
        if not isinstance(value, bool):
            raise ValueError("Only boolean values supported")
        self.name = name
        self.value = value

    def text(self):
        # human-readable form used by Pyramid's introspection
        return "if_setting_has_value = {0} == {1}".format(self.name, self.value)

    phash = text

    def __call__(self, context, request):
        setting = request.registry.settings[self.name]
        return asbool(setting) == self.value
class RootOnlyPredicate(object):
    """View predicate matching when the context is (or is not) the site root."""

    def __init__(self, val, config):
        self.val = val

    def text(self):
        # human-readable form used by Pyramid's introspection
        return "root_only = {0}".format(self.val)

    phash = text

    def __call__(self, context, request):
        context_is_root = context is request.root
        return context_is_root == self.val
def template_api(context, request, **kwargs):
    """Instantiate the configured template API for *context*/*request*.

    The factory is taken from the ``kotti.templates.api`` setting, so
    deployments can substitute their own TemplateAPI subclass.
    """
    factory = get_settings()['kotti.templates.api'][0]
    return factory(context, request, **kwargs)
def add_renderer_globals(event):
    """Renderer-globals subscriber: expose ``api`` to non-JSON templates.

    Reuses ``request.template_api`` when already set, otherwise builds a
    fresh one; JSON renderings are left untouched.
    """
    if event['renderer_name'] == 'json':
        return
    req = event['request']
    api = getattr(req, 'template_api', None)
    if api is None and req is not None:
        api = template_api(event['context'], event['request'])
    event['api'] = api
@deprecate("'is_root' is deprecated as of Kotti 1.0.0. "
           "Use the 'root_only=True' if you were using this as a "
           "'custom_predicates' predicate.")
def is_root(context, request):
    """Deprecated custom predicate: true when *context* is the site root."""
    return context is request.root
class Slots(object):
    """Lazy access to slot snippets: ``slots.<name>`` renders slot ``name``.

    Attribute access looks up the slot event type with a matching name,
    notifies its listeners, and collects their rendered snippets.
    """

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __getattr__(self, name):
        """Render all snippets registered for slot *name*.

        :raises AttributeError: when no slot event type is named *name*.
        """
        for event_type in slot_events:
            if event_type.name == name:
                break
        else:
            raise AttributeError(name)
        value = []
        event = event_type(self.context, self.request)
        for snippet in objectevent_listeners(event):
            # listeners may return None (skip), a snippet, or a list of them
            if snippet is not None:
                if isinstance(snippet, list):
                    value.extend(snippet)
                else:
                    value.append(snippet)
        # cache on the instance so __getattr__ runs once per slot name
        setattr(self, name, value)
        return value
class TemplateAPI(object):
    """This implements the 'api' object that's passed to all
    templates.

    Use dict-access as a shortcut to retrieve template macros from
    templates.
    """

    # Instead of overriding these, consider using the
    # 'kotti.overrides' variable.
    BARE_MASTER = 'kotti:templates/master-bare.pt'
    VIEW_MASTER = 'kotti:templates/view/master.pt'
    EDIT_MASTER = 'kotti:templates/edit/master.pt'
    SITE_SETUP_MASTER = 'kotti:templates/site-setup/master.pt'

    # CSS class applied to the <body> element; templates/views may override.
    body_css_class = ''

    def __init__(self, context, request, bare=None, **kwargs):
        self.context, self.request = context, request

        # register this instance as the request's template API if none yet
        if getattr(request, 'template_api', None) is None:
            request.template_api = self

        self.S = get_settings()
        if request.is_xhr and bare is None:
            bare = True  # use bare template that renders just the content area
        self.bare = bare
        self.slots = Slots(context, request)
        # allow arbitrary extra attributes to be attached via kwargs
        self.__dict__.update(kwargs)

    @reify
    def edit_needed(self):
        """Declare ('need') fanstatic resources for edit mode, if configured."""
        if 'kotti.fanstatic.edit_needed' in self.S:
            return [r.need() for r in self.S['kotti.fanstatic.edit_needed']]

    @reify
    def view_needed(self):
        """Declare ('need') fanstatic resources for view mode, if configured."""
        if 'kotti.fanstatic.view_needed' in self.S:
            return [r.need() for r in self.S['kotti.fanstatic.view_needed']]

    def macro(self, asset_spec, macro_name='main'):
        """Return macro *macro_name* from the template at *asset_spec*.

        When rendering 'bare', the master templates are substituted with
        the bare master so only the content area is rendered.
        """
        if self.bare and asset_spec in (
            self.VIEW_MASTER, self.EDIT_MASTER, self.SITE_SETUP_MASTER):
            asset_spec = self.BARE_MASTER
        return get_renderer(asset_spec).implementation().macros[macro_name]

    @reify
    def site_title(self):
        """ The site title.

        :result: Value of the ``kotti.site_title`` setting (if specified) or
                 the root item's ``title`` attribute.
        :rtype: unicode
        """
        value = get_settings().get('kotti.site_title')
        if not value:
            value = self.root.title
        return value

    @reify
    def page_title(self):
        """
        Title for the current page as used in the ``<head>`` section of the
        default ``master.pt`` template.

        :result: '[Human readable view title ]``context.title`` -
                 :meth:`~TemplateAPI.site_title`''
        :rtype: unicode
        """
        view_title = self.request.view_name.replace('_', ' ').title()
        if view_title:
            view_title += u' '
        view_title += self.context.title
        return u'%s - %s' % (view_title, self.site_title)

    def url(self, context=None, *elements, **kwargs):
        """
        URL construction helper. Just a convenience wrapper for
        :func:`pyramid.request.resource_url` with the same signature. If
        ``context`` is ``None`` the current context is passed to
        ``resource_url``.
        """
        if context is None:
            context = self.context
        return self.request.resource_url(context, *elements, **kwargs)

    @reify
    def root(self):
        """
        The site root.

        :result: The root object of the site.
        :rtype: :class:`kotti.resources.Node`
        """
        return self.lineage[-1]

    @reify
    def navigation_root(self):
        """
        The root node for the navigation.

        :result: Nearest node in the :meth:`lineage` that provides
                 :class:`kotti.interfaces.INavigationRoot` or :meth:`root` if
                 no node provides that interface.
        :rtype: :class:`kotti.resources.Node`
        """
        for o in self.lineage:
            if INavigationRoot.providedBy(o):
                return o
        return self.root

    @reify
    def lineage(self):
        """
        Lineage from current context to the root node.

        :result: List of nodes.
        :rtype: list of :class:`kotti.resources.Node`
        """
        return list(lineage(self.context))

    @reify
    def breadcrumbs(self):
        """
        List of nodes from the :meth:`navigation_root` to the context.

        :result: List of nodes.
        :rtype: list of :class:`kotti.resources.Node`
        """
        breadcrumbs = self.lineage
        if self.root != self.navigation_root:
            # truncate the lineage at the navigation root
            index = breadcrumbs.index(self.navigation_root)
            breadcrumbs = breadcrumbs[:index + 1]
        return reversed(breadcrumbs)

    def has_permission(self, permission, context=None):
        """ Convenience wrapper for :func:`kotti.security.has_permission`
        with the same signature. If ``context`` is ``None`` the current
        context is passed to ``has_permission``."""
        if context is None:
            context = self.context
        return has_permission(permission, context, self.request)

    def render_view(self, name='', context=None, request=None, secure=True,
                    bare=True):
        """Render view *name* of *context* and wrap the result.

        Temporarily switches ``self.bare`` (restored afterwards) so the
        inner rendering uses the bare master by default.
        """
        if context is None:
            context = self.context
        if request is None:
            request = self.request

        before = self.bare
        try:
            self.bare = bare
            html = render_view(context, request, name, secure)
        finally:
            self.bare = before
        return TemplateStructure(html)

    def render_template(self, renderer, **kwargs):
        """Render *renderer* with **kwargs as template variables."""
        return TemplateStructure(render(renderer, kwargs, self.request))

    def list_children(self, context=None, permission='view'):
        """Return the children of *context* the user has *permission* for.

        An empty/falsy *permission* disables the permission filter.
        """
        if context is None:
            context = self.context
        children = []
        if hasattr(context, 'values'):
            for child in context.values():
                if (not permission or
                        has_permission(permission, child, self.request)):
                    children.append(child)
        return children

    # re-export pyramid.location.inside for template convenience
    inside = staticmethod(inside)

    def avatar_url(self, user=None, size="14", default_image='identicon'):
        """Return the Gravatar URL for *user* (default: current user).

        The hash is built from the user's email, falling back to the
        user name when no email is set.
        """
        if user is None:
            user = self.request.user
        email = user.email
        if not email:
            email = user.name
        h = hashlib.md5(email).hexdigest()
        query = {'default': default_image, 'size': str(size)}
        url = 'https://secure.gravatar.com/avatar/%s?%s' % (
            h, urllib.urlencode(query))
        return url

    @reify
    def locale_name(self):
        """Locale name negotiated for the current request."""
        return get_locale_name(self.request)

    def format_date(self, d, format=None):
        """Format a date with babel; default format from ``kotti.date_format``."""
        if format is None:
            format = self.S['kotti.date_format']
        return format_date(d, format=format, locale=self.locale_name)

    def format_datetime(self, dt, format=None):
        """Format a datetime (or POSIX timestamp) with babel.

        Default format comes from ``kotti.datetime_format``.
        """
        if format is None:
            format = self.S['kotti.datetime_format']
        if not isinstance(dt, datetime):
            dt = datetime.fromtimestamp(dt)
        return format_datetime(dt, format=format, locale=self.locale_name)

    def format_time(self, t, format=None):
        """Format a time with babel; default format from ``kotti.time_format``."""
        if format is None:
            format = self.S['kotti.time_format']
        return format_time(t, format=format, locale=self.locale_name)

    def format_currency(self, n, currency, format=None):
        """Format number *n* as *currency* for the current locale."""
        return format_currency(n, currency,
                               format=format, locale=self.locale_name)

    def get_type(self, name):
        """Return the content class whose type name is *name*, or None."""
        for class_ in get_settings()['kotti.available_types']:
            if class_.type_info.name == name:
                return class_

    def find_edit_view(self, item):
        """Return the best-permitted edit view name for *item*.

        Tries the current view name, then ``'edit'``, then the default
        view (empty string).
        """
        view_name = self.request.view_name
        if not view_permitted(item, self.request, view_name):
            view_name = u'edit'
        if not view_permitted(item, self.request, view_name):
            view_name = u''
        return view_name

    @reify
    def edit_links(self):
        """Edit links of the context's type info visible to this request."""
        if not hasattr(self.context, 'type_info'):
            return []
        return [l for l in self.context.type_info.edit_links
                if l.visible(self.context, self.request)]

    @reify
    def site_setup_links(self):
        """Control-panel links visible to this request."""
        return [l for l in CONTROL_PANEL_LINKS
                if l.visible(self.root, self.request)]
class NodesTree(object):
    """Navigation-tree wrapper around a single content node.

    Child access is filtered by *permission*; any attribute not defined
    here is delegated to the wrapped node (see ``__getattr__``).
    """

    def __init__(self, node, request, item_mapping, item_to_children,
                 permission):
        self._node = node
        self._request = request
        self._item_mapping = item_mapping          # node id -> node
        self._item_to_children = item_to_children  # parent id -> [children]
        self._permission = permission

    @property
    def __parent__(self):
        # parent_id resolves on the wrapped node via __getattr__
        if self.parent_id:
            return self._item_mapping[self.parent_id]

    @property
    def children(self):
        """Children wrapped in NodesTree, filtered by the permission."""
        return [
            NodesTree(
                child,
                self._request,
                self._item_mapping,
                self._item_to_children,
                self._permission,
            )
            for child in self._item_to_children[self.id]
            if has_permission(self._permission, child, self._request)
        ]

    def _flatten(self, item):
        # depth-first pre-order traversal yielding raw (unwrapped) nodes
        yield item._node
        for ch in item.children:
            for item in self._flatten(ch):
                yield item

    def tolist(self):
        """Return the subtree as a flat pre-order list of raw nodes."""
        return list(self._flatten(self))

    def __getattr__(self, name):
        # delegate everything else (id, title, parent_id, ...) to the node
        return getattr(self._node, name)
def nodes_tree(request, context=None, permission='view'):
    """Build a :class:`NodesTree` rooted at *context* (or the site root).

    Loads all content in one query, groups children by parent id (sorted
    by position), and wraps the root in a NodesTree filtered by
    *permission*.

    Fix: the child filter previously hard-coded ``'view'``, silently
    ignoring the *permission* argument that :attr:`NodesTree.children`
    honors; it now uses *permission* consistently.
    """
    item_mapping = {}
    item_to_children = defaultdict(list)
    for node in DBSession.query(Content).with_polymorphic(Content):
        item_mapping[node.id] = node
        # honor the caller-supplied permission (was hard-coded 'view')
        if has_permission(permission, node, request):
            item_to_children[node.parent_id].append(node)

    for children in item_to_children.values():
        children.sort(key=lambda ch: ch.position)

    # default to the site root (the single node without a parent)
    if context is None:
        node = item_to_children[None][0]
    else:
        node = context

    return NodesTree(
        node,
        request,
        item_mapping,
        item_to_children,
        permission,
    )
def search_content(search_term, request=None):
    """Dispatch *search_term* to the configured search implementation.

    The implementation is taken from the ``kotti.search_content`` setting.
    """
    search_impl = get_settings()['kotti.search_content'][0]
    return search_impl(search_term, request)
def default_search_content(search_term, request=None):
    """Default implementation behind :func:`search_content`.

    Matches *search_term* (SQL ``LIKE``) against content name/title/
    description, document bodies, and tags, then returns summary dicts
    for every result the requesting user may view.
    """
    searchstring = u'%%%s%%' % search_term

    # generic_filter can be applied to all Node (and subclassed) objects
    generic_filter = or_(Content.name.like(searchstring),
                         Content.title.like(searchstring),
                         Content.description.like(searchstring))

    results = DBSession.query(Content).filter(generic_filter).\
        order_by(Content.title.asc()).all()

    # specific result contain objects matching additional criteria
    # but must not match the generic criteria (because these objects
    # are already in the generic_results)
    document_results = DBSession.query(Document).filter(
        and_(Document.body.like(searchstring),
             not_(generic_filter)))

    # merge tag matches and document-body matches, skipping duplicates
    # (was a side-effect list comprehension with `not c in`)
    for results_set in [content_with_tags([searchstring]),
                        document_results.all()]:
        for item in results_set:
            if item not in results:
                results.append(item)

    result_dicts = []
    for result in results:
        # only expose content the requesting user is allowed to view
        if has_permission('view', result, request):
            result_dicts.append(dict(
                name=result.name,
                title=result.title,
                description=result.description,
                path=request.resource_path(result)))

    return result_dicts
def content_with_tags(tag_terms):
    """Return all content tagged with a tag matching any of *tag_terms*.

    Each term is matched against tag titles with SQL ``LIKE``.
    """
    tag_filter = or_(*[Tag.title.like(term) for term in tag_terms])
    query = DBSession.query(Content).join(TagsToContents).join(Tag)
    return query.filter(tag_filter).all()
def search_content_for_tags(tags, request=None):
    """Return summary dicts for viewable content tagged with any of *tags*."""
    result_dicts = []
    for item in content_with_tags(tags):
        # skip content the requesting user may not view
        if not has_permission('view', item, request):
            continue
        result_dicts.append(dict(
            name=item.name,
            title=item.title,
            description=item.description,
            path=request.resource_path(item)))
    return result_dicts
from kotti.util import (
    get_localizer_for_locale_name,
    translate,
)

# Backward-compatibility aliases: these helpers moved to kotti.util in
# 1.0.0; importing them from here emits a deprecation warning.
for obj in (render_view, get_localizer_for_locale_name, translate,
            TemplateStructure):
    name = obj.__name__
    deprecated(
        name,
        "kotti.views.util.{0} has been moved to the kotti.util module "
        "as of Kotti 1.0.0. Use kotti.util.{0} instead".format(name))
| 31.770492 | 79 | 0.629063 | import hashlib
import urllib
from collections import defaultdict
from datetime import datetime
from babel.dates import format_date
from babel.dates import format_datetime
from babel.dates import format_time
from babel.numbers import format_currency
from pyramid.decorator import reify
from pyramid.i18n import get_locale_name
from pyramid.location import inside
from pyramid.location import lineage
from pyramid.renderers import get_renderer
from pyramid.renderers import render
from pyramid.settings import asbool
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy import or_
from zope.deprecation.deprecation import deprecate
from zope.deprecation import deprecated
from kotti import DBSession
from kotti import get_settings
from kotti.events import objectevent_listeners
from kotti.interfaces import INavigationRoot
from kotti.resources import Content
from kotti.resources import Document
from kotti.resources import Tag
from kotti.resources import TagsToContents
from kotti.security import has_permission
from kotti.security import view_permitted
from kotti.util import render_view
from kotti.util import TemplateStructure
from kotti.views.site_setup import CONTROL_PANEL_LINKS
from kotti.views.slots import slot_events
class SettingHasValuePredicate(object):
def __init__(self, val, config):
self.name, self.value = val
if not isinstance(self.value, bool):
raise ValueError("Only boolean values supported")
def text(self):
return "if_setting_has_value = %s == %s" % (self.name, self.value)
phash = text
def __call__(self, context, request):
return asbool(request.registry.settings[self.name]) == self.value
class RootOnlyPredicate(object):
def __init__(self, val, config):
self.val = val
def text(self):
return "root_only = %s" % self.val
phash = text
def __call__(self, context, request):
return (context is request.root) == self.val
def template_api(context, request, **kwargs):
return get_settings()['kotti.templates.api'][0](
context, request, **kwargs)
def add_renderer_globals(event):
if event['renderer_name'] != 'json':
request = event['request']
api = getattr(request, 'template_api', None)
if api is None and request is not None:
api = template_api(event['context'], event['request'])
event['api'] = api
@deprecate("'is_root' is deprecated as of Kotti 1.0.0. "
"Use the 'root_only=True' if you were using this as a "
"'custom_predicates' predicate.")
def is_root(context, request):
return context is request.root
class Slots(object):
def __init__(self, context, request):
self.context = context
self.request = request
def __getattr__(self, name):
for event_type in slot_events:
if event_type.name == name:
break
else:
raise AttributeError(name)
value = []
event = event_type(self.context, self.request)
for snippet in objectevent_listeners(event):
if snippet is not None:
if isinstance(snippet, list):
value.extend(snippet)
else:
value.append(snippet)
setattr(self, name, value)
return value
class TemplateAPI(object):
BARE_MASTER = 'kotti:templates/master-bare.pt'
VIEW_MASTER = 'kotti:templates/view/master.pt'
EDIT_MASTER = 'kotti:templates/edit/master.pt'
SITE_SETUP_MASTER = 'kotti:templates/site-setup/master.pt'
body_css_class = ''
def __init__(self, context, request, bare=None, **kwargs):
self.context, self.request = context, request
if getattr(request, 'template_api', None) is None:
request.template_api = self
self.S = get_settings()
if request.is_xhr and bare is None:
bare = True
self.bare = bare
self.slots = Slots(context, request)
self.__dict__.update(kwargs)
@reify
def edit_needed(self):
if 'kotti.fanstatic.edit_needed' in self.S:
return [r.need() for r in self.S['kotti.fanstatic.edit_needed']]
@reify
def view_needed(self):
if 'kotti.fanstatic.view_needed' in self.S:
return [r.need() for r in self.S['kotti.fanstatic.view_needed']]
def macro(self, asset_spec, macro_name='main'):
if self.bare and asset_spec in (
self.VIEW_MASTER, self.EDIT_MASTER, self.SITE_SETUP_MASTER):
asset_spec = self.BARE_MASTER
return get_renderer(asset_spec).implementation().macros[macro_name]
@reify
def site_title(self):
value = get_settings().get('kotti.site_title')
if not value:
value = self.root.title
return value
@reify
def page_title(self):
view_title = self.request.view_name.replace('_', ' ').title()
if view_title:
view_title += u' '
view_title += self.context.title
return u'%s - %s' % (view_title, self.site_title)
def url(self, context=None, *elements, **kwargs):
if context is None:
context = self.context
return self.request.resource_url(context, *elements, **kwargs)
@reify
def root(self):
return self.lineage[-1]
@reify
def navigation_root(self):
for o in self.lineage:
if INavigationRoot.providedBy(o):
return o
return self.root
@reify
def lineage(self):
return list(lineage(self.context))
@reify
def breadcrumbs(self):
breadcrumbs = self.lineage
if self.root != self.navigation_root:
index = breadcrumbs.index(self.navigation_root)
breadcrumbs = breadcrumbs[:index + 1]
return reversed(breadcrumbs)
def has_permission(self, permission, context=None):
if context is None:
context = self.context
return has_permission(permission, context, self.request)
def render_view(self, name='', context=None, request=None, secure=True,
bare=True):
if context is None:
context = self.context
if request is None:
request = self.request
before = self.bare
try:
self.bare = bare
html = render_view(context, request, name, secure)
finally:
self.bare = before
return TemplateStructure(html)
def render_template(self, renderer, **kwargs):
return TemplateStructure(render(renderer, kwargs, self.request))
def list_children(self, context=None, permission='view'):
if context is None:
context = self.context
children = []
if hasattr(context, 'values'):
for child in context.values():
if (not permission or
has_permission(permission, child, self.request)):
children.append(child)
return children
inside = staticmethod(inside)
def avatar_url(self, user=None, size="14", default_image='identicon'):
if user is None:
user = self.request.user
email = user.email
if not email:
email = user.name
h = hashlib.md5(email).hexdigest()
query = {'default': default_image, 'size': str(size)}
url = 'https://secure.gravatar.com/avatar/%s?%s' % (
h, urllib.urlencode(query))
return url
@reify
def locale_name(self):
return get_locale_name(self.request)
def format_date(self, d, format=None):
if format is None:
format = self.S['kotti.date_format']
return format_date(d, format=format, locale=self.locale_name)
def format_datetime(self, dt, format=None):
if format is None:
format = self.S['kotti.datetime_format']
if not isinstance(dt, datetime):
dt = datetime.fromtimestamp(dt)
return format_datetime(dt, format=format, locale=self.locale_name)
def format_time(self, t, format=None):
if format is None:
format = self.S['kotti.time_format']
return format_time(t, format=format, locale=self.locale_name)
def format_currency(self, n, currency, format=None):
return format_currency(n, currency,
format=format, locale=self.locale_name)
def get_type(self, name):
for class_ in get_settings()['kotti.available_types']:
if class_.type_info.name == name:
return class_
def find_edit_view(self, item):
view_name = self.request.view_name
if not view_permitted(item, self.request, view_name):
view_name = u'edit'
if not view_permitted(item, self.request, view_name):
view_name = u''
return view_name
@reify
def edit_links(self):
if not hasattr(self.context, 'type_info'):
return []
return [l for l in self.context.type_info.edit_links
if l.visible(self.context, self.request)]
@reify
def site_setup_links(self):
return [l for l in CONTROL_PANEL_LINKS
if l.visible(self.root, self.request)]
class NodesTree(object):
    """Permission-aware tree view over pre-fetched content nodes.

    Wraps a single content node; attribute access is delegated to the
    wrapped node, while ``children`` lazily wraps only those child nodes
    the request has ``permission`` for.
    """

    def __init__(self, node, request, item_mapping, item_to_children,
                 permission):
        self._node = node
        self._request = request
        self._item_mapping = item_mapping
        self._item_to_children = item_to_children
        self._permission = permission

    @property
    def __parent__(self):
        # Root nodes carry a falsy parent_id and (implicitly) return None.
        if self.parent_id:
            return self._item_mapping[self.parent_id]

    @property
    def children(self):
        """Wrapped child nodes the request is permitted to see."""
        wrapped = []
        for child in self._item_to_children[self.id]:
            if not has_permission(self._permission, child, self._request):
                continue
            wrapped.append(NodesTree(
                child,
                self._request,
                self._item_mapping,
                self._item_to_children,
                self._permission,
            ))
        return wrapped

    def _flatten(self, tree):
        # Depth-first, pre-order traversal yielding the raw content nodes.
        yield tree._node
        for child in tree.children:
            for node in self._flatten(child):
                yield node

    def tolist(self):
        """Flatten the (permitted) subtree into a list of raw nodes."""
        return list(self._flatten(self))

    def __getattr__(self, name):
        # Everything else is looked up on the wrapped content node.
        return getattr(self._node, name)
def nodes_tree(request, context=None, permission='view'):
    """Build a :class:`NodesTree` over all content visible to the request.

    :param request: current request, used for the permission prefilter
    :param context: node to use as the tree root; defaults to the first
        parentless node (the site root)
    :param permission: permission ``NodesTree.children`` filters by
    :return: a ``NodesTree`` wrapping the chosen root node
    """
    item_mapping = {}
    item_to_children = defaultdict(list)  # parent_id -> ordered children
    for node in DBSession.query(Content).with_polymorphic(Content):
        item_mapping[node.id] = node
        # NOTE(review): the prefilter always checks 'view' rather than the
        # ``permission`` argument (which NodesTree.children applies again,
        # lazily) -- confirm this asymmetry is intentional.
        if has_permission('view', node, request):
            item_to_children[node.parent_id].append(node)
    for children in item_to_children.values():
        children.sort(key=lambda ch: ch.position)
    node = context if context is not None else item_to_children[None][0]
    return NodesTree(
        node,
        request,
        item_mapping,
        item_to_children,
        permission,
    )
def search_content(search_term, request=None):
    """Run the configured search implementation over *search_term*.

    The implementation is the first entry of the ``kotti.search_content``
    setting.
    """
    search_impl = get_settings()['kotti.search_content'][0]
    return search_impl(search_term, request)
def default_search_content(search_term, request=None):
    """Default full-text search over content name/title/description.

    Also matches tags and document bodies; results the user may not view
    are dropped.

    :return: list of dicts with ``name``, ``title``, ``description`` and
        ``path`` keys.
    """
    searchstring = u'%%%s%%' % search_term
    generic_filter = or_(Content.name.like(searchstring),
                         Content.title.like(searchstring),
                         Content.description.like(searchstring))
    results = DBSession.query(Content).filter(generic_filter).\
        order_by(Content.title.asc()).all()
    # Documents whose body matches but that the generic filter missed.
    document_results = DBSession.query(Document).filter(
        and_(Document.body.like(searchstring),
             not_(generic_filter)))
    # Merge tag matches and body matches, preserving order and skipping
    # duplicates.  (Previously done via a list comprehension used only for
    # its side effects.)
    for results_set in [content_with_tags([searchstring]),
                        document_results.all()]:
        for c in results_set:
            if c not in results:
                results.append(c)
    result_dicts = []
    for result in results:
        if has_permission('view', result, request):
            result_dicts.append(dict(
                name=result.name,
                title=result.title,
                description=result.description,
                path=request.resource_path(result)))
    return result_dicts
def content_with_tags(tag_terms):
    """Return all content tagged with any tag matching *tag_terms*.

    :param tag_terms: list of SQL LIKE patterns matched against tag titles
    """
    tag_filter = or_(*[Tag.title.like(term) for term in tag_terms])
    query = DBSession.query(Content).join(TagsToContents).join(Tag)
    return query.filter(tag_filter).all()
def search_content_for_tags(tags, request=None):
    """Search content by tags, dropping results the user may not view.

    :return: list of dicts with ``name``, ``title``, ``description`` and
        ``path`` keys.
    """
    result_dicts = []
    for content in content_with_tags(tags):
        if not has_permission('view', content, request):
            continue
        result_dicts.append(dict(
            name=content.name,
            title=content.title,
            description=content.description,
            path=request.resource_path(content)))
    return result_dicts
# BBB: these names historically lived in this module.  Re-import them and
# register deprecation warnings so old call sites keep working but point
# users at kotti.util.
from kotti.util import (
    get_localizer_for_locale_name,
    translate,
)
for obj in (render_view, get_localizer_for_locale_name, translate,
            TemplateStructure):
    name = obj.__name__
    deprecated(
        name,
        "kotti.views.util.{0} has been moved to the kotti.util module "
        "as of Kotti 1.0.0. Use kotti.util.{0} instead".format(name))
| true | true |
f7f4552c9f0ba9690541b9509c1e0a4e800f22a4 | 779 | py | Python | flask_test_1/__main__.py | huogerac/flask-test-1 | 31c2629ea2c1eb589710024f5a09cb1892b0fac7 | [
"Unlicense"
] | null | null | null | flask_test_1/__main__.py | huogerac/flask-test-1 | 31c2629ea2c1eb589710024f5a09cb1892b0fac7 | [
"Unlicense"
] | null | null | null | flask_test_1/__main__.py | huogerac/flask-test-1 | 31c2629ea2c1eb589710024f5a09cb1892b0fac7 | [
"Unlicense"
] | null | null | null | from . import BaseClass, base_function # pragma: no cover
def main() -> None:  # pragma: no cover
    """Entry point for ``python -m flask_test_1`` and the ``flask_test_1`` CLI.

    Demonstrates the package by exercising :class:`BaseClass` and
    :func:`base_function`; swap this out for real application start-up
    (a server, a task runner, a CLI parser, ...).
    """
    print("Executing main function")
    instance = BaseClass()
    print(instance.base_method())
    print(base_function())
    print("End of main function")
# Support direct execution of this file as well as ``python -m``.
if __name__ == "__main__":  # pragma: no cover
    main()
| 26.862069 | 65 | 0.629012 | from . import BaseClass, base_function
# NOTE(review): duplicate, comment-stripped copy of main() -- this looks
# like machine-generated residue (a "content without comments" column of a
# dataset export), not hand-written code.
def main() -> None:
    print("Executing main function")
    base = BaseClass()
    print(base.base_method())
    print(base_function())
    print("End of main function")
if __name__ == "__main__":
    main()
| true | true |
f7f4554fe3b5b666a79b50865f586a5637470347 | 24,313 | py | Python | query_designer/models.py | dipapaspyros/bdo_platform | 336de07c6ed14290c54f2154117dbf90a187e4ea | [
"MIT"
] | 2 | 2018-02-07T10:26:28.000Z | 2018-09-21T09:12:58.000Z | query_designer/models.py | dipapaspyros/bdo_platform | 336de07c6ed14290c54f2154117dbf90a187e4ea | [
"MIT"
] | 5 | 2018-09-21T10:40:44.000Z | 2019-04-06T10:59:57.000Z | query_designer/models.py | dipapaspyros/bdo_platform | 336de07c6ed14290c54f2154117dbf90a187e4ea | [
"MIT"
] | 3 | 2019-06-09T15:42:02.000Z | 2022-02-14T19:50:33.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import decimal
import datetime
import copy
import time
from collections import OrderedDict
import re
import sympy
from threading import Thread
from django.contrib.auth.models import User
from django.db.models import *
from aggregator.models import *
from query_designer.formula_functions import *
from query_designer.query_processors.utils import SolrResultEncoder, PostgresResultEncoder, PrestoResultEncoder
from django.http import JsonResponse
class AbstractQuery(Model):
    """Base model for a saved BDO query.

    The query itself is stored in ``document``, a JSON tree whose ``from``
    entries carry ``select`` specs and whose filters are translated into
    postgres / presto / solr queries by the methods below.
    """
    # Owner of the query.
    user = ForeignKey(User, related_name='queries')
    title = TextField(default='Untitled query')
    created = DateTimeField(auto_now_add=True)
    updated = DateTimeField(auto_now=True)
    # Which tool produced the query document.
    generated_by = CharField(max_length=32, choices=[
        ('CUSTOM', 'Custom query'),
        ('QDv1', 'Query Designer (old)'),
        ('QDv2', 'Query Designer (new)'),
    ])
    document = JSONField()
    # v1-specific fields
    design = JSONField(blank=True, null=True, default=None)
    # v2-specific fields
    v2_fields = TextField(editable=False, blank=True, null=True, default=None)
    v2_filters = TextField(editable=False, blank=True, null=True, default=None)
    # Cached result count / headers of the last execution (None if unknown).
    count = IntegerField(blank=True, null=True, default=None)
    headers = JSONField(blank=True, null=True, default=None)
    # def __unicode__(self):
    #     return '<#%d "%s"%s>' % (self.pk, self.title, ' (%d results)' % self.count if self.count is not None else '')
@staticmethod
def operator_to_str(op, mode='postgres'):
if mode == 'postgres':
return {
# comparison
'eq': ('=', ':'),
'neq': ('!=', None),
'gt': ('>', None),
'gte': ('>=', None),
'lt': ('<', None),
'lte': ('<=', None),
'mod': ('%', None),
# timestamp
'lte_time': ('<=', None),
'gte_time': ('>=', None),
# boolean
'&&': ('AND', 'AND'),
'and': ('AND', 'AND'),
'||': ('OR', 'OR'),
'or': ('OR', 'OR'),
'!': ('NOT', None),
'not': ('NOT', None),
}[op.lower()][0 if mode == 'postgres' else 1]
else:
return {
'not_null': (' IS NOT NULL ', None),
# comparison
'eq': ('=', ':'),
'neq': ('!=', None),
'gt': ('>', None),
'gte': ('>=', None),
'lt': ('<', None),
'lte': ('<=', None),
'mod': ('%', None),
# timestamp
'lte_time': ('<= timestamp ', None),
'gte_time': ('>= timestamp ', None),
# boolean
'&&': ('AND', 'AND'),
'and': ('AND', 'AND'),
'||': ('OR', 'OR'),
'or': ('OR', 'OR'),
'!': ('NOT', None),
'not': ('NOT', None),
}[op.lower()][0 if mode == 'presto' else 1]
    def process_filters(self, filters, mode='postgres', quote=False, use_table_names=False):
        """Recursively translate a filter tree into a backend filter string.

        *filters* is either a leaf value (number / identifier string) or a
        dict ``{'a': ..., 'op': ..., 'b': ...}``.  Identifier leaves of the
        form ``i<from_order>_...`` are resolved to their column names via
        ``self.document['from']``.

        :param mode: 'postgres', 'presto' or 'solr' syntax
        :param quote: quote string leaves (solr only)
        :param use_table_names: prefix columns with their table name
        """
        print 'filters:'
        print filters
        # end value
        if type(filters) in [int, float]:
            # NOTE(review): slicing a number raises immediately, so this
            # branch always falls through to the bare except and returns the
            # numeric leaf unchanged -- presumably that is the intent.
            try:
                col_name = ''
                from_order = int(filters[filters.find('i')+1:filters.find('_')])
                if from_order >= 0:
                    table_name = self.document['from'][from_order]['name']
                    for x in self.document['from'][from_order]['select']:
                        if x['name'] == filters:
                            if x['type'] != 'VALUE':
                                col_name = Dimension.objects.get(pk=x['type']).data_column_name
                            else:
                                v_obj = Variable.objects.get(pk=int(self.document['from'][from_order]['type']))
                                if v_obj.dataset.stored_at == 'UBITECH_POSTGRES' or \
                                        v_obj.dataset.stored_at == 'UBITECH_PRESTO':
                                    col_name = v_obj.name
                                else:
                                    col_name = 'value'
                            if use_table_names:
                                filters = table_name + '.' + col_name
                            else:
                                filters = col_name
            except:  # noqa -- bare except: any resolution failure keeps the raw leaf
                return filters
            return filters
        if type(filters) in [str, unicode]:
            if quote and (mode == 'solr') and filters.strip() != '*' and (not filters.startswith('"')) and filters:
                return '"%s"' % filters
            else:
                # Same identifier-to-column resolution as the numeric branch.
                try:
                    col_name = ''
                    from_order = int(filters[filters.find('i') + 1:filters.find('_')])
                    if from_order >= 0:
                        table_name = self.document['from'][from_order]['name']
                        for x in self.document['from'][from_order]['select']:
                            if x['name'] == filters:
                                if x['type'] != 'VALUE':
                                    col_name = Dimension.objects.get(pk=x['type']).data_column_name
                                else:
                                    v_obj = Variable.objects.get(pk=int(self.document['from'][from_order]['type']))
                                    if v_obj.dataset.stored_at == 'UBITECH_POSTGRES' or \
                                            v_obj.dataset.stored_at == 'UBITECH_PRESTO':
                                        col_name = v_obj.name
                                    else:
                                        col_name = 'value'
                                if use_table_names:
                                    filters = table_name + '.' + col_name
                                else:
                                    filters = col_name
                except:  # noqa -- bare except: any resolution failure keeps the raw leaf
                    return filters
                return "%s" % filters
        # Special case: parsing location filters
        # inside_rect|outside_rect <<lat_south,lng_west>,<lat_north,lng_east>>
        # From here on ``filters`` is a dict node; rewrite its 'a' operand
        # in place to the resolved column reference.
        for _from in self.document['from']:
            v = Variable.objects.get(pk=_from['type'])
            if _from['name'] == filters['a']:
                if use_table_names:
                    filters['a'] = '%s.%s' % (_from['name'], 'value')
                else:
                    filters['a'] = '%s' % ('value')
            for x in _from['select']:
                if x['name'] == filters['a']:
                    if x['type'] != 'VALUE':
                        # print 'type' + x['type']
                        if use_table_names:
                            filters['a'] = '%s.%s' % \
                                           (_from['name'], Dimension.objects.get(pk=int(x['type'])).data_column_name)
                        else:
                            filters['a'] = '%s' % \
                                           (Dimension.objects.get(pk=int(x['type'])).data_column_name)
                    else:
                        v_obj = Variable.objects.get(pk=int(_from['type']))
                        if v_obj.dataset.stored_at == 'UBITECH_POSTGRES' or \
                                v_obj.dataset.stored_at == 'UBITECH_PRESTO':
                            col_name = v_obj.name
                        else:
                            col_name = 'value'
                        if use_table_names:
                            filters['a'] = '%s.%s' % \
                                           (_from['name'], col_name)
                        else:
                            filters['a'] = '%s' % \
                                           (col_name)
        if filters['op'] in ['inside_rect', 'outside_rect', ]:
            print 'inside_rect'
            # 'b' encodes the rectangle corners, 'a' the <lat_id,lon_id> pair.
            rect_start = filters['b'].split('<')[2].split('>,')[0].split(',')
            rect_end = filters['b'].split('>,<')[1].split('>')[0].split(',')
            lat_col_id = int(filters['a'].split('<')[1].split(',')[0].split('>')[0])
            lon_col_id = int(filters['a'].split('<')[1].split(',')[1].split('>')[0])
            lat_col_name = Dimension.objects.get(pk=lat_col_id).data_column_name
            lon_col_name = Dimension.objects.get(pk=lon_col_id).data_column_name
            table_name = Dimension.objects.get(pk=lat_col_id).variable.dataset.table_name
            v_name = Dimension.objects.get(pk=lat_col_id).variable.name
            v_id = Dimension.objects.get(pk=lat_col_id).variable.id
            for idx, _from in enumerate(self.document['from']):
                # print 'from name: ' + _from['name']
                # if _from['name'] == (v_name+'_'+str(idx)):
                if _from['name'] == (v_name + '_' + str(v_id)):
                    table_name = _from['name']
            if use_table_names:
                lat = table_name + '.' + lat_col_name
                lng = table_name + '.' + lon_col_name
            else:
                lat = lat_col_name
                lng = lon_col_name
            # NOTE(review): this first ``result`` assignment is dead code --
            # it is unconditionally overwritten by the if/else just below.
            result = '%s >= %s AND %s <= %s' % (lat, rect_start[0], lat, rect_end[0])
            result += ' AND %s >= %s AND %s <= %s' % (lng, rect_start[1], lng, rect_end[1])
            #lat = filters['a'] + '_latitude'
            #lng = filters['a'] + '_longitude'
            if mode == 'solr':
                result = '%s:[%s TO %s]' % (lat, rect_start[0], rect_end[0])
                result += ' AND %s:[%s TO %s]' % (lng, rect_start[1], rect_end[1])
            else:
                result = '%s >= %s AND %s <= %s' % (lat, rect_start[0], lat, rect_end[0])
                result += ' AND %s >= %s AND %s <= %s' % (lng, rect_start[1], lng, rect_end[1])
            if filters['op'] == 'outside_rect':
                if mode == 'postgres':
                    result = 'NOT(%s)' % result
                else:
                    result = '-(%s)' % result
            return result
        result = ''
        _op = filters['op'].lower()
        # Operators that solr spells differently from SQL.
        if mode == 'solr' and _op in ['neq', 'gt', 'gte', 'lt', 'lte', 'mod', '!', 'not']:
            if _op == 'neq':
                result = '-%s:%s' % (self.process_filters(filters['a']), self.process_filters(filters['b']))
            elif _op in ['gt', 'gte']:
                result = '%s:[%s TO *]' % (self.process_filters(filters['a']), self.process_filters(filters['b']))
            elif _op in ['lt', 'lte']:
                result = '%s:[* TO %s]' % (self.process_filters(filters['a']), self.process_filters(filters['b']))
            elif _op == 'mod':
                result = 'mod(%s, %s)' % (self.process_filters(filters['a']), self.process_filters(filters['b']))
            elif _op in ['!', 'not']:
                raise NotImplementedError('TODO fix missing NOT operator in solr')
        else:
            # Generic binary operator: recurse into both operands and wrap
            # non-leaf results in parentheses.
            _a = self.process_filters(filters['a'], mode=mode, use_table_names=use_table_names)
            _b = self.process_filters(filters['b'], mode=mode, quote=True, use_table_names=use_table_names)
            result = '%s %s %s' % \
                     (('(%s)' % _a) if type(_a) not in [str, unicode, int, float] else _a,
                      Query.operator_to_str(filters['op'], mode=mode),
                      ('(%s)' % _b) if type(_b) not in [str, unicode, int, float] else _b)
        return result
    @staticmethod
    def threaded_fetchall(conn, query, count):
        """Fetch all rows of *query* in pages, using worker threads.

        :param conn: open DB connection (a cursor per page is created on it)
        :param query: SQL without pagination; OFFSET/LIMIT are appended
        :param count: total number of rows expected
        :return: list of all fetched rows

        NOTE(review): pages are appended to a shared list from several
        threads, so row order across pages is not guaranteed; also all
        threads share one connection -- confirm the driver allows
        concurrent cursors on a single connection.
        """
        def fetch_data_page(results, offset=0, limit=100):
            cur = conn.cursor()
            cur.execute(query + ' OFFSET %d LIMIT %d' % (offset, limit))
            results.extend(cur.fetchall())
        # try threaded fetch
        unlimited_results_page_size = 50000
        workers = 5
        current_offset = 0
        all_rows = []
        while current_offset <= count:
            print current_offset
            threads = []
            # Launch up to ``workers`` page fetches in parallel.
            for w in range(0, workers):
                if current_offset + w * unlimited_results_page_size > count:
                    break
                thread = Thread(target=fetch_data_page,
                                args=(all_rows,
                                      current_offset + w * unlimited_results_page_size,
                                      unlimited_results_page_size))
                thread.start()
                threads.append(thread)
            # wait for all to finish
            for k, thread in enumerate(threads):
                print 'waiting %d' % (k+1)
                thread.join()
            current_offset += unlimited_results_page_size * workers
        return all_rows
def process(self, dimension_values='', variable='', only_headers=False, commit=True, execute=False, raw_query=False, from_visualizer=False):
is_postgres = True
is_presto = True
try:
is_postgres = 'POSTGRES' in Variable.objects.get(pk=self.document['from'][0]['type']).dataset.stored_at
except IndexError:
pass
try:
is_presto = 'PRESTO' in Variable.objects.get(pk=self.document['from'][0]['type']).dataset.stored_at
except IndexError:
pass
if is_postgres:
from query_designer.query_processors.postgres import process as q_process
encoder = PostgresResultEncoder
else:
if is_presto:
from query_designer.query_processors.presto import process as q_process
encoder = PrestoResultEncoder
else:
from query_designer.query_processors.solr import process as q_process
encoder = SolrResultEncoder
data = q_process(self, dimension_values=dimension_values, variable=variable,
only_headers=only_headers, commit=commit,
execute=execute, raw_query=raw_query, from_visualizer=from_visualizer)
return data, encoder
def execute(self, dimension_values='', variable='', only_headers=False, commit=True, with_encoder=True, from_visualizer=False):
try:
doc = self.document
except ValueError:
return JsonResponse({'error_message': 'Invalid query document'}, status=400)
result = self.process(dimension_values, variable, only_headers, commit, execute=True, from_visualizer=from_visualizer)
if with_encoder:
return result
encoder = result[1]
return json.loads(encoder().encode(result[0]))
@property
def raw_query(self):
# remove several keys from query
doc = copy.deepcopy(self.document)
# for key in ['limit', 'offset', 'granularity']:
for key in ['offset', 'granularity']:
if key in self.document:
del self.document[key]
# get raw query
res = self.process(dimension_values='', variable='', only_headers=True, commit=False,
execute=False, raw_query=True)
if res == None:
return None
# restore initial doc
self.document = doc
return res[0]['raw_query']
class InvalidUnitError(ValueError):
    """Raised by Formula.suggest_unit when incompatible units are combined."""
    pass
class Formula(Model):
    """A user-defined arithmetic expression over dataset variables.

    Variables are referenced in backticks; see the example below.
    """
    # generic information
    date_created = DateTimeField(auto_now_add=True)
    date_updated = DateTimeField(auto_now=True)
    created_by = ForeignKey(User, blank=True, null=True, default=None)
    name = TextField(blank=False, null=False)
    # the actual formula
    # e.g (`energydemandbefore_19` - `energydemandafter_20`)/`energydemandbefore_19`
    value = TextField(blank=False, null=False)
    # is_valid is maintained automatically by save(); is this a public formula?
    is_valid = BooleanField(default=False)
    is_public = BooleanField(default=False)
@property
def dependencies(self):
"""
:return: A list with all the variables used in the formula
"""
return list(set([prop[1:-1] for prop in re.findall(r'`\w+`', self.value)]))
@property
def internal_value(self):
return '$%d' % self.pk
@staticmethod
def math():
return [fn['name'].split('(')[0] for fn in MATH_FUNCTIONS]
@staticmethod
def random():
return [fn['name'].split('(')[0] for fn in RAND_FUNCTIONS]
@staticmethod
def trig():
return [fn['name'].split('(')[0] for fn in TRIG_FUNCTIONS]
@staticmethod
def safe_function_info():
result = []
for item in MATH_FUNCTIONS:
result.append((item['name'], item['description']))
for item in RAND_FUNCTIONS:
result.append((item['name'], item['description']))
for item in TRIG_FUNCTIONS:
result.append((item['name'], item['description']))
return result
@staticmethod
def functions():
return [fn[0].split('(')[0] for fn in Formula.safe_function_info()]
@staticmethod
def safe(value):
"""
:param value: A potential formula
:return: True if formula contains only numbers, operators and safe functions, False otherwise
"""
for token in re.findall(r"[\w']+", value):
try:
float(token)
except ValueError:
# allowed functions here
if token not in Formula.functions():
return False
return True
@staticmethod
def find_unit(variable):
try:
return Variable.objects.filter(name=variable)[0].unit
except IndexError:
return Dimension.objects.filter(name=variable)[0].unit
@staticmethod
def _normalize_unit(unit):
"""
:param unit: The continuous version of the unit, e.g "€/kWh"
:return:
"""
unit_str = unit
unit_str = unit_str.replace('kWh', 'kW*h').replace('²', '**2')
return unit_str, re.split(r'[\s,.|/*]+', unit_str)
@property
def unit(self):
try:
return self.suggest_unit(fail_on_invalid=False)
except ValueError:
return '-'
    def suggest_unit(self, fail_on_invalid=True):
        """Symbolically derive the unit of the formula's result.

        Each dependency is replaced by its (normalized) unit expressed as
        sympy symbols, the resulting expression is simplified, and the
        surviving symbols form the unit string.

        :param fail_on_invalid: raise InvalidUnitError when unit addition
            survives simplification (a sign of mixing incompatible units)
        :return: the derived unit string, or '%' when no unit survives
        :raises ValueError: when the formula itself is invalid
        :raises InvalidUnitError: see *fail_on_invalid*
        """
        # ignore minus as it could incorrectly cause expressions to collapse
        # e.g € - € => €, not empty unit
        value = self.value.replace('-', '+').replace(' ', '')
        units = {}
        # this is the symbols variable, should not use any unit character inside
        q = []
        # make sure value is safe to proceed
        if self.errors(include_unit_errors=False):
            raise ValueError('Can\'t detect unit of invalid expression')
        # replace each dependency with its unit & define symbols
        unit_cnt = 0
        for dependency in self.dependencies:
            unit_str, du = Formula._normalize_unit(Formula.find_unit(dependency))
            if not du:
                # Unitless dependency: substitute the neutral element (the
                # later replace below is then a no-op).
                value = value.replace('`' + dependency + '`', '1')
            for unit in du:
                try:
                    # do not replace numbers with tokens
                    float(unit)
                except ValueError:
                    if unit not in units:
                        units[unit] = 'q[%d]' % unit_cnt
                        q.append(sympy.Symbol(unit))
                        unit_cnt += 1
                    unit_str = unit_str.replace(unit, units[unit])
            # replace in value
            value = value.replace('`' + dependency + '`', '(' + unit_str + ')')
        # remove functions
        for fn in Formula.functions():
            value = value.replace(str(fn) + '(', '(')
        # simplify expression
        # NOTE: eval() runs on a string assembled from the validated formula
        # and unit tokens; Formula.safe()/errors() is the only guard here.
        expr_result = str(eval(value))
        # replace original symbols
        for unit in units:
            expr_result = expr_result.replace(units[unit], unit)
        # replace ** with ^
        expr_result = expr_result.replace('**', '^')
        # remove digits
        result = ''
        to_remove_constant = True
        for x in expr_result:
            if x == ' ':
                continue
            try:
                int(x)
                if not to_remove_constant:
                    result += x
            except ValueError:
                result += x
                # should not remove the next constant if it exposes to power
                to_remove_constant = x not in ['^', ]
        # no unit remaining -- assume percentage:
        if not result:
            return '%'
        # remove trailing symbols
        while result and result[0] in ['+', '*', ]:
            result = result[1:]
        while result and result[len(result) - 1] in ['+', '*', '/']:
            result = result[:-1]
        # if addition is included, the formula most probably does not make sense
        if '+' in result and fail_on_invalid:
            # format error string
            adders = result.split('+')
            err_str = adders[0]
            for idx, term in enumerate(adders[1:]):
                if not term.strip():
                    continue
                if idx == 0:
                    err_str += ' with %s' % term
                elif idx + 2 < len(adders):
                    err_str += ', %s' % term
                else:
                    err_str += ' and %s' % term
            # raise error
            raise InvalidUnitError('Formula seems to be incorrect: adding %s' % err_str)
        if len(result):
            if result[0] == '*':
                result = result[1:]
            elif result[0] == '/':
                result = '1' + result[1:]
        return result
def apply(self, context):
"""
:param context: A dictionary of variables and their values
:return: The result of the formula after applying the context
"""
# modules for formula calculation
###
# make sure all values are there
for dependency in self.dependencies:
if dependency not in context:
raise ValueError('Missing value "%s"' % dependency)
# apply context
value = self.value
for key in context:
value = value.replace('`' + key + '`', str(context[key]))
# make sure user input is safe
if not Formula.safe(value):
raise ValueError('Unsafe formula "%s"' % value)
# remove functions
for fn in Formula.functions():
value = value.replace(str(fn) + '(', '(')
# evaluate the expression
try:
result = eval(value)
except ZeroDivisionError:
result = None
# respond
return result
def errors(self, include_unit_errors=True):
"""
:return: A list of all the errors in the formula
"""
dummy_context = {}
errors = []
for prop in self.dependencies:
# make sure the variable is valid
if prop not in [v.name for v in Variable.objects.all()] + [d.name for d in Dimension.objects.all()]:
errors.append('Unknown variable %s' % prop)
dummy_context[prop] = 0
try:
dummy_result = self.apply(dummy_context)
if type(dummy_result) not in [int, float, type(None)]:
errors.append('Incorrect return type %s: Must be either an int or a float' % type(dummy_result))
return errors
except SyntaxError as se:
try:
errors.append(str(se).split(' (')[0])
except IndexError:
errors.append(str(se))
except ValueError:
errors.append('Unknown expression')
if include_unit_errors and not errors:
try:
self.suggest_unit()
except InvalidUnitError as err:
errors.append(str(err))
return errors
def save(self, *args, **kwargs):
"""
Override the save method to store the `valid`
"""
try:
self.is_valid = len(self.errors(include_unit_errors=False)) == 0
except ValueError: # unsafe formula or incorrect context
self.is_valid = False
super(Formula, self).save(*args, **kwargs)
def __str__(self):
return '=%s' % self.value
    # Abstract base: Django creates no table for Formula itself.
    class Meta:
        abstract = True
class Query(AbstractQuery):
    """Persistent, user-saved query; see AbstractQuery for the fields."""
    pass
class TempQuery(AbstractQuery):
    """Scratch copy of a query, optionally linked back to its original."""
    original = ForeignKey(Query, null=True)
| 36.28806 | 144 | 0.504545 |
from __future__ import unicode_literals
import json
import decimal
import datetime
import copy
import time
from collections import OrderedDict
import re
import sympy
from threading import Thread
from django.contrib.auth.models import User
from django.db.models import *
from aggregator.models import *
from query_designer.formula_functions import *
from query_designer.query_processors.utils import SolrResultEncoder, PostgresResultEncoder, PrestoResultEncoder
from django.http import JsonResponse
class AbstractQuery(Model):
user = ForeignKey(User, related_name='queries')
title = TextField(default='Untitled query')
created = DateTimeField(auto_now_add=True)
updated = DateTimeField(auto_now=True)
generated_by = CharField(max_length=32, choices=[
('CUSTOM', 'Custom query'),
('QDv1', 'Query Designer (old)'),
('QDv2', 'Query Designer (new)'),
])
document = JSONField()
design = JSONField(blank=True, null=True, default=None)
v2_fields = TextField(editable=False, blank=True, null=True, default=None)
v2_filters = TextField(editable=False, blank=True, null=True, default=None)
count = IntegerField(blank=True, null=True, default=None)
headers = JSONField(blank=True, null=True, default=None)
@staticmethod
def operator_to_str(op, mode='postgres'):
if mode == 'postgres':
return {
'eq': ('=', ':'),
'neq': ('!=', None),
'gt': ('>', None),
'gte': ('>=', None),
'lt': ('<', None),
'lte': ('<=', None),
'mod': ('%', None),
'lte_time': ('<=', None),
'gte_time': ('>=', None),
'&&': ('AND', 'AND'),
'and': ('AND', 'AND'),
'||': ('OR', 'OR'),
'or': ('OR', 'OR'),
'!': ('NOT', None),
'not': ('NOT', None),
}[op.lower()][0 if mode == 'postgres' else 1]
else:
return {
'not_null': (' IS NOT NULL ', None),
'eq': ('=', ':'),
'neq': ('!=', None),
'gt': ('>', None),
'gte': ('>=', None),
'lt': ('<', None),
'lte': ('<=', None),
'mod': ('%', None),
'lte_time': ('<= timestamp ', None),
'gte_time': ('>= timestamp ', None),
'&&': ('AND', 'AND'),
'and': ('AND', 'AND'),
'||': ('OR', 'OR'),
'or': ('OR', 'OR'),
'!': ('NOT', None),
'not': ('NOT', None),
}[op.lower()][0 if mode == 'presto' else 1]
def process_filters(self, filters, mode='postgres', quote=False, use_table_names=False):
print 'filters:'
print filters
if type(filters) in [int, float]:
try:
col_name = ''
from_order = int(filters[filters.find('i')+1:filters.find('_')])
if from_order >= 0:
table_name = self.document['from'][from_order]['name']
for x in self.document['from'][from_order]['select']:
if x['name'] == filters:
if x['type'] != 'VALUE':
col_name = Dimension.objects.get(pk=x['type']).data_column_name
else:
v_obj = Variable.objects.get(pk=int(self.document['from'][from_order]['type']))
if v_obj.dataset.stored_at == 'UBITECH_POSTGRES' or \
v_obj.dataset.stored_at == 'UBITECH_PRESTO':
col_name = v_obj.name
else:
col_name = 'value'
if use_table_names:
filters = table_name + '.' + col_name
else:
filters = col_name
except:
return filters
return filters
if type(filters) in [str, unicode]:
if quote and (mode == 'solr') and filters.strip() != '*' and (not filters.startswith('"')) and filters:
return '"%s"' % filters
else:
try:
col_name = ''
from_order = int(filters[filters.find('i') + 1:filters.find('_')])
if from_order >= 0:
table_name = self.document['from'][from_order]['name']
for x in self.document['from'][from_order]['select']:
if x['name'] == filters:
if x['type'] != 'VALUE':
col_name = Dimension.objects.get(pk=x['type']).data_column_name
else:
v_obj = Variable.objects.get(pk=int(self.document['from'][from_order]['type']))
if v_obj.dataset.stored_at == 'UBITECH_POSTGRES' or \
v_obj.dataset.stored_at == 'UBITECH_PRESTO':
col_name = v_obj.name
else:
col_name = 'value'
if use_table_names:
filters = table_name + '.' + col_name
else:
filters = col_name
except:
return filters
return "%s" % filters
# Special case: parsing location filters
# inside_rect|outside_rect <<lat_south,lng_west>,<lat_north,lng_east>>
for _from in self.document['from']:
v = Variable.objects.get(pk=_from['type'])
if _from['name'] == filters['a']:
if use_table_names:
filters['a'] = '%s.%s' % (_from['name'], 'value')
else:
filters['a'] = '%s' % ('value')
for x in _from['select']:
if x['name'] == filters['a']:
if x['type'] != 'VALUE':
# print 'type' + x['type']
if use_table_names:
filters['a'] = '%s.%s' % \
(_from['name'], Dimension.objects.get(pk=int(x['type'])).data_column_name)
else:
filters['a'] = '%s' % \
(Dimension.objects.get(pk=int(x['type'])).data_column_name)
else:
v_obj = Variable.objects.get(pk=int(_from['type']))
if v_obj.dataset.stored_at == 'UBITECH_POSTGRES' or \
v_obj.dataset.stored_at == 'UBITECH_PRESTO':
col_name = v_obj.name
else:
col_name = 'value'
if use_table_names:
filters['a'] = '%s.%s' % \
(_from['name'], col_name)
else:
filters['a'] = '%s' % \
(col_name)
if filters['op'] in ['inside_rect', 'outside_rect', ]:
print 'inside_rect'
rect_start = filters['b'].split('<')[2].split('>,')[0].split(',')
rect_end = filters['b'].split('>,<')[1].split('>')[0].split(',')
lat_col_id = int(filters['a'].split('<')[1].split(',')[0].split('>')[0])
lon_col_id = int(filters['a'].split('<')[1].split(',')[1].split('>')[0])
lat_col_name = Dimension.objects.get(pk=lat_col_id).data_column_name
lon_col_name = Dimension.objects.get(pk=lon_col_id).data_column_name
table_name = Dimension.objects.get(pk=lat_col_id).variable.dataset.table_name
v_name = Dimension.objects.get(pk=lat_col_id).variable.name
v_id = Dimension.objects.get(pk=lat_col_id).variable.id
for idx, _from in enumerate(self.document['from']):
# print 'from name: ' + _from['name']
# if _from['name'] == (v_name+'_'+str(idx)):
if _from['name'] == (v_name + '_' + str(v_id)):
table_name = _from['name']
if use_table_names:
lat = table_name + '.' + lat_col_name
lng = table_name + '.' + lon_col_name
else:
lat = lat_col_name
lng = lon_col_name
result = '%s >= %s AND %s <= %s' % (lat, rect_start[0], lat, rect_end[0])
result += ' AND %s >= %s AND %s <= %s' % (lng, rect_start[1], lng, rect_end[1])
#lat = filters['a'] + '_latitude'
#lng = filters['a'] + '_longitude'
if mode == 'solr':
result = '%s:[%s TO %s]' % (lat, rect_start[0], rect_end[0])
result += ' AND %s:[%s TO %s]' % (lng, rect_start[1], rect_end[1])
else:
result = '%s >= %s AND %s <= %s' % (lat, rect_start[0], lat, rect_end[0])
result += ' AND %s >= %s AND %s <= %s' % (lng, rect_start[1], lng, rect_end[1])
if filters['op'] == 'outside_rect':
if mode == 'postgres':
result = 'NOT(%s)' % result
else:
result = '-(%s)' % result
return result
result = ''
_op = filters['op'].lower()
if mode == 'solr' and _op in ['neq', 'gt', 'gte', 'lt', 'lte', 'mod', '!', 'not']:
if _op == 'neq':
result = '-%s:%s' % (self.process_filters(filters['a']), self.process_filters(filters['b']))
elif _op in ['gt', 'gte']:
result = '%s:[%s TO *]' % (self.process_filters(filters['a']), self.process_filters(filters['b']))
elif _op in ['lt', 'lte']:
result = '%s:[* TO %s]' % (self.process_filters(filters['a']), self.process_filters(filters['b']))
elif _op == 'mod':
result = 'mod(%s, %s)' % (self.process_filters(filters['a']), self.process_filters(filters['b']))
elif _op in ['!', 'not']:
raise NotImplementedError('TODO fix missing NOT operator in solr')
else:
_a = self.process_filters(filters['a'], mode=mode, use_table_names=use_table_names)
_b = self.process_filters(filters['b'], mode=mode, quote=True, use_table_names=use_table_names)
result = '%s %s %s' % \
(('(%s)' % _a) if type(_a) not in [str, unicode, int, float] else _a,
Query.operator_to_str(filters['op'], mode=mode),
('(%s)' % _b) if type(_b) not in [str, unicode, int, float] else _b)
return result
@staticmethod
def threaded_fetchall(conn, query, count):
def fetch_data_page(results, offset=0, limit=100):
cur = conn.cursor()
cur.execute(query + ' OFFSET %d LIMIT %d' % (offset, limit))
results.extend(cur.fetchall())
# try threaded fetch
unlimited_results_page_size = 50000
workers = 5
current_offset = 0
all_rows = []
while current_offset <= count:
print current_offset
threads = []
for w in range(0, workers):
if current_offset + w * unlimited_results_page_size > count:
break
thread = Thread(target=fetch_data_page,
args=(all_rows,
current_offset + w * unlimited_results_page_size,
unlimited_results_page_size))
thread.start()
threads.append(thread)
# wait for all to finish
for k, thread in enumerate(threads):
print 'waiting %d' % (k+1)
thread.join()
current_offset += unlimited_results_page_size * workers
return all_rows
def process(self, dimension_values='', variable='', only_headers=False, commit=True, execute=False, raw_query=False, from_visualizer=False):
is_postgres = True
is_presto = True
try:
is_postgres = 'POSTGRES' in Variable.objects.get(pk=self.document['from'][0]['type']).dataset.stored_at
except IndexError:
pass
try:
is_presto = 'PRESTO' in Variable.objects.get(pk=self.document['from'][0]['type']).dataset.stored_at
except IndexError:
pass
if is_postgres:
from query_designer.query_processors.postgres import process as q_process
encoder = PostgresResultEncoder
else:
if is_presto:
from query_designer.query_processors.presto import process as q_process
encoder = PrestoResultEncoder
else:
from query_designer.query_processors.solr import process as q_process
encoder = SolrResultEncoder
data = q_process(self, dimension_values=dimension_values, variable=variable,
only_headers=only_headers, commit=commit,
execute=execute, raw_query=raw_query, from_visualizer=from_visualizer)
return data, encoder
def execute(self, dimension_values='', variable='', only_headers=False, commit=True, with_encoder=True, from_visualizer=False):
try:
doc = self.document
except ValueError:
return JsonResponse({'error_message': 'Invalid query document'}, status=400)
result = self.process(dimension_values, variable, only_headers, commit, execute=True, from_visualizer=from_visualizer)
if with_encoder:
return result
encoder = result[1]
return json.loads(encoder().encode(result[0]))
@property
def raw_query(self):
# remove several keys from query
doc = copy.deepcopy(self.document)
# for key in ['limit', 'offset', 'granularity']:
for key in ['offset', 'granularity']:
if key in self.document:
del self.document[key]
# get raw query
res = self.process(dimension_values='', variable='', only_headers=True, commit=False,
execute=False, raw_query=True)
if res == None:
return None
# restore initial doc
self.document = doc
return res[0]['raw_query']
class InvalidUnitError(ValueError):
    """Raised when the units of a formula's operands cannot be combined
    into a meaningful result unit (e.g. adding incompatible units)."""
class Formula(Model):
    """A stored arithmetic expression over dataset variables/dimensions.

    Variables are referenced in back-ticks, e.g.
    (`energydemandbefore_19` - `energydemandafter_20`)/`energydemandbefore_19`,
    and only a whitelisted set of math/random/trig functions is allowed.
    """
    # generic information
    date_created = DateTimeField(auto_now_add=True)
    date_updated = DateTimeField(auto_now=True)
    created_by = ForeignKey(User, blank=True, null=True, default=None)
    name = TextField(blank=False, null=False)

    # the actual formula
    # e.g (`energydemandbefore_19` - `energydemandafter_20`)/`energydemandbefore_19`
    value = TextField(blank=False, null=False)

    # `is_valid` is recomputed on every save(); `is_public` marks a public formula
    is_valid = BooleanField(default=False)
    is_public = BooleanField(default=False)

    @property
    def dependencies(self):
        """
        :return: A list with all the variables used in the formula
        """
        return list(set([prop[1:-1] for prop in re.findall(r'`\w+`', self.value)]))

    @property
    def internal_value(self):
        """Internal placeholder name for this formula, e.g. "$42"."""
        return '$%d' % self.pk

    @staticmethod
    def math():
        """Bare names of the whitelisted math functions."""
        return [fn['name'].split('(')[0] for fn in MATH_FUNCTIONS]

    @staticmethod
    def random():
        """Bare names of the whitelisted random functions."""
        return [fn['name'].split('(')[0] for fn in RAND_FUNCTIONS]

    @staticmethod
    def trig():
        """Bare names of the whitelisted trigonometric functions."""
        return [fn['name'].split('(')[0] for fn in TRIG_FUNCTIONS]

    @staticmethod
    def safe_function_info():
        """(name, description) pairs for every whitelisted function."""
        result = []
        for item in MATH_FUNCTIONS:
            result.append((item['name'], item['description']))
        for item in RAND_FUNCTIONS:
            result.append((item['name'], item['description']))
        for item in TRIG_FUNCTIONS:
            result.append((item['name'], item['description']))
        return result

    @staticmethod
    def functions():
        """Bare names (signature stripped) of every whitelisted function."""
        return [fn[0].split('(')[0] for fn in Formula.safe_function_info()]

    @staticmethod
    def safe(value):
        """
        :param value: A potential formula
        :return: True if formula contains only numbers, operators and safe functions, False otherwise
        """
        for token in re.findall(r"[\w']+", value):
            try:
                float(token)
            except ValueError:
                # allowed functions here
                if token not in Formula.functions():
                    return False
        return True

    @staticmethod
    def find_unit(variable):
        """Look up the unit of `variable`: first among Variables, then Dimensions.

        NOTE(review): if the name matches neither, the second IndexError
        propagates to the caller.
        """
        try:
            return Variable.objects.filter(name=variable)[0].unit
        except IndexError:
            return Dimension.objects.filter(name=variable)[0].unit

    @staticmethod
    def _normalize_unit(unit):
        """
        :param unit: The continuous version of the unit, e.g "€/kWh"
        :return: (normalized unit string, list of its atomic unit tokens)
        """
        unit_str = unit
        unit_str = unit_str.replace('kWh', 'kW*h').replace('²', '**2')
        return unit_str, re.split(r'[\s,.|/*]+', unit_str)

    @property
    def unit(self):
        """Best-effort unit of the formula's result; '-' if it can't be derived."""
        try:
            return self.suggest_unit(fail_on_invalid=False)
        except ValueError:
            return '-'

    def suggest_unit(self, fail_on_invalid=True):
        """Symbolically derive the unit of the formula result.

        Each dependency is replaced by its (tokenized) unit, the resulting
        expression is simplified via eval over sympy symbols, and the string
        is post-processed. Raises InvalidUnitError when incompatible units
        are added and `fail_on_invalid` is True.
        """
        # ignore minus as it could incorrectly cause expressions to collapse
        # e.g € - € => €, not empty unit
        value = self.value.replace('-', '+').replace(' ', '')
        units = {}
        # this is the symbols variable, should not use any unit character inside
        q = []
        # make sure value is safe to proceed
        if self.errors(include_unit_errors=False):
            raise ValueError('Can\'t detect unit of invalid expression')
        # replace each dependency with its unit & define symbols
        unit_cnt = 0
        for dependency in self.dependencies:
            unit_str, du = Formula._normalize_unit(Formula.find_unit(dependency))
            if not du:
                # dependency is unitless -- treat it as the scalar 1
                value = value.replace('`' + dependency + '`', '1')
            for unit in du:
                try:
                    # do not replace numbers with tokens
                    float(unit)
                except ValueError:
                    if unit not in units:
                        # map each distinct unit token to a sympy symbol slot
                        units[unit] = 'q[%d]' % unit_cnt
                        q.append(sympy.Symbol(unit))
                        unit_cnt += 1
                    unit_str = unit_str.replace(unit, units[unit])
            # replace in value
            value = value.replace('`' + dependency + '`', '(' + unit_str + ')')
        # remove functions
        for fn in Formula.functions():
            value = value.replace(str(fn) + '(', '(')
        # simplify expression (value only contains digits, operators and q[i]
        # slots at this point -- see the errors() check above)
        expr_result = str(eval(value))
        # replace original symbols
        for unit in units:
            expr_result = expr_result.replace(units[unit], unit)
        # replace ** with ^
        expr_result = expr_result.replace('**', '^')
        # remove digits (numeric coefficients are irrelevant to the unit)
        result = ''
        to_remove_constant = True
        for x in expr_result:
            if x == ' ':
                continue
            try:
                int(x)
                if not to_remove_constant:
                    result += x
            except ValueError:
                result += x
                # should not remove the next constant if it exposes to power
                to_remove_constant = x not in ['^', ]
        # no unit remaining -- assume percentage:
        if not result:
            return '%'
        # remove trailing symbols
        while result and result[0] in ['+', '*', ]:
            result = result[1:]
        while result and result[len(result) - 1] in ['+', '*', '/']:
            result = result[:-1]
        # if addition is included, the formula most probably does not make sense
        if '+' in result and fail_on_invalid:
            # format error string
            adders = result.split('+')
            err_str = adders[0]
            for idx, term in enumerate(adders[1:]):
                if not term.strip():
                    continue
                if idx == 0:
                    err_str += ' with %s' % term
                elif idx + 2 < len(adders):
                    err_str += ', %s' % term
                else:
                    err_str += ' and %s' % term
            # raise error
            raise InvalidUnitError('Formula seems to be incorrect: adding %s' % err_str)
        if len(result):
            if result[0] == '*':
                result = result[1:]
            elif result[0] == '/':
                result = '1' + result[1:]
        return result

    def apply(self, context):
        """
        :param context: A dictionary of variables and their values
        :return: The result of the formula after applying the context
        """
        # make sure all values are there
        for dependency in self.dependencies:
            if dependency not in context:
                raise ValueError('Missing value "%s"' % dependency)
        # apply context
        value = self.value
        for key in context:
            value = value.replace('`' + key + '`', str(context[key]))
        # make sure user input is safe
        # NOTE(review): eval() below runs user-derived text; Formula.safe()
        # is the only guard -- keep its whitelist strict.
        if not Formula.safe(value):
            raise ValueError('Unsafe formula "%s"' % value)
        # remove functions
        for fn in Formula.functions():
            value = value.replace(str(fn) + '(', '(')
        # evaluate the expression
        try:
            result = eval(value)
        except ZeroDivisionError:
            result = None
        # respond
        return result

    def errors(self, include_unit_errors=True):
        """
        :return: A list of all the errors in the formula
        """
        dummy_context = {}
        errors = []
        for prop in self.dependencies:
            # make sure the variable is valid
            if prop not in [v.name for v in Variable.objects.all()] + [d.name for d in Dimension.objects.all()]:
                errors.append('Unknown variable %s' % prop)
            dummy_context[prop] = 0
        try:
            # evaluate with all variables set to 0 to smoke-test the syntax
            dummy_result = self.apply(dummy_context)
            if type(dummy_result) not in [int, float, type(None)]:
                errors.append('Incorrect return type %s: Must be either an int or a float' % type(dummy_result))
            return errors
        except SyntaxError as se:
            try:
                errors.append(str(se).split(' (')[0])
            except IndexError:
                errors.append(str(se))
        except ValueError:
            errors.append('Unknown expression')
        if include_unit_errors and not errors:
            try:
                self.suggest_unit()
            except InvalidUnitError as err:
                errors.append(str(err))
        return errors

    def save(self, *args, **kwargs):
        """
        Override the save method to store the `is_valid` flag.
        """
        try:
            self.is_valid = len(self.errors(include_unit_errors=False)) == 0
        except ValueError:  # unsafe formula or incorrect context
            self.is_valid = False
        super(Formula, self).save(*args, **kwargs)

    def __str__(self):
        return '=%s' % self.value

    class Meta:
        abstract = True
class Query(AbstractQuery):
    # Concrete, persisted query -- all behaviour lives on AbstractQuery.
    pass
class TempQuery(AbstractQuery):
    # Scratch copy of a query, optionally linked to the Query it derives from.
    original = ForeignKey(Query, null=True)
| false | true |
f7f4569ef8f4fda048d951f63334c09822fb0e1d | 982 | py | Python | examples/dagster_examples_tests/intro_tutorial_tests/test_repos.py | JPeer264/dagster-fork | 32cc87a36134be7c442fa85d6867eb1d3301aea0 | [
"Apache-2.0"
] | 1 | 2020-09-19T16:35:59.000Z | 2020-09-19T16:35:59.000Z | examples/dagster_examples_tests/intro_tutorial_tests/test_repos.py | JPeer264/dagster-fork | 32cc87a36134be7c442fa85d6867eb1d3301aea0 | [
"Apache-2.0"
] | null | null | null | examples/dagster_examples_tests/intro_tutorial_tests/test_repos.py | JPeer264/dagster-fork | 32cc87a36134be7c442fa85d6867eb1d3301aea0 | [
"Apache-2.0"
] | null | null | null | from dagster_examples.intro_tutorial.repos import hello_cereal_repository
from dagster_examples.intro_tutorial.scheduler import (
hello_cereal_repository as scheduler_repository,
)
from dagster import execute_pipeline
from dagster.utils import pushd, script_relative_path
def test_define_repo():
    """The tutorial repository exposes hello_cereal_pipeline and can run it."""
    repository = hello_cereal_repository
    assert repository.name == 'hello_cereal_repository'
    assert repository.has_pipeline('hello_cereal_pipeline')
    # Run from the tutorial directory so relative data paths resolve.
    with pushd(script_relative_path('../../dagster_examples/intro_tutorial/')):
        run_result = execute_pipeline(repository.get_pipeline('hello_cereal_pipeline'))
    assert run_result.success
def test_define_scheduler_repo():
    """The scheduler variant of the repository runs the same pipeline."""
    repository = scheduler_repository
    assert repository.name == 'hello_cereal_repository'
    assert repository.has_pipeline('hello_cereal_pipeline')
    # Run from the tutorial directory so relative data paths resolve.
    with pushd(script_relative_path('../../dagster_examples/intro_tutorial/')):
        run_result = execute_pipeline(repository.get_pipeline('hello_cereal_pipeline'))
    assert run_result.success
| 37.769231 | 79 | 0.789206 | from dagster_examples.intro_tutorial.repos import hello_cereal_repository
from dagster_examples.intro_tutorial.scheduler import (
hello_cereal_repository as scheduler_repository,
)
from dagster import execute_pipeline
from dagster.utils import pushd, script_relative_path
def test_define_repo():
repo = hello_cereal_repository
assert repo.name == 'hello_cereal_repository'
assert repo.has_pipeline('hello_cereal_pipeline')
with pushd(script_relative_path('../../dagster_examples/intro_tutorial/')):
result = execute_pipeline(repo.get_pipeline('hello_cereal_pipeline'))
assert result.success
def test_define_scheduler_repo():
repo = scheduler_repository
assert repo.name == 'hello_cereal_repository'
assert repo.has_pipeline('hello_cereal_pipeline')
with pushd(script_relative_path('../../dagster_examples/intro_tutorial/')):
result = execute_pipeline(repo.get_pipeline('hello_cereal_pipeline'))
assert result.success
| true | true |
f7f457b4219c8b133fa7aa00f7f9096ba3e5f644 | 3,491 | py | Python | bindings/python/ensmallen/datasets/string/cellulomonastimonensis.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/cellulomonastimonensis.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/cellulomonastimonensis.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Cellulomonas timonensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CellulomonasTimonensis(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Cellulomonas timonensis graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.
        The available versions are:
            - homology.v11.5
            - physical.links.v11.5
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Cellulomonas timonensis graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
    title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
    author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
    journal={Nucleic acids research},
    volume={47},
    number={D1},
    pages={D607--D613},
    year={2019},
    publisher={Oxford University Press}
    }
    ```
    """
    return AutomaticallyRetrievedGraph(
        graph_name="CellulomonasTimonensis",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| 33.247619 | 223 | 0.679461 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def CellulomonasTimonensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="CellulomonasTimonensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f7f457d7002ad9997518e7d5ee61bbb60614956a | 97,272 | py | Python | test/functional/p2p_segwit.py | minblock/Scoutcoin | 532aebe3597dbffe4ea7cc3e70d6cbde542614d3 | [
"MIT"
] | null | null | null | test/functional/p2p_segwit.py | minblock/Scoutcoin | 532aebe3597dbffe4ea7cc3e70d6cbde542614d3 | [
"MIT"
] | null | null | null | test/functional/p2p_segwit.py | minblock/Scoutcoin | 532aebe3597dbffe4ea7cc3e70d6cbde542614d3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from binascii import hexlify
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_witness_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
connect_nodes,
disconnect_nodes,
get_bip9_status,
hex_str_to_bytes,
sync_blocks,
sync_mempools,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
# Length of a versionbits signalling period in blocks (regtest)
VB_PERIOD = 144
# Block version with the BIP9 "top bits" (0x20000000) set
VB_TOP_BITS = 0x20000000

# Maximum total signature-operation cost allowed per block (BIP141 accounting)
MAX_SIGOP_COST = 80000
class UTXO():
    """Bookkeeping record for an anyone-can-spend output used across tests."""

    def __init__(self, sha256, n, value):
        # Outpoint: txid (as int) and output index, plus the amount in satoshis.
        self.nValue = value
        self.n = n
        self.sha256 = sha256
def get_p2pkh_script(pubkeyhash):
    """Return the canonical pay-to-pubkey-hash scriptPubKey for pubkeyhash."""
    ops = [
        CScriptOp(OP_DUP),
        CScriptOp(OP_HASH160),
        pubkeyhash,
        CScriptOp(OP_EQUALVERIFY),
        CScriptOp(OP_CHECKSIG),
    ]
    return CScript(ops)
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
    """Sign input `in_idx` of `tx_to` as a P2PK witness spend and install
    the [signature, script] witness stack in place."""
    sighash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
    # DER signature followed by the one-byte sighash type.
    sig = key.sign_ecdsa(sighash) + chr(hashtype).encode('latin-1')
    tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [sig, script]
    tx_to.rehash()
def get_virtual_size(witness_block):
    """Calculate the virtual size of a witness block.

    Virtual size is base + witness/4, i.e. weight/4 rounded up."""
    stripped_size = len(witness_block.serialize(with_witness=False))
    full_size = len(witness_block.serialize(with_witness=True))
    # weight = 3 * stripped + full; "+3" rounds the integer division up
    return (3 * stripped_size + full_size + 3) // 4
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
    """Send a transaction to the node and check that it's accepted to the mempool

    - Submit the transaction over the p2p interface
    - use the getrawmempool rpc to check for acceptance."""
    expected_msgs = [reason] if reason else []
    with node.assert_debug_log(expected_msgs=expected_msgs):
        message = msg_witness_tx(tx) if with_witness else msg_tx(tx)
        p2p.send_message(message)
        p2p.sync_with_ping()
        assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
    """Send a block to the node and check that it's accepted

    - Submit the block over the p2p interface
    - use the getbestblockhash rpc to check for acceptance."""
    expected_msgs = [reason] if reason else []
    with node.assert_debug_log(expected_msgs=expected_msgs):
        message = msg_witness_block(block) if with_witness else msg_block(block)
        p2p.send_message(message)
        p2p.sync_with_ping()
        assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
    """P2P connection that records getdata requests and provides
    announce-and-wait primitives used throughout the segwit tests."""

    def __init__(self):
        super().__init__()
        # Every inv hash the node has requested from us via getdata.
        self.getdataset = set()

    def on_getdata(self, message):
        # Record requested hashes so tests can assert what was (not) fetched.
        for inv in message.inv:
            self.getdataset.add(inv.hash)

    def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
        """Announce a tx via inv; wait for its getdata (success=True) or
        verify no getdata arrives within `timeout` seconds (success=False)."""
        with mininode_lock:
            self.last_message.pop("getdata", None)
        self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
        if success:
            self.wait_for_getdata(timeout)
        else:
            time.sleep(timeout)
            assert not self.last_message.get("getdata")

    def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
        """Announce a block (by header or by inv) and wait for its getdata."""
        with mininode_lock:
            self.last_message.pop("getdata", None)
            self.last_message.pop("getheaders", None)
        msg = msg_headers()
        msg.headers = [CBlockHeader(block)]
        if use_header:
            self.send_message(msg)
        else:
            # An inv is answered with getheaders first; reply with the header
            # so the node then issues the getdata.
            self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
            self.wait_for_getheaders()
            self.send_message(msg)
        self.wait_for_getdata()

    def request_block(self, blockhash, inv_type, timeout=60):
        """Fetch a block via getdata; inv_type selects witness/non-witness."""
        with mininode_lock:
            self.last_message.pop("block", None)
        self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
        self.wait_for_block(blockhash, timeout)
        return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
    def set_test_params(self):
        # Three nodes: node0 relays everything, node1 enforces standardness
        # (-acceptnonstdtxn=0), node2 never activates segwit (vbparams 0:0).
        self.setup_clean_chain = True
        self.num_nodes = 3
        # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
        self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0", "-mempoolreplacement=1"]]
    def skip_test_if_missing_module(self):
        # The test relies on wallet RPCs for funding transactions.
        self.skip_if_no_wallet()
    def setup_network(self):
        # node0 is connected to both node1 and node2; node1 and node2 are
        # not connected to each other.
        self.setup_nodes()
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        self.sync_all()
# Helper functions
def build_next_block(self, version=VB_TOP_BITS):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
    def run_test(self):
        """Drive every subtest across the segwit deployment states
        (defined -> started -> locked_in -> active)."""
        # Setup the p2p connections
        # self.test_node sets NODE_WITNESS|NODE_NETWORK
        self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
        # self.old_node sets only NODE_NETWORK
        self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
        # self.std_node is for testing node1 (fRequireStandard=true)
        self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)

        assert self.test_node.nServices & NODE_WITNESS != 0

        # Keep a place to store utxo's that can be used in later tests
        self.utxo = []

        # Segwit status 'defined'
        self.segwit_status = 'defined'

        self.test_non_witness_transaction()
        self.test_unnecessary_witness_before_segwit_activation()
        self.test_v0_outputs_arent_spendable()
        self.test_block_relay()
        self.advance_to_segwit_started()

        # Segwit status 'started'

        self.test_getblocktemplate_before_lockin()
        self.advance_to_segwit_lockin()

        # Segwit status 'locked_in'

        self.test_unnecessary_witness_before_segwit_activation()
        self.test_witness_tx_relay_before_segwit_activation()
        self.test_block_relay()
        self.test_standardness_v0()
        self.advance_to_segwit_active()

        # Segwit status 'active'

        self.test_p2sh_witness()
        self.test_witness_commitments()
        self.test_block_malleability()
        self.test_witness_block_size()
        self.test_submit_block()
        self.test_extra_witness_data()
        self.test_max_witness_push_length()
        self.test_max_witness_program_length()
        self.test_witness_input_length()
        self.test_block_relay()
        self.test_tx_relay_after_segwit_activation()
        self.test_standardness_v0()
        self.test_segwit_versions()
        self.test_premature_coinbase_witness_spend()
        self.test_uncompressed_pubkey()
        self.test_signature_version_1()
        self.test_non_standard_witness_blinding()
        self.test_non_standard_witness()
        self.test_upgrade_after_activation()
        self.test_witness_sigops()
        self.test_superfluous_witness()
# Individual tests
    def subtest(func):  # noqa: N805
        """Wraps the subtests for logging and state assertions.

        Verifies the segwit deployment status is unchanged by the subtest and
        that the subtest leaves utxos behind for its successor."""
        def func_wrapper(self, *args, **kwargs):
            self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status))
            # Assert segwit status is as expected
            assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
            func(self, *args, **kwargs)
            # Each subtest should leave some utxos for the next subtest
            assert self.utxo
            sync_blocks(self.nodes)
            # Assert segwit status is as expected at end of subtest
            assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)

        return func_wrapper
    @subtest
    def test_non_witness_transaction(self):
        """See if sending a regular transaction works, and create a utxo to use in later tests."""
        # Mine a block with an anyone-can-spend coinbase,
        # let it mature, then try to spend it.

        block = self.build_next_block(version=1)
        block.solve()
        self.test_node.send_message(msg_block(block))
        self.test_node.sync_with_ping()  # make sure the block was processed
        txid = block.vtx[0].sha256

        self.nodes[0].generate(99)  # let the block mature

        # Create a transaction that spends the coinbase
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.calc_sha256()

        # Check that serializing it with or without witness is the same
        # This is a sanity check of our testing framework.
        assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())

        self.test_node.send_message(msg_witness_tx(tx))
        self.test_node.sync_with_ping()  # make sure the tx was processed
        assert(tx.hash in self.nodes[0].getrawmempool())
        # Save this transaction for later
        self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
        self.nodes[0].generate(1)
    @subtest
    def test_unnecessary_witness_before_segwit_activation(self):
        """Verify that blocks with witnesses are rejected before activation."""

        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]

        # Verify the hash with witness differs from the txid
        # (otherwise our testing framework must be broken!)
        tx.rehash()
        assert(tx.sha256 != tx.calc_sha256(with_witness=True))

        # Construct a segwit-signaling block that includes the transaction.
        block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
        self.update_witness_block_with_transactions(block, [tx])
        # Sending witness data before activation is not allowed (anti-spam
        # rule).
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')

        # But it should not be permanently marked bad...
        # Resend without witness information.
        self.test_node.send_message(msg_block(block))
        self.test_node.sync_with_ping()
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        # Update our utxo list; we spent the first entry.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
    @subtest
    def test_block_relay(self):
        """Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.

        This is true regardless of segwit activation.

        Also test that we don't ask for blocks from unupgraded peers."""

        blocktype = 2 | MSG_WITNESS_FLAG

        # test_node has set NODE_WITNESS, so all getdata requests should be for
        # witness blocks.
        # Test announcing a block via inv results in a getdata, and that
        # announcing a version 4 or random VB block with a header results in a getdata
        block1 = self.build_next_block()
        block1.solve()

        self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        test_witness_block(self.nodes[0], self.test_node, block1, True)

        block2 = self.build_next_block()
        block2.solve()

        self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        test_witness_block(self.nodes[0], self.test_node, block2, True)

        # A block signalling an unrelated versionbit must still be fetched
        # as a witness block.
        block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
        block3.solve()
        self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        test_witness_block(self.nodes[0], self.test_node, block3, True)

        # Check that we can getdata for witness blocks or regular blocks,
        # and the right thing happens.
        if self.segwit_status != 'active':
            # Before activation, we should be able to request old blocks with
            # or without witness, and they should be the same.
            chain_height = self.nodes[0].getblockcount()
            # Pick 10 random blocks on main chain, and verify that getdata's
            # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
            all_heights = list(range(chain_height + 1))
            random.shuffle(all_heights)
            all_heights = all_heights[0:10]
            for height in all_heights:
                block_hash = self.nodes[0].getblockhash(height)
                rpc_block = self.nodes[0].getblock(block_hash, False)
                block_hash = int(block_hash, 16)
                block = self.test_node.request_block(block_hash, 2)
                wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
                assert_equal(block.serialize(True), wit_block.serialize(True))
                assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
        else:
            # After activation, witness blocks and non-witness blocks should
            # be different.  Verify rpc getblock() returns witness blocks, while
            # getdata respects the requested type.
            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [])
            # This gives us a witness commitment.
            assert(len(block.vtx[0].wit.vtxinwit) == 1)
            assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            # Now try to retrieve it...
            rpc_block = self.nodes[0].getblock(block.hash, False)
            non_wit_block = self.test_node.request_block(block.sha256, 2)
            wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
            assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
            assert_equal(wit_block.serialize(False), non_wit_block.serialize())
            assert_equal(wit_block.serialize(True), block.serialize(True))

            # Test size, vsize, weight
            rpc_details = self.nodes[0].getblock(block.hash, True)
            assert_equal(rpc_details["size"], len(block.serialize(True)))
            assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
            weight = 3 * len(block.serialize(False)) + len(block.serialize(True))
            assert_equal(rpc_details["weight"], weight)

            # Upgraded node should not ask for blocks from unupgraded
            block4 = self.build_next_block(version=4)
            block4.solve()
            self.old_node.getdataset = set()

            # Blocks can be requested via direct-fetch (immediately upon processing the announcement)
            # or via parallel download (with an indeterminate delay from processing the announcement)
            # so to test that a block is NOT requested, we could guess a time period to sleep for,
            # and then check. We can avoid the sleep() by taking advantage of transaction getdata's
            # being processed after block getdata's, and announce a transaction as well,
            # and then check to see if that particular getdata has been received.
            # Since 0.14, inv's will only be responded to with a getheaders, so send a header
            # to announce this block.
            msg = msg_headers()
            msg.headers = [CBlockHeader(block4)]
            self.old_node.send_message(msg)
            self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
            assert(block4.sha256 not in self.old_node.getdataset)
    @subtest
    def test_v0_outputs_arent_spendable(self):
        """Test that v0 outputs aren't spendable before segwit activation.

        ~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
        backdated so that it applies to all blocks, going back to the genesis
        block.

        Consequently, version 0 witness outputs are never spendable without
        witness, and so can't be spent before segwit activation (the point at which
        blocks are permitted to contain witnesses)."""
        # node2 doesn't need to be connected for this test.
        # (If it's connected, node0 may propagate an invalid block to it over
        # compact blocks and the nodes would have inconsistent tips.)
        disconnect_nodes(self.nodes[0], 2)
        # Create two outputs, a p2wsh and p2sh-p2wsh
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        p2sh_pubkey = hash160(script_pubkey)
        p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
        value = self.utxo[0].nValue // 3
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
        tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
        # Third output is anyone-can-spend; used to refresh self.utxo at the end.
        tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
        tx.rehash()
        txid = tx.sha256
        # Add it to a block
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        # Verify that segwit isn't activated. A block serialized with witness
        # should be rejected prior to activation.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
        # Now send the block without witness. It should be accepted
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
        # Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
        p2wsh_tx = CTransaction()
        p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
        p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
        p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
        p2wsh_tx.rehash()
        p2sh_p2wsh_tx = CTransaction()
        # scriptSig pushes the redeem script (the p2wsh scriptPubKey).
        p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
        p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
        p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
        p2sh_p2wsh_tx.rehash()
        for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [tx])
            # When the block is serialized with a witness, the block will be rejected because witness
            # data isn't allowed in blocks that don't commit to witness data.
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
            # When the block is serialized without witness, validation fails because the transaction is
            # invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
            # without a witness is invalid).
            # Note: The reject reason for this failure could be
            # 'block-validation-failed' (if script check threads > 1) or
            # 'non-mandatory-script-verify-flag (Witness program was passed an
            # empty witness)' (otherwise).
            # TODO: support multiple acceptable reject reasons.
            # Scoutcoin: SCRIPT_VERIFY_WITNESS is enforced when segwit is activated
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
        connect_nodes(self.nodes[0], 2)
        self.utxo.pop(0)
        self.utxo.append(UTXO(txid, 2, value))
@subtest
def advance_to_segwit_started(self):
"""Mine enough blocks for segwit's vb state to be 'started'."""
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD - height - 1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.segwit_status = 'started'
    @subtest
    def test_getblocktemplate_before_lockin(self):
        """Test getblocktemplate output before segwit lock-in.

        A segwit-aware node (node0) should signal the witness version bit and
        supply a 'default_witness_commitment'; a non-segwit node (node2)
        should do neither."""
        # Create one mempool transaction so the template contains a non-coinbase tx.
        txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
        for node in [self.nodes[0], self.nodes[2]]:
            gbt_results = node.getblocktemplate({"rules": ["segwit"]})
            block_version = gbt_results['version']
            if node == self.nodes[2]:
                # If this is a non-segwit node, we should not get a witness
                # commitment, nor a version bit signalling segwit.
                assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
                assert('default_witness_commitment' not in gbt_results)
            else:
                # For segwit-aware nodes, check the version bit and the witness
                # commitment are correct.
                assert(block_version & (1 << VB_WITNESS_BIT) != 0)
                assert('default_witness_commitment' in gbt_results)
                witness_commitment = gbt_results['default_witness_commitment']
                # Check that default_witness_commitment is present.
                # Witness merkle root over [coinbase placeholder (0), txid].
                witness_root = CBlock.get_merkle_root([ser_uint256(0),
                                                       ser_uint256(txid)])
                script = get_witness_script(witness_root, 0)
                assert_equal(witness_commitment, bytes_to_hex_str(script))
@subtest
def advance_to_segwit_lockin(self):
"""Mine enough blocks to lock in segwit, but don't activate."""
height = self.nodes[0].getblockcount()
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD - 1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.segwit_status = 'locked_in'
    @subtest
    def test_witness_tx_relay_before_segwit_activation(self):
        """Test relay of witness-bearing transactions before segwit activation.

        A transaction delivered with an (unnecessary) witness must be rejected
        pre-activation, but must not enter the recently-rejected filter; a
        non-NODE_WITNESS peer must only be asked for the non-witness
        serialization."""
        # Generate a transaction that doesn't require a witness, but send it
        # with a witness. Should be rejected for premature-witness, but should
        # not be added to recently rejected list.
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
        tx.rehash()
        tx_hash = tx.sha256
        tx_value = tx.vout[0].nValue
        # Verify that if a peer doesn't set nServices to include NODE_WITNESS,
        # the getdata is just for the non-witness portion (inv type 1 = MSG_TX).
        self.old_node.announce_tx_and_wait_for_getdata(tx)
        assert(self.old_node.last_message["getdata"].inv[0].type == 1)
        # Since we haven't delivered the tx yet, inv'ing the same tx from
        # a witness transaction ought not result in a getdata.
        self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
        # Delivering this transaction with witness should fail (no matter who
        # its from)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
        # But eliminating the witness should fix it
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
        # Cleanup: mine the first transaction and update utxo
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx_hash, 0, tx_value))
    @subtest
    def test_standardness_v0(self):
        """Test V0 txout standardness.

        V0 segwit outputs and inputs are always standard.
        V0 segwit inputs may only be mined after activation, but not before."""
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        p2sh_pubkey = hash160(witness_program)
        p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
        # First prepare a p2sh output (so that spending it will pass standardness)
        p2sh_tx = CTransaction()
        p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
        p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
        p2sh_tx.rehash()
        # Mine it on test_node to create the confirmed output.
        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # Now test standardness of v0 P2WSH outputs.
        # Start by creating a transaction with two outputs.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
        tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
        tx.vout.append(CTxOut(8000, script_pubkey))  # Might burn this later
        tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER  # Just to have the option to bump this tx from the mempool
        tx.rehash()
        # This is always accepted, since the mempool policy is to consider segwit as always active
        # and thus allow segwit outputs
        test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
        # Now create something that looks like a P2PKH output. This won't be spendable.
        script_pubkey = CScript([OP_0, hash160(witness_hash)])
        tx2 = CTransaction()
        # tx was accepted, so we spend the second output.
        tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
        tx2.vout = [CTxOut(7000, script_pubkey)]
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
        # Now update self.utxo for later tests.
        tx3 = CTransaction()
        # tx and tx2 were both accepted. Don't bother trying to reclaim the
        # P2PKH output; just send tx's first output back to an anyone-can-spend.
        sync_mempools([self.nodes[0], self.nodes[1]])
        tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
        tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
        tx3.wit.vtxinwit.append(CTxInWitness())
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx3.rehash()
        if self.segwit_status != 'active':
            # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
            # in blocks and the tx is impossible to mine right now.
            assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
            # Create the same output as tx3, but by replacing tx
            # (tx signalled BIP125 replaceability above, so this replacement
            # spends tx's inputs directly instead of spending tx's output).
            tx3_out = tx3.vout[0]
            tx3 = tx
            tx3.vout = [tx3_out]
            tx3.rehash()
            assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
self.segwit_status = 'active'
    @subtest
    def test_p2sh_witness(self):
        """Test P2SH wrapped witness programs.

        Spending a p2sh-p2wsh output requires the redeem script in the
        scriptSig and the actual witness data in the witness — never in the
        scriptSig."""
        # Prepare the p2sh-wrapped witness output
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        p2wsh_pubkey = CScript([OP_0, witness_hash])
        p2sh_witness_hash = hash160(p2wsh_pubkey)
        script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
        script_sig = CScript([p2wsh_pubkey])  # a push of the redeem script
        # Fund the P2SH output
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        # Verify mempool acceptance and block validity
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
        sync_blocks(self.nodes)
        # Now test attempts to spend the output.
        spend_tx = CTransaction()
        spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
        spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        spend_tx.rehash()
        # This transaction should not be accepted into the mempool pre- or
        # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
        # will require a witness to spend a witness program regardless of
        # segwit activation. Note that older bitcoind's that are not
        # segwit-aware would also reject this for failing CLEANSTACK.
        with self.nodes[0].assert_debug_log(
                expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
            test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
        # Try to put the witness script in the scriptSig, should also fail.
        spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
        spend_tx.rehash()
        with self.nodes[0].assert_debug_log(
                expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
            test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
        # Now put the witness script in the witness, should succeed after
        # segwit activates.
        spend_tx.vin[0].scriptSig = script_sig
        spend_tx.rehash()
        spend_tx.wit.vtxinwit.append(CTxInWitness())
        spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
        # Verify mempool acceptance
        test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [spend_tx])
        # If we're after activation, then sending this with witnesses should be valid.
        # This no longer works before activation, because SCRIPT_VERIFY_WITNESS
        # is always set.
        # TODO: rewrite this test to make clear that it only works after activation.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update self.utxo
        self.utxo.pop(0)
        self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
    @subtest
    def test_witness_commitments(self):
        """Test witness commitments.

        This test can only be run after segwit has activated."""
        # First try a correct witness commitment.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Test the test -- witness serialization should be different
        assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
        # This empty block should be valid.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Try to tweak the nonce
        block_2 = self.build_next_block()
        add_witness_commitment(block_2, nonce=28)
        block_2.solve()
        # The commitment should have changed!
        assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
        # This should also be valid.
        test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
        # Now test commitments with actual transactions
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        # Let's construct a witness program
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        # tx2 will spend tx1, and send back to a regular anyone-can-spend address
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        block_3 = self.build_next_block()
        self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
        # Add an extra OP_RETURN output that matches the witness commitment template,
        # even though it has extra data after the incorrect commitment.
        # This block should fail.
        block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        block_3.solve()
        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
        # Add a different commitment with different nonce, but in the
        # right location, and with some funds burned(!).
        # This should succeed (nValue shouldn't affect finding the
        # witness commitment).
        add_witness_commitment(block_3, nonce=0)
        block_3.vtx[0].vout[0].nValue -= 1
        block_3.vtx[0].vout[-1].nValue += 1
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        assert(len(block_3.vtx[0].vout) == 4)  # original coinbase output plus 3 OP_RETURNs
        block_3.solve()
        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
        # Finally test that a block with no witness transactions can
        # omit the commitment.
        block_4 = self.build_next_block()
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
        tx3.rehash()
        block_4.vtx.append(tx3)
        block_4.hashMerkleRoot = block_4.calc_merkle_root()
        block_4.solve()
        test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
        # Update available utxo's for use in later test.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
    @subtest
    def test_block_malleability(self):
        """Test that witness-malleated blocks are not permanently marked invalid.

        Witness data is not covered by the block hash, so a bad witness must
        not cause the block hash itself to be rejected forever; resubmitting
        with a correct witness must succeed."""
        # Make sure that a block that has too big a virtual size
        # because of a too-large coinbase witness is not permanently
        # marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
        assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
        # We can't send over the p2p network, because this is too big to relay
        # TODO: repeat this test with a block that can be relayed
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() != block.hash)
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
        assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        # Same block hash, valid witness this time: it must now be accepted.
        assert(self.nodes[0].getbestblockhash() == block.hash)
        # Now make sure that malleating the witness reserved value doesn't
        # result in a block permanently marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Change the nonce -- should not cause the block to be permanently
        # failed
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Changing the witness reserved value doesn't change the block hash
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    @subtest
    def test_witness_block_size(self):
        """Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB."""
        # TODO: Test that non-witness carrying blocks can't exceed 1MB
        # Skipping this test for now; this is covered in p2p-fullblocktest.py
        block = self.build_next_block()
        assert(len(self.utxo) > 0)
        # Create a P2WSH transaction.
        # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
        # This should give us plenty of room to tweak the spending tx's
        # virtual size.
        NUM_DROPS = 200  # 201 max ops per script!
        NUM_OUTPUTS = 50
        witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
        witness_hash = uint256_from_str(sha256(witness_program))
        script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
        prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
        value = self.utxo[0].nValue
        parent_tx = CTransaction()
        parent_tx.vin.append(CTxIn(prevout, b""))
        child_value = int(value / NUM_OUTPUTS)
        for i in range(NUM_OUTPUTS):
            parent_tx.vout.append(CTxOut(child_value, script_pubkey))
        parent_tx.vout[0].nValue -= 50000  # leave a fee
        assert(parent_tx.vout[0].nValue > 0)
        parent_tx.rehash()
        child_tx = CTransaction()
        for i in range(NUM_OUTPUTS):
            child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
        child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
        # Each input's witness: 2*NUM_DROPS dummy items (consumed by the
        # OP_2DROPs) plus the witness program itself.
        for i in range(NUM_OUTPUTS):
            child_tx.wit.vtxinwit.append(CTxInWitness())
            child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
        child_tx.rehash()
        self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
        vsize = get_virtual_size(block)
        additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
        i = 0
        while additional_bytes > 0:
            # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
            extra_bytes = min(additional_bytes + 1, 55)
            block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
            additional_bytes -= extra_bytes
            i += 1
        block.vtx[0].vout.pop()  # Remove old commitment
        add_witness_commitment(block)
        block.solve()
        vsize = get_virtual_size(block)
        assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
        # Make sure that our test case would exceed the old max-network-message
        # limit
        assert(len(block.serialize(True)) > 2 * 1024 * 1024)
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now resize the second transaction to make the block fit.
        cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
        block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
        block.vtx[0].vout.pop()
        add_witness_commitment(block)
        block.solve()
        assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update available utxo's
        self.utxo.pop(0)
        self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
    @subtest
    def test_submit_block(self):
        """Test that submitblock adds the nonce automatically when possible.

        The commitment nonce lives in the coinbase input witness; submitblock
        can only reconstruct it when the commitment was built with the
        default nonce (0)."""
        block = self.build_next_block()
        # Try using a custom nonce and then don't supply it.
        # This shouldn't possibly work.
        add_witness_commitment(block, nonce=1)
        block.vtx[0].wit = CTxWitness()  # drop the nonce
        block.solve()
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() != block.hash)
        # Now redo commitment with the standard nonce, but let bitcoind fill it in.
        add_witness_commitment(block, nonce=0)
        block.vtx[0].wit = CTxWitness()
        block.solve()
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
        # This time, add a tx with non-empty witness, but don't supply
        # the commitment.
        block_2 = self.build_next_block()
        add_witness_commitment(block_2)
        block_2.solve()
        # Drop commitment and nonce -- submitblock should not fill in.
        block_2.vtx[0].vout.pop()
        block_2.vtx[0].wit = CTxWitness()
        self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
        # Tip should not advance!
        assert(self.nodes[0].getbestblockhash() != block_2.hash)
    @subtest
    def test_extra_witness_data(self):
        """Test extra witness data in a transaction.

        Leftover witness stack elements (beyond what the witness program
        consumes) must invalidate the spend; extra scriptSig data is only
        allowed on non-witness inputs."""
        block = self.build_next_block()
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        # First try extra witness data on a tx that doesn't require a witness
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
        tx.vout.append(CTxOut(1000, CScript([OP_TRUE])))  # non-witness output
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
        tx.rehash()
        self.update_witness_block_with_transactions(block, [tx])
        # Extra witness data should not be allowed.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Try extra signature data. Ok if we're not spending a witness output.
        block.vtx[1].wit.vtxinwit = []
        block.vtx[1].vin[0].scriptSig = CScript([OP_0])
        block.vtx[1].rehash()
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Now try extra witness/signature data on an input that DOES require a
        # witness
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))  # witness output
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b""))  # non-witness
        tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
        tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
        tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        # This has extra witness data, so it should fail.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now get rid of the extra witness, but add extra scriptSig data
        tx2.vin[0].scriptSig = CScript([OP_TRUE])
        tx2.vin[1].scriptSig = CScript([OP_TRUE])
        tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
        tx2.wit.vtxinwit[1].scriptWitness.stack = []
        tx2.rehash()
        add_witness_commitment(block)
        block.solve()
        # This has extra signature data for a witness input, so it should fail.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now get rid of the extra scriptsig on the witness input, and verify
        # success (even with extra scriptsig data in the non-witness input)
        tx2.vin[0].scriptSig = b""
        tx2.rehash()
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update utxo for later tests
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest
    def test_max_witness_push_length(self):
        """Test that witness stack can only allow up to 520 byte pushes."""
        block = self.build_next_block()
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.append(CTxInWitness())
        # First try a 521-byte stack element
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now reduce the length of the stack element
        # (the block still contains tx/tx2; only the commitment is rebuilt)
        tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update the utxo for later tests
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest
    def test_max_witness_program_length(self):
        """Test that witness outputs greater than 10kB can't be spent."""
        MAX_PROGRAM_LENGTH = 10000
        # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
        long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
        assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
        long_witness_hash = sha256(long_witness_program)
        long_script_pubkey = CScript([OP_0, long_witness_hash])
        block = self.build_next_block()
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
        tx.rehash()
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.append(CTxInWitness())
        # 44 witness items + 19 program pushes = 63 elements for the 63 OP_DROPs.
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Try again with one less byte in the witness program
        witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
        assert(len(witness_program) == MAX_PROGRAM_LENGTH)
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
        tx.rehash()
        # tx changed, so re-point tx2's input at the new txid.
        tx2.vin[0].prevout.hash = tx.sha256
        # 43 witness items + 19 program pushes = 62 elements for 62 OP_DROPs.
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
        tx2.rehash()
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest
    def test_witness_input_length(self):
        """Test that vin length must match vtxinwit length."""
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        # Create a transaction that splits our utxo into many outputs
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        value = self.utxo[0].nValue
        for i in range(10):
            tx.vout.append(CTxOut(int(value / 10), script_pubkey))
        tx.vout[0].nValue -= 1000
        assert(tx.vout[0].nValue >= 0)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Try various ways to spend tx that should all break.
        # This "broken" transaction serializer will not normalize
        # the length of vtxinwit.
        class BrokenCTransaction(CTransaction):
            def serialize_with_witness(self):
                """Serialize without padding/truncating vtxinwit to len(vin)."""
                flags = 0
                if not self.wit.is_null():
                    flags |= 1
                r = b""
                r += struct.pack("<i", self.nVersion)
                if flags:
                    # Segwit marker: empty vin vector + flag byte.
                    dummy = []
                    r += ser_vector(dummy)
                    r += struct.pack("<B", flags)
                r += ser_vector(self.vin)
                r += ser_vector(self.vout)
                if flags & 1:
                    # Witness emitted as-is, even if its length != len(vin).
                    r += self.wit.serialize()
                r += struct.pack("<I", self.nLockTime)
                return r
        tx2 = BrokenCTransaction()
        for i in range(10):
            tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
        tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
        # First try using a too long vtxinwit
        for i in range(11):
            tx2.wit.vtxinwit.append(CTxInWitness())
            tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now try using a too short vtxinwit
        tx2.wit.vtxinwit.pop()
        tx2.wit.vtxinwit.pop()
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now make one of the intermediate witnesses be incorrect
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
        tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Fix the broken witness and the block should be accepted.
        tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
    """Test transaction relay after segwit activation.

    After segwit activates, verify that mempool:
    - rejects transactions with unnecessary/extra witnesses
    - accepts transactions with valid witnesses
    and that witness transactions are relayed to non-upgraded peers."""

    # Generate a transaction that doesn't require a witness, but send it
    # with a witness.  Should be rejected because we can't use a witness
    # when spending a non-witness output.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
    tx.wit.vtxinwit.append(CTxInWitness())
    tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
    tx.rehash()
    tx_hash = tx.sha256

    # Verify that unnecessary witnesses are rejected.
    self.test_node.announce_tx_and_wait_for_getdata(tx)
    assert_equal(len(self.nodes[0].getrawmempool()), 0)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)

    # Verify that removing the witness succeeds.
    self.test_node.announce_tx_and_wait_for_getdata(tx)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)

    # Now try to add extra witness data to a valid witness tx.
    witness_program = CScript([OP_TRUE])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
    tx2.rehash()

    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
    tx3.wit.vtxinwit.append(CTxInWitness())

    # Add too-large for IsStandard witness and check that it does not enter reject filter
    p2sh_program = CScript([OP_TRUE])
    p2sh_pubkey = hash160(p2sh_program)
    witness_program2 = CScript([b'a' * 400000])
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
    tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
    tx3.rehash()

    # Node will not be blinded to the transaction: a second announcement still
    # triggers a getdata, showing the oversize-witness rejection was not cached.
    self.std_node.announce_tx_and_wait_for_getdata(tx3)
    test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
    self.std_node.announce_tx_and_wait_for_getdata(tx3)
    test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')

    # Remove witness stuffing, instead add extra witness push on stack
    tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
    tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
    tx3.rehash()

    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
    # tx3 still carries the superfluous extra stack element, so it is rejected.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)

    # Get rid of the extra witness, and verify acceptance.
    tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    # Also check that old_node gets a tx announcement, even though this is
    # a witness transaction.
    self.old_node.wait_for_inv([CInv(1, tx2.sha256)])  # wait until tx2 was inv'ed
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
    self.old_node.wait_for_inv([CInv(1, tx3.sha256)])

    # Test that getrawtransaction returns correct witness information
    # hash, size, vsize
    raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
    assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
    assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
    # weight = 3 * base size + total size; vsize = ceil(weight / 4)
    weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
    vsize = math.ceil(weight / 4)
    assert_equal(raw_tx["vsize"], vsize)
    assert_equal(raw_tx["weight"], weight)
    assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
    assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
    # For a witness tx, vsize must differ from the raw serialized size.
    assert(vsize != raw_tx["size"])

    # Cleanup: mine the transactions and update utxo for next test
    self.nodes[0].generate(1)
    assert_equal(len(self.nodes[0].getrawmempool()), 0)

    self.utxo.pop(0)
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
    """Test validity of future segwit version transactions.

    Future segwit version transactions are non-standard, but valid in blocks.
    Can run this before and after segwit activation."""

    NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
    if len(self.utxo) < NUM_SEGWIT_VERSIONS:
        # Not enough utxos yet: split the first one into NUM_SEGWIT_VERSIONS outputs.
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
        for i in range(NUM_SEGWIT_VERSIONS):
            tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
        tx.rehash()
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop(0)
        for i in range(NUM_SEGWIT_VERSIONS):
            self.utxo.append(UTXO(tx.sha256, i, split_value))

    sync_blocks(self.nodes)
    temp_utxo = []
    tx = CTransaction()
    witness_program = CScript([OP_TRUE])
    witness_hash = sha256(witness_program)
    assert_equal(len(self.nodes[1].getrawmempool()), 0)
    for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
        # First try to spend to a future version segwit script_pubkey.
        # Sending to such an output is non-standard (std_node rejects it)
        # but consensus-valid (test_node accepts it).
        script_pubkey = CScript([CScriptOp(version), witness_hash])
        tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
        tx.vout = [CTxOut(self.utxo[0].nValue - 10000, script_pubkey)]
        tx.rehash()
        test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
        self.utxo.pop(0)
        temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))

    self.nodes[0].generate(1)  # Mine all the transactions
    sync_blocks(self.nodes)
    assert(len(self.nodes[0].getrawmempool()) == 0)

    # Finally, verify that version 0 -> version 1 transactions
    # are non-standard
    script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
    tx2 = CTransaction()
    # tx here is the last (OP_0) transaction from the loop above.
    tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
    tx2.vout = [CTxOut(tx.vout[0].nValue - 10000, script_pubkey)]
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    tx2.rehash()
    # Gets accepted to test_node, because standardness of outputs isn't
    # checked with fRequireStandard
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=False)
    temp_utxo.pop()  # last entry in temp_utxo was the output we just spent
    temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))

    # Spend everything in temp_utxo back to an OP_TRUE output.
    tx3 = CTransaction()
    total_value = 0
    for i in temp_utxo:
        tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
        tx3.wit.vtxinwit.append(CTxInWitness())
        total_value += i.nValue
    tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
    tx3.vout.append(CTxOut(total_value - 10000, CScript([OP_TRUE])))
    tx3.rehash()
    # Spending a higher version witness output is not allowed by policy,
    # even with fRequireStandard=false.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
    # Building a block with the transaction must be valid, however.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2, tx3])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    sync_blocks(self.nodes)

    # Add utxo to our list
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
    """Verify that a witness coinbase output is subject to coinbase maturity."""
    block = self.build_next_block()
    # Rewrite the coinbase output as a version-0 witness program (P2WSH of OP_TRUE).
    script = CScript([OP_TRUE])
    block.vtx[0].vout[0].scriptPubKey = CScript([OP_0, sha256(script)])
    # Rehash the modified coinbase, refresh the merkle root, and re-solve.
    self.update_witness_block_with_transactions(block, [])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Construct a spend of the coinbase's witness output.
    spend = CTransaction()
    spend.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
    spend.vout = [CTxOut(block.vtx[0].vout[0].nValue, script)]
    spend.wit.vtxinwit.append(CTxInWitness())
    spend.wit.vtxinwit[0].scriptWitness.stack = [script]
    spend.rehash()

    # Not yet mature: a block spending the coinbase now must be rejected.
    self.nodes[0].generate(98)
    sync_blocks(self.nodes)
    premature_block = self.build_next_block()
    self.update_witness_block_with_transactions(premature_block, [spend])
    test_witness_block(self.nodes[0], self.test_node, premature_block, accepted=False)

    # One more block reaches maturity; the same spend is now accepted.
    self.nodes[0].generate(1)
    mature_block = self.build_next_block()
    self.update_witness_block_with_transactions(mature_block, [spend])
    test_witness_block(self.nodes[0], self.test_node, mature_block, accepted=True)
    sync_blocks(self.nodes)
@subtest
def test_uncompressed_pubkey(self):
    """Test uncompressed pubkey validity in segwit transactions.

    Uncompressed pubkeys are no longer supported in default relay policy,
    but (for now) are still valid in blocks."""

    # Segwit transactions using uncompressed pubkeys are not accepted
    # under default policy, but should still pass consensus.
    key = ECKey()
    key.generate(False)  # False -> uncompressed key
    pubkey = key.get_pubkey().get_bytes()
    assert_equal(len(pubkey), 65)  # This should be an uncompressed pubkey

    utxo = self.utxo.pop(0)

    # Test 1: P2WPKH
    # First create a P2WPKH output that uses an uncompressed pubkey
    pubkeyhash = hash160(pubkey)
    script_pkh = CScript([OP_0, pubkeyhash])
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
    tx.vout.append(CTxOut(utxo.nValue - 10000, script_pkh))
    tx.rehash()

    # Confirm it in a block.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Now try to spend it. Send it to a P2WSH output, which we'll
    # use in the next test.
    witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
    witness_hash = sha256(witness_program)
    script_wsh = CScript([OP_0, witness_hash])

    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 10000, script_wsh))
    # P2WPKH inputs sign against the corresponding P2PKH script.
    script = get_p2pkh_script(pubkeyhash)
    sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
    tx2.rehash()

    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 2: P2WSH
    # Try to spend the P2WSH output created in last test.
    # Send it to a P2SH(P2WSH) output, which we'll use in the next test.
    p2sh_witness_hash = hash160(script_wsh)
    script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
    script_sig = CScript([script_wsh])

    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 10000, script_p2sh))
    tx3.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)

    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx3])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 3: P2SH(P2WSH)
    # Try to spend the P2SH output created in the last test.
    # Send it to a P2PKH output, which we'll use in the next test.
    script_pubkey = get_p2pkh_script(pubkeyhash)
    tx4 = CTransaction()
    tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
    tx4.vout.append(CTxOut(tx3.vout[0].nValue - 10000, script_pubkey))
    tx4.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)

    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx4])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 4: Uncompressed pubkeys should still be valid in non-segwit
    # transactions.
    tx5 = CTransaction()
    tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
    tx5.vout.append(CTxOut(tx4.vout[0].nValue - 10000, CScript([OP_TRUE])))
    (sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL
    tx5.vin[0].scriptSig = CScript([signature, pubkey])
    tx5.rehash()
    # Should pass policy and consensus.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx5])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
    """Exercise segwit (version-1) signature hashing.

    Checks that the input amount is committed to in the sighash, tests all
    SIGHASH type/ANYONECANPAY combinations over randomized transactions, and
    verifies witness-v0 P2PKH spends put the signature in the witness."""
    key = ECKey()
    key.generate()
    pubkey = key.get_pubkey().get_bytes()

    witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])

    # First create a witness output for use in the tests.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()

    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
    # Mine this transaction in preparation for following tests.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    sync_blocks(self.nodes)
    self.utxo.pop(0)

    # Test each hashtype
    prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
    for sigflag in [0, SIGHASH_ANYONECANPAY]:
        for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
            hashtype |= sigflag
            block = self.build_next_block()
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
            tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
            tx.wit.vtxinwit.append(CTxInWitness())
            # Too-large input value: sighash commits to the amount, so the
            # signature is invalid and the block must be rejected.
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

            # Too-small input value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
            block.vtx.pop()  # remove last tx
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

            # Now try correct value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
            block.vtx.pop()
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

            prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)

    # Test combinations of signature hashes.
    # Split the utxo into a lot of outputs.
    # Randomly choose up to 10 to spend, sign with different hashtypes, and
    # output to a random number of outputs.  Repeat NUM_SIGHASH_TESTS times.
    # Ensure that we've tested a situation where we use SIGHASH_SINGLE with
    # an input index > number of outputs.
    NUM_SIGHASH_TESTS = 500
    temp_utxos = []
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
    split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
    for i in range(NUM_SIGHASH_TESTS):
        tx.vout.append(CTxOut(split_value, script_pubkey))
    tx.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
    for i in range(NUM_SIGHASH_TESTS):
        temp_utxos.append(UTXO(tx.sha256, i, split_value))

    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    block = self.build_next_block()
    used_sighash_single_out_of_bounds = False
    # NOTE(review): the inner loops below reuse the loop variable `i`.  This
    # is harmless because the outer `for` reassigns `i` each iteration and it
    # is only read at the top of the loop body, but renaming would be clearer.
    for i in range(NUM_SIGHASH_TESTS):
        # Ping regularly to keep the connection alive
        if (not i % 100):
            self.test_node.sync_with_ping()
        # Choose random number of inputs to use.
        num_inputs = random.randint(1, 10)
        # Create a slight bias for producing more utxos
        num_outputs = random.randint(1, 11)
        random.shuffle(temp_utxos)
        assert(len(temp_utxos) > num_inputs)
        tx = CTransaction()
        total_value = 0
        for i in range(num_inputs):
            tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
            tx.wit.vtxinwit.append(CTxInWitness())
            total_value += temp_utxos[i].nValue
        split_value = total_value // num_outputs
        for i in range(num_outputs):
            tx.vout.append(CTxOut(split_value, script_pubkey))
        for i in range(num_inputs):
            # Now try to sign each input, using a random hashtype.
            anyonecanpay = 0
            if random.randint(0, 1):
                anyonecanpay = SIGHASH_ANYONECANPAY
            hashtype = random.randint(1, 3) | anyonecanpay
            sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
            if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
                used_sighash_single_out_of_bounds = True
        tx.rehash()
        for i in range(num_outputs):
            temp_utxos.append(UTXO(tx.sha256, i, split_value))
        temp_utxos = temp_utxos[num_inputs:]

        block.vtx.append(tx)

        # Test the block periodically, if we're close to maxblocksize
        if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
            self.update_witness_block_with_transactions(block, [])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            block = self.build_next_block()

    if (not used_sighash_single_out_of_bounds):
        self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
    # Test the transactions we've added to the block
    if (len(block.vtx) > 1):
        self.update_witness_block_with_transactions(block, [])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Now test witness version 0 P2PKH transactions
    pubkeyhash = hash160(pubkey)
    script_pkh = CScript([OP_0, pubkeyhash])
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
    tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
    tx.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))

    script = get_p2pkh_script(pubkeyhash)
    sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL

    # Check that we can't have a scriptSig
    tx2.vin[0].scriptSig = CScript([signature, pubkey])
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx, tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Move the signature to the witness.
    block.vtx.pop()
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
    tx2.vin[0].scriptSig = b""
    tx2.rehash()

    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    temp_utxos.pop(0)

    # Update self.utxos for later tests by creating two outputs
    # that consolidate all the coins in temp_utxos.
    output_value = sum(i.nValue for i in temp_utxos) // 2

    tx = CTransaction()
    index = 0
    # Just spend to our usual anyone-can-spend output
    tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
    for i in temp_utxos:
        # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
        # the signatures as we go.
        tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
        tx.wit.vtxinwit.append(CTxInWitness())
        sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
        index += 1
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    for i in range(len(tx.vout)):
        self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
    """Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""

    # Create a p2sh output -- this is so we can pass the standardness
    # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
    # in P2SH).
    p2sh_program = CScript([OP_TRUE])
    p2sh_pubkey = hash160(p2sh_program)
    script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])

    # Now check that unnecessary witnesses can't be used to blind a node
    # to a transaction, eg by violating standardness checks.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)

    # We'll add an unnecessary witness to this transaction that would cause
    # it to be non-standard, to test that violating policy with a witness
    # doesn't blind a node to a transaction.  Transactions
    # rejected for having a witness shouldn't be added
    # to the rejection cache.
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
    tx2.rehash()
    # This will be rejected due to a policy check:
    # No witness is allowed, since it is not a witness program but a p2sh program
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')

    # If we send without witness, it should be accepted.
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)

    # Now create a new anyone-can-spend utxo for the next test.
    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
    tx3.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)

    self.nodes[0].generate(1)
    sync_blocks(self.nodes)

    # Update our utxo list; we spent the first entry.
    self.utxo.pop(0)
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
    """Test detection of non-standard P2WSH witness"""
    pad = chr(1).encode('latin-1')  # single 0x01 byte used as filler

    # Create scripts for tests
    scripts = []
    scripts.append(CScript([OP_DROP] * 100))
    scripts.append(CScript([OP_DROP] * 99))
    scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
    scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))

    p2wsh_scripts = []

    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))

    # For each script, generate a pair of P2WSH and P2SH-P2WSH output.
    outputvalue = (self.utxo[0].nValue - 10000) // (len(scripts) * 2)
    for i in scripts:
        p2wsh = CScript([OP_0, sha256(i)])
        p2sh = hash160(p2wsh)
        p2wsh_scripts.append(p2wsh)
        tx.vout.append(CTxOut(outputvalue, p2wsh))
        tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
    tx.rehash()
    txid = tx.sha256
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)

    self.nodes[0].generate(1)
    sync_blocks(self.nodes)

    # Creating transactions for tests
    # Even outputs (i * 2) are native P2WSH; odd outputs are P2SH-P2WSH.
    p2wsh_txs = []
    p2sh_txs = []
    for i in range(len(scripts)):
        p2wsh_tx = CTransaction()
        p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
        p2wsh_tx.vout.append(CTxOut(outputvalue - 50000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
        p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2wsh_tx.rehash()
        p2wsh_txs.append(p2wsh_tx)
        p2sh_tx = CTransaction()
        p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
        p2sh_tx.vout.append(CTxOut(outputvalue - 50000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
        p2sh_tx.wit.vtxinwit.append(CTxInWitness())
        p2sh_tx.rehash()
        p2sh_txs.append(p2sh_tx)

    # Testing native P2WSH
    # Witness stack size, excluding witnessScript, over 100 is non-standard
    p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)

    # Stack element size over 80 bytes is non-standard
    p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
    # Standard nodes should accept if element size is not over 80 bytes
    p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)

    # witnessScript size at 3600 bytes is standard
    p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)

    # witnessScript size at 3601 bytes is non-standard
    p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)

    # Repeating the same tests with P2SH-P2WSH
    p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
    p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
    p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
    p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
    p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)

    self.nodes[0].generate(1)  # Mine and clean up the mempool of non-standard node
    # Valid but non-standard transactions in a block should be accepted by standard node
    sync_blocks(self.nodes)
    assert_equal(len(self.nodes[0].getrawmempool()), 0)
    assert_equal(len(self.nodes[1].getrawmempool()), 0)

    self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
    """Test the behavior of starting up a segwit-aware node after the softfork has activated."""
    self.log.info("Testing rejection of block.nVersion < BIP9_TOP_BITS blocks")
    # A pre-BIP9 version number must be rejected outright.
    legacy_block = self.build_next_block(version=4)
    legacy_block.solve()
    result = self.nodes[0].submitblock(bytes_to_hex_str(legacy_block.serialize(True)))
    assert_equal(result, 'bad-version(0x00000004)')

    # Restart node2 with the new binary and reconnect it to node0.
    self.stop_node(2)
    self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"])
    connect_nodes(self.nodes[0], 2)

    sync_blocks(self.nodes)

    # The restarted peer must see segwit as active...
    assert get_bip9_status(self.nodes[2], 'segwit')['status'] == "active"

    # ...and its chain must match node0's, block for block, down to genesis.
    for height in range(self.nodes[2].getblockcount(), -1, -1):
        tip_hash = self.nodes[2].getblockhash(height)
        assert_equal(tip_hash, self.nodes[0].getblockhash(height))
        assert_equal(self.nodes[0].getblock(tip_hash), self.nodes[2].getblock(tip_hash))
@subtest
def test_witness_sigops(self):
    """Test sigop counting is correct inside witnesses."""

    # Keep this under MAX_OPS_PER_SCRIPT (201)
    witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])

    # Each CHECKMULTISIG counts as 20 sigops; each CHECKSIG as 1.
    sigops_per_script = 20 * 5 + 193 * 1
    # We'll produce 2 extra outputs, one with a program that would take us
    # over max sig ops, and one with a program that would exactly reach max
    # sig ops
    outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
    extra_sigops_available = MAX_SIGOP_COST % sigops_per_script

    # We chose the number of checkmultisigs/checksigs to make this work:
    assert(extra_sigops_available < 100)  # steer clear of MAX_OPS_PER_SCRIPT

    # This script, when spent with the first
    # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
    # would push us just over the block sigop limit.
    witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
    witness_hash_toomany = sha256(witness_program_toomany)
    script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])

    # If we spend this script instead, we would exactly reach our sigop
    # limit (for witness sigops).
    witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
    witness_hash_justright = sha256(witness_program_justright)
    script_pubkey_justright = CScript([OP_0, witness_hash_justright])

    # First split our available utxo into a bunch of outputs
    split_value = self.utxo[0].nValue // outputs
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    for i in range(outputs):
        tx.vout.append(CTxOut(split_value, script_pubkey))
    # Last two outputs get the special over-limit / at-limit programs.
    tx.vout[-2].scriptPubKey = script_pubkey_toomany
    tx.vout[-1].scriptPubKey = script_pubkey_justright
    tx.rehash()

    block_1 = self.build_next_block()
    self.update_witness_block_with_transactions(block_1, [tx])
    test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)

    tx2 = CTransaction()
    # If we try to spend the first n-1 outputs from tx, that should be
    # too many sigops.
    total_value = 0
    for i in range(outputs - 1):
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
        total_value += tx.vout[i].nValue
    # The last input added spends the "toomany" output.
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
    tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
    tx2.rehash()

    block_2 = self.build_next_block()
    self.update_witness_block_with_transactions(block_2, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)

    # Try dropping the last input in tx2, and add an output that has
    # too many sigops (contributing to legacy sigop count).
    # Legacy sigops are scaled by a factor of 4 relative to witness sigops.
    checksig_count = (extra_sigops_available // 4) + 1
    script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
    tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
    tx2.vin.pop()
    tx2.wit.vtxinwit.pop()
    tx2.vout[0].nValue -= tx.vout[-2].nValue
    tx2.rehash()
    block_3 = self.build_next_block()
    self.update_witness_block_with_transactions(block_3, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)

    # If we drop the last checksig in this output, the tx should succeed.
    block_4 = self.build_next_block()
    tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
    tx2.rehash()
    self.update_witness_block_with_transactions(block_4, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)

    # Reset the tip back down for the next test
    sync_blocks(self.nodes)
    for x in self.nodes:
        x.invalidateblock(block_4.hash)

    # Try replacing the last input of tx2 to be spending the last
    # output of tx
    block_5 = self.build_next_block()
    tx2.vout.pop()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
    tx2.rehash()
    self.update_witness_block_with_transactions(block_5, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)

    # TODO: test p2sh sigop counting
    def test_superfluous_witness(self):
        """Check that a tx serialized with a bogus witness flag (3) is rejected.

        A flag of 3 sets both the witness bit (1) and an undefined bit (2); the
        deserializer must reject the extra "dummy" vector that bit 2 implies,
        both via RPC decoding and over the p2p network.
        """
        # Serialization of tx that puts witness flag to 3 always
        def serialize_with_bogus_witness(tx):
            # Mirrors CTransaction serialization but forces the two-bit flag.
            flags = 3
            r = b""
            r += struct.pack("<i", tx.nVersion)
            if flags:
                dummy = []
                r += ser_vector(dummy)
                r += struct.pack("<B", flags)
            r += ser_vector(tx.vin)
            r += ser_vector(tx.vout)
            if flags & 1:
                if (len(tx.wit.vtxinwit) != len(tx.vin)):
                    # vtxinwit must have the same length as vin
                    tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
                    for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
                        tx.wit.vtxinwit.append(CTxInWitness())
                r += tx.wit.serialize()
            r += struct.pack("<I", tx.nLockTime)
            return r
        class msg_bogus_tx(msg_tx):
            # p2p tx message that always uses the bogus serialization above.
            def serialize(self):
                return serialize_with_bogus_witness(self.tx)
        self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
        self.nodes[0].generate(1)
        unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('rltc'))
        raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
        tx = FromHex(CTransaction(), raw)
        # Unsigned tx: no witness data, so the superfluous dummy vector trips
        # "Superfluous witness record".
        assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
        with self.nodes[0].assert_debug_log(['Superfluous witness record']):
            self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
            self.nodes[0].p2p.sync_with_ping()
        raw = self.nodes[0].signrawtransactionwithwallet(raw)
        assert raw['complete']
        raw = raw['hex']
        tx = FromHex(CTransaction(), raw)
        # Signed tx: witness present, so the failure is the unknown flag bit
        # ("Unknown transaction optional data").
        assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
        with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
            self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
            self.nodes[0].p2p.sync_with_ping()
# Script entry point: run the whole segwit test suite when executed directly.
if __name__ == '__main__':
    SegWitTest().main()
# (removed: stray dataset-metadata artifact from text extraction)
from binascii import hexlify
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_witness_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
connect_nodes,
disconnect_nodes,
get_bip9_status,
hex_str_to_bytes,
sync_blocks,
sync_mempools,
assert_raises_rpc_error,
)
# Version-bits (BIP9) parameters used by the segwit deployment on regtest.
VB_WITNESS_BIT = 1  # version bit signalling segwit
VB_PERIOD = 144  # regtest version-bits confirmation window (blocks)
VB_TOP_BITS = 0x20000000  # top bits required for version-bits signalling
MAX_SIGOP_COST = 80000  # consensus limit on weighted sigop cost per block
class UTXO():
    """Lightweight record of a spendable output: txid (as int), index, value."""
    def __init__(self, sha256, n, value):
        # Attribute names mirror the fields the tests read directly.
        self.sha256 = sha256
        self.n = n
        self.nValue = value
def get_p2pkh_script(pubkeyhash):
    """Return the canonical pay-to-pubkey-hash output script for *pubkeyhash*."""
    ops = [CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash,
           CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)]
    return CScript(ops)
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
    """Sign input *in_idx* of *tx_to* with *key* using the segwit v0 (BIP143)
    sighash for *script*/*value*, set the witness stack to [signature, script],
    and rehash *tx_to* in place."""
    sighash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
    signature = key.sign_ecdsa(sighash) + chr(hashtype).encode('latin-1')
    tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
    tx_to.rehash()
def get_virtual_size(witness_block):
    """Calculate the BIP141 virtual size of *witness_block*.

    vsize = ceil(weight / 4), where weight = 3 * stripped_size + total_size
    (the non-witness serialization counted three times plus the full
    serialization once).
    """
    base_size = len(witness_block.serialize(with_witness=False))
    total_size = len(witness_block.serialize(with_witness=True))
    # Exact integer ceiling division; int((x) / 4) round-tripped through a
    # float, which loses precision for very large serializations.
    return (3 * base_size + total_size + 3) // 4
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
    """Send *tx* from *p2p* to *node* (witness-serialized iff *with_witness*)
    and assert whether it entered the mempool. If *reason* is given, also
    require that string to appear in the node's debug log."""
    expected_msgs = [reason] if reason else []
    with node.assert_debug_log(expected_msgs=expected_msgs):
        tx_message = msg_witness_tx(tx) if with_witness else msg_tx(tx)
        p2p.send_message(tx_message)
        p2p.sync_with_ping()
        assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
    """Relay *block* from *p2p* to *node* (witness-serialized iff
    *with_witness*) and assert whether it became the new tip. If *reason* is
    given, also require that string to appear in the node's debug log."""
    expected_msgs = [reason] if reason else []
    with node.assert_debug_log(expected_msgs=expected_msgs):
        block_message = msg_witness_block(block) if with_witness else msg_block(block)
        p2p.send_message(block_message)
        p2p.sync_with_ping()
        assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
    """P2P test peer that records every getdata request it receives and
    provides helpers for announcing txs/blocks and fetching blocks."""
    def __init__(self):
        super().__init__()
        # Hashes of all inventory items the node has requested from us.
        self.getdataset = set()
    def on_getdata(self, message):
        """Record the hash of each requested inventory item."""
        for inv in message.inv:
            self.getdataset.add(inv.hash)
    def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
        """Announce *tx* via inv. If *success*, wait for the node's getdata;
        otherwise sleep *timeout* seconds and assert no getdata arrived."""
        with mininode_lock:
            self.last_message.pop("getdata", None)
        self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
        if success:
            self.wait_for_getdata(timeout)
        else:
            time.sleep(timeout)
            assert not self.last_message.get("getdata")
    def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
        """Announce *block* via a headers message (if *use_header*) or via inv
        followed by headers, then wait for the node's getdata."""
        with mininode_lock:
            self.last_message.pop("getdata", None)
            self.last_message.pop("getheaders", None)
        msg = msg_headers()
        msg.headers = [CBlockHeader(block)]
        if use_header:
            self.send_message(msg)
        else:
            self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
            self.wait_for_getheaders()
            self.send_message(msg)
        self.wait_for_getdata()
    def request_block(self, blockhash, inv_type, timeout=60):
        """Request *blockhash* with the given inv type (plain or witness) and
        return the block the node sends back."""
        with mininode_lock:
            self.last_message.pop("block", None)
        self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
        self.wait_for_block(blockhash, timeout)
        return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
    def set_test_params(self):
        """Three nodes: node0/node1 segwit-capable (node1 rejects non-standard
        txs), node2 with the segwit deployment timed out (never activates)."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0", "-mempoolreplacement=1"]]
    def skip_test_if_missing_module(self):
        """Skip the whole test when the build has no wallet support."""
        self.skip_if_no_wallet()
    def setup_network(self):
        """Start the nodes, connect node0 to node1 and node2, and sync."""
        self.setup_nodes()
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        self.sync_all()
    def build_next_block(self, version=VB_TOP_BITS):
        """Build (but do not solve or submit) a block on node0's tip with the
        given nVersion. Callers add transactions and solve it themselves."""
        tip = self.nodes[0].getbestblockhash()
        height = self.nodes[0].getblockcount() + 1
        # Use mediantime + 1 so the new block's time is always valid.
        block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
        block = create_block(int(tip, 16), create_coinbase(height), block_time)
        block.nVersion = version
        block.rehash()
        return block
    def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
        """Append *tx_list* to *block*, (re)compute the witness commitment
        with the given nonce, and solve the proof of work."""
        block.vtx.extend(tx_list)
        add_witness_commitment(block, nonce)
        block.solve()
    def run_test(self):
        """Run every subtest in order, advancing the segwit deployment state
        ('defined' -> 'started' -> 'locked_in' -> 'active') between groups."""
        # Three peers: witness-capable, legacy (no NODE_WITNESS), and a
        # witness-capable peer on the standardness-enforcing node1.
        self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
        self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
        self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
        assert self.test_node.nServices & NODE_WITNESS != 0
        # Shared list of spendable outputs handed from subtest to subtest.
        self.utxo = []
        # Segwit status 'defined'
        self.segwit_status = 'defined'
        self.test_non_witness_transaction()
        self.test_unnecessary_witness_before_segwit_activation()
        self.test_v0_outputs_arent_spendable()
        self.test_block_relay()
        self.advance_to_segwit_started()
        # Segwit status 'started'
        self.test_getblocktemplate_before_lockin()
        self.advance_to_segwit_lockin()
        # Segwit status 'locked_in'
        self.test_unnecessary_witness_before_segwit_activation()
        self.test_witness_tx_relay_before_segwit_activation()
        self.test_block_relay()
        self.test_standardness_v0()
        self.advance_to_segwit_active()
        # Segwit status 'active'
        self.test_p2sh_witness()
        self.test_witness_commitments()
        self.test_block_malleability()
        self.test_witness_block_size()
        self.test_submit_block()
        self.test_extra_witness_data()
        self.test_max_witness_push_length()
        self.test_max_witness_program_length()
        self.test_witness_input_length()
        self.test_block_relay()
        self.test_tx_relay_after_segwit_activation()
        self.test_standardness_v0()
        self.test_segwit_versions()
        self.test_premature_coinbase_witness_spend()
        self.test_uncompressed_pubkey()
        self.test_signature_version_1()
        self.test_non_standard_witness_blinding()
        self.test_non_standard_witness()
        self.test_upgrade_after_activation()
        self.test_witness_sigops()
        self.test_superfluous_witness()
# Individual tests
    def subtest(func): # noqa: N805
        """Decorator for subtests: log the subtest name, assert the expected
        segwit deployment status before and after, and check that at least
        one utxo remains for the following subtest."""
        def func_wrapper(self, *args, **kwargs):
            self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status))
            # Assert segwit status is as expected
            assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
            func(self, *args, **kwargs)
            # Each subtest should leave some utxos for the next subtest
            assert self.utxo
            sync_blocks(self.nodes)
            # Assert segwit status is as expected at end of subtest
            assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
        return func_wrapper
    @subtest
    def test_non_witness_transaction(self):
        """Mine and mature a pre-segwit coinbase, spend it with a plain
        (non-witness) transaction, and seed self.utxo with the change."""
        # Mine a block with an anyone-can-spend coinbase,
        # let it mature, then try to spend it.
        block = self.build_next_block(version=1)
        block.solve()
        self.test_node.send_message(msg_block(block))
        self.test_node.sync_with_ping() # make sure the block was processed
        txid = block.vtx[0].sha256
        self.nodes[0].generate(99) # let the block mature
        # Create a transaction that spends the coinbase
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.calc_sha256()
        # Check that serializing it with or without witness is the same
        # This is a sanity check of our testing framework.
        assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
        self.test_node.send_message(msg_witness_tx(tx))
        self.test_node.sync_with_ping() # make sure the tx was processed
        assert(tx.hash in self.nodes[0].getrawmempool())
        # Save this transaction for later
        self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
        self.nodes[0].generate(1)
    @subtest
    def test_unnecessary_witness_before_segwit_activation(self):
        """Blocks carrying witness data are rejected before activation, but
        the same block is accepted when resent without witnesses."""
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
        # Verify the hash with witness differs from the txid
        # (otherwise our testing framework must be broken!)
        tx.rehash()
        assert(tx.sha256 != tx.calc_sha256(with_witness=True))
        # Construct a segwit-signaling block that includes the transaction.
        block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
        self.update_witness_block_with_transactions(block, [tx])
        # Sending witness data before activation is not allowed (anti-spam
        # rule).
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
        # But it should not be permanently marked bad...
        # Resend without witness information.
        self.test_node.send_message(msg_block(block))
        self.test_node.sync_with_ping()
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
        # Update our utxo list; we spent the first entry.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
    @subtest
    def test_block_relay(self):
        """Witness-capable peers always get witness-type getdata; check block
        retrieval with/without witness both before and after activation, and
        that legacy peers are not announced witness blocks via headers."""
        blocktype = 2 | MSG_WITNESS_FLAG
        # test_node has set NODE_WITNESS, so all getdata requests should be for
        # witness blocks.
        # Test announcing a block via inv results in a getdata, and that
        # announcing a version 4 or random VB block with a header results in a getdata
        block1 = self.build_next_block()
        block1.solve()
        self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        test_witness_block(self.nodes[0], self.test_node, block1, True)
        block2 = self.build_next_block()
        block2.solve()
        self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        test_witness_block(self.nodes[0], self.test_node, block2, True)
        block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
        block3.solve()
        self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        test_witness_block(self.nodes[0], self.test_node, block3, True)
        # Check that we can getdata for witness blocks or regular blocks,
        # and the right thing happens.
        if self.segwit_status != 'active':
            # Before activation, we should be able to request old blocks with
            # or without witness, and they should be the same.
            chain_height = self.nodes[0].getblockcount()
            # Pick 10 random blocks on main chain, and verify that getdata's
            all_heights = list(range(chain_height + 1))
            random.shuffle(all_heights)
            all_heights = all_heights[0:10]
            for height in all_heights:
                block_hash = self.nodes[0].getblockhash(height)
                rpc_block = self.nodes[0].getblock(block_hash, False)
                block_hash = int(block_hash, 16)
                block = self.test_node.request_block(block_hash, 2)
                wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
                assert_equal(block.serialize(True), wit_block.serialize(True))
                assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
        else:
            # After activation: the coinbase must carry the witness reserved
            # value, and witness/non-witness serializations must be consistent
            # with the RPC-reported size, strippedsize and weight.
            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [])
            assert(len(block.vtx[0].wit.vtxinwit) == 1)
            assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            rpc_block = self.nodes[0].getblock(block.hash, False)
            non_wit_block = self.test_node.request_block(block.sha256, 2)
            wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
            assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
            assert_equal(wit_block.serialize(False), non_wit_block.serialize())
            assert_equal(wit_block.serialize(True), block.serialize(True))
            rpc_details = self.nodes[0].getblock(block.hash, True)
            assert_equal(rpc_details["size"], len(block.serialize(True)))
            assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
            weight = 3 * len(block.serialize(False)) + len(block.serialize(True))
            assert_equal(rpc_details["weight"], weight)
            block4 = self.build_next_block(version=4)
            block4.solve()
            self.old_node.getdataset = set()
            # being processed after block getdata's, and announce a transaction as well,
            # to announce this block.
            msg = msg_headers()
            msg.headers = [CBlockHeader(block4)]
            self.old_node.send_message(msg)
            self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
            assert(block4.sha256 not in self.old_node.getdataset)
    @subtest
    def test_v0_outputs_arent_spendable(self):
        """Before activation, v0 witness outputs can be created but blocks
        spending them (with witness) are rejected; non-witness serialization
        of the same blocks is accepted."""
        # node2 doesn't need to be connected for this test.
        # compact blocks and the nodes would have inconsistent tips.)
        disconnect_nodes(self.nodes[0], 2)
        # Create two outputs, a p2wsh and p2sh-p2wsh
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        p2sh_pubkey = hash160(script_pubkey)
        p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
        value = self.utxo[0].nValue // 3
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
        tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
        tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
        tx.rehash()
        txid = tx.sha256
        # Add it to a block
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        # Verify that segwit isn't activated. A block serialized with witness
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
        # Now attempt to spend the outputs, witness-style, pre-activation.
        p2wsh_tx = CTransaction()
        p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
        p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
        p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
        p2wsh_tx.rehash()
        p2sh_p2wsh_tx = CTransaction()
        p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
        p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
        p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
        p2sh_p2wsh_tx.rehash()
        for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
            # empty witness)' (otherwise).
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
        connect_nodes(self.nodes[0], 2)
        self.utxo.pop(0)
        self.utxo.append(UTXO(txid, 2, value))
    @subtest
    def advance_to_segwit_started(self):
        """Mine to the first version-bits period boundary so the segwit
        deployment moves from 'defined' to 'started'."""
        height = self.nodes[0].getblockcount()
        assert(height < VB_PERIOD - 1)
        self.nodes[0].generate(VB_PERIOD - height - 1)
        assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
        self.segwit_status = 'started'
    @subtest
    def test_getblocktemplate_before_lockin(self):
        """getblocktemplate: the segwit-capable node signals the witness bit
        and includes the default witness commitment; the timed-out node2
        does neither."""
        txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
        for node in [self.nodes[0], self.nodes[2]]:
            gbt_results = node.getblocktemplate({"rules": ["segwit"]})
            block_version = gbt_results['version']
            if node == self.nodes[2]:
                assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
                assert('default_witness_commitment' not in gbt_results)
            else:
                assert(block_version & (1 << VB_WITNESS_BIT) != 0)
                assert('default_witness_commitment' in gbt_results)
                witness_commitment = gbt_results['default_witness_commitment']
                # Merkle root over [coinbase wtxid (0), wallet tx's wtxid].
                witness_root = CBlock.get_merkle_root([ser_uint256(0),
                                                       ser_uint256(txid)])
                script = get_witness_script(witness_root, 0)
                assert_equal(witness_commitment, bytes_to_hex_str(script))
    @subtest
    def advance_to_segwit_lockin(self):
        """Mine through a full version-bits period so segwit moves from
        'started' to 'locked_in' at the period boundary."""
        height = self.nodes[0].getblockcount()
        self.nodes[0].generate(VB_PERIOD - 1)
        height = self.nodes[0].getblockcount()
        assert((height % VB_PERIOD) == VB_PERIOD - 2)
        assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
        self.nodes[0].generate(1)
        assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
        self.segwit_status = 'locked_in'
    @subtest
    def test_witness_tx_relay_before_segwit_activation(self):
        """A witness tx announced before activation is requested by legacy
        peers (non-witness inv) but not by witness peers; witness-serialized
        relay is rejected without poisoning the txid."""
        # with a witness. Should be rejected for premature-witness, but should
        # not be added to recently rejected list.
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
        tx.rehash()
        tx_hash = tx.sha256
        tx_value = tx.vout[0].nValue
        # Verify that if a peer doesn't set nServices to include NODE_WITNESS,
        self.old_node.announce_tx_and_wait_for_getdata(tx)
        assert(self.old_node.last_message["getdata"].inv[0].type == 1)
        self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
        # Non-witness serialization of the same tx is accepted.
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx_hash, 0, tx_value))
    @subtest
    def test_standardness_v0(self):
        """Standardness of v0 witness outputs/inputs on node1 (which enforces
        -acceptnonstdtxn=0), via p2sh-wrapped and native spends."""
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        p2sh_pubkey = hash160(witness_program)
        p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
        # First prepare a p2sh output (so that spending it will pass standardness)
        p2sh_tx = CTransaction()
        p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
        p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
        p2sh_tx.rehash()
        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # Now spend the p2sh output, creating v0 witness outputs.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
        tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
        tx.vout.append(CTxOut(8000, script_pubkey))
        tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER
        tx.rehash()
        test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
        script_pubkey = CScript([OP_0, hash160(witness_hash)])
        tx2 = CTransaction()
        # tx was accepted, so we spend the second output.
        tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
        tx2.vout = [CTxOut(7000, script_pubkey)]
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
        # Now update self.utxo for later tests.
        tx3 = CTransaction()
        # tx and tx2 were both accepted. Don't bother trying to reclaim the
        sync_mempools([self.nodes[0], self.nodes[1]])
        tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
        tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
        tx3.wit.vtxinwit.append(CTxInWitness())
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx3.rehash()
        if self.segwit_status != 'active':
            # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
            assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
            # Before activation, spend via the non-witness tx instead so the
            # witness data never enters a block.
            tx3_out = tx3.vout[0]
            tx3 = tx
            tx3.vout = [tx3_out]
            tx3.rehash()
            assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
    @subtest
    def advance_to_segwit_active(self):
        """Mine to the end of the 'locked_in' period so segwit activates at
        the next period boundary."""
        height = self.nodes[0].getblockcount()
        self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2)
        assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
        self.nodes[0].generate(1)
        assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
        self.segwit_status = 'active'
    @subtest
    def test_p2sh_witness(self):
        """P2SH-P2WSH spends: witness data must be in the witness (not the
        scriptSig) and the spend is valid once segwit is active."""
        # Prepare the p2sh-wrapped witness output
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        p2wsh_pubkey = CScript([OP_0, witness_hash])
        p2sh_witness_hash = hash160(p2wsh_pubkey)
        script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
        script_sig = CScript([p2wsh_pubkey])
        # Fund the P2SH output
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
        sync_blocks(self.nodes)
        # Now test attempts to spend the output.
        spend_tx = CTransaction()
        spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
        spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        spend_tx.rehash()
        # segwit-aware would also reject this for failing CLEANSTACK.
        with self.nodes[0].assert_debug_log(
                expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
            test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
        # Try to put the witness script in the scriptSig, should also fail.
        spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
        spend_tx.rehash()
        with self.nodes[0].assert_debug_log(
                expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
            test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
        # Now put the witness script in the witness, should succeed after
        # segwit activates.
        spend_tx.vin[0].scriptSig = script_sig
        spend_tx.rehash()
        spend_tx.wit.vtxinwit.append(CTxInWitness())
        spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
        # Verify mempool acceptance
        test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [spend_tx])
        # If we're after activation, then sending this with witnesses should be valid.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update self.utxo
        self.utxo.pop(0)
        self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
    @subtest
    def test_witness_commitments(self):
        """Witness commitment rules: commitments are required (and malleable
        in nonce), a wrong commitment is rejected, and the last matching
        commitment output wins."""
        # First try a correct witness commitment.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Test the test -- witness serialization should be different
        assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Try to tweak the nonce
        block_2 = self.build_next_block()
        add_witness_commitment(block_2, nonce=28)
        block_2.solve()
        # The commitment should have changed!
        assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
        test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
        # Now test commitments with actual transactions
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, witness_hash])
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        # tx2 will spend tx1, and send back to a regular anyone-can-spend address
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        block_3 = self.build_next_block()
        self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
        # Add an extra OP_RETURN output that matches the witness commitment template,
        # even though it has extra data after the incorrect commitment.
        # This block should fail.
        block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        block_3.solve()
        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
        # Add a different commitment with different nonce, but in the
        # right location, and with some funds burned(!).
        # This should succeed (nValue shouldn't affect finding the
        add_witness_commitment(block_3, nonce=0)
        block_3.vtx[0].vout[0].nValue -= 1
        block_3.vtx[0].vout[-1].nValue += 1
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        assert(len(block_3.vtx[0].vout) == 4)
        block_3.solve()
        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
        # Finally test that a block with no witness transactions can
        # omit the commitment.
        block_4 = self.build_next_block()
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
        tx3.rehash()
        block_4.vtx.append(tx3)
        block_4.hashMerkleRoot = block_4.calc_merkle_root()
        block_4.solve()
        test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
        # Update available utxo's for use in later test.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
    @subtest
    def test_block_malleability(self):
        """Witness malleation (oversized coinbase witness, wrong reserved
        value) must not permanently mark the block hash invalid."""
        # Make sure that a block that has too big a virtual size
        # because of a too-large coinbase witness is not permanently
        # marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
        assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
        # We can't send over the p2p network, because this is too big to relay
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() != block.hash)
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
        assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() == block.hash)
        # result in a block permanently marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Change the nonce -- should not cause the block to be permanently
        # failed
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Changing the witness reserved value doesn't change the block hash
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    @subtest
    def test_witness_block_size(self):
        """A block whose virtual size is exactly one over the limit is
        rejected; shrinking it by one witness byte makes it acceptable."""
        # Skipping this test for now; this is covered in p2p-fullblocktest.py
        # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
        block = self.build_next_block()
        assert(len(self.utxo) > 0)
        # Create a P2WSH transaction.
        # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
        # virtual size.
        NUM_DROPS = 200 # 201 max ops per script!
        NUM_OUTPUTS = 50
        witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
        witness_hash = uint256_from_str(sha256(witness_program))
        script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
        prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
        value = self.utxo[0].nValue
        parent_tx = CTransaction()
        parent_tx.vin.append(CTxIn(prevout, b""))
        child_value = int(value / NUM_OUTPUTS)
        for i in range(NUM_OUTPUTS):
            parent_tx.vout.append(CTxOut(child_value, script_pubkey))
        parent_tx.vout[0].nValue -= 50000
        assert(parent_tx.vout[0].nValue > 0)
        parent_tx.rehash()
        child_tx = CTransaction()
        for i in range(NUM_OUTPUTS):
            child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
        child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
        for i in range(NUM_OUTPUTS):
            child_tx.wit.vtxinwit.append(CTxInWitness())
            child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
        child_tx.rehash()
        self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
        vsize = get_virtual_size(block)
        additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
        i = 0
        while additional_bytes > 0:
            # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
            extra_bytes = min(additional_bytes + 1, 55)
            block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
            additional_bytes -= extra_bytes
            i += 1
        block.vtx[0].vout.pop() # Remove old commitment
        add_witness_commitment(block)
        block.solve()
        vsize = get_virtual_size(block)
        assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
        # Make sure that our test case would exceed the old max-network-message
        # limit
        assert(len(block.serialize(True)) > 2 * 1024 * 1024)
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now resize the second transaction to make the block fit.
        cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
        block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
        block.vtx[0].vout.pop()
        add_witness_commitment(block)
        block.solve()
        assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update available utxo's
        self.utxo.pop(0)
        self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
    """Test that submitblock adds the witness commitment nonce automatically when possible."""
    block = self.build_next_block()

    # Try using a custom nonce and then don't supply it.
    # This shouldn't possibly work.
    add_witness_commitment(block, nonce=1)
    block.vtx[0].wit = CTxWitness()  # drop the witness (nonce) from the coinbase
    block.solve()
    self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
    # Tip must not advance: the commitment was built for nonce=1 but no
    # nonce was supplied, so the default fill-in (0) won't match.
    assert(self.nodes[0].getbestblockhash() != block.hash)

    # Now redo the commitment with nonce 0; submitblock should fill in the
    # missing (all-zero) witness nonce itself and the block should connect.
    add_witness_commitment(block, nonce=0)
    block.vtx[0].wit = CTxWitness()
    block.solve()
    self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    # This time, drop the commitment output entirely; submitblock must not
    # reconstruct the commitment.
    block_2 = self.build_next_block()

    add_witness_commitment(block_2)
    block_2.solve()

    # Drop commitment and nonce -- submitblock should not fill in.
    block_2.vtx[0].vout.pop()
    block_2.vtx[0].wit = CTxWitness()

    self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
    # Tip should not advance!
    assert(self.nodes[0].getbestblockhash() != block_2.hash)
@subtest
def test_extra_witness_data(self):
    """Test that extra (unused) witness data in a block is rejected."""
    block = self.build_next_block()

    witness_program = CScript([OP_DROP, OP_TRUE])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])

    # First try extra witness data on a tx that doesn't require a witness
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
    tx.vout.append(CTxOut(1000, CScript([OP_TRUE])))  # non-witness output
    tx.wit.vtxinwit.append(CTxInWitness())
    tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]  # superfluous witness
    tx.rehash()
    self.update_witness_block_with_transactions(block, [tx])

    # Extra witness data should not be allowed.
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Try strip the witness (and use a scriptSig push instead); the block
    # should now be accepted.
    block.vtx[1].wit.vtxinwit = []
    block.vtx[1].vin[0].scriptSig = CScript([OP_0])
    block.vtx[1].rehash()
    add_witness_commitment(block)
    block.solve()
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Now try extra witness/signature data on an input that DOES require a
    # witness
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))  # witness output
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b""))  # non-witness
    tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
    tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
    # First stack item below is an extra (unconsumed) witness element.
    tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
    tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]

    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2])

    # This has extra witness data, so it should fail.
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Now get rid of the extra witness, but add extra scriptSig data
    tx2.vin[0].scriptSig = CScript([OP_TRUE])
    tx2.vin[1].scriptSig = CScript([OP_TRUE])
    tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
    tx2.wit.vtxinwit[1].scriptWitness.stack = []
    tx2.rehash()
    add_witness_commitment(block)
    block.solve()

    # This has extra signature data for a witness input, so it should fail.
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Now get rid of the extra scriptsig on the witness input, and verify
    # success (even with extra scriptsig data in the non-witness input)
    tx2.vin[0].scriptSig = b""
    tx2.rehash()
    add_witness_commitment(block)
    block.solve()
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Update utxo for later tests
    self.utxo.pop(0)
    self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
    """Test that the witness stack only allows pushes up to MAX_SCRIPT_ELEMENT_SIZE bytes."""
    block = self.build_next_block()

    witness_program = CScript([OP_DROP, OP_TRUE])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])

    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()

    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
    tx2.wit.vtxinwit.append(CTxInWitness())
    # First try a stack element one byte over the limit (521 bytes when
    # MAX_SCRIPT_ELEMENT_SIZE is 520); the block must be rejected.
    tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
    tx2.rehash()

    self.update_witness_block_with_transactions(block, [tx, tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Now reduce the length of the stack element
    tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)

    # Witness-only change: re-commit and re-solve, no txid change needed.
    add_witness_commitment(block)
    block.solve()
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Update the utxo for later tests
    self.utxo.pop()
    self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
    """Test that witness programs over MAX_PROGRAM_LENGTH (10000 bytes) can't be spent."""
    MAX_PROGRAM_LENGTH = 10000

    # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
    long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
    assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
    long_witness_hash = sha256(long_witness_program)
    long_script_pubkey = CScript([OP_0, long_witness_hash])

    block = self.build_next_block()

    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
    tx.rehash()

    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
    tx2.wit.vtxinwit.append(CTxInWitness())
    # 44 stack items to be consumed by the 63 OP_DROPs + witness program.
    tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
    tx2.rehash()

    self.update_witness_block_with_transactions(block, [tx, tx2])

    # Spending the over-length program must be rejected.
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Try again with one less byte in the witness program
    witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
    assert(len(witness_program) == MAX_PROGRAM_LENGTH)
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])

    tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
    tx.rehash()
    # tx's txid changed, so repoint tx2 at the new output.
    tx2.vin[0].prevout.hash = tx.sha256
    tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
    tx2.rehash()
    block.vtx = [block.vtx[0]]  # keep only the coinbase and rebuild
    self.update_witness_block_with_transactions(block, [tx, tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    self.utxo.pop()
    self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
    """Test that vin length must match vtxinwit length."""
    witness_program = CScript([OP_DROP, OP_TRUE])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])

    # Create a transaction that splits our utxo into many outputs
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    value = self.utxo[0].nValue
    for i in range(10):
        tx.vout.append(CTxOut(int(value / 10), script_pubkey))
    tx.vout[0].nValue -= 1000  # deduct the fee from the first output
    assert(tx.vout[0].nValue >= 0)

    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Try various ways to spend tx that should all break.
    # This "broken" transaction serializer will not normalize
    # the length of vtxinwit.
    class BrokenCTransaction(CTransaction):
        def serialize_with_witness(self):
            # Same wire format as CTransaction, but serializes
            # self.wit.vtxinwit exactly as stored, even if its length
            # doesn't match len(self.vin).
            flags = 0
            if not self.wit.is_null():
                flags |= 1
            r = b""
            r += struct.pack("<i", self.nVersion)
            if flags:
                dummy = []
                r += ser_vector(dummy)
                r += struct.pack("<B", flags)
            r += ser_vector(self.vin)
            r += ser_vector(self.vout)
            if flags & 1:
                r += self.wit.serialize()
            r += struct.pack("<I", self.nLockTime)
            return r

    tx2 = BrokenCTransaction()
    for i in range(10):
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
    tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))

    # First try using a too long vtxinwit
    for i in range(11):
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]

    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Now try using a too short vtxinwit
    tx2.wit.vtxinwit.pop()
    tx2.wit.vtxinwit.pop()

    block.vtx = [block.vtx[0]]  # keep only the coinbase and rebuild
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Now make one of the intermediate witnesses be incorrect
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
    tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]  # missing the b'a' push

    block.vtx = [block.vtx[0]]
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Fix the broken witness and the block should be accepted.
    tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
    block.vtx = [block.vtx[0]]
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    self.utxo.pop()
    self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
    """Test transaction relay after segwit activation.

    After segwit activates, mempool rejects unnecessary/extra witnesses,
    accepts valid witness transactions, and getrawtransaction reports
    correct witness-related fields (hash/size/vsize/weight)."""
    # Generate a transaction that doesn't require a witness, but send it
    # when spending a non-witness output.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
    tx.wit.vtxinwit.append(CTxInWitness())
    tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']  # superfluous witness
    tx.rehash()

    tx_hash = tx.sha256

    # Verify that unnecessary witnesses are rejected.
    self.test_node.announce_tx_and_wait_for_getdata(tx)
    assert_equal(len(self.nodes[0].getrawmempool()), 0)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)

    # Verify that removing the witness succeeds.
    self.test_node.announce_tx_and_wait_for_getdata(tx)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)

    # Now try to add extra witness data to a valid witness tx.
    witness_program = CScript([OP_TRUE])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
    tx2.rehash()

    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
    tx3.wit.vtxinwit.append(CTxInWitness())

    # Add too-large for IsStandard witness and check that it does not enter reject filter
    p2sh_program = CScript([OP_TRUE])
    p2sh_pubkey = hash160(p2sh_program)
    witness_program2 = CScript([b'a' * 400000])
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
    tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
    tx3.rehash()

    # Node will not be blinded to the transaction: the same tx is
    # re-announced and fetched again (no entry in the reject filter).
    self.std_node.announce_tx_and_wait_for_getdata(tx3)
    test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
    self.std_node.announce_tx_and_wait_for_getdata(tx3)
    test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')

    # Remove witness stuffing, instead add extra witness push on stack
    tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
    tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
    tx3.rehash()

    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)

    # Get rid of the extra witness, and verify acceptance.
    tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    # Also check that old_node gets a tx announcement, even though this is
    # a witness transaction.
    self.old_node.wait_for_inv([CInv(1, tx2.sha256)])  # wait until tx2 was inv'ed
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
    self.old_node.wait_for_inv([CInv(1, tx3.sha256)])

    # Check that getrawtransaction returns correct witness-related fields.
    raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
    assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))  # wtxid
    assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
    # weight = 3 * base size + total size (BIP141)
    weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
    vsize = math.ceil(weight / 4)
    assert_equal(raw_tx["vsize"], vsize)
    assert_equal(raw_tx["weight"], weight)
    assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
    assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
    assert(vsize != raw_tx["size"])

    # Mine the transactions and clean out the mempool.
    self.nodes[0].generate(1)
    assert_equal(len(self.nodes[0].getrawmempool()), 0)

    # Update our utxo list; we spent the first entry.
    self.utxo.pop(0)
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
    """Test validity of future segwit version transactions.

    Future segwit versions are non-standard to spend, but valid in blocks.
    Sending to future segwit versions is always allowed."""
    NUM_SEGWIT_VERSIONS = 17  # will test OP_0 and OP_1 through OP_16
    # Make sure we have enough utxos - split one if needed.
    if len(self.utxo) < NUM_SEGWIT_VERSIONS:
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
        for i in range(NUM_SEGWIT_VERSIONS):
            tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
        tx.rehash()
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop(0)
        for i in range(NUM_SEGWIT_VERSIONS):
            self.utxo.append(UTXO(tx.sha256, i, split_value))

    sync_blocks(self.nodes)
    temp_utxo = []
    tx = CTransaction()
    witness_program = CScript([OP_TRUE])
    witness_hash = sha256(witness_program)
    assert_equal(len(self.nodes[1].getrawmempool()), 0)
    for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
        # First try to spend to a future version segwit script_pubkey.
        script_pubkey = CScript([CScriptOp(version), witness_hash])
        tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
        tx.vout = [CTxOut(self.utxo[0].nValue - 10000, script_pubkey)]
        tx.rehash()
        # std_node (fRequireStandard) rejects; test_node accepts.
        test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
        self.utxo.pop(0)
        temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))

    self.nodes[0].generate(1)  # Mine all the transactions
    sync_blocks(self.nodes)
    assert(len(self.nodes[0].getrawmempool()) == 0)

    # Finally, verify that version 0 -> version 1 transactions
    # are standard on test_node but not on std_node.
    script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
    tx2 = CTransaction()
    tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
    tx2.vout = [CTxOut(tx.vout[0].nValue - 10000, script_pubkey)]
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    tx2.rehash()
    # Gets accepted to test_node, because standardness of outputs isn't
    # checked with fRequireStandard
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=False)
    temp_utxo.pop()  # last entry in temp_utxo was the output we just spent
    temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))

    # Spend everything in temp_utxo back to an OP_TRUE output.
    tx3 = CTransaction()
    total_value = 0
    for i in temp_utxo:
        tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
        tx3.wit.vtxinwit.append(CTxInWitness())
        total_value += i.nValue
        tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
    tx3.vout.append(CTxOut(total_value - 10000, CScript([OP_TRUE])))
    tx3.rehash()
    # Spending a higher version witness output is not allowed by policy,
    # even with fRequireStandard=false.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")

    # Building a block with the transaction must be valid, however.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2, tx3])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    sync_blocks(self.nodes)

    # Add utxo to our list
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
    """Test that a witness output created in a coinbase can't be spent before maturity."""
    block = self.build_next_block()
    # Change the output of the block to be a witness output.
    witness_program = CScript([OP_TRUE])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])
    block.vtx[0].vout[0].scriptPubKey = script_pubkey
    # This next line will rehash the coinbase and update the merkle
    # root, and solve.
    self.update_witness_block_with_transactions(block, [])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    spend_tx = CTransaction()
    spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
    spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
    spend_tx.wit.vtxinwit.append(CTxInWitness())
    spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    spend_tx.rehash()

    # Now test a premature spend.
    self.nodes[0].generate(98)
    sync_blocks(self.nodes)
    block2 = self.build_next_block()
    self.update_witness_block_with_transactions(block2, [spend_tx])
    # Coinbase not yet mature -- the spend must be rejected.
    test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)

    # Advancing one more block should allow the spend.
    self.nodes[0].generate(1)
    block2 = self.build_next_block()
    self.update_witness_block_with_transactions(block2, [spend_tx])
    test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
    sync_blocks(self.nodes)
@subtest
def test_uncompressed_pubkey(self):
    """Test uncompressed pubkey validity in segwit transactions.

    Uncompressed pubkeys are rejected by default relay policy in segwit
    scripts, but still valid by consensus."""
    # Segwit transactions using uncompressed pubkeys are not accepted
    # under default policy, but should still pass consensus.

    key = ECKey()
    key.generate(False)  # uncompressed key
    pubkey = key.get_pubkey().get_bytes()
    assert_equal(len(pubkey), 65)  # This should be an uncompressed pubkey

    utxo = self.utxo.pop(0)

    # Test 1: P2WPKH
    # First create a P2WPKH output that uses an uncompressed pubkey
    pubkeyhash = hash160(pubkey)
    script_pkh = CScript([OP_0, pubkeyhash])
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
    tx.vout.append(CTxOut(utxo.nValue - 10000, script_pkh))
    tx.rehash()

    # Confirm it in a block.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Now try to spend it. Send it to a P2WSH output, which we'll
    # use in the next test.
    witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
    witness_hash = sha256(witness_program)
    script_wsh = CScript([OP_0, witness_hash])

    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 10000, script_wsh))
    script = get_p2pkh_script(pubkeyhash)
    sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'  # 0x01 is SIGHASH_ALL
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
    tx2.rehash()

    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 2: P2WSH -> P2SH(P2WSH)
    p2sh_witness_hash = hash160(script_wsh)
    script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
    script_sig = CScript([script_wsh])

    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 10000, script_p2sh))
    tx3.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)

    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx3])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 3: P2SH(P2WSH)
    # Try to spend the P2SH output created in the last test.
    # Send it to a P2PKH output, which we'll use in the next test.
    script_pubkey = get_p2pkh_script(pubkeyhash)
    tx4 = CTransaction()
    tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
    tx4.vout.append(CTxOut(tx3.vout[0].nValue - 10000, script_pubkey))
    tx4.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)

    # Should fail policy test, but pass consensus.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx4])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 4: uncompressed pubkeys should still be valid in non-segwit
    # (pre-segwit P2PKH) transactions, by policy and consensus.
    tx5 = CTransaction()
    tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
    tx5.vout.append(CTxOut(tx4.vout[0].nValue - 10000, CScript([OP_TRUE])))
    (sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'
    tx5.vin[0].scriptSig = CScript([signature, pubkey])
    tx5.rehash()
    # Should pass policy and consensus.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx5])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
    """Test segwit signature hash version 1 (BIP143) with various hashtypes and input values."""
    key = ECKey()
    key.generate()
    pubkey = key.get_pubkey().get_bytes()

    witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])

    # First create a witness output for use in the tests.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()

    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
    # Mine this transaction in preparation for following tests.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    sync_blocks(self.nodes)
    self.utxo.pop(0)

    # Test each hashtype, with both correct and incorrect input amounts
    # (BIP143 commits to the amount being spent).
    prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
    for sigflag in [0, SIGHASH_ANYONECANPAY]:
        for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
            hashtype |= sigflag
            block = self.build_next_block()
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
            tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
            tx.wit.vtxinwit.append(CTxInWitness())
            # Too-large input value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

            # Too-small input value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
            block.vtx.pop()  # remove the rejected tx before rebuilding
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

            # Now try correct value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
            block.vtx.pop()
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

            prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)

    # Test combinations of signature hashes.
    # Split the utxo into a lot of outputs.
    # Randomly choose up to 10 to spend, sign with different hashtypes, and
    # output to a random number of outputs.  Repeat NUM_SIGHASH_TESTS times.
    # Ensure that we've tested a situation where we use SIGHASH_SINGLE with
    # an input index > number of outputs.
    NUM_SIGHASH_TESTS = 500
    temp_utxos = []
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
    split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
    for i in range(NUM_SIGHASH_TESTS):
        tx.vout.append(CTxOut(split_value, script_pubkey))
    tx.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
    for i in range(NUM_SIGHASH_TESTS):
        temp_utxos.append(UTXO(tx.sha256, i, split_value))

    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    block = self.build_next_block()
    used_sighash_single_out_of_bounds = False
    for i in range(NUM_SIGHASH_TESTS):
        # Ping regularly to keep the connection alive
        if (not i % 100):
            self.test_node.sync_with_ping()
        # Choose random number of inputs to use.
        num_inputs = random.randint(1, 10)
        # Create a slight bias for producing more utxos
        num_outputs = random.randint(1, 11)
        random.shuffle(temp_utxos)
        assert(len(temp_utxos) > num_inputs)
        tx = CTransaction()
        total_value = 0
        for i in range(num_inputs):
            tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
            tx.wit.vtxinwit.append(CTxInWitness())
            total_value += temp_utxos[i].nValue
        split_value = total_value // num_outputs
        for i in range(num_outputs):
            tx.vout.append(CTxOut(split_value, script_pubkey))
        for i in range(num_inputs):
            # Now try to sign each input, using a random hashtype.
            anyonecanpay = 0
            if random.randint(0, 1):
                anyonecanpay = SIGHASH_ANYONECANPAY
            hashtype = random.randint(1, 3) | anyonecanpay
            sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
            if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
                used_sighash_single_out_of_bounds = True
        tx.rehash()
        for i in range(num_outputs):
            temp_utxos.append(UTXO(tx.sha256, i, split_value))
        temp_utxos = temp_utxos[num_inputs:]  # drop the spent utxos

        block.vtx.append(tx)

        # Test the block periodically, if we're close to maxblocksize
        if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
            self.update_witness_block_with_transactions(block, [])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            block = self.build_next_block()

    if (not used_sighash_single_out_of_bounds):
        self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
    # Test the transactions we've added to the block
    if (len(block.vtx) > 1):
        self.update_witness_block_with_transactions(block, [])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Now test witness version 0 P2PKH transactions
    pubkeyhash = hash160(pubkey)
    script_pkh = CScript([OP_0, pubkeyhash])
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
    tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
    tx.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))

    script = get_p2pkh_script(pubkeyhash)
    sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'

    # Check that we can't have a scriptSig on a P2WPKH spend.
    tx2.vin[0].scriptSig = CScript([signature, pubkey])
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx, tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Move the signature to the witness.
    block.vtx.pop()
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
    tx2.vin[0].scriptSig = b""
    tx2.rehash()

    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    temp_utxos.pop(0)

    # Update self.utxos for later tests by creating two outputs
    # that consolidate all the coins in temp_utxos.
    output_value = sum(i.nValue for i in temp_utxos) // 2

    tx = CTransaction()
    index = 0
    # Just spend to our usual anyone-can-spend output
    tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
    for i in temp_utxos:
        # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
        # the signatures as we go.
        tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
        tx.wit.vtxinwit.append(CTxInWitness())
        sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
        index += 1
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    for i in range(len(tx.vout)):
        self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
    """Test that unnecessary witnesses can't blind a node to a transaction."""
    # Create a p2sh output -- this is so we can pass the standardness
    # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
    # in P2SH).
    p2sh_program = CScript([OP_TRUE])
    p2sh_pubkey = hash160(p2sh_program)
    script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])

    # Now check that unnecessary witnesses can't be used to blind a node
    # to a transaction, eg by violating standardness checks.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)

    # We'll add an unnecessary witness to this transaction that would cause
    # it to be non-standard, to test that violating policy with a witness
    # doesn't blind a node to a transaction. Transactions rejected for
    # having a witness shouldn't be added to the rejection cache.
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
    tx2.rehash()
    # This will be rejected due to a policy check:
    # No witness is allowed, since it is not a witness program but a p2sh program
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')

    # If we send without witness, it should be accepted.
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)

    # Now create a new anyone-can-spend utxo for the next test.
    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
    tx3.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)

    self.nodes[0].generate(1)
    sync_blocks(self.nodes)

    # Update our utxo list; we spent the first entry.
    self.utxo.pop(0)
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
    """Test detection of non-standard P2WSH witness."""
    pad = chr(1).encode('latin-1')  # a 1-byte stack element for padding

    # Create scripts for tests
    scripts = []
    scripts.append(CScript([OP_DROP] * 100))  # needs 100 stack items (standard limit)
    scripts.append(CScript([OP_DROP] * 99))   # needs 99 stack items
    scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))  # 3600-byte witnessScript
    scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))  # 3601-byte witnessScript

    p2wsh_scripts = []

    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))

    # For each script, generate a pair of P2WSH and P2SH-P2WSH output.
    outputvalue = (self.utxo[0].nValue - 10000) // (len(scripts) * 2)
    for i in scripts:
        p2wsh = CScript([OP_0, sha256(i)])
        p2sh = hash160(p2wsh)
        p2wsh_scripts.append(p2wsh)
        tx.vout.append(CTxOut(outputvalue, p2wsh))
        tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
    tx.rehash()
    txid = tx.sha256
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)

    self.nodes[0].generate(1)
    sync_blocks(self.nodes)

    # Creating transactions for tests
    p2wsh_txs = []
    p2sh_txs = []
    for i in range(len(scripts)):
        p2wsh_tx = CTransaction()
        p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
        p2wsh_tx.vout.append(CTxOut(outputvalue - 50000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
        p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2wsh_tx.rehash()
        p2wsh_txs.append(p2wsh_tx)
        p2sh_tx = CTransaction()
        p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
        p2sh_tx.vout.append(CTxOut(outputvalue - 50000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
        p2sh_tx.wit.vtxinwit.append(CTxInWitness())
        p2sh_tx.rehash()
        p2sh_txs.append(p2sh_tx)

    # Testing native P2WSH
    # Witness stack size, excluding witnessScript, over 100 is non-standard
    p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)

    # Stack element size over 80 bytes is non-standard
    p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
    # Standard nodes should accept if element size is not over 80 bytes
    p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)

    # witnessScript size at 3600 bytes is standard
    p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)

    # witnessScript size at 3601 bytes is non-standard
    p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)

    # Repeating the same tests with P2SH-P2WSH
    p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
    p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
    p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
    p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
    p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)

    self.nodes[0].generate(1)  # Mine and clean up the mempool of non-standard node
    # Valid but non-standard transactions in a block should be accepted by standard node
    sync_blocks(self.nodes)
    assert_equal(len(self.nodes[0].getrawmempool()), 0)
    assert_equal(len(self.nodes[1].getrawmempool()), 0)

    self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
    """Test node behavior after segwit activation.

    First checks that pre-BIP9 block versions are now rejected, then
    restarts node 2 with segwit params and verifies it agrees with
    node 0's chain and activation state.
    """
    self.log.info("Testing rejection of block.nVersion < BIP9_TOP_BITS blocks")
    # nVersion=4 lacks the mandatory BIP9 top bits, so the block must be
    # rejected even though it would have been valid before the fork.
    block = self.build_next_block(version=4)
    block.solve()
    resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
    assert_equal(resp, 'bad-version(0x00000004)')
    # Restart with the new binary
    self.stop_node(2)
    self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"])
    connect_nodes(self.nodes[0], 2)
    sync_blocks(self.nodes)
    # Make sure that this peer thinks segwit has activated.
    assert(get_bip9_status(self.nodes[2], 'segwit')['status'] == "active")
    # Make sure this peer's blocks match those of node0, walking the whole
    # chain from tip down to genesis.
    height = self.nodes[2].getblockcount()
    while height >= 0:
        block_hash = self.nodes[2].getblockhash(height)
        assert_equal(block_hash, self.nodes[0].getblockhash(height))
        assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
        height -= 1
@subtest
def test_witness_sigops(self):
    """Test sigop counting inside witness programs against MAX_SIGOP_COST.

    Builds one funding tx whose outputs pay to three witness programs:
    a base program, one that would push a spending block just over the
    sigop limit, and one that reaches the limit exactly; then mines
    blocks spending them and checks accept/reject.
    """
    # In witness scripts CHECKMULTISIG counts as 20 sigops, CHECKSIG as 1.
    witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
    witness_hash = sha256(witness_program)
    script_pubkey = CScript([OP_0, witness_hash])
    sigops_per_script = 20 * 5 + 193 * 1
    # Two extra outputs: one with a program that would push a block
    # over max sig ops, and one with a program that would exactly reach max
    # sig ops
    outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
    extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
    # We chose the number of checkmultisigs/checksigs to make this work:
    assert(extra_sigops_available < 100)  # steer clear of MAX_OPS_PER_SCRIPT
    # This script, when spent with the first
    # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
    # would push us just over the block sigop limit.
    witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
    witness_hash_toomany = sha256(witness_program_toomany)
    script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
    # If we spend this script instead, we would exactly reach our sigop
    # limit (for witness sigops).
    witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
    witness_hash_justright = sha256(witness_program_justright)
    script_pubkey_justright = CScript([OP_0, witness_hash_justright])
    # First split our available utxo into a bunch of outputs
    split_value = self.utxo[0].nValue // outputs
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    for i in range(outputs):
        tx.vout.append(CTxOut(split_value, script_pubkey))
    # Overwrite the last two outputs with the special-case programs.
    tx.vout[-2].scriptPubKey = script_pubkey_toomany
    tx.vout[-1].scriptPubKey = script_pubkey_justright
    tx.rehash()
    block_1 = self.build_next_block()
    self.update_witness_block_with_transactions(block_1, [tx])
    test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
    tx2 = CTransaction()
    # If we try to spend the first n-1 outputs from tx, that should be
    # too many sigops.
    total_value = 0
    for i in range(outputs - 1):
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
        total_value += tx.vout[i].nValue
    # The last spent input uses the over-limit program.
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
    tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
    tx2.rehash()
    block_2 = self.build_next_block()
    self.update_witness_block_with_transactions(block_2, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
    # Try dropping the last input in tx2, and add an output that has
    # too many sigops (contributing to legacy sigop count).
    checksig_count = (extra_sigops_available // 4) + 1
    script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
    tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
    tx2.vin.pop()
    tx2.wit.vtxinwit.pop()
    tx2.vout[0].nValue -= tx.vout[-2].nValue
    tx2.rehash()
    block_3 = self.build_next_block()
    self.update_witness_block_with_transactions(block_3, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
    # If we drop the last checksig in this output, the tx should succeed.
    block_4 = self.build_next_block()
    tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
    tx2.rehash()
    self.update_witness_block_with_transactions(block_4, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
    # Reset the tip back down for the next test
    sync_blocks(self.nodes)
    for x in self.nodes:
        x.invalidateblock(block_4.hash)
    # Try replacing the last input of tx2 to be spending the last
    # output of tx (the exactly-at-limit program); this must be accepted.
    block_5 = self.build_next_block()
    tx2.vout.pop()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
    tx2.rehash()
    self.update_witness_block_with_transactions(block_5, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
    # TODO: test p2sh sigop counting
def test_superfluous_witness(self):
    """Test that nodes reject/log transactions carrying a superfluous
    witness flag (flag byte 3, or a witness section on a tx that does
    not need one)."""
    # Serialization of tx that puts witness flag to 3 always
    def serialize_with_bogus_witness(tx):
        flags = 3
        r = b""
        r += struct.pack("<i", tx.nVersion)
        if flags:
            # Empty vin placeholder + flag byte, per the extended
            # (segwit) serialization format.
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(tx.vin)
        r += ser_vector(tx.vout)
        if flags & 1:
            if (len(tx.wit.vtxinwit) != len(tx.vin)):
                # vtxinwit must have the same length as vin
                tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
                for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
                    tx.wit.vtxinwit.append(CTxInWitness())
            r += tx.wit.serialize()
        r += struct.pack("<I", tx.nLockTime)
        return r
    class msg_bogus_tx(msg_tx):
        # Same tx message, but serialized with the bogus witness flag.
        def serialize(self):
            return serialize_with_bogus_witness(self.tx)
    # Fund a native-segwit output to spend.
    # NOTE(review): the 'rltc' address prefix suggests Litecoin regtest
    # bech32 -- confirm against the chain params this suite targets.
    self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
    self.nodes[0].generate(1)
    unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('rltc'))
    raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
    tx = FromHex(CTransaction(), raw)
    # Unsigned tx: bogus serialization must fail to decode and be logged
    # as a superfluous witness record when received over P2P.
    assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
    with self.nodes[0].assert_debug_log(['Superfluous witness record']):
        self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
        self.nodes[0].p2p.sync_with_ping()
    raw = self.nodes[0].signrawtransactionwithwallet(raw)
    assert raw['complete']
    raw = raw['hex']
    tx = FromHex(CTransaction(), raw)
    # Signed tx: now the extra data is logged as unknown optional data.
    assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
    with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
        self.nodes[0].p2p.send_message(msg_bogus_tx(tx))
        self.nodes[0].p2p.sync_with_ping()
if __name__ == '__main__':
SegWitTest().main()
| true | true |
f7f457f23a56ee06de1c9669c96b4df8702c5614 | 18,363 | py | Python | pixel_server.py | turboki/pixel-server-pi | 1751d6c4d343fd37421291f24d56f10ad9b8ac74 | [
"MIT"
] | null | null | null | pixel_server.py | turboki/pixel-server-pi | 1751d6c4d343fd37421291f24d56f10ad9b8ac74 | [
"MIT"
] | null | null | null | pixel_server.py | turboki/pixel-server-pi | 1751d6c4d343fd37421291f24d56f10ad9b8ac74 | [
"MIT"
] | null | null | null | import time
import re
import neopixel
import board
import tornado.ioloop
import tornado.web
import signal
import pixel_utils
import multiprocessing
from multiprocessing.managers import BaseManager
class MyManager(BaseManager): pass
# Factory that returns a *started* manager.  Classes registered on
# MyManager (see __main__) are instantiated in the manager's server
# process and accessed through proxies.
def Manager():
    m = MyManager()
    m.start()
    return m
class PixelConfig(object):
    """Shared LED configuration, served across processes via BaseManager.

    Explicit getter/setter methods (instead of plain attributes) are
    required because multiprocessing proxies only expose registered
    methods, not attribute access.
    """
    def __init__(self):
        # True while the LED loop should render; False blanks the strip.
        self._running = True
        # Set to True to make the led_loop worker process exit.
        self._done = False
        # Animation mode used when no step program is active.
        self._mode = 'rainbow'
        # List of (r, g, b) tuples for the current mode.
        self._colors = [(255,0,0)]
        # Per-frame delay in seconds.
        self._wait_time = 0.001
        # Strip brightness, 0.0-1.0.
        self._brightness = 0.2
        # Step program string 'mode:colors:wait:loop|...'.  "default"
        # selects led_loop's built-in program; None falls back to
        # mode/colors/wait above.
        self._steps = "default"
    def get_steps(self):
        return self._steps
    def set_steps(self, steps):
        self._steps = steps
    def get_done(self):
        return self._done
    def set_done(self, done):
        self._done = done
    def get_running(self):
        return self._running
    def set_running(self, running):
        self._running = running
    def get_mode(self):
        return self._mode
    def set_mode(self, mode):
        self._mode = mode
    def get_colors(self):
        return self._colors
    def set_colors(self, colors):
        self._colors = colors
    def get_wait_time(self):
        return self._wait_time
    def set_wait_time(self, wait_time):
        self._wait_time = wait_time
    def get_brightness(self):
        return self._brightness
    def set_brightness(self, brightness):
        self._brightness = brightness
def increment_loop(curr, maximum):
    """Advance a wrap-around counter: return curr + 1, or 0 once
    curr has reached (or passed) maximum.

    The second parameter was renamed from ``max`` to stop shadowing the
    builtin; every call site in this file passes it positionally.
    """
    if curr < maximum:
        return curr + 1
    return 0
# CSS color-name -> '#rrggbb' lookup table used by get_color().
# Fix: the 'indianred' key previously had a trailing space
# ('indianred '), so lookups for 'indianred' silently failed and the
# color was dropped by get_colors().
COLOR_NAMES = {
    'aliceblue': '#f0f8ff',
    'antiquewhite': '#faebd7',
    'aqua': '#00ffff',
    'aquamarine': '#7fffd4',
    'azure': '#f0ffff',
    'beige': '#f5f5dc',
    'bisque': '#ffe4c4',
    'black': '#000000',
    'blanchedalmond': '#ffebcd',
    'blue': '#0000ff',
    'blueviolet': '#8a2be2',
    'brown': '#a52a2a',
    'burlywood': '#deb887',
    'cadetblue': '#5f9ea0',
    'chartreuse': '#7fff00',
    'chocolate': '#d2691e',
    'coral': '#ff7f50',
    'cornflowerblue': '#6495ed',
    'cornsilk': '#fff8dc',
    'crimson': '#dc143c',
    'cyan': '#00ffff',
    'darkblue': '#00008b',
    'darkcyan': '#008b8b',
    'darkgoldenrod': '#b8860b',
    'darkgray': '#a9a9a9',
    'darkgreen': '#006400',
    'darkkhaki': '#bdb76b',
    'darkmagenta': '#8b008b',
    'darkolivegreen': '#556b2f',
    'darkorange': '#ff8c00',
    'darkorchid': '#9932cc',
    'darkred': '#8b0000',
    'darksalmon': '#e9967a',
    'darkseagreen': '#8fbc8f',
    'darkslateblue': '#483d8b',
    'darkslategray': '#2f4f4f',
    'darkturquoise': '#00ced1',
    'darkviolet': '#9400d3',
    'deeppink': '#ff1493',
    'deepskyblue': '#00bfff',
    'dimgray': '#696969',
    'dodgerblue': '#1e90ff',
    'firebrick': '#b22222',
    'floralwhite': '#fffaf0',
    'forestgreen': '#228b22',
    'fuchsia': '#ff00ff',
    'gainsboro': '#dcdcdc',
    'ghostwhite': '#f8f8ff',
    'gold': '#ffd700',
    'goldenrod': '#daa520',
    'gray': '#808080',
    'green': '#008000',
    'greenyellow': '#adff2f',
    'honeydew': '#f0fff0',
    'hotpink': '#ff69b4',
    'indianred': '#cd5c5c',
    'indigo': '#4b0082',
    'ivory': '#fffff0',
    'khaki': '#f0e68c',
    'lavender': '#e6e6fa',
    'lavenderblush': '#fff0f5',
    'lawngreen': '#7cfc00',
    'lemonchiffon': '#fffacd',
    'lightblue': '#add8e6',
    'lightcoral': '#f08080',
    'lightcyan': '#e0ffff',
    'lightgoldenrodyellow': '#fafad2',
    'lightgrey': '#d3d3d3',
    'lightgreen': '#90ee90',
    'lightpink': '#ffb6c1',
    'lightsalmon': '#ffa07a',
    'lightseagreen': '#20b2aa',
    'lightskyblue': '#87cefa',
    'lightslategray': '#778899',
    'lightsteelblue': '#b0c4de',
    'lightyellow': '#ffffe0',
    'lime': '#00ff00',
    'limegreen': '#32cd32',
    'linen': '#faf0e6',
    'magenta': '#ff00ff',
    'maroon': '#800000',
    'mediumaquamarine': '#66cdaa',
    'mediumblue': '#0000cd',
    'mediumorchid': '#ba55d3',
    'mediumpurple': '#9370d8',
    'mediumseagreen': '#3cb371',
    'mediumslateblue': '#7b68ee',
    'mediumspringgreen': '#00fa9a',
    'mediumturquoise': '#48d1cc',
    'mediumvioletred': '#c71585',
    'midnightblue': '#191970',
    'mintcream': '#f5fffa',
    'mistyrose': '#ffe4e1',
    'moccasin': '#ffe4b5',
    'navajowhite': '#ffdead',
    'navy': '#000080',
    'oldlace': '#fdf5e6',
    'olive': '#808000',
    'olivedrab': '#6b8e23',
    'orange': '#ffa500',
    'orangered': '#ff4500',
    'orchid': '#da70d6',
    'palegoldenrod': '#eee8aa',
    'palegreen': '#98fb98',
    'paleturquoise': '#afeeee',
    'palevioletred': '#d87093',
    'papayawhip': '#ffefd5',
    'peachpuff': '#ffdab9',
    'peru': '#cd853f',
    'pink': '#ffc0cb',
    'plum': '#dda0dd',
    'powderblue': '#b0e0e6',
    'purple': '#800080',
    'rebeccapurple': '#663399',
    'red': '#ff0000',
    'rosybrown': '#bc8f8f',
    'royalblue': '#4169e1',
    'saddlebrown': '#8b4513',
    'salmon': '#fa8072',
    'sandybrown': '#f4a460',
    'seagreen': '#2e8b57',
    'seashell': '#fff5ee',
    'sienna': '#a0522d',
    'silver': '#c0c0c0',
    'skyblue': '#87ceeb',
    'slateblue': '#6a5acd',
    'slategray': '#708090',
    'snow': '#fffafa',
    'springgreen': '#00ff7f',
    'steelblue': '#4682b4',
    'tan': '#d2b48c',
    'teal': '#008080',
    'thistle': '#d8bfd8',
    'tomato': '#ff6347',
    'turquoise': '#40e0d0',
    'violet': '#ee82ee',
    'wheat': '#f5deb3',
    'white': '#ffffff',
    'whitesmoke': '#f5f5f5',
    'yellow': '#ffff00',
    'yellowgreen': '#9acd32'
}
def get_colors(color_param):
    """Parse a comma-separated list of color tokens into (r, g, b) tuples.

    Unrecognized tokens are silently dropped; an empty string yields an
    empty list.
    """
    if color_param == '':
        return []
    tokens = color_param.split(',')
    return [rgb for rgb in (get_color(t) for t in tokens) if rgb != '']
def get_color(color_string):
    """Resolve a single color token to an (r, g, b) tuple.

    Accepts a CSS color name or a '#rrggbb' hex value.  Input is now
    lower-cased first so 'Red' and '#FF0000' also resolve (a
    backward-compatible generalization: previously only lowercase input
    matched).  Returns '' for unrecognized tokens -- get_colors() relies
    on that sentinel to filter them out.
    """
    token = color_string.lower()
    if token in COLOR_NAMES:
        return hex_to_rgb(COLOR_NAMES[token][1:])
    # Anchored at the start only; hex_to_rgb reads exactly six digits.
    if re.match(r'#([0-9a-f]{6})', token):
        return hex_to_rgb(token[1:7])
    return ''
def hex_to_rgb(h):
    """Convert a 6-digit hex string 'rrggbb' (no leading '#') to an
    (r, g, b) tuple of ints."""
    red = int(h[0:2], 16)
    green = int(h[2:4], 16)
    blue = int(h[4:6], 16)
    return (red, green, blue)
def parse_steps(steps=None):
    """Parse a 'mode:colors:wait:loop|...' step-program string into a
    list of step dicts (see parse_step).  Returns [] for None or empty
    input; malformed steps are dropped.
    """
    # Idiom fix: compare to None with 'is', not '=='.
    if steps is None or steps == "":
        return []
    parsed = (parse_step(chunk) for chunk in steps.split('|'))
    return [step for step in parsed if step is not None]
def parse_step(step=''):
    """Parse one 'mode:colors:wait:loop' step into a dict, or return
    None (and log) when the step is malformed.

    Robustness fix: non-numeric wait/loop fields previously raised an
    uncaught ValueError that propagated into the caller (and could kill
    the LED worker process); they are now treated as invalid steps.
    """
    step_components = step.split(':')
    if len(step_components) != 4:
        print ('Invalid Step: %s' % step)
        return None
    try:
        wait = float(step_components[2])
        loop = float(step_components[3])
    except ValueError:
        print ('Invalid Step: %s' % step)
        return None
    return {
        'mode': step_components[0],
        'colors': get_colors(step_components[1]),
        'wait': wait,
        'loop': loop
    }
def led_loop(led_config_proxy, thread_id):
    """Worker-process entry point: renders the NeoPixel strip forever.

    Each frame polls the shared PixelConfig proxy and renders either the
    active step program (when steps is not None) or the single
    configured mode.  Exits -- and blanks the strip -- once the proxy's
    'done' flag is set.

    thread_id is only a label passed by the caller.
    # NOTE(review): thread_id is never used inside this function.
    """
    # Tree
    # pixels = neopixel.NeoPixel(board.D18, 50, brightness=0.3, auto_write=False, pixel_order=neopixel.RGB)
    # House
    pixels = neopixel.NeoPixel(board.D18, 450, brightness=0.3, auto_write=False, pixel_order=neopixel.RGB)
    # Start with the whole strip white until the first frame renders.
    pixels.fill((255,255,255))
    pixels.show()
    # Per-frame brightness envelopes for the 'heartbeat'/'creepy' modes.
    heartbeat = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
    creepy = [0.1, 0.2, 0.1, 0.2, 0.3, 0, 0.1, 0.5, 0.4, 0.2, 0.1, 0.3, 0.1, 0.3, 0.1, 0.3, 0.9, 1, 0.9, 0.1, 1, 0.3, 0.1, 0.2, 0.3, 0, 0.1, 0.5]
    # Local shadow copies of the proxy state; comparing against these
    # avoids resetting animation state every frame.
    _current_mode = 'rainbow'
    _current_colors = [(0,0,0)]
    _current_brightness = 0.2
    _current_steps = "default"
    # Tree
    # _current_steps_list = [
    #     {'mode':'marquee','loop':10, 'wait': 0.1, 'colors':[(255,0,0),(255,255,255),(255,0,0),(255,255,255),(255,0,0),(255,255,255)]},
    #     {'mode':'fade','loop':1, 'wait': 0.01, 'colors':[(255,255,255)]},
    #     {'mode':'twinkle','loop':1, 'wait': 0.1, 'colors':[(255,0,0),(255,255,255)]},
    #     {'mode':'fade','loop':1, 'wait': 0.01, 'colors':[(255,255,255)]},
    #     {'mode':'twinkle','loop':1, 'wait': 0.1, 'colors':[(0,255,0),(255,255,255)]},
    #     {'mode':'fade','loop':1, 'wait': 0.01, 'colors':[(255,255,255)]},
    #     {'mode':'twinkle','loop':1, 'wait': 0.1, 'colors':[(0,0,255),(255,255,255)]},
    #     {'mode':'fade','loop':1, 'wait': 0.01, 'colors':[(255,255,255)]},
    # ]
    # House
    # Built-in program used while steps is still "default".
    _current_steps_list = [
        {'mode':'marquee','loop':10, 'wait': 0.01, 'colors':[(255,0,0),(255,255,255),(255,0,0),(255,255,255),(255,0,0),(255,255,255)]},
        {'mode':'fade','loop':10, 'wait': 0.01, 'colors':[(255,0,0),(0,255,0),(255,255,0),(0,0,255)]},
        {'mode':'twinkle','loop':10, 'wait': 0.1, 'colors':[(255,0,0),(255,255,0),(0,255,0),(0,0,255)]},
        {'mode':'twinkle_adv','loop':10, 'wait': 0.001, 'colors':[(255,255,255),(0,255,255),(0,0,255)]},
        {'mode':'chase', 'loop':5, 'wait': 0.001, 'colors': [(255,0,0),(255,255,255)]}
    ]
    # Animation cursors, reset whenever mode/colors change.
    _current_loop = 0
    _current_step = 0
    # NOTE(review): _twinkle_colors/_twinkle_alphas appear unused; the
    # twinkle branches use _strip_colors/_strip_alphas instead, which
    # are first assigned in the mode-change reset below.
    _twinkle_colors = []
    _twinkle_alphas = []
    _current_rgb_index = 0
    _current_pixel_index = 0
    _current_colors_index = 0
    _current_alpha = (0,True)
    while True:
        done = led_config_proxy.get_done()
        running = led_config_proxy.get_running()
        brightness = led_config_proxy.get_brightness()
        if (done):
            break
        # Re-parse the step program only when the string actually changes.
        steps = led_config_proxy.get_steps()
        if steps != _current_steps:
            _current_steps = steps
            _current_steps_list = parse_steps(steps)
            _current_loop = 0
            _current_step = 0
        if _current_steps == None:
            # No step program: use the single mode from the shared config.
            mode = led_config_proxy.get_mode()
            colors = led_config_proxy.get_colors()
            wait_time = led_config_proxy.get_wait_time()
        else:
            # Step program: advance to the next step once this one has
            # completed its configured number of loops.
            loop = _current_steps_list[_current_step]['loop']
            if _current_loop >= loop:
                _current_step = increment_loop(_current_step, len(_current_steps_list)-1)
                _current_loop = 0
            mode = _current_steps_list[_current_step]['mode']
            colors = _current_steps_list[_current_step]['colors']
            wait_time = _current_steps_list[_current_step]['wait']
        if mode != _current_mode or colors != _current_colors:
            # Mode or palette changed: reset all per-animation state.
            _current_mode = mode
            _current_colors = colors
            _strip_colors = []
            _strip_alphas = []
            _current_rgb_index = 0
            _current_pixel_index = 0
            _current_colors_index = 0
            _current_alpha = (0, True)
        if _current_brightness != brightness:
            _current_brightness = brightness
            pixels.brightness = _current_brightness
        if running == True:
            if _current_mode == 'rainbow':
                # NOTE(review): the trailing comma makes this a 1-tuple
                # expression; harmless but presumably a typo.
                pixel_utils.rainbow(pixels,_current_rgb_index),
                _current_rgb_index = increment_loop(_current_rgb_index, 255)
                if _current_rgb_index == 0:
                    _current_loop += 1
            elif mode == 'solid_rainbow':
                pixel_utils.solid_rainbow(pixels, _current_rgb_index),
                _current_rgb_index = increment_loop(_current_rgb_index, 255)
                if _current_rgb_index == 0:
                    _current_loop += 1
            elif mode == 'solid' and len(colors) > 0:
                # Fill the whole strip with one color per frame, cycling
                # through the palette.
                pixels.fill(colors[_current_colors_index])
                pixels.show()
                _current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
                if _current_colors_index == 0:
                    _current_loop += 1
            elif mode == 'fade' and len(colors) > 0:
                # Ramp the current color up and back down via _current_alpha.
                c = colors[_current_colors_index]
                f = (round(c[0] * _current_alpha[0]), round(c[1] * _current_alpha[0]), round(c[2] * _current_alpha[0]))
                pixels.fill(f)
                pixels.show()
                _current_alpha = pixel_utils.alpha_increment(_current_alpha[0], _current_alpha[1])
                if (_current_alpha[0] == 0):
                    _current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
                    if _current_colors_index == 0:
                        _current_loop += 1
            elif mode == "chase":
                if (len(colors) > 0):
                    pixel_utils.color_chase(pixels, _current_pixel_index, colors[_current_colors_index])
                    _current_pixel_index = increment_loop(_current_pixel_index, len(pixels)-1)
                    if _current_pixel_index == 0:
                        _current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
                        if _current_colors_index == 0:
                            _current_loop += 1
            elif mode == "twinkle":
                # Lazily initialize the per-pixel color assignment once
                # per mode change (the reset above empties _strip_colors).
                if len(_strip_colors) == 0:
                    t = pixel_utils.twinkle_init(pixels, colors)
                    _strip_colors = t[0]
                pixel_utils.twinkle(pixels, _strip_colors)
                _current_pixel_index = increment_loop(_current_pixel_index, len(pixels)-1)
                if _current_pixel_index == 0:
                    _current_loop += 1
            elif mode == "twinkle_adv":
                if len(_strip_colors) == 0:
                    t = pixel_utils.twinkle_init(pixels, colors)
                    _strip_colors = t[0]
                    _strip_alphas = t[1]
                pixel_utils.twinkle_adv(pixels, _strip_colors, _strip_alphas)
                _strip_alphas = pixel_utils.twinkle_alpha_increment(_strip_alphas)
                _current_pixel_index = increment_loop(_current_pixel_index, len(pixels)-1)
                if _current_pixel_index == 0:
                    _current_loop += 1
            elif mode == "marquee":
                if len(_strip_colors) == 0:
                    _strip_colors = pixel_utils.marquee_init(pixels, colors)
                pixel_utils.marquee(pixels, _strip_colors)
                _current_pixel_index = increment_loop(_current_pixel_index, len(pixels)-1)
                if _current_pixel_index == 0:
                    _current_loop += 1
            elif mode == "wave":
                # Not implemented; the bare print keeps the frame cadence.
                print()
            elif mode == 'heartbeat' and len(colors) > 0:
                # Scale the current color by the heartbeat envelope.
                c = colors[_current_colors_index]
                f = (round(c[0] * heartbeat[_current_pixel_index]), round(c[1] * heartbeat[_current_pixel_index]), round(c[2] * heartbeat[_current_pixel_index]))
                pixels.fill(f)
                pixels.show()
                _current_pixel_index = increment_loop(_current_pixel_index, len(heartbeat)-1)
                if (_current_pixel_index == 0):
                    _current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
                    if _current_colors_index == 0:
                        _current_loop += 1
            elif mode == 'creepy' and len(colors) > 0:
                c = colors[_current_colors_index]
                f = (round(c[0] * creepy[_current_pixel_index]), round(c[1] * creepy[_current_pixel_index]), round(c[2] * creepy[_current_pixel_index]))
                pixels.fill(f)
                pixels.show()
                _current_pixel_index = increment_loop(_current_pixel_index, len(creepy)-1)
                if (_current_pixel_index == 0):
                    _current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
                    if _current_colors_index == 0:
                        _current_loop += 1
            else:
                # Unknown mode (or empty palette): blank the strip.
                pixel_utils.off(pixels)
            time.sleep(wait_time)
        else:
            # Paused: keep the strip dark and poll slowly.
            pixel_utils.off(pixels)
            time.sleep(1)
    # 'done' was set: blank the strip before the process exits.
    pixel_utils.off(pixels)
    return led_config_proxy
class MainHandler(tornado.web.RequestHandler):
    """Serves the controller UI at '/'."""
    def get(self):
        # Render the single-page control UI template.
        self.render("index.html")
class ModeController(tornado.web.RequestHandler):
    """HTTP API endpoint (/api) that mutates the shared PixelConfig.

    Query parameters (all optional):
      running    'true'/'false' -- resume/pause the LED loop
      mode       animation mode name (effective when no step program is set)
      wait       per-frame delay in seconds
      colors     comma-separated color names / '#rrggbb' values
      brightness strip brightness, 0.0-1.0
      steps      step program string 'mode:colors:wait:loop|...'

    Responds with a JSON object describing the mutations applied.
    """
    def initialize(self, pixel_config):
        # Proxy to the PixelConfig instance living in the manager process.
        self.pixel_config = pixel_config

    def get(self):
        running = self.get_argument('running', 'true', True)
        mode = self.get_argument('mode', None, True)
        wait = self.get_argument('wait', None, True)
        colors = self.get_argument('colors', None, True)
        brightness = self.get_argument('brightness', None, True)
        steps = self.get_argument('steps', None, True)
        # NOTE(review): this clears any running step program on *every*
        # request that omits 'steps' (e.g. a brightness-only call).
        # Preserved as-is since mode/colors only take effect when steps
        # is None -- confirm intent before changing.
        self.pixel_config.set_steps(steps)
        mutations = {}
        if brightness is not None:
            try:
                mutations['brightness'] = float(brightness)
                self.pixel_config.set_brightness(float(brightness))
            except (TypeError, ValueError):
                # Ignore non-numeric brightness values.  (Previously a
                # bare 'except:', which also swallowed system exits.)
                pass
        else:
            if running == 'false':
                self.pixel_config.set_running(False)
                mutations['running'] = False
            elif running == 'true':
                self.pixel_config.set_running(True)
                mutations['running'] = True
            if mode is not None:
                mutations['mode'] = mode
                self.pixel_config.set_mode(mode)
            if colors is not None:
                rgb_colors = get_colors(colors)
                mutations['colors'] = rgb_colors
                self.pixel_config.set_colors(rgb_colors)
            if wait is not None:
                try:
                    mutations['wait'] = float(wait)
                    self.pixel_config.set_wait_time(float(wait))
                except (TypeError, ValueError):
                    # Ignore non-numeric wait values.
                    pass
        # Tornado serializes the dict to JSON and completes the response.
        self.finish(mutations)
if __name__ == '__main__':
    # Start the manager process that owns the shared PixelConfig.
    print('Starting Manager');
    MyManager.register('PixelConfig', PixelConfig)
    manager = Manager()
    pixel_config = manager.PixelConfig()
    # LED rendering runs in its own process so the web server stays
    # responsive; both sides share state through the manager proxy.
    p = multiprocessing.Process(target=led_loop, args=(pixel_config, 'Pixel Loop'))
    app = tornado.web.Application([\
        (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": "/var/www/pixel/static/"}),
        (r'/api', ModeController, {'pixel_config' : pixel_config}),\
        (r'/', MainHandler, {})\
    ])
    # Port 80 requires root privileges (typical for a Pi appliance).
    app.listen(80)
    loop = tornado.ioloop.IOLoop.current()
    try:
        print('Starting LED Loop');
        p.start()
        print('Starting Web Server');
        loop.start()
    except KeyboardInterrupt:
        print('Exiting')
        pass
    finally:
        print('Finally')
        # Signal the LED worker to exit its render loop.
        pixel_config.set_done(True)
        print('Stopping Web Server');
        loop.stop() # might be redundant, the loop has already stopped
        loop.close(True) # needed to close all open sockets
        print('Stopping LED Loop');
        p.join()
        print('Stopping Manager');
        manager.shutdown()
| 35.725681 | 282 | 0.560475 | import time
import re
import neopixel
import board
import tornado.ioloop
import tornado.web
import signal
import pixel_utils
import multiprocessing
from multiprocessing.managers import BaseManager
class MyManager(BaseManager): pass
def Manager():
m = MyManager()
m.start()
return m
class PixelConfig(object):
def __init__(self):
self._running = True
self._done = False
self._mode = 'rainbow'
self._colors = [(255,0,0)]
self._wait_time = 0.001
self._brightness = 0.2
self._steps = "default"
def get_steps(self):
return self._steps
def set_steps(self, steps):
self._steps = steps
def get_done(self):
return self._done
def set_done(self, done):
self._done = done
def get_running(self):
return self._running
def set_running(self, running):
self._running = running
def get_mode(self):
return self._mode
def set_mode(self, mode):
self._mode = mode
def get_colors(self):
return self._colors
def set_colors(self, colors):
self._colors = colors
def get_wait_time(self):
return self._wait_time
def set_wait_time(self, wait_time):
self._wait_time = wait_time
def get_brightness(self):
return self._brightness
def set_brightness(self, brightness):
self._brightness = brightness
def increment_loop(curr, max):
if curr < max:
return curr + 1
else:
return 0
COLOR_NAMES = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred ': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370d8',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#d87093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'rebeccapurple': '#663399',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32'
}
def get_colors(color_param):
if color_param == '':
return []
color_arr = color_param.split(',')
return list(filter(lambda s: s != '', map(get_color, color_arr)))
def get_color(color_string):
if color_string in COLOR_NAMES:
return hex_to_rgb(COLOR_NAMES.get(color_string)[1:])
match = re.match(r'#([0-9a-f]{6})', color_string)
if match:
return hex_to_rgb(color_string[1:])
return ''
def hex_to_rgb(h):
return tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
def parse_steps(steps = None):
if steps == None or steps == "":
return []
return list(filter(lambda s: s != None, map(parse_step, steps.split('|'))))
def parse_step(step = ''):
step_components = step.split(':')
if len(step_components) != 4:
print ('Invalid Step: %s' % step)
return None
return {
'mode': step_components[0],
'colors': get_colors(step_components[1]),
'wait': float(step_components[2]),
'loop': float(step_components[3])
}
def led_loop(led_config_proxy, thread_id):
pixels = neopixel.NeoPixel(board.D18, 450, brightness=0.3, auto_write=False, pixel_order=neopixel.RGB)
pixels.fill((255,255,255))
pixels.show()
heartbeat = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
creepy = [0.1, 0.2, 0.1, 0.2, 0.3, 0, 0.1, 0.5, 0.4, 0.2, 0.1, 0.3, 0.1, 0.3, 0.1, 0.3, 0.9, 1, 0.9, 0.1, 1, 0.3, 0.1, 0.2, 0.3, 0, 0.1, 0.5]
_current_mode = 'rainbow'
_current_colors = [(0,0,0)]
_current_brightness = 0.2
_current_steps = "default"
_current_steps_list = [
{'mode':'marquee','loop':10, 'wait': 0.01, 'colors':[(255,0,0),(255,255,255),(255,0,0),(255,255,255),(255,0,0),(255,255,255)]},
{'mode':'fade','loop':10, 'wait': 0.01, 'colors':[(255,0,0),(0,255,0),(255,255,0),(0,0,255)]},
{'mode':'twinkle','loop':10, 'wait': 0.1, 'colors':[(255,0,0),(255,255,0),(0,255,0),(0,0,255)]},
{'mode':'twinkle_adv','loop':10, 'wait': 0.001, 'colors':[(255,255,255),(0,255,255),(0,0,255)]},
{'mode':'chase', 'loop':5, 'wait': 0.001, 'colors': [(255,0,0),(255,255,255)]}
]
_current_loop = 0
_current_step = 0
_twinkle_colors = []
_twinkle_alphas = []
_current_rgb_index = 0
_current_pixel_index = 0
_current_colors_index = 0
_current_alpha = (0,True)
while True:
done = led_config_proxy.get_done()
running = led_config_proxy.get_running()
brightness = led_config_proxy.get_brightness()
if (done):
break
steps = led_config_proxy.get_steps()
if steps != _current_steps:
_current_steps = steps
_current_steps_list = parse_steps(steps)
_current_loop = 0
_current_step = 0
if _current_steps == None:
mode = led_config_proxy.get_mode()
colors = led_config_proxy.get_colors()
wait_time = led_config_proxy.get_wait_time()
else:
loop = _current_steps_list[_current_step]['loop']
if _current_loop >= loop:
_current_step = increment_loop(_current_step, len(_current_steps_list)-1)
_current_loop = 0
mode = _current_steps_list[_current_step]['mode']
colors = _current_steps_list[_current_step]['colors']
wait_time = _current_steps_list[_current_step]['wait']
if mode != _current_mode or colors != _current_colors:
_current_mode = mode
_current_colors = colors
_strip_colors = []
_strip_alphas = []
_current_rgb_index = 0
_current_pixel_index = 0
_current_colors_index = 0
_current_alpha = (0, True)
if _current_brightness != brightness:
_current_brightness = brightness
pixels.brightness = _current_brightness
if running == True:
if _current_mode == 'rainbow':
pixel_utils.rainbow(pixels,_current_rgb_index),
_current_rgb_index = increment_loop(_current_rgb_index, 255)
if _current_rgb_index == 0:
_current_loop += 1
elif mode == 'solid_rainbow':
pixel_utils.solid_rainbow(pixels, _current_rgb_index),
_current_rgb_index = increment_loop(_current_rgb_index, 255)
if _current_rgb_index == 0:
_current_loop += 1
elif mode == 'solid' and len(colors) > 0:
pixels.fill(colors[_current_colors_index])
pixels.show()
_current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
if _current_colors_index == 0:
_current_loop += 1
elif mode == 'fade' and len(colors) > 0:
c = colors[_current_colors_index]
f = (round(c[0] * _current_alpha[0]), round(c[1] * _current_alpha[0]), round(c[2] * _current_alpha[0]))
pixels.fill(f)
pixels.show()
_current_alpha = pixel_utils.alpha_increment(_current_alpha[0], _current_alpha[1])
if (_current_alpha[0] == 0):
_current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
if _current_colors_index == 0:
_current_loop += 1
elif mode == "chase":
if (len(colors) > 0):
pixel_utils.color_chase(pixels, _current_pixel_index, colors[_current_colors_index])
_current_pixel_index = increment_loop(_current_pixel_index, len(pixels)-1)
if _current_pixel_index == 0:
_current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
if _current_colors_index == 0:
_current_loop += 1
elif mode == "twinkle":
if len(_strip_colors) == 0:
t = pixel_utils.twinkle_init(pixels, colors)
_strip_colors = t[0]
pixel_utils.twinkle(pixels, _strip_colors)
_current_pixel_index = increment_loop(_current_pixel_index, len(pixels)-1)
if _current_pixel_index == 0:
_current_loop += 1
elif mode == "twinkle_adv":
if len(_strip_colors) == 0:
t = pixel_utils.twinkle_init(pixels, colors)
_strip_colors = t[0]
_strip_alphas = t[1]
pixel_utils.twinkle_adv(pixels, _strip_colors, _strip_alphas)
_strip_alphas = pixel_utils.twinkle_alpha_increment(_strip_alphas)
_current_pixel_index = increment_loop(_current_pixel_index, len(pixels)-1)
if _current_pixel_index == 0:
_current_loop += 1
elif mode == "marquee":
if len(_strip_colors) == 0:
_strip_colors = pixel_utils.marquee_init(pixels, colors)
pixel_utils.marquee(pixels, _strip_colors)
_current_pixel_index = increment_loop(_current_pixel_index, len(pixels)-1)
if _current_pixel_index == 0:
_current_loop += 1
elif mode == "wave":
print()
elif mode == 'heartbeat' and len(colors) > 0:
c = colors[_current_colors_index]
f = (round(c[0] * heartbeat[_current_pixel_index]), round(c[1] * heartbeat[_current_pixel_index]), round(c[2] * heartbeat[_current_pixel_index]))
pixels.fill(f)
pixels.show()
_current_pixel_index = increment_loop(_current_pixel_index, len(heartbeat)-1)
if (_current_pixel_index == 0):
_current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
if _current_colors_index == 0:
_current_loop += 1
elif mode == 'creepy' and len(colors) > 0:
c = colors[_current_colors_index]
f = (round(c[0] * creepy[_current_pixel_index]), round(c[1] * creepy[_current_pixel_index]), round(c[2] * creepy[_current_pixel_index]))
pixels.fill(f)
pixels.show()
_current_pixel_index = increment_loop(_current_pixel_index, len(creepy)-1)
if (_current_pixel_index == 0):
_current_colors_index = increment_loop(_current_colors_index, len(colors)-1)
if _current_colors_index == 0:
_current_loop += 1
else:
pixel_utils.off(pixels)
time.sleep(wait_time)
else:
pixel_utils.off(pixels)
time.sleep(1)
pixel_utils.off(pixels)
return led_config_proxy
class MainHandler(tornado.web.RequestHandler):
    """Serves the single-page control UI."""
    def get(self):
        # Render the main page template shipped with the application.
        self.render("index.html")
class ModeController(tornado.web.RequestHandler):
    """HTTP API that mutates the shared LED configuration.

    Every recognised query parameter is applied to the shared pixel-config
    proxy and echoed back in the response body as the ``mutations`` dict,
    so the client can see which changes were accepted.
    """

    def initialize(self, pixel_config):
        # Proxy to the configuration object shared with the LED worker
        # process (a multiprocessing manager proxy).
        self.pixel_config = pixel_config

    def get(self):
        running = self.get_argument('running', 'true', True)
        mode = self.get_argument('mode', None, True)
        wait = self.get_argument('wait', None, True)
        colors = self.get_argument('colors', None, True)
        brightness = self.get_argument('brightness', None, True)
        steps = self.get_argument('steps', None, True)
        self.pixel_config.set_steps(steps)
        mutations = {}
        if brightness is not None:
            # A brightness request is applied on its own; the remaining
            # parameters are ignored for that call (original behaviour).
            try:
                # Convert once instead of twice; bare ``except:`` narrowed so
                # only non-numeric input is rejected instead of every error
                # (including KeyboardInterrupt) being silently swallowed.
                value = float(brightness)
                mutations['brightness'] = value
                self.pixel_config.set_brightness(value)
            except (TypeError, ValueError):
                pass
        else:
            if running == 'false':
                self.pixel_config.set_running(False)
                mutations['running'] = False
            elif running == 'true':
                self.pixel_config.set_running(True)
                mutations['running'] = True
            if mode is not None:
                mutations['mode'] = mode
                self.pixel_config.set_mode(mode)
            if colors is not None:
                # get_colors() is defined elsewhere in this file and parses
                # the query string into RGB tuples.
                rgb_colors = get_colors(colors)
                mutations['colors'] = rgb_colors
                self.pixel_config.set_colors(rgb_colors)
            if wait is not None:
                try:
                    value = float(wait)
                    mutations['wait'] = value
                    self.pixel_config.set_wait_time(value)
                except (TypeError, ValueError):
                    pass
        self.finish(mutations)
# Entry point: wire together the shared configuration proxy, the LED worker
# process and the tornado web application.
if __name__ == '__main__':
    print('Starting Manager');
    # NOTE(review): registration happens on MyManager but the instance is
    # created via Manager() -- confirm Manager is (or subclasses) the
    # registered manager class, otherwise PixelConfig() would not resolve.
    MyManager.register('PixelConfig', PixelConfig)
    manager = Manager()
    # Shared configuration object, used by both the web handlers and the
    # LED worker process.
    pixel_config = manager.PixelConfig()
    p = multiprocessing.Process(target=led_loop, args=(pixel_config, 'Pixel Loop'))
    app = tornado.web.Application([\
        (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": "/var/www/pixel/static/"}),
        (r'/api', ModeController, {'pixel_config' : pixel_config}),\
        (r'/', MainHandler, {})\
        ])
    app.listen(80)
    loop = tornado.ioloop.IOLoop.current()
    try:
        print('Starting LED Loop');
        p.start()
        print('Starting Web Server');
        loop.start()
    except KeyboardInterrupt:
        print('Exiting')
        pass
    finally:
        # Tear down in order: signal the worker to finish, stop and close
        # the IO loop, join the worker process, then shut down the manager.
        print('Finally')
        pixel_config.set_done(True)
        print('Stopping Web Server');
        loop.stop()
        loop.close(True)
        print('Stopping LED Loop');
        p.join()
        print('Stopping Manager');
        manager.shutdown()
| true | true |
f7f458b8e6b98656b2c70a5a152a8370cee1b31a | 2,093 | py | Python | bscetl/jobs/extract_tokens_job.py | XWorldGames/bsc-etl | c4a1ba72381340994ec376e6de860cde6637becc | [
"MIT"
] | null | null | null | bscetl/jobs/extract_tokens_job.py | XWorldGames/bsc-etl | c4a1ba72381340994ec376e6de860cde6637becc | [
"MIT"
] | null | null | null | bscetl/jobs/extract_tokens_job.py | XWorldGames/bsc-etl | c4a1ba72381340994ec376e6de860cde6637becc | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bscetl.jobs.export_tokens_job import ExportTokensJob
class ExtractTokensJob(ExportTokensJob):
    """Extracts token metadata from already-collected contract records.

    Unlike ExportTokensJob, which starts from a list of token addresses,
    this job iterates over contract dicts and exports one token item for
    every contract flagged as ERC-20 or ERC-721.
    """

    def __init__(self, web3, item_exporter, contracts_iterable, max_workers):
        # No pre-known token addresses: the contracts iterable drives the
        # export instead, hence the empty list passed to the base class.
        super().__init__(web3, item_exporter, [], max_workers)
        self.contracts_iterable = contracts_iterable

    def _export(self):
        self.batch_work_executor.execute(self.contracts_iterable, self._export_tokens_from_contracts)

    def _export_tokens_from_contracts(self, contracts):
        # Plain loop instead of a list comprehension executed only for its
        # side effects; nothing consumed the comprehension's result.
        for contract in contracts:
            if contract.get('is_erc20') or contract.get('is_erc721'):
                self._export_token_from_contract(contract)

    def _export_token_from_contract(self, contract):
        # token_service/token_mapper/item_exporter are provided by the
        # ExportTokensJob base class.
        token = self.token_service.get_token(contract['address'])
        token.block_number = contract['block_number']
        token.is_erc721 = contract['is_erc721']
        token_dict = self.token_mapper.token_to_dict(token)
        self.item_exporter.export_item(token_dict)
| 46.511111 | 105 | 0.764453 |
from bscetl.jobs.export_tokens_job import ExportTokensJob
class ExtractTokensJob(ExportTokensJob):
def __init__(self, web3, item_exporter, contracts_iterable, max_workers):
super().__init__(web3, item_exporter, [], max_workers)
self.contracts_iterable = contracts_iterable
def _export(self):
self.batch_work_executor.execute(self.contracts_iterable, self._export_tokens_from_contracts)
def _export_tokens_from_contracts(self, contracts):
[self._export_token_from_contract(contract) for contract in contracts if contract.get('is_erc20')
or contract.get('is_erc721')]
def _export_token_from_contract(self, contract):
token = self.token_service.get_token(contract['address'])
token.block_number = contract['block_number']
token.is_erc721 = contract['is_erc721']
token_dict = self.token_mapper.token_to_dict(token)
self.item_exporter.export_item(token_dict)
| true | true |
f7f458eb25802cbb5ccbbaad91dc276210c1b6f2 | 12,482 | py | Python | posteriors/fiducial/get_cosmo_posteriors.py | emilleishida/resspect_metric | 92f0b5d9de9cd6a031ec67fd76f8d302be0efef8 | [
"MIT"
] | 1 | 2021-08-11T13:04:38.000Z | 2021-08-11T13:04:38.000Z | posteriors/fiducial/get_cosmo_posteriors.py | emilleishida/resspect_metric | 92f0b5d9de9cd6a031ec67fd76f8d302be0efef8 | [
"MIT"
] | 1 | 2021-08-16T10:40:45.000Z | 2021-08-16T10:40:45.000Z | posteriors/fiducial/get_cosmo_posteriors.py | emilleishida/resspect_metric | 92f0b5d9de9cd6a031ec67fd76f8d302be0efef8 | [
"MIT"
] | null | null | null | case = 'fiducial'
import pandas as pd
import numpy as np
import pystan
import os
from resspect.salt3_utils import get_distances
import pickle
import time
from shutil import copyfile
# Pipeline switches -------------------------------------------------------
# Re-run the (slow) SALT3 light-curve fitting instead of reusing results.
fit_lightcurves = False
# Start the master fitres file from scratch instead of appending.
restart_master = True
# number of redshift bins for SALT2mu
nbins = 70
# whether to re-write the fitres file using the true simulated redshifts
replace_z = True
# append the low-z anchor sample to the Hubble diagram
add_lowz = True
# use the bias-corrected low-z simulation
bias = True
###########################################################################################
# Translate PLAsTiCC zenodo class codes into SNANA type codes and build    ################
# the photometric sample file for this case.                               ################
###########################################################################################
# Map PLAsTiCC zenodo class code -> SNANA type code.  Classes 62, 42 and 6
# map to different SNANA codes depending on the true simulation sub-model.
SNANA_types = {90:11, 62:{1:3, 2:13}, 42:{1:2, 2:12, 3:14},
               67:41, 52:43, 64:51, 95:60, 994:61, 992:62,
               993:63, 15:64, 88:70, 92:80, 65:81, 16:83,
               53:84, 991:90, 6:{1:91, 2:93}}
# Human-readable names for each zenodo class code.
types_names = {90: 'Ia', 67: '91bg', 52:'Iax', 42:'II', 62:'Ibc',
               95: 'SLSN', 15:'TDE', 64:'KN', 88:'AGN', 92:'RRL', 65:'M-dwarf',
               16:'EB',53:'Mira', 6:'MicroL', 991:'MicroLB', 992:'ILOT',
               993:'CART', 994:'PISN',995:'MLString'}
# read plasticc test metadata (needed for the sub-model of classes 62/42/6)
test_zenodo_meta = '/media/RESSPECT/data/PLAsTiCC/PLAsTiCC_zenodo/plasticc_test_metadata.csv'
test_metadata = pd.read_csv(test_zenodo_meta)
# read the object sample selected for this case
fname = '/media/RESSPECT/data/PLAsTiCC/for_metrics/' + case + '_samp.csv'
data = pd.read_csv(fname)
# Assemble the columns expected downstream by get_distances().
data_new = {}
data_new['id'] = data['id'].values
data_new['redshift'] = data['redshift'].values
data_new['type'] = [types_names[item] for item in data['code'].values]
data_new['code'] = []
data_new['orig_sample'] = ['test' for i in range(data.shape[0])]
data_new['queryable'] = [True for i in range(data.shape[0])]
data_new['code_zenodo'] = data['code'].values
for i in range(data.shape[0]):
    sncode = data.iloc[i]['code']
    if sncode not in [62, 42, 6]:
        # Direct one-to-one translation.
        data_new['code'].append(SNANA_types[sncode])
        if SNANA_types[sncode] == 60:
            print('sncode = ', sncode, ' new code=', SNANA_types[sncode])
    else:
        # Ambiguous classes: resolve via the true sub-model recorded in
        # the zenodo metadata for this object.
        flag = test_metadata['object_id'].values == data.iloc[i]['id']
        submodel = test_metadata[flag]['true_submodel'].values[0]
        data_new['code'].append(SNANA_types[sncode][submodel])
data_out = pd.DataFrame(data_new)
data_out.to_csv('results/' + case + '_photoids_plasticc.dat', index=False)
###################################################################################
###################################################################################
# Results of the light-curve fitting stage (left empty when a previously
# generated master fitres file is reused).
res = {}
if fit_lightcurves:
    start_time = time.time()
    print('********* Fitting light curves ******************')
    fname = 'results/' + case + '_photoids_plasticc.dat'
    meta = pd.read_csv(fname, index_col=False)
    # NOTE(review): `codes` is computed but never used below.
    codes = np.unique(meta['code'].values)
    # Fit every object with SALT3 and append the results to the master
    # fitres file; restart behaviour is controlled by the flag at the top.
    res = get_distances(fname,
                        data_prefix='LSST_DDF',
                        data_folder='/media/RESSPECT/data/PLAsTiCC/SNANA',
                        select_modelnum=None,
                        salt2mu_prefix='test_salt2mu_res',
                        maxsnnum=50000,
                        select_orig_sample=['test'],
                        salt3_outfile='salt3pipeinput.txt',
                        data_prefix_has_sntype=False,
                        master_fitres_name='results/master_fitres.fitres',
                        append_master_fitres=True,
                        restart_master_fitres=restart_master)
    res['distances'].to_csv('results/mu_photoIa_plasticc_' + case + '.dat', index=False)
    res['cosmopars'].to_csv('results/cosmo_photoIa_plasticc_' + case + '.dat', index=False)
    print("--- %s seconds ---" % (time.time() - start_time))
# SALT2mu input file name
salt2mu_fname = 'SALT2mu.input'
if replace_z:
    if add_lowz:
        if bias:
            # path to the bias-corrected low-z fitres
            fitres_lowz_fname = '/media/RESSPECT/data/temp_lowz_sim/lowz_only_fittres.fitres'
        else:
            raise ValueError('Low-z without bias not implemented yet.')
        fitres_lowz = pd.read_csv(fitres_lowz_fname, index_col=False, comment="#",
                                  skip_blank_lines=True, delim_whitespace=True)
        # replace the fitted redshift by the true simulated one
        fitres_lowz['zHD'] = fitres_lowz['SIM_ZCMB']
    # path to main fitres
    fitres_main_fname = 'results/master_fitres.fitres'
    fitres_main = pd.read_csv(fitres_main_fname, index_col=False, comment="#",
                              skip_blank_lines=True, delim_whitespace=True)
    if add_lowz:
        # join samples considering only common columns
        frames = [fitres_lowz, fitres_main]
        fitres = pd.concat(frames, ignore_index=True)
    else:
        fitres = fitres_main
    # update redshift value
    fitres['zHD'] = fitres['SIM_ZCMB']
    # replace nans with a number so SNANA recognizes the columns
    fitres.fillna(value=-99, inplace=True)
    # save combined fitres to file
    if add_lowz:
        if bias:
            fitres.to_csv('results/master_fitres_new_lowz_withbias.fitres', sep=" ", index=False)
        else:
            fitres.to_csv('results/master_fitres_new_lowz_nobias.fitres', sep=" ", index=False)
    else:
        fitres.to_csv('results/master_fitres_new.fitres', sep=" ", index=False)
samples_dir = '/media/RESSPECT/data/PLAsTiCC/for_metrics/posteriors/' + case + '/'
if not os.path.isdir(samples_dir):
    os.makedirs(samples_dir)
# Rewrite the SALT2mu configuration in place: the bin count, input fitres
# and output prefix depend on the scenario flags.  Context managers ensure
# the handles are closed even if an exception is raised mid-edit.
with open(salt2mu_fname, 'r') as op:
    lin = op.readlines()
lin[0] = 'bins=' + str(nbins) + '\n'
if add_lowz:
    if bias:
        lin[-3] = 'prefix=results/test_salt2mu_lowz_withbias_' + case + '\n'
        lin[-4] = 'file=results/master_fitres_new_lowz_withbias.fitres' + '\n'
        fitres_comb_fname = 'results/test_salt2mu_lowz_withbias_' + case + '.fitres'
        stan_input_fname = 'results/stan_input_salt2mu_lowz_withbias_' + case + '.csv'
    else:
        lin[-3] = 'prefix=results/test_salt2mu_lowz_nobias_' + case + '\n'
        lin[-4] = 'file=results/master_fitres_new_lowz_nobias.fitres' + '\n'
        fitres_comb_fname = 'results/test_salt2mu_lowz_nobias_' + case + '.fitres'
        # bug fix: this filename used to read 'npbias', inconsistent with
        # every other 'nobias' artefact name in this branch
        stan_input_fname = 'results/stan_input_salt2mu_lowz_nobias_' + case + '.csv'
else:
    lin[-3] = 'prefix=results/test_salt2mu_' + case + '\n'
    lin[-4] = 'file=results/master_fitres_new.fitres' + '\n'
    fitres_comb_fname = 'results/test_salt2mu_' + case + '.fitres'
    stan_input_fname = 'results/stan_input_salt2mu_' + case + '.csv'
with open(salt2mu_fname, 'w') as op2:
    op2.writelines(lin)
# get distances from SALT2mu
os.system('SALT2mu.exe ' + salt2mu_fname)
# read SALT2mu output for the Bayesian model
fitres_comb = pd.read_csv(fitres_comb_fname, index_col=False, comment="#", skip_blank_lines=True,
                          delim_whitespace=True)
# set initial conditions
z0 = 0    # initial redshift for the ODE integration
E0 = 0    # integral of 1/E at z = 0
c = 3e5   # speed of light [km/s]
H0 = 70   # Hubble parameter [km/s/Mpc]
# remove duplicated redshifts -- presumably because the Stan ODE output
# times must be strictly increasing; confirm against the model below
fitres_final = fitres_comb.drop_duplicates(subset=['SIM_ZCMB'], keep='first')
# order data according to redshift
indx = np.argsort(fitres_final['SIM_ZCMB'].values)
# create input data for Stan
stan_input = {}
stan_input['nobs'] = fitres_final.shape[0]
stan_input['z'] = fitres_final['SIM_ZCMB'].values[indx]
stan_input['mu'] = fitres_final['MU'].values[indx]
stan_input['muerr'] = fitres_final['MUERR'].values[indx]
stan_input['z0'] = z0
stan_input['H0'] = H0
stan_input['c'] = c
stan_input['E0'] = np.array([E0])
# save only the observable columns of the stan input to file
stan_input2 = {}
stan_input2['z'] = stan_input['z']
stan_input2['mu'] = stan_input['mu']
stan_input2['muerr'] = stan_input['muerr']
stan_input_tofile = pd.DataFrame(stan_input2)
stan_input_tofile[['z', 'mu', 'muerr']].to_csv(stan_input_fname, index=False)
stan_model="""
functions {
/**
* ODE for the inverse Hubble parameter.
* System State E is 1 dimensional.
* The system has 2 parameters theta = (om, w)
*
* where
*
* om: dark matter energy density
* w: dark energy equation of state parameter
*
* The system redshift derivative is
*
* d.E[1] / d.z =
* 1.0/sqrt(om * pow(1+z,3) + (1-om) * (1+z)^(3 * (1+w)))
*
* @param z redshift at which derivatives are evaluated.
* @param E system state at which derivatives are evaluated.
* @param params parameters for system.
* @param x_r real constants for system (empty).
* @param x_i integer constants for system (empty).
*/
real[] Ez(real z,
real[] H,
real[] params,
real[] x_r,
int[] x_i) {
real dEdz[1];
dEdz[1] = 1.0/sqrt(params[1]*(1+z)^3
+(1-params[1])*(1+z)^(3*(1+params[2])));
return dEdz;
}
}
data {
int<lower=1> nobs; // number of data points
real E0[1]; // integral(1/H) at z=0
real z0; // initial redshift, 0
real c; // speed of light
real H0; // hubble parameter
real mu[nobs]; // distance modulus
vector[nobs] muerr; // error in distance modulus
real<lower=0> z[nobs]; // redshift
}
transformed data {
real x_r[0]; // required by ODE (empty)
int x_i[0];
}
parameters{
real<lower=0, upper=1> om; // dark matter energy density
real<lower=-2, upper=0> w; // dark energy equation of state parameter
}
transformed parameters{
real DC[nobs,1]; // co-moving distance
real pars[2]; // ODE input = (om, w)
real dl[nobs]; // luminosity distance
real DH; // Hubble distance = c/H0
DH = (c/H0);
pars[1] = om;
pars[2] = w;
// Integral of 1/E(z)
DC = integrate_ode_rk45(Ez, E0, z0, z, pars, x_r, x_i);
for (i in 1:nobs) {
dl[i] = 25 + 5 * log10(DH * (1 + z[i]) * DC[i, 1]);
}
}
model{
// priors and likelihood
om ~ normal(0.3, 0.1);
w ~ normal(-1, 0.2);
mu ~ normal(dl, muerr);
}
generated quantities {
vector[nobs] log_lik;
vector[nobs] mu_hat;
for (j in 1:nobs) {
log_lik[j] = normal_lpdf(mu[j] | dl[j], muerr[j]);
mu_hat[j] = normal_rng(dl[j], muerr[j]);
}
}
"""
model = pystan.StanModel(model_code=stan_model)
fit = model.sampling(data=stan_input, iter=16000, chains=3, warmup=10000, control={'adapt_delta':0.99})
# print summary
res = fit.stansummary(pars=["om", "w"])
check = str(pystan.check_hmc_diagnostics(fit))
print(res)
print( ' ******* ')
print(check)
# Output artefact names depend on the scenario (low-z sample / bias flags).
# NOTE(review): samples_dir already ends with '/', so the chains/trace paths
# contain a double slash -- harmless on POSIX but worth confirming.
if add_lowz and bias:
    summ_fname = samples_dir + 'stan_summary_' + case + '_lowz_withbias.dat'
    summ_fname2 = 'results/stan_summary_' + case + '_lowz_withbias.dat'
    chains_fname = samples_dir + '/chains_' + case + '_lowz_withbias.pkl'
    trace_fname = samples_dir + '/trace_plot_' + case + '_lowz_withbias.png'
    trace_fname2 = 'results/trace_plot_' + case + '_lowz_withbias.png'
elif add_lowz and not bias:
    summ_fname = samples_dir + 'stan_summary_' + case + '_lowz_nobias.dat'
    summ_fname2 = 'results/stan_summary_' + case + '_lowz_nobias.dat'
    chains_fname = samples_dir + '/chains_' + case + '_lowz_nobias.pkl'
    trace_fname = samples_dir + '/trace_plot_' + case + '_lowz_nobias.png'
    trace_fname2 = 'results/trace_plot_' + case + '_lowz_nobias.png'
else:
    summ_fname = samples_dir + 'stan_summary_' + case + '.dat'
    summ_fname2 = 'results/stan_summary_' + case + '.dat'
    chains_fname = samples_dir + '/chains_' + case + '.pkl'
    trace_fname = samples_dir + '/trace_plot_' + case + '.png'
    trace_fname2 = 'results/trace_plot_' + case + '.png'
# write the Stan fit summary and HMC diagnostics to disk
op2 = open(summ_fname, 'w')
op2.write(res)
op2.write('\n ************* \n')
op2.write(check)
op2.close()
# persist the posterior samples
samples = fit.extract(permuted=True)
pickle.dump(samples, open(chains_fname, "wb"))
pystan.check_hmc_diagnostics(fit)
# plot chains
import arviz
import matplotlib.pyplot as plt
arviz.plot_trace(fit, ['om', 'w'])
plt.savefig(trace_fname)
copyfile(trace_fname, trace_fname2)
copyfile(summ_fname, summ_fname2)
import pandas as pd
import numpy as np
import pystan
import os
from resspect.salt3_utils import get_distances
import pickle
import time
from shutil import copyfile
fit_lightcurves = False
restart_master = True
nbins = 70
replace_z = True
add_lowz = True
bias = True
| true | true |
f7f4594f046dcd50ce2dc1a91fa6cb8f7299dd8b | 3,475 | py | Python | main.py | xrayian/PhysicsX | 5211a43f79a179609144619ecbefa95d69379793 | [
"Apache-2.0"
] | 2 | 2019-12-07T09:29:59.000Z | 2019-12-27T23:45:54.000Z | main.py | xrayian/PhysicsX | 5211a43f79a179609144619ecbefa95d69379793 | [
"Apache-2.0"
] | null | null | null | main.py | xrayian/PhysicsX | 5211a43f79a179609144619ecbefa95d69379793 | [
"Apache-2.0"
] | null | null | null | from os import system as console
try:
from support import Velocity, Acceleration, Time, Distance
except:
print("Support Library Missing, Program will terminate")
exit()
finder = None
def intro():
console("title PhysicsX v1.4 Modular")
print("\n" + ' ' + "#" * 48)
introtext=(r""" ## ##
## \ \\/ / Version: 1.4 ##
## \ \\/ Code: xrayian ##
## /\ \\ ##
## PHYSICS_/ /\ \\ ##
## ##""")
print(introtext)
print(' ' + "#" * 48)
print("\n" + "#" * 68)
print("\n Enter [?] to determine value for the variable.\n Enter [clear] to clear the console.\n Press Enter to keep unspecified.")
start()
def parse_data(string, default = None, zero = 0):
    """Parse one console answer into a float, a command, or a default.

    string  -- raw text entered by the user.
    default -- value returned for an empty answer (and for '0' when
               zero == 0, i.e. for fields where 0 means "unspecified").
    zero    -- 0 (default) if a literal '0' should fall back to `default`;
               any other value makes '0' parse as the number 0.0.

    '?' marks the quantity to solve for (at most one per round);
    'clear' clears the console; anything unparsable restarts the round.
    """
    if string == "":
        return default
    elif string == "?":
        global finder
        if finder is None:
            finder = "self"
            return string
        else:
            print("\n [Parse_Error]: Multiple [?] values aren't acceptable")
            start()
    elif string == "clear":
        # Bug fix: 'clear' used to be checked *after* the zero == 0 numeric
        # branch, making it unreachable for those fields (it was reported
        # as a parse error instead).  Handle the command before parsing.
        console("cls")
        start()
    elif zero == 0:
        if string == '0':
            return default
        else:
            try:
                # Narrowed from a bare ``except:`` -- only reject input
                # that float() genuinely cannot convert.
                return float(string)
            except (TypeError, ValueError):
                print("\n [Parse_Error]: `" + string + "` is not a valid number or command")
                start()
    else:
        try:
            return float(string)
        except (TypeError, ValueError):
            print("\n [Parse_Error]: `" + string + "` is not a valid number or command")
            start()
def start():
    """Run one interactive round: read the five kinematic quantities and
    solve for whichever one was marked with '?'.

    NOTE(review): every branch recurses back into start(); a very long
    session could eventually hit Python's recursion limit.
    """
    global finder
    finder = None
    print("\n" + "#" * 68)
    # zero='1' for the velocities so an explicit 0 is a real value there;
    # for distance/time/acceleration a '0' answer means "unspecified".
    final_velocity = parse_data(input("\n Enter Velocity(m/s): "),zero='1')
    distance = parse_data(input(" Enter Traveled Distance(m): "))
    time = parse_data(input(" Enter Required Time(s): "))
    initial_velocity = parse_data(input(" Enter Initial Velocity(m/s): "),default=0,zero = '1')
    acceleration = parse_data(input(" Enter Acceleration(m/s²): "))
    # Dispatch on which quantity carries the '?' marker; the solver classes
    # come from the `support` module imported at the top of the file.
    if final_velocity == '?':
        instance = Velocity(distance=distance,time=time,acceleration=acceleration,initial_velocity=initial_velocity)
        print(f"\n {instance.calculate()}")
        start()
    elif distance == '?':
        instance = Distance(final_velocity=final_velocity,time=time,acceleration=acceleration,initial_velocity=initial_velocity)
        print(f"\n {instance.calculate()}")
        start()
    elif time == '?':
        instance = Time(distance=distance,acceleration=acceleration,final_velocity=final_velocity,initial_velocity=initial_velocity)
        print(f"\n {instance.calculate()}")
        start()
    elif acceleration == '?':
        instance = Acceleration(distance=distance,time=time,final_velocity=final_velocity,initial_velocity=initial_velocity)
        print(f"\n {instance.calculate()}")
        start()
    elif initial_velocity == '?':
        print("\n [Coming_Soon]: Calculating initial velocity is not available now")
        start()
    else:
        print("\n [Coming_Soon]: Proofchecker not ready yet")
        start()
if __name__ == "__main__":
    intro()
| 32.783019 | 139 | 0.513957 | from os import system as console
try:
from support import Velocity, Acceleration, Time, Distance
except:
print("Support Library Missing, Program will terminate")
exit()
finder = None
def intro():
console("title PhysicsX v1.4 Modular")
print("\n" + ' ' + "#" * 48)
introtext=(r""" ## ##
## \ \\/ / Version: 1.4 ##
## \ \\/ Code: xrayian ##
## /\ \\ ##
## PHYSICS_/ /\ \\ ##
## ##""")
print(introtext)
print(' ' + "#" * 48)
print("\n" + "#" * 68)
print("\n Enter [?] to determine value for the variable.\n Enter [clear] to clear the console.\n Press Enter to keep unspecified.")
start()
def parse_data(string, default = None, zero = 0):
if string == "":
return default
elif string == "?":
global finder
if finder is None:
finder = "self"
return string
else:
print("\n [Parse_Error]: Multiple [?] values aren't acceptable")
start()
elif zero == 0:
if string == '0':
return default
else:
try:
return float(string)
except:
print("\n [Parse_Error]: `"+ string + "` is not a valid number or command")
start()
elif string == "clear":
console("cls")
start()
else:
try:
return float(string)
except:
print("\n [Parse_Error]: `"+ string + "` is not a valid number or command")
start()
def start():
global finder
finder = None
print("\n" + "#" * 68)
final_velocity = parse_data(input("\n Enter Velocity(m/s): "),zero='1')
distance = parse_data(input(" Enter Traveled Distance(m): "))
time = parse_data(input(" Enter Required Time(s): "))
initial_velocity = parse_data(input(" Enter Initial Velocity(m/s): "),default=0,zero = '1')
acceleration = parse_data(input(" Enter Acceleration(m/s²): "))
if final_velocity == '?':
instance = Velocity(distance=distance,time=time,acceleration=acceleration,initial_velocity=initial_velocity)
print(f"\n {instance.calculate()}")
start()
elif distance == '?':
instance = Distance(final_velocity=final_velocity,time=time,acceleration=acceleration,initial_velocity=initial_velocity)
print(f"\n {instance.calculate()}")
start()
elif time == '?':
instance = Time(distance=distance,acceleration=acceleration,final_velocity=final_velocity,initial_velocity=initial_velocity)
print(f"\n {instance.calculate()}")
start()
elif acceleration == '?':
instance = Acceleration(distance=distance,time=time,final_velocity=final_velocity,initial_velocity=initial_velocity)
print(f"\n {instance.calculate()}")
start()
elif initial_velocity == '?':
print("\n [Coming_Soon]: Calculating initial velocity is not available now")
start()
else:
print("\n [Coming_Soon]: Proofchecker not ready yet")
start()
if __name__ == "__main__":
intro()
| true | true |
f7f45ad1101b960ca7a5e5945b3dd0afb1361c85 | 423 | py | Python | gettext_compile.py | SylvainCorlay/spyder | b87bfa08abd53e1c97b59feeb51f665f6a632415 | [
"MIT"
] | 2 | 2016-01-23T11:52:24.000Z | 2021-04-27T03:52:25.000Z | gettext_compile.py | SylvainCorlay/spyder | b87bfa08abd53e1c97b59feeb51f665f6a632415 | [
"MIT"
] | null | null | null | gettext_compile.py | SylvainCorlay/spyder | b87bfa08abd53e1c97b59feeb51f665f6a632415 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
from gettext_helpers import do_compile
# Compile translation catalogs for the main package and each bundled
# plugin (presumably .po -> .mo; see gettext_helpers for details).
if __name__ == "__main__":
    do_compile("spyderlib")
    do_compile("pylint", "spyplugins/ui/pylint")
    do_compile("profiler", "spyplugins/ui/profiler")
    do_compile("breakpoints", "spyplugins/ui/breakpoints")
| 30.214286 | 59 | 0.699764 |
from gettext_helpers import do_compile
if __name__ == "__main__":
do_compile("spyderlib")
do_compile("pylint", "spyplugins/ui/pylint")
do_compile("profiler", "spyplugins/ui/profiler")
do_compile("breakpoints", "spyplugins/ui/breakpoints")
| true | true |
f7f45b296d1c69c1af6be7bdcbdcaa3226d917a6 | 3,630 | py | Python | tests/test_slddb/test_dbcreation.py | bmaranville/orsopy | 74083afdce8f8f1ab3866c7f1f5209942c8734db | [
"MIT"
] | null | null | null | tests/test_slddb/test_dbcreation.py | bmaranville/orsopy | 74083afdce8f8f1ab3866c7f1f5209942c8734db | [
"MIT"
] | null | null | null | tests/test_slddb/test_dbcreation.py | bmaranville/orsopy | 74083afdce8f8f1ab3866c7f1f5209942c8734db | [
"MIT"
] | null | null | null | import sys
import unittest
from numpy import ndarray, testing
from orsopy.slddb import SLDDB, dbconfig, element_table
class TestCreateDB(unittest.TestCase):
    """Tests for creation and in-place schema migration of the SLD database."""

    @classmethod
    def setUpClass(cls):
        # One in-memory database shared by all tests of this class.
        cls.db = SLDDB(":memory:")
        cls.db.create_database()

    @classmethod
    def tearDownClass(cls):
        del cls.db

    @staticmethod
    def _rebuild_db_lookup():
        # Recompute dbconfig.db_lookup after the FIELDS/CONVERTERS/DEFAULTS
        # lists were modified.  This dict rebuild used to be copy-pasted
        # three times inside test_add_field.
        dbconfig.db_lookup = dict(
            (field, (i, converter, default))
            for i, (field, converter, default) in enumerate(
                zip(
                    dbconfig.DB_MATERIALS_FIELDS,
                    dbconfig.DB_MATERIALS_CONVERTERS,
                    dbconfig.DB_MATERIALS_FIELD_DEFAULTS,
                )
            )
        )

    def test_tables(self):
        # The materials table must exist after create_database().
        c = self.db.db.cursor()
        c.execute("SELECT name FROM sqlite_master WHERE type='table'")
        items = c.fetchall()
        for i, tbl in enumerate([dbconfig.DB_MATERIALS_NAME]):
            with self.subTest(msg=tbl, i=i):
                self.assertTrue((tbl,) in items)

    def test_element_search(self):
        # Looking an element up by symbol and by atomic number must yield
        # equivalent entries.
        with self.subTest("database search", i=0):
            s1 = element_table.get_element("Si")
            s2 = element_table.get_element(14)
        with self.subTest("equality", i=0):
            self.assertEqual(s1.Z, s2.Z)
            self.assertEqual(s1.symbol, s2.symbol)
            self.assertEqual(s1.mass, s2.mass)
            self.assertEqual(s1.b, s2.b)
            testing.assert_array_equal(s1._xdata, s2._xdata)

    def test_add_field(self):
        # call without changes
        self.db.update_fields()
        # call with a column appended at the end
        dbconfig.DB_MATERIALS_FIELDS.append("testadd")
        dbconfig.DB_MATERIALS_CONVERTERS.append(dbconfig.DB_MATERIALS_CONVERTERS[-1])
        dbconfig.DB_MATERIALS_FIELD_DEFAULTS.append(dbconfig.DB_MATERIALS_FIELD_DEFAULTS[-1])
        self._rebuild_db_lookup()
        self.db.update_fields()
        # call with a column inserted in the middle
        dbconfig.DB_MATERIALS_FIELDS.insert(5, "testadd2")
        dbconfig.DB_MATERIALS_CONVERTERS.insert(5, dbconfig.DB_MATERIALS_CONVERTERS[-1])
        dbconfig.DB_MATERIALS_FIELD_DEFAULTS.insert(5, dbconfig.DB_MATERIALS_FIELD_DEFAULTS[-1])
        self._rebuild_db_lookup()
        self.db.update_fields()
        # reset the configuration and recreate a fresh database
        dbconfig.DB_MATERIALS_FIELDS.pop(-1)
        dbconfig.DB_MATERIALS_FIELDS.pop(5)
        dbconfig.DB_MATERIALS_CONVERTERS.pop(-1)
        dbconfig.DB_MATERIALS_CONVERTERS.pop(5)
        dbconfig.DB_MATERIALS_FIELD_DEFAULTS.pop(-1)
        dbconfig.DB_MATERIALS_FIELD_DEFAULTS.pop(5)
        self._rebuild_db_lookup()
        self.db = SLDDB(":memory:")
        self.db.create_database()

    def test_backup(self):
        # presumably because sqlite3's backup API requires Python >= 3.7
        if sys.version_info.minor > 6:
            self.db.backup(":memory:")
| 34.571429 | 96 | 0.57135 | import sys
import unittest
from numpy import ndarray, testing
from orsopy.slddb import SLDDB, dbconfig, element_table
class TestCreateDB(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.db = SLDDB(":memory:")
cls.db.create_database()
@classmethod
def tearDownClass(cls):
del cls.db
def test_tables(self):
c = self.db.db.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
items = c.fetchall()
for i, tbl in enumerate([dbconfig.DB_MATERIALS_NAME]):
with self.subTest(msg=tbl, i=i):
self.assertTrue((tbl,) in items)
def test_element_search(self):
with self.subTest("database search", i=0):
s1 = element_table.get_element("Si")
s2 = element_table.get_element(14)
with self.subTest("equality", i=0):
self.assertEqual(s1.Z, s2.Z)
self.assertEqual(s1.symbol, s2.symbol)
self.assertEqual(s1.mass, s2.mass)
self.assertEqual(s1.b, s2.b)
testing.assert_array_equal(s1._xdata, s2._xdata)
def test_add_field(self):
global dbconfig
self.db.update_fields()
dbconfig.DB_MATERIALS_FIELDS.append("testadd")
dbconfig.DB_MATERIALS_CONVERTERS.append(dbconfig.DB_MATERIALS_CONVERTERS[-1])
dbconfig.DB_MATERIALS_FIELD_DEFAULTS.append(dbconfig.DB_MATERIALS_FIELD_DEFAULTS[-1])
dbconfig.db_lookup = dict(
[
(field, (i, converter, default))
for i, (field, converter, default) in enumerate(
zip(
dbconfig.DB_MATERIALS_FIELDS,
dbconfig.DB_MATERIALS_CONVERTERS,
dbconfig.DB_MATERIALS_FIELD_DEFAULTS,
)
)
]
)
self.db.update_fields()
dbconfig.DB_MATERIALS_FIELDS.insert(5, "testadd2")
dbconfig.DB_MATERIALS_CONVERTERS.insert(5, dbconfig.DB_MATERIALS_CONVERTERS[-1])
dbconfig.DB_MATERIALS_FIELD_DEFAULTS.insert(5, dbconfig.DB_MATERIALS_FIELD_DEFAULTS[-1])
dbconfig.db_lookup = dict(
[
(field, (i, converter, default))
for i, (field, converter, default) in enumerate(
zip(
dbconfig.DB_MATERIALS_FIELDS,
dbconfig.DB_MATERIALS_CONVERTERS,
dbconfig.DB_MATERIALS_FIELD_DEFAULTS,
)
)
]
)
self.db.update_fields()
dbconfig.DB_MATERIALS_FIELDS.pop(-1)
dbconfig.DB_MATERIALS_FIELDS.pop(5)
dbconfig.DB_MATERIALS_CONVERTERS.pop(-1)
dbconfig.DB_MATERIALS_CONVERTERS.pop(5)
dbconfig.DB_MATERIALS_FIELD_DEFAULTS.pop(-1)
dbconfig.DB_MATERIALS_FIELD_DEFAULTS.pop(5)
dbconfig.db_lookup = dict(
[
(field, (i, converter, default))
for i, (field, converter, default) in enumerate(
zip(
dbconfig.DB_MATERIALS_FIELDS,
dbconfig.DB_MATERIALS_CONVERTERS,
dbconfig.DB_MATERIALS_FIELD_DEFAULTS,
)
)
]
)
self.db = SLDDB(":memory:")
self.db.create_database()
def test_backup(self):
if sys.version_info.minor > 6:
self.db.backup(":memory:")
| true | true |
f7f45d77db07e7cbee4bddde4115b571e24efbcb | 6,490 | py | Python | integration/airflow/openlineage/airflow/utils.py | kedar-cz/OpenLineage | bd75b53c84fd9655f593c4f161e15c14785eb93e | [
"Apache-2.0"
] | null | null | null | integration/airflow/openlineage/airflow/utils.py | kedar-cz/OpenLineage | bd75b53c84fd9655f593c4f161e15c14785eb93e | [
"Apache-2.0"
] | null | null | null | integration/airflow/openlineage/airflow/utils.py | kedar-cz/OpenLineage | bd75b53c84fd9655f593c4f161e15c14785eb93e | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import subprocess
from uuid import uuid4
from urllib.parse import urlparse, urlunparse
import airflow
from airflow.models import Connection
from airflow.utils.db import provide_session
from openlineage.airflow.facets import AirflowVersionRunFacet, AirflowRunArgsRunFacet
try:
# Import from pendulum 1.x version
from pendulum import Pendulum, from_timestamp
except ImportError:
# Import for Pendulum 2.x version
from pendulum import DateTime as Pendulum, from_timestamp
log = logging.getLogger(__name__)
_NOMINAL_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
class JobIdMapping:
    """Persists the OpenLineage run id of a task in an Airflow Variable so
    it can be looked up (and optionally removed) when the task finishes.

    ``job_name`` here is the OpenLineage job name, i.e. the combination of
    dag_id and task_id.
    """

    @staticmethod
    def set(job_name: str, dag_run_id: str, task_run_id: str):
        airflow.models.Variable.set(
            JobIdMapping.make_key(job_name, dag_run_id),
            json.dumps(task_run_id)
        )

    @staticmethod
    def pop(job_name, dag_run_id, session):
        # get-and-delete in one call.
        return JobIdMapping.get(job_name, dag_run_id, session, delete=True)

    @staticmethod
    def get(job_name, dag_run_id, session, delete=False):
        key = JobIdMapping.make_key(job_name, dag_run_id)
        if not session:
            return None
        query = session.query(airflow.models.Variable).filter(
            airflow.models.Variable.key == key)
        # Fetch the row once; the previous version called q.first() twice,
        # issuing two identical SELECTs.
        row = query.first()
        if row is None:
            return None
        val = row.val
        if delete:
            query.delete(synchronize_session=False)
        if val:
            return json.loads(val)
        return None

    @staticmethod
    def make_key(job_name, run_id):
        return "openlineage_id_mapping-{}-{}".format(job_name, run_id)
def url_to_https(url) -> str:
    """Convert a git remote URL to its plain https form.

    ``git@host:org/repo.git`` becomes ``https://host/org/repo``;
    ``https://...`` URLs are kept, with a trailing ``.git`` stripped.
    Returns None for an empty URL; raises ValueError for any other scheme.
    """
    if not url:
        return None

    https_base = None
    if url.startswith('git@'):
        tail = url.split('git@')[1:2]
        if tail:
            # First ':' separates host from path in the ssh form.
            https_base = 'https://{}'.format(tail[0].replace(':', '/', 1))
    elif url.startswith('https://'):
        https_base = url

    if not https_base:
        raise ValueError(f"Unable to extract location from: {url}")

    return https_base[:-4] if https_base.endswith('.git') else https_base
def get_location(file_path) -> str:
    """Build a browsable source URL (e.g. a GitHub blob link) for *file_path*
    using the local git repository's metadata.

    Returns None when file_path is empty or no https base URL can be derived.
    """
    if not file_path:
        return None

    # Resolve the file's directory so the git commands run inside the repo.
    absolute = os.path.abspath(file_path)
    directory = os.path.dirname(absolute)
    name = os.path.basename(file_path)

    remote_url = execute_git(directory, ['config', '--get', 'remote.origin.url'])
    prefix_in_repo = execute_git(directory, ['rev-parse', '--show-prefix'])
    # Most recent commit that touched this particular file.
    last_commit = execute_git(directory, ['rev-list', 'HEAD', '-1', '--', name])

    https_base = url_to_https(remote_url)
    if not https_base:
        return None
    return f'{https_base}/blob/{last_commit}/{prefix_in_repo}{name}'
def execute_git(cwd, params):
    """Run ``git <params>`` in *cwd* and return its stripped stdout.

    Raises subprocess.TimeoutExpired if git does not finish within 0.5s.
    """
    p = subprocess.Popen(['git'] + params,
                         cwd=cwd, stdout=subprocess.PIPE, stderr=None)
    # communicate() with a timeout both waits and drains the pipe. The
    # previous wait()-then-communicate() could deadlock once git produced
    # more output than the OS pipe buffer holds, since nothing was reading
    # stdout while wait() blocked.
    out, _ = p.communicate(timeout=0.5)
    return out.decode('utf8').strip()
def get_connection_uri(conn: Connection):
    """Return *conn*'s URI with username/password stripped from the netloc.

    Used so that lineage metadata never carries credentials.
    """
    conn_uri = conn.get_uri()
    parsed = urlparse(conn_uri)

    # Rebuild the netloc without credentials. Only append ':port' when a
    # port is actually present - the previous f-string emitted the literal
    # "host:None" for port-less URIs.
    netloc = parsed.hostname or ''
    if parsed.port is not None:
        netloc = f'{netloc}:{parsed.port}'
    parsed = parsed._replace(netloc=netloc)
    return urlunparse(parsed)
def get_normalized_postgres_connection_uri(conn: Connection):
    """
    URIs starting with postgresql:// and postgres:// are both valid
    PostgreSQL connection strings. This function normalizes it to
    postgres:// as canonical name according to OpenLineage spec.
    """
    connection_uri = get_connection_uri(conn)
    if not connection_uri.startswith('postgresql'):
        return connection_uri
    # Replace only the scheme prefix, never later occurrences.
    return connection_uri.replace('postgresql', 'postgres', 1)
@provide_session
def get_connection(conn_id, session=None) -> Connection:
    """Resolve an Airflow Connection by id.

    The AIRFLOW_CONN_<ID> environment variable takes precedence; otherwise
    the connection table is queried. Returns None when neither source has
    the connection.
    """
    # TODO: We may want to throw an exception if the connection
    # does not exist (ex: AirflowConnectionException). The connection
    # URI is required when collecting metadata for a data source.
    uri_from_env = os.environ.get('AIRFLOW_CONN_' + conn_id.upper())
    if uri_from_env:
        connection = Connection()
        connection.parse_from_uri(uri_from_env)
        return connection
    query = session.query(Connection).filter(Connection.conn_id == conn_id)
    return query.first()
def get_job_name(task):
    """OpenLineage job name for an Airflow task: '<dag_id>.<task_id>'."""
    return '{}.{}'.format(task.dag_id, task.task_id)
def get_custom_facets(task, is_external_trigger: bool):
    """Airflow-specific OpenLineage run facets for *task*."""
    facets = {
        "airflow_runArgs": AirflowRunArgsRunFacet(is_external_trigger),
        "airflow_version": AirflowVersionRunFacet.from_task(task),
    }
    return facets
def new_lineage_run_id(dag_run_id: str, task_id: str) -> str:
    """Mint a fresh random run id.

    The arguments are currently unused; every call yields a new UUID4 string.
    """
    fresh_id = uuid4()
    return str(fresh_id)
class DagUtils:
    """Helpers for extracting and formatting DAG timing information."""

    @staticmethod  # added for consistency with the other helpers; class-level calls are unchanged
    def get_execution_date(**kwargs):
        """Return the ``execution_date`` keyword argument, if any."""
        return kwargs.get('execution_date')

    @staticmethod
    def get_start_time(execution_date=None):
        """ISO-8601 start time derived from *execution_date*, or None."""
        if execution_date:
            return DagUtils.to_iso_8601(execution_date)
        else:
            return None

    @staticmethod
    def get_end_time(execution_date, default):
        """ISO-8601 end time: *default* when execution_date is set, else None."""
        if execution_date:
            end_time = default
        else:
            end_time = None
        if end_time:
            end_time = DagUtils.to_iso_8601(end_time)
        return end_time

    @staticmethod
    def to_iso_8601(dt):
        """Format *dt* (Pendulum/datetime instance or epoch milliseconds)
        using _NOMINAL_TIME_FORMAT; returns None for a falsy value."""
        if not dt:
            return None
        if isinstance(dt, int):
            # Integers are interpreted as milliseconds since the epoch.
            dt = from_timestamp(dt / 1000.0)
        if isinstance(dt, Pendulum):
            # NOTE(review): pendulum's format() uses its own tokens rather
            # than strftime directives - confirm this branch produces the
            # intended output on pendulum 2.x.
            return dt.format(_NOMINAL_TIME_FORMAT)
        else:
            return dt.strftime(_NOMINAL_TIME_FORMAT)
| 29.5 | 85 | 0.659168 |
import json
import logging
import os
import subprocess
from uuid import uuid4
from urllib.parse import urlparse, urlunparse
import airflow
from airflow.models import Connection
from airflow.utils.db import provide_session
from openlineage.airflow.facets import AirflowVersionRunFacet, AirflowRunArgsRunFacet
try:
from pendulum import Pendulum, from_timestamp
except ImportError:
from pendulum import DateTime as Pendulum, from_timestamp
log = logging.getLogger(__name__)
_NOMINAL_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
class JobIdMapping:
    """Persists the mapping between an OpenLineage job run and the Airflow
    task run id, using Airflow Variables as a key/value store.
    job_name is the OL job name - a combination of dag_id and task_id."""
    @staticmethod
    def set(job_name: str, dag_run_id: str, task_run_id: str):
        """Store task_run_id under the key derived from job_name + dag_run_id."""
        airflow.models.Variable.set(
            JobIdMapping.make_key(job_name, dag_run_id),
            json.dumps(task_run_id)
        )
    @staticmethod
    def pop(job_name, dag_run_id, session):
        """Fetch the stored run id and delete the backing Variable."""
        return JobIdMapping.get(job_name, dag_run_id, session, delete=True)
    @staticmethod
    def get(job_name, dag_run_id, session, delete=False):
        """Look up the stored run id; returns None when no session is given
        or no matching Variable exists. Optionally deletes the Variable."""
        key = JobIdMapping.make_key(job_name, dag_run_id)
        if session:
            q = session.query(airflow.models.Variable).filter(
                airflow.models.Variable.key == key)
            # NOTE(review): q.first() is evaluated twice, issuing two query
            # executions - consider caching the first result.
            if not q.first():
                return None
            else:
                val = q.first().val
                if delete:
                    q.delete(synchronize_session=False)
                if val:
                    return json.loads(val)
        return None
    @staticmethod
    def make_key(job_name, run_id):
        """Build the Variable key for a (job_name, run_id) pair."""
        return "openlineage_id_mapping-{}-{}".format(job_name, run_id)
def url_to_https(url) -> str:
    """Normalize a git remote URL (ssh 'git@' or 'https://' form) to an https
    URL without the trailing '.git' suffix. Returns None for an empty url;
    raises ValueError when the url matches neither form."""
    if not url:
        return None
    base_url = None
    if url.startswith('git@'):
        # git@host:org/repo.git -> https://host/org/repo.git
        part = url.split('git@')[1:2]
        if part:
            base_url = f'https://{part[0].replace(":", "/", 1)}'
    elif url.startswith('https://'):
        base_url = url
    if not base_url:
        raise ValueError(f"Unable to extract location from: {url}")
    # Drop the conventional ".git" repository suffix.
    if base_url.endswith('.git'):
        base_url = base_url[:-4]
    return base_url
def get_location(file_path) -> str:
    """Build a browsable source URL (e.g. a GitHub blob link) for file_path
    using the local git repository's metadata. Returns None when file_path
    is empty or no https base URL can be derived."""
    if not file_path:
        return None
    # Resolve the file's directory so the git commands run inside the repo.
    abs_path = os.path.abspath(file_path)
    file_name = os.path.basename(file_path)
    cwd = os.path.dirname(abs_path)
    repo_url = execute_git(cwd, ['config', '--get', 'remote.origin.url'])
    repo_relative_path = execute_git(cwd, ['rev-parse', '--show-prefix'])
    # Most recent commit that touched this particular file.
    commit_id = execute_git(cwd, ['rev-list', 'HEAD', '-1', '--', file_name])
    base_url = url_to_https(repo_url)
    if not base_url:
        return None
    return f'{base_url}/blob/{commit_id}/{repo_relative_path}{file_name}'
def execute_git(cwd, params):
    """Run ``git <params>`` in *cwd* and return its stripped stdout.

    Raises subprocess.TimeoutExpired if git does not finish within 0.5s.
    """
    p = subprocess.Popen(['git'] + params,
                         cwd=cwd, stdout=subprocess.PIPE, stderr=None)
    # communicate() with a timeout both waits and drains the pipe. The
    # previous wait()-then-communicate() could deadlock once git produced
    # more output than the OS pipe buffer holds.
    out, _ = p.communicate(timeout=0.5)
    return out.decode('utf8').strip()
def get_connection_uri(conn: Connection):
    """Return *conn*'s URI with username/password stripped from the netloc."""
    conn_uri = conn.get_uri()
    parsed = urlparse(conn_uri)
    # Rebuild the netloc without credentials. Only append ':port' when a
    # port is actually present - the previous f-string emitted the literal
    # "host:None" for port-less URIs.
    netloc = parsed.hostname or ''
    if parsed.port is not None:
        netloc = f'{netloc}:{parsed.port}'
    parsed = parsed._replace(netloc=netloc)
    return urlunparse(parsed)
def get_normalized_postgres_connection_uri(conn: Connection):
    """Normalize a PostgreSQL connection URI: 'postgresql://' and
    'postgres://' are both valid schemes; the latter is the canonical
    form, so rewrite only the scheme prefix."""
    uri = get_connection_uri(conn)
    if uri.startswith('postgresql'):
        uri = uri.replace('postgresql', 'postgres', 1)
    return uri
@provide_session
def get_connection(conn_id, session=None) -> Connection:
    """Resolve an Airflow Connection by id: the AIRFLOW_CONN_<ID>
    environment variable takes precedence, otherwise the connection table
    is queried. Returns None when neither source has the connection."""
    conn_uri = os.environ.get('AIRFLOW_CONN_' + conn_id.upper())
    if conn_uri:
        conn = Connection()
        conn.parse_from_uri(conn_uri)
        return conn
    return (session
            .query(Connection)
            .filter(Connection.conn_id == conn_id)
            .first())
def get_job_name(task):
    """OpenLineage job name for an Airflow task: '<dag_id>.<task_id>'."""
    return f'{task.dag_id}.{task.task_id}'
def get_custom_facets(task, is_external_trigger: bool):
    """Airflow-specific OpenLineage run facets for *task*."""
    return {
        "airflow_runArgs": AirflowRunArgsRunFacet(is_external_trigger),
        "airflow_version": AirflowVersionRunFacet.from_task(task)
    }
def new_lineage_run_id(dag_run_id: str, task_id: str) -> str:
    """Mint a fresh random UUID4 run id; the arguments are currently unused."""
    return str(uuid4())
class DagUtils:
    """Helpers for extracting and formatting DAG timing information."""

    @staticmethod  # added for consistency with the other helpers; class-level calls are unchanged
    def get_execution_date(**kwargs):
        """Return the ``execution_date`` keyword argument, if any."""
        return kwargs.get('execution_date')

    @staticmethod
    def get_start_time(execution_date=None):
        """ISO-8601 start time derived from *execution_date*, or None."""
        if execution_date:
            return DagUtils.to_iso_8601(execution_date)
        else:
            return None

    @staticmethod
    def get_end_time(execution_date, default):
        """ISO-8601 end time: *default* when execution_date is set, else None."""
        if execution_date:
            end_time = default
        else:
            end_time = None
        if end_time:
            end_time = DagUtils.to_iso_8601(end_time)
        return end_time

    @staticmethod
    def to_iso_8601(dt):
        """Format *dt* (Pendulum/datetime instance or epoch milliseconds)
        using _NOMINAL_TIME_FORMAT; returns None for a falsy value."""
        if not dt:
            return None
        if isinstance(dt, int):
            # Integers are interpreted as milliseconds since the epoch.
            dt = from_timestamp(dt / 1000.0)
        if isinstance(dt, Pendulum):
            # NOTE(review): pendulum's format() uses its own tokens rather
            # than strftime directives - confirm on pendulum 2.x.
            return dt.format(_NOMINAL_TIME_FORMAT)
        else:
            return dt.strftime(_NOMINAL_TIME_FORMAT)
| true | true |
f7f45d9028e21d4b9e2961c90ac7c1f05ec52e2a | 281 | py | Python | docs/examples/custom-resource/resource.py | besbes/formica | 94b43a11ed534ee7afa6a4f45848842bb163bbb6 | [
"MIT"
] | 50 | 2017-02-14T13:26:04.000Z | 2019-02-05T08:02:45.000Z | docs/examples/custom-resource/resource.py | besbes/formica | 94b43a11ed534ee7afa6a4f45848842bb163bbb6 | [
"MIT"
] | 54 | 2017-02-06T11:06:33.000Z | 2019-02-07T16:55:08.000Z | docs/examples/custom-resource/resource.py | besbes/formica | 94b43a11ed534ee7afa6a4f45848842bb163bbb6 | [
"MIT"
] | 7 | 2017-03-20T10:29:46.000Z | 2018-08-02T12:41:31.000Z | import cfnresponse
def handler(event, context):
    """Lambda entry point for a CloudFormation custom resource: logs the
    incoming event and always signals SUCCESS with a fixed demo payload."""
    print(event)  # log the raw CloudFormation event for debugging
    payload = {
        'Data': 'DataResponse',
        'Reason': 'SomeTestReason',
    }
    cfnresponse.send(event, context, cfnresponse.SUCCESS, payload,
                     "CustomResourcePhysicalID")
| 28.1 | 100 | 0.733096 | import cfnresponse
def handler(event, context):
    """CloudFormation custom-resource Lambda handler: logs the event and
    always reports SUCCESS with a small fixed payload."""
    print(event)
    response_data = {}
    response_data['Data'] = 'DataResponse'
    response_data['Reason'] = 'SomeTestReason'
    cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data, "CustomResourcePhysicalID")
| true | true |
f7f45e2e4e2abd9e487829859986d352ed0b8bab | 683 | py | Python | app/core/migrations/0002_tag.py | EddieRosas/recipe-app-api | 956b0985edf0741e2510a7717c87b4f2c9903f73 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | EddieRosas/recipe-app-api | 956b0985edf0741e2510a7717c87b4f2c9903f73 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | EddieRosas/recipe-app-api | 956b0985edf0741e2510a7717c87b4f2c9903f73 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-07-20 21:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration creating the ``Tag`` model (a named tag
    owned by a user) on top of the initial ``core`` schema."""
    # Must be applied after the app's initial migration.
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # Deleting the owning user cascades to their tags.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.458333 | 118 | 0.616398 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration creating the ``Tag`` model (a named tag
    owned by a user) on top of the initial ``core`` schema."""
    # Must be applied after the app's initial migration.
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # Deleting the owning user cascades to their tags.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
f7f45eb0aa2e4b6a55a315d675f1f372136b3f51 | 1,220 | py | Python | examples/VTK/SimpleCone/LocalRendering/app.py | DavidBerger98/py-web-vue | 4f4fce83a9fdf447512c69d54727d62fd733ffca | [
"BSD-3-Clause"
] | 14 | 2021-04-30T09:19:05.000Z | 2022-03-29T06:47:37.000Z | examples/VTK/SimpleCone/LocalRendering/app.py | DavidBerger98/py-web-vue | 4f4fce83a9fdf447512c69d54727d62fd733ffca | [
"BSD-3-Clause"
] | 11 | 2021-06-11T17:54:15.000Z | 2022-03-17T19:54:50.000Z | examples/VTK/SimpleCone/LocalRendering/app.py | DavidBerger98/py-web-vue | 4f4fce83a9fdf447512c69d54727d62fd733ffca | [
"BSD-3-Clause"
] | 5 | 2021-09-06T11:30:54.000Z | 2022-03-11T10:01:24.000Z | from pywebvue import App
from pywebvue.modules import VTK
from vtkmodules.vtkFiltersSources import vtkConeSource
# -----------------------------------------------------------------------------
# Web App setup
# -----------------------------------------------------------------------------
app = App("VTK processing with local Rendering")  # pywebvue application shell
app.state = {"resolution": 6}  # shared state: initial cone tessellation level
app.enable_module(VTK)  # register VTK serialization/rendering support
# -----------------------------------------------------------------------------
# VTK pipeline: a single cone source whose resolution the UI drives
# -----------------------------------------------------------------------------
cone_generator = vtkConeSource()
# -----------------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------------
@app.change("resolution")  # re-run whenever the shared "resolution" state changes
def update_cone():
    # Regenerate the cone at the requested tessellation and push the mesh
    # to the client for local (in-browser) rendering.
    cone_generator.SetResolution(app.get("resolution"))
    app.set("cone", VTK.mesh(cone_generator))
# -----------------------------------------------------------------------------
# MAIN
# python ./examples/.../app.py --port 1234
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Build the initial cone once the server is ready, then serve the UI.
    app.on_ready = update_cone
    app.run_server()
| 31.282051 | 79 | 0.346721 | from pywebvue import App
from pywebvue.modules import VTK
from vtkmodules.vtkFiltersSources import vtkConeSource
app = App("VTK processing with local Rendering")  # pywebvue application shell
app.state = {"resolution": 6}  # shared state: initial cone tessellation level
app.enable_module(VTK)  # register VTK serialization/rendering support
cone_generator = vtkConeSource()  # VTK source whose resolution the UI drives
@app.change("resolution")  # re-run whenever the shared "resolution" state changes
def update_cone():
    # Regenerate the cone and push the mesh to the client for local rendering.
    cone_generator.SetResolution(app.get("resolution"))
    app.set("cone", VTK.mesh(cone_generator))
if __name__ == "__main__":
    # Build the initial cone once the server is ready, then serve the UI.
    app.on_ready = update_cone
    app.run_server()
| true | true |
f7f45ff33f23aa60951d3e540f64629b29408698 | 803 | py | Python | myproject/urls.py | MatthieuALLIER/Test_Django_Polls_Mallier | 687a0b2ba1c337778e84f7b6f53523a11a834af6 | [
"Apache-2.0"
] | null | null | null | myproject/urls.py | MatthieuALLIER/Test_Django_Polls_Mallier | 687a0b2ba1c337778e84f7b6f53523a11a834af6 | [
"Apache-2.0"
] | null | null | null | myproject/urls.py | MatthieuALLIER/Test_Django_Polls_Mallier | 687a0b2ba1c337778e84f7b6f53523a11a834af6 | [
"Apache-2.0"
] | null | null | null | """myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    path('polls/', include('polls.urls')),  # delegate /polls/ URLs to the polls app
    path('admin/', admin.site.urls),  # built-in Django admin site
]
| 34.913043 | 77 | 0.703611 | from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    path('polls/', include('polls.urls')),  # delegate /polls/ URLs to the polls app
    path('admin/', admin.site.urls),  # built-in Django admin site
]
| true | true |
f7f4607e62fab7d666b1c9695d2b8d03510b4670 | 1,503 | py | Python | src/models/modelclass.py | bowbahdoe/foremast-brain | b8f3994d639ab1150fca04124a096e3e93510ba4 | [
"Apache-2.0"
] | null | null | null | src/models/modelclass.py | bowbahdoe/foremast-brain | b8f3994d639ab1150fca04124a096e3e93510ba4 | [
"Apache-2.0"
] | null | null | null | src/models/modelclass.py | bowbahdoe/foremast-brain | b8f3994d639ab1150fca04124a096e3e93510ba4 | [
"Apache-2.0"
] | null | null | null |
from metadata.metadata import METRIC_PERIOD
class ModelHolder:
    """Bundles a model's name with its fitted data, configuration, metric
    period and an optional identifier."""

    def __init__(self, model_name, model_config=None, model_data=None, period=METRIC_PERIOD.HISTORICAL.value, id=''):
        # Materialize fresh dicts here to avoid mutable default arguments.
        self._model_name = model_name
        self._model_config = {} if model_config is None else model_config
        self._model_data = {} if model_data is None else model_data
        self._period = period
        self._id = id

    def getModelByKey(self, key):
        """Return the stored model datum for *key* (None when absent)."""
        return self._model_data.get(key)

    def setModelKV(self, key, value):
        """Store *value* under *key* unless the key already exists
        (setdefault semantics: an existing entry is never overwritten)."""
        self._model_data.setdefault(key, value)

    def getModelConfigByKey(self, key):
        """Return the configuration value for *key* (None when absent)."""
        return self._model_config.get(key)

    @property
    def period(self):
        """Metric period this model was built for."""
        return self._period

    @property
    def model_name(self):
        """Name of the underlying model."""
        return self._model_name

    @property
    def hasModel(self):
        """True when at least one model entry has been stored."""
        return bool(self._model_data)

    @property
    def id(self):
        """Opaque identifier supplied at construction time."""
        return self._id

    def __getitem__(self, item):
        # Dict-style read access delegates to getModelByKey.
        return self.getModelByKey(item)

    def __setitem__(self, key, value):
        # Dict-style write access delegates to setModelKV (setdefault).
        self.setModelKV(key, value)

    def __str__(self):
        pieces = ('model_name: ', self.model_name,
                  ', modeldata: ', str(self._model_data),
                  ', modelconfig: ', str(self._model_config),
                  ', period: ', self.period)
        return ''.join(pieces)
| 25.474576 | 117 | 0.616101 |
from metadata.metadata import METRIC_PERIOD
class ModelHolder:
    """Bundles a model's name with its fitted data, configuration, metric
    period and an optional identifier."""
    def __init__(self, model_name, model_config=None, model_data=None, period=METRIC_PERIOD.HISTORICAL.value, id=''):
        # None defaults avoid the shared-mutable-default pitfall.
        if model_config is None:
            model_config = {}
        if model_data is None:
            model_data = {}
        self._model_name = model_name
        self._model_data = model_data
        self._period = period
        self._model_config = model_config
        self._id = id
    def getModelByKey(self, key):
        """Return the stored model datum for *key* (None when absent)."""
        return self._model_data.get(key)
    def setModelKV(self, key, value):
        # NOTE(review): setdefault never overwrites an existing key -
        # confirm callers expect "set once" rather than "assign" semantics.
        self._model_data.setdefault(key, value)
    def getModelConfigByKey(self, key):
        """Return the configuration value for *key* (None when absent)."""
        return self._model_config.get(key)
    @property
    def period(self):
        # Metric period this model was built for.
        return self._period
    @property
    def model_name(self):
        # Name of the underlying model.
        return self._model_name
    @property
    def hasModel(self):
        # True when at least one model entry has been stored.
        return len(self._model_data) > 0
    @property
    def id(self):
        # Opaque identifier supplied at construction time.
        return self._id
    def __getitem__(self, item):
        # Dict-style read access delegates to getModelByKey.
        return self.getModelByKey(item)
    def __setitem__(self, key, value):
        # Dict-style write access delegates to setModelKV (setdefault).
        self.setModelKV(key, value)
    def __str__(self):
        """Human-readable summary of name, data, config and period."""
        sb = []
        sb.append('model_name: ')
        sb.append(self.model_name)
        sb.append(', modeldata: ')
        sb.append(str(self._model_data))
        sb.append(', modelconfig: ')
        sb.append(str(self._model_config))
        sb.append(', period: ')
        sb.append(self.period)
        return ''.join(sb)
| true | true |
f7f46089208e58668cc1bf071a2be5f75b9a71bc | 406 | py | Python | pacote-download/d034 - calc o sal e dar o valor do seu aumento.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | [
"MIT"
] | null | null | null | pacote-download/d034 - calc o sal e dar o valor do seu aumento.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | [
"MIT"
] | null | null | null | pacote-download/d034 - calc o sal e dar o valor do seu aumento.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | [
"MIT"
] | null | null | null | ''' 034 Escreva um programa que pergunte o salário de um funcionário e calcule o valor do seu aumento.
Para salário superiores a R$ 1.250, calcule um aumento de 10%.
Para inferiores ou iguais, o aumento é de 15%.'''
sal = float(input('Digite seu salário: R$ '))
if sal > 1250:
print(f'Seu aumento é de R$ {sal + (sal * 10/100):.2f}')
else:
print(f'Seu salário é dev R$ {sal + (sal *15/100):.2f}') | 40.6 | 102 | 0.669951 |
sal = float(input('Digite seu salário: R$ '))
# Salaries above R$ 1250 get a 10% raise; others get 15%.
if sal > 1250:
    print(f'Seu aumento é de R$ {sal + (sal * 10/100):.2f}')
else:
    # NOTE(review): "dev" looks like a typo for "de", and both branches print
    # the new salary rather than the raise amount - confirm intended wording.
    print(f'Seu salário é dev R$ {sal + (sal *15/100):.2f}')
f7f461061fe067101c1ab679bfcc52f084b83639 | 2,077 | py | Python | venv/Lib/site-packages/openapi_spec_validator-0.2.7-py3.7.egg/openapi_spec_validator/__init__.py | phurinaix/signed_credentials | 2757f65a0f8cbcdd9593aa669e155c1b91c863c2 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/openapi_spec_validator-0.2.7-py3.7.egg/openapi_spec_validator/__init__.py | phurinaix/signed_credentials | 2757f65a0f8cbcdd9593aa669e155c1b91c863c2 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/openapi_spec_validator-0.2.7-py3.7.egg/openapi_spec_validator/__init__.py | phurinaix/signed_credentials | 2757f65a0f8cbcdd9593aa669e155c1b91c863c2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from openapi_spec_validator.shortcuts import (
validate_spec_factory, validate_spec_url_factory,
)
from openapi_spec_validator.handlers import UrlHandler
from openapi_spec_validator.schemas import get_openapi_schema
from openapi_spec_validator.factories import JSONSpecValidatorFactory
from openapi_spec_validator.validators import SpecValidator
# Package metadata.
__author__ = 'Artur Maciag'
__email__ = 'maciag.artur@gmail.com'
__version__ = '0.2.7'
__url__ = 'https://github.com/p1c2u/openapi-spec-validator'
__license__ = 'Apache License, Version 2.0'
# Public API of the package.
__all__ = [
    'openapi_v2_spec_validator', 'openapi_v3_spec_validator',
    'validate_v2_spec', 'validate_v3_spec', 'validate_spec',
    'validate_v2_spec_url', 'validate_v3_spec_url', 'validate_spec_url',
]
# Default handlers used to fetch referenced documents, keyed by URL scheme
# ('<all_urls>' is the catch-all entry).
default_handlers = {
    '<all_urls>': UrlHandler('http', 'https', 'file'),
    'http': UrlHandler('http'),
    'https': UrlHandler('https'),
    'file': UrlHandler('file'),
}
# v2.0 (Swagger) spec: load the meta-schema and build a validator around it.
schema_v2, schema_v2_url = get_openapi_schema('2.0')
openapi_v2_validator_factory = JSONSpecValidatorFactory(
    schema_v2, schema_v2_url,
    resolver_handlers=default_handlers,
)
openapi_v2_spec_validator = SpecValidator(
    openapi_v2_validator_factory,
    resolver_handlers=default_handlers,
)
# v3.0.0 spec: load the meta-schema and build a validator around it.
schema_v3, schema_v3_url = get_openapi_schema('3.0.0')
openapi_v3_validator_factory = JSONSpecValidatorFactory(
    schema_v3, schema_v3_url,
    resolver_handlers=default_handlers,
)
openapi_v3_spec_validator = SpecValidator(
    openapi_v3_validator_factory,
    resolver_handlers=default_handlers,
)
# Convenience shortcuts wrapping the per-version validators.
validate_v2_spec = validate_spec_factory(openapi_v2_spec_validator.validate)
validate_v2_spec_url = validate_spec_url_factory(
    openapi_v2_spec_validator.validate, default_handlers)
validate_v3_spec = validate_spec_factory(openapi_v3_spec_validator.validate)
validate_v3_spec_url = validate_spec_url_factory(
    openapi_v3_spec_validator.validate, default_handlers)
# aliases to the latest supported OpenAPI version (v3)
validate_spec = validate_v3_spec
validate_spec_url = validate_v3_spec_url
| 32.968254 | 76 | 0.803081 |
from openapi_spec_validator.shortcuts import (
validate_spec_factory, validate_spec_url_factory,
)
from openapi_spec_validator.handlers import UrlHandler
from openapi_spec_validator.schemas import get_openapi_schema
from openapi_spec_validator.factories import JSONSpecValidatorFactory
from openapi_spec_validator.validators import SpecValidator
# Package metadata.
__author__ = 'Artur Maciag'
__email__ = 'maciag.artur@gmail.com'
__version__ = '0.2.7'
__url__ = 'https://github.com/p1c2u/openapi-spec-validator'
__license__ = 'Apache License, Version 2.0'
# Public API of the package.
__all__ = [
    'openapi_v2_spec_validator', 'openapi_v3_spec_validator',
    'validate_v2_spec', 'validate_v3_spec', 'validate_spec',
    'validate_v2_spec_url', 'validate_v3_spec_url', 'validate_spec_url',
]
# Default handlers used to fetch referenced documents, keyed by URL scheme
# ('<all_urls>' is the catch-all entry).
default_handlers = {
    '<all_urls>': UrlHandler('http', 'https', 'file'),
    'http': UrlHandler('http'),
    'https': UrlHandler('https'),
    'file': UrlHandler('file'),
}
# v2.0 (Swagger) spec: load the meta-schema and build a validator around it.
schema_v2, schema_v2_url = get_openapi_schema('2.0')
openapi_v2_validator_factory = JSONSpecValidatorFactory(
    schema_v2, schema_v2_url,
    resolver_handlers=default_handlers,
)
openapi_v2_spec_validator = SpecValidator(
    openapi_v2_validator_factory,
    resolver_handlers=default_handlers,
)
# v3.0.0 spec: load the meta-schema and build a validator around it.
schema_v3, schema_v3_url = get_openapi_schema('3.0.0')
openapi_v3_validator_factory = JSONSpecValidatorFactory(
    schema_v3, schema_v3_url,
    resolver_handlers=default_handlers,
)
openapi_v3_spec_validator = SpecValidator(
    openapi_v3_validator_factory,
    resolver_handlers=default_handlers,
)
# Convenience shortcuts wrapping the per-version validators.
validate_v2_spec = validate_spec_factory(openapi_v2_spec_validator.validate)
validate_v2_spec_url = validate_spec_url_factory(
    openapi_v2_spec_validator.validate, default_handlers)
validate_v3_spec = validate_spec_factory(openapi_v3_spec_validator.validate)
validate_v3_spec_url = validate_spec_url_factory(
    openapi_v3_spec_validator.validate, default_handlers)
# Aliases to the latest supported OpenAPI version (v3).
validate_spec = validate_v3_spec
validate_spec_url = validate_v3_spec_url
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.