hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fc84a545b9160630d3e9cfef5c5df972c5b656 | 27 | py | Python | livegraph/__init__.py | dav-ell/JupyterDraw | e571bc7c1044c61a75c272edd67f23048f124928 | [
"MIT"
] | 1 | 2020-04-03T19:41:06.000Z | 2020-04-03T19:41:06.000Z | livegraph/__init__.py | dav-ell/JupyterDraw | e571bc7c1044c61a75c272edd67f23048f124928 | [
"MIT"
] | null | null | null | livegraph/__init__.py | dav-ell/JupyterDraw | e571bc7c1044c61a75c272edd67f23048f124928 | [
"MIT"
] | null | null | null | from .livegraph import Draw | 27 | 27 | 0.851852 | from .livegraph import Draw | true | true |
f7fc84b56e5d586c52b33f29c03ed1d722cffa54 | 2,237 | py | Python | neutron/agent/l3/link_local_allocator.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 1 | 2016-03-25T21:13:13.000Z | 2016-03-25T21:13:13.000Z | neutron/agent/l3/link_local_allocator.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-27T00:48:55.000Z | 2015-04-21T05:29:37.000Z | neutron/agent/l3/link_local_allocator.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-26T00:55:17.000Z | 2020-03-01T17:05:40.000Z | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.agent.l3.item_allocator import ItemAllocator
class LinkLocalAddressPair(netaddr.IPNetwork):
    """A network whose first and last addresses are used as a pair."""

    def __init__(self, addr):
        super(LinkLocalAddressPair, self).__init__(addr)

    def get_pair(self):
        """Return the (first, last) addresses of this network as IPNetworks.

        TODO(kevinbenton): the callers of this seem only interested in an IP,
        so we should just return two IPAddresses.
        """
        prefix = self.prefixlen
        first = netaddr.IPNetwork("%s/%s" % (self.network, prefix))
        last = netaddr.IPNetwork("%s/%s" % (self[-1], prefix))
        return (first, last)
class LinkLocalAllocator(ItemAllocator):
    """Manages allocation of link local IP addresses.
    These link local addresses are used for routing inside the fip namespaces.
    The associations need to persist across agent restarts to maintain
    consistency. Without this, there is disruption in network connectivity
    as the agent rewires the connections with the new IP address associations.
    Persisting these in the database is unnecessary and would degrade
    performance.
    """
    def __init__(self, data_store_path, subnet):
        """Create the pool of address pairs and hand it to ItemAllocator.

        :param data_store_path: file used to persist allocations across
            agent restarts.
        :param subnet: CIDR to carve the pool from.
        """
        # Split the subnet into /31s: each /31 holds exactly two addresses,
        # i.e. one LinkLocalAddressPair usable as the endpoints of a link.
        subnet = netaddr.IPNetwork(subnet)
        pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
        super(LinkLocalAllocator, self).__init__(data_store_path,
                                                 LinkLocalAddressPair,
                                                 pool)
| 42.207547 | 79 | 0.680823 |
import netaddr
from neutron.agent.l3.item_allocator import ItemAllocator
class LinkLocalAddressPair(netaddr.IPNetwork):
def __init__(self, addr):
super(LinkLocalAddressPair, self).__init__(addr)
def get_pair(self):
return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)),
netaddr.IPNetwork("%s/%s" % (self[-1], self.prefixlen)))
class LinkLocalAllocator(ItemAllocator):
def __init__(self, data_store_path, subnet):
subnet = netaddr.IPNetwork(subnet)
pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
super(LinkLocalAllocator, self).__init__(data_store_path,
LinkLocalAddressPair,
pool)
| true | true |
f7fc84f573aa97d3b828afe66e29e4f49f7bb79c | 1,393 | py | Python | quantlab/COCO/utils/inference.py | lukasc-ch/QuantLab | 7ddcc51ec1131a58269768cd898ce04e8b49beb6 | [
"Apache-2.0"
] | 6 | 2019-05-24T17:39:07.000Z | 2021-11-06T22:19:55.000Z | quantlab/COCO/utils/inference.py | lukasc-ch/QuantLab | 7ddcc51ec1131a58269768cd898ce04e8b49beb6 | [
"Apache-2.0"
] | null | null | null | quantlab/COCO/utils/inference.py | lukasc-ch/QuantLab | 7ddcc51ec1131a58269768cd898ce04e8b49beb6 | [
"Apache-2.0"
] | 4 | 2019-05-24T17:39:15.000Z | 2021-04-02T07:13:11.000Z | import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
def view_instance(img, gt_label, pr_label=None):
    """Prepare an image tensor for visualization.

    Moves ``img`` off the GPU (a no-op for CPU tensors) and returns None.

    NOTE(review): the matplotlib drawing code that rendered the image with
    its bounding boxes and class names was entirely commented out in the
    original; it has been removed here as dead code.  ``gt_label`` and
    ``pr_label`` are kept for interface compatibility and are currently
    unused — restore the plotting logic before relying on this function
    for actual visualization.

    Args:
        img: image tensor (C, H, W) of the instance to display.
        gt_label: ground-truth annotations (currently unused).
        pr_label: predicted annotations (currently unused).
    """
    img = img.cpu()
| 42.212121 | 128 | 0.580761 | import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
def view_instance(img, gt_label, pr_label=None):
img = img.cpu()
| true | true |
f7fc854ec16d842085b3bd24d1afacdfaac412f9 | 833 | py | Python | binary-indexed-tree/Python/5211-path-with-maximum-probability.py | Ivan-07/LeetCode-Solutions-in-Good-Style | b17bf19b13c9d9f52b7029a732d4d8a8be212dd4 | [
"Apache-2.0"
] | 1 | 2020-09-27T04:18:05.000Z | 2020-09-27T04:18:05.000Z | binary-indexed-tree/Python/5211-path-with-maximum-probability.py | Ivan-07/LeetCode-Solutions-in-Good-Style | b17bf19b13c9d9f52b7029a732d4d8a8be212dd4 | [
"Apache-2.0"
] | null | null | null | binary-indexed-tree/Python/5211-path-with-maximum-probability.py | Ivan-07/LeetCode-Solutions-in-Good-Style | b17bf19b13c9d9f52b7029a732d4d8a8be212dd4 | [
"Apache-2.0"
] | null | null | null | from typing import List
from collections import defaultdict
from collections import deque
class Solution:
    def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:
        """Return the maximum success probability of any start->end path.

        Dijkstra's algorithm on a max-heap of probabilities: edge
        probabilities lie in [0, 1], so path probability is monotonically
        non-increasing and the first time ``end`` is popped its probability
        is optimal.  Runs in O(E log V) instead of the original BFS with
        pruning, which can re-enqueue nodes many times.

        Returns 0 when ``end`` is unreachable from ``start``.
        """
        from heapq import heappush, heappop

        adj = defaultdict(list)
        for (u, v), p in zip(edges, succProb):
            adj[u].append((v, p))
            adj[v].append((u, p))
        best = [0.0] * n          # best probability found so far per node
        best[start] = 1.0
        heap = [(-1.0, start)]    # negate: heapq is a min-heap
        while heap:
            neg_prob, node = heappop(heap)
            prob = -neg_prob
            if node == end:
                return prob
            if prob < best[node]:
                continue          # stale heap entry
            for nxt, p in adj[node]:
                cand = prob * p
                if cand > best[nxt]:
                    best[nxt] = cand
                    heappush(heap, (-cand, nxt))
        return 0.0
| 29.75 | 115 | 0.506603 | from typing import List
from collections import defaultdict
from collections import deque
class Solution:
def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:
d = defaultdict(list)
for (x, y), p in zip(edges, succProb):
d[x].append((y, p))
d[y].append((x, p))
queue = deque([(start, 1), ])
record = defaultdict(int)
res = 0
while queue:
node, prob = queue.popleft()
if node == end:
res = max(res, prob)
continue
for next, next_prob in d[node]:
val = next_prob * prob
if val > record[next]:
record[next] = val
queue.append((next, val))
return res
| true | true |
f7fc86ba9dfee99156e60ce481c84c3531910e5f | 730 | py | Python | worker.py | Nicholas-Morton/DutyScheduler | 681241a46e38e74e37f92bce832c378781e3c488 | [
"MIT"
] | null | null | null | worker.py | Nicholas-Morton/DutyScheduler | 681241a46e38e74e37f92bce832c378781e3c488 | [
"MIT"
] | null | null | null | worker.py | Nicholas-Morton/DutyScheduler | 681241a46e38e74e37f92bce832c378781e3c488 | [
"MIT"
] | null | null | null | class Worker:
def __init__(self):
self.name = ""
self.thisWeekGeneral = 0
self.thisWeekDetail = 0
self.jobs = []
self.counts = {
'east': 0,
'gcb': 0,
'second': 0,
'third': 0,
'dinner': 0,
'wakings': 0,
'setup': 0,
'cleanup': 0,
'phones': 0,
'driver': 0,
'bcu': 0,
'totalDetails': 0
}
self.availability = {
'Monday': {},
'Tuesday': {},
'Wednesday': {},
'Thursday': {},
'Friday': {}
}
self.daysWorking = []
self.isHome = True
| 20.857143 | 32 | 0.347945 | class Worker:
def __init__(self):
self.name = ""
self.thisWeekGeneral = 0
self.thisWeekDetail = 0
self.jobs = []
self.counts = {
'east': 0,
'gcb': 0,
'second': 0,
'third': 0,
'dinner': 0,
'wakings': 0,
'setup': 0,
'cleanup': 0,
'phones': 0,
'driver': 0,
'bcu': 0,
'totalDetails': 0
}
self.availability = {
'Monday': {},
'Tuesday': {},
'Wednesday': {},
'Thursday': {},
'Friday': {}
}
self.daysWorking = []
self.isHome = True
| true | true |
f7fc86ef6167230e5d3fc2c182cd900b04d1928a | 476 | py | Python | demo_flat/flatpage_control/admin.py | emelianovss-yandex-praktikum/01_pythonplus_template_engine_and_flatpages | ef6b35099200145c90a15597a2646bb279f43ffa | [
"MIT"
] | null | null | null | demo_flat/flatpage_control/admin.py | emelianovss-yandex-praktikum/01_pythonplus_template_engine_and_flatpages | ef6b35099200145c90a15597a2646bb279f43ffa | [
"MIT"
] | null | null | null | demo_flat/flatpage_control/admin.py | emelianovss-yandex-praktikum/01_pythonplus_template_engine_and_flatpages | ef6b35099200145c90a15597a2646bb279f43ffa | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.db import models
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from ckeditor.widgets import CKEditorWidget
# Define a new FlatPageAdmin
class FlatPageAdmin(FlatPageAdmin):
    """Flatpage admin that edits page content with a CKEditor widget.

    Intentionally shadows the imported ``FlatPageAdmin`` so this subclass
    replaces the stock admin under the same name when registered below.
    """
    # Render every TextField (i.e. the flatpage body) with CKEditor
    # instead of the default plain <textarea>.
    formfield_overrides = {
        models.TextField: {'widget': CKEditorWidget}
    }
# Re-register FlatPageAdmin
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin) | 28 | 56 | 0.804622 | from django.contrib import admin
from django.db import models
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from ckeditor.widgets import CKEditorWidget
class FlatPageAdmin(FlatPageAdmin):
formfield_overrides = {
models.TextField: {'widget': CKEditorWidget}
}
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin) | true | true |
f7fc87404d06e94918baa6e2b98e3e61c6d926e1 | 3,912 | py | Python | check-nordvpn-latency.py | rileyweber13/check-nordvpn-latency | 5946078eae8fb2c67dc1f26125f6a6748b157e50 | [
"MIT"
] | null | null | null | check-nordvpn-latency.py | rileyweber13/check-nordvpn-latency | 5946078eae8fb2c67dc1f26125f6a6748b157e50 | [
"MIT"
] | null | null | null | check-nordvpn-latency.py | rileyweber13/check-nordvpn-latency | 5946078eae8fb2c67dc1f26125f6a6748b157e50 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import datetime
import os
import re
import subprocess
import sys
import time
import urllib.request
from bs4 import BeautifulSoup as bs
# constants
VPN_FILE_NAME = 'vpn-urls'
def get_servers_soup():
    """Download the NordVPN ovpn listing page and return it parsed."""
    url = 'https://nordvpn.com/ovpn/'
    # A browser-like User-Agent keeps the request from being rejected.
    request = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    page = urllib.request.urlopen(request).read()
    return bs(page, 'html.parser')
def get_link_list():
    """Return the <span class="mr-2"> elements that hold the VPN hostnames."""
    return get_servers_soup().find_all('span', class_='mr-2')
def save_link_list():
    """Fetch the current server list and write one hostname per line."""
    with open(VPN_FILE_NAME, 'w') as f:
        for span in get_link_list():
            f.write(span.text + '\n')
def ping_each_server(num_pings):
    """Ping every server listed in VPN_FILE_NAME and return report lines.

    Refreshes the cached server list if needed, pings each host
    ``num_pings`` times, and returns a list of strings: a two-line header,
    one line per reachable server sorted by ascending average latency,
    then one line per server that could not be pinged.
    """
    format_string_result = '{0:<25}{1}'
    format_numeric_result = '{0:<25}{1:3.3f}'
    ping_results_strings = [
        'Ping statistics - number of trials per server: ' + str(num_pings),
        format_string_result.format('server', 'avg latency (ms)')]
    ping_success_results = []
    ping_error_results = []
    update_vpn_url_files()
    with open(VPN_FILE_NAME, 'r') as f:
        for line in f:
            # strip() instead of line[:-1]: handles a final line without a
            # trailing newline (the old code chopped its last character).
            server = line.strip()
            if not server:
                continue  # skip blank lines instead of pinging ''
            print('pinging ', server)
            try:
                output = subprocess.check_output(
                    ['ping', '-c', str(num_pings), server])
                # Decode before matching: the original ran the regex over
                # str(output), i.e. the b'...' repr of the bytes.
                matches = re.findall(
                    r'mdev = \d+\.\d+/(?P<avg_ping_time>\d+\.\d+)',
                    output.decode('utf-8', errors='replace'))
                avg_ping_time = float(matches[0])
                ping_success_results.append((server, avg_ping_time))
            except subprocess.CalledProcessError as e:
                # ping's documented exit codes: 1 = no reply, 2 = other
                # error (typically name resolution).
                if e.returncode == 1:
                    error_message = 'server did not respond'
                elif e.returncode == 2:
                    error_message = 'name could not be resolved'
                else:
                    error_message = 'ping exited with code {}'.format(
                        e.returncode)
                ping_error_results.append(
                    format_string_result.format(server, error_message))
    ping_success_results.sort(key=lambda pair: pair[1])
    for server, latency in ping_success_results:
        ping_results_strings.append(
            format_numeric_result.format(server, latency))
    ping_results_strings += ping_error_results
    return ping_results_strings
def update_vpn_url_files():
    """Re-download the server list if the cache is missing or stale."""
    two_weeks_in_seconds = 14 * 24 * 60 * 60
    try:
        cache_age = time.time() - os.path.getmtime(VPN_FILE_NAME)
    except FileNotFoundError:
        # No cached list yet: fetch it for the first time.
        save_link_list()
        return
    if cache_age > two_weeks_in_seconds:
        save_link_list()
# used for printing ping results
def print_list(input_list):
    """Print every entry of *input_list* on its own line."""
    for entry in input_list:
        print(entry)
# used for saving ping results
def save_list_to_text_file(input_list):
    """Write the report lines to a file named after the current time."""
    timestamp = datetime.datetime.now().isoformat(timespec='seconds', sep='_')
    with open('ping-results_' + timestamp, 'w') as f:
        f.writelines(line + '\n' for line in input_list)
if __name__ == '__main__':
    # Number of pings per server comes from argv[1]; fall back to 10 when
    # the argument is missing or not an integer.
    ping_results = None
    if len(sys.argv) > 1:
        try:
            num_pings = int(sys.argv[1])
            ping_results = ping_each_server(num_pings)
        except ValueError as e:
            print('got a ValueError:', e)
            print('only command line argument accepted is a single integer')
            ping_results = ping_each_server(10)
    else:
        ping_results = ping_each_server(10)
    print_list(ping_results)
    # Optionally persist the report to a timestamped text file.
    print('would you like to save these results as a text file? Default = no')
    response = input('(y/n):')
    if response == 'y':
        save_list_to_text_file(ping_results)
| 31.804878 | 78 | 0.585634 |
import datetime
import os
import re
import subprocess
import sys
import time
import urllib.request
from bs4 import BeautifulSoup as bs
VPN_FILE_NAME = 'vpn-urls'
def get_servers_soup():
site = 'https://nordvpn.com/ovpn/'
hdr = {'User-Agent': 'Mozilla/5.0'}
req = urllib.request.Request(site, headers=hdr)
server_list_html = urllib.request.urlopen(req).read()
soup = bs(server_list_html, 'html.parser')
return soup
def get_link_list():
soup = get_servers_soup()
vpn_urls = soup.find_all('span', class_='mr-2')
return vpn_urls
def save_link_list():
vpn_urls = get_link_list()
with open(VPN_FILE_NAME, 'w') as f:
for vpn in vpn_urls:
f.write(vpn.text)
f.write('\n')
def ping_each_server(num_pings):
format_string_result = '{0:<25}{1}'
format_numeric_result = '{0:<25}{1:3.3f}'
ping_results_strings = [
'Ping statistics - number of trials per server: ' + str(num_pings),
format_string_result.format('server', 'avg latency (ms)')]
ping_success_results = []
ping_error_results = []
update_vpn_url_files()
with open(VPN_FILE_NAME, 'r') as f:
for line in f:
if len(line) > 1:
line = line[:-1]
print('pinging ', line)
try:
output = subprocess.check_output(
['ping', '-c', str(num_pings), line])
matches = re.findall(
r'mdev = \d+\.\d+/(?P<avg_ping_time>\d+.\d+)',
str(output))
avg_ping_time = float(matches[0])
ping_success_results.append((line, avg_ping_time))
except subprocess.CalledProcessError as e:
error_message = None
if e.returncode == 1:
error_message = 'server did not respond'
elif e.returncode == 2:
error_message = 'name could not be resolved'
ping_error_results.append(
format_string_result.format(line, error_message))
ping_success_results.sort(key=lambda tup: tup[1])
for result in ping_success_results:
ping_results_strings.append(format_numeric_result.format(result[0],
result[1]))
ping_results_strings += ping_error_results
return ping_results_strings
def update_vpn_url_files():
try:
two_weeks_in_seconds = 14*24*60*60
if(time.time() - os.path.getmtime(VPN_FILE_NAME) >
two_weeks_in_seconds):
save_link_list()
else:
return
except FileNotFoundError:
save_link_list()
return
def print_list(input_list):
for item in input_list:
print(item)
def save_list_to_text_file(input_list):
current_time_string = datetime.datetime.now().isoformat(
timespec='seconds', sep='_')
with open('ping-results_' + current_time_string, 'w') as f:
for line in input_list:
f.write(line)
f.write('\n')
if __name__ == '__main__':
ping_results = None
if len(sys.argv) > 1:
try:
num_pings = int(sys.argv[1])
ping_results = ping_each_server(num_pings)
except ValueError as e:
print('got a ValueError:', e)
print('only command line argument accepted is a single integer')
ping_results = ping_each_server(10)
else:
ping_results = ping_each_server(10)
print_list(ping_results)
print('would you like to save these results as a text file? Default = no')
response = input('(y/n):')
if response == 'y':
save_list_to_text_file(ping_results)
| true | true |
f7fc887379fdc09daf70baf42870115b78467f70 | 4,432 | py | Python | examples/keyword_watcher/keyword_watcher.py | iragamagoori/redditlib | e4efcde1ed53e6f684eae57b172263f15cfece86 | [
"MIT"
] | null | null | null | examples/keyword_watcher/keyword_watcher.py | iragamagoori/redditlib | e4efcde1ed53e6f684eae57b172263f15cfece86 | [
"MIT"
] | null | null | null | examples/keyword_watcher/keyword_watcher.py | iragamagoori/redditlib | e4efcde1ed53e6f684eae57b172263f15cfece86 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Requirements:
# subprocess32 python package
# nodejs
# puppeteer npm package
# sleep npm package
CREDS_FILENAME = 'creds.json'
import json, os, re, sys, time
sys.path.append(os.getcwd())
from redditlib import *
mydir, _ = os.path.split(os.path.abspath(sys.argv[0]))
def preflight():
    """Verify runtime prerequisites, exiting with status 1 on any failure.

    Checks, in order: the subprocess32 and praw Python packages, the
    credentials file, a working nodejs binary, and the puppeteer/sleep npm
    packages installed next to this script.  Written in Python 2 syntax,
    matching the rest of the script.
    """
    # Python package checks.
    try:
        import subprocess32 as subprocess
    except ImportError:
        print >>sys.stderr, "This example requires the subprocess32 python package"
        sys.exit(1)
    try:
        import praw
    except ImportError:
        print >>sys.stderr, "This example requires the praw python package"
        sys.exit(1)
    # Reddit API credentials file check.
    if not os.path.exists(CREDS_FILENAME):
        print >>sys.stderr, 'This example requires a {} file in the current directory with your Reddit API credentials.'.format(CREDS_FILENAME)
        print >>sys.stderr, 'Format:'
        print >>sys.stderr, '{'
        print >>sys.stderr, ' "client_id": "xxxxxx",'
        print >>sys.stderr, ' "client_secret": "xxxxxx",'
        print >>sys.stderr, ' "user_agent": "xxxxxx",'
        print >>sys.stderr, '}'
        print >>sys.stderr, 'See http://praw.readthedocs.io/en/latest/getting_started/quick_start.html for more info about these values.'
        sys.exit(1)
    # nodejs binary check (OSError means the executable was not found).
    try:
        subprocess.call(['node','-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        print >>sys.stderr, "This example requires the nodejs to be installed"
        sys.exit(1)
    # npm package checks: require() each one from the script's directory.
    for npm_package in ('puppeteer', 'sleep'):
        rc = subprocess.call(['node','-e','require("{}")'.format(npm_package)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mydir)
        if rc:
            print >>sys.stderr, "This example requires the {} npm package installed in the Python script's directory".format(npm_package)
            print >>sys.stderr, 'To install, cd to {} and run "npm install {}"'.format(mydir, npm_package)
            sys.exit(1)
#phew, we're good to go now
preflight()
import subprocess32 as subprocess
import praw
with open(CREDS_FILENAME, 'r') as f:
creds = json.load(f)
r = praw.Reddit(client_id = creds['client_id'],
client_secret = creds['client_secret'],
user_agent = creds['user_agent'])
if len(sys.argv) < 3:
print >>sys.stderr, 'usage:'
print >>sys.stderr, ' python2 {} <subreddit_name> <keyword_file>'.format(__file__)
sys.exit(1)
jsonlog = JsonLog('kw_output.json', append=True)
sr_name = normalize_subreddit(sys.argv[1])
sr = r.subreddit(sr_name)
with open(sys.argv[2], 'r') as f:
KEYWORDS = set([kw.strip() for kw in f])
if len(KEYWORDS) < 40:
print 'watching /r/{} for the following keywords:'.format(sr.display_name)
for kw in KEYWORDS:
print ' ',kw
else:
print 'watching /r/{} for {} keywords'.format(sr.display_name, len(KEYWORDS))
watcher = CommentKeywordWatcher(sr)
for kw in KEYWORDS:
watcher.add_keyword(kw)
while True:
for match in watcher.check():
author = match.thing.author.name.encode('utf-8')
body = match.thing.body.encode('utf-8')
link = 'https://reddit.com' + match.thing.permalink.encode('utf-8')
jsonlog.write({
'author': author,
'id': match.thing.id,
'url': link,
'keywords': match.keywords,
'body': body,
})
for kw in match.keywords:
body = re.sub('('+kw.lower()+')', '\x1b[35m\x1b[1m\\1\x1b[22m\x1b[0m', body, flags=re.I)
print '---'
print '/u/{}'.format(author)
print link
print
print body
print
sys.stdout.flush()
if not os.path.exists('screens'):
print 'making screens directory'
os.mkdir('screens')
filename = os.path.join(os.getcwd(), 'screens', '{}.png'.format(match.thing.id))
print 'saving to {}'.format(filename)
try:
rc = subprocess.call(['node', 'screenshot.js', link, filename], timeout = 15, cwd = mydir)
except subprocess.TimeoutExpired as e:
pass
print 'submitting to archive.is'
try:
rc = subprocess.call(['node', 'archive.js', link + '?context=100'], timeout = 15, cwd = mydir)
except subprocess.TimeoutExpired as e:
pass
subprocess.call(['killall','-9','chrome'], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
| 32.82963 | 143 | 0.610785 |
CREDS_FILENAME = 'creds.json'
import json, os, re, sys, time
sys.path.append(os.getcwd())
from redditlib import *
mydir, _ = os.path.split(os.path.abspath(sys.argv[0]))
def preflight():
try:
import subprocess32 as subprocess
except ImportError:
print >>sys.stderr, "This example requires the subprocess32 python package"
sys.exit(1)
try:
import praw
except ImportError:
print >>sys.stderr, "This example requires the praw python package"
sys.exit(1)
if not os.path.exists(CREDS_FILENAME):
print >>sys.stderr, 'This example requires a {} file in the current directory with your Reddit API credentials.'.format(CREDS_FILENAME)
print >>sys.stderr, 'Format:'
print >>sys.stderr, '{'
print >>sys.stderr, ' "client_id": "xxxxxx",'
print >>sys.stderr, ' "client_secret": "xxxxxx",'
print >>sys.stderr, ' "user_agent": "xxxxxx",'
print >>sys.stderr, '}'
print >>sys.stderr, 'See http://praw.readthedocs.io/en/latest/getting_started/quick_start.html for more info about these values.'
sys.exit(1)
try:
subprocess.call(['node','-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
print >>sys.stderr, "This example requires nodejs to be installed"
sys.exit(1)
for npm_package in ('puppeteer', 'sleep'):
rc = subprocess.call(['node','-e','require("{}")'.format(npm_package)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mydir)
if rc:
print >>sys.stderr, "This example requires the {} npm package installed in the Python script's directory".format(npm_package)
print >>sys.stderr, 'To install, cd to {} and run "npm install {}"'.format(mydir, npm_package)
sys.exit(1)
#phew, we're good to go now
preflight()
import subprocess32 as subprocess
import praw
with open(CREDS_FILENAME, 'r') as f:
creds = json.load(f)
r = praw.Reddit(client_id = creds['client_id'],
client_secret = creds['client_secret'],
user_agent = creds['user_agent'])
if len(sys.argv) < 3:
print >>sys.stderr, 'usage:'
print >>sys.stderr, ' python2 {} <subreddit_name> <keyword_file>'.format(__file__)
sys.exit(1)
jsonlog = JsonLog('kw_output.json', append=True)
sr_name = normalize_subreddit(sys.argv[1])
sr = r.subreddit(sr_name)
with open(sys.argv[2], 'r') as f:
KEYWORDS = set([kw.strip() for kw in f])
if len(KEYWORDS) < 40:
print 'watching /r/{} for the following keywords:'.format(sr.display_name)
for kw in KEYWORDS:
print ' ',kw
else:
print 'watching /r/{} for {} keywords'.format(sr.display_name, len(KEYWORDS))
watcher = CommentKeywordWatcher(sr)
for kw in KEYWORDS:
watcher.add_keyword(kw)
while True:
for match in watcher.check():
author = match.thing.author.name.encode('utf-8')
body = match.thing.body.encode('utf-8')
link = 'https://reddit.com' + match.thing.permalink.encode('utf-8')
jsonlog.write({
'author': author,
'id': match.thing.id,
'url': link,
'keywords': match.keywords,
'body': body,
})
for kw in match.keywords:
body = re.sub('('+kw.lower()+')', '\x1b[35m\x1b[1m\\1\x1b[22m\x1b[0m', body, flags=re.I)
print '---'
print '/u/{}'.format(author)
print link
print
print body
print
sys.stdout.flush()
if not os.path.exists('screens'):
print 'making screens directory'
os.mkdir('screens')
filename = os.path.join(os.getcwd(), 'screens', '{}.png'.format(match.thing.id))
print 'saving to {}'.format(filename)
try:
rc = subprocess.call(['node', 'screenshot.js', link, filename], timeout = 15, cwd = mydir)
except subprocess.TimeoutExpired as e:
pass
print 'submitting to archive.is'
try:
rc = subprocess.call(['node', 'archive.js', link + '?context=100'], timeout = 15, cwd = mydir)
except subprocess.TimeoutExpired as e:
pass
subprocess.call(['killall','-9','chrome'], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
| false | true |
f7fc88ccef31475bce09b6ff3d017c76d9b074c4 | 655 | py | Python | bot_cmds/cmd_fragments/_time_parse.py | jmeel14/yt-databot-discord | 82c54acce87546039853307389b711c3f36284aa | [
"Apache-2.0"
] | null | null | null | bot_cmds/cmd_fragments/_time_parse.py | jmeel14/yt-databot-discord | 82c54acce87546039853307389b711c3f36284aa | [
"Apache-2.0"
] | null | null | null | bot_cmds/cmd_fragments/_time_parse.py | jmeel14/yt-databot-discord | 82c54acce87546039853307389b711c3f36284aa | [
"Apache-2.0"
] | null | null | null | from re import search as re_s
def convert_duration(time_str):
    """Convert an ISO-8601 duration ("PT#H#M#S") to "H:MM:SS" or "MM:SS".

    Returns None when *time_str* does not start with a "PT" duration.
    Fixes the original formatting, which dropped zero-padding entirely
    (e.g. "PT1H2M3S" produced "1:2:3" instead of "1:02:03").
    """
    matched = re_s(r'^PT(?:(\d*)H)?(?:(\d*)M)?(?:(\d*)S)?', time_str)
    if not matched:
        return None
    # Missing or empty components count as zero (guards against int("")).
    hours, minutes, seconds = (int(g) if g else 0 for g in matched.groups())
    if hours:
        return '{0}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
    return '{0:02d}:{1:02d}'.format(minutes, seconds)
def convert_duration(time_str):
resp_obj = { "H": None, "M": None, "S": None }
re_match = re_s('^PT(\d*H)?(\d*M)?(\d*S)?', time_str)
if re_match:
re_str = re_match.groups()
for grp in re_str:
if grp and grp[-1] in resp_obj:
if "H" in grp:
resp_obj[grp[-1]] = grp[:-1] + ":"
else:
resp_obj[grp[-1]] = grp[:-1]
ret_str = "{0}{1}:{2}".format(
resp_obj["H"] or "",
resp_obj["M"] or "00",
resp_obj["S"] or "00"
)
return ret_str
else:
return None | true | true |
f7fc89c6a93f642bee3f460863e07915e30b28d9 | 312 | py | Python | ctypes/hello/__init__.py | AllGloryToTheHypnotoad/python-ctypes | 4b855349ecd614b929d435953a819603a7a11f03 | [
"MIT"
] | null | null | null | ctypes/hello/__init__.py | AllGloryToTheHypnotoad/python-ctypes | 4b855349ecd614b929d435953a819603a7a11f03 | [
"MIT"
] | null | null | null | ctypes/hello/__init__.py | AllGloryToTheHypnotoad/python-ctypes | 4b855349ecd614b929d435953a819603a7a11f03 | [
"MIT"
] | null | null | null | # import os
from hello.hello import cos
from hello.hello import test
from hello.hello import testarray
from hello.hello import ClassTest
__version__ = '0.1.0'
__author__ = 'bob'
__license__ = 'MIT'
# lib_path = os.path.join(os.path.dirname(__file__), 'libhello.dylib')
# lib = CDLL(lib_path)
# print(lib_path)
| 22.285714 | 70 | 0.75 |
from hello.hello import cos
from hello.hello import test
from hello.hello import testarray
from hello.hello import ClassTest
__version__ = '0.1.0'
__author__ = 'bob'
__license__ = 'MIT'
| true | true |
f7fc8aee8081880ef026c7110e128269d5ef17b5 | 2,234 | py | Python | timelapse.py | ocdude/raspberry-pi-timelapse | b9a3a941af6ae1b4e6aed51ae0df7c8a7172b4fd | [
"MIT"
] | 3 | 2020-05-17T00:06:00.000Z | 2021-11-17T00:22:23.000Z | timelapse.py | ocdude/raspberry-pi-timelapse | b9a3a941af6ae1b4e6aed51ae0df7c8a7172b4fd | [
"MIT"
] | null | null | null | timelapse.py | ocdude/raspberry-pi-timelapse | b9a3a941af6ae1b4e6aed51ae0df7c8a7172b4fd | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import time
import picamera
import configparser
import sys
import paramiko
import scp
from os import path
# set a default resolution to the max resolution of the HQ camera module
default_resolution = (4056,3040)
def set_camera(res):
    """Return a PiCamera configured for still capture.

    :param res: (width, height) tuple to use as the capture resolution.
    """
    camera = picamera.PiCamera()
    camera.resolution = res
    camera.exposure_mode = 'auto'
    # Give the sensor a moment to warm up / settle before the first capture.
    time.sleep(1)
    return camera
def ssh_client(server, port, user, password):
    """Open and return a connected paramiko SSH client."""
    connection = paramiko.SSHClient()
    connection.load_system_host_keys()
    # Auto-accept unknown host keys so first-time connections succeed.
    connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    connection.connect(server, port, user, password)
    return connection
def upload_image(image_path, remote_path, ssh_client):
    """Copy the file at *image_path* to *remote_path* over *ssh_client*."""
    scp_client = scp.SCPClient(ssh_client.get_transport())
    scp_client.put(image_path, remote_path)
if __name__ == "__main__":
# load configuration
config = configparser.ConfigParser()
config_file = sys.argv[1]
config.read(config_file)
resolution = (int(config.get('camera','width')), int(config.get('camera','height')))
frequency = int(config.get('camera','frequency'))
output = config.get('camera','output_path')
mode = config.get('camera','mode')
upload = config.getboolean('ssh','upload')
ssh_server = config.get('ssh','server')
ssh_port = config.get('ssh','port')
ssh_user = config.get('ssh','user')
ssh_password = config.get('ssh','password')
ssh_remote_path = config.get('ssh','remote_path')
if mode == "overwrite":
# create timelapse based on configuration file
with set_camera(resolution) as camera:
output_file = path.join(output,'output.jpg')
while True:
camera.capture(output_file)
if upload is True:
# upload image
client = ssh_client(ssh_server, ssh_port, ssh_user, ssh_password)
upload_image(output_file, ssh_remote_path, client)
time.sleep(frequency)
elif mode == "continuous":
with set_camera(resolution) as camera:
for filename in camera.capture_continuous(path.join(output,'img{counter:03d}.jpg')):
if upload is True:
# upload image
client = ssh_client(ssh_server, ssh_port, ssh_user, ssh_password)
upload_image(path.join(output,'%s' % filename), ssh_remote_path, client)
time.sleep(frequency)
else:
sys.exit('Please set mode in config.ini to overwrite or continuous')
| 31.914286 | 87 | 0.739481 |
import time
import picamera
import configparser
import sys
import paramiko
import scp
from os import path
default_resolution = (4056,3040)
def set_camera(res):
camera = picamera.PiCamera()
camera.resolution = res
camera.exposure_mode = 'auto'
time.sleep(1)
return camera
def ssh_client(server, port, user, password):
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(server, port, user, password)
return client
def upload_image(image_path, remote_path, ssh_client):
    """Copy ``image_path`` to ``remote_path`` over the given SSH connection.

    The SCP channel is always closed afterwards so repeated uploads over a
    long-lived connection do not leak channels.
    """
    scp_client = scp.SCPClient(ssh_client.get_transport())
    try:
        scp_client.put(image_path, remote_path)
    finally:
        scp_client.close()
if __name__ == "__main__":
config = configparser.ConfigParser()
config_file = sys.argv[1]
config.read(config_file)
resolution = (int(config.get('camera','width')), int(config.get('camera','height')))
frequency = int(config.get('camera','frequency'))
output = config.get('camera','output_path')
mode = config.get('camera','mode')
upload = config.getboolean('ssh','upload')
ssh_server = config.get('ssh','server')
ssh_port = config.get('ssh','port')
ssh_user = config.get('ssh','user')
ssh_password = config.get('ssh','password')
ssh_remote_path = config.get('ssh','remote_path')
if mode == "overwrite":
with set_camera(resolution) as camera:
output_file = path.join(output,'output.jpg')
while True:
camera.capture(output_file)
if upload is True:
client = ssh_client(ssh_server, ssh_port, ssh_user, ssh_password)
upload_image(output_file, ssh_remote_path, client)
time.sleep(frequency)
elif mode == "continuous":
with set_camera(resolution) as camera:
for filename in camera.capture_continuous(path.join(output,'img{counter:03d}.jpg')):
if upload is True:
client = ssh_client(ssh_server, ssh_port, ssh_user, ssh_password)
upload_image(path.join(output,'%s' % filename), ssh_remote_path, client)
time.sleep(frequency)
else:
sys.exit('Please set mode in config.ini to overwrite or continuous')
| true | true |
f7fc8b95ab1117c9600140f7fc97c01a927786f4 | 977 | py | Python | FundProg/Aula21-Arquivos/ex08-vamospraticar.py | Bhclira/NExT | 192353a139089d0fed003bfb7e2523aa02a0a737 | [
"MIT"
] | null | null | null | FundProg/Aula21-Arquivos/ex08-vamospraticar.py | Bhclira/NExT | 192353a139089d0fed003bfb7e2523aa02a0a737 | [
"MIT"
] | null | null | null | FundProg/Aula21-Arquivos/ex08-vamospraticar.py | Bhclira/NExT | 192353a139089d0fed003bfb7e2523aa02a0a737 | [
"MIT"
] | null | null | null | '''
Crie um programa simples que pergunta se a pessoa deseja ler um
arquivo ou escrever algo nele."
Nosso script vai funcionar assim:
Aparece um menu de opções (sair, ler ou escrever). Se digitar ler, lê
o conteúdo do arquivo e exibe na tela.
Se optar por escrever, escreve algo no arquivo.
'''
import os.path

# Simple menu loop: 0 exits, 1 prints the file, 2 appends a number to it.
op = 1
while op:
    if not os.path.isfile('teste.txt'):
        # Fix: the original compared against the undefined name `false`
        # (the Python literal is `False`), which raised a NameError.
        print('Arquivo teste.txt não existe. Criando ...')
        # Create the file and close the handle right away instead of leaking it.
        open('teste.txt', 'w').close()
    meuArquivo = open('teste.txt', 'r+')
    print('\n\tMenu de Opções\n')
    op = int(input('0. sair \n'
                   '1. Ler\n'
                   '2. Escrever\n'
                   '\nEscolha sua Opção: '))
    if op == 1:
        print('\n', meuArquivo.read())
    elif op == 2:
        # Fix: the old code rebound `meuArquivo` to the append handle, which
        # leaked the read handle opened above on every write.
        with open('teste.txt', 'a') as destino:
            num = input('Digite um numero: ')
            destino.write(num + '\n')
    meuArquivo.close()
import os.path
op=1
while op:
if os.path.isfile('teste.txt') is false:
print('Arquivo teste.txt não existe. Criando ...')
meuArquivo = open('teste.txt', 'w')
meuArquivo = open('teste.txt', 'r+')
print('\n\tMenu de Opções\n')
op=int(input('0. sair \n'
'1. Ler\n'
'2. Escrever\n'
'\nEscolha sua Opção: '))
if op == 1:
print('\n', meuArquivo.read())
meuArquivo.close()
elif op==2:
meuArquivo = open('teste.txt', 'a')
num = input('Digite um numero: ')
meuArquivo.write(num + '\n')
meuArquivo.close()
meuArquivo.close() | true | true |
f7fc8c07675a6f56eb25905e500243ce93167982 | 172 | py | Python | polyaxon_deploy/schemas/service_types.py | gideonbros/polyaxon-deploy | 77828e028670c43cc74704a4d7b9ec2e661e10a4 | [
"MIT"
] | null | null | null | polyaxon_deploy/schemas/service_types.py | gideonbros/polyaxon-deploy | 77828e028670c43cc74704a4d7b9ec2e661e10a4 | [
"MIT"
] | null | null | null | polyaxon_deploy/schemas/service_types.py | gideonbros/polyaxon-deploy | 77828e028670c43cc74704a4d7b9ec2e661e10a4 | [
"MIT"
] | null | null | null | class ServiceTypes(object):
LOAD_BALANCER = 'LoadBalancer'
NODE_PORT = 'NodePort'
CLUSTER_IP = 'ClusterIP'
VALUES = [LOAD_BALANCER, NODE_PORT, CLUSTER_IP]
| 24.571429 | 51 | 0.709302 | class ServiceTypes(object):
LOAD_BALANCER = 'LoadBalancer'
NODE_PORT = 'NodePort'
CLUSTER_IP = 'ClusterIP'
VALUES = [LOAD_BALANCER, NODE_PORT, CLUSTER_IP]
| true | true |
f7fc8c8159d12a85fdde1380bf14d96d4db9500f | 206 | py | Python | Python/100Excersises/76 to 100/85/.history/85_20201119144321.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | null | null | null | Python/100Excersises/76 to 100/85/.history/85_20201119144321.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | 1 | 2021-07-24T03:18:30.000Z | 2021-07-24T12:45:07.000Z | Python/100Excersises/76 to 100/85/.history/85_20201119144321.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | null | null | null | myFile=open("c.txt","r")
countriesRaw=myFile.read()
countriesNeat=''
countriesRaw.split()
list1=list(map(list,countriesRaw))
print(countriesRaw)
# print((list1))
# for country in list1:
# print(country) | 22.888889 | 34 | 0.73301 | myFile=open("c.txt","r")
countriesRaw=myFile.read()
countriesNeat=''
countriesRaw.split()
list1=list(map(list,countriesRaw))
print(countriesRaw)
| true | true |
f7fc8c9a9cd05c9d57738e97c285667615cc6e93 | 1,346 | py | Python | python/pyarmnn/src/pyarmnn/_version.py | tuanhe/armnn | 8a4bd6671d0106dfb788b8c9019f2f9646770f8d | [
"MIT"
] | 1 | 2021-07-03T23:46:08.000Z | 2021-07-03T23:46:08.000Z | python/pyarmnn/src/pyarmnn/_version.py | tuanhe/armnn | 8a4bd6671d0106dfb788b8c9019f2f9646770f8d | [
"MIT"
] | null | null | null | python/pyarmnn/src/pyarmnn/_version.py | tuanhe/armnn | 8a4bd6671d0106dfb788b8c9019f2f9646770f8d | [
"MIT"
] | null | null | null | # Copyright © 2020 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
version_info = (25, 0, 0)

# An optional PYARMNN_DEV_VER environment variable appends a dev component:
# numeric values become "devN", anything else is used verbatim.
__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
if __dev_version_env:
    try:
        __dev_version = "dev{}".format(int(__dev_version_env))
    except ValueError:
        __dev_version = str(__dev_version_env)
    version_info = version_info + (__dev_version,)

__version__ = ".".join(str(part) for part in version_info)
__arm_ml_version__ = "{}.{}.{}".format(*version_info[:3])


def check_armnn_version(installed_armnn_version: str, expected_armnn_version: str = __arm_ml_version__):
    """Compares expected Arm NN version and Arm NN version used to build the package.

    Args:
        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 25.0.0)
        expected_armnn_version (str): Expected Arm NN version

    Returns:
        None
    """
    expected = expected_armnn_version.split('.', 2)
    installed = installed_armnn_version.split('.', 2)
    # The patch level is ignored on purpose: only major.minor must agree.
    assert (expected[0], expected[1]) == (installed[0], installed[1]), \
        "Expected ArmNN version is {} but installed ArmNN version is {}".format(expected_armnn_version, installed_armnn_version)
| 34.512821 | 128 | 0.705795 |
import os
version_info = (25, 0, 0)
__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
if __dev_version_env:
__dev_version = "dev0"
try:
__dev_version = "dev{}".format(int(__dev_version_env))
except ValueError:
__dev_version = str(__dev_version_env)
version_info = (*version_info, __dev_version)
__version__ = '.'.join(str(c) for c in version_info)
__arm_ml_version__ = '{}.{}.{}'.format(version_info[0], version_info[1], version_info[2])
def check_armnn_version(installed_armnn_version: str, expected_armnn_version: str = __arm_ml_version__):
expected = expected_armnn_version.split('.', 2)
installed = installed_armnn_version.split('.', 2)
assert (expected[0] == installed[0]) and (expected[1] == installed[1]), \
"Expected ArmNN version is {} but installed ArmNN version is {}".format(expected_armnn_version, installed_armnn_version)
| true | true |
f7fc8d25337c8366e77792eef08c8e79b8558676 | 1,228 | py | Python | django/AdeKurniawan/mysite/mysite/blog/migrations/0001_initial.py | profMsaif/web_applications_2022 | 849cfeb396b82551e2553028d03fe9693773fc49 | [
"MIT"
] | 1 | 2022-03-12T10:17:55.000Z | 2022-03-12T10:17:55.000Z | django/matarmaa/mysite/mysite/blog/migrations/0001_initial.py | adekoerniawan/web_applications_2022 | 2a4347be6537c3982436bdd362ad13ee1868ede0 | [
"MIT"
] | null | null | null | django/matarmaa/mysite/mysite/blog/migrations/0001_initial.py | adekoerniawan/web_applications_2022 | 2a4347be6537c3982436bdd362ad13ee1868ede0 | [
"MIT"
] | 4 | 2022-03-12T10:17:00.000Z | 2022-03-26T08:40:43.000Z | # Generated by Django 4.0.2 on 2022-02-26 10:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the blog ``Post`` model."""
    # This is the app's first migration.
    initial = True
    dependencies = [
        # Post.author points at the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, unique=True)),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
                ('content', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # 0 = draft (default), 1 = published.
                ('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ['-created_on'],
            },
        ),
    ]
| 36.117647 | 147 | 0.598534 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, unique=True)),
('slug', models.SlugField(max_length=200, unique=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('content', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_on'],
},
),
]
| true | true |
f7fc8d9d150098c187a2f9d047fabe6bef077994 | 1,182 | py | Python | pyvisdk/do/dvs_port_reconfigured_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/dvs_port_reconfigured_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/dvs_port_reconfigured_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def DvsPortReconfiguredEvent(vim, *args, **kwargs):
    '''Existing ports are reconfigured in the distributed virtual switch.'''
    obj = vim.client.factory.create('{urn:vim25}DvsPortReconfiguredEvent')

    # Validate that all required properties were supplied, counting both
    # positional and keyword arguments.  Fix: the old message claimed "at
    # least 6" while the check required 5, and it reported len(args) only.
    provided = len(args) + len(kwargs)
    if provided < 5:
        raise IndexError('Expected at least 5 arguments got: %d' % provided)

    required = [ 'portKey', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]

    # Positional arguments fill the properties in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword arguments may only name known properties.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| 34.764706 | 124 | 0.612521 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
| true | true |
f7fc8e15833d76dda08e5e73599360926c2834e4 | 4,230 | py | Python | qi-server/main.py | Grey0520/Qi | f6c5ecf6089fa748cc4a77b7fa7be6a82841921d | [
"MIT"
] | null | null | null | qi-server/main.py | Grey0520/Qi | f6c5ecf6089fa748cc4a77b7fa7be6a82841921d | [
"MIT"
] | null | null | null | qi-server/main.py | Grey0520/Qi | f6c5ecf6089fa748cc4a77b7fa7be6a82841921d | [
"MIT"
] | null | null | null | import codecs
import json
import sys
import tempfile
import flask
from flask import Flask, request
from auth3 import Auth
from course_excel import course_excel_handler
from exam import exam_handler
from semester import get_semester
from transposition import get_college_and_grade_list, get_major_list, transposition
# sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
app = Flask(__name__)
@app.route("/")
def homepage():
return {
"message": "Hello World"
}
@app.route("/login", methods=['POST'])
def login():
request_body = request.json
auth = Auth()
cookies, ok = auth.login(request_body['school_id'], request_body['password'])
if ok:
return {
"cookies": cookies
}
else:
return {
"error": "登录失败,学号或密码错误",
}, 403
@app.route("/check_user", methods=['POST'])
def check_user():
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
auth = Auth(cookies)
auth.check_login()
return {
"success": auth.ok
}
@app.route("/semesters", methods=['GET'])
def semesters():
try:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
semester, selected = get_semester(cookies)
return {
"semesters": semester,
"current": selected
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/course", methods=['POST'])
def course():
request_body = request.json
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
with tempfile.TemporaryDirectory() as output_dir:
try:
file_path = \
course_excel_handler(cookies,
request_body['xnxqid'],
request_body['start_date'],
output_dir)
return {
"data": open(file_path).read()
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/exam", methods=['POST'])
def exam():
request_body = request.json
with tempfile.TemporaryDirectory() as output_dir:
try:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
file_path = \
exam_handler(cookies,
request_body['xnxqid'],
output_dir)
return {
"data": open(file_path).read()
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/colleges_and_grades", methods=['GET'])
def college_and_grade():
try:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
c, g = get_college_and_grade_list(cookies)
return {
"colleges": c,
"grades": g
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/majors", methods=['GET'])
def majors():
try:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
_majors = get_major_list(cookies, request.args.get('college_id'), request.args.get('grade'))
return {
"majors": _majors
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/transposition", methods=['POST'])
def get_transposition():
request_body = request.json
try:
with tempfile.TemporaryDirectory() as output_dir:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
file_path = transposition(cookies, output_dir, request_body['xnxqid'], request_body['college_id'],
request_body['grade'], request_body['major_id'])
return flask.send_file(file_path, as_attachment=True)
except Exception as e:
return {
"error": str(e)
}, 500
| 26.111111 | 110 | 0.539007 | import codecs
import json
import sys
import tempfile
import flask
from flask import Flask, request
from auth3 import Auth
from course_excel import course_excel_handler
from exam import exam_handler
from semester import get_semester
from transposition import get_college_and_grade_list, get_major_list, transposition
app = Flask(__name__)
@app.route("/")
def homepage():
return {
"message": "Hello World"
}
@app.route("/login", methods=['POST'])
def login():
request_body = request.json
auth = Auth()
cookies, ok = auth.login(request_body['school_id'], request_body['password'])
if ok:
return {
"cookies": cookies
}
else:
return {
"error": "登录失败,学号或密码错误",
}, 403
@app.route("/check_user", methods=['POST'])
def check_user():
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
auth = Auth(cookies)
auth.check_login()
return {
"success": auth.ok
}
@app.route("/semesters", methods=['GET'])
def semesters():
try:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
semester, selected = get_semester(cookies)
return {
"semesters": semester,
"current": selected
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/course", methods=['POST'])
def course():
request_body = request.json
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
with tempfile.TemporaryDirectory() as output_dir:
try:
file_path = \
course_excel_handler(cookies,
request_body['xnxqid'],
request_body['start_date'],
output_dir)
return {
"data": open(file_path).read()
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/exam", methods=['POST'])
def exam():
request_body = request.json
with tempfile.TemporaryDirectory() as output_dir:
try:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
file_path = \
exam_handler(cookies,
request_body['xnxqid'],
output_dir)
return {
"data": open(file_path).read()
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/colleges_and_grades", methods=['GET'])
def college_and_grade():
try:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
c, g = get_college_and_grade_list(cookies)
return {
"colleges": c,
"grades": g
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/majors", methods=['GET'])
def majors():
try:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
_majors = get_major_list(cookies, request.args.get('college_id'), request.args.get('grade'))
return {
"majors": _majors
}
except Exception as e:
return {
"error": str(e)
}, 500
@app.route("/transposition", methods=['POST'])
def get_transposition():
request_body = request.json
try:
with tempfile.TemporaryDirectory() as output_dir:
cookies = request.headers.get('Q-COOKIES')
cookies = json.loads(cookies)
file_path = transposition(cookies, output_dir, request_body['xnxqid'], request_body['college_id'],
request_body['grade'], request_body['major_id'])
return flask.send_file(file_path, as_attachment=True)
except Exception as e:
return {
"error": str(e)
}, 500
| true | true |
f7fc903c47ecc8b58dda0afbd0fe9cdd74d9d9ea | 267 | py | Python | geeksaga/archive/controller/__init__.py | geekflow/archive | a94bd50363d1cc0004d8d0984599432a31e70035 | [
"MIT"
] | null | null | null | geeksaga/archive/controller/__init__.py | geekflow/archive | a94bd50363d1cc0004d8d0984599432a31e70035 | [
"MIT"
] | null | null | null | geeksaga/archive/controller/__init__.py | geekflow/archive | a94bd50363d1cc0004d8d0984599432a31e70035 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
.controller
~~~~~~~~~~~~~~~~~~~
archive controller initialize module.
:copyright: (c) 2014 by geeksaga.
:license: MIT LICENSE 2.0, see license for more details.
"""
__all__ = ['login', 'user', 'admin', 'ArchiveAPI'] | 22.25 | 60 | 0.565543 |
__all__ = ['login', 'user', 'admin', 'ArchiveAPI'] | true | true |
f7fc9191a7bc25429ff4e0a44d28fb74b23a4622 | 3,371 | py | Python | setup.py | sequana/rnaseq | 470e3e5e2563596fc37515d0b38a7024e178a7a3 | [
"BSD-3-Clause"
] | 6 | 2021-03-24T15:49:23.000Z | 2022-01-05T17:00:43.000Z | setup.py | sequana/rnaseq | 470e3e5e2563596fc37515d0b38a7024e178a7a3 | [
"BSD-3-Clause"
] | 6 | 2020-12-08T12:58:04.000Z | 2022-01-18T10:23:11.000Z | setup.py | sequana/rnaseq | 470e3e5e2563596fc37515d0b38a7024e178a7a3 | [
"BSD-3-Clause"
] | 1 | 2021-03-24T15:49:24.000Z | 2021-03-24T15:49:24.000Z | from setuptools import setup, find_namespace_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
import subprocess
_MAJOR = 0
_MINOR = 14
_MICRO = 2
version = '%d.%d.%d' % (_MAJOR, _MINOR, _MICRO)
release = '%d.%d' % (_MAJOR, _MINOR)
metainfo = {
'authors': {"main": ("thomas cokelaer", "thomas.cokelaer@pasteur.fr")},
'version': version,
'license' : 'new BSD',
'url' : "https://github.com/sequana/",
'description': "A RNAseq pipeline from raw reads to feature counts" ,
'platforms' : ['Linux', 'Unix', 'MacOsX', 'Windows'],
'keywords' : ['snakemake, sequana, RNAseq, RNADiff, differential analysis'],
'classifiers' : [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics']
}
NAME = "rnaseq"
class Install(install):
    """Standard install command that also refreshes sequana shell completion."""
    def run(self):
        cmd = "sequana_completion --name {} --force ".format(NAME)
        # Completion registration is best-effort, but do not use a bare
        # `except`, which would also swallow SystemExit/KeyboardInterrupt.
        try:
            subprocess.run(cmd.split())
        except Exception:
            pass
        install.run(self)
class Develop(develop):
    """Editable-install command that also refreshes sequana shell completion."""
    def run(self):
        cmd = "sequana_completion --name {} --force ".format(NAME)
        # Completion registration is best-effort, but do not use a bare
        # `except`, which would also swallow SystemExit/KeyboardInterrupt.
        try:
            subprocess.run(cmd.split())
        except Exception:
            pass
        develop.run(self)
# Distribution manifest; most metadata is pulled from the `metainfo` dict above.
setup(
    name = "sequana_{}".format(NAME),
    version = version,
    maintainer = metainfo['authors']['main'][0],
    maintainer_email = metainfo['authors']['main'][1],
    author = metainfo['authors']['main'][0],
    author_email = metainfo['authors']['main'][1],
    long_description = open("README.rst").read(),
    keywords = metainfo['keywords'],
    description = metainfo['description'],
    license = metainfo['license'],
    platforms = metainfo['platforms'],
    url = metainfo['url'],
    classifiers = metainfo['classifiers'],

    # package installation
    packages = ["sequana_pipelines.rnaseq",
        'sequana_pipelines.rnaseq.data' ,
        'sequana_pipelines.rnaseq.data.Saccer3' ],

    install_requires = open("requirements.txt").read(),

    # This is recursive include of data files
    exclude_package_data = {"": ["__pycache__"]},
    package_data = {
        '': ['*.yaml', "*.rules", "*.json", "requirements.txt", "*png",
            "fastq_screen.conf"],
        'sequana_pipelines.rnaseq.data' : ['*.*'],
        },

    zip_safe=False,

    entry_points = {'console_scripts':[
        'sequana_pipelines_rnaseq=sequana_pipelines.rnaseq.main:main',
        'sequana_rnaseq=sequana_pipelines.rnaseq.main:main']
    },
    # NOTE(review): cmdclass is commented out, so the Install/Develop hooks
    # defined above are currently unused — confirm whether that is intended.
    #cmdclass={"install": Install, "develop": Develop}
)
| 36.641304 | 80 | 0.592999 | from setuptools import setup, find_namespace_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
import subprocess
_MAJOR = 0
_MINOR = 14
_MICRO = 2
version = '%d.%d.%d' % (_MAJOR, _MINOR, _MICRO)
release = '%d.%d' % (_MAJOR, _MINOR)
metainfo = {
'authors': {"main": ("thomas cokelaer", "thomas.cokelaer@pasteur.fr")},
'version': version,
'license' : 'new BSD',
'url' : "https://github.com/sequana/",
'description': "A RNAseq pipeline from raw reads to feature counts" ,
'platforms' : ['Linux', 'Unix', 'MacOsX', 'Windows'],
'keywords' : ['snakemake, sequana, RNAseq, RNADiff, differential analysis'],
'classifiers' : [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics']
}
NAME = "rnaseq"
class Install(install):
def run(self):
cmd = "sequana_completion --name {} --force ".format(NAME)
try: subprocess.run(cmd.split())
except:pass
install.run(self)
class Develop(develop):
def run(self):
cmd = "sequana_completion --name {} --force ".format(NAME)
try:subprocess.run(cmd.split())
except:pass
develop.run(self)
setup(
name = "sequana_{}".format(NAME),
version = version,
maintainer = metainfo['authors']['main'][0],
maintainer_email = metainfo['authors']['main'][1],
author = metainfo['authors']['main'][0],
author_email = metainfo['authors']['main'][1],
long_description = open("README.rst").read(),
keywords = metainfo['keywords'],
description = metainfo['description'],
license = metainfo['license'],
platforms = metainfo['platforms'],
url = metainfo['url'],
classifiers = metainfo['classifiers'],
packages = ["sequana_pipelines.rnaseq",
'sequana_pipelines.rnaseq.data' ,
'sequana_pipelines.rnaseq.data.Saccer3' ],
install_requires = open("requirements.txt").read(),
exclude_package_data = {"": ["__pycache__"]},
package_data = {
'': ['*.yaml', "*.rules", "*.json", "requirements.txt", "*png",
"fastq_screen.conf"],
'sequana_pipelines.rnaseq.data' : ['*.*'],
},
zip_safe=False,
entry_points = {'console_scripts':[
'sequana_pipelines_rnaseq=sequana_pipelines.rnaseq.main:main',
'sequana_rnaseq=sequana_pipelines.rnaseq.main:main']
},
)
| true | true |
f7fc92c235ebec4cf3c31c64c347236171dd95d7 | 5,694 | py | Python | test_elasticsearch/test_async/test_server/test_mapbox_vector_tile.py | elastic/elasticsearch-py | c0d9bd1b0bd94f6819172199bebc338358410496 | [
"Apache-2.0"
] | 3,353 | 2015-03-12T17:41:01.000Z | 2022-03-31T05:03:02.000Z | test_elasticsearch/test_async/test_server/test_mapbox_vector_tile.py | elastic/elasticsearch-py | c0d9bd1b0bd94f6819172199bebc338358410496 | [
"Apache-2.0"
] | 1,356 | 2015-03-11T14:27:35.000Z | 2022-03-30T22:57:07.000Z | test_elasticsearch/test_async/test_server/test_mapbox_vector_tile.py | elastic/elasticsearch-py | c0d9bd1b0bd94f6819172199bebc338358410496 | [
"Apache-2.0"
] | 1,193 | 2015-03-11T15:13:26.000Z | 2022-03-29T02:45:55.000Z | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import logging
import re
import pytest
from elasticsearch import AsyncElasticsearch, RequestError
pytestmark = pytest.mark.asyncio
@pytest.fixture(scope="function")
async def mvt_setup(async_client):
    """Create a ``museums`` index with four geo-tagged documents.

    Used by the ``_mvt`` (Mapbox vector tile) search tests below.  The bulk
    insert uses ``refresh=True`` so the documents are searchable immediately.
    """
    await async_client.indices.create(
        index="museums",
        body={
            "mappings": {
                "properties": {
                    "location": {"type": "geo_point"},
                    "name": {"type": "keyword"},
                    "price": {"type": "long"},
                    "included": {"type": "boolean"},
                }
            }
        },
    )
    await async_client.bulk(
        index="museums",
        body=[
            {"index": {"_id": "1"}},
            {
                "location": "52.374081,4.912350",
                "name": "NEMO Science Museum",
                "price": 1750,
                "included": True,
            },
            {"index": {"_id": "2"}},
            {
                "location": "52.369219,4.901618",
                "name": "Museum Het Rembrandthuis",
                "price": 1500,
                "included": False,
            },
            {"index": {"_id": "3"}},
            {
                "location": "52.371667,4.914722",
                "name": "Nederlands Scheepvaartmuseum",
                "price": 1650,
                "included": True,
            },
            {"index": {"_id": "4"}},
            {
                "location": "52.371667,4.914722",
                "name": "Amsterdam Centre for Architecture",
                "price": 0,
                "included": True,
            },
        ],
        refresh=True,
    )
@pytest.mark.xfail
async def test_mapbox_vector_tile_logging(elasticsearch_url, mvt_setup, ca_certs):
    """Check what the 'elasticsearch' logger emits for ``_mvt`` requests.

    A successful response is binary and is logged as a bytes repr, while an
    error response must still be logged as its raw JSON body.  Marked xfail
    since the exact log line format is not guaranteed.
    """
    client = AsyncElasticsearch(elasticsearch_url, ca_certs=ca_certs)
    output = io.StringIO()
    handler = logging.StreamHandler(output)
    logger = logging.getLogger("elasticsearch")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Capture the log output of a successful (binary tile) request.
    try:
        await client.search_mvt(
            index="museums",
            zoom=13,
            x=4207,
            y=2692,
            field="location",
        )
    finally:
        logger.removeHandler(handler)
    handler.flush()
    logs = output.getvalue()
    assert re.search(
        r"^POST https?://[^/]+/museums/_mvt/location/13/4207/2692 \[status:200 request:0\.[0-9]{3}s\]\n"
        r"> None\n"
        r"< b'.+'$",
        logs,
        flags=re.DOTALL,
    )
    # Fresh capture buffer for the error case.
    output = io.StringIO()
    handler = logging.StreamHandler(output)
    logger = logging.getLogger("elasticsearch")
    logger.addHandler(handler)
    # Errors should still be JSON
    try:
        with pytest.raises(RequestError) as e:
            await client.search_mvt(
                index="museums",
                zoom=-100,
                x=4207,
                y=2692,
                field="location",
            )
    finally:
        logger.removeHandler(handler)
    assert str(e.value) == (
        "RequestError(400, 'illegal_argument_exception', "
        "'Invalid geotile_grid precision of -100. Must be between 0 and 29.')"
    )
    assert e.value.status_code == 400
    handler.flush()
    logs = output.getvalue()
    assert re.search(
        r"^POST https?://[^/]+/museums/_mvt/location/-100/4207/2692 \[status:400 request:0\.[0-9]{3}s\]\n",
        logs,
        flags=re.DOTALL,
    )
    # The JSON error body is still logged properly.
    assert logs.endswith(
        '> None\n< {"error":{"root_cause":[{"type":"illegal_argument_exception","reason":"Invalid '
        'geotile_grid precision of -100. Must be between 0 and 29."}],"type":"illegal_argument_exception",'
        '"reason":"Invalid geotile_grid precision of -100. Must be between 0 and 29."},"status":400}\n'
    )
async def test_mapbox_vector_tile_response(elasticsearch_url, mvt_setup, ca_certs):
    """Run an ``_mvt`` search and sanity-check the decoded tile structure."""
    # mapbox-vector-tile is an optional dependency used only to decode the
    # binary response; skip the test when it is not installed.
    try:
        import mapbox_vector_tile
    except ImportError:
        return pytest.skip(reason="Requires the 'mapbox-vector-tile' package")

    client = AsyncElasticsearch(elasticsearch_url, ca_certs=ca_certs)
    resp = await client.search_mvt(
        index="museums",
        zoom=13,
        x=4207,
        y=2692,
        field="location",
        body={
            "grid_precision": 2,
            "fields": ["name", "price"],
            "query": {"term": {"included": True}},
            "aggs": {
                "min_price": {"min": {"field": "price"}},
                "max_price": {"max": {"field": "price"}},
                "avg_price": {"avg": {"field": "price"}},
            },
        },
    )
    assert isinstance(resp, bytes)

    # Decode the binary as MVT
    tile = mapbox_vector_tile.decode(resp)

    # Assert some general things about the structure, mostly we want
    # to know that we got back a valid MVT.
    assert set(tile.keys()) == {"hits", "aggs", "meta"}
| 30.945652 | 107 | 0.556902 |
import io
import logging
import re
import pytest
from elasticsearch import AsyncElasticsearch, RequestError
pytestmark = pytest.mark.asyncio
@pytest.fixture(scope="function")
async def mvt_setup(async_client):
await async_client.indices.create(
index="museums",
body={
"mappings": {
"properties": {
"location": {"type": "geo_point"},
"name": {"type": "keyword"},
"price": {"type": "long"},
"included": {"type": "boolean"},
}
}
},
)
await async_client.bulk(
index="museums",
body=[
{"index": {"_id": "1"}},
{
"location": "52.374081,4.912350",
"name": "NEMO Science Museum",
"price": 1750,
"included": True,
},
{"index": {"_id": "2"}},
{
"location": "52.369219,4.901618",
"name": "Museum Het Rembrandthuis",
"price": 1500,
"included": False,
},
{"index": {"_id": "3"}},
{
"location": "52.371667,4.914722",
"name": "Nederlands Scheepvaartmuseum",
"price": 1650,
"included": True,
},
{"index": {"_id": "4"}},
{
"location": "52.371667,4.914722",
"name": "Amsterdam Centre for Architecture",
"price": 0,
"included": True,
},
],
refresh=True,
)
@pytest.mark.xfail
async def test_mapbox_vector_tile_logging(elasticsearch_url, mvt_setup, ca_certs):
client = AsyncElasticsearch(elasticsearch_url, ca_certs=ca_certs)
output = io.StringIO()
handler = logging.StreamHandler(output)
logger = logging.getLogger("elasticsearch")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
try:
await client.search_mvt(
index="museums",
zoom=13,
x=4207,
y=2692,
field="location",
)
finally:
logger.removeHandler(handler)
handler.flush()
logs = output.getvalue()
assert re.search(
r"^POST https?://[^/]+/museums/_mvt/location/13/4207/2692 \[status:200 request:0\.[0-9]{3}s\]\n"
r"> None\n"
r"< b'.+'$",
logs,
flags=re.DOTALL,
)
output = io.StringIO()
handler = logging.StreamHandler(output)
logger = logging.getLogger("elasticsearch")
logger.addHandler(handler)
try:
with pytest.raises(RequestError) as e:
await client.search_mvt(
index="museums",
zoom=-100,
x=4207,
y=2692,
field="location",
)
finally:
logger.removeHandler(handler)
assert str(e.value) == (
"RequestError(400, 'illegal_argument_exception', "
"'Invalid geotile_grid precision of -100. Must be between 0 and 29.')"
)
assert e.value.status_code == 400
handler.flush()
logs = output.getvalue()
assert re.search(
r"^POST https?://[^/]+/museums/_mvt/location/-100/4207/2692 \[status:400 request:0\.[0-9]{3}s\]\n",
logs,
flags=re.DOTALL,
)
assert logs.endswith(
'> None\n< {"error":{"root_cause":[{"type":"illegal_argument_exception","reason":"Invalid '
'geotile_grid precision of -100. Must be between 0 and 29."}],"type":"illegal_argument_exception",'
'"reason":"Invalid geotile_grid precision of -100. Must be between 0 and 29."},"status":400}\n'
)
async def test_mapbox_vector_tile_response(elasticsearch_url, mvt_setup, ca_certs):
try:
import mapbox_vector_tile
except ImportError:
return pytest.skip(reason="Requires the 'mapbox-vector-tile' package")
client = AsyncElasticsearch(elasticsearch_url, ca_certs=ca_certs)
resp = await client.search_mvt(
index="museums",
zoom=13,
x=4207,
y=2692,
field="location",
body={
"grid_precision": 2,
"fields": ["name", "price"],
"query": {"term": {"included": True}},
"aggs": {
"min_price": {"min": {"field": "price"}},
"max_price": {"max": {"field": "price"}},
"avg_price": {"avg": {"field": "price"}},
},
},
)
assert isinstance(resp, bytes)
tile = mapbox_vector_tile.decode(resp)
assert set(tile.keys()) == {"hits", "aggs", "meta"}
| true | true |
f7fc930266ac03a687af9e78d34759000cc0f843 | 2,264 | py | Python | third_party/gsutil/gslib/cloud_api_helper.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | 8 | 2016-02-08T11:59:31.000Z | 2020-05-31T15:19:54.000Z | third_party/gsutil/gslib/cloud_api_helper.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | 1 | 2021-02-23T22:20:14.000Z | 2021-02-23T22:20:14.000Z | third_party/gsutil/gslib/cloud_api_helper.py | ravitejavalluri/catapult | 246a39a82c2213d913a96fff020a263838dc76e6 | [
"BSD-3-Clause"
] | 7 | 2016-02-09T09:28:14.000Z | 2020-07-25T19:03:36.000Z | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for Cloud API implementations."""
from __future__ import absolute_import
import json
from gslib.cloud_api import ArgumentException
def ValidateDstObjectMetadata(dst_obj_metadata):
"""Ensures dst_obj_metadata supplies the needed fields for copy and insert.
Args:
dst_obj_metadata: Metadata to validate.
Raises:
ArgumentException if metadata is invalid.
"""
if not dst_obj_metadata:
raise ArgumentException(
'No object metadata supplied for destination object.')
if not dst_obj_metadata.name:
raise ArgumentException(
'Object metadata supplied for destination object had no object name.')
if not dst_obj_metadata.bucket:
raise ArgumentException(
'Object metadata supplied for destination object had no bucket name.')
def GetDownloadSerializationData(src_obj_metadata, progress=0):
"""Returns download serialization data.
There are four entries:
auto_transfer: JSON-specific field, always False.
progress: How much of the download has already been completed.
total_size: Total object size.
url: Implementation-specific field used for saving a metadata get call.
For JSON, this the download URL of the object.
For XML, this is a pickled boto key.
Args:
src_obj_metadata: Object to be downloaded.
progress: See above.
Returns:
Serialization data for use with Cloud API GetObjectMedia.
"""
serialization_dict = {
'auto_transfer': 'False',
'progress': progress,
'total_size': src_obj_metadata.size,
'url': src_obj_metadata.mediaLink
}
return json.dumps(serialization_dict)
| 31.887324 | 78 | 0.740283 |
from __future__ import absolute_import
import json
from gslib.cloud_api import ArgumentException
def ValidateDstObjectMetadata(dst_obj_metadata):
if not dst_obj_metadata:
raise ArgumentException(
'No object metadata supplied for destination object.')
if not dst_obj_metadata.name:
raise ArgumentException(
'Object metadata supplied for destination object had no object name.')
if not dst_obj_metadata.bucket:
raise ArgumentException(
'Object metadata supplied for destination object had no bucket name.')
def GetDownloadSerializationData(src_obj_metadata, progress=0):
serialization_dict = {
'auto_transfer': 'False',
'progress': progress,
'total_size': src_obj_metadata.size,
'url': src_obj_metadata.mediaLink
}
return json.dumps(serialization_dict)
| true | true |
f7fc93675fdb651d83bc2bd95e43eac2de5ac4d6 | 250 | py | Python | Assignments/2.2/C5.py | cRohda/Comp-Sci | 07e36fdcca242f5aa4f3e11440e9c77616973031 | [
"Apache-2.0"
] | null | null | null | Assignments/2.2/C5.py | cRohda/Comp-Sci | 07e36fdcca242f5aa4f3e11440e9c77616973031 | [
"Apache-2.0"
] | null | null | null | Assignments/2.2/C5.py | cRohda/Comp-Sci | 07e36fdcca242f5aa4f3e11440e9c77616973031 | [
"Apache-2.0"
] | null | null | null | my_string = "incredible" # Create string
s3 = (my_string*3) # Make new string that is 3x the first string
print(s3) # Print new string
cred3 = (s3[2:6]*3) # Make new string which is cred 3x, (indies 2,3,4,5)
print(cred3) # Print the new string | 35.714286 | 73 | 0.688 | my_string = "incredible"
s3 = (my_string*3)
print(s3)
cred3 = (s3[2:6]*3)
print(cred3) | true | true |
f7fc94f7ae7a3b4a646a520144a83d1846d54f56 | 6,405 | py | Python | configs/tile/fastercnn_r50_se_syncbn.py | NEUdeep/TileDetection | f453ac868de195a7859b9bf07c813e46eb35d2d0 | [
"Apache-2.0"
] | 41 | 2021-03-23T23:43:00.000Z | 2022-03-22T12:42:53.000Z | configs/tile/fastercnn_r50_se_syncbn.py | hlcedu/TileDetection | 77b5ef4bb4db29f5ffe6a6fa9f87b4bfe8516e4c | [
"Apache-2.0"
] | 3 | 2021-09-12T13:04:34.000Z | 2022-03-23T07:29:43.000Z | configs/tile/fastercnn_r50_se_syncbn.py | hlcedu/TileDetection | 77b5ef4bb4db29f5ffe6a6fa9f87b4bfe8516e4c | [
"Apache-2.0"
] | 7 | 2021-03-31T03:21:43.000Z | 2021-12-27T08:50:13.000Z | model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
add_se=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[6],
ratios=[0.1, 0.5, 1.0, 2.0, 10.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=7,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
gpu_assign_thr=10,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.3,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=False,
gpu_assign_thr=10,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.1),
max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
)
# dataset setting
dataset_type = 'TileDataset'
data_root = '/ssd/huangyifei/data_guangdong/tile_round1_train_20201231/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(3000, 3000), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomVFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(3000, 3000),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'infos/train_infos_crop.pkl',
img_prefix=data_root + 'crop_train_imgs/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'infos/val_infos_crop.pkl',
img_prefix=data_root + 'crop_train_imgs/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'infos/val_infos_crop.pkl',
img_prefix=data_root + 'crop_train_imgs/',
pipeline=test_pipeline)
)
evaluation = dict(interval=1, metric='mAP')
# optimizer
# optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer = dict(type='Adam', lr=7e-5)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[12, 16])
total_epochs = 20
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = 'work_dirs/faster_r50_baseline_v2_se_syncbn/epoch_6.pth'
work_dir = 'work_dirs/faster_r50_baseline_v2_se_syncbn'
workflow = [('train', 1)]
# fp16 settings
fp16 = dict(loss_scale=512.)
cudnn_benchmark = True
dist_params = dict(backend='nccl')
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) | 31.551724 | 83 | 0.591101 | model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
add_se=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[6],
ratios=[0.1, 0.5, 1.0, 2.0, 10.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=7,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0))))
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
gpu_assign_thr=10,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.3,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=False,
gpu_assign_thr=10,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.1),
max_per_img=100)
)
dataset_type = 'TileDataset'
data_root = '/ssd/huangyifei/data_guangdong/tile_round1_train_20201231/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(3000, 3000), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomVFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(3000, 3000),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'infos/train_infos_crop.pkl',
img_prefix=data_root + 'crop_train_imgs/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'infos/val_infos_crop.pkl',
img_prefix=data_root + 'crop_train_imgs/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'infos/val_infos_crop.pkl',
img_prefix=data_root + 'crop_train_imgs/',
pipeline=test_pipeline)
)
evaluation = dict(interval=1, metric='mAP')
optimizer = dict(type='Adam', lr=7e-5)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[12, 16])
total_epochs = 20
checkpoint_config = dict(interval=1)
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = 'work_dirs/faster_r50_baseline_v2_se_syncbn/epoch_6.pth'
work_dir = 'work_dirs/faster_r50_baseline_v2_se_syncbn'
workflow = [('train', 1)]
fp16 = dict(loss_scale=512.)
cudnn_benchmark = True
dist_params = dict(backend='nccl')
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) | true | true |
f7fc954534974574566a2551115cd4093a054165 | 7,236 | py | Python | app/venv/lib/python2.7/site-packages/numpy/fft/info.py | anaheino/Ufo-sightings-map | 64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc | [
"MIT"
] | 195 | 2016-01-14T16:03:02.000Z | 2021-12-29T09:15:02.000Z | app/venv/lib/python2.7/site-packages/numpy/fft/info.py | anaheino/Ufo-sightings-map | 64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc | [
"MIT"
] | 75 | 2016-01-14T16:03:02.000Z | 2020-04-29T22:51:53.000Z | app/venv/lib/python2.7/site-packages/numpy/fft/info.py | anaheino/Ufo-sightings-map | 64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc | [
"MIT"
] | 11 | 2016-01-15T17:30:16.000Z | 2020-07-30T03:58:42.000Z | """
Discrete Fourier Transform (:mod:`numpy.fft`)
=============================================
.. currentmodule:: numpy.fft
Standard FFTs
-------------
.. autosummary::
:toctree: generated/
fft Discrete Fourier transform.
ifft Inverse discrete Fourier transform.
fft2 Discrete Fourier transform in two dimensions.
ifft2 Inverse discrete Fourier transform in two dimensions.
fftn Discrete Fourier transform in N-dimensions.
ifftn Inverse discrete Fourier transform in N dimensions.
Real FFTs
---------
.. autosummary::
:toctree: generated/
rfft Real discrete Fourier transform.
irfft Inverse real discrete Fourier transform.
rfft2 Real discrete Fourier transform in two dimensions.
irfft2 Inverse real discrete Fourier transform in two dimensions.
rfftn Real discrete Fourier transform in N dimensions.
irfftn Inverse real discrete Fourier transform in N dimensions.
Hermitian FFTs
--------------
.. autosummary::
:toctree: generated/
hfft Hermitian discrete Fourier transform.
ihfft Inverse Hermitian discrete Fourier transform.
Helper routines
---------------
.. autosummary::
:toctree: generated/
fftfreq Discrete Fourier Transform sample frequencies.
rfftfreq DFT sample frequencies (for usage with rfft, irfft).
fftshift Shift zero-frequency component to center of spectrum.
ifftshift Inverse of fftshift.
Background information
----------------------
Fourier analysis is fundamentally a method for expressing a function as a
sum of periodic components, and for recovering the function from those
components. When both the function and its Fourier transform are
replaced with discretized counterparts, it is called the discrete Fourier
transform (DFT). The DFT has become a mainstay of numerical computing in
part because of a very fast algorithm for computing it, called the Fast
Fourier Transform (FFT), which was known to Gauss (1805) and was brought
to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
provide an accessible introduction to Fourier analysis and its
applications.
Because the discrete Fourier transform separates its input into
components that contribute at discrete frequencies, it has a great number
of applications in digital signal processing, e.g., for filtering, and in
this context the discretized input to the transform is customarily
referred to as a *signal*, which exists in the *time domain*. The output
is called a *spectrum* or *transform* and exists in the *frequency
domain*.
Implementation details
----------------------
There are many ways to define the DFT, varying in the sign of the
exponent, normalization, etc. In this implementation, the DFT is defined
as
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
\\qquad k = 0,\\ldots,n-1.
The DFT is in general defined for complex inputs and outputs, and a
single-frequency component at linear frequency :math:`f` is
represented by a complex exponential
:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
is the sampling interval.
The values in the result follow so-called "standard" order: If ``A =
fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the mean of
the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
negative-frequency terms, in order of decreasingly negative frequency.
For an even number of input points, ``A[n/2]`` represents both positive and
negative Nyquist frequency, and is also purely real for real input. For
an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
of corresponding elements in the output. The routine
``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
that shift.
When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
The phase spectrum is obtained by ``np.angle(A)``.
The inverse DFT is defined as
.. math::
a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
\\qquad m = 0,\\ldots,n-1.
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
Normalization
-------------
The default normalization has the direct transforms unscaled and the inverse
transforms are scaled by :math:`1/n`. It is possible to obtain unitary
transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
`None`) so that both direct and inverse transforms will be scaled by
:math:`1/\\sqrt{n}`.
Real and Hermitian transforms
-----------------------------
When the input is purely real, its transform is Hermitian, i.e., the
component at frequency :math:`f_k` is the complex conjugate of the
component at frequency :math:`-f_k`, which means that for real
inputs there is no information in the negative frequency components that
is not already available from the positive frequency components.
The family of `rfft` functions is
designed to operate on real inputs, and exploits this symmetry by
computing only the positive frequency components, up to and including the
Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
output points. The inverses of this family assumes the same symmetry of
its input, and for an output of ``n`` points uses ``n/2+1`` input points.
Correspondingly, when the spectrum is purely real, the signal is
Hermitian. The `hfft` family of functions exploits this symmetry by
using ``n/2+1`` complex points in the input (time) domain for ``n`` real
points in the frequency domain.
In higher dimensions, FFTs are used, e.g., for image analysis and
filtering. The computational efficiency of the FFT means that it can
also be a faster way to compute large convolutions, using the property
that a convolution in the time domain is equivalent to a point-by-point
multiplication in the frequency domain.
Higher dimensions
-----------------
In two dimensions, the DFT is defined as
.. math::
A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
\\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
which extends in the obvious way to higher dimensions, and the inverses
in higher dimensions also extend in the same way.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
12-13. Cambridge Univ. Press, Cambridge, UK.
Examples
--------
For examples, see the various functions.
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
| 38.489362 | 84 | 0.714345 | from __future__ import division, absolute_import, print_function
depends = ['core']
| true | true |
f7fc954965f6f2d56e67279732ba141ca1874560 | 6,125 | py | Python | rmgpy/molecule/atomtypedatabase.py | mbprend/RMG-Py | 29e111d683f2daa0b376417be60e76b32ce8a993 | [
"MIT"
] | null | null | null | rmgpy/molecule/atomtypedatabase.py | mbprend/RMG-Py | 29e111d683f2daa0b376417be60e76b32ce8a993 | [
"MIT"
] | null | null | null | rmgpy/molecule/atomtypedatabase.py | mbprend/RMG-Py | 29e111d683f2daa0b376417be60e76b32ce8a993 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This creates atomTypes and assigns them with the correct bond/lone_pairs/charge
Used in isomorphismTest.py to create group_atomtypes
"""
class AbstractAtomType(object):
def __init__(self, element=None, label=None, double=-1, triple=-1, quadruple=-1, benzene=-1, lp=-1, chrg=-1):
self.element = element
self.label = label
self.double = double
self.triple = triple
self.quadruple = quadruple
self.benzene = benzene
self.lp = lp
self.chrg = chrg
class Column4(AbstractAtomType): # C
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.lp = 0
class Column5(AbstractAtomType): # N
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.lp = 1
class Column6(AbstractAtomType): # O, S
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.lp = 2
class Xs(AbstractAtomType):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.double, self.triple, self.benzene, self.quadruple = 0, 0, 0, 0
self.label = 's'
class Xd(AbstractAtomType):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.double, self.triple, self.benzene, self.quadruple = 1, 0, 0, 0
self.label = 'd'
class Xdd(AbstractAtomType):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.double, self.triple, self.benzene, self.quadruple = 2, 0, 0, 0
self.label = 'dd'
class Xt(AbstractAtomType):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.double, self.triple, self.benzene, self.quadruple = 0, 1, 0, 0
self.label = 't'
class Xq(AbstractAtomType):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.double, self.triple, self.benzene, self.quadruple = 0, 0, 0, 1
self.label = 'q'
class Xb(AbstractAtomType):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.double, self.triple, self.benzene, self.quadruple = 0, 0, 2, 0
self.label = 'b'
class Xbf(AbstractAtomType):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.double, self.triple, self.benzene, self.quadruple = 0, 0, 3, 0
self.label = 'bf'
def create_atom_types():
atomtypes = []
# tetravalent:
# tetravalent = []
# for type in [Xs, Xd, Xdd, Xt, Xb, Xbf]:
# #tetravalent.extend(create_types(type, ['C', 'Si']))
# tetravalent.extend(create_types(type, ['C']))
# for at in tetravalent: at.lp = 0
# atomtypes.extend(tetravalent)
# bivalent:
# bivalent = []
# for type in [Xs, Xd]:
# #bivalent.extend(create_types(type, ['O', 'S']))
# bivalent.extend(create_types(type, ['O']))
# for at in bivalent: at.lp = 2
# atomtypes.extend(bivalent)
# trivalent nitrogen:
# trivalent_N = []
# for type in [Xs, Xd, Xt, Xb]:
# trivalent_N.extend(create_types(type, ['N'], ['N3']))
# for at in trivalent_N: at.lp = 1
# atomtypes.extend(trivalent_N)
# pentavalent nitrogen:
# pentavalent_N = []
# for type in [Xs, Xd, Xdd, Xt, Xb]:
# pentavalent_N.extend(create_types(type, ['N'], ['N5']))
# for at in pentavalent_N: at.lp = 0
# atomtypes.extend(pentavalent_N)
return atomtypes
def create_types(Type, elements, labels=None):
if labels is None:
labels = elements
atomtypes = []
for el, label in zip(elements, labels):
at = Type(element=el)
at.label = label + at.label
atomtypes.append(at)
return atomtypes
| 36.458333 | 113 | 0.56098 | true | true | |
f7fc955fa7ad8cf42db2449242ce3d3b7a563b21 | 14,495 | py | Python | hax/hax/handler.py | nikhilpatil2995/cortx-hare | b240a919a082d1078178c50983b0cbfab17c6742 | [
"Apache-2.0"
] | null | null | null | hax/hax/handler.py | nikhilpatil2995/cortx-hare | b240a919a082d1078178c50983b0cbfab17c6742 | [
"Apache-2.0"
] | null | null | null | hax/hax/handler.py | nikhilpatil2995/cortx-hare | b240a919a082d1078178c50983b0cbfab17c6742 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import logging
from typing import List
from hax.message import (BroadcastHAStates, Die, EntrypointRequest,
FirstEntrypointRequest, HaNvecGetEvent, ProcessEvent,
SnsRebalancePause, SnsRebalanceResume,
SnsRebalanceStart, SnsRebalanceStatus,
SnsRebalanceStop, SnsRepairPause, SnsRepairResume,
SnsRepairStart, SnsRepairStatus, SnsRepairStop,
StobIoqError)
from hax.motr import Motr
from hax.motr.delivery import DeliveryHerald
from hax.motr.planner import WorkPlanner
from hax.queue.publish import EQPublisher
from hax.types import (ConfHaProcess, HaLinkMessagePromise, HAState, MessageId,
ObjT, ServiceHealth, StoppableThread, m0HaProcessEvent,
m0HaProcessType)
from hax.util import ConsulUtil, dump_json, repeat_if_fails
LOG = logging.getLogger('hax')
class ConsumerThread(StoppableThread):
    """
    The only Motr-aware thread in whole HaX. This thread pulls messages from
    the multithreaded Queue and considers the messages as commands. Every such
    a command describes what should be sent to Motr land.
    The thread exits gracefully when it receives message of type Die (i.e.
    it is a 'poison pill').
    """
    def __init__(self, planner: WorkPlanner, motr: Motr,
                 herald: DeliveryHerald, consul: ConsulUtil, idx: int):
        # `planner` supplies commands, `motr` executes them; `herald` is
        # used to wait for message-delivery confirmations and `idx`
        # identifies this consumer among its siblings.
        super().__init__(target=self._do_work,
                         name=f'qconsumer-{idx}',
                         args=(planner, motr))
        self.is_stopped = False
        self.consul = consul
        self.eq_publisher = EQPublisher()
        self.herald = herald
        self.idx = idx
    def stop(self) -> None:
        # Only flips the flag; the worker loop actually exits when it
        # receives a Die message (see _do_work).
        self.is_stopped = True
    @repeat_if_fails(wait_seconds=1)
    def _update_process_status(self, p: WorkPlanner, motr: Motr,
                               event: ConfHaProcess) -> None:
        """Persist the process event in Consul and broadcast the mapped
        ServiceHealth to Motr for STARTED/STOPPED events."""
        # If a consul-related exception appears, it will
        # be processed by repeat_if_fails.
        #
        # This thread will become blocked until that
        # intermittent error gets resolved.
        motr_to_svc_status = {
            (m0HaProcessType.M0_CONF_HA_PROCESS_M0MKFS,
             m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED): (
                 ServiceHealth.OK),
            (m0HaProcessType.M0_CONF_HA_PROCESS_M0MKFS,
             m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED): (
                 ServiceHealth.STOPPED),
            (m0HaProcessType.M0_CONF_HA_PROCESS_M0D,
             m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED): (
                 ServiceHealth.OK),
            (m0HaProcessType.M0_CONF_HA_PROCESS_M0D,
             m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED): (
                 ServiceHealth.FAILED),
            (m0HaProcessType.M0_CONF_HA_PROCESS_OTHER,
             m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED): (
                 ServiceHealth.OK),
            (m0HaProcessType.M0_CONF_HA_PROCESS_OTHER,
             m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED): (
                 ServiceHealth.FAILED)}
        self.consul.update_process_status(event)
        if event.chp_event in (m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED,
                               m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED):
            svc_status = motr_to_svc_status[(event.chp_type, event.chp_event)]
            motr.broadcast_ha_states(
                [HAState(fid=event.fid, status=svc_status)])
    @repeat_if_fails(wait_seconds=1)
    def update_process_failure(self, planner: WorkPlanner,
                               ha_states: List[HAState]) -> List[HAState]:
        """Reconcile reported process HA states against the live status in
        Consul and return the list of states that should be broadcast.

        Non-process states pass through unchanged; duplicate start/stop
        reports are dropped; UNKNOWN statuses are re-enqueued for a later
        confirmation attempt instead of being broadcast now.
        """
        new_ha_states: List[HAState] = []
        for state in ha_states:
            # We are only concerned with process statuses.
            if state.fid.container == ObjT.PROCESS.value:
                current_status = self.consul.get_process_current_status(
                    state.status, state.fid)
                if current_status == ServiceHealth.OK:
                    if (self.consul.get_process_local_status(
                            state.fid) == 'M0_CONF_HA_PROCESS_STARTED'):
                        continue
                if current_status in (ServiceHealth.FAILED,
                                      ServiceHealth.STOPPED):
                    if (self.consul.get_process_local_status(
                            state.fid) == 'M0_CONF_HA_PROCESS_STOPPED'):
                        # Consul may report failure of a process multiple
                        # times, so we don't want to send duplicate failure
                        # notifications, it may cause delay in cleanup
                        # activities.
                        continue
                if current_status == ServiceHealth.UNKNOWN:
                    # We got service status as UNKNOWN, that means hax was
                    # notified about process failure but hax couldn't
                    # confirm if the process is in failed state or have
                    # failed and restarted. So, we will not loose the
                    # event and try again to confirm the real time
                    # process status by enqueing a broadcast event
                    # specific to this process.
                    # It is expected that the process status gets
                    # eventually confirmed as either failed or passing (OK).
                    # This situation typically arises due to delay
                    # in receiving failure notification during which the
                    # corresponding process might be restarting or have
                    # already restarted. Thus it is important to confirm
                    # the real time status of the process before
                    # broadcasting failure.
                    # NOTE(review): the next assignment is a no-op
                    # (current_status is already UNKNOWN on this branch).
                    current_status = ServiceHealth.UNKNOWN
                    planner.add_command(
                        BroadcastHAStates(states=[
                            HAState(fid=state.fid, status=ServiceHealth.FAILED)
                        ],
                                          reply_to=None))
                if current_status not in (ServiceHealth.UNKNOWN,
                                          ServiceHealth.OFFLINE):
                    # We also need to account and report the failure of remote
                    # Motr processes to this node's hax and motr processes.
                    # When Consul reports a remote process failure, hax
                    # confirms its current status from Consul KV and updates
                    # the list of failed services and also adds it to the
                    # broadcast list.
                    if current_status != ServiceHealth.OK:
                        event = m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED
                    else:
                        event = m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED
                    self.consul.update_process_status(
                        ConfHaProcess(
                            chp_event=event,
                            chp_type=int(
                                m0HaProcessType.M0_CONF_HA_PROCESS_M0D),
                            chp_pid=0,
                            fid=state.fid))
                    new_ha_states.append(
                        HAState(fid=state.fid, status=current_status))
            else:
                new_ha_states.append(state)
        return new_ha_states
    def _do_work(self, planner: WorkPlanner, motr: Motr):
        """Main loop: pull commands from the planner and dispatch them to
        Motr until a Die message is received (raised as StopIteration)."""
        LOG.info('Handler thread has started')
        try:
            while True:
                try:
                    LOG.debug('Waiting for the next message')
                    item = planner.get_next_command()
                    LOG.debug('Got %s message from planner', item)
                    if isinstance(item, FirstEntrypointRequest):
                        LOG.debug('first entrypoint request, broadcast FAILED')
                        ids: List[MessageId] = motr.broadcast_ha_states(
                            [
                                HAState(fid=item.process_fid,
                                        status=ServiceHealth.FAILED)
                            ],
                            notify_devices=False)
                        LOG.debug('waiting for broadcast of %s for ep: %s',
                                  ids, item.remote_rpc_endpoint)
                        # Wait for failure delivery.
                        self.herald.wait_for_all(HaLinkMessagePromise(ids))
                        motr.send_entrypoint_request_reply(
                            EntrypointRequest(
                                reply_context=item.reply_context,
                                req_id=item.req_id,
                                remote_rpc_endpoint=item.remote_rpc_endpoint,
                                process_fid=item.process_fid,
                                git_rev=item.git_rev,
                                pid=item.pid,
                                is_first_request=item.is_first_request))
                    elif isinstance(item, EntrypointRequest):
                        # While replying any Exception is catched. In such a
                        # case, the motr process will receive EAGAIN and
                        # hence will need to make new attempt by itself
                        motr.send_entrypoint_request_reply(item)
                    elif isinstance(item, ProcessEvent):
                        self._update_process_status(planner, motr, item.evt)
                    elif isinstance(item, HaNvecGetEvent):
                        fn = motr.ha_nvec_get_reply
                        # If a consul-related exception appears, it will
                        # be processed by repeat_if_fails.
                        #
                        # This thread will become blocked until that
                        # intermittent error gets resolved.
                        decorated = (repeat_if_fails(wait_seconds=5))(fn)
                        decorated(item)
                    elif isinstance(item, BroadcastHAStates):
                        LOG.info('HA states: %s', item.states)
                        ha_states = self.update_process_failure(
                            planner, item.states)
                        result: List[MessageId] = motr.broadcast_ha_states(
                            ha_states)
                        if item.reply_to:
                            item.reply_to.put(result)
                    elif isinstance(item, StobIoqError):
                        LOG.info('Stob IOQ: %s', item.fid)
                        payload = dump_json(item)
                        LOG.debug('Stob IOQ JSON: %s', payload)
                        offset = self.eq_publisher.publish('stob-ioq', payload)
                        LOG.debug('Written to epoch: %s', offset)
                    elif isinstance(item, SnsRepairStatus):
                        LOG.info('Requesting SNS repair status')
                        status = motr.get_repair_status(item.fid)
                        LOG.info('SNS repair status is received: %s', status)
                        item.reply_to.put(status)
                    elif isinstance(item, SnsRebalanceStatus):
                        LOG.info('Requesting SNS rebalance status')
                        status = motr.get_rebalance_status(item.fid)
                        LOG.info('SNS rebalance status is received: %s',
                                 status)
                        item.reply_to.put(status)
                    elif isinstance(item, SnsRebalanceStart):
                        LOG.info('Requesting SNS rebalance start')
                        motr.start_rebalance(item.fid)
                    elif isinstance(item, SnsRebalanceStop):
                        LOG.info('Requesting SNS rebalance stop')
                        motr.stop_rebalance(item.fid)
                    elif isinstance(item, SnsRebalancePause):
                        LOG.info('Requesting SNS rebalance pause')
                        motr.pause_rebalance(item.fid)
                    elif isinstance(item, SnsRebalanceResume):
                        LOG.info('Requesting SNS rebalance resume')
                        motr.resume_rebalance(item.fid)
                    elif isinstance(item, SnsRepairStart):
                        LOG.info('Requesting SNS repair start')
                        motr.start_repair(item.fid)
                    elif isinstance(item, SnsRepairStop):
                        LOG.info('Requesting SNS repair stop')
                        motr.stop_repair(item.fid)
                    elif isinstance(item, SnsRepairPause):
                        LOG.info('Requesting SNS repair pause')
                        motr.pause_repair(item.fid)
                    elif isinstance(item, SnsRepairResume):
                        LOG.info('Requesting SNS repair resume')
                        motr.resume_repair(item.fid)
                    elif isinstance(item, Die):
                        # Poison pill: unwinds to the outer handler below.
                        raise StopIteration()
                    else:
                        LOG.warning('Unsupported event type received: %s',
                                    item)
                except StopIteration:
                    raise
                except Exception:
                    # no op, swallow the exception
                    LOG.exception('**ERROR**')
                finally:
                    planner.notify_finished(item)
        except StopIteration:
            LOG.info('Consumer Stopped')
            if self.idx == 0:
                motr.stop()
        finally:
            LOG.info('Handler thread has exited')
| 51.219081 | 79 | 0.538462 |
import logging
from typing import List
from hax.message import (BroadcastHAStates, Die, EntrypointRequest,
FirstEntrypointRequest, HaNvecGetEvent, ProcessEvent,
SnsRebalancePause, SnsRebalanceResume,
SnsRebalanceStart, SnsRebalanceStatus,
SnsRebalanceStop, SnsRepairPause, SnsRepairResume,
SnsRepairStart, SnsRepairStatus, SnsRepairStop,
StobIoqError)
from hax.motr import Motr
from hax.motr.delivery import DeliveryHerald
from hax.motr.planner import WorkPlanner
from hax.queue.publish import EQPublisher
from hax.types import (ConfHaProcess, HaLinkMessagePromise, HAState, MessageId,
ObjT, ServiceHealth, StoppableThread, m0HaProcessEvent,
m0HaProcessType)
from hax.util import ConsulUtil, dump_json, repeat_if_fails
LOG = logging.getLogger('hax')
class ConsumerThread(StoppableThread):
def __init__(self, planner: WorkPlanner, motr: Motr,
herald: DeliveryHerald, consul: ConsulUtil, idx: int):
super().__init__(target=self._do_work,
name=f'qconsumer-{idx}',
args=(planner, motr))
self.is_stopped = False
self.consul = consul
self.eq_publisher = EQPublisher()
self.herald = herald
self.idx = idx
def stop(self) -> None:
self.is_stopped = True
@repeat_if_fails(wait_seconds=1)
def _update_process_status(self, p: WorkPlanner, motr: Motr,
event: ConfHaProcess) -> None:
motr_to_svc_status = {
(m0HaProcessType.M0_CONF_HA_PROCESS_M0MKFS,
m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED): (
ServiceHealth.OK),
(m0HaProcessType.M0_CONF_HA_PROCESS_M0MKFS,
m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED): (
ServiceHealth.STOPPED),
(m0HaProcessType.M0_CONF_HA_PROCESS_M0D,
m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED): (
ServiceHealth.OK),
(m0HaProcessType.M0_CONF_HA_PROCESS_M0D,
m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED): (
ServiceHealth.FAILED),
(m0HaProcessType.M0_CONF_HA_PROCESS_OTHER,
m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED): (
ServiceHealth.OK),
(m0HaProcessType.M0_CONF_HA_PROCESS_OTHER,
m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED): (
ServiceHealth.FAILED)}
self.consul.update_process_status(event)
if event.chp_event in (m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED,
m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED):
svc_status = motr_to_svc_status[(event.chp_type, event.chp_event)]
motr.broadcast_ha_states(
[HAState(fid=event.fid, status=svc_status)])
@repeat_if_fails(wait_seconds=1)
def update_process_failure(self, planner: WorkPlanner,
ha_states: List[HAState]) -> List[HAState]:
new_ha_states: List[HAState] = []
for state in ha_states:
if state.fid.container == ObjT.PROCESS.value:
current_status = self.consul.get_process_current_status(
state.status, state.fid)
if current_status == ServiceHealth.OK:
if (self.consul.get_process_local_status(
state.fid) == 'M0_CONF_HA_PROCESS_STARTED'):
continue
if current_status in (ServiceHealth.FAILED,
ServiceHealth.STOPPED):
if (self.consul.get_process_local_status(
state.fid) == 'M0_CONF_HA_PROCESS_STOPPED'):
# notifications, it may cause delay in cleanup
# activities.
continue
if current_status == ServiceHealth.UNKNOWN:
# We got service status as UNKNOWN, that means hax was
# notified about process failure but hax couldn't
current_status = ServiceHealth.UNKNOWN
planner.add_command(
BroadcastHAStates(states=[
HAState(fid=state.fid, status=ServiceHealth.FAILED)
],
reply_to=None))
if current_status not in (ServiceHealth.UNKNOWN,
ServiceHealth.OFFLINE):
# When Consul reports a remote process failure, hax
# confirms its current status from Consul KV and updates
# the list of failed services and also adds it to the
# broadcast list.
if current_status != ServiceHealth.OK:
event = m0HaProcessEvent.M0_CONF_HA_PROCESS_STOPPED
else:
event = m0HaProcessEvent.M0_CONF_HA_PROCESS_STARTED
self.consul.update_process_status(
ConfHaProcess(
chp_event=event,
chp_type=int(
m0HaProcessType.M0_CONF_HA_PROCESS_M0D),
chp_pid=0,
fid=state.fid))
new_ha_states.append(
HAState(fid=state.fid, status=current_status))
else:
new_ha_states.append(state)
return new_ha_states
def _do_work(self, planner: WorkPlanner, motr: Motr):
LOG.info('Handler thread has started')
try:
while True:
try:
LOG.debug('Waiting for the next message')
item = planner.get_next_command()
LOG.debug('Got %s message from planner', item)
if isinstance(item, FirstEntrypointRequest):
LOG.debug('first entrypoint request, broadcast FAILED')
ids: List[MessageId] = motr.broadcast_ha_states(
[
HAState(fid=item.process_fid,
status=ServiceHealth.FAILED)
],
notify_devices=False)
LOG.debug('waiting for broadcast of %s for ep: %s',
ids, item.remote_rpc_endpoint)
# Wait for failure delivery.
self.herald.wait_for_all(HaLinkMessagePromise(ids))
motr.send_entrypoint_request_reply(
EntrypointRequest(
reply_context=item.reply_context,
req_id=item.req_id,
remote_rpc_endpoint=item.remote_rpc_endpoint,
process_fid=item.process_fid,
git_rev=item.git_rev,
pid=item.pid,
is_first_request=item.is_first_request))
elif isinstance(item, EntrypointRequest):
# While replying any Exception is catched. In such a
# case, the motr process will receive EAGAIN and
# hence will need to make new attempt by itself
motr.send_entrypoint_request_reply(item)
elif isinstance(item, ProcessEvent):
self._update_process_status(planner, motr, item.evt)
elif isinstance(item, HaNvecGetEvent):
fn = motr.ha_nvec_get_reply
# If a consul-related exception appears, it will
# be processed by repeat_if_fails.
#
# This thread will become blocked until that
# intermittent error gets resolved.
decorated = (repeat_if_fails(wait_seconds=5))(fn)
decorated(item)
elif isinstance(item, BroadcastHAStates):
LOG.info('HA states: %s', item.states)
ha_states = self.update_process_failure(
planner, item.states)
result: List[MessageId] = motr.broadcast_ha_states(
ha_states)
if item.reply_to:
item.reply_to.put(result)
elif isinstance(item, StobIoqError):
LOG.info('Stob IOQ: %s', item.fid)
payload = dump_json(item)
LOG.debug('Stob IOQ JSON: %s', payload)
offset = self.eq_publisher.publish('stob-ioq', payload)
LOG.debug('Written to epoch: %s', offset)
elif isinstance(item, SnsRepairStatus):
LOG.info('Requesting SNS repair status')
status = motr.get_repair_status(item.fid)
LOG.info('SNS repair status is received: %s', status)
item.reply_to.put(status)
elif isinstance(item, SnsRebalanceStatus):
LOG.info('Requesting SNS rebalance status')
status = motr.get_rebalance_status(item.fid)
LOG.info('SNS rebalance status is received: %s',
status)
item.reply_to.put(status)
elif isinstance(item, SnsRebalanceStart):
LOG.info('Requesting SNS rebalance start')
motr.start_rebalance(item.fid)
elif isinstance(item, SnsRebalanceStop):
LOG.info('Requesting SNS rebalance stop')
motr.stop_rebalance(item.fid)
elif isinstance(item, SnsRebalancePause):
LOG.info('Requesting SNS rebalance pause')
motr.pause_rebalance(item.fid)
elif isinstance(item, SnsRebalanceResume):
LOG.info('Requesting SNS rebalance resume')
motr.resume_rebalance(item.fid)
elif isinstance(item, SnsRepairStart):
LOG.info('Requesting SNS repair start')
motr.start_repair(item.fid)
elif isinstance(item, SnsRepairStop):
LOG.info('Requesting SNS repair stop')
motr.stop_repair(item.fid)
elif isinstance(item, SnsRepairPause):
LOG.info('Requesting SNS repair pause')
motr.pause_repair(item.fid)
elif isinstance(item, SnsRepairResume):
LOG.info('Requesting SNS repair resume')
motr.resume_repair(item.fid)
elif isinstance(item, Die):
raise StopIteration()
else:
LOG.warning('Unsupported event type received: %s',
item)
except StopIteration:
raise
except Exception:
# no op, swallow the exception
LOG.exception('**ERROR**')
finally:
planner.notify_finished(item)
except StopIteration:
LOG.info('Consumer Stopped')
if self.idx == 0:
motr.stop()
finally:
LOG.info('Handler thread has exited')
| true | true |
f7fc960d91c5422dcbbd9f9a05fe4d96bc3fc0a4 | 806 | py | Python | LC_problems/396.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | LC_problems/396.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | LC_problems/396.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 396.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/4/22 10:42
------------
"""
from functools import reduce
from typing import List
class Solution:
    def maxRotateFunction(self, nums: List[int]) -> int:
        """Return the maximum of F(k) over all rotations k of `nums`.

        F(k) = sum(i * rot_k(nums)[i]), where rot_k rotates `nums`
        clockwise by k positions (LeetCode 396).

        Uses the O(n) recurrence
            F(k) = F(k - 1) + sum(nums) - n * nums[n - k]
        instead of re-summing two slices per rotation, which made the
        original implementation O(n^2).
        """
        length = len(nums)
        total = sum(nums)
        # F(0): dot product of nums with its index positions.
        current = sum(i * v for i, v in enumerate(nums))
        ans = current
        for k in range(1, length):
            # Rotating by one more step moves nums[length - k] from the
            # highest weight (length - 1) down to weight 0, and every
            # other element gains exactly one unit of weight.
            current += total - length * nums[length - k]
            ans = max(ans, current)
        return ans
if __name__ == '__main__':
    # Quick manual check on the two sample cases.
    solver = Solution()
    for sample in ([4, 3, 2, 6], [100]):
        print(solver.maxRotateFunction(sample))
| 26.866667 | 78 | 0.53598 |
from functools import reduce
from typing import List
class Solution:
def maxRotateFunction(self, nums: List[int]) -> int:
current = sum(map(lambda x: x[0] * x[1], zip(nums, range(len(nums)))))
ans = current
for i in range(1, len(nums)):
n = nums[len(nums) - i]
current -= n * (len(nums) - 1)
current += sum(nums[:len(nums) - i])
current += sum(nums[len(nums) - i + 1:])
ans = max(ans, current)
return ans
if __name__ == '__main__':
s = Solution()
print(s.maxRotateFunction([4, 3, 2, 6]))
print(s.maxRotateFunction([100]))
| true | true |
f7fc98ec7f441ddc03443202d0d6b1ebedb9f742 | 22,140 | py | Python | tensorflow_probability/python/distributions/batch_reshape_test.py | nagachika/probability | 2a5609ceec01a388ec03b583b4f8e813cfbad981 | [
"Apache-2.0"
] | 4 | 2019-03-07T05:15:13.000Z | 2019-06-13T20:35:45.000Z | tensorflow_probability/python/distributions/batch_reshape_test.py | nagachika/probability | 2a5609ceec01a388ec03b583b4f8e813cfbad981 | [
"Apache-2.0"
] | 2 | 2019-08-01T18:31:41.000Z | 2019-08-01T19:42:15.000Z | tensorflow_probability/python/distributions/batch_reshape_test.py | nagachika/probability | 2a5609ceec01a388ec03b583b4f8e813cfbad981 | [
"Apache-2.0"
] | 1 | 2019-09-18T15:17:53.000Z | 2019-09-18T15:17:53.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for BatchReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class _BatchReshapeTest(object):
  def make_wishart(self, dims, new_batch_shape, old_batch_shape):
    """Build a Wishart and its BatchReshape to `new_batch_shape`.

    `self.dtype` / `self.is_static_shape` are supplied by the concrete
    subclass; when shapes are dynamic, both the batch shape and the scale
    are fed through `placeholder_with_default` with unknown static shape.
    Returns the (wishart, reshape_wishart) pair for comparison in tests.
    """
    new_batch_shape_ph = (
        tf.constant(np.int32(new_batch_shape))
        if self.is_static_shape else tf.compat.v1.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    # Two distinct 2x2 SPD scale matrices, tiled to fill old_batch_shape.
    scale = self.dtype([
        [[1., 0.5],
         [0.5, 1.]],
        [[0.5, 0.25],
         [0.25, 0.75]],
    ])
    scale = np.reshape(np.concatenate([scale, scale], axis=0),
                       old_batch_shape + [dims, dims])
    scale_ph = tf.compat.v1.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    wishart = tfd.Wishart(df=5, scale=scale_ph)
    reshape_wishart = tfd.BatchReshape(
        distribution=wishart,
        batch_shape=new_batch_shape_ph,
        validate_args=True)
    return wishart, reshape_wishart
  def test_matrix_variate_sample_and_log_prob(self):
    """Sample/log_prob of a reshaped Wishart must equal the reshape of the
    underlying Wishart's sample/log_prob (matrix-valued events)."""
    if tf.executing_eagerly():
      # TODO(b/122840816): Modify this test so that it runs in eager mode or
      # document that the test is not intended to run in eager mode.
      return
    dims = 2
    seed = tfp_test_util.test_seed()
    new_batch_shape = [4]
    old_batch_shape = [2, 2]
    wishart, reshape_wishart = self.make_wishart(
        dims, new_batch_shape, old_batch_shape)
    batch_shape = reshape_wishart.batch_shape_tensor()
    event_shape = reshape_wishart.event_shape_tensor()
    expected_sample_shape = [3, 1] + new_batch_shape + [dims, dims]
    # Same seed on both sample calls so the draws are comparable.
    x = wishart.sample([3, 1], seed=seed)
    expected_sample = tf.reshape(x, expected_sample_shape)
    actual_sample = reshape_wishart.sample([3, 1], seed=seed)
    expected_log_prob_shape = [3, 1] + new_batch_shape
    expected_log_prob = tf.reshape(wishart.log_prob(x), expected_log_prob_shape)
    actual_log_prob = reshape_wishart.log_prob(expected_sample)
    [
        batch_shape_,
        event_shape_,
        expected_sample_,
        actual_sample_,
        expected_log_prob_,
        actual_log_prob_,
    ] = self.evaluate([
        batch_shape,
        event_shape,
        expected_sample,
        actual_sample,
        expected_log_prob,
        actual_log_prob,
    ])
    self.assertAllEqual(new_batch_shape, batch_shape_)
    self.assertAllEqual([dims, dims], event_shape_)
    self.assertAllClose(expected_sample_, actual_sample_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_log_prob_, actual_log_prob_,
                        atol=0., rtol=1e-6)
    if not self.is_static_shape:
      return
    # Static-shape assertions only make sense in the static case.
    self.assertAllEqual(new_batch_shape, reshape_wishart.batch_shape)
    self.assertAllEqual([dims, dims], reshape_wishart.event_shape)
    self.assertAllEqual(expected_sample_shape, actual_sample.shape)
    self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
  def test_matrix_variate_stats(self):
    """Statistics of a reshaped Wishart must equal the reshape of the
    underlying Wishart's statistics (scalar and matrix-shaped stats)."""
    dims = 2
    new_batch_shape = [4]
    old_batch_shape = [2, 2]
    wishart, reshape_wishart = self.make_wishart(
        dims, new_batch_shape, old_batch_shape)
    expected_scalar_stat_shape = new_batch_shape
    expected_matrix_stat_shape = new_batch_shape + [dims, dims]
    expected_entropy = tf.reshape(wishart.entropy(), expected_scalar_stat_shape)
    actual_entropy = reshape_wishart.entropy()
    expected_mean = tf.reshape(wishart.mean(), expected_matrix_stat_shape)
    actual_mean = reshape_wishart.mean()
    expected_mode = tf.reshape(wishart.mode(), expected_matrix_stat_shape)
    actual_mode = reshape_wishart.mode()
    expected_stddev = tf.reshape(wishart.stddev(), expected_matrix_stat_shape)
    actual_stddev = reshape_wishart.stddev()
    expected_variance = tf.reshape(wishart.variance(),
                                   expected_matrix_stat_shape)
    actual_variance = reshape_wishart.variance()
    [
        expected_entropy_,
        actual_entropy_,
        expected_mean_,
        actual_mean_,
        expected_mode_,
        actual_mode_,
        expected_stddev_,
        actual_stddev_,
        expected_variance_,
        actual_variance_,
    ] = self.evaluate([
        expected_entropy,
        actual_entropy,
        expected_mean,
        actual_mean,
        expected_mode,
        actual_mode,
        expected_stddev,
        actual_stddev,
        expected_variance,
        actual_variance,
    ])
    self.assertAllClose(expected_entropy_, actual_entropy_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_mean_, actual_mean_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_mode_, actual_mode_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_stddev_, actual_stddev_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_variance_, actual_variance_,
                        atol=0., rtol=1e-6)
    if not self.is_static_shape:
      return
    # Static-shape assertions only make sense in the static case.
    self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_mean.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_mode.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_stddev.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_variance.shape)
  def make_normal(self, new_batch_shape, old_batch_shape):
    """Build a scalar Normal and its BatchReshape to `new_batch_shape`.

    Scales are distinct per batch member (0.5, 1.5, 2.5, ...) so a wrong
    reshape would be detected. Returns (normal, reshape_normal).
    """
    new_batch_shape_ph = (
        tf.constant(np.int32(new_batch_shape))
        if self.is_static_shape else tf.compat.v1.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    scale = self.dtype(0.5 + np.arange(
        np.prod(old_batch_shape)).reshape(old_batch_shape))
    scale_ph = tf.compat.v1.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    normal = tfd.Normal(loc=self.dtype(0), scale=scale_ph)
    reshape_normal = tfd.BatchReshape(
        distribution=normal, batch_shape=new_batch_shape_ph, validate_args=True)
    return normal, reshape_normal
  def test_scalar_variate_sample_and_log_prob(self):
    """Sample/log_prob of a reshaped Normal must equal the reshape of the
    underlying Normal's sample/log_prob (scalar events)."""
    if tf.executing_eagerly():
      # TODO(b/122840816): Modify this test so that it runs in eager mode or
      # document that the test is not intended to run in eager mode.
      return
    seed = tfp_test_util.test_seed()
    new_batch_shape = [2, 2]
    old_batch_shape = [4]
    normal, reshape_normal = self.make_normal(
        new_batch_shape, old_batch_shape)
    batch_shape = reshape_normal.batch_shape_tensor()
    event_shape = reshape_normal.event_shape_tensor()
    expected_sample_shape = new_batch_shape
    # Same seed on both sample calls so the draws are comparable.
    x = normal.sample(seed=seed)
    expected_sample = tf.reshape(x, expected_sample_shape)
    actual_sample = reshape_normal.sample(seed=seed)
    expected_log_prob_shape = new_batch_shape
    expected_log_prob = tf.reshape(normal.log_prob(x), expected_log_prob_shape)
    actual_log_prob = reshape_normal.log_prob(expected_sample)
    [
        batch_shape_,
        event_shape_,
        expected_sample_,
        actual_sample_,
        expected_log_prob_,
        actual_log_prob_,
    ] = self.evaluate([
        batch_shape,
        event_shape,
        expected_sample,
        actual_sample,
        expected_log_prob,
        actual_log_prob,
    ])
    self.assertAllEqual(new_batch_shape, batch_shape_)
    self.assertAllEqual([], event_shape_)
    self.assertAllClose(expected_sample_, actual_sample_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_log_prob_, actual_log_prob_,
                        atol=0., rtol=1e-6)
    if not self.is_static_shape:
      return
    # Static-shape assertions only make sense in the static case.
    self.assertAllEqual(new_batch_shape, reshape_normal.batch_shape)
    self.assertAllEqual([], reshape_normal.event_shape)
    self.assertAllEqual(expected_sample_shape, actual_sample.shape)
    self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
  def test_scalar_variate_stats(self):
    """Statistics of a reshaped Normal must equal the reshape of the
    underlying Normal's statistics (all stats are scalar-shaped)."""
    new_batch_shape = [2, 2]
    old_batch_shape = [4]
    normal, reshape_normal = self.make_normal(new_batch_shape, old_batch_shape)
    expected_scalar_stat_shape = new_batch_shape
    expected_entropy = tf.reshape(normal.entropy(), expected_scalar_stat_shape)
    actual_entropy = reshape_normal.entropy()
    expected_mean = tf.reshape(normal.mean(), expected_scalar_stat_shape)
    actual_mean = reshape_normal.mean()
    expected_mode = tf.reshape(normal.mode(), expected_scalar_stat_shape)
    actual_mode = reshape_normal.mode()
    expected_stddev = tf.reshape(normal.stddev(), expected_scalar_stat_shape)
    actual_stddev = reshape_normal.stddev()
    expected_variance = tf.reshape(normal.variance(),
                                   expected_scalar_stat_shape)
    actual_variance = reshape_normal.variance()
    [
        expected_entropy_,
        actual_entropy_,
        expected_mean_,
        actual_mean_,
        expected_mode_,
        actual_mode_,
        expected_stddev_,
        actual_stddev_,
        expected_variance_,
        actual_variance_,
    ] = self.evaluate([
        expected_entropy,
        actual_entropy,
        expected_mean,
        actual_mean,
        expected_mode,
        actual_mode,
        expected_stddev,
        actual_stddev,
        expected_variance,
        actual_variance,
    ])
    self.assertAllClose(expected_entropy_, actual_entropy_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_mean_, actual_mean_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_mode_, actual_mode_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_stddev_, actual_stddev_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_variance_, actual_variance_,
                        atol=0., rtol=1e-6)
    if not self.is_static_shape:
      return
    # Static-shape assertions only make sense in the static case.
    self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
    self.assertAllEqual(expected_scalar_stat_shape, actual_mean.shape)
    self.assertAllEqual(expected_scalar_stat_shape, actual_mode.shape)
    self.assertAllEqual(expected_scalar_stat_shape, actual_stddev.shape)
    self.assertAllEqual(expected_scalar_stat_shape, actual_variance.shape)
  def make_mvn(self, dims, new_batch_shape, old_batch_shape):
    """Build an MVN-diag (unit scale) and its BatchReshape to
    `new_batch_shape`. Returns (mvn, reshape_mvn)."""
    new_batch_shape_ph = (
        tf.constant(np.int32(new_batch_shape))
        if self.is_static_shape else tf.compat.v1.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = tf.compat.v1.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)
    reshape_mvn = tfd.BatchReshape(
        distribution=mvn, batch_shape=new_batch_shape_ph, validate_args=True)
    return mvn, reshape_mvn
  def test_vector_variate_sample_and_log_prob(self):
    """Sample/log_prob of a reshaped MVN must equal the reshape of the
    underlying MVN's sample/log_prob (vector events)."""
    if tf.executing_eagerly():
      # TODO(b/122840816): Modify this test so that it runs in eager mode or
      # document that the test is not intended to run in eager mode.
      return
    dims = 3
    seed = tfp_test_util.test_seed()
    new_batch_shape = [2, 1]
    old_batch_shape = [2]
    mvn, reshape_mvn = self.make_mvn(
        dims, new_batch_shape, old_batch_shape)
    batch_shape = reshape_mvn.batch_shape_tensor()
    event_shape = reshape_mvn.event_shape_tensor()
    expected_sample_shape = [3] + new_batch_shape + [dims]
    # Same seed on both sample calls so the draws are comparable.
    x = mvn.sample(3, seed=seed)
    expected_sample = tf.reshape(x, expected_sample_shape)
    actual_sample = reshape_mvn.sample(3, seed=seed)
    expected_log_prob_shape = [3] + new_batch_shape
    expected_log_prob = tf.reshape(mvn.log_prob(x), expected_log_prob_shape)
    actual_log_prob = reshape_mvn.log_prob(expected_sample)
    [
        batch_shape_,
        event_shape_,
        expected_sample_,
        actual_sample_,
        expected_log_prob_,
        actual_log_prob_,
    ] = self.evaluate([
        batch_shape,
        event_shape,
        expected_sample,
        actual_sample,
        expected_log_prob,
        actual_log_prob,
    ])
    self.assertAllEqual(new_batch_shape, batch_shape_)
    self.assertAllEqual([dims], event_shape_)
    self.assertAllClose(expected_sample_, actual_sample_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_log_prob_, actual_log_prob_,
                        atol=0., rtol=1e-6)
    if not self.is_static_shape:
      return
    # Static-shape assertions only make sense in the static case.
    self.assertAllEqual(new_batch_shape, reshape_mvn.batch_shape)
    self.assertAllEqual([dims], reshape_mvn.event_shape)
    self.assertAllEqual(expected_sample_shape, actual_sample.shape)
    self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
  def test_vector_variate_stats(self):
    """Statistics of a reshaped MVN must equal the reshape of the
    underlying MVN's statistics (scalar, vector and matrix stats)."""
    dims = 3
    new_batch_shape = [2, 1]
    old_batch_shape = [2]
    mvn, reshape_mvn = self.make_mvn(
        dims, new_batch_shape, old_batch_shape)
    expected_scalar_stat_shape = new_batch_shape
    expected_entropy = tf.reshape(mvn.entropy(), expected_scalar_stat_shape)
    actual_entropy = reshape_mvn.entropy()
    expected_vector_stat_shape = new_batch_shape + [dims]
    expected_mean = tf.reshape(mvn.mean(), expected_vector_stat_shape)
    actual_mean = reshape_mvn.mean()
    expected_mode = tf.reshape(mvn.mode(), expected_vector_stat_shape)
    actual_mode = reshape_mvn.mode()
    expected_stddev = tf.reshape(mvn.stddev(), expected_vector_stat_shape)
    actual_stddev = reshape_mvn.stddev()
    expected_variance = tf.reshape(mvn.variance(), expected_vector_stat_shape)
    actual_variance = reshape_mvn.variance()
    expected_matrix_stat_shape = new_batch_shape + [dims, dims]
    expected_covariance = tf.reshape(mvn.covariance(),
                                     expected_matrix_stat_shape)
    actual_covariance = reshape_mvn.covariance()
    [
        expected_entropy_,
        actual_entropy_,
        expected_mean_,
        actual_mean_,
        expected_mode_,
        actual_mode_,
        expected_stddev_,
        actual_stddev_,
        expected_variance_,
        actual_variance_,
        expected_covariance_,
        actual_covariance_,
    ] = self.evaluate([
        expected_entropy,
        actual_entropy,
        expected_mean,
        actual_mean,
        expected_mode,
        actual_mode,
        expected_stddev,
        actual_stddev,
        expected_variance,
        actual_variance,
        expected_covariance,
        actual_covariance,
    ])
    self.assertAllClose(expected_entropy_, actual_entropy_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_mean_, actual_mean_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_mode_, actual_mode_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_stddev_, actual_stddev_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_variance_, actual_variance_,
                        atol=0., rtol=1e-6)
    self.assertAllClose(expected_covariance_, actual_covariance_,
                        atol=0., rtol=1e-6)
    if not self.is_static_shape:
      return
    # Static-shape assertions only make sense in the static case.
    self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
    self.assertAllEqual(expected_vector_stat_shape, actual_mean.shape)
    self.assertAllEqual(expected_vector_stat_shape, actual_mode.shape)
    self.assertAllEqual(expected_vector_stat_shape, actual_stddev.shape)
    self.assertAllEqual(expected_vector_stat_shape, actual_variance.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_covariance.shape)
def test_bad_reshape_size(self):
dims = 2
new_batch_shape = [2, 3]
old_batch_shape = [2] # 2 != 2*3
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape or tf.executing_eagerly():
with self.assertRaisesRegexp(
ValueError, (r"`batch_shape` size \(6\) must match "
r"`distribution\.batch_shape` size \(2\)")):
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.assertRaisesOpError(r"Shape sizes do not match."):
self.evaluate(
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample())
def test_non_positive_shape(self):
dims = 2
old_batch_shape = [4]
if self.is_static_shape:
# Unknown first dimension does not trigger size check. Note that
# any dimension < 0 is treated statically as unknown.
new_batch_shape = [-1, 0]
else:
new_batch_shape = [-2, -2] # -2 * -2 = 4, same size as the old shape.
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape or tf.executing_eagerly():
with self.assertRaisesRegexp(ValueError, r".*must be >=(-1| 0).*"):
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.assertRaisesOpError(r".*must be >=(-1| 0).*"):
self.evaluate(
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample())
def test_non_vector_shape(self):
if tf.executing_eagerly():
# TODO(b/122840816): Modify this test so that it runs in eager mode or
# document that the test is not intended to run in eager mode.
return
dims = 2
new_batch_shape = 2
old_batch_shape = [2]
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(ValueError, r".*must be a vector.*"):
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.assertRaisesOpError(r".*must be a vector.*"):
self.evaluate(
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample())
def test_broadcasting_explicitly_unsupported(self):
old_batch_shape = [4]
new_batch_shape = [1, 4, 1]
rate_ = self.dtype([1, 10, 2, 20])
rate = tf.compat.v1.placeholder_with_default(
rate_, shape=old_batch_shape if self.is_static_shape else None)
poisson_4 = tfd.Poisson(rate)
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
poisson_141_reshaped = tfd.BatchReshape(
poisson_4, new_batch_shape_ph, validate_args=True)
x_4 = self.dtype([2, 12, 3, 23])
x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)
if self.is_static_shape or tf.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError,
"too few batch and event dims"):
poisson_141_reshaped.log_prob(x_4)
with self.assertRaisesRegexp(NotImplementedError,
"unexpected batch and event shape"):
poisson_141_reshaped.log_prob(x_114)
return
with self.assertRaisesOpError("too few batch and event dims"):
self.evaluate(poisson_141_reshaped.log_prob(x_4))
with self.assertRaisesOpError("unexpected batch and event shape"):
self.evaluate(poisson_141_reshaped.log_prob(x_114))
@test_util.run_all_in_graph_and_eager_modes
class BatchReshapeStaticTest(_BatchReshapeTest, tf.test.TestCase):
dtype = np.float32
is_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class BatchReshapeDynamicTest(_BatchReshapeTest, tf.test.TestCase):
dtype = np.float64
is_static_shape = False
if __name__ == "__main__":
tf.test.main()
| 36 | 95 | 0.68523 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class _BatchReshapeTest(object):
def make_wishart(self, dims, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = self.dtype([
[[1., 0.5],
[0.5, 1.]],
[[0.5, 0.25],
[0.25, 0.75]],
])
scale = np.reshape(np.concatenate([scale, scale], axis=0),
old_batch_shape + [dims, dims])
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
wishart = tfd.Wishart(df=5, scale=scale_ph)
reshape_wishart = tfd.BatchReshape(
distribution=wishart,
batch_shape=new_batch_shape_ph,
validate_args=True)
return wishart, reshape_wishart
def test_matrix_variate_sample_and_log_prob(self):
if tf.executing_eagerly():
return
dims = 2
seed = tfp_test_util.test_seed()
new_batch_shape = [4]
old_batch_shape = [2, 2]
wishart, reshape_wishart = self.make_wishart(
dims, new_batch_shape, old_batch_shape)
batch_shape = reshape_wishart.batch_shape_tensor()
event_shape = reshape_wishart.event_shape_tensor()
expected_sample_shape = [3, 1] + new_batch_shape + [dims, dims]
x = wishart.sample([3, 1], seed=seed)
expected_sample = tf.reshape(x, expected_sample_shape)
actual_sample = reshape_wishart.sample([3, 1], seed=seed)
expected_log_prob_shape = [3, 1] + new_batch_shape
expected_log_prob = tf.reshape(wishart.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_wishart.log_prob(expected_sample)
[
batch_shape_,
event_shape_,
expected_sample_,
actual_sample_,
expected_log_prob_,
actual_log_prob_,
] = self.evaluate([
batch_shape,
event_shape,
expected_sample,
actual_sample,
expected_log_prob,
actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([dims, dims], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_wishart.batch_shape)
self.assertAllEqual([dims, dims], reshape_wishart.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_matrix_variate_stats(self):
dims = 2
new_batch_shape = [4]
old_batch_shape = [2, 2]
wishart, reshape_wishart = self.make_wishart(
dims, new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_matrix_stat_shape = new_batch_shape + [dims, dims]
expected_entropy = tf.reshape(wishart.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_wishart.entropy()
expected_mean = tf.reshape(wishart.mean(), expected_matrix_stat_shape)
actual_mean = reshape_wishart.mean()
expected_mode = tf.reshape(wishart.mode(), expected_matrix_stat_shape)
actual_mode = reshape_wishart.mode()
expected_stddev = tf.reshape(wishart.stddev(), expected_matrix_stat_shape)
actual_stddev = reshape_wishart.stddev()
expected_variance = tf.reshape(wishart.variance(),
expected_matrix_stat_shape)
actual_variance = reshape_wishart.variance()
[
expected_entropy_,
actual_entropy_,
expected_mean_,
actual_mean_,
expected_mode_,
actual_mode_,
expected_stddev_,
actual_stddev_,
expected_variance_,
actual_variance_,
] = self.evaluate([
expected_entropy,
actual_entropy,
expected_mean,
actual_mean,
expected_mode,
actual_mode,
expected_stddev,
actual_stddev,
expected_variance,
actual_variance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_variance.shape)
def make_normal(self, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = self.dtype(0.5 + np.arange(
np.prod(old_batch_shape)).reshape(old_batch_shape))
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
normal = tfd.Normal(loc=self.dtype(0), scale=scale_ph)
reshape_normal = tfd.BatchReshape(
distribution=normal, batch_shape=new_batch_shape_ph, validate_args=True)
return normal, reshape_normal
def test_scalar_variate_sample_and_log_prob(self):
if tf.executing_eagerly():
return
seed = tfp_test_util.test_seed()
new_batch_shape = [2, 2]
old_batch_shape = [4]
normal, reshape_normal = self.make_normal(
new_batch_shape, old_batch_shape)
batch_shape = reshape_normal.batch_shape_tensor()
event_shape = reshape_normal.event_shape_tensor()
expected_sample_shape = new_batch_shape
x = normal.sample(seed=seed)
expected_sample = tf.reshape(x, expected_sample_shape)
actual_sample = reshape_normal.sample(seed=seed)
expected_log_prob_shape = new_batch_shape
expected_log_prob = tf.reshape(normal.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_normal.log_prob(expected_sample)
[
batch_shape_,
event_shape_,
expected_sample_,
actual_sample_,
expected_log_prob_,
actual_log_prob_,
] = self.evaluate([
batch_shape,
event_shape,
expected_sample,
actual_sample,
expected_log_prob,
actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_normal.batch_shape)
self.assertAllEqual([], reshape_normal.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_scalar_variate_stats(self):
new_batch_shape = [2, 2]
old_batch_shape = [4]
normal, reshape_normal = self.make_normal(new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_entropy = tf.reshape(normal.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_normal.entropy()
expected_mean = tf.reshape(normal.mean(), expected_scalar_stat_shape)
actual_mean = reshape_normal.mean()
expected_mode = tf.reshape(normal.mode(), expected_scalar_stat_shape)
actual_mode = reshape_normal.mode()
expected_stddev = tf.reshape(normal.stddev(), expected_scalar_stat_shape)
actual_stddev = reshape_normal.stddev()
expected_variance = tf.reshape(normal.variance(),
expected_scalar_stat_shape)
actual_variance = reshape_normal.variance()
[
expected_entropy_,
actual_entropy_,
expected_mean_,
actual_mean_,
expected_mode_,
actual_mode_,
expected_stddev_,
actual_stddev_,
expected_variance_,
actual_variance_,
] = self.evaluate([
expected_entropy,
actual_entropy,
expected_mean,
actual_mean,
expected_mode,
actual_mode,
expected_stddev,
actual_stddev,
expected_variance,
actual_variance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_variance.shape)
def make_mvn(self, dims, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)
reshape_mvn = tfd.BatchReshape(
distribution=mvn, batch_shape=new_batch_shape_ph, validate_args=True)
return mvn, reshape_mvn
def test_vector_variate_sample_and_log_prob(self):
if tf.executing_eagerly():
return
dims = 3
seed = tfp_test_util.test_seed()
new_batch_shape = [2, 1]
old_batch_shape = [2]
mvn, reshape_mvn = self.make_mvn(
dims, new_batch_shape, old_batch_shape)
batch_shape = reshape_mvn.batch_shape_tensor()
event_shape = reshape_mvn.event_shape_tensor()
expected_sample_shape = [3] + new_batch_shape + [dims]
x = mvn.sample(3, seed=seed)
expected_sample = tf.reshape(x, expected_sample_shape)
actual_sample = reshape_mvn.sample(3, seed=seed)
expected_log_prob_shape = [3] + new_batch_shape
expected_log_prob = tf.reshape(mvn.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_mvn.log_prob(expected_sample)
[
batch_shape_,
event_shape_,
expected_sample_,
actual_sample_,
expected_log_prob_,
actual_log_prob_,
] = self.evaluate([
batch_shape,
event_shape,
expected_sample,
actual_sample,
expected_log_prob,
actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([dims], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_mvn.batch_shape)
self.assertAllEqual([dims], reshape_mvn.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_vector_variate_stats(self):
dims = 3
new_batch_shape = [2, 1]
old_batch_shape = [2]
mvn, reshape_mvn = self.make_mvn(
dims, new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_entropy = tf.reshape(mvn.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_mvn.entropy()
expected_vector_stat_shape = new_batch_shape + [dims]
expected_mean = tf.reshape(mvn.mean(), expected_vector_stat_shape)
actual_mean = reshape_mvn.mean()
expected_mode = tf.reshape(mvn.mode(), expected_vector_stat_shape)
actual_mode = reshape_mvn.mode()
expected_stddev = tf.reshape(mvn.stddev(), expected_vector_stat_shape)
actual_stddev = reshape_mvn.stddev()
expected_variance = tf.reshape(mvn.variance(), expected_vector_stat_shape)
actual_variance = reshape_mvn.variance()
expected_matrix_stat_shape = new_batch_shape + [dims, dims]
expected_covariance = tf.reshape(mvn.covariance(),
expected_matrix_stat_shape)
actual_covariance = reshape_mvn.covariance()
[
expected_entropy_,
actual_entropy_,
expected_mean_,
actual_mean_,
expected_mode_,
actual_mode_,
expected_stddev_,
actual_stddev_,
expected_variance_,
actual_variance_,
expected_covariance_,
actual_covariance_,
] = self.evaluate([
expected_entropy,
actual_entropy,
expected_mean,
actual_mean,
expected_mode,
actual_mode,
expected_stddev,
actual_stddev,
expected_variance,
actual_variance,
expected_covariance,
actual_covariance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_covariance_, actual_covariance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_variance.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_covariance.shape)
def test_bad_reshape_size(self):
dims = 2
new_batch_shape = [2, 3]
old_batch_shape = [2]
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape or tf.executing_eagerly():
with self.assertRaisesRegexp(
ValueError, (r"`batch_shape` size \(6\) must match "
r"`distribution\.batch_shape` size \(2\)")):
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.assertRaisesOpError(r"Shape sizes do not match."):
self.evaluate(
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample())
def test_non_positive_shape(self):
dims = 2
old_batch_shape = [4]
if self.is_static_shape:
new_batch_shape = [-1, 0]
else:
new_batch_shape = [-2, -2]
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape or tf.executing_eagerly():
with self.assertRaisesRegexp(ValueError, r".*must be >=(-1| 0).*"):
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.assertRaisesOpError(r".*must be >=(-1| 0).*"):
self.evaluate(
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample())
def test_non_vector_shape(self):
if tf.executing_eagerly():
return
dims = 2
new_batch_shape = 2
old_batch_shape = [2]
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = tf.compat.v1.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(ValueError, r".*must be a vector.*"):
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.assertRaisesOpError(r".*must be a vector.*"):
self.evaluate(
tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample())
def test_broadcasting_explicitly_unsupported(self):
old_batch_shape = [4]
new_batch_shape = [1, 4, 1]
rate_ = self.dtype([1, 10, 2, 20])
rate = tf.compat.v1.placeholder_with_default(
rate_, shape=old_batch_shape if self.is_static_shape else None)
poisson_4 = tfd.Poisson(rate)
new_batch_shape_ph = (
tf.constant(np.int32(new_batch_shape))
if self.is_static_shape else tf.compat.v1.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
poisson_141_reshaped = tfd.BatchReshape(
poisson_4, new_batch_shape_ph, validate_args=True)
x_4 = self.dtype([2, 12, 3, 23])
x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)
if self.is_static_shape or tf.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError,
"too few batch and event dims"):
poisson_141_reshaped.log_prob(x_4)
with self.assertRaisesRegexp(NotImplementedError,
"unexpected batch and event shape"):
poisson_141_reshaped.log_prob(x_114)
return
with self.assertRaisesOpError("too few batch and event dims"):
self.evaluate(poisson_141_reshaped.log_prob(x_4))
with self.assertRaisesOpError("unexpected batch and event shape"):
self.evaluate(poisson_141_reshaped.log_prob(x_114))
@test_util.run_all_in_graph_and_eager_modes
class BatchReshapeStaticTest(_BatchReshapeTest, tf.test.TestCase):
dtype = np.float32
is_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class BatchReshapeDynamicTest(_BatchReshapeTest, tf.test.TestCase):
dtype = np.float64
is_static_shape = False
if __name__ == "__main__":
tf.test.main()
| true | true |
f7fc9acb3cf896cce563706f13ec251c84f6d097 | 991 | py | Python | setup.py | c4mb0t/django-setman | 6551e3f6367bf8ee7c8f91e893c9e8439428f28a | [
"BSD-3-Clause"
] | 1 | 2015-05-30T15:05:14.000Z | 2015-05-30T15:05:14.000Z | setup.py | c4mb0t/django-setman | 6551e3f6367bf8ee7c8f91e893c9e8439428f28a | [
"BSD-3-Clause"
] | null | null | null | setup.py | c4mb0t/django-setman | 6551e3f6367bf8ee7c8f91e893c9e8439428f28a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
from distutils.core import setup
DIRNAME = os.path.dirname(__file__)
readme = open(os.path.join(DIRNAME, 'README.rst'), 'r')
README = readme.read()
readme.close()
version = __import__('setman').get_version()
setup(
name='django-setman',
version=version,
description='Django settings manager. Another.',
long_description=README,
author='Igor Davydenko',
author_email='playpauseandstop@gmail.com',
maintainer='Igor Davydenko',
maintainer_email='playpauseandstop@gmail.com',
url='https://github.com/odeskps/django-setman',
packages=[
'setman',
'setman.management',
'setman.management.commands',
'setman.migrations',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
],
keywords='django settings manager',
license='BSD License',
)
| 23.595238 | 55 | 0.649849 |
import os
from distutils.core import setup
DIRNAME = os.path.dirname(__file__)
readme = open(os.path.join(DIRNAME, 'README.rst'), 'r')
README = readme.read()
readme.close()
version = __import__('setman').get_version()
setup(
name='django-setman',
version=version,
description='Django settings manager. Another.',
long_description=README,
author='Igor Davydenko',
author_email='playpauseandstop@gmail.com',
maintainer='Igor Davydenko',
maintainer_email='playpauseandstop@gmail.com',
url='https://github.com/odeskps/django-setman',
packages=[
'setman',
'setman.management',
'setman.management.commands',
'setman.migrations',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
],
keywords='django settings manager',
license='BSD License',
)
| true | true |
f7fc9b6e153681769d3d8dbaa847b67443661a46 | 4,909 | py | Python | src/SA.py | Potgront/ABM | 76fef2c7ded7e362ecf72fffd82512b9d7926700 | [
"BSD-3-Clause"
] | null | null | null | src/SA.py | Potgront/ABM | 76fef2c7ded7e362ecf72fffd82512b9d7926700 | [
"BSD-3-Clause"
] | null | null | null | src/SA.py | Potgront/ABM | 76fef2c7ded7e362ecf72fffd82512b9d7926700 | [
"BSD-3-Clause"
] | null | null | null | """
Script to perform sobol analysis of the model.
Modified from the example sobol analysis notebook on canvas.
The variable parameters are specified in the problem dictionary.
"""
from SALib.sample import saltelli
from SALib.analyze import sobol
from mesa.batchrunner import BatchRunnerMP
from modelgrid import *
from IPython.display import clear_output
from tqdm import tqdm
import matplotlib.pyplot as plt
from itertools import combinations
from matplotlib import rcParams
def plot_index(s, params, i, title=''):
"""
Creates a plot for Sobol sensitivity analysis that shows the contributions
of each parameter to the global sensitivity.
Args:
s (dict): dictionary {'S#': dict, 'S#_conf': dict} of dicts that hold
the values for a set of parameters
params (list): the parameters taken from s
i (str): string that indicates what order the sensitivity is.
title (str): title for the plot
"""
if i == '2':
p = len(params)
params = list(combinations(params, 2))
indices = s['S' + i].reshape((p ** 2))
indices = indices[~np.isnan(indices)]
errors = s['S' + i + '_conf'].reshape((p ** 2))
errors = errors[~np.isnan(errors)]
else:
indices = s['S' + i]
errors = s['S' + i + '_conf']
plt.figure()
l = len(indices)
plt.title(title)
plt.ylim([-0.2, len(indices) - 1 + 0.2])
plt.yticks(range(l), params)
plt.errorbar(indices, range(l), xerr=errors, linestyle='None', marker='o')
plt.axvline(0, c='k')
fig = plt.gcf()
fig.set_size_inches(4, 3)
if __name__ == '__main__':
import seaborn as sns
sns.set()
rcParams.update({'figure.autolayout': True})
# Variable parameters
problem = {'num_vars': 3,
'names': ['spawn', 'agression', 'min_gap'],
'bounds': [[0.3, 0.8], [0.2, 0.8], [0.5, 2.0]]}
replicates = 5
max_steps = 5000
distinct_samples = 5
param_values = saltelli.sample(problem, distinct_samples)
model_reporters={'Final_avg_speed': avg_speed,
'Final_Cars_in_lane': cars_in_lane,
'Data Collector': lambda m: m.datacollector,
'Total_Avg_speed': avg_speed,
'Total_Cars_in_lane': cars_in_lane,
'Variance_speed': avg_speed,
'Variance_car': cars_in_lane}
batch = BatchRunnerMP(RoadSim,
nr_processes=8,
max_steps=max_steps,
variable_parameters={name:[] for name in problem['names']},
model_reporters=model_reporters)
count = 0
for i in tqdm(range(replicates)):
for vals in tqdm(param_values):
vals = list(vals)
variable_parameters = {}
for name, val in zip(problem['names'], vals):
variable_parameters[name] = val
batch.run_iteration(variable_parameters, tuple(vals), count)
count += 1
data_original = batch.get_model_vars_dataframe()
data = data_original.copy()
print(data.shape)
for i in tqdm(range(len(data["Data Collector"]))):
if isinstance(data["Data Collector"][i], DataCollector):
data_speed = data["Data Collector"][i].get_model_vars_dataframe()['Avg_speed']
data_cars = data["Data Collector"][i].get_model_vars_dataframe()['Cars_in_lane']
tenproc = int(0.2 * (len(data_speed)))
data['Total_Avg_speed'][i] = np.average(data_speed[tenproc:])
data['Total_Cars_in_lane'][i] = np.average(data_cars[tenproc:])
data['Variance_speed'][i] = np.var(data_speed[tenproc:])
data['Variance_car'][i] = np.var(data_cars[tenproc:])
data.to_csv('Sobol_result.csv', sep=',', index=False)
print(data)
Si_Speed = sobol.analyze(problem, data['Total_Avg_speed'].as_matrix(), print_to_console=False)
print("\n")
Si_Cars = sobol.analyze(problem, data['Total_Cars_in_lane'].as_matrix(), print_to_console=False)
typename = ["Average_speed", "Number_of_cars"]
for i, Si in enumerate((Si_Speed, Si_Cars)):
# First order
plot_index(Si, problem['names'], '1', 'First order sensitivity - ' + typename[i])
plt.savefig('plots/First_order_sensitivity_|_' + typename[i] + '.png')
plt.clf()
# Second order
plot_index(Si, problem['names'], '2', 'Second order sensitivity - ' + typename[i])
plt.savefig('plots/Second_order_sensitivity_|_' + typename[i] + '.png')
plt.clf()
# Total order
plot_index(Si, problem['names'], 'T', 'Total order sensitivity - ' + typename[i])
plt.savefig('plots/Total_order_sensitivity_|_' + typename[i] + '.png')
plt.clf()
| 36.634328 | 100 | 0.599919 | from SALib.sample import saltelli
from SALib.analyze import sobol
from mesa.batchrunner import BatchRunnerMP
from modelgrid import *
from IPython.display import clear_output
from tqdm import tqdm
import matplotlib.pyplot as plt
from itertools import combinations
from matplotlib import rcParams
def plot_index(s, params, i, title=''):
if i == '2':
p = len(params)
params = list(combinations(params, 2))
indices = s['S' + i].reshape((p ** 2))
indices = indices[~np.isnan(indices)]
errors = s['S' + i + '_conf'].reshape((p ** 2))
errors = errors[~np.isnan(errors)]
else:
indices = s['S' + i]
errors = s['S' + i + '_conf']
plt.figure()
l = len(indices)
plt.title(title)
plt.ylim([-0.2, len(indices) - 1 + 0.2])
plt.yticks(range(l), params)
plt.errorbar(indices, range(l), xerr=errors, linestyle='None', marker='o')
plt.axvline(0, c='k')
fig = plt.gcf()
fig.set_size_inches(4, 3)
if __name__ == '__main__':
import seaborn as sns
sns.set()
rcParams.update({'figure.autolayout': True})
problem = {'num_vars': 3,
'names': ['spawn', 'agression', 'min_gap'],
'bounds': [[0.3, 0.8], [0.2, 0.8], [0.5, 2.0]]}
replicates = 5
max_steps = 5000
distinct_samples = 5
param_values = saltelli.sample(problem, distinct_samples)
model_reporters={'Final_avg_speed': avg_speed,
'Final_Cars_in_lane': cars_in_lane,
'Data Collector': lambda m: m.datacollector,
'Total_Avg_speed': avg_speed,
'Total_Cars_in_lane': cars_in_lane,
'Variance_speed': avg_speed,
'Variance_car': cars_in_lane}
batch = BatchRunnerMP(RoadSim,
nr_processes=8,
max_steps=max_steps,
variable_parameters={name:[] for name in problem['names']},
model_reporters=model_reporters)
count = 0
for i in tqdm(range(replicates)):
for vals in tqdm(param_values):
vals = list(vals)
variable_parameters = {}
for name, val in zip(problem['names'], vals):
variable_parameters[name] = val
batch.run_iteration(variable_parameters, tuple(vals), count)
count += 1
data_original = batch.get_model_vars_dataframe()
data = data_original.copy()
print(data.shape)
for i in tqdm(range(len(data["Data Collector"]))):
if isinstance(data["Data Collector"][i], DataCollector):
data_speed = data["Data Collector"][i].get_model_vars_dataframe()['Avg_speed']
data_cars = data["Data Collector"][i].get_model_vars_dataframe()['Cars_in_lane']
tenproc = int(0.2 * (len(data_speed)))
data['Total_Avg_speed'][i] = np.average(data_speed[tenproc:])
data['Total_Cars_in_lane'][i] = np.average(data_cars[tenproc:])
data['Variance_speed'][i] = np.var(data_speed[tenproc:])
data['Variance_car'][i] = np.var(data_cars[tenproc:])
data.to_csv('Sobol_result.csv', sep=',', index=False)
print(data)
Si_Speed = sobol.analyze(problem, data['Total_Avg_speed'].as_matrix(), print_to_console=False)
print("\n")
Si_Cars = sobol.analyze(problem, data['Total_Cars_in_lane'].as_matrix(), print_to_console=False)
typename = ["Average_speed", "Number_of_cars"]
for i, Si in enumerate((Si_Speed, Si_Cars)):
plot_index(Si, problem['names'], '1', 'First order sensitivity - ' + typename[i])
plt.savefig('plots/First_order_sensitivity_|_' + typename[i] + '.png')
plt.clf()
plot_index(Si, problem['names'], '2', 'Second order sensitivity - ' + typename[i])
plt.savefig('plots/Second_order_sensitivity_|_' + typename[i] + '.png')
plt.clf()
plot_index(Si, problem['names'], 'T', 'Total order sensitivity - ' + typename[i])
plt.savefig('plots/Total_order_sensitivity_|_' + typename[i] + '.png')
plt.clf()
| true | true |
f7fc9b83187d5e2fb23503516327381f9e5864bc | 397 | py | Python | others/typical90/061.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | others/typical90/061.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | others/typical90/061.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | from collections import deque
def main():
    """Run deque queries read from stdin.

    Query forms: ``1 x`` pushes x to the front, ``2 x`` pushes x to the
    back, and ``3 x`` prints the x-th (1-indexed) element of the deque.
    """
    num_queries = int(input())
    queries = [tuple(map(int, input().split())) for _ in range(num_queries)]

    dq = deque()
    for op, val in queries:
        if op == 1:
            dq.appendleft(val)
        elif op == 2:
            dq.append(val)
        else:
            print(dq[val - 1])


if __name__ == '__main__':
    main()
| 16.541667 | 58 | 0.460957 | from collections import deque
def main():
    """Process deque queries from stdin.

    Query types: ``1 x`` -> push x to the front, ``2 x`` -> push x to the
    back, ``3 x`` -> print the x-th element (1-indexed) of the deque.
    """
    Q = int(input())  # number of queries
    txs = [[*map(int, input().split())] for _ in range(Q)]
    deq = deque()
    for tx in txs:
        t, x = tx
        if t == 1:
            deq.appendleft(x)
        elif t == 2:
            deq.append(x)
        else:
            # Queries are 1-indexed; deque supports O(n) positional access.
            print(deq[x-1])
if __name__ == '__main__':
    main()
| true | true |
f7fc9bf5a14ce8ee47be86a514a8516d8b951b9a | 8,267 | py | Python | sdk/python/pulumi_digitalocean/dns_record.py | andrewsomething/pulumi-digitalocean | 0b481e2cbf6084e69789b288870a574ef2a801fc | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_digitalocean/dns_record.py | andrewsomething/pulumi-digitalocean | 0b481e2cbf6084e69789b288870a574ef2a801fc | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_digitalocean/dns_record.py | andrewsomething/pulumi-digitalocean | 0b481e2cbf6084e69789b288870a574ef2a801fc | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class DnsRecord(pulumi.CustomResource):
    """A DigitalOcean DNS record resource (auto-generated Pulumi model)."""
    domain: pulumi.Output[str]
    """
    The domain to add the record to.
    """
    flags: pulumi.Output[float]
    """
    The flags of the record. Only valid when type is `CAA`. Must be between 0 and 255.
    """
    fqdn: pulumi.Output[str]
    """
    The FQDN of the record
    """
    name: pulumi.Output[str]
    """
    The name of the record. Use `@` for records on domain's name itself.
    """
    port: pulumi.Output[float]
    """
    The port of the record. Only valid when type is `SRV`. Must be between 1 and 65535.
    """
    priority: pulumi.Output[float]
    """
    The priority of the record. Only valid when type is `MX` or `SRV`. Must be between 0 and 65535.
    """
    tag: pulumi.Output[str]
    """
    The tag of the record. Only valid when type is `CAA`. Must be one of `issue`, `issuewild`, or `iodef`.
    """
    ttl: pulumi.Output[float]
    """
    The time to live for the record, in seconds. Must be at least 0.
    """
    type: pulumi.Output[str]
    """
    The type of record. Must be one of `A`, `AAAA`, `CAA`, `CNAME`, `MX`, `NS`, `TXT`, or `SRV`.
    """
    value: pulumi.Output[str]
    """
    The value of the record.
    """
    weight: pulumi.Output[float]
    """
    The weight of the record. Only valid when type is `SRV`. Must be between 0 and 65535.
    """
    def __init__(__self__, resource_name, opts=None, domain=None, flags=None, name=None, port=None, priority=None, tag=None, ttl=None, type=None, value=None, weight=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a DigitalOcean DNS record resource.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_digitalocean as digitalocean

        default = digitalocean.Domain("default", name="example.com")
        # Add an A record to the domain for www.example.com.
        www = digitalocean.DnsRecord("www",
            domain=default.name,
            type="A",
            value="192.168.0.11")
        # Add a MX record for the example.com domain itself.
        mx = digitalocean.DnsRecord("mx",
            domain=default.name,
            type="MX",
            priority=10,
            value="mail.example.com.")
        pulumi.export("wwwFqdn", www.fqdn)
        pulumi.export("mxFqdn", mx.fqdn)
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] domain: The domain to add the record to.
        :param pulumi.Input[float] flags: The flags of the record. Only valid when type is `CAA`. Must be between 0 and 255.
        :param pulumi.Input[str] name: The name of the record. Use `@` for records on domain's name itself.
        :param pulumi.Input[float] port: The port of the record. Only valid when type is `SRV`. Must be between 1 and 65535.
        :param pulumi.Input[float] priority: The priority of the record. Only valid when type is `MX` or `SRV`. Must be between 0 and 65535.
        :param pulumi.Input[str] tag: The tag of the record. Only valid when type is `CAA`. Must be one of `issue`, `issuewild`, or `iodef`.
        :param pulumi.Input[float] ttl: The time to live for the record, in seconds. Must be at least 0.
        :param pulumi.Input[str] type: The type of record. Must be one of `A`, `AAAA`, `CAA`, `CNAME`, `MX`, `NS`, `TXT`, or `SRV`.
        :param pulumi.Input[str] value: The value of the record.
        :param pulumi.Input[float] weight: The weight of the record. Only valid when type is `SRV`. Must be between 0 and 65535.
        """
        # Legacy shim: __name__/__opts__ are deprecated aliases for
        # resource_name/opts kept for backward compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            # No id given: we are creating a new resource, so build the
            # input property bag and validate required inputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if domain is None:
                raise TypeError("Missing required property 'domain'")
            __props__['domain'] = domain
            __props__['flags'] = flags
            __props__['name'] = name
            __props__['port'] = port
            __props__['priority'] = priority
            __props__['tag'] = tag
            __props__['ttl'] = ttl
            if type is None:
                raise TypeError("Missing required property 'type'")
            __props__['type'] = type
            if value is None:
                raise TypeError("Missing required property 'value'")
            __props__['value'] = value
            __props__['weight'] = weight
            # fqdn is an output-only property computed by the provider.
            __props__['fqdn'] = None
        super(DnsRecord, __self__).__init__(
            'digitalocean:index/dnsRecord:DnsRecord',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, domain=None, flags=None, fqdn=None, name=None, port=None, priority=None, tag=None, ttl=None, type=None, value=None, weight=None):
        """
        Get an existing DnsRecord resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] domain: The domain to add the record to.
        :param pulumi.Input[float] flags: The flags of the record. Only valid when type is `CAA`. Must be between 0 and 255.
        :param pulumi.Input[str] fqdn: The FQDN of the record
        :param pulumi.Input[str] name: The name of the record. Use `@` for records on domain's name itself.
        :param pulumi.Input[float] port: The port of the record. Only valid when type is `SRV`. Must be between 1 and 65535.
        :param pulumi.Input[float] priority: The priority of the record. Only valid when type is `MX` or `SRV`. Must be between 0 and 65535.
        :param pulumi.Input[str] tag: The tag of the record. Only valid when type is `CAA`. Must be one of `issue`, `issuewild`, or `iodef`.
        :param pulumi.Input[float] ttl: The time to live for the record, in seconds. Must be at least 0.
        :param pulumi.Input[str] type: The type of record. Must be one of `A`, `AAAA`, `CAA`, `CNAME`, `MX`, `NS`, `TXT`, or `SRV`.
        :param pulumi.Input[str] value: The value of the record.
        :param pulumi.Input[float] weight: The weight of the record. Only valid when type is `SRV`. Must be between 0 and 65535.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["domain"] = domain
        __props__["flags"] = flags
        __props__["fqdn"] = fqdn
        __props__["name"] = name
        __props__["port"] = port
        __props__["priority"] = priority
        __props__["tag"] = tag
        __props__["ttl"] = ttl
        __props__["type"] = type
        __props__["value"] = value
        __props__["weight"] = weight
        return DnsRecord(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.423077 | 217 | 0.627676 |
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class DnsRecord(pulumi.CustomResource):
    """A DigitalOcean DNS record resource (auto-generated Pulumi model)."""
    # Output properties resolved by the provider after create/read.
    domain: pulumi.Output[str]
    flags: pulumi.Output[float]
    fqdn: pulumi.Output[str]
    name: pulumi.Output[str]
    port: pulumi.Output[float]
    priority: pulumi.Output[float]
    tag: pulumi.Output[str]
    ttl: pulumi.Output[float]
    type: pulumi.Output[str]
    value: pulumi.Output[str]
    weight: pulumi.Output[float]
    def __init__(__self__, resource_name, opts=None, domain=None, flags=None, name=None, port=None, priority=None, tag=None, ttl=None, type=None, value=None, weight=None, __props__=None, __name__=None, __opts__=None):
        """Create a DnsRecord resource.

        `domain`, `type` and `value` are required inputs; the remaining
        record fields are optional and type-dependent.
        """
        # Legacy shim: __name__/__opts__ are deprecated aliases for
        # resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            # No id given: creating a new resource, so build and validate
            # the input property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if domain is None:
                raise TypeError("Missing required property 'domain'")
            __props__['domain'] = domain
            __props__['flags'] = flags
            __props__['name'] = name
            __props__['port'] = port
            __props__['priority'] = priority
            __props__['tag'] = tag
            __props__['ttl'] = ttl
            if type is None:
                raise TypeError("Missing required property 'type'")
            __props__['type'] = type
            if value is None:
                raise TypeError("Missing required property 'value'")
            __props__['value'] = value
            __props__['weight'] = weight
            # fqdn is an output-only property computed by the provider.
            __props__['fqdn'] = None
        super(DnsRecord, __self__).__init__(
            'digitalocean:index/dnsRecord:DnsRecord',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, domain=None, flags=None, fqdn=None, name=None, port=None, priority=None, tag=None, ttl=None, type=None, value=None, weight=None):
        """Look up an existing DnsRecord by provider id, optionally
        qualified by extra property values."""
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["domain"] = domain
        __props__["flags"] = flags
        __props__["fqdn"] = fqdn
        __props__["name"] = name
        __props__["port"] = port
        __props__["priority"] = priority
        __props__["tag"] = tag
        __props__["ttl"] = ttl
        __props__["type"] = type
        __props__["value"] = value
        __props__["weight"] = weight
        return DnsRecord(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case property names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7fc9df83f09b91adb2b7b7112fe7efeeb5c5128 | 6,627 | py | Python | argo/workflows/client/models/v1alpha1_workflow_list.py | fvdnabee/argo-client-python-1 | 4264e063e31865c55418e0b242dd21ba6d19ed64 | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1alpha1_workflow_list.py | fvdnabee/argo-client-python-1 | 4264e063e31865c55418e0b242dd21ba6d19ed64 | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1alpha1_workflow_list.py | fvdnabee/argo-client-python-1 | 4264e063e31865c55418e0b242dd21ba6d19ed64 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Argo
Python client for Argo Workflows # noqa: E501
OpenAPI spec version: 2.5.0-rc10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.models import V1ListMeta
class V1alpha1WorkflowList(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> declared python type, used by to_dict().
    swagger_types = {
        'api_version': 'str',
        'items': 'list[V1alpha1Workflow]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None):  # noqa: E501
        """V1alpha1WorkflowList - a model defined in Swagger"""  # noqa: E501
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None
        if api_version is not None:
            self.api_version = api_version
        # items and metadata are required; their setters reject None.
        self.items = items
        if kind is not None:
            self.kind = kind
        self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1alpha1WorkflowList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1alpha1WorkflowList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha1WorkflowList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1alpha1WorkflowList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1alpha1WorkflowList.  # noqa: E501


        :return: The items of this V1alpha1WorkflowList.  # noqa: E501
        :rtype: list[V1alpha1Workflow]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1alpha1WorkflowList.


        :param items: The items of this V1alpha1WorkflowList.  # noqa: E501
        :type: list[V1alpha1Workflow]
        """
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1alpha1WorkflowList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1alpha1WorkflowList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha1WorkflowList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1alpha1WorkflowList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1alpha1WorkflowList.  # noqa: E501


        :return: The metadata of this V1alpha1WorkflowList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha1WorkflowList.


        :param metadata: The metadata of this V1alpha1WorkflowList.  # noqa: E501
        :type: V1ListMeta
        """
        if metadata is None:
            raise ValueError("Invalid value for `metadata`, must not be `None`")  # noqa: E501

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models/containers to plain values.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: unreachable since this model does not
        # subclass dict.
        if issubclass(V1alpha1WorkflowList, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha1WorkflowList):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.970149 | 312 | 0.618228 |
import pprint
import re
import six
from kubernetes.client.models import V1ListMeta
class V1alpha1WorkflowList(object):
    """Swagger-generated model for a list of Argo Workflows."""
    # attribute name -> declared python type, used by to_dict().
    swagger_types = {
        'api_version': 'str',
        'items': 'list[V1alpha1Workflow]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }
    # attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }
    def __init__(self, api_version=None, items=None, kind=None, metadata=None):
        """V1alpha1WorkflowList - a model defined in Swagger."""
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None
        if api_version is not None:
            self.api_version = api_version
        # items and metadata are required; their setters reject None.
        self.items = items
        if kind is not None:
            self.kind = kind
        self.metadata = metadata
    @property
    def api_version(self):
        """Gets the api_version of this V1alpha1WorkflowList."""
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha1WorkflowList."""
        self._api_version = api_version
    @property
    def items(self):
        """Gets the items (list[V1alpha1Workflow]) of this list."""
        return self._items
    @items.setter
    def items(self, items):
        """Sets the items of this V1alpha1WorkflowList; must not be None."""
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")
        self._items = items
    @property
    def kind(self):
        """Gets the kind of this V1alpha1WorkflowList."""
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha1WorkflowList."""
        self._kind = kind
    @property
    def metadata(self):
        """Gets the metadata (V1ListMeta) of this V1alpha1WorkflowList."""
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this list; must not be None."""
        if metadata is None:
            raise ValueError("Invalid value for `metadata`, must not be `None`")
        self._metadata = metadata
    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        # Recursively convert nested models/containers to plain values.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: unreachable since this model does not
        # subclass dict.
        if issubclass(V1alpha1WorkflowList, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, V1alpha1WorkflowList):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| true | true |
f7fc9e4e2112a5a70bcb9439c71a6b04f66e3cc3 | 1,209 | py | Python | smallinvoice/tests/accounts_tests.py | dreipol/smallinvoice | d56850c70542d92ca23385876ecb02f20b620077 | [
"MIT"
] | 1 | 2016-06-09T18:45:47.000Z | 2016-06-09T18:45:47.000Z | smallinvoice/tests/accounts_tests.py | dreipol/smallinvoice | d56850c70542d92ca23385876ecb02f20b620077 | [
"MIT"
] | null | null | null | smallinvoice/tests/accounts_tests.py | dreipol/smallinvoice | d56850c70542d92ca23385876ecb02f20b620077 | [
"MIT"
] | null | null | null | # coding=utf-8
import unittest
from smallinvoice.accounts import Account
from smallinvoice.tests import get_smallinvoice
def generate_account():
    """Return a throwaway Account fixture populated with fixed test values."""
    fixture = dict(
        title='Testaccount',
        institute='Familie Test',
        number='Number123',
        iban='Iban123',
        swiftbic='Swift123',
        clearing='clearing123',
        postaccount='postaccount123',
        lsv=0,
        dd=0,
        esr=1,
    )
    return Account(**fixture)
class AccountTests(unittest.TestCase):
    """Integration tests for the smallinvoice accounts endpoint.

    NOTE(review): these tests talk to the live service via
    get_smallinvoice(); each test creates an account in setUp and deletes
    it again in tearDown.
    """
    def setUp(self):
        # Create a fresh account on the remote service for each test.
        self.a = generate_account()
        self.account_id = get_smallinvoice().accounts.add(self.a)
    def tearDown(self):
        # Remove the account created in setUp so reruns stay clean.
        get_smallinvoice().accounts.delete(self.account_id)
    def test_account_tests(self):
        self.assertIsNotNone(self.account_id)
    def test_account_add(self):
        self.assertTrue(self.account_id)
    def test_account_details(self):
        # NOTE(review): asserts against the local fixture object rather than
        # data fetched back from the API -- confirm this is intended.
        self.assertEqual(self.a.institute, 'Familie Test')
    def test_account_update(self):
        # NOTE(review): only mutates the local object and never calls the
        # update endpoint, so the API update path is not exercised -- confirm.
        self.assertEqual(self.a.institute, 'Familie Test')
        self.a.institute = 'Test Change'
        self.assertEqual(self.a.institute, 'Test Change')
| 29.487805 | 65 | 0.616212 |
import unittest
from smallinvoice.accounts import Account
from smallinvoice.tests import get_smallinvoice
def generate_account():
    """Return a throwaway Account fixture populated with fixed test values."""
    return Account(title='Testaccount',
                   institute='Familie Test',
                   number='Number123',
                   iban='Iban123',
                   swiftbic='Swift123',
                   clearing='clearing123',
                   postaccount='postaccount123',
                   lsv=0,
                   dd=0,
                   esr=1)
class AccountTests(unittest.TestCase):
    """Integration tests for the smallinvoice accounts endpoint.

    NOTE(review): hits the live API via get_smallinvoice(); an account is
    created per test in setUp and deleted in tearDown.
    """
    def setUp(self):
        # Create a fresh account on the remote service for each test.
        self.a = generate_account()
        self.account_id = get_smallinvoice().accounts.add(self.a)
    def tearDown(self):
        # Remove the account created in setUp so reruns stay clean.
        get_smallinvoice().accounts.delete(self.account_id)
    def test_account_tests(self):
        self.assertIsNotNone(self.account_id)
    def test_account_add(self):
        self.assertTrue(self.account_id)
    def test_account_details(self):
        # NOTE(review): checks the local fixture object, not API data.
        self.assertEqual(self.a.institute, 'Familie Test')
    def test_account_update(self):
        # NOTE(review): never calls the update endpoint; local-only check.
        self.assertEqual(self.a.institute, 'Familie Test')
        self.a.institute = 'Test Change'
        self.assertEqual(self.a.institute, 'Test Change')
| true | true |
f7fc9f4bcfd7c4307d32c9e7bea0a0e0cfaab8bc | 17,079 | py | Python | src/nglui/easyviewer/base.py | seung-lab/NeuroglancerAnnotationUI | 0429b93330b7aa83f4c591564978b3ceb9229374 | [
"Apache-2.0"
] | 7 | 2018-09-04T23:19:02.000Z | 2022-03-09T13:48:53.000Z | src/nglui/easyviewer/base.py | seung-lab/NeuroglancerAnnotationUI | 0429b93330b7aa83f4c591564978b3ceb9229374 | [
"Apache-2.0"
] | 7 | 2019-03-08T18:55:51.000Z | 2020-09-04T19:11:30.000Z | src/nglui/easyviewer/base.py | seung-lab/NeuroglancerAnnotationUI | 0429b93330b7aa83f4c591564978b3ceb9229374 | [
"Apache-2.0"
] | null | null | null | from .. import nglite as neuroglancer
from . import annotation, utils
from numpy import issubdtype, integer, uint64, vstack
from collections import OrderedDict
import copy
import re
SEGMENTATION_LAYER_TYPES = ["segmentation", "segmentation_with_graph"]
class EasyViewer(neuroglancer.Viewer):
"""
Extends the neuroglancer Viewer object to make simple operations simple.
"""
def __init__(self):
    """Initialize by delegating to the underlying neuroglancer Viewer."""
    super().__init__()
def __repr__(self):
    # The viewer's repr is its shareable state URL.
    return self.as_url()
def _repr_html_(self):
    """Render the viewer as a clickable link in Jupyter/IPython output."""
    return f'<a href="{self.as_url()}" target="_blank">Viewer</a>'
def load_url(self, url):
    """Parse a neuroglancer-compatible URL and replace the viewer state.

    Attributes:
        url (str): neuroglancer url
    """
    self.set_state(neuroglancer.parse_url(url))
@staticmethod
def _smart_add_segmentation_layer(s, layer_name, source, **kwargs):
    """Pick the segmentation layer class based on the source protocol.

    graphene:// sources become a ChunkedgraphSegmentationLayer and
    precomputed:// sources a plain SegmentationLayer. Any other protocol
    is silently ignored, matching the previous behavior.
    """
    if source.startswith("graphene://"):
        s.layers[layer_name] = neuroglancer.ChunkedgraphSegmentationLayer(
            source=source, **kwargs
        )
    elif source.startswith("precomputed://"):
        s.layers[layer_name] = neuroglancer.SegmentationLayer(
            source=source, **kwargs
        )
def add_layers(
    self,
    image_layers={},
    segmentation_layers={},
    annotation_layers={},
    resolution=None,
):
    """Add several layers of each kind in a single state transaction.

    Each *_layers argument maps a layer name to the keyword arguments for
    that layer type. The mutable-default dicts are kept for interface
    compatibility; they are only read, never mutated.
    """
    with self.txn() as s:
        for name, layer_kwargs in image_layers.items():
            s.layers[name] = neuroglancer.ImageLayer(**layer_kwargs)
        for layer_kwargs in segmentation_layers.values():
            self._smart_add_segmentation_layer(s, **layer_kwargs)
        for name, layer_kwargs in annotation_layers.items():
            s.layers[name] = neuroglancer.AnnotationLayer(**layer_kwargs)
        if resolution is not None:
            s.voxel_size = resolution
def add_segmentation_layer(self, layer_name, source, **kwargs):
    """Add segmentation layer to viewer instance.

    The layer class is chosen from the source protocol (graphene vs
    precomputed); see _smart_add_segmentation_layer.

    Attributes:
        layer_name (str): name of layer to be displayed in neuroglancer ui.
        source (str): source of neuroglancer segment layer
    """
    with self.txn() as s:
        self._smart_add_segmentation_layer(
            s, layer_name=layer_name, source=source, **kwargs
        )
def add_image_layer(self, layer_name, source, **kwargs):
    """Add an image layer to the viewer instance.

    Attributes:
        layer_name (str): name of layer to be displayed in neuroglancer ui.
        source (str): source of neuroglancer image layer
    """
    with self.txn() as s:
        s.layers[layer_name] = neuroglancer.ImageLayer(source=source, **kwargs)
def set_resolution(self, resolution):
    """Set the viewer's voxel size (sequence of per-axis voxel dimensions)."""
    with self.txn() as s:
        s.voxel_size = resolution
def add_contrast_shader(self, layer_name, black=0.0, white=1.0):
    """Attach a grayscale contrast shader with black/white slider controls.

    Values below `black` render as black, values above `white` as white,
    and the range in between is linearly rescaled.
    """
    shader_text = f"#uicontrol float black slider(min=0, max=1, default={black})\n#uicontrol float white slider(min=0, max=1, default={white})\nfloat rescale(float value) {{\n return (value - black) / (white - black);\n}}\nvoid main() {{\n float val = toNormalized(getDataValue());\n if (val < black) {{\n emitRGB(vec3(0,0,0));\n }} else if (val > white) {{\n emitRGB(vec3(1.0, 1.0, 1.0));\n }} else {{\n emitGrayscale(rescale(val));\n }}\n}}\n"
    self._update_layer_shader(layer_name, shader_text)
def _update_layer_shader(self, layer_name, shader_text):
    """Set the GLSL shader source of a layer (written into the raw JSON state)."""
    with self.txn() as s:
        s.layers[layer_name]._json_data["shader"] = shader_text
def set_state_server(self, state_server):
    """Point the viewer at a JSON state server used for sharing state."""
    with self.txn() as s:
        s._json_data["jsonStateServer"] = state_server
def add_annotation_layer(
    self,
    layer_name=None,
    color=None,
    linked_segmentation_layer=None,
    filter_by_segmentation=False,
    brackets_show_segmentation=True,
    selection_shows_segmentation=True,
    tags=None,
):
    """Add annotation layer to the viewer instance.

    Attributes:
        layer_name (str): Name of layer to be created; defaults to 'annos'.
            If a layer with this name already exists, this is a no-op.
        color: Annotation color for the layer, optional.
        linked_segmentation_layer (str): Name of a segmentation layer that
            annotations can link object ids to, optional.
        filter_by_segmentation (bool): Show only annotations whose linked
            segments are selected. Forced to None when no linked layer is set.
        brackets_show_segmentation (bool): Passed through to the layer.
        selection_shows_segmentation (bool): Passed through to the layer.
        tags (list): Optional tag labels to register on the new layer.
    """
    if layer_name is None:
        layer_name = "annos"
    # Creating a layer that already exists would clobber it; bail out.
    if layer_name in [l.name for l in self.state.layers]:
        return
    if linked_segmentation_layer is None:
        filter_by_segmentation = None
    with self.txn() as s:
        new_layer = neuroglancer.AnnotationLayer(
            linked_segmentation_layer=linked_segmentation_layer,
            filter_by_segmentation=filter_by_segmentation,
            brackets_show_segmentation=brackets_show_segmentation,
            selection_shows_segmentation=selection_shows_segmentation,
        )
        s.layers.append(name=layer_name, layer=new_layer)
        if color is not None:
            s.layers[layer_name].annotationColor = color
    # Tags are added in a separate transaction inside add_annotation_tags.
    if tags is not None:
        self.add_annotation_tags(layer_name=layer_name, tags=tags)
def set_annotation_layer_color(self, layer_name, color):
    """Set the annotation color of an existing layer; no-op if it doesn't exist."""
    if layer_name not in self.layer_names:
        return
    with self.txn() as s:
        s.layers[layer_name].annotationColor = color
def clear_annotation_layers(self, layer_names):
    """Remove all annotations from each of the named annotation layers."""
    with self.txn() as s:
        for ln in layer_names:
            # Reset the underlying annotation list wholesale.
            s.layers[ln].annotations._data = []
def set_annotation_one_shot(self, ln_anno_dict):
    """
    ln_anno_dict is a layer_name to annotation list dict.
    """
    # Replace each layer's annotation list wholesale in one transaction,
    # which is much faster than appending annotations individually.
    with self.txn() as s:
        for ln, annos in ln_anno_dict.items():
            s.layers[ln].annotations._data = annos
def add_annotations(self, layer_name, annotations):
    """Append annotation objects to an existing annotation layer.

    Attributes:
        layer_name (str): name of the annotation layer; it must already
            exist (this method does not create it).
        annotations (list): annotation objects to append.
    """
    with self.txn() as s:
        for anno in annotations:
            s.layers[layer_name].annotations.append(anno)
def remove_annotations(self, layer_name, anno_ids):
    """Remove annotations from a layer by their ids.

    Attributes:
        layer_name (str): name of the annotation layer.
        anno_ids (str or list): annotation id(s) to remove.

    On any failure, posts "Could not remove annotation" to the status bar.
    """
    if isinstance(anno_ids, str):
        anno_ids = [anno_ids]
    # Work on a local set so the caller's list is never mutated (the
    # previous version consumed ids out of the caller's list in place).
    remaining = set(anno_ids)
    try:
        with self.txn() as s:
            annos = s.layers[layer_name].annotations
            # Iterate backwards so pop() does not shift unvisited indices.
            for idx in range(len(annos) - 1, -1, -1):
                if annos[idx].id in remaining:
                    remaining.discard(annos[idx].id)
                    annos.pop(idx)
                    if not remaining:
                        break
    # Narrowed from a bare except, which also swallowed KeyboardInterrupt
    # and SystemExit.
    except Exception:
        self.update_message("Could not remove annotation")
def add_annotation_tags(self, layer_name, tags):
    """Register a list of tag labels on an annotation layer.

    Tag ids are assigned 1..len(tags) in order. Raises ValueError if the
    layer does not exist.
    """
    if layer_name not in self.layer_names:
        raise ValueError("Layer is not an annotation layer")
    tag_list = [
        OrderedDict({"id": tag_index, "label": label})
        for tag_index, label in enumerate(tags, start=1)
    ]
    with self.txn() as s:
        s.layers[layer_name]._json_data["annotationTags"] = tag_list
def update_description(self, layer_id_dict, new_description):
    """Append `new_description` to annotations given {layer_name: [anno_ids]}.

    Existing descriptions are kept and the new text is appended on a new
    line. The input dict is deep-copied so the caller's data is untouched.
    """
    layer_id_dict = copy.deepcopy(layer_id_dict)
    with self.txn() as s:
        try:
            for layer_name, id_list in layer_id_dict.items():
                for anno in s.layers[layer_name].annotations:
                    if anno.id in id_list:
                        if anno.description is None:
                            anno.description = new_description
                        else:
                            anno.description = "{}\n{}".format(
                                anno.description, new_description
                            )
                        id_list.remove(anno.id)
                        # Stop scanning once every requested id was handled.
                        if len(id_list) == 0:
                            break
        except Exception as e:
            print(e)
            self.update_message("Could not update descriptions!")
@property
def url(self):
    """str: the URL served by this viewer instance (see Viewer.get_viewer_url)."""
    return self.get_viewer_url()
def as_url(self, prefix=None, as_html=False, link_text="Neuroglancer link"):
    """Return the current state as a neuroglancer URL.

    Attributes:
        prefix (str): neuroglancer deployment base URL; defaults to
            utils.default_neuroglancer_base.
        as_html (bool): if True, wrap the URL in an HTML anchor tag.
        link_text (str): anchor text used when as_html is True.
    """
    if prefix is None:
        prefix = utils.default_neuroglancer_base
    ngl_url = neuroglancer.to_url(self.state, prefix=prefix)
    if as_html:
        return f'<a href="{ngl_url}" target="_blank">{link_text}</a>'
    return ngl_url
def update_message(self, message):
    """Show `message` in the viewer's status bar; None is a no-op."""
    with self.config_state.txn() as s:
        if message is not None:
            s.status_messages["status"] = message
def set_selected_layer(self, layer_name, tool=None):
    """Make `layer_name` the selected, visible layer; optionally set its tool.

    No-op if the layer does not exist.
    """
    if layer_name in self.layer_names:
        with self.txn() as s:
            s._json_data["selectedLayer"] = OrderedDict(
                layer=layer_name, visible=True
            )
            if tool is not None:
                s.layers[layer_name]._json_data["tool"] = tool
def get_selected_layer(self):
    """Return the name of the currently selected layer, or None if unset.

    Catches only the lookup errors a missing/unset 'selectedLayer' entry
    can raise, instead of the previous bare except that hid unrelated
    failures (including KeyboardInterrupt).
    """
    state_json = self.state.to_json()
    try:
        selected_layer = state_json["selectedLayer"]["layer"]
    except (KeyError, TypeError):
        selected_layer = None
    return selected_layer
def get_annotation(self, layer_name, aid):
    """Return the annotation with id `aid` from layer `layer_name`.

    Returns None if the layer is not an annotation layer or no annotation
    with that id exists.
    """
    layer = self.state.layers[layer_name]
    if layer.type != "annotation":
        return None
    for anno in layer.annotations:
        if anno.id == aid:
            return anno
    # Loop finished without a match (replaces the original for-else).
    return None
def get_selected_annotation_id(self):
    """Return the id of the currently selected annotation, or None.

    Bug fix: the previous version's non-dict branch read the unassigned
    local (`aid = aid`), raising NameError that was swallowed by the
    except clause, so plain string annotation ids always came back None.
    """
    layer_name = self.get_selected_layer()
    try:
        aid_data = self.state.layers[layer_name]._json_data["selectedAnnotation"]
    except Exception:
        # Layer may be None/missing or have no selectedAnnotation entry.
        return None
    if isinstance(aid_data, OrderedDict):
        return aid_data["id"]
    return aid_data
def select_annotation(self, layer_name, aid):
    """Select annotation `aid` on `layer_name` and focus that layer."""
    if layer_name in self.layer_names:
        with self.txn() as s:
            s.layers[layer_name]._json_data["selectedAnnotation"] = aid
    # set_selected_layer itself guards against missing layers.
    self.set_selected_layer(layer_name)
@property
def layer_names(self):
    """list of str: names of all layers currently in the viewer state."""
    return [l.name for l in self.state.layers]
def selected_objects(self, segmentation_layer):
    """Return the list of selected segment ids in `segmentation_layer`."""
    return list(self.state.layers[segmentation_layer].segments)
def add_selected_objects(self, segmentation_layer, oids, colors=None):
    """Add root ids to a segmentation layer's selected set, optionally coloring them.

    Attributes:
        segmentation_layer (str): segmentation layer name.
        oids (int or list): root id(s) to select; values are cast to uint64.
        colors: either an {oid: color} dict or a list of colors parallel
            to `oids`; mismatched-length lists are silently ignored.
    """
    # Normalize a single integer id to a one-element list.
    if issubdtype(type(oids), integer):
        oids = [oids]
    with self.txn() as s:
        for oid in oids:
            s.layers[segmentation_layer].segments.add(uint64(oid))
    if colors is not None:
        if isinstance(colors, dict):
            self.assign_colors(segmentation_layer, colors)
        elif len(colors) == len(oids):
            seg_colors = {str(oid): clr for oid, clr in zip(oids, colors)}
            self.assign_colors(segmentation_layer, seg_colors)
def get_mouse_coordinates(self, s):
    """Return the mouse voxel coordinates from an action state `s`."""
    pos = s.mouse_voxel_coordinates
    return pos
def set_view_options(
    self,
    show_slices=None,
    layout=None,
    show_axis_lines=None,
    show_scale_bar=None,
    orthographic=None,
    position=None,
    zoom_image=None,
    zoom_3d=None,
):
    """Set global view options; any argument left as None is unchanged.

    Attributes:
        show_slices (bool): show cross-section slices in the 3d view.
        layout (str): viewer layout type name.
        show_axis_lines (bool): show the axis lines at the view center.
        show_scale_bar (bool): show the scale bar.
        orthographic (bool): use orthographic projection in the 3d layout.
        position: voxel coordinates to center the view on.
        zoom_image (float): zoom factor for the image panels.
        zoom_3d (float): zoom for the perspective (3d) view.
    """
    with self.txn() as s:
        if show_slices is not None:
            s.showSlices = show_slices
        if layout is not None:
            s.layout.type = layout
        if show_axis_lines is not None:
            s.show_axis_lines = show_axis_lines
        if show_scale_bar is not None:
            s.show_scale_bar = show_scale_bar
        if orthographic is not None:
            s.layout.orthographic_projection = orthographic
        if position is not None:
            s.position.voxelCoordinates = position
        if zoom_image is not None:
            s.navigation.zoomFactor = zoom_image
        if zoom_3d is not None:
            s.perspectiveZoom = zoom_3d
def set_segmentation_view_options(
    self,
    layer_name,
    alpha_selected=None,
    alpha_3d=None,
    alpha_unselected=None,
):
    """Set alpha (opacity) options on a segmentation layer.

    No-op if `layer_name` is not a segmentation-type layer. Arguments
    left as None are unchanged.
    """
    if self.state.layers[layer_name].type not in SEGMENTATION_LAYER_TYPES:
        return
    with self.txn() as s:
        l = s.layers[layer_name]
        if alpha_selected is not None:
            l.selectedAlpha = alpha_selected
        if alpha_3d is not None:
            l.objectAlpha = alpha_3d
        if alpha_unselected is not None:
            l.notSelectedAlpha = alpha_unselected
def set_timestamp(
self,
layer_name,
timestamp=None,
):
"""Set timestamp of a segmentation layer
Parameters
----------
layer_name : str
Name of a segmentation layer
timestamp : float, optional
Timestamp in unix epoch time (e.g. `time.time.now()` in python), by default None
"""
if self.state.layers[layer_name].type != "segmentation_with_graph":
return
with self.txn() as s:
l = s.layers[layer_name]
if timestamp is not None:
l.timestamp = int(timestamp)
else:
l.timestamp = None
def assign_colors(self, layer_name, seg_colors):
"""Assign colors to root ids in a segmentation layer
Parameters
----------
layer_name : str,
Segmentation layer name
seg_colors : dict
dict with root ids as keys and colors as values.
"""
with self.txn() as s:
if seg_colors is not None:
seg_colors = {
str(oid): k for oid, k in seg_colors.items() if k is not None
}
s.layers[layer_name]._json_data["segmentColors"] = seg_colors
    def set_multicut_points(
        self,
        layer_name,
        seg_id,
        points_red,
        points_blue,
        supervoxels_red=None,
        supervoxels_blue=None,
        focus=True,
    ):
        """Configures multicut points in the neuroglancer state.

        Points are given in voxel coordinates and are converted to mesh
        units internally by multiplying with the viewer voxel size.

        Parameters
        ----------
        layer_name : str
            Segmentation layer name
        seg_id : np.uint64
            Segmentation id of the object in question
        points_red : np.array
            Nx3 array of locations in voxel space for side 1 of the cut.
        points_blue : np.array
            Mx3 array of locations in voxel space for side 2 of the cut.
        supervoxels_red : np.array or None, optional
            N-length array of supervoxel ids associated with locations in points_red or None. If None, supervoxels lookup occurs based on the mesh. By default None
        supervoxels_blue : np.array or None, optional
            M-length array of supervoxel ids associated with locations in points_blue or None. If None, supervoxels lookup occurs based on the mesh. By default None
        focus : bool, optional
            If True, makes the layer and graph tool focused. By default True
        """
        # Build one point annotation per cut point; when no supervoxel id is
        # known, the root id stands in for it in the description/linkage.
        def _multicut_annotation(pt, oid, sv_id):
            if sv_id is None:
                sv_id = oid
            return annotation.point_annotation(
                pt, description=str(sv_id), linked_segmentation=[sv_id, oid]
            )
        if supervoxels_red is None:
            supervoxels_red = [None for x in points_red]
        if supervoxels_blue is None:
            supervoxels_blue = [None for x in points_blue]
        annos_red = neuroglancer.annotationHolder()
        for pt, sv_id in zip(points_red, supervoxels_red):
            annos_red.annotations.append(
                _multicut_annotation(pt * self.state.voxel_size, seg_id, sv_id)
            )
        annos_blue = neuroglancer.annotationHolder()
        for pt, sv_id in zip(points_blue, supervoxels_blue):
            annos_blue.annotations.append(
                _multicut_annotation(pt * self.state.voxel_size, seg_id, sv_id)
            )
        # Make sure the object being cut is selected in the layer.
        self.add_selected_objects(layer_name, [seg_id])
        with self.txn() as s:
            l = s.layers[layer_name]
            l.tab = "graph"
            l.graphOperationMarker.append(annos_red)
            l.graphOperationMarker.append(annos_blue)
        if focus:
            self.set_selected_layer(layer_name)
            # Center the view on the mean of all cut points (still in voxels).
            ctr_pt = vstack([points_red, points_blue]).mean(axis=0)
            self.set_view_options(position=ctr_pt, zoom_3d=100)
| 36.967532 | 464 | 0.584168 | from .. import nglite as neuroglancer
from . import annotation, utils
from numpy import issubdtype, integer, uint64, vstack
from collections import OrderedDict
import copy
import re
SEGMENTATION_LAYER_TYPES = ["segmentation", "segmentation_with_graph"]
class EasyViewer(neuroglancer.Viewer):
def __init__(self):
super(EasyViewer, self).__init__()
def __repr__(self):
return self.as_url()
def _repr_html_(self):
return '<a href="%s" target="_blank">Viewer</a>' % self.as_url()
def load_url(self, url):
state = neuroglancer.parse_url(url)
self.set_state(state)
@staticmethod
def _smart_add_segmentation_layer(s, layer_name, source, **kwargs):
if re.search(r"^graphene://", source) is not None:
s.layers[layer_name] = neuroglancer.ChunkedgraphSegmentationLayer(
source=source, **kwargs
)
elif re.search(r"^precomputed://", source) is not None:
s.layers[layer_name] = neuroglancer.SegmentationLayer(
source=source, **kwargs
)
def add_layers(
self,
image_layers={},
segmentation_layers={},
annotation_layers={},
resolution=None,
):
with self.txn() as s:
for ln, kws in image_layers.items():
s.layers[ln] = neuroglancer.ImageLayer(**kws)
for ln, kws in segmentation_layers.items():
self._smart_add_segmentation_layer(s, **kws)
for ln, kws in annotation_layers.items():
s.layers[ln] = neuroglancer.AnnotationLayer(**kws)
if resolution is not None:
s.voxel_size = resolution
pass
def add_segmentation_layer(self, layer_name, source, **kwargs):
with self.txn() as s:
self._smart_add_segmentation_layer(
s, layer_name=layer_name, source=source, **kwargs
)
def add_image_layer(self, layer_name, source, **kwargs):
with self.txn() as s:
s.layers[layer_name] = neuroglancer.ImageLayer(source=source, **kwargs)
def set_resolution(self, resolution):
with self.txn() as s:
s.voxel_size = resolution
def add_contrast_shader(self, layer_name, black=0.0, white=1.0):
shader_text = f"#uicontrol float black slider(min=0, max=1, default={black})\n#uicontrol float white slider(min=0, max=1, default={white})\nfloat rescale(float value) {{\n return (value - black) / (white - black);\n}}\nvoid main() {{\n float val = toNormalized(getDataValue());\n if (val < black) {{\n emitRGB(vec3(0,0,0));\n }} else if (val > white) {{\n emitRGB(vec3(1.0, 1.0, 1.0));\n }} else {{\n emitGrayscale(rescale(val));\n }}\n}}\n"
self._update_layer_shader(layer_name, shader_text)
def _update_layer_shader(self, layer_name, shader_text):
with self.txn() as s:
s.layers[layer_name]._json_data["shader"] = shader_text
def set_state_server(self, state_server):
with self.txn() as s:
s._json_data["jsonStateServer"] = state_server
def add_annotation_layer(
self,
layer_name=None,
color=None,
linked_segmentation_layer=None,
filter_by_segmentation=False,
brackets_show_segmentation=True,
selection_shows_segmentation=True,
tags=None,
):
if layer_name is None:
layer_name = "annos"
if layer_name in [l.name for l in self.state.layers]:
return
if linked_segmentation_layer is None:
filter_by_segmentation = None
with self.txn() as s:
new_layer = neuroglancer.AnnotationLayer(
linked_segmentation_layer=linked_segmentation_layer,
filter_by_segmentation=filter_by_segmentation,
brackets_show_segmentation=brackets_show_segmentation,
selection_shows_segmentation=selection_shows_segmentation,
)
s.layers.append(name=layer_name, layer=new_layer)
if color is not None:
s.layers[layer_name].annotationColor = color
if tags is not None:
self.add_annotation_tags(layer_name=layer_name, tags=tags)
def set_annotation_layer_color(self, layer_name, color):
if layer_name in [l.name for l in self.state.layers]:
with self.txn() as s:
s.layers[layer_name].annotationColor = color
else:
pass
def clear_annotation_layers(self, layer_names):
with self.txn() as s:
for ln in layer_names:
s.layers[ln].annotations._data = []
def set_annotation_one_shot(self, ln_anno_dict):
with self.txn() as s:
for ln, annos in ln_anno_dict.items():
s.layers[ln].annotations._data = annos
def add_annotations(self, layer_name, annotations):
with self.txn() as s:
for anno in annotations:
s.layers[layer_name].annotations.append(anno)
def remove_annotations(self, layer_name, anno_ids):
if isinstance(anno_ids, str):
anno_ids = [anno_ids]
try:
with self.txn() as s:
el = len(s.layers[layer_name].annotations)
for anno in reversed(s.layers[layer_name].annotations):
el -= 1
if anno.id in anno_ids:
anno_ids.remove(anno.id)
s.layers[layer_name].annotations.pop(el)
if len(anno_ids) == 0:
break
except:
self.update_message("Could not remove annotation")
def add_annotation_tags(self, layer_name, tags):
if layer_name not in self.layer_names:
raise ValueError("Layer is not an annotation layer")
tag_list = [
OrderedDict({"id": tag_id + 1, "label": label})
for tag_id, label in enumerate(tags)
]
with self.txn() as s:
s.layers[layer_name]._json_data["annotationTags"] = tag_list
def update_description(self, layer_id_dict, new_description):
layer_id_dict = copy.deepcopy(layer_id_dict)
with self.txn() as s:
try:
for layer_name, id_list in layer_id_dict.items():
for anno in s.layers[layer_name].annotations:
if anno.id in id_list:
if anno.description is None:
anno.description = new_description
else:
anno.description = "{}\n{}".format(
anno.description, new_description
)
id_list.remove(anno.id)
if len(id_list) == 0:
break
except Exception as e:
print(e)
self.update_message("Could not update descriptions!")
@property
def url(self):
return self.get_viewer_url()
def as_url(self, prefix=None, as_html=False, link_text="Neuroglancer link"):
if prefix is None:
prefix = utils.default_neuroglancer_base
ngl_url = neuroglancer.to_url(self.state, prefix=prefix)
if as_html:
return '<a href="{}" target="_blank">{}</a>'.format(ngl_url, link_text)
else:
return ngl_url
def update_message(self, message):
with self.config_state.txn() as s:
if message is not None:
s.status_messages["status"] = message
def set_selected_layer(self, layer_name, tool=None):
if layer_name in self.layer_names:
with self.txn() as s:
s._json_data["selectedLayer"] = OrderedDict(
layer=layer_name, visible=True
)
if tool is not None:
s.layers[layer_name]._json_data["tool"] = tool
def get_selected_layer(self):
state_json = self.state.to_json()
try:
selected_layer = state_json["selectedLayer"]["layer"]
except:
selected_layer = None
return selected_layer
def get_annotation(self, layer_name, aid):
if self.state.layers[layer_name].type == "annotation":
for anno in self.state.layers[layer_name].annotations:
if anno.id == aid:
return anno
else:
return None
else:
return None
def get_selected_annotation_id(self):
layer_name = self.get_selected_layer()
try:
aid_data = self.state.layers[layer_name]._json_data["selectedAnnotation"]
if isinstance(aid_data, OrderedDict):
aid = aid_data["id"]
else:
aid = aid
except:
aid = None
return aid
def select_annotation(self, layer_name, aid):
if layer_name in self.layer_names:
with self.txn() as s:
s.layers[layer_name]._json_data["selectedAnnotation"] = aid
self.set_selected_layer(layer_name)
@property
def layer_names(self):
return [l.name for l in self.state.layers]
def selected_objects(self, segmentation_layer):
return list(self.state.layers[segmentation_layer].segments)
def add_selected_objects(self, segmentation_layer, oids, colors=None):
if issubdtype(type(oids), integer):
oids = [oids]
with self.txn() as s:
for oid in oids:
s.layers[segmentation_layer].segments.add(uint64(oid))
if colors is not None:
if isinstance(colors, dict):
self.assign_colors(segmentation_layer, colors)
elif len(colors) == len(oids):
seg_colors = {str(oid): clr for oid, clr in zip(oids, colors)}
self.assign_colors(segmentation_layer, seg_colors)
def get_mouse_coordinates(self, s):
pos = s.mouse_voxel_coordinates
return pos
def set_view_options(
self,
show_slices=None,
layout=None,
show_axis_lines=None,
show_scale_bar=None,
orthographic=None,
position=None,
zoom_image=None,
zoom_3d=None,
):
with self.txn() as s:
if show_slices is not None:
s.showSlices = show_slices
if layout is not None:
s.layout.type = layout
if show_axis_lines is not None:
s.show_axis_lines = show_axis_lines
if show_scale_bar is not None:
s.show_scale_bar = show_scale_bar
if orthographic is not None:
s.layout.orthographic_projection = orthographic
if position is not None:
s.position.voxelCoordinates = position
if zoom_image is not None:
s.navigation.zoomFactor = zoom_image
if zoom_3d is not None:
s.perspectiveZoom = zoom_3d
def set_segmentation_view_options(
self,
layer_name,
alpha_selected=None,
alpha_3d=None,
alpha_unselected=None,
):
if self.state.layers[layer_name].type not in SEGMENTATION_LAYER_TYPES:
return
with self.txn() as s:
l = s.layers[layer_name]
if alpha_selected is not None:
l.selectedAlpha = alpha_selected
if alpha_3d is not None:
l.objectAlpha = alpha_3d
if alpha_unselected is not None:
l.notSelectedAlpha = alpha_unselected
def set_timestamp(
self,
layer_name,
timestamp=None,
):
if self.state.layers[layer_name].type != "segmentation_with_graph":
return
with self.txn() as s:
l = s.layers[layer_name]
if timestamp is not None:
l.timestamp = int(timestamp)
else:
l.timestamp = None
def assign_colors(self, layer_name, seg_colors):
with self.txn() as s:
if seg_colors is not None:
seg_colors = {
str(oid): k for oid, k in seg_colors.items() if k is not None
}
s.layers[layer_name]._json_data["segmentColors"] = seg_colors
def set_multicut_points(
self,
layer_name,
seg_id,
points_red,
points_blue,
supervoxels_red=None,
supervoxels_blue=None,
focus=True,
):
def _multicut_annotation(pt, oid, sv_id):
if sv_id is None:
sv_id = oid
return annotation.point_annotation(
pt, description=str(sv_id), linked_segmentation=[sv_id, oid]
)
if supervoxels_red is None:
supervoxels_red = [None for x in points_red]
if supervoxels_blue is None:
supervoxels_blue = [None for x in points_blue]
annos_red = neuroglancer.annotationHolder()
for pt, sv_id in zip(points_red, supervoxels_red):
annos_red.annotations.append(
_multicut_annotation(pt * self.state.voxel_size, seg_id, sv_id)
)
annos_blue = neuroglancer.annotationHolder()
for pt, sv_id in zip(points_blue, supervoxels_blue):
annos_blue.annotations.append(
_multicut_annotation(pt * self.state.voxel_size, seg_id, sv_id)
)
self.add_selected_objects(layer_name, [seg_id])
with self.txn() as s:
l = s.layers[layer_name]
l.tab = "graph"
l.graphOperationMarker.append(annos_red)
l.graphOperationMarker.append(annos_blue)
if focus:
self.set_selected_layer(layer_name)
ctr_pt = vstack([points_red, points_blue]).mean(axis=0)
self.set_view_options(position=ctr_pt, zoom_3d=100)
| true | true |
f7fca0007a85048132a1d74f56af87865002739a | 6,328 | py | Python | traj/scripts/synchronized_traj_example.py | jonbinney/trajectory_smoothing | 0e2b8d7d646c96c0c22eef1371bcd42d169121dc | [
"Apache-2.0"
] | null | null | null | traj/scripts/synchronized_traj_example.py | jonbinney/trajectory_smoothing | 0e2b8d7d646c96c0c22eef1371bcd42d169121dc | [
"Apache-2.0"
] | 1 | 2021-01-04T00:37:34.000Z | 2021-01-21T22:02:07.000Z | traj/scripts/synchronized_traj_example.py | jonbinney/trajectory_smoothing | 0e2b8d7d646c96c0c22eef1371bcd42d169121dc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
'''
example to check the joint motion synchronization algorithm [full trajectory]
'''
import numpy as np
import math
import traj
from matplotlib import pyplot as plt
import rospy
rospy.init_node('traj_synchronization', log_level=rospy.DEBUG)
# Joint limits for a 6-dof arm. Three alternative limit sets are listed
# below for reference; each block overwrites the previous one, so only
# the LAST assigned set (option_3, M-20iB/25C) actually takes effect.
# limits, option_1: same limits that Jon used in first demo file
abs_max_pos = np.deg2rad(np.array([ 185.0, 60.0, 132.0, 360.0, 125.0, 360.0]))
abs_max_vel = np.deg2rad(np.array([ 150.0, 150.0, 200.0, 300.0, 300.0, 600.0]))
abs_max_acc = np.deg2rad(np.array([ 500.0, 500.0, 700.0, 1100.0, 1100.0, 2500.0]))
abs_max_jrk = np.deg2rad(np.array([ 4500.0, 4500.0, 5000.0, 8000.0, 8000.0, 16000.0]))
# limits, option_2: r-2000ic/165f that Gijs send
abs_max_pos = np.deg2rad(np.array([ 185.0, 60.0, 132.0, 360.0, 125.0, 360.0]))
abs_max_vel = np.deg2rad(np.array([ 1.300e+02, 1.150e+02, 1.250e+02, 1.800e+02, 1.800e+02, 2.600e+02 ]))
abs_max_acc = np.deg2rad(np.array([ 2.532467e+02, 2.240260e+02, 2.435065e+02, 3.506494e+02, 3.506494e+02, 5.064935e+02]))
abs_max_jrk = np.deg2rad(np.array([ 9.866757e+02, 8.728286e+02, 9.487267e+02, 1.366166e+03, 1.366166e+03, 1.973351e+03]))
# limits, option_3: M-20iB/25C that Gijs send (already in rad; this set is used)
abs_max_pos = np.array([ 2.967060, 2.443461, 5.215218, 3.490659, 2.530727, 4.712389 ])
abs_min_pos = np.array([-2.967060, -1.745329, -2.600541, -3.490659, -2.530727, -4.712389 ])
# for now we consider even (symmetric) max/min position limits:
# per joint, take the tighter of max and |min|.
pos_limits = [min(a, abs(b)) for a,b in zip(abs_max_pos, abs_min_pos)]
abs_max_pos = np.array(pos_limits)
abs_max_vel = np.array([ 3.577925, 3.577925, 4.537856, 7.243116, 7.243116, 15.358897])
abs_max_acc = np.array([ 12.423351, 12.423351, 15.756445, 25.149706, 25.149706, 53.329513])
abs_max_jrk = np.array([ 86.273266, 86.273266, 109.419752, 174.650735, 174.650735, 370.343857])
# print the limits (debug log level)
rospy.logdebug("> abs_max_pos:{}".format(abs_max_pos))
rospy.logdebug("> abs_max_vel:{}".format(abs_max_vel))
rospy.logdebug("> abs_max_acc:{}".format(abs_max_acc))
rospy.logdebug("> abs_max_jrk:{}".format(abs_max_jrk))
# path_option_1: Jon's path [the one Jon used for the first demo with zero velocities]
# NOTE: this first path/velocity pair is immediately overwritten by the
# second pair below and is kept only for reference.
path =[]
path.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
path.append([ 1.5, 0.7, 0.3, 0.0, 0.0, 0.0])
path.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
path.append([-1.5, 0.7, 0.3, 0.0, 0.0, 0.0])
path.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
# there is only one point that is not changing the motion direction
estimated_vel = [ ]
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ -2.7, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
# path_option_2: random traj & random velocities (this pair takes effect)
path =[]
path.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
path.append([ 1.0, 0.4, 0.5, 0.5, 0.0, 0.0])
path.append([ 1.5, 0.2, 0.7, 0.8, 0.0, 0.0])
path.append([ 2.0, 0.0, 0.9, 1.2, 0.0, 0.0])
path.append([ 0.5, -0.6, 0.4, -.5, 0.0, 0.0])
path.append([ 0.0, -0.8, 0.0, -1.0, 0.0, 0.0])
# one estimated velocity vector per waypoint (same length as path)
estimated_vel = [ ]
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ 1.4, 0.0, 0.5, 0.7, 0.0, 0.0])
estimated_vel.append([ 1.4, -0.6, 0.5, 0.7, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([-0.9, -0.3, -0.6, -0.9, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
n_jts = len(path[0])   # number of joints
n_wpts = len(path)     # number of waypoints
n_segs = n_wpts - 1    # number of trajectory segments between waypoints
# NOTE(review): the three lists below are never filled or read later —
# presumably leftovers from an earlier version of the script.
min_sync_time_seg = [ ]
phases_dur_seg_jt = [ ]
phases_jrk_seg_jt = [ ]
# variables for sampling times and plotting
frq = 125.0            # sampling frequency [Hz]
t_start = 0.0
abs_t = t_start        # absolute sample time; carried across segments
traj_pos = [ [] for jt in range(n_jts)]
traj_vel = [ [] for jt in range(n_jts)]
traj_acc = [ [] for jt in range(n_jts)]
traj_jrk = [ [] for jt in range(n_jts)]
traj_time = [ ]
waypt_times = [ ]
waypt_times.append(t_start)
for seg in range(n_segs):
    rospy.logdebug("\n\n>> seg_numer: {}".format(seg))
    # Synchronize all joints over this segment: returns the common
    # (minimum synchronized) segment duration plus the per-joint phase
    # durations and jerk values describing each joint's motion profile.
    min_sync_time, phase_dur_jt, phase_jrk_jt = traj.segment_synchronization(
        path[seg], path[seg+1], estimated_vel[seg], estimated_vel[seg+1],
        abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk)
    waypt_times.append(waypt_times[-1] + min_sync_time)
    # Sample this segment at frq until its end time; abs_t is NOT reset,
    # so sampling resumes exactly where the previous segment stopped.
    while abs_t <= waypt_times[-1]:
        for jt in range(n_jts):
            p_start = path[seg][jt]
            v_start = estimated_vel[seg][jt]
            phases_dur = phase_dur_jt[jt]
            phases_jrk = phase_jrk_jt[jt]
            # waypt_times[-2] is this segment's start time.
            pos, vel, acc, jrk = traj.sample_segment(abs_t, waypt_times[-2], p_start, v_start, phases_jrk, phases_dur)
            traj_pos[jt].append(pos)
            traj_vel[jt].append(vel)
            traj_acc[jt].append(acc)
            traj_jrk[jt].append(jrk)
        traj_time.append(abs_t)
        abs_t = abs_t + 1/frq
# plot pos, vel, acc, jrk. plot waypoints and estimated velocity as well
# to check if there is any difference between planned and sampled values
fig, axes = plt.subplots(4, sharex=True)
for jt in range(0, n_jts):
    axes[0].plot(traj_time, traj_pos[jt])
    axes[1].plot(traj_time, traj_vel[jt])
    axes[2].plot(traj_time, traj_acc[jt])
    axes[3].plot(traj_time, traj_jrk[jt])
    # overlay the original waypoints / estimated velocities as stars
    axes[0].plot(waypt_times, [path[wpt][jt] for wpt in range(n_wpts)], '*')
    axes[1].plot(waypt_times, [estimated_vel[wpt][jt] for wpt in range(n_wpts)], '*')
axes[0].grid()
axes[1].grid()
axes[2].grid()
axes[3].grid()
axes[0].set_ylabel('position')
axes[1].set_ylabel('velocity')
axes[2].set_ylabel('acceleration')
axes[3].set_ylabel('jerk')
axes[3].set_xlabel('Time')
plt.legend()
plt.show()
# store outputs [pos, vel, acc, jrk] in csv file
# (optional export, disabled by default — uncomment to dump the sampled
# trajectory to CSV for offline analysis)
# traj_pos = list(map(list, zip(*traj_pos)))
# traj_vel = list(map(list, zip(*traj_vel)))
# traj_acc = list(map(list, zip(*traj_acc)))
# traj_jrk = list(map(list, zip(*traj_jrk)))
# import csv
# with open("sampled_traj_time_pos_vel_acc_jrk_125.csv", "wb") as csv_file:
# writer = csv.writer(csv_file, delimiter=',')
# for pt in range(len(traj_time)):
# writer.writerow([traj_time[pt]] + traj_pos[pt] + traj_vel[pt] + traj_acc[pt] + traj_jrk[pt])
# with open("sampled_traj_time_positions_125.csv", "wb") as csv_file:
# writer = csv.writer(csv_file, delimiter=',')
# for pt in range(len(traj_time)):
# writer.writerow([traj_time[pt]] + traj_pos[pt])
| 43.342466 | 122 | 0.647124 |
import numpy as np
import math
import traj
from matplotlib import pyplot as plt
import rospy
rospy.init_node('traj_synchronization', log_level=rospy.DEBUG)
abs_max_pos = np.deg2rad(np.array([ 185.0, 60.0, 132.0, 360.0, 125.0, 360.0]))
abs_max_vel = np.deg2rad(np.array([ 150.0, 150.0, 200.0, 300.0, 300.0, 600.0]))
abs_max_acc = np.deg2rad(np.array([ 500.0, 500.0, 700.0, 1100.0, 1100.0, 2500.0]))
abs_max_jrk = np.deg2rad(np.array([ 4500.0, 4500.0, 5000.0, 8000.0, 8000.0, 16000.0]))
abs_max_pos = np.deg2rad(np.array([ 185.0, 60.0, 132.0, 360.0, 125.0, 360.0]))
abs_max_vel = np.deg2rad(np.array([ 1.300e+02, 1.150e+02, 1.250e+02, 1.800e+02, 1.800e+02, 2.600e+02 ]))
abs_max_acc = np.deg2rad(np.array([ 2.532467e+02, 2.240260e+02, 2.435065e+02, 3.506494e+02, 3.506494e+02, 5.064935e+02]))
abs_max_jrk = np.deg2rad(np.array([ 9.866757e+02, 8.728286e+02, 9.487267e+02, 1.366166e+03, 1.366166e+03, 1.973351e+03]))
abs_max_pos = np.array([ 2.967060, 2.443461, 5.215218, 3.490659, 2.530727, 4.712389 ])
abs_min_pos = np.array([-2.967060, -1.745329, -2.600541, -3.490659, -2.530727, -4.712389 ])
pos_limits = [min(a, abs(b)) for a,b in zip(abs_max_pos, abs_min_pos)]
abs_max_pos = np.array(pos_limits)
abs_max_vel = np.array([ 3.577925, 3.577925, 4.537856, 7.243116, 7.243116, 15.358897])
abs_max_acc = np.array([ 12.423351, 12.423351, 15.756445, 25.149706, 25.149706, 53.329513])
abs_max_jrk = np.array([ 86.273266, 86.273266, 109.419752, 174.650735, 174.650735, 370.343857])
rospy.logdebug("> abs_max_pos:{}".format(abs_max_pos))
rospy.logdebug("> abs_max_vel:{}".format(abs_max_vel))
rospy.logdebug("> abs_max_acc:{}".format(abs_max_acc))
rospy.logdebug("> abs_max_jrk:{}".format(abs_max_jrk))
path =[]
path.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
path.append([ 1.5, 0.7, 0.3, 0.0, 0.0, 0.0])
path.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
path.append([-1.5, 0.7, 0.3, 0.0, 0.0, 0.0])
path.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
#there is only one point that is not changing the motion direction
estimated_vel = [ ]
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ -2.7, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
#path_option_1: random traj & random velocities
path =[]
path.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
path.append([ 1.0, 0.4, 0.5, 0.5, 0.0, 0.0])
path.append([ 1.5, 0.2, 0.7, 0.8, 0.0, 0.0])
path.append([ 2.0, 0.0, 0.9, 1.2, 0.0, 0.0])
path.append([ 0.5, -0.6, 0.4, -.5, 0.0, 0.0])
path.append([ 0.0, -0.8, 0.0, -1.0, 0.0, 0.0])
estimated_vel = [ ]
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([ 1.4, 0.0, 0.5, 0.7, 0.0, 0.0])
estimated_vel.append([ 1.4, -0.6, 0.5, 0.7, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
estimated_vel.append([-0.9, -0.3, -0.6, -0.9, 0.0, 0.0])
estimated_vel.append([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
n_jts = len(path[0])
n_wpts = len(path)
n_segs = n_wpts - 1
min_sync_time_seg = [ ]
phases_dur_seg_jt = [ ]
phases_jrk_seg_jt = [ ]
# variables for sampling times and plotting
frq = 125.0
t_start = 0.0
abs_t = t_start
traj_pos = [ [] for jt in range(n_jts)]
traj_vel = [ [] for jt in range(n_jts)]
traj_acc = [ [] for jt in range(n_jts)]
traj_jrk = [ [] for jt in range(n_jts)]
traj_time = [ ]
waypt_times = [ ]
waypt_times.append(t_start)
for seg in range(n_segs):
rospy.logdebug("\n\n>> seg_numer: {}".format(seg))
min_sync_time, phase_dur_jt, phase_jrk_jt = traj.segment_synchronization(
path[seg], path[seg+1], estimated_vel[seg], estimated_vel[seg+1],
abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk)
waypt_times.append(waypt_times[-1] + min_sync_time)
while abs_t <= waypt_times[-1]:
for jt in range(n_jts):
p_start = path[seg][jt]
v_start = estimated_vel[seg][jt]
phases_dur = phase_dur_jt[jt]
phases_jrk = phase_jrk_jt[jt]
pos, vel, acc, jrk = traj.sample_segment(abs_t, waypt_times[-2], p_start, v_start, phases_jrk, phases_dur)
traj_pos[jt].append(pos)
traj_vel[jt].append(vel)
traj_acc[jt].append(acc)
traj_jrk[jt].append(jrk)
traj_time.append(abs_t)
abs_t = abs_t + 1/frq
# plot pos, vel, acc, jrk. plot waypoints and estimated velocity as well to check if there is any difference
fig, axes = plt.subplots(4, sharex=True)
for jt in range(0, n_jts):
axes[0].plot(traj_time, traj_pos[jt])
axes[1].plot(traj_time, traj_vel[jt])
axes[2].plot(traj_time, traj_acc[jt])
axes[3].plot(traj_time, traj_jrk[jt])
axes[0].plot(waypt_times, [path[wpt][jt] for wpt in range(n_wpts)], '*')
axes[1].plot(waypt_times, [estimated_vel[wpt][jt] for wpt in range(n_wpts)], '*')
axes[0].grid()
axes[1].grid()
axes[2].grid()
axes[3].grid()
axes[0].set_ylabel('position')
axes[1].set_ylabel('velocity')
axes[2].set_ylabel('acceleration')
axes[3].set_ylabel('jerk')
axes[3].set_xlabel('Time')
plt.legend()
plt.show()
# store outputs [pos, vel, acc, jrk] in csv file
# traj_pos = list(map(list, zip(*traj_pos)))
# traj_vel = list(map(list, zip(*traj_vel)))
# traj_acc = list(map(list, zip(*traj_acc)))
# traj_jrk = list(map(list, zip(*traj_jrk)))
# import csv
# with open("sampled_traj_time_pos_vel_acc_jrk_125.csv", "wb") as csv_file:
# writer = csv.writer(csv_file, delimiter=',')
# for pt in range(len(traj_time)):
# writer.writerow([traj_time[pt]] + traj_pos[pt] + traj_vel[pt] + traj_acc[pt] + traj_jrk[pt])
# with open("sampled_traj_time_positions_125.csv", "wb") as csv_file:
# writer = csv.writer(csv_file, delimiter=',')
# for pt in range(len(traj_time)):
# writer.writerow([traj_time[pt]] + traj_pos[pt])
| true | true |
f7fca014f83931eac8a2ed38763d468c3c283851 | 909 | py | Python | setup.py | gurteshwar/plivo-python | 0b475c2b5f1acc052dcdbe0b587c1031982bc5e5 | [
"MIT"
] | null | null | null | setup.py | gurteshwar/plivo-python | 0b475c2b5f1acc052dcdbe0b587c1031982bc5e5 | [
"MIT"
] | null | null | null | setup.py | gurteshwar/plivo-python | 0b475c2b5f1acc052dcdbe0b587c1031982bc5e5 | [
"MIT"
] | null | null | null | from setuptools import setup
import sys
requires = ['requests>=0.10.8']
if sys.version_info < (2, 6):
requires.append('simplejson')
setup(
name = "plivo",
py_modules = ['plivo', "plivoxml"],
version = "0.9",
description = "Plivo Python library",
author = "Plivo Team",
author_email = "hello@plivo.com",
url = "https://github.com/plivo/plivo-python",
keywords = ["plivo", "rest"],
install_requires = requires,
classifiers = [
"Programming Language :: Python",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Development Status :: 5 - Production/Stable",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Communications :: Telephony"
],
long_description = """\
Plivo Python library
""" )
| 30.3 | 71 | 0.60066 | from setuptools import setup
import sys
requires = ['requests>=0.10.8']
if sys.version_info < (2, 6):
requires.append('simplejson')
setup(
name = "plivo",
py_modules = ['plivo', "plivoxml"],
version = "0.9",
description = "Plivo Python library",
author = "Plivo Team",
author_email = "hello@plivo.com",
url = "https://github.com/plivo/plivo-python",
keywords = ["plivo", "rest"],
install_requires = requires,
classifiers = [
"Programming Language :: Python",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Development Status :: 5 - Production/Stable",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Communications :: Telephony"
],
long_description = """\
Plivo Python library
""" )
| true | true |
f7fca2128c1bf1d136f2b78fd70b9046b1f16bda | 3,792 | py | Python | intersight/models/pci_device_list.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | intersight/models/pci_device_list.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | intersight/models/pci_device_list.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PciDeviceList(object):
    """Paged collection of PciDevice results returned by the Intersight API.

    NOTE: mirrors the swagger-codegen generated model interface; keep the
    class attributes and method names in sync with the API definition.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'count': 'int',
        'results': 'list[PciDevice]'
    }

    # attribute name -> JSON key used on the wire
    attribute_map = {
        'count': 'Count',
        'results': 'Results'
    }

    def __init__(self, count=None, results=None):
        """Create a PciDeviceList; both fields are optional."""
        self._count = None
        self._results = None
        if count is not None:
            self.count = count
        if results is not None:
            self.results = results

    @property
    def count(self):
        """Total number of pciDevices matching the request across all pages."""
        return self._count

    @count.setter
    def count(self, count):
        """Set the total number of matching pciDevices."""
        self._count = count

    @property
    def results(self):
        """The array of pciDevices matching the request."""
        return self._results

    @results.setter
    def results(self, results):
        """Set the array of matching pciDevices."""
        self._results = results

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(value):
            # Recursively convert nested models that expose to_dict().
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            return value

        return {name: _convert(getattr(self, name)) for name in self.swagger_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when `other` is a PciDeviceList with identical attributes."""
        return isinstance(other, PciDeviceList) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 24.623377 | 78 | 0.542458 |
from pprint import pformat
from six import iteritems
import re
class PciDeviceList(object):
    """Swagger model holding a page of ``PciDevice`` results.

    Mirrors the other generated swagger models: attributes are exposed
    through properties, and :meth:`to_dict` recursively converts nested
    models to plain dictionaries.
    """

    # attribute name -> swagger type of that attribute
    swagger_types = {
        'count': 'int',
        'results': 'list[PciDevice]'
    }

    # attribute name -> key used on the wire (JSON)
    attribute_map = {
        'count': 'Count',
        'results': 'Results'
    }

    def __init__(self, count=None, results=None):
        """Build a PciDeviceList; both fields are optional."""
        self._count = None
        self._results = None
        if count is not None:
            self.count = count
        if results is not None:
            self.results = results

    @property
    def count(self):
        """Total number of results available."""
        return self._count

    @count.setter
    def count(self, count):
        self._count = count

    @property
    def results(self):
        """The list of ``PciDevice`` objects on this page."""
        return self._results

    @results.setter
    def results(self, results):
        self._results = results

    def to_dict(self):
        """Recursively convert this model into a plain dictionary."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is a PciDeviceList with identical state."""
        return (isinstance(other, PciDeviceList) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not (self == other)
| true | true |
f7fca25e4995c759a70dc0edc7b67986023f0f60 | 3,023 | py | Python | src/OTLMOW/PostenMapping/Model/Post050404110.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/PostenMapping/Model/Post050404110.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/PostenMapping/Model/Post050404110.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post050404110(StandaardPost):
def __init__(self):
super().__init__(
nummer='0504.04110',
beschrijving='Steenslagfundering met continue korrelverdeling met toevoegsel, type IA volgens 5-4.4, dikte 10 cm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw.type',
dotnotatie='type',
defaultWaarde='met-toevoegsel-behandelde-steenslag-met-continue-korrelverdeling---type-IA',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.04110')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotatie='laagRol',
defaultWaarde='fundering',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.04110')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotatie='dikte',
defaultWaarde='10',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.04110')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotatie='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.04110')])
| 46.507692 | 126 | 0.571948 |
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
class Post050404110(StandaardPost):
def __init__(self):
super().__init__(
nummer='0504.04110',
beschrijving='Steenslagfundering met continue korrelverdeling met toevoegsel, type IA volgens 5-4.4, dikte 10 cm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw.type',
dotnotatie='type',
defaultWaarde='met-toevoegsel-behandelde-steenslag-met-continue-korrelverdeling---type-IA',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.04110')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotatie='laagRol',
defaultWaarde='fundering',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.04110')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotatie='dikte',
defaultWaarde='10',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.04110')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotatie='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.04110')])
| true | true |
f7fca2f953e75c10b514841bca483523e317a64e | 2,735 | py | Python | Unet/test.py | 1129ljc/video-inpainting-detection | 9a1aea6268f3ab2ba2f60c526ddf35ccc8350e04 | [
"Apache-2.0"
] | 2 | 2022-03-29T02:56:07.000Z | 2022-03-29T06:46:19.000Z | Unet/test.py | 1129ljc/video-inpainting-detection | 9a1aea6268f3ab2ba2f60c526ddf35ccc8350e04 | [
"Apache-2.0"
] | null | null | null | Unet/test.py | 1129ljc/video-inpainting-detection | 9a1aea6268f3ab2ba2f60c526ddf35ccc8350e04 | [
"Apache-2.0"
] | null | null | null | import os
import cv2
import numpy as np
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from postprocessing import post_deal
class IID_Dataset(Dataset):
def __init__(self, dataset):
self.input_size = (512, 512)
self.image_path = dataset
self.train_files = []
names = os.listdir(self.image_path)
for i in range(len(names)):
name = names[i]
image_name_dir = os.path.join(self.image_path, name)
image_files = sorted(os.listdir(image_name_dir))
image_num = len(image_files)
for j in range(image_num):
image_file = os.path.join(image_name_dir, image_files[j])
self.train_files.append(image_file)
self.transform = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
def __len__(self):
return len(self.train_files)
def __getitem__(self, item):
fname1 = self.train_files[item]
img = cv2.imread(fname1)
img = cv2.resize(img, self.input_size)
img = self.transform(img)
return img, fname1
def test(args):
    """Run inference over a dataset and save post-processed masks.

    Args (dict keys):
        dataset: root dir laid out as ``<root>/<name>/<frame>``.
        ckpt:    path to a serialized torch model (``torch.load``-able).
        gpu_id:  CUDA device index to run on.
        save:    output root; masks are written to ``<save>/<name>/<frame>``.

    Returns:
        Mean connected-component count over all frames
        (0.0 if the dataset is empty).
    """
    dataset = args['dataset']
    ckpt = args['ckpt']
    gpu_id = args['gpu_id']
    save = args['save']
    device = torch.device('cuda:' + str(gpu_id))
    model = torch.load(ckpt)
    model = model.to(device=device)
    model.eval()
    test_dataset = IID_Dataset(dataset)
    test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, num_workers=4)
    num_label = []
    with torch.no_grad():
        for idx, (image, image_path) in enumerate(test_loader):
            image_torch = image.float().to(device)
            predict = model(image_torch)
            predict_mask = predict[0, 0, ...].cpu().detach().numpy()
            # Replicate the single-channel mask into 3 identical channels.
            predict_mask_image = np.zeros([512, 512, 3])
            predict_mask_image[...] = (predict_mask * 255.)[..., None]
            num_labels, output = post_deal(predict_mask_image)
            # batch_size=1: the collated path batch holds one string.
            # Index it directly instead of parsing str(image_path)[:-3],
            # which sliced the repr of the batch and mangled filenames.
            frame_path = image_path[0]
            subdir = os.path.basename(os.path.dirname(frame_path))
            filename = os.path.basename(frame_path)
            out_dir = os.path.join(save, subdir)
            # exist_ok avoids the check-then-mkdir race of the original
            os.makedirs(out_dir, exist_ok=True)
            out_path = os.path.join(out_dir, filename)
            cv2.imwrite(out_path, output)
            print(out_path)
            num_label.append(num_labels)
    # Guard against an empty dataset instead of raising ZeroDivisionError.
    if not num_label:
        return 0.0
    return sum(num_label) / len(num_label)
| 34.620253 | 94 | 0.610603 | import os
import cv2
import numpy as np
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from postprocessing import post_deal
class IID_Dataset(Dataset):
def __init__(self, dataset):
self.input_size = (512, 512)
self.image_path = dataset
self.train_files = []
names = os.listdir(self.image_path)
for i in range(len(names)):
name = names[i]
image_name_dir = os.path.join(self.image_path, name)
image_files = sorted(os.listdir(image_name_dir))
image_num = len(image_files)
for j in range(image_num):
image_file = os.path.join(image_name_dir, image_files[j])
self.train_files.append(image_file)
self.transform = transforms.Compose([
transforms.ToTensor(),
])
def __len__(self):
return len(self.train_files)
def __getitem__(self, item):
fname1 = self.train_files[item]
img = cv2.imread(fname1)
img = cv2.resize(img, self.input_size)
img = self.transform(img)
return img, fname1
def test(args):
dataset = args['dataset']
ckpt = args['ckpt']
gpu_id = args['gpu_id']
save = args['save']
device = torch.device('cuda:' + str(gpu_id))
model = torch.load(ckpt)
model = model.to(device=device)
model.eval()
test_dataset = IID_Dataset(dataset)
test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, num_workers=4)
num_label = []
with torch.no_grad():
for idx, (image, image_path) in enumerate(test_loader):
image_torch = image.float().to(device)
predict = model(image_torch)
predict_mask = predict[0, 0, ...].cpu().detach().numpy()
predict_mask_image = np.zeros([512, 512, 3])
predict_mask_image[..., 0] = predict_mask * 255.
predict_mask_image[..., 1] = predict_mask * 255.
predict_mask_image[..., 2] = predict_mask * 255.
num_labels, output = post_deal(predict_mask_image)
save0 = str(image_path)[:-3].split('/')
save1, filename = save0[-2], save0[-1]
if not os.path.exists(os.path.join(save, save1)):
os.mkdir(os.path.join(save, save1))
cv2.imwrite(os.path.join(save, save1, filename), output)
print(os.path.join(save, save1, filename))
num_label.append(num_labels)
num_label_result = sum(num_label)/len(num_label)
return num_label_result
| true | true |
f7fca36f1517932cc8c133213489c8a1e339786f | 20,645 | py | Python | mrjob/step.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | mrjob/step.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | mrjob/step.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Yelp and Contributors
# Copyright 2013 David Marin and Contributors
# Copyright 2015-2017 Yelp
# Copyright 2018 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representations of job steps, to use in your :py:class:`~mrjob.job.MRJob`'s
:py:meth:`~mrjob.job.MRJob.steps` method.
Because :py:class:`the runner <mrjob.runner.MRJobRunner>` just needs to know
how to invoke your MRJob script, not how it works insternally, each step
instance's ``description()`` method produces a simplified, JSON-able
description of the step, to pass to the runner.
"""
import logging
from mrjob.py2 import string_types
from mrjob.util import cmd_line
# every step-type string a runner may encounter
STEP_TYPES = ('jar', 'spark', 'spark_jar', 'spark_script', 'streaming')
# Function names mapping to mapper, reducer, and combiner operations
_MAPPER_FUNCS = ('mapper', 'mapper_init', 'mapper_final', 'mapper_cmd',
                 'mapper_pre_filter', 'mapper_raw')
_COMBINER_FUNCS = ('combiner', 'combiner_init', 'combiner_final',
                   'combiner_cmd', 'combiner_pre_filter')
_REDUCER_FUNCS = ('reducer', 'reducer_init', 'reducer_final', 'reducer_cmd',
                  'reducer_pre_filter')
_HADOOP_OPTS = ('jobconf',)
# params to specify how to run the step. need at least one of these
_JOB_STEP_FUNC_PARAMS = _MAPPER_FUNCS + _COMBINER_FUNCS + _REDUCER_FUNCS
# all allowable MRStep params
_JOB_STEP_PARAMS = _JOB_STEP_FUNC_PARAMS + _HADOOP_OPTS
# all allowable JarStep constructor keyword args
_JAR_STEP_KWARGS = ['args', 'main_class']
# all allowable SparkStep constructor keyword args
_SPARK_STEP_KWARGS = ['spark', 'spark_args']
# all allowable SparkJarStep constructor keyword args
_SPARK_JAR_STEP_KWARGS = ['args', 'jar', 'main_class', 'spark_args']
# all allowable SparkScriptStep constructor keyword args
_SPARK_SCRIPT_STEP_KWARGS = ['args', 'script', 'spark_args']
# the '#:' comments below are Sphinx doc-comments for these constants
#: If passed as an argument to :py:class:`JarStep`, :py:class:`SparkJarStep`,
#: or :py:class:`SparkScriptStep`, it'll be replaced with the step's input
#: path(s). If there are multiple paths, they'll be joined with commas.
INPUT = '<input>'
#: If this is passed as an argument to :py:class:`JarStep`,
#: :py:class:`SparkJarStep`, or :py:class:`SparkScriptStep`, it'll be replaced
#: with the step's output path
OUTPUT = '<output>'
#: If this is passed as an argument to :py:class:`JarStep`,
#: it'll be replaced with generic hadoop args (-D and -libjars)
GENERIC_ARGS = '<generic args>'
# module-level logger, named after this module
log = logging.getLogger(__name__)
# used by MRStep below, to fake no mapper
def _IDENTITY_MAPPER(key, value):
yield key, value
# used by MRStep below, to fake no reducer
def _IDENTITY_REDUCER(key, values):
for value in values:
yield key, value
class StepFailedException(Exception):
    """Raised when a job step fails.

    :py:meth:`mrjob.job.MRJob.run` catches this automatically and turns
    it into an error message, but you may want to catch it yourself if
    you :ref:`run your job programatically <runners-programmatically>`.
    """
    _FIELDS = ('reason', 'step_num', 'num_steps', 'step_desc')
    def __init__(
            self, reason=None, step_num=None, num_steps=None, step_desc=None):
        """Describe the failure.

        :param string reason: brief explanation of which step failed
        :param int step_num: which step failed (0-indexed)
        :param int num_steps: number of steps in the job
        :param string step_desc: custom description of the step, replacing
                                 the default "Step X of Y"

        Keep *reason* short; use ``log.error(...)`` for multi-line
        explanations.
        """
        self.reason = reason
        self.step_num = step_num
        self.num_steps = num_steps
        self.step_desc = step_desc
    def __str__(self):
        """Human-readable message. Note that *step_num* is shown
        1-indexed."""
        if self.step_desc:
            desc = self.step_desc
        else:
            desc = 'Step'
            if self.step_num is not None:
                desc += ' %d' % (self.step_num + 1)
                if self.num_steps is not None:
                    desc += ' of %d' % self.num_steps
        if self.reason is None:
            suffix = ''
        else:
            suffix = ': %s' % self.reason
        return '%s failed%s' % (desc, suffix)
    def __repr__(self):
        """Constructor-style repr showing only the fields that are set."""
        field_reprs = [
            '%s=%r' % (name, getattr(self, name))
            for name in self._FIELDS
            if getattr(self, name) is not None
        ]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(field_reprs))
class MRStep(object):
    # this docstring excludes mapper_cmd, etc.
    """Represents steps handled by the script containing your job.
    Used by :py:meth:`MRJob.steps <mrjob.job.MRJob.steps>`.
    See :ref:`writing-multi-step-jobs` for sample usage.
    Takes the following keyword arguments: `combiner`, `combiner_cmd`,
    `combiner_final`, `combiner_init`, `combiner_pre_filter`, `mapper`,
    `mapper_cmd`, `mapper_final`, `mapper_init`, `mapper_pre_filter`,
    `mapper_raw`, `reducer`, `reducer_cmd`, `reducer_final`, `reducer_init`,
    `reducer_pre_filter`. These should be set to ``None`` or a function
    with the same signature as the corresponding method in
    :py:class:`~mrjob.job.MRJob`.
    Also accepts `jobconf`, a dictionary with custom jobconf arguments to pass
    to hadoop.
    A MRStep's description looks like::
        {
            'type': 'streaming',
            'mapper': { ... },
            'combiner': { ... },
            'reducer': { ... },
            'jobconf': { ... }, # dict of Hadoop configuration properties
        }
    At least one of ``mapper``, ``combiner`` and ``reducer`` need be included.
    ``jobconf`` is completely optional.
    ``mapper``, ``combiner``, and ``reducer`` are either handled by
    the script containing your job definition, in which case they look like::
        {
            'type': 'script',
            'pre_filter': 'grep -v bad', # optional cmd to filter input
        }
    or they simply run a command, which looks like::
        {
            'type': 'command',
            'command': 'cut -f 1-2', # command to run, as a string
        }
    """
    def __init__(self, **kwargs):
        """Validate *kwargs* and store them in ``self._steps``.

        Raises :py:class:`TypeError` for unknown keyword arguments, and
        :py:class:`ValueError` if no mapper/combiner/reducer is given or
        mutually exclusive arguments (e.g. ``mapper`` and ``mapper_cmd``)
        are combined.
        """
        # limit which keyword args can be specified
        bad_kwargs = sorted(set(kwargs) - set(_JOB_STEP_PARAMS))
        if bad_kwargs:
            raise TypeError('MRStep() got an unexpected keyword argument %r' %
                            bad_kwargs[0])
        if not set(kwargs) & set(_JOB_STEP_FUNC_PARAMS):
            raise ValueError("Step has no mappers and no reducers")
        # remember which substeps were explicitly configured; these flags
        # drive which substeps appear in description() and whether
        # identity fallbacks are used in __getitem__()
        self.has_explicit_mapper = any(
            value for name, value in kwargs.items()
            if name in _MAPPER_FUNCS)
        self.has_explicit_combiner = any(
            value for name, value in kwargs.items()
            if name in _COMBINER_FUNCS)
        self.has_explicit_reducer = any(
            value for name, value in kwargs.items()
            if name in _REDUCER_FUNCS)
        # every known param is present in _steps, defaulting to None
        steps = dict((f, None) for f in _JOB_STEP_PARAMS)
        steps.update(kwargs)
        def _check_conflict(func, other_funcs):
            # raise if *func* was given along with any of *other_funcs*
            if steps[func]:
                for other_func in other_funcs:
                    if steps[other_func] and other_func != func:
                        raise ValueError("Can't specify both %s and %s" % (
                            func, other_func))
        _check_conflict('mapper_cmd', _MAPPER_FUNCS)
        _check_conflict('mapper_raw', ('mapper', 'mapper_pre_filter'))
        _check_conflict('combiner_cmd', _COMBINER_FUNCS)
        _check_conflict('reducer_cmd', _REDUCER_FUNCS)
        self._steps = steps
    def __repr__(self):
        # show only the params that were actually set
        not_none = dict((k, v) for k, v in self._steps.items()
                        if v is not None)
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join('%s=%r' % (k, v) for k, v in not_none.items()))
    def __eq__(self, other):
        return (isinstance(other, MRStep) and self._steps == other._steps)
    def __getitem__(self, key):
        """Look up a step param, substituting identity functions where a
        mapper/combiner/reducer is implied but not explicitly defined."""
        # always be prepared to run a mapper, since Hadoop Streaming requires
        # it
        if key == 'mapper' and self._steps['mapper'] is None:
            return _IDENTITY_MAPPER
        # identity reducer should only show up if you specified 'reducer_init',
        # 'reducer_final', or 'reducer_pre_filter', but not 'reducer' itself
        if (key == 'reducer' and self._steps['reducer'] is None and
                self.has_explicit_reducer):
            return _IDENTITY_REDUCER
        # identity combiner should only show up if you specified
        # 'combiner_init', 'combiner_final', or 'combiner_pre_filter', but not
        # 'combiner' itself
        if (key == 'combiner' and self._steps['combiner'] is None and
                self.has_explicit_combiner):
            return _IDENTITY_REDUCER
        return self._steps[key]
    def _render_substep(self, cmd_key, pre_filter_key):
        """Build the description dict for one substep: a 'command' dict
        if *cmd_key* was set, otherwise a 'script' dict (with optional
        'pre_filter')."""
        if self._steps[cmd_key]:
            cmd = self._steps[cmd_key]
            # commands may be given as arg lists; join them into a string
            if not isinstance(cmd, string_types):
                cmd = cmd_line(cmd)
            if (pre_filter_key and self._steps[pre_filter_key]):
                raise ValueError('Cannot specify both %s and %s' % (
                    cmd_key, pre_filter_key))
            return {'type': 'command', 'command': cmd}
        else:
            substep = {'type': 'script'}
            if (pre_filter_key and
                    self._steps[pre_filter_key]):
                substep['pre_filter'] = self._steps[pre_filter_key]
            return substep
    def render_mapper(self):
        """Description dict for the mapper substep."""
        return self._render_substep('mapper_cmd', 'mapper_pre_filter')
    def render_combiner(self):
        """Description dict for the combiner substep."""
        return self._render_substep('combiner_cmd', 'combiner_pre_filter')
    def render_reducer(self):
        """Description dict for the reducer substep."""
        return self._render_substep('reducer_cmd', 'reducer_pre_filter')
    def description(self, step_num=0):
        """Return the simplified, JSON-able dict for this step
        (``'type': 'streaming'``) that the runner consumes."""
        desc = {'type': 'streaming'}
        # Use a mapper if:
        # - the user writes one
        # - it is the first step and we don't want to mess up protocols
        # - there are only combiners and we don't want to mess up protocols
        if (step_num == 0 or
                self.has_explicit_mapper or
                self.has_explicit_combiner):
            desc['mapper'] = self.render_mapper()
        if self.has_explicit_combiner:
            desc['combiner'] = self.render_combiner()
        if self.has_explicit_reducer:
            desc['reducer'] = self.render_reducer()
        # mapper_raw steps read whole files, driven by an input manifest
        if self._steps['mapper_raw']:
            desc['input_manifest'] = True
        # TODO: verify this is a dict, convert booleans to strings
        if self._steps['jobconf']:
            desc['jobconf'] = self._steps['jobconf']
        return desc
class _Step(object):
    """Shared machinery for the simple attribute-holder step classes.

    Subclasses declare a type string and the keyword arguments they
    accept; this base class handles validation, defaults, equality,
    repr, and the runner-facing description dict.  (MRStep is different
    enough that it does not use this base class.)
    """
    # unique string for this step type (e.g. 'jar'); set in subclasses
    _STEP_TYPE = None
    # all keyword arguments accepted; set in subclasses
    _STEP_ATTRS = []
    # attributes handled by the job itself, omitted from description()
    _HIDDEN_ATTRS = []
    # keyword argument -> expected type(s); the builtin ``callable``
    # marks arguments that merely need to be callable
    _STEP_ATTR_TYPES = {
        'args': (list, tuple),
        'jar': string_types,
        'jobconf': dict,
        'main_class': string_types,
        'script': string_types,
        'spark': callable,
        'spark_args': (list, tuple),
    }
    # keyword argument -> zero-arg factory producing its default value
    _STEP_ATTR_DEFAULTS = {
        'args': list,
        'jobconf': dict,
        'spark_args': list,
    }
    def __init__(self, **kwargs):
        """Store each allowed keyword argument as an attribute, filling
        in defaults for missing/None values.  Raise
        :py:class:`TypeError` for unknown arguments or wrongly-typed
        values.  (Subclasses use their own ``__init__()`` to make
        arguments required.)
        """
        unknown = sorted(set(kwargs) - set(self._STEP_ATTRS))
        if unknown:
            raise TypeError('%s() got unexpected keyword arguments: %s' % (
                self.__class__.__name__, ', '.join(unknown)))
        for name in self._STEP_ATTRS:
            value = kwargs.get(name)
            if value is None:
                value = self._default(name)
            elif name in self._STEP_ATTR_TYPES:
                expected = self._STEP_ATTR_TYPES[name]
                if expected is callable:
                    if not callable(value):
                        raise TypeError('%s is not callable: %r' % (
                            name, value))
                elif not isinstance(value, expected):
                    raise TypeError('%s is not an instance of %r: %r' % (
                        name, self._STEP_ATTR_TYPES[name], value))
            setattr(self, name, value)
    def __repr__(self):
        """Constructor-style repr showing attributes that differ from
        their defaults."""
        present = {}
        for name in self._STEP_ATTR_TYPES:
            if hasattr(self, name):
                present[name] = getattr(self, name)
        shown = [
            '%s=%s' % (name, value)
            for name, value in sorted(present.items())
            if value != self._default(name)
        ]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(shown))
    def __eq__(self, other):
        """Steps are equal when they are the same class and every
        declared attribute matches."""
        if not isinstance(other, self.__class__):
            return False
        return all(
            getattr(self, name) == getattr(other, name)
            for name in set(self._STEP_ATTRS))
    def _default(self, name):
        """Default value for attribute *name* (None unless a factory is
        declared in _STEP_ATTR_DEFAULTS)."""
        factory = self._STEP_ATTR_DEFAULTS.get(name)
        if factory is None:
            return None
        return factory()
    def description(self, step_num=0):
        """Return a dictionary representation of this step. See
        :ref:`steps-format` for examples."""
        desc = {
            name: getattr(self, name)
            for name in self._STEP_ATTRS
            if name not in self._HIDDEN_ATTRS
        }
        desc['type'] = self._STEP_TYPE
        return desc
class JarStep(_Step):
    """Step that runs a custom JAR.

    :param jar: local path to the JAR; on EMR an ``s3://`` URI or a
                ``file://`` path on the cluster's local filesystem also
                works
    :param args: (optional) list of arguments for the JAR;
                 :py:data:`INPUT`, :py:data:`OUTPUT`, and
                 :py:data:`GENERIC_ARGS` are interpolated by mrjob
    :param jobconf: (optional) dict of Hadoop properties
    :param main_class: (optional) main class to run; defaults to the one
                       named in the JAR's manifest

    *jar* may also be given positionally.  See
    :ref:`non-hadoop-streaming-jar-steps` for sample usage.

    Its description looks like::

        {
            'type': 'jar',
            'jar': 'binks.jar.jar',
            'main_class': 'MyMainMan', # optional
            'args': ['argh', 'argh'], # optional
            'jobconf': { ... } # optional
        }
    """
    _STEP_TYPE = 'jar'
    _STEP_ATTRS = ['args', 'jar', 'jobconf', 'main_class']
    def __init__(self, jar, **kwargs):
        """*jar* is required; everything else is optional."""
        super(JarStep, self).__init__(jar=jar, **kwargs)
class SparkStep(_Step):
    """Step that runs Spark code defined on your job class.

    :param spark: callable holding your Spark code, with the same
                  signature as :py:meth:`~mrjob.job.MRJob.spark`
    :param jobconf: (optional) dict of Hadoop properties
    :param spark_args: (optional) list of extra arguments for
                       ``spark-submit``
                       (e.g. ``['--executor-memory', '2G']``)

    The description omits *spark* (the job itself runs it)::

        {
            'type': 'spark',
            'jobconf': { ... }, # optional
            'spark_args': ['--executor-memory', '2G'], # optional
        }
    """
    _STEP_TYPE = 'spark'
    _STEP_ATTRS = ['jobconf', 'spark', 'spark_args']
    # the job, not the runner, handles the spark callable
    _HIDDEN_ATTRS = ['spark']
    def __init__(self, spark, **kwargs):
        """*spark* is required; everything else is optional."""
        super(SparkStep, self).__init__(spark=spark, **kwargs)
class SparkJarStep(_Step):
    """Step that runs a separate JAR through Spark.

    :param jar: local path to the JAR; on EMR an ``s3://`` URI or a
                ``file://`` path on the cluster also works
    :param main_class: the application's main class
                       (e.g. ``'org.apache.spark.examples.SparkPi'``)
    :param args: (optional) list of arguments;
                 :py:data:`INPUT` and :py:data:`OUTPUT` are interpolated
    :param jobconf: (optional) dict of Hadoop properties
    :param spark_args: (optional) list of extra ``spark-submit``
                       arguments (e.g. ``['--executor-memory', '2G']``)

    *jar* and *main_class* may also be given positionally.

    Its description looks like::

        {
            'type': 'spark_jar',
            'jar': 'binks.jar.jar',
            'main_class': 'MyMainMan', # optional
            'args': ['argh', 'argh'], # optional
            'jobconf': { ... }, # optional
            'spark_args': ['--executor-memory', '2G'], # optional
        }
    """
    _STEP_TYPE = 'spark_jar'
    _STEP_ATTRS = ['args', 'jar', 'jobconf', 'main_class', 'spark_args']
    def __init__(self, jar, main_class, **kwargs):
        """*jar* and *main_class* are required."""
        super(SparkJarStep, self).__init__(
            jar=jar, main_class=main_class, **kwargs)
class SparkScriptStep(_Step):
    """Step that runs a separate Python script through Spark.

    :param script: local path to the script; on EMR an ``s3://`` URI or
                   a ``file://`` path on the cluster also works
    :param args: (optional) list of script arguments;
                 :py:data:`INPUT` and :py:data:`OUTPUT` are interpolated
    :param jobconf: (optional) dict of Hadoop properties
    :param spark_args: (optional) list of extra ``spark-submit``
                       arguments (e.g. ``['--executor-memory', '2G']``)

    *script* may also be given positionally.

    Its description looks like::

        {
            'type': 'spark_script',
            'script': 'my_spark_script.py',
            'args': ['script_arg1', 'script_arg2'],
            'jobconf': { ... }, # optional
            'spark_args': ['--executor-memory', '2G'], # optional
        }
    """
    _STEP_TYPE = 'spark_script'
    _STEP_ATTRS = ['args', 'jobconf', 'script', 'spark_args']
    def __init__(self, script, **kwargs):
        """*script* is required; everything else is optional."""
        super(SparkScriptStep, self).__init__(script=script, **kwargs)
def _is_spark_step_type(step_type):
"""Does the given step type indicate that it uses Spark?"""
return step_type.split('_')[0] == 'spark'
def _is_pyspark_step_type(step_type):
"""Does the given step type indicate that it uses Spark and Python?"""
return step_type in ('spark', 'spark_script')
| 37.198198 | 79 | 0.61303 |
import logging
from mrjob.py2 import string_types
from mrjob.util import cmd_line
STEP_TYPES = ('jar', 'spark', 'spark_jar', 'spark_script', 'streaming')
_MAPPER_FUNCS = ('mapper', 'mapper_init', 'mapper_final', 'mapper_cmd',
'mapper_pre_filter', 'mapper_raw')
_COMBINER_FUNCS = ('combiner', 'combiner_init', 'combiner_final',
'combiner_cmd', 'combiner_pre_filter')
_REDUCER_FUNCS = ('reducer', 'reducer_init', 'reducer_final', 'reducer_cmd',
'reducer_pre_filter')
_HADOOP_OPTS = ('jobconf',)
_JOB_STEP_FUNC_PARAMS = _MAPPER_FUNCS + _COMBINER_FUNCS + _REDUCER_FUNCS
_JOB_STEP_PARAMS = _JOB_STEP_FUNC_PARAMS + _HADOOP_OPTS
_JAR_STEP_KWARGS = ['args', 'main_class']
_SPARK_STEP_KWARGS = ['spark', 'spark_args']
_SPARK_JAR_STEP_KWARGS = ['args', 'jar', 'main_class', 'spark_args']
_SPARK_SCRIPT_STEP_KWARGS = ['args', 'script', 'spark_args']
INPUT = '<input>'
#: If this is passed as an argument to :py:class:`JarStep`,
#: :py:class:`SparkJarStep`, or :py:class:`SparkScriptStep`, it'll be replaced
OUTPUT = '<output>'
#: If this is passed as an argument to :py:class:`JarStep`,
#: it'll be replaced with generic hadoop args (-D and -libjars)
GENERIC_ARGS = '<generic args>'
log = logging.getLogger(__name__)
def _IDENTITY_MAPPER(key, value):
yield key, value
def _IDENTITY_REDUCER(key, values):
for value in values:
yield key, value
class StepFailedException(Exception):
_FIELDS = ('reason', 'step_num', 'num_steps', 'step_desc')
def __init__(
self, reason=None, step_num=None, num_steps=None, step_desc=None):
self.reason = reason
self.step_num = step_num
self.num_steps = num_steps
self.step_desc = step_desc
def __str__(self):
return '%s failed%s' % (
(self.step_desc or 'Step%s%s' % (
'' if self.step_num is None else ' %d' % (self.step_num + 1),
'' if (self.step_num is None or self.num_steps is None) else (
' of %d' % self.num_steps))),
'' if self.reason is None else ': %s' % self.reason)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join(('%s=%r' % (k, getattr(self, k))
for k in self._FIELDS
if getattr(self, k) is not None)))
class MRStep(object):
def __init__(self, **kwargs):
bad_kwargs = sorted(set(kwargs) - set(_JOB_STEP_PARAMS))
if bad_kwargs:
raise TypeError('MRStep() got an unexpected keyword argument %r' %
bad_kwargs[0])
if not set(kwargs) & set(_JOB_STEP_FUNC_PARAMS):
raise ValueError("Step has no mappers and no reducers")
self.has_explicit_mapper = any(
value for name, value in kwargs.items()
if name in _MAPPER_FUNCS)
self.has_explicit_combiner = any(
value for name, value in kwargs.items()
if name in _COMBINER_FUNCS)
self.has_explicit_reducer = any(
value for name, value in kwargs.items()
if name in _REDUCER_FUNCS)
steps = dict((f, None) for f in _JOB_STEP_PARAMS)
steps.update(kwargs)
def _check_conflict(func, other_funcs):
if steps[func]:
for other_func in other_funcs:
if steps[other_func] and other_func != func:
raise ValueError("Can't specify both %s and %s" % (
func, other_func))
_check_conflict('mapper_cmd', _MAPPER_FUNCS)
_check_conflict('mapper_raw', ('mapper', 'mapper_pre_filter'))
_check_conflict('combiner_cmd', _COMBINER_FUNCS)
_check_conflict('reducer_cmd', _REDUCER_FUNCS)
self._steps = steps
def __repr__(self):
not_none = dict((k, v) for k, v in self._steps.items()
if v is not None)
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (k, v) for k, v in not_none.items()))
def __eq__(self, other):
return (isinstance(other, MRStep) and self._steps == other._steps)
def __getitem__(self, key):
# always be prepared to run a mapper, since Hadoop Streaming requires
# it
if key == 'mapper' and self._steps['mapper'] is None:
return _IDENTITY_MAPPER
# identity reducer should only show up if you specified 'reducer_init',
# 'reducer_final', or 'reducer_pre_filter', but not 'reducer' itself
if (key == 'reducer' and self._steps['reducer'] is None and
self.has_explicit_reducer):
return _IDENTITY_REDUCER
# identity combiner should only show up if you specified
# 'combiner_init', 'combiner_final', or 'combiner_pre_filter', but not
# 'combiner' itself
if (key == 'combiner' and self._steps['combiner'] is None and
self.has_explicit_combiner):
return _IDENTITY_REDUCER
return self._steps[key]
def _render_substep(self, cmd_key, pre_filter_key):
    """Build the description dict for one substep (mapper/combiner/reducer).

    Returns a ``{'type': 'command', ...}`` dict when a command was given,
    otherwise a ``{'type': 'script', ...}`` dict with an optional pre-filter.
    """
    cmd = self._steps[cmd_key]
    pre_filter = self._steps[pre_filter_key] if pre_filter_key else None
    if not cmd:
        # script substep, optionally preceded by a filter command
        substep = {'type': 'script'}
        if pre_filter:
            substep['pre_filter'] = pre_filter
        return substep
    # command substep: normalize non-string commands to a command line
    if not isinstance(cmd, string_types):
        cmd = cmd_line(cmd)
    if pre_filter:
        raise ValueError('Cannot specify both %s and %s' % (
            cmd_key, pre_filter_key))
    return {'type': 'command', 'command': cmd}
def render_mapper(self):
    """Return the description dict for the mapper substep."""
    return self._render_substep('mapper_cmd', 'mapper_pre_filter')
def render_combiner(self):
    """Return the description dict for the combiner substep."""
    return self._render_substep('combiner_cmd', 'combiner_pre_filter')
def render_reducer(self):
    """Return the description dict for the reducer substep."""
    return self._render_substep('reducer_cmd', 'reducer_pre_filter')
def description(self, step_num=0):
    """Render this streaming step as a dict for the runner.

    A mapper is included when this is the first step (Hadoop Streaming
    requires one) or when a mapper/combiner was given explicitly.
    """
    desc = {'type': 'streaming'}
    needs_mapper = (step_num == 0 or
                    self.has_explicit_mapper or
                    self.has_explicit_combiner)
    if needs_mapper:
        desc['mapper'] = self.render_mapper()
    if self.has_explicit_combiner:
        desc['combiner'] = self.render_combiner()
    if self.has_explicit_reducer:
        desc['reducer'] = self.render_reducer()
    # raw mappers read whole files, which requires an input manifest
    if self._steps['mapper_raw']:
        desc['input_manifest'] = True
    # TODO: verify this is a dict, convert booleans to strings
    if self._steps['jobconf']:
        desc['jobconf'] = self._steps['jobconf']
    return desc
class _Step(object):
    """Base class for non-streaming steps (JAR, Spark, etc.).

    Subclasses declare their step type and allowed keyword arguments via
    class attributes; this base handles validation, default values,
    equality, repr, and rendering to a step-description dict.

    MRStep is different enough that I'm going to leave it as-is for now.
    """
    # value of the 'type' key in description()
    _STEP_TYPE = None

    # allowed keyword arguments; also the instance attribute names
    _STEP_ATTRS = []

    # are handled by the job, not the runner (excluded from description())
    _HIDDEN_ATTRS = []

    # map from keyword argument to type(s), if we check. You can also use
    # "callable" (which is actually a builtin, not a type) for callables
    _STEP_ATTR_TYPES = {
        'args': (list, tuple),
        'jar': string_types,
        'jobconf': dict,
        'main_class': string_types,
        'script': string_types,
        'spark': callable,
        'spark_args': (list, tuple),
    }

    # map from keyword argument to constructor that produces
    # default values
    _STEP_ATTR_DEFAULTS = {
        'args': list,
        'jobconf': dict,
        'spark_args': list,
    }

    # use your own __init__() method to make arguments required
    def __init__(self, **kwargs):
        """Validate *kwargs* against _STEP_ATTRS/_STEP_ATTR_TYPES and set
        each attribute, substituting a fresh default for missing/None values.

        Raises TypeError on unknown keyword arguments or type mismatches.
        """
        bad_kwargs = sorted(set(kwargs) - set(self._STEP_ATTRS))
        if bad_kwargs:
            raise TypeError('%s() got unexpected keyword arguments: %s' % (
                self.__class__.__name__, ', '.join(bad_kwargs)))

        for k in self._STEP_ATTRS:
            v = kwargs.get(k)
            if v is None:
                v = self._default(k)
            elif k in self._STEP_ATTR_TYPES:
                attr_type = self._STEP_ATTR_TYPES[k]
                if attr_type is callable:
                    # "callable" is a builtin, not a type, so it needs a
                    # callable() check rather than isinstance()
                    if not callable(v):
                        raise TypeError('%s is not callable: %r' % (k, v))
                elif not isinstance(v, attr_type):
                    raise TypeError('%s is not an instance of %r: %r' % (
                        k, self._STEP_ATTR_TYPES[k], v))
            setattr(self, k, v)

    def __repr__(self):
        """Show only attributes that differ from their defaults."""
        kwargs = dict(
            (k, getattr(self, k))
            for k in self._STEP_ATTR_TYPES if hasattr(self, k))
        # use %r (not %s) so values are quoted, matching MRStep.__repr__ and
        # keeping the output unambiguous (jar='foo.jar', not jar=foo.jar)
        return '%s(%s)' % (
            self.__class__.__name__, ', '.join(
                '%s=%r' % (k, v)
                for k, v in sorted(kwargs.items())
                if v != self._default(k)))

    def __eq__(self, other):
        """Equal iff *other* is the same step class with equal attributes."""
        return (isinstance(other, self.__class__) and
                all(getattr(self, key) == getattr(other, key)
                    for key in set(self._STEP_ATTRS)))

    def _default(self, k):
        """Return a fresh default value for attribute *k* (None if unset)."""
        if k in self._STEP_ATTR_DEFAULTS:
            return self._STEP_ATTR_DEFAULTS[k]()
        else:
            return None

    def description(self, step_num=0):
        """Render this step as a dict for the runner.

        *step_num* is ignored but kept for interface parity with
        MRStep.description().
        """
        result = dict(
            (k, getattr(self, k))
            for k in self._STEP_ATTRS
            if k not in self._HIDDEN_ATTRS
        )
        result['type'] = self._STEP_TYPE
        return result
class JarStep(_Step):
    """A step that runs a Hadoop JAR.

    *jar* is required; optional keyword arguments are ``args``, ``jobconf``,
    and ``main_class`` (validated by :class:`_Step`).
    """
    _STEP_TYPE = 'jar'
    # allowed/validated keyword arguments (also instance attribute names)
    _STEP_ATTRS = ['args', 'jar', 'jobconf', 'main_class']

    def __init__(self, jar, **kwargs):
        """Create a JAR step; *jar* identifies the JAR to run."""
        super(JarStep, self).__init__(jar=jar, **kwargs)
class SparkStep(_Step):
    """A step that runs a Spark function defined in the job.

    *spark* must be callable. It is excluded from description() because it
    is handled by the job, not the runner (see ``_HIDDEN_ATTRS``).
    """
    _STEP_TYPE = 'spark'
    _STEP_ATTRS = ['jobconf', 'spark', 'spark_args']
    # handled by the job, not the runner
    _HIDDEN_ATTRS = ['spark']

    def __init__(self, spark, **kwargs):
        """Create a Spark step from the callable *spark*."""
        super(SparkStep, self).__init__(spark=spark, **kwargs)
class SparkJarStep(_Step):
    """A step that runs a Spark JAR.

    Both *jar* and *main_class* are required; optional keyword arguments
    are ``args``, ``jobconf``, and ``spark_args``.
    """
    _STEP_TYPE = 'spark_jar'
    _STEP_ATTRS = ['args', 'jar', 'jobconf', 'main_class', 'spark_args']

    def __init__(self, jar, main_class, **kwargs):
        """Create a Spark JAR step for *main_class* inside *jar*."""
        super(SparkJarStep, self).__init__(
            jar=jar, main_class=main_class, **kwargs)
class SparkScriptStep(_Step):
    """A step that runs a Spark script.

    *script* is required; optional keyword arguments are ``args``,
    ``jobconf``, and ``spark_args``.
    """
    _STEP_TYPE = 'spark_script'
    _STEP_ATTRS = ['args', 'jobconf', 'script', 'spark_args']

    def __init__(self, script, **kwargs):
        """Create a Spark script step for *script*."""
        super(SparkScriptStep, self).__init__(script=script, **kwargs)
def _is_spark_step_type(step_type):
    """True for any Spark step type: 'spark', 'spark_jar', 'spark_script'.

    Matches on the segment before the first underscore, so e.g.
    'sparkly' does NOT match.
    """
    first_segment, _, _ = step_type.partition('_')
    return first_segment == 'spark'
def _is_pyspark_step_type(step_type):
    """True for Spark step types that run Python code (not 'spark_jar')."""
    return step_type == 'spark' or step_type == 'spark_script'
| true | true |
f7fca4940e32556179329965b4aff27bc692c7ae | 16,327 | py | Python | eznlp/model/decoder/boundary_selection.py | syuoni/eznlp | c0380c6c30d68b4df1769150424735c04ea9d714 | [
"Apache-2.0"
] | 9 | 2021-08-06T07:12:55.000Z | 2022-03-26T08:20:59.000Z | eznlp/model/decoder/boundary_selection.py | Hhx1999/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 | [
"Apache-2.0"
] | 1 | 2022-03-11T13:27:29.000Z | 2022-03-16T11:52:14.000Z | eznlp/model/decoder/boundary_selection.py | Hhx1999/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 | [
"Apache-2.0"
] | 3 | 2021-11-15T03:24:24.000Z | 2022-03-09T09:36:05.000Z | # -*- coding: utf-8 -*-
from typing import List, Tuple
from collections import Counter
import logging
import torch
from ...wrapper import TargetWrapper, Batch
from ...utils.chunk import detect_nested, filter_clashed_by_priority
from ...nn.modules import CombinedDropout, SoftLabelCrossEntropyLoss
from ...nn.init import reinit_embedding_, reinit_layer_
from ...metrics import precision_recall_f1_report
from ..encoder import EncoderConfig
from .base import DecoderMixinBase, SingleDecoderConfigBase, DecoderBase
logger = logging.getLogger(__name__)
class BoundarySelectionDecoderMixin(DecoderMixinBase):
    """Label-vocabulary handling and example/batch plumbing shared by the
    boundary-selection decoder and its config."""

    @property
    def idx2label(self):
        return self._idx2label

    @idx2label.setter
    def idx2label(self, idx2label: List[str]):
        # keep the inverse mapping in sync; assigning None clears both
        self._idx2label = idx2label
        if idx2label is None:
            self.label2idx = None
        else:
            self.label2idx = {label: i for i, label in enumerate(idx2label)}

    @property
    def voc_dim(self):
        """Number of labels, including the none-label."""
        return len(self.label2idx)

    @property
    def none_idx(self):
        """Index of the none (non-entity) label."""
        return self.label2idx[self.none_label]

    def exemplify(self, data_entry: dict, training: bool=True):
        """Wrap one data entry into its span-classification target."""
        return {'boundaries_obj': Boundaries(data_entry, self, training=training)}

    def batchify(self, batch_examples: List[dict]):
        """Collect the per-example targets into a batch dict."""
        return {'boundaries_objs': [example['boundaries_obj'] for example in batch_examples]}

    def retrieve(self, batch: Batch):
        """Return the chunk lists stored on the batched targets."""
        return [obj.chunks for obj in batch.boundaries_objs]

    def evaluate(self, y_gold: List[List[tuple]], y_pred: List[List[tuple]]):
        """Micro-F1 for entity recognition.

        References
        ----------
        https://www.clips.uantwerpen.be/conll2000/chunking/output.html
        """
        scores, ave_scores = precision_recall_f1_report(y_gold, y_pred)
        return ave_scores['micro']['f1']
def _spans_from_surrounding(span: Tuple[int], distance: int, num_tokens: int):
    """Spans from the surrounding area of the given `span`.

    Walks the four quadrants of boundary offsets whose absolute start/end
    shifts sum to `distance`, yielding only spans that stay inside
    [0, num_tokens] with start < end.
    """
    base_start, base_end = span
    for k in range(distance):
        quadrant_offsets = ((-k, -distance + k),
                            (-distance + k, k),
                            (k, distance - k),
                            (distance - k, -k))
        for start_offset, end_offset in quadrant_offsets:
            new_start = base_start + start_offset
            new_end = base_end + end_offset
            if 0 <= new_start < new_end <= num_tokens:
                yield (new_start, new_end)
def _spans_from_upper_triangular(seq_len: int):
    """Spans from the upper triangular area.

    Enumerates every (start, end) with 0 <= start < end <= seq_len,
    ordered by start, then by end.
    """
    for begin in range(seq_len):
        for finish in range(begin + 1, seq_len + 1):
            yield (begin, finish)
class Boundaries(TargetWrapper):
    """A wrapper of boundaries with underlying chunks.

    Builds the span-classification target for one example: a mapping from
    each (start, end) span to its gold label (hard ids, or a soft label
    distribution when boundary/label smoothing is on), plus an optional
    negative-sampling mask used at training time.

    Parameters
    ----------
    data_entry: dict
        {'tokens': TokenSequence,
         'chunks': List[tuple]}
    config : BoundarySelectionDecoderMixin
        Supplies `label2idx`, `none_idx`, the sampling rates and the
        smoothing hyper-parameters.
    training : bool
        Negative sampling is only applied in training mode.
    """
    def __init__(self, data_entry: dict, config: BoundarySelectionDecoderMixin, training: bool=True):
        super().__init__(training)

        # `chunks` may be absent at inference time
        self.chunks = data_entry.get('chunks', None)
        num_tokens = len(data_entry['tokens'])

        if training and config.neg_sampling_rate < 1:
            # upper-triangular mask of all valid spans (start <= end-1)
            non_mask = (torch.arange(num_tokens) - torch.arange(num_tokens).unsqueeze(-1) >= 0)
            # positive spans are always kept
            pos_non_mask = torch.zeros_like(non_mask)
            for label, start, end in self.chunks:
                pos_non_mask[start, end-1] = True

            # Bernoulli-sample negatives at the base rate
            neg_sampled = torch.empty_like(non_mask).bernoulli(p=config.neg_sampling_rate)

            if config.hard_neg_sampling_rate > config.neg_sampling_rate:
                # "hard" negatives: spans surrounding a gold chunk, up to
                # `hard_neg_sampling_size` boundary-offset distance
                hard_neg_non_mask = torch.zeros_like(non_mask)
                for label, start, end in self.chunks:
                    for dist in range(1, config.hard_neg_sampling_size+1):
                        for sur_start, sur_end in _spans_from_surrounding((start, end), dist, num_tokens):
                            hard_neg_non_mask[sur_start, sur_end-1] = True

                if config.hard_neg_sampling_rate < 1:
                    # Solve: 1 - (1 - p_{neg})(1 - p_{comp}) = p_{hard}
                    # Get: p_{comp} = (p_{hard} - p_{neg}) / (1 - p_{neg})
                    comp_sampling_rate = (config.hard_neg_sampling_rate - config.neg_sampling_rate) / (1 - config.neg_sampling_rate)
                    comp_sampled = torch.empty_like(non_mask).bernoulli(p=comp_sampling_rate)
                    neg_sampled = neg_sampled | (comp_sampled & hard_neg_non_mask)
                else:
                    neg_sampled = neg_sampled | hard_neg_non_mask

            # final mask: all positives plus the sampled (valid) negatives
            self.non_mask = pos_non_mask | (neg_sampled & non_mask)

        if self.chunks is not None:
            if config.sb_epsilon <= 0 and config.sl_epsilon <= 0:
                # Cross entropy loss: hard label ids per span
                self.boundary2label_id = torch.full((num_tokens, num_tokens), config.none_idx, dtype=torch.long)
                for label, start, end in self.chunks:
                    self.boundary2label_id[start, end-1] = config.label2idx[label]
            else:
                # Soft label loss for either boundary or label smoothing:
                # a (start, end, label) probability tensor
                self.boundary2label_id = torch.zeros(num_tokens, num_tokens, config.voc_dim, dtype=torch.float)
                for label, start, end in self.chunks:
                    label_id = config.label2idx[label]
                    self.boundary2label_id[start, end-1, label_id] += (1 - config.sb_epsilon)

                    # spread sb_epsilon over surrounding spans, decaying by distance
                    for dist in range(1, config.sb_size+1):
                        eps_per_span = config.sb_epsilon / (config.sb_size * dist * 4)
                        sur_spans = list(_spans_from_surrounding((start, end), dist, num_tokens))
                        for sur_start, sur_end in sur_spans:
                            self.boundary2label_id[sur_start, sur_end-1, label_id] += (eps_per_span*config.sb_adj_factor)
                        # Absorb the probabilities assigned to illegal positions
                        self.boundary2label_id[start, end-1, label_id] += eps_per_span * (dist * 4 - len(sur_spans))

                # In very rare cases (e.g., ACE 2005), multiple entities may have the same span but different types
                overflow_indic = (self.boundary2label_id.sum(dim=-1) > 1)
                if overflow_indic.any().item():
                    self.boundary2label_id[overflow_indic] = torch.nn.functional.normalize(self.boundary2label_id[overflow_indic], p=1, dim=-1)
                # remaining probability mass goes to the none-label
                self.boundary2label_id[:, :, config.none_idx] = 1 - self.boundary2label_id.sum(dim=-1)

                if config.sl_epsilon > 0:
                    # Do not smooth to `<none>` label
                    pos_indic = (torch.arange(config.voc_dim) != config.none_idx)
                    self.boundary2label_id[:, :, pos_indic] = (self.boundary2label_id[:, :, pos_indic] * (1-config.sl_epsilon) +
                                                               self.boundary2label_id[:, :, pos_indic].sum(dim=-1, keepdim=True)*config.sl_epsilon / (config.voc_dim-1))
class BoundarySelectionDecoderConfig(SingleDecoderConfigBase, BoundarySelectionDecoderMixin):
    """Configuration and vocab builder for `BoundarySelectionDecoder`.

    NOTE(review): `sl_epsilon` is read here and by `Boundaries` but never
    popped in this __init__ — presumably set by SingleDecoderConfigBase's
    __init__ via the remaining kwargs; verify.
    """
    def __init__(self, **kwargs):
        self.use_biaffine = kwargs.pop('use_biaffine', True)
        self.affine = kwargs.pop('affine', EncoderConfig(arch='FFN', hid_dim=150, num_layers=1, in_drop_rates=(0.4, 0.0, 0.0), hid_drop_rate=0.2))

        self.max_len = kwargs.pop('max_len', None)
        self.max_span_size = kwargs.pop('max_span_size', 50)
        self.size_emb_dim = kwargs.pop('size_emb_dim', 25)
        self.hid_drop_rates = kwargs.pop('hid_drop_rates', (0.2, 0.0, 0.0))

        self.neg_sampling_rate = kwargs.pop('neg_sampling_rate', 1.0)
        self.hard_neg_sampling_rate = kwargs.pop('hard_neg_sampling_rate', 1.0)
        # the hard-negative rate can never be below the base negative rate
        self.hard_neg_sampling_rate = max(self.hard_neg_sampling_rate, self.neg_sampling_rate)
        self.hard_neg_sampling_size = kwargs.pop('hard_neg_sampling_size', 5)

        self.none_label = kwargs.pop('none_label', '<none>')
        self.idx2label = kwargs.pop('idx2label', None)
        # Note: non-nested overlapping chunks are never allowed
        self.allow_nested = kwargs.pop('allow_nested', None)

        # Boundary smoothing epsilon
        self.sb_epsilon = kwargs.pop('sb_epsilon', 0.0)
        self.sb_size = kwargs.pop('sb_size', 1)
        self.sb_adj_factor = kwargs.pop('sb_adj_factor', 1.0)
        super().__init__(**kwargs)

    @property
    def name(self):
        """Short name combining the affine architecture and the criterion."""
        return self._name_sep.join([self.affine.arch, self.criterion])

    def __repr__(self):
        repr_attr_dict = {key: getattr(self, key) for key in ['in_dim', 'hid_drop_rates', 'criterion']}
        return self._repr_non_config_attrs(repr_attr_dict)

    @property
    def in_dim(self):
        # input dimension is delegated to the affine encoder config
        return self.affine.in_dim

    @in_dim.setter
    def in_dim(self, dim: int):
        self.affine.in_dim = dim

    @property
    def criterion(self):
        """Criterion tag; boundary smoothing overrides the base criterion."""
        if self.sb_epsilon > 0:
            return f"SB({self.sb_epsilon:.2f}, {self.sb_size})"
        else:
            return super().criterion

    def instantiate_criterion(self, **kwargs):
        """Build the loss; smoothed targets need a soft-label cross entropy."""
        if self.criterion.lower().startswith(('sb', 'sl')):
            # For boundary/label smoothing, the `Boundaries` object has been accordingly changed;
            # hence, do not use `SmoothLabelCrossEntropyLoss`
            return SoftLabelCrossEntropyLoss(**kwargs)
        else:
            return super().instantiate_criterion(**kwargs)

    def build_vocab(self, *partitions):
        """Scan the data partitions to set the label vocab, nesting flag,
        and maximum sequence length."""
        counter = Counter(label for data in partitions for entry in data for label, start, end in entry['chunks'])
        self.idx2label = [self.none_label] + list(counter.keys())

        self.allow_nested = any(detect_nested(entry['chunks']) for data in partitions for entry in data)
        if self.allow_nested:
            logger.info("Nested chunks detected, nested chunks are allowed in decoding...")
        else:
            logger.info("No nested chunks detected, only flat chunks are allowed in decoding...")

        self.max_len = max(len(data_entry['tokens']) for data in partitions for data_entry in data)

    def instantiate(self):
        """Create the decoder module from this config."""
        return BoundarySelectionDecoder(self)
class BoundarySelectionDecoder(DecoderBase, BoundarySelectionDecoderMixin):
    """Biaffine span classifier: scores every (start, end) span and decodes
    the highest-confidence, non-clashing chunks.

    The span score is `start' U end + W [start; end; size_emb] + b`, built
    from (optionally separate) affine projections of the encoder states.
    """
    def __init__(self, config: BoundarySelectionDecoderConfig):
        super().__init__()
        self.none_label = config.none_label
        self.idx2label = config.idx2label
        self.allow_nested = config.allow_nested

        if config.use_biaffine:
            # separate projections for span-start and span-end roles
            self.affine_start = config.affine.instantiate()
            self.affine_end = config.affine.instantiate()
        else:
            # one shared projection for both roles
            self.affine = config.affine.instantiate()

        if config.size_emb_dim > 0:
            # embedding over clipped span sizes
            self.size_embedding = torch.nn.Embedding(config.max_span_size, config.size_emb_dim)
            reinit_embedding_(self.size_embedding)

        # Use buffer to accelerate computation
        # Note: size_id = size - 1
        self.register_buffer('_span_size_ids', torch.arange(config.max_len) - torch.arange(config.max_len).unsqueeze(-1))
        # Create `_span_non_mask` before changing values of `_span_size_ids`
        self.register_buffer('_span_non_mask', self._span_size_ids >= 0)
        # clip size ids into [0, max_span_size - 1]
        self._span_size_ids.masked_fill_(self._span_size_ids < 0, 0)
        self._span_size_ids.masked_fill_(self._span_size_ids >= config.max_span_size, config.max_span_size-1)

        self.dropout = CombinedDropout(*config.hid_drop_rates)
        # biaffine tensor, linear weight, and bias of the scorer
        self.U = torch.nn.Parameter(torch.empty(config.voc_dim, config.affine.out_dim, config.affine.out_dim))
        self.W = torch.nn.Parameter(torch.empty(config.voc_dim, config.affine.out_dim*2 + config.size_emb_dim))
        self.b = torch.nn.Parameter(torch.empty(config.voc_dim))
        torch.nn.init.orthogonal_(self.U.data)
        torch.nn.init.orthogonal_(self.W.data)
        torch.nn.init.zeros_(self.b.data)

        # summed (not averaged) over the sampled spans of each example
        self.criterion = config.instantiate_criterion(reduction='sum')

    def _get_span_size_ids(self, seq_len: int):
        # clipped (end - start) ids, cropped to the current sequence length
        return self._span_size_ids[:seq_len, :seq_len]

    def _get_span_non_mask(self, seq_len: int):
        # upper-triangular mask of valid spans for the current length
        return self._span_non_mask[:seq_len, :seq_len]

    def compute_scores(self, batch: Batch, full_hidden: torch.Tensor):
        """Return per-span label scores of shape (batch, start, end, voc)."""
        if hasattr(self, 'affine_start'):
            affined_start = self.affine_start(full_hidden, batch.mask)
            affined_end = self.affine_end(full_hidden, batch.mask)
        else:
            affined_start = self.affine(full_hidden, batch.mask)
            affined_end = self.affine(full_hidden, batch.mask)

        # affined_start: (batch, start_step, affine_dim) -> (batch, 1, start_step, affine_dim)
        # affined_end: (batch, end_step, affine_dim) -> (batch, 1, affine_dim, end_step)
        # scores1: (batch, 1, start_step, affine_dim) * (voc_dim, affine_dim, affine_dim) * (batch, 1, affine_dim, end_step) -> (batch, voc_dim, start_step, end_step)
        scores1 = self.dropout(affined_start).unsqueeze(1).matmul(self.U).matmul(self.dropout(affined_end).permute(0, 2, 1).unsqueeze(1))

        # affined_cat: (batch, start_step, end_step, affine_dim*2)
        affined_cat = torch.cat([self.dropout(affined_start).unsqueeze(2).expand(-1, -1, affined_end.size(1), -1),
                                 self.dropout(affined_end).unsqueeze(1).expand(-1, affined_start.size(1), -1, -1)], dim=-1)

        if hasattr(self, 'size_embedding'):
            # size_embedded: (start_step, end_step, emb_dim)
            size_embedded = self.size_embedding(self._get_span_size_ids(full_hidden.size(1)))
            # affined_cat: (batch, start_step, end_step, affine_dim*2 + emb_dim)
            affined_cat = torch.cat([affined_cat, self.dropout(size_embedded).unsqueeze(0).expand(full_hidden.size(0), -1, -1, -1)], dim=-1)

        # scores2: (voc_dim, affine_dim*2 + emb_dim) * (batch, start_step, end_step, affine_dim*2 + emb_dim, 1) -> (batch, start_step, end_step, voc_dim, 1)
        scores2 = self.W.matmul(affined_cat.unsqueeze(-1))
        # scores: (batch, start_step, end_step, voc_dim)
        return scores1.permute(0, 2, 3, 1) + scores2.squeeze(-1) + self.b

    def forward(self, batch: Batch, full_hidden: torch.Tensor):
        """Return a (batch,) tensor of per-example summed span losses."""
        batch_scores = self.compute_scores(batch, full_hidden)

        losses = []
        for curr_scores, boundaries_obj, curr_len in zip(batch_scores, batch.boundaries_objs, batch.seq_lens.cpu().tolist()):
            # use the sampled mask when present, else all valid spans
            curr_non_mask = getattr(boundaries_obj, 'non_mask', self._get_span_non_mask(curr_len))
            loss = self.criterion(curr_scores[:curr_len, :curr_len][curr_non_mask], boundaries_obj.boundary2label_id[curr_non_mask])
            losses.append(loss)
        return torch.stack(losses)

    def decode(self, batch: Batch, full_hidden: torch.Tensor):
        """Return a list (per example) of decoded (label, start, end) chunks,
        keeping only non-clashing chunks by descending confidence."""
        batch_scores = self.compute_scores(batch, full_hidden)

        batch_chunks = []
        for curr_scores, curr_len in zip(batch_scores, batch.seq_lens.cpu().tolist()):
            curr_non_mask = self._get_span_non_mask(curr_len)

            confidences, label_ids = curr_scores[:curr_len, :curr_len][curr_non_mask].softmax(dim=-1).max(dim=-1)
            labels = [self.idx2label[i] for i in label_ids.cpu().tolist()]
            chunks = [(label, start, end) for label, (start, end) in zip(labels, _spans_from_upper_triangular(curr_len)) if label != self.none_label]
            confidences = [conf for label, conf in zip(labels, confidences.cpu().tolist()) if label != self.none_label]
            assert len(confidences) == len(chunks)

            # Sort chunks from high to low confidences
            chunks = [ck for _, ck in sorted(zip(confidences, chunks), reverse=True)]
            chunks = filter_clashed_by_priority(chunks, allow_nested=self.allow_nested)

            batch_chunks.append(chunks)
        return batch_chunks
| 48.737313 | 168 | 0.62412 |
from typing import List, Tuple
from collections import Counter
import logging
import torch
from ...wrapper import TargetWrapper, Batch
from ...utils.chunk import detect_nested, filter_clashed_by_priority
from ...nn.modules import CombinedDropout, SoftLabelCrossEntropyLoss
from ...nn.init import reinit_embedding_, reinit_layer_
from ...metrics import precision_recall_f1_report
from ..encoder import EncoderConfig
from .base import DecoderMixinBase, SingleDecoderConfigBase, DecoderBase
logger = logging.getLogger(__name__)
class BoundarySelectionDecoderMixin(DecoderMixinBase):
    """Label-vocabulary handling and example/batch plumbing shared by the
    boundary-selection decoder and its config."""
    @property
    def idx2label(self):
        return self._idx2label
    @idx2label.setter
    def idx2label(self, idx2label: List[str]):
        # keep the inverse mapping in sync; assigning None clears both
        self._idx2label = idx2label
        self.label2idx = {l: i for i, l in enumerate(idx2label)} if idx2label is not None else None
    @property
    def voc_dim(self):
        # number of labels, including the none-label
        return len(self.label2idx)
    @property
    def none_idx(self):
        # index of the none (non-entity) label
        return self.label2idx[self.none_label]
    def exemplify(self, data_entry: dict, training: bool=True):
        """Wrap one data entry into its span-classification target."""
        return {'boundaries_obj': Boundaries(data_entry, self, training=training)}
    def batchify(self, batch_examples: List[dict]):
        """Collect the per-example targets into a batch dict."""
        return {'boundaries_objs': [ex['boundaries_obj'] for ex in batch_examples]}
    def retrieve(self, batch: Batch):
        """Return the chunk lists stored on the batched targets."""
        return [boundaries_obj.chunks for boundaries_obj in batch.boundaries_objs]
    def evaluate(self, y_gold: List[List[tuple]], y_pred: List[List[tuple]]):
        """Micro-averaged F1 over predicted vs. gold chunk lists."""
        scores, ave_scores = precision_recall_f1_report(y_gold, y_pred)
        return ave_scores['micro']['f1']
def _spans_from_surrounding(span: Tuple[int], distance: int, num_tokens: int):
    """Yield spans whose start/end offsets from `span` sum (in absolute
    value) to `distance`, clipped to [0, num_tokens] with start < end."""
    for k in range(distance):
        for start_offset, end_offset in [(-k, -distance+k),
                                         (-distance+k, k),
                                         (k, distance-k),
                                         (distance-k, -k)]:
            start, end = span[0]+start_offset, span[1]+end_offset
            if 0 <= start < end <= num_tokens:
                yield (start, end)
def _spans_from_upper_triangular(seq_len: int):
    """Yield every (start, end) span with 0 <= start < end <= seq_len."""
    for start in range(seq_len):
        for end in range(start+1, seq_len+1):
            yield (start, end)
class Boundaries(TargetWrapper):
    """A wrapper of boundaries with underlying chunks.

    Builds the span-classification target for one `data_entry`
    ({'tokens': ..., 'chunks': ...}): gold label ids (or a soft label
    distribution when smoothing is on) for every (start, end) span, plus
    an optional negative-sampling mask used at training time.
    """
    def __init__(self, data_entry: dict, config: BoundarySelectionDecoderMixin, training: bool=True):
        super().__init__(training)
        # `chunks` may be absent at inference time
        self.chunks = data_entry.get('chunks', None)
        num_tokens = len(data_entry['tokens'])
        if training and config.neg_sampling_rate < 1:
            # upper-triangular mask of valid spans; positives always kept
            non_mask = (torch.arange(num_tokens) - torch.arange(num_tokens).unsqueeze(-1) >= 0)
            pos_non_mask = torch.zeros_like(non_mask)
            for label, start, end in self.chunks:
                pos_non_mask[start, end-1] = True
            neg_sampled = torch.empty_like(non_mask).bernoulli(p=config.neg_sampling_rate)
            if config.hard_neg_sampling_rate > config.neg_sampling_rate:
                # "hard" negatives: spans surrounding a gold chunk
                hard_neg_non_mask = torch.zeros_like(non_mask)
                for label, start, end in self.chunks:
                    for dist in range(1, config.hard_neg_sampling_size+1):
                        for sur_start, sur_end in _spans_from_surrounding((start, end), dist, num_tokens):
                            hard_neg_non_mask[sur_start, sur_end-1] = True
                if config.hard_neg_sampling_rate < 1:
                    # complementary rate so the combined hard-negative
                    # probability equals hard_neg_sampling_rate
                    comp_sampling_rate = (config.hard_neg_sampling_rate - config.neg_sampling_rate) / (1 - config.neg_sampling_rate)
                    comp_sampled = torch.empty_like(non_mask).bernoulli(p=comp_sampling_rate)
                    neg_sampled = neg_sampled | (comp_sampled & hard_neg_non_mask)
                else:
                    neg_sampled = neg_sampled | hard_neg_non_mask
            self.non_mask = pos_non_mask | (neg_sampled & non_mask)
        if self.chunks is not None:
            if config.sb_epsilon <= 0 and config.sl_epsilon <= 0:
                # hard label ids per span (cross-entropy targets)
                self.boundary2label_id = torch.full((num_tokens, num_tokens), config.none_idx, dtype=torch.long)
                for label, start, end in self.chunks:
                    self.boundary2label_id[start, end-1] = config.label2idx[label]
            else:
                # soft (start, end, label) distribution for smoothing losses
                self.boundary2label_id = torch.zeros(num_tokens, num_tokens, config.voc_dim, dtype=torch.float)
                for label, start, end in self.chunks:
                    label_id = config.label2idx[label]
                    self.boundary2label_id[start, end-1, label_id] += (1 - config.sb_epsilon)
                    # spread sb_epsilon over surrounding spans; mass that
                    # would fall on illegal spans is returned to the chunk
                    for dist in range(1, config.sb_size+1):
                        eps_per_span = config.sb_epsilon / (config.sb_size * dist * 4)
                        sur_spans = list(_spans_from_surrounding((start, end), dist, num_tokens))
                        for sur_start, sur_end in sur_spans:
                            self.boundary2label_id[sur_start, sur_end-1, label_id] += (eps_per_span*config.sb_adj_factor)
                        self.boundary2label_id[start, end-1, label_id] += eps_per_span * (dist * 4 - len(sur_spans))
                # re-normalize spans whose mass exceeds 1 (e.g. two entities
                # sharing the same span with different labels)
                overflow_indic = (self.boundary2label_id.sum(dim=-1) > 1)
                if overflow_indic.any().item():
                    self.boundary2label_id[overflow_indic] = torch.nn.functional.normalize(self.boundary2label_id[overflow_indic], p=1, dim=-1)
                # remaining probability mass goes to the none-label
                self.boundary2label_id[:, :, config.none_idx] = 1 - self.boundary2label_id.sum(dim=-1)
                if config.sl_epsilon > 0:
                    # label smoothing over positive labels only
                    pos_indic = (torch.arange(config.voc_dim) != config.none_idx)
                    self.boundary2label_id[:, :, pos_indic] = (self.boundary2label_id[:, :, pos_indic] * (1-config.sl_epsilon) +
                                                               self.boundary2label_id[:, :, pos_indic].sum(dim=-1, keepdim=True)*config.sl_epsilon / (config.voc_dim-1))
class BoundarySelectionDecoderConfig(SingleDecoderConfigBase, BoundarySelectionDecoderMixin):
    """Configuration and vocab builder for `BoundarySelectionDecoder`.

    NOTE(review): `sl_epsilon` is read by `Boundaries` but not popped here —
    presumably handled by SingleDecoderConfigBase.__init__; verify.
    """
    def __init__(self, **kwargs):
        self.use_biaffine = kwargs.pop('use_biaffine', True)
        self.affine = kwargs.pop('affine', EncoderConfig(arch='FFN', hid_dim=150, num_layers=1, in_drop_rates=(0.4, 0.0, 0.0), hid_drop_rate=0.2))
        self.max_len = kwargs.pop('max_len', None)
        self.max_span_size = kwargs.pop('max_span_size', 50)
        self.size_emb_dim = kwargs.pop('size_emb_dim', 25)
        self.hid_drop_rates = kwargs.pop('hid_drop_rates', (0.2, 0.0, 0.0))
        self.neg_sampling_rate = kwargs.pop('neg_sampling_rate', 1.0)
        self.hard_neg_sampling_rate = kwargs.pop('hard_neg_sampling_rate', 1.0)
        # the hard-negative rate can never be below the base negative rate
        self.hard_neg_sampling_rate = max(self.hard_neg_sampling_rate, self.neg_sampling_rate)
        self.hard_neg_sampling_size = kwargs.pop('hard_neg_sampling_size', 5)
        self.none_label = kwargs.pop('none_label', '<none>')
        self.idx2label = kwargs.pop('idx2label', None)
        # non-nested overlapping chunks are never allowed
        self.allow_nested = kwargs.pop('allow_nested', None)
        # boundary-smoothing hyper-parameters
        self.sb_epsilon = kwargs.pop('sb_epsilon', 0.0)
        self.sb_size = kwargs.pop('sb_size', 1)
        self.sb_adj_factor = kwargs.pop('sb_adj_factor', 1.0)
        super().__init__(**kwargs)
    @property
    def name(self):
        """Short name combining the affine architecture and the criterion."""
        return self._name_sep.join([self.affine.arch, self.criterion])
    def __repr__(self):
        repr_attr_dict = {key: getattr(self, key) for key in ['in_dim', 'hid_drop_rates', 'criterion']}
        return self._repr_non_config_attrs(repr_attr_dict)
    @property
    def in_dim(self):
        # input dimension is delegated to the affine encoder config
        return self.affine.in_dim
    @in_dim.setter
    def in_dim(self, dim: int):
        self.affine.in_dim = dim
    @property
    def criterion(self):
        """Criterion tag; boundary smoothing overrides the base criterion."""
        if self.sb_epsilon > 0:
            return f"SB({self.sb_epsilon:.2f}, {self.sb_size})"
        else:
            return super().criterion
    def instantiate_criterion(self, **kwargs):
        """Build the loss; smoothed targets need soft-label cross entropy."""
        if self.criterion.lower().startswith(('sb', 'sl')):
            return SoftLabelCrossEntropyLoss(**kwargs)
        else:
            return super().instantiate_criterion(**kwargs)
    def build_vocab(self, *partitions):
        """Scan the data partitions to set the label vocab, nesting flag,
        and maximum sequence length."""
        counter = Counter(label for data in partitions for entry in data for label, start, end in entry['chunks'])
        self.idx2label = [self.none_label] + list(counter.keys())
        self.allow_nested = any(detect_nested(entry['chunks']) for data in partitions for entry in data)
        if self.allow_nested:
            logger.info("Nested chunks detected, nested chunks are allowed in decoding...")
        else:
            logger.info("No nested chunks detected, only flat chunks are allowed in decoding...")
        self.max_len = max(len(data_entry['tokens']) for data in partitions for data_entry in data)
    def instantiate(self):
        """Create the decoder module from this config."""
        return BoundarySelectionDecoder(self)
class BoundarySelectionDecoder(DecoderBase, BoundarySelectionDecoderMixin):
    """Biaffine span classifier: scores every (start, end) span and decodes
    the highest-confidence, non-clashing chunks."""
    def __init__(self, config: BoundarySelectionDecoderConfig):
        super().__init__()
        self.none_label = config.none_label
        self.idx2label = config.idx2label
        self.allow_nested = config.allow_nested
        if config.use_biaffine:
            # separate projections for span-start and span-end roles
            self.affine_start = config.affine.instantiate()
            self.affine_end = config.affine.instantiate()
        else:
            # one shared projection for both roles
            self.affine = config.affine.instantiate()
        if config.size_emb_dim > 0:
            # embedding over clipped span sizes (size_id = size - 1)
            self.size_embedding = torch.nn.Embedding(config.max_span_size, config.size_emb_dim)
            reinit_embedding_(self.size_embedding)
        # precomputed buffers; the non-mask must be created BEFORE the size
        # ids are clipped in place below
        self.register_buffer('_span_size_ids', torch.arange(config.max_len) - torch.arange(config.max_len).unsqueeze(-1))
        self.register_buffer('_span_non_mask', self._span_size_ids >= 0)
        self._span_size_ids.masked_fill_(self._span_size_ids < 0, 0)
        self._span_size_ids.masked_fill_(self._span_size_ids >= config.max_span_size, config.max_span_size-1)
        self.dropout = CombinedDropout(*config.hid_drop_rates)
        # biaffine tensor, linear weight, and bias of the scorer
        self.U = torch.nn.Parameter(torch.empty(config.voc_dim, config.affine.out_dim, config.affine.out_dim))
        self.W = torch.nn.Parameter(torch.empty(config.voc_dim, config.affine.out_dim*2 + config.size_emb_dim))
        self.b = torch.nn.Parameter(torch.empty(config.voc_dim))
        torch.nn.init.orthogonal_(self.U.data)
        torch.nn.init.orthogonal_(self.W.data)
        torch.nn.init.zeros_(self.b.data)
        # summed (not averaged) over the sampled spans of each example
        self.criterion = config.instantiate_criterion(reduction='sum')
    def _get_span_size_ids(self, seq_len: int):
        # clipped (end - start) ids, cropped to the current sequence length
        return self._span_size_ids[:seq_len, :seq_len]
    def _get_span_non_mask(self, seq_len: int):
        # upper-triangular mask of valid spans for the current length
        return self._span_non_mask[:seq_len, :seq_len]
    def compute_scores(self, batch: Batch, full_hidden: torch.Tensor):
        """Return per-span label scores of shape (batch, start, end, voc)."""
        if hasattr(self, 'affine_start'):
            affined_start = self.affine_start(full_hidden, batch.mask)
            affined_end = self.affine_end(full_hidden, batch.mask)
        else:
            affined_start = self.affine(full_hidden, batch.mask)
            affined_end = self.affine(full_hidden, batch.mask)
        # biaffine term: (batch, voc, start, end) after the two matmuls
        scores1 = self.dropout(affined_start).unsqueeze(1).matmul(self.U).matmul(self.dropout(affined_end).permute(0, 2, 1).unsqueeze(1))
        # concatenated start/end features: (batch, start, end, affine_dim*2)
        affined_cat = torch.cat([self.dropout(affined_start).unsqueeze(2).expand(-1, -1, affined_end.size(1), -1),
                                 self.dropout(affined_end).unsqueeze(1).expand(-1, affined_start.size(1), -1, -1)], dim=-1)
        if hasattr(self, 'size_embedding'):
            # append span-size embeddings, broadcast over the batch
            size_embedded = self.size_embedding(self._get_span_size_ids(full_hidden.size(1)))
            affined_cat = torch.cat([affined_cat, self.dropout(size_embedded).unsqueeze(0).expand(full_hidden.size(0), -1, -1, -1)], dim=-1)
        # linear term: (batch, start, end, voc, 1)
        scores2 = self.W.matmul(affined_cat.unsqueeze(-1))
        return scores1.permute(0, 2, 3, 1) + scores2.squeeze(-1) + self.b
    def forward(self, batch: Batch, full_hidden: torch.Tensor):
        """Return a (batch,) tensor of per-example summed span losses."""
        batch_scores = self.compute_scores(batch, full_hidden)
        losses = []
        for curr_scores, boundaries_obj, curr_len in zip(batch_scores, batch.boundaries_objs, batch.seq_lens.cpu().tolist()):
            # use the sampled mask when present, else all valid spans
            curr_non_mask = getattr(boundaries_obj, 'non_mask', self._get_span_non_mask(curr_len))
            loss = self.criterion(curr_scores[:curr_len, :curr_len][curr_non_mask], boundaries_obj.boundary2label_id[curr_non_mask])
            losses.append(loss)
        return torch.stack(losses)
    def decode(self, batch: Batch, full_hidden: torch.Tensor):
        """Return a list (per example) of decoded (label, start, end) chunks,
        keeping only non-clashing chunks by descending confidence."""
        batch_scores = self.compute_scores(batch, full_hidden)
        batch_chunks = []
        for curr_scores, curr_len in zip(batch_scores, batch.seq_lens.cpu().tolist()):
            curr_non_mask = self._get_span_non_mask(curr_len)
            confidences, label_ids = curr_scores[:curr_len, :curr_len][curr_non_mask].softmax(dim=-1).max(dim=-1)
            labels = [self.idx2label[i] for i in label_ids.cpu().tolist()]
            chunks = [(label, start, end) for label, (start, end) in zip(labels, _spans_from_upper_triangular(curr_len)) if label != self.none_label]
            confidences = [conf for label, conf in zip(labels, confidences.cpu().tolist()) if label != self.none_label]
            assert len(confidences) == len(chunks)
            # sort chunks from high to low confidence before clash filtering
            chunks = [ck for _, ck in sorted(zip(confidences, chunks), reverse=True)]
            chunks = filter_clashed_by_priority(chunks, allow_nested=self.allow_nested)
            batch_chunks.append(chunks)
        return batch_chunks
| true | true |
f7fca5bc3a3c278f7367d976fa6970e212a18c61 | 627 | py | Python | lib/mplterm/_kitty.py | anntzer/mplterm | 90c7e44f0038632c37ef65c0096caf3b381533b0 | [
"Zlib"
] | null | null | null | lib/mplterm/_kitty.py | anntzer/mplterm | 90c7e44f0038632c37ef65c0096caf3b381533b0 | [
"Zlib"
] | null | null | null | lib/mplterm/_kitty.py | anntzer/mplterm | 90c7e44f0038632c37ef65c0096caf3b381533b0 | [
"Zlib"
] | null | null | null | from io import BytesIO
import subprocess
import PIL
from ._util import Protocol
def _icat(args, **kwargs):
    """Invoke the kitty `icat` kitten.

    *args* are appended to the base command line; *kwargs* are passed
    straight to subprocess.run, whose CompletedProcess is returned.
    """
    command = ["kitty", "+kitten", "icat"]
    command += list(args)
    return subprocess.run(command, **kwargs)
class Kitty(Protocol):
    """Terminal-graphics backend built on kitty's `icat` kitten."""

    supports_transparency = True

    @staticmethod
    def get_pixel_size():
        """Query the terminal and return its size in pixels as (width, height)."""
        proc = _icat(["--print-window-size"], capture_output=True)
        width, height = (int(part) for part in proc.stdout.split(b"x"))
        return width, height

    @staticmethod
    def display(mem):
        """Encode the RGBA buffer *mem* as PNG and pipe it to icat."""
        height, width, _ = mem.shape
        png_buffer = BytesIO()
        PIL.Image.frombuffer("RGBA", (width, height), mem).save(png_buffer, format="png")
        _icat([], input=png_buffer.getbuffer())
| 22.392857 | 73 | 0.610845 | from io import BytesIO
import subprocess
import PIL
from ._util import Protocol
def _icat(args, **kwargs):
    """Run kitty's `icat` kitten with the given extra CLI arguments;
    *kwargs* are forwarded to subprocess.run (e.g. capture_output, input)."""
    return subprocess.run(["kitty", "+kitten", "icat", *args], **kwargs)
class Kitty(Protocol):
    """Terminal-graphics backend built on kitty's `icat` kitten."""
    supports_transparency = True
    @staticmethod
    def get_pixel_size():
        """Return the terminal window size in pixels as (w, h)."""
        cp = _icat(["--print-window-size"], capture_output=True)
        w, h = map(int, cp.stdout.split(b"x"))
        return w, h
    @staticmethod
    def display(mem):
        """PNG-encode the RGBA buffer `mem` and pipe it to icat for display."""
        h, w, _ = mem.shape
        buf = BytesIO()
        PIL.Image.frombuffer("RGBA", (w, h), mem).save(buf, format="png")
        _icat([], input=buf.getbuffer())
| true | true |
f7fca6bd51d06c513fd58932a58f69cd8491093e | 375 | py | Python | kitsune/search/v2/__init__.py | terrorizer1980/kitsune | 15e0cdc4e941c352b2892010479df9190e9c0035 | [
"BSD-3-Clause"
] | 1 | 2021-07-18T00:41:16.000Z | 2021-07-18T00:41:16.000Z | kitsune/search/v2/__init__.py | hafixo/kitsune | d7756872e16590eea1c6adaeb5bc78f83414d753 | [
"BSD-3-Clause"
] | 13 | 2021-04-13T18:02:21.000Z | 2022-03-12T00:54:51.000Z | kitsune/search/v2/__init__.py | Whoerr/kitsune | 2428573b4920a824c3e712b8a4870f8c1ada8f64 | [
"BSD-3-Clause"
] | null | null | null | import imp
import sys
# Monkey patch PYTHONPATH to temporarily work with multiple
# versions of elasticsearch
sys.path.insert(0, "/vendor")
# Manually import elasticsearch v7.x and register it under the module name
# "elasticsearch7" (via imp.load_module's first argument).
# NOTE(review): the `imp` module is deprecated (removed in Python 3.12) —
# consider importlib before upgrading the runtime; verify module-aliasing
# semantics carry over.
f, filename, description = imp.find_module("elasticsearch")
elasticsearch7 = imp.load_module("elasticsearch7", f, filename, description)
# dotted path to this package's app config (presumably consumed by
# Django's app registry — verify)
default_app_config = "kitsune.search.apps.SearchV2Config"
| 28.846154 | 76 | 0.797333 | import imp
import sys
sys.path.insert(0, "/vendor")
f, filename, description = imp.find_module("elasticsearch")
elasticsearch7 = imp.load_module("elasticsearch7", f, filename, description)
default_app_config = "kitsune.search.apps.SearchV2Config"
| true | true |
f7fca72e52232376356f35434e420a846dc84c45 | 850 | py | Python | backend/utils.py | DenX/pixyship | b8c75d18bfcaf86e1d1c4d7de5b58a51175b9110 | [
"MIT"
] | null | null | null | backend/utils.py | DenX/pixyship | b8c75d18bfcaf86e1d1c4d7de5b58a51175b9110 | [
"MIT"
] | null | null | null | backend/utils.py | DenX/pixyship | b8c75d18bfcaf86e1d1c4d7de5b58a51175b9110 | [
"MIT"
] | null | null | null | import time
from config import CONFIG
def float_range(values, start_key, end_key):
start = 0
if values[start_key]:
start = float(values[start_key])
end = 0
if values[end_key]:
end = float(values[end_key])
return start, end
def int_range(values, start_key, end_key):
start = 0
if values[start_key]:
start = int(values[start_key])
end = 0
if values[end_key]:
end = int(values[end_key])
return start, end
def api_sleep(secs, force_sleep=False):
if not CONFIG['SAVY_PUBLIC_API_TOKEN'] or force_sleep:
time.sleep(secs)
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
| 20.238095 | 81 | 0.635294 | import time
from config import CONFIG
def float_range(values, start_key, end_key):
start = 0
if values[start_key]:
start = float(values[start_key])
end = 0
if values[end_key]:
end = float(values[end_key])
return start, end
def int_range(values, start_key, end_key):
start = 0
if values[start_key]:
start = int(values[start_key])
end = 0
if values[end_key]:
end = int(values[end_key])
return start, end
def api_sleep(secs, force_sleep=False):
if not CONFIG['SAVY_PUBLIC_API_TOKEN'] or force_sleep:
time.sleep(secs)
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
| true | true |
f7fca84031269e2ffea03bfb1841f7237e156990 | 2,551 | py | Python | space_invader_wrappers/wrappers.py | drmeerkat/IQN-and-Extensions | 1cc024c0eedd6cbad62e6db0aae21157847a7e85 | [
"MIT"
] | null | null | null | space_invader_wrappers/wrappers.py | drmeerkat/IQN-and-Extensions | 1cc024c0eedd6cbad62e6db0aae21157847a7e85 | [
"MIT"
] | null | null | null | space_invader_wrappers/wrappers.py | drmeerkat/IQN-and-Extensions | 1cc024c0eedd6cbad62e6db0aae21157847a7e85 | [
"MIT"
] | null | null | null | from abc import abstractmethod
from typing import *
import gym
from toybox import Input
from toybox.envs.atari.base import ToyboxBaseEnv
# thin layer for feature-based toybox environment
# ToyboxBaseEnv is already a gym wrapper as a subclass of gym.atari.AtariEnv
class FeatureVecWrapper(gym.ObservationWrapper):
def __init__(self, tbenv: ToyboxBaseEnv, verbose=0):
super().__init__(tbenv)
self.env = tbenv
# note: self.env is a toybox.env and will also have its own self.env.toybox
self.toybox = tbenv.toybox
self.verbose = verbose
# abstract method for gym.ObservationWrapper
# this can be a good place to return a custom state feature vector
@abstractmethod
def observation(self, observation):
return [1]
def step(self, action: Union[int, Input]):
if type(action) == int:
return self.step_ale(action)
elif type(action) == Input:
return self.step_toybox_actions(action)
else:
return self.step_ale(int(action))
def step_ale(self, action: int):
# this is a little clunky because self.env.step returns the RGB state
_, reward, done, info = self.env.step(action)
# and we could skip right to the feature vec
# step_toybox_actions avoids this extra work
state_vec = self.observation(1)
return state_vec, reward, done, info
def step_toybox_actions(self, action_input: Input):
obs_state_vec = None
reward = None
done = False
info = {}
assert type(action_input) == Input
self.env.toybox.apply_action(action_input)
if self.toybox.game_over():
if self.verbose:
print("GAME OVER")
info["cached_state"] = self.toybox.to_state_json()
obs_state_vec = self.observation(1)
# Compute the reward from the current score and reset the current score.
# this gives the raw reward
# and would require an additional gym.RewardWrapper to use other reward schemes
# e.g. clipped rewards are common for Atari
score = self.toybox.get_score()
reward = max(score - self.env.score, 0)
self.env.score = score
# Check whether the episode is done
done = self.toybox.game_over()
# Send back diagnostic information
info["lives"] = self.toybox.get_lives()
# info['frame'] = frame
info["score"] = 0 if done else self.env.score
return obs_state_vec, reward, done, info
| 34.013333 | 87 | 0.648373 | from abc import abstractmethod
from typing import *
import gym
from toybox import Input
from toybox.envs.atari.base import ToyboxBaseEnv
class FeatureVecWrapper(gym.ObservationWrapper):
def __init__(self, tbenv: ToyboxBaseEnv, verbose=0):
super().__init__(tbenv)
self.env = tbenv
self.toybox = tbenv.toybox
self.verbose = verbose
@abstractmethod
def observation(self, observation):
return [1]
def step(self, action: Union[int, Input]):
if type(action) == int:
return self.step_ale(action)
elif type(action) == Input:
return self.step_toybox_actions(action)
else:
return self.step_ale(int(action))
def step_ale(self, action: int):
_, reward, done, info = self.env.step(action)
state_vec = self.observation(1)
return state_vec, reward, done, info
def step_toybox_actions(self, action_input: Input):
obs_state_vec = None
reward = None
done = False
info = {}
assert type(action_input) == Input
self.env.toybox.apply_action(action_input)
if self.toybox.game_over():
if self.verbose:
print("GAME OVER")
info["cached_state"] = self.toybox.to_state_json()
obs_state_vec = self.observation(1)
score = self.toybox.get_score()
reward = max(score - self.env.score, 0)
self.env.score = score
done = self.toybox.game_over()
info["lives"] = self.toybox.get_lives()
info["score"] = 0 if done else self.env.score
return obs_state_vec, reward, done, info
| true | true |
f7fca84cc760a407ce91c1a0d9c56e11845def28 | 675 | py | Python | configs/tood/tood_r101_fpn_1x_minicoco.py | ruiningTang/mmdetection | 100b0b5e0edddc45af0812b9f1474493c61671ef | [
"Apache-2.0"
] | null | null | null | configs/tood/tood_r101_fpn_1x_minicoco.py | ruiningTang/mmdetection | 100b0b5e0edddc45af0812b9f1474493c61671ef | [
"Apache-2.0"
] | null | null | null | configs/tood/tood_r101_fpn_1x_minicoco.py | ruiningTang/mmdetection | 100b0b5e0edddc45af0812b9f1474493c61671ef | [
"Apache-2.0"
] | null | null | null | _base_ = './tood_r50_fpn_1x_minicoco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
data = dict(samples_per_gpu=2,
workers_per_gpu=2)
# optimizer
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
work_dir = 'work_dirs/coco/tood/tood_r101_fpn_1x_minicoco' | 32.142857 | 80 | 0.657778 | _base_ = './tood_r50_fpn_1x_minicoco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
data = dict(samples_per_gpu=2,
workers_per_gpu=2)
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
work_dir = 'work_dirs/coco/tood/tood_r101_fpn_1x_minicoco' | true | true |
f7fca9036e079f007b760a1788941bd757dab13d | 6,383 | py | Python | nayan/live_server_tests/selenium_test.py | patilnayan92/etonlinetest | 42b57cb6f10e518be99faa47e3f9f57a1a54b413 | [
"Python-2.0"
] | 2 | 2019-03-06T02:17:25.000Z | 2019-10-03T17:43:26.000Z | nayan/live_server_tests/selenium_test.py | patilnayan92/etonlinetest | 42b57cb6f10e518be99faa47e3f9f57a1a54b413 | [
"Python-2.0"
] | null | null | null | nayan/live_server_tests/selenium_test.py | patilnayan92/etonlinetest | 42b57cb6f10e518be99faa47e3f9f57a1a54b413 | [
"Python-2.0"
] | 4 | 2019-02-01T16:10:40.000Z | 2020-08-30T02:44:39.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import WebDriverException
import multiprocessing
import argparse
class ElementDisplay(object):
'''Custom expected condition '''
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
element = EC._find_element(driver, self.locator)
return element.value_of_css_property("display") == "none"
except Exception as e:
return False
class SeleniumTestError(Exception):
pass
class SeleniumTest():
def __init__(self, url, quiz_name):
self.driver = webdriver.Firefox()
self.quiz_name = quiz_name
self.url = url
def run_load_test(self, url, username, password):
try:
self.driver.delete_all_cookies()
self.driver.get(self.url)
self.login(username, password)
self.open_quiz()
self.quit_quiz()
self.close_quiz()
self.logout()
self.driver.close()
except Exception as e:
self.driver.close()
msg = ("An Error occurred while running the Selenium load"
" test on Nayan!\n"
"Error:\n{0}".format(e))
raise SeleniumTestError(msg)
def login(self, username, password):
# get the username, password and submit form elements
username_elem = self.driver.find_element_by_id("id_username")
password_elem = self.driver.find_element_by_id("id_password")
submit_login_elem = self.driver.find_element_by_css_selector('button.btn')
# Type in the username, password and submit form
username_elem.send_keys(username)
password_elem.send_keys(password)
submit_login_elem.click()
def submit_answer(self, question_label, answer, loop_count=1):
self.driver.implicitly_wait(2)
for count in range(loop_count):
self.driver.find_element_by_link_text(question_label).click()
submit_answer_elem = self.driver.find_element_by_id("check")
self.driver.execute_script('global_editor.editor.setValue({});'.format(answer))
submit_answer_elem.click()
WebDriverWait(self.driver, 90).until(ElementDisplay(
(By.XPATH, "//*[@id='ontop']")))
def test_c_question(self, question_label):
# Incorrect Answer
loop_count = 10
answer = '\"int add(int a, int b, int c)\\n{return;}\"'
self.submit_answer(question_label, answer, loop_count)
# Infinite Loop
loop_count = 3
answer = '\"int add(int a, int b, int c)\\n{while(1){}}\"'
self.submit_answer(question_label, answer, loop_count)
# Correct Answer
loop_count = 1
answer = '\"int add(int a, int b, int c)\\n{return a + b + c;}\"'
self.submit_answer(question_label, answer, loop_count)
def test_python_question(self, question_label):
# Incorrect Answer
loop_count = 10
answer = '\"def is_palindrome(s):\\n return s\"'
self.submit_answer(question_label, answer, loop_count)
# Infinite Loop
loop_count = 3
answer = '\"while True:\\n pass"'
self.submit_answer(question_label, answer, loop_count)
# Correct Answer
loop_count = 1
answer = '\"def is_palindrome(s):\\n return s[::-1] == s\"'
self.submit_answer(question_label, answer, loop_count)
def test_bash_question(self, question_label):
# Incorrect Answer
loop_count = 10
answer = '\"#!/bin/bash\\nls\"'
self.submit_answer(question_label, answer, loop_count)
# Infinite Loop
loop_count = 3
answer = '\"#!/bin/bash\\nwhile [ 1 ]; do : ; done\"'
self.submit_answer(question_label, answer, loop_count)
# Correct Answer
loop_count = 1
answer = '\"#!/bin/bash\\ncat $1 | cut -d: -f2 | paste -d: $3 - $2\"'
self.submit_answer(question_label, answer, loop_count)
def open_quiz(self):
# open quiz link
quiz_link_elem = self.driver.find_element_by_link_text(self.quiz_name).click()
# Get page elements
start_exam_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.NAME, "start"))
)
start_exam_elem.click()
self.test_c_question(question_label=7)
self.test_python_question(question_label=5)
self.test_bash_question(question_label=4)
def quit_quiz(self):
quit_link_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.NAME, "quit"))
)
quit_link_elem.click()
quit_link_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.NAME, "yes"))
)
quit_link_elem.click()
def close_quiz(self):
quit_link_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.ID, "home"))
)
quit_link_elem.click()
def logout(self):
logout_link_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.ID, "logout"))
)
logout_link_elem.click()
def user_gen(url, ids):
return [(url, 'User%d'%x, 'User%d'%x) for x in ids]
def wrap_run_load_test(args):
url = "http://yaksh.fossee.aero.iitb.ac.in/exam/"
selenium_test = SeleniumTest(url=url, quiz_name=quiz_name)
return selenium_test.run_load_test(*args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('url', type=str, help="url of the website being tested")
parser.add_argument('start', type=int, help="Starting user id")
parser.add_argument("-n", "--number", type=int, default=10, help="number of users")
opts = parser.parse_args()
quiz_name = "Demo quiz"
selenium_test = SeleniumTest(url=opts.url, quiz_name=quiz_name)
pool = multiprocessing.Pool(opts.number)
pool.map(wrap_run_load_test, user_gen(opts.url, range(opts.start, opts.start + opts.number)))
pool.close()
pool.join()
| 35.659218 | 97 | 0.639668 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import WebDriverException
import multiprocessing
import argparse
class ElementDisplay(object):
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
element = EC._find_element(driver, self.locator)
return element.value_of_css_property("display") == "none"
except Exception as e:
return False
class SeleniumTestError(Exception):
pass
class SeleniumTest():
def __init__(self, url, quiz_name):
self.driver = webdriver.Firefox()
self.quiz_name = quiz_name
self.url = url
def run_load_test(self, url, username, password):
try:
self.driver.delete_all_cookies()
self.driver.get(self.url)
self.login(username, password)
self.open_quiz()
self.quit_quiz()
self.close_quiz()
self.logout()
self.driver.close()
except Exception as e:
self.driver.close()
msg = ("An Error occurred while running the Selenium load"
" test on Nayan!\n"
"Error:\n{0}".format(e))
raise SeleniumTestError(msg)
def login(self, username, password):
username_elem = self.driver.find_element_by_id("id_username")
password_elem = self.driver.find_element_by_id("id_password")
submit_login_elem = self.driver.find_element_by_css_selector('button.btn')
username_elem.send_keys(username)
password_elem.send_keys(password)
submit_login_elem.click()
def submit_answer(self, question_label, answer, loop_count=1):
self.driver.implicitly_wait(2)
for count in range(loop_count):
self.driver.find_element_by_link_text(question_label).click()
submit_answer_elem = self.driver.find_element_by_id("check")
self.driver.execute_script('global_editor.editor.setValue({});'.format(answer))
submit_answer_elem.click()
WebDriverWait(self.driver, 90).until(ElementDisplay(
(By.XPATH, "//*[@id='ontop']")))
def test_c_question(self, question_label):
loop_count = 10
answer = '\"int add(int a, int b, int c)\\n{return;}\"'
self.submit_answer(question_label, answer, loop_count)
loop_count = 3
answer = '\"int add(int a, int b, int c)\\n{while(1){}}\"'
self.submit_answer(question_label, answer, loop_count)
loop_count = 1
answer = '\"int add(int a, int b, int c)\\n{return a + b + c;}\"'
self.submit_answer(question_label, answer, loop_count)
def test_python_question(self, question_label):
loop_count = 10
answer = '\"def is_palindrome(s):\\n return s\"'
self.submit_answer(question_label, answer, loop_count)
loop_count = 3
answer = '\"while True:\\n pass"'
self.submit_answer(question_label, answer, loop_count)
loop_count = 1
answer = '\"def is_palindrome(s):\\n return s[::-1] == s\"'
self.submit_answer(question_label, answer, loop_count)
def test_bash_question(self, question_label):
loop_count = 10
answer = '\"#!/bin/bash\\nls\"'
self.submit_answer(question_label, answer, loop_count)
loop_count = 3
answer = '\"#!/bin/bash\\nwhile [ 1 ]; do : ; done\"'
self.submit_answer(question_label, answer, loop_count)
loop_count = 1
answer = '\"#!/bin/bash\\ncat $1 | cut -d: -f2 | paste -d: $3 - $2\"'
self.submit_answer(question_label, answer, loop_count)
def open_quiz(self):
quiz_link_elem = self.driver.find_element_by_link_text(self.quiz_name).click()
start_exam_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.NAME, "start"))
)
start_exam_elem.click()
self.test_c_question(question_label=7)
self.test_python_question(question_label=5)
self.test_bash_question(question_label=4)
def quit_quiz(self):
quit_link_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.NAME, "quit"))
)
quit_link_elem.click()
quit_link_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.NAME, "yes"))
)
quit_link_elem.click()
def close_quiz(self):
quit_link_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.ID, "home"))
)
quit_link_elem.click()
def logout(self):
logout_link_elem = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.ID, "logout"))
)
logout_link_elem.click()
def user_gen(url, ids):
return [(url, 'User%d'%x, 'User%d'%x) for x in ids]
def wrap_run_load_test(args):
url = "http://yaksh.fossee.aero.iitb.ac.in/exam/"
selenium_test = SeleniumTest(url=url, quiz_name=quiz_name)
return selenium_test.run_load_test(*args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('url', type=str, help="url of the website being tested")
parser.add_argument('start', type=int, help="Starting user id")
parser.add_argument("-n", "--number", type=int, default=10, help="number of users")
opts = parser.parse_args()
quiz_name = "Demo quiz"
selenium_test = SeleniumTest(url=opts.url, quiz_name=quiz_name)
pool = multiprocessing.Pool(opts.number)
pool.map(wrap_run_load_test, user_gen(opts.url, range(opts.start, opts.start + opts.number)))
pool.close()
pool.join()
| true | true |
f7fca937ba6f1800091c1da17d99217c3e5e48f9 | 1,919 | py | Python | solvers/cave.py | kevinychen/nikoli-puzzle-solver | be288566e9b864d7acd0c063a6d2bc1fc067a32c | [
"MIT"
] | 4 | 2022-01-03T04:21:08.000Z | 2022-02-25T05:51:59.000Z | solvers/cave.py | kevinychen/nikoli-puzzle-solver | be288566e9b864d7acd0c063a6d2bc1fc067a32c | [
"MIT"
] | null | null | null | solvers/cave.py | kevinychen/nikoli-puzzle-solver | be288566e9b864d7acd0c063a6d2bc1fc067a32c | [
"MIT"
] | 1 | 2022-01-12T00:03:48.000Z | 2022-01-12T00:03:48.000Z | from solvers.utils import *
class CaveSolver(AbstractSolver):
def __init__(self, pzprv3):
matched = match('pzprv3/cave/(\\d+)/(\\d+)/(.*)/', pzprv3)
self.height = int(matched.group(1))
self.width = int(matched.group(2))
self.grid = parse_table(matched.group(3))[:self.height]
def to_pzprv3(self, solved_grid):
symbol_set = self.symbol_set()
result = [[symbol_set.symbols[solved_grid[Point(row, col)]].label
for col in range(self.width)] for row in range(self.height)]
return f'pzprv3/cave/{self.height}/{self.width}/{table(self.grid)}/{table(result)}/'
def lattice(self):
return RectangularLattice(
[Point(row, col) for row in range(-1, self.height + 1) for col in range(-1, self.width + 1)])
def symbol_set(self):
return SymbolSet([("WHITE", "+"), ("BLACK", "#")])
def configure(self, sg):
symbol_set = self.symbol_set()
rc = RegionConstrainer(sg.lattice, sg.solver)
for p in sg.lattice.points:
if p.x == -1 or p.x == self.width or p.y == -1 or p.y == self.height:
sg.solver.add(sg.cell_is(p, symbol_set.BLACK))
else:
num = self.grid[p.y][p.x]
if num.isnumeric():
all_is_visible = [sg.cell_is(p, symbol_set.WHITE)]
for direction in sg.lattice.edge_sharing_directions():
line = sight_line(sg, p, direction)
for i in range(1, len(line)):
all_is_visible.append(And([sg.cell_is(q, symbol_set.WHITE) for q in line[:i+1]]))
sg.solver.add(PbEq([(is_visible, 1) for is_visible in all_is_visible], int(num)))
continuous_region(sg, rc, lambda q: sg.cell_is(q, symbol_set.WHITE))
continuous_region(sg, rc, lambda q: sg.cell_is(q, symbol_set.BLACK))
| 43.613636 | 109 | 0.574779 | from solvers.utils import *
class CaveSolver(AbstractSolver):
def __init__(self, pzprv3):
matched = match('pzprv3/cave/(\\d+)/(\\d+)/(.*)/', pzprv3)
self.height = int(matched.group(1))
self.width = int(matched.group(2))
self.grid = parse_table(matched.group(3))[:self.height]
def to_pzprv3(self, solved_grid):
symbol_set = self.symbol_set()
result = [[symbol_set.symbols[solved_grid[Point(row, col)]].label
for col in range(self.width)] for row in range(self.height)]
return f'pzprv3/cave/{self.height}/{self.width}/{table(self.grid)}/{table(result)}/'
def lattice(self):
return RectangularLattice(
[Point(row, col) for row in range(-1, self.height + 1) for col in range(-1, self.width + 1)])
def symbol_set(self):
return SymbolSet([("WHITE", "+"), ("BLACK", "#")])
def configure(self, sg):
symbol_set = self.symbol_set()
rc = RegionConstrainer(sg.lattice, sg.solver)
for p in sg.lattice.points:
if p.x == -1 or p.x == self.width or p.y == -1 or p.y == self.height:
sg.solver.add(sg.cell_is(p, symbol_set.BLACK))
else:
num = self.grid[p.y][p.x]
if num.isnumeric():
all_is_visible = [sg.cell_is(p, symbol_set.WHITE)]
for direction in sg.lattice.edge_sharing_directions():
line = sight_line(sg, p, direction)
for i in range(1, len(line)):
all_is_visible.append(And([sg.cell_is(q, symbol_set.WHITE) for q in line[:i+1]]))
sg.solver.add(PbEq([(is_visible, 1) for is_visible in all_is_visible], int(num)))
continuous_region(sg, rc, lambda q: sg.cell_is(q, symbol_set.WHITE))
continuous_region(sg, rc, lambda q: sg.cell_is(q, symbol_set.BLACK))
| true | true |
f7fca9d0b259e65f68ab0e51ff56191260c3c4e1 | 16,928 | py | Python | erroranalysis/erroranalysis/_internal/surrogate_error_tree.py | alexquach/responsible-ai-widgets | 6208f72a5dc14b955f0e8b7b2203d0cd74f32497 | [
"MIT"
] | null | null | null | erroranalysis/erroranalysis/_internal/surrogate_error_tree.py | alexquach/responsible-ai-widgets | 6208f72a5dc14b955f0e8b7b2203d0cd74f32497 | [
"MIT"
] | null | null | null | erroranalysis/erroranalysis/_internal/surrogate_error_tree.py | alexquach/responsible-ai-widgets | 6208f72a5dc14b955f0e8b7b2203d0cd74f32497 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier, LGBMRegressor
from enum import Enum
from erroranalysis._internal.cohort_filter import filter_from_cohort
from erroranalysis._internal.constants import (PRED_Y,
TRUE_Y,
ROW_INDEX,
DIFF,
SPLIT_INDEX,
SPLIT_FEATURE,
LEAF_INDEX,
METHOD,
METHOD_EXCLUDES,
METHOD_INCLUDES,
ModelTask,
Metrics,
metric_to_display_name,
error_metrics)
from sklearn.metrics import (
mean_absolute_error, mean_squared_error, median_absolute_error,
r2_score, f1_score, precision_score, recall_score)
# Attribute name probed on the analyzer to distinguish a model analyzer
# (which can call model.predict) from a prediction-based analyzer that
# carries precomputed predictions.
MODEL = 'model'
# Default tree-learning parameters for the surrogate error tree.
DEFAULT_MAX_DEPTH = 3
DEFAULT_NUM_LEAVES = 31
class TreeSide(str, Enum):
    """Provide constants for the side of a node relative to its parent.

    Can be 'right_child', 'left_child', or 'unknown'. The string values
    match the child keys used in lightgbm's dumped tree structure, so
    the enum members can be used directly to index into a tree node.
    """

    RIGHT_CHILD = 'right_child'
    LEFT_CHILD = 'left_child'
    UNKNOWN = 'unknown'
def compute_json_error_tree(analyzer,
                            features,
                            filters,
                            composite_filters,
                            max_depth=DEFAULT_MAX_DEPTH,
                            num_leaves=DEFAULT_NUM_LEAVES):
    """Deprecated alias kept for older raiwidgets pypi package versions.

    Forwards all arguments unchanged to compute_error_tree.
    """
    return compute_error_tree(analyzer=analyzer,
                              features=features,
                              filters=filters,
                              composite_filters=composite_filters,
                              max_depth=max_depth,
                              num_leaves=num_leaves)
def compute_error_tree(analyzer,
                       features,
                       filters,
                       composite_filters,
                       max_depth=DEFAULT_MAX_DEPTH,
                       num_leaves=DEFAULT_NUM_LEAVES):
    """Train a surrogate tree on the errors and return it as a node list.

    :param analyzer: The error analyzer wrapping the dataset, labels,
        feature metadata and, for a model analyzer, the model used to
        compute predictions.
    :param features: The feature names to train the surrogate tree on.
    :param filters: The simple cohort filters to apply to the dataset.
    :param composite_filters: The composite cohort filters to apply to
        the dataset.
    :param max_depth: The maximum depth of the surrogate tree trained
        on errors; falls back to DEFAULT_MAX_DEPTH when None.
    :type max_depth: int
    :param num_leaves: The number of leaves of the surrogate tree
        trained on errors; falls back to DEFAULT_NUM_LEAVES when None.
    :type num_leaves: int
    :return: The surrogate error tree as a json-serializable node list.
    """
    # Fit a surrogate model on errors
    if max_depth is None:
        max_depth = DEFAULT_MAX_DEPTH
    if num_leaves is None:
        num_leaves = DEFAULT_NUM_LEAVES
    is_model_analyzer = hasattr(analyzer, MODEL)
    # A prediction-based analyzer has no model, so its precomputed
    # predictions must be carried through the cohort filtering step.
    if is_model_analyzer:
        filtered_df = filter_from_cohort(analyzer.dataset,
                                         filters,
                                         composite_filters,
                                         analyzer.feature_names,
                                         analyzer.true_y,
                                         analyzer.categorical_features,
                                         analyzer.categories)
    else:
        filtered_df = filter_from_cohort(analyzer.dataset,
                                         filters,
                                         composite_filters,
                                         analyzer.feature_names,
                                         analyzer.true_y,
                                         analyzer.categorical_features,
                                         analyzer.categories,
                                         analyzer.pred_y)
    row_index = filtered_df[ROW_INDEX]
    true_y = filtered_df[TRUE_Y]
    # Drop the bookkeeping columns so only raw features remain as input.
    dropped_cols = [TRUE_Y, ROW_INDEX]
    if not is_model_analyzer:
        pred_y = filtered_df[PRED_Y]
        dropped_cols.append(PRED_Y)
    input_data = filtered_df.drop(columns=dropped_cols)
    is_pandas = isinstance(analyzer.dataset, pd.DataFrame)
    if is_pandas:
        true_y = true_y.to_numpy()
    else:
        input_data = input_data.to_numpy()
    if is_model_analyzer:
        pred_y = analyzer.model.predict(input_data)
    # The surrogate target is the error: a misclassification flag for
    # classification, the residual for regression.
    if analyzer.model_task == ModelTask.CLASSIFICATION:
        diff = pred_y != true_y
    else:
        diff = pred_y - true_y
    # Normalize everything to numpy arrays for the indexing below.
    if not isinstance(diff, np.ndarray):
        diff = np.array(diff)
    if not isinstance(pred_y, np.ndarray):
        pred_y = np.array(pred_y)
    if not isinstance(true_y, np.ndarray):
        true_y = np.array(true_y)
    # Map the requested feature names to their column indexes.
    indexes = []
    for feature in features:
        indexes.append(analyzer.feature_names.index(feature))
    if is_pandas:
        input_data = input_data.to_numpy()
    if analyzer.categorical_features:
        # Inplace replacement of categorical columns with their
        # string-indexed encodings for the filtered rows.
        for idx, c_i in enumerate(analyzer.categorical_indexes):
            input_data[:, c_i] = analyzer.string_indexed_data[row_index, idx]
    dataset_sub_features = input_data[:, indexes]
    dataset_sub_names = np.array(analyzer.feature_names)[np.array(indexes)]
    dataset_sub_names = list(dataset_sub_names)
    # Re-index the categorical metadata into the feature subset.
    categorical_info = get_categorical_info(analyzer,
                                            dataset_sub_names)
    cat_ind_reindexed, categories_reindexed = categorical_info
    surrogate = create_surrogate_model(analyzer,
                                       dataset_sub_features,
                                       diff,
                                       max_depth,
                                       num_leaves,
                                       cat_ind_reindexed)
    # Rebuild a dataframe carrying the error and label columns so each
    # tree node can recompute its metric over its own row subset.
    filtered_indexed_df = pd.DataFrame(dataset_sub_features,
                                       columns=dataset_sub_names)
    filtered_indexed_df[DIFF] = diff
    filtered_indexed_df[TRUE_Y] = true_y
    filtered_indexed_df[PRED_Y] = pred_y
    dumped_model = surrogate._Booster.dump_model()
    tree_structure = dumped_model["tree_info"][0]['tree_structure']
    # Leaf node ids are offset past the largest split index so split
    # and leaf nodes never share an id.
    max_split_index = get_max_split_index(tree_structure) + 1
    tree = traverse(filtered_indexed_df,
                    tree_structure,
                    max_split_index,
                    (categories_reindexed,
                     cat_ind_reindexed),
                    [],
                    dataset_sub_names,
                    metric=analyzer.metric)
    return tree
def create_surrogate_model(analyzer,
                           dataset_sub_features,
                           diff,
                           max_depth,
                           num_leaves,
                           cat_ind_reindexed):
    """Create and fit the surrogate lightgbm model on the errors.

    :param analyzer: The error analyzer, used for its model task.
    :type analyzer: BaseAnalyzer
    :param dataset_sub_features: The subset of features to train the
        surrogate model on.
    :type dataset_sub_features: numpy.array or pandas.DataFrame
    :param diff: The difference between the true and predicted labels.
    :type diff: numpy.array
    :param max_depth: The maximum depth of the surrogate tree trained
        on errors.
    :type max_depth: int
    :param num_leaves: The number of leaves of the surrogate tree
        trained on errors.
    :type num_leaves: int
    :param cat_ind_reindexed: The list of categorical feature indexes.
    :type cat_ind_reindexed: list[int]
    :return: The trained surrogate model.
    :rtype: LGBMClassifier or LGBMRegressor
    """
    # A classifier models the misclassification flag; a regressor
    # models the residual.
    is_classification = analyzer.model_task == ModelTask.CLASSIFICATION
    surrogate_class = LGBMClassifier if is_classification else LGBMRegressor
    surrogate = surrogate_class(n_estimators=1,
                                max_depth=max_depth,
                                num_leaves=num_leaves)
    fit_kwargs = {}
    if cat_ind_reindexed:
        fit_kwargs['categorical_feature'] = cat_ind_reindexed
    surrogate.fit(dataset_sub_features, diff, **fit_kwargs)
    return surrogate
def get_categorical_info(analyzer, dataset_sub_names):
    """Return the categorical information for the given feature names.

    :param analyzer: The error analyzer containing the categorical
        features and categories for the full dataset.
    :type analyzer: BaseAnalyzer
    :param dataset_sub_names: The subset of feature names to get the
        categorical indexes and names for.
    :type dataset_sub_names: list[str]
    :return: The categorical indexes and categories for the subset
        of features specified.
    :rtype: tuple[list]
    """
    sub_indexes = []
    sub_categories = []
    if analyzer.categorical_features:
        # Walk each categorical feature with its category values and
        # keep only those present in the requested feature subset.
        for feature, feature_categories in zip(analyzer.categorical_features,
                                               analyzer.categories):
            if feature not in dataset_sub_names:
                continue
            sub_indexes.append(dataset_sub_names.index(feature))
            sub_categories.append(feature_categories)
    return (sub_indexes, sub_categories)
def get_max_split_index(tree):
    """Return the largest split index found anywhere in the tree.

    Leaf nodes contribute nothing, so a tree that is a single leaf
    yields 0.

    :param tree: The lightgbm tree (or subtree) structure.
    :return: The maximum split index over this subtree.
    :rtype: int
    """
    if SPLIT_INDEX not in tree:
        return 0
    children_max = max(get_max_split_index(tree[TreeSide.LEFT_CHILD]),
                       get_max_split_index(tree[TreeSide.RIGHT_CHILD]))
    return max(tree[SPLIT_INDEX], children_max)
def traverse(df,
             tree,
             max_split_index,
             categories,
             dict,
             feature_names,
             parent=None,
             side=TreeSide.UNKNOWN,
             metric=None):
    """Recursively walk the lightgbm tree structure and build a node list.

    :param df: The cohort rows remaining at this node; node_to_dict
        filters them further before the children are visited.
    :param tree: The lightgbm tree (or subtree) structure.
    :param max_split_index: One more than the largest split index; used
        to offset leaf indexes into unique node ids.
    :param categories: Tuple of (category values, categorical indexes)
        for the categorical features in the sub-dataset.
    :param dict: The accumulated list of json-serializable nodes.
        NOTE(review): this parameter shadows the dict builtin; renaming
        it would change the keyword interface, so it is left as-is.
    :param feature_names: The names of the features in the sub-dataset.
    :param parent: The parent lightgbm tree node, or None at the root.
    :param side: Which side of the parent this subtree hangs from.
    :param metric: The metric used to compute each node's error value.
    :return: The accumulated list of nodes for the error tree.
    """
    # Split nodes use lightgbm's split index directly; leaf nodes are
    # offset past the last split index so ids never collide.
    if SPLIT_INDEX in tree:
        nodeid = tree[SPLIT_INDEX]
    elif LEAF_INDEX in tree:
        nodeid = max_split_index + tree[LEAF_INDEX]
    else:
        nodeid = 0
    # write current node to a dictionary that can be saved as json
    dict, df = node_to_dict(df, tree, nodeid, categories, dict,
                            feature_names, metric, parent, side)
    # write children to a dictionary that can be saved as json
    # (only split nodes have children; leaves carry 'leaf_value')
    if 'leaf_value' not in tree:
        left_child = tree[TreeSide.LEFT_CHILD]
        right_child = tree[TreeSide.RIGHT_CHILD]
        dict = traverse(df, left_child, max_split_index,
                        categories, dict, feature_names,
                        tree, TreeSide.LEFT_CHILD, metric)
        dict = traverse(df, right_child, max_split_index,
                        categories, dict, feature_names,
                        tree, TreeSide.RIGHT_CHILD, metric)
    return dict
def create_categorical_arg(parent_threshold):
    """Convert a '||'-delimited lightgbm categorical threshold to floats.

    :param parent_threshold: The raw threshold string from the dumped
        lightgbm tree, e.g. '0||2||5'.
    :type parent_threshold: str
    :return: The encoded category values as floats.
    :rtype: list[float]
    """
    return list(map(float, parent_threshold.split('||')))
def create_categorical_query(method, arg, p_node_name, parent, categories):
    """Build the pandas query and display condition for a categorical split.

    :param method: The filter method, either includes or excludes.
    :param arg: The encoded categorical threshold values for the split.
    :param p_node_name: The display name of the parent's split feature.
    :param parent: The parent lightgbm tree node.
    :param categories: Tuple of (category values, categorical indexes).
    :return: The pandas query string and the human-readable condition.
    :rtype: tuple[str, str]
    """
    is_includes = method == METHOD_INCLUDES
    operation = "==" if is_includes else "!="
    categorical_values, categorical_indexes = categories
    feature_pos = categorical_indexes.index(parent[SPLIT_FEATURE])
    feature_categories = categorical_values[feature_pos]
    # Map each encoded threshold back to its display category value.
    labels = []
    for encoded in arg:
        category = feature_categories[int(encoded)]
        labels.append(category if isinstance(category, str)
                      else str(category))
    condition = "{} {} {}".format(p_node_name, operation,
                                  " | ".join(labels))
    clauses = ["`{}` {} {}".format(p_node_name, operation, encoded)
               for encoded in arg]
    # Include means matching any of the values; exclude means
    # matching none of them.
    query = (" | " if is_includes else " & ").join(clauses)
    return query, condition
def node_to_dict(df, tree, nodeid, categories, json,
                 feature_names, metric, parent=None,
                 side=TreeSide.UNKNOWN):
    """Serialize one surrogate-tree node and append it to ``json``.

    First narrows ``df`` to the rows that satisfy the parent's split
    condition for the given ``side``, then evaluates ``metric`` on that
    cohort and appends a JSON-ready dict describing the node.

    :param df: DataFrame of rows that reached the parent node.
    :param tree: lightgbm node dict for the current node.
    :param nodeid: unique node id computed by ``traverse``.
    :param categories: (categorical values, categorical indexes) pair.
    :param json: list of node dicts accumulated so far (mutated).
    :param feature_names: full list of dataset feature names.
    :param metric: member of ``Metrics`` to compute for the cohort.
    :param parent: lightgbm node dict of the parent, or None for the root.
    :param side: which child of ``parent`` this node is.
    :return: tuple of (json list, DataFrame filtered to this node).
    """
    p_node_name = None
    condition = None
    arg = None
    method = None
    parentid = None
    if parent is not None:
        parentid = int(parent[SPLIT_INDEX])
        p_node_name = feature_names[parent[SPLIT_FEATURE]]
        parent_threshold = parent['threshold']
        parent_decision_type = parent['decision_type']
        # Filter df by the parent's split rule so the metric below only
        # reflects the rows that actually reach this node.
        if side == TreeSide.LEFT_CHILD:
            if parent_decision_type == '<=':
                method = "less and equal"
                arg = float(parent_threshold)
                condition = "{} <= {:.2f}".format(p_node_name,
                                                  parent_threshold)
                query = "`" + p_node_name + "` <= " + str(parent_threshold)
                df = df.query(query)
            elif parent_decision_type == '==':
                # Categorical split: lightgbm encodes the category set as
                # a '||'-separated threshold string.
                method = METHOD_INCLUDES
                arg = create_categorical_arg(parent_threshold)
                query, condition = create_categorical_query(method,
                                                            arg,
                                                            p_node_name,
                                                            parent,
                                                            categories)
                df = df.query(query)
        elif side == TreeSide.RIGHT_CHILD:
            if parent_decision_type == '<=':
                method = "greater"
                arg = float(parent_threshold)
                condition = "{} > {:.2f}".format(p_node_name,
                                                 parent_threshold)
                query = "`" + p_node_name + "` > " + str(parent_threshold)
                df = df.query(query)
            elif parent_decision_type == '==':
                # Right branch of a categorical split excludes the set.
                method = METHOD_EXCLUDES
                arg = create_categorical_arg(parent_threshold)
                query, condition = create_categorical_query(method,
                                                            arg,
                                                            p_node_name,
                                                            parent,
                                                            categories)
                df = df.query(query)
    success = 0
    total = df.shape[0]
    # Compute the requested metric on this node's cohort.  An empty
    # cohort reports 0 for every metric except error rate (handled in
    # the else branch, where total == 0 is checked separately).
    if df.shape[0] == 0 and metric != Metrics.ERROR_RATE:
        metric_value = 0
        error = 0
        success = 0
    elif metric == Metrics.MEAN_ABSOLUTE_ERROR:
        pred_y, true_y, error = get_regression_metric_data(df)
        metric_value = mean_absolute_error(pred_y, true_y)
    elif metric == Metrics.MEAN_SQUARED_ERROR:
        pred_y, true_y, error = get_regression_metric_data(df)
        metric_value = mean_squared_error(pred_y, true_y)
    elif metric == Metrics.MEDIAN_ABSOLUTE_ERROR:
        pred_y, true_y, error = get_regression_metric_data(df)
        metric_value = median_absolute_error(pred_y, true_y)
    elif metric == Metrics.R2_SCORE:
        pred_y, true_y, error = get_regression_metric_data(df)
        metric_value = r2_score(pred_y, true_y)
    elif metric == Metrics.F1_SCORE:
        pred_y, true_y, error = get_classification_metric_data(df)
        metric_value = f1_score(pred_y, true_y)
        success = total - error
    elif metric == Metrics.PRECISION_SCORE:
        pred_y, true_y, error = get_classification_metric_data(df)
        metric_value = precision_score(pred_y, true_y)
        success = total - error
    elif metric == Metrics.RECALL_SCORE:
        pred_y, true_y, error = get_classification_metric_data(df)
        metric_value = recall_score(pred_y, true_y)
        success = total - error
    else:
        # Default: error rate (sum of DIFF over total rows).
        error = df[DIFF].values.sum()
        if total == 0:
            metric_value = 0
        else:
            metric_value = error / total
        success = total - error
    metric_name = metric_to_display_name[metric]
    is_error_metric = metric in error_metrics
    if SPLIT_FEATURE in tree:
        node_name = feature_names[tree[SPLIT_FEATURE]]
    else:
        node_name = None
    json.append({
        "arg": arg,
        "badFeaturesRowCount": 0,  # Note: remove this eventually
        "condition": condition,
        "error": float(error),
        "id": int(nodeid),
        METHOD: method,
        "nodeIndex": int(nodeid),
        "nodeName": node_name,
        "parentId": parentid,
        "parentNodeName": p_node_name,
        "pathFromRoot": "",  # Note: remove this eventually
        "size": float(total),
        "sourceRowKeyHash": "hashkey",  # Note: remove this eventually
        "success": float(success),  # Note: remove this eventually
        "metricName": metric_name,
        "metricValue": float(metric_value),
        "isErrorMetric": is_error_metric
    })
    return json, df
def get_regression_metric_data(df):
    """Return predictions, labels and the cohort's total absolute error."""
    pred_y, true_y = df[PRED_Y], df[TRUE_Y]
    # Total (not mean) absolute error over all rows at this node.
    total_abs_error = sum(abs(pred_y - true_y))
    return pred_y, true_y, total_abs_error
def get_classification_metric_data(df):
    """Return predictions, labels and the number of misclassified rows."""
    pred_y, true_y = df[PRED_Y], df[TRUE_Y]
    # DIFF holds the per-row mismatch flags, so their sum is the error count.
    return pred_y, true_y, df[DIFF].values.sum()
| 39.644028 | 77 | 0.562618 |
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier, LGBMRegressor
from enum import Enum
from erroranalysis._internal.cohort_filter import filter_from_cohort
from erroranalysis._internal.constants import (PRED_Y,
TRUE_Y,
ROW_INDEX,
DIFF,
SPLIT_INDEX,
SPLIT_FEATURE,
LEAF_INDEX,
METHOD,
METHOD_EXCLUDES,
METHOD_INCLUDES,
ModelTask,
Metrics,
metric_to_display_name,
error_metrics)
from sklearn.metrics import (
mean_absolute_error, mean_squared_error, median_absolute_error,
r2_score, f1_score, precision_score, recall_score)
# Attribute name probed via hasattr() to detect a model-based analyzer.
MODEL = 'model'
# Defaults for the surrogate lightgbm tree.
DEFAULT_MAX_DEPTH = 3
DEFAULT_NUM_LEAVES = 31
class TreeSide(str, Enum):
    """Which child of its parent a surrogate-tree node is."""
    RIGHT_CHILD = 'right_child'
    LEFT_CHILD = 'left_child'
    UNKNOWN = 'unknown'
def compute_json_error_tree(analyzer,
                            features,
                            filters,
                            composite_filters,
                            max_depth=DEFAULT_MAX_DEPTH,
                            num_leaves=DEFAULT_NUM_LEAVES):
    """Thin wrapper kept for API compatibility; see compute_error_tree."""
    return compute_error_tree(analyzer, features, filters,
                              composite_filters, max_depth, num_leaves)
def compute_error_tree(analyzer,
                       features,
                       filters,
                       composite_filters,
                       max_depth=DEFAULT_MAX_DEPTH,
                       num_leaves=DEFAULT_NUM_LEAVES):
    """Fit a surrogate tree on the analyzer's per-row error and return it
    as a flat list of JSON-ready node dicts.

    :param analyzer: error analyzer; either model-based (has a ``model``
        attribute used to predict) or prediction-based (has ``pred_y``).
    :param features: feature names to train the surrogate tree on.
    :param filters: cohort filters applied before training.
    :param composite_filters: composite cohort filters applied before
        training.
    :param max_depth: surrogate tree depth (None falls back to default).
    :param num_leaves: surrogate tree leaves (None falls back to default).
    :return: list of node dicts produced by ``traverse``.
    """
    # Treat an explicit None the same as an omitted argument.
    if max_depth is None:
        max_depth = DEFAULT_MAX_DEPTH
    if num_leaves is None:
        num_leaves = DEFAULT_NUM_LEAVES
    # Model-based analyzers recompute predictions below; prediction-based
    # analyzers carry pred_y through the cohort filter instead.
    is_model_analyzer = hasattr(analyzer, MODEL)
    if is_model_analyzer:
        filtered_df = filter_from_cohort(analyzer.dataset,
                                         filters,
                                         composite_filters,
                                         analyzer.feature_names,
                                         analyzer.true_y,
                                         analyzer.categorical_features,
                                         analyzer.categories)
    else:
        filtered_df = filter_from_cohort(analyzer.dataset,
                                         filters,
                                         composite_filters,
                                         analyzer.feature_names,
                                         analyzer.true_y,
                                         analyzer.categorical_features,
                                         analyzer.categories,
                                         analyzer.pred_y)
    row_index = filtered_df[ROW_INDEX]
    true_y = filtered_df[TRUE_Y]
    dropped_cols = [TRUE_Y, ROW_INDEX]
    if not is_model_analyzer:
        pred_y = filtered_df[PRED_Y]
        dropped_cols.append(PRED_Y)
    # Keep only the raw feature columns as model input.
    input_data = filtered_df.drop(columns=dropped_cols)
    is_pandas = isinstance(analyzer.dataset, pd.DataFrame)
    if is_pandas:
        true_y = true_y.to_numpy()
    else:
        input_data = input_data.to_numpy()
    if is_model_analyzer:
        pred_y = analyzer.model.predict(input_data)
    # Per-row error: mismatch flag for classification, residual otherwise.
    if analyzer.model_task == ModelTask.CLASSIFICATION:
        diff = pred_y != true_y
    else:
        diff = pred_y - true_y
    if not isinstance(diff, np.ndarray):
        diff = np.array(diff)
    if not isinstance(pred_y, np.ndarray):
        pred_y = np.array(pred_y)
    if not isinstance(true_y, np.ndarray):
        true_y = np.array(true_y)
    # Column indexes of the requested features within the full dataset.
    indexes = []
    for feature in features:
        indexes.append(analyzer.feature_names.index(feature))
    if is_pandas:
        input_data = input_data.to_numpy()
    # Swap categorical columns for their string-indexed encoding so the
    # lightgbm surrogate can split on them.
    if analyzer.categorical_features:
        for idx, c_i in enumerate(analyzer.categorical_indexes):
            input_data[:, c_i] = analyzer.string_indexed_data[row_index, idx]
    dataset_sub_features = input_data[:, indexes]
    dataset_sub_names = np.array(analyzer.feature_names)[np.array(indexes)]
    dataset_sub_names = list(dataset_sub_names)
    categorical_info = get_categorical_info(analyzer,
                                            dataset_sub_names)
    cat_ind_reindexed, categories_reindexed = categorical_info
    surrogate = create_surrogate_model(analyzer,
                                       dataset_sub_features,
                                       diff,
                                       max_depth,
                                       num_leaves,
                                       cat_ind_reindexed)
    filtered_indexed_df = pd.DataFrame(dataset_sub_features,
                                       columns=dataset_sub_names)
    filtered_indexed_df[DIFF] = diff
    filtered_indexed_df[TRUE_Y] = true_y
    filtered_indexed_df[PRED_Y] = pred_y
    # Serialize the single fitted tree and walk it into JSON nodes.
    dumped_model = surrogate._Booster.dump_model()
    tree_structure = dumped_model["tree_info"][0]['tree_structure']
    max_split_index = get_max_split_index(tree_structure) + 1
    tree = traverse(filtered_indexed_df,
                    tree_structure,
                    max_split_index,
                    (categories_reindexed,
                     cat_ind_reindexed),
                    [],
                    dataset_sub_names,
                    metric=analyzer.metric)
    return tree
def create_surrogate_model(analyzer,
                           dataset_sub_features,
                           diff,
                           max_depth,
                           num_leaves,
                           cat_ind_reindexed):
    """Fit a single-tree lightgbm surrogate on the per-row error column."""
    is_classification = analyzer.model_task == ModelTask.CLASSIFICATION
    estimator_cls = LGBMClassifier if is_classification else LGBMRegressor
    surrogate = estimator_cls(n_estimators=1,
                              max_depth=max_depth,
                              num_leaves=num_leaves)
    fit_kwargs = {}
    if cat_ind_reindexed:
        # Tell lightgbm which columns to treat as categorical.
        fit_kwargs['categorical_feature'] = cat_ind_reindexed
    surrogate.fit(dataset_sub_features, diff, **fit_kwargs)
    return surrogate
def get_categorical_info(analyzer, dataset_sub_names):
    """Re-index the analyzer's categorical metadata onto the selected
    feature subset.

    Returns a (column indexes, category values) pair restricted to the
    categorical features present in ``dataset_sub_names``, with indexes
    expressed relative to that subset.
    """
    cat_ind_reindexed = []
    categories_reindexed = []
    for c_index, feature in enumerate(analyzer.categorical_features or []):
        if feature not in dataset_sub_names:
            # Feature was not selected for the surrogate tree; skip it.
            continue
        cat_ind_reindexed.append(dataset_sub_names.index(feature))
        categories_reindexed.append(analyzer.categories[c_index])
    return (cat_ind_reindexed, categories_reindexed)
def get_max_split_index(tree):
    """Return the largest SPLIT_INDEX in the tree; 0 for a bare leaf."""
    if SPLIT_INDEX not in tree:
        return 0
    # Internal node: fold this node's index with both subtree maxima.
    children_max = max(get_max_split_index(tree[TreeSide.LEFT_CHILD]),
                       get_max_split_index(tree[TreeSide.RIGHT_CHILD]))
    return max(tree[SPLIT_INDEX], children_max)
def traverse(df,
             tree,
             max_split_index,
             categories,
             dict,
             feature_names,
             parent=None,
             side=TreeSide.UNKNOWN,
             metric=None):
    """Depth-first walk of the lightgbm tree; appends one serialized node
    per tree node to ``dict`` and returns the accumulated list.

    ``df`` is progressively filtered so each child only sees the rows
    that reach it.  (The ``dict`` parameter is actually a list; the name
    shadows the builtin.)
    """
    # Split nodes keep their split index; leaves are offset past the
    # largest split index so ids are unique across the tree.
    if SPLIT_INDEX in tree:
        nodeid = tree[SPLIT_INDEX]
    elif LEAF_INDEX in tree:
        nodeid = max_split_index + tree[LEAF_INDEX]
    else:
        nodeid = 0
    # Serialize this node; node_to_dict also narrows df to its cohort.
    dict, df = node_to_dict(df, tree, nodeid, categories, dict,
                            feature_names, metric, parent, side)
    # Leaf nodes carry 'leaf_value'; internal nodes recurse into both
    # children with the narrowed df.
    if 'leaf_value' not in tree:
        left_child = tree[TreeSide.LEFT_CHILD]
        right_child = tree[TreeSide.RIGHT_CHILD]
        dict = traverse(df, left_child, max_split_index,
                        categories, dict, feature_names,
                        tree, TreeSide.LEFT_CHILD, metric)
        dict = traverse(df, right_child, max_split_index,
                        categories, dict, feature_names,
                        tree, TreeSide.RIGHT_CHILD, metric)
    return dict
def create_categorical_arg(parent_threshold):
    """Parse a lightgbm categorical threshold string ('0||2||5') into a
    list of float category indexes."""
    return [float(i) for i in parent_threshold.split('||')]
def create_categorical_query(method, arg, p_node_name, parent, categories):
    """Build a pandas query string plus a display condition for a
    categorical split.

    The query compares the raw encoded category values in ``arg``; the
    display condition maps them back to category names via ``categories``.
    """
    if method == METHOD_INCLUDES:
        operation = "=="
    else:
        operation = "!="
    categorical_values = categories[0]
    categorical_indexes = categories[1]
    thresholds = []
    # Map the split feature back to its list of category names.
    catcoli = categorical_indexes.index(parent[SPLIT_FEATURE])
    catvals = categorical_values[catcoli]
    for argi in arg:
        encoded_val = catvals[int(argi)]
        if not isinstance(encoded_val, str):
            encoded_val = str(encoded_val)
        thresholds.append(encoded_val)
    threshold_str = " | ".join(thresholds)
    condition = "{} {} {}".format(p_node_name, operation, threshold_str)
    query = []
    for argi in arg:
        query.append("`" + p_node_name + "` " + operation + " " + str(argi))
    # Included categories are OR-ed together; excluded ones are AND-ed.
    if method == METHOD_INCLUDES:
        query = " | ".join(query)
    else:
        query = " & ".join(query)
    return query, condition
def node_to_dict(df, tree, nodeid, categories, json,
                 feature_names, metric, parent=None,
                 side=TreeSide.UNKNOWN):
    """Serialize one surrogate-tree node and append it to ``json``.

    Narrows ``df`` to the rows satisfying the parent's split condition
    for the given ``side``, evaluates ``metric`` on that cohort, and
    appends a JSON-ready dict describing the node.

    :return: tuple of (json list, DataFrame filtered to this node).
    """
    p_node_name = None
    condition = None
    arg = None
    method = None
    parentid = None
    if parent is not None:
        parentid = int(parent[SPLIT_INDEX])
        p_node_name = feature_names[parent[SPLIT_FEATURE]]
        parent_threshold = parent['threshold']
        parent_decision_type = parent['decision_type']
        # Apply the parent's split rule so the metric below reflects only
        # the rows reaching this node.
        if side == TreeSide.LEFT_CHILD:
            if parent_decision_type == '<=':
                method = "less and equal"
                arg = float(parent_threshold)
                condition = "{} <= {:.2f}".format(p_node_name,
                                                  parent_threshold)
                query = "`" + p_node_name + "` <= " + str(parent_threshold)
                df = df.query(query)
            elif parent_decision_type == '==':
                method = METHOD_INCLUDES
                arg = create_categorical_arg(parent_threshold)
                query, condition = create_categorical_query(method,
                                                            arg,
                                                            p_node_name,
                                                            parent,
                                                            categories)
                df = df.query(query)
        elif side == TreeSide.RIGHT_CHILD:
            if parent_decision_type == '<=':
                method = "greater"
                arg = float(parent_threshold)
                condition = "{} > {:.2f}".format(p_node_name,
                                                 parent_threshold)
                query = "`" + p_node_name + "` > " + str(parent_threshold)
                df = df.query(query)
            elif parent_decision_type == '==':
                # Right branch of a categorical split excludes the set.
                method = METHOD_EXCLUDES
                arg = create_categorical_arg(parent_threshold)
                query, condition = create_categorical_query(method,
                                                            arg,
                                                            p_node_name,
                                                            parent,
                                                            categories)
                df = df.query(query)
    success = 0
    total = df.shape[0]
    # Evaluate the chosen metric over this node's cohort; an empty cohort
    # reports 0 for everything except error rate.
    if df.shape[0] == 0 and metric != Metrics.ERROR_RATE:
        metric_value = 0
        error = 0
        success = 0
    elif metric == Metrics.MEAN_ABSOLUTE_ERROR:
        pred_y, true_y, error = get_regression_metric_data(df)
        metric_value = mean_absolute_error(pred_y, true_y)
    elif metric == Metrics.MEAN_SQUARED_ERROR:
        pred_y, true_y, error = get_regression_metric_data(df)
        metric_value = mean_squared_error(pred_y, true_y)
    elif metric == Metrics.MEDIAN_ABSOLUTE_ERROR:
        pred_y, true_y, error = get_regression_metric_data(df)
        metric_value = median_absolute_error(pred_y, true_y)
    elif metric == Metrics.R2_SCORE:
        pred_y, true_y, error = get_regression_metric_data(df)
        metric_value = r2_score(pred_y, true_y)
    elif metric == Metrics.F1_SCORE:
        pred_y, true_y, error = get_classification_metric_data(df)
        metric_value = f1_score(pred_y, true_y)
        success = total - error
    elif metric == Metrics.PRECISION_SCORE:
        pred_y, true_y, error = get_classification_metric_data(df)
        metric_value = precision_score(pred_y, true_y)
        success = total - error
    elif metric == Metrics.RECALL_SCORE:
        pred_y, true_y, error = get_classification_metric_data(df)
        metric_value = recall_score(pred_y, true_y)
        success = total - error
    else:
        # Default: error rate (sum of DIFF over total rows).
        error = df[DIFF].values.sum()
        if total == 0:
            metric_value = 0
        else:
            metric_value = error / total
        success = total - error
    metric_name = metric_to_display_name[metric]
    is_error_metric = metric in error_metrics
    if SPLIT_FEATURE in tree:
        node_name = feature_names[tree[SPLIT_FEATURE]]
    else:
        node_name = None
    json.append({
        "arg": arg,
        "badFeaturesRowCount": 0,
        "condition": condition,
        "error": float(error),
        "id": int(nodeid),
        METHOD: method,
        "nodeIndex": int(nodeid),
        "nodeName": node_name,
        "parentId": parentid,
        "parentNodeName": p_node_name,
        "pathFromRoot": "",
        "size": float(total),
        "sourceRowKeyHash": "hashkey",
        "success": float(success),
        "metricName": metric_name,
        "metricValue": float(metric_value),
        "isErrorMetric": is_error_metric
    })
    return json, df
def get_regression_metric_data(df):
    """Return predictions, true labels and total absolute error for df."""
    pred_y = df[PRED_Y]
    true_y = df[TRUE_Y]
    # Total (not mean) absolute error over the cohort at this node.
    error = sum(abs(pred_y - true_y))
    return pred_y, true_y, error
def get_classification_metric_data(df):
    """Return predictions, true labels and misclassified-row count for df."""
    pred_y = df[PRED_Y]
    true_y = df[TRUE_Y]
    # DIFF holds the per-row mismatch flags; their sum is the error count.
    error = df[DIFF].values.sum()
    return pred_y, true_y, error
| true | true |
f7fca9fe73979efb9e1581a8605974f57eca8614 | 1,239 | py | Python | upcfcardsearch/c316.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | upcfcardsearch/c316.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | upcfcardsearch/c316.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from discord.utils import get
class c316(commands.Cog, name="c316"):
    """Card-search cog: replies with an embed describing the custom card
    'Alexander the Impenetrable'."""
    def __init__(self, bot: commands.Bot):
        self.bot = bot
    @commands.command(name='Alexander_the_Impenetrable', aliases=['c316'])
    async def example_embed(self, ctx):
        """Send the card's stats and effect text as a Discord embed."""
        embed = discord.Embed(title='Alexander the Impenetrable',
                              color=0x00008B)
        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2367727.jpg')
        embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
        embed.add_field(name='Type (Attribute)', value='Machine/Link/Effect (DARK)', inline=False)
        embed.add_field(name='Link Rating (ATK/Link Arrows)', value='3 (3200/↙️⬇️➡️)', inline=False)
        embed.add_field(name='Monster Effect', value='2+ Effect Monsters\nCannot attack. This effect cannot be negated. Once per turn, when a card or effect is activated that targets a monster(s) you control (Quick Effect): You can discard 1 card; negate the activation.', inline=False)
        embed.set_footer(text='Set Code: ANCF')
        await ctx.send(embed=embed)
def setup(bot: commands.Bot):
    """discord.py extension entry point: registers the c316 cog."""
    bot.add_cog(c316(bot))
from discord.ext import commands
from discord.utils import get
class c316(commands.Cog, name="c316"):
    """Card-search cog: replies with an embed describing the custom card
    'Alexander the Impenetrable'."""
    def __init__(self, bot: commands.Bot):
        self.bot = bot
    @commands.command(name='Alexander_the_Impenetrable', aliases=['c316'])
    async def example_embed(self, ctx):
        """Send the card's stats and effect text as a Discord embed."""
        embed = discord.Embed(title='Alexander the Impenetrable',
                              color=0x00008B)
        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2367727.jpg')
        embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
        embed.add_field(name='Type (Attribute)', value='Machine/Link/Effect (DARK)', inline=False)
        embed.add_field(name='Link Rating (ATK/Link Arrows)', value='3 (3200/↙️⬇️➡️)', inline=False)
        embed.add_field(name='Monster Effect', value='2+ Effect Monsters\nCannot attack. This effect cannot be negated. Once per turn, when a card or effect is activated that targets a monster(s) you control (Quick Effect): You can discard 1 card; negate the activation.', inline=False)
        embed.set_footer(text='Set Code: ANCF')
        await ctx.send(embed=embed)
def setup(bot: commands.Bot):
    """discord.py extension entry point: registers the c316 cog."""
    bot.add_cog(c316(bot))
f7fcab09e39bcb24eda8adad618411cc290c9410 | 4,086 | py | Python | Codes/MLP/MLP.py | eipm/CIN | 1cde66166f40a1921eaec4d65bea5d2da201ca8b | [
"MIT"
] | 2 | 2021-06-21T14:00:09.000Z | 2021-12-07T16:01:49.000Z | Codes/MLP/MLP.py | eipm/CIN | 1cde66166f40a1921eaec4d65bea5d2da201ca8b | [
"MIT"
] | null | null | null | Codes/MLP/MLP.py | eipm/CIN | 1cde66166f40a1921eaec4d65bea5d2da201ca8b | [
"MIT"
] | 5 | 2021-04-01T17:23:21.000Z | 2022-03-22T16:24:40.000Z | # Train top MLP and evaluate model performance
# %%
import numpy as np
import pandas as pd
import random as rand
import skimage
from skimage import io,feature, filters,color
from skimage.exposure import rescale_intensity
import re
import os
import shutil
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential, Model,load_model
from tensorflow.keras import applications,optimizers
from tensorflow.keras.layers import Conv2D, MaxPooling2D,MaxPool1D,GlobalAveragePooling2D,GlobalMaxPool1D,GlobalMaxPooling1D,BatchNormalization,Activation, Dropout, Flatten, Dense,LeakyReLU,TimeDistributed,GlobalAveragePooling1D,Concatenate,Reshape,Lambda
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score, cross_val_predict, GridSearchCV
from sklearn.model_selection import train_test_split
# %%
#---input:
#features: patient level features
#WSI_df: summarise file
def feature_split(WSI_df,features,ID,Label,testratio,seed):
    """Split patient-level features into scaled train/test sets.

    Side effect: saves the split patient IDs and labels to ID_train.npy,
    ID_test.npy, y_train.npy and y_test.npy in the current directory.

    :param WSI_df: summary DataFrame holding the ID and Label columns.
    :param features: patient-level feature array aligned with WSI_df rows.
    :param ID: name of the patient-id column in WSI_df.
    :param Label: name of the label column in WSI_df.
    :param testratio: fraction of samples held out for testing.
    :param seed: random_state passed to train_test_split.
    :return: (X_train, X_test, y_train, y_test, ID_train, ID_test).
    """
    X_train,X_test,y_train,y_test,ID_train,ID_test=train_test_split(features,list(WSI_df[Label]),list(WSI_df[ID]),test_size=testratio,random_state=seed)
    # Persist the split so the same patients can be recovered later.
    np.save('ID_train.npy',ID_train)
    np.save('ID_test.npy',ID_test)
    np.save('y_train.npy',y_train)
    np.save('y_test.npy',y_test)
    # Scale features to [0, 1]; the scaler is fit on train only so no
    # test-set statistics leak into training.
    scaler = preprocessing.MinMaxScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    # Cast labels to int arrays for training.
    y_train=np.array([int(y) for y in y_train])
    y_test=np.array([int(y) for y in y_test])
    return(X_train,X_test,y_train,y_test,ID_train,ID_test)
# %%
#train top MLP
#---WSI_df: summarise file, a dataframe
#must have columns indicating: label, patient ID
#---Label, ID:
#column names in WSI_df. example: ID='Barcode',Label='Cin_Label'
#---features:
#patient level feature array.
#---testratio: percentage of test dataset, default is 0.15
#---seed, default is 1001
#---Model_Name:
#name to store the model, example: 'Model.h5'
#---Learning parameters:
#layer_num,nodes_num_1,nodes_num_2,dropout,lr
#you can set up to 2 hidden layers. Too many hidden layers is not likely to have good performance by experiments.
#use layer_num to set number of hidden layers
#use nodes_num_1 and nodes_num_2 to set nodes number of two hidden layers. Only set nodes_num_2 when layer_num=2
#you can set up dropout and learning rate. default setting: dropout=0,lr=0.00001
def MLP_train(WSI_df=None, features=None, ID='Barcode', Label='Cin_Label',
              testratio=0.15, seed=1001,
              Model_Name='Model.h5', layer_num=1, nodes_num_1=1024,
              nodes_num_2=512, dropout=0, lr=0.00001):
    """Train the top MLP on patient-level features and checkpoint the
    best model to ``Model_Name``.

    :param WSI_df: summary DataFrame with patient ID and label columns.
    :param features: patient-level feature array aligned with WSI_df rows.
    :param ID: name of the patient-id column in WSI_df.
    :param Label: name of the label column in WSI_df.
    :param testratio: fraction held out as the test set.
    :param seed: random seed for the train/test split.
    :param Model_Name: path where the best checkpoint is written.
    :param layer_num: number of hidden layers (1 or 2).
    :param nodes_num_1: width of the first hidden layer.
    :param nodes_num_2: width of the second hidden layer (layer_num == 2).
    :param dropout: dropout rate after the first hidden layer.
    :param lr: Adam learning rate.
    :return: the Keras ``History`` object from training.
    :raises ValueError: if WSI_df/features are missing or layer_num is
        not 1 or 2.
    """
    # The previous defaults (WSI_df=WSI_df, features=features) referenced
    # module globals that are undefined at import time; require explicit
    # arguments instead.
    if WSI_df is None or features is None:
        raise ValueError("WSI_df and features must be provided")
    if layer_num not in (1, 2):
        # Previously an unsupported layer_num crashed later with a
        # NameError on MLP; fail fast with a clear message instead.
        raise ValueError("layer_num must be 1 or 2, got %s" % layer_num)
    # Split into train/test and scale features to [0, 1].
    X_train, X_test, y_train, y_test, ID_train, ID_test = feature_split(
        WSI_df=WSI_df, features=features,
        ID=ID, Label=Label, testratio=testratio, seed=seed)
    # Build the MLP: one or two ReLU hidden layers, sigmoid output.
    MLP = Sequential()
    MLP.add(Dense(nodes_num_1, input_shape=(1024,),
                  kernel_initializer=tf.keras.initializers.he_normal(),
                  activation='relu'))
    MLP.add(Dropout(dropout))
    if layer_num == 2:
        MLP.add(Dense(nodes_num_2,
                      kernel_initializer=tf.keras.initializers.he_normal(),
                      activation='relu'))
    MLP.add(Dense(1, activation='sigmoid'))
    # Compile for binary classification with accuracy and AUC tracking.
    MLP.compile(loss='binary_crossentropy',
                optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                metrics=[tf.keras.metrics.BinaryAccuracy(),
                         tf.keras.metrics.AUC()])
    # Train with early stopping; the checkpoint keeps only the best
    # weights (by validation loss).
    es = EarlyStopping(monitor='val_loss', mode='min', patience=200)
    mc = ModelCheckpoint(Model_Name, monitor='val_loss', mode='min',
                         verbose=1, save_best_only=True)
    history = MLP.fit(
        X_train, y_train,
        validation_split=0.15,
        epochs=2500,
        callbacks=[es, mc])
    return history
| 43.935484 | 255 | 0.758933 |
import numpy as np
import pandas as pd
import random as rand
import skimage
from skimage import io,feature, filters,color
from skimage.exposure import rescale_intensity
import re
import os
import shutil
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential, Model,load_model
from tensorflow.keras import applications,optimizers
from tensorflow.keras.layers import Conv2D, MaxPooling2D,MaxPool1D,GlobalAveragePooling2D,GlobalMaxPool1D,GlobalMaxPooling1D,BatchNormalization,Activation, Dropout, Flatten, Dense,LeakyReLU,TimeDistributed,GlobalAveragePooling1D,Concatenate,Reshape,Lambda
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score, cross_val_predict, GridSearchCV
from sklearn.model_selection import train_test_split
def feature_split(WSI_df,features,ID,Label,testratio,seed):
    """Split patient-level features into scaled train/test sets.

    Side effect: saves the split patient IDs and labels as .npy files in
    the current directory.

    :return: (X_train, X_test, y_train, y_test, ID_train, ID_test).
    """
    X_train,X_test,y_train,y_test,ID_train,ID_test=train_test_split(features,list(WSI_df[Label]),list(WSI_df[ID]),test_size=testratio,random_state=seed)
    # Persist the split so the same patients can be recovered later.
    np.save('ID_train.npy',ID_train)
    np.save('ID_test.npy',ID_test)
    np.save('y_train.npy',y_train)
    np.save('y_test.npy',y_test)
    # Fit the scaler on train only so no test statistics leak in.
    scaler = preprocessing.MinMaxScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    # Cast labels to int arrays for training.
    y_train=np.array([int(y) for y in y_train])
    y_test=np.array([int(y) for y in y_test])
    return(X_train,X_test,y_train,y_test,ID_train,ID_test)
def MLP_train(WSI_df=WSI_df,features=features,ID='Barcode',Label='Cin_Label',testratio=0.15,seed=1001,
              Model_Name='Model.h5',layer_num=1,nodes_num_1=1024,nodes_num_2=512,dropout=0,lr=0.00001):
    """Train the top MLP and checkpoint the best model to Model_Name.

    NOTE(review): the defaults ``WSI_df=WSI_df`` and ``features=features``
    are evaluated at definition time and require module globals of the
    same names to exist (notebook-style usage); importing this file as a
    module without them raises NameError -- confirm intended usage.
    NOTE(review): a layer_num other than 1 or 2 leaves MLP undefined and
    fails at compile time with NameError.
    """
    # Split into train/test and scale features to [0, 1].
    X_train,X_test,y_train,y_test,ID_train,ID_test=feature_split(WSI_df=WSI_df,features=features,
                                                    ID=ID,Label=Label,testratio=testratio,seed=seed)
    # Build the MLP: one or two ReLU hidden layers, sigmoid output.
    if layer_num==1:
        MLP = Sequential()
        MLP.add(Dense(nodes_num_1,input_shape=(1024,),kernel_initializer=tf.keras.initializers.he_normal(),activation='relu'))
        MLP.add(Dropout(dropout))
        MLP.add(Dense(1,activation='sigmoid'))
    if layer_num==2:
        MLP = Sequential()
        MLP.add(Dense(nodes_num_1,input_shape=(1024,),kernel_initializer=tf.keras.initializers.he_normal(),activation='relu'))
        MLP.add(Dropout(dropout))
        MLP.add(Dense(nodes_num_2,kernel_initializer=tf.keras.initializers.he_normal(),activation='relu'))
        MLP.add(Dense(1,activation='sigmoid'))
    # Compile for binary classification with accuracy and AUC tracking.
    MLP.compile(loss='binary_crossentropy',
        optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
        metrics=[tf.keras.metrics.BinaryAccuracy(),tf.keras.metrics.AUC()])
    # Train with early stopping; checkpoint keeps the best weights only.
    es=EarlyStopping(monitor='val_loss',mode='min',patience=200)
    mc = ModelCheckpoint(Model_Name, monitor='val_loss', mode='min', verbose=1,save_best_only=True)
    history=MLP.fit(
        X_train,y_train,
        validation_split=0.15,
        epochs=2500,
        callbacks=[es,mc])
| true | true |
f7fcabc340e182c359d57a9e8b73911c0d0f3642 | 1,636 | py | Python | usaspending_api/awards/migrations/0058_auto_20170206_2032.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | 1 | 2021-06-17T05:09:00.000Z | 2021-06-17T05:09:00.000Z | usaspending_api/awards/migrations/0058_auto_20170206_2032.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | usaspending_api/awards/migrations/0058_auto_20170206_2032.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-02-06 20:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds a pricing-description field
    to procurement, makes two award-period dates nullable, and redefines
    the contract-pricing choices."""

    dependencies = [
        ('awards', '0057_merge_20170124_1927'),
    ]

    operations = [
        migrations.AddField(
            model_name='procurement',
            name='type_of_contract_pricing_description',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Type of Contract Pricing Description'),
        ),
        migrations.AlterField(
            model_name='financialassistanceaward',
            name='period_of_performance_current_end_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='financialassistanceaward',
            name='period_of_performance_start_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='procurement',
            name='type_of_contract_pricing',
            field=models.CharField(blank=True, choices=[('A', 'Fixed Price Redetermination'), ('B', 'Fixed Price Level of Effort'), ('J', 'Firm Fixed Price'), ('K', 'Fixed Price with Economic Price Adjustment'), ('L', 'Fixed Price Incentive'), ('M', 'Fixed Price Award Fee'), ('R', 'Cost Plus Award Fee'), ('S', 'Cost No Fee'), ('T', 'Cost Sharing'), ('U', 'Cost Plus Fixed Fee'), ('V', 'Cost Plus Incentive Fee'), ('Y', 'Time and Materials'), ('Z', 'Labor Hours')], max_length=2, null=True, verbose_name='Type of Contract Pricing'),
        ),
    ]
| 45.444444 | 533 | 0.633863 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds a pricing-description field
    to procurement, makes two award-period dates nullable, and redefines
    the contract-pricing choices."""

    dependencies = [
        ('awards', '0057_merge_20170124_1927'),
    ]

    operations = [
        migrations.AddField(
            model_name='procurement',
            name='type_of_contract_pricing_description',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Type of Contract Pricing Description'),
        ),
        migrations.AlterField(
            model_name='financialassistanceaward',
            name='period_of_performance_current_end_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='financialassistanceaward',
            name='period_of_performance_start_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='procurement',
            name='type_of_contract_pricing',
            field=models.CharField(blank=True, choices=[('A', 'Fixed Price Redetermination'), ('B', 'Fixed Price Level of Effort'), ('J', 'Firm Fixed Price'), ('K', 'Fixed Price with Economic Price Adjustment'), ('L', 'Fixed Price Incentive'), ('M', 'Fixed Price Award Fee'), ('R', 'Cost Plus Award Fee'), ('S', 'Cost No Fee'), ('T', 'Cost Sharing'), ('U', 'Cost Plus Fixed Fee'), ('V', 'Cost Plus Incentive Fee'), ('Y', 'Time and Materials'), ('Z', 'Labor Hours')], max_length=2, null=True, verbose_name='Type of Contract Pricing'),
        ),
    ]
| true | true |
f7fcabe87946145c30b1ce205314f58bbb5385ea | 7,289 | py | Python | Zabbix_scripts/Json2sender.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | Zabbix_scripts/Json2sender.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | Zabbix_scripts/Json2sender.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | from pyzabbix import ZabbixMetric, ZabbixSender
from datetime import datetime as dt
import requests
import argparse
try:
from Zabbix_scripts.Helpers.create_logger import create_logger
except ImportError:
from Helpers.create_logger import create_logger
try:
from Zabbix_scripts.Helpers.helpers import make_dates, Numeric_Metrics, get_metric_time, find_meter_voltage
except ImportError:
from Helpers.helpers import make_dates, Numeric_Metrics, get_metric_time, find_meter_voltage
# TODO: Empty response from API
# TODO: Multiple meters
# TODO: "StromNeutralleiter": null - removed from Numeric_Metrics
class Meters:
meter_voltage_item = "MeterNormalVoltage" # Meter reference voltage item name
def __init__(self, logfile, loglevel, api_user, api_pass, meter_id, date, zabbix_server_ip):
self.logger = create_logger(logfile, __name__, loglevel)
self.logger.warning("Initiated app")
self.logger.info("Input: \n\tlogfile = %s\n\tloglevel = %s\n\tapi_user = %s\
\n\tapi_pass = %s\n\tmeter_id = %s\n\tdate = %s\n\tzabbix_server_ip = %s" % (logfile, loglevel, api_user,
api_pass, meter_id, date, zabbix_server_ip)
)
self.zabbix_server_ip = zabbix_server_ip
if date is None:
date = make_dates()
self.logger.info("Date not provided, using %s" % date)
try:
dt.strptime(date, "%d.%m.%Y")
except ValueError:
self.logger.error("Incorrect date %s, use \"dd.mm.yyyy\"" % date)
exit(1)
self.api_url = "https://webhelper.acteno.de:4443/home/GetGridQualityData?\
User=%s\
&pass=%s\
&id=%s&\
datum=%s" % (api_user, api_pass, meter_id, date)
self.meter_id = meter_id
def get_json(self):
self.logger.info("Querying URL %s" % self.api_url)
response = requests.get(self.api_url)
assert response.status_code == 200, self.logger.error("API responded %s, expected 200" % response.status_code)
assert response.text != "Authenticarion Error.", self.logger.error("API authenticarion error") # Typo in text
self.logger.info("Response code %s in %s ms" % (response.status_code,
str(response.elapsed.microseconds / 1000))
)
self.logger.debug("Headers: %s" % response.headers)
self.logger.debug("Text: %s" % response.text)
return response.json()
def process_metrics_per_host(self):
# ZabbixMetric('Zabbix server', 'WirkleistungP3[04690915]', 3, clock=1554851400))
json = self.get_json()
self.logger.info("Meter %s - %s measurements" % (self.meter_id, len(json)))
results = []
metric_host = "Meter %s" % self.meter_id
for measurement in json: # measurement is a JSON element in a list returned by API
metric_time = get_metric_time(measurement)
for metric_key in Numeric_Metrics:
metric_value = measurement[metric_key]
self.logger.debug("Metric %s %s %s %s" % (metric_host, metric_key, metric_value, metric_time))
results.append(ZabbixMetric(metric_host, metric_key, metric_value, clock=metric_time))
metric_value = self.find_meter_normal_voltage(measurement)
results.append(ZabbixMetric(metric_host, Meters.meter_voltage_item, metric_value, clock=metric_time))
self.logger.info("Meter %s - %s metrics for insertion" % (self.meter_id, len(results)))
return results
def process_metrics(self):
json = self.get_json()
self.logger.info("Found %s measurements" % len(json))
results = []
for measurement in json:
IdentificationNumber = measurement["IdentificationNumber"]
metric_time = get_metric_time(measurement)
for metric in Numeric_Metrics:
metric_value = measurement[metric]
metric_key = metric + "[" + IdentificationNumber + "]"
self.logger.debug("Metric %s %s %s" % (metric_key, metric_value, metric_time))
results.append(ZabbixMetric("Zabbix server", metric_key, metric_value, clock=metric_time))
metric_value = self.find_meter_normal_voltage(measurement)
results.append(ZabbixMetric("Zabbix server", "MeterNormalVoltage", metric_value, clock=metric_time))
self.logger.info("Prepared %s metrics for insertion" % len(results))
return results
def find_meter_normal_voltage(self, measurement):
voltage_list = [measurement["SpannungP1"], measurement["SpannungP2"], measurement["SpannungP3"]]
normal_voltage = find_meter_voltage(voltage_list)
self.logger.debug("Meter %s, normal voltage %s" % (measurement["IdentificationNumber"], normal_voltage))
return normal_voltage
def send_metrics(self, metrics):
sender = ZabbixSender(self.zabbix_server_ip)
zabbix_response = sender.send(metrics)
if zabbix_response.failed > 0 and zabbix_response.processed == 0:
self.logger.error("Something went totally wrong, terminating\n%s" % zabbix_response)
exit(1)
elif zabbix_response.failed > 0 and zabbix_response.failed > zabbix_response.processed:
self.logger.warning("More failures that successes %s" % zabbix_response)
else:
self.logger.warning("Result %s" % zabbix_response)
return
    def run(self):
        """Entry point: collect all metrics for this meter and send them."""
        self.logger.warning("Meter %s started" % self.meter_id)
        metrics = self.process_metrics_per_host()
        self.send_metrics(metrics)
if __name__ == "__main__":
    # Command-line front end: parse the arguments and run one Meters job.
    optparser = argparse.ArgumentParser(description="Get JSON data from API and push it to Zabbix server")
    requiredNamed = optparser.add_argument_group("Mandatory arguments")
    requiredNamed.add_argument("-u", "--user", type=str, help="API user", required=True)
    requiredNamed.add_argument("-p", "--password", type=str, help="API password", required=True)
    requiredNamed.add_argument("-m", "--meter_id", type=str, help="Meter id", required=True)
    # Defaults are declared on the parser instead of being patched in with
    # four "if not args.x" blocks after parsing.  (Minor nuance: an
    # explicitly supplied empty string is now kept instead of replaced.)
    optparser.add_argument("-l", "--log_file", type=str, default="Json2sender.log",
                           help="Log filename. Default Json2sender.log")
    optparser.add_argument("--log_level", default="INFO", help="Default: INFO")
    optparser.add_argument("--date", default=None, help="Date as \"dd.mm.yyyy\"")
    optparser.add_argument("-z", "--zabbix_server", default="127.0.0.1", help="Server IP address")
    args = optparser.parse_args()
    app = Meters(logfile=args.log_file, loglevel=args.log_level,
                 api_user=args.user, meter_id=args.meter_id, api_pass=args.password,
                 date=args.date, zabbix_server_ip=args.zabbix_server
                 )
    app.run()
| 43.386905 | 119 | 0.651667 | from pyzabbix import ZabbixMetric, ZabbixSender
from datetime import datetime as dt
import requests
import argparse
try:
from Zabbix_scripts.Helpers.create_logger import create_logger
except ImportError:
from Helpers.create_logger import create_logger
try:
from Zabbix_scripts.Helpers.helpers import make_dates, Numeric_Metrics, get_metric_time, find_meter_voltage
except ImportError:
from Helpers.helpers import make_dates, Numeric_Metrics, get_metric_time, find_meter_voltage
class Meters:
    """Fetch grid-quality measurements for one meter from the web API and
    convert/push them to a Zabbix server."""

    # Zabbix item key for the derived "normal voltage" metric.
    meter_voltage_item = "MeterNormalVoltage"

    def __init__(self, logfile, loglevel, api_user, api_pass, meter_id, date, zabbix_server_ip):
        """Set up logging, validate the date and build the API URL.

        date defaults to make_dates() and must be "dd.mm.yyyy"; an invalid
        date terminates the process with exit code 1.
        """
        self.logger = create_logger(logfile, __name__, loglevel)
        self.logger.warning("Initiated app")
        # Security fix: the API password used to be written to the log file
        # in plain text; it is now redacted.
        self.logger.info("Input: \n\tlogfile = %s\n\tloglevel = %s\n\tapi_user = %s"
                         "\n\tapi_pass = ***\n\tmeter_id = %s\n\tdate = %s\n\tzabbix_server_ip = %s"
                         % (logfile, loglevel, api_user, meter_id, date, zabbix_server_ip)
                         )
        self.zabbix_server_ip = zabbix_server_ip
        if date is None:
            date = make_dates()
            self.logger.info("Date not provided, using %s" % date)
        try:
            dt.strptime(date, "%d.%m.%Y")
        except ValueError:
            self.logger.error("Incorrect date %s, use \"dd.mm.yyyy\"" % date)
            exit(1)
        self.api_url = ("https://webhelper.acteno.de:4443/home/GetGridQualityData?"
                        "User=%s&pass=%s&id=%s&datum=%s"
                        % (api_user, api_pass, meter_id, date))
        self.meter_id = meter_id

    def get_json(self):
        """Query the API and return the decoded JSON measurement list.

        Raises AssertionError on an HTTP or authentication failure.  These
        are now explicit raises instead of "assert" statements, which are
        stripped when Python runs with -O; the exception type is kept for
        backward compatibility.
        """
        self.logger.info("Querying URL %s" % self.api_url)
        response = requests.get(self.api_url)
        if response.status_code != 200:
            self.logger.error("API responded %s, expected 200" % response.status_code)
            raise AssertionError("API responded %s, expected 200" % response.status_code)
        # "Authenticarion" [sic] is the literal sentinel string returned by
        # the remote API - do not "fix" the spelling here.
        if response.text == "Authenticarion Error.":
            self.logger.error("API authenticarion error")
            raise AssertionError("API authenticarion error")
        self.logger.info("Response code %s in %s ms" % (response.status_code,
                                                        str(response.elapsed.microseconds / 1000))
                         )
        self.logger.debug("Headers: %s" % response.headers)
        self.logger.debug("Text: %s" % response.text)
        return response.json()

    def process_metrics_per_host(self):
        """Build ZabbixMetric objects addressed to the host "Meter <id>"."""
        json = self.get_json()
        self.logger.info("Meter %s - %s measurements" % (self.meter_id, len(json)))
        results = []
        metric_host = "Meter %s" % self.meter_id
        for measurement in json:
            metric_time = get_metric_time(measurement)
            for metric_key in Numeric_Metrics:
                metric_value = measurement[metric_key]
                self.logger.debug("Metric %s %s %s %s" % (metric_host, metric_key, metric_value, metric_time))
                results.append(ZabbixMetric(metric_host, metric_key, metric_value, clock=metric_time))
            # Derived metric computed from the three phase voltages.
            metric_value = self.find_meter_normal_voltage(measurement)
            results.append(ZabbixMetric(metric_host, Meters.meter_voltage_item, metric_value, clock=metric_time))
        self.logger.info("Meter %s - %s metrics for insertion" % (self.meter_id, len(results)))
        return results

    def process_metrics(self):
        """Build ZabbixMetric objects addressed to the host "Zabbix server",
        qualifying each item key with the meter's IdentificationNumber."""
        json = self.get_json()
        self.logger.info("Found %s measurements" % len(json))
        results = []
        for measurement in json:
            IdentificationNumber = measurement["IdentificationNumber"]
            metric_time = get_metric_time(measurement)
            for metric in Numeric_Metrics:
                metric_value = measurement[metric]
                metric_key = metric + "[" + IdentificationNumber + "]"
                self.logger.debug("Metric %s %s %s" % (metric_key, metric_value, metric_time))
                results.append(ZabbixMetric("Zabbix server", metric_key, metric_value, clock=metric_time))
            metric_value = self.find_meter_normal_voltage(measurement)
            # Consistency fix: use the shared class constant instead of a
            # second hard-coded "MeterNormalVoltage" literal (same value).
            results.append(ZabbixMetric("Zabbix server", Meters.meter_voltage_item, metric_value, clock=metric_time))
        self.logger.info("Prepared %s metrics for insertion" % len(results))
        return results

    def find_meter_normal_voltage(self, measurement):
        """Return the "normal voltage" derived from SpannungP1..P3 via
        find_meter_voltage()."""
        voltage_list = [measurement["SpannungP1"], measurement["SpannungP2"], measurement["SpannungP3"]]
        normal_voltage = find_meter_voltage(voltage_list)
        self.logger.debug("Meter %s, normal voltage %s" % (measurement["IdentificationNumber"], normal_voltage))
        return normal_voltage

    def send_metrics(self, metrics):
        """Send the metrics; terminate if nothing at all was accepted."""
        sender = ZabbixSender(self.zabbix_server_ip)
        zabbix_response = sender.send(metrics)
        if zabbix_response.failed > 0 and zabbix_response.processed == 0:
            self.logger.error("Something went totally wrong, terminating\n%s" % zabbix_response)
            exit(1)
        elif zabbix_response.failed > 0 and zabbix_response.failed > zabbix_response.processed:
            # Fixed message typo: "that" -> "than".
            self.logger.warning("More failures than successes %s" % zabbix_response)
        else:
            # Success is now logged at INFO (was WARNING).
            self.logger.info("Result %s" % zabbix_response)
        return

    def run(self):
        """Entry point: collect this meter's metrics and push them."""
        self.logger.warning("Meter %s started" % self.meter_id)
        metrics = self.process_metrics_per_host()
        self.send_metrics(metrics)
if __name__ == "__main__":
    # Command-line front end: parse the arguments and run one Meters job.
    optparser = argparse.ArgumentParser(description="Get JSON data from API and push it to Zabbix server")
    requiredNamed = optparser.add_argument_group("Mandatory arguments")
    requiredNamed.add_argument("-u", "--user", type=str, help="API user", required=True)
    requiredNamed.add_argument("-p", "--password", type=str, help="API password", required=True)
    requiredNamed.add_argument("-m", "--meter_id", type=str, help="Meter id", required=True)
    # Defaults are declared on the parser instead of being patched in with
    # four "if not args.x" blocks after parsing.  (Minor nuance: an
    # explicitly supplied empty string is now kept instead of replaced.)
    optparser.add_argument("-l", "--log_file", type=str, default="Json2sender.log",
                           help="Log filename. Default Json2sender.log")
    optparser.add_argument("--log_level", default="INFO", help="Default: INFO")
    optparser.add_argument("--date", default=None, help="Date as \"dd.mm.yyyy\"")
    optparser.add_argument("-z", "--zabbix_server", default="127.0.0.1", help="Server IP address")
    args = optparser.parse_args()
    app = Meters(logfile=args.log_file, loglevel=args.log_level,
                 api_user=args.user, meter_id=args.meter_id, api_pass=args.password,
                 date=args.date, zabbix_server_ip=args.zabbix_server
                 )
    app.run()
| true | true |
f7fcac3d4f8cb441c154ffc03903561bbbca6406 | 108,407 | py | Python | Lib/test/pickletester.py | kristjanvalur/stackless | 4b817a8b19dd0d02153700641877e7a021807a0c | [
"PSF-2.0"
] | null | null | null | Lib/test/pickletester.py | kristjanvalur/stackless | 4b817a8b19dd0d02153700641877e7a021807a0c | [
"PSF-2.0"
] | null | null | null | Lib/test/pickletester.py | kristjanvalur/stackless | 4b817a8b19dd0d02153700641877e7a021807a0c | [
"PSF-2.0"
] | null | null | null | import collections
import copyreg
import dbm
import io
import functools
import pickle
import pickletools
import struct
import sys
import unittest
import weakref
from http.cookies import SimpleCookie
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, stackless,
)
from pickle import bytes_types
# Skip marker for tests that are only meaningful when Py_ssize_t / the
# pickle length fields are 32 bits wide.
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
                                   "test is only meaningful on 32-bit builds")

# Tests that try a number of pickle protocols should have a
#     for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
def opcode_in_pickle(code, pickle):
    """Return True if opcode `code` occurs anywhere in `pickle`."""
    wanted = code.decode("latin-1")
    return any(op.code == wanted
               for op, _arg, _pos in pickletools.genops(pickle))
def count_opcode(code, pickle):
    """Return how many times opcode `code` occurs in `pickle`."""
    wanted = code.decode("latin-1")
    return sum(1 for op, _arg, _pos in pickletools.genops(pickle)
               if op.code == wanted)
class UnseekableIO(io.BytesIO):
    """A BytesIO variant that pretends not to support random access:
    seekable() is False and peek/seek/tell all raise."""

    def seekable(self):
        # Advertise the stream as non-seekable.
        return False

    def peek(self, *args):
        raise NotImplementedError

    def seek(self, *args):
        raise io.UnsupportedOperation

    def tell(self):
        raise io.UnsupportedOperation
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
    """Stash (and remove) any copyreg extension registered under *code*,
    so tests may freely use that code; restore() puts the original
    registration back."""

    def __init__(self, code):
        self.code = code
        # Pop any existing registration for this code, remembering it so
        # restore() can re-install it later.
        self.pair = copyreg._inverted_registry.get(code)
        if self.pair is not None:
            copyreg.remove_extension(self.pair[0], self.pair[1], code)

    def restore(self):
        """Undo whatever the test registered and re-install the saved pair."""
        current = copyreg._inverted_registry.get(self.code)
        if current is not None:
            copyreg.remove_extension(current[0], current[1], self.code)
        if self.pair is not None:
            module, name = self.pair
            copyreg.add_extension(module, name, self.code)
class C:
    # Canonical test fixture: equality by instance __dict__, so an
    # unpickled copy compares equal to the original.
    def __eq__(self, other):
        return self.__dict__ == other.__dict__

class D(C):
    # Requires (and ignores) a constructor argument; exercises pickling of
    # classes whose __init__ takes arguments.
    def __init__(self, arg):
        pass

class E(C):
    # Old-style __getinitargs__ protocol: re-created with no arguments.
    def __getinitargs__(self):
        return ()

class H(object):
    pass

# Hashable mutable key
class K(object):
    def __init__(self, value):
        self.value = value

    def __reduce__(self):
        # Shouldn't support the recursion itself
        return K, (self.value,)
import __main__
# Re-home the fixture classes in __main__ so pickles created here refer to
# them by a stable, importable "__main__ <name>" location, matching the
# hard-coded DATA* pickles below.
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
class myint(int):
    # int subclass with extra instance state; checks that subclass
    # attributes survive a pickle round trip.
    def __init__(self, x):
        self.str = str(x)

class initarg(C):
    # Classic __getinitargs__ fixture: re-created as initarg(a, b).

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __getinitargs__(self):
        return self.a, self.b

class metaclass(type):
    pass

class use_metaclass(object, metaclass=metaclass):
    # Instance of a custom metaclass; exercises pickling of such classes.
    pass

class pickling_metaclass(type):
    # A metaclass whose *classes* are themselves picklable via
    # create_dynamic_class; equality is by the stored reduce_args.
    def __eq__(self, other):
        return (type(self) == type(other) and
                self.reduce_args == other.reduce_args)

    def __reduce__(self):
        return (create_dynamic_class, self.reduce_args)

def create_dynamic_class(name, bases):
    # Reconstructor used by pickling_metaclass.__reduce__.
    result = pickling_metaclass(name, bases, dict())
    result.reduce_args = (name, bases)
    return result
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
if stackless:
DATA_XRANGE_SLP = b'\x80\x02cstackless._wrap\nrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02)b.'
else:
DATA_XRANGE_SLP = DATA_XRANGE
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3, we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
    """Build the canonical test object graph that DATA0..DATA4 encode."""
    inst = C()
    inst.foo = 1
    inst.bar = 2
    data = [0, 1, 2.0, 3.0 + 0j]
    # Append some integer test cases at cPickle.c's internal size cutoffs
    # (1-byte, 2-byte and 4-byte boundaries).
    data += [1, -1]
    for bound in (0xff, 0xffff, 0x7fffffff):
        data += [bound, -bound, -bound - 1]
    # The tuple is appended twice so identity preservation is exercised.
    shared = ('abc', 'abc', inst, inst)
    data.append(shared)
    data.append(shared)
    data.append(5)
    return data
class AbstractUnpickleTests(unittest.TestCase):
# Subclass must define self.loads.
_testdata = create_data()
    def assert_is_copy(self, obj, objcopy, msg=None):
        """Assert that `objcopy` is a true copy of `obj`: equal, of the
        same exact type, with equal (but not identical) __dict__ and, when
        present, matching __slots__ contents.
        """
        if msg is None:
            msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
        self.assertEqual(obj, objcopy, msg=msg)
        self.assertIs(type(obj), type(objcopy), msg=msg)
        if hasattr(obj, '__dict__'):
            self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
            # A copy must not share its attribute dict with the original.
            self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
        if hasattr(obj, '__slots__'):
            self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
            for slot in obj.__slots__:
                self.assertEqual(
                    hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
                self.assertEqual(getattr(obj, slot, None),
                                 getattr(objcopy, slot, None), msg=msg)
    def check_unpickling_error(self, errors, data):
        """Assert that unpickling `data` raises one of `errors`.

        In verbose mode the raised exception is printed before being
        re-raised into assertRaises.
        """
        with self.subTest(data=data), \
             self.assertRaises(errors):
            try:
                self.loads(data)
            except BaseException as exc:
                if support.verbose > 1:
                    print('%-32r - %s: %s' %
                          (data, exc.__class__.__name__, exc))
                raise
    # Each hard-coded DATA<n> pickle (protocols 0..4) must decode back to
    # the canonical object graph produced by create_data().
    def test_load_from_data0(self):
        self.assert_is_copy(self._testdata, self.loads(DATA0))

    def test_load_from_data1(self):
        self.assert_is_copy(self._testdata, self.loads(DATA1))

    def test_load_from_data2(self):
        self.assert_is_copy(self._testdata, self.loads(DATA2))

    def test_load_from_data3(self):
        self.assert_is_copy(self._testdata, self.loads(DATA3))

    def test_load_from_data4(self):
        self.assert_is_copy(self._testdata, self.loads(DATA4))
    def test_load_classic_instance(self):
        # See issue5180.  Test loading 2.x pickles that
        # contain an instance of old style class.
        # The bare triple-quoted strings below are disassemblies of the
        # hand-written pickles, kept for reference.
        for X, args in [(C, ()), (D, ('x',)), (E, ())]:
            xname = X.__name__.encode('ascii')
            # Protocol 0 (text mode pickle):
            """
             0: (    MARK
             1: i        INST       '__main__ X' (MARK at 0)
            13: p    PUT        0
            16: (    MARK
            17: d        DICT       (MARK at 16)
            18: p    PUT        1
            21: b    BUILD
            22: .    STOP
            """
            pickle0 = (b"(i__main__\n"
                       b"X\n"
                       b"p0\n"
                       b"(dp1\nb.").replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle0))

            # Protocol 1 (binary mode pickle)
            """
             0: (    MARK
             1: c        GLOBAL     '__main__ X'
            13: q        BINPUT     0
            15: o        OBJ        (MARK at 0)
            16: q    BINPUT     1
            18: }    EMPTY_DICT
            19: q    BINPUT     2
            21: b    BUILD
            22: .    STOP
            """
            pickle1 = (b'(c__main__\n'
                       b'X\n'
                       b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle1))

            # Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
            """
             0: \x80 PROTO      2
             2: (    MARK
             3: c        GLOBAL     '__main__ X'
            15: q        BINPUT     0
            17: o        OBJ        (MARK at 2)
            18: q    BINPUT     1
            20: }    EMPTY_DICT
            21: q    BINPUT     2
            23: b    BUILD
            24: .    STOP
            """
            pickle2 = (b'\x80\x02(c__main__\n'
                       b'X\n'
                       b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle2))
    def test_maxint64(self):
        # The largest 64-bit signed value must round-trip through the
        # text-mode INT opcode.
        maxint64 = (1 << 63) - 1
        data = b'I' + str(maxint64).encode("ascii") + b'\n.'
        got = self.loads(data)
        self.assert_is_copy(maxint64, got)

        # Try too with a bogus literal.
        data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
        self.check_unpickling_error(ValueError, data)
    def test_unpickle_from_2x(self):
        # Unpickle non-trivial data from Python 2.x.
        loaded = self.loads(DATA_SET)
        self.assertEqual(loaded, set([1, 2]))
        loaded = self.loads(DATA_XRANGE_SLP)
        if not stackless:
            # Stackless-Python provides a fake range for unpickling
            self.assertEqual(type(loaded), type(range(0)))
        self.assertEqual(list(loaded), list(range(5)))
        loaded = self.loads(DATA_COOKIE)
        self.assertEqual(type(loaded), SimpleCookie)
        self.assertEqual(list(loaded.keys()), ["key"])
        self.assertEqual(loaded["key"].value, "value")

        # Exception objects without arguments pickled from 2.x with protocol 2
        for exc in python2_exceptions_without_args:
            data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
            loaded = self.loads(data)
            self.assertIs(type(loaded), exc)

        # StandardError is mapped to Exception, test that separately
        loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
        self.assertIs(type(loaded), Exception)

        loaded = self.loads(DATA_UEERR)
        self.assertIs(type(loaded), UnicodeEncodeError)
        self.assertEqual(loaded.object, "foo")
        self.assertEqual(loaded.encoding, "ascii")
        self.assertEqual(loaded.start, 0)
        self.assertEqual(loaded.end, 1)
        self.assertEqual(loaded.reason, "bad")
    # With encoding="bytes", Python 2 str pickles must come back as bytes
    # and Python 2 unicode pickles as str.
    def test_load_python2_str_as_bytes(self):
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
        self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
                                    encoding="bytes"), b'a\x00\xa0')
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
        self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
                                    encoding="bytes"), b'a\x00\xa0')
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
        self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
                                    encoding="bytes"), b'a\x00\xa0')

    def test_load_python2_unicode_as_str(self):
        # From Python 2: pickle.dumps(u'π', protocol=0)
        self.assertEqual(self.loads(b'V\\u03c0\n.',
                                    encoding='bytes'), 'π')
        # From Python 2: pickle.dumps(u'π', protocol=1)
        self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
                                    encoding="bytes"), 'π')
        # From Python 2: pickle.dumps(u'π', protocol=2)
        self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
                                    encoding="bytes"), 'π')

    def test_load_long_python2_str_as_bytes(self):
        # From Python 2: pickle.dumps('x' * 300, protocol=1)
        self.assertEqual(self.loads(pickle.BINSTRING +
                                    struct.pack("<I", 300) +
                                    b'x' * 300 + pickle.STOP,
                                    encoding='bytes'), b'x' * 300)
    def test_constants(self):
        # None / NEWTRUE / NEWFALSE and the text-mode "I01"/"I00" booleans.
        self.assertIsNone(self.loads(b'N.'))
        self.assertIs(self.loads(b'\x88.'), True)
        self.assertIs(self.loads(b'\x89.'), False)
        self.assertIs(self.loads(b'I01\n.'), True)
        self.assertIs(self.loads(b'I00\n.'), False)

    def test_empty_bytestring(self):
        # issue 11286
        empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
        self.assertEqual(empty, '')

    def test_short_binbytes(self):
        # SHORT_BINBYTES payload comes back verbatim, not text-decoded.
        dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    def test_binbytes(self):
        dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    @requires_32b
    def test_negative_32b_binbytes(self):
        # On 32-bit builds, a BINBYTES of 2**31 or more is refused
        dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    @requires_32b
    def test_negative_32b_binunicode(self):
        # On 32-bit builds, a BINUNICODE of 2**31 or more is refused
        dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    def test_short_binunicode(self):
        dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), '\u20ac\x00')

    def test_misc_get(self):
        # GET of an unset memo slot fails; a valid GET shares the object.
        self.check_unpickling_error(KeyError, b'g0\np0')
        self.assert_is_copy([(100,), (100,)],
                            self.loads(b'((Kdtp0\nh\x00l.))'))
    def test_binbytes8(self):
        # 8-byte-length BINBYTES8 / BINUNICODE8 opcodes (protocol 4).
        dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    def test_binunicode8(self):
        dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), '\u20ac\x00')

    @requires_32b
    def test_large_32b_binbytes8(self):
        # Lengths that exceed a 32-bit size_t must be rejected cleanly.
        dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    @requires_32b
    def test_large_32b_binunicode8(self):
        dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    # The GET/BINGET/LONG_BINGET/DUP opcodes must yield the *same* object
    # that was memoized, not an equal copy.
    def test_get(self):
        pickled = b'((lp100000\ng100000\nt.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_binget(self):
        pickled = b'(]q\xffh\xfft.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_long_binget(self):
        pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_dup(self):
        pickled = b'((l2t.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])
    def test_negative_put(self):
        # Issue #12847
        dumped = b'Va\np-1\n.'
        self.check_unpickling_error(ValueError, dumped)

    @requires_32b
    def test_negative_32b_binput(self):
        # Issue #12847
        dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
        self.check_unpickling_error(ValueError, dumped)

    def test_badly_escaped_string(self):
        # A lone backslash before the closing quote is malformed.
        self.check_unpickling_error(ValueError, b"S'\\'\n.")

    def test_badly_quoted_string(self):
        # Issue #17710
        badpickles = [b"S'\n.",
                      b'S"\n.',
                      b'S\' \n.',
                      b'S" \n.',
                      b'S\'"\n.',
                      b'S"\'\n.',
                      b"S' ' \n.",
                      b'S" " \n.',
                      b"S ''\n.",
                      b'S ""\n.',
                      b'S \n.',
                      b'S\n.',
                      b'S.']
        for p in badpickles:
            self.check_unpickling_error(pickle.UnpicklingError, p)

    def test_correctly_quoted_string(self):
        # Both quote styles and escaped newlines must decode correctly.
        goodpickles = [(b"S''\n.", ''),
                       (b'S""\n.', ''),
                       (b'S"\\n"\n.', '\n'),
                       (b"S'\\n'\n.", '\n')]
        for p, expected in goodpickles:
            self.assertEqual(self.loads(p), expected)
    def test_frame_readline(self):
        # A readline-style opcode (INT) must work inside a protocol-4 FRAME.
        pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
        #    0: \x80 PROTO      4
        #    2: \x95 FRAME      5
        #   11: I    INT        42
        #   15: .    STOP
        self.assertEqual(self.loads(pickled), 42)
    def test_compat_unpickle(self):
        # Python 2 module/name references must be mapped to their Python 3
        # equivalents by the compatibility tables.
        # xrange(1, 7)
        pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
        unpickled = self.loads(pickled)
        self.assertIs(type(unpickled), range)
        self.assertEqual(unpickled, range(1, 7))
        self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
        # reduce
        pickled = b'\x80\x02c__builtin__\nreduce\n.'
        self.assertIs(self.loads(pickled), functools.reduce)
        # whichdb.whichdb
        pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
        self.assertIs(self.loads(pickled), dbm.whichdb)
        # Exception(), StandardError()
        for name in (b'Exception', b'StandardError'):
            pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
            unpickled = self.loads(pickled)
            self.assertIs(type(unpickled), Exception)
            self.assertEqual(str(unpickled), 'ugh')
        # UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
        for name in (b'UserDict', b'IterableUserDict'):
            pickled = (b'\x80\x02(cUserDict\n' + name +
                       b'\no}U\x04data}K\x01K\x02ssb.')
            unpickled = self.loads(pickled)
            self.assertIs(type(unpickled), collections.UserDict)
            self.assertEqual(unpickled, collections.UserDict({1: 2}))
    def test_bad_stack(self):
        """Opcodes that pop more items than the stack holds must fail cleanly."""
        badpickles = [
            b'.',                       # STOP
            b'0',                       # POP
            b'1',                       # POP_MARK
            b'2',                       # DUP
            b'(2',
            b'R',                       # REDUCE
            b')R',
            b'a',                       # APPEND
            b'Na',
            b'b',                       # BUILD
            b'Nb',
            b'd',                       # DICT
            b'e',                       # APPENDS
            b'(e',
            b'ibuiltins\nlist\n',       # INST
            b'l',                       # LIST
            b'o',                       # OBJ
            b'(o',
            b'p1\n',                    # PUT
            b'q\x00',                   # BINPUT
            b'r\x00\x00\x00\x00',       # LONG_BINPUT
            b's',                       # SETITEM
            b'Ns',
            b'NNs',
            b't',                       # TUPLE
            b'u',                       # SETITEMS
            b'(u',
            b'}(Nu',
            b'\x81',                    # NEWOBJ
            b')\x81',
            b'\x85',                    # TUPLE1
            b'\x86',                    # TUPLE2
            b'N\x86',
            b'\x87',                    # TUPLE3
            b'N\x87',
            b'NN\x87',
            b'\x90',                    # ADDITEMS
            b'(\x90',
            b'\x91',                    # FROZENSET
            b'\x92',                    # NEWOBJ_EX
            b')}\x92',
            b'\x93',                    # STACK_GLOBAL
            b'Vlist\n\x93',
            b'\x94',                    # MEMOIZE
        ]
        for p in badpickles:
            self.check_unpickling_error(self.bad_stack_errors, p)
    def test_bad_mark(self):
        """A MARK in the middle of an opcode's operands must fail cleanly."""
        badpickles = [
            b'N(.',                     # STOP
            b'N(2',                     # DUP
            b'cbuiltins\nlist\n)(R',    # REDUCE
            b'cbuiltins\nlist\n()R',
            b']N(a',                    # APPEND
                                        # BUILD
            b'cbuiltins\nValueError\n)R}(b',
            b'cbuiltins\nValueError\n)R(}b',
            b'(Nd',                     # DICT
            b'N(p1\n',                  # PUT
            b'N(q\x00',                 # BINPUT
            b'N(r\x00\x00\x00\x00',     # LONG_BINPUT
            b'}NN(s',                   # SETITEM
            b'}N(Ns',
            b'}(NNs',
            b'}((u',                    # SETITEMS
            b'cbuiltins\nlist\n)(\x81', # NEWOBJ
            b'cbuiltins\nlist\n()\x81',
            b'N(\x85',                  # TUPLE1
            b'NN(\x86',                 # TUPLE2
            b'N(N\x86',
            b'NNN(\x87',                # TUPLE3
            b'NN(N\x87',
            b'N(NN\x87',
            b']((\x90',                 # ADDITEMS
                                        # NEWOBJ_EX
            b'cbuiltins\nlist\n)}(\x92',
            b'cbuiltins\nlist\n)(}\x92',
            b'cbuiltins\nlist\n()}\x92',
                                        # STACK_GLOBAL
            b'Vbuiltins\n(Vlist\n\x93',
            b'Vbuiltins\nVlist\n(\x93',
            b'N(\x94',                  # MEMOIZE
        ]
        for p in badpickles:
            self.check_unpickling_error(self.bad_stack_errors, p)
    def test_truncated_data(self):
        """Pickles cut off mid-opcode or mid-argument raise truncation errors."""
        self.check_unpickling_error(EOFError, b'')
        self.check_unpickling_error(EOFError, b'N')
        badpickles = [
            b'B',                       # BINBYTES
            b'B\x03\x00\x00',
            b'B\x03\x00\x00\x00',
            b'B\x03\x00\x00\x00ab',
            b'C',                       # SHORT_BINBYTES
            b'C\x03',
            b'C\x03ab',
            b'F',                       # FLOAT
            b'F0.0',
            b'F0.00',
            b'G',                       # BINFLOAT
            b'G\x00\x00\x00\x00\x00\x00\x00',
            b'I',                       # INT
            b'I0',
            b'J',                       # BININT
            b'J\x00\x00\x00',
            b'K',                       # BININT1
            b'L',                       # LONG
            b'L0',
            b'L10',
            b'L0L',
            b'L10L',
            b'M',                       # BININT2
            b'M\x00',
            # b'P',                     # PERSID
            # b'Pabc',
            b'S',                       # STRING
            b"S'abc'",
            b'T',                       # BINSTRING
            b'T\x03\x00\x00',
            b'T\x03\x00\x00\x00',
            b'T\x03\x00\x00\x00ab',
            b'U',                       # SHORT_BINSTRING
            b'U\x03',
            b'U\x03ab',
            b'V',                       # UNICODE
            b'Vabc',
            b'X',                       # BINUNICODE
            b'X\x03\x00\x00',
            b'X\x03\x00\x00\x00',
            b'X\x03\x00\x00\x00ab',
            b'(c',                      # GLOBAL
            b'(cbuiltins',
            b'(cbuiltins\n',
            b'(cbuiltins\nlist',
            b'Ng',                      # GET
            b'Ng0',
            b'(i',                      # INST
            b'(ibuiltins',
            b'(ibuiltins\n',
            b'(ibuiltins\nlist',
            b'Nh',                      # BINGET
            b'Nj',                      # LONG_BINGET
            b'Nj\x00\x00\x00',
            b'Np',                      # PUT
            b'Np0',
            b'Nq',                      # BINPUT
            b'Nr',                      # LONG_BINPUT
            b'Nr\x00\x00\x00',
            b'\x80',                    # PROTO
            b'\x82',                    # EXT1
            b'\x83',                    # EXT2
            b'\x84\x01',
            b'\x84',                    # EXT4
            b'\x84\x01\x00\x00',
            b'\x8a',                    # LONG1
            b'\x8b',                    # LONG4
            b'\x8b\x00\x00\x00',
            b'\x8c',                    # SHORT_BINUNICODE
            b'\x8c\x03',
            b'\x8c\x03ab',
            b'\x8d',                    # BINUNICODE8
            b'\x8d\x03\x00\x00\x00\x00\x00\x00',
            b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x8e',                    # BINBYTES8
            b'\x8e\x03\x00\x00\x00\x00\x00\x00',
            b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x95',                    # FRAME
            b'\x95\x02\x00\x00\x00\x00\x00\x00',
            b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
            b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
        ]
        for p in badpickles:
            self.check_unpickling_error(self.truncated_errors, p)
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
    def setUp(self):
        # No shared fixture state; concrete subclasses supply dumps/loads.
        pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
    def dont_test_disassembly(self):
        """Disabled: compare pickletools.dis output against golden strings."""
        from io import StringIO
        from pickletools import dis

        for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
            s = self.dumps(self._testdata, proto)
            filelike = StringIO()
            dis(s, out=filelike)
            got = filelike.getvalue()
            self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_tuple_and_list(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], list)
self.assertEqual(len(x[0]), 1)
self.assertIs(x[0][0], x)
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_key(self):
d = {}
k = K(d)
d[k] = 1
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(len(x.keys()), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_set(self):
y = set()
k = K(y)
y.add(k)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
def test_recursive_list_subclass(self):
y = MyList()
y.append(y)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, MyList)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_dict_subclass(self):
d = MyDict()
d[1] = d
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_subclass_key(self):
d = MyDict()
k = K(d)
d[k] = 1
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(len(list(x.keys())), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def check_recursive_collection_and_inst(self, factory):
h = H()
y = factory([h])
h.attr = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, type(y))
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], H)
self.assertIs(list(x)[0].attr, x)
    def test_recursive_list_and_inst(self):
        """Cycle between a plain list and an instance round-trips."""
        self.check_recursive_collection_and_inst(list)
    def test_recursive_tuple_and_inst(self):
        """Cycle between a tuple and an instance round-trips."""
        self.check_recursive_collection_and_inst(tuple)
    def test_recursive_dict_and_inst(self):
        """Cycle between a dict (instance as key) and an instance round-trips."""
        self.check_recursive_collection_and_inst(dict.fromkeys)
    def test_recursive_set_and_inst(self):
        """Cycle between a set and an instance round-trips."""
        self.check_recursive_collection_and_inst(set)
    def test_recursive_frozenset_and_inst(self):
        """Cycle between a frozenset and an instance round-trips."""
        self.check_recursive_collection_and_inst(frozenset)
    def test_recursive_list_subclass_and_inst(self):
        """Cycle between a list subclass and an instance round-trips."""
        self.check_recursive_collection_and_inst(MyList)
    def test_recursive_tuple_subclass_and_inst(self):
        """Cycle between a tuple subclass and an instance round-trips."""
        self.check_recursive_collection_and_inst(MyTuple)
    def test_recursive_dict_subclass_and_inst(self):
        """Cycle between a dict subclass and an instance round-trips."""
        self.check_recursive_collection_and_inst(MyDict.fromkeys)
    def test_recursive_set_subclass_and_inst(self):
        """Cycle between a set subclass and an instance round-trips."""
        self.check_recursive_collection_and_inst(MySet)
    def test_recursive_frozenset_subclass_and_inst(self):
        """Cycle between a frozenset subclass and an instance round-trips."""
        self.check_recursive_collection_and_inst(MyFrozenSet)
    def test_unicode(self):
        """Tricky str values (escapes, non-BMP, lone surrogate) round-trip."""
        endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
                    '<\\>', '<\\\U00012345>',
                    # surrogates
                    '<\udc80>']
        for proto in protocols:
            for u in endcases:
                p = self.dumps(u, proto)
                u2 = self.loads(p)
                self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
    def test_long(self):
        """Arbitrary-precision ints round-trip around the LONG4 threshold."""
        for proto in protocols:
            # 256 bytes is where LONG4 begins.
            for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
                nbase = 1 << nbits
                for npos in nbase-1, nbase, nbase+1:
                    for n in npos, -npos:
                        # NOTE: "pickle" here shadows the module name locally.
                        pickle = self.dumps(n, proto)
                        got = self.loads(pickle)
                        self.assert_is_copy(n, got)
        # Try a monster.  This is quadratic-time in protos 0 & 1, so don't
        # bother with those.
        nbase = int("deadbeeffeedface", 16)
        nbase += nbase << 1000000
        for n in nbase, -nbase:
            p = self.dumps(n, 2)
            got = self.loads(p)
            # assert_is_copy is very expensive here as it precomputes
            # a failure message by computing the repr() of n and got,
            # we just do the check ourselves.
            self.assertIs(type(got), int)
            self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
    @run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
    def test_float_format(self):
        """Protocol 0 float repr must be locale-independent ('.' decimal point)."""
        # make sure that floats are formatted locale independent with proto 0
        self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
    def test_proto(self):
        """PROTO appears only for proto >= 2, and unknown protocols raise."""
        for proto in protocols:
            pickled = self.dumps(None, proto)
            if proto >= 2:
                proto_header = pickle.PROTO + bytes([proto])
                self.assertTrue(pickled.startswith(proto_header))
            else:
                self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)

        oob = protocols[-1] + 1     # a future protocol
        build_none = pickle.NONE + pickle.STOP
        badpickle = pickle.PROTO + bytes([oob]) + build_none
        try:
            self.loads(badpickle)
        except ValueError as err:
            self.assertIn("unsupported pickle protocol", str(err))
        else:
            self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
    def test_short_tuples(self):
        """Tuples of length 0-4 use the protocol-appropriate TUPLE* opcode."""
        # Map (proto, len(tuple)) to expected opcode.
        expected_opcode = {(0, 0): pickle.TUPLE,
                           (0, 1): pickle.TUPLE,
                           (0, 2): pickle.TUPLE,
                           (0, 3): pickle.TUPLE,
                           (0, 4): pickle.TUPLE,

                           (1, 0): pickle.EMPTY_TUPLE,
                           (1, 1): pickle.TUPLE,
                           (1, 2): pickle.TUPLE,
                           (1, 3): pickle.TUPLE,
                           (1, 4): pickle.TUPLE,

                           (2, 0): pickle.EMPTY_TUPLE,
                           (2, 1): pickle.TUPLE1,
                           (2, 2): pickle.TUPLE2,
                           (2, 3): pickle.TUPLE3,
                           (2, 4): pickle.TUPLE,

                           (3, 0): pickle.EMPTY_TUPLE,
                           (3, 1): pickle.TUPLE1,
                           (3, 2): pickle.TUPLE2,
                           (3, 3): pickle.TUPLE3,
                           (3, 4): pickle.TUPLE,
                          }
        a = ()
        b = (1,)
        c = (1, 2)
        d = (1, 2, 3)
        e = (1, 2, 3, 4)
        for proto in protocols:
            for x in a, b, c, d, e:
                s = self.dumps(x, proto)
                y = self.loads(s)
                self.assert_is_copy(x, y)
                # Protocols above 3 reuse the proto-3 tuple opcodes.
                expected = expected_opcode[min(proto, 3), len(x)]
                self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
    def test_newobj_generic(self):
        """Every sample builtin subclass in myclasses round-trips with its dict."""
        for proto in protocols:
            for C in myclasses:
                B = C.__base__
                x = C(C.sample)
                x.foo = 42
                s = self.dumps(x, proto)
                y = self.loads(s)
                detail = (proto, C, B, x, y, type(y))
                self.assert_is_copy(x, y) # XXX revisit
                self.assertEqual(B(x), B(y), detail)
                self.assertEqual(x.__dict__, y.__dict__, detail)
    def test_newobj_proxies(self):
        """Pickling a weakproxy produces an instance of the proxied class."""
        # NEWOBJ should use the __class__ rather than the raw type
        classes = myclasses[:]
        # Cannot create weakproxies to these classes
        for c in (MyInt, MyTuple):
            classes.remove(c)
        for proto in protocols:
            for C in classes:
                B = C.__base__
                x = C(C.sample)
                x.foo = 42
                p = weakref.proxy(x)
                s = self.dumps(p, proto)
                y = self.loads(s)
                self.assertEqual(type(y), type(x))  # rather than type(p)
                detail = (proto, C, B, x, y, type(y))
                self.assertEqual(B(x), B(y), detail)
                self.assertEqual(x.__dict__, y.__dict__, detail)
    def test_newobj_not_class(self):
        """Unpickling NEWOBJ against a non-class global must raise."""
        # Issue 24552
        global SimpleNewObj
        save = SimpleNewObj
        o = SimpleNewObj.__new__(SimpleNewObj)
        b = self.dumps(o, 4)
        try:
            # Rebind the global so unpickling resolves the name to an int.
            SimpleNewObj = 42
            self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
        finally:
            SimpleNewObj = save
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
    def produce_global_ext(self, extcode, opcode):
        """Register MyList under extcode; check proto 2 uses `opcode`, proto 1
        does not, and both round-trip."""
        e = ExtensionSaver(extcode)
        try:
            copyreg.add_extension(__name__, "MyList", extcode)
            x = MyList([1, 2, 3])
            x.foo = 42
            x.bar = "hello"

            # Dump using protocol 1 for comparison.
            s1 = self.dumps(x, 1)
            self.assertIn(__name__.encode("utf-8"), s1)
            self.assertIn(b"MyList", s1)
            self.assertFalse(opcode_in_pickle(opcode, s1))

            y = self.loads(s1)
            self.assert_is_copy(x, y)

            # Dump using protocol 2 for test.
            s2 = self.dumps(x, 2)
            self.assertNotIn(__name__.encode("utf-8"), s2)
            self.assertNotIn(b"MyList", s2)
            self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))

            y = self.loads(s2)
            self.assert_is_copy(x, y)
        finally:
            e.restore()
    def test_global_ext1(self):
        """Extension codes that fit one byte are emitted as EXT1."""
        self.produce_global_ext(0x00000001, pickle.EXT1)  # smallest EXT1 code
        self.produce_global_ext(0x000000ff, pickle.EXT1)  # largest EXT1 code
    def test_global_ext2(self):
        """Extension codes that fit two bytes are emitted as EXT2."""
        self.produce_global_ext(0x00000100, pickle.EXT2)  # smallest EXT2 code
        self.produce_global_ext(0x0000ffff, pickle.EXT2)  # largest EXT2 code
        self.produce_global_ext(0x0000abcd, pickle.EXT2)  # check endianness
    def test_global_ext4(self):
        """Extension codes that need four bytes are emitted as EXT4."""
        self.produce_global_ext(0x00010000, pickle.EXT4)  # smallest EXT4 code
        self.produce_global_ext(0x7fffffff, pickle.EXT4)  # largest EXT4 code
        self.produce_global_ext(0x12abcdef, pickle.EXT4)  # check endianness
    def test_list_chunking(self):
        """Large lists are written as multiple APPENDS batches (proto > 0)."""
        n = 10  # too small to chunk
        x = list(range(n))
        for proto in protocols:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            num_appends = count_opcode(pickle.APPENDS, s)
            # proto 0 uses APPEND only; one APPENDS batch otherwise.
            self.assertEqual(num_appends, proto > 0)

        n = 2500  # expect at least two chunks when proto > 0
        x = list(range(n))
        for proto in protocols:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            num_appends = count_opcode(pickle.APPENDS, s)
            if proto == 0:
                self.assertEqual(num_appends, 0)
            else:
                self.assertTrue(num_appends >= 2)
    def test_dict_chunking(self):
        """Large dicts are written as multiple SETITEMS batches (proto > 0)."""
        n = 10  # too small to chunk
        x = dict.fromkeys(range(n))
        for proto in protocols:
            s = self.dumps(x, proto)
            self.assertIsInstance(s, bytes_types)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            num_setitems = count_opcode(pickle.SETITEMS, s)
            # proto 0 uses SETITEM only; one SETITEMS batch otherwise.
            self.assertEqual(num_setitems, proto > 0)

        n = 2500  # expect at least two chunks when proto > 0
        x = dict.fromkeys(range(n))
        for proto in protocols:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            num_setitems = count_opcode(pickle.SETITEMS, s)
            if proto == 0:
                self.assertEqual(num_setitems, 0)
            else:
                self.assertTrue(num_setitems >= 2)
    def test_set_chunking(self):
        """Large sets are written as multiple ADDITEMS batches (proto >= 4)."""
        n = 10  # too small to chunk
        x = set(range(n))
        for proto in protocols:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            num_additems = count_opcode(pickle.ADDITEMS, s)
            if proto < 4:
                self.assertEqual(num_additems, 0)
            else:
                self.assertEqual(num_additems, 1)

        n = 2500  # expect at least two chunks when proto >= 4
        x = set(range(n))
        for proto in protocols:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            num_additems = count_opcode(pickle.ADDITEMS, s)
            if proto < 4:
                self.assertEqual(num_additems, 0)
            else:
                self.assertGreaterEqual(num_additems, 2)
    def test_simple_newobj(self):
        """SimpleNewObj pickles via NEWOBJ (proto >= 2), never NEWOBJ_EX."""
        x = SimpleNewObj.__new__(SimpleNewObj, 0xface)  # avoid __init__
        x.abc = 666
        for proto in protocols:
            with self.subTest(proto=proto):
                s = self.dumps(x, proto)
                if proto < 1:
                    self.assertIn(b'\nI64206', s)  # INT
                else:
                    self.assertIn(b'M\xce\xfa', s)  # BININT2
                self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
                                 2 <= proto)
                self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
                y = self.loads(s)   # will raise TypeError if __init__ called
                self.assert_is_copy(x, y)
    def test_complex_newobj(self):
        """ComplexNewObj pickles via NEWOBJ with a string __getnewargs__."""
        x = ComplexNewObj.__new__(ComplexNewObj, 0xface)  # avoid __init__
        x.abc = 666
        for proto in protocols:
            with self.subTest(proto=proto):
                s = self.dumps(x, proto)
                if proto < 1:
                    self.assertIn(b'\nI64206', s)  # INT
                elif proto < 2:
                    self.assertIn(b'M\xce\xfa', s)  # BININT2
                elif proto < 4:
                    self.assertIn(b'X\x04\x00\x00\x00FACE', s)  # BINUNICODE
                else:
                    self.assertIn(b'\x8c\x04FACE', s)  # SHORT_BINUNICODE
                self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
                                 2 <= proto)
                self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
                y = self.loads(s)   # will raise TypeError if __init__ called
                self.assert_is_copy(x, y)
    def test_complex_newobj_ex(self):
        """ComplexNewObjEx pickles via NEWOBJ_EX (proto >= 4), never NEWOBJ."""
        x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface)  # avoid __init__
        x.abc = 666
        for proto in protocols:
            with self.subTest(proto=proto):
                s = self.dumps(x, proto)
                if proto < 1:
                    self.assertIn(b'\nI64206', s)  # INT
                elif proto < 2:
                    self.assertIn(b'M\xce\xfa', s)  # BININT2
                elif proto < 4:
                    self.assertIn(b'X\x04\x00\x00\x00FACE', s)  # BINUNICODE
                else:
                    self.assertIn(b'\x8c\x04FACE', s)  # SHORT_BINUNICODE
                self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
                self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
                                 4 <= proto)
                y = self.loads(s)   # will raise TypeError if __init__ called
                self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
    @no_tracing
    def test_bad_getattr(self):
        """A recursive __getattr__ must raise RuntimeError, not crash."""
        # Issue #3514: crash when there is an infinite loop in __getattr__
        x = BadGetattr()
        for proto in protocols:
            self.assertRaises(RuntimeError, self.dumps, x, proto)
    def test_reduce_bad_iterator(self):
        """Non-iterator 4th/5th __reduce__ items must not crash the pickler."""
        # Issue4176: crash when 4th and 5th items of __reduce__()
        # are not iterators
        class C(object):
            def __reduce__(self):
                # 4th item is not an iterator
                return list, (), None, [], None
        class D(object):
            def __reduce__(self):
                # 5th item is not an iterator
                return dict, (), None, None, []

        # Python implementation is less strict and also accepts iterables.
        # The swallow-and-continue below is therefore deliberate: either
        # outcome (success or PicklingError) is acceptable; only a crash
        # or another exception type is a failure.
        for proto in protocols:
            try:
                self.dumps(C(), proto)
            except pickle.PicklingError:
                pass
            try:
                self.dumps(D(), proto)
            except pickle.PicklingError:
                pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
    def test_pickle_to_2x(self):
        """Protocol 2 output matches golden pickles Python 2.x produced."""
        # Pickle non-trivial data with protocol 2, expecting that it yields
        # the same result as Python 2.x did.
        # NOTE: this test is a bit too strong since we can produce different
        # bytecode that 2.x will still understand.
        dumped = self.dumps(range(5), 2)
        self.assertEqual(dumped, DATA_XRANGE)
        dumped = self.dumps(set([3]), 2)
        self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
    def test_int_pickling_efficiency(self):
        """Pickle size grows monotonically with int size; no LONG in proto>=2."""
        # Test compacity of int representation (see issue #12744)
        for proto in protocols:
            with self.subTest(proto=proto):
                pickles = [self.dumps(2**n, proto) for n in range(70)]
                sizes = list(map(len, pickles))
                # the size function is monotonic
                self.assertEqual(sorted(sizes), sizes)
                if proto >= 2:
                    for p in pickles:
                        self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
    def check_frame_opcodes(self, pickled):
        """
        Check the arguments of FRAME opcodes in a protocol 4+ pickle.

        Note that binary objects that are larger than FRAME_SIZE_TARGET are not
        framed by default and are therefore considered a frame by themselves in
        the following consistency check.
        """
        # frame_end: absolute position where the current frame must end,
        # or None when we are between frames.
        # frameless_start: position where an unframed run of data began.
        frame_end = frameless_start = None
        frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8', 'BINUNICODE8'}
        for op, arg, pos in pickletools.genops(pickled):
            if frame_end is not None:
                self.assertLessEqual(pos, frame_end)
                if pos == frame_end:
                    frame_end = None

            if frame_end is not None:  # framed
                self.assertNotEqual(op.name, 'FRAME')
                if op.name in frameless_opcodes:
                    # Only short bytes and str objects should be written
                    # in a frame
                    self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)

            else:  # not framed
                if (op.name == 'FRAME' or
                    (op.name in frameless_opcodes and
                     len(arg) > self.FRAME_SIZE_TARGET)):
                    # Frame or large bytes or str object
                    if frameless_start is not None:
                        # Only short data should be written outside of a frame
                        self.assertLess(pos - frameless_start,
                                        self.FRAME_SIZE_MIN)
                        frameless_start = None
                elif frameless_start is None and op.name != 'PROTO':
                    frameless_start = pos

            if op.name == 'FRAME':
                self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
                # 9 = 1 byte opcode + 8 byte little-endian length argument.
                frame_end = pos + 9 + arg

        pos = len(pickled)
        if frame_end is not None:
            self.assertEqual(frame_end, pos)
        elif frameless_start is not None:
            self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
    def test_framing_many_objects(self):
        """Many small objects are grouped into frames near FRAME_SIZE_TARGET."""
        obj = list(range(10**5))
        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                pickled = self.dumps(obj, proto)
                unpickled = self.loads(pickled)
                self.assertEqual(obj, unpickled)
                bytes_per_frame = (len(pickled) /
                                   count_opcode(pickle.FRAME, pickled))
                self.assertGreater(bytes_per_frame,
                                   self.FRAME_SIZE_TARGET / 2)
                self.assertLessEqual(bytes_per_frame,
                                     self.FRAME_SIZE_TARGET * 1)
                self.check_frame_opcodes(pickled)
    def test_framing_large_objects(self):
        """Large binary objects are written outside frames; small ones inside."""
        N = 1024 * 1024
        small_items = [[i] for i in range(10)]
        obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            for fast in [False, True]:
                with self.subTest(proto=proto, fast=fast):
                    if not fast:
                        # fast=False by default.
                        # This covers in-memory pickling with pickle.dumps().
                        pickled = self.dumps(obj, proto)
                    else:
                        # Pickler is required when fast=True.
                        if not hasattr(self, 'pickler'):
                            continue
                        buf = io.BytesIO()
                        pickler = self.pickler(buf, protocol=proto)
                        pickler.fast = fast
                        pickler.dump(obj)
                        pickled = buf.getvalue()
                    unpickled = self.loads(pickled)
                    # More informative error message in case of failure.
                    self.assertEqual([len(x) for x in obj],
                                     [len(x) for x in unpickled])
                    # Perform full equality check if the lengths match.
                    self.assertEqual(obj, unpickled)
                    n_frames = count_opcode(pickle.FRAME, pickled)
                    # A single frame for small objects between
                    # first two large objects.
                    self.assertEqual(n_frames, 1)
                    self.check_frame_opcodes(pickled)
    def test_optional_frames(self):
        """Pickles remain loadable when some or all FRAME opcodes are removed."""
        if pickle.HIGHEST_PROTOCOL < 4:
            return

        def remove_frames(pickled, keep_frame=None):
            """Remove frame opcodes from the given pickle."""
            frame_starts = []
            # 1 byte for the opcode and 8 for the argument
            frame_opcode_size = 9
            for opcode, _, pos in pickletools.genops(pickled):
                if opcode.name == 'FRAME':
                    frame_starts.append(pos)

            newpickle = bytearray()
            last_frame_end = 0
            for i, pos in enumerate(frame_starts):
                # keep_frame(i) truthy means this FRAME opcode is retained.
                if keep_frame and keep_frame(i):
                    continue
                newpickle += pickled[last_frame_end:pos]
                last_frame_end = pos + frame_opcode_size
            newpickle += pickled[last_frame_end:]
            return newpickle

        frame_size = self.FRAME_SIZE_TARGET
        num_frames = 20
        # Large byte objects (dict values) intermitted with small objects
        # (dict keys)
        obj = {i: bytes([i]) * frame_size for i in range(num_frames)}

        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            pickled = self.dumps(obj, proto)

            frameless_pickle = remove_frames(pickled)
            self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
            self.assertEqual(obj, self.loads(frameless_pickle))

            some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
            self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
                            count_opcode(pickle.FRAME, pickled))
            self.assertEqual(obj, self.loads(some_frames_pickle))
    def test_framed_write_sizes_with_delayed_writer(self):
        """Framing batches writes and keeps passed memoryviews valid."""
        class ChunkAccumulator:
            """Accumulate pickler output in a list of raw chunks."""
            def __init__(self):
                self.chunks = []
            def write(self, chunk):
                self.chunks.append(chunk)
            def concatenate_chunks(self):
                return b"".join(self.chunks)

        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
                       for i in range(int(1e4))]
            # Add a large unique ASCII string
            objects.append('0123456789abcdef' *
                           (self.FRAME_SIZE_TARGET // 16 + 1))

            # Protocol 4 packs groups of small objects into frames and issues
            # calls to write only once or twice per frame:
            # The C pickler issues one call to write per-frame (header and
            # contents) while Python pickler issues two calls to write: one for
            # the frame header and one for the frame binary contents.
            writer = ChunkAccumulator()
            self.pickler(writer, proto).dump(objects)

            # Actually read the binary content of the chunks after the end
            # of the call to dump: any memoryview passed to write should not
            # be released otherwise this delayed access would not be possible.
            pickled = writer.concatenate_chunks()
            reconstructed = self.loads(pickled)
            self.assertEqual(reconstructed, objects)
            self.assertGreater(len(writer.chunks), 1)

            # memoryviews should own the memory.
            del objects
            support.gc_collect()
            self.assertEqual(writer.concatenate_chunks(), pickled)

            n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
            # There should be at least one call to write per frame
            self.assertGreaterEqual(len(writer.chunks), n_frames)

            # but not too many either: there can be one for the proto,
            # one per-frame header, one per frame for the actual contents,
            # and two for the header.
            self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)

            chunk_sizes = [len(c) for c in writer.chunks]
            large_sizes = [s for s in chunk_sizes
                           if s >= self.FRAME_SIZE_TARGET]
            medium_sizes = [s for s in chunk_sizes
                            if 9 < s < self.FRAME_SIZE_TARGET]
            small_sizes = [s for s in chunk_sizes if s <= 9]

            # Large chunks should not be too large:
            for chunk_size in large_sizes:
                self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
                                chunk_sizes)
            # There shouldn't bee too many small chunks: the protocol header,
            # the frame headers and the large string headers are written
            # in small chunks.
            self.assertLessEqual(len(small_sizes),
                                 len(large_sizes) + len(medium_sizes) + 3,
                                 chunk_sizes)
def test_nested_names(self):
    """Classes nested inside other classes (dotted __qualname__) must
    round-trip by reference under every protocol."""
    global Nested
    class Nested:
        class A:
            class B:
                class C:
                    pass
    targets = (Nested.A, Nested.A.B, Nested.A.B.C)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for target in targets:
            with self.subTest(proto=proto, obj=target):
                roundtripped = self.loads(self.dumps(target, proto))
                # Pickling by reference must yield the identical class
                # object, not a copy.
                self.assertIs(target, roundtripped)
def test_recursive_nested_names(self):
    """A class whose __qualname__ path loops back through itself
    (Recursive.mod.Recursive, with .mod set to the defining module)
    must still pickle by reference under every protocol."""
    global Recursive
    class Recursive:
        pass
    # Build the reference loop: the class holds its own module, and its
    # qualname routes the lookup through that attribute.
    Recursive.mod = sys.modules[Recursive.__module__]
    Recursive.__qualname__ = 'Recursive.mod.Recursive'
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(proto=proto):
            unpickled = self.loads(self.dumps(Recursive, proto))
            self.assertIs(unpickled, Recursive)

    del Recursive.mod # break reference loop
def test_py_methods(self):
    """Pure-Python methods (static, class, and instance; both on a
    top-level class and on a nested class) must pickle by reference
    and behave identically after a round-trip."""
    global PyMethodsTest
    class PyMethodsTest:
        @staticmethod
        def cheese():
            return "cheese"
        @classmethod
        def wine(cls):
            assert cls is PyMethodsTest
            return "wine"
        def biscuits(self):
            assert isinstance(self, PyMethodsTest)
            return "biscuits"
        class Nested:
            "Nested class"
            @staticmethod
            def ketchup():
                return "ketchup"
            @classmethod
            def maple(cls):
                assert cls is PyMethodsTest.Nested
                return "maple"
            def pie(self):
                assert isinstance(self, PyMethodsTest.Nested)
                return "pie"

    # Methods callable without extra arguments (bound or static/class).
    py_methods = (
        PyMethodsTest.cheese,
        PyMethodsTest.wine,
        PyMethodsTest().biscuits,
        PyMethodsTest.Nested.ketchup,
        PyMethodsTest.Nested.maple,
        PyMethodsTest.Nested().pie
    )
    # Unbound methods are plain functions: call them with an instance.
    py_unbound_methods = (
        (PyMethodsTest.biscuits, PyMethodsTest),
        (PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
    )
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for method in py_methods:
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(), unpickled())
        for method, cls in py_unbound_methods:
            obj = cls()
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
    """Methods of C-implemented (built-in) types — in their various
    descriptor flavors — must pickle and still produce the same result
    when called with the paired arguments."""
    global Subclass
    class Subclass(tuple):
        class Nested(str):
            pass

    # Each entry is (method object, args to call it with).
    c_methods = (
        # bound built-in method
        ("abcd".index, ("c",)),
        # unbound built-in method
        (str.index, ("abcd", "c")),
        # bound "slot" method
        ([1, 2, 3].__len__, ()),
        # unbound "slot" method
        (list.__len__, ([1, 2, 3],)),
        # bound "coexist" method
        ({1, 2}.__contains__, (2,)),
        # unbound "coexist" method
        (set.__contains__, ({1, 2}, 2)),
        # built-in class method
        (dict.fromkeys, (("a", 1), ("b", 2))),
        # built-in static method
        (bytearray.maketrans, (b"abc", b"xyz")),
        # subclass methods
        (Subclass([1,2,2]).count, (2,)),
        (Subclass.count, (Subclass([1,2,2]), 2)),
        (Subclass.Nested("sweet").count, ("e",)),
        (Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
    )
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for method, args in c_methods:
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
    """Protocols < 3 must emit the Python 2 module/name for objects that
    were renamed in Python 3, so old interpreters can read the stream."""
    cases = [
        (range(1, 7), '__builtin__', 'xrange'),
        (map(int, '123'), 'itertools', 'imap'),
        (functools.reduce, '__builtin__', 'reduce'),
        (dbm.whichdb, 'whichdb', 'whichdb'),
        (Exception(), 'exceptions', 'Exception'),
        (collections.UserDict(), 'UserDict', 'IterableUserDict'),
        (collections.UserList(), 'UserList', 'UserList'),
        (collections.defaultdict(), 'collections', 'defaultdict'),
    ]
    for val, mod, name in cases:
        for proto in range(3):
            with self.subTest(type=type(val), proto=proto):
                dumped = self.dumps(val, proto)
                # GLOBAL opcode: b'c' + module + newline + name.
                expected = ('c%s\n%s' % (mod, name)).encode()
                self.assertIn(expected, dumped)
                # The round-trip must restore the Python 3 type.
                self.assertIs(type(self.loads(dumped)), type(val))
def test_local_lookup_error(self):
    """Pickling a function that is not reachable from its module must
    fail with AttributeError/PicklingError rather than crash or emit a
    bogus stream."""
    # Test that whichmodule() errors out cleanly when looking up
    # an assumed globally-reachable object fails.
    if stackless:
        self.skipTest("Stackless can pickle functions by value")
    def f():
        pass
    # Since the function is local, lookup will fail
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
    # Same without a __module__ attribute (exercises a different path
    # in _pickle.c).
    del f.__module__
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
    # Yet a different path.
    f.__name__ = f.__qualname__
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
class BigmemPickleTests(unittest.TestCase):
    """Size-limit tests for the binary opcodes around the 2 GiB and
    4 GiB boundaries.

    The @bigmemtest decorator injects ``size`` and only runs these when
    the test harness grants enough memory; payloads are released in
    ``finally`` blocks to keep peak usage down.
    """

    # Binary protocols can serialize longs of up to 2 GiB-1

    @bigmemtest(size=_2G, memuse=3.6, dry_run=False)
    def test_huge_long_32b(self, size):
        # An int this large cannot be encoded with a 32-bit length
        # prefix; expect a clean error rather than a corrupt pickle.
        data = 1 << (8 * size)
        try:
            for proto in protocols:
                if proto < 2:
                    continue
                with self.subTest(proto=proto):
                    with self.assertRaises((ValueError, OverflowError)):
                        self.dumps(data, protocol=proto)
        finally:
            data = None  # release the huge int promptly

    # Protocol 3 can serialize up to 4 GiB-1 as a bytes object
    # (older protocols don't have a dedicated opcode for bytes and are
    # too inefficient)

    @bigmemtest(size=_2G, memuse=2.5, dry_run=False)
    def test_huge_bytes_32b(self, size):
        data = b"abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto < 3:
                    continue
                with self.subTest(proto=proto):
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        # Payload must be preceded by BINBYTES with a
                        # little-endian 32-bit byte count.
                        header = (pickle.BINBYTES +
                                  struct.pack("<I", len(data)))
                        data_start = pickled.index(data)
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                    finally:
                        pickled = None
        finally:
            data = None

    @bigmemtest(size=_4G, memuse=2.5, dry_run=False)
    def test_huge_bytes_64b(self, size):
        data = b"acbd" * (size // 4)
        try:
            for proto in protocols:
                if proto < 3:
                    continue
                with self.subTest(proto=proto):
                    if proto == 3:
                        # Protocol 3 does not support large bytes objects.
                        # Verify that we do not crash when processing one.
                        with self.assertRaises((ValueError, OverflowError)):
                            self.dumps(data, protocol=proto)
                        continue
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        # Protocol 4+ uses BINBYTES8 with a 64-bit count.
                        header = (pickle.BINBYTES8 +
                                  struct.pack("<Q", len(data)))
                        data_start = pickled.index(data)
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                    finally:
                        pickled = None
        finally:
            data = None

    # All protocols use 1-byte per printable ASCII character; we add another
    # byte because the encoded form has to be copied into the internal buffer.

    @bigmemtest(size=_2G, memuse=8, dry_run=False)
    def test_huge_str_32b(self, size):
        data = "abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto == 0:
                    continue
                with self.subTest(proto=proto):
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        # BINUNICODE carries a 32-bit UTF-8 byte count;
                        # the payload here is pure ASCII, so the count
                        # equals len(data).
                        header = (pickle.BINUNICODE +
                                  struct.pack("<I", len(data)))
                        data_start = pickled.index(b'abcd')
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                        # The whole payload must be present contiguously.
                        self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
                                          pickled.index(b"abcd")), len(data))
                    finally:
                        pickled = None
        finally:
            data = None

    # BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
    # of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
    # unicode strings however.

    @bigmemtest(size=_4G, memuse=8, dry_run=False)
    def test_huge_str_64b(self, size):
        data = "abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto == 0:
                    continue
                with self.subTest(proto=proto):
                    if proto < 4:
                        # Pre-4 protocols must refuse, not truncate.
                        with self.assertRaises((ValueError, OverflowError)):
                            self.dumps(data, protocol=proto)
                        continue
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINUNICODE8 +
                                  struct.pack("<Q", len(data)))
                        data_start = pickled.index(b'abcd')
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                        self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
                                          pickled.index(b"abcd")), len(data))
                    finally:
                        pickled = None
        finally:
            data = None
# Test classes for reduce_ex

class REX_one(object):
    """No __reduce_ex__ here, but inheriting it from object"""
    # Set to 1 once the pickler has actually invoked __reduce__.
    _reduce_called = 0
    def __reduce__(self):
        self._reduce_called = 1
        return REX_one, ()
class REX_two(object):
    """No __reduce__ here, but inheriting it from object"""
    # Records the protocol number __reduce_ex__ was called with.
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()
class REX_three(object):
    """Defines both hooks; __reduce_ex__ must take precedence."""
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()
    def __reduce__(self):
        # The pickler must prefer __reduce_ex__, so this never runs.
        raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
    """Calling base class method should succeed"""
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        # Delegating to object.__reduce_ex__ must not recurse.
        return object.__reduce_ex__(self, proto)
class REX_five(object):
    """This one used to fail with infinite recursion"""
    _reduce_called = 0
    def __reduce__(self):
        self._reduce_called = 1
        # Delegating to object.__reduce__ must not recurse either.
        return object.__reduce__(self)
class REX_six(object):
    """This class is used to check the 4th argument (list iterator) of
    the reduce protocol.
    """
    def __init__(self, items=None):
        # None sentinel avoids the shared-mutable-default pitfall.
        self.items = items if items is not None else []
    def __eq__(self, other):
        return type(self) is type(other) and self.items == other.items
    def append(self, item):
        self.items.append(item)
    def __reduce__(self):
        # 4th element: an iterator whose items the unpickler feeds back
        # through append().
        return type(self), (), None, iter(self.items), None
class REX_seven(object):
    """This class is used to check the 5th argument (dict iterator) of
    the reduce protocol.
    """
    def __init__(self, table=None):
        # None sentinel avoids the shared-mutable-default pitfall.
        self.table = table if table is not None else {}
    def __eq__(self, other):
        return type(self) is type(other) and self.table == other.table
    def __setitem__(self, key, value):
        self.table[key] = value
    def __reduce__(self):
        # 5th element: an iterator of (key, value) pairs the unpickler
        # feeds back through __setitem__().
        return type(self), (), None, None, iter(self.table.items())
# Test classes for newobj
# Each class subclasses one builtin and carries a `sample` value of that
# type, so the suite can exercise subclass pickling for every builtin.

class MyInt(int):
    sample = 1

class MyFloat(float):
    sample = 1.0

class MyComplex(complex):
    sample = 1.0 + 0.0j

class MyStr(str):
    sample = "hello"

class MyUnicode(str):
    sample = "hello \u1234"

class MyTuple(tuple):
    sample = (1, 2, 3)

class MyList(list):
    sample = [1, 2, 3]

class MyDict(dict):
    sample = {"a": 1, "b": 2}

class MySet(set):
    sample = {"a", "b"}

class MyFrozenSet(frozenset):
    sample = frozenset({"a", "b"})

# The full roster of subclass fixtures, iterated by the round-trip tests.
myclasses = [MyInt, MyFloat,
             MyComplex,
             MyStr, MyUnicode,
             MyTuple, MyList, MyDict, MySet, MyFrozenSet]

class SlotList(MyList):
    # A list subclass with __slots__, to exercise slot-state pickling.
    __slots__ = ["foo"]
class SimpleNewObj(int):
    def __init__(self, *args, **kwargs):
        # raise an error, to make sure this isn't called
        raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
    def __eq__(self, other):
        return int(self) == int(other) and self.__dict__ == other.__dict__

class ComplexNewObj(SimpleNewObj):
    # Provides positional __new__ arguments for unpickling.
    def __getnewargs__(self):
        return ('%X' % self, 16)

class ComplexNewObjEx(SimpleNewObj):
    # Provides (args, kwargs) for __new__ via __getnewargs_ex__.
    def __getnewargs_ex__(self):
        return ('%X' % self,), {'base': 16}

class BadGetattr:
    def __getattr__(self, key):
        # Deliberately recursive: any attribute access recurses forever.
        self.foo
class AbstractPickleModuleTests(unittest.TestCase):
    """Module-level API checks shared by the Python and C pickle
    implementations.

    Subclasses supply dump/dumps/load/loads and Pickler/Unpickler.
    """

    def test_dump_closed_file(self):
        # dump() to a closed file must raise ValueError, not crash.
        import os
        f = open(TESTFN, "wb")
        try:
            f.close()
            self.assertRaises(ValueError, self.dump, 123, f)
        finally:
            os.remove(TESTFN)

    def test_load_closed_file(self):
        # load() from a closed file must raise ValueError, not crash.
        # BUGFIX: this previously called self.dump, which merely
        # duplicated test_dump_closed_file and left the load path
        # untested.
        import os
        f = open(TESTFN, "wb")
        try:
            f.close()
            self.assertRaises(ValueError, self.load, f)
        finally:
            os.remove(TESTFN)

    def test_load_from_and_dump_to_file(self):
        # Round-trip through a real stream object.
        stream = io.BytesIO()
        data = [123, {}, 124]
        self.dump(data, stream)
        stream.seek(0)
        unpickled = self.load(stream)
        self.assertEqual(unpickled, data)

    def test_highest_protocol(self):
        # Of course this needs to be changed when HIGHEST_PROTOCOL changes.
        self.assertEqual(pickle.HIGHEST_PROTOCOL, 4)

    def test_callapi(self):
        f = io.BytesIO()
        # With and without keyword arguments
        self.dump(123, f, -1)
        self.dump(123, file=f, protocol=-1)
        self.dumps(123, -1)
        self.dumps(123, protocol=-1)
        self.Pickler(f, -1)
        self.Pickler(f, protocol=-1)

    def test_bad_init(self):
        # Test issue3664 (pickle can segfault from a badly initialized Pickler).
        # Override initialization without calling __init__() of the superclass.
        class BadPickler(self.Pickler):
            def __init__(self): pass

        class BadUnpickler(self.Unpickler):
            def __init__(self): pass

        self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
        self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
    """Behavior of Pickler/Unpickler *instances*: memo clearing and
    priming, object reuse, and unpickling through buffered streams.

    Subclasses must set pickler_class and unpickler_class.
    """

    pickler_class = None
    unpickler_class = None

    def setUp(self):
        assert self.pickler_class
        assert self.unpickler_class

    def test_clear_pickler_memo(self):
        # To test whether clear_memo() has any effect, we pickle an object,
        # then pickle it again without clearing the memo; the two serialized
        # forms should be different. If we clear_memo() and then pickle the
        # object again, the third serialized form should be identical to the
        # first one we obtained.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)

        pickler.dump(data)
        first_pickled = f.getvalue()

        # Reset BytesIO object.
        f.seek(0)
        f.truncate()

        pickler.dump(data)
        second_pickled = f.getvalue()

        # Reset the Pickler and BytesIO objects.
        pickler.clear_memo()
        f.seek(0)
        f.truncate()

        pickler.dump(data)
        third_pickled = f.getvalue()

        self.assertNotEqual(first_pickled, second_pickled)
        self.assertEqual(first_pickled, third_pickled)

    def test_priming_pickler_memo(self):
        # Verify that we can set the Pickler's memo attribute.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)

        pickler.dump(data)
        first_pickled = f.getvalue()

        f = io.BytesIO()
        primed = self.pickler_class(f)
        primed.memo = pickler.memo

        primed.dump(data)
        primed_pickled = f.getvalue()

        # A primed memo makes the pickler emit back-references instead
        # of full encodings, so its output must differ.
        self.assertNotEqual(first_pickled, primed_pickled)

    def test_priming_unpickler_memo(self):
        # Verify that we can set the Unpickler's memo attribute.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)

        pickler.dump(data)
        first_pickled = f.getvalue()

        f = io.BytesIO()
        primed = self.pickler_class(f)
        primed.memo = pickler.memo

        primed.dump(data)
        primed_pickled = f.getvalue()

        unpickler = self.unpickler_class(io.BytesIO(first_pickled))
        unpickled_data1 = unpickler.load()

        self.assertEqual(unpickled_data1, data)

        primed = self.unpickler_class(io.BytesIO(primed_pickled))
        primed.memo = unpickler.memo
        unpickled_data2 = primed.load()
        primed.memo.clear()

        self.assertEqual(unpickled_data2, data)
        # Back-references in the primed stream must resolve to the very
        # same object produced by the first load.  (assertIs instead of
        # assertTrue(... is ...): it reports both operands on failure.)
        self.assertIs(unpickled_data2, unpickled_data1)

    def test_reusing_unpickler_objects(self):
        # One Unpickler instance may be fed several pickles in sequence
        # through the same (rewound and rewritten) file object.
        data1 = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data1)
        pickled1 = f.getvalue()

        data2 = ["abcdefg", 44, 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data2)
        pickled2 = f.getvalue()

        f = io.BytesIO()
        f.write(pickled1)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        self.assertEqual(unpickler.load(), data1)

        f.seek(0)
        f.truncate()
        f.write(pickled2)
        f.seek(0)
        self.assertEqual(unpickler.load(), data2)

    def _check_multiple_unpicklings(self, ioclass):
        # Feed N concatenated pickles through a single Unpickler and
        # check that each load() consumes exactly one pickle's bytes.
        for proto in protocols:
            with self.subTest(proto=proto):
                data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
                f = ioclass()
                pickler = self.pickler_class(f, protocol=proto)
                pickler.dump(data1)
                pickled = f.getvalue()

                N = 5
                f = ioclass(pickled * N)
                unpickler = self.unpickler_class(f)
                for i in range(N):
                    if f.seekable():
                        pos = f.tell()
                    self.assertEqual(unpickler.load(), data1)
                    if f.seekable():
                        # No over-read: the stream advanced by exactly
                        # one pickle.
                        self.assertEqual(f.tell(), pos + len(pickled))
                self.assertRaises(EOFError, unpickler.load)

    def test_multiple_unpicklings_seekable(self):
        self._check_multiple_unpicklings(io.BytesIO)

    def test_multiple_unpicklings_unseekable(self):
        self._check_multiple_unpicklings(UnseekableIO)

    def test_unpickling_buffering_readline(self):
        # Issue #12687: the unpickler's buffering logic could fail with
        # text mode opcodes.
        data = list(range(10))
        for proto in protocols:
            for buf_size in range(1, 11):
                f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
                pickler = self.pickler_class(f, protocol=proto)
                pickler.dump(data)
                f.seek(0)
                unpickler = self.unpickler_class(f)
                self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute

REDUCE_A = 'reduce_A'  # sentinel payload AAA always reduces to

class AAA(object):
    # Always reduces to the string REDUCE_A, regardless of any
    # dispatch-table override installed for other classes.
    def __reduce__(self):
        return str, (REDUCE_A,)

class BBB(object):
    # No custom reduction: pickled the default way unless a dispatch
    # table entry overrides it.
    pass
class AbstractDispatchTableTests(unittest.TestCase):
    """Tests for the per-pickler dispatch_table override mechanism.

    Subclasses supply pickler_class and get_dispatch_table().
    """

    def test_default_dispatch_table(self):
        # No dispatch_table attribute by default
        f = io.BytesIO()
        p = self.pickler_class(f, 0)
        with self.assertRaises(AttributeError):
            p.dispatch_table
        self.assertFalse(hasattr(p, 'dispatch_table'))

    def test_class_dispatch_table(self):
        # A dispatch_table attribute can be specified class-wide
        dt = self.get_dispatch_table()

        class MyPickler(self.pickler_class):
            dispatch_table = dt

        def dumps(obj, protocol=None):
            f = io.BytesIO()
            p = MyPickler(f, protocol)
            self.assertEqual(p.dispatch_table, dt)
            p.dump(obj)
            return f.getvalue()

        self._test_dispatch_table(dumps, dt)

    def test_instance_dispatch_table(self):
        # A dispatch_table attribute can also be specified instance-wide
        dt = self.get_dispatch_table()

        def dumps(obj, protocol=None):
            f = io.BytesIO()
            p = self.pickler_class(f, protocol)
            p.dispatch_table = dt
            self.assertEqual(p.dispatch_table, dt)
            p.dump(obj)
            return f.getvalue()

        self._test_dispatch_table(dumps, dt)

    def _test_dispatch_table(self, dumps, dispatch_table):
        # Core scenario: mutate the dispatch table step by step and
        # check that the custom pickler follows it while the global
        # pickle.dumps stays unaffected.
        def custom_load_dump(obj):
            return pickle.loads(dumps(obj, 0))

        def default_load_dump(obj):
            return pickle.loads(pickle.dumps(obj, 0))

        # pickling complex numbers using protocol 0 relies on copyreg
        # so check pickling a complex number still works
        z = 1 + 2j
        self.assertEqual(custom_load_dump(z), z)
        self.assertEqual(default_load_dump(z), z)

        # modify pickling of complex
        REDUCE_1 = 'reduce_1'
        def reduce_1(obj):
            return str, (REDUCE_1,)
        dispatch_table[complex] = reduce_1
        self.assertEqual(custom_load_dump(z), REDUCE_1)
        self.assertEqual(default_load_dump(z), z)

        # check picklability of AAA and BBB
        a = AAA()
        b = BBB()
        self.assertEqual(custom_load_dump(a), REDUCE_A)
        self.assertIsInstance(custom_load_dump(b), BBB)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # modify pickling of BBB
        dispatch_table[BBB] = reduce_1
        self.assertEqual(custom_load_dump(a), REDUCE_A)
        self.assertEqual(custom_load_dump(b), REDUCE_1)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # revert pickling of BBB and modify pickling of AAA
        REDUCE_2 = 'reduce_2'
        def reduce_2(obj):
            return str, (REDUCE_2,)
        dispatch_table[AAA] = reduce_2
        del dispatch_table[BBB]
        self.assertEqual(custom_load_dump(a), REDUCE_2)
        self.assertIsInstance(custom_load_dump(b), BBB)
        # AAA.__reduce__ still wins for the default pickler.
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)
if __name__ == "__main__":
    # Print some stuff that can be used to rewrite DATA{0,1,2}
    # Emits, for each protocol, a DATA{n} bytes-literal block and the
    # matching DATA{n}_DIS disassembly, ready to paste back into this
    # file when the reference pickles need regenerating.
    from pickletools import dis
    x = create_data()
    for i in range(pickle.HIGHEST_PROTOCOL+1):
        p = pickle.dumps(x, i)
        print("DATA{0} = (".format(i))
        for j in range(0, len(p), 20):
            # 20 bytes per source line keeps the literals readable.
            b = bytes(p[j:j+20])
            print(" {0!r}".format(b))
        print(")")
        print()
        print("# Disassembly of DATA{0}".format(i))
        print("DATA{0}_DIS = \"\"\"\\".format(i))
        dis(p)
        print("\"\"\"")
        print()
| 35.601642 | 96 | 0.528757 | import collections
import copyreg
import dbm
import io
import functools
import pickle
import pickletools
import struct
import sys
import unittest
import weakref
from http.cookies import SimpleCookie
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, stackless,
)
from pickle import bytes_types
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
class ExtensionSaver:
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
class H(object):
pass
class K(object):
def __init__(self, value):
self.value = value
def __reduce__(self):
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
if stackless:
DATA_XRANGE_SLP = b'\x80\x02cstackless._wrap\nrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02)b.'
else:
DATA_XRANGE_SLP = DATA_XRANGE
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3, we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
    """Build the canonical test object graph shared by the pickle tests.

    The returned list mixes numeric types, integer boundary cases around
    the pickler's internal 1/2/4-byte encoding cutoffs, and a tuple that
    appears twice so tests can verify shared references are preserved.
    """
    inst = C()
    inst.foo = 1
    inst.bar = 2
    data = [0, 1, 2.0, 3.0 + 0j]
    # Integer test cases straddling cPickle.c's internal size boundaries.
    data.extend([1, -1])
    for bound in (0xff, 0xffff, 0x7fffffff):
        data.extend([bound, -bound, -bound - 1])
    # The same tuple (holding the same instance twice) is appended twice.
    shared = ('abc', 'abc', inst, inst)
    data += [shared, shared, 5]
    return data
class AbstractUnpickleTests(unittest.TestCase):
    """Unpickling-only tests, shared across pickle implementations.

    This class is used as a mixin: concrete subclasses must provide
    ``self.loads`` plus the ``bad_stack_errors`` and ``truncated_errors``
    exception tuples used by the malformed-pickle tests.
    """

    # Canonical object graph; DATA0..DATA4 (defined earlier in this file)
    # are frozen pickles of exactly this value.
    _testdata = create_data()

    def assert_is_copy(self, obj, objcopy, msg=None):
        """Assert *objcopy* is an equal, same-typed, independent copy of *obj*.

        Beyond plain equality this checks that the two objects' __dict__s
        are equal but distinct objects, and that __slots__ contents match
        slot by slot.
        """
        if msg is None:
            msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
        self.assertEqual(obj, objcopy, msg=msg)
        self.assertIs(type(obj), type(objcopy), msg=msg)
        if hasattr(obj, '__dict__'):
            self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
            self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
        if hasattr(obj, '__slots__'):
            self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
            for slot in obj.__slots__:
                self.assertEqual(
                    hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
                self.assertEqual(getattr(obj, slot, None),
                                 getattr(objcopy, slot, None), msg=msg)

    def check_unpickling_error(self, errors, data):
        """Assert that unpickling *data* raises one of *errors*.

        In verbose mode the raised exception is printed (and re-raised)
        so failing opcode streams are easy to identify.
        """
        with self.subTest(data=data), \
             self.assertRaises(errors):
            try:
                self.loads(data)
            except BaseException as exc:
                if support.verbose > 1:
                    print('%-32r - %s: %s' %
                          (data, exc.__class__.__name__, exc))
                raise

    # Loading the canonical test value from the frozen DATA0..DATA4
    # pickles, one per protocol.
    def test_load_from_data0(self):
        self.assert_is_copy(self._testdata, self.loads(DATA0))

    def test_load_from_data1(self):
        self.assert_is_copy(self._testdata, self.loads(DATA1))

    def test_load_from_data2(self):
        self.assert_is_copy(self._testdata, self.loads(DATA2))

    def test_load_from_data3(self):
        self.assert_is_copy(self._testdata, self.loads(DATA3))

    def test_load_from_data4(self):
        self.assert_is_copy(self._testdata, self.loads(DATA4))

    def test_load_classic_instance(self):
        """Handcrafted INST/OBJ pickles must reconstruct class instances."""
        # Classes C, D and E are defined earlier in this file; D takes one
        # constructor argument, C and E take none.
        for X, args in [(C, ()), (D, ('x',)), (E, ())]:
            xname = X.__name__.encode('ascii')
            # Protocol 0 uses the INST opcode ('i').
            pickle0 = (b"(i__main__\n"
                       b"X\n"
                       b"p0\n"
                       b"(dp1\nb.").replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle0))
            # Protocol 1 uses GLOBAL ('c') followed by OBJ ('o').
            pickle1 = (b'(c__main__\n'
                       b'X\n'
                       b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle1))
            # Protocol 2 prefixes the same stream with a PROTO opcode.
            pickle2 = (b'\x80\x02(c__main__\n'
                       b'X\n'
                       b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle2))

    def test_maxint64(self):
        """Decimal INT opcode must handle the largest 64-bit value."""
        maxint64 = (1 << 63) - 1
        data = b'I' + str(maxint64).encode("ascii") + b'\n.'
        got = self.loads(data)
        self.assert_is_copy(maxint64, got)
        # A bogus literal after the digits must be rejected.
        data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
        self.check_unpickling_error(ValueError, data)

    def test_unpickle_from_2x(self):
        """Pickles produced by Python 2.x must load with mapped types."""
        loaded = self.loads(DATA_SET)
        self.assertEqual(loaded, set([1, 2]))
        loaded = self.loads(DATA_XRANGE_SLP)
        if not stackless:
            # Stackless unpickles to its own wrapper type, so only check
            # the concrete type on plain CPython.
            self.assertEqual(type(loaded), type(range(0)))
        self.assertEqual(list(loaded), list(range(5)))
        loaded = self.loads(DATA_COOKIE)
        self.assertEqual(type(loaded), SimpleCookie)
        self.assertEqual(list(loaded.keys()), ["key"])
        self.assertEqual(loaded["key"].value, "value")

        # Exception objects without arguments pickled from 2.x with protocol 2
        for exc in python2_exceptions_without_args:
            data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
            loaded = self.loads(data)
            self.assertIs(type(loaded), exc)

        # StandardError is mapped to Exception, not to a same-named type.
        loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
        self.assertIs(type(loaded), Exception)

        # A fully-populated exception round-trips all of its attributes.
        loaded = self.loads(DATA_UEERR)
        self.assertIs(type(loaded), UnicodeEncodeError)
        self.assertEqual(loaded.object, "foo")
        self.assertEqual(loaded.encoding, "ascii")
        self.assertEqual(loaded.start, 0)
        self.assertEqual(loaded.end, 1)
        self.assertEqual(loaded.reason, "bad")

    def test_load_python2_str_as_bytes(self):
        """With encoding='bytes', 2.x str pickles decode to bytes."""
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
        self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
                                    encoding="bytes"), b'a\x00\xa0')
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
        self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
                                    encoding="bytes"), b'a\x00\xa0')
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
        self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
                                    encoding="bytes"), b'a\x00\xa0')

    def test_load_python2_unicode_as_str(self):
        """2.x unicode pickles decode to str even with encoding='bytes'."""
        # From Python 2: pickle.dumps(u'π', protocol=0)
        self.assertEqual(self.loads(b'V\\u03c0\n.',
                                    encoding='bytes'), 'π')
        # From Python 2: pickle.dumps(u'π', protocol=1)
        self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
                                    encoding="bytes"), 'π')
        # From Python 2: pickle.dumps(u'π', protocol=2)
        self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
                                    encoding="bytes"), 'π')

    def test_load_long_python2_str_as_bytes(self):
        """BINSTRING payloads longer than 255 bytes decode to bytes."""
        # From Python 2: pickle.dumps('x' * 300, protocol=1)
        self.assertEqual(self.loads(pickle.BINSTRING +
                                    struct.pack("<I", 300) +
                                    b'x' * 300 + pickle.STOP,
                                    encoding='bytes'), b'x' * 300)

    def test_constants(self):
        """NONE, NEWTRUE/NEWFALSE and INT-encoded booleans unpickle."""
        self.assertIsNone(self.loads(b'N.'))
        self.assertIs(self.loads(b'\x88.'), True)
        self.assertIs(self.loads(b'\x89.'), False)
        # Protocol 0 spells booleans as INT 01 / INT 00.
        self.assertIs(self.loads(b'I01\n.'), True)
        self.assertIs(self.loads(b'I00\n.'), False)

    def test_empty_bytestring(self):
        """An empty SHORT_BINSTRING decodes to '' under any codec."""
        # issue 11286
        empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
        self.assertEqual(empty, '')

    def test_short_binbytes(self):
        dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    def test_binbytes(self):
        dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    @requires_32b
    def test_negative_32b_binbytes(self):
        """A 4-byte length that is negative as a signed int must fail."""
        # On 32-bit builds, a BINBYTES of 2**31 or more is refused.
        dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    @requires_32b
    def test_negative_32b_binunicode(self):
        # On 32-bit builds, a BINUNICODE of 2**31 or more is refused.
        dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    def test_short_binunicode(self):
        dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), '\u20ac\x00')

    def test_misc_get(self):
        """GET of an unfilled memo slot fails; valid GET preserves identity."""
        self.check_unpickling_error(KeyError, b'g0\np0')
        self.assert_is_copy([(100,), (100,)],
                            self.loads(b'((Kdtp0\nh\x00l.))'))

    def test_binbytes8(self):
        dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')

    def test_binunicode8(self):
        dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), '\u20ac\x00')

    @requires_32b
    def test_large_32b_binbytes8(self):
        # An 8-byte length above 2**32 cannot fit on a 32-bit build.
        dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    @requires_32b
    def test_large_32b_binunicode8(self):
        dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)

    def test_get(self):
        """Text-mode PUT/GET must preserve object identity in the memo."""
        pickled = b'((lp100000\ng100000\nt.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_binget(self):
        """1-byte BINPUT/BINGET must preserve object identity."""
        pickled = b'(]q\xffh\xfft.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_long_binget(self):
        """4-byte LONG_BINPUT/LONG_BINGET must preserve object identity."""
        pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_dup(self):
        """The DUP opcode ('2') duplicates the top-of-stack reference."""
        pickled = b'((l2t.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])

    def test_negative_put(self):
        """A negative PUT memo index must be rejected."""
        # Issue #12847
        dumped = b'Va\np-1\n.'
        self.check_unpickling_error(ValueError, dumped)

    @requires_32b
    def test_negative_32b_binput(self):
        # Issue #12847: a LONG_BINPUT index negative as a signed 32-bit int.
        dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
        self.check_unpickling_error(ValueError, dumped)

    def test_badly_escaped_string(self):
        """A dangling backslash in a STRING opcode must be rejected."""
        self.check_unpickling_error(ValueError, b"S'\\'\n.")

    def test_badly_quoted_string(self):
        """STRING payloads with missing/mismatched quotes must be rejected."""
        # Issue #17710
        badpickles = [b"S'\n.",
                      b'S"\n.',
                      b'S\' \n.',
                      b'S" \n.',
                      b'S\'"\n.',
                      b'S"\'\n.',
                      b"S' ' \n.",
                      b'S" " \n.',
                      b"S ''\n.",
                      b'S ""\n.',
                      b'S \n.',
                      b'S\n.',
                      b'S.']
        for p in badpickles:
            self.check_unpickling_error(pickle.UnpicklingError, p)

    def test_correctly_quoted_string(self):
        """Both quote styles and escaped newlines must parse."""
        goodpickles = [(b"S''\n.", ''),
                       (b'S""\n.', ''),
                       (b'S"\\n"\n.', '\n'),
                       (b"S'\\n'\n.", '\n')]
        for p, expected in goodpickles:
            self.assertEqual(self.loads(p), expected)

    def test_frame_readline(self):
        """Line-based opcodes must work inside a protocol-4 FRAME."""
        pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
        #    0: \x80 PROTO      4
        #    2: \x95 FRAME      5
        #   11: I    INT        42
        #   15: .    STOP
        self.assertEqual(self.loads(pickled), 42)

    def test_compat_unpickle(self):
        """Python 2 module/class names are remapped to their 3.x homes."""
        # xrange(1, 7) -> range(1, 7)
        pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
        unpickled = self.loads(pickled)
        self.assertIs(type(unpickled), range)
        self.assertEqual(unpickled, range(1, 7))
        self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
        # reduce -> functools.reduce
        pickled = b'\x80\x02c__builtin__\nreduce\n.'
        self.assertIs(self.loads(pickled), functools.reduce)
        # whichdb.whichdb -> dbm.whichdb
        pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
        self.assertIs(self.loads(pickled), dbm.whichdb)
        # Exception(), StandardError() -> Exception()
        for name in (b'Exception', b'StandardError'):
            pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
            unpickled = self.loads(pickled)
            self.assertIs(type(unpickled), Exception)
            self.assertEqual(str(unpickled), 'ugh')
        # UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
        # -> collections.UserDict({1: 2})
        for name in (b'UserDict', b'IterableUserDict'):
            pickled = (b'\x80\x02(cUserDict\n' + name +
                       b'\no}U\x04data}K\x01K\x02ssb.')
            unpickled = self.loads(pickled)
            self.assertIs(type(unpickled), collections.UserDict)
            self.assertEqual(unpickled, collections.UserDict({1: 2}))

    def test_bad_stack(self):
        """Opcodes applied to too-few or wrongly-typed stack items fail.

        bad_stack_errors is provided by the concrete subclass (the C and
        Python unpicklers raise different exception types here).
        """
        badpickles = [
            b'.',                       # STOP
            b'0',                       # POP
            b'1',                       # POP_MARK
            b'2',                       # DUP
            b'(2',
            b'R',                       # REDUCE
            b')R',
            b'a',                       # APPEND
            b'Na',
            b'b',                       # BUILD
            b'Nb',
            b'd',                       # DICT
            b'e',                       # APPENDS
            b'(e',
            b'ibuiltins\nlist\n',       # INST
            b'l',                       # LIST
            b'o',                       # OBJ
            b'(o',
            b'p1\n',                    # PUT
            b'q\x00',                   # BINPUT
            b'r\x00\x00\x00\x00',       # LONG_BINPUT
            b's',                       # SETITEM
            b'Ns',
            b'NNs',
            b't',                       # TUPLE
            b'u',                       # SETITEMS
            b'(u',
            b'}(Nu',
            b'\x81',                    # NEWOBJ
            b')\x81',
            b'\x85',                    # TUPLE1
            b'\x86',                    # TUPLE2
            b'N\x86',
            b'\x87',                    # TUPLE3
            b'N\x87',
            b'NN\x87',
            b'\x90',                    # ADDITEMS
            b'(\x90',
            b'\x91',                    # FROZENSET
            b'\x92',                    # NEWOBJ_EX
            b')}\x92',
            b'\x93',                    # STACK_GLOBAL
            b'Vlist\n\x93',
            b'\x94',                    # MEMOIZE
        ]
        for p in badpickles:
            self.check_unpickling_error(self.bad_stack_errors, p)

    def test_bad_mark(self):
        """A misplaced MARK inside an opcode's operands must be rejected."""
        badpickles = [
            b'N(.',                     # STOP
            b'N(2',                     # DUP
            b'cbuiltins\nlist\n)(R',    # REDUCE
            b'cbuiltins\nlist\n()R',
            b']N(a',                    # APPEND
                                        # BUILD
            b'cbuiltins\nValueError\n)R}(b',
            b'cbuiltins\nValueError\n)R(}b',
            b'(Nd',                     # DICT
            b'N(p1\n',                  # PUT
            b'N(q\x00',                 # BINPUT
            b'N(r\x00\x00\x00\x00',     # LONG_BINPUT
            b'}NN(s',                   # SETITEM
            b'}N(Ns',
            b'}(NNs',
            b'}((u',                    # SETITEMS
            b'cbuiltins\nlist\n)(\x81', # NEWOBJ
            b'cbuiltins\nlist\n()\x81',
            b'N(\x85',                  # TUPLE1
            b'NN(\x86',                 # TUPLE2
            b'N(N\x86',
            b'NNN(\x87',                # TUPLE3
            b'NN(N\x87',
            b'N(NN\x87',
            b']((\x90',                 # ADDITEMS
                                        # NEWOBJ_EX
            b'cbuiltins\nlist\n)}(\x92',
            b'cbuiltins\nlist\n)(}\x92',
            b'cbuiltins\nlist\n()}\x92',
                                        # STACK_GLOBAL
            b'Vbuiltins\n(Vlist\n\x93',
            b'Vbuiltins\nVlist\n(\x93',
            b'N(\x94',                  # MEMOIZE
        ]
        for p in badpickles:
            self.check_unpickling_error(self.bad_stack_errors, p)

    def test_truncated_data(self):
        """Every opcode cut off mid-operand must raise truncated_errors."""
        self.check_unpickling_error(EOFError, b'')
        self.check_unpickling_error(EOFError, b'N')
        badpickles = [
            b'B',                       # BINBYTES
            b'B\x03\x00\x00',
            b'B\x03\x00\x00\x00',
            b'B\x03\x00\x00\x00ab',
            b'C',                       # SHORT_BINBYTES
            b'C\x03',
            b'C\x03ab',
            b'F',                       # FLOAT
            b'F0.0',
            b'F0.00',
            b'G',                       # BINFLOAT
            b'G\x00\x00\x00\x00\x00\x00\x00',
            b'I',                       # INT
            b'I0',
            b'J',                       # BININT
            b'J\x00\x00\x00',
            b'K',                       # BININT1
            b'L',                       # LONG
            b'L0',
            b'L10',
            b'L0L',
            b'L10L',
            b'M',                       # BININT2
            b'M\x00',
            b'S',                       # STRING
            b"S'abc'",
            b'T',                       # BINSTRING
            b'T\x03\x00\x00',
            b'T\x03\x00\x00\x00',
            b'T\x03\x00\x00\x00ab',
            b'U',                       # SHORT_BINSTRING
            b'U\x03',
            b'U\x03ab',
            b'V',                       # UNICODE
            b'Vabc',
            b'X',                       # BINUNICODE
            b'X\x03\x00\x00',
            b'X\x03\x00\x00\x00',
            b'X\x03\x00\x00\x00ab',
            b'(c',                      # GLOBAL
            b'(cbuiltins',
            b'(cbuiltins\n',
            b'(cbuiltins\nlist',
            b'Ng',                      # GET
            b'Ng0',
            b'(i',                      # INST
            b'(ibuiltins',
            b'(ibuiltins\n',
            b'(ibuiltins\nlist',
            b'Nh',                      # BINGET
            b'Nj',                      # LONG_BINGET
            b'Nj\x00\x00\x00',
            b'Np',                      # PUT
            b'Np0',
            b'Nq',                      # BINPUT
            b'Nr',                      # LONG_BINPUT
            b'Nr\x00\x00\x00',
            b'\x80',                    # PROTO
            b'\x82',                    # EXT1
            b'\x83',                    # EXT2
            b'\x84\x01',
            b'\x84',                    # EXT4
            b'\x84\x01\x00\x00',
            b'\x8a',                    # LONG1
            b'\x8b',                    # LONG4
            b'\x8b\x00\x00\x00',
            b'\x8c',                    # SHORT_BINUNICODE
            b'\x8c\x03',
            b'\x8c\x03ab',
            b'\x8d',                    # BINUNICODE8
            b'\x8d\x03\x00\x00\x00\x00\x00\x00',
            b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x8e',                    # BINBYTES8
            b'\x8e\x03\x00\x00\x00\x00\x00\x00',
            b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x95',                    # FRAME
            b'\x95\x02\x00\x00\x00\x00\x00\x00',
            b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
            b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
        ]
        for p in badpickles:
            self.check_unpickling_error(self.truncated_errors, p)
class AbstractPickleTests(unittest.TestCase):
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
    # Disabled: pickle and cPickle emit gratuitously different opcode
    # streams for the same value (e.g. PUT index numbering and PUT
    # suppression), so comparing exact disassembly output is unreliable.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_tuple_and_list(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], list)
self.assertEqual(len(x[0]), 1)
self.assertIs(x[0][0], x)
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_key(self):
d = {}
k = K(d)
d[k] = 1
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(len(x.keys()), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_set(self):
y = set()
k = K(y)
y.add(k)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
def test_recursive_list_subclass(self):
y = MyList()
y.append(y)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, MyList)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_dict_subclass(self):
d = MyDict()
d[1] = d
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_subclass_key(self):
d = MyDict()
k = K(d)
d[k] = 1
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(len(list(x.keys())), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def check_recursive_collection_and_inst(self, factory):
h = H()
y = factory([h])
h.attr = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, type(y))
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], H)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self.check_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self.check_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self.check_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self.check_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self.check_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyFrozenSet)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
        # Try a monster number.  This takes quadratic time in protocols
        # 0 and 1, so don't bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got,
# we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted locale independent with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
# Test compacity of int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
    # Exercise framing (proto >= 4) for significant workloads
    FRAME_SIZE_MIN = 4
    FRAME_SIZE_TARGET = 64 * 1024
    def check_frame_opcodes(self, pickled):
        """
        Check the arguments of FRAME opcodes in a protocol-4+ pickle.

        Binary objects larger than FRAME_SIZE_TARGET are not framed and are
        therefore treated as frames by themselves in the check below:
        any run of unframed data (other than such large objects and the
        PROTO prefix) must be shorter than FRAME_SIZE_MIN.
        """
        frame_end = frameless_start = None
        frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8', 'BINUNICODE8'}
        for op, arg, pos in pickletools.genops(pickled):
            if frame_end is not None:
                self.assertLessEqual(pos, frame_end)
                if pos == frame_end:
                    frame_end = None
            if frame_end is not None:  # framed
                self.assertNotEqual(op.name, 'FRAME')
                if op.name in frameless_opcodes:
                    # Only short bytes and str objects should be written
                    # in a frame
                    self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
            else:  # not framed
                if (op.name == 'FRAME' or
                    (op.name in frameless_opcodes and
                     len(arg) > self.FRAME_SIZE_TARGET)):
                    # Frame or large bytes or str object
                    if frameless_start is not None:
                        # Only short data should be written outside of a frame
                        self.assertLess(pos - frameless_start,
                                        self.FRAME_SIZE_MIN)
                        frameless_start = None
                elif frameless_start is None and op.name != 'PROTO':
                    frameless_start = pos
            if op.name == 'FRAME':
                # The FRAME argument is the byte length of the frame body;
                # the opcode itself takes 1 + 8 bytes.
                self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
                frame_end = pos + 9 + arg
        pos = len(pickled)
        if frame_end is not None:
            self.assertEqual(frame_end, pos)
        elif frameless_start is not None:
            self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
    def test_framing_large_objects(self):
        # Large binary objects (here 1 MiB, well above FRAME_SIZE_TARGET)
        # are written outside of frames; the small items between the first
        # two must share a single frame.
        N = 1024 * 1024
        small_items = [[i] for i in range(10)]
        obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            for fast in [False, True]:
                with self.subTest(proto=proto, fast=fast):
                    if not fast:
                        # fast=False by default.
                        # This covers in-memory pickling with pickle.dumps().
                        pickled = self.dumps(obj, proto)
                    else:
                        # Pickler is required when fast=True.
                        if not hasattr(self, 'pickler'):
                            continue
                        buf = io.BytesIO()
                        pickler = self.pickler(buf, protocol=proto)
                        pickler.fast = fast
                        pickler.dump(obj)
                        pickled = buf.getvalue()
                    unpickled = self.loads(pickled)
                    # More informative error message in case of failure.
                    self.assertEqual([len(x) for x in obj],
                                     [len(x) for x in unpickled])
                    # Perform full equality check if the lengths match.
                    self.assertEqual(obj, unpickled)
                    n_frames = count_opcode(pickle.FRAME, pickled)
                    # A single frame for small objects between
                    # first two large objects.
                    self.assertEqual(n_frames, 1)
                    self.check_frame_opcodes(pickled)
    def test_optional_frames(self):
        # FRAME opcodes are an optimization hint only: a pickle must remain
        # loadable after some or all FRAME opcodes are stripped out.
        if pickle.HIGHEST_PROTOCOL < 4:
            return

        def remove_frames(pickled, keep_frame=None):
            """Remove FRAME opcodes; keep frame i when keep_frame(i) is true."""
            frame_starts = []
            # 1 byte for the opcode and 8 for the argument
            frame_opcode_size = 9
            for opcode, _, pos in pickletools.genops(pickled):
                if opcode.name == 'FRAME':
                    frame_starts.append(pos)

            newpickle = bytearray()
            last_frame_end = 0
            for i, pos in enumerate(frame_starts):
                if keep_frame and keep_frame(i):
                    continue
                newpickle += pickled[last_frame_end:pos]
                last_frame_end = pos + frame_opcode_size
            newpickle += pickled[last_frame_end:]
            return newpickle

        frame_size = self.FRAME_SIZE_TARGET
        num_frames = 20
        # Large byte objects (dict values) intermitted with small objects
        # (dict keys)
        obj = {i: bytes([i]) * frame_size for i in range(num_frames)}

        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            pickled = self.dumps(obj, proto)

            # All frames removed: still loads and compares equal.
            frameless_pickle = remove_frames(pickled)
            self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
            self.assertEqual(obj, self.loads(frameless_pickle))

            # Every other frame removed: still loads and compares equal.
            some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
            self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
                            count_opcode(pickle.FRAME, pickled))
            self.assertEqual(obj, self.loads(some_frames_pickle))
    def test_framed_write_sizes_with_delayed_writer(self):
        # Each frame should reach the writer through few, large write()
        # calls, and the chunks handed to write() must remain valid after
        # dump() returns (no released memoryviews).
        class ChunkAccumulator:
            """Accumulate pickler output as a list of raw chunks."""
            def __init__(self):
                self.chunks = []
            def write(self, chunk):
                self.chunks.append(chunk)
            def concatenate_chunks(self):
                return b"".join(self.chunks)

        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
                       for i in range(int(1e4))]
            # Add a large unique ASCII string
            objects.append('0123456789abcdef' *
                           (self.FRAME_SIZE_TARGET // 16 + 1))

            # Protocol 4 packs groups of small objects into frames and issues
            # calls to write only once or twice per frame:
            # The C pickler issues one call to write per-frame (header and
            # contents) while Python pickler issues two calls to write: one for
            # the frame header and one for the frame binary contents.
            writer = ChunkAccumulator()
            self.pickler(writer, proto).dump(objects)

            # Actually read the binary content of the chunks after the end
            # of the call to dump: any memoryview passed to write should not
            # be released otherwise this delayed access would not be possible.
            pickled = writer.concatenate_chunks()
            reconstructed = self.loads(pickled)
            self.assertEqual(reconstructed, objects)
            self.assertGreater(len(writer.chunks), 1)

            # memoryviews should own the memory.
            del objects
            support.gc_collect()
            self.assertEqual(writer.concatenate_chunks(), pickled)

            n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
            # There should be at least one call to write per frame
            self.assertGreaterEqual(len(writer.chunks), n_frames)

            # but not too many either: there can be one for the proto,
            # one per-frame header, one per frame for the actual contents,
            # and two for the header.
            self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)

            chunk_sizes = [len(c) for c in writer.chunks]
            large_sizes = [s for s in chunk_sizes
                           if s >= self.FRAME_SIZE_TARGET]
            medium_sizes = [s for s in chunk_sizes
                           if 9 < s < self.FRAME_SIZE_TARGET]
            small_sizes = [s for s in chunk_sizes if s <= 9]

            # Large chunks should not be too large:
            for chunk_size in large_sizes:
                self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
                                chunk_sizes)
            # There shouldn't be too many small chunks: the protocol header
            # and the frame headers are the expected ones.
            self.assertLessEqual(len(small_sizes),
                                 len(large_sizes) + len(medium_sizes) + 3,
                                 chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
    def test_recursive_nested_names(self):
        # A class whose __qualname__ routes through an attribute of itself
        # ('Recursive.mod.Recursive') must still unpickle to the same object.
        global Recursive
        class Recursive:
            pass
        Recursive.mod = sys.modules[Recursive.__module__]
        Recursive.__qualname__ = 'Recursive.mod.Recursive'
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                unpickled = self.loads(self.dumps(Recursive, proto))
                self.assertIs(unpickled, Recursive)
        del Recursive.mod  # break the reference loop for cleanup
    def test_py_methods(self):
        # Pure-Python methods (plain, static, class, and their nested-class
        # variants, bound and unbound) must pickle by reference and behave
        # the same after a round trip.
        global PyMethodsTest
        class PyMethodsTest:
            @staticmethod
            def cheese():
                return "cheese"
            @classmethod
            def wine(cls):
                assert cls is PyMethodsTest
                return "wine"
            def biscuits(self):
                assert isinstance(self, PyMethodsTest)
                return "biscuits"
            class Nested:
                @staticmethod
                def ketchup():
                    return "ketchup"
                @classmethod
                def maple(cls):
                    assert cls is PyMethodsTest.Nested
                    return "maple"
                def pie(self):
                    assert isinstance(self, PyMethodsTest.Nested)
                    return "pie"
        # Bound methods / static / class methods: callable with no args.
        py_methods = (
            PyMethodsTest.cheese,
            PyMethodsTest.wine,
            PyMethodsTest().biscuits,
            PyMethodsTest.Nested.ketchup,
            PyMethodsTest.Nested.maple,
            PyMethodsTest.Nested().pie
        )
        # Unbound methods: need an instance of the paired class.
        py_unbound_methods = (
            (PyMethodsTest.biscuits, PyMethodsTest),
            (PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
        )
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for method in py_methods:
                with self.subTest(proto=proto, method=method):
                    unpickled = self.loads(self.dumps(method, proto))
                    self.assertEqual(method(), unpickled())
            for method, cls in py_unbound_methods:
                obj = cls()
                with self.subTest(proto=proto, method=method):
                    unpickled = self.loads(self.dumps(method, proto))
                    self.assertEqual(method(obj), unpickled(obj))
    def test_c_methods(self):
        # Built-in (C-implemented) methods in all their flavours must pickle
        # by reference and give the same result after a round trip.
        global Subclass
        class Subclass(tuple):
            class Nested(str):
                pass
        c_methods = (
            # bound built-in method
            ("abcd".index, ("c",)),
            # unbound built-in method
            (str.index, ("abcd", "c")),
            # bound "slot" method
            ([1, 2, 3].__len__, ()),
            # unbound "slot" method
            (list.__len__, ([1, 2, 3],)),
            # bound "coexist" method
            ({1, 2}.__contains__, (2,)),
            # unbound "coexist" method
            (set.__contains__, ({1, 2}, 2)),
            # built-in class method
            (dict.fromkeys, (("a", 1), ("b", 2))),
            # built-in static method
            (bytearray.maketrans, (b"abc", b"xyz")),
            # subclass (and nested-subclass) methods
            (Subclass([1,2,2]).count, (2,)),
            (Subclass.count, (Subclass([1,2,2]), 2)),
            (Subclass.Nested("sweet").count, ("e",)),
            (Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
        )
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for method, args in c_methods:
                with self.subTest(proto=proto, method=method):
                    unpickled = self.loads(self.dumps(method, proto))
                    self.assertEqual(method(*args), unpickled(*args))
    def test_compat_pickle(self):
        # Protocols 0-2 must encode renamed Python 3 objects under their
        # Python 2 module/name so that 2.x can still load the pickle; the
        # GLOBAL opcode text 'c<module>\n<name>' must appear verbatim.
        tests = [
            (range(1, 7), '__builtin__', 'xrange'),
            (map(int, '123'), 'itertools', 'imap'),
            (functools.reduce, '__builtin__', 'reduce'),
            (dbm.whichdb, 'whichdb', 'whichdb'),
            (Exception(), 'exceptions', 'Exception'),
            (collections.UserDict(), 'UserDict', 'IterableUserDict'),
            (collections.UserList(), 'UserList', 'UserList'),
            (collections.defaultdict(), 'collections', 'defaultdict'),
        ]
        for val, mod, name in tests:
            for proto in range(3):
                with self.subTest(type=type(val), proto=proto):
                    pickled = self.dumps(val, proto)
                    self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
                    self.assertIs(type(self.loads(pickled)), type(val))
    def test_local_lookup_error(self):
        # A function defined in a local scope cannot be found by qualified
        # name, so pickling it must fail cleanly for every protocol.
        if stackless:
            self.skipTest("Stackless can pickle functions by value")
        def f():
            pass
        # Since the function is local, lookup will fail.
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises((AttributeError, pickle.PicklingError)):
                pickletools.dis(self.dumps(f, proto))
        # Same without a __module__ attribute (exercises a different path
        # in the module lookup).
        del f.__module__
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises((AttributeError, pickle.PicklingError)):
                pickletools.dis(self.dumps(f, proto))
        # Yet another path: __name__ set to the dotted __qualname__.
        f.__name__ = f.__qualname__
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises((AttributeError, pickle.PicklingError)):
                pickletools.dis(self.dumps(f, proto))
class BigmemPickleTests(unittest.TestCase):
    """Tests needing gigabytes of memory; each is gated by @bigmemtest and
    only runs when test.support allows allocations of that size."""

    # Binary protocols can serialize longs of up to 2 GiB-1; bigger ints
    # must raise instead of producing a corrupt pickle.
    @bigmemtest(size=_2G, memuse=3.6, dry_run=False)
    def test_huge_long_32b(self, size):
        data = 1 << (8 * size)
        try:
            for proto in protocols:
                if proto < 2:
                    continue
                with self.subTest(proto=proto):
                    with self.assertRaises((ValueError, OverflowError)):
                        self.dumps(data, protocol=proto)
        finally:
            data = None  # release the huge int before the next test

    # Protocol 3 can serialize bytes objects of up to 4 GiB-1 (older
    # protocols have no dedicated bytes opcode and would be
    # too inefficient)
    @bigmemtest(size=_2G, memuse=2.5, dry_run=False)
    def test_huge_bytes_32b(self, size):
        data = b"abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto < 3:
                    continue
                with self.subTest(proto=proto):
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        # BINBYTES carries a 4-byte little-endian length.
                        header = (pickle.BINBYTES +
                                  struct.pack("<I", len(data)))
                        data_start = pickled.index(data)
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                    finally:
                        pickled = None
        finally:
            data = None

    @bigmemtest(size=_4G, memuse=2.5, dry_run=False)
    def test_huge_bytes_64b(self, size):
        data = b"acbd" * (size // 4)
        try:
            for proto in protocols:
                if proto < 3:
                    continue
                with self.subTest(proto=proto):
                    if proto == 3:
                        # Protocol 3 does not support large bytes objects.
                        # Verify that we do not crash when processing one.
                        with self.assertRaises((ValueError, OverflowError)):
                            self.dumps(data, protocol=proto)
                        continue
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        # BINBYTES8 carries an 8-byte little-endian length.
                        header = (pickle.BINBYTES8 +
                                  struct.pack("<Q", len(data)))
                        data_start = pickled.index(data)
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                    finally:
                        pickled = None
        finally:
            data = None

    # All protocols use 1-byte per printable ASCII character; we add another
    # byte because the encoded form has to be copied into the internal buffer.
    @bigmemtest(size=_2G, memuse=8, dry_run=False)
    def test_huge_str_32b(self, size):
        data = "abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto == 0:
                    continue
                with self.subTest(proto=proto):
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINUNICODE +
                                  struct.pack("<I", len(data)))
                        data_start = pickled.index(b'abcd')
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                        self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
                                          pickled.index(b"abcd")), len(data))
                    finally:
                        pickled = None
        finally:
            data = None

    # BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
    # of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
    # unicode strings however.
    @bigmemtest(size=_4G, memuse=8, dry_run=False)
    def test_huge_str_64b(self, size):
        data = "abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto == 0:
                    continue
                with self.subTest(proto=proto):
                    if proto < 4:
                        with self.assertRaises((ValueError, OverflowError)):
                            self.dumps(data, protocol=proto)
                        continue
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINUNICODE8 +
                                  struct.pack("<Q", len(data)))
                        data_start = pickled.index(b'abcd')
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                        self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
                                          pickled.index(b"abcd")), len(data))
                    finally:
                        pickled = None
        finally:
            data = None
# Test classes for reduce_ex
class REX_one(object):
    """Reduces to an argument-less constructor; flags that __reduce__ ran."""
    # Set to 1 the first time __reduce__ is invoked on an instance.
    _reduce_called = 0
    def __reduce__(self):
        self._reduce_called = 1
        return REX_one, ()
class REX_two(object):
    """Records the protocol number passed to __reduce_ex__."""
    # Last protocol seen by __reduce_ex__, or None if never called.
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()
class REX_three(object):
    """__reduce_ex__ must take precedence: __reduce__ must never be called."""
    # Last protocol seen by __reduce_ex__, or None if never called.
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()
    def __reduce__(self):
        raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
    """Calling object.__reduce_ex__ from __reduce_ex__ must not recurse."""
    # Last protocol seen by __reduce_ex__, or None if never called.
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return object.__reduce_ex__(self, proto)
class REX_five(object):
    """Calling object.__reduce__ from __reduce__ must not recurse."""
    # Set to 1 once __reduce__ has run.
    _reduce_called = 0
    def __reduce__(self):
        self._reduce_called = 1
        return object.__reduce__(self)
class REX_six(object):
    """List-like object rebuilt through append() during unpickling.

    __reduce__ returns an item iterator as the 4th element, which forces
    the unpickler to reconstruct the contents via append() calls
    (APPEND/APPENDS opcodes) instead of treating the object as a list.
    """
    def __init__(self, items=None):
        if items is None:
            items = []
        self.items = items
    def __eq__(self, other):
        return type(self) is type(other) and self.items == other.items
    def append(self, item):
        self.items.append(item)
    def __reduce__(self):
        return type(self), (), None, iter(self.items), None
class REX_seven(object):
    """Dict-like object rebuilt through __setitem__() during unpickling.

    __reduce__ returns an items iterator as the 5th element, which forces
    the unpickler to reconstruct the contents via __setitem__() calls
    (SETITEM/SETITEMS opcodes) instead of treating the object as a dict.
    """
    def __init__(self, table=None):
        if table is None:
            table = {}
        self.table = table
    def __eq__(self, other):
        return type(self) is type(other) and self.table == other.table
    def __setitem__(self, key, value):
        self.table[key] = value
    def __reduce__(self):
        return type(self), (), None, None, iter(self.table.items())
# Simple subclasses of builtins used to verify that subclass identity and
# instance state survive pickling; each carries a representative `sample`
# value used by the tests.
class MyInt(int):
    sample = 1
class MyFloat(float):
    sample = 1.0
class MyComplex(complex):
    sample = 1.0 + 0.0j
class MyStr(str):
    sample = "hello"
class MyUnicode(str):
    sample = "hello \u1234"
class MyTuple(tuple):
    sample = (1, 2, 3)
class MyList(list):
    sample = [1, 2, 3]
class MyDict(dict):
    sample = {"a": 1, "b": 2}
class MySet(set):
    sample = {"a", "b"}
class MyFrozenSet(frozenset):
    sample = frozenset({"a", "b"})
# All builtin subclasses exercised by the round-trip tests.
myclasses = [MyInt, MyFloat,
             MyComplex,
             MyStr, MyUnicode,
             MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class SlotList(MyList):
    # A list subclass with a __slots__ entry, to exercise pickling of
    # slot state in addition to list contents.
    __slots__ = ["foo"]
class SimpleNewObj(int):
    # __init__ must NOT run during unpickling (protocol >= 2 reconstructs
    # via __new__ only), so it raises if it is ever called.
    def __init__(self, *args, **kwargs):
        raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
    def __eq__(self, other):
        return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
    # Supplies __getnewargs__: unpickling calls int.__new__(cls, '%X', 16).
    def __getnewargs__(self):
        return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
    # Supplies __getnewargs_ex__ (protocol >= 4): keyword args for __new__.
    def __getnewargs_ex__(self):
        return ('%X' % self,), {'base': 16}
class BadGetattr:
    # __getattr__ recurses forever; the pickler must fail cleanly on it
    # (see test_bad_getattr) instead of crashing.
    def __getattr__(self, key):
        self.foo
class AbstractPickleModuleTests(unittest.TestCase):
    """Tests of the module-level pickle API (dump/load/dumps/loads and the
    Pickler/Unpickler constructors); concrete mixins supply the callables."""

    def test_dump_closed_file(self):
        # dump() to a closed file must raise ValueError.
        import os
        f = open(TESTFN, "wb")
        try:
            f.close()
            self.assertRaises(ValueError, self.dump, 123, f)
        finally:
            os.remove(TESTFN)

    def test_load_closed_file(self):
        # load() from a closed file must raise ValueError.
        # Bug fix: this test previously duplicated test_dump_closed_file
        # verbatim (calling self.dump) and never exercised load() at all.
        import os
        f = open(TESTFN, "wb")
        try:
            f.close()
            self.assertRaises(ValueError, self.load, f)
        finally:
            os.remove(TESTFN)

    def test_load_from_and_dump_to_file(self):
        # Round trip through a real stream object.
        stream = io.BytesIO()
        data = [123, {}, 124]
        self.dump(data, stream)
        stream.seek(0)
        unpickled = self.load(stream)
        self.assertEqual(unpickled, data)

    def test_highest_protocol(self):
        # Update this number when a new protocol is added to pickle.
        self.assertEqual(pickle.HIGHEST_PROTOCOL, 4)

    def test_callapi(self):
        # protocol=-1 means "highest available"; both positional and
        # keyword call forms must be accepted everywhere.
        f = io.BytesIO()
        self.dump(123, f, -1)
        self.dump(123, file=f, protocol=-1)
        self.dumps(123, -1)
        self.dumps(123, protocol=-1)
        self.Pickler(f, -1)
        self.Pickler(f, protocol=-1)

    def test_bad_init(self):
        # Test issue3664 (pickle can segfault from a badly initialized
        # Pickler/Unpickler): subclasses whose __init__ never calls the
        # base __init__ must raise, not crash.
        class BadPickler(self.Pickler):
            def __init__(self): pass

        class BadUnpickler(self.Unpickler):
            def __init__(self): pass

        self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
        self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
class AbstractPersistentPicklerTests(unittest.TestCase):
    """Tests of the persistent-ID mechanism: even ints and the string
    "test_false_value" are stored externally (by id) instead of inline.
    Mixins wire persistent_id/persistent_load into a (Un)Pickler pair."""

    # method called by the Pickler: returns an id string for objects stored
    # externally, '' (a falsy id) for the marker string, None otherwise.
    def persistent_id(self, object):
        if isinstance(object, int) and object % 2 == 0:
            self.id_count += 1
            return str(object)
        elif object == "test_false_value":
            self.false_count += 1
            return ""
        else:
            return None

    # method called by the Unpickler: reverses persistent_id, counting how
    # often the falsy-id path is taken.
    def persistent_load(self, oid):
        if not oid:
            self.load_false_count += 1
            return "test_false_value"
        else:
            self.load_count += 1
            object = int(oid)
            assert object % 2 == 0
            return object

    def test_persistence(self):
        # 0..9 contain five even ints; plus one falsy-id marker string.
        L = list(range(10)) + ["test_false_value"]
        for proto in protocols:
            self.id_count = 0
            self.false_count = 0
            self.load_false_count = 0
            self.load_count = 0
            self.assertEqual(self.loads(self.dumps(L, proto)), L)
            self.assertEqual(self.id_count, 5)
            self.assertEqual(self.false_count, 1)
            self.assertEqual(self.load_count, 5)
            self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
    """Persistent-ID tests where the object itself is used as its own id,
    exercising the PERSID (protocol 0) and BINPERSID opcodes directly."""

    def persistent_id(self, obj):
        return obj

    def persistent_load(self, pid):
        return pid

    def _check_return_correct_type(self, obj, proto):
        # The round-tripped object must preserve both type and value.
        unpickled = self.loads(self.dumps(obj, proto))
        self.assertIsInstance(unpickled, type(obj))
        self.assertEqual(unpickled, obj)

    def test_return_correct_type(self):
        for proto in protocols:
            # The textual PERSID opcode (protocol 0) only supports strings.
            if proto == 0:
                self._check_return_correct_type("abc", 0)
            else:
                for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
                    self._check_return_correct_type(obj, proto)

    def test_protocol0_is_ascii_only(self):
        # Protocol 0 persistent ids must be ASCII: dumping a non-ASCII id
        # fails, and loading a hand-built non-ASCII PERSID fails too.
        non_ascii_str = "\N{EMPTY SET}"
        self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
        pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
        self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
    """Tests of the Pickler/Unpickler object APIs: memo manipulation,
    object reuse, and input buffering. Concrete subclasses must set
    pickler_class and unpickler_class."""

    pickler_class = None
    unpickler_class = None

    def setUp(self):
        assert self.pickler_class
        assert self.unpickler_class

    def test_clear_pickler_memo(self):
        # Dumping the same object twice with one Pickler reuses the memo
        # (the second pickle is a memo reference, hence different bytes);
        # clear_memo() must restore the original output.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)

        pickler.dump(data)
        first_pickled = f.getvalue()

        # Reset the stream but not the memo.
        f.seek(0)
        f.truncate()
        pickler.dump(data)
        second_pickled = f.getvalue()

        # Reset both the memo and the stream.
        pickler.clear_memo()
        f.seek(0)
        f.truncate()
        pickler.dump(data)
        third_pickled = f.getvalue()

        self.assertNotEqual(first_pickled, second_pickled)
        self.assertEqual(first_pickled, third_pickled)

    def test_priming_pickler_memo(self):
        # Verify that we can set the Pickler's memo attribute: a Pickler
        # primed with another Pickler's memo emits memo references.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)

        pickler.dump(data)
        first_pickled = f.getvalue()

        f = io.BytesIO()
        primed = self.pickler_class(f)
        primed.memo = pickler.memo

        primed.dump(data)
        primed_pickled = f.getvalue()

        self.assertNotEqual(first_pickled, primed_pickled)

    def test_priming_unpickler_memo(self):
        # Verify that we can set the Unpickler's memo attribute: an
        # Unpickler primed with a previous Unpickler's memo resolves memo
        # references to the very same objects.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)

        pickler.dump(data)
        first_pickled = f.getvalue()

        f = io.BytesIO()
        primed = self.pickler_class(f)
        primed.memo = pickler.memo
        primed.dump(data)
        primed_pickled = f.getvalue()

        unpickler = self.unpickler_class(io.BytesIO(first_pickled))
        unpickled_data1 = unpickler.load()

        self.assertEqual(unpickled_data1, data)

        primed = self.unpickler_class(io.BytesIO(primed_pickled))
        primed.memo = unpickler.memo
        unpickled_data2 = primed.load()

        primed.memo.clear()

        self.assertEqual(unpickled_data2, data)
        self.assertTrue(unpickled_data2 is unpickled_data1)

    def test_reusing_unpickler_objects(self):
        # One Unpickler instance must be reusable after its underlying
        # stream is rewritten with different pickled content.
        data1 = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data1)
        pickled1 = f.getvalue()

        data2 = ["abcdefg", 44, 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data2)
        pickled2 = f.getvalue()

        f = io.BytesIO()
        f.write(pickled1)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        self.assertEqual(unpickler.load(), data1)

        f.seek(0)
        f.truncate()
        f.write(pickled2)
        f.seek(0)
        self.assertEqual(unpickler.load(), data2)

    def _check_multiple_unpicklings(self, ioclass):
        # One Unpickler fed N concatenated pickles must yield each payload
        # in turn, consume exactly len(pickled) bytes per load() when the
        # stream is seekable, and finally raise EOFError.
        for proto in protocols:
            with self.subTest(proto=proto):
                data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
                f = ioclass()
                pickler = self.pickler_class(f, protocol=proto)
                pickler.dump(data1)
                pickled = f.getvalue()

                N = 5
                f = ioclass(pickled * N)
                unpickler = self.unpickler_class(f)
                for i in range(N):
                    if f.seekable():
                        pos = f.tell()
                    self.assertEqual(unpickler.load(), data1)
                    if f.seekable():
                        self.assertEqual(f.tell(), pos + len(pickled))
                self.assertRaises(EOFError, unpickler.load)

    def test_multiple_unpicklings_seekable(self):
        self._check_multiple_unpicklings(io.BytesIO)

    def test_multiple_unpicklings_unseekable(self):
        self._check_multiple_unpicklings(UnseekableIO)

    def test_unpickling_buffering_readline(self):
        # Issue #12687: the Unpickler buffers its input; load() must work
        # for every tiny BufferedRandom buffer size.
        # Bug fix: the first half of this assignment had been lost,
        # leaving the dangling fragment "(10))"; restored to a concrete
        # payload.
        data = list(range(10))
        for proto in protocols:
            for buf_size in range(1, 11):
                f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
                pickler = self.pickler_class(f, protocol=proto)
                pickler.dump(data)
                f.seek(0)
                unpickler = self.unpickler_class(f)
                self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'

class AAA(object):
    # Always reduces to the constant string REDUCE_A.
    def __reduce__(self):
        return str, (REDUCE_A,)

class BBB(object):
    # Plain class with no custom reduction; pickled the default way.
    pass
class AbstractDispatchTableTests(unittest.TestCase):
    """Tests of the Pickler.dispatch_table attribute; concrete subclasses
    supply pickler_class and get_dispatch_table()."""

    def test_default_dispatch_table(self):
        # No dispatch_table attribute by default
        f = io.BytesIO()
        p = self.pickler_class(f, 0)
        with self.assertRaises(AttributeError):
            p.dispatch_table
        self.assertFalse(hasattr(p, 'dispatch_table'))

    def test_class_dispatch_table(self):
        # A dispatch_table attribute can be specified class-wide
        dt = self.get_dispatch_table()

        class MyPickler(self.pickler_class):
            dispatch_table = dt

        def dumps(obj, protocol=None):
            f = io.BytesIO()
            p = MyPickler(f, protocol)
            self.assertEqual(p.dispatch_table, dt)
            p.dump(obj)
            return f.getvalue()

        self._test_dispatch_table(dumps, dt)

    def test_instance_dispatch_table(self):
        # A dispatch_table attribute can also be specified instance-wide
        dt = self.get_dispatch_table()

        def dumps(obj, protocol=None):
            f = io.BytesIO()
            p = self.pickler_class(f, protocol)
            p.dispatch_table = dt
            self.assertEqual(p.dispatch_table, dt)
            p.dump(obj)
            return f.getvalue()

        self._test_dispatch_table(dumps, dt)

    def _test_dispatch_table(self, dumps, dispatch_table):
        # Common body: mutate dispatch_table and verify that only the
        # custom pickler is affected, never the module-level default.
        def custom_load_dump(obj):
            return pickle.loads(dumps(obj, 0))

        def default_load_dump(obj):
            return pickle.loads(pickle.dumps(obj, 0))

        # pickling complex numbers using protocol 0 relies on copyreg
        # so check pickling a complex number still works
        z = 1 + 2j
        self.assertEqual(custom_load_dump(z), z)
        self.assertEqual(default_load_dump(z), z)

        # modify pickling of complex
        REDUCE_1 = 'reduce_1'
        def reduce_1(obj):
            return str, (REDUCE_1,)
        dispatch_table[complex] = reduce_1
        self.assertEqual(custom_load_dump(z), REDUCE_1)
        self.assertEqual(default_load_dump(z), z)

        # check picklability of AAA and BBB
        a = AAA()
        b = BBB()
        self.assertEqual(custom_load_dump(a), REDUCE_A)
        self.assertIsInstance(custom_load_dump(b), BBB)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # modify pickling of BBB
        dispatch_table[BBB] = reduce_1
        self.assertEqual(custom_load_dump(a), REDUCE_A)
        self.assertEqual(custom_load_dump(b), REDUCE_1)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # revert pickling of BBB and modify pickling of AAA
        REDUCE_2 = 'reduce_2'
        def reduce_2(obj):
            return str, (REDUCE_2,)
        dispatch_table[AAA] = reduce_2
        del dispatch_table[BBB]
        self.assertEqual(custom_load_dump(a), REDUCE_2)
        self.assertIsInstance(custom_load_dump(b), BBB)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)
if __name__ == "__main__":
    # Print some stuff that can be used to rewrite DATA{0,1,2}: for each
    # protocol, dump create_data() as a bytes literal followed by its
    # pickletools disassembly, in the format used by the DATA* constants.
    from pickletools import dis
    x = create_data()
    for i in range(pickle.HIGHEST_PROTOCOL+1):
        p = pickle.dumps(x, i)
        print("DATA{0} = (".format(i))
        for j in range(0, len(p), 20):
            b = bytes(p[j:j+20])
            print("    {0!r}".format(b))
        print(")")
        print()
        print("# Disassembly of DATA{0}".format(i))
        print("DATA{0}_DIS = \"\"\"\\".format(i))
        dis(p)
        print("\"\"\"")
        print()
| true | true |
f7fcac985188b5590a78764e293685efb6fa8536 | 449 | py | Python | dbf_przystanki.py | RafalKucharskiPK/PTVVisum_Python_Snippets | 08700ac9ff8dd8d0db01ed9b65550a15105cff17 | [
"MIT"
] | 3 | 2020-02-14T19:43:23.000Z | 2021-04-26T06:39:33.000Z | dbf_przystanki.py | RafalKucharskiPK/PTVVisum_Python_Snippets | 08700ac9ff8dd8d0db01ed9b65550a15105cff17 | [
"MIT"
] | null | null | null | dbf_przystanki.py | RafalKucharskiPK/PTVVisum_Python_Snippets | 08700ac9ff8dd8d0db01ed9b65550a15105cff17 | [
"MIT"
] | 2 | 2020-05-03T13:53:05.000Z | 2020-10-13T17:11:02.000Z | from dbfpy.dbf import Dbf
#
#przystanki = Dbf("C:\dane\PRZYSTANKI_AUTOBUSOWE.dbf")
#linie = Dbf("C:\dane\LINIE_PRZEWOZNIK.dbf")
#
#from shapefile import Reader as shpr
#
#przystanki =shpr("C:\dane\PRZYSTANKI_AUTOBUSOWE.shp")
#Przystanki= przystanki.shapeRecords()
#
#
#for przystanek in Przystanki:
# print przystanek
td = Dbf("D:\\Dropbox\\i2\\Prace\\___Nie Visumowe\\2012, Malopolska\\Dane Wejsciowe\\PBS_styczen\\a.dbf")
| 23.631579 | 106 | 0.706013 | from dbfpy.dbf import Dbf
td = Dbf("D:\\Dropbox\\i2\\Prace\\___Nie Visumowe\\2012, Malopolska\\Dane Wejsciowe\\PBS_styczen\\a.dbf")
| true | true |
f7fcb07c787fb7e210c66d8a3f386ba27d6ebc8d | 5,036 | py | Python | imblearn/over_sampling/tests/test_adasyn.py | laurallu/imbalanced-learn | 321b751f90ef8faaec6b39218f8c531893e9e79f | [
"MIT"
] | 2 | 2019-11-17T22:32:23.000Z | 2020-06-06T10:37:05.000Z | imblearn/over_sampling/tests/test_adasyn.py | MattEding/imbalanced-learn | 9a1191e1369f688903649b4342b24e0041c6cf33 | [
"MIT"
] | null | null | null | imblearn/over_sampling/tests/test_adasyn.py | MattEding/imbalanced-learn | 9a1191e1369f688903649b4342b24e0041c6cf33 | [
"MIT"
] | null | null | null | """Test the module under sampler."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import pytest
import numpy as np
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from sklearn.neighbors import NearestNeighbors
from imblearn.over_sampling import ADASYN
# Seed shared by all tests so ADASYN's synthetic samples are reproducible.
RND_SEED = 0
# Toy dataset: 20 two-dimensional samples.
X = np.array(
    [
        [0.11622591, -0.0317206],
        [0.77481731, 0.60935141],
        [1.25192108, -0.22367336],
        [0.53366841, -0.30312976],
        [1.52091956, -0.49283504],
        [-0.28162401, -2.10400981],
        [0.83680821, 1.72827342],
        [0.3084254, 0.33299982],
        [0.70472253, -0.73309052],
        [0.28893132, -0.38761769],
        [1.15514042, 0.0129463],
        [0.88407872, 0.35454207],
        [1.31301027, -0.92648734],
        [-1.11515198, -0.93689695],
        [-0.18410027, -0.45194484],
        [0.9281014, 0.53085498],
        [-0.14374509, 0.27370049],
        [-0.41635887, -0.38299653],
        [0.08711622, 0.93259929],
        [1.70580611, -0.11219234],
    ]
)
# Labels: 8 samples of class 0 (the minority) and 12 of class 1.
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
# Relative tolerance for the assert_allclose comparisons below.
R_TOL = 1e-4
def test_ada_init():
    """The estimator must keep the random_state passed at construction."""
    sampler = ADASYN(sampling_strategy="auto", random_state=RND_SEED)
    assert sampler.random_state == RND_SEED
def test_ada_fit_resample():
    """ADASYN with a fixed seed must reproduce the precomputed output.

    The resampled set keeps the 20 original samples (in order) and appends
    four synthetic minority-class (label 0) samples.
    """
    sampler = ADASYN(random_state=RND_SEED)
    X_resampled, y_resampled = sampler.fit_resample(X, Y)
    # Precomputed synthetic samples for RND_SEED == 0.
    synthetic = np.array(
        [
            [0.94899098, -0.30508981],
            [0.28204936, -0.13953426],
            [1.58028868, -0.04089947],
            [0.66117333, -0.28009063],
        ]
    )
    X_gt = np.vstack((X, synthetic))
    y_gt = np.concatenate((Y, np.zeros(4, dtype=Y.dtype)))
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
def test_ada_fit_resample_nn_obj():
    """Passing a NearestNeighbors estimator must match the default output.

    n_neighbors=6 corresponds to the default k=5 plus the sample itself, so
    the result is identical to test_ada_fit_resample.
    """
    neighbors = NearestNeighbors(n_neighbors=6)
    sampler = ADASYN(random_state=RND_SEED, n_neighbors=neighbors)
    X_resampled, y_resampled = sampler.fit_resample(X, Y)
    # Precomputed synthetic samples for RND_SEED == 0.
    synthetic = np.array(
        [
            [0.94899098, -0.30508981],
            [0.28204936, -0.13953426],
            [1.58028868, -0.04089947],
            [0.66117333, -0.28009063],
        ]
    )
    X_gt = np.vstack((X, synthetic))
    y_gt = np.concatenate((Y, np.zeros(4, dtype=Y.dtype)))
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
@pytest.mark.parametrize(
    "adasyn_params, err_msg",
    [
        # Sampling strategy that requests no new minority samples.
        (
            {"sampling_strategy": {0: 9, 1: 12}},
            "No samples will be generated.",
        ),
        # Invalid n_neighbors specification (neither int nor estimator).
        ({"n_neighbors": "rnd"}, "has to be one of"),
    ],
)
def test_adasyn_error(adasyn_params, err_msg):
    # Each invalid parameter set must raise ValueError with a message
    # matching err_msg when fit_resample runs.
    adasyn = ADASYN(**adasyn_params)
    with pytest.raises(ValueError, match=err_msg):
        adasyn.fit_resample(X, Y)
| 26.366492 | 76 | 0.474782 |
import pytest
import numpy as np
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from sklearn.neighbors import NearestNeighbors
from imblearn.over_sampling import ADASYN
RND_SEED = 0
X = np.array(
[
[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
]
)
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
R_TOL = 1e-4
def test_ada_init():
sampling_strategy = "auto"
ada = ADASYN(sampling_strategy=sampling_strategy, random_state=RND_SEED)
assert ada.random_state == RND_SEED
def test_ada_fit_resample():
ada = ADASYN(random_state=RND_SEED)
X_resampled, y_resampled = ada.fit_resample(X, Y)
X_gt = np.array(
[
[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
[0.94899098, -0.30508981],
[0.28204936, -0.13953426],
[1.58028868, -0.04089947],
[0.66117333, -0.28009063],
]
)
y_gt = np.array(
[
0,
1,
0,
0,
0,
1,
1,
1,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
]
)
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_ada_fit_resample_nn_obj():
nn = NearestNeighbors(n_neighbors=6)
ada = ADASYN(random_state=RND_SEED, n_neighbors=nn)
X_resampled, y_resampled = ada.fit_resample(X, Y)
X_gt = np.array(
[
[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
[0.94899098, -0.30508981],
[0.28204936, -0.13953426],
[1.58028868, -0.04089947],
[0.66117333, -0.28009063],
]
)
y_gt = np.array(
[
0,
1,
0,
0,
0,
1,
1,
1,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
]
)
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
@pytest.mark.parametrize(
"adasyn_params, err_msg",
[
(
{"sampling_strategy": {0: 9, 1: 12}},
"No samples will be generated.",
),
({"n_neighbors": "rnd"}, "has to be one of"),
],
)
def test_adasyn_error(adasyn_params, err_msg):
adasyn = ADASYN(**adasyn_params)
with pytest.raises(ValueError, match=err_msg):
adasyn.fit_resample(X, Y)
| true | true |
f7fcb1d2d40190592945ed379557110bb3720e57 | 3,143 | py | Python | src/post_restart_message.py | sven-borden/bandwidth-monitor | 060f52bb377c9c05d5b92528759ca5ab44d02afa | [
"Apache-2.0"
] | null | null | null | src/post_restart_message.py | sven-borden/bandwidth-monitor | 060f52bb377c9c05d5b92528759ca5ab44d02afa | [
"Apache-2.0"
] | null | null | null | src/post_restart_message.py | sven-borden/bandwidth-monitor | 060f52bb377c9c05d5b92528759ca5ab44d02afa | [
"Apache-2.0"
] | null | null | null | # * | File : post_restart_message.py
# * | Author : HoChri (aka Legufix)
# * | Function : Write restart message to Ubidots dashboard
# * | Info :
# *----------------
# * | This version: V0.4
# * | Date : 2018-05-18
# ******************************************************************************/
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documnetation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS OR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import requests
import time
import RPi.GPIO as GPIO
import json
with open('config.json') as f:
j = json.load(f)
TOKEN = j['token']
DEVICE = j["device"] # Put your device label here
VARIABLE = "killswitch" # Put your first variable label here
VARIABLE_2 = "reset-code" # Put your second variable label here
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GREEN = 5
BLUE = 6
RED = 13
GPIO.setup(GREEN, GPIO.OUT)
GPIO.setup(BLUE, GPIO.OUT)
GPIO.setup(RED, GPIO.OUT)
GPIO.output(GREEN, GPIO.HIGH)
GPIO.output(BLUE, GPIO.HIGH)
GPIO.output(RED, GPIO.HIGH)
def post_request(payload):
# Creates the headers for the HTTP requests
url = "http://things.ubidots.com"
url = "{}/api/v1.6/devices/{}".format(url, DEVICE)
headers = {"X-Auth-Token": TOKEN, "Content-Type": "application/json"}
# Makes the HTTP requests
status = 400
attempts = 0
while status >= 400 and attempts <= 5:
req = requests.post(url=url, headers=headers, json=payload)
status = req.status_code
attempts += 1
time.sleep(1)
# Processes results
if status >= 400:
print("[ERROR] Could not send data after 5 attempts, please check \
your token credentials and internet connection")
return False
print("[INFO] request made properly, your device is updated")
return True
if __name__ == "__main__":
GPIO.output(BLUE, GPIO.LOW)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.LOW)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.LOW)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.HIGH)
time.sleep(180)
post_request({VARIABLE: 0.0, VARIABLE_2: 3.0})
| 32.402062 | 81 | 0.666879 |
import requests
import time
import RPi.GPIO as GPIO
import json
with open('config.json') as f:
j = json.load(f)
TOKEN = j['token']
DEVICE = j["device"]
VARIABLE = "killswitch"
VARIABLE_2 = "reset-code"
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GREEN = 5
BLUE = 6
RED = 13
GPIO.setup(GREEN, GPIO.OUT)
GPIO.setup(BLUE, GPIO.OUT)
GPIO.setup(RED, GPIO.OUT)
GPIO.output(GREEN, GPIO.HIGH)
GPIO.output(BLUE, GPIO.HIGH)
GPIO.output(RED, GPIO.HIGH)
def post_request(payload):
url = "http://things.ubidots.com"
url = "{}/api/v1.6/devices/{}".format(url, DEVICE)
headers = {"X-Auth-Token": TOKEN, "Content-Type": "application/json"}
status = 400
attempts = 0
while status >= 400 and attempts <= 5:
req = requests.post(url=url, headers=headers, json=payload)
status = req.status_code
attempts += 1
time.sleep(1)
if status >= 400:
print("[ERROR] Could not send data after 5 attempts, please check \
your token credentials and internet connection")
return False
print("[INFO] request made properly, your device is updated")
return True
if __name__ == "__main__":
GPIO.output(BLUE, GPIO.LOW)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.LOW)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.LOW)
time.sleep(0.5)
GPIO.output(BLUE, GPIO.HIGH)
time.sleep(180)
post_request({VARIABLE: 0.0, VARIABLE_2: 3.0})
| true | true |
f7fcb2bffe891999d7d10e40442413ad9d4bcea8 | 11,189 | py | Python | Lib/test/test_unparse.py | eduardanghel/cpython | 1382c3289bcfd34ac6811fdf9aa5bc09ca8c320e | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/test/test_unparse.py | eduardanghel/cpython | 1382c3289bcfd34ac6811fdf9aa5bc09ca8c320e | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2019-07-11T03:38:43.000Z | 2019-07-16T05:19:22.000Z | Lib/test/test_unparse.py | aeros167/cpython | 114081f8adafa16283df30c456716a1bef4758d0 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | """Tests for the unparse.py script in the Tools/parser directory."""
import unittest
import test.support
import pathlib
import random
import tokenize
import ast
def read_pyfile(filename):
"""Read and return the contents of a Python source file (as a
string), taking into account the file encoding."""
with open(filename, "rb") as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with open(filename, "r", encoding=encoding) as pyfile:
source = pyfile.read()
return source
for_else = """\
def f():
for x in range(10):
break
else:
y = 2
z = 3
"""
while_else = """\
def g():
while True:
break
else:
y = 2
z = 3
"""
relative_import = """\
from . import fred
from .. import barney
from .australia import shrimp as prawns
"""
nonlocal_ex = """\
def f():
x = 1
def g():
nonlocal x
x = 2
y = 7
def h():
nonlocal x, y
"""
# also acts as test for 'except ... as ...'
raise_from = """\
try:
1 / 0
except ZeroDivisionError as e:
raise ArithmeticError from e
"""
class_decorator = """\
@f1(arg)
@f2
class Foo: pass
"""
elif1 = """\
if cond1:
suite1
elif cond2:
suite2
else:
suite3
"""
elif2 = """\
if cond1:
suite1
elif cond2:
suite2
"""
try_except_finally = """\
try:
suite1
except ex1:
suite2
except ex2:
suite3
else:
suite4
finally:
suite5
"""
with_simple = """\
with f():
suite1
"""
with_as = """\
with f() as x:
suite1
"""
with_two_items = """\
with f() as x, g() as y:
suite1
"""
class ASTTestCase(unittest.TestCase):
def assertASTEqual(self, ast1, ast2):
self.assertEqual(ast.dump(ast1), ast.dump(ast2))
def check_roundtrip(self, code1):
ast1 = ast.parse(code1)
code2 = ast.unparse(ast1)
ast2 = ast.parse(code2)
self.assertASTEqual(ast1, ast2)
def check_invalid(self, node, raises=ValueError):
self.assertRaises(raises, ast.unparse, node)
def check_src_roundtrip(self, code1, code2=None, strip=True):
code2 = code2 or code1
code1 = ast.unparse(ast.parse(code1))
if strip:
code1 = code1.strip()
self.assertEqual(code2, code1)
class UnparseTestCase(ASTTestCase):
# Tests for specific bugs found in earlier versions of unparse
def test_fstrings(self):
# See issue 25180
self.check_roundtrip(r"""f'{f"{0}"*3}'""")
self.check_roundtrip(r"""f'{f"{y}"*3}'""")
def test_strings(self):
self.check_roundtrip("u'foo'")
self.check_roundtrip("r'foo'")
self.check_roundtrip("b'foo'")
def test_del_statement(self):
self.check_roundtrip("del x, y, z")
def test_shifts(self):
self.check_roundtrip("45 << 2")
self.check_roundtrip("13 >> 7")
def test_for_else(self):
self.check_roundtrip(for_else)
def test_while_else(self):
self.check_roundtrip(while_else)
def test_unary_parens(self):
self.check_roundtrip("(-1)**7")
self.check_roundtrip("(-1.)**8")
self.check_roundtrip("(-1j)**6")
self.check_roundtrip("not True or False")
self.check_roundtrip("True or not False")
def test_integer_parens(self):
self.check_roundtrip("3 .__abs__()")
def test_huge_float(self):
self.check_roundtrip("1e1000")
self.check_roundtrip("-1e1000")
self.check_roundtrip("1e1000j")
self.check_roundtrip("-1e1000j")
def test_min_int(self):
self.check_roundtrip(str(-(2 ** 31)))
self.check_roundtrip(str(-(2 ** 63)))
def test_imaginary_literals(self):
self.check_roundtrip("7j")
self.check_roundtrip("-7j")
self.check_roundtrip("0j")
self.check_roundtrip("-0j")
def test_lambda_parentheses(self):
self.check_roundtrip("(lambda: int)()")
def test_chained_comparisons(self):
self.check_roundtrip("1 < 4 <= 5")
self.check_roundtrip("a is b is c is not d")
def test_function_arguments(self):
self.check_roundtrip("def f(): pass")
self.check_roundtrip("def f(a): pass")
self.check_roundtrip("def f(b = 2): pass")
self.check_roundtrip("def f(a, b): pass")
self.check_roundtrip("def f(a, b = 2): pass")
self.check_roundtrip("def f(a = 5, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b): pass")
self.check_roundtrip("def f(*, a, b = 2): pass")
self.check_roundtrip("def f(a, b = None, *, c, **kwds): pass")
self.check_roundtrip("def f(a=2, *args, c=5, d, **kwds): pass")
self.check_roundtrip("def f(*args, **kwargs): pass")
def test_relative_import(self):
self.check_roundtrip(relative_import)
def test_nonlocal(self):
self.check_roundtrip(nonlocal_ex)
def test_raise_from(self):
self.check_roundtrip(raise_from)
def test_bytes(self):
self.check_roundtrip("b'123'")
def test_annotations(self):
self.check_roundtrip("def f(a : int): pass")
self.check_roundtrip("def f(a: int = 5): pass")
self.check_roundtrip("def f(*args: [int]): pass")
self.check_roundtrip("def f(**kwargs: dict): pass")
self.check_roundtrip("def f() -> None: pass")
def test_set_literal(self):
self.check_roundtrip("{'a', 'b', 'c'}")
def test_set_comprehension(self):
self.check_roundtrip("{x for x in range(5)}")
def test_dict_comprehension(self):
self.check_roundtrip("{x: x*x for x in range(10)}")
def test_class_decorators(self):
self.check_roundtrip(class_decorator)
def test_class_definition(self):
self.check_roundtrip("class A(metaclass=type, *[], **{}): pass")
def test_elifs(self):
self.check_roundtrip(elif1)
self.check_roundtrip(elif2)
def test_try_except_finally(self):
self.check_roundtrip(try_except_finally)
def test_starred_assignment(self):
self.check_roundtrip("a, *b, c = seq")
self.check_roundtrip("a, (*b, c) = seq")
self.check_roundtrip("a, *b[0], c = seq")
self.check_roundtrip("a, *(b, c) = seq")
def test_with_simple(self):
self.check_roundtrip(with_simple)
def test_with_as(self):
self.check_roundtrip(with_as)
def test_with_two_items(self):
self.check_roundtrip(with_two_items)
def test_dict_unpacking_in_dict(self):
# See issue 26489
self.check_roundtrip(r"""{**{'y': 2}, 'x': 1}""")
self.check_roundtrip(r"""{**{'y': 2}, **{'x': 1}}""")
def test_invalid_raise(self):
self.check_invalid(ast.Raise(exc=None, cause=ast.Name(id="X")))
def test_invalid_fstring_constant(self):
self.check_invalid(ast.JoinedStr(values=[ast.Constant(value=100)]))
def test_invalid_fstring_conversion(self):
self.check_invalid(
ast.FormattedValue(
value=ast.Constant(value="a", kind=None),
conversion=ord("Y"), # random character
format_spec=None,
)
)
def test_invalid_set(self):
self.check_invalid(ast.Set(elts=[]))
def test_invalid_yield_from(self):
self.check_invalid(ast.YieldFrom(value=None))
class CosmeticTestCase(ASTTestCase):
"""Test if there are cosmetic issues caused by unnecesary additions"""
def test_simple_expressions_parens(self):
self.check_src_roundtrip("(a := b)")
self.check_src_roundtrip("await x")
self.check_src_roundtrip("x if x else y")
self.check_src_roundtrip("lambda x: x")
self.check_src_roundtrip("1 + 1")
self.check_src_roundtrip("1 + 2 / 3")
self.check_src_roundtrip("(1 + 2) / 3")
self.check_src_roundtrip("(1 + 2) * 3 + 4 * (5 + 2)")
self.check_src_roundtrip("(1 + 2) * 3 + 4 * (5 + 2) ** 2")
self.check_src_roundtrip("~ x")
self.check_src_roundtrip("x and y")
self.check_src_roundtrip("x and y and z")
self.check_src_roundtrip("x and (y and x)")
self.check_src_roundtrip("(x and y) and z")
self.check_src_roundtrip("(x ** y) ** z ** q")
self.check_src_roundtrip("x >> y")
self.check_src_roundtrip("x << y")
self.check_src_roundtrip("x >> y and x >> z")
self.check_src_roundtrip("x + y - z * q ^ t ** k")
self.check_src_roundtrip("P * V if P and V else n * R * T")
self.check_src_roundtrip("lambda P, V, n: P * V == n * R * T")
self.check_src_roundtrip("flag & (other | foo)")
self.check_src_roundtrip("not x == y")
self.check_src_roundtrip("x == (not y)")
self.check_src_roundtrip("yield x")
self.check_src_roundtrip("yield from x")
self.check_src_roundtrip("call((yield x))")
self.check_src_roundtrip("return x + (yield x)")
class DirectoryTestCase(ASTTestCase):
"""Test roundtrip behaviour on all files in Lib and Lib/test."""
lib_dir = pathlib.Path(__file__).parent / ".."
test_directories = (lib_dir, lib_dir / "test")
skip_files = {"test_fstring.py"}
run_always_files = {"test_grammar.py", "test_syntax.py", "test_compile.py",
"test_ast.py", "test_asdl_parser.py"}
_files_to_test = None
@classmethod
def files_to_test(cls):
if cls._files_to_test is not None:
return cls._files_to_test
items = [
item.resolve()
for directory in cls.test_directories
for item in directory.glob("*.py")
if not item.name.startswith("bad")
]
# Test limited subset of files unless the 'cpu' resource is specified.
if not test.support.is_resource_enabled("cpu"):
tests_to_run_always = {item for item in items if
item.name in cls.run_always_files}
items = set(random.sample(items, 10))
# Make sure that at least tests that heavily use grammar features are
# always considered in order to reduce the chance of missing something.
items = list(items | tests_to_run_always)
# bpo-31174: Store the names sample to always test the same files.
# It prevents false alarms when hunting reference leaks.
cls._files_to_test = items
return items
def test_files(self):
for item in self.files_to_test():
if test.support.verbose:
print(f"Testing {item.absolute()}")
# Some f-strings are not correctly round-tripped by
# Tools/parser/unparse.py. See issue 28002 for details.
# We need to skip files that contain such f-strings.
if item.name in self.skip_files:
if test.support.verbose:
print(f"Skipping {item.absolute()}: see issue 28002")
continue
with self.subTest(filename=item):
source = read_pyfile(item)
self.check_roundtrip(source)
if __name__ == "__main__":
unittest.main()
| 28.912145 | 83 | 0.607829 |
import unittest
import test.support
import pathlib
import random
import tokenize
import ast
def read_pyfile(filename):
with open(filename, "rb") as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with open(filename, "r", encoding=encoding) as pyfile:
source = pyfile.read()
return source
for_else = """\
def f():
for x in range(10):
break
else:
y = 2
z = 3
"""
while_else = """\
def g():
while True:
break
else:
y = 2
z = 3
"""
relative_import = """\
from . import fred
from .. import barney
from .australia import shrimp as prawns
"""
nonlocal_ex = """\
def f():
x = 1
def g():
nonlocal x
x = 2
y = 7
def h():
nonlocal x, y
"""
raise_from = """\
try:
1 / 0
except ZeroDivisionError as e:
raise ArithmeticError from e
"""
class_decorator = """\
@f1(arg)
@f2
class Foo: pass
"""
elif1 = """\
if cond1:
suite1
elif cond2:
suite2
else:
suite3
"""
elif2 = """\
if cond1:
suite1
elif cond2:
suite2
"""
try_except_finally = """\
try:
suite1
except ex1:
suite2
except ex2:
suite3
else:
suite4
finally:
suite5
"""
with_simple = """\
with f():
suite1
"""
with_as = """\
with f() as x:
suite1
"""
with_two_items = """\
with f() as x, g() as y:
suite1
"""
class ASTTestCase(unittest.TestCase):
def assertASTEqual(self, ast1, ast2):
self.assertEqual(ast.dump(ast1), ast.dump(ast2))
def check_roundtrip(self, code1):
ast1 = ast.parse(code1)
code2 = ast.unparse(ast1)
ast2 = ast.parse(code2)
self.assertASTEqual(ast1, ast2)
def check_invalid(self, node, raises=ValueError):
self.assertRaises(raises, ast.unparse, node)
def check_src_roundtrip(self, code1, code2=None, strip=True):
code2 = code2 or code1
code1 = ast.unparse(ast.parse(code1))
if strip:
code1 = code1.strip()
self.assertEqual(code2, code1)
class UnparseTestCase(ASTTestCase):
def test_fstrings(self):
self.check_roundtrip(r"""f'{f"{0}"*3}'""")
self.check_roundtrip(r"""f'{f"{y}"*3}'""")
def test_strings(self):
self.check_roundtrip("u'foo'")
self.check_roundtrip("r'foo'")
self.check_roundtrip("b'foo'")
def test_del_statement(self):
self.check_roundtrip("del x, y, z")
def test_shifts(self):
self.check_roundtrip("45 << 2")
self.check_roundtrip("13 >> 7")
def test_for_else(self):
self.check_roundtrip(for_else)
def test_while_else(self):
self.check_roundtrip(while_else)
def test_unary_parens(self):
self.check_roundtrip("(-1)**7")
self.check_roundtrip("(-1.)**8")
self.check_roundtrip("(-1j)**6")
self.check_roundtrip("not True or False")
self.check_roundtrip("True or not False")
def test_integer_parens(self):
self.check_roundtrip("3 .__abs__()")
def test_huge_float(self):
self.check_roundtrip("1e1000")
self.check_roundtrip("-1e1000")
self.check_roundtrip("1e1000j")
self.check_roundtrip("-1e1000j")
def test_min_int(self):
self.check_roundtrip(str(-(2 ** 31)))
self.check_roundtrip(str(-(2 ** 63)))
def test_imaginary_literals(self):
self.check_roundtrip("7j")
self.check_roundtrip("-7j")
self.check_roundtrip("0j")
self.check_roundtrip("-0j")
def test_lambda_parentheses(self):
self.check_roundtrip("(lambda: int)()")
def test_chained_comparisons(self):
self.check_roundtrip("1 < 4 <= 5")
self.check_roundtrip("a is b is c is not d")
def test_function_arguments(self):
self.check_roundtrip("def f(): pass")
self.check_roundtrip("def f(a): pass")
self.check_roundtrip("def f(b = 2): pass")
self.check_roundtrip("def f(a, b): pass")
self.check_roundtrip("def f(a, b = 2): pass")
self.check_roundtrip("def f(a = 5, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b): pass")
self.check_roundtrip("def f(*, a, b = 2): pass")
self.check_roundtrip("def f(a, b = None, *, c, **kwds): pass")
self.check_roundtrip("def f(a=2, *args, c=5, d, **kwds): pass")
self.check_roundtrip("def f(*args, **kwargs): pass")
def test_relative_import(self):
self.check_roundtrip(relative_import)
def test_nonlocal(self):
self.check_roundtrip(nonlocal_ex)
def test_raise_from(self):
self.check_roundtrip(raise_from)
def test_bytes(self):
self.check_roundtrip("b'123'")
def test_annotations(self):
self.check_roundtrip("def f(a : int): pass")
self.check_roundtrip("def f(a: int = 5): pass")
self.check_roundtrip("def f(*args: [int]): pass")
self.check_roundtrip("def f(**kwargs: dict): pass")
self.check_roundtrip("def f() -> None: pass")
def test_set_literal(self):
self.check_roundtrip("{'a', 'b', 'c'}")
def test_set_comprehension(self):
self.check_roundtrip("{x for x in range(5)}")
def test_dict_comprehension(self):
self.check_roundtrip("{x: x*x for x in range(10)}")
def test_class_decorators(self):
self.check_roundtrip(class_decorator)
def test_class_definition(self):
self.check_roundtrip("class A(metaclass=type, *[], **{}): pass")
def test_elifs(self):
self.check_roundtrip(elif1)
self.check_roundtrip(elif2)
def test_try_except_finally(self):
self.check_roundtrip(try_except_finally)
def test_starred_assignment(self):
self.check_roundtrip("a, *b, c = seq")
self.check_roundtrip("a, (*b, c) = seq")
self.check_roundtrip("a, *b[0], c = seq")
self.check_roundtrip("a, *(b, c) = seq")
def test_with_simple(self):
self.check_roundtrip(with_simple)
def test_with_as(self):
self.check_roundtrip(with_as)
def test_with_two_items(self):
self.check_roundtrip(with_two_items)
def test_dict_unpacking_in_dict(self):
self.check_roundtrip(r"""{**{'y': 2}, 'x': 1}""")
self.check_roundtrip(r"""{**{'y': 2}, **{'x': 1}}""")
def test_invalid_raise(self):
self.check_invalid(ast.Raise(exc=None, cause=ast.Name(id="X")))
def test_invalid_fstring_constant(self):
self.check_invalid(ast.JoinedStr(values=[ast.Constant(value=100)]))
def test_invalid_fstring_conversion(self):
self.check_invalid(
ast.FormattedValue(
value=ast.Constant(value="a", kind=None),
conversion=ord("Y"),
format_spec=None,
)
)
def test_invalid_set(self):
self.check_invalid(ast.Set(elts=[]))
def test_invalid_yield_from(self):
self.check_invalid(ast.YieldFrom(value=None))
class CosmeticTestCase(ASTTestCase):
def test_simple_expressions_parens(self):
self.check_src_roundtrip("(a := b)")
self.check_src_roundtrip("await x")
self.check_src_roundtrip("x if x else y")
self.check_src_roundtrip("lambda x: x")
self.check_src_roundtrip("1 + 1")
self.check_src_roundtrip("1 + 2 / 3")
self.check_src_roundtrip("(1 + 2) / 3")
self.check_src_roundtrip("(1 + 2) * 3 + 4 * (5 + 2)")
self.check_src_roundtrip("(1 + 2) * 3 + 4 * (5 + 2) ** 2")
self.check_src_roundtrip("~ x")
self.check_src_roundtrip("x and y")
self.check_src_roundtrip("x and y and z")
self.check_src_roundtrip("x and (y and x)")
self.check_src_roundtrip("(x and y) and z")
self.check_src_roundtrip("(x ** y) ** z ** q")
self.check_src_roundtrip("x >> y")
self.check_src_roundtrip("x << y")
self.check_src_roundtrip("x >> y and x >> z")
self.check_src_roundtrip("x + y - z * q ^ t ** k")
self.check_src_roundtrip("P * V if P and V else n * R * T")
self.check_src_roundtrip("lambda P, V, n: P * V == n * R * T")
self.check_src_roundtrip("flag & (other | foo)")
self.check_src_roundtrip("not x == y")
self.check_src_roundtrip("x == (not y)")
self.check_src_roundtrip("yield x")
self.check_src_roundtrip("yield from x")
self.check_src_roundtrip("call((yield x))")
self.check_src_roundtrip("return x + (yield x)")
class DirectoryTestCase(ASTTestCase):
lib_dir = pathlib.Path(__file__).parent / ".."
test_directories = (lib_dir, lib_dir / "test")
skip_files = {"test_fstring.py"}
run_always_files = {"test_grammar.py", "test_syntax.py", "test_compile.py",
"test_ast.py", "test_asdl_parser.py"}
_files_to_test = None
@classmethod
def files_to_test(cls):
if cls._files_to_test is not None:
return cls._files_to_test
items = [
item.resolve()
for directory in cls.test_directories
for item in directory.glob("*.py")
if not item.name.startswith("bad")
]
if not test.support.is_resource_enabled("cpu"):
tests_to_run_always = {item for item in items if
item.name in cls.run_always_files}
items = set(random.sample(items, 10))
items = list(items | tests_to_run_always)
cls._files_to_test = items
return items
def test_files(self):
for item in self.files_to_test():
if test.support.verbose:
print(f"Testing {item.absolute()}")
if item.name in self.skip_files:
if test.support.verbose:
print(f"Skipping {item.absolute()}: see issue 28002")
continue
with self.subTest(filename=item):
source = read_pyfile(item)
self.check_roundtrip(source)
if __name__ == "__main__":
unittest.main()
| true | true |
f7fcb2d33c13e48fed6248480de3cfd82bbfd05a | 8,442 | py | Python | otter/integration/lib/nova.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 20 | 2015-02-11T16:32:07.000Z | 2019-11-12T03:27:54.000Z | otter/integration/lib/nova.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 1,145 | 2015-01-01T00:00:47.000Z | 2022-02-11T03:40:39.000Z | otter/integration/lib/nova.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 29 | 2015-01-08T15:00:11.000Z | 2021-02-16T16:33:53.000Z | """Contains reusable classes relating to nova."""
import json
from operator import itemgetter
from characteristic import Attribute, attributes
import treq
from twisted.internet import reactor
from twisted.internet.defer import gatherResults, inlineCallbacks, returnValue
from twisted.python.log import msg
from otter.integration.lib.utils import diagnose
from otter.util.deferredutils import retry_and_timeout
from otter.util.http import APIError, check_success, headers
from otter.util.retry import (
TransientRetryError,
repeating_interval,
terminal_errors_except
)
@attributes(["id", "pool",
Attribute("treq", default_value=treq),
Attribute("clock", default_value=reactor)])
class NovaServer(object):
"""
Represents an existing server in Nova.
:ivar str id: The nova server ID
:ivar pool: :class:`twisted.web.client.HTTPConnectionPool`
:ivar treq: defaults to the `treq` module if not provided - used mainly
for test injection
"""
@diagnose("nova", "Deleting server")
def delete(self, rcs):
"""
Delete the server.
:param rcs: an instance of
:class:`otter.integration.lib.resources.TestResources`
"""
def try_delete():
d = self.treq.delete(
"{}/servers/{}".format(rcs.endpoints["nova"], self.id),
headers=headers(str(rcs.token)),
pool=self.pool)
d.addCallback(check_success, [404], _treq=self.treq)
d.addCallback(self.treq.content)
return d
return retry_and_timeout(
try_delete, 120,
can_retry=terminal_errors_except(APIError),
next_interval=repeating_interval(5),
clock=self.clock,
deferred_description=(
"Waiting for server {} to get deleted".format(self.id)))
@diagnose("nova", "Getting server's metadata")
def list_metadata(self, rcs):
"""
Use Nova to get the server's metadata.
:param rcs: an instance of
:class:`otter.integration.lib.resources.TestResources`
"""
return self.treq.get(
"{}/servers/{}/metadata".format(rcs.endpoints["nova"], self.id),
headers=headers(str(rcs.token)),
pool=self.pool,
).addCallback(check_success, [200]).addCallback(self.treq.json_content)
@diagnose("nova", "Updating server's metadata")
def update_metadata(self, metadata, rcs):
"""
Use Nova to alter a server's metadata.
:param rcs: an instance of
:class:`otter.integration.lib.resources.TestResources`
"""
return self.treq.put(
"{}/servers/{}/metadata".format(rcs.endpoints["nova"], self.id),
json.dumps({'metadata': metadata}),
headers=headers(str(rcs.token)),
pool=self.pool,
).addCallback(check_success, [200]).addCallback(self.treq.json_content)
@diagnose("nova", "Getting server's addresses")
def get_addresses(self, rcs):
"""
Get the network addresses for a server.
:param rcs: an instance of
:class:`otter.integration.lib.resources.TestResources`
"""
return self.treq.get(
"{}/servers/{}/ips".format(rcs.endpoints["nova"], self.id),
headers=headers(str(rcs.token)),
pool=self.pool
).addCallback(check_success, [200]).addCallback(self.treq.json_content)
@diagnose("nova", "Checking server's details")
def details(self, rcs):
"""
Get a server's details.
"""
return self.treq.get(
"{}/servers/{}".format(rcs.endpoints["nova"], self.id),
headers=headers(str(rcs.token)),
pool=self.pool
).addCallback(check_success, [200]).addCallback(self.treq.json_content)
@diagnose("nova", "Deleting one or more servers")
def delete_servers(server_ids, rcs, pool, _treq=treq):
"""
Use Nova to delete multiple servers.
:param iterable server_ids: The IDs of the servers to delete
"""
return gatherResults([NovaServer(id=_id, pool=pool, treq=_treq).delete(rcs)
for _id in server_ids])
@diagnose("nova", "Listing all servers")
def list_servers(rcs, pool, _treq=treq):
"""
Get a list of all servers, with an optional name regex provided. This
does not handle pagination, and instead just increases the limit to an
absurdly high number.
"""
params = {'limit': 10000}
return _treq.get(
"{}/servers/detail".format(rcs.endpoints['nova']),
params=params,
headers=headers(str(rcs.token)),
pool=pool
).addCallback(check_success, [200]).addCallback(_treq.json_content)
@diagnose("nova", "Listing all images")
def list_images(rcs, pool, _treq=treq):
"""
Get a list of all images.
"""
params = {'limit': 10000}
return _treq.get(
"{}/images".format(rcs.endpoints['nova']),
params=params,
headers=headers(str(rcs.token)),
pool=pool
).addCallback(check_success, [200]).addCallback(_treq.json_content)
@inlineCallbacks
def fetch_ubuntu_image_id(rcs, pool):
"""
Get image ID from nova that can be used in creating servers or
scaling group
Note: Serves the same purpose as fixtures.image_ids_with_and_without_name
in cloudcafe tests
"""
images_resp = yield list_images(rcs, pool)
for image in images_resp["images"]:
if image["name"].startswith("Ubuntu"):
returnValue(image["id"])
else:
returnValue(images_resp["images"][0]["id"])
@diagnose("nova", "Creating server")
def create_server(rcs, pool, server_args, _treq=treq):
"""
Create a server using Nova.
:param rcs: an instance of
:class:`otter.integration.lib.resources.TestResources`
:param pool: a :class:`twisted.web.client.HTTPConnectionPool`
:param server_args: a ``dict`` containing the arguments with which to
create the server.
:return: the server ID of the created server.
"""
d = _treq.post(
"{0}/servers".format(rcs.endpoints['nova']),
json.dumps(server_args),
headers=headers(str(rcs.token)),
pool=pool
).addCallback(check_success, [202]).addCallback(_treq.json_content)
return d.addCallback(itemgetter('server')).addCallback(itemgetter('id'))
@diagnose("nova", "Waiting for all servers to reach a particular state")
def wait_for_servers(rcs, pool, matcher, group=None, timeout=600, period=10,
clock=None, _treq=treq):
"""
Wait until Nova reaches a particular state (as described by the given
matcher) - if a group is provided, then match only the servers for the
given group.
:param rcs: an instance of
:class:`otter.integration.lib.resources.TestResources`
:param pool: a :class:`twisted.web.client.HTTPConnectionPool`
:param matcher: a :mod:`testtools.matcher` matcher that describes the
desired state of the servers belonging to the autoscaling group.
:param group: a :class:`otter.integration.lib.autoscale.ScalingGroup` that
specifies which autoscaling group's servers we are looking at. This
group should already exist, and have a `group_id` attribute. If not
provided, the matcher will apply to all servers.
"""
message = "Waiting for {0} Nova servers".format(
"all" if group is None else "group {0} 's".format(group.group_id))
@inlineCallbacks
def do_work():
servers = yield list_servers(rcs, pool, _treq=_treq)
servers = servers['servers']
if group is not None:
servers = [
server for server in servers
if (group.group_id ==
server['metadata'].get("rax:autoscale:group:id", None))
]
mismatch = matcher.match(servers)
if mismatch:
msg("{0}.\nMismatch: {1}".format(message, mismatch.describe()))
raise TransientRetryError(mismatch.describe())
returnValue(servers)
return retry_and_timeout(
do_work, timeout,
can_retry=terminal_errors_except(TransientRetryError),
next_interval=repeating_interval(period),
clock=clock or reactor,
deferred_description=(
"{0} to reach state {1}".format(message, str(matcher)))
)
| 35.029046 | 79 | 0.636816 | import json
from operator import itemgetter
from characteristic import Attribute, attributes
import treq
from twisted.internet import reactor
from twisted.internet.defer import gatherResults, inlineCallbacks, returnValue
from twisted.python.log import msg
from otter.integration.lib.utils import diagnose
from otter.util.deferredutils import retry_and_timeout
from otter.util.http import APIError, check_success, headers
from otter.util.retry import (
TransientRetryError,
repeating_interval,
terminal_errors_except
)
@attributes(["id", "pool",
Attribute("treq", default_value=treq),
Attribute("clock", default_value=reactor)])
class NovaServer(object):
@diagnose("nova", "Deleting server")
def delete(self, rcs):
def try_delete():
d = self.treq.delete(
"{}/servers/{}".format(rcs.endpoints["nova"], self.id),
headers=headers(str(rcs.token)),
pool=self.pool)
d.addCallback(check_success, [404], _treq=self.treq)
d.addCallback(self.treq.content)
return d
return retry_and_timeout(
try_delete, 120,
can_retry=terminal_errors_except(APIError),
next_interval=repeating_interval(5),
clock=self.clock,
deferred_description=(
"Waiting for server {} to get deleted".format(self.id)))
@diagnose("nova", "Getting server's metadata")
def list_metadata(self, rcs):
return self.treq.get(
"{}/servers/{}/metadata".format(rcs.endpoints["nova"], self.id),
headers=headers(str(rcs.token)),
pool=self.pool,
).addCallback(check_success, [200]).addCallback(self.treq.json_content)
@diagnose("nova", "Updating server's metadata")
def update_metadata(self, metadata, rcs):
return self.treq.put(
"{}/servers/{}/metadata".format(rcs.endpoints["nova"], self.id),
json.dumps({'metadata': metadata}),
headers=headers(str(rcs.token)),
pool=self.pool,
).addCallback(check_success, [200]).addCallback(self.treq.json_content)
@diagnose("nova", "Getting server's addresses")
def get_addresses(self, rcs):
return self.treq.get(
"{}/servers/{}/ips".format(rcs.endpoints["nova"], self.id),
headers=headers(str(rcs.token)),
pool=self.pool
).addCallback(check_success, [200]).addCallback(self.treq.json_content)
@diagnose("nova", "Checking server's details")
def details(self, rcs):
return self.treq.get(
"{}/servers/{}".format(rcs.endpoints["nova"], self.id),
headers=headers(str(rcs.token)),
pool=self.pool
).addCallback(check_success, [200]).addCallback(self.treq.json_content)
@diagnose("nova", "Deleting one or more servers")
def delete_servers(server_ids, rcs, pool, _treq=treq):
return gatherResults([NovaServer(id=_id, pool=pool, treq=_treq).delete(rcs)
for _id in server_ids])
@diagnose("nova", "Listing all servers")
def list_servers(rcs, pool, _treq=treq):
params = {'limit': 10000}
return _treq.get(
"{}/servers/detail".format(rcs.endpoints['nova']),
params=params,
headers=headers(str(rcs.token)),
pool=pool
).addCallback(check_success, [200]).addCallback(_treq.json_content)
@diagnose("nova", "Listing all images")
def list_images(rcs, pool, _treq=treq):
params = {'limit': 10000}
return _treq.get(
"{}/images".format(rcs.endpoints['nova']),
params=params,
headers=headers(str(rcs.token)),
pool=pool
).addCallback(check_success, [200]).addCallback(_treq.json_content)
@inlineCallbacks
def fetch_ubuntu_image_id(rcs, pool):
images_resp = yield list_images(rcs, pool)
for image in images_resp["images"]:
if image["name"].startswith("Ubuntu"):
returnValue(image["id"])
else:
returnValue(images_resp["images"][0]["id"])
@diagnose("nova", "Creating server")
def create_server(rcs, pool, server_args, _treq=treq):
d = _treq.post(
"{0}/servers".format(rcs.endpoints['nova']),
json.dumps(server_args),
headers=headers(str(rcs.token)),
pool=pool
).addCallback(check_success, [202]).addCallback(_treq.json_content)
return d.addCallback(itemgetter('server')).addCallback(itemgetter('id'))
@diagnose("nova", "Waiting for all servers to reach a particular state")
def wait_for_servers(rcs, pool, matcher, group=None, timeout=600, period=10,
clock=None, _treq=treq):
message = "Waiting for {0} Nova servers".format(
"all" if group is None else "group {0} 's".format(group.group_id))
@inlineCallbacks
def do_work():
servers = yield list_servers(rcs, pool, _treq=_treq)
servers = servers['servers']
if group is not None:
servers = [
server for server in servers
if (group.group_id ==
server['metadata'].get("rax:autoscale:group:id", None))
]
mismatch = matcher.match(servers)
if mismatch:
msg("{0}.\nMismatch: {1}".format(message, mismatch.describe()))
raise TransientRetryError(mismatch.describe())
returnValue(servers)
return retry_and_timeout(
do_work, timeout,
can_retry=terminal_errors_except(TransientRetryError),
next_interval=repeating_interval(period),
clock=clock or reactor,
deferred_description=(
"{0} to reach state {1}".format(message, str(matcher)))
)
| true | true |
f7fcb355bc752460a08cb2c7d7e343fd973db080 | 9,804 | py | Python | ci/safe_docker_run.py | larroy/mxnet | a6a85bd066387ba735150491b248b54951653395 | [
"Apache-2.0"
] | 1 | 2017-09-14T09:25:16.000Z | 2017-09-14T09:25:16.000Z | ci/safe_docker_run.py | larroy/mxnet | a6a85bd066387ba735150491b248b54951653395 | [
"Apache-2.0"
] | null | null | null | ci/safe_docker_run.py | larroy/mxnet | a6a85bd066387ba735150491b248b54951653395 | [
"Apache-2.0"
] | 1 | 2019-09-24T17:49:29.000Z | 2019-09-24T17:49:29.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Docker command wrapper to guard against Zombie containers
"""
import argparse
import atexit
import logging
import os
import signal
import sys
from functools import reduce
from itertools import chain
from typing import Dict, Any
import docker
import docker.constants
from docker.errors import NotFound
from docker.models.containers import Container
from util import config_logging
docker.constants.DEFAULT_TIMEOUT_SECONDS = 300
DOCKER_STOP_TIMEOUT_SECONDS = 10
CONTAINER_WAIT_SECONDS = 600
class SafeDockerClient:
"""
A wrapper around the docker client to ensure that no zombie containers are left hanging around
in case the script is not allowed to finish normally
"""
@staticmethod
def _trim_container_id(cid):
""":return: trimmed container id"""
return cid[:12]
def __init__(self):
self._docker_client = docker.from_env()
self._containers = set()
self._docker_stop_timeout = DOCKER_STOP_TIMEOUT_SECONDS
self._container_wait_seconds = CONTAINER_WAIT_SECONDS
def signal_handler(signum, _):
signal.pthread_sigmask(signal.SIG_BLOCK, {signum})
logging.warning("Signal %d received, cleaning up...", signum)
self._clean_up()
logging.warning("done. Exiting with error.")
sys.exit(1)
atexit.register(self._clean_up)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def _clean_up(self):
if self._containers:
logging.warning("Cleaning up containers")
else:
return
# noinspection PyBroadException
try:
stop_timeout = int(os.environ.get("DOCKER_STOP_TIMEOUT", self._docker_stop_timeout))
except Exception:
stop_timeout = 3
for container in self._containers:
try:
container.stop(timeout=stop_timeout)
logging.info("☠: stopped container %s", self._trim_container_id(container.id))
container.remove()
logging.info("🚽: removed container %s", self._trim_container_id(container.id))
except Exception as e:
logging.exception(e)
self._containers.clear()
logging.info("Cleaning up containers finished.")
def _add_container(self, container: Container) -> Container:
self._containers.add(container)
return container
def _remove_container(self, container: Container):
self._containers.remove(container)
def run(self, *args, **kwargs) -> int:
if "detach" in kwargs and kwargs.get("detach") is False:
raise ValueError("Can only safe run with 'detach' set to True")
else:
kwargs["detach"] = True
# These variables are passed to the container so the process tree killer can find runaway
# process inside the container
# https://wiki.jenkins.io/display/JENKINS/ProcessTreeKiller
# https://github.com/jenkinsci/jenkins/blob/578d6bacb33a5e99f149de504c80275796f0b231/core/src/main/java/hudson/model/Run.java#L2393
if "environment" not in kwargs:
kwargs["environment"] = {}
jenkins_env_vars = ["BUILD_NUMBER", "BUILD_ID", "BUILD_TAG"]
kwargs["environment"].update({k: os.environ[k] for k in jenkins_env_vars if k in os.environ})
ret = 0
try:
# Race condition:
# If the call to docker_client.containers.run is interrupted, it is possible that
# the container won't be cleaned up. We avoid this by temporarily masking the signals.
signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM})
container = self._add_container(self._docker_client.containers.run(*args, **kwargs))
signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM})
logging.info("Started container: %s", self._trim_container_id(container.id))
stream = container.logs(stream=True, stdout=True, stderr=True)
sys.stdout.flush()
for chunk in stream:
sys.stdout.buffer.write(chunk)
sys.stdout.buffer.flush()
sys.stdout.flush()
stream.close()
try:
logging.info("Waiting for status of container %s for %d s.",
self._trim_container_id(container.id),
self._container_wait_seconds)
wait_result = container.wait(timeout=self._container_wait_seconds)
logging.info("Container exit status: %s", wait_result)
ret = wait_result.get('StatusCode', 200)
if ret != 0:
logging.error("Container exited with an error 😞")
logging.info("Executed command for reproduction:\n\n%s\n", " ".join(sys.argv))
else:
logging.info("Container exited with success 👍")
logging.info("Executed command for reproduction:\n\n%s\n", " ".join(sys.argv))
except Exception as err:
logging.exception(err)
return 150
try:
logging.info("Stopping container: %s", self._trim_container_id(container.id))
container.stop()
except Exception as e:
logging.exception(e)
ret = 151
try:
logging.info("Removing container: %s", self._trim_container_id(container.id))
container.remove()
except Exception as e:
logging.exception(e)
ret = 152
self._remove_container(container)
containers = self._docker_client.containers.list()
if containers:
logging.info("Other running containers: %s", [self._trim_container_id(x.id) for x in containers])
except NotFound as e:
logging.info("Container was stopped before cleanup started: %s", e)
return ret
def _volume_mount(volume_dfn: str) -> Dict[str, Any]:
"""
Converts docker volume mount format, e.g. docker run --volume /local/path:/container/path:ro
to an object understood by the python docker library, e.g. {"local/path": {"bind": "/container/path", "mode": "ro"}}
This is used by the argparser for automatic conversion and input validation.
If the mode is not specified, 'rw' is assumed.
:param volume_dfn: A string to convert to a volume mount object in the format <local path>:<container path>[:ro|rw]
:return: An object in the form {"<local path>" : {"bind": "<container path>", "mode": "rw|ro"}}
"""
if volume_dfn is None:
raise argparse.ArgumentTypeError("Missing value for volume definition")
parts = volume_dfn.split(":")
if len(parts) < 2 or len(parts) > 3:
raise argparse.ArgumentTypeError("Invalid volume definition {}".format(volume_dfn))
mode = "rw"
if len(parts) == 3:
mode = parts[2]
if mode not in ["rw", "ro"]:
raise argparse.ArgumentTypeError("Invalid volume mount mode {} in volume definition {}".format(mode, volume_dfn))
return {parts[0]: {"bind": parts[1], "mode": mode}}
def main(command_line_arguments):
config_logging()
parser = argparse.ArgumentParser(
description="""Wrapper around docker run that protects against Zombie containers""", epilog="")
parser.add_argument("-u", "--user",
help="Username or UID (format: <name|uid>[:<group|gid>])",
default=None)
parser.add_argument("-v", "--volume",
action='append',
type=_volume_mount,
help="Bind mount a volume",
default=[])
parser.add_argument("--cap-add",
help="Add Linux capabilities",
action="append",
type=str,
default=[])
parser.add_argument("--runtime",
help="Runtime to use for this container",
default=None)
parser.add_argument("--name",
help="Assign a name to the container",
default=None)
parser.add_argument("image", metavar="IMAGE")
parser.add_argument("command", metavar="COMMAND")
parser.add_argument("args", nargs='*', metavar="ARG")
args = parser.parse_args(args=command_line_arguments)
docker_client = SafeDockerClient()
return docker_client.run(args.image, **{
"command": " ".join(list(chain([args.command] + args.args))),
"user": args.user,
"runtime": args.runtime,
"name": args.name,
"volumes": reduce(lambda dct, v: {**dct, **v}, args.volume, {}),
"cap_add": args.cap_add
})
if __name__ == "__main__":
exit(main(sys.argv[1:]))
| 39.059761 | 139 | 0.621889 |
import argparse
import atexit
import logging
import os
import signal
import sys
from functools import reduce
from itertools import chain
from typing import Dict, Any
import docker
import docker.constants
from docker.errors import NotFound
from docker.models.containers import Container
from util import config_logging
docker.constants.DEFAULT_TIMEOUT_SECONDS = 300
DOCKER_STOP_TIMEOUT_SECONDS = 10
CONTAINER_WAIT_SECONDS = 600
class SafeDockerClient:
@staticmethod
def _trim_container_id(cid):
return cid[:12]
def __init__(self):
self._docker_client = docker.from_env()
self._containers = set()
self._docker_stop_timeout = DOCKER_STOP_TIMEOUT_SECONDS
self._container_wait_seconds = CONTAINER_WAIT_SECONDS
def signal_handler(signum, _):
signal.pthread_sigmask(signal.SIG_BLOCK, {signum})
logging.warning("Signal %d received, cleaning up...", signum)
self._clean_up()
logging.warning("done. Exiting with error.")
sys.exit(1)
atexit.register(self._clean_up)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def _clean_up(self):
if self._containers:
logging.warning("Cleaning up containers")
else:
return
try:
stop_timeout = int(os.environ.get("DOCKER_STOP_TIMEOUT", self._docker_stop_timeout))
except Exception:
stop_timeout = 3
for container in self._containers:
try:
container.stop(timeout=stop_timeout)
logging.info("☠: stopped container %s", self._trim_container_id(container.id))
container.remove()
logging.info("🚽: removed container %s", self._trim_container_id(container.id))
except Exception as e:
logging.exception(e)
self._containers.clear()
logging.info("Cleaning up containers finished.")
def _add_container(self, container: Container) -> Container:
self._containers.add(container)
return container
def _remove_container(self, container: Container):
self._containers.remove(container)
def run(self, *args, **kwargs) -> int:
if "detach" in kwargs and kwargs.get("detach") is False:
raise ValueError("Can only safe run with 'detach' set to True")
else:
kwargs["detach"] = True
if "environment" not in kwargs:
kwargs["environment"] = {}
jenkins_env_vars = ["BUILD_NUMBER", "BUILD_ID", "BUILD_TAG"]
kwargs["environment"].update({k: os.environ[k] for k in jenkins_env_vars if k in os.environ})
ret = 0
try:
signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM})
container = self._add_container(self._docker_client.containers.run(*args, **kwargs))
signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM})
logging.info("Started container: %s", self._trim_container_id(container.id))
stream = container.logs(stream=True, stdout=True, stderr=True)
sys.stdout.flush()
for chunk in stream:
sys.stdout.buffer.write(chunk)
sys.stdout.buffer.flush()
sys.stdout.flush()
stream.close()
try:
logging.info("Waiting for status of container %s for %d s.",
self._trim_container_id(container.id),
self._container_wait_seconds)
wait_result = container.wait(timeout=self._container_wait_seconds)
logging.info("Container exit status: %s", wait_result)
ret = wait_result.get('StatusCode', 200)
if ret != 0:
logging.error("Container exited with an error 😞")
logging.info("Executed command for reproduction:\n\n%s\n", " ".join(sys.argv))
else:
logging.info("Container exited with success 👍")
logging.info("Executed command for reproduction:\n\n%s\n", " ".join(sys.argv))
except Exception as err:
logging.exception(err)
return 150
try:
logging.info("Stopping container: %s", self._trim_container_id(container.id))
container.stop()
except Exception as e:
logging.exception(e)
ret = 151
try:
logging.info("Removing container: %s", self._trim_container_id(container.id))
container.remove()
except Exception as e:
logging.exception(e)
ret = 152
self._remove_container(container)
containers = self._docker_client.containers.list()
if containers:
logging.info("Other running containers: %s", [self._trim_container_id(x.id) for x in containers])
except NotFound as e:
logging.info("Container was stopped before cleanup started: %s", e)
return ret
def _volume_mount(volume_dfn: str) -> Dict[str, Any]:
if volume_dfn is None:
raise argparse.ArgumentTypeError("Missing value for volume definition")
parts = volume_dfn.split(":")
if len(parts) < 2 or len(parts) > 3:
raise argparse.ArgumentTypeError("Invalid volume definition {}".format(volume_dfn))
mode = "rw"
if len(parts) == 3:
mode = parts[2]
if mode not in ["rw", "ro"]:
raise argparse.ArgumentTypeError("Invalid volume mount mode {} in volume definition {}".format(mode, volume_dfn))
return {parts[0]: {"bind": parts[1], "mode": mode}}
def main(command_line_arguments):
config_logging()
parser = argparse.ArgumentParser(
description="""Wrapper around docker run that protects against Zombie containers""", epilog="")
parser.add_argument("-u", "--user",
help="Username or UID (format: <name|uid>[:<group|gid>])",
default=None)
parser.add_argument("-v", "--volume",
action='append',
type=_volume_mount,
help="Bind mount a volume",
default=[])
parser.add_argument("--cap-add",
help="Add Linux capabilities",
action="append",
type=str,
default=[])
parser.add_argument("--runtime",
help="Runtime to use for this container",
default=None)
parser.add_argument("--name",
help="Assign a name to the container",
default=None)
parser.add_argument("image", metavar="IMAGE")
parser.add_argument("command", metavar="COMMAND")
parser.add_argument("args", nargs='*', metavar="ARG")
args = parser.parse_args(args=command_line_arguments)
docker_client = SafeDockerClient()
return docker_client.run(args.image, **{
"command": " ".join(list(chain([args.command] + args.args))),
"user": args.user,
"runtime": args.runtime,
"name": args.name,
"volumes": reduce(lambda dct, v: {**dct, **v}, args.volume, {}),
"cap_add": args.cap_add
})
if __name__ == "__main__":
exit(main(sys.argv[1:]))
| true | true |
f7fcb3a706968af6612a52186e99599ada677299 | 3,610 | py | Python | custom_components/hella_onyx/api_connector.py | muhlba91/onyx-homeassistant-integration | f50f17d827947984778634f8b162ae0a31de01e1 | [
"MIT"
] | 1 | 2021-02-10T13:44:47.000Z | 2021-02-10T13:44:47.000Z | custom_components/hella_onyx/api_connector.py | muhlba91/onyx-homeassistant-integration | f50f17d827947984778634f8b162ae0a31de01e1 | [
"MIT"
] | 10 | 2021-02-12T11:42:35.000Z | 2021-11-25T14:56:44.000Z | custom_components/hella_onyx/api_connector.py | muhlba91/onyx-homeassistant-integration | f50f17d827947984778634f8b162ae0a31de01e1 | [
"MIT"
] | null | null | null | """API connector for the ONYX integration."""
import logging
from aiohttp import ClientSession, ClientTimeout
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from onyx_client.client import create
from onyx_client.data.device_command import DeviceCommand
from onyx_client.enum.action import Action
_LOGGER = logging.getLogger(__name__)
class APIConnector:
"""API connector for an ONYX.CENTER."""
def __init__(self, hass, fingerprint, token):
"""Initialize the connector."""
self.hass = hass
self.fingerprint = fingerprint
self.token = token
self.devices = {}
self.groups = {}
def _client(self, session=None):
return create(
fingerprint=self.fingerprint,
access_token=self.token,
client_session=session
if session is not None
else async_get_clientsession(self.hass),
)
async def get_timezone(self):
"""Gets the ONYX.CENTER timezone."""
client = self._client()
date_information = await client.date_information()
if date_information is not None:
return date_information.timezone
else:
return "UTC"
async def update(self):
"""Update all entities."""
client = self._client()
devices = await client.devices(include_details=True)
self.devices = {device.identifier: device for device in devices}
groups = await client.groups()
self.groups = {group.identifier: group for group in groups}
def device(self, uuid: str):
"""Get the Device associated with the provided UUID."""
if uuid in self.devices:
return self.devices[uuid]
raise UnknownStateException("UNKNOWN_DEVICE")
async def update_device(self, uuid: str):
"""Update the given entity."""
client = self._client()
device = await client.device(uuid)
self.devices[device.identifier] = device
return device
async def send_device_command_action(self, uuid: str, action: Action):
_LOGGER.info("executing %s for device %s", action.string(), uuid)
success = await self._client().send_command(uuid, DeviceCommand(action=action))
if not success:
raise CommandException("ONYX_ACTION_ERROR", uuid)
async def send_device_command_properties(self, uuid: str, properties: dict):
_LOGGER.info("executing %s for device %s", properties, uuid)
success = await self._client().send_command(
uuid, DeviceCommand(properties=properties)
)
if not success:
raise CommandException("ONYX_ACTION_ERROR", uuid)
async def listen_events(self, force_update: bool = False):
"""Listen for events."""
async with ClientSession(
timeout=ClientTimeout(
total=None, connect=None, sock_connect=None, sock_read=None
)
) as session:
client = self._client(session)
async for device in client.events(force_update):
_LOGGER.debug("received device data for %s", device.identifier)
yield device
class CommandException(Exception):
"""Exception for a failed command."""
def __init__(self, msg: str, uuid: str):
super().__init__(msg)
_LOGGER.error("command errored: %s for id %s", msg, uuid)
class UnknownStateException(Exception):
"""Exception if the shutter is unknown."""
def __init__(self, msg):
super().__init__(msg)
_LOGGER.error("unknown state: %s", msg)
| 34.711538 | 87 | 0.644598 | import logging
from aiohttp import ClientSession, ClientTimeout
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from onyx_client.client import create
from onyx_client.data.device_command import DeviceCommand
from onyx_client.enum.action import Action
_LOGGER = logging.getLogger(__name__)
class APIConnector:
def __init__(self, hass, fingerprint, token):
self.hass = hass
self.fingerprint = fingerprint
self.token = token
self.devices = {}
self.groups = {}
def _client(self, session=None):
return create(
fingerprint=self.fingerprint,
access_token=self.token,
client_session=session
if session is not None
else async_get_clientsession(self.hass),
)
async def get_timezone(self):
client = self._client()
date_information = await client.date_information()
if date_information is not None:
return date_information.timezone
else:
return "UTC"
async def update(self):
client = self._client()
devices = await client.devices(include_details=True)
self.devices = {device.identifier: device for device in devices}
groups = await client.groups()
self.groups = {group.identifier: group for group in groups}
def device(self, uuid: str):
if uuid in self.devices:
return self.devices[uuid]
raise UnknownStateException("UNKNOWN_DEVICE")
async def update_device(self, uuid: str):
client = self._client()
device = await client.device(uuid)
self.devices[device.identifier] = device
return device
async def send_device_command_action(self, uuid: str, action: Action):
_LOGGER.info("executing %s for device %s", action.string(), uuid)
success = await self._client().send_command(uuid, DeviceCommand(action=action))
if not success:
raise CommandException("ONYX_ACTION_ERROR", uuid)
async def send_device_command_properties(self, uuid: str, properties: dict):
_LOGGER.info("executing %s for device %s", properties, uuid)
success = await self._client().send_command(
uuid, DeviceCommand(properties=properties)
)
if not success:
raise CommandException("ONYX_ACTION_ERROR", uuid)
async def listen_events(self, force_update: bool = False):
async with ClientSession(
timeout=ClientTimeout(
total=None, connect=None, sock_connect=None, sock_read=None
)
) as session:
client = self._client(session)
async for device in client.events(force_update):
_LOGGER.debug("received device data for %s", device.identifier)
yield device
class CommandException(Exception):
def __init__(self, msg: str, uuid: str):
super().__init__(msg)
_LOGGER.error("command errored: %s for id %s", msg, uuid)
class UnknownStateException(Exception):
def __init__(self, msg):
super().__init__(msg)
_LOGGER.error("unknown state: %s", msg)
| true | true |
f7fcb4a60ad7af87b18cd37e5c81407d09f0dd52 | 405 | py | Python | BOT/chat_texto.py | sergiopiresbarra/BOT-CHAT | 4317b5ecc1e60dda28b9ef9803a0d217c6d7bc01 | [
"MIT"
] | null | null | null | BOT/chat_texto.py | sergiopiresbarra/BOT-CHAT | 4317b5ecc1e60dda28b9ef9803a0d217c6d7bc01 | [
"MIT"
] | null | null | null | BOT/chat_texto.py | sergiopiresbarra/BOT-CHAT | 4317b5ecc1e60dda28b9ef9803a0d217c6d7bc01 | [
"MIT"
] | null | null | null | from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot
bot = ChatBot('Chatbot')
conversa = ['oi','olá']
trainer = ListTrainer(bot)
trainer.train(conversa)
while True:
pergunta = input("Usuário: ")
resposta = bot.get_response(pergunta)
if float(resposta.confidence) > 0.5:
print('Bot: ', resposta)
else:
print('Bot:não tenho certeza..:',resposta)
| 19.285714 | 50 | 0.679012 | from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot
bot = ChatBot('Chatbot')
conversa = ['oi','olá']
trainer = ListTrainer(bot)
trainer.train(conversa)
while True:
pergunta = input("Usuário: ")
resposta = bot.get_response(pergunta)
if float(resposta.confidence) > 0.5:
print('Bot: ', resposta)
else:
print('Bot:não tenho certeza..:',resposta)
| true | true |
f7fcb6175c11dd31804df927a350775884ffb0e7 | 1,688 | py | Python | tests/indexes/models.py | danhayden/django | 49b470b9187b6be60e573fed08c8f4a87f133750 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | tests/indexes/models.py | danhayden/django | 49b470b9187b6be60e573fed08c8f4a87f133750 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | tests/indexes/models.py | danhayden/django | 49b470b9187b6be60e573fed08c8f4a87f133750 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | from django.db import models
class CurrentTranslation(models.ForeignObject):
"""
Creates virtual relation to the translation with model cache enabled.
"""
# Avoid validation
requires_unique_target = False
def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
# Disable reverse relation
kwargs["related_name"] = "+"
# Set unique to enable model cache.
kwargs["unique"] = True
super().__init__(to, on_delete, from_fields, to_fields, **kwargs)
class ArticleTranslation(models.Model):
article = models.ForeignKey("indexes.Article", models.CASCADE)
article_no_constraint = models.ForeignKey(
"indexes.Article", models.CASCADE, db_constraint=False, related_name="+"
)
language = models.CharField(max_length=10, unique=True)
content = models.TextField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
published = models.BooleanField(default=False)
# Add virtual relation to the ArticleTranslation model.
translation = CurrentTranslation(
ArticleTranslation, models.CASCADE, ["id"], ["article"]
)
class Meta:
index_together = [
["headline", "pub_date"],
]
class IndexedArticle(models.Model):
headline = models.CharField(max_length=100, db_index=True)
body = models.TextField(db_index=True)
slug = models.CharField(max_length=40, unique=True)
class Meta:
required_db_features = {"supports_index_on_text_field"}
class IndexedArticle2(models.Model):
headline = models.CharField(max_length=100)
body = models.TextField()
| 29.103448 | 80 | 0.692536 | from django.db import models
class CurrentTranslation(models.ForeignObject):
requires_unique_target = False
def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
kwargs["related_name"] = "+"
kwargs["unique"] = True
super().__init__(to, on_delete, from_fields, to_fields, **kwargs)
class ArticleTranslation(models.Model):
article = models.ForeignKey("indexes.Article", models.CASCADE)
article_no_constraint = models.ForeignKey(
"indexes.Article", models.CASCADE, db_constraint=False, related_name="+"
)
language = models.CharField(max_length=10, unique=True)
content = models.TextField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
published = models.BooleanField(default=False)
translation = CurrentTranslation(
ArticleTranslation, models.CASCADE, ["id"], ["article"]
)
class Meta:
index_together = [
["headline", "pub_date"],
]
class IndexedArticle(models.Model):
headline = models.CharField(max_length=100, db_index=True)
body = models.TextField(db_index=True)
slug = models.CharField(max_length=40, unique=True)
class Meta:
required_db_features = {"supports_index_on_text_field"}
class IndexedArticle2(models.Model):
headline = models.CharField(max_length=100)
body = models.TextField()
| true | true |
f7fcb65ebca7450a426e3be1e7b65cc1fc0b46f8 | 30,716 | py | Python | sdk/python/pulumi_gcp/containeranalysis/occurence.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/containeranalysis/occurence.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/containeranalysis/occurence.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['OccurenceArgs', 'Occurence']
@pulumi.input_type
class OccurenceArgs:
    def __init__(__self__, *,
                 attestation: pulumi.Input['OccurenceAttestationArgs'],
                 note_name: pulumi.Input[str],
                 resource_uri: pulumi.Input[str],
                 project: Optional[pulumi.Input[str]] = None,
                 remediation: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Occurence resource.

        :param pulumi.Input['OccurenceAttestationArgs'] attestation: The "attestation" occurrence attached to the
               resource; its authenticity can be verified from the attached signature(s). Structure is documented below.
        :param pulumi.Input[str] note_name: Analysis note the occurrence is attached to, in the form
               projects/[PROJECT]/notes/[NOTE_ID]. Also usable as a list-request filter.
        :param pulumi.Input[str] resource_uri: Required. Immutable. URI of the resource the occurrence applies to,
               e.g. https://gcr.io/project/image@sha256:123abc for a Docker image.
        :param pulumi.Input[str] project: Project the resource belongs to; falls back to the provider project.
        :param pulumi.Input[str] remediation: A description of actions that can be taken to remedy the note.
        """
        # Required inputs are always recorded on the bag.
        pulumi.set(__self__, "attestation", attestation)
        pulumi.set(__self__, "note_name", note_name)
        pulumi.set(__self__, "resource_uri", resource_uri)
        # Optional inputs are only recorded when the caller supplied them.
        for key, value in (("project", project), ("remediation", remediation)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def attestation(self) -> pulumi.Input['OccurenceAttestationArgs']:
        """The "attestation" occurrence attached to the resource. Structure is documented below."""
        return pulumi.get(self, "attestation")

    @attestation.setter
    def attestation(self, value: pulumi.Input['OccurenceAttestationArgs']):
        pulumi.set(self, "attestation", value)

    @property
    @pulumi.getter(name="noteName")
    def note_name(self) -> pulumi.Input[str]:
        """Analysis note the occurrence is attached to (projects/[PROJECT]/notes/[NOTE_ID])."""
        return pulumi.get(self, "note_name")

    @note_name.setter
    def note_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "note_name", value)

    @property
    @pulumi.getter(name="resourceUri")
    def resource_uri(self) -> pulumi.Input[str]:
        """Required. Immutable. URI of the resource the occurrence applies to."""
        return pulumi.get(self, "resource_uri")

    @resource_uri.setter
    def resource_uri(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_uri", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """Project the resource belongs to; falls back to the provider project."""
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def remediation(self) -> Optional[pulumi.Input[str]]:
        """A description of actions that can be taken to remedy the note."""
        return pulumi.get(self, "remediation")

    @remediation.setter
    def remediation(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remediation", value)
@pulumi.input_type
class _OccurenceState:
    def __init__(__self__, *,
                 attestation: Optional[pulumi.Input['OccurenceAttestationArgs']] = None,
                 create_time: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 note_name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 remediation: Optional[pulumi.Input[str]] = None,
                 resource_uri: Optional[pulumi.Input[str]] = None,
                 update_time: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Occurence resources.
        Every field is optional.

        :param pulumi.Input['OccurenceAttestationArgs'] attestation: The "attestation" occurrence attached to the
               resource. Structure is documented below.
        :param pulumi.Input[str] create_time: When the occurrence was created.
        :param pulumi.Input[str] kind: Note kind of the occurrence; also usable as a list-request filter.
        :param pulumi.Input[str] name: The name of the occurrence.
        :param pulumi.Input[str] note_name: Analysis note the occurrence is attached to, in the form
               projects/[PROJECT]/notes/[NOTE_ID].
        :param pulumi.Input[str] project: Project the resource belongs to; falls back to the provider project.
        :param pulumi.Input[str] remediation: A description of actions that can be taken to remedy the note.
        :param pulumi.Input[str] resource_uri: Required. Immutable. URI of the resource the occurrence applies to.
        :param pulumi.Input[str] update_time: When the occurrence was last updated.
        """
        # Record only the fields the caller actually supplied.
        for key, value in (
            ("attestation", attestation),
            ("create_time", create_time),
            ("kind", kind),
            ("name", name),
            ("note_name", note_name),
            ("project", project),
            ("remediation", remediation),
            ("resource_uri", resource_uri),
            ("update_time", update_time),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def attestation(self) -> Optional[pulumi.Input['OccurenceAttestationArgs']]:
        """The "attestation" occurrence attached to the resource. Structure is documented below."""
        return pulumi.get(self, "attestation")

    @attestation.setter
    def attestation(self, value: Optional[pulumi.Input['OccurenceAttestationArgs']]):
        pulumi.set(self, "attestation", value)

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> Optional[pulumi.Input[str]]:
        """When the occurrence was created."""
        return pulumi.get(self, "create_time")

    @create_time.setter
    def create_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_time", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """Note kind of the occurrence; also usable as a list-request filter."""
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The name of the occurrence."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="noteName")
    def note_name(self) -> Optional[pulumi.Input[str]]:
        """Analysis note the occurrence is attached to (projects/[PROJECT]/notes/[NOTE_ID])."""
        return pulumi.get(self, "note_name")

    @note_name.setter
    def note_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "note_name", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """Project the resource belongs to; falls back to the provider project."""
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def remediation(self) -> Optional[pulumi.Input[str]]:
        """A description of actions that can be taken to remedy the note."""
        return pulumi.get(self, "remediation")

    @remediation.setter
    def remediation(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remediation", value)

    @property
    @pulumi.getter(name="resourceUri")
    def resource_uri(self) -> Optional[pulumi.Input[str]]:
        """Required. Immutable. URI of the resource the occurrence applies to."""
        return pulumi.get(self, "resource_uri")

    @resource_uri.setter
    def resource_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_uri", value)

    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> Optional[pulumi.Input[str]]:
        """When the occurrence was last updated."""
        return pulumi.get(self, "update_time")

    @update_time.setter
    def update_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "update_time", value)
class Occurence(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 attestation: Optional[pulumi.Input[pulumi.InputType['OccurenceAttestationArgs']]] = None,
                 note_name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 remediation: Optional[pulumi.Input[str]] = None,
                 resource_uri: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        An occurrence is an instance of a Note: a single analysis result
        ("attestation") recorded against a concrete resource such as a
        container image.

        To get more information about Occurrence, see:

        * [API documentation](https://cloud.google.com/container-analysis/api/reference/rest/)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/container-analysis/)

        ## Import

        Occurrence can be imported using any of these accepted formats

        ```sh
         $ pulumi import gcp:containeranalysis/occurence:Occurence default projects/{{project}}/occurrences/{{name}}
        ```

        ```sh
         $ pulumi import gcp:containeranalysis/occurence:Occurence default {{project}}/{{name}}
        ```

        ```sh
         $ pulumi import gcp:containeranalysis/occurence:Occurence default {{name}}
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['OccurenceAttestationArgs']] attestation: The "attestation" payload and
               signature(s) for this occurrence. Structure is documented below.
        :param pulumi.Input[str] note_name: Analysis note the occurrence is attached to, in the form
               projects/[PROJECT]/notes/[NOTE_ID].
        :param pulumi.Input[str] project: Project the resource belongs to; falls back to the provider project.
        :param pulumi.Input[str] remediation: A description of actions that can be taken to remedy the note.
        :param pulumi.Input[str] resource_uri: Required. Immutable. URI of the resource the occurrence applies to,
               e.g. https://gcr.io/project/image@sha256:123abc for a Docker image.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: OccurenceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Strongly-typed variant of the constructor: all inputs are carried
        in an ``OccurenceArgs`` bag instead of individual keyword args.

        :param str resource_name: The name of the resource.
        :param OccurenceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Normalise the two overload shapes into a single keyword-style
        # call on _internal_init.
        parsed_args, opts = _utilities.get_resource_args_opts(OccurenceArgs, pulumi.ResourceOptions, *args, **kwargs)
        if parsed_args is None:
            __self__._internal_init(resource_name, *args, **kwargs)
        else:
            __self__._internal_init(resource_name, opts, **parsed_args.__dict__)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       attestation: Optional[pulumi.Input[pulumi.InputType['OccurenceAttestationArgs']]] = None,
                       note_name: Optional[pulumi.Input[str]] = None,
                       project: Optional[pulumi.Input[str]] = None,
                       remediation: Optional[pulumi.Input[str]] = None,
                       resource_uri: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            # Pin the provider plugin version this SDK was generated against.
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = OccurenceArgs.__new__(OccurenceArgs)

            # Required inputs (not enforced when adopting by URN).
            for prop, value in (("attestation", attestation),
                                ("note_name", note_name)):
                if value is None and not opts.urn:
                    raise TypeError("Missing required property '%s'" % prop)
                __props__.__dict__[prop] = value
            __props__.__dict__["project"] = project
            __props__.__dict__["remediation"] = remediation
            if resource_uri is None and not opts.urn:
                raise TypeError("Missing required property 'resource_uri'")
            __props__.__dict__["resource_uri"] = resource_uri
            # Output-only properties start out unknown.
            for out_prop in ("create_time", "kind", "name", "update_time"):
                __props__.__dict__[out_prop] = None
        super(Occurence, __self__).__init__(
            'gcp:containeranalysis/occurence:Occurence',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            attestation: Optional[pulumi.Input[pulumi.InputType['OccurenceAttestationArgs']]] = None,
            create_time: Optional[pulumi.Input[str]] = None,
            kind: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            note_name: Optional[pulumi.Input[str]] = None,
            project: Optional[pulumi.Input[str]] = None,
            remediation: Optional[pulumi.Input[str]] = None,
            resource_uri: Optional[pulumi.Input[str]] = None,
            update_time: Optional[pulumi.Input[str]] = None) -> 'Occurence':
        """
        Get an existing Occurence resource's state with the given name, id,
        and optional extra properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['OccurenceAttestationArgs']] attestation: The "attestation" occurrence
               attached to the resource. Structure is documented below.
        :param pulumi.Input[str] create_time: When the occurrence was created.
        :param pulumi.Input[str] kind: Note kind of the occurrence; also usable as a list-request filter.
        :param pulumi.Input[str] name: The name of the occurrence.
        :param pulumi.Input[str] note_name: Analysis note the occurrence is attached to, in the form
               projects/[PROJECT]/notes/[NOTE_ID].
        :param pulumi.Input[str] project: Project the resource belongs to; falls back to the provider project.
        :param pulumi.Input[str] remediation: A description of actions that can be taken to remedy the note.
        :param pulumi.Input[str] resource_uri: Required. Immutable. URI of the resource the occurrence applies to.
        :param pulumi.Input[str] update_time: When the occurrence was last updated.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Copy every supplied state field straight onto the state bag.
        __props__ = _OccurenceState.__new__(_OccurenceState)
        for key, value in (
            ("attestation", attestation),
            ("create_time", create_time),
            ("kind", kind),
            ("name", name),
            ("note_name", note_name),
            ("project", project),
            ("remediation", remediation),
            ("resource_uri", resource_uri),
            ("update_time", update_time),
        ):
            __props__.__dict__[key] = value
        return Occurence(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def attestation(self) -> pulumi.Output['outputs.OccurenceAttestation']:
        """The "attestation" occurrence attached to the resource. Structure is documented below."""
        return pulumi.get(self, "attestation")

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> pulumi.Output[str]:
        """When the occurrence was created."""
        return pulumi.get(self, "create_time")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """Note kind of the occurrence; also usable as a list-request filter."""
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """The name of the occurrence."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="noteName")
    def note_name(self) -> pulumi.Output[str]:
        """Analysis note the occurrence is attached to (projects/[PROJECT]/notes/[NOTE_ID])."""
        return pulumi.get(self, "note_name")

    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """Project the resource belongs to; falls back to the provider project."""
        return pulumi.get(self, "project")

    @property
    @pulumi.getter
    def remediation(self) -> pulumi.Output[Optional[str]]:
        """A description of actions that can be taken to remedy the note."""
        return pulumi.get(self, "remediation")

    @property
    @pulumi.getter(name="resourceUri")
    def resource_uri(self) -> pulumi.Output[str]:
        """Required. Immutable. URI of the resource the occurrence applies to."""
        return pulumi.get(self, "resource_uri")

    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> pulumi.Output[str]:
        """When the occurrence was last updated."""
        return pulumi.get(self, "update_time")
| 45.5727 | 159 | 0.644811 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['OccurenceArgs', 'Occurence']
@pulumi.input_type
class OccurenceArgs:
    """The set of arguments for constructing a Occurence resource.

    ``attestation``, ``note_name`` and ``resource_uri`` are required;
    ``project`` and ``remediation`` are optional.
    """

    def __init__(__self__, *,
                 attestation: pulumi.Input['OccurenceAttestationArgs'],
                 note_name: pulumi.Input[str],
                 resource_uri: pulumi.Input[str],
                 project: Optional[pulumi.Input[str]] = None,
                 remediation: Optional[pulumi.Input[str]] = None):
        # Required inputs are always recorded on the bag.
        pulumi.set(__self__, "attestation", attestation)
        pulumi.set(__self__, "note_name", note_name)
        pulumi.set(__self__, "resource_uri", resource_uri)
        # Optional inputs are only recorded when supplied.
        if project is not None:
            pulumi.set(__self__, "project", project)
        if remediation is not None:
            pulumi.set(__self__, "remediation", remediation)

    @property
    @pulumi.getter
    def attestation(self) -> pulumi.Input['OccurenceAttestationArgs']:
        """The "attestation" (payload + signatures) attached to the occurrence."""
        return pulumi.get(self, "attestation")

    @attestation.setter
    def attestation(self, value: pulumi.Input['OccurenceAttestationArgs']):
        pulumi.set(self, "attestation", value)

    @property
    @pulumi.getter(name="noteName")
    def note_name(self) -> pulumi.Input[str]:
        """Analysis note the occurrence is attached to (projects/[PROJECT]/notes/[NOTE_ID])."""
        return pulumi.get(self, "note_name")

    @note_name.setter
    def note_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "note_name", value)

    @property
    @pulumi.getter(name="resourceUri")
    def resource_uri(self) -> pulumi.Input[str]:
        """Required. Immutable. URI of the resource the occurrence applies to."""
        return pulumi.get(self, "resource_uri")

    @resource_uri.setter
    def resource_uri(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_uri", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """Project the resource belongs to; falls back to the provider project."""
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def remediation(self) -> Optional[pulumi.Input[str]]:
        """A description of actions that can be taken to remedy the note."""
        return pulumi.get(self, "remediation")

    @remediation.setter
    def remediation(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remediation", value)
@pulumi.input_type
class _OccurenceState:
    """Input properties used for looking up and filtering Occurence
    resources. Every field is optional.
    """

    def __init__(__self__, *,
                 attestation: Optional[pulumi.Input['OccurenceAttestationArgs']] = None,
                 create_time: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 note_name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 remediation: Optional[pulumi.Input[str]] = None,
                 resource_uri: Optional[pulumi.Input[str]] = None,
                 update_time: Optional[pulumi.Input[str]] = None):
        # Record only the fields the caller actually supplied.
        if attestation is not None:
            pulumi.set(__self__, "attestation", attestation)
        if create_time is not None:
            pulumi.set(__self__, "create_time", create_time)
        if kind is not None:
            pulumi.set(__self__, "kind", kind)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if note_name is not None:
            pulumi.set(__self__, "note_name", note_name)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if remediation is not None:
            pulumi.set(__self__, "remediation", remediation)
        if resource_uri is not None:
            pulumi.set(__self__, "resource_uri", resource_uri)
        if update_time is not None:
            pulumi.set(__self__, "update_time", update_time)

    @property
    @pulumi.getter
    def attestation(self) -> Optional[pulumi.Input['OccurenceAttestationArgs']]:
        """The "attestation" (payload + signatures) attached to the occurrence."""
        return pulumi.get(self, "attestation")

    @attestation.setter
    def attestation(self, value: Optional[pulumi.Input['OccurenceAttestationArgs']]):
        pulumi.set(self, "attestation", value)

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> Optional[pulumi.Input[str]]:
        """When the occurrence was created."""
        return pulumi.get(self, "create_time")

    @create_time.setter
    def create_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "create_time", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """Note kind of the occurrence; also usable as a list-request filter."""
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The name of the occurrence."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="noteName")
    def note_name(self) -> Optional[pulumi.Input[str]]:
        """Analysis note the occurrence is attached to (projects/[PROJECT]/notes/[NOTE_ID])."""
        return pulumi.get(self, "note_name")

    @note_name.setter
    def note_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "note_name", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """Project the resource belongs to; falls back to the provider project."""
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def remediation(self) -> Optional[pulumi.Input[str]]:
        """A description of actions that can be taken to remedy the note."""
        return pulumi.get(self, "remediation")

    @remediation.setter
    def remediation(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remediation", value)

    @property
    @pulumi.getter(name="resourceUri")
    def resource_uri(self) -> Optional[pulumi.Input[str]]:
        """Required. Immutable. URI of the resource the occurrence applies to."""
        return pulumi.get(self, "resource_uri")

    @resource_uri.setter
    def resource_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_uri", value)

    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> Optional[pulumi.Input[str]]:
        """When the occurrence was last updated."""
        return pulumi.get(self, "update_time")

    @update_time.setter
    def update_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "update_time", value)
class Occurence(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attestation: Optional[pulumi.Input[pulumi.InputType['OccurenceAttestationArgs']]] = None,
note_name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
remediation: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: OccurenceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(OccurenceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attestation: Optional[pulumi.Input[pulumi.InputType['OccurenceAttestationArgs']]] = None,
note_name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
remediation: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OccurenceArgs.__new__(OccurenceArgs)
if attestation is None and not opts.urn:
raise TypeError("Missing required property 'attestation'")
__props__.__dict__["attestation"] = attestation
if note_name is None and not opts.urn:
raise TypeError("Missing required property 'note_name'")
__props__.__dict__["note_name"] = note_name
__props__.__dict__["project"] = project
__props__.__dict__["remediation"] = remediation
if resource_uri is None and not opts.urn:
raise TypeError("Missing required property 'resource_uri'")
__props__.__dict__["resource_uri"] = resource_uri
__props__.__dict__["create_time"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["update_time"] = None
super(Occurence, __self__).__init__(
'gcp:containeranalysis/occurence:Occurence',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
attestation: Optional[pulumi.Input[pulumi.InputType['OccurenceAttestationArgs']]] = None,
create_time: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
note_name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
remediation: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None) -> 'Occurence':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _OccurenceState.__new__(_OccurenceState)
__props__.__dict__["attestation"] = attestation
__props__.__dict__["create_time"] = create_time
__props__.__dict__["kind"] = kind
__props__.__dict__["name"] = name
__props__.__dict__["note_name"] = note_name
__props__.__dict__["project"] = project
__props__.__dict__["remediation"] = remediation
__props__.__dict__["resource_uri"] = resource_uri
__props__.__dict__["update_time"] = update_time
return Occurence(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def attestation(self) -> pulumi.Output['outputs.OccurenceAttestation']:
return pulumi.get(self, "attestation")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="noteName")
def note_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "note_name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter
def remediation(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "remediation")
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> pulumi.Output[str]:
return pulumi.get(self, "resource_uri")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> pulumi.Output[str]:
return pulumi.get(self, "update_time")
| true | true |
f7fcb681d045a3bc90693ef6be1a366a766eb756 | 4,721 | py | Python | L1Trigger/ME0Trigger/test/runME0_Reco_L1.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | L1Trigger/ME0Trigger/test/runME0_Reco_L1.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | L1Trigger/ME0Trigger/test/runME0_Reco_L1.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step2 --conditions auto:phase2_realistic -s DIGI:pdigi_valid,L1,L1TrackTrigger,DIGI2RAW,HLT:@fake2 --datatier GEN-SIM-DIGI-RAW -n 10 --geometry Extended2023D22 --era Phase2 --eventcontent FEVTDEBUGHLT --filein file:step1.root --fileout file:step2.root
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2_cff import Phase2
process = cms.Process('L1',Phase2)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023D22Reco_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
#process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.me0RecoSequence = cms.Sequence(
process.me0RecHits * process.me0Segments
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("PoolSource",
dropDescendantsOfDroppedBranches = cms.untracked.bool(False),
fileNames = cms.untracked.vstring('file:step2.root'),
inputCommands = cms.untracked.vstring(
'keep *',
'drop *_genParticles_*_*',
'drop *_genParticlesForJets_*_*',
'drop *_kt4GenJets_*_*',
'drop *_kt6GenJets_*_*',
'drop *_iterativeCone5GenJets_*_*',
'drop *_ak4GenJets_*_*',
'drop *_ak7GenJets_*_*',
'drop *_ak8GenJets_*_*',
'drop *_ak4GenJetsNoNu_*_*',
'drop *_ak8GenJetsNoNu_*_*',
'drop *_genCandidatesForMET_*_*',
'drop *_genParticlesForMETAllVisible_*_*',
'drop *_genMetCalo_*_*',
'drop *_genMetCaloAndNonPrompt_*_*',
'drop *_genMetTrue_*_*',
'drop *_genMetIC5GenJs_*_*'
),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step2 nevts:10'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM-DIGI-RAW-RECO'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:ME0step3.root'),
outputCommands = process.FEVTDEBUGHLTEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')
process.load('L1Trigger/ME0Trigger/me0TriggerPseudoDigis_cfi')
process.me0TriggerPseudoDigis.info = 3
process.me0TriggerPseudoDigis.ME0SegmentProducer = cms.InputTag("me0Segments")
# Path and EndPath definitions
#process.ME0_step = cms.Path(process.me0RecoSequence_all)
process.ME0Reco_step = cms.Path(process.me0RecoSequence)
process.ME0L1_step = cms.Path(process.me0TriggerPseudoDigis)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)
# Schedule definition
process.schedule = cms.Schedule(process.ME0Reco_step, process.ME0L1_step)
process.schedule.extend([process.endjob_step,process.FEVTDEBUGHLToutput_step])
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
#from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC
#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
#process = customizeHLTforMC(process)
# End of customisation functions
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| 37.768 | 280 | 0.775259 |
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2_cff import Phase2
process = cms.Process('L1',Phase2)
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023D22Reco_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.me0RecoSequence = cms.Sequence(
process.me0RecHits * process.me0Segments
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
process.source = cms.Source("PoolSource",
dropDescendantsOfDroppedBranches = cms.untracked.bool(False),
fileNames = cms.untracked.vstring('file:step2.root'),
inputCommands = cms.untracked.vstring(
'keep *',
'drop *_genParticles_*_*',
'drop *_genParticlesForJets_*_*',
'drop *_kt4GenJets_*_*',
'drop *_kt6GenJets_*_*',
'drop *_iterativeCone5GenJets_*_*',
'drop *_ak4GenJets_*_*',
'drop *_ak7GenJets_*_*',
'drop *_ak8GenJets_*_*',
'drop *_ak4GenJetsNoNu_*_*',
'drop *_ak8GenJetsNoNu_*_*',
'drop *_genCandidatesForMET_*_*',
'drop *_genParticlesForMETAllVisible_*_*',
'drop *_genMetCalo_*_*',
'drop *_genMetCaloAndNonPrompt_*_*',
'drop *_genMetTrue_*_*',
'drop *_genMetIC5GenJs_*_*'
),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step2 nevts:10'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM-DIGI-RAW-RECO'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:ME0step3.root'),
outputCommands = process.FEVTDEBUGHLTEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')
process.load('L1Trigger/ME0Trigger/me0TriggerPseudoDigis_cfi')
process.me0TriggerPseudoDigis.info = 3
process.me0TriggerPseudoDigis.ME0SegmentProducer = cms.InputTag("me0Segments")
process.ME0Reco_step = cms.Path(process.me0RecoSequence)
process.ME0L1_step = cms.Path(process.me0TriggerPseudoDigis)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)
process.schedule = cms.Schedule(process.ME0Reco_step, process.ME0L1_step)
process.schedule.extend([process.endjob_step,process.FEVTDEBUGHLToutput_step])
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
| true | true |
f7fcb6a5b207b440e8b51f2a3b3be559b7c53b71 | 858 | py | Python | Python/Libraries&Modules/sendEmail.py | vdrey/Toolbox | d268ad08c50d1dfaa9589a8f6269deead60a7f80 | [
"MIT"
] | null | null | null | Python/Libraries&Modules/sendEmail.py | vdrey/Toolbox | d268ad08c50d1dfaa9589a8f6269deead60a7f80 | [
"MIT"
] | 4 | 2015-02-16T00:36:06.000Z | 2015-02-16T23:52:54.000Z | Python/Libraries&Modules/sendEmail.py | vdrey/Toolbox | d268ad08c50d1dfaa9589a8f6269deead60a7f80 | [
"MIT"
] | null | null | null | # This uses Email Yak to send an email
import requests
import json
import os
def sendEmail(API_KEY=None, To=None, From=None, CC=None, BCC=None, Subject=None, Message=None):
# Still need to add all Email Yak fields
url = 'https://api.emailyak.com/v1/' + API_KEY + '/json/send/email/'
headers = {'Content-Type' : 'application/json'} # This is still needed.
#Possibly need to account for http vs email headers
sendFrom = "1027@ohos.simpleyak.com"
content = {"FromAddress": sendFrom, "ToAddress": email,
"Subject": "1027 Patricians Lane IP Address Change",
"TextBody": message} # Make this so that it is standardized
JSONcontent = json.dumps(content)
sendMail = requests.post(url, data=JSONcontent, headers=headers)
print(sendMail.json())
| 42.9 | 103 | 0.63986 |
import requests
import json
import os
def sendEmail(API_KEY=None, To=None, From=None, CC=None, BCC=None, Subject=None, Message=None):
url = 'https://api.emailyak.com/v1/' + API_KEY + '/json/send/email/'
headers = {'Content-Type' : 'application/json'}
sendFrom = "1027@ohos.simpleyak.com"
content = {"FromAddress": sendFrom, "ToAddress": email,
"Subject": "1027 Patricians Lane IP Address Change",
"TextBody": message}
JSONcontent = json.dumps(content)
sendMail = requests.post(url, data=JSONcontent, headers=headers)
print(sendMail.json())
| true | true |
f7fcb6da50dd252e53759b1a00067d50fb708c28 | 3,782 | py | Python | roles/openshift_preflight/base/library/aos_version.py | ramkrsna/openshift-ansible | fc96d8d22f6c277b599e6e2fa4e9cc06814a9460 | [
"Apache-2.0"
] | 2 | 2015-02-04T07:24:39.000Z | 2015-05-03T10:27:56.000Z | roles/openshift_preflight/base/library/aos_version.py | ramkrsna/openshift-ansible | fc96d8d22f6c277b599e6e2fa4e9cc06814a9460 | [
"Apache-2.0"
] | null | null | null | roles/openshift_preflight/base/library/aos_version.py | ramkrsna/openshift-ansible | fc96d8d22f6c277b599e6e2fa4e9cc06814a9460 | [
"Apache-2.0"
] | 1 | 2019-10-28T15:03:29.000Z | 2019-10-28T15:03:29.000Z | #!/usr/bin/python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
An ansible module for determining if more than one minor version
of any atomic-openshift package is available, which would indicate
that multiple repos are enabled for different versions of the same
thing which may cause problems.
Also, determine if the version requested is available down to the
precision requested.
'''
# import os
# import sys
import yum # pylint: disable=import-error
from ansible.module_utils.basic import AnsibleModule
def main(): # pylint: disable=missing-docstring
module = AnsibleModule(
argument_spec=dict(
version=dict(required=True)
),
supports_check_mode=True
)
# NOTE(rhcarvalho): sosiouxme added _unmute, but I couldn't find a case yet
# for when it is actually necessary. Leaving it commented out for now,
# though this comment and the commented out code related to _unmute should
# be deleted later if not proven necessary.
# sys.stdout = os.devnull # mute yum so it doesn't break our output
# sys.stderr = os.devnull # mute yum so it doesn't break our output
# def _unmute(): # pylint: disable=missing-docstring
# sys.stdout = sys.__stdout__
def bail(error): # pylint: disable=missing-docstring
# _unmute()
module.fail_json(msg=error)
yb = yum.YumBase() # pylint: disable=invalid-name
# search for package versions available for aos pkgs
expected_pkgs = [
'atomic-openshift',
'atomic-openshift-master',
'atomic-openshift-node',
]
try:
pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
except yum.Errors.PackageSackError as e: # pylint: disable=invalid-name
# you only hit this if *none* of the packages are available
bail('Unable to find any atomic-openshift packages. \nCheck your subscription and repo settings. \n%s' % e)
# determine what level of precision we're expecting for the version
expected_version = module.params['version']
if expected_version.startswith('v'): # v3.3 => 3.3
expected_version = expected_version[1:]
num_dots = expected_version.count('.')
pkgs_by_name_version = {}
pkgs_precise_version_found = {}
for pkg in pkgs:
# get expected version precision
match_version = '.'.join(pkg.version.split('.')[:num_dots + 1])
if match_version == expected_version:
pkgs_precise_version_found[pkg.name] = True
# get x.y version precision
minor_version = '.'.join(pkg.version.split('.')[:2])
if pkg.name not in pkgs_by_name_version:
pkgs_by_name_version[pkg.name] = {}
pkgs_by_name_version[pkg.name][minor_version] = True
# see if any packages couldn't be found at requested version
# see if any packages are available in more than one minor version
not_found = []
multi_found = []
for name in expected_pkgs:
if name not in pkgs_precise_version_found:
not_found.append(name)
if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
multi_found.append(name)
if not_found:
msg = 'Not all of the required packages are available at requested version %s:\n' % expected_version
for name in not_found:
msg += ' %s\n' % name
bail(msg + 'Please check your subscriptions and enabled repositories.')
if multi_found:
msg = 'Multiple minor versions of these packages are available\n'
for name in multi_found:
msg += ' %s\n' % name
bail(msg + "There should only be one OpenShift version's repository enabled at a time.")
# _unmute()
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| 37.445545 | 115 | 0.674775 |
import yum
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
version=dict(required=True)
),
supports_check_mode=True
)
# for when it is actually necessary. Leaving it commented out for now,
# though this comment and the commented out code related to _unmute should
# be deleted later if not proven necessary.
# sys.stdout = os.devnull # mute yum so it doesn't break our output
=missing-docstring
# sys.stdout = sys.__stdout__
def bail(error): # pylint: disable=missing-docstring
# _unmute()
module.fail_json(msg=error)
yb = yum.YumBase() # pylint: disable=invalid-name
# search for package versions available for aos pkgs
expected_pkgs = [
'atomic-openshift',
'atomic-openshift-master',
'atomic-openshift-node',
]
try:
pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
except yum.Errors.PackageSackError as e: # pylint: disable=invalid-name
# you only hit this if *none* of the packages are available
bail('Unable to find any atomic-openshift packages. \nCheck your subscription and repo settings. \n%s' % e)
# determine what level of precision we're expecting for the version
expected_version = module.params['version']
if expected_version.startswith('v'):
expected_version = expected_version[1:]
num_dots = expected_version.count('.')
pkgs_by_name_version = {}
pkgs_precise_version_found = {}
for pkg in pkgs:
match_version = '.'.join(pkg.version.split('.')[:num_dots + 1])
if match_version == expected_version:
pkgs_precise_version_found[pkg.name] = True
minor_version = '.'.join(pkg.version.split('.')[:2])
if pkg.name not in pkgs_by_name_version:
pkgs_by_name_version[pkg.name] = {}
pkgs_by_name_version[pkg.name][minor_version] = True
# see if any packages are available in more than one minor version
not_found = []
multi_found = []
for name in expected_pkgs:
if name not in pkgs_precise_version_found:
not_found.append(name)
if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
multi_found.append(name)
if not_found:
msg = 'Not all of the required packages are available at requested version %s:\n' % expected_version
for name in not_found:
msg += ' %s\n' % name
bail(msg + 'Please check your subscriptions and enabled repositories.')
if multi_found:
msg = 'Multiple minor versions of these packages are available\n'
for name in multi_found:
msg += ' %s\n' % name
bail(msg + "There should only be one OpenShift version's repository enabled at a time.")
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| true | true |
f7fcb6dab1605834d4b5410331424eee473cbda6 | 24,965 | py | Python | nipype/pipeline/plugins/base.py | BenjaminMey/nipype | 954fd4d29249d1bd4ce7460e5585da04479049da | [
"Apache-2.0"
] | 1 | 2019-03-25T14:11:18.000Z | 2019-03-25T14:11:18.000Z | nipype/pipeline/plugins/base.py | josephmje/nipype | 0225a4555cb262626f2fcad2a5df54fe2722f2a8 | [
"Apache-2.0"
] | 1 | 2017-01-05T01:24:33.000Z | 2017-01-05T01:24:33.000Z | nipype/pipeline/plugins/base.py | wtriplett/nipype | 388f140fceaf55438a987e9cdfa2a8e995428afd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Common graph operations for execution
"""
import sys
from copy import deepcopy
from glob import glob
import os
import shutil
from time import sleep, time
from traceback import format_exception
import numpy as np
from ... import logging
from ...utils.misc import str2bool
from ..engine.utils import topological_sort, load_resultfile
from ..engine import MapNode
from .tools import report_crash, report_nodes_not_run, create_pyscript
logger = logging.getLogger("nipype.workflow")
class PluginBase(object):
"""
Base class for plugins
"""
def __init__(self, plugin_args=None):
if plugin_args is None:
plugin_args = {}
self.plugin_args = plugin_args
self._config = None
self._status_callback = plugin_args.get("status_callback")
def run(self, graph, config, updatehash=False):
"""
The core plugin member that should be implemented by
all plugins.
graph: a networkx, flattened :abbr:`DAG (Directed Acyclic Graph)`
to be executed
config: a nipype.config object
updatehash:
"""
raise NotImplementedError
class DistributedPluginBase(PluginBase):
"""
Execute workflow with a distribution engine
Relevant class attributes
-------------------------
procs: list (N) of underlying interface elements to be processed
proc_done: a boolean numpy array (N,) signifying whether a process has been
submitted for execution
proc_pending: a boolean numpy array (N,) signifying whether a
process is currently running.
depidx: a boolean matrix (NxN) storing the dependency structure accross
processes. Process dependencies are derived from each column.
Combinations of ``proc_done`` and ``proc_pending``
--------------------------------------------------
+------------+---------------+--------------------------------+
| proc_done | proc_pending | outcome |
+============+===============+================================+
| True | False | Process is finished |
+------------+---------------+--------------------------------+
| True | True | Process is currently being run |
+------------+---------------+--------------------------------+
| False | False | Process is queued |
+------------+---------------+--------------------------------+
| False | True | INVALID COMBINATION |
+------------+---------------+--------------------------------+
"""
def __init__(self, plugin_args=None):
"""
Initialize runtime attributes to none
"""
super(DistributedPluginBase, self).__init__(plugin_args=plugin_args)
self.procs = None
self.depidx = None
self.refidx = None
self.mapnodes = None
self.mapnodesubids = None
self.proc_done = None
self.proc_pending = None
self.pending_tasks = []
self.max_jobs = self.plugin_args.get("max_jobs", np.inf)
def _prerun_check(self, graph):
"""Stub method to validate/massage graph and nodes before running"""
def _postrun_check(self):
"""Stub method to close any open resources"""
def run(self, graph, config, updatehash=False):
"""
Executes a pre-defined pipeline using distributed approaches
"""
logger.info("Running in parallel.")
self._config = config
poll_sleep_secs = float(config["execution"]["poll_sleep_duration"])
self._prerun_check(graph)
# Generate appropriate structures for worker-manager model
self._generate_dependency_list(graph)
self.mapnodes = []
self.mapnodesubids = {}
# setup polling - TODO: change to threaded model
notrun = []
old_progress_stats = None
old_presub_stats = None
while not np.all(self.proc_done) or np.any(self.proc_pending):
loop_start = time()
# Check if a job is available (jobs with all dependencies run)
# https://github.com/nipy/nipype/pull/2200#discussion_r141605722
jobs_ready = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1]
progress_stats = (
len(self.proc_done),
np.sum(self.proc_done ^ self.proc_pending),
np.sum(self.proc_done & self.proc_pending),
len(jobs_ready),
len(self.pending_tasks),
np.sum(~self.proc_done & ~self.proc_pending),
)
display_stats = progress_stats != old_progress_stats
if display_stats:
logger.debug(
"Progress: %d jobs, %d/%d/%d "
"(done/running/ready), %d/%d "
"(pending_tasks/waiting).",
*progress_stats
)
old_progress_stats = progress_stats
toappend = []
# trigger callbacks for any pending results
while self.pending_tasks:
taskid, jobid = self.pending_tasks.pop()
try:
result = self._get_result(taskid)
except Exception:
notrun.append(self._clean_queue(jobid, graph))
else:
if result:
if result["traceback"]:
notrun.append(
self._clean_queue(jobid, graph, result=result)
)
else:
self._task_finished_cb(jobid)
self._remove_node_dirs()
self._clear_task(taskid)
else:
assert self.proc_done[jobid] and self.proc_pending[jobid]
toappend.insert(0, (taskid, jobid))
if toappend:
self.pending_tasks.extend(toappend)
num_jobs = len(self.pending_tasks)
presub_stats = (num_jobs, np.sum(self.proc_done & self.proc_pending))
display_stats = display_stats or presub_stats != old_presub_stats
if display_stats:
logger.debug("Tasks currently running: %d. Pending: %d.", *presub_stats)
old_presub_stats = presub_stats
if num_jobs < self.max_jobs:
self._send_procs_to_workers(updatehash=updatehash, graph=graph)
elif display_stats:
logger.debug("Not submitting (max jobs reached)")
sleep_til = loop_start + poll_sleep_secs
sleep(max(0, sleep_til - time()))
self._remove_node_dirs()
report_nodes_not_run(notrun)
# close any open resources
self._postrun_check()
def _get_result(self, taskid):
raise NotImplementedError
def _submit_job(self, node, updatehash=False):
raise NotImplementedError
def _report_crash(self, node, result=None):
tb = None
if result is not None:
node._result = result["result"]
tb = result["traceback"]
node._traceback = tb
return report_crash(node, traceback=tb)
def _clear_task(self, taskid):
raise NotImplementedError
def _clean_queue(self, jobid, graph, result=None):
logger.debug("Clearing %d from queue", jobid)
if self._status_callback:
self._status_callback(self.procs[jobid], "exception")
if result is None:
result = {
"result": None,
"traceback": "\n".join(format_exception(*sys.exc_info())),
}
crashfile = self._report_crash(self.procs[jobid], result=result)
if str2bool(self._config["execution"]["stop_on_first_crash"]):
raise RuntimeError("".join(result["traceback"]))
if jobid in self.mapnodesubids:
# remove current jobid
self.proc_pending[jobid] = False
self.proc_done[jobid] = True
# remove parent mapnode
jobid = self.mapnodesubids[jobid]
self.proc_pending[jobid] = False
self.proc_done[jobid] = True
# remove dependencies from queue
return self._remove_node_deps(jobid, crashfile, graph)
def _submit_mapnode(self, jobid):
import scipy.sparse as ssp
if jobid in self.mapnodes:
return True
self.mapnodes.append(jobid)
mapnodesubids = self.procs[jobid].get_subnodes()
numnodes = len(mapnodesubids)
logger.debug("Adding %d jobs for mapnode %s", numnodes, self.procs[jobid])
for i in range(numnodes):
self.mapnodesubids[self.depidx.shape[0] + i] = jobid
self.procs.extend(mapnodesubids)
self.depidx = ssp.vstack(
(self.depidx, ssp.lil_matrix(np.zeros((numnodes, self.depidx.shape[1])))),
"lil",
)
self.depidx = ssp.hstack(
(self.depidx, ssp.lil_matrix(np.zeros((self.depidx.shape[0], numnodes)))),
"lil",
)
self.depidx[-numnodes:, jobid] = 1
self.proc_done = np.concatenate(
(self.proc_done, np.zeros(numnodes, dtype=bool))
)
self.proc_pending = np.concatenate(
(self.proc_pending, np.zeros(numnodes, dtype=bool))
)
return False
def _send_procs_to_workers(self, updatehash=False, graph=None):
"""
Sends jobs to workers
"""
while not np.all(self.proc_done):
num_jobs = len(self.pending_tasks)
if np.isinf(self.max_jobs):
slots = None
else:
slots = max(0, self.max_jobs - num_jobs)
logger.debug("Slots available: %s", slots)
if (num_jobs >= self.max_jobs) or (slots == 0):
break
# Check if a job is available (jobs with all dependencies run)
# https://github.com/nipy/nipype/pull/2200#discussion_r141605722
jobids = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1]
if len(jobids) > 0:
# send all available jobs
logger.info(
"Pending[%d] Submitting[%d] jobs Slots[%s]",
num_jobs,
len(jobids[:slots]),
slots or "inf",
)
for jobid in jobids[:slots]:
if isinstance(self.procs[jobid], MapNode):
try:
num_subnodes = self.procs[jobid].num_subnodes()
except Exception:
self._clean_queue(jobid, graph)
self.proc_pending[jobid] = False
continue
if num_subnodes > 1:
submit = self._submit_mapnode(jobid)
if not submit:
continue
# change job status in appropriate queues
self.proc_done[jobid] = True
self.proc_pending[jobid] = True
# Send job to task manager and add to pending tasks
logger.info("Submitting: %s ID: %d", self.procs[jobid], jobid)
if self._status_callback:
self._status_callback(self.procs[jobid], "start")
if not self._local_hash_check(jobid, graph):
if self.procs[jobid].run_without_submitting:
logger.debug(
"Running node %s on master thread", self.procs[jobid]
)
try:
self.procs[jobid].run()
except Exception:
self._clean_queue(jobid, graph)
self._task_finished_cb(jobid)
self._remove_node_dirs()
else:
tid = self._submit_job(
deepcopy(self.procs[jobid]), updatehash=updatehash
)
if tid is None:
self.proc_done[jobid] = False
self.proc_pending[jobid] = False
else:
self.pending_tasks.insert(0, (tid, jobid))
logger.info(
"Finished submitting: %s ID: %d", self.procs[jobid], jobid
)
else:
break
def _local_hash_check(self, jobid, graph):
if not str2bool(self.procs[jobid].config["execution"]["local_hash_check"]):
return False
try:
cached, updated = self.procs[jobid].is_cached()
except Exception:
logger.warning(
"Error while checking node hash, forcing re-run. "
"Although this error may not prevent the workflow from running, "
"it could indicate a major problem. Please report a new issue "
"at https://github.com/nipy/nipype/issues adding the following "
"information:\n\n\tNode: %s\n\tInterface: %s.%s\n\tTraceback:\n%s",
self.procs[jobid],
self.procs[jobid].interface.__module__,
self.procs[jobid].interface.__class__.__name__,
"\n".join(format_exception(*sys.exc_info())),
)
return False
logger.debug(
'Checking hash "%s" locally: cached=%s, updated=%s.',
self.procs[jobid],
cached,
updated,
)
overwrite = self.procs[jobid].overwrite
always_run = self.procs[jobid].interface.always_run
if (
cached
and updated
and (overwrite is False or overwrite is None and not always_run)
):
logger.debug(
"Skipping cached node %s with ID %s.", self.procs[jobid], jobid
)
try:
self._task_finished_cb(jobid, cached=True)
self._remove_node_dirs()
except Exception:
logger.debug(
"Error skipping cached node %s (%s).\n\n%s",
self.procs[jobid],
jobid,
"\n".join(format_exception(*sys.exc_info())),
)
self._clean_queue(jobid, graph)
self.proc_pending[jobid] = False
return True
return False
def _task_finished_cb(self, jobid, cached=False):
""" Extract outputs and assign to inputs of dependent tasks
This is called when a job is completed.
"""
logger.info(
"[Job %d] %s (%s).",
jobid,
"Cached" if cached else "Completed",
self.procs[jobid],
)
if self._status_callback:
self._status_callback(self.procs[jobid], "end")
# Update job and worker queues
self.proc_pending[jobid] = False
# update the job dependency structure
rowview = self.depidx.getrowview(jobid)
rowview[rowview.nonzero()] = 0
if jobid not in self.mapnodesubids:
self.refidx[self.refidx[:, jobid].nonzero()[0], jobid] = 0
def _generate_dependency_list(self, graph):
""" Generates a dependency list for a list of graphs.
"""
import networkx as nx
self.procs, _ = topological_sort(graph)
try:
self.depidx = nx.to_scipy_sparse_matrix(
graph, nodelist=self.procs, format="lil"
)
except:
self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs)
self.refidx = deepcopy(self.depidx)
self.refidx.astype = np.int
self.proc_done = np.zeros(len(self.procs), dtype=bool)
self.proc_pending = np.zeros(len(self.procs), dtype=bool)
def _remove_node_deps(self, jobid, crashfile, graph):
import networkx as nx
try:
dfs_preorder = nx.dfs_preorder
except AttributeError:
dfs_preorder = nx.dfs_preorder_nodes
subnodes = [s for s in dfs_preorder(graph, self.procs[jobid])]
for node in subnodes:
idx = self.procs.index(node)
self.proc_done[idx] = True
self.proc_pending[idx] = False
return dict(node=self.procs[jobid], dependents=subnodes, crashfile=crashfile)
def _remove_node_dirs(self):
"""Removes directories whose outputs have already been used up
"""
if str2bool(self._config["execution"]["remove_node_directories"]):
indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0]
for idx in indices:
if idx in self.mapnodesubids:
continue
if self.proc_done[idx] and (not self.proc_pending[idx]):
self.refidx[idx, idx] = -1
outdir = self.procs[idx].output_dir()
logger.info(
(
"[node dependencies finished] "
"removing node: %s from directory %s"
)
% (self.procs[idx]._id, outdir)
)
shutil.rmtree(outdir)
class SGELikeBatchManagerBase(DistributedPluginBase):
    """Execute workflow with SGE/OGE/PBS like batch system

    Concrete subclasses provide the queue interaction by implementing
    :meth:`_submit_batchtask` and :meth:`_is_pending`; this base class
    handles batch-script generation and result collection.
    """
    def __init__(self, template, plugin_args=None):
        # ``template`` is the batch-script header text; ``plugin_args`` may
        # override it (inline text or a path to a file whose contents are
        # read) and may supply ``qsub_args`` for the submission command.
        super(SGELikeBatchManagerBase, self).__init__(plugin_args=plugin_args)
        self._template = template
        self._qsub_args = None
        if plugin_args:
            if "template" in plugin_args:
                self._template = plugin_args["template"]
                # A template provided as a filename is read into memory.
                if os.path.isfile(self._template):
                    with open(self._template) as tpl_file:
                        self._template = tpl_file.read()
            if "qsub_args" in plugin_args:
                self._qsub_args = plugin_args["qsub_args"]
        # Maps taskid -> node working directory for submitted jobs.
        self._pending = {}
    def _is_pending(self, taskid):
        """Check if a task is pending in the batch system

        Must be implemented by subclasses; returns a truthy value while
        the batch job is still queued or running.
        """
        raise NotImplementedError
    def _submit_batchtask(self, scriptfile, node):
        """Submit a task to the batch system

        Must be implemented by subclasses; returns a task id usable with
        :meth:`_is_pending` and :meth:`_get_result`.
        """
        raise NotImplementedError
    def _get_result(self, taskid):
        """Collect the result of a finished batch job.

        Returns ``None`` while the job is still pending; otherwise a dict
        with ``result``, ``traceback`` and ``hostname`` keys. A results
        file that never appears within ``job_finished_timeout`` seconds is
        reported as a synthesized traceback rather than raising.
        """
        if taskid not in self._pending:
            raise Exception("Task %d not found" % taskid)
        if self._is_pending(taskid):
            return None
        node_dir = self._pending[taskid]
        # MIT HACK
        # on the pbs system at mit the parent node directory needs to be
        # accessed before internal directories become available. there
        # is a disconnect when the queueing engine knows a job is
        # finished to when the directories become statable.
        t = time()
        timeout = float(self._config["execution"]["job_finished_timeout"])
        timed_out = True
        while (time() - t) < timeout:
            try:
                # pop() raises IndexError until a result_*.pklz file is
                # visible on the (possibly lagging) shared filesystem.
                glob(os.path.join(node_dir, "result_*.pklz")).pop()
                timed_out = False
                break
            except Exception as e:
                logger.debug(e)
                sleep(2)
        if timed_out:
            # No results file ever appeared: synthesize a traceback so the
            # scheduler can report a crash instead of hanging.
            result_data = {"hostname": "unknown", "result": None, "traceback": None}
            results_file = None
            try:
                error_message = (
                    "Job id ({0}) finished or terminated, but "
                    "results file does not exist after ({1}) "
                    "seconds. Batch dir contains crashdump file "
                    "if node raised an exception.\n"
                    "Node working directory: ({2}) ".format(taskid, timeout, node_dir)
                )
                raise IOError(error_message)
            except IOError as e:
                result_data["traceback"] = "\n".join(format_exception(*sys.exc_info()))
        else:
            results_file = glob(os.path.join(node_dir, "result_*.pklz"))[0]
            result_data = load_resultfile(results_file)
        result_out = dict(result=None, traceback=None)
        if isinstance(result_data, dict):
            result_out["result"] = result_data["result"]
            result_out["traceback"] = result_data["traceback"]
            result_out["hostname"] = result_data["hostname"]
            if results_file:
                # A dict result with a results file signals a crash; keep
                # the pickle under a crashstore name for inspection.
                crash_file = os.path.join(node_dir, "crashstore.pklz")
                os.rename(results_file, crash_file)
        else:
            result_out["result"] = result_data
        return result_out
    def _submit_job(self, node, updatehash=False):
        """submit job and return taskid

        Writes a batch script that runs the node's generated python script
        under the current interpreter, then hands the script file to
        :meth:`_submit_batchtask`.
        """
        pyscript = create_pyscript(node, updatehash=updatehash)
        batch_dir, name = os.path.split(pyscript)
        # Strip the file extension to derive the batch-script name.
        name = ".".join(name.split(".")[:-1])
        batchscript = "\n".join((self._template, "%s %s" % (sys.executable, pyscript)))
        batchscriptfile = os.path.join(batch_dir, "batchscript_%s.sh" % name)
        with open(batchscriptfile, "wt") as fp:
            fp.writelines(batchscript)
        return self._submit_batchtask(batchscriptfile, node)
    def _clear_task(self, taskid):
        # Forget a completed task so its directory is no longer polled.
        del self._pending[taskid]
class GraphPluginBase(PluginBase):
    """Base class for plugins that distribute graphs to workflows

    Instead of scheduling jobs one at a time, the whole dependency graph
    is converted to per-node python scripts and handed to the batch
    system in a single submission (see :meth:`_submit_graph`).
    """
    def __init__(self, plugin_args=None):
        if plugin_args and plugin_args.get("status_callback"):
            # Graph submission runs detached from this process, so
            # per-node status callbacks cannot be invoked.
            logger.warning(
                "status_callback not supported for Graph submission" " plugins"
            )
        super(GraphPluginBase, self).__init__(plugin_args=plugin_args)
    def run(self, graph, config, updatehash=False):
        """Create one executable script per node and submit the graph.

        ``dependencies`` maps each node's topological index to the indices
        of its predecessors; subclasses translate this into batch-system
        job dependencies.
        """
        import networkx as nx
        pyfiles = []
        dependencies = {}
        self._config = config
        nodes = list(nx.topological_sort(graph))
        logger.debug("Creating executable python files for each node")
        for idx, node in enumerate(nodes):
            pyfiles.append(
                create_pyscript(node, updatehash=updatehash, store_exception=False)
            )
            dependencies[idx] = [
                nodes.index(prevnode) for prevnode in list(graph.predecessors(node))
            ]
        self._submit_graph(pyfiles, dependencies, nodes)
    def _get_args(self, node, keywords):
        """Resolve plugin settings, allowing per-node overrides.

        For each keyword, starts from the plugin-level value
        (``self._<keyword>``) and merges any node-level override from
        ``node.plugin_args``: with ``overwrite`` set, the node value
        replaces the plugin value, otherwise it is appended. ``template``
        values that are file paths are read into memory first. Returns a
        tuple of resolved values in keyword order.
        """
        values = ()
        for keyword in keywords:
            value = getattr(self, "_" + keyword)
            if keyword == "template" and os.path.isfile(value):
                with open(value) as f:
                    value = f.read()
            if (
                hasattr(node, "plugin_args")
                and isinstance(node.plugin_args, dict)
                and keyword in node.plugin_args
            ):
                if keyword == "template" and os.path.isfile(node.plugin_args[keyword]):
                    with open(node.plugin_args[keyword]) as f:
                        tmp_value = f.read()
                else:
                    tmp_value = node.plugin_args[keyword]
                if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]:
                    value = tmp_value
                else:
                    value += tmp_value
            values += (value,)
        return values
    def _submit_graph(self, pyfiles, dependencies, nodes):
        """
        pyfiles: list of files corresponding to a topological sort
        dependencies: dictionary of dependencies based on the toplogical sort

        Must be implemented by subclasses.
        """
        raise NotImplementedError
    def _get_result(self, taskid):
        """Collect the result for *taskid* from its working directory.

        NOTE(review): ``self._pending`` and ``self._is_pending`` are not
        defined on this class or on :class:`PluginBase`; concrete
        subclasses appear expected to provide them (the body mirrors
        ``SGELikeBatchManagerBase._get_result`` without the timeout
        loop) -- confirm before relying on this method.
        """
        if taskid not in self._pending:
            raise Exception("Task %d not found" % taskid)
        if self._is_pending(taskid):
            return None
        node_dir = self._pending[taskid]
        # pop() raises IndexError if no results file exists yet.
        glob(os.path.join(node_dir, "result_*.pklz")).pop()
        results_file = glob(os.path.join(node_dir, "result_*.pklz"))[0]
        result_data = load_resultfile(results_file)
        result_out = dict(result=None, traceback=None)
        if isinstance(result_data, dict):
            result_out["result"] = result_data["result"]
            result_out["traceback"] = result_data["traceback"]
            result_out["hostname"] = result_data["hostname"]
            if results_file:
                # Preserve the crash pickle under a crashstore name.
                crash_file = os.path.join(node_dir, "crashstore.pklz")
                os.rename(results_file, crash_file)
        else:
            result_out["result"] = result_data
        return result_out
| 38.886293 | 88 | 0.54264 |
import sys
from copy import deepcopy
from glob import glob
import os
import shutil
from time import sleep, time
from traceback import format_exception
import numpy as np
from ... import logging
from ...utils.misc import str2bool
from ..engine.utils import topological_sort, load_resultfile
from ..engine import MapNode
from .tools import report_crash, report_nodes_not_run, create_pyscript
logger = logging.getLogger("nipype.workflow")
class PluginBase(object):
def __init__(self, plugin_args=None):
if plugin_args is None:
plugin_args = {}
self.plugin_args = plugin_args
self._config = None
self._status_callback = plugin_args.get("status_callback")
def run(self, graph, config, updatehash=False):
raise NotImplementedError
class DistributedPluginBase(PluginBase):
def __init__(self, plugin_args=None):
super(DistributedPluginBase, self).__init__(plugin_args=plugin_args)
self.procs = None
self.depidx = None
self.refidx = None
self.mapnodes = None
self.mapnodesubids = None
self.proc_done = None
self.proc_pending = None
self.pending_tasks = []
self.max_jobs = self.plugin_args.get("max_jobs", np.inf)
def _prerun_check(self, graph):
def _postrun_check(self):
def run(self, graph, config, updatehash=False):
logger.info("Running in parallel.")
self._config = config
poll_sleep_secs = float(config["execution"]["poll_sleep_duration"])
self._prerun_check(graph)
self._generate_dependency_list(graph)
self.mapnodes = []
self.mapnodesubids = {}
notrun = []
old_progress_stats = None
old_presub_stats = None
while not np.all(self.proc_done) or np.any(self.proc_pending):
loop_start = time()
y = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1]
progress_stats = (
len(self.proc_done),
np.sum(self.proc_done ^ self.proc_pending),
np.sum(self.proc_done & self.proc_pending),
len(jobs_ready),
len(self.pending_tasks),
np.sum(~self.proc_done & ~self.proc_pending),
)
display_stats = progress_stats != old_progress_stats
if display_stats:
logger.debug(
"Progress: %d jobs, %d/%d/%d "
"(done/running/ready), %d/%d "
"(pending_tasks/waiting).",
*progress_stats
)
old_progress_stats = progress_stats
toappend = []
while self.pending_tasks:
taskid, jobid = self.pending_tasks.pop()
try:
result = self._get_result(taskid)
except Exception:
notrun.append(self._clean_queue(jobid, graph))
else:
if result:
if result["traceback"]:
notrun.append(
self._clean_queue(jobid, graph, result=result)
)
else:
self._task_finished_cb(jobid)
self._remove_node_dirs()
self._clear_task(taskid)
else:
assert self.proc_done[jobid] and self.proc_pending[jobid]
toappend.insert(0, (taskid, jobid))
if toappend:
self.pending_tasks.extend(toappend)
num_jobs = len(self.pending_tasks)
presub_stats = (num_jobs, np.sum(self.proc_done & self.proc_pending))
display_stats = display_stats or presub_stats != old_presub_stats
if display_stats:
logger.debug("Tasks currently running: %d. Pending: %d.", *presub_stats)
old_presub_stats = presub_stats
if num_jobs < self.max_jobs:
self._send_procs_to_workers(updatehash=updatehash, graph=graph)
elif display_stats:
logger.debug("Not submitting (max jobs reached)")
sleep_til = loop_start + poll_sleep_secs
sleep(max(0, sleep_til - time()))
self._remove_node_dirs()
report_nodes_not_run(notrun)
self._postrun_check()
def _get_result(self, taskid):
raise NotImplementedError
def _submit_job(self, node, updatehash=False):
raise NotImplementedError
def _report_crash(self, node, result=None):
tb = None
if result is not None:
node._result = result["result"]
tb = result["traceback"]
node._traceback = tb
return report_crash(node, traceback=tb)
def _clear_task(self, taskid):
raise NotImplementedError
def _clean_queue(self, jobid, graph, result=None):
logger.debug("Clearing %d from queue", jobid)
if self._status_callback:
self._status_callback(self.procs[jobid], "exception")
if result is None:
result = {
"result": None,
"traceback": "\n".join(format_exception(*sys.exc_info())),
}
crashfile = self._report_crash(self.procs[jobid], result=result)
if str2bool(self._config["execution"]["stop_on_first_crash"]):
raise RuntimeError("".join(result["traceback"]))
if jobid in self.mapnodesubids:
self.proc_pending[jobid] = False
self.proc_done[jobid] = True
jobid = self.mapnodesubids[jobid]
self.proc_pending[jobid] = False
self.proc_done[jobid] = True
return self._remove_node_deps(jobid, crashfile, graph)
def _submit_mapnode(self, jobid):
import scipy.sparse as ssp
if jobid in self.mapnodes:
return True
self.mapnodes.append(jobid)
mapnodesubids = self.procs[jobid].get_subnodes()
numnodes = len(mapnodesubids)
logger.debug("Adding %d jobs for mapnode %s", numnodes, self.procs[jobid])
for i in range(numnodes):
self.mapnodesubids[self.depidx.shape[0] + i] = jobid
self.procs.extend(mapnodesubids)
self.depidx = ssp.vstack(
(self.depidx, ssp.lil_matrix(np.zeros((numnodes, self.depidx.shape[1])))),
"lil",
)
self.depidx = ssp.hstack(
(self.depidx, ssp.lil_matrix(np.zeros((self.depidx.shape[0], numnodes)))),
"lil",
)
self.depidx[-numnodes:, jobid] = 1
self.proc_done = np.concatenate(
(self.proc_done, np.zeros(numnodes, dtype=bool))
)
self.proc_pending = np.concatenate(
(self.proc_pending, np.zeros(numnodes, dtype=bool))
)
return False
def _send_procs_to_workers(self, updatehash=False, graph=None):
while not np.all(self.proc_done):
num_jobs = len(self.pending_tasks)
if np.isinf(self.max_jobs):
slots = None
else:
slots = max(0, self.max_jobs - num_jobs)
logger.debug("Slots available: %s", slots)
if (num_jobs >= self.max_jobs) or (slots == 0):
break
np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1]
if len(jobids) > 0:
logger.info(
"Pending[%d] Submitting[%d] jobs Slots[%s]",
num_jobs,
len(jobids[:slots]),
slots or "inf",
)
for jobid in jobids[:slots]:
if isinstance(self.procs[jobid], MapNode):
try:
num_subnodes = self.procs[jobid].num_subnodes()
except Exception:
self._clean_queue(jobid, graph)
self.proc_pending[jobid] = False
continue
if num_subnodes > 1:
submit = self._submit_mapnode(jobid)
if not submit:
continue
self.proc_done[jobid] = True
self.proc_pending[jobid] = True
logger.info("Submitting: %s ID: %d", self.procs[jobid], jobid)
if self._status_callback:
self._status_callback(self.procs[jobid], "start")
if not self._local_hash_check(jobid, graph):
if self.procs[jobid].run_without_submitting:
logger.debug(
"Running node %s on master thread", self.procs[jobid]
)
try:
self.procs[jobid].run()
except Exception:
self._clean_queue(jobid, graph)
self._task_finished_cb(jobid)
self._remove_node_dirs()
else:
tid = self._submit_job(
deepcopy(self.procs[jobid]), updatehash=updatehash
)
if tid is None:
self.proc_done[jobid] = False
self.proc_pending[jobid] = False
else:
self.pending_tasks.insert(0, (tid, jobid))
logger.info(
"Finished submitting: %s ID: %d", self.procs[jobid], jobid
)
else:
break
def _local_hash_check(self, jobid, graph):
if not str2bool(self.procs[jobid].config["execution"]["local_hash_check"]):
return False
try:
cached, updated = self.procs[jobid].is_cached()
except Exception:
logger.warning(
"Error while checking node hash, forcing re-run. "
"Although this error may not prevent the workflow from running, "
"it could indicate a major problem. Please report a new issue "
"at https://github.com/nipy/nipype/issues adding the following "
"information:\n\n\tNode: %s\n\tInterface: %s.%s\n\tTraceback:\n%s",
self.procs[jobid],
self.procs[jobid].interface.__module__,
self.procs[jobid].interface.__class__.__name__,
"\n".join(format_exception(*sys.exc_info())),
)
return False
logger.debug(
'Checking hash "%s" locally: cached=%s, updated=%s.',
self.procs[jobid],
cached,
updated,
)
overwrite = self.procs[jobid].overwrite
always_run = self.procs[jobid].interface.always_run
if (
cached
and updated
and (overwrite is False or overwrite is None and not always_run)
):
logger.debug(
"Skipping cached node %s with ID %s.", self.procs[jobid], jobid
)
try:
self._task_finished_cb(jobid, cached=True)
self._remove_node_dirs()
except Exception:
logger.debug(
"Error skipping cached node %s (%s).\n\n%s",
self.procs[jobid],
jobid,
"\n".join(format_exception(*sys.exc_info())),
)
self._clean_queue(jobid, graph)
self.proc_pending[jobid] = False
return True
return False
def _task_finished_cb(self, jobid, cached=False):
logger.info(
"[Job %d] %s (%s).",
jobid,
"Cached" if cached else "Completed",
self.procs[jobid],
)
if self._status_callback:
self._status_callback(self.procs[jobid], "end")
self.proc_pending[jobid] = False
rowview = self.depidx.getrowview(jobid)
rowview[rowview.nonzero()] = 0
if jobid not in self.mapnodesubids:
self.refidx[self.refidx[:, jobid].nonzero()[0], jobid] = 0
def _generate_dependency_list(self, graph):
import networkx as nx
self.procs, _ = topological_sort(graph)
try:
self.depidx = nx.to_scipy_sparse_matrix(
graph, nodelist=self.procs, format="lil"
)
except:
self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs)
self.refidx = deepcopy(self.depidx)
self.refidx.astype = np.int
self.proc_done = np.zeros(len(self.procs), dtype=bool)
self.proc_pending = np.zeros(len(self.procs), dtype=bool)
def _remove_node_deps(self, jobid, crashfile, graph):
import networkx as nx
try:
dfs_preorder = nx.dfs_preorder
except AttributeError:
dfs_preorder = nx.dfs_preorder_nodes
subnodes = [s for s in dfs_preorder(graph, self.procs[jobid])]
for node in subnodes:
idx = self.procs.index(node)
self.proc_done[idx] = True
self.proc_pending[idx] = False
return dict(node=self.procs[jobid], dependents=subnodes, crashfile=crashfile)
def _remove_node_dirs(self):
if str2bool(self._config["execution"]["remove_node_directories"]):
indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0]
for idx in indices:
if idx in self.mapnodesubids:
continue
if self.proc_done[idx] and (not self.proc_pending[idx]):
self.refidx[idx, idx] = -1
outdir = self.procs[idx].output_dir()
logger.info(
(
"[node dependencies finished] "
"removing node: %s from directory %s"
)
% (self.procs[idx]._id, outdir)
)
shutil.rmtree(outdir)
class SGELikeBatchManagerBase(DistributedPluginBase):
def __init__(self, template, plugin_args=None):
super(SGELikeBatchManagerBase, self).__init__(plugin_args=plugin_args)
self._template = template
self._qsub_args = None
if plugin_args:
if "template" in plugin_args:
self._template = plugin_args["template"]
if os.path.isfile(self._template):
with open(self._template) as tpl_file:
self._template = tpl_file.read()
if "qsub_args" in plugin_args:
self._qsub_args = plugin_args["qsub_args"]
self._pending = {}
def _is_pending(self, taskid):
raise NotImplementedError
def _submit_batchtask(self, scriptfile, node):
raise NotImplementedError
def _get_result(self, taskid):
if taskid not in self._pending:
raise Exception("Task %d not found" % taskid)
if self._is_pending(taskid):
return None
node_dir = self._pending[taskid]
t = time()
timeout = float(self._config["execution"]["job_finished_timeout"])
timed_out = True
while (time() - t) < timeout:
try:
glob(os.path.join(node_dir, "result_*.pklz")).pop()
timed_out = False
break
except Exception as e:
logger.debug(e)
sleep(2)
if timed_out:
result_data = {"hostname": "unknown", "result": None, "traceback": None}
results_file = None
try:
error_message = (
"Job id ({0}) finished or terminated, but "
"results file does not exist after ({1}) "
"seconds. Batch dir contains crashdump file "
"if node raised an exception.\n"
"Node working directory: ({2}) ".format(taskid, timeout, node_dir)
)
raise IOError(error_message)
except IOError as e:
result_data["traceback"] = "\n".join(format_exception(*sys.exc_info()))
else:
results_file = glob(os.path.join(node_dir, "result_*.pklz"))[0]
result_data = load_resultfile(results_file)
result_out = dict(result=None, traceback=None)
if isinstance(result_data, dict):
result_out["result"] = result_data["result"]
result_out["traceback"] = result_data["traceback"]
result_out["hostname"] = result_data["hostname"]
if results_file:
crash_file = os.path.join(node_dir, "crashstore.pklz")
os.rename(results_file, crash_file)
else:
result_out["result"] = result_data
return result_out
def _submit_job(self, node, updatehash=False):
pyscript = create_pyscript(node, updatehash=updatehash)
batch_dir, name = os.path.split(pyscript)
name = ".".join(name.split(".")[:-1])
batchscript = "\n".join((self._template, "%s %s" % (sys.executable, pyscript)))
batchscriptfile = os.path.join(batch_dir, "batchscript_%s.sh" % name)
with open(batchscriptfile, "wt") as fp:
fp.writelines(batchscript)
return self._submit_batchtask(batchscriptfile, node)
def _clear_task(self, taskid):
del self._pending[taskid]
class GraphPluginBase(PluginBase):
def __init__(self, plugin_args=None):
if plugin_args and plugin_args.get("status_callback"):
logger.warning(
"status_callback not supported for Graph submission" " plugins"
)
super(GraphPluginBase, self).__init__(plugin_args=plugin_args)
def run(self, graph, config, updatehash=False):
import networkx as nx
pyfiles = []
dependencies = {}
self._config = config
nodes = list(nx.topological_sort(graph))
logger.debug("Creating executable python files for each node")
for idx, node in enumerate(nodes):
pyfiles.append(
create_pyscript(node, updatehash=updatehash, store_exception=False)
)
dependencies[idx] = [
nodes.index(prevnode) for prevnode in list(graph.predecessors(node))
]
self._submit_graph(pyfiles, dependencies, nodes)
def _get_args(self, node, keywords):
values = ()
for keyword in keywords:
value = getattr(self, "_" + keyword)
if keyword == "template" and os.path.isfile(value):
with open(value) as f:
value = f.read()
if (
hasattr(node, "plugin_args")
and isinstance(node.plugin_args, dict)
and keyword in node.plugin_args
):
if keyword == "template" and os.path.isfile(node.plugin_args[keyword]):
with open(node.plugin_args[keyword]) as f:
tmp_value = f.read()
else:
tmp_value = node.plugin_args[keyword]
if "overwrite" in node.plugin_args and node.plugin_args["overwrite"]:
value = tmp_value
else:
value += tmp_value
values += (value,)
return values
def _submit_graph(self, pyfiles, dependencies, nodes):
raise NotImplementedError
def _get_result(self, taskid):
if taskid not in self._pending:
raise Exception("Task %d not found" % taskid)
if self._is_pending(taskid):
return None
node_dir = self._pending[taskid]
glob(os.path.join(node_dir, "result_*.pklz")).pop()
results_file = glob(os.path.join(node_dir, "result_*.pklz"))[0]
result_data = load_resultfile(results_file)
result_out = dict(result=None, traceback=None)
if isinstance(result_data, dict):
result_out["result"] = result_data["result"]
result_out["traceback"] = result_data["traceback"]
result_out["hostname"] = result_data["hostname"]
if results_file:
crash_file = os.path.join(node_dir, "crashstore.pklz")
os.rename(results_file, crash_file)
else:
result_out["result"] = result_data
return result_out
| true | true |
f7fcb70a2de5d7e73b50c1307eefb511df9c3870 | 14,766 | py | Python | vulnerabilities/package_managers.py | trendkim/Snyk-Demo-vulnerablecode | 22af5701dfbd15deee5da463f0f9b7d1537291ef | [
"Apache-2.0"
] | null | null | null | vulnerabilities/package_managers.py | trendkim/Snyk-Demo-vulnerablecode | 22af5701dfbd15deee5da463f0f9b7d1537291ef | [
"Apache-2.0"
] | 9 | 2021-03-30T14:29:36.000Z | 2021-08-02T17:11:08.000Z | vulnerabilities/package_managers.py | AmitGupta7580/vulnerablecode | e6e6b9050141842568ce0a88e298bd8f7dd11525 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/vulnerablecode/
# The VulnerableCode software is licensed under the Apache License version 2.0.
# Data generated with VulnerableCode require an acknowledgment.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with VulnerableCode and provided on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# VulnerableCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# VulnerableCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/vulnerablecode/ for support and download.
import asyncio
import dataclasses
import xml.etree.ElementTree as ET
from datetime import datetime
from json import JSONDecodeError
from subprocess import check_output
from typing import List
from typing import Mapping
from typing import Set
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientResponseError
from aiohttp.client_exceptions import ServerDisconnectedError
from bs4 import BeautifulSoup
from dateutil import parser as dateparser
@dataclasses.dataclass(frozen=True)
class Version:
value: str
release_date: datetime = None
@dataclasses.dataclass
class VersionResponse:
valid_versions: Set[str] = dataclasses.field(default_factory=set)
newer_versions: Set[str] = dataclasses.field(default_factory=set)
class VersionAPI:
def __init__(self, cache: Mapping[str, Set[str]] = None):
self.cache = cache or {}
def get(self, package_name, until=None) -> Set[str]:
new_versions = set()
valid_versions = set()
for version in self.cache.get(package_name, set()):
if until and version.release_date and version.release_date > until:
new_versions.add(version.value)
continue
valid_versions.add(version.value)
return VersionResponse(valid_versions=valid_versions, newer_versions=new_versions)
async def load_api(self, pkg_set):
async with client_session() as session:
await asyncio.gather(
*[self.fetch(pkg, session) for pkg in pkg_set if pkg not in self.cache]
)
async def fetch(self, pkg, session):
"""
Override this method to fetch the pkg's version in the cache
"""
raise NotImplementedError
def client_session():
return ClientSession(raise_for_status=True, trust_env=True)
class LaunchpadVersionAPI(VersionAPI):
package_type = "deb"
async def fetch(self, pkg, session):
url = (
"https://api.launchpad.net/1.0/ubuntu/+archive/"
"primary?ws.op=getPublishedSources&"
"source_name={}&exact_match=true".format(pkg)
)
try:
all_versions = set()
while True:
response = await session.request(method="GET", url=url)
resp_json = await response.json()
if resp_json["entries"] == []:
self.cache[pkg] = {}
break
for release in resp_json["entries"]:
all_versions.add(
Version(
value=release["source_package_version"].replace("0:", ""),
release_date=release["date_published"],
)
)
if resp_json.get("next_collection_link"):
url = resp_json["next_collection_link"]
else:
break
self.cache[pkg] = all_versions
except (ClientResponseError, asyncio.exceptions.TimeoutError, ServerDisconnectedError):
self.cache[pkg] = {}
class PypiVersionAPI(VersionAPI):
package_type = "pypi"
async def fetch(self, pkg, session):
url = f"https://pypi.org/pypi/{pkg}/json"
versions = set()
try:
response = await session.request(method="GET", url=url)
response = await response.json()
for version, download_items in response["releases"].items():
if download_items:
latest_download_item = max(
download_items,
key=lambda download_item: dateparser.parse(
download_item["upload_time_iso_8601"]
),
)
versions.add(
Version(
value=version,
release_date=dateparser.parse(
latest_download_item["upload_time_iso_8601"]
),
)
)
except ClientResponseError:
# PYPI removed this package.
# https://www.zdnet.com/article/twelve-malicious-python-libraries-found-and-removed-from-pypi/ # nopep8
pass
self.cache[pkg] = versions
class CratesVersionAPI(VersionAPI):
package_type = "cargo"
async def fetch(self, pkg, session):
url = f"https://crates.io/api/v1/crates/{pkg}"
response = await session.request(method="GET", url=url)
response = await response.json()
versions = set()
for version_info in response["versions"]:
versions.add(
Version(
value=version_info["num"],
release_date=dateparser.parse(version_info["updated_at"]),
)
)
self.cache[pkg] = versions
class RubyVersionAPI(VersionAPI):
package_type = "gem"
async def fetch(self, pkg, session):
url = f"https://rubygems.org/api/v1/versions/{pkg}.json"
versions = set()
try:
response = await session.request(method="GET", url=url)
response = await response.json()
for release in response:
versions.add(
Version(
value=release["number"],
release_date=dateparser.parse(release["created_at"]),
)
)
except (ClientResponseError, JSONDecodeError):
pass
self.cache[pkg] = versions
class NpmVersionAPI(VersionAPI):
package_type = "npm"
async def fetch(self, pkg, session):
url = f"https://registry.npmjs.org/{pkg}"
versions = set()
try:
response = await session.request(method="GET", url=url)
response = await response.json()
for version in response.get("versions", []):
release_date = response.get("time", {}).get(version)
if release_date:
release_date = dateparser.parse(release_date)
versions.add(Version(value=version, release_date=release_date))
else:
versions.add(Version(value=version, release_date=None))
except ClientResponseError:
pass
self.cache[pkg] = versions
class DebianVersionAPI(VersionAPI):
package_type = "deb"
async def load_api(self, pkg_set):
# Need to set the headers, because the Debian API upgrades
# the connection to HTTP 2.0
async with ClientSession(
raise_for_status=True, headers={"Connection": "keep-alive"}
) as session:
await asyncio.gather(
*[self.fetch(pkg, session) for pkg in pkg_set if pkg not in self.cache]
)
async def fetch(self, pkg, session, retry_count=5):
url = "https://sources.debian.org/api/src/{}".format(pkg)
try:
all_versions = set()
response = await session.request(method="GET", url=url)
resp_json = await response.json()
if resp_json.get("error") or not resp_json.get("versions"):
self.cache[pkg] = {}
return
for release in resp_json["versions"]:
all_versions.add(Version(value=release["version"].replace("0:", "")))
self.cache[pkg] = all_versions
# TODO : Handle ServerDisconnectedError by using some sort of
# retry mechanism
except (ClientResponseError, asyncio.exceptions.TimeoutError, ServerDisconnectedError):
self.cache[pkg] = {}
class MavenVersionAPI(VersionAPI):
package_type = "maven"
async def fetch(self, pkg, session) -> None:
artifact_comps = pkg.split(":")
endpoint = self.artifact_url(artifact_comps)
try:
resp = await session.request(method="GET", url=endpoint)
resp = await resp.read()
except ClientResponseError:
self.cache[pkg] = set()
return
xml_resp = ET.ElementTree(ET.fromstring(resp.decode("utf-8")))
self.cache[pkg] = self.extract_versions(xml_resp)
@staticmethod
def artifact_url(artifact_comps: List[str]) -> str:
base_url = "https://repo1.maven.org/maven2/{}"
try:
group_id, artifact_id = artifact_comps
except ValueError:
if len(artifact_comps) == 1:
group_id = artifact_comps[0]
artifact_id = artifact_comps[0].split(".")[-1]
elif len(artifact_comps) == 3:
group_id, artifact_id = list(dict.fromkeys(artifact_comps))
else:
raise
group_url = group_id.replace(".", "/")
suffix = group_url + "/" + artifact_id + "/" + "maven-metadata.xml"
endpoint = base_url.format(suffix)
return endpoint
@staticmethod
def extract_versions(xml_response: ET.ElementTree) -> Set[str]:
all_versions = set()
for child in xml_response.getroot().iter():
if child.tag == "version":
all_versions.add(Version(child.text))
return all_versions
class NugetVersionAPI(VersionAPI):
package_type = "nuget"
async def fetch(self, pkg, session) -> None:
endpoint = self.nuget_url(pkg)
resp = await session.request(method="GET", url=endpoint)
resp = await resp.json()
self.cache[pkg] = self.extract_versions(resp)
@staticmethod
def nuget_url(pkg_name: str) -> str:
pkg_name = pkg_name.lower().strip()
base_url = "https://api.nuget.org/v3/registration5-semver1/{}/index.json"
return base_url.format(pkg_name)
@staticmethod
def extract_versions(resp: dict) -> Set[str]:
all_versions = set()
try:
for entry_group in resp["items"]:
for entry in entry_group["items"]:
all_versions.add(
Version(
value=entry["catalogEntry"]["version"],
release_date=dateparser.parse(entry["catalogEntry"]["published"]),
)
)
# FIXME: json response for YamlDotNet.Signed triggers this exception.
# Some packages with many versions give a response of a list of endpoints.
# In such cases rather, we should collect data from those endpoints.
except KeyError:
pass
return all_versions
class ComposerVersionAPI(VersionAPI):
package_type = "composer"
async def fetch(self, pkg, session) -> None:
endpoint = self.composer_url(pkg)
if endpoint:
resp = await session.request(method="GET", url=endpoint)
resp = await resp.json()
self.cache[pkg] = self.extract_versions(resp, pkg)
@staticmethod
def composer_url(pkg_name: str) -> str:
try:
vendor, name = pkg_name.split("/")
except ValueError:
# TODO Log this
return
return f"https://repo.packagist.org/p/{vendor}/{name}.json"
@staticmethod
def extract_versions(resp: dict, pkg_name: str) -> Set[str]:
all_versions = set()
for version in resp["packages"][pkg_name]:
if "dev" in version:
continue
# This if statement ensures, that all_versions contains only released versions
# See https://github.com/composer/composer/blob/44a4429978d1b3c6223277b875762b2930e83e8c/doc/articles/versions.md#tags # nopep8
# for explanation of removing 'v'
all_versions.add(
Version(
value=version.lstrip("v"),
release_date=dateparser.parse(resp["packages"][pkg_name][version]["time"]),
)
)
return all_versions
class GitHubTagsAPI(VersionAPI):
package_type = "github"
async def fetch(self, owner_repo: str, session) -> None:
"""
owner_repo is a string of format "{repo_owner}/{repo_name}"
Example value of owner_repo = "nexB/scancode-toolkit"
"""
self.cache[owner_repo] = set()
endpoint = f"https://github.com/{owner_repo}"
tags_xml = check_output(["svn", "ls", "--xml", f"{endpoint}/tags"], text=True)
elements = ET.fromstring(tags_xml)
for entry in elements.iter("entry"):
name = entry.find("name").text
release_date = dateparser.parse(entry.find("commit/date").text)
self.cache[owner_repo].add(Version(value=name, release_date=release_date))
class HexVersionAPI(VersionAPI):
async def fetch(self, pkg, session):
url = f"https://hex.pm/api/packages/{pkg}"
versions = set()
try:
response = await session.request(method="GET", url=url)
response = await response.json()
for release in response["releases"]:
versions.add(
Version(
value=release["version"],
release_date=dateparser.parse(release["inserted_at"]),
)
)
except (ClientResponseError, JSONDecodeError):
pass
self.cache[pkg] = versions
| 35.580723 | 140 | 0.594542 |
import asyncio
import dataclasses
import xml.etree.ElementTree as ET
from datetime import datetime
from json import JSONDecodeError
from subprocess import check_output
from typing import List
from typing import Mapping
from typing import Set
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientResponseError
from aiohttp.client_exceptions import ServerDisconnectedError
from bs4 import BeautifulSoup
from dateutil import parser as dateparser
@dataclasses.dataclass(frozen=True)
class Version:
    """A single package version; frozen so instances are hashable set members."""

    value: str  # the version string, e.g. "1.2.3"
    release_date: datetime = None  # None when the upstream API provides no date
@dataclasses.dataclass
class VersionResponse:
    """Version strings partitioned by an "until" cutoff (see VersionAPI.get)."""

    valid_versions: Set[str] = dataclasses.field(default_factory=set)  # released on/before the cutoff
    newer_versions: Set[str] = dataclasses.field(default_factory=set)  # released after the cutoff
class VersionAPI:
    """Base class that fetches and caches known versions, keyed by package name."""

    def __init__(self, cache: Mapping[str, Set[str]] = None):
        self.cache = cache or {}

    def get(self, package_name, until=None) -> Set[str]:
        """Partition cached versions into those released by `until` vs. newer ones."""
        older = set()
        newer = set()
        for version in self.cache.get(package_name, set()):
            if until and version.release_date and version.release_date > until:
                newer.add(version.value)
            else:
                older.add(version.value)
        return VersionResponse(valid_versions=older, newer_versions=newer)

    async def load_api(self, pkg_set):
        """Concurrently fetch every package in pkg_set that is not cached yet."""
        async with client_session() as session:
            pending = [pkg for pkg in pkg_set if pkg not in self.cache]
            await asyncio.gather(*[self.fetch(pkg, session) for pkg in pending])

    async def fetch(self, pkg, session):
        # Subclasses implement the per-registry fetch and fill self.cache.
        raise NotImplementedError
def client_session():
    """Create an aiohttp session that raises on HTTP errors and honors proxy env vars."""
    return ClientSession(trust_env=True, raise_for_status=True)
class LaunchpadVersionAPI(VersionAPI):
    """Version fetcher backed by the Launchpad archive API (Ubuntu primary)."""

    package_type = "deb"

    async def fetch(self, pkg, session):
        """Collect all published versions of pkg, following pagination links."""
        url = (
            "https://api.launchpad.net/1.0/ubuntu/+archive/"
            "primary?ws.op=getPublishedSources&"
            "source_name={}&exact_match=true".format(pkg)
        )
        try:
            all_versions = set()
            while True:
                response = await session.request(method="GET", url=url)
                resp_json = await response.json()
                if resp_json["entries"] == []:
                    break
                for release in resp_json["entries"]:
                    # Launchpad reports dates as ISO strings (null while a
                    # publication is pending); parse them so release_date is a
                    # datetime like in every other *VersionAPI — otherwise
                    # VersionAPI.get(until=...) compares str > datetime and
                    # raises TypeError.
                    raw_date = release.get("date_published")
                    release_date = (
                        dateparser.parse(raw_date) if isinstance(raw_date, str) else None
                    )
                    all_versions.add(
                        Version(
                            # Strip the Debian epoch prefix from version strings.
                            value=release["source_package_version"].replace("0:", ""),
                            release_date=release_date,
                        )
                    )
                next_page = resp_json.get("next_collection_link")
                if not next_page:
                    break
                url = next_page
            self.cache[pkg] = all_versions
        except (ClientResponseError, asyncio.exceptions.TimeoutError, ServerDisconnectedError):
            # Cache an empty set (not {}) so cache values are uniformly sets.
            self.cache[pkg] = set()
class PypiVersionAPI(VersionAPI):
    """Version fetcher backed by the PyPI JSON API."""

    package_type = "pypi"

    async def fetch(self, pkg, session):
        endpoint = f"https://pypi.org/pypi/{pkg}/json"
        found = set()
        try:
            response = await session.request(method="GET", url=endpoint)
            data = await response.json()
            for version, uploads in data["releases"].items():
                if not uploads:
                    continue
                # A release may carry several files (sdist, wheels); the
                # newest upload's timestamp is taken as the release date.
                upload_time = max(
                    dateparser.parse(u["upload_time_iso_8601"]) for u in uploads
                )
                found.add(Version(value=version, release_date=upload_time))
        except ClientResponseError:
            pass
        self.cache[pkg] = found
class CratesVersionAPI(VersionAPI):
    """Version fetcher backed by the crates.io registry API."""

    package_type = "cargo"

    async def fetch(self, pkg, session):
        response = await session.request(
            method="GET", url=f"https://crates.io/api/v1/crates/{pkg}"
        )
        data = await response.json()
        self.cache[pkg] = {
            Version(
                value=entry["num"],
                release_date=dateparser.parse(entry["updated_at"]),
            )
            for entry in data["versions"]
        }
class RubyVersionAPI(VersionAPI):
    """Version fetcher backed by the rubygems.org API."""

    package_type = "gem"

    async def fetch(self, pkg, session):
        endpoint = f"https://rubygems.org/api/v1/versions/{pkg}.json"
        found = set()
        try:
            resp = await session.request(method="GET", url=endpoint)
            releases = await resp.json()
            found = {
                Version(
                    value=rel["number"],
                    release_date=dateparser.parse(rel["created_at"]),
                )
                for rel in releases
            }
        except (ClientResponseError, JSONDecodeError):
            pass
        self.cache[pkg] = found
class NpmVersionAPI(VersionAPI):
    """Version fetcher backed by the npm registry."""

    package_type = "npm"

    async def fetch(self, pkg, session):
        endpoint = f"https://registry.npmjs.org/{pkg}"
        found = set()
        try:
            resp = await session.request(method="GET", url=endpoint)
            data = await resp.json()
            # The "time" map carries per-version publish timestamps; a
            # version missing from it gets release_date=None.
            timestamps = data.get("time", {})
            for ver in data.get("versions", []):
                raw_date = timestamps.get(ver)
                parsed = dateparser.parse(raw_date) if raw_date else None
                found.add(Version(value=ver, release_date=parsed))
        except ClientResponseError:
            pass
        self.cache[pkg] = found
class DebianVersionAPI(VersionAPI):
    """Version fetcher backed by the sources.debian.org API."""

    package_type = "deb"

    async def load_api(self, pkg_set):
        # Overrides the base implementation to use its own session with an
        # explicit "Connection: keep-alive" header.
        async with ClientSession(
            raise_for_status=True, headers={"Connection": "keep-alive"}
        ) as session:
            await asyncio.gather(
                *[self.fetch(pkg, session) for pkg in pkg_set if pkg not in self.cache]
            )

    async def fetch(self, pkg, session, retry_count=5):
        """Fetch all versions of pkg; on error or unknown package cache an empty set.

        retry_count is part of the public signature but currently unused.
        """
        url = "https://sources.debian.org/api/src/{}".format(pkg)
        try:
            all_versions = set()
            response = await session.request(method="GET", url=url)
            resp_json = await response.json()
            if resp_json.get("error") or not resp_json.get("versions"):
                # Cache an empty set (not {}) so cache values are uniformly
                # sets, matching every other *VersionAPI.
                self.cache[pkg] = set()
                return
            for release in resp_json["versions"]:
                # Strip the Debian epoch prefix ("0:") from version strings.
                all_versions.add(Version(value=release["version"].replace("0:", "")))
            self.cache[pkg] = all_versions
        except (ClientResponseError, asyncio.exceptions.TimeoutError, ServerDisconnectedError):
            self.cache[pkg] = set()
class MavenVersionAPI(VersionAPI):
    """Version fetcher backed by Maven Central's maven-metadata.xml files."""

    package_type = "maven"

    async def fetch(self, pkg, session) -> None:
        coords = pkg.split(":")
        url = self.artifact_url(coords)
        try:
            resp = await session.request(method="GET", url=url)
            body = await resp.read()
        except ClientResponseError:
            self.cache[pkg] = set()
            return
        metadata = ET.ElementTree(ET.fromstring(body.decode("utf-8")))
        self.cache[pkg] = self.extract_versions(metadata)

    @staticmethod
    def artifact_url(artifact_comps: List[str]) -> str:
        """Build the maven-metadata.xml URL from [group_id, artifact_id] parts."""
        base_url = "https://repo1.maven.org/maven2/{}"
        try:
            group_id, artifact_id = artifact_comps
        except ValueError:
            if len(artifact_comps) == 1:
                # Single component: treat it as the group id and use its last
                # dotted segment as the artifact id.
                group_id = artifact_comps[0]
                artifact_id = group_id.split(".")[-1]
            elif len(artifact_comps) == 3:
                # Deduplicate (order-preserving); must leave exactly two parts.
                group_id, artifact_id = list(dict.fromkeys(artifact_comps))
            else:
                raise
        path = "/".join((group_id.replace(".", "/"), artifact_id, "maven-metadata.xml"))
        return base_url.format(path)

    @staticmethod
    def extract_versions(xml_response: ET.ElementTree) -> Set[str]:
        """Collect every <version> element's text as a Version (no dates here)."""
        return {
            Version(node.text)
            for node in xml_response.getroot().iter()
            if node.tag == "version"
        }
class NugetVersionAPI(VersionAPI):
    """Version fetcher backed by the NuGet registration API."""

    package_type = "nuget"

    async def fetch(self, pkg, session) -> None:
        resp = await session.request(method="GET", url=self.nuget_url(pkg))
        data = await resp.json()
        self.cache[pkg] = self.extract_versions(data)

    @staticmethod
    def nuget_url(pkg_name: str) -> str:
        """NuGet registration index URL; package ids are matched lowercase."""
        normalized = pkg_name.lower().strip()
        base_url = "https://api.nuget.org/v3/registration5-semver1/{}/index.json"
        return base_url.format(normalized)

    @staticmethod
    def extract_versions(resp: dict) -> Set[str]:
        """Walk the registration pages; malformed payloads yield what was read."""
        found = set()
        try:
            for page in resp["items"]:
                for entry in page["items"]:
                    catalog = entry["catalogEntry"]
                    found.add(
                        Version(
                            value=catalog["version"],
                            release_date=dateparser.parse(catalog["published"]),
                        )
                    )
        except KeyError:
            pass
        return found
class ComposerVersionAPI(VersionAPI):
    """Version fetcher backed by the packagist.org (Composer) API."""

    package_type = "composer"

    async def fetch(self, pkg, session) -> None:
        url = self.composer_url(pkg)
        if url:
            resp = await session.request(method="GET", url=url)
            data = await resp.json()
            self.cache[pkg] = self.extract_versions(data, pkg)

    @staticmethod
    def composer_url(pkg_name: str) -> str:
        """Return the packagist metadata URL, or None for a malformed name."""
        try:
            vendor, name = pkg_name.split("/")
        except ValueError:
            # Composer package names must be "vendor/name"; skip anything else.
            return
        return f"https://repo.packagist.org/p/{vendor}/{name}.json"

    @staticmethod
    def extract_versions(resp: dict, pkg_name: str) -> Set[str]:
        """Return released Versions for pkg_name, skipping "dev" versions."""
        all_versions = set()
        for version, meta in resp["packages"][pkg_name].items():
            if "dev" in version:
                continue
            # Composer tags are often prefixed with a single "v" ("v1.2.3").
            # Strip only that prefix: str.lstrip("v") strips *every* leading
            # "v" character and would mangle unusual version strings.
            value = version[1:] if version.startswith("v") else version
            all_versions.add(
                Version(value=value, release_date=dateparser.parse(meta["time"]))
            )
        return all_versions
class GitHubTagsAPI(VersionAPI):
    """Version fetcher that lists a GitHub repository's tags."""

    package_type = "github"

    async def fetch(self, owner_repo: str, session) -> None:
        """owner_repo has the form "{repo_owner}/{repo_name}", e.g. "nexB/scancode-toolkit"."""
        self.cache[owner_repo] = set()
        endpoint = f"https://github.com/{owner_repo}"
        # GitHub exposes tags over its Subversion bridge; "svn ls --xml"
        # yields one <entry> per tag with the tagging commit's date.
        listing = check_output(["svn", "ls", "--xml", f"{endpoint}/tags"], text=True)
        for node in ET.fromstring(listing).iter("entry"):
            self.cache[owner_repo].add(
                Version(
                    value=node.find("name").text,
                    release_date=dateparser.parse(node.find("commit/date").text),
                )
            )
class HexVersionAPI(VersionAPI):
    """Version fetcher backed by the hex.pm API (Elixir/Erlang packages)."""

    # NOTE(review): unlike the sibling fetchers this class sets no
    # package_type attribute — confirm whether that is intentional.
    async def fetch(self, pkg, session):
        url = f"https://hex.pm/api/packages/{pkg}"
        versions = set()
        try:
            response = await session.request(method="GET", url=url)
            response = await response.json()
            for release in response["releases"]:
                versions.add(
                    Version(
                        value=release["version"],
                        release_date=dateparser.parse(release["inserted_at"]),
                    )
                )
        except (ClientResponseError, JSONDecodeError):
            # Best effort: unknown packages or bad payloads cache an empty set.
            pass
        self.cache[pkg] = versions
| true | true |
f7fcb768c78d8aba3efd428eddbaf486e980f8f3 | 4,432 | py | Python | zynthesiser/sygus_spec.py | thomasfsteeples/zynthesiser | 1bb2ff0f6fb3386dc2522db6db727b70236f0842 | [
"MIT"
] | null | null | null | zynthesiser/sygus_spec.py | thomasfsteeples/zynthesiser | 1bb2ff0f6fb3386dc2522db6db727b70236f0842 | [
"MIT"
] | null | null | null | zynthesiser/sygus_spec.py | thomasfsteeples/zynthesiser | 1bb2ff0f6fb3386dc2522db6db727b70236f0842 | [
"MIT"
] | null | null | null | import z3
from lark import Lark, Transformer
import zynthesiser.util as util
class SygusSpec:
def __init__(self, spec: str):
with open('sygus.lark') as f:
sygus_grammar = f.read()
sygus_parser = Lark(sygus_grammar, start="sygus", parser='lalr')
sygus_parser.parse(spec)
self.logic = text_spec.logic
self.variables = text_spec.variables
self.z3_variables = self._initialise_z3_variables(text_spec.variables)
self.synth_funcs = self._initialise_synth_funcs(text_spec.synth_funcs)
self.uninterpreted_funcs = self._initialise_uninterpreted_funcs(
text_spec.uninterpreted_funcs)
self.macros = self._initialise_macros(text_spec.macros)
self.goal = self._initialise_goal(text_spec)
def _initialise_z3_variables(self, text_vars):
z3_variables = []
for var in text_vars:
z3_variables.append(z3.Const(var,
util.str_to_sort(text_vars[var])))
return z3_variables
def _initialise_synth_funcs(self, text_synth_funcs):
synth_funcs = {}
for synth_func in text_synth_funcs:
current_func = text_synth_funcs[synth_func]
input_sorts = list(
map(util.str_to_sort, current_func["inputs"].values()))
output_sort = util.str_to_sort(current_func["output_sort"])
inputs = []
for i, text_input in enumerate(current_func["inputs"]):
inputs.append(z3.Const(text_input, input_sorts[i]))
synth_func_declaration = z3.Function(synth_func, *input_sorts,
output_sort)
synth_funcs[synth_func] = {
"decl": synth_func_declaration,
"inputs": current_func["inputs"],
"z3_inputs": inputs,
"output_sort": current_func["output_sort"],
"z3_output_sort": output_sort,
"grammar": current_func["grammar"],
}
return synth_funcs
def _initialise_uninterpreted_funcs(self, text_u_funcs):
u_funcs = {}
for u_func in text_u_funcs:
current_func = text_u_funcs[u_func]
sorts = list(map(util.str_to_sort, current_func["sorts"]))
u_funcs[u_func] = {"decl": z3.Function(u_func, *sorts)}
return u_funcs
def _initialise_macros(self, text_macros):
macros = {}
for macro in text_macros:
current_macro = text_macros[macro]
input_sorts = list(
map(util.str_to_sort, current_macro["inputs"].values()))
output_sort = util.str_to_sort(current_macro["output_sort"])
inputs = []
for i, text_input in enumerate(current_macro["inputs"]):
inputs.append(z3.Const(text_input, input_sorts[i]))
macro_declaration = z3.Function(macro, *input_sorts, output_sort)
macro_definition = (current_macro["definition"].replace(
"(", "").replace(")", ""))
macro_definition = " ".join(macro_definition.split())
macros[macro] = {
"decl": macro_declaration,
"inputs": current_macro["inputs"],
"z3_inputs": inputs,
"output_sort": current_macro["output_sort"],
"z3_output_sort": output_sort,
"definition": macro_definition,
}
return macros
def _initialise_goal(self, text_spec):
constraints = []
funcs = {**self.synth_funcs, **self.uninterpreted_funcs, **self.macros}
for original_constraint in text_spec.constraints:
constraint_lexer = TermLexer(
antlr4.InputStream(original_constraint))
constraint_stream = antlr4.CommonTokenStream(constraint_lexer)
constraint_parser = TermParser(constraint_stream)
constraint_tree = constraint_parser.term()
constraint_extractor = Constraint_Extractor(
self.logic, self.variables, funcs)
constraint = constraint_extractor.visit(constraint_tree)
constraints.append(constraint)
goal = z3.BoolVal(True)
for constraint in constraints:
goal = z3.And(goal, constraint)
goal = z3.simplify(goal)
return z3.simplify(goal)
| 35.456 | 79 | 0.602662 | import z3
from lark import Lark, Transformer
import zynthesiser.util as util
class SygusSpec:
    """In-memory representation of a SyGuS problem, lowered onto z3 objects.

    NOTE(review): this class looks mid-refactor from an ANTLR front end to
    Lark: __init__ parses with Lark but discards the result, while the
    attribute reads below reference an undefined name ``text_spec`` and
    _initialise_goal still uses ``antlr4``/``TermLexer``/``TermParser``/
    ``Constraint_Extractor``, none of which are imported in this module —
    confirm before relying on this code.
    """

    def __init__(self, spec: str):
        """Parse *spec* (SyGuS text) and build z3 declarations and the goal."""
        # The grammar is loaded from the current working directory;
        # presumably the tool is run from the project root — TODO confirm.
        with open('sygus.lark') as f:
            sygus_grammar = f.read()
        sygus_parser = Lark(sygus_grammar, start="sygus", parser='lalr')
        # NOTE(review): the parse result is discarded; ``text_spec`` below
        # is never assigned in this scope (NameError at runtime).
        sygus_parser.parse(spec)
        self.logic = text_spec.logic
        self.variables = text_spec.variables
        self.z3_variables = self._initialise_z3_variables(text_spec.variables)
        self.synth_funcs = self._initialise_synth_funcs(text_spec.synth_funcs)
        self.uninterpreted_funcs = self._initialise_uninterpreted_funcs(
            text_spec.uninterpreted_funcs)
        self.macros = self._initialise_macros(text_spec.macros)
        self.goal = self._initialise_goal(text_spec)

    def _initialise_z3_variables(self, text_vars):
        """Create one z3 constant per declared variable (name -> sort string)."""
        z3_variables = []
        for var in text_vars:
            z3_variables.append(z3.Const(var,
                                         util.str_to_sort(text_vars[var])))
        return z3_variables

    def _initialise_synth_funcs(self, text_synth_funcs):
        """Build z3 declarations and metadata for each function to synthesise."""
        synth_funcs = {}
        for synth_func in text_synth_funcs:
            current_func = text_synth_funcs[synth_func]
            input_sorts = list(
                map(util.str_to_sort, current_func["inputs"].values()))
            output_sort = util.str_to_sort(current_func["output_sort"])
            inputs = []
            # Relies on dicts preserving insertion order so the i-th input
            # name pairs with the i-th input sort.
            for i, text_input in enumerate(current_func["inputs"]):
                inputs.append(z3.Const(text_input, input_sorts[i]))
            synth_func_declaration = z3.Function(synth_func, *input_sorts,
                                                 output_sort)
            synth_funcs[synth_func] = {
                "decl": synth_func_declaration,
                "inputs": current_func["inputs"],
                "z3_inputs": inputs,
                "output_sort": current_func["output_sort"],
                "z3_output_sort": output_sort,
                "grammar": current_func["grammar"],
            }
        return synth_funcs

    def _initialise_uninterpreted_funcs(self, text_u_funcs):
        """Declare each uninterpreted function from its list of sort strings."""
        u_funcs = {}
        for u_func in text_u_funcs:
            current_func = text_u_funcs[u_func]
            sorts = list(map(util.str_to_sort, current_func["sorts"]))
            u_funcs[u_func] = {"decl": z3.Function(u_func, *sorts)}
        return u_funcs

    def _initialise_macros(self, text_macros):
        """Declare defined functions (macros) and normalise their body text."""
        macros = {}
        for macro in text_macros:
            current_macro = text_macros[macro]
            input_sorts = list(
                map(util.str_to_sort, current_macro["inputs"].values()))
            output_sort = util.str_to_sort(current_macro["output_sort"])
            inputs = []
            for i, text_input in enumerate(current_macro["inputs"]):
                inputs.append(z3.Const(text_input, input_sorts[i]))
            macro_declaration = z3.Function(macro, *input_sorts, output_sort)
            # Flatten the s-expression body: drop parentheses and collapse
            # all runs of whitespace to single spaces.
            macro_definition = (current_macro["definition"].replace(
                "(", "").replace(")", ""))
            macro_definition = " ".join(macro_definition.split())
            macros[macro] = {
                "decl": macro_declaration,
                "inputs": current_macro["inputs"],
                "z3_inputs": inputs,
                "output_sort": current_macro["output_sort"],
                "z3_output_sort": output_sort,
                "definition": macro_definition,
            }
        return macros

    def _initialise_goal(self, text_spec):
        """Conjoin all parsed constraints into a single simplified z3 goal."""
        constraints = []
        funcs = {**self.synth_funcs, **self.uninterpreted_funcs, **self.macros}
        # NOTE(review): this is still the old ANTLR pipeline; ``antlr4``,
        # TermLexer, TermParser and Constraint_Extractor are not defined or
        # imported in this module.
        for original_constraint in text_spec.constraints:
            constraint_lexer = TermLexer(
                antlr4.InputStream(original_constraint))
            constraint_stream = antlr4.CommonTokenStream(constraint_lexer)
            constraint_parser = TermParser(constraint_stream)
            constraint_tree = constraint_parser.term()
            constraint_extractor = Constraint_Extractor(
                self.logic, self.variables, funcs)
            constraint = constraint_extractor.visit(constraint_tree)
            constraints.append(constraint)
        goal = z3.BoolVal(True)
        for constraint in constraints:
            goal = z3.And(goal, constraint)
        # The double simplify below is redundant but harmless.
        goal = z3.simplify(goal)
        return z3.simplify(goal)
f7fcb7ae4a29b34594bca55ad6c662e8dce851ed | 34,225 | py | Python | tensorboard/loader.py | svpcoder/tensorboard | 70753476c7aad3a5cb3eb4047994af1bcf3524b6 | [
"Apache-2.0"
] | 4 | 2018-02-16T16:12:21.000Z | 2020-08-19T19:53:57.000Z | tensorboard/loader.py | svpcoder/tensorboard | 70753476c7aad3a5cb3eb4047994af1bcf3524b6 | [
"Apache-2.0"
] | null | null | null | tensorboard/loader.py | svpcoder/tensorboard | 70753476c7aad3a5cb3eb4047994af1bcf3524b6 | [
"Apache-2.0"
] | 2 | 2020-06-02T22:35:20.000Z | 2020-10-26T06:35:58.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorBoard data ingestion module.
WARNING: This module is currently EXPERIMENTAL.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import locale
import logging
import os
import re
import sys
import time
import threading
import types # pylint: disable=unused-import
import six
import tensorflow as tf
from tensorboard import db
from tensorboard import util
class Record(collections.namedtuple('Record', ('record', 'offset'))):
  """A single record read from a file, plus the position after reading it.

  Fields:
    record: The raw byte string payload of the record.
    offset: The byte offset in the file *after* this record was read.

  :type record: str
  :type offset: int
  """

  __slots__ = ()  # Prevents per-instance dicts, keeping pure tuple semantics.
@util.closeable
@six.python_2_unicode_compatible
class RecordReader(object):
  """Pythonic veneer around PyRecordReader."""

  def __init__(self, path, start_offset=0):
    """Creates new instance.

    Args:
      path: Path of file. This can be on a remote file system if the
          TensorFlow build supports it.
      start_offset: Byte offset to seek in file once it's opened.

    :type path: str
    :type start_offset: int
    """
    self.path = tf.compat.as_text(path)
    self._offset = start_offset
    self._size = -1  # last observed file size; -1 until get_size() succeeds
    self._reader = None  # type: tf.pywrap_tensorflow.PyRecordReader
    self._is_closed = False
    # NOTE(review): this lock is created but not used anywhere in this
    # class — confirm whether it is vestigial or used by a subclass.
    self._lock = threading.Lock()

  def get_size(self):
    """Returns byte length of file.

    This is guaranteed to return a number greater than or equal to the
    offset of the last record returned by get_next_record().

    This method can be called after the instance has been closed.

    Raises:
      IOError: If file has shrunk from last read offset, or start
          offset, or last read size.

    :rtype: int
    """
    size = tf.gfile.Stat(self.path).length
    # The file must never shrink below either the read position or the
    # previously observed size; that would indicate truncation.
    minimum = max(self._offset, self._size)
    if size < minimum:
      raise IOError('File shrunk: %d < %d: %s' % (size, minimum, self.path))
    self._size = size
    return size

  def get_next_record(self):
    """Reads record from file.

    Returns:
      A Record or None if no more were available.

    Raises:
      IOError: On open or read error, or if close was called.
      tf.errors.DataLossError: If corruption was encountered in the
          records file.

    :rtype: Record
    """
    if self._is_closed:
      raise IOError('%s is closed' % self)
    if self._reader is None:
      # The underlying reader is opened lazily on first read.
      self._reader = self._open()
    try:
      with tf.errors.raise_exception_on_not_ok_status() as status:
        self._reader.GetNext(status)
    except tf.errors.OutOfRangeError:
      # We ignore partial read exceptions, because a record may be truncated.
      # PyRecordReader holds the offset prior to the failed read, so retrying
      # will succeed.
      return None
    # Track the offset *after* the successful read so get_size() can
    # enforce that the file never shrinks below it.
    self._offset = self._reader.offset()
    return Record(self._reader.record(), self._offset)

  def close(self):
    """Closes record reader if open.

    Further reads are not permitted after this method is called.
    """
    if self._is_closed:
      return  # idempotent
    if self._reader is not None:
      self._reader.Close()
    self._is_closed = True
    self._reader = None

  def _open(self):
    # readahead_file_path may redirect to a readahead cache if configured.
    with tf.errors.raise_exception_on_not_ok_status() as status:
      return tf.pywrap_tensorflow.PyRecordReader_New(
          tf.resource_loader.readahead_file_path(tf.compat.as_bytes(self.path)),
          self._offset, tf.compat.as_bytes(''), status)

  def __str__(self):
    return u'RecordReader{%s}' % self.path
@util.closeable
@six.python_2_unicode_compatible
class BufferedRecordReader(object):
  """Wrapper around RecordReader that does threaded read-ahead.

  This class implements the same interface as RecordReader. It prevents
  remote file systems from devastating loader performance. It does not
  degrade throughput on local file systems.

  The thread is spawned when the first read operation happens. The
  thread will diligently try to buffer records in the background. Its
  goal is to sleep as much as possible without blocking read operations.

  This class is thread safe. It can be used from multiple threads
  without any need for external synchronization.
  """

  READ_AHEAD_AGGRESSION = 2.3  # Does full replenish when ~40% full.
  READ_AHEAD_BYTES = 16 * 1024 * 1024
  STAT_INTERVAL_SECONDS = 4.0

  def __init__(self, path,
               start_offset=0,
               read_ahead=READ_AHEAD_BYTES,
               stat_interval=STAT_INTERVAL_SECONDS,
               clock=time.time,
               record_reader_factory=RecordReader):
    """Creates new instance.

    The i/o thread is not started until the first read happens.

    Args:
      path: Path of file. This can be on a remote file system if the
          TensorFlow build supports it.
      start_offset: Byte offset to seek in file once it's opened.
      read_ahead: The number of record bytes to buffer into memory
          before the thread starts blocking. This value must be >0 and
          the default is BufferedRecordReader.READ_AHEAD_BYTES.
      stat_interval: A float with the minimum number of seconds between
          stat calls, to determine the file size. If this is 0.0 then
          the thread will stat after every re-buffer, but never be
          woken up in order to stat.
      clock: Function returning a float with the number of seconds
          since the UNIX epoch in zulu time.
      record_reader_factory: The RecordReader constructor, which can be
          changed for testing.

    :type path: str
    :type start_offset: int
    :type read_ahead: int
    :type clock: () -> float
    :type record_reader_factory: (str, int) -> RecordReader
    """
    self.path = tf.compat.as_text(path)
    self._read_ahead = read_ahead
    self._stat_interval = stat_interval
    self._clock = clock
    self._is_closed = False
    self._has_reached_end = False
    self._offset = 0  # offset after the last record the i/o thread read
    self._size = -1  # last stat'ed file size; -1 until the first stat
    self._last_stat = 0.0  # clock time of the last successful stat
    self._buffered = 0  # total record bytes currently queued in _records
    self._reader = record_reader_factory(self.path, start_offset)
    self._records = collections.deque()  # type: collections.deque[Record]
    # Exceptions raised in the i/o thread are stored as sys.exc_info()
    # tuples and re-raised in the caller's thread.
    self._read_exception = \
        None  # type: tuple[BaseException, BaseException, types.TracebackType]
    self._close_exception = \
        None  # type: tuple[BaseException, BaseException, types.TracebackType]
    # Both condition variables share one lock: the producer (i/o thread)
    # waits on _wake_up_producer; readers and close() wait on
    # _wake_up_consumers.
    self._lock = threading.Lock()
    self._wake_up_producer = threading.Condition(self._lock)
    self._wake_up_consumers = threading.Condition(self._lock)
    self._thread = threading.Thread(target=self._run,
                                    name=_shorten_event_log_path(self.path))

  def get_size(self):
    """Returns byte length of file.

    This is guaranteed to return a number greater than or equal to the
    offset of the last record returned by get_next_record().

    In the average case, this method will not block. However, if the
    i/o thread has not yet computed this value, then this method will
    block on a stat call.

    This method can be called after the instance has been closed.

    Returns:
      The byte length of file, which might increase over time, but is
      guaranteed to never decrease. It's also guaranteed that it will
      be greater than or equal to the offset field of any Record.

    :rtype: int
    """
    with self._lock:
      if self._should_stat():
        self._stat()
      return self._size

  def get_next_record(self):
    """Reads one record.

    When this method is first called, it will spawn the thread and
    block until a record is read. Once the thread starts, it will queue
    up records which can be read without blocking. The exception is
    when we reach the end of the file, in which case each repeated call
    will be synchronous. There is no background polling. If new data is
    appended to the file, new records won't be buffered until this
    method is invoked again. The caller should take care to meter calls
    to this method once it reaches the end of file, lest they impact
    performance.

    Returns:
      A Record object, or None if there are no more records available
      at the moment.

    Raises:
      IOError: If this instance has been closed.
      tf.errors.DataLossError: If corruption was encountered in the
          records file.
      Exception: To propagate any exceptions that may have been thrown
          by the read operation in the other thread. If an exception is
          thrown, then all subsequent calls to this method will rethrow
          that same exception.

    :rtype: Record
    """
    with self._lock:
      if self._is_closed:
        raise IOError('%s is closed' % self)
      if not self._thread.is_alive():
        # First read: spawn the i/o thread; fall through to wait below.
        self._thread.start()
      else:
        record = self._get_record()
        if record is not None:
          # Fast path: hand out a buffered record and poke the producer
          # if the buffer is running low.
          if self._should_wakeup():
            self._wake_up_producer.notify()
          return record
        # Buffer is empty: clear the EOF flag so the producer retries,
        # then wait for it to deliver data, hit EOF again, or fail.
        self._has_reached_end = False
        self._wake_up_producer.notify()
      while not (self._read_exception or
                 self._has_reached_end or
                 self._records):
        self._wake_up_consumers.wait()
      return self._get_record()

  def close(self):
    """Closes event log reader if open.

    If the i/o thread is running, this method blocks until it has been
    shut down.

    Further reads are not permitted after this method is called.

    Raises:
      Exception: To propagate any exceptions that may have been thrown
          by the close operation in the other thread. If an exception
          is thrown, then all subsequent calls to this method will
          rethrow that same exception.
    """
    with self._lock:
      if not self._is_closed:
        self._is_closed = True
        if not self._thread.is_alive():
          # The thread never started, so close the reader directly.
          self._reader = None
          return
        self._wake_up_producer.notify()
      # _reader is set to None by the i/o thread once it has shut down.
      while self._reader is not None:
        self._wake_up_consumers.wait()
    if self._close_exception is not None:
      six.reraise(*self._close_exception)

  def _get_record(self):
    # Must be called with _lock held.
    if self._read_exception is not None:
      six.reraise(*self._read_exception)
    if not self._records:
      return None
    record = self._records.popleft()
    self._buffered -= len(record.record)
    return record

  @util.guarded_by('_lock')
  def _should_wakeup(self):
    # The producer wakes to shut down, to re-buffer, or (if stat_interval
    # is nonzero) to refresh the file size.
    return (self._is_closed or
            self._read_exception is None and
            (self._should_rebuffer() or
             (self._stat_interval and self._should_stat())))

  @util.guarded_by('_lock')
  def _should_rebuffer(self):
    # Replenish once the buffer drops below read_ahead / AGGRESSION bytes.
    return (not self._has_reached_end and
            (float(self._buffered) <
             self._read_ahead / BufferedRecordReader.READ_AHEAD_AGGRESSION))

  @util.guarded_by('_lock')
  def _should_stat(self):
    # Stat when the read offset has passed the last known size, or when
    # the stat information has gone stale.
    return (self._read_exception is None and
            (self._offset > self._size or
             self._last_stat <= self._clock() - self._stat_interval))

  @util.guarded_by('_lock')
  def _stat(self):
    try:
      now = self._clock()
      self._size = self._reader.get_size()
      self._last_stat = now
    except Exception as e:  # pylint: disable=broad-except
      # Surface the failure to readers via _read_exception.
      tf.logging.debug('Stat failed: %s', e)
      self._read_exception = sys.exc_info()

  def _run(self):
    # Main loop of the i/o thread: sleep until woken, then shut down,
    # stat, or re-buffer as appropriate.
    while True:
      with self._lock:
        while not self._should_wakeup():
          self._wake_up_producer.wait()
        if self._is_closed:
          try:
            self._reader.close()
            tf.logging.debug('Closed')
          except Exception as e:  # pylint: disable=broad-except
            self._close_exception = sys.exc_info()
            tf.logging.debug('Close failed: %s', e)
          # Setting _reader to None signals close() that shutdown is done.
          self._reader = None
          self._wake_up_consumers.notify_all()
          return
        if self._buffered >= self._read_ahead:
          tf.logging.debug('Waking up to stat')
          self._stat()
          continue
        # Calculate a good amount of data to read outside the lock.
        # The less we have buffered, the less re-buffering we'll do.
        # We want to minimize wait time in the other thread. See the
        # following contour plot: https://goo.gl/HTBcCU
        x = float(self._buffered)
        y = BufferedRecordReader.READ_AHEAD_AGGRESSION
        c = float(self._read_ahead)
        want = int(min(c - x, y/c * x**y + 1))
      # Perform re-buffering outside lock.
      self._rebuffer(want)

  def _rebuffer(self, want):
    tf.logging.debug('Waking up to read %s bytes', _localize_int(want))
    records = []
    read_exception = self._read_exception
    if read_exception is None:
      try:
        # Read up to `want` record bytes without holding the lock, so
        # consumers are never blocked on file i/o.
        while want > 0:
          record = self._reader.get_next_record()
          if record is None:
            break
          self._offset = record.offset
          records.append(record)
          want -= len(record.record)
      except Exception as e:  # pylint: disable=broad-except
        tf.logging.debug('Read failed: %s', e)
        read_exception = sys.exc_info()
    with self._lock:
      self._read_exception = read_exception
      if self._should_stat():
        self._stat()
      if not self._read_exception:
        if not records:
          # A successful read that produced nothing means end of file.
          self._has_reached_end = True
        else:
          for record in records:
            self._records.append(record)
            self._buffered += len(record.record)
      self._wake_up_consumers.notify_all()

  def __str__(self):
    return u'BufferedRecordReader{%s}' % self.path
class RateCounter(object):
  """Tracks how fast a monotonically increasing number grows per second.

  The rate is a weighted average over samples inside a sliding time
  window, where samples closer to the present weigh more.
  """

  def __init__(self, window, clock=time.time):
    """Creates new instance.

    Args:
      window: The maximum number of seconds across which rate is
          averaged. In practice, the rate might be averaged over a time
          period greater than window if set_value is being called less
          frequently than window.
      clock: Function returning a float with the number of seconds
          since the UNIX epoch in zulu time.

    :type window: float
    :type clock: () -> float
    """
    self._window = window
    self._clock = clock
    self._points = collections.deque()
    self._last_value = None  # type: float
    self._last_time = None  # type: float

  def get_rate(self):
    """Computes the windowed average rate of increase per second.

    Returns:
      An integer rate, or 0 if no samples have been collected yet.

    :rtype: int
    """
    weighted = []
    elapsed_so_far = 0.0
    weight_sum = 0.0
    # Newest samples come first in the deque, so earlier iterations
    # (smaller elapsed_so_far) receive proportionally larger weights.
    for rate, elapsed, _ in self._points:
      weight = 1.0 / (elapsed_so_far + 1) * elapsed
      elapsed_so_far += elapsed
      weight_sum += weight
      weighted.append((rate, weight))
    if not weight_sum:
      return 0
    return int(sum(weight / weight_sum * rate for rate, weight in weighted))

  def set_value(self, value):
    """Records the latest value of the number being tracked.

    The first call only establishes a baseline; each subsequent call
    appends a rate sample derived from the delta since the prior call.

    Raises:
      ValueError: If value is less than the last value.

    :type value: float
    """
    value = float(value)
    now = self._clock()
    previous = self._last_value
    if previous is None:
      self._last_value = value
      self._last_time = now
      return
    if value < previous:
      raise ValueError('%f < %f' % (value, previous))
    elapsed = now - self._last_time
    if not elapsed:
      # No time has passed; a rate sample would divide by zero.
      return
    self._points.appendleft(((value - previous) / elapsed, elapsed, now))
    self._last_time = now
    self._last_value = value
    self._remove_old_points()

  def bump(self):
    """Makes time since last set_value count for nothing."""
    self._last_time = self._clock()

  def _remove_old_points(self):
    # Drop samples from the old end of the deque until one inside the
    # window is found (that one is pushed back and kept).
    cutoff = self._clock() - self._window
    while self._points:
      rate, elapsed, timestamp = self._points.pop()
      if timestamp > cutoff:
        self._points.append((rate, elapsed, timestamp))
        break
@util.closeable
class Progress(object):
    """Terminal progress indicator for byte-oriented jobs.

    On a teletype this renders an ephemeral unicode progress bar; in
    other environments it falls back to periodic log records. It tracks
    both how fast input is consumed (shown with DELTA) and how fast it
    grows (shown with NABLA), and raises a "meltdown" alarm when
    production outpaces consumption. For that calculation to stay
    honest, callers should wait via this object's sleep() rather than
    time.sleep.
    """

    BAR_INTERVAL_SECONDS = 0.25
    BAR_LOGGER = logging.getLogger('tensorflow' + util.LogHandler.EPHEMERAL)
    BAR_WIDTH = 45
    BLOCK_DARK = u'\u2593'
    BLOCK_LIGHT = u'\u2591'
    DELTA = u'\u2206'
    LOG_INTERVAL_SECONDS = 5.0
    NABLA = u'\u2207'
    RATE_WINDOW = 20.0

    def __init__(self, clock=time.time,
                 sleep=time.sleep,
                 log_callback=tf.logging.info,
                 bar_callback=BAR_LOGGER.info,
                 rate_counter_factory=RateCounter):
        """Creates new instance.

        Args:
          clock: Function returning a float with the number of seconds
            since the UNIX epoch in zulu time.
          sleep: Injected time.sleep function.
          log_callback: Callback for emitting normal log records.
          bar_callback: Callback for emitting ephemeral bar records.
          rate_counter_factory: Constructor to RateCounter, which can be
            swapped out for testing.

        :type clock: () -> float
        :type sleep: (float) -> None
        :type rate_counter_factory: (float) -> RateCounter
        """
        self._clock = clock
        self._sleep = sleep
        self._log_callback = log_callback
        self._bar_callback = bar_callback
        self._initialized = False
        self._offset = 0
        self._size = 0
        self._last_log_time = 0.0
        self._last_bar_time = 0.0
        self._last_log_offset = -1
        self._last_bar_offset = -1
        self._rate_offset = rate_counter_factory(Progress.RATE_WINDOW)
        self._rate_size = rate_counter_factory(Progress.RATE_WINDOW)

    def set_progress(self, offset, size):
        """Records progress state, occasionally emitting output.

        Args:
          offset: The number of bytes processed so far.
          size: The total number of bytes. This is allowed to grow or
            shrink, but it must remain at least offset.

        Raises:
          ValueError: If offset is greater than size, or offset or size
            decreased from the last invocation.

        :type offset: int
        :type size: int
        """
        if offset > size:
            raise ValueError('offset (%d) can not exceed size (%d)' % (offset, size))
        self._rate_offset.set_value(offset)
        self._rate_size.set_value(size)
        self._offset = offset
        self._size = size
        now = self._clock()
        if not self._initialized:
            # First update only seeds the timers; nothing is displayed.
            self._initialized = True
            self._last_log_time = now
            self._last_bar_time = now
            return
        if now - self._last_log_time >= Progress.LOG_INTERVAL_SECONDS:
            self._last_log_time = now
            self._show_log()
        if now - self._last_bar_time >= Progress.BAR_INTERVAL_SECONDS:
            self._last_bar_time = now
            self._show_bar()

    def close(self):
        """Forces the final progress state out to the log.

        This exists so the last thing the user sees is not something
        like 98% after the input has actually finished loading.
        """
        self._show_log(can_stall=False)
        self._show_bar(can_stall=False)
        # An empty record instructs util.LogHandler to clear its
        # ephemeral logging state.
        self._bar_callback('')

    def sleep(self, seconds):
        """Sleeps without penalizing the measured consumption rate.

        :type seconds: float
        """
        self._sleep(seconds)
        self._rate_offset.bump()

    def _show_log(self, can_stall=True):
        is_stalled = can_stall and self._offset == self._last_log_offset
        self._last_log_offset = self._offset
        self._log_callback('Loaded %s', self._get_message(is_stalled))

    def _show_bar(self, can_stall=True):
        is_stalled = can_stall and self._offset == self._last_bar_offset
        self._last_bar_offset = self._offset
        filled = int(self._get_fraction() * Progress.BAR_WIDTH)
        remainder = Progress.BAR_WIDTH - filled
        bar = Progress.BLOCK_DARK * filled + Progress.BLOCK_LIGHT * remainder
        self._bar_callback(u'%s %s ', bar, self._get_message(is_stalled))

    def _get_message(self, is_stalled):
        consume_rate = self._rate_offset.get_rate()  # summary processing speed
        produce_rate = self._rate_size.get_rate()  # summary production speed
        message = u'%d%% of %s%s%s' % (
            int(self._get_fraction() * 100.0),
            _localize_int(self._size),
            self._get_rate_suffix(Progress.DELTA, consume_rate),
            self._get_rate_suffix(Progress.NABLA, produce_rate))
        if consume_rate and produce_rate and consume_rate < produce_rate:
            # If TensorFlow is writing summaries to disk faster than we
            # can insert them into the database, that's kind of
            # problematic.
            message += u' ' + self._make_red(u'[meltdown]')
        elif is_stalled:
            message += u' %s[stalled]%s' % (util.Ansi.BOLD, util.Ansi.RESET)
        return message

    def _get_fraction(self):
        if not self._size:
            return 0.0
        return float(self._offset) / self._size

    def _get_rate_suffix(self, symbol, rate):
        if rate:
            return u' %s %sB/s' % (symbol, _localize_int(rate))
        return u''

    def _make_red(self, text):
        # Alternate the FLIP attribute so the alarm visibly blinks as
        # the offset advances.
        flip = util.Ansi.FLIP if self._offset % 2 == 0 else u''
        return util.Ansi.BOLD + util.Ansi.RED + flip + text + util.Ansi.RESET
@util.closeable
@functools.total_ordering
@six.python_2_unicode_compatible
class EventLogReader(object):
    """Reads tf.Event protos out of a single event log file.

    This is a thin wrapper around BufferedRecordReader that decodes
    each record as a tf.Event protocol buffer.

    Fields:
      rowid: An integer primary key in the EventLogs table, or 0 if
        unknown.
      path: A string with the path of the event log on the local or
        remote file system.
      timestamp: An integer number of seconds since the UNIX epoch in
        UTC, according to hostname, at which the event log file was
        created; parsed from the file name.
      hostname: A string with the FQDN of the machine that wrote this
        event log file; parsed from the file name.
    """

    def __init__(self, path,
                 start_offset=0,
                 record_reader_factory=BufferedRecordReader):
        """Creates new instance.

        Args:
          path: Path of event log file.
          start_offset: Byte offset to seek in file once it's opened.
          record_reader_factory: A reference to the constructor of a
            class that implements the same interface as RecordReader.

        Raises:
          ValueError: If path does not look like an event log path.

        :type path: str
        :type record_reader_factory: (str, int) -> RecordReader
        """
        self.rowid = 0
        self.path = tf.compat.as_text(path)
        match = _EVENT_LOG_PATH_PATTERN.search(self.path)
        if match is None:
            raise ValueError('Bad event log path: ' + self.path)
        self.timestamp = int(match.group('timestamp'))
        self.hostname = match.group('hostname')
        self._offset = start_offset
        self._reader_factory = record_reader_factory
        self._reader = self._reader_factory(self.path, start_offset)
        # Identity for ordering, equality, and hashing: readers sort by
        # directory, then creation time, then writing host.
        self._key = (os.path.dirname(self.path), self.timestamp, self.hostname)

    def get_next_event(self):
        """Reads one event proto from the file.

        Returns:
          A tf.Event, or None when no further records exist yet. The
          file remains open so records appended later can still be read.

        :rtype: tf.Event
        """
        rec = self._reader.get_next_record()
        if rec is None:
            return None
        event = tf.Event()
        event.ParseFromString(rec.record)
        self._offset = rec.offset
        return event

    def set_offset(self, offset):
        """Seeks to a byte offset in the file.

        Reopens the underlying reader unless the offset is unchanged.

        :type offset: int
        """
        if offset == self._offset:
            return
        self._reader.close()
        self._reader = self._reader_factory(self.path, offset)
        self._offset = offset

    def get_offset(self):
        """Returns the current byte offset in the file.

        :rtype: int
        """
        return self._offset

    def get_size(self):
        """Returns the byte length of the file.

        :rtype: int
        """
        return self._reader.get_size()

    def save_progress(self, db_conn):
        """Persists the current offset to the DB.

        The rowid property must be set beforehand. The offset column is
        only ever advanced, never moved backwards.

        :type db_conn: db.Connection
        """
        with contextlib.closing(db_conn.cursor()) as c:
            c.execute(
                'UPDATE EventLogs SET offset = ? WHERE rowid = ? AND offset < ?',
                (self._offset, self.rowid, self._offset))

    def close(self):
        """Closes the event log reader if open.

        Further i/o is not permitted after this method is called.
        """
        if self._reader is None:
            return
        self._reader.close()
        self._reader = None

    def __hash__(self):
        return hash(self._key)

    def __eq__(self, other):
        return self._key == other._key

    def __lt__(self, other):
        return self._key < other._key

    def __str__(self):
        offset = self.get_offset()
        if not offset:
            return u'EventLogReader{%s}' % self.path
        return u'EventLogReader{path=%s, offset=%d}' % (self.path, offset)
@util.closeable
@functools.total_ordering
@six.python_2_unicode_compatible
class RunReader(object):
    """Utility for loading event logs into the DB.

    This class merges the chain of event log files into one meaningful
    stream of events, ordered by step or timestamp.

    Fields:
      rowid: The primary key of the corresponding row in Runs.
      name: Display name of this run.
    """

    def __init__(self, rowid, name):
        """Creates new instance.

        Args:
          rowid: Primary key of run in `Runs` table, which should already
            be inserted. This is a bit-packed int made by db.RUN_ROWID.
          name: Display name of run.

        :type rowid: int
        :type name: str
        """
        self.rowid = db.RUN_ROWID.check(rowid)
        self.run_id = db.RUN_ROWID.parse(rowid)[1]
        self.name = tf.compat.as_text(name)
        # Offset snapshot taken by mark(), or -1 when no mark is active.
        self._mark = -1
        self._logs = []  # type: list[EventLogReader]
        # Index into _logs of the event log currently being read.
        self._index = 0
        # Bytes attributed to logs discarded by _cleanup(); keeps
        # get_offset() monotonic after old readers are closed.
        self._entombed_progress = 0
        # Events read since mark(), retained so reset() can replay them.
        self._saved_events = \
            collections.deque()  # type: collections.deque[tf.Event]
        # Events queued for re-delivery before reading the logs again.
        self._prepended_events = \
            collections.deque()  # type: collections.deque[tf.Event]

    def add_event_log(self, db_conn, log):
        """Adds event log to run loader.

        Event logs must be added monotonically, based on the timestamp in
        the filename. Please note that calling this method could cause a
        current batch of reads to fast forward.

        Args:
          db_conn: A PEP 249 Connection object.
          log: An EventLogReader instance.

        Returns:
          True if log was actually added.

        :type db_conn: db.Connection
        :type log: EventLogReader
        :rtype: bool
        """
        if self._logs and log <= self._logs[-1]:
            # Out-of-order or duplicate event log; refuse it.
            return False
        with contextlib.closing(db_conn.cursor()) as c:
            c.execute(
                'SELECT rowid, offset FROM EventLogs WHERE run_id = ? AND path = ?',
                (self.run_id, log.path))
            row = c.fetchone()
            if row:
                # Already known to the DB: resume from the saved offset.
                log.rowid = row[0]
                log.set_offset(row[1])
            else:
                event_log_id = db.EVENT_LOG_ID.generate()
                log.rowid = db.EVENT_LOG_ROWID.create(self.run_id, event_log_id)
                c.execute(
                    ('INSERT INTO EventLogs (rowid, run_id, path, offset)'
                     ' VALUES (?, ?, ?, 0)'),
                    (log.rowid, self.run_id, log.path))
        tf.logging.debug('Adding %s', log)
        self._logs.append(log)
        # Skip over event logs we've already read.
        if log.get_offset() > 0 and not self._prepended_events:
            self._index = len(self._logs) - 1
            self._cleanup()
        return True

    def get_next_event(self):
        """Returns next tf.Event from event logs or None if stalled.

        :rtype: tf.Event
        """
        event = None
        if self._prepended_events:
            # Replay events restored by reset() before touching the logs.
            event = self._prepended_events.popleft()
        elif self._index < len(self._logs):
            while True:
                log = self._logs[self._index]
                event = log.get_next_event()
                if event is not None:
                    break
                if self._index == len(self._logs) - 1:
                    # Tail of the newest log; wait for more appends.
                    break
                self._index += 1
                self._cleanup()
        if event is not None and self._mark != -1:
            # Remember the event so reset() can rewind to the mark.
            self._saved_events.append(event)
        return event

    def mark_peek_reset(self):
        """Returns next event without advancing.

        Note: This method sets the mark to the current position.

        :rtype: tf.Event
        """
        self.mark()
        result = self.get_next_event()
        self.reset()
        return result

    def get_offset(self):
        """Returns number of bytes read across all event log files.

        :rtype: int
        """
        if self._mark != -1:
            return self._mark
        return self._get_offset()

    def _get_offset(self):
        # Live readers plus bytes entombed by _cleanup().
        return sum(el.get_offset() for el in self._logs) + self._entombed_progress

    def get_size(self):
        """Returns sum of byte lengths of event log files.

        :rtype: int
        """
        return sum(el.get_size() for el in self._logs) + self._entombed_progress

    def save_progress(self, db_conn):
        """Saves current offsets of all open event logs to DB.

        This should be called after the mark has been advanced.

        :type db_conn: db.Connection
        """
        n = 0
        # Chained comparison: persist logs while n <= self._index and
        # n < len(self._logs).
        while self._index >= n < len(self._logs):
            self._logs[n].save_progress(db_conn)
            n += 1

    def mark(self):
        """Marks current position in file so reset() can be called."""
        if self._prepended_events:
            raise ValueError('mark() offsets must be monotonic')
        self._mark = self._get_offset()
        self._saved_events.clear()

    def reset(self):
        """Resets read state to where mark() was called."""
        if self._mark == -1:
            return
        # Re-queue everything read since mark() for re-delivery.
        self._prepended_events.extend(self._saved_events)
        self._saved_events.clear()

    def close(self):
        """Closes all event log readers.

        This method may be called multiple times, but further operations
        are not permitted.

        Raises:
          Exception: To propagate the most recent exception thrown by the
            EventLogReader close method. Suppressed exceptions are
            logged.
        """
        util.close_all(self._logs)
        self._index = len(self._logs)
        self._mark = -1
        self._prepended_events.clear()
        self._saved_events.clear()

    def _cleanup(self):
        # Last event log has to be preserved so we can continue enforcing
        # monotonicity. We entomb offset because that also has to be
        # monotonic, but the size does not.
        if 0 < self._index < len(self._logs):
            deleted = self._logs[:self._index]
            self._logs = self._logs[self._index:]
            self._index = 0
            self._entombed_progress += sum(l.get_offset() for l in deleted)
            util.close_all(deleted)

    def _skip_to_event_log(self, i):
        # Jump directly to log i; if a mark is active and we actually
        # moved forward, re-mark at the new position so reset() stays
        # consistent.
        should_mark = self._mark != -1 and i > self._index
        self._index = i
        if should_mark:
            self._prepended_events.clear()
            self.mark()

    def __hash__(self):
        return hash(self.rowid)

    def __eq__(self, other):
        return self.rowid == other.rowid

    def __lt__(self, other):
        return self.rowid < other.rowid

    def __str__(self):
        offset = self.get_offset()
        if offset:
            return u'RunReader{name=%s, offset=%d}' % (self.name, offset)
        else:
            return u'RunReader{%s}' % self.name
def _get_basename(path):
    """Gets base name of path.

    This is the same as os.path.basename, except it may potentially do
    i/o to handle a few edge cases, which would otherwise cause the
    result to be less meaningful, e.g. "." and "..".

    :type path: str
    :rtype: str
    """
    anchored = os.path.join(_get_cwd(), path)
    return os.path.basename(os.path.normpath(anchored))
def _get_cwd():
    """Returns the current directory, trying not to expand symlinks.

    Prefers the shell-maintained $PWD (which preserves the symlinked
    path the user navigated through) and falls back to os.getcwd().

    :rtype: str
    """
    return os.environ.get('PWD') or os.getcwd()
def get_event_logs(directory):
    """Walks a directory tree collecting event log files.

    Args:
      directory: Path of directory.

    Returns:
      List of EventLogReader objects, ordered by directory name and
      timestamp.

    :type directory: str
    :rtype: list[EventLogReader]
    """
    readers = []
    for dirpath, _, names in tf.gfile.Walk(directory):
        readers.extend(
            EventLogReader(os.path.join(dirpath, name))
            for name in names
            if is_event_log_file(name))
    return sorted(readers)
# Matches event log file names such as "events.out.tfevents.1523456789.host",
# capturing the creation timestamp and the writing host. The separator
# between timestamp and hostname is a literal dot, so it must be escaped;
# a bare "." would accept any character there (e.g. "tfevents.123Xhost").
_EVENT_LOG_PATH_PATTERN = re.compile(
    r'\.tfevents\.(?P<timestamp>\d+)\.(?P<hostname>[-.0-9A-Za-z]+)$')
def is_event_log_file(path):
    """Returns True if path appears to be an event log file.

    :type path: str
    :rtype: bool
    """
    return _EVENT_LOG_PATH_PATTERN.search(path) is not None
# Captures the final path component plus, when present, its immediate
# parent directory; handles both "/" and "\" separators.
_SHORTEN_EVENT_LOG_PATH_PATTERN = re.compile(r'(?:[^/\\]+[/\\])?(?:[^/\\]+)$')
def _shorten_event_log_path(path):
    """Makes an event log path more human readable.

    Returns:
      Path containing only basename and the first parent directory name,
      if there is one.

    :type path: str
    :rtype: str
    """
    match = _SHORTEN_EVENT_LOG_PATH_PATTERN.search(path)
    if match is None:
        return None
    return match.group(0)
def _localize_int(n):
    """Formats an integer with locale-specific thousands separators.

    Args:
      n: The integer to format.

    Returns:
      The formatted string, e.g. "1,024" under an en_US locale.

    :type n: int
    :rtype: str
    """
    # locale.format() was deprecated in Python 3.7 and removed in 3.12;
    # locale.format_string() behaves identically for a single '%d'
    # directive and exists in both Python 2.7 and 3.
    return locale.format_string('%d', n, grouping=True)
| 31.029012 | 80 | 0.668225 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import locale
import logging
import os
import re
import sys
import time
import threading
import types
import six
import tensorflow as tf
from tensorboard import db
from tensorboard import util
class Record(collections.namedtuple('Record', ('record', 'offset'))):
__slots__ = ()
@util.closeable
@six.python_2_unicode_compatible
class RecordReader(object):
def __init__(self, path, start_offset=0):
self.path = tf.compat.as_text(path)
self._offset = start_offset
self._size = -1
self._reader = None
self._is_closed = False
self._lock = threading.Lock()
def get_size(self):
size = tf.gfile.Stat(self.path).length
minimum = max(self._offset, self._size)
if size < minimum:
raise IOError('File shrunk: %d < %d: %s' % (size, minimum, self.path))
self._size = size
return size
def get_next_record(self):
if self._is_closed:
raise IOError('%s is closed' % self)
if self._reader is None:
self._reader = self._open()
try:
with tf.errors.raise_exception_on_not_ok_status() as status:
self._reader.GetNext(status)
except tf.errors.OutOfRangeError:
return None
self._offset = self._reader.offset()
return Record(self._reader.record(), self._offset)
def close(self):
if self._is_closed:
return
if self._reader is not None:
self._reader.Close()
self._is_closed = True
self._reader = None
def _open(self):
with tf.errors.raise_exception_on_not_ok_status() as status:
return tf.pywrap_tensorflow.PyRecordReader_New(
tf.resource_loader.readahead_file_path(tf.compat.as_bytes(self.path)),
self._offset, tf.compat.as_bytes(''), status)
def __str__(self):
return u'RecordReader{%s}' % self.path
@util.closeable
@six.python_2_unicode_compatible
class BufferedRecordReader(object):
READ_AHEAD_AGGRESSION = 2.3
READ_AHEAD_BYTES = 16 * 1024 * 1024
STAT_INTERVAL_SECONDS = 4.0
def __init__(self, path,
start_offset=0,
read_ahead=READ_AHEAD_BYTES,
stat_interval=STAT_INTERVAL_SECONDS,
clock=time.time,
record_reader_factory=RecordReader):
self.path = tf.compat.as_text(path)
self._read_ahead = read_ahead
self._stat_interval = stat_interval
self._clock = clock
self._is_closed = False
self._has_reached_end = False
self._offset = 0
self._size = -1
self._last_stat = 0.0
self._buffered = 0
self._reader = record_reader_factory(self.path, start_offset)
self._records = collections.deque()
self._read_exception = \
None
self._close_exception = \
None
self._lock = threading.Lock()
self._wake_up_producer = threading.Condition(self._lock)
self._wake_up_consumers = threading.Condition(self._lock)
self._thread = threading.Thread(target=self._run,
name=_shorten_event_log_path(self.path))
def get_size(self):
with self._lock:
if self._should_stat():
self._stat()
return self._size
def get_next_record(self):
with self._lock:
if self._is_closed:
raise IOError('%s is closed' % self)
if not self._thread.is_alive():
self._thread.start()
else:
record = self._get_record()
if record is not None:
if self._should_wakeup():
self._wake_up_producer.notify()
return record
self._has_reached_end = False
self._wake_up_producer.notify()
while not (self._read_exception or
self._has_reached_end or
self._records):
self._wake_up_consumers.wait()
return self._get_record()
def close(self):
with self._lock:
if not self._is_closed:
self._is_closed = True
if not self._thread.is_alive():
self._reader = None
return
self._wake_up_producer.notify()
while self._reader is not None:
self._wake_up_consumers.wait()
if self._close_exception is not None:
six.reraise(*self._close_exception)
def _get_record(self):
if self._read_exception is not None:
six.reraise(*self._read_exception)
if not self._records:
return None
record = self._records.popleft()
self._buffered -= len(record.record)
return record
@util.guarded_by('_lock')
def _should_wakeup(self):
return (self._is_closed or
self._read_exception is None and
(self._should_rebuffer() or
(self._stat_interval and self._should_stat())))
@util.guarded_by('_lock')
def _should_rebuffer(self):
return (not self._has_reached_end and
(float(self._buffered) <
self._read_ahead / BufferedRecordReader.READ_AHEAD_AGGRESSION))
@util.guarded_by('_lock')
def _should_stat(self):
return (self._read_exception is None and
(self._offset > self._size or
self._last_stat <= self._clock() - self._stat_interval))
@util.guarded_by('_lock')
def _stat(self):
try:
now = self._clock()
self._size = self._reader.get_size()
self._last_stat = now
except Exception as e:
tf.logging.debug('Stat failed: %s', e)
self._read_exception = sys.exc_info()
def _run(self):
while True:
with self._lock:
while not self._should_wakeup():
self._wake_up_producer.wait()
if self._is_closed:
try:
self._reader.close()
tf.logging.debug('Closed')
except Exception as e:
self._close_exception = sys.exc_info()
tf.logging.debug('Close failed: %s', e)
self._reader = None
self._wake_up_consumers.notify_all()
return
if self._buffered >= self._read_ahead:
tf.logging.debug('Waking up to stat')
self._stat()
continue
# We want to minimize wait time in the other thread. See the
# following contour plot: https://goo.gl/HTBcCU
x = float(self._buffered)
y = BufferedRecordReader.READ_AHEAD_AGGRESSION
c = float(self._read_ahead)
want = int(min(c - x, y/c * x**y + 1))
# Perform re-buffering outside lock.
self._rebuffer(want)
def _rebuffer(self, want):
tf.logging.debug('Waking up to read %s bytes', _localize_int(want))
records = []
read_exception = self._read_exception
if read_exception is None:
try:
while want > 0:
record = self._reader.get_next_record()
if record is None:
break
self._offset = record.offset
records.append(record)
want -= len(record.record)
except Exception as e: # pylint: disable=broad-except
tf.logging.debug('Read failed: %s', e)
read_exception = sys.exc_info()
with self._lock:
self._read_exception = read_exception
if self._should_stat():
self._stat()
if not self._read_exception:
if not records:
self._has_reached_end = True
else:
for record in records:
self._records.append(record)
self._buffered += len(record.record)
self._wake_up_consumers.notify_all()
def __str__(self):
return u'BufferedRecordReader{%s}' % self.path
class RateCounter(object):
def __init__(self, window, clock=time.time):
self._window = window
self._clock = clock
self._points = collections.deque()
self._last_value = None # type: float
self._last_time = None # type: float
def get_rate(self):
points = []
total_elapsed = 0.0
total_weight = 0.0
for rate, elapsed, _ in self._points:
weight = 1.0 / (total_elapsed + 1) * elapsed
total_elapsed += elapsed
total_weight += weight
points.append((rate, weight))
if not total_weight:
return 0
return int(sum(w / total_weight * r for r, w in points))
def set_value(self, value):
value = float(value)
now = self._clock()
if self._last_value is None:
self._last_value = value
self._last_time = now
return
if value < self._last_value:
raise ValueError('%f < %f' % (value, self._last_value))
delta = value - self._last_value
elapsed = now - self._last_time
if not elapsed:
return
self._points.appendleft((delta / elapsed, elapsed, now))
self._last_time = now
self._last_value = value
self._remove_old_points()
def bump(self):
self._last_time = self._clock()
def _remove_old_points(self):
threshold = self._clock() - self._window
while self._points:
r, e, t = self._points.pop()
if t > threshold:
self._points.append((r, e, t))
break
@util.closeable
class Progress(object):
BAR_INTERVAL_SECONDS = 0.25
BAR_LOGGER = logging.getLogger('tensorflow' + util.LogHandler.EPHEMERAL)
BAR_WIDTH = 45
BLOCK_DARK = u'\u2593'
BLOCK_LIGHT = u'\u2591'
DELTA = u'\u2206'
LOG_INTERVAL_SECONDS = 5.0
NABLA = u'\u2207'
RATE_WINDOW = 20.0
def __init__(self, clock=time.time,
sleep=time.sleep,
log_callback=tf.logging.info,
bar_callback=BAR_LOGGER.info,
rate_counter_factory=RateCounter):
self._clock = clock
self._sleep = sleep
self._log_callback = log_callback
self._bar_callback = bar_callback
self._initialized = False
self._offset = 0
self._size = 0
self._last_log_time = 0.0
self._last_bar_time = 0.0
self._last_log_offset = -1
self._last_bar_offset = -1
self._rate_offset = rate_counter_factory(Progress.RATE_WINDOW)
self._rate_size = rate_counter_factory(Progress.RATE_WINDOW)
def set_progress(self, offset, size):
if offset > size:
raise ValueError('offset (%d) can not exceed size (%d)' % (offset, size))
self._rate_offset.set_value(offset)
self._rate_size.set_value(size)
self._offset = offset
self._size = size
now = self._clock()
if not self._initialized:
self._last_log_time = now
self._last_bar_time = now
self._initialized = True
return
elapsed = now - self._last_log_time
if elapsed >= Progress.LOG_INTERVAL_SECONDS:
self._last_log_time = now
self._show_log()
elapsed = now - self._last_bar_time
if elapsed >= Progress.BAR_INTERVAL_SECONDS:
self._last_bar_time = now
self._show_bar()
def close(self):
self._show_log(can_stall=False)
self._show_bar(can_stall=False)
# Instructs util.LogHandler to clear the ephemeral logging state.
self._bar_callback('')
def sleep(self, seconds):
self._sleep(seconds)
self._rate_offset.bump()
def _show_log(self, can_stall=True):
is_stalled = can_stall and self._offset == self._last_log_offset
self._last_log_offset = self._offset
self._log_callback('Loaded %s', self._get_message(is_stalled))
def _show_bar(self, can_stall=True):
is_stalled = can_stall and self._offset == self._last_bar_offset
self._last_bar_offset = self._offset
sofar = int(self._get_fraction() * Progress.BAR_WIDTH)
bar = (Progress.BLOCK_DARK * sofar +
Progress.BLOCK_LIGHT * (Progress.BAR_WIDTH - sofar))
self._bar_callback(u'%s %s ', bar, self._get_message(is_stalled))
def _get_message(self, is_stalled):
rate_offset = self._rate_offset.get_rate() # summary processing speed
rate_size = self._rate_size.get_rate() # summary production speed
message = u'%d%% of %s%s%s' % (
int(self._get_fraction() * 100.0),
_localize_int(self._size),
self._get_rate_suffix(Progress.DELTA, rate_offset),
self._get_rate_suffix(Progress.NABLA, rate_size))
if rate_offset and rate_size and rate_offset < rate_size:
# If TensorFlow is writing summaries to disk faster than we can
# insert them into the database, that's kind of problematic.
message += u' ' + self._make_red(u'[meltdown]')
elif is_stalled:
message += u' %s[stalled]%s' % (util.Ansi.BOLD, util.Ansi.RESET)
return message
def _get_fraction(self):
if not self._size:
return 0.0
else:
return float(self._offset) / self._size
def _get_rate_suffix(self, symbol, rate):
if not rate:
return u''
return u' %s %sB/s' % (symbol, _localize_int(rate))
def _make_red(self, text):
return (util.Ansi.BOLD +
util.Ansi.RED +
(util.Ansi.FLIP if self._offset % 2 == 0 else u'') +
text +
util.Ansi.RESET)
@util.closeable
@functools.total_ordering
@six.python_2_unicode_compatible
class EventLogReader(object):
def __init__(self, path,
start_offset=0,
record_reader_factory=BufferedRecordReader):
self.rowid = 0
self.path = tf.compat.as_text(path)
m = _EVENT_LOG_PATH_PATTERN.search(self.path)
if not m:
raise ValueError('Bad event log path: ' + self.path)
self.timestamp = int(m.group('timestamp'))
self.hostname = m.group('hostname')
self._offset = start_offset
self._reader_factory = record_reader_factory
self._reader = self._reader_factory(self.path, start_offset)
self._key = (os.path.dirname(self.path), self.timestamp, self.hostname)
def get_next_event(self):
record = self._reader.get_next_record()
if record is None:
return None
event = tf.Event()
event.ParseFromString(record.record)
self._offset = record.offset
return event
def set_offset(self, offset):
if offset == self._offset:
return
self._reader.close()
self._reader = self._reader_factory(self.path, offset)
self._offset = offset
def get_offset(self):
return self._offset
def get_size(self):
return self._reader.get_size()
def save_progress(self, db_conn):
with contextlib.closing(db_conn.cursor()) as c:
c.execute(
'UPDATE EventLogs SET offset = ? WHERE rowid = ? AND offset < ?',
(self._offset, self.rowid, self._offset))
def close(self):
if self._reader is not None:
self._reader.close()
self._reader = None
def __hash__(self):
return hash(self._key)
def __eq__(self, other):
return self._key == other._key
def __lt__(self, other):
return self._key < other._key
def __str__(self):
offset = self.get_offset()
if offset:
return u'EventLogReader{path=%s, offset=%d}' % (self.path, offset)
else:
return u'EventLogReader{%s}' % self.path
@util.closeable
@functools.total_ordering
@six.python_2_unicode_compatible
class RunReader(object):
def __init__(self, rowid, name):
self.rowid = db.RUN_ROWID.check(rowid)
self.run_id = db.RUN_ROWID.parse(rowid)[1]
self.name = tf.compat.as_text(name)
self._mark = -1
self._logs = []
self._index = 0
self._entombed_progress = 0
self._saved_events = \
collections.deque()
self._prepended_events = \
collections.deque()
def add_event_log(self, db_conn, log):
if self._logs and log <= self._logs[-1]:
return False
with contextlib.closing(db_conn.cursor()) as c:
c.execute(
'SELECT rowid, offset FROM EventLogs WHERE run_id = ? AND path = ?',
(self.run_id, log.path))
row = c.fetchone()
if row:
log.rowid = row[0]
log.set_offset(row[1])
else:
event_log_id = db.EVENT_LOG_ID.generate()
log.rowid = db.EVENT_LOG_ROWID.create(self.run_id, event_log_id)
c.execute(
('INSERT INTO EventLogs (rowid, run_id, path, offset)'
' VALUES (?, ?, ?, 0)'),
(log.rowid, self.run_id, log.path))
tf.logging.debug('Adding %s', log)
self._logs.append(log)
if log.get_offset() > 0 and not self._prepended_events:
self._index = len(self._logs) - 1
self._cleanup()
return True
def get_next_event(self):
event = None
if self._prepended_events:
event = self._prepended_events.popleft()
elif self._index < len(self._logs):
while True:
log = self._logs[self._index]
event = log.get_next_event()
if event is not None:
break
if self._index == len(self._logs) - 1:
break
self._index += 1
self._cleanup()
if event is not None and self._mark != -1:
self._saved_events.append(event)
return event
def mark_peek_reset(self):
self.mark()
result = self.get_next_event()
self.reset()
return result
def get_offset(self):
if self._mark != -1:
return self._mark
return self._get_offset()
def _get_offset(self):
return sum(el.get_offset() for el in self._logs) + self._entombed_progress
def get_size(self):
return sum(el.get_size() for el in self._logs) + self._entombed_progress
def save_progress(self, db_conn):
n = 0
while self._index >= n < len(self._logs):
self._logs[n].save_progress(db_conn)
n += 1
def mark(self):
if self._prepended_events:
raise ValueError('mark() offsets must be monotonic')
self._mark = self._get_offset()
self._saved_events.clear()
def reset(self):
if self._mark == -1:
return
self._prepended_events.extend(self._saved_events)
self._saved_events.clear()
def close(self):
util.close_all(self._logs)
self._index = len(self._logs)
self._mark = -1
self._prepended_events.clear()
self._saved_events.clear()
def _cleanup(self):
# Last event log has to be preserved so we can continue enforcing
# monotonicity. We entomb offset because that also has to be
# monotonic, but the size does not.
if 0 < self._index < len(self._logs):
deleted = self._logs[:self._index]
self._logs = self._logs[self._index:]
self._index = 0
self._entombed_progress += sum(l.get_offset() for l in deleted)
util.close_all(deleted)
def _skip_to_event_log(self, i):
should_mark = self._mark != -1 and i > self._index
self._index = i
if should_mark:
self._prepended_events.clear()
self.mark()
def __hash__(self):
return hash(self.rowid)
def __eq__(self, other):
return self.rowid == other.rowid
def __lt__(self, other):
return self.rowid < other.rowid
def __str__(self):
offset = self.get_offset()
if offset:
return u'RunReader{name=%s, offset=%d}' % (self.name, offset)
else:
return u'RunReader{%s}' % self.name
def _get_basename(path):
return os.path.basename(os.path.normpath(os.path.join(_get_cwd(), path)))
def _get_cwd():
result = os.environ.get('PWD')
if not result:
result = os.getcwd()
return result
def get_event_logs(directory):
logs = []
for dirname, _, filenames in tf.gfile.Walk(directory):
for filename in filenames:
if is_event_log_file(filename):
logs.append(EventLogReader(os.path.join(dirname, filename)))
logs.sort()
return logs
_EVENT_LOG_PATH_PATTERN = re.compile(
r'\.tfevents\.(?P<timestamp>\d+).(?P<hostname>[-.0-9A-Za-z]+)$')
def is_event_log_file(path):
return bool(_EVENT_LOG_PATH_PATTERN.search(path))
_SHORTEN_EVENT_LOG_PATH_PATTERN = re.compile(r'(?:[^/\\]+[/\\])?(?:[^/\\]+)$')
def _shorten_event_log_path(path):
m = _SHORTEN_EVENT_LOG_PATH_PATTERN.search(path)
return m.group(0) if m else None
def _localize_int(n):
return locale.format('%d', n, grouping=True)
| true | true |
f7fcb833a98f048348c37616cd8c46a99bc9ff66 | 1,200 | py | Python | convert2svg.py | shiyunon/openqasm | 6bc6ff11e67cd1b9cec989f6369981e13baa0717 | [
"Apache-2.0"
] | 603 | 2018-07-11T03:45:15.000Z | 2022-03-30T17:19:10.000Z | convert2svg.py | shiyunon/openqasm | 6bc6ff11e67cd1b9cec989f6369981e13baa0717 | [
"Apache-2.0"
] | 240 | 2018-07-17T18:38:44.000Z | 2022-03-30T05:57:29.000Z | convert2svg.py | shiyunon/openqasm | 6bc6ff11e67cd1b9cec989f6369981e13baa0717 | [
"Apache-2.0"
] | 195 | 2018-07-12T17:13:02.000Z | 2022-03-29T15:38:32.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
CONVERT_COMMAND = 'pdftocairo'
def main(relative_tex_filepath):
    """Convert the given PDF file to SVG using pdftocairo.

    Returns 0 on success and -1 on any failure (missing input file,
    missing converter, or converter error); errors go to stderr.
    """
    import shutil  # local import keeps this fix self-contained

    if not os.path.exists(relative_tex_filepath):
        print(
            'File %s does not exist.' % relative_tex_filepath, file=sys.stderr)
        return -1
    absolute_tex_filepath = os.path.abspath(relative_tex_filepath)
    destination_directory = os.path.dirname(absolute_tex_filepath)
    # `command -v` is a shell builtin, so it cannot be exec'd via
    # subprocess (that raised FileNotFoundError, never
    # CalledProcessError); shutil.which() is the portable check.
    if shutil.which(CONVERT_COMMAND) is None:
        print('Cannot find `%s`. Ensure it is installed and the command is '
              'in the PATH.' % CONVERT_COMMAND, file=sys.stderr)
        return -1
    try:
        # check=True makes converter failures raise; previously a nonzero
        # exit status was silently ignored and main() still returned 0.
        subprocess.run(
            [CONVERT_COMMAND, '-svg', absolute_tex_filepath],
            cwd=destination_directory,
            check=True)
    except subprocess.CalledProcessError:
        return -1
    return 0
if __name__ == '__main__':
    # Exactly one argument (the PDF path) is required.
    args = sys.argv
    if len(args) == 2:
        sys.exit(main(args[1]))
    print('Usage: convert2svg.py path/to/pdffile.pdf')
    print('For converting to PDF, use convert2pdf.py first.')
    sys.exit(-1)
| 26.666667 | 79 | 0.645833 |
import os
import sys
import subprocess
CONVERT_COMMAND = 'pdftocairo'
def main(relative_tex_filepath):
if not os.path.exists(relative_tex_filepath):
print(
'File %s does not exist.' % relative_tex_filepath, file=sys.stderr)
return -1
absolute_tex_filepath = os.path.abspath(relative_tex_filepath)
destination_directory = os.path.dirname(absolute_tex_filepath)
try:
subprocess.run(
['command', '-v', CONVERT_COMMAND]).check_returncode()
except subprocess.CalledProcessError:
print('Cannot find `%s`. Ensure you it installed and the command is '
'in the PATH.' % CONVERT_COMMAND, file=sys.stderr)
return -1
try:
subprocess.run(
[CONVERT_COMMAND, '-svg', absolute_tex_filepath],
cwd=destination_directory)
except subprocess.CalledProcessError:
return -1
return 0
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: convert2svg.py path/to/pdffile.pdf')
print('For converting to PDF, use convert2pdf.py first.')
sys.exit(-1)
sys.exit(main(sys.argv[1]))
| true | true |
f7fcb8d9c8c076de239855212c57469313d13aa1 | 260 | py | Python | pyvol/experimental/__init__.py | eiriks/pyvol | 983c6821d9eea3cb1d5e4c217c990fb1dfcf5f22 | [
"MIT"
] | null | null | null | pyvol/experimental/__init__.py | eiriks/pyvol | 983c6821d9eea3cb1d5e4c217c990fb1dfcf5f22 | [
"MIT"
] | null | null | null | pyvol/experimental/__init__.py | eiriks/pyvol | 983c6821d9eea3cb1d5e4c217c990fb1dfcf5f22 | [
"MIT"
] | null | null | null | """Package for experimental code that is not part of core pyvol distribution.
This is generally where you should put your own estimators so you can
experiment with them or submit them to pyvol contests without
interfering with the main source tree.
"""
| 37.142857 | 78 | 0.780769 | true | true | |
f7fcba9683843296ee112881b8a2b4d45f77e3f4 | 1,705 | py | Python | recursive_cut.py | Rokid/better_jieba | b5b897044acc8c423ce37aeabebc6ffde333176f | [
"MIT"
] | 16 | 2018-06-20T11:19:10.000Z | 2019-07-25T01:39:30.000Z | recursive_cut.py | rokid/better_jieba | b5b897044acc8c423ce37aeabebc6ffde333176f | [
"MIT"
] | 6 | 2018-06-19T07:31:39.000Z | 2019-09-23T07:03:53.000Z | recursive_cut.py | Rokid/better_jieba | b5b897044acc8c423ce37aeabebc6ffde333176f | [
"MIT"
] | 2 | 2018-06-26T01:14:03.000Z | 2019-11-02T03:20:20.000Z | # coding=UTF-8
# Cut everything into segments of at most four characters
import jieba
def clean_and_append(result_list, word):
    """Append *word* to *result_list* unless it is empty or a single space.

    Mutates and returns *result_list*.
    """
    if word in ("", " "):
        return result_list
    result_list.append(word)
    return result_list
def recursive_cut(line):
    """Segment *line* with jieba, recursively splitting long Chinese words.

    Newlines are stripped first; every resulting segment is at most four
    Chinese characters (non-Chinese tokens pass through unchanged).
    """
    stripped = line.replace("\n", "")
    result = []
    for word in jieba.lcut(stripped, HMM=False):
        pieces = get_subword_list(word)
        if isinstance(pieces, list):
            go_subword_list(pieces, result)
        elif isinstance(pieces, str):
            clean_and_append(result, pieces)
        else:
            print("error")
    return result
def isEN(uchar):
    """Return True if *uchar* is an ASCII letter (A-Z or a-z)."""
    return u'\u0041' <= uchar <= u'\u005a' or u'\u0061' <= uchar <= u'\u007a'
def isZH(char):
    """Return True if *char* is a CJK unified ideograph (U+4E00..U+9FA5)."""
    return '\u4e00' <= char <= '\u9fa5'
def get_subword_list(big_word):
    """Return *big_word* unchanged, or a jieba re-cut when it is a long Chinese word.

    Only words whose first character is Chinese and whose length exceeds
    four are re-cut; the word is first removed from jieba's dictionary so
    the cut is forced to split it.
    """
    if not isZH(big_word[0]):
        return big_word
    if len(big_word) <= 4:
        return big_word
    # Forget the long word so jieba cannot return it as a single token.
    jieba.del_word(big_word)
    return jieba.lcut(big_word, HMM=False)
def go_subword_list(input_list, result):
    """Recursively flatten *input_list* into *result*, re-cutting words longer than 4 chars."""
    for big_word in input_list:
        if len(big_word) <= 4:
            clean_and_append(result, big_word)
            continue
        pieces = get_subword_list(big_word)
        if isinstance(pieces, list):
            go_subword_list(pieces, result)
        elif isinstance(pieces, str):
            clean_and_append(result, pieces)
        else:
            print("error")
#print(recursive_cut("一二三四五六七八九十"))
#print(recursive_cut("十九八七六五四三二一"))
| 27.95082 | 98 | 0.603519 |
import jieba
def clean_and_append(result_list,word):
if word == " " or word == "":
return result_list
result_list.append(word)
return result_list
def recursive_cut(line):
line = line.replace("\n", "")
result = []
for big_word in jieba.lcut(line,HMM=False):
subword_list = get_subword_list(big_word)
if isinstance(subword_list, list):
go_subword_list(subword_list,result)
elif isinstance(subword_list, str):
clean_and_append(result,subword_list)
else:
print("error")
return result
def isEN(uchar):
if (uchar >= u'\u0041' and uchar <= u'\u005a') or (uchar >= u'\u0061' and uchar <= u'\u007a'):
return True
else:
return False
def isZH(char):
if not ('\u4e00' <= char <= '\u9fa5'):
return False
return True
def get_subword_list(big_word):
if not isZH(big_word[0]):
return big_word
if len(big_word)>4:
jieba.del_word(big_word)
return jieba.lcut(big_word, HMM=False)
else:
return big_word
def go_subword_list(input_list,result):
for big_word in input_list:
if len(big_word)>4:
subword_list = get_subword_list(big_word)
if isinstance(subword_list,list):
go_subword_list(subword_list,result)
elif isinstance(subword_list,str):
clean_and_append(result, subword_list)
else:
print("error")
else:
clean_and_append(result, big_word)
| true | true |
f7fcba9cd4df33076109987ef33243f2058f0224 | 15,414 | py | Python | osbuild/objectstore.py | PaulWay/osbuild | 3731a323084896dd172733ac72f6b8f4ac42c318 | [
"Apache-2.0"
] | null | null | null | osbuild/objectstore.py | PaulWay/osbuild | 3731a323084896dd172733ac72f6b8f4ac42c318 | [
"Apache-2.0"
] | null | null | null | osbuild/objectstore.py | PaulWay/osbuild | 3731a323084896dd172733ac72f6b8f4ac42c318 | [
"Apache-2.0"
] | null | null | null | import contextlib
import errno
import hashlib
import os
import subprocess
import tempfile
from typing import Optional
from osbuild.util.types import PathLike
from osbuild.util import ctx, jsoncomm, rmrf
from . import api
from . import treesum
__all__ = [
"ObjectStore",
]
def mount(source, target, bind=True, ro=True, private=True, mode="0755"):
options = []
if bind:
options += ["bind"]
if ro:
options += ["ro"]
if mode:
options += [mode]
args = []
if private:
args += ["--make-private"]
if options:
args += ["-o", ",".join(options)]
r = subprocess.run(["mount"] + args + [source, target],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
encoding="utf-8",
check=False)
if r.returncode != 0:
code = r.returncode
msg = r.stdout.strip()
raise RuntimeError(f"{msg} (code: {code})")
def umount(target, lazy=True):
    """Unmount *target* via umount(8); lazy detach by default."""
    cmd = ["umount"]
    if lazy:
        cmd.append("--lazy")
    cmd.append(target)
    subprocess.run(cmd, check=True)
class Object:
    """A tree of files backed by the object store, with copy-on-write semantics.

    The object starts out logically equal to its base object (if any);
    the base content is only materialized into this object's own working
    tree lazily, on the first write. Reads and the single writer are
    tracked with simple counters/flags, and any read/write access goes
    through a bind mount created for the duration of the operation.
    """
    def __init__(self, store: "ObjectStore"):
        self._init = True      # True once base content (if any) was copied in
        self._readers = 0      # number of currently active read mounts
        self._writer = False   # True while a write mount is active
        self._base = None      # id of the object backing this one, if any
        self._workdir = None   # backing TemporaryDirectory for the tree
        self._tree = None      # path of the writable tree inside _workdir
        self.id = None         # committed object id; None while dirty
        self.store = store
        self.reset()
    def init(self) -> None:
        """Initialize the object with content of its base"""
        self._check_writable()
        self._check_readers()
        self._check_writer()
        if self._init:
            return
        # Copy-on-write: materialize the base object's tree into our own.
        with self.store.new(self._base) as obj:
            obj.export(self._tree)
        self._init = True
    @property
    def base(self) -> Optional[str]:
        # Id of the object whose content backs this one, if any.
        return self._base
    @base.setter
    def base(self, base_id: Optional[str]):
        # Assigning a base marks the object uninitialized until the base
        # content is (lazily) copied in via init().
        self._init = not base_id
        self._base = base_id
        self.id = base_id
    @property
    def treesum(self) -> str:
        """Calculate the treesum (sha256 content hash) of the object."""
        with self._open() as fd:
            m = hashlib.sha256()
            treesum.treesum(m, fd)
            treesum_hash = m.hexdigest()
            return treesum_hash
    @property
    def _path(self) -> str:
        # Until initialized, serve reads directly from the (shared) base.
        if self._base and not self._init:
            path = self.store.resolve_ref(self._base)
        else:
            path = self._tree
        return path
    @contextlib.contextmanager
    def write(self) -> str:
        """Return a path that can be written to"""
        self._check_writable()
        self._check_readers()
        self._check_writer()
        self.init()
        # Content is about to change, so any committed id is stale.
        self.id = None
        with self.tempdir("writer") as target:
            mount(self._path, target, ro=False)
            try:
                self._writer = True
                yield target
            finally:
                umount(target)
                self._writer = False
    @contextlib.contextmanager
    def read(self) -> str:
        """Mount the tree read-only at a temporary location and yield it."""
        with self.tempdir("reader") as target:
            with self.read_at(target) as path:
                yield path
    @contextlib.contextmanager
    def read_at(self, target: PathLike, path: str = "/") -> str:
        """Read the object or a part of it at given location
        Map the tree or a part of it specified via `path` at the
        specified path `target`.
        """
        self._check_writable()
        self._check_writer()
        path = os.path.join(self._path, path.lstrip("/"))
        mount(path, target)
        try:
            self._readers += 1
            yield target
        finally:
            umount(target)
            self._readers -= 1
    def store_tree(self, destination: str):
        """Store the tree at destination and reset itself
        Moves the tree atomically by using rename(2). If the
        target already exists, does nothing. Afterwards it
        resets itself and can be used as if it was new.
        """
        self._check_writable()
        self._check_readers()
        self._check_writer()
        self.init()
        with ctx.suppress_oserror(errno.ENOTEMPTY, errno.EEXIST):
            os.rename(self._tree, destination)
        self.reset()
    def reset(self):
        """Discard all local state and set up a fresh, empty working tree."""
        self.cleanup()
        self._workdir = self.store.tempdir(suffix="object")
        self._tree = os.path.join(self._workdir.name, "tree")
        os.makedirs(self._tree, mode=0o755, exist_ok=True)
        self._init = not self._base
    def cleanup(self):
        """Remove the working tree; must not be mounted anywhere."""
        self._check_readers()
        self._check_writer()
        if self._tree:
            # manually remove the tree, it might contain
            # files with immutable flag set, which will
            # throw off standard Python 3 tempdir cleanup
            rmrf.rmtree(self._tree)
            self._tree = None
        if self._workdir:
            self._workdir.cleanup()
            self._workdir = None
        self.id = None
    def _check_readers(self):
        """Internal: Raise a ValueError if there are readers"""
        if self._readers:
            raise ValueError("Read operation is ongoing")
    def _check_writable(self):
        """Internal: Raise a ValueError if not writable"""
        if not self._workdir:
            raise ValueError("Object is not writable")
    def _check_writer(self):
        """Internal: Raise a ValueError if there is a writer"""
        if self._writer:
            raise ValueError("Write operation is ongoing")
    @contextlib.contextmanager
    def _open(self):
        """Open the directory and yield its file descriptor."""
        with self.read() as path:
            fd = os.open(path, os.O_DIRECTORY)
            try:
                yield fd
            finally:
                os.close(fd)
    def tempdir(self, suffix=None):
        """Return a TemporaryDirectory created inside this object's workdir."""
        workdir = self._workdir.name
        if suffix:
            suffix = "-" + suffix
        return tempfile.TemporaryDirectory(dir=workdir,
                                           suffix=suffix)
    def __enter__(self):
        # Only writable (non-cleaned-up) objects can be used as contexts.
        self._check_writable()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.cleanup()
    def export(self, to_directory: PathLike):
        """Copy object into an external directory"""
        with self.read() as from_directory:
            subprocess.run(
                [
                    "cp",
                    "--reflink=auto",
                    "-a",
                    os.fspath(from_directory) + "/.",
                    os.fspath(to_directory),
                ],
                check=True,
            )
class HostTree:
    """Read-only access to the host file system.

    Mimics the read interface of `objectstore.Object`, but always exposes
    the root of the host file system and can never be written to.
    """
    def __init__(self, store):
        self.store = store
    @staticmethod
    def write():
        raise ValueError("Cannot write to host")
    @contextlib.contextmanager
    def read(self):
        """Bind-mount the host root read-only at a temporary path and yield it."""
        with self.store.tempdir() as mountpoint:
            mount("/", mountpoint)
            try:
                yield mountpoint
            finally:
                umount(mountpoint)
    def cleanup(self):
        # Nothing to clean up: the host tree owns no resources.
        pass
class ObjectStore(contextlib.AbstractContextManager):
    """Content-addressed store of file trees with named references.

    Layout under the store root:
      objects/<treesum>  - trees addressed by content hash
      refs/<object_id>   - symlinks from ids to objects
      tmp/               - temporary working directories
    """
    def __init__(self, store: PathLike):
        self.store = store
        self.objects = os.path.join(store, "objects")
        self.refs = os.path.join(store, "refs")
        self.tmp = os.path.join(store, "tmp")
        os.makedirs(self.store, exist_ok=True)
        os.makedirs(self.objects, exist_ok=True)
        os.makedirs(self.refs, exist_ok=True)
        os.makedirs(self.tmp, exist_ok=True)
        self._objs = set()  # Objects handed out and not yet cleaned up
    def _get_floating(self, object_id: str) -> Optional[Object]:
        """Internal: get a non-committed object"""
        for obj in self._objs:
            if obj.id == object_id:
                return obj
        return None
    def contains(self, object_id):
        """Return True if `object_id` exists, floating or committed."""
        if not object_id:
            return False
        if self._get_floating(object_id):
            return True
        return os.access(self.resolve_ref(object_id), os.F_OK)
    def resolve_ref(self, object_id: Optional[str]) -> Optional[str]:
        """Returns the path to the given object_id"""
        if not object_id:
            return None
        return os.path.join(self.refs, object_id)
    def tempdir(self, prefix=None, suffix=None):
        """Return a tempfile.TemporaryDirectory within the store"""
        return tempfile.TemporaryDirectory(dir=self.tmp,
                                           prefix=prefix,
                                           suffix=suffix)
    def get(self, object_id):
        """Return an `Object` for `object_id`, or None if it does not exist."""
        obj = self._get_floating(object_id)
        if obj:
            return obj
        if not self.contains(object_id):
            return None
        obj = self.new(base_id=object_id)
        return obj
    def new(self, base_id=None):
        """Creates a new temporary `Object`.
        It returns a temporary instance of `Object`, the base
        optionally set to `base_id`. It can be used to interact
        with the store.
        If changes to the object's content were made (by calling
        `Object.write`), these must manually be committed to the
        store via `commit()`.
        """
        obj = Object(self)
        if base_id:
            # if we were given a base id then this is the base
            # content for the new object
            # NB: `Object` has copy-on-write semantics, so no
            # copying of the data takes places at this point
            obj.base = base_id
        self._objs.add(obj)
        return obj
    def commit(self, obj: Object, object_id: str) -> str:
        """Commits a Object to the object store
        Move the contents of the obj (Object) to object directory
        of the store with the content hash (obj.treesum) as its name.
        Creates a symlink to that ('objects/{hash}') in the references
        directory with the object_id as the name ('refs/{object_id}).
        If the link already exists, it will be atomically replaced.
        Returns: The treesum of the object
        """
        treesum_hash = obj.treesum
        # the object is stored in the objects directory using its content
        # hash as its name, ideally a given object_id (i.e., given config)
        # will always produce the same content hash, but that is not
        # guaranteed. If an object with the same treesum already exists,
        # use the existing one instead
        obj.store_tree(os.path.join(self.objects, treesum_hash))
        # symlink the object_id (config hash) in the refs directory to the
        # treesum (content hash) in the objects directory. If a symlink by
        # that name already exists, atomically replace it, but leave the
        # backing object in place (it may be in use).
        with self.tempdir() as tmp:
            link = f"{tmp}/link"
            os.symlink(f"../objects/{treesum_hash}", link)
            os.replace(link, self.resolve_ref(object_id))
        # the reference that is pointing to `treesum_hash` is now the base
        # of `obj`. It is not actively initialized but any subsequent calls
        # to `obj.write()` will initialize it again
        # NB: in the case that an object with the same treesum as `obj`
        # already existed in the store obj.store_tree() will not actually
        # have written anything to the store. In this case `obj` will then
        # be initialized with the content of the already existing object.
        obj.base = object_id
        return treesum_hash
    def cleanup(self):
        """Cleanup all created Objects that are still alive"""
        for obj in self._objs:
            obj.cleanup()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.cleanup()
class StoreServer(api.BaseAPI):
    """Serve ObjectStore operations over the osbuild API socket."""
    endpoint = "store"
    def __init__(self, store: ObjectStore, *, socket_address=None):
        super().__init__(socket_address)
        self.store = store
        self.tmproot = store.tempdir(prefix="store-server-")
        self._stack = contextlib.ExitStack()  # keeps read mounts alive
    def _cleanup(self):
        # Remove the temp root and close all read contexts (unmounting them).
        self.tmproot.cleanup()
        self.tmproot = None
        self._stack.close()
        self._stack = None
    def _read_tree(self, msg, sock):
        """Handle `read-tree`: mount the object's tree, reply with its path."""
        object_id = msg["object-id"]
        obj = self.store.get(object_id)
        if not obj:
            sock.send({"path": None})
            return
        # Keep the reader open on the exit stack until server cleanup.
        reader = obj.read()
        path = self._stack.enter_context(reader)
        sock.send({"path": path})
    def _read_tree_at(self, msg, sock):
        """Handle `read-tree-at`: mount (a subtree of) the object at `target`."""
        object_id = msg["object-id"]
        target = msg["target"]
        subtree = msg["subtree"]
        obj = self.store.get(object_id)
        if not obj:
            sock.send({"path": None})
            return
        try:
            reader = obj.read_at(target, subtree)
            path = self._stack.enter_context(reader)
        # pylint: disable=broad-except
        except Exception as e:
            # Report any mount/read failure back to the client as a string.
            sock.send({"error": str(e)})
            return
        sock.send({"path": path})
    def _mkdtemp(self, msg, sock):
        """Handle `mkdtemp`: create a temp dir under the server's tmp root."""
        args = {
            "suffix": msg.get("suffix"),
            "prefix": msg.get("prefix"),
            "dir": self.tmproot.name
        }
        path = tempfile.mkdtemp(**args)
        sock.send({"path": path})
    def _source(self, msg, sock):
        """Handle `source`: reply with the path of the named source dir."""
        name = msg["name"]
        base = self.store.store
        path = os.path.join(base, "sources", name)
        sock.send({"path": path})
    def _message(self, msg, _fds, sock):
        # Dispatch one RPC message to the matching handler.
        if msg["method"] == "read-tree":
            self._read_tree(msg, sock)
        elif msg["method"] == "read-tree-at":
            self._read_tree_at(msg, sock)
        elif msg["method"] == "mkdtemp":
            self._mkdtemp(msg, sock)
        elif msg["method"] == "source":
            self._source(msg, sock)
        else:
            raise ValueError("Invalid RPC call", msg)
class StoreClient:
    """Client side of the store API, speaking JSON over a unix socket."""
    def __init__(self, connect_to="/run/osbuild/api/store"):
        self.client = jsoncomm.Socket.new_client(connect_to)
    def __del__(self):
        if self.client is not None:
            self.client.close()
    def _request(self, msg):
        """Send *msg* to the server and return its reply message."""
        self.client.send(msg)
        reply, _, _ = self.client.recv()
        return reply
    def mkdtemp(self, suffix=None, prefix=None):
        """Create a temporary directory on the server; return its path."""
        reply = self._request({
            "method": "mkdtemp",
            "suffix": suffix,
            "prefix": prefix
        })
        return reply["path"]
    def read_tree(self, object_id: str):
        """Ask the server to mount the object's tree; return the path."""
        reply = self._request({
            "method": "read-tree",
            "object-id": object_id
        })
        return reply["path"]
    def read_tree_at(self, object_id: str, target: str, path="/"):
        """Mount (a subtree of) the object at *target*; raise on server error."""
        reply = self._request({
            "method": "read-tree-at",
            "object-id": object_id,
            "target": os.fspath(target),
            "subtree": os.fspath(path)
        })
        err = reply.get("error")
        if err:
            raise RuntimeError(err)
        return reply["path"]
    def source(self, name: str) -> str:
        """Return the path of the named source directory in the store."""
        reply = self._request({
            "method": "source",
            "name": name
        })
        return reply["path"]
| 29.416031 | 75 | 0.562735 | import contextlib
import errno
import hashlib
import os
import subprocess
import tempfile
from typing import Optional
from osbuild.util.types import PathLike
from osbuild.util import ctx, jsoncomm, rmrf
from . import api
from . import treesum
__all__ = [
"ObjectStore",
]
def mount(source, target, bind=True, ro=True, private=True, mode="0755"):
options = []
if bind:
options += ["bind"]
if ro:
options += ["ro"]
if mode:
options += [mode]
args = []
if private:
args += ["--make-private"]
if options:
args += ["-o", ",".join(options)]
r = subprocess.run(["mount"] + args + [source, target],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
encoding="utf-8",
check=False)
if r.returncode != 0:
code = r.returncode
msg = r.stdout.strip()
raise RuntimeError(f"{msg} (code: {code})")
def umount(target, lazy=True):
args = []
if lazy:
args += ["--lazy"]
subprocess.run(["umount"] + args + [target], check=True)
class Object:
def __init__(self, store: "ObjectStore"):
self._init = True
self._readers = 0
self._writer = False
self._base = None
self._workdir = None
self._tree = None
self.id = None
self.store = store
self.reset()
def init(self) -> None:
self._check_writable()
self._check_readers()
self._check_writer()
if self._init:
return
with self.store.new(self._base) as obj:
obj.export(self._tree)
self._init = True
@property
def base(self) -> Optional[str]:
return self._base
@base.setter
def base(self, base_id: Optional[str]):
self._init = not base_id
self._base = base_id
self.id = base_id
@property
def treesum(self) -> str:
with self._open() as fd:
m = hashlib.sha256()
treesum.treesum(m, fd)
treesum_hash = m.hexdigest()
return treesum_hash
@property
def _path(self) -> str:
if self._base and not self._init:
path = self.store.resolve_ref(self._base)
else:
path = self._tree
return path
@contextlib.contextmanager
def write(self) -> str:
self._check_writable()
self._check_readers()
self._check_writer()
self.init()
self.id = None
with self.tempdir("writer") as target:
mount(self._path, target, ro=False)
try:
self._writer = True
yield target
finally:
umount(target)
self._writer = False
@contextlib.contextmanager
def read(self) -> str:
with self.tempdir("reader") as target:
with self.read_at(target) as path:
yield path
@contextlib.contextmanager
def read_at(self, target: PathLike, path: str = "/") -> str:
self._check_writable()
self._check_writer()
path = os.path.join(self._path, path.lstrip("/"))
mount(path, target)
try:
self._readers += 1
yield target
finally:
umount(target)
self._readers -= 1
def store_tree(self, destination: str):
self._check_writable()
self._check_readers()
self._check_writer()
self.init()
with ctx.suppress_oserror(errno.ENOTEMPTY, errno.EEXIST):
os.rename(self._tree, destination)
self.reset()
def reset(self):
self.cleanup()
self._workdir = self.store.tempdir(suffix="object")
self._tree = os.path.join(self._workdir.name, "tree")
os.makedirs(self._tree, mode=0o755, exist_ok=True)
self._init = not self._base
def cleanup(self):
self._check_readers()
self._check_writer()
if self._tree:
rmrf.rmtree(self._tree)
self._tree = None
if self._workdir:
self._workdir.cleanup()
self._workdir = None
self.id = None
def _check_readers(self):
if self._readers:
raise ValueError("Read operation is ongoing")
def _check_writable(self):
if not self._workdir:
raise ValueError("Object is not writable")
def _check_writer(self):
if self._writer:
raise ValueError("Write operation is ongoing")
@contextlib.contextmanager
def _open(self):
with self.read() as path:
fd = os.open(path, os.O_DIRECTORY)
try:
yield fd
finally:
os.close(fd)
def tempdir(self, suffix=None):
workdir = self._workdir.name
if suffix:
suffix = "-" + suffix
return tempfile.TemporaryDirectory(dir=workdir,
suffix=suffix)
def __enter__(self):
self._check_writable()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
def export(self, to_directory: PathLike):
with self.read() as from_directory:
subprocess.run(
[
"cp",
"--reflink=auto",
"-a",
os.fspath(from_directory) + "/.",
os.fspath(to_directory),
],
check=True,
)
class HostTree:
def __init__(self, store):
self.store = store
@staticmethod
def write():
raise ValueError("Cannot write to host")
@contextlib.contextmanager
def read(self):
with self.store.tempdir() as tmp:
mount("/", tmp)
try:
yield tmp
finally:
umount(tmp)
def cleanup(self):
pass
class ObjectStore(contextlib.AbstractContextManager):
def __init__(self, store: PathLike):
self.store = store
self.objects = os.path.join(store, "objects")
self.refs = os.path.join(store, "refs")
self.tmp = os.path.join(store, "tmp")
os.makedirs(self.store, exist_ok=True)
os.makedirs(self.objects, exist_ok=True)
os.makedirs(self.refs, exist_ok=True)
os.makedirs(self.tmp, exist_ok=True)
self._objs = set()
def _get_floating(self, object_id: str) -> Optional[Object]:
for obj in self._objs:
if obj.id == object_id:
return obj
return None
def contains(self, object_id):
if not object_id:
return False
if self._get_floating(object_id):
return True
return os.access(self.resolve_ref(object_id), os.F_OK)
def resolve_ref(self, object_id: Optional[str]) -> Optional[str]:
if not object_id:
return None
return os.path.join(self.refs, object_id)
def tempdir(self, prefix=None, suffix=None):
return tempfile.TemporaryDirectory(dir=self.tmp,
prefix=prefix,
suffix=suffix)
def get(self, object_id):
obj = self._get_floating(object_id)
if obj:
return obj
if not self.contains(object_id):
return None
obj = self.new(base_id=object_id)
return obj
def new(self, base_id=None):
obj = Object(self)
if base_id:
obj.base = base_id
self._objs.add(obj)
return obj
def commit(self, obj: Object, object_id: str) -> str:
treesum_hash = obj.treesum
obj.store_tree(os.path.join(self.objects, treesum_hash))
with self.tempdir() as tmp:
link = f"{tmp}/link"
os.symlink(f"../objects/{treesum_hash}", link)
os.replace(link, self.resolve_ref(object_id))
obj.base = object_id
return treesum_hash
def cleanup(self):
for obj in self._objs:
obj.cleanup()
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
class StoreServer(api.BaseAPI):
endpoint = "store"
def __init__(self, store: ObjectStore, *, socket_address=None):
super().__init__(socket_address)
self.store = store
self.tmproot = store.tempdir(prefix="store-server-")
self._stack = contextlib.ExitStack()
def _cleanup(self):
self.tmproot.cleanup()
self.tmproot = None
self._stack.close()
self._stack = None
def _read_tree(self, msg, sock):
object_id = msg["object-id"]
obj = self.store.get(object_id)
if not obj:
sock.send({"path": None})
return
reader = obj.read()
path = self._stack.enter_context(reader)
sock.send({"path": path})
def _read_tree_at(self, msg, sock):
object_id = msg["object-id"]
target = msg["target"]
subtree = msg["subtree"]
obj = self.store.get(object_id)
if not obj:
sock.send({"path": None})
return
try:
reader = obj.read_at(target, subtree)
path = self._stack.enter_context(reader)
except Exception as e:
sock.send({"error": str(e)})
return
sock.send({"path": path})
def _mkdtemp(self, msg, sock):
args = {
"suffix": msg.get("suffix"),
"prefix": msg.get("prefix"),
"dir": self.tmproot.name
}
path = tempfile.mkdtemp(**args)
sock.send({"path": path})
def _source(self, msg, sock):
name = msg["name"]
base = self.store.store
path = os.path.join(base, "sources", name)
sock.send({"path": path})
def _message(self, msg, _fds, sock):
if msg["method"] == "read-tree":
self._read_tree(msg, sock)
elif msg["method"] == "read-tree-at":
self._read_tree_at(msg, sock)
elif msg["method"] == "mkdtemp":
self._mkdtemp(msg, sock)
elif msg["method"] == "source":
self._source(msg, sock)
else:
raise ValueError("Invalid RPC call", msg)
class StoreClient:
def __init__(self, connect_to="/run/osbuild/api/store"):
self.client = jsoncomm.Socket.new_client(connect_to)
def __del__(self):
if self.client is not None:
self.client.close()
def mkdtemp(self, suffix=None, prefix=None):
msg = {
"method": "mkdtemp",
"suffix": suffix,
"prefix": prefix
}
self.client.send(msg)
msg, _, _ = self.client.recv()
return msg["path"]
def read_tree(self, object_id: str):
msg = {
"method": "read-tree",
"object-id": object_id
}
self.client.send(msg)
msg, _, _ = self.client.recv()
return msg["path"]
def read_tree_at(self, object_id: str, target: str, path="/"):
msg = {
"method": "read-tree-at",
"object-id": object_id,
"target": os.fspath(target),
"subtree": os.fspath(path)
}
self.client.send(msg)
msg, _, _ = self.client.recv()
err = msg.get("error")
if err:
raise RuntimeError(err)
return msg["path"]
def source(self, name: str) -> str:
msg = {
"method": "source",
"name": name
}
self.client.send(msg)
msg, _, _ = self.client.recv()
return msg["path"]
| true | true |
f7fcbb9eb740bad7978d80231501f86bd64b1b37 | 16,726 | py | Python | vjemmie/cogs/avatar_cog.py | PederHA/vjemmie | e3742380d3ea06de90f8227a0934569f8fd02b5c | [
"MIT"
] | 1 | 2018-07-30T02:43:27.000Z | 2018-07-30T02:43:27.000Z | vjemmie/cogs/avatar_cog.py | PederHA/vjemmie | e3742380d3ea06de90f8227a0934569f8fd02b5c | [
"MIT"
] | 5 | 2020-09-20T14:07:28.000Z | 2022-01-13T01:18:23.000Z | vjemmie/cogs/avatar_cog.py | PederHA/vjemmie | e3742380d3ea06de90f8227a0934569f8fd02b5c | [
"MIT"
] | null | null | null | from __future__ import annotations
import io
from itertools import zip_longest
from typing import List, Tuple, Union, Optional, Callable
from unidecode import unidecode
from dataclasses import dataclass, field
from pathlib import Path
from copy import deepcopy
import discord
from discord.ext import commands
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from .base_cog import BaseCog
from ..utils.converters import NonCaseSensMemberConverter, MemberOrURLConverter
from ..utils.commands import add_command
from ..utils.exceptions import CommandError
@dataclass
class Avatar: # Should be a collections.namedtuple or typing.NamedTuple instead tbh
    """Size and position (in pixels) of a user's avatar on a template."""
    w: int # Width
    h: int # Height
    x: int # X Position
    y: int # Y Position
@dataclass
class Text:
    """Represents text to be added to an image.
    `content` always defaults to ctx.message.author.name or user.name if
    not given a value.
    """
    size: int                   # font size passed to ImageFont.truetype
    offset: Tuple[int, int] # x, y
    content: str = ""
    font: str = "LiberationSans-Regular.ttf"  # looked up under memes/fonts/
    color: Tuple[int, int, int, int] = (255, 255, 255, 255) # RGBA
    shadow: bool = False        # render a blurred drop shadow behind the text
    stroke: bool = False        # draw an outline around the text
    stroke_thickness: int = 1
    stroke_color: Tuple[int, int, int, int] = (0, 0, 0, 255)
    upper: bool = False         # uppercase the content before rendering
    center: bool = False # Center text horizontally
    helper: Optional[Callable[[Text], None]] = None  # hook run on the Text before drawing
    def __post_init__(self) -> None:
        # TODO: Check that font exists
        # Check that parameters are correctly formatted and values are within acceptable ranges
        pass
@dataclass
class AvatarCommand:
    """Declarative spec for one avatar command: template, placements, text."""
    name: str                      # name the bot command is registered under
    template: Union[Path, str]     # template image under memes/templates/
    avatars: List[Avatar]          # where the user's avatar is pasted
    help: Optional[str] = None
    aliases: List[str] = field(default_factory=list)
    text: List[Text] = field(default_factory=list)
    template_overlay: bool = False  # paste the template on top of the avatar
async def avatar_command(cog: commands.Cog, ctx: commands.Context, user: NonCaseSensMemberConverter=None, *, command: AvatarCommand) -> None:
    """Generic coroutine backing every generated avatar command."""
    # NOTE: Handle this somewhere else?
    # Operate on a copy so the shared AvatarCommand spec is never mutated.
    cmd = deepcopy(command)
    for text in cmd.text:
        if text.content:
            continue
        # Default text: the ASCII-transliterated name of the target user.
        target = user if user else ctx.message.author
        text.content = unidecode(target.name)
    await cog.make_composite_image(ctx, command=cmd, user=user)
class AvatarCog(BaseCog):
"""Create images featuring a user's avatar."""
EMOJI = ":person_frowning:"
    def __init__(self, bot: commands.Bot) -> None:
        super().__init__(bot)
        # Register every declarative avatar command on cog creation.
        self.add_avatar_commands()
def add_avatar_commands(self) -> None:
for command in avatar_commands:
add_command(
self,
avatar_command,
name=command.name,
aliases=command.aliases,
help=command.help,
command=command
)
    async def make_composite_image(
        self,
        ctx: commands.Context,
        command: AvatarCommand,
        user: Optional[discord.Member]=None,
    ) -> None:
        """Creates a composite image of a user's avatar and a given template.
        Parameters
        ----------
        ctx : `commands.Context`
            Discord Context object
        command : `AvatarCommand`
            Template, avatar placement(s) and text to render.
        user : `discord.Member`, optional
            A Discord user. If specified, this user's avatar is
            downloaded in place of the message author's. May also be a
            raw image URL string — presumably from MemberOrURLConverter;
            TODO confirm.
        """
        # Use message author's avatar if no user is specified
        if not user:
            avatar_url = ctx.message.author.avatar_url
        elif isinstance(user, discord.Member):
            avatar_url = user.avatar_url
        elif isinstance(user, str):
            avatar_url = user
        else:
            raise TypeError("Argument 'user' must be type 'discord.User' or an image URL of type 'str'")
        # Fetch the avatar bytes: via the discord Asset API, or plain download.
        if isinstance(avatar_url, discord.asset.Asset):
            _avatar = io.BytesIO(await avatar_url.read())
        else:
            _avatar = await self.download_from_url(ctx, avatar_url)
        # The compositing is CPU-bound; run it off the event loop.
        result = await self.bot.loop.run_in_executor(
            None,
            self._do_make_composite_image,
            command,
            _avatar
        )
        embed = await self.get_embed_from_img_upload(ctx, result, "out.png")
        await ctx.send(embed=embed)
def _do_make_composite_image(self, command: AvatarCommand, byteavatar: io.BytesIO) -> io.BytesIO:
avatar = Image.open(byteavatar)
tpath = Path(f"memes/templates/{command.template}")
if not tpath.exists():
raise CommandError(f"Template {command.template}")
background = Image.open(tpath, "r")
# Convert template to RGBA
if background.mode == "RGB":
background.putalpha(255) # puts an alpha channel on the image
# Add avatar to template
background = self._add_avatar(background, avatar, command.avatars, command.template_overlay)
# Add text
for txt in command.text:
background = self._add_text(background, txt)
# Save image to file-like object
result = io.BytesIO()
background.save(result, format="PNG")
result.seek(0) # Seek to byte 0, so discord.File can use BytesIO.read()
return result
def _resize_paste(
self,
background: Image.Image,
overlay: Image.Image,
avatar: Avatar
) -> Image.Image:
"""Resizes an image (param `overlay`) and pastes it onto another
image (param `background`)
Parameters
----------
background : `Image.Image`
Image that will have overlay pasted to it
overlay : `Image.Image`
Image to paste on to background
avatar: `Avatar`
Dimensions and position of avatar to paste.
Returns
-------
`Image.Image`
Image background with Image overlay pasted on top of it
"""
overlay = overlay.resize((avatar.w, avatar.h), resample=Image.BICUBIC)
background.paste(overlay, (avatar.x, avatar.y), overlay.convert("RGBA"))
return background
def _add_avatar(self,
                background: Image.Image,
                user_avatar: Image.Image,
                avatars: List[Avatar],
                template_overlay: bool) -> Image.Image:
    """Paste the user's avatar at every configured position.

    With ``template_overlay`` the template stays on top: the avatar is
    drawn onto a transparent canvas which is then composited *under*
    the template. Otherwise the avatar is pasted directly on top.
    """
    for spot in avatars:
        if not template_overlay:
            # Avatar sits on top of the template
            background = self._resize_paste(background, user_avatar, spot)
            continue
        # Template sits on top: paste avatar on a blank canvas first,
        # then composite the template over it.
        canvas = Image.new("RGBA", background.size)
        canvas = self._resize_paste(canvas, user_avatar, spot)
        background = Image.alpha_composite(canvas, background)
    return background
def _add_text(self,
              background: Image.Image,
              text: Text
              ) -> Image.Image:
    """Adds text to an image by creating an alpha composite of a given
    image and one or more generated lines of text.

    Parameters
    ----------
    background : `Image.Image`
        Image to be modified
    text : `Text`
        Text to add on to image. Mutated in place by ``upper`` and by
        the command-specific ``helper`` callback, if any.

    Returns
    -------
    `Image.Image`
        A composite of image `background` and generated text
    """
    if text.upper:
        text.content = text.content.upper()
    if text.helper:
        text.helper(text)  # command-specific size/offset tweaks
    # Get new transparent layer for the text
    _txt = Image.new("RGBA", background.size)
    # Get font
    # NOTE(review): ImageDraw.textsize below was removed in Pillow >= 10
    # (textbbox/textlength replace it) — presumably a Pillow < 10 pin;
    # confirm against the project's requirements.
    font = ImageFont.truetype(f"memes/fonts/{text.font}", text.size)
    # Whether or not to center text determines the value of the text offset
    if text.center:
        w, _ = ImageDraw.Draw(_txt).textsize(text.content, font=font)
        img_w, _ = background.size
        offset = ((img_w-w)/2, text.offset[1])  # horizontally centered
    else:
        offset = text.offset
    # Drop shadow
    if text.shadow:
        _shadow = Image.new("RGBA", background.size)
        s = ImageDraw.Draw(_shadow)
        # Shadow is displaced by 1% of the image dimensions, but never
        # less than 1 px (fixes the old TODO: integer division yielded
        # a 0 px displacement for images smaller than 100 px).
        s.text(
            (
                offset[0] + max(1, background.size[0] // 100),
                offset[1] + max(1, background.size[1] // 100)
            ),
            text.content,
            font=font,
            fill=(0, 0, 0, 92)
        )
        _shadow = _shadow.filter(ImageFilter.BLUR)
        _txt = Image.alpha_composite(_txt, _shadow)
    # Get a drawing context
    d = ImageDraw.Draw(_txt)
    # Add stroke FIRST so the fill sits on top of it
    if text.stroke:
        t = text.stroke_thickness
        for dx, dy in ((-t, -t), (t, -t), (-t, t), (t, t)):
            d.text((offset[0]+dx, offset[1]+dy), text.content, font=font, fill=text.stroke_color)
    d.text(offset, text.content, font=font, fill=text.color)
    # Return alpha composite of background and text
    return Image.alpha_composite(background, _txt)
def _allmyhomies_helper(text: Text) -> None:
text.offset = (
(
text.offset[0],
int(text.offset[1] - 20 + len(text.content)**1.5)
)
)
text.size = 580 // len(text.content) if len(text.content) > 5 else 70
def _threat_helper(text: Text) -> None:
text.size = round((30 / len(text.content)) * 8.5)
if text.size > 40:
text.size = 40
text.offset = (
text.offset[0] + (text.size - (len(text.content)*5) if text.size > 30 else 0),
text.offset[1] - (round(text.size/20) if text.size > 30 else -4)
)
def _banter_helper(text: Text) -> None:
if len(text.content) > 13:
text.content = text.content[:10] + "..."
avatar_commands = [
AvatarCommand(
name="fuckup",
aliases=["nasa"],
template="nasa.jpg",
help="https://i.imgur.com/xWlh36n.jpg",
avatars=[Avatar(w=100, h=100, x=347, y=403)]
),
AvatarCommand(
name="cancer",
template="cancer.jpg",
help="https://i.imgur.com/vDtktIq.jpg",
avatars=[Avatar(w=762, h=740, x=772, y=680)],
),
AvatarCommand(
name="northkorea",
template="northkorea1.jpg",
help="https://i.imgur.com/PiqzXNs.jpg",
avatars=[Avatar(w=295, h=295, x=712, y=195)],
),
AvatarCommand(
name="mlady",
template="mlady.png",
help="https://i.imgur.com/2LQkErQ.png",
avatars=[Avatar(w=275, h=275, x=86, y=78)],
template_overlay=True
),
AvatarCommand(
name="mlady2",
template="mlady2.png",
help="https://i.imgur.com/2LQkErQ.png",
avatars=[Avatar(w=200, h=200, x=161, y=101)],
template_overlay=True
),
AvatarCommand(
name="loud",
template="loud.jpg",
help="https://i.imgur.com/y7y7MRt.jpg",
avatars=[Avatar(w=190, h=190, x=556, y=445)],
),
AvatarCommand(
name="guys",
template="guyswant.jpg",
help="https://i.imgur.com/5oUe8VN.jpg",
avatars=[Avatar(w=400, h=400, x=121, y=347)],
),
AvatarCommand(
name="furry",
template="furry.png",
help="https://i.imgur.com/Jq3uu02.png",
avatars=[Avatar(w=230, h=230, x=26, y=199)],
template_overlay=True
),
AvatarCommand(
name="autism",
template="autism.jpg",
help="https://i.imgur.com/HcjIbpP.jpg",
avatars=[Avatar(w=303, h=255, x=0, y=512)],
),
AvatarCommand(
name="autism2",
template="autism2.jpg",
help="https://i.imgur.com/6lxlqPk.jpg",
avatars=[
Avatar(w=73, h=73, x=15, y=1),
Avatar(w=73, h=73, x=15, y=551),
Avatar(w=46, h=46, x=123, y=709)
],
text=[
Text(
size=20,
offset=(96, 0),
font="LiberationSans-Regular.ttf",
color=(0, 0, 0, 255)
),
Text(
size=20,
offset=(96, 551),
font="LiberationSans-Regular.ttf",
color=(0, 0, 0, 255)
)
],
),
AvatarCommand(
name="disabled",
template="disabled.jpg",
help="https://i.imgur.com/hZSghxu.jpg",
avatars=[Avatar(w=320, h=320, x=736, y=794)],
),
AvatarCommand(
name="fatfuck",
template="fatfuck.jpg",
help="https://i.imgur.com/Vbkfu4u.jpg",
avatars=[Avatar(w=385, h=385, x=67, y=0)],
),
AvatarCommand(
name="saxophone",
template="saxophone.png",
help="https://i.imgur.com/Gfw036Q.png",
avatars=[
Avatar(w=366, h=358, x=0, y=0),
Avatar(w=366, h=358, x=0, y=361)
],
template_overlay=True
),
AvatarCommand(
name="fingercircle",
template="fingercircle.jpg",
help="https://i.imgur.com/HnpJkvB.jpg",
avatars=[Avatar(w=251, h=278, x=317, y=20)],
),
AvatarCommand(
name="lordofthepit",
template="lordofthepit.jpg",
help="https://i.imgur.com/IRntn02.jpg",
avatars=[Avatar(w=559, h=410, x=57, y=110)],
),
AvatarCommand(
name="bigounce",
template="bigounce.png",
help="https://i.imgur.com/apDeSO6.jpg",
avatars=[Avatar(w=504, h=504, x=0, y=0)],
template_overlay=True
),
AvatarCommand(
name="bigounce2",
template="bigounce2.png",
help="https://i.imgur.com/apDeSO6.jpg",
avatars=[Avatar(w=504, h=504, x=0, y=0)],
template_overlay=True,
text=[
Text(
size=31,
offset=(194, 431),
font="Cocogoose Pro-trial.ttf",
color=(234, 246, 247, 255),
shadow=True,
upper=True
)
]
),
AvatarCommand(
name="allmyhomies",
template="allmyhomies.jpg",
help="https://i.imgur.com/7jxk8Qd.jpg",
avatars=[Avatar(w=200, h=200, x=275, y=240)],
#template_overlay=True, # hide avatar
text=[
Text(
size=70,
offset=(325, 0),
font="Cocogoose Pro-trial.ttf",
color=(237, 221, 208, 255),
shadow=True,
stroke=True,
stroke_thickness=3,
upper=True,
helper=_allmyhomies_helper
),
Text(
size=80,
offset=(160, 560),
font="Cocogoose Pro-trial.ttf",
color=(237, 221, 208, 255),
shadow=True,
stroke=True,
stroke_thickness=3,
upper=True,
center=True
)
]
),
AvatarCommand(
name="threat",
template="threat.jpg",
help="https://i.imgur.com/7jxk8Qd.jpg",
avatars=[Avatar(w=195, h=177, x=0, y=157)],
#template_overlay=True, # hide avatar
text=[
Text(
size=30,
offset=(11, 333),
font="Cocogoose Pro-trial.ttf",
color=(0, 0, 0, 255),
upper=True,
helper=_threat_helper
),
]
),
AvatarCommand(
name="banter",
template="banter.jpg",
help="https://i.imgur.com/7jxk8Qd.jpg",
avatars=[Avatar(w=190, h=267, x=12, y=80)],
#template_overlay=True, # hide avatar
text=[
Text(
size=7,
offset=(50, 352),
font="LiberationSans-Regular.ttf",
color=(0, 0, 0, 255),
upper=True,
helper=_banter_helper
),
]
),
] | 32.352031 | 141 | 0.537128 | from __future__ import annotations
import io
from itertools import zip_longest
from typing import List, Tuple, Union, Optional, Callable
from unidecode import unidecode
from dataclasses import dataclass, field
from pathlib import Path
from copy import deepcopy
import discord
from discord.ext import commands
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from .base_cog import BaseCog
from ..utils.converters import NonCaseSensMemberConverter, MemberOrURLConverter
from ..utils.commands import add_command
from ..utils.exceptions import CommandError
@dataclass
class Avatar:
w: int
h: int
x: int
y: int
@dataclass
class Text:
size: int
offset: Tuple[int, int]
content: str = ""
font: str = "LiberationSans-Regular.ttf"
color: Tuple[int, int, int, int] = (255, 255, 255, 255)
shadow: bool = False
stroke: bool = False
stroke_thickness: int = 1
stroke_color: Tuple[int, int, int, int] = (0, 0, 0, 255)
upper: bool = False
center: bool = False
helper: Optional[Callable[[Text], None]] = None
def __post_init__(self) -> None:
pass
@dataclass
class AvatarCommand:
name: str
template: Union[Path, str]
avatars: List[Avatar]
help: Optional[str] = None
aliases: List[str] = field(default_factory=list)
text: List[Text] = field(default_factory=list)
template_overlay: bool = False
async def avatar_command(cog: commands.Cog, ctx: commands.Context, user: NonCaseSensMemberConverter=None, *, command: AvatarCommand) -> None:
cmd = deepcopy(command)
for text in cmd.text:
if not text.content:
text.content = unidecode(user.name if user else ctx.message.author.name)
await cog.make_composite_image(
ctx,
command=cmd,
user=user,
)
class AvatarCog(BaseCog):
EMOJI = ":person_frowning:"
def __init__(self, bot: commands.Bot) -> None:
super().__init__(bot)
self.add_avatar_commands()
def add_avatar_commands(self) -> None:
for command in avatar_commands:
add_command(
self,
avatar_command,
name=command.name,
aliases=command.aliases,
help=command.help,
command=command
)
async def make_composite_image(
self,
ctx: commands.Context,
command: AvatarCommand,
user: Optional[discord.Member]=None,
) -> None:
if not user:
avatar_url = ctx.message.author.avatar_url
elif isinstance(user, discord.Member):
avatar_url = user.avatar_url
elif isinstance(user, str):
avatar_url = user
else:
raise TypeError("Argument 'user' must be type 'discord.User' or an image URL of type 'str'")
if isinstance(avatar_url, discord.asset.Asset):
_avatar = io.BytesIO(await avatar_url.read())
else:
_avatar = await self.download_from_url(ctx, avatar_url)
result = await self.bot.loop.run_in_executor(
None,
self._do_make_composite_image,
command,
_avatar
)
embed = await self.get_embed_from_img_upload(ctx, result, "out.png")
await ctx.send(embed=embed)
def _do_make_composite_image(self, command: AvatarCommand, byteavatar: io.BytesIO) -> io.BytesIO:
avatar = Image.open(byteavatar)
tpath = Path(f"memes/templates/{command.template}")
if not tpath.exists():
raise CommandError(f"Template {command.template}")
background = Image.open(tpath, "r")
# Convert template to RGBA
if background.mode == "RGB":
background.putalpha(255) # puts an alpha channel on the image
# Add avatar to template
background = self._add_avatar(background, avatar, command.avatars, command.template_overlay)
# Add text
for txt in command.text:
background = self._add_text(background, txt)
# Save image to file-like object
result = io.BytesIO()
background.save(result, format="PNG")
result.seek(0) # Seek to byte 0, so discord.File can use BytesIO.read()
return result
def _resize_paste(
self,
background: Image.Image,
overlay: Image.Image,
avatar: Avatar
) -> Image.Image:
overlay = overlay.resize((avatar.w, avatar.h), resample=Image.BICUBIC)
background.paste(overlay, (avatar.x, avatar.y), overlay.convert("RGBA"))
return background
def _add_avatar(self,
background: Image.Image,
user_avatar: Image.Image,
avatars: List[Avatar],
template_overlay: bool) -> Image.Image:
# Paste user avatars
for av in avatars:
# Template goes on top of image
if template_overlay:
new = Image.new("RGBA", background.size)
new = self._resize_paste(new, user_avatar, av)
background = Image.alpha_composite(new, background)
else: # Image goes on top of template
background = self._resize_paste(background, user_avatar, av)
return background
def _add_text(self,
background: Image.Image,
text: Text
) -> Image.Image:
if text.upper:
text.content = text.content.upper()
if text.helper:
text.helper(text)
# Get new image
_txt = Image.new("RGBA", background.size)
# Get font
font = ImageFont.truetype(f"memes/fonts/{text.font}", text.size)
# Whether or not to center text determines the value of the text offset
if text.center:
w, _ = ImageDraw.Draw(_txt).textsize(text.content, font=font)
img_w, _ = background.size
offset = ((img_w-w)/2, text.offset[1]) # how ugly is this dude
else:
offset = text.offset
# Drop shadow
if text.shadow:
_shadow = Image.new("RGBA", background.size)
s = ImageDraw.Draw(_shadow)
s.text(
(
# Offset + 1% of width/height of image
# TODO: If result of integer divison is 0,
# set value to 1.
offset[0]+(background.size[0]//100),
offset[1]+(background.size[1]//100)
),
text.content,
font=font,
fill=(0, 0, 0, 92)
)
_shadow = _shadow.filter(ImageFilter.BLUR)
_txt = Image.alpha_composite(_txt, _shadow)
# Get a drawing context
d = ImageDraw.Draw(_txt)
# Add stroke FIRST
if text.stroke:
t = text.stroke_thickness
d.text((offset[0]-t, offset[1]-t), text.content, font=font, fill=text.stroke_color)
d.text((offset[0]+t, offset[1]-t), text.content, font=font, fill=text.stroke_color)
d.text((offset[0]-t, offset[1]+t), text.content, font=font, fill=text.stroke_color)
d.text((offset[0]+t, offset[1]+t), text.content, font=font, fill=text.stroke_color)
d.text(offset, text.content, font=font, fill=text.color)
# Return alpha composite of background and text
return Image.alpha_composite(background, _txt)
def _allmyhomies_helper(text: Text) -> None:
text.offset = (
(
text.offset[0],
int(text.offset[1] - 20 + len(text.content)**1.5)
)
)
text.size = 580 // len(text.content) if len(text.content) > 5 else 70
def _threat_helper(text: Text) -> None:
text.size = round((30 / len(text.content)) * 8.5)
if text.size > 40:
text.size = 40
text.offset = (
text.offset[0] + (text.size - (len(text.content)*5) if text.size > 30 else 0),
text.offset[1] - (round(text.size/20) if text.size > 30 else -4)
)
def _banter_helper(text: Text) -> None:
if len(text.content) > 13:
text.content = text.content[:10] + "..."
avatar_commands = [
AvatarCommand(
name="fuckup",
aliases=["nasa"],
template="nasa.jpg",
help="https://i.imgur.com/xWlh36n.jpg",
avatars=[Avatar(w=100, h=100, x=347, y=403)]
),
AvatarCommand(
name="cancer",
template="cancer.jpg",
help="https://i.imgur.com/vDtktIq.jpg",
avatars=[Avatar(w=762, h=740, x=772, y=680)],
),
AvatarCommand(
name="northkorea",
template="northkorea1.jpg",
help="https://i.imgur.com/PiqzXNs.jpg",
avatars=[Avatar(w=295, h=295, x=712, y=195)],
),
AvatarCommand(
name="mlady",
template="mlady.png",
help="https://i.imgur.com/2LQkErQ.png",
avatars=[Avatar(w=275, h=275, x=86, y=78)],
template_overlay=True
),
AvatarCommand(
name="mlady2",
template="mlady2.png",
help="https://i.imgur.com/2LQkErQ.png",
avatars=[Avatar(w=200, h=200, x=161, y=101)],
template_overlay=True
),
AvatarCommand(
name="loud",
template="loud.jpg",
help="https://i.imgur.com/y7y7MRt.jpg",
avatars=[Avatar(w=190, h=190, x=556, y=445)],
),
AvatarCommand(
name="guys",
template="guyswant.jpg",
help="https://i.imgur.com/5oUe8VN.jpg",
avatars=[Avatar(w=400, h=400, x=121, y=347)],
),
AvatarCommand(
name="furry",
template="furry.png",
help="https://i.imgur.com/Jq3uu02.png",
avatars=[Avatar(w=230, h=230, x=26, y=199)],
template_overlay=True
),
AvatarCommand(
name="autism",
template="autism.jpg",
help="https://i.imgur.com/HcjIbpP.jpg",
avatars=[Avatar(w=303, h=255, x=0, y=512)],
),
AvatarCommand(
name="autism2",
template="autism2.jpg",
help="https://i.imgur.com/6lxlqPk.jpg",
avatars=[
Avatar(w=73, h=73, x=15, y=1),
Avatar(w=73, h=73, x=15, y=551),
Avatar(w=46, h=46, x=123, y=709)
],
text=[
Text(
size=20,
offset=(96, 0),
font="LiberationSans-Regular.ttf",
color=(0, 0, 0, 255)
),
Text(
size=20,
offset=(96, 551),
font="LiberationSans-Regular.ttf",
color=(0, 0, 0, 255)
)
],
),
AvatarCommand(
name="disabled",
template="disabled.jpg",
help="https://i.imgur.com/hZSghxu.jpg",
avatars=[Avatar(w=320, h=320, x=736, y=794)],
),
AvatarCommand(
name="fatfuck",
template="fatfuck.jpg",
help="https://i.imgur.com/Vbkfu4u.jpg",
avatars=[Avatar(w=385, h=385, x=67, y=0)],
),
AvatarCommand(
name="saxophone",
template="saxophone.png",
help="https://i.imgur.com/Gfw036Q.png",
avatars=[
Avatar(w=366, h=358, x=0, y=0),
Avatar(w=366, h=358, x=0, y=361)
],
template_overlay=True
),
AvatarCommand(
name="fingercircle",
template="fingercircle.jpg",
help="https://i.imgur.com/HnpJkvB.jpg",
avatars=[Avatar(w=251, h=278, x=317, y=20)],
),
AvatarCommand(
name="lordofthepit",
template="lordofthepit.jpg",
help="https://i.imgur.com/IRntn02.jpg",
avatars=[Avatar(w=559, h=410, x=57, y=110)],
),
AvatarCommand(
name="bigounce",
template="bigounce.png",
help="https://i.imgur.com/apDeSO6.jpg",
avatars=[Avatar(w=504, h=504, x=0, y=0)],
template_overlay=True
),
AvatarCommand(
name="bigounce2",
template="bigounce2.png",
help="https://i.imgur.com/apDeSO6.jpg",
avatars=[Avatar(w=504, h=504, x=0, y=0)],
template_overlay=True,
text=[
Text(
size=31,
offset=(194, 431),
font="Cocogoose Pro-trial.ttf",
color=(234, 246, 247, 255),
shadow=True,
upper=True
)
]
),
AvatarCommand(
name="allmyhomies",
template="allmyhomies.jpg",
help="https://i.imgur.com/7jxk8Qd.jpg",
avatars=[Avatar(w=200, h=200, x=275, y=240)],
#template_overlay=True, # hide avatar
text=[
Text(
size=70,
offset=(325, 0),
font="Cocogoose Pro-trial.ttf",
color=(237, 221, 208, 255),
shadow=True,
stroke=True,
stroke_thickness=3,
upper=True,
helper=_allmyhomies_helper
),
Text(
size=80,
offset=(160, 560),
font="Cocogoose Pro-trial.ttf",
color=(237, 221, 208, 255),
shadow=True,
stroke=True,
stroke_thickness=3,
upper=True,
center=True
)
]
),
AvatarCommand(
name="threat",
template="threat.jpg",
help="https://i.imgur.com/7jxk8Qd.jpg",
avatars=[Avatar(w=195, h=177, x=0, y=157)],
#template_overlay=True, # hide avatar
text=[
Text(
size=30,
offset=(11, 333),
font="Cocogoose Pro-trial.ttf",
color=(0, 0, 0, 255),
upper=True,
helper=_threat_helper
),
]
),
AvatarCommand(
name="banter",
template="banter.jpg",
help="https://i.imgur.com/7jxk8Qd.jpg",
avatars=[Avatar(w=190, h=267, x=12, y=80)],
#template_overlay=True, # hide avatar
text=[
Text(
size=7,
offset=(50, 352),
font="LiberationSans-Regular.ttf",
color=(0, 0, 0, 255),
upper=True,
helper=_banter_helper
),
]
),
] | true | true |
f7fcbbebf4f5c66c6b9da2023edb02c5e07bddd0 | 293 | py | Python | src/settings/development.py | cstenkamp/django_server_template | a4646cffd2b96cea239924290fdbff02f8a37db7 | [
"CC0-1.0"
] | null | null | null | src/settings/development.py | cstenkamp/django_server_template | a4646cffd2b96cea239924290fdbff02f8a37db7 | [
"CC0-1.0"
] | null | null | null | src/settings/development.py | cstenkamp/django_server_template | a4646cffd2b96cea239924290fdbff02f8a37db7 | [
"CC0-1.0"
] | null | null | null | import os
from settings.settings_base import *
from settings.settings_base import BASE_DIR
# Development-only overrides layered on top of settings_base (star-imported
# above). Applied when this module is selected as DJANGO_SETTINGS_MODULE.

# Hosts Django will serve during local development.
ALLOWED_HOSTS = ["0.0.0.0", "127.0.0.1", "localhost"]
# Enable debug pages and verbose errors — development only, never production.
DEBUG = True
# Destination for `collectstatic`, rooted at the project BASE_DIR.
STATIC_ROOT = os.path.join(BASE_DIR, "django_data", "collected_static")
# Storage location for user-uploaded media files.
MEDIA_ROOT = os.path.join(BASE_DIR, "django_data", "media")
| 26.636364 | 71 | 0.74744 | import os
from settings.settings_base import *
from settings.settings_base import BASE_DIR
ALLOWED_HOSTS = ["0.0.0.0", "127.0.0.1", "localhost"]
DEBUG = True
STATIC_ROOT = os.path.join(BASE_DIR, "django_data", "collected_static")
MEDIA_ROOT = os.path.join(BASE_DIR, "django_data", "media")
| true | true |
f7fcbc8a88878fe34e97666aa553e01193169af6 | 6,558 | py | Python | sdk/python/pulumi_azure_nextgen/apimanagement/v20191201/certificate.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/apimanagement/v20191201/certificate.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/apimanagement/v20191201/certificate.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Certificate']
class Certificate(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate_id: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Certificate details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] certificate_id: Identifier of the certificate entity. Must be unique in the current API Management service instance.
:param pulumi.Input[str] data: Base 64 encoded certificate using the application/x-pkcs12 representation.
:param pulumi.Input[str] password: Password for the Certificate
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['certificate_id'] = certificate_id
if data is None and not opts.urn:
raise TypeError("Missing required property 'data'")
__props__['data'] = data
if password is None and not opts.urn:
raise TypeError("Missing required property 'password'")
__props__['password'] = password
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['expiration_date'] = None
__props__['name'] = None
__props__['subject'] = None
__props__['thumbprint'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Certificate")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Certificate, __self__).__init__(
'azure-nextgen:apimanagement/v20191201:Certificate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Certificate':
"""
Get an existing Certificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Certificate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="expirationDate")
def expiration_date(self) -> pulumi.Output[str]:
"""
Expiration date of the certificate. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "expiration_date")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def subject(self) -> pulumi.Output[str]:
"""
Subject attribute of the certificate.
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter
def thumbprint(self) -> pulumi.Output[str]:
"""
Thumbprint of the certificate.
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.541667 | 791 | 0.65584 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Certificate']
class Certificate(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate_id: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['certificate_id'] = certificate_id
if data is None and not opts.urn:
raise TypeError("Missing required property 'data'")
__props__['data'] = data
if password is None and not opts.urn:
raise TypeError("Missing required property 'password'")
__props__['password'] = password
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['expiration_date'] = None
__props__['name'] = None
__props__['subject'] = None
__props__['thumbprint'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Certificate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Certificate")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Certificate, __self__).__init__(
'azure-nextgen:apimanagement/v20191201:Certificate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Certificate':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Certificate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="expirationDate")
def expiration_date(self) -> pulumi.Output[str]:
return pulumi.get(self, "expiration_date")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def subject(self) -> pulumi.Output[str]:
return pulumi.get(self, "subject")
@property
@pulumi.getter
def thumbprint(self) -> pulumi.Output[str]:
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7fcbe2240d5caa8ef1fa76eb36343404f02ac9f | 3,326 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/distutils/tests/test_misc_util.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 145 | 2017-01-19T23:33:03.000Z | 2021-06-05T05:34:55.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/distutils/tests/test_misc_util.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 17 | 2017-02-03T20:51:39.000Z | 2020-05-21T11:33:52.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/distutils/tests/test_misc_util.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 44 | 2017-02-04T19:40:03.000Z | 2020-10-01T19:24:19.000Z | from __future__ import division, absolute_import, print_function
from os.path import join, sep, dirname
from numpy.distutils.misc_util import (
appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal
)
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath(TestCase):
    """Exercise ``appendpath`` with relative and absolute prefixes."""

    def test_1(self):
        # Leading separators on either argument are normalized away.
        cases = [
            (('prefix', 'name'), join('prefix', 'name')),
            (('/prefix', 'name'), ajoin('prefix', 'name')),
            (('/prefix', '/name'), ajoin('prefix', 'name')),
            (('prefix', '/name'), join('prefix', 'name')),
        ]
        for args, expected in cases:
            assert_equal(appendpath(*args), expected)

    def test_2(self):
        # Multi-component prefixes; a shared leading component is dropped.
        cases = [
            (('prefix/sub', 'name'), join('prefix', 'sub', 'name')),
            (('prefix/sub', 'sup/name'), join('prefix', 'sub', 'sup', 'name')),
            (('/prefix/sub', '/prefix/name'), ajoin('prefix', 'sub', 'name')),
        ]
        for args, expected in cases:
            assert_equal(appendpath(*args), expected)

    def test_3(self):
        # Deeper absolute paths with partially overlapping components.
        cases = [
            (('/prefix/sub', '/prefix/sup/name'),
             ajoin('prefix', 'sub', 'sup', 'name')),
            (('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
             ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')),
            (('/prefix/sub/sub2', '/prefix/sub/sup/name'),
             ajoin('prefix', 'sub', 'sub2', 'sup', 'name')),
        ]
        for args, expected in cases:
            assert_equal(appendpath(*args), expected)
class TestMinrelpath(TestCase):
    """Exercise ``minrelpath`` collapsing of '.' and '..' components."""

    def test_1(self):
        def native(path):
            # Rewrite '/'-separated fixtures with the platform separator.
            return path.replace('/', sep)

        expectations = [
            ('aa/bb', 'aa/bb'),
            ('..', '..'),
            ('aa/..', ''),
            ('aa/../bb', 'bb'),
            ('aa/bb/..', 'aa'),
            ('aa/bb/../..', ''),
            ('aa/bb/../cc/../dd', 'aa/dd'),
            ('.././..', '../..'),
            ('aa/bb/.././../dd', 'dd'),
        ]
        for raw, reduced in expectations:
            assert_equal(minrelpath(native(raw)), native(reduced))
class TestGpaths(TestCase):
    """Exercise ``gpaths`` pattern expansion relative to a local path."""

    def test_gpaths(self):
        local_path = minrelpath(join(dirname(__file__), '..'))
        # Glob patterns expand to matching files under local_path.
        expanded = gpaths('command/*.py', local_path)
        assert_(join(local_path, 'command', 'build_src.py') in expanded,
                repr(expanded))
        # Plain filenames pass through, anchored at local_path.
        matched = gpaths('system_info.py', local_path)
        assert_(join(local_path, 'system_info.py') == matched[0], repr(matched))
class TestSharedExtension(TestCase):
    """Tests for misc_util.get_shared_lib_extension()."""

    def test_get_shared_lib_extension(self):
        import sys
        ext = get_shared_lib_extension(is_python_ext=False)
        # GNU/kFreeBSD uses ELF '.so' exactly like Linux, so the two
        # previously duplicated branches are folded into one tuple test.
        if sys.platform.startswith(('linux', 'gnukfreebsd')):
            assert_equal(ext, '.so')
        elif sys.platform.startswith('darwin'):
            assert_equal(ext, '.dylib')
        elif sys.platform.startswith('win'):
            assert_equal(ext, '.dll')
        # just check for no crash
        assert_(get_shared_lib_extension(is_python_ext=True))
def test_installed_npymath_ini():
    """Regression test for gh-7707: if npymath.ini was not installed
    alongside numpy, the lookup below raises an error."""
    # The call itself is the assertion; the return value is not needed,
    # so the previously unused local variable is gone.
    get_info('npymath')
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    run_module_suite()
| 38.674419 | 78 | 0.601323 | from __future__ import division, absolute_import, print_function
from os.path import join, sep, dirname
from numpy.distutils.misc_util import (
appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal
)
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath(TestCase):
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))
def test_2(self):
assert_equal(appendpath('prefix/sub', 'name'),
join('prefix', 'sub', 'name'))
assert_equal(appendpath('prefix/sub', 'sup/name'),
join('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub', '/prefix/name'),
ajoin('prefix', 'sub', 'name'))
def test_3(self):
assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
ajoin('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
class TestMinrelpath(TestCase):
def test_1(self):
n = lambda path: path.replace('/', sep)
assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
assert_equal(minrelpath('..'), '..')
assert_equal(minrelpath(n('aa/..')), '')
assert_equal(minrelpath(n('aa/../bb')), 'bb')
assert_equal(minrelpath(n('aa/bb/..')), 'aa')
assert_equal(minrelpath(n('aa/bb/../..')), '')
assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
class TestGpaths(TestCase):
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
ls = gpaths('command/*.py', local_path)
assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py') == f[0], repr(f))
class TestSharedExtension(TestCase):
def test_get_shared_lib_extension(self):
import sys
ext = get_shared_lib_extension(is_python_ext=False)
if sys.platform.startswith('linux'):
assert_equal(ext, '.so')
elif sys.platform.startswith('gnukfreebsd'):
assert_equal(ext, '.so')
elif sys.platform.startswith('darwin'):
assert_equal(ext, '.dylib')
elif sys.platform.startswith('win'):
assert_equal(ext, '.dll')
assert_(get_shared_lib_extension(is_python_ext=True))
def test_installed_npymath_ini():
# will give an error.
info = get_info('npymath')
if __name__ == "__main__":
run_module_suite()
| true | true |
f7fcbef729a96d7105aa13dece74b9d07836dbb8 | 1,090 | py | Python | debug_toolbar/panels/__init__.py | paltman/django-debug-toolbar | 36be38b422ab630abacc46fb2f4b2924337d88bc | [
"BSD-3-Clause"
] | 2 | 2015-12-31T08:11:21.000Z | 2016-05-08T14:54:14.000Z | debug_toolbar/panels/__init__.py | paltman/django-debug-toolbar | 36be38b422ab630abacc46fb2f4b2924337d88bc | [
"BSD-3-Clause"
] | null | null | null | debug_toolbar/panels/__init__.py | paltman/django-debug-toolbar | 36be38b422ab630abacc46fb2f4b2924337d88bc | [
"BSD-3-Clause"
] | 2 | 2015-12-31T08:11:23.000Z | 2019-08-02T13:27:12.000Z | """Base DebugPanel class"""
class DebugPanel(object):
    """
    Base class for debug panels.

    Subclasses are expected to define a ``name`` attribute and override
    the accessors (``title``, ``url``, ``content``, ``_to_data``),
    setting ``has_content`` to True when they render their own markup.
    """

    has_content = False  # subclasses flip this when content() yields output

    def __init__(self):
        pass

    # -- Panel methods --------------------------------------------------

    def dom_id(self):
        """DOM id for this panel, derived from its display name."""
        return 'djDebug{0}Panel'.format(self.name.replace(' ', ''))

    def title(self):
        raise NotImplementedError

    def url(self):
        raise NotImplementedError

    def content(self):
        raise NotImplementedError

    def _to_data(self):
        raise NotImplementedError

    @property
    def data(self):
        """Use this property instead of self._to_data() to avoid
        double-escaping of processed data - eg, Pygmentizing twice."""
        try:
            return self._data
        except AttributeError:
            self._data = self._to_data()
            return self._data

    # -- Standard middleware methods ------------------------------------

    def process_request(self, request):
        pass

    def process_view(self, request, view_func, view_args, view_kwargs):
        pass

    def process_response(self, request, response):
        pass
class DebugPanel(object):
has_content = False
def __init__(self):
pass
def dom_id(self):
return 'djDebug%sPanel' % (self.name.replace(' ', ''))
def title(self):
raise NotImplementedError
def url(self):
raise NotImplementedError
def content(self):
raise NotImplementedError
def _to_data(self):
raise NotImplementedError
@property
def data(self):
if not hasattr(self, "_data"):
self._data = self._to_data()
return self._data
def process_request(self, request):
pass
def process_view(self, request, view_func, view_args, view_kwargs):
pass
def process_response(self, request, response):
pass
| true | true |
f7fcbf0eff30e2601a0ae640091715a2143a832d | 1,452 | py | Python | qstrader/alpha_model/fixed_signals.py | Dynami/qstrader-1 | 394f19d0ca96a1ee6e0e222f44e571f31ad815a8 | [
"MIT"
] | null | null | null | qstrader/alpha_model/fixed_signals.py | Dynami/qstrader-1 | 394f19d0ca96a1ee6e0e222f44e571f31ad815a8 | [
"MIT"
] | null | null | null | qstrader/alpha_model/fixed_signals.py | Dynami/qstrader-1 | 394f19d0ca96a1ee6e0e222f44e571f31ad815a8 | [
"MIT"
] | null | null | null | from qstrader.data.backtest_data_handler import DataHandler
from qstrader.asset.universe.universe import Universe
from qstrader.alpha_model.alpha_model import AlphaModel
class FixedSignalsAlphaModel(AlphaModel):
    """
    A simple AlphaModel that provides a single scalar forecast
    value for each Asset in the Universe.

    Parameters
    ----------
    signal_weights : `dict{str: float}`
        The signal weights per asset symbol.
    universe : `Universe`, optional
        The Assets to make signal forecasts for.
    data_handler : `DataHandler`, optional
        An optional DataHandler used to preserve interface across AlphaModels.
    """
    def __init__(
        self,
        signal_weights,
        universe: Universe = None,
        data_handler: DataHandler = None
    ):
        self.signal_weights = signal_weights
        self.universe = universe
        self.data_handler = data_handler
        # Not populated by this model; presumably kept for interface
        # parity with other alpha models -- confirm before removing.
        self.signals = None
    def __call__(self, dt, universe: Universe):
        """
        Produce the dictionary of fixed scalar signals for
        each of the Asset instances within the Universe.

        Note: both arguments are accepted only for interface
        compatibility -- the fixed weights are returned unchanged.

        Parameters
        ----------
        dt : `pd.Timestamp`
            The time 'now' used to obtain appropriate data and universe
            for the signals.
        universe : `Universe`
            Ignored by this model, since the weights are fixed.

        Returns
        -------
        `dict{str: float}`
            The Asset symbol keyed scalar-valued signals.
        """
        return self.signal_weights
| 29.632653 | 78 | 0.646006 | from qstrader.data.backtest_data_handler import DataHandler
from qstrader.asset.universe.universe import Universe
from qstrader.alpha_model.alpha_model import AlphaModel
class FixedSignalsAlphaModel(AlphaModel):
def __init__(
self,
signal_weights,
universe:Universe=None,
data_handler:DataHandler=None
):
self.signal_weights = signal_weights
self.universe = universe
self.data_handler = data_handler
self.signals = None
def __call__(self, dt, universe:Universe):
return self.signal_weights
| true | true |
f7fcbf4b327a62a52b1b53edb544b114d98f5ada | 2,898 | py | Python | gen_3/model.py | Horki/CarND-Behavioral-Cloning-P3 | d6c7a3d35749f4e995fb14d38bf370755b60a466 | [
"MIT"
] | null | null | null | gen_3/model.py | Horki/CarND-Behavioral-Cloning-P3 | d6c7a3d35749f4e995fb14d38bf370755b60a466 | [
"MIT"
] | null | null | null | gen_3/model.py | Horki/CarND-Behavioral-Cloning-P3 | d6c7a3d35749f4e995fb14d38bf370755b60a466 | [
"MIT"
] | null | null | null | import csv
import numpy as np
from scipy import ndimage
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
# Dataset location and expected camera-frame geometry.
DATA_PATH='../data'
# DATA_PATH='../behavioral_data/all'
DRIVING_LOG='driving_log.csv'
IMG_WIDTH=320
IMG_HEIGHT=160
IMG_COMPONENTS=3
def load_data(path):
    """Read a driving-log CSV and return its data rows.

    The header row is skipped; each remaining row is expected to hold
    seven columns (center, left, right, steering, throttle, brake, speed).

    Parameters
    ----------
    path : str
        Path of the driving_log.csv file.

    Returns
    -------
    list of list of str
        One entry per data row.
    """
    with open(path, "r") as f:
        # Udacity sample data uses ", " between fields, so strip the
        # whitespace that follows each delimiter.
        reader = csv.reader(f, skipinitialspace=True, delimiter=',')
        next(reader, None)  # skip the header row (no-op on an empty file)
        lines = list(reader)
    # Fail with clear messages instead of StopIteration / IndexError.
    assert lines, "no data rows found in %s" % path
    assert len(lines[0]) == 7, "expected 7 columns, got %d" % len(lines[0])
    return lines
def load_image(image_path):
    """Load one camera frame referenced by a driving-log path.

    Only the basename is kept, so logs recorded on another machine --
    including Windows logs with backslash separators -- still resolve
    against the local DATA_PATH/IMG directory.
    """
    # Normalize separators first: simulator logs recorded on Windows
    # contain backslashes that a plain '/'-split would not break on.
    filename = image_path.replace('\\', '/').split('/')[-1]
    image = ndimage.imread('{}/IMG/{}'.format(DATA_PATH, filename))
    # Per-image shape check deliberately omitted: it only slows loading.
    # assert image.shape == (IMG_HEIGHT, IMG_WIDTH, IMG_COMPONENTS)
    return image
# Most basic neural network
def load_model():
    """Build and compile the steering-angle regression network.

    Returns a small ConvNet: two 5x5 conv + max-pool stages followed by
    a 128-84-1 dense head, compiled with MSE loss and the Adam optimizer.
    """
    model = Sequential()
    # Preprocessing layer
    # Normalize the image by dividing each element with 255,
    # which is the maximum value of an image pixel.
    # Once the image is normalized between 0 and 1,
    # mean centre by subtracting 0.5 from each element,
    # which shifts the values from [0, 1] to [-0.5, 0.5].
    # Training and validation loss are now much smaller
    print("Lambda preprocessing start...")
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_COMPONENTS)))
    print("...end preprocessing")
    model.add(Convolution2D(6, 5, 5, activation='relu'))
    model.add(MaxPooling2D())
    model.add(Convolution2D(6, 5, 5, activation='relu'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Dense(84))
    # Single linear output: the predicted steering angle.
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    return model
def get_train_data(samples):
    """Build the (features, labels) training arrays from log rows.

    Parameters
    ----------
    samples : list of list of str
        Parsed driving-log rows (see load_data).

    Returns
    -------
    (np.ndarray, np.ndarray)
        Center-camera images and their steering angles.
    """
    # Column 0 holds the center-camera image path, column 3 the steering
    # measurement; comprehensions replace the manual append loops.
    features = np.array([load_image(line[0]) for line in samples])
    labels = np.array([float(line[3]) for line in samples])
    return features, labels
if __name__ == "__main__":
    # Script entry point: load the log, build arrays, train, save.
    print("Load driving log. start...")
    # Indexes
    # center[0], left[1], right[2], steering[3], throttle[4], brake[5], speed[6]
    samples = load_data("{}/{}".format(DATA_PATH, DRIVING_LOG))
    print("...done\nLoad train data: start...")
    X_train, y_train = get_train_data(samples)
    print("...done\nCompile model: start...")
    # Model Part
    model = load_model()
    # Hold out 20% for validation; nb_epoch is the older Keras spelling.
    model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=5)
    print("...done\nSave model")
    model.save('model.h5')
| 32.2 | 103 | 0.669082 | import csv
import numpy as np
from scipy import ndimage
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
DATA_PATH='../data'
DRIVING_LOG='driving_log.csv'
IMG_WIDTH=320
IMG_HEIGHT=160
IMG_COMPONENTS=3
def load_data(path):
lines = []
with open(path, "r") as f:
reader = csv.reader(f, skipinitialspace=True, delimiter=',')
next(reader)
lines = [line for line in reader]
assert len(lines[0]) == 7
return lines
def load_image(image_path):
filename = image_path.split('/')[-1]
image = ndimage.imread('{}/IMG/{}'.format(DATA_PATH, filename))
return image
def load_model():
model = Sequential()
print("Lambda preprocessing start...")
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_COMPONENTS)))
print("...end preprocessing")
model.add(Convolution2D(6, 5, 5, activation='relu'))
model.add(MaxPooling2D())
model.add(Convolution2D(6, 5, 5, activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(128))
model.add(Dense(84))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
return model
def get_train_data(samples):
images = []
measurements = []
for line in samples:
images.append(load_image(line[0]))
measurements.append(float(line[3]))
features = np.array(images)
labels = np.array(measurements)
return features, labels
if __name__ == "__main__":
print("Load driving log. start...")
samples = load_data("{}/{}".format(DATA_PATH, DRIVING_LOG))
print("...done\nLoad train data: start...")
X_train, y_train = get_train_data(samples)
print("...done\nCompile model: start...")
model = load_model()
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=5)
print("...done\nSave model")
model.save('model.h5')
| true | true |
f7fcbfb41b098618c5dd67925c45271e13ec1976 | 1,857 | py | Python | electroncash_plugins/fusion/__init__.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 208 | 2017-07-25T19:52:15.000Z | 2018-09-21T13:44:58.000Z | electroncash_plugins/fusion/__init__.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 1,478 | 2018-09-24T09:30:13.000Z | 2022-03-29T15:48:17.000Z | electroncash_plugins/fusion/__init__.py | christroutner/Electron-Cash | d5217ed3e878bd56977181f022f9e5c43f449241 | [
"MIT"
] | 159 | 2018-09-24T12:56:47.000Z | 2022-03-28T23:52:17.000Z | #!/usr/bin/env python3
#
# Electron Cash - a lightweight Bitcoin Cash client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from electroncash.i18n import _
# Plugin metadata read by the Electron Cash plugin loader; user-facing
# strings go through the i18n translation hook.
fullname = _('CashFusion')
description = [
    _('Protect your privacy and anonymize your coins (UTXOs) by shuffling them with other users of CashFusion.'),
    "\n\n",
    _('A commitment and anonymous announcement scheme is used so that none of the participants know the inputs nor '
      'outputs of the other participants.'), " ",
    _('In addition, a blame protocol is used to mitigate time-wasting denial-of-service type attacks.')
]
description_delimiter = ''
# Front-ends this plugin can run under.
available_for = ['qt', 'cmdline']
# If default_on is set to True, this plugin is loaded by default on new installs
default_on = True
| 45.292683 | 116 | 0.758212 |
from electroncash.i18n import _
fullname = _('CashFusion')
description = [
_('Protect your privacy and anonymize your coins (UTXOs) by shuffling them with other users of CashFusion.'),
"\n\n",
_('A commitment and anonymous announcement scheme is used so that none of the participants know the inputs nor '
'outputs of the other participants.'), " ",
_('In addition, a blame protocol is used to mitigate time-wasting denial-of-service type attacks.')
]
description_delimiter = ''
available_for = ['qt', 'cmdline']
default_on = True
| true | true |
f7fcbfe95c4f1429e6cabcd6d97976dbe3c0a4ef | 153 | py | Python | Python/Programming Fundamentals/Regex/12. Match Phone Number.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
] | null | null | null | Python/Programming Fundamentals/Regex/12. Match Phone Number.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
] | null | null | null | Python/Programming Fundamentals/Regex/12. Match Phone Number.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
] | null | null | null | import re
# Sofia landline numbers: "+359-2-XXX-XXXX" or "+359 2 XXX XXXX".
# The trailing \b keeps a match from ending inside a longer digit run.
PHONE_PATTERN = r'(\+359-2-\d{3}-\d{4}\b|\+359 2 \d{3} \d{4})\b'


def extract_phone_numbers(text):
    """Return every phone number found in *text*, in order of appearance."""
    return re.findall(PHONE_PATTERN, text)


if __name__ == "__main__":
    # Read one line from stdin and print the matches comma-separated,
    # exactly as the original flat script did; the guard makes the
    # module importable without blocking on input().
    print(', '.join(extract_phone_numbers(input())))
| 21.857143 | 59 | 0.562092 | import re
text = input()
pattern = r'(\+359-2-\d{3}-\d{4}\b|\+359 2 \d{3} \d{4})\b'
matches = re.findall(pattern, text)
print(', '.join(matches))
| true | true |
f7fcc0247bffa7d5ad90651380c319258f099e35 | 633 | py | Python | dockwidhistory.py | kimoamer/Clinic-Manager | 53184a4e8f369bf083109d065b2042fc7cf5bfbd | [
"MIT"
] | 3 | 2021-05-12T01:05:12.000Z | 2022-02-11T15:43:00.000Z | dockwidhistory.py | kimoamer/Clinic-Manager | 53184a4e8f369bf083109d065b2042fc7cf5bfbd | [
"MIT"
] | null | null | null | dockwidhistory.py | kimoamer/Clinic-Manager | 53184a4e8f369bf083109d065b2042fc7cf5bfbd | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QDialog
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
from dockwina import Ui_Form as docka
class Dialog(QDialog, docka):
    """Frameless history dock dialog built from the dockwina UI form."""

    def __init__(self):
        super(Dialog, self).__init__()
        # NOTE(review): QDialog.__init__ is re-run after super().__init__()
        # above -- redundant, but kept to preserve existing behavior.
        QDialog.__init__(self)
        self.setupUi(self)
        self.setWindowFlag(Qt.FramelessWindowHint)
        # One shared font applied to every label on the dock.
        self.font1 = QFont("Tajawal", 9)
        for label in (self.label2, self.label7, self.label3,
                      self.label5, self.label6, self.label):
            label.setFont(self.font1)
| 33.315789 | 51 | 0.665087 | from PyQt5.QtWidgets import QDialog
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
from dockwina import Ui_Form as docka
class Dialog(QDialog, docka):
def __init__(self):
super(Dialog, self).__init__()
QDialog.__init__(self)
self.setupUi(self)
self.setWindowFlag(Qt.FramelessWindowHint)
self.font1 = QFont("Tajawal", 9)
self.label2.setFont(self.font1)
self.label7.setFont(self.font1)
self.label3.setFont(self.font1)
self.label5.setFont(self.font1)
self.label6.setFont(self.font1)
self.label.setFont(self.font1)
| true | true |
f7fcc0722726e6fb50d41cf8e692c23bbb0797b9 | 370 | py | Python | cuenca/resources/service_providers.py | andreshndz/cuenca-python | ca9f0f078584f1458e71baeb4cd15fcc55b40397 | [
"MIT"
] | 6 | 2020-11-02T21:03:11.000Z | 2022-01-13T23:12:01.000Z | cuenca/resources/service_providers.py | andreshndz/cuenca-python | ca9f0f078584f1458e71baeb4cd15fcc55b40397 | [
"MIT"
] | 220 | 2020-05-13T19:20:57.000Z | 2022-03-30T22:03:03.000Z | cuenca/resources/service_providers.py | andreshndz/cuenca-python | ca9f0f078584f1458e71baeb4cd15fcc55b40397 | [
"MIT"
] | 14 | 2020-07-15T15:32:03.000Z | 2021-09-17T19:11:14.000Z | from typing import ClassVar, List
from cuenca_validations.types import ServiceProviderCategory
from pydantic.dataclasses import dataclass
from .base import Queryable, Retrievable
@dataclass
class ServiceProvider(Retrievable, Queryable):
    """Read-only API resource describing a bill-payment service provider."""

    # REST resource name (presumably consumed by the Retrievable/Queryable
    # mixins to build endpoint URLs -- confirm against base.py).
    _resource: ClassVar = 'service_providers'
    # Human-readable provider name.
    name: str
    # Identifier key for the provider.
    provider_key: str
    # Categories this provider belongs to.
    categories: List[ServiceProviderCategory]
| 23.125 | 60 | 0.805405 | from typing import ClassVar, List
from cuenca_validations.types import ServiceProviderCategory
from pydantic.dataclasses import dataclass
from .base import Queryable, Retrievable
@dataclass
class ServiceProvider(Retrievable, Queryable):
_resource: ClassVar = 'service_providers'
name: str
provider_key: str
categories: List[ServiceProviderCategory]
| true | true |
f7fcc0ad155bf6fffad282e49a68c0b62500b437 | 19,833 | py | Python | api/forms.py | IronTooch/bounca | ad728c39ff8921ec38f924ed8ebcd088516b8438 | [
"Apache-2.0"
] | null | null | null | api/forms.py | IronTooch/bounca | ad728c39ff8921ec38f924ed8ebcd088516b8438 | [
"Apache-2.0"
] | 1 | 2022-02-19T16:21:18.000Z | 2022-02-19T16:21:18.000Z | api/forms.py | IronTooch/bounca | ad728c39ff8921ec38f924ed8ebcd088516b8438 | [
"Apache-2.0"
] | null | null | null | from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, BaseInput, ButtonHolder, Column, Fieldset, Layout, Row
from django import forms
from django.contrib.auth.forms import SetPasswordForm, UserChangeForm
from django.utils.deconstruct import deconstructible
from vuetifyforms.components import VueField, VueSpacer
from vuetifyforms.vue import VuetifyFormMixin
from x509_pki.models import Certificate, DistinguishedName
class Submit(BaseInput):
    """
    Submit-button descriptor for the ``{% crispy %}`` template tag::

        submit = Submit('Search the Site', 'search this site')

    .. note:: The first argument is also slugified and turned into the
       id for the submit button.
    """

    input_type = "submit"

    def __init__(self, *args, **kwargs):
        # Every submit button renders dark, using the secondary colour.
        kwargs["dark"] = True
        kwargs["color"] = "secondary"
        self.field_classes = ""
        super().__init__(*args, **kwargs)
class Button(BaseInput):
    """
    Plain-button descriptor for the ``{% crispy %}`` template tag::

        button = Button('Button 1', 'Press Me!')

    .. note:: The first argument is also slugified and turned into the
       id for the button.
    """

    input_type = "button"

    def __init__(self, *args, **kwargs):
        # Buttons render as flat ("text"/"plain") primary-coloured actions.
        kwargs["text"] = True
        kwargs["plain"] = True
        kwargs["color"] = "primary"
        self.field_classes = ""
        super().__init__(*args, **kwargs)
class DistinguishedNameForm(forms.ModelForm):
    """ModelForm exposing the X.509 distinguished-name (subject) fields."""

    class Meta:
        model = DistinguishedName
        fields = [
            "commonName",
            "subjectAltNames",
            "organizationName",
            "organizationalUnitName",
            "emailAddress",
            "countryName",
            "stateOrProvinceName",
            "localityName",
        ]
@deconstructible
class PasswordConfirmValidator:
    """Marks a field as a confirmation of another field.

    A plain field validator only sees its own value, so the cross-field
    equality check cannot run here; this class mainly carries the
    referenced field name (presumably consumed by the Vue form
    generator -- confirm against vuetifyforms).

    Parameters
    ----------
    field : str
        Name of the form field this one must match.
    """

    def __init__(self, field):
        self.field = field

    def __call__(self, value):
        # Django invokes validators with the field's value; without this
        # method, validation raised "object is not callable". The actual
        # match check happens client-side / in the API layer.
        return

    def __eq__(self, other):
        # Deconstructible validators should compare equal when their
        # state matches (needed for migration change detection).
        return isinstance(other, PasswordConfirmValidator) and self.field == other.field
class CertificateForm(forms.ModelForm):
    """Base form for issuing certificates.

    Combines the Certificate model fields with a nested
    DistinguishedNameForm (available as ``self.dn``) plus the passphrase
    fields used to unlock the issuer key and protect the new key.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Nested subject-name form; it receives the same kwargs.
        self.dn = DistinguishedNameForm(**kwargs)

    error_messages = {"password_mismatch": "The two passphrase fields didn't match."}

    # Passphrase that unlocks the signing (issuer) key.
    passphrase_issuer = forms.CharField(
        label="Passphrase issuer",
        initial="",
        widget=forms.PasswordInput,
        strip=False,
        # help_text=password_validation.password_validators_help_text_html(),
        help_text="The passphrase for unlocking your signing key.",
    )
    # Passphrase protecting the newly generated key.
    passphrase_out = forms.CharField(
        label="Passphrase",
        initial="",
        widget=forms.PasswordInput,
        strip=False,
        # help_text=password_validation.password_validators_help_text_html(),
        help_text="Passphrase for protecting the key of your new certificate.",
    )
    # Must equal passphrase_out; see PasswordConfirmValidator.
    passphrase_out_confirmation = forms.CharField(
        label="Passphrase confirmation",
        initial="",
        strip=False,
        widget=forms.PasswordInput,
        help_text="Enter the same passphrase as before, for verification.",
        validators=[PasswordConfirmValidator("passphrase_out")],
    )

    class Meta:
        model = Certificate
        fields = ["name", "parent", "dn", "type", "expires_at", "crl_distribution_url", "ocsp_distribution_host"]
class AddRootCAForm(CertificateForm, VuetifyFormMixin):
    """Vue form definition for creating a self-signed root CA certificate."""

    scope_prefix = "cert_data"
    vue_file = "front/src/components/forms/RootCert.vue"
    form_title = "Root Certificate"
    form_component_name = "RootCert"
    form_object = "rootcert"
    vue_card_classes = "elevation-10"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Flatten the nested DN form into "dn.<field>" entries, then drop
        # the fields a self-signed root does not take: no parent, no type
        # selection, no issuer passphrase.
        dn_fields = {f"dn.{f}": DistinguishedNameForm().fields[f] for f in DistinguishedNameForm().fields}
        self.fields.update(dn_fields)
        self.fields.pop("dn")
        self.fields.pop("parent")
        self.fields.pop("type")
        self.fields.pop("passphrase_issuer")
        # Declarative crispy layout that the Vue generator renders.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Row(
                Column(
                    Fieldset(
                        "Distinguished Name",
                        Row(Column("dn.commonName", md="8"), Column("expires_at")),
                        Row(Column("dn.organizationName"), Column("dn.organizationalUnitName", xs12=True, md6=True)),
                        Row(Column("dn.emailAddress", xs12=True, md12=True)),
                        Row(
                            Column("dn.stateOrProvinceName", md="5"),
                            Column("dn.localityName", md="5"),
                            Column("dn.countryName"),
                        ),
                        outlined=True,
                    )
                )
            ),
            Row(
                Column(
                    Fieldset(
                        "Revocation Services",
                        HTML("<h5>These services are set in the extensions of the issued certificates</h5>"),
                        HTML("<h5>Note: Provide only available services</h5>"),
                        "crl_distribution_url",
                        "ocsp_distribution_host",
                        outlined=True,
                    )
                )
            ),
            Row(
                Column(
                    Fieldset(
                        "Certificate",
                        "name",
                        Row(Column("passphrase_out"), Column("passphrase_out_confirmation")),
                        outlined=True,
                    )
                )
            ),
            ButtonHolder(
                VueSpacer(),
                Button("cancel", "Cancel", **{"@click": "onCancel"}),
                Submit("submit", "Create", **{"@click": "onCreateCertificate", "css_class": "px-6"}),
                css_class="mt-4",
                outlined=True,
            ),
        )
        self.vue_imports = [("certificates", "../../api/certificates")]
        # Client-side handlers embedded verbatim in the generated .vue
        # file. Note: 'update-dasboard' [sic] is kept as-is; presumably
        # the listeners use the same spelling -- confirm before fixing.
        self.vue_methods = [
            """
            onCreateCertificate() {
              this.$refs.form.validate().then((isValid) => {
                if (isValid) {
                  this.passphrase_out_visible = false;
                  this.passphrase_out_confirmation_visible = false;
                  this.rootcert.type = 'R';
                  certificates.create(this.rootcert).then( response => {
                    this.$emit('update-dasboard');
                    this.resetForm();
                    this.$emit('close-dialog');
                  }).catch( r => {
                    this.$refs.form.setErrors(r.response.data);
                    this.$refs.form.$el.scrollIntoView({behavior: 'smooth'});
                  });
                }
              });
            }
            """,
            """
            onCancel(){
                this.resetForm();
                this.$emit('close-dialog');
            }
            """,
        ]
class AddIntermediateCAForm(CertificateForm, VuetifyFormMixin):
    """Vue form definition for issuing an intermediate CA certificate.

    Several DN fields are rendered disabled and pre-filled from the
    parent (root) certificate via the client-side setParentData() hook.
    """

    scope_prefix = "cert_data"
    vue_file = "front/src/components/forms/IntermediateCert.vue"
    form_title = "Intermediate Certificate"
    form_component_name = "IntermediateCert"
    form_object = "intermediatecert"
    vue_card_classes = "elevation-10"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Flatten the nested DN form into "dn.<field>" entries; parent
        # and type are fixed client-side, so those fields are dropped.
        dn_fields = {f"dn.{f}": DistinguishedNameForm().fields[f] for f in DistinguishedNameForm().fields}
        self.fields.update(dn_fields)
        self.fields.pop("dn")
        self.fields.pop("parent")
        self.fields.pop("type")
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Row(
                Column(
                    Fieldset(
                        "Distinguished Name",
                        Row(Column("dn.commonName", md="8"), Column("expires_at")),
                        Row(
                            Column(VueField("dn.organizationName", disabled=True)),
                            Column("dn.organizationalUnitName", xs12=True, md6=True),
                        ),
                        Row(Column("dn.emailAddress", xs12=True, md12=True)),
                        Row(
                            Column(VueField("dn.stateOrProvinceName", disabled=True), md="5"),
                            Column("dn.localityName", md="5"),
                            Column(VueField("dn.countryName", disabled=True)),
                        ),
                        outlined=True,
                    )
                )
            ),
            Row(
                Column(
                    Fieldset(
                        "Revocation Services",
                        HTML("<h5>These services are set in the extensions of the issued certificates</h5>"),
                        HTML("<h5>Note: Provide only available services</h5>"),
                        "crl_distribution_url",
                        "ocsp_distribution_host",
                        outlined=True,
                    )
                )
            ),
            Row(
                Column(
                    Fieldset(
                        "Certificate",
                        "name",
                        Row(Column("passphrase_out"), Column("passphrase_out_confirmation")),
                        outlined=True,
                    )
                )
            ),
            Row(
                Column(
                    Fieldset(
                        "Signing credentials",
                        "passphrase_issuer",
                        outlined=True,
                    )
                )
            ),
            ButtonHolder(
                VueSpacer(),
                Button("cancel", "Cancel", **{"@click": "onCancel"}),
                Submit("submit", "Create", **{"@click": "onCreateCertificate", "css_class": "px-6"}),
                css_class="mt-4",
                outlined=True,
            ),
        )
        self.vue_imports = [("certificates", "../../api/certificates")]
        # The parent (root) certificate is passed in as a Vue prop and
        # used both on init and on mount to pre-fill the DN fields.
        self.vue_props = ["parent"]
        self.vue_extra_init_rules = """
        this.setParentData();
        """
        self.vue_watchers = []
        self.vue_mounted = """
        this.setParentData();
        """
        # Client-side handlers embedded verbatim in the generated .vue file.
        self.vue_methods = [
            """
            setParentData() {
                this.intermediatecert.dn.organizationName = this.parent.dn.organizationName;
                this.intermediatecert.dn.stateOrProvinceName = this.parent.dn.stateOrProvinceName;
                this.intermediatecert.dn.countryName = this.parent.dn.countryName;
                this.intermediatecert.dn.localityName = this.parent.dn.localityName;
                this.intermediatecert.dn.organizationalUnitName = this.parent.dn.organizationalUnitName;
                this.intermediatecert.dn.emailAddress = this.parent.dn.emailAddress;
            }
            """,
            """
            onCreateCertificate() {
              this.$refs.form.validate().then((isValid) => {
                if (isValid) {
                  this.passphrase_out_visible = false;
                  this.passphrase_out_confirmation_visible = false;
                  this.passphrase_in_visible = false;
                  this.intermediatecert.type = 'I';
                  this.intermediatecert.parent = this.parent.id;
                  certificates.create(this.intermediatecert).then( response => {
                    this.$emit('update-dasboard');
                    this.resetForm();
                    this.$emit('close-dialog');
                    this.setParentData();
                  }).catch( r => {
                    this.$refs.form.setErrors(r.response.data);
                    this.$refs.form.$el.scrollIntoView({behavior: 'smooth'});
                  });
                }
              });
            }
            """,
            """
            onCancel(){
                this.resetForm();
                this.$emit('close-dialog');
                this.setParentData();
            }
            """,
        ]
class AddCertificateForm(CertificateForm, VuetifyFormMixin):
    """Vue form definition for issuing end-entity certificates.

    The same component serves server ('S'), client ('C') and OCSP ('O')
    certificates; the concrete type arrives as the ``certtype`` Vue prop.
    """

    scope_prefix = "cert_data"
    vue_file = "front/src/components/forms/Certificate.vue"
    form_title = '{{ {"S": "Server", "C": "Client", "O": "OCSP"}[this.certtype] }} certificate '
    form_component_name = "Certificate"
    form_object = "certificate"
    vue_card_classes = "elevation-10"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Flatten the nested DN form; for end-entity certificates the key
        # passphrase is optional, and revocation URLs come from the issuer.
        dn_fields = {f"dn.{f}": DistinguishedNameForm().fields[f] for f in DistinguishedNameForm().fields}
        self.fields.update(dn_fields)
        self.fields["passphrase_out"].required = False
        self.fields["passphrase_out_confirmation"].required = False
        self.fields.pop("dn")
        self.fields.pop("parent")
        self.fields.pop("crl_distribution_url")
        self.fields.pop("ocsp_distribution_host")
        self.fields.pop("type")
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Row(
                Column(
                    Fieldset(
                        "Distinguished Name",
                        Row(
                            VueSpacer(),
                            Button("reset", "Reset Form", **{"@click": "resetForm"}),
                            Submit("copy", "Copy from Intermediate", **{"@click": "setParentData"}),
                            css_class="mr-1",
                        ),
                        Row(Column("dn.commonName", md="8"), Column("expires_at")),
                        Row(
                            Column(
                                VueField(
                                    "dn.subjectAltNames",
                                    multiple=True,
                                    chips=True,
                                    deletable_chips=True,
                                    append_icon="",
                                ),
                                xs12=True,
                                md12=True,
                            )
                        ),
                        Row(Column("dn.organizationName"), Column("dn.organizationalUnitName", xs12=True, md6=True)),
                        Row(Column("dn.emailAddress", xs12=True, md12=True)),
                        Row(
                            Column("dn.stateOrProvinceName", md="5"),
                            Column("dn.localityName", md="5"),
                            Column("dn.countryName"),
                        ),
                        outlined=True,
                    )
                )
            ),
            Row(
                Column(
                    Fieldset(
                        "Certificate",
                        "name",
                        Row(Column("passphrase_out"), Column("passphrase_out_confirmation")),
                        outlined=True,
                    )
                )
            ),
            Row(
                Column(
                    Fieldset(
                        "Signing credentials",
                        "passphrase_issuer",
                        outlined=True,
                    )
                )
            ),
            ButtonHolder(
                VueSpacer(),
                Button("cancel", "Cancel", **{"@click": "onCancel"}),
                Submit("submit", "Create", **{"@click": "onCreateCertificate", "css_class": "px-6"}),
                css_class="mt-4",
                outlined=True,
            ),
        )
        self.vue_imports = [("certificates", "../../api/certificates")]
        self.vue_props = ["parent", "certtype"]
        self.vue_watchers = []
        # Client-side handlers embedded verbatim in the generated .vue file.
        self.vue_methods = [
            """
            setParentData() {
                this.certificate.dn.organizationName = this.parent.dn.organizationName;
                this.certificate.dn.stateOrProvinceName = this.parent.dn.stateOrProvinceName;
                this.certificate.dn.countryName = this.parent.dn.countryName;
                this.certificate.dn.localityName = this.parent.dn.localityName;
                this.certificate.dn.organizationalUnitName = this.parent.dn.organizationalUnitName;
                this.certificate.dn.emailAddress = this.parent.dn.emailAddress;
            }
            """,
            """
            onCreateCertificate() {
              this.$refs.form.validate().then((isValid) => {
                if (isValid) {
                  this.passphrase_out_visible = false;
                  this.passphrase_out_confirmation_visible = false;
                  this.passphrase_in_visible = false;
                  this.certificate.type = this.certtype;
                  this.certificate.parent = this.parent.id;
                  certificates.create(this.certificate).then( response => {
                    this.$emit('update-dasboard');
                    this.resetForm();
                    this.$emit('close-dialog');
                  }).catch( r => {
                    this.$refs.form.setErrors(r.response.data);
                    this.$refs.form.$el.scrollIntoView({behavior: 'smooth'});
                  });
                }
              });
            }
            """,
            """
            onCancel(){
                this.resetForm();
                this.$emit('close-dialog');
            }
            """,
        ]
class ChangePasswordForm(SetPasswordForm, VuetifyFormMixin):
    """Vue form definition for the change-password dialog."""

    scope_prefix = "user_data"
    vue_file = "front/src/components/forms/user/ChangePassword.vue"
    form_title = "Change Password"
    form_component_name = "changePassword"
    form_object = "password"

    def __init__(self, *args, **kwargs):
        # NOTE(review): user=None assumes this form instance is never
        # validated server-side (it only drives Vue generation) -- confirm.
        super().__init__(user=None, *args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Row(Column("new_password1"), Column("new_password2")),
            ButtonHolder(
                VueSpacer(),
                Button("cancel", "Cancel", **{"@click": "onCancel"}),
                Submit("submit", "Update", **{"@click": "updatePassword", "css_class": "px-6"}),
                css_class="mt-4",
                outlined=True,
            ),
        )
        self.vue_imports = [("profile", "../../../api/profile")]
        self.vue_props = []
        self.vue_watchers = []
        # Client-side handlers embedded verbatim in the generated .vue
        # file. Bug fix: the second visibility reset previously toggled
        # new_password1_visible twice, leaving the confirmation field's
        # reveal toggle on after submit.
        self.vue_methods = [
            """
            updatePassword() {
              this.$refs.form.validate().then((isValid) => {
                if (isValid) {
                  this.new_password1_visible = false;
                  this.new_password2_visible = false;
                  profile.changeAccountPassword(this.password).then( response => {
                    this.$emit('success', 'Password has been updated.');
                    this.resetForm();
                  }).catch( r => {
                    this.$refs.form.setErrors(r.response.data);
                  });
                }
              });
            }
            """,
            """
            onCancel(){
                this.resetForm();
            }
            """,
        ]
class ChangeProfileForm(UserChangeForm, VuetifyFormMixin):
scope_prefix = "profile_data"
vue_file = "front/src/components/forms/user/ChangeProfile.vue"
form_title = "Change Profile"
form_component_name = "changeProfile"
form_object = "profile"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Row(
Column(VueField("username", disabled=True)),
),
Row(
Column("first_name"),
Column("last_name"),
),
Row(
Column("email"),
),
ButtonHolder(
VueSpacer(),
Button("cancel", "Cancel", **{"@click": "onCancel"}),
Submit("submit", "Update", **{"@click": "updateProfile", "css_class": "px-6"}),
css_class="mt-4",
outlined=True,
),
)
self.vue_imports = [("profile", "../../../api/profile")]
self.vue_props = []
self.vue_watchers = []
self.vue_mounted = """
this.resetForm();
this.setupUserForm();
"""
self.vue_methods = [
"""
setupUserForm() {
profile.getAccountDetails()
.then( response => {
this.profile = response.data;
}).catch((e) => {
console.log(e);
});
},
updateProfile() {
this.$refs.form.validate().then((isValid) => {
if (isValid) {
const data = {...this.profile};
delete this.profile['username'];
profile.updateAccountDetails(this.profile).then( response => {
this.resetForm();
this.setupUserForm();
}).catch( r => {
this.$refs.form.setErrors(r.response.data);
});
}
});
}
""",
"""
onCancel(){
this.resetForm();
this.setupUserForm();
}
""",
]
| 34.673077 | 117 | 0.516009 | from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, BaseInput, ButtonHolder, Column, Fieldset, Layout, Row
from django import forms
from django.contrib.auth.forms import SetPasswordForm, UserChangeForm
from django.utils.deconstruct import deconstructible
from vuetifyforms.components import VueField, VueSpacer
from vuetifyforms.vue import VuetifyFormMixin
from x509_pki.models import Certificate, DistinguishedName
class Submit(BaseInput):
input_type = "submit"
def __init__(self, *args, **kwargs):
kwargs.update({"dark": True, "color": "secondary"})
self.field_classes = ""
super().__init__(*args, **kwargs)
class Button(BaseInput):
input_type = "button"
def __init__(self, *args, **kwargs):
kwargs.update({"text": True, "plain": True, "color": "primary"})
self.field_classes = ""
super().__init__(*args, **kwargs)
class DistinguishedNameForm(forms.ModelForm):
class Meta:
model = DistinguishedName
fields = [
"commonName",
"subjectAltNames",
"organizationName",
"organizationalUnitName",
"emailAddress",
"countryName",
"stateOrProvinceName",
"localityName",
]
@deconstructible
class PasswordConfirmValidator:
def __init__(self, field):
self.field = field
class CertificateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dn = DistinguishedNameForm(**kwargs)
error_messages = {"password_mismatch": "The two passphrase fields didn't match."}
passphrase_issuer = forms.CharField(
label="Passphrase issuer",
initial="",
widget=forms.PasswordInput,
strip=False,
# help_text=password_validation.password_validators_help_text_html(),
help_text="The passphrase for unlocking your signing key.",
)
passphrase_out = forms.CharField(
label="Passphrase",
initial="",
widget=forms.PasswordInput,
strip=False,
# help_text=password_validation.password_validators_help_text_html(),
help_text="Passphrase for protecting the key of your new certificate.",
)
passphrase_out_confirmation = forms.CharField(
label="Passphrase confirmation",
initial="",
strip=False,
widget=forms.PasswordInput,
help_text="Enter the same passphrase as before, for verification.",
validators=[PasswordConfirmValidator("passphrase_out")],
)
class Meta:
model = Certificate
fields = ["name", "parent", "dn", "type", "expires_at", "crl_distribution_url", "ocsp_distribution_host"]
class AddRootCAForm(CertificateForm, VuetifyFormMixin):
scope_prefix = "cert_data"
vue_file = "front/src/components/forms/RootCert.vue"
form_title = "Root Certificate"
form_component_name = "RootCert"
form_object = "rootcert"
vue_card_classes = "elevation-10"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
dn_fields = {f"dn.{f}": DistinguishedNameForm().fields[f] for f in DistinguishedNameForm().fields}
self.fields.update(dn_fields)
self.fields.pop("dn")
self.fields.pop("parent")
self.fields.pop("type")
self.fields.pop("passphrase_issuer")
self.helper = FormHelper()
self.helper.layout = Layout(
Row(
Column(
Fieldset(
"Distinguished Name",
Row(Column("dn.commonName", md="8"), Column("expires_at")),
Row(Column("dn.organizationName"), Column("dn.organizationalUnitName", xs12=True, md6=True)),
Row(Column("dn.emailAddress", xs12=True, md12=True)),
Row(
Column("dn.stateOrProvinceName", md="5"),
Column("dn.localityName", md="5"),
Column("dn.countryName"),
),
outlined=True,
)
)
),
Row(
Column(
Fieldset(
"Revocation Services",
HTML("<h5>These services are set in the extensions of the issued certificates</h5>"),
HTML("<h5>Note: Provide only available services</h5>"),
"crl_distribution_url",
"ocsp_distribution_host",
outlined=True,
)
)
),
Row(
Column(
Fieldset(
"Certificate",
"name",
Row(Column("passphrase_out"), Column("passphrase_out_confirmation")),
outlined=True,
)
)
),
ButtonHolder(
VueSpacer(),
Button("cancel", "Cancel", **{"@click": "onCancel"}),
Submit("submit", "Create", **{"@click": "onCreateCertificate", "css_class": "px-6"}),
css_class="mt-4",
outlined=True,
),
)
self.vue_imports = [("certificates", "../../api/certificates")]
self.vue_methods = [
"""
onCreateCertificate() {
this.$refs.form.validate().then((isValid) => {
if (isValid) {
this.passphrase_out_visible = false;
this.passphrase_out_confirmation_visible = false;
this.rootcert.type = 'R';
certificates.create(this.rootcert).then( response => {
this.$emit('update-dasboard');
this.resetForm();
this.$emit('close-dialog');
}).catch( r => {
this.$refs.form.setErrors(r.response.data);
this.$refs.form.$el.scrollIntoView({behavior: 'smooth'});
});
}
});
}
""",
"""
onCancel(){
this.resetForm();
this.$emit('close-dialog');
}
""",
]
class AddIntermediateCAForm(CertificateForm, VuetifyFormMixin):
scope_prefix = "cert_data"
vue_file = "front/src/components/forms/IntermediateCert.vue"
form_title = "Intermediate Certificate"
form_component_name = "IntermediateCert"
form_object = "intermediatecert"
vue_card_classes = "elevation-10"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
dn_fields = {f"dn.{f}": DistinguishedNameForm().fields[f] for f in DistinguishedNameForm().fields}
self.fields.update(dn_fields)
self.fields.pop("dn")
self.fields.pop("parent")
self.fields.pop("type")
self.helper = FormHelper()
self.helper.layout = Layout(
Row(
Column(
Fieldset(
"Distinguished Name",
Row(Column("dn.commonName", md="8"), Column("expires_at")),
Row(
Column(VueField("dn.organizationName", disabled=True)),
Column("dn.organizationalUnitName", xs12=True, md6=True),
),
Row(Column("dn.emailAddress", xs12=True, md12=True)),
Row(
Column(VueField("dn.stateOrProvinceName", disabled=True), md="5"),
Column("dn.localityName", md="5"),
Column(VueField("dn.countryName", disabled=True)),
),
outlined=True,
)
)
),
Row(
Column(
Fieldset(
"Revocation Services",
HTML("<h5>These services are set in the extensions of the issued certificates</h5>"),
HTML("<h5>Note: Provide only available services</h5>"),
"crl_distribution_url",
"ocsp_distribution_host",
outlined=True,
)
)
),
Row(
Column(
Fieldset(
"Certificate",
"name",
Row(Column("passphrase_out"), Column("passphrase_out_confirmation")),
outlined=True,
)
)
),
Row(
Column(
Fieldset(
"Signing credentials",
"passphrase_issuer",
outlined=True,
)
)
),
ButtonHolder(
VueSpacer(),
Button("cancel", "Cancel", **{"@click": "onCancel"}),
Submit("submit", "Create", **{"@click": "onCreateCertificate", "css_class": "px-6"}),
css_class="mt-4",
outlined=True,
),
)
self.vue_imports = [("certificates", "../../api/certificates")]
self.vue_props = ["parent"]
self.vue_extra_init_rules = """
this.setParentData();
"""
self.vue_watchers = []
self.vue_mounted = """
this.setParentData();
"""
self.vue_methods = [
"""
setParentData() {
this.intermediatecert.dn.organizationName = this.parent.dn.organizationName;
this.intermediatecert.dn.stateOrProvinceName = this.parent.dn.stateOrProvinceName;
this.intermediatecert.dn.countryName = this.parent.dn.countryName;
this.intermediatecert.dn.localityName = this.parent.dn.localityName;
this.intermediatecert.dn.organizationalUnitName = this.parent.dn.organizationalUnitName;
this.intermediatecert.dn.emailAddress = this.parent.dn.emailAddress;
}
""",
"""
onCreateCertificate() {
this.$refs.form.validate().then((isValid) => {
if (isValid) {
this.passphrase_out_visible = false;
this.passphrase_out_confirmation_visible = false;
this.passphrase_in_visible = false;
this.intermediatecert.type = 'I';
this.intermediatecert.parent = this.parent.id;
certificates.create(this.intermediatecert).then( response => {
this.$emit('update-dasboard');
this.resetForm();
this.$emit('close-dialog');
this.setParentData();
}).catch( r => {
this.$refs.form.setErrors(r.response.data);
this.$refs.form.$el.scrollIntoView({behavior: 'smooth'});
});
}
});
}
""",
"""
onCancel(){
this.resetForm();
this.$emit('close-dialog');
this.setParentData();
}
""",
]
class AddCertificateForm(CertificateForm, VuetifyFormMixin):
scope_prefix = "cert_data"
vue_file = "front/src/components/forms/Certificate.vue"
form_title = '{{ {"S": "Server", "C": "Client", "O": "OCSP"}[this.certtype] }} certificate '
form_component_name = "Certificate"
form_object = "certificate"
vue_card_classes = "elevation-10"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
dn_fields = {f"dn.{f}": DistinguishedNameForm().fields[f] for f in DistinguishedNameForm().fields}
self.fields.update(dn_fields)
self.fields["passphrase_out"].required = False
self.fields["passphrase_out_confirmation"].required = False
self.fields.pop("dn")
self.fields.pop("parent")
self.fields.pop("crl_distribution_url")
self.fields.pop("ocsp_distribution_host")
self.fields.pop("type")
self.helper = FormHelper()
self.helper.layout = Layout(
Row(
Column(
Fieldset(
"Distinguished Name",
Row(
VueSpacer(),
Button("reset", "Reset Form", **{"@click": "resetForm"}),
Submit("copy", "Copy from Intermediate", **{"@click": "setParentData"}),
css_class="mr-1",
),
Row(Column("dn.commonName", md="8"), Column("expires_at")),
Row(
Column(
VueField(
"dn.subjectAltNames",
multiple=True,
chips=True,
deletable_chips=True,
append_icon="",
),
xs12=True,
md12=True,
)
),
Row(Column("dn.organizationName"), Column("dn.organizationalUnitName", xs12=True, md6=True)),
Row(Column("dn.emailAddress", xs12=True, md12=True)),
Row(
Column("dn.stateOrProvinceName", md="5"),
Column("dn.localityName", md="5"),
Column("dn.countryName"),
),
outlined=True,
)
)
),
Row(
Column(
Fieldset(
"Certificate",
"name",
Row(Column("passphrase_out"), Column("passphrase_out_confirmation")),
outlined=True,
)
)
),
Row(
Column(
Fieldset(
"Signing credentials",
"passphrase_issuer",
outlined=True,
)
)
),
ButtonHolder(
VueSpacer(),
Button("cancel", "Cancel", **{"@click": "onCancel"}),
Submit("submit", "Create", **{"@click": "onCreateCertificate", "css_class": "px-6"}),
css_class="mt-4",
outlined=True,
),
)
self.vue_imports = [("certificates", "../../api/certificates")]
self.vue_props = ["parent", "certtype"]
self.vue_watchers = []
self.vue_methods = [
"""
setParentData() {
this.certificate.dn.organizationName = this.parent.dn.organizationName;
this.certificate.dn.stateOrProvinceName = this.parent.dn.stateOrProvinceName;
this.certificate.dn.countryName = this.parent.dn.countryName;
this.certificate.dn.localityName = this.parent.dn.localityName;
this.certificate.dn.organizationalUnitName = this.parent.dn.organizationalUnitName;
this.certificate.dn.emailAddress = this.parent.dn.emailAddress;
}
""",
"""
onCreateCertificate() {
this.$refs.form.validate().then((isValid) => {
if (isValid) {
this.passphrase_out_visible = false;
this.passphrase_out_confirmation_visible = false;
this.passphrase_in_visible = false;
this.certificate.type = this.certtype;
this.certificate.parent = this.parent.id;
certificates.create(this.certificate).then( response => {
this.$emit('update-dasboard');
this.resetForm();
this.$emit('close-dialog');
}).catch( r => {
this.$refs.form.setErrors(r.response.data);
this.$refs.form.$el.scrollIntoView({behavior: 'smooth'});
});
}
});
}
""",
"""
onCancel(){
this.resetForm();
this.$emit('close-dialog');
}
""",
]
class ChangePasswordForm(SetPasswordForm, VuetifyFormMixin):
scope_prefix = "user_data"
vue_file = "front/src/components/forms/user/ChangePassword.vue"
form_title = "Change Password"
form_component_name = "changePassword"
form_object = "password"
def __init__(self, *args, **kwargs):
super().__init__(user=None, *args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Row(Column("new_password1"), Column("new_password2")),
ButtonHolder(
VueSpacer(),
Button("cancel", "Cancel", **{"@click": "onCancel"}),
Submit("submit", "Update", **{"@click": "updatePassword", "css_class": "px-6"}),
css_class="mt-4",
outlined=True,
),
)
self.vue_imports = [("profile", "../../../api/profile")]
self.vue_props = []
self.vue_watchers = []
self.vue_methods = [
"""
updatePassword() {
this.$refs.form.validate().then((isValid) => {
if (isValid) {
this.new_password1_visible = false;
this.new_password1_visible = false;
profile.changeAccountPassword(this.password).then( response => {
this.$emit('success', 'Password has been updated.');
this.resetForm();
}).catch( r => {
this.$refs.form.setErrors(r.response.data);
});
}
});
}
""",
"""
onCancel(){
this.resetForm();
}
""",
]
class ChangeProfileForm(UserChangeForm, VuetifyFormMixin):
scope_prefix = "profile_data"
vue_file = "front/src/components/forms/user/ChangeProfile.vue"
form_title = "Change Profile"
form_component_name = "changeProfile"
form_object = "profile"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Row(
Column(VueField("username", disabled=True)),
),
Row(
Column("first_name"),
Column("last_name"),
),
Row(
Column("email"),
),
ButtonHolder(
VueSpacer(),
Button("cancel", "Cancel", **{"@click": "onCancel"}),
Submit("submit", "Update", **{"@click": "updateProfile", "css_class": "px-6"}),
css_class="mt-4",
outlined=True,
),
)
self.vue_imports = [("profile", "../../../api/profile")]
self.vue_props = []
self.vue_watchers = []
self.vue_mounted = """
this.resetForm();
this.setupUserForm();
"""
self.vue_methods = [
"""
setupUserForm() {
profile.getAccountDetails()
.then( response => {
this.profile = response.data;
}).catch((e) => {
console.log(e);
});
},
updateProfile() {
this.$refs.form.validate().then((isValid) => {
if (isValid) {
const data = {...this.profile};
delete this.profile['username'];
profile.updateAccountDetails(this.profile).then( response => {
this.resetForm();
this.setupUserForm();
}).catch( r => {
this.$refs.form.setErrors(r.response.data);
});
}
});
}
""",
"""
onCancel(){
this.resetForm();
this.setupUserForm();
}
""",
]
| true | true |
f7fcc0ba51da22d909adb03856f3a892aaf2787f | 620 | py | Python | endpoints/dbots.py | PhotoBoxPW/imgsrv | 93bb2b3f7641f9de7af8c55424233d82dec47850 | [
"MIT"
] | null | null | null | endpoints/dbots.py | PhotoBoxPW/imgsrv | 93bb2b3f7641f9de7af8c55424233d82dec47850 | [
"MIT"
] | null | null | null | endpoints/dbots.py | PhotoBoxPW/imgsrv | 93bb2b3f7641f9de7af8c55424233d82dec47850 | [
"MIT"
] | null | null | null | from PIL import Image, ImageOps
from utils import http
from utils.endpoint import Endpoint, setup
@setup
class DBots(Endpoint):
def generate(self, kwargs):
image_url = kwargs['image']
no_overlay = False if not 'no_overlay' in kwargs else bool(kwargs['no_overlay'])
base = Image.open(self.get_asset('dbots.bmp'))
img = http.get_image(image_url).convert('RGBA')
img = ImageOps.fit(img, (base.width, base.height), method=Image.LANCZOS)
if not no_overlay:
img.paste(base, (0, 0), base)
img = img.convert('RGB')
return self.send_file(img) | 34.444444 | 88 | 0.648387 | from PIL import Image, ImageOps
from utils import http
from utils.endpoint import Endpoint, setup
@setup
class DBots(Endpoint):
def generate(self, kwargs):
image_url = kwargs['image']
no_overlay = False if not 'no_overlay' in kwargs else bool(kwargs['no_overlay'])
base = Image.open(self.get_asset('dbots.bmp'))
img = http.get_image(image_url).convert('RGBA')
img = ImageOps.fit(img, (base.width, base.height), method=Image.LANCZOS)
if not no_overlay:
img.paste(base, (0, 0), base)
img = img.convert('RGB')
return self.send_file(img) | true | true |
f7fcc17a9052ec4d06ff3eba20528c98e20784db | 8,527 | py | Python | API/rossmann/Rossmann.py | brunasenra/Store_Sales_Prediction | 6f7371188e37ebb905e171bd8afaae9e41f5cdf5 | [
"MIT"
] | 1 | 2022-02-13T20:29:53.000Z | 2022-02-13T20:29:53.000Z | API/rossmann/Rossmann.py | brunasenra/Store_Sales_Prediction | 6f7371188e37ebb905e171bd8afaae9e41f5cdf5 | [
"MIT"
] | null | null | null | API/rossmann/Rossmann.py | brunasenra/Store_Sales_Prediction | 6f7371188e37ebb905e171bd8afaae9e41f5cdf5 | [
"MIT"
] | 1 | 2021-08-11T19:25:21.000Z | 2021-08-11T19:25:21.000Z | import pickle
import inflection
import pandas as pd
import numpy as np
import math
import datetime
class Rossmann(object):
def __init__(self):
self.home_path = 'C:/Users/bruna/OneDrive/Favoritos compartilhados\DATA SCIENCE\BRUNA\DATA SCIENCE\PROJECTS\Store_Sales_Prediction'
# loads the rescaling
self.competition_distance_scaler = pickle.load(open(self.home_path + 'parameter/competition_distance_scaler.pkl', 'rb'))
self.competition_time_month_scaler = pickle.load(open(self.home_path + 'parameter/competition_time_month_scaler.pkl', 'rb'))
self.promo_time_week_scaler = pickle.load(open(self.home_path + 'parameter/promo_time_week_scaler.pkl', 'rb'))
self.year_scaler = pickle.load(open(self.home_path + 'parameter/year_scaler.pkl', 'rb'))
# loads the encoder
self.store_type_scaler = pickle.load(open(self.home_path + 'parameter/store_type_scaler.pkl', 'rb'))
def data_cleaning(self, df1):
## 1.2. Renaming columns
cols_old = ['Store', 'DayOfWeek', 'Date', 'Open', 'Promo', 'StateHoliday',
'SchoolHoliday', 'StoreType', 'Assortment','CompetitionDistance', 'CompetitionOpenSinceMonth',
'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval']
# snake_case
snakecase = lambda x: inflection.underscore(x)
# creates new columns from old columns in snakecase
cols_new = list(map(snakecase, cols_old))
# renames the old columns
df1.columns = cols_new
## 1.4. Checking data types
# transforms 'date' column to datetime type
df1['date'] = pd.to_datetime(df1['date'])
## 1.6. Filling out the NaN values
# competition_distance
df1['competition_distance'] = df1['competition_distance'].apply(lambda x: 200000.0 if math.isnan(x) else x)
# competition_open_since_month
df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if math.isnan(x['competition_open_since_month']) else x['competition_open_since_month'], axis=1)
# competition_open_since_year
df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['competition_open_since_year']) else x['competition_open_since_year'], axis=1)
# promo2_since_week
df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if math.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis=1)
# promo2_since_year
df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis=1)
# promo_interval
month_map = {1: 'Jan', 2: 'Fev', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
df1['promo_interval'].fillna(0, inplace=True)
df1['month_map'] = df1['date'].dt.month.map(month_map)
df1['is_promo'] = df1[['promo_interval','month_map']].apply(lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1)
## 1.7. Changing data types
df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int)
df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int)
# transforms promotion data to int
df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)
return df1
def feature_engineering(self, df2):
# 2.0 FEATURE ENGINEERING
# year
df2['year'] = df2['date'].dt.year
# month
df2['month'] = df2['date'].dt.month
# day
df2['day'] = df2['date'].dt.day
# week of year
df2['week_of_year'] = df2['date'].dt.weekofyear
# year week
df2['year_week'] = df2['date'].dt.strftime('%Y-%W')
# Competition since
df2['competition_since'] = df2.apply( lambda x: datetime.datetime(year = x['competition_open_since_year'],
month = x['competition_open_since_month'], day = 1), axis = 1)
df2['competition_time_month'] = ((df2['date'] - df2['competition_since'])/30).apply(lambda x: x.days).astype(int)
# Promo since
df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)
df2['promo_since'] = df2['promo_since'].apply( lambda x: datetime.datetime.strptime
(x + '-1', '%Y-%W-%w') - datetime.timedelta(days = 7))
df2['promo_time_week'] = ((df2['date'] - df2['promo_since'])/7).apply(lambda x: x.days).astype(int)
# assortment
df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended' )
# state holiday
df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x == 'a' else 'easter_holiday'
if x == 'b' else 'christmas' if x =='c' else 'regular_day')
# 3.0 VARIABLE FILTERING
## 3.1 Row filtering
df2 = df2[df2['open'] != 0]
## 3.2 Column filtering
cols_drop = ['open', 'promo_interval', 'month_map']
df2 = df2.drop(cols_drop, axis=1)
return df2
def data_preparation(self, df5):
## 5.2 Rescaling
### 5.2.1 Rescaling competition_distance
# competition_distance
df5['competition_distance'] = self.competition_distance_scaler.fit_transform(df5[['competition_distance']])
### 5.2.2 Rescaling competition_time_month
# competition_time_month
df5['competition_time_month'] = self.competition_time_month_scaler.fit_transform(df5[['competition_time_month']])
### 5.2.3 Rescaling promo_time_week
# promo_time_week
df5['promo_time_week'] = self.promo_time_week_scaler.fit_transform(df5[['promo_time_week']])
# year
df5['year'] = self.year_scaler.fit_transform(df5[['year']])
### 5.3.1 Encoding
# state_holiday - One Hot Encoding
df5 = pd.get_dummies(df5, prefix=['state_holiday'], columns=['state_holiday'])
# store_type
df5['store_type'] = self.store_type_scaler.fit_transform(df5['store_type'])
# assortment
assortment_dict = {'basic':1, 'extra': 2, 'extended': 3}
df5['assortment'] = df5['assortment'].map(assortment_dict)
### 5.3.2 Nature Transformation
# day_of_week
df5['day_of_week_sin'] = df5['day_of_week'].apply(lambda x: np.sin(x *(2. * np.pi / 7)))
df5['day_of_week_cos'] = df5['day_of_week'].apply(lambda x: np.cos(x *(2. * np.pi / 7)))
# month
df5['month_sin'] = df5['month'].apply(lambda x: np.sin(x *(2. * np.pi / 12)))
df5['month_cos'] = df5['month'].apply(lambda x: np.cos(x *(2. * np.pi / 12)))
# day
df5['day_sin'] = df5['day'].apply(lambda x: np.sin(x *(2. * np.pi / 30)))
df5['day_cos'] = df5['day'].apply(lambda x: np.cos(x *(2. * np.pi / 30)))
# week_of_year
df5['week_of_year_sin'] = df5['week_of_year'].apply(lambda x: np.sin(x *(2. * np.pi / 52)))
df5['week_of_year_cos'] = df5['week_of_year'].apply(lambda x: np.cos(x *(2. * np.pi / 52)))
cols_selected = ['store', 'promo', 'store_type',
'assortment','competition_distance', 'competition_open_since_month',
'competition_open_since_year','promo2', 'promo2_since_week',
'promo2_since_year','competition_time_month', 'promo_time_week',
'day_of_week_sin','day_of_week_cos', 'month_sin','month_cos',
'day_sin','day_cos', 'week_of_year_sin', 'week_of_year_cos']
return df5[cols_selected]
def get_prediction(self, model, original_data, test_data):
#predicts
pred = model.predict(test_data)
# joins pred into the original data
original_data['prediction'] = np.expm1(pred)
return original_data.to_json(orient='records', date_format='iso')
| 43.953608 | 182 | 0.601736 | import pickle
import inflection
import pandas as pd
import numpy as np
import math
import datetime
class Rossmann(object):
def __init__(self):
self.home_path = 'C:/Users/bruna/OneDrive/Favoritos compartilhados\DATA SCIENCE\BRUNA\DATA SCIENCE\PROJECTS\Store_Sales_Prediction'
self.competition_distance_scaler = pickle.load(open(self.home_path + 'parameter/competition_distance_scaler.pkl', 'rb'))
self.competition_time_month_scaler = pickle.load(open(self.home_path + 'parameter/competition_time_month_scaler.pkl', 'rb'))
self.promo_time_week_scaler = pickle.load(open(self.home_path + 'parameter/promo_time_week_scaler.pkl', 'rb'))
self.year_scaler = pickle.load(open(self.home_path + 'parameter/year_scaler.pkl', 'rb'))
self.store_type_scaler = pickle.load(open(self.home_path + 'parameter/store_type_scaler.pkl', 'rb'))
def data_cleaning(self, df1):
tore', 'DayOfWeek', 'Date', 'Open', 'Promo', 'StateHoliday',
'SchoolHoliday', 'StoreType', 'Assortment','CompetitionDistance', 'CompetitionOpenSinceMonth',
'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval']
snakecase = lambda x: inflection.underscore(x)
cols_new = list(map(snakecase, cols_old))
df1.columns = cols_new
'] = pd.to_datetime(df1['date'])
_distance'] = df1['competition_distance'].apply(lambda x: 200000.0 if math.isnan(x) else x)
df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if math.isnan(x['competition_open_since_month']) else x['competition_open_since_month'], axis=1)
df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['competition_open_since_year']) else x['competition_open_since_year'], axis=1)
df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if math.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis=1)
df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis=1)
month_map = {1: 'Jan', 2: 'Fev', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
df1['promo_interval'].fillna(0, inplace=True)
df1['month_map'] = df1['date'].dt.month.map(month_map)
df1['is_promo'] = df1[['promo_interval','month_map']].apply(lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1)
pen_since_month'] = df1['competition_open_since_month'].astype(int)
df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int)
df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)
return df1
def feature_engineering(self, df2):
df2['year'] = df2['date'].dt.year
df2['month'] = df2['date'].dt.month
df2['day'] = df2['date'].dt.day
df2['week_of_year'] = df2['date'].dt.weekofyear
df2['year_week'] = df2['date'].dt.strftime('%Y-%W')
df2['competition_since'] = df2.apply( lambda x: datetime.datetime(year = x['competition_open_since_year'],
month = x['competition_open_since_month'], day = 1), axis = 1)
df2['competition_time_month'] = ((df2['date'] - df2['competition_since'])/30).apply(lambda x: x.days).astype(int)
df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)
df2['promo_since'] = df2['promo_since'].apply( lambda x: datetime.datetime.strptime
(x + '-1', '%Y-%W-%w') - datetime.timedelta(days = 7))
df2['promo_time_week'] = ((df2['date'] - df2['promo_since'])/7).apply(lambda x: x.days).astype(int)
df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended' )
df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x == 'a' else 'easter_holiday'
if x == 'b' else 'christmas' if x =='c' else 'regular_day')
df2['open'] != 0]
'open', 'promo_interval', 'month_map']
df2 = df2.drop(cols_drop, axis=1)
return df2
def data_preparation(self, df5):
t_transform(df5[['competition_distance']])
ler.fit_transform(df5[['competition_time_month']])
ler.fit_transform(df5[['promo_time_week']])
df5['year'] = self.year_scaler.fit_transform(df5[['year']])
ummies(df5, prefix=['state_holiday'], columns=['state_holiday'])
df5['store_type'] = self.store_type_scaler.fit_transform(df5['store_type'])
assortment_dict = {'basic':1, 'extra': 2, 'extended': 3}
df5['assortment'] = df5['assortment'].map(assortment_dict)
'].apply(lambda x: np.sin(x *(2. * np.pi / 7)))
df5['day_of_week_cos'] = df5['day_of_week'].apply(lambda x: np.cos(x *(2. * np.pi / 7)))
df5['month_sin'] = df5['month'].apply(lambda x: np.sin(x *(2. * np.pi / 12)))
df5['month_cos'] = df5['month'].apply(lambda x: np.cos(x *(2. * np.pi / 12)))
df5['day_sin'] = df5['day'].apply(lambda x: np.sin(x *(2. * np.pi / 30)))
df5['day_cos'] = df5['day'].apply(lambda x: np.cos(x *(2. * np.pi / 30)))
df5['week_of_year_sin'] = df5['week_of_year'].apply(lambda x: np.sin(x *(2. * np.pi / 52)))
df5['week_of_year_cos'] = df5['week_of_year'].apply(lambda x: np.cos(x *(2. * np.pi / 52)))
cols_selected = ['store', 'promo', 'store_type',
'assortment','competition_distance', 'competition_open_since_month',
'competition_open_since_year','promo2', 'promo2_since_week',
'promo2_since_year','competition_time_month', 'promo_time_week',
'day_of_week_sin','day_of_week_cos', 'month_sin','month_cos',
'day_sin','day_cos', 'week_of_year_sin', 'week_of_year_cos']
return df5[cols_selected]
def get_prediction(self, model, original_data, test_data):
pred = model.predict(test_data)
original_data['prediction'] = np.expm1(pred)
return original_data.to_json(orient='records', date_format='iso')
| true | true |
f7fcc1ff561b90dc1b78a67ffbe7c047ed06d0e9 | 16,252 | py | Python | python/paddle/fluid/tests/unittests/test_jit_save_load.py | Huangheyl/Paddle | a1b640bc66a5cc9583de503e7406aeba67565e8d | [
"Apache-2.0"
] | 8 | 2019-06-16T12:36:11.000Z | 2021-03-05T05:33:21.000Z | python/paddle/fluid/tests/unittests/test_jit_save_load.py | zlsh80826/Paddle | c560a7d57aad990f374ebadd330351f18e2ca65f | [
"Apache-2.0"
] | 1 | 2020-09-10T09:05:52.000Z | 2020-09-10T09:06:22.000Z | python/paddle/fluid/tests/unittests/test_jit_save_load.py | zlsh80826/Paddle | c560a7d57aad990f374ebadd330351f18e2ca65f | [
"Apache-2.0"
] | 25 | 2019-12-07T02:14:14.000Z | 2021-12-30T06:16:30.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import pickle
import unittest
import numpy as np
import paddle
from paddle.static import InputSpec
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative, ProgramTranslator
from paddle.fluid.dygraph.io import EXTRA_VAR_INFO_FILENAME
# Batch size of the synthetic training data.
BATCH_SIZE = 32
# Number of batches the random reader yields per pass.
BATCH_NUM = 10
# Fixed RNG seed so every test run trains on identical data.
SEED = 10
def random_batch_reader(input_size, label_size):
    """Return a generator function yielding BATCH_NUM random batches.

    Each batch is a (float32 inputs, int64 labels) pair with shapes
    [BATCH_SIZE, input_size] and [BATCH_SIZE, label_size].  np.random is
    re-seeded with SEED before every batch, so all batches are identical
    and runs are reproducible.
    """
    def _make_batch(input_shape, label_shape):
        np.random.seed(SEED)
        features = np.random.random(size=input_shape).astype('float32')
        targets = np.random.random(size=label_shape).astype('int64')
        return features, targets

    def __reader__():
        for _ in range(BATCH_NUM):
            yield _make_batch([BATCH_SIZE, input_size],
                              [BATCH_SIZE, label_size])

    return __reader__
class LinearNet(fluid.dygraph.Layer):
    """Single-linear-layer network whose forward pass is @declarative.

    @declarative lets ProgramTranslator trace forward into a static graph,
    which is what makes the layer saveable via fluid.dygraph.jit.save.
    """
    def __init__(self, in_size, out_size):
        super(LinearNet, self).__init__()
        self._linear = Linear(in_size, out_size)
    @declarative
    def forward(self, x):
        # Single linear projection; traced to static graph on first call.
        return self._linear(x)
class LinearNetNotDeclarative(fluid.dygraph.Layer):
    """Same single-linear network, but forward is NOT @declarative.

    No static graph is ever recorded for it, so jit.save on this layer is
    expected to fail (exercised by test_save_get_program_failed).
    """
    def __init__(self, in_size, out_size):
        super(LinearNetNotDeclarative, self).__init__()
        self._linear = Linear(in_size, out_size)
    def forward(self, x):
        return self._linear(x)
class LinearNetReturnLoss(fluid.dygraph.Layer):
    """Declarative network returning both an output tensor and a loss.

    Returning two values lets tests prune the loss output at save time via
    SaveLoadConfig.output_spec.
    """
    def __init__(self, in_size, out_size):
        super(LinearNetReturnLoss, self).__init__()
        self._linear = Linear(in_size, out_size)
    @declarative
    def forward(self, x):
        # Apply the same linear layer twice, then reduce to a scalar loss.
        y = self._linear(x)
        z = self._linear(y)
        loss = fluid.layers.mean(z)
        return z, loss
def train(layer, input_size=784, label_size=1):
    """Train `layer` in place for one pass over the random batch reader.

    Args:
        layer: dygraph Layer to train.
        input_size: feature dimension of each sample.
        label_size: label dimension of each sample.

    Returns:
        ([last input batch], the trained layer, last average loss) - callers
        use the input batch as example inputs for jit.save and the loss to
        compare runs for reproducibility.
    """
    # create optimizer
    sgd = fluid.optimizer.SGDOptimizer(
        learning_rate=0.01, parameter_list=layer.parameters())
    # create data loader over the deterministic random reader
    train_loader = fluid.io.DataLoader.from_generator(capacity=5)
    train_loader.set_batch_generator(
        random_batch_reader(input_size, label_size))
    # train
    for data in train_loader():
        img, label = data
        # Labels are targets only; keep gradients from flowing into them.
        label.stop_gradient = True
        cost = layer(img)
        loss = fluid.layers.cross_entropy(cost, label)
        avg_loss = fluid.layers.mean(loss)
        avg_loss.backward()
        sgd.minimize(avg_loss)
        layer.clear_gradients()
    return [img], layer, avg_loss
class TestJitSaveLoad(unittest.TestCase):
    """End-to-end tests for fluid.dygraph.jit.save / jit.load round-trips."""
    def setUp(self):
        self.model_path = "model.test_jit_save_load"
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed so save/load runs see identical weights and data
        paddle.manual_seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def train_and_save_model(self, model_path=None, configs=None):
        """Train a LinearNet and jit-save it; return the trained layer.

        Also verifies that jit.save does not mutate the types of the
        example inputs passed as input_spec.
        """
        layer = LinearNet(784, 1)
        example_inputs, layer, _ = train(layer)
        final_model_path = model_path if model_path else self.model_path
        orig_input_types = [type(x) for x in example_inputs]
        fluid.dygraph.jit.save(
            layer=layer,
            model_path=final_model_path,
            input_spec=example_inputs,
            configs=configs)
        new_input_types = [type(x) for x in example_inputs]
        self.assertEqual(orig_input_types, new_input_types)
        return layer
    def test_save_load(self):
        """Save a trained model, then check inference, state_dict and finetune."""
        # train and save model
        train_layer = self.train_and_save_model()
        # load model with dy2static translation disabled
        program_translator = ProgramTranslator()
        program_translator.enable(False)
        loaded_layer = fluid.dygraph.jit.load(self.model_path)
        self.load_and_inference(train_layer, loaded_layer)
        self.load_dygraph_state_dict(train_layer)
        self.load_and_finetune(train_layer, loaded_layer)
        program_translator.enable(True)
    def load_and_inference(self, train_layer, infer_layer):
        """Assert the loaded layer reproduces the trained layer's outputs."""
        train_layer.eval()
        infer_layer.eval()
        # inference & compare
        x = fluid.dygraph.to_variable(
            np.random.random((1, 784)).astype('float32'))
        self.assertTrue(
            np.array_equal(train_layer(x).numpy(), infer_layer(x).numpy()))
    def load_and_finetune(self, train_layer, load_train_layer):
        """Assert finetuning original vs. loaded model yields the same loss."""
        train_layer.train()
        load_train_layer.train()
        # train & compare (data reader is deterministic, see SEED)
        img0, _, train_loss = train(train_layer)
        img1, _, load_train_loss = train(load_train_layer)
        self.assertTrue(
            np.array_equal(train_loss.numpy(), load_train_loss.numpy()))
    def load_dygraph_state_dict(self, train_layer):
        """Assert load_dygraph restores an equivalent state_dict."""
        train_layer.eval()
        # construct new model and load the saved parameters into it
        new_layer = LinearNet(784, 1)
        model_dict, _ = fluid.dygraph.load_dygraph(self.model_path)
        new_layer.set_dict(model_dict)
        new_layer.eval()
        # inference & compare
        x = fluid.dygraph.to_variable(
            np.random.random((1, 784)).astype('float32'))
        self.assertTrue(
            np.array_equal(train_layer(x).numpy(), new_layer(x).numpy()))
    def test_save_get_program_failed(self):
        """Saving a layer without @declarative must raise RuntimeError."""
        layer = LinearNetNotDeclarative(784, 1)
        example_inputs, layer, _ = train(layer)
        with self.assertRaises(RuntimeError):
            fluid.dygraph.jit.save(
                layer=layer,
                model_path=self.model_path,
                input_spec=example_inputs)
    def test_load_dygraph_no_path(self):
        """Loading from a non-existent path must raise ValueError."""
        model_path = "model.test_jit_save_load.no_path"
        new_layer = LinearNet(784, 1)
        with self.assertRaises(ValueError):
            model_dict, _ = fluid.dygraph.load_dygraph(model_path)
class LinearNetMultiInput(fluid.dygraph.Layer):
    """Declarative network with two inputs and three outputs.

    The input_spec on forward fixes both inputs to shape [None, 8], so the
    layer can be jit-saved without passing concrete example inputs.
    """
    def __init__(self, in_size, out_size):
        super(LinearNetMultiInput, self).__init__()
        self._linear1 = Linear(in_size, out_size)
    @declarative(input_spec=[
        InputSpec(
            [None, 8], dtype='float32'), InputSpec(
                [None, 8], dtype='float32')
    ])
    def forward(self, x, y):
        # Both inputs share the same linear layer.
        x_out = self._linear1(x)
        y_out = self._linear1(y)
        loss = fluid.layers.mean(x_out + y_out)
        return x_out, y_out, loss
class TestSaveLoadWithInputSpec(unittest.TestCase):
    """Tests for saving/loading with explicit InputSpec and pruned outputs."""
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
    def test_with_input_spec(self):
        """Attach an InputSpec to forward, prune the loss, and reload."""
        net = LinearNetReturnLoss(8, 8)
        # set x.shape = [None, 8]
        net.forward = declarative(
            net.forward, input_spec=[InputSpec(
                [None, 8], name='x')])
        model_path = "model.input_spec.output_spec"
        configs = fluid.dygraph.jit.SaveLoadConfig()
        # check inputs and outputs recorded by the declarative wrapper
        self.assertTrue(len(net.forward.inputs) == 1)
        input_x = net.forward.inputs[0]
        self.assertTrue(input_x.shape == (-1, 8))
        self.assertTrue(input_x.name == 'x')
        # 1. prune loss (keep only the first output)
        configs.output_spec = net.forward.outputs[:1]
        fluid.dygraph.jit.save(net, model_path, configs=configs)
        # 2. load to infer (smoke check: inference must not raise)
        infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))
        pred = infer_layer(x)
    def test_multi_in_out(self):
        """Prune outputs of a two-input network and compare predictions."""
        net = LinearNetMultiInput(8, 8)
        model_path = "model.multi_inout.output_spec1"
        configs = fluid.dygraph.jit.SaveLoadConfig()
        # 1. check inputs and outputs
        self.assertTrue(len(net.forward.inputs) == 2)
        input_x = net.forward.inputs[0]
        input_y = net.forward.inputs[1]
        self.assertTrue(input_x.shape == (-1, 8))
        self.assertTrue(input_y.shape == (-1, 8))
        # 2. prune loss (keep x_out and y_out)
        configs.output_spec = net.forward.outputs[:2]
        fluid.dygraph.jit.save(net, model_path, configs=configs)
        # 3. load to infer
        infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))
        y = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))
        # 4. predict
        pred_x, pred_y = infer_layer(x, y)
        # 1. prune y and loss (keep only x_out)
        model_path = "model.multi_inout.output_spec2"
        configs.output_spec = net.forward.outputs[:1]
        fluid.dygraph.jit.save(net, model_path, [input_x], configs)
        # 2. load again
        infer_layer2 = fluid.dygraph.jit.load(model_path, configs=configs)
        # 3. predict with the single remaining input
        pred_xx = infer_layer2(x)
        # 4. assert pred_x == pred_xx: pruning must not change the x branch
        self.assertTrue(np.allclose(pred_x.numpy(), pred_xx.numpy()))
class TestJitSaveLoadConfig(unittest.TestCase):
    """Tests for the SaveLoadConfig options of jit.save / jit.load."""
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed for reproducible weights and data
        paddle.manual_seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def basic_save_load(self, layer, model_path, configs):
        """Train, save with `configs`, reload, and compare inference."""
        # 1. train & save
        example_inputs, train_layer, _ = train(layer)
        fluid.dygraph.jit.save(
            layer=train_layer,
            model_path=model_path,
            input_spec=example_inputs,
            configs=configs)
        # 2. load
        infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
        train_layer.eval()
        # 3. inference & compare
        x = fluid.dygraph.to_variable(
            np.random.random((1, 784)).astype('float32'))
        self.assertTrue(
            np.array_equal(train_layer(x).numpy(), infer_layer(x).numpy()))
    def test_model_filename(self):
        """Round-trip with a custom model filename."""
        layer = LinearNet(784, 1)
        model_path = "model.save_load_config.output_spec"
        configs = fluid.dygraph.jit.SaveLoadConfig()
        configs.model_filename = "__simplenet__"
        self.basic_save_load(layer, model_path, configs)
    def test_params_filename(self):
        """Round-trip with a custom params filename."""
        layer = LinearNet(784, 1)
        model_path = "model.save_load_config.params_filename"
        configs = fluid.dygraph.jit.SaveLoadConfig()
        configs.params_filename = "__params__"
        self.basic_save_load(layer, model_path, configs)
    def test_separate_params(self):
        """Round-trip with parameters stored in separate files."""
        layer = LinearNet(784, 1)
        model_path = "model.save_load_config.separate_params"
        configs = fluid.dygraph.jit.SaveLoadConfig()
        configs.separate_params = True
        self.basic_save_load(layer, model_path, configs)
    def test_output_spec(self):
        """Prune the loss via output_spec and verify the kept output."""
        train_layer = LinearNetReturnLoss(8, 8)
        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=0.1, parameter_list=train_layer.parameters())
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))
        for i in range(10):
            out, loss = train_layer(x)
            loss.backward()
            adam.minimize(loss)
            train_layer.clear_gradients()
        model_path = "model.save_load_config.output_spec"
        configs = fluid.dygraph.jit.SaveLoadConfig()
        # keep only `out`; the loss output is pruned from the saved graph
        configs.output_spec = [out]
        fluid.dygraph.jit.save(
            layer=train_layer,
            model_path=model_path,
            input_spec=[x],
            configs=configs)
        train_layer.eval()
        infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))
        self.assertTrue(
            np.array_equal(train_layer(x)[0].numpy(), infer_layer(x).numpy()))
class MultiLoadingLinearNet(fluid.dygraph.Layer):
    """Layer that embeds the same jit-saved model twice as sub-layers.

    Used to verify that loading one saved model multiple times yields
    uniquely named parameters (no variable-name clashes).
    """
    def __init__(self, size, model_path):
        super(MultiLoadingLinearNet, self).__init__()
        self._linear = Linear(size, size)
        # load the same saved model twice on purpose
        self._load_linear1 = fluid.dygraph.jit.load(model_path)
        self._load_linear2 = fluid.dygraph.jit.load(model_path)
    @declarative
    def forward(self, x):
        # chain: own linear -> loaded copy 1 -> loaded copy 2 -> own linear
        tmp1 = self._linear(x)
        tmp2 = self._load_linear1(tmp1)
        tmp3 = self._load_linear2(tmp2)
        y = self._linear(tmp3)
        return y
class TestJitMultipleLoading(unittest.TestCase):
    """Loading one saved model into several sub-layers must not clash names."""
    def setUp(self):
        self.linear_size = 4
        self.model_path = "model.jit_multi_load"
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.manual_seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        # train and save base model
        self.train_and_save_orig_model()
    def train_and_save_orig_model(self):
        """Train a small LinearNet and jit-save it as the base model."""
        layer = LinearNet(self.linear_size, self.linear_size)
        example_inputs, layer, _ = train(layer, self.linear_size, 1)
        fluid.dygraph.jit.save(
            layer=layer, model_path=self.model_path, input_spec=example_inputs)
    def test_load_model_retransform_inference(self):
        """Every variable in the combined state_dict must have a unique name."""
        multi_loaded_layer = MultiLoadingLinearNet(self.linear_size,
                                                   self.model_path)
        state_dict = multi_loaded_layer.state_dict()
        name_set = set()
        for _, var in state_dict.items():
            self.assertTrue(var.name not in name_set)
            name_set.add(var.name)
class LinearNetReturnHidden(fluid.dygraph.Layer):
    """Two-layer declarative network returning a hidden tensor and a loss.

    Returning the hidden activation lets tests prune the saved graph down
    to it via SaveLoadConfig.output_spec.
    """
    def __init__(self, in_size, out_size):
        super(LinearNetReturnHidden, self).__init__()
        self._linear_1 = Linear(in_size, out_size)
        self._linear_2 = Linear(in_size, out_size)
    @declarative
    def forward(self, x):
        y = self._linear_1(x)
        z = self._linear_2(y)
        loss = fluid.layers.mean(z)
        return y, loss
class TestJitPruneModelAndLoad(unittest.TestCase):
    """Tests for loading models whose outputs were pruned at save time."""
    def setUp(self):
        self.linear_size = 4
        self.model_path = "model.jit_prune_model_and_load"
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.manual_seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def train_and_save(self):
        """Train LinearNetReturnHidden, save only the hidden output."""
        train_layer = LinearNetReturnHidden(8, 8)
        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=0.1, parameter_list=train_layer.parameters())
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))
        for i in range(10):
            hidden, loss = train_layer(x)
            loss.backward()
            adam.minimize(loss)
            train_layer.clear_gradients()
        configs = fluid.dygraph.jit.SaveLoadConfig()
        # prune everything but the hidden output
        configs.output_spec = [hidden]
        fluid.dygraph.jit.save(
            layer=train_layer,
            model_path=self.model_path,
            input_spec=[x],
            configs=configs)
        return train_layer
    def test_load_pruned_model(self):
        """The pruned model must reproduce the hidden output exactly."""
        train_layer = self.train_and_save()
        train_layer.eval()
        infer_layer = fluid.dygraph.jit.load(self.model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))
        self.assertTrue(
            np.array_equal(train_layer(x)[0].numpy(), infer_layer(x).numpy()))
    def test_load_var_not_in_extra_var_info(self):
        """Loading must fail if the extra var info file was emptied."""
        self.train_and_save()
        # corrupt the extra var info file by clearing its contents
        var_info_path = os.path.join(self.model_path, EXTRA_VAR_INFO_FILENAME)
        with open(var_info_path, 'rb') as f:
            extra_var_info = pickle.load(f)
            extra_var_info.clear()
        with open(var_info_path, 'wb') as f:
            pickle.dump(extra_var_info, f, protocol=2)
        with self.assertRaises(RuntimeError):
            fluid.dygraph.jit.load(self.model_path)
# Run the whole suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 34.875536 | 79 | 0.646197 |
from __future__ import print_function
import os
import pickle
import unittest
import numpy as np
import paddle
from paddle.static import InputSpec
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph import declarative, ProgramTranslator
from paddle.fluid.dygraph.io import EXTRA_VAR_INFO_FILENAME
BATCH_SIZE = 32
BATCH_NUM = 10
SEED = 10
def random_batch_reader(input_size, label_size):
def _get_random_inputs_and_labels(input_size, label_size):
np.random.seed(SEED)
input = np.random.random(size=input_size).astype('float32')
label = np.random.random(size=label_size).astype('int64')
return input, label
def __reader__():
for _ in range(BATCH_NUM):
batch_input, batch_label = _get_random_inputs_and_labels(
[BATCH_SIZE, input_size], [BATCH_SIZE, label_size])
yield batch_input, batch_label
return __reader__
class LinearNet(fluid.dygraph.Layer):
def __init__(self, in_size, out_size):
super(LinearNet, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
def forward(self, x):
return self._linear(x)
class LinearNetNotDeclarative(fluid.dygraph.Layer):
def __init__(self, in_size, out_size):
super(LinearNetNotDeclarative, self).__init__()
self._linear = Linear(in_size, out_size)
def forward(self, x):
return self._linear(x)
class LinearNetReturnLoss(fluid.dygraph.Layer):
def __init__(self, in_size, out_size):
super(LinearNetReturnLoss, self).__init__()
self._linear = Linear(in_size, out_size)
@declarative
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
loss = fluid.layers.mean(z)
return z, loss
def train(layer, input_size=784, label_size=1):
sgd = fluid.optimizer.SGDOptimizer(
learning_rate=0.01, parameter_list=layer.parameters())
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
train_loader.set_batch_generator(
random_batch_reader(input_size, label_size))
for data in train_loader():
img, label = data
label.stop_gradient = True
cost = layer(img)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
sgd.minimize(avg_loss)
layer.clear_gradients()
return [img], layer, avg_loss
class TestJitSaveLoad(unittest.TestCase):
def setUp(self):
self.model_path = "model.test_jit_save_load"
fluid.enable_dygraph()
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def train_and_save_model(self, model_path=None, configs=None):
layer = LinearNet(784, 1)
example_inputs, layer, _ = train(layer)
final_model_path = model_path if model_path else self.model_path
orig_input_types = [type(x) for x in example_inputs]
fluid.dygraph.jit.save(
layer=layer,
model_path=final_model_path,
input_spec=example_inputs,
configs=configs)
new_input_types = [type(x) for x in example_inputs]
self.assertEqual(orig_input_types, new_input_types)
return layer
def test_save_load(self):
train_layer = self.train_and_save_model()
program_translator = ProgramTranslator()
program_translator.enable(False)
loaded_layer = fluid.dygraph.jit.load(self.model_path)
self.load_and_inference(train_layer, loaded_layer)
self.load_dygraph_state_dict(train_layer)
self.load_and_finetune(train_layer, loaded_layer)
program_translator.enable(True)
def load_and_inference(self, train_layer, infer_layer):
train_layer.eval()
infer_layer.eval()
x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x).numpy(), infer_layer(x).numpy()))
def load_and_finetune(self, train_layer, load_train_layer):
train_layer.train()
load_train_layer.train()
img0, _, train_loss = train(train_layer)
img1, _, load_train_loss = train(load_train_layer)
self.assertTrue(
np.array_equal(train_loss.numpy(), load_train_loss.numpy()))
def load_dygraph_state_dict(self, train_layer):
train_layer.eval()
new_layer = LinearNet(784, 1)
model_dict, _ = fluid.dygraph.load_dygraph(self.model_path)
new_layer.set_dict(model_dict)
new_layer.eval()
x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x).numpy(), new_layer(x).numpy()))
def test_save_get_program_failed(self):
layer = LinearNetNotDeclarative(784, 1)
example_inputs, layer, _ = train(layer)
with self.assertRaises(RuntimeError):
fluid.dygraph.jit.save(
layer=layer,
model_path=self.model_path,
input_spec=example_inputs)
def test_load_dygraph_no_path(self):
model_path = "model.test_jit_save_load.no_path"
new_layer = LinearNet(784, 1)
with self.assertRaises(ValueError):
model_dict, _ = fluid.dygraph.load_dygraph(model_path)
class LinearNetMultiInput(fluid.dygraph.Layer):
def __init__(self, in_size, out_size):
super(LinearNetMultiInput, self).__init__()
self._linear1 = Linear(in_size, out_size)
@declarative(input_spec=[
InputSpec(
[None, 8], dtype='float32'), InputSpec(
[None, 8], dtype='float32')
])
def forward(self, x, y):
x_out = self._linear1(x)
y_out = self._linear1(y)
loss = fluid.layers.mean(x_out + y_out)
return x_out, y_out, loss
class TestSaveLoadWithInputSpec(unittest.TestCase):
def setUp(self):
fluid.enable_dygraph()
def test_with_input_spec(self):
net = LinearNetReturnLoss(8, 8)
net.forward = declarative(
net.forward, input_spec=[InputSpec(
[None, 8], name='x')])
model_path = "model.input_spec.output_spec"
configs = fluid.dygraph.jit.SaveLoadConfig()
self.assertTrue(len(net.forward.inputs) == 1)
input_x = net.forward.inputs[0]
self.assertTrue(input_x.shape == (-1, 8))
self.assertTrue(input_x.name == 'x')
configs.output_spec = net.forward.outputs[:1]
fluid.dygraph.jit.save(net, model_path, configs=configs)
infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
pred = infer_layer(x)
def test_multi_in_out(self):
net = LinearNetMultiInput(8, 8)
model_path = "model.multi_inout.output_spec1"
configs = fluid.dygraph.jit.SaveLoadConfig()
self.assertTrue(len(net.forward.inputs) == 2)
input_x = net.forward.inputs[0]
input_y = net.forward.inputs[1]
self.assertTrue(input_x.shape == (-1, 8))
self.assertTrue(input_y.shape == (-1, 8))
configs.output_spec = net.forward.outputs[:2]
fluid.dygraph.jit.save(net, model_path, configs=configs)
infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
y = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
pred_x, pred_y = infer_layer(x, y)
model_path = "model.multi_inout.output_spec2"
configs.output_spec = net.forward.outputs[:1]
fluid.dygraph.jit.save(net, model_path, [input_x], configs)
infer_layer2 = fluid.dygraph.jit.load(model_path, configs=configs)
pred_xx = infer_layer2(x)
self.assertTrue(np.allclose(pred_x.numpy(), pred_xx.numpy()))
class TestJitSaveLoadConfig(unittest.TestCase):
def setUp(self):
fluid.enable_dygraph()
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def basic_save_load(self, layer, model_path, configs):
example_inputs, train_layer, _ = train(layer)
fluid.dygraph.jit.save(
layer=train_layer,
model_path=model_path,
input_spec=example_inputs,
configs=configs)
infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
train_layer.eval()
x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x).numpy(), infer_layer(x).numpy()))
def test_model_filename(self):
layer = LinearNet(784, 1)
model_path = "model.save_load_config.output_spec"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.model_filename = "__simplenet__"
self.basic_save_load(layer, model_path, configs)
def test_params_filename(self):
layer = LinearNet(784, 1)
model_path = "model.save_load_config.params_filename"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.params_filename = "__params__"
self.basic_save_load(layer, model_path, configs)
def test_separate_params(self):
layer = LinearNet(784, 1)
model_path = "model.save_load_config.separate_params"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.separate_params = True
self.basic_save_load(layer, model_path, configs)
def test_output_spec(self):
train_layer = LinearNetReturnLoss(8, 8)
adam = fluid.optimizer.AdamOptimizer(
learning_rate=0.1, parameter_list=train_layer.parameters())
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
for i in range(10):
out, loss = train_layer(x)
loss.backward()
adam.minimize(loss)
train_layer.clear_gradients()
model_path = "model.save_load_config.output_spec"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.output_spec = [out]
fluid.dygraph.jit.save(
layer=train_layer,
model_path=model_path,
input_spec=[x],
configs=configs)
train_layer.eval()
infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x)[0].numpy(), infer_layer(x).numpy()))
class MultiLoadingLinearNet(fluid.dygraph.Layer):
def __init__(self, size, model_path):
super(MultiLoadingLinearNet, self).__init__()
self._linear = Linear(size, size)
self._load_linear1 = fluid.dygraph.jit.load(model_path)
self._load_linear2 = fluid.dygraph.jit.load(model_path)
@declarative
def forward(self, x):
tmp1 = self._linear(x)
tmp2 = self._load_linear1(tmp1)
tmp3 = self._load_linear2(tmp2)
y = self._linear(tmp3)
return y
class TestJitMultipleLoading(unittest.TestCase):
def setUp(self):
self.linear_size = 4
self.model_path = "model.jit_multi_load"
fluid.enable_dygraph()
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
self.train_and_save_orig_model()
def train_and_save_orig_model(self):
layer = LinearNet(self.linear_size, self.linear_size)
example_inputs, layer, _ = train(layer, self.linear_size, 1)
fluid.dygraph.jit.save(
layer=layer, model_path=self.model_path, input_spec=example_inputs)
def test_load_model_retransform_inference(self):
multi_loaded_layer = MultiLoadingLinearNet(self.linear_size,
self.model_path)
state_dict = multi_loaded_layer.state_dict()
name_set = set()
for _, var in state_dict.items():
self.assertTrue(var.name not in name_set)
name_set.add(var.name)
class LinearNetReturnHidden(fluid.dygraph.Layer):
def __init__(self, in_size, out_size):
super(LinearNetReturnHidden, self).__init__()
self._linear_1 = Linear(in_size, out_size)
self._linear_2 = Linear(in_size, out_size)
@declarative
def forward(self, x):
y = self._linear_1(x)
z = self._linear_2(y)
loss = fluid.layers.mean(z)
return y, loss
class TestJitPruneModelAndLoad(unittest.TestCase):
def setUp(self):
self.linear_size = 4
self.model_path = "model.jit_prune_model_and_load"
fluid.enable_dygraph()
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def train_and_save(self):
train_layer = LinearNetReturnHidden(8, 8)
adam = fluid.optimizer.AdamOptimizer(
learning_rate=0.1, parameter_list=train_layer.parameters())
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
for i in range(10):
hidden, loss = train_layer(x)
loss.backward()
adam.minimize(loss)
train_layer.clear_gradients()
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.output_spec = [hidden]
fluid.dygraph.jit.save(
layer=train_layer,
model_path=self.model_path,
input_spec=[x],
configs=configs)
return train_layer
def test_load_pruned_model(self):
train_layer = self.train_and_save()
train_layer.eval()
infer_layer = fluid.dygraph.jit.load(self.model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x)[0].numpy(), infer_layer(x).numpy()))
def test_load_var_not_in_extra_var_info(self):
self.train_and_save()
var_info_path = os.path.join(self.model_path, EXTRA_VAR_INFO_FILENAME)
with open(var_info_path, 'rb') as f:
extra_var_info = pickle.load(f)
extra_var_info.clear()
with open(var_info_path, 'wb') as f:
pickle.dump(extra_var_info, f, protocol=2)
with self.assertRaises(RuntimeError):
fluid.dygraph.jit.load(self.model_path)
if __name__ == '__main__':
unittest.main()
| true | true |
f7fcc281f7f6a6500b2331ced40e634989aeda1c | 21,591 | py | Python | surgery.py | AlohaBazinga/Surgery-Robot-Detection-Segmentation | f42a3562cbb6a77ba195f5aa4828876afaf02500 | [
"MIT"
] | null | null | null | surgery.py | AlohaBazinga/Surgery-Robot-Detection-Segmentation | f42a3562cbb6a77ba195f5aa4828876afaf02500 | [
"MIT"
] | null | null | null | surgery.py | AlohaBazinga/Surgery-Robot-Detection-Segmentation | f42a3562cbb6a77ba195f5aa4828876afaf02500 | [
"MIT"
] | null | null | null | """
Mask R-CNN
Train on the surgery robot dataset.
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
#Train a new model starting from pre-trained COCO weights
python surgery.py train --dataset=/home/.../mask_rcnn/data/surgery/ --weights=coco
#Train a new model starting from pre-trained ImageNet weights
python surgery.py train --dataset=/home/.../mask_rcnn/data/surgery/ --weights=imagenet
# Continue training the last model you trained. This will find
# the last trained weights in the model directory.
python surgery.py train --dataset=/home/.../mask_rcnn/data/surgery/ --weights=last
#Detect and color splash on a image with the last model you trained.
#This will find the last trained weights in the model directory.
python surgery.py splash --weights=last --image=/home/...../*.jpg
#Detect and color splash on a video with a specific pre-trained weights of yours.
    python surgery.py splash --weights=/home/.../logs/mask_rcnn_surgery_0030.h5 --video=/home/simon/Videos/Center.wmv
"""
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
from matplotlib import pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import visualize
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class SurgeryConfig(Config):
    """Configuration for training on the surgery robot dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "surgery"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2
    # Number of classes (including background)
    NUM_CLASSES = 1 + 2  # Background + "adidas" and "apple" objects
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100
    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
############################################################
# Dataset
############################################################
class SurgeryDataset(utils.Dataset):
def load_VIA(self, dataset_dir, subset, hc=False):
"""Load the surgery dataset from VIA.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val or predict
"""
# Add classes. We have only one class to add.
self.add_class("surgery", 1, "adidas")
self.add_class("surgery", 2, "apple")
if hc is True:
for i in range(1,14):
self.add_class("surgery", i, "{}".format(i))
self.add_class("surgery", 14, "arm")
# Train or validation dataset?
assert subset in ["train", "val", "predict"]
dataset_dir = os.path.join(dataset_dir, subset)
# Load annotations
# VGG Image Annotator saves each image in the form:
# { 'filename': '28503151_5b5b7ec140_b.jpg',
# 'regions': {
# '0': {
# 'region_attributes': {name:'a'},
# 'shape_attributes': {
# 'all_points_x': [...],
# 'all_points_y': [...],
# 'name': 'polygon'}},
# ... more regions ...
# },
# 'size': 100202
# }
# We mostly care about the x and y coordinates of each region
annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
annotations = list(annotations.values()) # don't need the dict keys
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
# Get the x, y coordinaets of points of the polygons that make up
# the outline of each object instance. There are stores in the
# shape_attributes (see json format above)
polygons = [r['shape_attributes'] for r in a['regions'].values()]
names = [r['region_attributes'] for r in a['regions'].values()]
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
# the image. This is only managable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"surgery",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons,
names=names)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a surgery dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "surgery":
return super(self.__class__, self).load_mask(image_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
class_names = info["names"]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
# Assign class_ids by reading class_names
class_ids = np.zeros([len(info["polygons"])])
# In the surgery dataset, pictures are labeled with name 'a' and 'r' representing arm and ring.
for i, p in enumerate(class_names):
#"name" is the attributes name decided when labeling, etc. 'region_attributes': {name:'a'}
if p['name'] == 'adidas':
class_ids[i] = 1
elif p['name'] == 'apple':
class_ids[i] = 2
#assert code here to extend to other labels
class_ids = class_ids.astype(int)
# Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
return mask.astype(np.bool), class_ids
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "surgery":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
    def load_mask_hc(self, image_id):
        """Generate instance masks for an image (hard-coded label variant).

        Unlike load_mask, numeric region names are used directly as class
        ids, 'adidas' maps to 14, and regions named 'error' keep id 0.
        Returns:
         masks: A bool array of shape [height, width, instance count] with
             one mask per instance.
         class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a surgery dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "surgery":
            return super(self.__class__, self).load_mask(image_id)
        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        # "name" is the region attribute chosen at labelling time,
        # e.g. 'region_attributes': {'name': 'a'}
        class_names = info["names"]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            # Get indexes of pixels inside the polygon and set them to 1.
            # NOTE(review): no image-shape clipping here — a polygon vertex
            # outside the image will raise IndexError; confirm annotations
            # are in-bounds.
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1
        # Assign class_ids by reading class_names.
        class_ids = np.zeros([len(info["polygons"])])
        for i, p in enumerate(class_names):
            if p['name'] == 'adidas':
                class_ids[i] = 14
            elif p['name'] == 'error':
                # Deliberately ignored: stays class 0 (background).
                pass
            else:
                # All other labels are expected to already be numeric
                # strings; int() will raise for anything else.
                class_ids[i] = int(p['name'])
            # Extend here to support other labels.
        class_ids = class_ids.astype(int)
        # Return mask, and array of class IDs of each instance.
        # NOTE(review): np.bool was removed in NumPy 1.24; on newer NumPy
        # this line needs the builtin bool instead.
        return mask.astype(np.bool), class_ids
def train(model, *dic):
    """Train the model on the surgery dataset.

    Builds the "train" and "val" splits from ``args.dataset`` and
    fine-tunes only the network head layers for 60 epochs.
    """
    # Build both dataset splits the same way: load the VIA annotations,
    # then prepare the dataset for use.
    splits = {}
    for subset in ("train", "val"):
        ds = SurgeryDataset()
        ds.load_VIA(args.dataset, subset)
        ds.prepare()
        splits[subset] = ds
    # *** This training schedule is an example. Update to your needs ***
    # Starting from COCO-trained weights on a small dataset, training
    # just the heads for a short schedule is usually enough.
    print("Training network heads")
    model.train(splits["train"], splits["val"],
                learning_rate=config.LEARNING_RATE,
                epochs=60,
                layers='heads')
def color_splash(image, mask):
    """Apply color splash effect.
    image: RGB image [height, width, 3]
    mask: instance segmentation mask [height, width, instance count]
    Returns result image as uint8.
    """
    # Make a grayscale copy of the image. The grayscale copy still
    # has 3 RGB channels, though.
    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    # Bug fix: test the *instance* dimension before collapsing. The old
    # code checked mask.shape[0] (the image height) after collapsing,
    # which is always true, so the no-detection branch was unreachable.
    if mask.shape[-1] > 0:
        # We're treating all instances as one, so collapse the mask into
        # one layer.
        mask = (np.sum(mask, -1, keepdims=True) >= 1)
        # Copy color pixels from the original color image where mask is set.
        splash = np.where(mask, image, gray).astype(np.uint8)
    else:
        # No detections: return the greyscale copy, also as uint8 so the
        # return type is consistent between branches.
        splash = gray.astype(np.uint8)
    return splash
def detect_and_color_splash(model, image_path=None, video_path=None, out_dir=''):
    """Run detection on one image or a whole video and visualise results.

    model: Mask R-CNN model in inference mode.
    image_path: path or URL of an image to process.
    video_path: path or URL of a video to process frame by frame;
        exactly one of image_path/video_path should be given.
    out_dir: output directory (currently unused; kept for backward
        compatibility with existing callers).
    """
    assert image_path or video_path
    class_names = ['BG', 'adidas', 'apple']
    # Image or video?
    if image_path:
        # Run model detection and generate the color splash effect.
        # Bug fix: previously this branch read the global ``args.image``
        # and silently ignored the ``image_path`` parameter.
        print("Running on {}".format(image_path))
        # Read image
        image = skimage.io.imread(image_path)
        # Detect objects
        r = model.detect([image], verbose=1)[0]
        # Draw the detections on the image.
        visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                    class_names, r['scores'], making_image=True)
        file_name = 'splash.png'
    elif video_path:
        import cv2
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        # NOTE(review): output frame size is hard-coded; presumably
        # visualize.display_instances renders 1600x1600 canvases with
        # making_video=True — confirm, otherwise the writer produces a
        # broken file.
        width = 1600
        height = 1600
        fps = vcapture.get(cv2.CAP_PROP_FPS)
        # Define codec and create video writer
        file_name = "splash_{:%Y%m%dT%H%M%S}.wmv".format(datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name,
                                  cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))
        count = 0
        success = True
        # For video we want each class to keep the same mask colour
        # across frames, so generate the colours once up front.
        colors = visualize.random_colors(len(class_names))
        while success:
            print("frame: ", count)
            # Release matplotlib state from the previous frame.
            plt.clf()
            plt.close()
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR, convert to RGB
                image = image[..., ::-1]
                # Detect objects
                r = model.detect([image], verbose=0)[0]
                # Color splash
                splash = visualize.display_instances(
                    image, r['rois'], r['masks'], r['class_ids'],
                    class_names, r['scores'], colors=colors, making_video=True)
                # Add image to video writer
                vwriter.write(splash)
                count += 1
        vwriter.release()
    print("Saved to ", file_name)
############################################################
# RLE Encoding
############################################################
def rle_encode(mask):
    """Encode a binary mask in Run Length Encoding (RLE).

    mask: 2D array, non-zero where the object is present.
    Returns a string of space-separated "start length" pairs, with
    1-based starts counted in column-major (Fortran) order.
    """
    assert mask.ndim == 2, "Mask must be of shape [Height, Width]"
    # Flatten column-wise; pad both ends with zeros so runs touching the
    # borders still produce complete transition pairs.
    flat = mask.T.flatten()
    padded = np.concatenate([[0], flat, [0]])
    # Run boundaries (0->1 and 1->0) show up as non-zero first differences.
    edges = np.flatnonzero(np.diff(padded, n=1))
    # Pair up 1-based (start, end) indices, then turn each end into a
    # run length.
    pairs = edges.reshape([-1, 2]) + 1
    pairs[:, 1] -= pairs[:, 0]
    return " ".join(str(v) for v in pairs.flatten())
def rle_decode(rle, shape):
    """Decode an RLE string of space-separated numbers into a binary mask.

    rle: string of "start length" pairs as produced by rle_encode
        (1-based starts, column-major order).
    shape: (height, width) of the target mask.
    Returns a bool array of the given shape.
    """
    rle = list(map(int, rle.split()))
    rle = np.array(rle, dtype=np.int32).reshape([-1, 2])
    # Convert (start, length) pairs into 0-based [start, end) offsets.
    rle[:, 1] += rle[:, 0]
    rle -= 1
    # Bug fix: np.bool was removed in NumPy 1.24; the builtin bool is
    # the documented replacement.
    mask = np.zeros([shape[0] * shape[1]], bool)
    for s, e in rle:
        assert 0 <= s < mask.shape[0]
        assert 1 <= e <= mask.shape[0], "shape: {} s {} e {}".format(shape, s, e)
        mask[s:e] = 1
    # The encoding is column-major, so reshape transposed and transpose
    # back.
    mask = mask.reshape([shape[1], shape[0]]).T
    return mask
def mask_to_rle(image_id, mask, scores):
    """Encode the instance masks of one image into submission format.

    image_id: identifier written at the start of every output line.
    mask: [H, W, count] stack of instance masks.
    scores: per-instance confidence scores, used to resolve overlapping
        pixels (the higher-scoring instance wins).
    Returns one "id, rle" line per non-empty instance, joined by
    newlines, or "id," when there are no instances at all.
    """
    assert mask.ndim == 3, "Mask must be [H, W, count]"
    # No instances: emit a line with the image ID only.
    if mask.shape[-1] == 0:
        return "{},".format(image_id)
    # Remove overlaps: label every pixel with the 1-based score rank of
    # its highest-priority instance, then keep only the maximum label.
    order = np.argsort(scores)[::-1] + 1  # 1-based descending
    labelled = np.max(mask * np.reshape(order, [1, 1, -1]), -1)
    # Emit one RLE line per surviving instance, best score first.
    lines = []
    for rank in order:
        instance = np.where(labelled == rank, 1, 0)
        if instance.sum() == 0.0:
            # Fully occluded by higher-scoring instances; skip it.
            continue
        lines.append("{}, {}".format(image_id, rle_encode(instance)))
    return "\n".join(lines)
def detect(model, dataset_dir, subset):
    """Run detection on images in the given directory and save results.

    Writes one annotated PNG per image plus a ``submit.csv`` file with
    RLE-encoded masks into a ``RESULTS/`` directory under the current
    working directory.
    """
    print("Running on {}".format(dataset_dir))
    # Robustness fix: don't crash when RESULTS/ already exists (e.g. on
    # a second run).
    os.makedirs('RESULTS', exist_ok=True)
    submit_dir = os.path.join(os.getcwd(), "RESULTS/")
    # Read dataset
    dataset = SurgeryDataset()
    dataset.load_VIA(dataset_dir, subset)
    dataset.prepare()
    # Loop over images
    submission = []
    for image_id in dataset.image_ids:
        # Load image and run detection
        image = dataset.load_image(image_id)
        r = model.detect([image], verbose=0)[0]
        # Encode the detections to RLE. Returns a string of multiple lines.
        source_id = dataset.image_info[image_id]["id"]
        rle = mask_to_rle(source_id, r["masks"], r["scores"])
        submission.append(rle)
        # Save the image with its predicted masks drawn on it.
        canvas = visualize.display_instances(
            image, r['rois'], r['masks'], r['class_ids'],
            dataset.class_names, r['scores'], detect=True)
        # NOTE(review): [:-4] strips a 3-character extension like ".png";
        # confirm all source filenames match that pattern.
        canvas.print_figure("{}/{}.png".format(
            submit_dir, dataset.image_info[image_id]["id"][:-4]))
    # Save all rows to a single csv file.
    submission = "ImageId,EncodedPixels\n" + "\n".join(submission)
    file_path = os.path.join(submit_dir, "submit.csv")
    with open(file_path, "w") as f:
        f.write(submission)
    print("Saved to ", submit_dir)
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect rings and robot arms.')
    # NOTE(review): the help text omits the 'detect' command even though
    # it is dispatched below.
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('--dataset', required=False,
                        metavar="/home/simon/mask_rcnn/data/surgery",
                        help='Directory of the surgery dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/home/simon/logs/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color splash effect on')
    parser.add_argument('--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color splash effect on')
    parser.add_argument('--subset', required=False,
                        metavar="Dataset sub-directory",
                        help="Subset of dataset to run prediction on")
    args = parser.parse_args()
    # Validate arguments: each command requires different options.
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        assert args.image or args.video,\
               "Provide --image or --video to apply color splash"
    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)
    # Configurations: full config for training, a batch-size-1 override
    # for everything else.
    if args.command == "train":
        config = SurgeryConfig()
    else:
        class InferenceConfig(SurgeryConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()
    # Create model in the mode matching the requested command.
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
    # Select weights file to load. 'coco', 'last' and 'imagenet' are
    # magic values; anything else is taken as a literal path.
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights.
        # NOTE(review): indexing [1] assumes find_last() returns a
        # (dir, path) tuple; newer mrcnn releases return the path
        # directly — confirm against the installed version.
        weights_path = model.find_last()[1]
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Dispatch to the requested command.
    if args.command == "train":
        train(model)
    elif args.command == "detect":
        detect(model, args.dataset, args.subset)
    elif args.command == "splash":
        detect_and_color_splash(model, image_path=args.image,
                                video_path=args.video)
    else:
        # NOTE(review): this message, like the help text, omits 'detect'.
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))
    # dataset_dir = '/home/simon/deeplearning/mask_rcnn/data'
    # dataset_train = SurgeryDataset()
    # dataset_train.VIA(dataset_dir, "train")
    # # dataset_train.prepare()
    # a, b = dataset_train.load_mask(130)
    # print(a.shape, b.shape)
    # print(b)
| 39.835793 | 117 | 0.58793 |
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
from matplotlib import pyplot as plt
ROOT_DIR = os.path.abspath("../../")
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import visualize
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
elif video_path:
import cv2
vcapture = cv2.VideoCapture(video_path)
width = 1600
height = 1600
fps = vcapture.get(cv2.CAP_PROP_FPS)
file_name = "splash_{:%Y%m%dT%H%M%S}.wmv".format(datetime.datetime.now())
vwriter = cv2.VideoWriter(file_name,
cv2.VideoWriter_fourcc(*'MJPG'),
fps, (width, height))
count = 0
success = True
colors = visualize.random_colors(len(class_names))
while success:
print("frame: ", count)
plt.clf()
plt.close()
success, image = vcapture.read()
if success:
image = image[..., ::-1]
r = model.detect([image], verbose=0)[0]
splash = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'], colors=colors, making_video=True)
vwriter.write(splash)
count += 1
vwriter.release()
print("Saved to ", file_name)
| true | true |
f7fcc2eeece09eb126dbd4dd889e1f5e156a60c6 | 100,142 | py | Python | pyglet/extlibs/png.py | Torxed/pyglet | 0a35e67e43d069b952e3b02773cdf5b064124069 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T16:11:21.000Z | 2021-05-26T16:11:21.000Z | pyglet/extlibs/png.py | Torxed/pyglet | 0a35e67e43d069b952e3b02773cdf5b064124069 | [
"BSD-3-Clause"
] | 5 | 2021-01-21T09:46:12.000Z | 2022-02-14T13:54:44.000Z | pyglet/extlibs/png.py | Torxed/pyglet | 0a35e67e43d069b952e3b02773cdf5b064124069 | [
"BSD-3-Clause"
] | 2 | 2021-11-02T11:01:53.000Z | 2022-02-14T10:11:21.000Z | # Retrieved from https://github.com/drj11/pypng
# Revision: f5c4c76d81093b6c3f39f83b203f6832c496c110
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Pure Python PNG Reader/Writer
This Python module implements support for PNG images (see PNG
specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
and writes PNG files with all allowable bit depths
(1/2/4/8/16/24/32/48/64 bits per pixel) and colour combinations:
greyscale (1/2/4/8/16 bit); RGB, RGBA, LA (greyscale with alpha) with
8/16 bits per channel; colour mapped images (1/2/4/8 bit).
Adam7 interlacing is supported for reading and
writing. A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer`
classes.
Requires Python 2.3. Limited support is available for Python 2.2, but
not everything works. Best with Python 2.4 and higher. Installation is
trivial, but see the ``README.txt`` file (with the source distribution)
for details.
This file can also be used as a command-line utility to convert
`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the
reverse conversion from PNG to PNM. The interface is similar to that
of the ``pnmtopng`` program from Netpbm. Type ``python png.py --help``
at the shell prompt for usage and a list of options.
A note on spelling and terminology
----------------------------------
Generally British English spelling is used in the documentation. So
that's "greyscale" and "colour". This not only matches the author's
native language, it's also used by the PNG specification.
The major colour models supported by PNG (and hence by PyPNG) are:
greyscale, RGB, greyscale--alpha, RGB--alpha. These are sometimes
referred to using the abbreviations: L, RGB, LA, RGBA. In this case
each letter abbreviates a single channel: *L* is for Luminance or Luma
or Lightness which is the channel used in greyscale images; *R*, *G*,
*B* stand for Red, Green, Blue, the components of a colour image; *A*
stands for Alpha, the opacity channel (used for transparency effects,
but higher values are more opaque, so it makes sense to call it
opacity).
A note on formats
-----------------
When getting pixel data out of this module (reading) and presenting
data to this module (writing) there are a number of ways the data could
be represented as a Python value. Generally this module uses one of
three formats called "flat row flat pixel", "boxed row flat pixel", and
"boxed row boxed pixel". Basically the concern is whether each pixel
and each row comes in its own little tuple (box), or not.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Boxed row flat pixel::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list, but the pixels are flattened so
that three values for one pixel simply follow the three values for
the previous pixel. This is the most common format used, because it
provides a good compromise between space and convenience. PyPNG regards
itself as at liberty to replace any sequence type with any sufficiently
compatible other sequence type; in practice each row is an array (from
the array module), and the outer list is sometimes an iterator rather
than an explicit list (so that streaming is possible).
Flat row flat pixel::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
Boxed row boxed pixel::
list([ (R,G,B), (R,G,B), (R,G,B) ],
[ (R,G,B), (R,G,B), (R,G,B) ])
Each row appears in its own list, but each pixel also appears in its own
tuple. A serious memory burn in Python.
In all cases the top row comes first, and for each row the pixels are
ordered from left-to-right. Within a pixel the values appear in the
order, R-G-B-A (or L-A for greyscale--alpha).
There is a fourth format, mentioned because it is used internally,
is close to what lies inside a PNG file itself, and has some support
from the public API. This format is called packed. When packed,
each row is a sequence of bytes (integers from 0 to 255), just as
it is before PNG scanline filtering is applied. When the bit depth
is 8 this is essentially the same as boxed row flat pixel; when the
bit depth is less than 8, several pixels are packed into each byte;
when the bit depth is 16 (the only value more than 8 that is supported
by the PNG image format) each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer). This format is used by the
:meth:`Writer.write_packed` method. It isn't usually a convenient
format, but may be just right if the source data for the PNG image
comes from something that uses a similar format (for example, 1-bit
BMPs, or another PNG file).
And now, my famous members
--------------------------
"""
__version__ = "0.0.18"
import itertools
import math
import re
import operator
import struct
import sys
import warnings
import zlib
from io import open
from array import array
from functools import reduce
try:
# `cpngfilters` is a Cython module: it must be compiled by
# Cython for this import to work.
# If this import does work, then it overrides pure-python
# filtering functions defined later in this file (see `class
# pngfilters`).
import cpngfilters as pngfilters
except ImportError:
pass
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
_adam7 = ((0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2))
def group(s, n):
# See http://www.python.org/doc/2.6/library/functions.html#zip
return list(zip(*[iter(s)]*n))
def isarray(x):
return isinstance(x, array)
def tostring(row):
return row.tostring()
def interleave_planes(ipixels, apixels, ipsize, apsize):
"""
Interleave (colour) planes, e.g. RGB + A = RGBA.
Return an array of pixels consisting of the `ipsize` elements of
data from each pixel in `ipixels` followed by the `apsize` elements
of data from each pixel in `apixels`. Conventionally `ipixels`
and `apixels` are byte arrays so the sizes are bytes, but it
actually works with any arrays of the same type. The returned
array is the same type as the input arrays which should be the
same type as each other.
"""
itotal = len(ipixels)
atotal = len(apixels)
newtotal = itotal + atotal
newpsize = ipsize + apsize
# Set up the output buffer
# See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
out = array(ipixels.typecode)
# It's annoying that there is no cheap way to set the array size :-(
out.extend(ipixels)
out.extend(apixels)
# Interleave in the pixel data
for i in range(ipsize):
out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
for i in range(apsize):
out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
return out
def check_palette(palette):
"""Check a palette argument (to the :class:`Writer` class)
for validity. Returns the palette as a list if okay; raises an
exception otherwise.
"""
# None is the default and is allowed.
if palette is None:
return None
p = list(palette)
if not (0 < len(p) <= 256):
raise ValueError("a palette must have between 1 and 256 entries")
seen_triple = False
for i,t in enumerate(p):
if len(t) not in (3,4):
raise ValueError(
"palette entry %d: entries must be 3- or 4-tuples." % i)
if len(t) == 3:
seen_triple = True
if seen_triple and len(t) == 4:
raise ValueError(
"palette entry %d: all 4-tuples must precede all 3-tuples" % i)
for x in t:
if int(x) != x or not(0 <= x <= 255):
raise ValueError(
"palette entry %d: values must be integer: 0 <= x <= 255" % i)
return p
def check_sizes(size, width, height):
"""Check that these arguments, in supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return width, height
if len(size) != 2:
raise ValueError(
"size argument should be a pair (width, height)")
if width is not None and width != size[0]:
raise ValueError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width))
if height is not None and height != size[1]:
raise ValueError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height))
return size
def check_color(c, greyscale, which):
"""Checks that a colour argument for transparent or
background options is the right form. Returns the colour
(which, if it's a bar integer, is "corrected" to a 1-tuple).
"""
if c is None:
return c
if greyscale:
try:
len(c)
except TypeError:
c = (c,)
if len(c) != 1:
raise ValueError("%s for greyscale must be 1-tuple" %
which)
if not isinteger(c[0]):
raise ValueError(
"%s colour for greyscale must be integer" % which)
else:
if not (len(c) == 3 and
isinteger(c[0]) and
isinteger(c[1]) and
isinteger(c[2])):
raise ValueError(
"%s colour must be a triple of integers" % which)
return c
class Error(Exception):
def __str__(self):
return self.__class__.__name__ + ': ' + ' '.join(self.args)
class FormatError(Error):
"""Problem with input file format. In other words, PNG file does
not conform to the specification in some way and is invalid.
"""
class ChunkError(FormatError):
pass
class Writer:
"""
PNG encoder in pure Python.
"""
def __init__(self, width=None, height=None,
size=None,
greyscale=False,
alpha=False,
bitdepth=8,
palette=None,
transparent=None,
background=None,
gamma=None,
compression=None,
interlace=False,
bytes_per_sample=None, # deprecated
planes=None,
colormap=None,
maxval=None,
chunk_limit=2**20,
x_pixels_per_unit = None,
y_pixels_per_unit = None,
unit_is_meter = False):
"""
Create a PNG encoder object.
Arguments:
width, height
Image size in pixels, as two separate arguments.
size
Image size (w,h) in pixels, as single argument.
greyscale
Input data is greyscale, not RGB.
alpha
Input data has alpha channel (RGBA or LA).
bitdepth
Bit depth: from 1 to 16.
palette
Create a palette for a colour mapped image (colour type 3).
transparent
Specify a transparent colour (create a ``tRNS`` chunk).
background
Specify a default background colour (create a ``bKGD`` chunk).
gamma
Specify a gamma value (create a ``gAMA`` chunk).
compression
zlib compression level: 0 (none) to 9 (more compressed);
default: -1 or None.
interlace
Create an interlaced image.
chunk_limit
Write multiple ``IDAT`` chunks to save memory.
x_pixels_per_unit
Number of pixels a unit along the x axis (write a
`pHYs` chunk).
y_pixels_per_unit
Number of pixels a unit along the y axis (write a
`pHYs` chunk). Along with `x_pixel_unit`, this gives
the pixel size ratio.
unit_is_meter
`True` to indicate that the unit (for the `pHYs`
chunk) is metre.
The image size (in pixels) can be specified either by using the
`width` and `height` arguments, or with the single `size`
argument. If `size` is used it should be a pair (*width*,
*height*).
`greyscale` and `alpha` are booleans that specify whether
an image is greyscale (or colour), and whether it has an
alpha channel (or not).
`bitdepth` specifies the bit depth of the source pixel values.
Each source pixel value must be an integer between 0 and
``2**bitdepth-1``. For example, 8-bit images have values
between 0 and 255. PNG only stores images with bit depths of
1,2,4,8, or 16. When `bitdepth` is not one of these values,
the next highest valid bit depth is selected, and an ``sBIT``
(significant bits) chunk is generated that specifies the
original precision of the source image. In this case the
supplied pixel values will be rescaled to fit the range of
the selected bit depth.
The details of which bit depth / colour model combinations the
PNG file format supports directly, are somewhat arcane
(refer to the PNG specification for full details). Briefly:
"small" bit depths (1,2,4) are only allowed with greyscale and
colour mapped images; colour mapped images cannot have bit depth
16.
For colour mapped images (in other words, when the `palette`
argument is specified) the `bitdepth` argument must match one of
the valid PNG bit depths: 1, 2, 4, or 8. (It is valid to have a
PNG image with a palette and an ``sBIT`` chunk, but the meaning
is slightly different; it would be awkward to press the
`bitdepth` argument into service for this.)
The `palette` option, when specified, causes a colour
mapped image to be created: the PNG colour type is set to 3;
`greyscale` must not be set; `alpha` must not be set;
`transparent` must not be set; the bit depth must be 1,2,4,
or 8. When a colour mapped image is created, the pixel values
are palette indexes and the `bitdepth` argument specifies the
size of these indexes (not the size of the colour values in
the palette).
The palette argument value should be a sequence of 3- or
4-tuples. 3-tuples specify RGB palette entries; 4-tuples
specify RGBA palette entries. If both 4-tuples and 3-tuples
appear in the sequence then all the 4-tuples must come
before all the 3-tuples. A ``PLTE`` chunk is created; if there
are 4-tuples then a ``tRNS`` chunk is created as well. The
``PLTE`` chunk will contain all the RGB triples in the same
sequence; the ``tRNS`` chunk will contain the alpha channel for
all the 4-tuples, in the same sequence. Palette entries
are always 8-bit.
If specified, the `transparent` and `background` parameters must
be a tuple with three integer values for red, green, blue, or
a simple integer (or singleton tuple) for a greyscale image.
If specified, the `gamma` parameter must be a positive number
(generally, a `float`). A ``gAMA`` chunk will be created.
Note that this will not change the values of the pixels as
they appear in the PNG file, they are assumed to have already
been converted appropriately for the gamma specified.
The `compression` argument specifies the compression level to
be used by the ``zlib`` module. Values from 1 to 9 specify
compression, with 9 being "more compressed" (usually smaller
and slower, but it doesn't always work out that way). 0 means
no compression. -1 and ``None`` both mean that the default
level of compession will be picked by the ``zlib`` module
(which is generally acceptable).
If `interlace` is true then an interlaced image is created
(using PNG's so far only interace method, *Adam7*). This does
not affect how the pixels should be presented to the encoder,
rather it changes how they are arranged into the PNG file.
On slow connexions interlaced images can be partially decoded
by the browser to give a rough view of the image that is
successively refined as more image data appears.
.. note ::
Enabling the `interlace` option requires the entire image
to be processed in working memory.
`chunk_limit` is used to limit the amount of memory used whilst
compressing the image. In order to avoid using large amounts of
memory, multiple ``IDAT`` chunks may be created.
"""
# At the moment the `planes` argument is ignored;
# its purpose is to act as a dummy so that
# ``Writer(x, y, **info)`` works, where `info` is a dictionary
# returned by Reader.read and friends.
# Ditto for `colormap`.
width, height = check_sizes(size, width, height)
del size
if width <= 0 or height <= 0:
raise ValueError("width and height must be greater than zero")
if not isinteger(width) or not isinteger(height):
raise ValueError("width and height must be integers")
# http://www.w3.org/TR/PNG/#7Integers-and-byte-order
if width > 2**32-1 or height > 2**32-1:
raise ValueError("width and height cannot exceed 2**32-1")
if alpha and transparent is not None:
raise ValueError(
"transparent colour not allowed with alpha channel")
if bytes_per_sample is not None:
warnings.warn('please use bitdepth instead of bytes_per_sample',
DeprecationWarning)
if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
raise ValueError(
"bytes per sample must be .125, .25, .5, 1, or 2")
bitdepth = int(8*bytes_per_sample)
del bytes_per_sample
if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
raise ValueError("bitdepth (%r) must be a positive integer <= 16" %
bitdepth)
self.rescale = None
palette = check_palette(palette)
if palette:
if bitdepth not in (1,2,4,8):
raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
if transparent is not None:
raise ValueError("transparent and palette not compatible")
if alpha:
raise ValueError("alpha and palette not compatible")
if greyscale:
raise ValueError("greyscale and palette not compatible")
else:
# No palette, check for sBIT chunk generation.
if alpha or not greyscale:
if bitdepth not in (8,16):
targetbitdepth = (8,16)[bitdepth > 8]
self.rescale = (bitdepth, targetbitdepth)
bitdepth = targetbitdepth
del targetbitdepth
else:
assert greyscale
assert not alpha
if bitdepth not in (1,2,4,8,16):
if bitdepth > 8:
targetbitdepth = 16
elif bitdepth == 3:
targetbitdepth = 4
else:
assert bitdepth in (5,6,7)
targetbitdepth = 8
self.rescale = (bitdepth, targetbitdepth)
bitdepth = targetbitdepth
del targetbitdepth
if bitdepth < 8 and (alpha or not greyscale and not palette):
raise ValueError(
"bitdepth < 8 only permitted with greyscale or palette")
if bitdepth > 8 and palette:
raise ValueError(
"bit depth must be 8 or less for images with palette")
transparent = check_color(transparent, greyscale, 'transparent')
background = check_color(background, greyscale, 'background')
# It's important that the true boolean values (greyscale, alpha,
# colormap, interlace) are converted to bool because Iverson's
# convention is relied upon later on.
self.width = width
self.height = height
self.transparent = transparent
self.background = background
self.gamma = gamma
self.greyscale = bool(greyscale)
self.alpha = bool(alpha)
self.colormap = bool(palette)
self.bitdepth = int(bitdepth)
self.compression = compression
self.chunk_limit = chunk_limit
self.interlace = bool(interlace)
self.palette = palette
self.x_pixels_per_unit = x_pixels_per_unit
self.y_pixels_per_unit = y_pixels_per_unit
self.unit_is_meter = bool(unit_is_meter)
self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
assert self.color_type in (0,2,3,4,6)
self.color_planes = (3,1)[self.greyscale or self.colormap]
self.planes = self.color_planes + self.alpha
# :todo: fix for bitdepth < 8
self.psize = (self.bitdepth/8) * self.planes
def make_palette(self):
"""Create the byte sequences for a ``PLTE`` and if necessary a
``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be
``None`` if no ``tRNS`` chunk is necessary.
"""
p = array('B')
t = array('B')
for x in self.palette:
p.extend(x[0:3])
if len(x) > 3:
t.append(x[3])
p = tostring(p)
t = tostring(t)
if t:
return p,t
return p,None
def write(self, outfile, rows):
"""Write a PNG image to the output file. `rows` should be
an iterable that yields each row in boxed row flat pixel
format. The rows should be the rows of the original image,
so there should be ``self.height`` rows of ``self.width *
self.planes`` values. If `interlace` is specified (when
creating the instance), then an interlaced PNG file will
be written. Supply the rows in the normal image order;
the interlacing is carried out internally.
.. note ::
Interlacing will require the entire image to be in working
memory.
"""
if self.interlace:
fmt = 'BH'[self.bitdepth > 8]
a = array(fmt, itertools.chain(*rows))
return self.write_array(outfile, a)
nrows = self.write_passes(outfile, rows)
if nrows != self.height:
raise ValueError(
"rows supplied (%d) does not match height (%d)" %
(nrows, self.height))
def write_passes(self, outfile, rows, packed=False):
"""
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file. For straightlaced images,
this is the usual top to bottom ordering, but for interlaced
images the rows should have already been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row. When
`packed` is ``False`` the rows should be in boxed row flat pixel
format; when `packed` is ``True`` each row should be a packed
sequence of bytes.
"""
# http://www.w3.org/TR/PNG/#5PNG-file-signature
outfile.write(_signature)
# http://www.w3.org/TR/PNG/#11IHDR
write_chunk(outfile, b'IHDR',
struct.pack("!2I5B", self.width, self.height,
self.bitdepth, self.color_type,
0, 0, self.interlace))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11gAMA
if self.gamma is not None:
write_chunk(outfile, b'gAMA',
struct.pack("!L", int(round(self.gamma*1e5))))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11sBIT
if self.rescale:
write_chunk(outfile, b'sBIT',
struct.pack('%dB' % self.planes,
*[self.rescale[0]]*self.planes))
# :chunk:order: Without a palette (PLTE chunk), ordering is
# relatively relaxed. With one, gAMA chunk must precede PLTE
# chunk which must precede tRNS and bKGD.
# See http://www.w3.org/TR/PNG/#5ChunkOrdering
if self.palette:
p,t = self.make_palette()
write_chunk(outfile, b'PLTE', p)
if t:
# tRNS chunk is optional. Only needed if palette entries
# have alpha.
write_chunk(outfile, b'tRNS', t)
# http://www.w3.org/TR/PNG/#11tRNS
if self.transparent is not None:
if self.greyscale:
write_chunk(outfile, b'tRNS',
struct.pack("!1H", *self.transparent))
else:
write_chunk(outfile, b'tRNS',
struct.pack("!3H", *self.transparent))
# http://www.w3.org/TR/PNG/#11bKGD
if self.background is not None:
if self.greyscale:
write_chunk(outfile, b'bKGD',
struct.pack("!1H", *self.background))
else:
write_chunk(outfile, b'bKGD',
struct.pack("!3H", *self.background))
# http://www.w3.org/TR/PNG/#11pHYs
if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None:
tup = (self.x_pixels_per_unit, self.y_pixels_per_unit, int(self.unit_is_meter))
write_chunk(outfile, b'pHYs', struct.pack("!LLB",*tup))
# http://www.w3.org/TR/PNG/#11IDAT
if self.compression is not None:
compressor = zlib.compressobj(self.compression)
else:
compressor = zlib.compressobj()
# Choose an extend function based on the bitdepth. The extend
# function packs/decomposes the pixel values into bytes and
# stuffs them onto the data array.
data = array('B')
if self.bitdepth == 8 or packed:
extend = data.extend
elif self.bitdepth == 16:
# Decompose into bytes
def extend(sl):
fmt = '!%dH' % len(sl)
data.extend(array('B', struct.pack(fmt, *sl)))
else:
# Pack into bytes
assert self.bitdepth < 8
# samples per byte
spb = int(8/self.bitdepth)
def extend(sl):
a = array('B', sl)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
l = float(len(a))
extra = math.ceil(l / float(spb))*spb - l
a.extend([0]*int(extra))
# Pack into bytes
l = group(a, spb)
l = [reduce(lambda x,y:
(x << self.bitdepth) + y, e) for e in l]
data.extend(l)
if self.rescale:
oldextend = extend
factor = \
float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
def extend(sl):
oldextend([int(round(factor*x)) for x in sl])
# Build the first row, testing mostly to see if we need to
# changed the extend function to cope with NumPy integer types
# (they cause our ordinary definition of extend to fail, so we
# wrap it). See
# http://code.google.com/p/pypng/issues/detail?id=44
enumrows = enumerate(rows)
del rows
# First row's filter type.
data.append(0)
# :todo: Certain exceptions in the call to ``.next()`` or the
# following try would indicate no row data supplied.
# Should catch.
i,row = next(enumrows)
try:
# If this fails...
extend(row)
except:
# ... try a version that converts the values to int first.
# Not only does this work for the (slightly broken) NumPy
# types, there are probably lots of other, unknown, "nearly"
# int types it works for.
def wrapmapint(f):
return lambda sl: f([int(x) for x in sl])
extend = wrapmapint(extend)
del wrapmapint
extend(row)
for i,row in enumrows:
# Add "None" filter type. Currently, it's essential that
# this filter type be used for every scanline as we do not
# mark the first row of a reduced pass image; that means we
# could accidentally compute the wrong filtered scanline if
# we used "up", "average", or "paeth" on such a line.
data.append(0)
extend(row)
if len(data) > self.chunk_limit:
compressed = compressor.compress(tostring(data))
if len(compressed):
write_chunk(outfile, b'IDAT', compressed)
# Because of our very witty definition of ``extend``,
# above, we must re-use the same ``data`` object. Hence
# we use ``del`` to empty this one, rather than create a
# fresh one (which would be my natural FP instinct).
del data[:]
if len(data):
compressed = compressor.compress(tostring(data))
else:
compressed = b''
flushed = compressor.flush()
if len(compressed) or len(flushed):
write_chunk(outfile, b'IDAT', compressed + flushed)
# http://www.w3.org/TR/PNG/#11IEND
write_chunk(outfile, b'IEND')
return i+1
def write_array(self, outfile, pixels):
"""
Write an array in flat row flat pixel format as a PNG file on
the output file. See also :meth:`write` method.
"""
if self.interlace:
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`. The pixel data comes from `rows`
which should be in boxed row packed format. Each row should be
a sequence of packed bytes.
Technically, this method does work for interlaced images but it
is best avoided. For interlaced images, the rows should be
presented in the order that they appear in the file.
This method should not be used when the source image bit depth
is not one naturally supported by PNG; the bit depth should be
1, 2, 4, 8, or 16.
"""
if self.rescale:
raise Error("write_packed method not suitable for bit depth %d" %
self.rescale[0])
return self.write_passes(outfile, rows, packed=True)
def convert_pnm(self, infile, outfile):
"""
Convert a PNM file containing raw pixel data into a PNG file
with the parameters set in the writer object. Works for
(binary) PGM, PPM, and PAM formats.
"""
if self.interlace:
pixels = array('B')
pixels.fromfile(infile,
(self.bitdepth//8) * self.color_planes *
self.width * self.height)
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.file_scanlines(infile))
def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
"""
Convert a PPM and PGM file containing raw pixel data into a
PNG outfile with the parameters set in the writer object.
"""
pixels = array('B')
pixels.fromfile(ppmfile,
(self.bitdepth//8) * self.color_planes *
self.width * self.height)
apixels = array('B')
apixels.fromfile(pgmfile,
(self.bitdepth//8) *
self.width * self.height)
pixels = interleave_planes(pixels, apixels,
(self.bitdepth//8) * self.color_planes,
(self.bitdepth//8))
if self.interlace:
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
def file_scanlines(self, infile):
"""
Generates boxed rows in flat pixel format, from the input file
`infile`. It assumes that the input file is in a "Netpbm-like"
binary format, and is positioned at the beginning of the first
pixel. The number of pixels to read is taken from the image
dimensions (`width`, `height`, `planes`) and the number of bytes
per value is implied by the image `bitdepth`.
"""
# Values per row
vpr = self.width * self.planes
row_bytes = vpr
if self.bitdepth > 8:
assert self.bitdepth == 16
row_bytes *= 2
fmt = '>%dH' % vpr
def line():
return array('H', struct.unpack(fmt, infile.read(row_bytes)))
else:
def line():
scanline = array('B', infile.read(row_bytes))
return scanline
for y in range(self.height):
yield line()
def array_scanlines(self, pixels):
"""
Generates boxed rows (flat pixels) from flat rows (flat pixels)
in an array.
"""
# Values per row
vpr = self.width * self.planes
stop = 0
for y in range(self.height):
start = stop
stop = start + vpr
yield pixels[start:stop]
    def array_scanlines_interlace(self, pixels):
        """
        Generator for interlaced scanlines from an array. `pixels` is
        the full source image in flat row flat pixel format. The
        generator yields each scanline of the reduced passes in turn, in
        boxed row flat pixel format.
        """
        # http://www.w3.org/TR/PNG/#8InterlaceMethods
        # Array type.
        fmt = 'BH'[self.bitdepth > 8]
        # Values per row (of the full-size image).
        vpr = self.width * self.planes
        # _adam7 is the module-level table of (xstart, ystart, xstep,
        # ystep) parameters for the seven Adam7 passes.
        for xstart, ystart, xstep, ystep in _adam7:
            # A pass whose first column lies beyond the image width
            # contributes no pixels at all.
            if xstart >= self.width:
                continue
            # Pixels per row (of reduced image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # number of values in reduced image row.
            row_len = ppr*self.planes
            for y in range(ystart, self.height, ystep):
                if xstep == 1:
                    # Pass covers every column: the reduced row is just
                    # a contiguous slice of the source.
                    offset = y * vpr
                    yield pixels[offset:offset+vpr]
                else:
                    row = array(fmt)
                    # There's no easier way to set the length of an array
                    row.extend(pixels[0:row_len])
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    # Gather each plane with an extended slice; strided
                    # assignment requires source and target lengths to
                    # already match, hence the pre-sizing above.
                    for i in range(self.planes):
                        row[i::self.planes] = \
                            pixels[offset+i:end_offset:skip]
                    yield row
def write_chunk(outfile, tag, data=b''):
    """
    Write a single PNG chunk to `outfile`: the 4-byte big-endian
    data length, the 4-byte chunk `tag`, the chunk `data`, and
    finally the CRC-32 checksum computed over tag and data.
    """
    # http://www.w3.org/TR/PNG/#5Chunk-layout
    outfile.write(struct.pack("!I", len(data)))
    outfile.write(tag)
    outfile.write(data)
    crc = zlib.crc32(data, zlib.crc32(tag))
    # Force an unsigned 32-bit value; zlib.crc32 may return a signed
    # result depending on the Python version.
    outfile.write(struct.pack("!I", crc & (2**32 - 1)))
def write_chunks(out, chunks):
    """Create a complete PNG file on `out`: the PNG signature
    followed by every chunk in `chunks`, an iterable of
    (*tag*, *data*) tuples.
    """
    out.write(_signature)
    for spec in chunks:
        write_chunk(out, *spec)
def filter_scanline(type, line, fo, prev=None):
    """Apply a scanline filter to a scanline. `type` specifies the
    filter type (0 to 4); `line` specifies the current (unfiltered)
    scanline as a sequence of bytes; `prev` specifies the previous
    (unfiltered) scanline as a sequence of bytes. `fo` specifies the
    filter offset; normally this is size of a pixel in bytes (the number
    of bytes per sample times the number of channels), but when this is
    < 1 (for bit depths < 8) then the filter offset is 1.
    Returns a fresh ``array('B')`` consisting of the filter type byte
    followed by the filtered scanline bytes.
    """
    assert 0 <= type < 5
    # The output array. Which, pathetically, we extend one-byte at a
    # time (fortunately this is linear).
    out = array('B', [type])
    def sub():
        # Filter type 1: subtract the byte of the pixel `fo` bytes to
        # the left; bytes off the left edge are treated as zero.
        ai = -fo
        for x in line:
            if ai >= 0:
                x = (x - line[ai]) & 0xff
            out.append(x)
            ai += 1
    def up():
        # Filter type 2: subtract the byte directly above.
        for i,x in enumerate(line):
            x = (x - prev[i]) & 0xff
            out.append(x)
    def average():
        # Filter type 3: subtract the floor of the mean of the byte to
        # the left and the byte above.
        ai = -fo
        for i,x in enumerate(line):
            if ai >= 0:
                x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
            else:
                x = (x - (prev[i] >> 1)) & 0xff
            out.append(x)
            ai += 1
    def paeth():
        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
        ai = -fo # also used for ci
        for i,x in enumerate(line):
            a = 0
            b = prev[i]
            c = 0
            if ai >= 0:
                a = line[ai]
                c = prev[ai]
            p = a + b - c
            pa = abs(p - a)
            pb = abs(p - b)
            pc = abs(p - c)
            if pa <= pb and pa <= pc:
                Pr = a
            elif pb <= pc:
                Pr = b
            else:
                Pr = c
            x = (x - Pr) & 0xff
            out.append(x)
            ai += 1
    if not prev:
        # We're on the first line. Some of the filters can be reduced
        # to simpler cases which makes handling the line "off the top"
        # of the image simpler. "up" becomes "none"; "paeth" becomes
        # "left" (non-trivial, but true). "average" needs to be handled
        # specially.
        # Note that out[0] still records the caller's original filter
        # type; that is harmless because on the first line the reduced
        # filter produces byte-identical output to the recorded one
        # (a decoder undoing "up" against an all-zero previous line is
        # a no-op, and likewise "paeth" degenerates to "sub").
        if type == 2: # "up"
            type = 0
        elif type == 3:
            # Synthesize an all-zero previous line for "average".
            prev = [0]*len(line)
        elif type == 4: # "paeth"
            type = 1
    if type == 0:
        out.extend(line)
    elif type == 1:
        sub()
    elif type == 2:
        up()
    elif type == 3:
        average()
    else: # type == 4
        paeth()
    return out
# Regex for decoding a PIL-style mode string: a colour mode
# ('L', 'LA', 'RGB', or 'RGBA', case-insensitive) optionally
# followed by ';' and a decimal bit depth, e.g. 'RGB;16'.
RegexModeDecode = re.compile("(LA?|RGBA?);?([0-9]*)", flags=re.IGNORECASE)
def from_array(a, mode=None, info={}):
    """Create a PNG :class:`Image` object from a 2- or 3-dimensional
    array.  One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.
    Unless they are specified using the *info* parameter, the PNG's
    height and width are taken from the array size.  For a 3 dimensional
    array the first axis is the height; the second axis is the width;
    and the third axis is the channel number.  Thus an RGB image that is
    16 pixels high and 8 wide will use an array that is 16x8x3.  For 2
    dimensional arrays the first axis is the height, but the second axis
    is ``width*channels``, so an RGB image that is 16 pixels high and 8
    wide will use a 2-dimensional array that is 16x24 (each row will be
    8*3 = 24 sample values).
    *mode* is a string that specifies the image colour format in a
    PIL-style mode.  It can be:
    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)
    The mode string can also specify the bit depth (overriding how this
    function normally derives the bit depth, see below).  Appending
    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.
    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.
    The array is expected to be a ``numpy`` array, but it can be any
    suitable Python sequence.  For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.  The exact
    rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
    third dimension, unless an exception is raised in which case a
    2-dimensional array is assumed.  It's slightly more complicated than
    that because an iterator of rows can be used, and it all still
    works.  Using an iterator allows data to be streamed efficiently.
    The bit depth of the PNG is normally taken from the array element's
    datatype (but if *mode* specifies a bitdepth then that is used
    instead).  The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects.  A 1 byte datatype will give a bit depth of
    8, a 2 byte datatype will give a bit depth of 16.  If the datatype
    does not have an implicit size, for example it is a plain Python
    list of lists, as above, then a default of 8 is used.
    The *info* parameter is a dictionary that can be used to specify
    metadata (in the same style as the arguments to the
    :class:`png.Writer` class).  For this function the keys that are
    useful are:
    height
      overrides the height derived from the array dimensions and allows
      *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype (but
      must match *mode* if that also specifies a bit depth).
    Generally anything specified in the
    *info* dictionary will override any implicit choices that this
    function would otherwise make, but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and false when
    mode is ``'RGB'`` or ``'RGBA'``.
    Raises :class:`Error` when *mode* is malformed or *info*
    contradicts the mode/array.
    """
    # We abuse the *info* parameter by modifying it.  Take a copy here.
    # (Also typechecks *info* to some extent).
    info = dict(info)
    # Syntax check mode string.
    match = RegexModeDecode.match(mode)
    if not match:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
    mode, bitdepth = match.groups()
    alpha = 'A' in mode
    if bitdepth:
        bitdepth = int(bitdepth)
    # Colour format.
    if 'greyscale' in info:
        if bool(info['greyscale']) != ('L' in mode):
            raise Error("info['greyscale'] should match mode.")
    info['greyscale'] = 'L' in mode
    if 'alpha' in info:
        if bool(info['alpha']) != alpha:
            raise Error("info['alpha'] should match mode.")
    info['alpha'] = alpha
    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get("bitdepth") and bitdepth != info['bitdepth']:
            raise Error("bitdepth (%d) should match bitdepth of info (%d)." %
              (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth
    # Fill in and/or check entries in *info*.
    # Dimensions.
    if 'size' in info:
        assert len(info["size"]) == 2
        # Check width, height, size all match where used.
        for dimension,axis in [('width', 0), ('height', 1)]:
            if dimension in info:
                if info[dimension] != info['size'][axis]:
                    raise Error(
                      "info[%r] should match info['size'][%r]." %
                      (dimension, axis))
        info['width'],info['height'] = info['size']
    if 'height' not in info:
        try:
            info['height'] = len(a)
        except TypeError:
            raise Error("len(a) does not work, supply info['height'] instead.")
    planes = len(mode)
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")
    # In order to work out whether we the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a,t = itertools.tee(a)
    row = next(t)
    del t
    try:
        # Indexing two levels deep succeeds only for a 3D array.
        row[0][0]
        threed = True
        testelement = row[0]
    except (IndexError, TypeError):
        threed = False
        testelement = row
    if 'width' not in info:
        if threed:
            width = len(row)
        else:
            # 2D rows interleave the channels, so divide them out.
            width = len(row) // planes
        info['width'] = width
    if threed:
        # Flatten the threed rows
        a = (itertools.chain.from_iterable(x) for x in a)
    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause.  Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception, we now assume that
            # the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth
    for thing in ["width", "height", "bitdepth", "greyscale", "alpha"]:
        assert thing in info
    return Image(a, info)
# So that refugees from PIL feel more at home. Not documented.
fromarray = from_array
class Image:
    """A PNG image.  Construct one from an array of pixels by calling
    :meth:`png.from_array`; write it to disk with the :meth:`save`
    method.
    """
    def __init__(self, rows, info):
        """
        .. note ::
          The constructor is not public.  Please do not call it.
        """
        # `rows` may be a one-shot iterator; `info` holds the keyword
        # arguments for the Writer.
        self.rows = rows
        self.info = info
    def save(self, file):
        """Save the image to *file*.  If *file* looks like an open file
        descriptor then it is used, otherwise it is treated as a
        filename and a fresh file is opened.
        In general, you can only call this method once; after it has
        been called the first time and the PNG image has been saved, the
        source data will have been streamed, and cannot be streamed
        again.
        """
        writer = Writer(**self.info)
        # Only close the file if we opened it here.
        if hasattr(file, 'write'):
            opened = None
        else:
            file = open(file, 'wb')
            opened = file
        try:
            writer.write(file, self.rows)
        finally:
            if opened is not None:
                opened.close()
class _readable:
    """
    A minimal file-like interface (just ``read``) over a string,
    bytes, or array buffer.
    """
    def __init__(self, buf):
        # The underlying buffer and the current read position.
        self.buf = buf
        self.offset = 0
    def read(self, n):
        # Slice out the next n items, converting array slices to a
        # byte string so callers always get bytes-like data.
        chunk = self.buf[self.offset:self.offset + n]
        if isarray(chunk):
            chunk = chunk.tostring()
        self.offset += n
        return chunk
# Python 2/3 compatibility shim: decide how to turn a chunk-type byte
# string into a text string.  On Python 3, ``str(bytes, 'ascii')``
# decodes; on Python 2 the same call raises TypeError, in which case
# plain ``str`` (bytes are already str there) suffices.
try:
    str(b'dummy', 'ascii')
except TypeError:
    as_str = str
else:
    def as_str(x):
        # Decode an ASCII byte string to text (Python 3 path).
        return str(x, 'ascii')
class Reader:
"""
PNG decoder in pure Python.
"""
def __init__(self, _guess=None, **kw):
"""
Create a PNG decoder object.
The constructor expects exactly one keyword argument. If you
supply a positional argument instead, it will guess the input
type. You can choose among the following keyword arguments:
filename
Name of input file (a PNG file).
file
A file-like object (object with a read() method).
bytes
``array`` or ``string`` with PNG data.
"""
if ((_guess is not None and len(kw) != 0) or
(_guess is None and len(kw) != 1)):
raise TypeError("Reader() takes exactly 1 argument")
# Will be the first 8 bytes, later on. See validate_signature.
self.signature = None
self.transparent = None
# A pair of (len,type) if a chunk has been read but its data and
# checksum have not (in other words the file position is just
# past the 4 bytes that specify the chunk type). See preamble
# method for how this is used.
self.atchunk = None
if _guess is not None:
if isarray(_guess):
kw["bytes"] = _guess
elif isinstance(_guess, str):
kw["filename"] = _guess
elif hasattr(_guess, 'read'):
kw["file"] = _guess
if "filename" in kw:
self.file = open(kw["filename"], "rb")
elif "file" in kw:
self.file = kw["file"]
elif "bytes" in kw:
self.file = _readable(kw["bytes"])
else:
raise TypeError("expecting filename, file or bytes array")
    def chunk(self, seek=None, lenient=False):
        """
        Read the next PNG chunk from the input file; returns a
        (*type*, *data*) tuple.  *type* is the chunk's type as a
        byte string (all PNG chunk types are 4 bytes long).
        *data* is the chunk's data content, as a byte string.
        If the optional `seek` argument is
        specified then it will keep reading chunks until it either runs
        out of file or finds the type specified by the argument.  Note
        that in general the order of chunks in PNGs is unspecified, so
        using `seek` can cause you to miss chunks.
        If the optional `lenient` argument evaluates to `True`,
        checksum failures will raise warnings rather than exceptions.
        """
        self.validate_signature()
        while True:
            # http://www.w3.org/TR/PNG/#5Chunk-layout
            # Use a pending (length, type) left behind by preamble(),
            # otherwise read a fresh chunk header.
            if not self.atchunk:
                self.atchunk = self.chunklentype()
            # NOTE(review): chunklentype() returns None at end of file,
            # which would make the unpacking below raise TypeError
            # rather than a clean format error — confirm callers always
            # stop at IEND or go through preamble().
            length, type = self.atchunk
            self.atchunk = None
            data = self.file.read(length)
            if len(data) != length:
                raise ChunkError('Chunk %s too short for required %i octets.'
                  % (type, length))
            checksum = self.file.read(4)
            if len(checksum) != 4:
                raise ChunkError('Chunk %s too short for checksum.' % type)
            # When seeking, skipped chunks are discarded without
            # verifying their checksums.
            if seek and type != seek:
                continue
            verify = zlib.crc32(type)
            verify = zlib.crc32(data, verify)
            # Whether the output from zlib.crc32 is signed or not varies
            # according to hideous implementation details, see
            # http://bugs.python.org/issue1202 .
            # We coerce it to be positive here (in a way which works on
            # Python 2.3 and older).
            verify &= 2**32 - 1
            verify = struct.pack('!I', verify)
            if checksum != verify:
                (a, ) = struct.unpack('!I', checksum)
                (b, ) = struct.unpack('!I', verify)
                message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
                if lenient:
                    warnings.warn(message, RuntimeWarning)
                else:
                    raise ChunkError(message)
            return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t,v = self.chunk()
yield t,v
if t == b'IEND':
break
def undo_filter(self, filter_type, scanline, previous):
"""Undo the filter for a scanline. `scanline` is a sequence of
bytes that does not include the initial filter type byte.
`previous` is decoded previous scanline (for straightlaced
images this is the previous pixel row, but for interlaced
images, it is the previous scanline in the reduced image, which
in general is not the previous pixel row in the final image).
When there is no previous scanline (the first row of a
straightlaced image, or the first row in one of the passes in an
interlaced image), then this argument should be ``None``.
The scanline will have the effects of filtering removed, and the
result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
# Yes, with the Cython extension making the undo_filter fast,
# updating scanline inplace makes the code 3 times faster
# (reading 50 images of 800x800 went from 40s to 16s)
result = scanline
if filter_type == 0:
return result
if filter_type not in (1,2,3,4):
raise FormatError('Invalid PNG Filter Type.'
' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = array('B', [0]*len(scanline))
def sub():
"""Undo sub filter."""
ai = 0
# Loop starts at index fu. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
def up():
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
def average():
"""Undo average filter."""
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
def paeth():
"""Undo Paeth filter."""
# Also used for ci.
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
(None,
pngfilters.undo_filter_sub,
pngfilters.undo_filter_up,
pngfilters.undo_filter_average,
pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
return result
    def deinterlace(self, raw):
        """
        Read raw pixel data, undo filters, deinterlace, and flatten.
        Return in flat row flat pixel format.
        """
        # Values per row (of the target image)
        vpr = self.width * self.planes
        # Make a result array, and make it big enough.  Interleaving
        # writes to the output array randomly (well, not quite), so the
        # entire output array must be in memory.
        fmt = 'BH'[self.bitdepth > 8]
        a = array(fmt, [0]*vpr*self.height)
        # Cursor into `raw`; each pass consumes its rows sequentially.
        source_offset = 0
        for xstart, ystart, xstep, ystep in _adam7:
            # A pass starting beyond the image width has no pixels.
            if xstart >= self.width:
                continue
            # The previous (reconstructed) scanline.  None at the
            # beginning of a pass to indicate that there is no previous
            # line.
            recon = None
            # Pixels per row (reduced pass image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # Row size in bytes for this pass.
            row_size = int(math.ceil(self.psize * ppr))
            for y in range(ystart, self.height, ystep):
                # First byte of each reduced-pass row is its filter type.
                filter_type = raw[source_offset]
                source_offset += 1
                scanline = raw[source_offset:source_offset+row_size]
                source_offset += row_size
                recon = self.undo_filter(filter_type, scanline, recon)
                # Convert so that there is one element per pixel value
                flat = self.serialtoflat(recon, ppr)
                if xstep == 1:
                    # Pass covers every column: contiguous copy.
                    assert xstart == 0
                    offset = y * vpr
                    a[offset:offset+vpr] = flat
                else:
                    # Scatter each plane into the output with an
                    # extended (strided) slice assignment.
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        a[offset+i:end_offset:skip] = \
                            flat[i::self.planes]
        return a
    def iterboxed(self, rows):
        """Iterator that yields each scanline in boxed row flat pixel
        format.  `rows` should be an iterator that yields the bytes of
        each row in turn.
        """
        def asvalues(raw):
            """Convert a row of raw bytes into a flat row.  Result will
            be a freshly allocated object, not shared with
            argument.
            """
            if self.bitdepth == 8:
                return array('B', raw)
            if self.bitdepth == 16:
                # Reassemble big-endian 16-bit values from byte pairs.
                raw = tostring(raw)
                return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
            assert self.bitdepth < 8
            width = self.width
            # Samples per byte
            spb = 8//self.bitdepth
            out = array('B')
            mask = 2**self.bitdepth - 1
            # Shift amounts to extract each sample, most significant
            # bits first (PNG packs samples high-bit first).
            shifts = [self.bitdepth * i
                for i in reversed(list(range(spb)))]
            for o in raw:
                out.extend([mask&(o>>i) for i in shifts])
            # Drop padding samples from the final partial byte.
            # NOTE(review): truncates to self.width samples, which
            # assumes one sample per pixel (planes == 1) at sub-byte
            # depths — confirm against how callers use this.
            return out[:width]
        # Lazy: each row is converted only when consumed.
        return map(asvalues, rows)
    def serialtoflat(self, bytes, width=None):
        """Convert serial format (byte stream) pixel data to flat row
        flat pixel.  `width` is the row width in pixels; it defaults
        to ``self.width`` and is used to discard the padding bits at
        the end of each packed row when the bit depth is below 8.
        """
        if self.bitdepth == 8:
            return bytes
        if self.bitdepth == 16:
            # Reassemble big-endian 16-bit values from byte pairs.
            bytes = tostring(bytes)
            return array('H',
              struct.unpack('!%dH' % (len(bytes)//2), bytes))
        assert self.bitdepth < 8
        if width is None:
            width = self.width
        # Samples per byte
        spb = 8//self.bitdepth
        out = array('B')
        mask = 2**self.bitdepth - 1
        # Shift amounts, most significant sample first.
        shifts = list(map(self.bitdepth.__mul__, reversed(list(range(spb)))))
        # `l` counts down the samples remaining in the current row so
        # that the padding in the last byte of each row is dropped.
        l = width
        for o in bytes:
            out.extend([(mask&(o>>s)) for s in shifts][:l])
            l -= spb
            if l <= 0:
                l = width
        return out
    def iterstraight(self, raw):
        """Iterator that undoes the effect of filtering, and yields
        each row in serialised format (as a sequence of bytes).
        Assumes input is straightlaced.  `raw` should be an iterable
        that yields the raw bytes in chunks of arbitrary size.
        """
        # length of row, in bytes
        rb = self.row_bytes
        # Accumulator for bytes carried over between input chunks.
        a = array('B')
        # The previous (reconstructed) scanline.  None indicates first
        # line of image.
        recon = None
        for some in raw:
            a.extend(some)
            # Emit every complete row (filter byte + rb data bytes)
            # currently buffered.
            while len(a) >= rb + 1:
                filter_type = a[0]
                scanline = a[1:rb+1]
                del a[:rb+1]
                recon = self.undo_filter(filter_type, scanline, recon)
                yield recon
        if len(a) != 0:
            # :file:format We get here with a file format error:
            # when the available bytes (after decompressing) do not
            # pack into exact rows.
            raise FormatError(
              'Wrong size for decompressed IDAT chunk.')
        assert len(a) == 0
def validate_signature(self):
"""If signature (header) has not been read then read and
validate it; otherwise do nothing.
"""
if self.signature:
return
self.signature = self.file.read(8)
if self.signature != _signature:
raise FormatError("PNG file has invalid signature.")
def preamble(self, lenient=False):
"""
Extract the image metadata by reading the initial part of
the PNG file up to the start of the ``IDAT`` chunk. All the
chunks that precede the ``IDAT`` chunk are read and either
processed for metadata or discarded.
If the optional `lenient` argument evaluates to `True`, checksum
failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
if not self.atchunk:
self.atchunk = self.chunklentype()
if self.atchunk is None:
raise FormatError(
'This PNG file has no IDAT chunks.')
if self.atchunk[1] == b'IDAT':
return
self.process_chunk(lenient=lenient)
def chunklentype(self):
"""Reads just enough of the input to determine the next
chunk's length and type, returned as a (*length*, *type*) pair
where *type* is a string. If there are no more chunks, ``None``
is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length,type = struct.unpack('!I4s', x)
if length > 2**31-1:
raise FormatError('Chunk %s is too large: %d.' % (type,length))
return length,type
def process_chunk(self, lenient=False):
"""Process the next chunk and its data. This only processes the
following chunk types, all others are ignored: ``IHDR``,
``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
type, data = self.chunk(lenient=lenient)
method = '_process_' + as_str(type)
m = getattr(self, method, None)
if m:
m(data)
def _process_IHDR(self, data):
# http://www.w3.org/TR/PNG/#11IHDR
if len(data) != 13:
raise FormatError('IHDR chunk has incorrect length.')
(self.width, self.height, self.bitdepth, self.color_type,
self.compression, self.filter,
self.interlace) = struct.unpack("!2I5B", data)
check_bitdepth_colortype(self.bitdepth, self.color_type)
if self.compression != 0:
raise Error("unknown compression method %d" % self.compression)
if self.filter != 0:
raise FormatError("Unknown filter method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
% self.filter)
if self.interlace not in (0,1):
raise FormatError("Unknown interlace method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
% self.interlace)
# Derived values
# http://www.w3.org/TR/PNG/#6Colour-values
colormap = bool(self.color_type & 1)
greyscale = not (self.color_type & 2)
alpha = bool(self.color_type & 4)
color_planes = (3,1)[greyscale or colormap]
planes = color_planes + alpha
self.colormap = colormap
self.greyscale = greyscale
self.alpha = alpha
self.color_planes = color_planes
self.planes = planes
self.psize = float(self.bitdepth)/float(8) * planes
if int(self.psize) == self.psize:
self.psize = int(self.psize)
self.row_bytes = int(math.ceil(self.width * self.psize))
# Stores PLTE chunk if present, and is used to check
# chunk ordering constraints.
self.plte = None
# Stores tRNS chunk if present, and is used to check chunk
# ordering constraints.
self.trns = None
# Stores sbit chunk if present.
self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError(
"PLTE chunk's length should be a multiple of 3.")
if len(data) > (2**self.bitdepth)*3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn(
"PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack('B', data)
else:
self.background = struct.unpack("!%dH" % self.color_planes,
data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
def _process_tRNS(self, data):
# http://www.w3.org/TR/PNG/#11tRNS
self.trns = data
if self.colormap:
if not self.plte:
warnings.warn("PLTE chunk is required before tRNS chunk.")
else:
if len(data) > len(self.plte)/3:
# Was warning, but promoted to Error as it
# would otherwise cause pain later on.
raise FormatError("tRNS chunk is too long.")
else:
if self.alpha:
raise FormatError(
"tRNS chunk is not valid with colour type %d." %
self.color_type)
try:
self.transparent = \
struct.unpack("!%dH" % self.color_planes, data)
except struct.error:
raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
self.phys = data
fmt = "!LLB"
if len(data) != struct.calcsize(fmt):
raise FormatError("pHYs chunk has incorrect length.")
self.x_pixels_per_unit, self.y_pixels_per_unit, unit = struct.unpack(fmt,data)
self.unit_is_meter = bool(unit)
    def read(self, lenient=False):
        """
        Read the PNG file and decode it. Returns (`width`, `height`,
        `pixels`, `metadata`).
        May use excessive memory.
        `pixels` are returned in boxed row flat pixel format: an
        iterator of rows, each row a flat sequence of values.
        If the optional `lenient` argument evaluates to True,
        checksum failures will raise warnings rather than exceptions.
        """
        def iteridat():
            """Iterator that yields all the ``IDAT`` chunks as strings."""
            while True:
                try:
                    type, data = self.chunk(lenient=lenient)
                except ValueError as e:
                    raise ChunkError(e.args[0])
                if type == b'IEND':
                    # http://www.w3.org/TR/PNG/#11IEND
                    break
                if type != b'IDAT':
                    # Non-IDAT chunks between IDATs are skipped here
                    # (their data has already been read by chunk()).
                    continue
                # type == b'IDAT'
                # http://www.w3.org/TR/PNG/#11IDAT
                if self.colormap and not self.plte:
                    warnings.warn("PLTE chunk is required before IDAT chunk")
                yield data
        def iterdecomp(idat):
            """Iterator that yields decompressed strings. `idat` should
            be an iterator that yields the ``IDAT`` chunk data.
            """
            # Currently, with no max_length parameter to decompress,
            # this routine will do one yield per IDAT chunk: Not very
            # incremental.
            d = zlib.decompressobj()
            # Each IDAT chunk is passed to the decompressor, then any
            # remaining state is decompressed out.
            for data in idat:
                # :todo: add a max_length argument here to limit output
                # size.
                yield array('B', d.decompress(data))
            yield array('B', d.flush())
        self.preamble(lenient=lenient)
        raw = iterdecomp(iteridat())
        if self.interlace:
            # Adam7: the whole decompressed stream must be gathered
            # before deinterlacing, so this path is not incremental.
            raw = array('B', itertools.chain(*raw))
            arraycode = 'BH'[self.bitdepth>8]
            # Like :meth:`group` but producing an array.array object for
            # each row.
            pixels = map(lambda *row: array(arraycode, row),
                       *[iter(self.deinterlace(raw))]*self.width*self.planes)
        else:
            # Straightlaced: rows are produced lazily as the stream
            # decompresses.
            pixels = self.iterboxed(self.iterstraight(raw))
        meta = dict()
        for attr in 'greyscale alpha planes bitdepth interlace'.split():
            meta[attr] = getattr(self, attr)
        meta['size'] = (self.width, self.height)
        # Optional metadata is only included when present in the file.
        for attr in 'gamma transparent background'.split():
            a = getattr(self, attr, None)
            if a is not None:
                meta[attr] = a
        if self.plte:
            meta['palette'] = self.palette()
        return self.width, self.height, pixels, meta
def read_flat(self):
"""
Read a PNG file and decode it into flat row flat pixel format.
Returns (*width*, *height*, *pixels*, *metadata*).
May use excessive memory.
`pixels` are returned in flat row flat pixel format.
See also the :meth:`read` method which returns pixels in the
more stream-friendly boxed row flat pixel format.
"""
x, y, pixel, meta = self.read()
arraycode = 'BH'[meta['bitdepth']>8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, meta
def palette(self, alpha='natural'):
"""Returns a palette that is a sequence of 3-tuples or 4-tuples,
synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
chunks should have already been processed (for example, by
calling the :meth:`preamble` method). All the tuples are the
same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
there is a ``tRNS`` chunk. Assumes that the image is colour type
3 and therefore a ``PLTE`` chunk is required.
If the `alpha` argument is ``'force'`` then an alpha channel is
always added, forcing the result to be a sequence of 4-tuples.
"""
if not self.plte:
raise FormatError(
"Required PLTE chunk is missing in colour type 3 image.")
plte = group(array('B', self.plte), 3)
if self.trns or alpha == 'force':
trns = array('B', self.trns or [])
trns.extend([255]*(len(plte)-len(trns)))
plte = list(map(operator.add, plte, group(trns, 1)))
return plte
    def asDirect(self):
        """Returns the image data as a direct representation of an
        ``x * y * planes`` array. This method is intended to remove the
        need for callers to deal with palettes and transparency
        themselves. Images with a palette (colour type 3)
        are converted to RGB or RGBA; images with transparency (a
        ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
        When returned in this format the pixel values represent the
        colour value directly without needing to refer to palettes or
        transparency information.
        Like the :meth:`read` method this method returns a 4-tuple:
        (*width*, *height*, *pixels*, *meta*)
        This method normally returns pixel values with the bit depth
        they have in the source image, but when the source PNG has an
        ``sBIT`` chunk it is inspected and can reduce the bit depth of
        the result pixels; pixel values will be reduced according to
        the bit depth specified in the ``sBIT`` chunk (PNG nerds should
        note a single result bit depth is used for all channels; the
        maximum of the ones specified in the ``sBIT`` chunk. An RGB565
        image will be rescaled to 6-bit RGB666).
        The *meta* dictionary that is returned reflects the `direct`
        format and not the original source image. For example, an RGB
        source image with a ``tRNS`` chunk to represent a transparent
        colour, will have ``planes=3`` and ``alpha=False`` for the
        source image, but the *meta* dictionary returned by this method
        will have ``planes=4`` and ``alpha=True`` because an alpha
        channel is synthesized and added.
        *pixels* is the pixel data in boxed row flat pixel format (just
        like the :meth:`read` method).
        All the other aspects of the image data are not changed.
        """
        self.preamble()
        # Simple case, no conversion necessary.
        if not self.colormap and not self.trns and not self.sbit:
            return self.read()
        x,y,pixels,meta = self.read()
        if self.colormap:
            # Palette image: replace every index with its palette
            # entry (3- or 4-tuple, depending on tRNS).
            meta['colormap'] = False
            meta['alpha'] = bool(self.trns)
            meta['bitdepth'] = 8
            meta['planes'] = 3 + bool(self.trns)
            plte = self.palette()
            def iterpal(pixels):
                # Look each index up, then flatten the row of tuples
                # back into a flat sequence of bytes.
                for row in pixels:
                    row = [plte[x] for x in row]
                    yield array('B', itertools.chain(*row))
            pixels = iterpal(pixels)
        elif self.trns:
            # It would be nice if there was some reasonable way
            # of doing this without generating a whole load of
            # intermediate tuples. But tuples does seem like the
            # easiest way, with no other way clearly much simpler or
            # much faster. (Actually, the L to LA conversion could
            # perhaps go faster (all those 1-tuples!), but I still
            # wonder whether the code proliferation is worth it)
            it = self.transparent
            maxval = 2**meta['bitdepth']-1
            planes = meta['planes']
            meta['alpha'] = True
            meta['planes'] += 1
            typecode = 'BH'[meta['bitdepth']>8]
            def itertrns(pixels):
                for row in pixels:
                    # For each row we group it into pixels, then form a
                    # characterisation vector that says whether each
                    # pixel is opaque or not. Then we convert
                    # True/False to 0/maxval (by multiplication),
                    # and add it as the extra channel.
                    row = group(row, planes)
                    opa = map(it.__ne__, row)
                    opa = map(maxval.__mul__, opa)
                    opa = list(zip(opa)) # convert to 1-tuples
                    yield array(typecode,
                      itertools.chain(*map(operator.add, row, opa)))
            pixels = itertrns(pixels)
        targetbitdepth = None
        if self.sbit:
            # The result bit depth is the maximum of the per-channel
            # significant bits.
            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
            targetbitdepth = max(sbit)
            if targetbitdepth > meta['bitdepth']:
                raise Error('sBIT chunk %r exceeds bitdepth %d' %
                    (sbit,self.bitdepth))
            if min(sbit) <= 0:
                raise Error('sBIT chunk %r has a 0-entry' % sbit)
            if targetbitdepth == meta['bitdepth']:
                # No reduction needed.
                targetbitdepth = None
        if targetbitdepth:
            # Reduce bit depth by discarding the low-order bits.
            shift = meta['bitdepth'] - targetbitdepth
            meta['bitdepth'] = targetbitdepth
            def itershift(pixels):
                for row in pixels:
                    yield [p >> shift for p in row]
            pixels = itershift(pixels)
        return x,y,pixels,meta
def asFloat(self, maxval=1.0):
"""Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
"""
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield [factor * p for p in row]
return x,y,iterfloat(),info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width,height,pixels,meta = get()
maxval = 2**meta['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
meta['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield [int(round(x*factor)) for x in row]
if maxval == targetmaxval:
return width, height, pixels, meta
else:
return width, height, iterscale(), meta
def asRGB8(self):
"""Return the image data as an RGB pixels with 8-bits per
sample. This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that they
are all between 0 and 255 (8-bit). In the case where the
source image has a bit depth < 8 the transformation preserves
all the information; where the source image has bit depth
> 8, then rescaling to 8-bit values loses precision. No
dithering is performed. Like :meth:`asRGB`, an alpha channel
in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *pixels*, *metadata*).
*width*, *height*, *metadata* are as per the
:meth:`read` method.
*pixels* is the pixel data in boxed row flat pixel format.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""Return the image data as RGBA pixels with 8-bits per
sample. This method is similar to :meth:`asRGB8` and
:meth:`asRGBA`: The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255. The alpha channel is
synthesized if necessary (with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
"""Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not meta['greyscale']:
return width,height,pixels,meta
meta['greyscale'] = False
typecode = 'BH'[meta['bitdepth'] > 8]
def iterrgb():
for row in pixels:
a = array(typecode, [0]) * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width,height,iterrgb(),meta
    def asRGBA(self):
        """Return image as RGBA pixels. Greyscales are expanded into
        RGB triplets; an alpha channel is synthesized if necessary.
        The return values are as for the :meth:`read` method
        except that the *metadata* reflect the returned pixels, not the
        source image. In particular, for this method
        ``metadata['greyscale']`` will be ``False``, and
        ``metadata['alpha']`` will be ``True``.
        """
        width,height,pixels,meta = self.asDirect()
        if meta['alpha'] and not meta['greyscale']:
            # Already RGBA; pass rows through unchanged.
            return width,height,pixels,meta
        typecode = 'BH'[meta['bitdepth'] > 8]
        maxval = 2**meta['bitdepth'] - 1
        # Template row, pre-filled with the opaque value in every
        # slot; the converters overwrite the colour slots (and, for
        # LA sources, the alpha slot), so untouched alpha slots stay
        # fully opaque.
        maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
        def newarray():
            return array(typecode, maxbuffer)
        if meta['alpha'] and meta['greyscale']:
            # LA to RGBA
            def convert():
                for row in pixels:
                    # Create a fresh target row, then copy L channel
                    # into first three target channels, and A channel
                    # into fourth channel.
                    a = newarray()
                    pngfilters.convert_la_to_rgba(row, a)
                    yield a
        elif meta['greyscale']:
            # L to RGBA
            def convert():
                for row in pixels:
                    a = newarray()
                    pngfilters.convert_l_to_rgba(row, a)
                    yield a
        else:
            assert not meta['alpha'] and not meta['greyscale']
            # RGB to RGBA
            def convert():
                for row in pixels:
                    a = newarray()
                    pngfilters.convert_rgb_to_rgba(row, a)
                    yield a
        meta['alpha'] = True
        meta['greyscale'] = False
        return width,height,convert(),meta
def check_bitdepth_colortype(bitdepth, colortype):
    """Check that `bitdepth` and `colortype` are both valid,
    and specified in a valid combination. Returns (None) if valid,
    raise an Exception if not valid.

    `bitdepth` must be one of 1, 2, 4, 8, 16 and `colortype` one of
    0, 2, 3, 4, 6; see http://www.w3.org/TR/PNG/#table111 for the
    permitted combinations.
    """
    if bitdepth not in (1,2,4,8,16):
        raise FormatError("invalid bit depth %d" % bitdepth)
    if colortype not in (0,2,3,4,6):
        raise FormatError("invalid colour type %d" % colortype)
    # Check indexed (palettized) images have 8 or fewer bits
    # per pixel; check only indexed or greyscale images have
    # fewer than 8 bits per pixel.
    if colortype & 1 and bitdepth > 8:
        # BUG FIX: the arguments were previously supplied as
        # (bitdepth, colortype), filling the "colour type" and
        # "bit depth" placeholders with each other's value.
        raise FormatError(
          "Indexed images (colour type %d) cannot"
          " have bitdepth > 8 (bit depth %d)."
          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
          % (colortype, bitdepth))
    if bitdepth < 8 and colortype not in (0,3):
        raise FormatError("Illegal combination of bit depth (%d)"
          " and colour type (%d)."
          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
          % (bitdepth, colortype))
def isinteger(x):
    """Return True when `x` holds an integral value (e.g. ``3`` or
    ``3.0``); return False for fractional values and for values that
    cannot be converted with ``int()``.
    """
    try:
        return x == int(x)
    except (TypeError, ValueError):
        return False
# === Support for users without Cython ===
try:
    pngfilters
except NameError:
    class pngfilters(object):
        """Pure Python fallbacks for the scanline filter and pixel
        format helpers, used when the compiled ``cpngfilters``
        module is not available.

        The ``undo_filter_*`` methods reconstruct one scanline in
        place in `result`; `filter_unit` is the pixel size in bytes,
        `previous` is the reconstructed prior scanline.  See
        http://www.w3.org/TR/PNG/#9Filters .
        """
        @staticmethod
        def undo_filter_sub(filter_unit, scanline, previous, result):
            """Undo sub filter."""
            # result[:filter_unit] is assumed to already hold the
            # corresponding scanline bytes; each later byte adds the
            # reconstructed byte one pixel to its left.
            for i in range(filter_unit, len(result)):
                result[i] = (scanline[i] + result[i - filter_unit]) & 0xff

        @staticmethod
        def undo_filter_up(filter_unit, scanline, previous, result):
            """Undo up filter."""
            # Each byte adds the reconstructed byte directly above it.
            for i in range(len(result)):
                result[i] = (scanline[i] + previous[i]) & 0xff

        @staticmethod
        def undo_filter_average(filter_unit, scanline, previous, result):
            """Undo average filter."""
            # Each byte adds the (floored) mean of the byte to the
            # left and the byte above; left is 0 for the first pixel.
            for i in range(len(result)):
                left = result[i - filter_unit] if i >= filter_unit else 0
                above = previous[i]
                result[i] = (scanline[i] + ((left + above) >> 1)) & 0xff

        @staticmethod
        def undo_filter_paeth(filter_unit, scanline, previous, result):
            """Undo Paeth filter."""
            for i in range(len(result)):
                if i < filter_unit:
                    # No pixel to the left: left and upper-left are 0.
                    left = upleft = 0
                else:
                    left = result[i - filter_unit]
                    upleft = previous[i - filter_unit]
                above = previous[i]
                # Paeth predictor: pick whichever of left/above/
                # upper-left is closest to left + above - upleft,
                # breaking ties in that order.
                p = left + above - upleft
                pa = abs(p - left)
                pb = abs(p - above)
                pc = abs(p - upleft)
                if pa <= pb and pa <= pc:
                    predictor = left
                elif pb <= pc:
                    predictor = above
                else:
                    predictor = upleft
                result[i] = (scanline[i] + predictor) & 0xff

        @staticmethod
        def convert_la_to_rgba(row, result):
            """Copy the L channel of `row` into the R, G and B slots
            of `result`, and the A channel into the fourth slot.
            """
            for channel in range(3):
                result[channel::4] = row[0::2]
            result[3::4] = row[1::2]

        @staticmethod
        def convert_l_to_rgba(row, result):
            """Convert a grayscale image to RGBA. This method assumes
            the alpha channel in result is already correctly
            initialized.
            """
            for channel in range(3):
                result[channel::4] = row

        @staticmethod
        def convert_rgb_to_rgba(row, result):
            """Convert an RGB image to RGBA. This method assumes the
            alpha channel in result is already correctly initialized.
            """
            for channel in range(3):
                result[channel::4] = row[channel::3]
# === Command Line Support ===
def read_pam_header(infile):
    """
    Read (the rest of a) PAM header. `infile` should be positioned
    immediately after the initial 'P7' line (at the beginning of the
    second line). Returns are as for `read_pnm_header`.

    Raises `EOFError` when the header ends prematurely and `Error`
    when a required field is missing or a dimension is not a positive
    integer.
    """
    # Unlike PBM, PGM, and PPM, we can read the header a line at a time.
    header = dict()
    while True:
        l = infile.readline().strip()
        if l == b'ENDHDR':
            break
        if not l:
            raise EOFError('PAM ended prematurely')
        # BUG FIX: under Python 3 indexing a bytes object yields an
        # int, so the previous test ``l[0] == b'#'`` never matched;
        # comment lines were parsed as header fields and a bare "#"
        # line crashed with IndexError.
        if l.startswith(b'#'):
            continue
        l = l.split(None, 1)
        if l[0] not in header:
            header[l[0]] = l[1]
        else:
            header[l[0]] += b' ' + l[1]
    required = [b'WIDTH', b'HEIGHT', b'DEPTH', b'MAXVAL']
    WIDTH,HEIGHT,DEPTH,MAXVAL = required
    present = [x for x in required if x in header]
    if len(present) != len(required):
        raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
    width = int(header[WIDTH])
    height = int(header[HEIGHT])
    depth = int(header[DEPTH])
    maxval = int(header[MAXVAL])
    if (width <= 0 or
        height <= 0 or
        depth <= 0 or
        maxval <= 0):
        raise Error(
          'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
    return 'P7', width, height, depth, maxval
def read_pnm_header(infile, supported=(b'P5', b'P6')):
    """
    Read a PNM header, returning (format,width,height,depth,maxval).
    `width` and `height` are in pixels. `depth` is the number of
    channels in the image; for PBM and PGM it is synthesized as 1, for
    PPM as 3; for PAM images it is read from the header. `maxval` is
    synthesized (as 1) for PBM images.
    """
    # Generally, see http://netpbm.sourceforge.net/doc/ppm.html
    # and http://netpbm.sourceforge.net/doc/pam.html
    # Technically 'P7' must be followed by a newline, so by using
    # rstrip() we are being liberal in what we accept. I think this
    # is acceptable.
    type = infile.read(3).rstrip()
    if type not in supported:
        raise NotImplementedError('file format %s not supported' % type)
    if type == b'P7':
        # PAM header parsing is completely different.
        return read_pam_header(infile)
    # Expected number of tokens in header (3 for P4, 4 for P6)
    expected = 4
    pbm = (b'P1', b'P4')
    if type in pbm:
        expected = 3
    header = [type]
    # We have to read the rest of the header byte by byte because the
    # final whitespace character (immediately following the MAXVAL in
    # the case of P6) may not be a newline. Of course all PNM files in
    # the wild use a newline at this point, so it's tempting to use
    # readline; but it would be wrong.
    def getc():
        c = infile.read(1)
        if not c:
            raise Error('premature EOF reading PNM header')
        return c
    c = getc()
    while True:
        # Skip whitespace and comments that precede a token; a
        # comment runs from '#' to the end of its line.
        # BUG FIX: `c` is a bytes object, so it must be compared
        # against b'#' and b'\n\r' — the old str comparisons never
        # matched under Python 3, making any commented header
        # unparseable.  The loop also now returns to whitespace
        # skipping after a comment, so the newline that terminates
        # the comment is consumed instead of being rejected.
        while True:
            if c.isspace():
                c = getc()
            elif c == b'#':
                while c not in b'\n\r':
                    c = getc()
            else:
                break
        if not c.isdigit():
            raise Error('unexpected character %s found in header' % c)
        # According to the specification it is legal to have comments
        # that appear in the middle of a token.
        # This is bonkers; I've never seen it; and it's a bit awkward to
        # code good lexers in Python (no goto). So we break on such
        # cases.
        token = b''
        while c.isdigit():
            token += c
            c = getc()
        # Slight hack. All "tokens" are decimal integers, so convert
        # them here.
        header.append(int(token))
        if len(header) == expected:
            break
    # Skip any comment that immediately follows the final token.
    while c == b'#':
        while c not in b'\n\r':
            c = getc()
    if not c.isspace():
        raise Error('expected header to end with whitespace, not %s' % c)
    if type in pbm:
        # synthesize a MAXVAL
        header.append(1)
    depth = (1,3)[type == b'P6']
    return header[0], header[1], header[2], depth, header[3]
def write_pnm(file, width, height, pixels, meta):
    """Write a Netpbm PNM/PAM file to the binary stream `file`.

    The sub-format is chosen from ``meta['planes']``: PGM (P5) for 1
    plane, PPM (P6) for 3, and PAM (P7) for 2 or 4 (the variants
    with an alpha channel).  `pixels` is in boxed row flat pixel
    format, as produced by :meth:`Reader.read` and friends.
    """
    bitdepth = meta['bitdepth']
    maxval = 2 ** bitdepth - 1
    # Rudely, the number of image planes can be used to determine
    # whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
    planes = meta['planes']
    # Can be an assert as long as we assume that pixels and meta came
    # from a PNG file.
    assert planes in (1, 2, 3, 4)
    if planes in (1, 3):
        # Could generate PBM if maxval is 1, but we don't (for one
        # thing, we'd have to convert the data, not just blat it out).
        magic = 'P5' if planes == 1 else 'P6'
        header = '%s %d %d %d\n' % (magic, width, height, maxval)
    else:
        # PAM; see http://netpbm.sourceforge.net/doc/pam.html
        tupltype = 'GRAYSCALE_ALPHA' if planes == 2 else 'RGB_ALPHA'
        header = ('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
                  'TUPLTYPE %s\nENDHDR\n' %
                  (width, height, planes, maxval, tupltype))
    file.write(header.encode('ascii'))
    # One struct format covers a whole row: planes*width values,
    # big-endian, one or two bytes each depending on maxval.
    vpr = planes * width
    rowfmt = '>%d%s' % (vpr, 'H' if maxval > 0xff else 'B')
    for row in pixels:
        file.write(struct.pack(rowfmt, *row))
    file.flush()
def color_triple(color):
    """
    Convert a command line colour value to a RGB triple of integers.
    Accepts the ``#RGB``, ``#RRGGBB`` and ``#RRRRGGGGBBBB`` hex
    forms.  Raises ValueError for any other input (previously such
    input was silently converted to None, hiding user typos).
    FIXME: Somewhere we need support for greyscale backgrounds etc.
    """
    if color.startswith('#') and len(color) == 4:
        return (int(color[1], 16),
                int(color[2], 16),
                int(color[3], 16))
    elif color.startswith('#') and len(color) == 7:
        return (int(color[1:3], 16),
                int(color[3:5], 16),
                int(color[5:7], 16))
    elif color.startswith('#') and len(color) == 13:
        return (int(color[1:5], 16),
                int(color[5:9], 16),
                int(color[9:13], 16))
    raise ValueError(
      "colour must be #RGB, #RRGGBB, or #RRRRGGGGBBBB: %r" % (color,))
def _add_common_options(parser):
"""Call *parser.add_option* for each of the options that are
common between this PNG--PNM conversion tool and the gen
tool.
"""
parser.add_option("-i", "--interlace",
default=False, action="store_true",
help="create an interlaced PNG file (Adam7)")
parser.add_option("-t", "--transparent",
action="store", type="string", metavar="#RRGGBB",
help="mark the specified colour as transparent")
parser.add_option("-b", "--background",
action="store", type="string", metavar="#RRGGBB",
help="save the specified background colour")
parser.add_option("-g", "--gamma",
action="store", type="float", metavar="value",
help="save the specified gamma value")
parser.add_option("-c", "--compression",
action="store", type="int", metavar="level",
help="zlib compression level (0-9)")
return parser
def _main(argv):
    """
    Run the PNG encoder with options from the command line.

    With ``-r`` a PNG on the input is decoded to PNM on stdout;
    otherwise a PNM on the input is encoded to PNG on stdout.
    """
    # Parse command line arguments
    from optparse import OptionParser
    version = '%prog ' + __version__
    parser = OptionParser(version=version)
    parser.set_usage("%prog [options] [imagefile]")
    parser.add_option('-r', '--read-png', default=False,
                      action='store_true',
                      help='Read PNG, write PNM')
    parser.add_option("-a", "--alpha",
                      action="store", type="string", metavar="pgmfile",
                      help="alpha channel transparency (RGBA)")
    _add_common_options(parser)
    (options, args) = parser.parse_args(args=argv[1:])
    # Convert options
    if options.transparent is not None:
        options.transparent = color_triple(options.transparent)
    if options.background is not None:
        options.background = color_triple(options.background)
    # Prepare input and output files
    if len(args) == 0:
        infilename = '-'
        # BUG FIX: image data is binary; on Python 3 the text-mode
        # sys.stdin cannot supply it, so use its .buffer if present.
        infile = getattr(sys.stdin, 'buffer', sys.stdin)
    elif len(args) == 1:
        infilename = args[0]
        infile = open(infilename, 'rb')
    else:
        parser.error("more than one input file")
    # BUG FIX: likewise write binary output via stdout's buffer.
    outfile = getattr(sys.stdout, 'buffer', sys.stdout)
    if sys.platform == "win32":
        import msvcrt, os
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    if options.read_png:
        # Decode PNG to PPM/PAM
        png = Reader(file=infile)
        width,height,pixels,meta = png.asDirect()
        write_pnm(outfile, width, height, pixels, meta)
    else:
        # Encode PNM to PNG
        format, width, height, depth, maxval = \
          read_pnm_header(infile, (b'P5',b'P6',b'P7'))
        # When it comes to the variety of input formats, we do something
        # rather rude. Observe that L, LA, RGB, RGBA are the 4 colour
        # types supported by PNG and that they correspond to 1, 2, 3, 4
        # channels respectively. So we use the number of channels in
        # the source image to determine which one we have. We do not
        # care about TUPLTYPE.
        greyscale = depth <= 2
        pamalpha = depth in (2,4)
        supported = [2**x-1 for x in range(1,17)]
        try:
            mi = supported.index(maxval)
        except ValueError:
            raise NotImplementedError(
              'your maxval (%s) not in supported list %s' %
              (maxval, str(supported)))
        bitdepth = mi+1
        writer = Writer(width, height,
                        greyscale=greyscale,
                        bitdepth=bitdepth,
                        interlace=options.interlace,
                        transparent=options.transparent,
                        background=options.background,
                        alpha=bool(pamalpha or options.alpha),
                        gamma=options.gamma,
                        compression=options.compression)
        if options.alpha:
            pgmfile = open(options.alpha, 'rb')
            # BUG FIX: `supported` takes byte strings; the str 'P5'
            # could never match the bytes format read from the file.
            format, awidth, aheight, adepth, amaxval = \
              read_pnm_header(pgmfile, (b'P5',))
            # BUG FIX: compare maxval as an int; it was previously
            # compared against the string '255', which rejected
            # every alpha PGM file.
            if amaxval != 255:
                raise NotImplementedError(
                  'maxval %s not supported for alpha channel' % amaxval)
            if (awidth, aheight) != (width, height):
                raise ValueError("alpha channel image size mismatch"
                                 " (%s has %sx%s but %s has %sx%s)"
                                 % (infilename, width, height,
                                    options.alpha, awidth, aheight))
            writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
        else:
            writer.convert_pnm(infile, outfile)
if __name__ == '__main__':
    # Command-line entry point: convert between PNG and PNM.
    # png.Error failures are reported on stderr as a message rather
    # than shown as a traceback; other exceptions propagate.
    try:
        _main(sys.argv)
    except Error as e:
        print(e, file=sys.stderr)
| 37.97573 | 91 | 0.57072 |
__version__ = "0.0.18"
import itertools
import math
import re
import operator
import struct
import sys
import warnings
import zlib
from io import open
from array import array
from functools import reduce
try:
import cpngfilters as pngfilters
except ImportError:
pass
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
_adam7 = ((0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2))
def group(s, n):
return list(zip(*[iter(s)]*n))
def isarray(x):
return isinstance(x, array)
def tostring(row):
return row.tostring()
def interleave_planes(ipixels, apixels, ipsize, apsize):
itotal = len(ipixels)
atotal = len(apixels)
newtotal = itotal + atotal
newpsize = ipsize + apsize
= array(ipixels.typecode)
out.extend(ipixels)
out.extend(apixels)
# Interleave in the pixel data
for i in range(ipsize):
out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
for i in range(apsize):
out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
return out
def check_palette(palette):
# None is the default and is allowed.
if palette is None:
return None
p = list(palette)
if not (0 < len(p) <= 256):
raise ValueError("a palette must have between 1 and 256 entries")
seen_triple = False
for i,t in enumerate(p):
if len(t) not in (3,4):
raise ValueError(
"palette entry %d: entries must be 3- or 4-tuples." % i)
if len(t) == 3:
seen_triple = True
if seen_triple and len(t) == 4:
raise ValueError(
"palette entry %d: all 4-tuples must precede all 3-tuples" % i)
for x in t:
if int(x) != x or not(0 <= x <= 255):
raise ValueError(
"palette entry %d: values must be integer: 0 <= x <= 255" % i)
return p
def check_sizes(size, width, height):
if not size:
return width, height
if len(size) != 2:
raise ValueError(
"size argument should be a pair (width, height)")
if width is not None and width != size[0]:
raise ValueError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width))
if height is not None and height != size[1]:
raise ValueError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height))
return size
def check_color(c, greyscale, which):
if c is None:
return c
if greyscale:
try:
len(c)
except TypeError:
c = (c,)
if len(c) != 1:
raise ValueError("%s for greyscale must be 1-tuple" %
which)
if not isinteger(c[0]):
raise ValueError(
"%s colour for greyscale must be integer" % which)
else:
if not (len(c) == 3 and
isinteger(c[0]) and
isinteger(c[1]) and
isinteger(c[2])):
raise ValueError(
"%s colour must be a triple of integers" % which)
return c
class Error(Exception):
def __str__(self):
return self.__class__.__name__ + ': ' + ' '.join(self.args)
class FormatError(Error):
class ChunkError(FormatError):
pass
class Writer:
def __init__(self, width=None, height=None,
size=None,
greyscale=False,
alpha=False,
bitdepth=8,
palette=None,
transparent=None,
background=None,
gamma=None,
compression=None,
interlace=False,
bytes_per_sample=None, # deprecated
planes=None,
colormap=None,
maxval=None,
chunk_limit=2**20,
x_pixels_per_unit = None,
y_pixels_per_unit = None,
unit_is_meter = False):
# At the moment the `planes` argument is ignored;
# its purpose is to act as a dummy so that
# ``Writer(x, y, **info)`` works, where `info` is a dictionary
# returned by Reader.read and friends.
# Ditto for `colormap`.
width, height = check_sizes(size, width, height)
del size
if width <= 0 or height <= 0:
raise ValueError("width and height must be greater than zero")
if not isinteger(width) or not isinteger(height):
raise ValueError("width and height must be integers")
# http://www.w3.org/TR/PNG/#7Integers-and-byte-order
if width > 2**32-1 or height > 2**32-1:
raise ValueError("width and height cannot exceed 2**32-1")
if alpha and transparent is not None:
raise ValueError(
"transparent colour not allowed with alpha channel")
if bytes_per_sample is not None:
warnings.warn('please use bitdepth instead of bytes_per_sample',
DeprecationWarning)
if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
raise ValueError(
"bytes per sample must be .125, .25, .5, 1, or 2")
bitdepth = int(8*bytes_per_sample)
del bytes_per_sample
if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
raise ValueError("bitdepth (%r) must be a positive integer <= 16" %
bitdepth)
self.rescale = None
palette = check_palette(palette)
if palette:
if bitdepth not in (1,2,4,8):
raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
if transparent is not None:
raise ValueError("transparent and palette not compatible")
if alpha:
raise ValueError("alpha and palette not compatible")
if greyscale:
raise ValueError("greyscale and palette not compatible")
else:
# No palette, check for sBIT chunk generation.
if alpha or not greyscale:
if bitdepth not in (8,16):
targetbitdepth = (8,16)[bitdepth > 8]
self.rescale = (bitdepth, targetbitdepth)
bitdepth = targetbitdepth
del targetbitdepth
else:
assert greyscale
assert not alpha
if bitdepth not in (1,2,4,8,16):
if bitdepth > 8:
targetbitdepth = 16
elif bitdepth == 3:
targetbitdepth = 4
else:
assert bitdepth in (5,6,7)
targetbitdepth = 8
self.rescale = (bitdepth, targetbitdepth)
bitdepth = targetbitdepth
del targetbitdepth
if bitdepth < 8 and (alpha or not greyscale and not palette):
raise ValueError(
"bitdepth < 8 only permitted with greyscale or palette")
if bitdepth > 8 and palette:
raise ValueError(
"bit depth must be 8 or less for images with palette")
transparent = check_color(transparent, greyscale, 'transparent')
background = check_color(background, greyscale, 'background')
# It's important that the true boolean values (greyscale, alpha,
# convention is relied upon later on.
self.width = width
self.height = height
self.transparent = transparent
self.background = background
self.gamma = gamma
self.greyscale = bool(greyscale)
self.alpha = bool(alpha)
self.colormap = bool(palette)
self.bitdepth = int(bitdepth)
self.compression = compression
self.chunk_limit = chunk_limit
self.interlace = bool(interlace)
self.palette = palette
self.x_pixels_per_unit = x_pixels_per_unit
self.y_pixels_per_unit = y_pixels_per_unit
self.unit_is_meter = bool(unit_is_meter)
self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
assert self.color_type in (0,2,3,4,6)
self.color_planes = (3,1)[self.greyscale or self.colormap]
self.planes = self.color_planes + self.alpha
# :todo: fix for bitdepth < 8
self.psize = (self.bitdepth/8) * self.planes
def make_palette(self):
    """Create the byte strings for a ``PLTE`` chunk and, when any
    palette entry carries an alpha value, for a ``tRNS`` chunk too.

    Returns a pair ``(p, t)``: *p* is the RGB triples packed into a
    byte string; *t* is the packed alpha values, or ``None`` when no
    entry has an alpha component.
    """
    rgb = array('B')
    alphas = array('B')
    for entry in self.palette:
        rgb.extend(entry[0:3])
        if len(entry) > 3:
            alphas.append(entry[3])
    rgb = tostring(rgb)
    alphas = tostring(alphas)
    if alphas:
        return rgb, alphas
    return rgb, None
def write(self, outfile, rows):
    """Write a PNG image to the output file.  `rows` should be an
    iterable that yields each row in boxed row flat pixel format;
    each row is an iterable of sample values.  Raises ``ValueError``
    if the number of rows supplied does not match ``self.height``.
    """
    # Interlaced writing needs access to the whole image at once
    # (Adam7 passes visit rows out of order), so buffer all the
    # sample values into a single flat array first.
    if self.interlace:
        fmt = 'BH'[self.bitdepth > 8]
        a = array(fmt, itertools.chain(*rows))
        return self.write_array(outfile, a)
    nrows = self.write_passes(outfile, rows)
    if nrows != self.height:
        raise ValueError(
            "rows supplied (%d) does not match height (%d)" %
            (nrows, self.height))
def write_passes(self, outfile, rows, packed=False):
    """Write a PNG image to the output file.

    Most users will find :meth:`write` or :meth:`write_array` more
    convenient.  Rows must be supplied in the order they appear in
    the output file; for interlaced images the caller must have
    interlaced them already.  When `packed` is true each row is a
    sequence of bytes already packed into the PNG serialised format.

    Returns the number of rows written, so the caller can check it
    against the expected height.
    """
    # http://www.w3.org/TR/PNG/#5PNG-file-signature
    outfile.write(_signature)

    # http://www.w3.org/TR/PNG/#11IHDR
    write_chunk(outfile, b'IHDR',
                struct.pack("!2I5B", self.width, self.height,
                            self.bitdepth, self.color_type,
                            0, 0, self.interlace))
    # See :chunk:order
    # http://www.w3.org/TR/PNG/#11gAMA
    if self.gamma is not None:
        write_chunk(outfile, b'gAMA',
                    struct.pack("!L", int(round(self.gamma*1e5))))
    # See :chunk:order
    # http://www.w3.org/TR/PNG/#11sBIT
    if self.rescale:
        write_chunk(outfile, b'sBIT',
            struct.pack('%dB' % self.planes,
                        *[self.rescale[0]]*self.planes))
    # :chunk:order: Without a palette (PLTE chunk), ordering is
    # relatively relaxed.  With one, gAMA chunk must precede PLTE
    # chunk which must precede tRNS and bKGD.
    # See http://www.w3.org/TR/PNG/#5ChunkOrdering
    if self.palette:
        p,t = self.make_palette()
        write_chunk(outfile, b'PLTE', p)
        if t:
            # tRNS chunk is optional.  Only needed if palette entries
            # have alpha.
            write_chunk(outfile, b'tRNS', t)
    # http://www.w3.org/TR/PNG/#11tRNS
    if self.transparent is not None:
        if self.greyscale:
            write_chunk(outfile, b'tRNS',
                        struct.pack("!1H", *self.transparent))
        else:
            write_chunk(outfile, b'tRNS',
                        struct.pack("!3H", *self.transparent))
    # http://www.w3.org/TR/PNG/#11bKGD
    if self.background is not None:
        if self.greyscale:
            write_chunk(outfile, b'bKGD',
                        struct.pack("!1H", *self.background))
        else:
            write_chunk(outfile, b'bKGD',
                        struct.pack("!3H", *self.background))
    # http://www.w3.org/TR/PNG/#11pHYs
    if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None:
        tup = (self.x_pixels_per_unit, self.y_pixels_per_unit, int(self.unit_is_meter))
        write_chunk(outfile, b'pHYs', struct.pack("!LLB",*tup))
    # http://www.w3.org/TR/PNG/#11IDAT
    if self.compression is not None:
        compressor = zlib.compressobj(self.compression)
    else:
        compressor = zlib.compressobj()

    # Choose an extend function based on the bitdepth.  The extend
    # function packs/decomposes the pixel values into bytes and
    # stuffs them onto the data array.
    data = array('B')
    if self.bitdepth == 8 or packed:
        extend = data.extend
    elif self.bitdepth == 16:
        # Decompose into bytes
        def extend(sl):
            fmt = '!%dH' % len(sl)
            data.extend(array('B', struct.pack(fmt, *sl)))
    else:
        # Pack into bytes
        assert self.bitdepth < 8
        # samples per byte
        spb = int(8/self.bitdepth)
        def extend(sl):
            a = array('B', sl)
            # Adding padding bytes so we can group into a whole
            # number of spb-tuples.
            l = float(len(a))
            extra = math.ceil(l / float(spb))*spb - l
            a.extend([0]*int(extra))
            # Pack into bytes
            l = group(a, spb)
            l = [reduce(lambda x,y:
                        (x << self.bitdepth) + y, e) for e in l]
            data.extend(l)
    if self.rescale:
        # Samples are being written at a larger bit depth than the
        # source; scale each value up accordingly, wrapping the
        # previously chosen extend function.
        oldextend = extend
        factor = \
            float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
        def extend(sl):
            oldextend([int(round(factor*x)) for x in sl])

    # Build the first row, testing mostly to see if we need to
    # changed the extend function to cope with NumPy integer types
    # (they cause our ordinary definition of extend to fail, so we
    # wrap it).  See
    # http://code.google.com/p/pypng/issues/detail?id=44
    enumrows = enumerate(rows)
    del rows

    # First row's filter type.
    data.append(0)
    i,row = next(enumrows)
    try:
        # If this fails...
        extend(row)
    except:
        # ... try a version that converts the values to int first.
        # This works for NumPy integer types (and probably other
        # "nearly int" types too).
        def wrapmapint(f):
            return lambda sl: f([int(x) for x in sl])
        extend = wrapmapint(extend)
        del wrapmapint
        extend(row)
    for i,row in enumrows:
        # Add "None" filter type.  Currently it's essential that
        # this filter type be used for every scanline as we do not
        # mark the first row of a reduced pass image; that means we
        # could accidentally compute the wrong filtered scanline if
        # we used "up", "average", or "paeth" on such a line.
        data.append(0)
        extend(row)
        if len(data) > self.chunk_limit:
            compressed = compressor.compress(tostring(data))
            if len(compressed):
                write_chunk(outfile, b'IDAT', compressed)
            # Because of our very witty definition of ``extend``,
            # above, we must re-use the same ``data`` object.  Hence
            # we use ``del`` to empty this one, rather than create a
            # fresh one (which would be my natural FP instinct).
            del data[:]
    if len(data):
        compressed = compressor.compress(tostring(data))
    else:
        compressed = b''
    flushed = compressor.flush()
    if len(compressed) or len(flushed):
        write_chunk(outfile, b'IDAT', compressed + flushed)
    # http://www.w3.org/TR/PNG/#11IEND
    write_chunk(outfile, b'IEND')
    return i+1
def write_array(self, outfile, pixels):
    """Write a PNG to *outfile* from a single flat sequence of
    sample values, selecting the interlaced or straightlaced row
    generator according to ``self.interlace``.
    """
    if self.interlace:
        row_iter = self.array_scanlines_interlace(pixels)
    else:
        row_iter = self.array_scanlines(pixels)
    self.write_passes(outfile, row_iter)
def write_packed(self, outfile, rows):
    """Write PNG file to `outfile`; pixel data comes from `rows`,
    each of which is a sequence of already-packed bytes.  Not usable
    when a bit-depth rescale (sBIT) is in effect, because rescaling
    operates on individual sample values, not packed bytes.
    """
    if self.rescale:
        raise Error("write_packed method not suitable for bit depth %d" %
                    self.rescale[0])
    return self.write_passes(outfile, rows, packed=True)
def convert_pnm(self, infile, outfile):
    """Convert a PNM file containing raw pixel data into a PNG file
    with the parameters set in this writer object.  `infile` must be
    positioned at the start of the raw sample data.
    """
    if self.interlace:
        # Interlacing needs the whole image at once, so read all
        # the sample bytes into an array first.
        pixels = array('B')
        pixels.fromfile(infile,
                        (self.bitdepth//8) * self.color_planes *
                        self.width * self.height)
        self.write_passes(outfile, self.array_scanlines_interlace(pixels))
    else:
        self.write_passes(outfile, self.file_scanlines(infile))
def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
    """Convert a PPM and PGM file containing raw pixel data into a
    PNG outfile with the parameters set in this writer object.  The
    PGM file supplies the alpha channel: its samples are interleaved
    after the colour samples taken from the PPM file.
    """
    pixels = array('B')
    pixels.fromfile(ppmfile,
                    (self.bitdepth//8) * self.color_planes *
                    self.width * self.height)
    apixels = array('B')
    apixels.fromfile(pgmfile,
                     (self.bitdepth//8) *
                     self.width * self.height)
    # Merge colour and alpha planes into a single flat sequence.
    pixels = interleave_planes(pixels, apixels,
                               (self.bitdepth//8) * self.color_planes,
                               (self.bitdepth//8))
    if self.interlace:
        self.write_passes(outfile, self.array_scanlines_interlace(pixels))
    else:
        self.write_passes(outfile, self.array_scanlines(pixels))
def file_scanlines(self, infile):
    """Generate boxed rows in flat pixel format from the input file
    `infile`, which must be positioned at the beginning of the first
    pixel and contain raw big-endian sample data (as in binary
    Netpbm formats).  Row size comes from the image dimensions.
    """
    # Values per row
    vpr = self.width * self.planes
    row_bytes = vpr
    if self.bitdepth > 8:
        assert self.bitdepth == 16
        row_bytes *= 2
        # 16-bit samples are big-endian in the file.
        fmt = '>%dH' % vpr
        def line():
            return array('H', struct.unpack(fmt, infile.read(row_bytes)))
    else:
        def line():
            scanline = array('B', infile.read(row_bytes))
            return scanline
    for y in range(self.height):
        yield line()
def array_scanlines(self, pixels):
    """Generate rows (each a slice of ``self.width * self.planes``
    values) from the single flat sequence *pixels*.
    """
    # Values per row
    vpr = self.width * self.planes
    for row_start in range(0, self.height * vpr, vpr):
        yield pixels[row_start:row_start + vpr]
def array_scanlines_interlace(self, pixels):
    """Generator for interlaced scanlines from an array.  `pixels` is
    the full source image in flat row flat pixel format.  Yields each
    scanline of the Adam7 reduced passes in turn, in boxed row flat
    pixel format.
    """
    # http://www.w3.org/TR/PNG/#8InterlaceMethods
    # Array type.
    fmt = 'BH'[self.bitdepth > 8]
    # Value per row
    vpr = self.width * self.planes
    for xstart, ystart, xstep, ystep in _adam7:
        # Pass selects no pixels at all for narrow images.
        if xstart >= self.width:
            continue
        # Pixels per row (of reduced image)
        ppr = int(math.ceil((self.width-xstart)/float(xstep)))
        # number of values in reduced image row.
        row_len = ppr*self.planes
        for y in range(ystart, self.height, ystep):
            if xstep == 1:
                # Whole source row selected; slice it out directly.
                offset = y * vpr
                yield pixels[offset:offset+vpr]
            else:
                row = array(fmt)
                # There's no easier way to set the length of an array
                row.extend(pixels[0:row_len])
                # Copy each plane into the row with an extended slice.
                offset = y * vpr + xstart * self.planes
                end_offset = (y+1) * vpr
                skip = self.planes * xstep
                for i in range(self.planes):
                    row[i::self.planes] = \
                        pixels[offset+i:end_offset:skip]
                yield row
def write_chunk(outfile, tag, data=b''):
    """Write a single PNG chunk to *outfile*: a 4-byte big-endian
    length, the 4-byte chunk *tag*, the chunk *data*, and finally a
    CRC-32 checksum covering the tag and the data.

    http://www.w3.org/TR/PNG/#5Chunk-layout
    """
    # BUG FIX: the length field must be written to *outfile*;
    # previously this line called an undefined name ``rite``.
    outfile.write(struct.pack("!I", len(data)))
    outfile.write(tag)
    outfile.write(data)
    checksum = zlib.crc32(tag)
    checksum = zlib.crc32(data, checksum)
    # Force an unsigned 32-bit value (zlib.crc32 results historically
    # varied in signedness between Python versions).
    checksum &= 2**32-1
    outfile.write(struct.pack("!I", checksum))
def write_chunks(out, chunks):
    """Create a PNG file by writing out the chunks.  `chunks` is an
    iterable of ``(tag, data)`` pairs; the PNG signature is written
    first, then each chunk via ``write_chunk``.
    """
    out.write(_signature)
    for chunk in chunks:
        write_chunk(out, *chunk)
def filter_scanline(type, line, fo, prev=None):
    """Apply a scanline filter to a scanline.  `type` specifies the
    filter type (0 to 4); `line` specifies the current (unfiltered)
    scanline as a sequence of bytes; `prev` specifies the previous
    (unfiltered) scanline as a sequence of bytes (or ``None`` for the
    first line).  `fo` specifies the filter offset: the byte stride
    from a sample to the corresponding sample of the previous pixel
    (1 for bit depths below 8).

    Returns a fresh ``array('B')`` whose first element is the
    *original* filter-type byte followed by the filtered bytes.
    """
    assert 0 <= type < 5

    # The output array.  Note that the recorded filter-type byte is
    # the one passed in, even when an equivalent simpler filter is
    # substituted below for the first scanline.
    out = array('B', [type])

    def sub():
        # Filter type 1: difference with the sample `fo` bytes back.
        ai = -fo
        for x in line:
            if ai >= 0:
                x = (x - line[ai]) & 0xff
            out.append(x)
            ai += 1
    def up():
        # Filter type 2: difference with the sample directly above.
        for i,x in enumerate(line):
            x = (x - prev[i]) & 0xff
            out.append(x)
    def average():
        # Filter type 3: difference with the mean of left and above.
        ai = -fo
        for i,x in enumerate(line):
            if ai >= 0:
                x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
            else:
                x = (x - (prev[i] >> 1)) & 0xff
            out.append(x)
            ai += 1
    def paeth():
        # Filter type 4: difference with the Paeth predictor of the
        # left, above, and upper-left samples.
        # BUG FIX: `ai` was previously never initialized here, which
        # raised NameError whenever type 4 was used with a previous
        # scanline.  As in the other filters it starts at -fo (and is
        # also used as the index into `prev` for the c sample).
        ai = -fo
        for i,x in enumerate(line):
            a = 0
            b = prev[i]
            c = 0
            if ai >= 0:
                a = line[ai]
                c = prev[ai]
            p = a + b - c
            pa = abs(p - a)
            pb = abs(p - b)
            pc = abs(p - c)
            if pa <= pb and pa <= pc:
                Pr = a
            elif pb <= pc:
                Pr = b
            else:
                Pr = c
            x = (x - Pr) & 0xff
            out.append(x)
            ai += 1

    if not prev:
        # With no previous line, certain filters reduce to simpler
        # cases: "up" becomes "none"; "paeth" becomes "sub"
        # (non-trivial, but true).  "average" needs to be handled
        # specially (with a synthesized all-zero previous line).
        if type == 2: # "up"
            type = 0
        elif type == 3:
            prev = [0]*len(line)
        elif type == 4: # "paeth"
            type = 1
    if type == 0:
        out.extend(line)
    elif type == 1:
        sub()
    elif type == 2:
        up()
    elif type == 3:
        average()
    else: # type == 4
        paeth()
    return out
# Regex for decoding the mode string used by from_array: an "L" or
# "RGB" base (optionally with "A" for alpha), optionally followed by
# ";<bitdepth>", e.g. "RGBA" or "L;16".
RegexModeDecode = re.compile("(LA?|RGBA?);?([0-9]*)", flags=re.IGNORECASE)
def from_array(a, mode=None, info={}):
    """Create a PNG :class:`Image` object from a 2- or 3-dimensional
    array of pixel values, for example
    ``png.from_array(pixels, 'L').save('foo.png')``.

    `mode` is a string such as ``'L'``, ``'LA'``, ``'RGB'``, or
    ``'RGBA'``, optionally followed by ``';<bitdepth>'`` (e.g.
    ``'L;16'``).  The optional `info` dict supplies extra metadata
    (``size``, ``width``, ``height``, ``bitdepth``, ...); entries are
    cross-checked against `a` and `mode` and filled in where missing.

    NOTE: the default `info` is a shared mutable dict, but it is
    copied immediately below and never mutated in place.
    """
    # We abuse the *info* parameter by modifying it.  Take a copy here.
    # (Also typechecks *info* to some extent).
    info = dict(info)

    # Syntax check mode string.
    match = RegexModeDecode.match(mode)
    if not match:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")

    mode, bitdepth = match.groups()
    alpha = 'A' in mode
    if bitdepth:
        bitdepth = int(bitdepth)

    # Colour format.
    if 'greyscale' in info:
        if bool(info['greyscale']) != ('L' in mode):
            raise Error("info['greyscale'] should match mode.")
    info['greyscale'] = 'L' in mode

    if 'alpha' in info:
        if bool(info['alpha']) != alpha:
            raise Error("info['alpha'] should match mode.")
    info['alpha'] = alpha

    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get("bitdepth") and bitdepth != info['bitdepth']:
            raise Error("bitdepth (%d) should match bitdepth of info (%d)." %
                        (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth

    # Fill in and/or check entries in *info*.
    # Dimensions.
    if 'size' in info:
        assert len(info["size"]) == 2

        # Check width, height, size all match where used.
        for dimension,axis in [('width', 0), ('height', 1)]:
            if dimension in info:
                if info[dimension] != info['size'][axis]:
                    raise Error(
                        "info[%r] should match info['size'][%r]." %
                        (dimension, axis))
        info['width'],info['height'] = info['size']

    if 'height' not in info:
        try:
            info['height'] = len(a)
        except TypeError:
            raise Error("len(a) does not work, supply info['height'] instead.")

    planes = len(mode)
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")

    # In order to work out whether the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a,t = itertools.tee(a)
    row = next(t)
    del t
    try:
        row[0][0]
        threed = True
        testelement = row[0]
    except (IndexError, TypeError):
        threed = False
        testelement = row
    if 'width' not in info:
        if threed:
            width = len(row)
        else:
            width = len(row) // planes
        info['width'] = width

    if threed:
        # Flatten the threed rows
        a = (itertools.chain.from_iterable(x) for x in a)

    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause.  Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception, we now assume that
            # the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth

    for thing in ["width", "height", "bitdepth", "greyscale", "alpha"]:
        assert thing in info

    return Image(a, info)
# So that refugees from PIL feel more at home.  Not documented.
fromarray = from_array
class Image:
    """A PNG image: a (rows, info) pair where *info* holds the
    keyword arguments for :class:`Writer`.  Typically created via
    :func:`from_array` and written to disk via :meth:`save`.
    """
    def __init__(self, rows, info):
        # NOTE: when *rows* is a one-shot iterator, save() can only
        # be called once.
        self.rows = rows
        self.info = info
    def save(self, file):
        """Save the image to *file*.  If *file* has a ``write``
        attribute it is used as an open file object (and left open);
        otherwise it is treated as a filename, and the opened file is
        closed afterwards.
        """
        w = Writer(**self.info)
        try:
            # Duck-type test: does it look like an open file?
            file.write
            def close(): pass
        except AttributeError:
            file = open(file, 'wb')
            def close(): file.close()
        try:
            w.write(file, self.rows)
        finally:
            close()
class _readable:
    """Wrap a byte string or array so that it can be consumed through
    a file-like ``read`` method."""
    def __init__(self, buf):
        self.buf = buf
        self.offset = 0
    def read(self, n):
        """Return the next *n* items of the buffer as a byte string."""
        piece = self.buf[self.offset:self.offset+n]
        if isarray(piece):
            piece = piece.tostring()
        self.offset += n
        return piece
# Choose a bytes-to-str conversion for chunk type names: on Python 3
# the two-argument str(bytes, 'ascii') decodes; on Python 2 it raises
# TypeError and plain str() suffices.
try:
    str(b'dummy', 'ascii')
except TypeError:
    as_str = str
else:
    def as_str(x):
        # Decode an ASCII bytes value (e.g. a chunk type) to str.
        return str(x, 'ascii')
class Reader:
    """PNG decoder in pure Python."""
    def __init__(self, _guess=None, **kw):
        """Create a PNG decoder object.

        The constructor expects exactly one argument.  With a keyword
        argument, choose from: ``filename`` (name of a PNG file),
        ``file`` (an object with a ``read`` method), or ``bytes`` (a
        byte string or array of PNG data).  A single positional
        argument is classified by duck-typing into one of those.
        """
        if ((_guess is not None and len(kw) != 0) or
            (_guess is None and len(kw) != 1)):
            raise TypeError("Reader() takes exactly 1 argument")

        # Will hold the first 8 bytes of the file once
        # validate_signature has read (and checked) them.
        self.signature = None
        self.transparent = None
        # A (length, type) pair when a chunk header has been read but
        # its data and checksum have not; see the preamble method for
        # how this read-ahead is used.
        self.atchunk = None

        if _guess is not None:
            if isarray(_guess):
                kw["bytes"] = _guess
            elif isinstance(_guess, str):
                kw["filename"] = _guess
            elif hasattr(_guess, 'read'):
                kw["file"] = _guess

        if "filename" in kw:
            self.file = open(kw["filename"], "rb")
        elif "file" in kw:
            self.file = kw["file"]
        elif "bytes" in kw:
            self.file = _readable(kw["bytes"])
        else:
            raise TypeError("expecting filename, file or bytes array")
def chunk(self, seek=None, lenient=False):
self.validate_signature()
while True:
f not self.atchunk:
self.atchunk = self.chunklentype()
length, type = self.atchunk
self.atchunk = None
data = self.file.read(length)
if len(data) != length:
raise ChunkError('Chunk %s too short for required %i octets.'
% (type, length))
checksum = self.file.read(4)
if len(checksum) != 4:
raise ChunkError('Chunk %s too short for checksum.' % type)
if seek and type != seek:
continue
verify = zlib.crc32(type)
verify = zlib.crc32(data, verify)
verify &= 2**32 - 1
verify = struct.pack('!I', verify)
if checksum != verify:
(a, ) = struct.unpack('!I', checksum)
(b, ) = struct.unpack('!I', verify)
message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
if lenient:
warnings.warn(message, RuntimeWarning)
else:
raise ChunkError(message)
return type, data
def chunks(self):
while True:
t,v = self.chunk()
yield t,v
if t == b'IEND':
break
def undo_filter(self, filter_type, scanline, previous):
result = scanline
if filter_type == 0:
return result
if filter_type not in (1,2,3,4):
raise FormatError('Invalid PNG Filter Type.'
' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
fu = max(1, self.psize)
if not previous:
previous = array('B', [0]*len(scanline))
def sub():
ai = 0
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
def up():
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
def average():
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
def paeth():
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
(None,
pngfilters.undo_filter_sub,
pngfilters.undo_filter_up,
pngfilters.undo_filter_average,
pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
return result
    def deinterlace(self, raw):
        """Read raw (filtered, interlaced) pixel data, undo the
        filters, deinterlace the Adam7 passes, and flatten into a
        single array in flat row flat pixel format.
        """
        # Values per row (of the target image)
        vpr = self.width * self.planes

        # Interlacing writes to the output array in a scattered
        # pattern, so the entire output array must be in memory.
        fmt = 'BH'[self.bitdepth > 8]
        a = array(fmt, [0]*vpr*self.height)
        source_offset = 0

        for xstart, ystart, xstep, ystep in _adam7:
            if xstart >= self.width:
                continue
            # The previous (reconstructed) scanline.  None at the
            # beginning of a pass to indicate that there is no
            # previous line.
            recon = None
            # Pixels per row (reduced pass image)
            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
            # Row size in bytes for this pass.
            row_size = int(math.ceil(self.psize * ppr))
            for y in range(ystart, self.height, ystep):
                filter_type = raw[source_offset]
                source_offset += 1
                scanline = raw[source_offset:source_offset+row_size]
                source_offset += row_size
                recon = self.undo_filter(filter_type, scanline, recon)
                # Convert so that there is one element per pixel value
                flat = self.serialtoflat(recon, ppr)
                if xstep == 1:
                    assert xstart == 0
                    offset = y * vpr
                    a[offset:offset+vpr] = flat
                else:
                    # Scatter this pass's pixels into the full image
                    # row using extended slices, one plane at a time.
                    offset = y * vpr + xstart * self.planes
                    end_offset = (y+1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        a[offset+i:end_offset:skip] = \
                            flat[i::self.planes]
        return a
    def iterboxed(self, rows):
        """Iterator that yields each scanline in boxed row flat pixel
        format.  `rows` should be an iterator that yields the bytes of
        each row in turn.
        """
        def asvalues(raw):
            """Convert a row of raw bytes into a flat row of sample
            values.  Result is a freshly allocated object, not shared
            with the argument.
            """
            if self.bitdepth == 8:
                return array('B', raw)
            if self.bitdepth == 16:
                raw = tostring(raw)
                return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
            assert self.bitdepth < 8
            width = self.width
            # Samples per byte
            spb = 8//self.bitdepth
            out = array('B')
            mask = 2**self.bitdepth - 1
            # Bit shifts for each sample within a byte, MSB first.
            shifts = [self.bitdepth * i
                      for i in reversed(list(range(spb)))]
            for o in raw:
                out.extend([mask&(o>>i) for i in shifts])
            # Truncate any padding samples from the final byte.
            return out[:width]
        return map(asvalues, rows)
    def serialtoflat(self, bytes, width=None):
        """Convert serial format (byte stream) pixel data to flat row
        flat pixel format: one array element per sample value.
        `width` defaults to the image width; it bounds how many
        samples are taken per row before padding bits are discarded.
        """
        if self.bitdepth == 8:
            return bytes
        if self.bitdepth == 16:
            bytes = tostring(bytes)
            return array('H',
                         struct.unpack('!%dH' % (len(bytes)//2), bytes))
        assert self.bitdepth < 8
        if width is None:
            width = self.width
        # Samples per byte
        spb = 8//self.bitdepth
        out = array('B')
        mask = 2**self.bitdepth - 1
        # Bit shifts for each sample within a byte, MSB first.
        shifts = list(map(self.bitdepth.__mul__, reversed(list(range(spb)))))
        l = width
        for o in bytes:
            # Take at most `l` samples from this byte, then reset the
            # per-row countdown at each row boundary.
            out.extend([(mask&(o>>s)) for s in shifts][:l])
            l -= spb
            if l <= 0:
                l = width
        return out
    def iterstraight(self, raw):
        """Iterator that undoes the effect of filtering and yields
        each row in serialised format (as a sequence of bytes).
        Assumes the input is straightlaced.  `raw` should be an
        iterable that yields the raw (decompressed) bytes in chunks
        of arbitrary size.
        """
        # length of row, in bytes
        rb = self.row_bytes
        a = array('B')
        # The previous (reconstructed) scanline.  None indicates the
        # first line of the image.
        recon = None
        for some in raw:
            a.extend(some)
            # Each stored row is one filter-type byte plus rb bytes.
            while len(a) >= rb + 1:
                filter_type = a[0]
                scanline = a[1:rb+1]
                del a[:rb+1]
                recon = self.undo_filter(filter_type, scanline, recon)
                yield recon
        if len(a) != 0:
            # Reached when the decompressed bytes do not pack into an
            # exact number of rows.
            raise FormatError(
                'Wrong size for decompressed IDAT chunk.')
        assert len(a) == 0
    def validate_signature(self):
        """If the 8-byte PNG signature has not been read yet, read and
        validate it; otherwise do nothing.  Raises ``FormatError``
        when the bytes do not match the PNG signature.
        """
        if self.signature:
            return
        self.signature = self.file.read(8)
        if self.signature != _signature:
            raise FormatError("PNG file has invalid signature.")
    def preamble(self, lenient=False):
        """Extract the image metadata by reading the initial part of
        the PNG file up to the start of the ``IDAT`` chunk.  All
        chunks that precede ``IDAT`` are read and either processed for
        metadata or discarded.

        If `lenient` is true, checksum failures raise warnings rather
        than exceptions (passed through to chunk processing).
        """
        self.validate_signature()

        while True:
            if not self.atchunk:
                # Read ahead the next chunk's (length, type) header;
                # it is stored so that chunk() will not re-read it.
                self.atchunk = self.chunklentype()
            if self.atchunk is None:
                raise FormatError(
                    'This PNG file has no IDAT chunks.')
            if self.atchunk[1] == b'IDAT':
                return
            self.process_chunk(lenient=lenient)
def chunklentype(self):
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length,type = struct.unpack('!I4s', x)
if length > 2**31-1:
raise FormatError('Chunk %s is too large: %d.' % (type,length))
return length,type
def process_chunk(self, lenient=False):
type, data = self.chunk(lenient=lenient)
method = '_process_' + as_str(type)
m = getattr(self, method, None)
if m:
m(data)
def _process_IHDR(self, data):
if len(data) != 13:
raise FormatError('IHDR chunk has incorrect length.')
(self.width, self.height, self.bitdepth, self.color_type,
self.compression, self.filter,
self.interlace) = struct.unpack("!2I5B", data)
check_bitdepth_colortype(self.bitdepth, self.color_type)
if self.compression != 0:
raise Error("unknown compression method %d" % self.compression)
if self.filter != 0:
raise FormatError("Unknown filter method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
% self.filter)
if self.interlace not in (0,1):
raise FormatError("Unknown interlace method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
% self.interlace)
ap = bool(self.color_type & 1)
greyscale = not (self.color_type & 2)
alpha = bool(self.color_type & 4)
color_planes = (3,1)[greyscale or colormap]
planes = color_planes + alpha
self.colormap = colormap
self.greyscale = greyscale
self.alpha = alpha
self.color_planes = color_planes
self.planes = planes
self.psize = float(self.bitdepth)/float(8) * planes
if int(self.psize) == self.psize:
self.psize = int(self.psize)
self.row_bytes = int(math.ceil(self.width * self.psize))
self.plte = None
self.trns = None
self.sbit = None
    def _process_PLTE(self, data):
        # http://www.w3.org/TR/PNG/#11PLTE
        # Stash the raw palette bytes; a duplicate chunk is only
        # warned about (the last one wins).
        if self.plte:
            warnings.warn("Multiple PLTE chunks present.")
        self.plte = data
        if len(data) % 3 != 0:
            raise FormatError(
                "PLTE chunk's length should be a multiple of 3.")
        # At most one RGB triple per possible palette index.
        if len(data) > (2**self.bitdepth)*3:
            raise FormatError("PLTE chunk is too long.")
        if len(data) == 0:
            raise FormatError("Empty PLTE is not allowed.")
    def _process_bKGD(self, data):
        # http://www.w3.org/TR/PNG/#11bKGD
        try:
            if self.colormap:
                if not self.plte:
                    warnings.warn(
                        "PLTE chunk is required before bKGD chunk.")
                # Colour-mapped image: background is a 1-byte palette
                # index.
                self.background = struct.unpack('B', data)
            else:
                # Otherwise: one 2-byte value per colour plane.
                self.background = struct.unpack("!%dH" % self.color_planes,
                                                data)
        except struct.error:
            raise FormatError("bKGD chunk has incorrect length.")
    def _process_tRNS(self, data):
        # http://www.w3.org/TR/PNG/#11tRNS
        self.trns = data
        if self.colormap:
            if not self.plte:
                warnings.warn("PLTE chunk is required before tRNS chunk.")
            else:
                # At most one alpha entry per palette entry.
                if len(data) > len(self.plte)/3:
                    # Was warning, but promoted to Error as it
                    # would otherwise cause pain later on.
                    raise FormatError("tRNS chunk is too long.")
        else:
            # tRNS is redundant (and illegal) when the image already
            # has an alpha channel.
            if self.alpha:
                raise FormatError(
                    "tRNS chunk is not valid with colour type %d." %
                    self.color_type)
            try:
                # One 2-byte value per colour plane.
                self.transparent = \
                    struct.unpack("!%dH" % self.color_planes, data)
            except struct.error:
                raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
    def _process_sBIT(self, data):
        # http://www.w3.org/TR/PNG/#11sBIT
        self.sbit = data
        # Colour-mapped images always carry 3 entries (RGB);
        # otherwise one entry per plane.
        if (self.colormap and len(data) != 3 or
            not self.colormap and len(data) != self.planes):
            raise FormatError("sBIT chunk has incorrect length.")
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
self.phys = data
fmt = "!LLB"
if len(data) != struct.calcsize(fmt):
raise FormatError("pHYs chunk has incorrect length.")
self.x_pixels_per_unit, self.y_pixels_per_unit, unit = struct.unpack(fmt,data)
self.unit_is_meter = bool(unit)
    def read(self, lenient=False):
        """Read the PNG file and decode it.  Returns ``(width,
        height, pixels, metadata)`` with *pixels* in boxed row flat
        pixel format.  May use excessive memory for interlaced
        images (which are buffered whole).

        If `lenient` is true, checksum failures raise warnings rather
        than exceptions.
        """
        def iteridat():
            """Iterator that yields all the ``IDAT`` chunks' data."""
            while True:
                try:
                    type, data = self.chunk(lenient=lenient)
                except ValueError as e:
                    raise ChunkError(e.args[0])
                if type == b'IEND':
                    # http://www.w3.org/TR/PNG/#11IEND
                    break
                if type != b'IDAT':
                    continue
                # type == b'IDAT'
                # http://www.w3.org/TR/PNG/#11IDAT
                if self.colormap and not self.plte:
                    warnings.warn("PLTE chunk is required before IDAT chunk")
                yield data

        def iterdecomp(idat):
            """Iterator that yields decompressed byte arrays; `idat`
            should yield the ``IDAT`` chunk data."""
            # Currently, with no max_length parameter to decompress,
            # this routine will do one yield per IDAT chunk: Not very
            # incremental.
            d = zlib.decompressobj()
            # Each IDAT chunk is passed to the decompressor, then any
            # remaining state is decompressed out.
            for data in idat:
                # :todo: add a max_length argument here to limit output
                # size.
                yield array('B', d.decompress(data))
            yield array('B', d.flush())

        self.preamble(lenient=lenient)
        raw = iterdecomp(iteridat())

        if self.interlace:
            # Interlaced images must be deinterlaced as a whole.
            raw = array('B', itertools.chain(*raw))
            arraycode = 'BH'[self.bitdepth>8]
            # Like :meth:`group` but producing an array.array object for
            # each row.
            pixels = map(lambda *row: array(arraycode, row),
                         *[iter(self.deinterlace(raw))]*self.width*self.planes)
        else:
            pixels = self.iterboxed(self.iterstraight(raw))
        meta = dict()
        for attr in 'greyscale alpha planes bitdepth interlace'.split():
            meta[attr] = getattr(self, attr)
        meta['size'] = (self.width, self.height)
        # Optional metadata, present only when the matching chunk
        # has been processed.
        for attr in 'gamma transparent background'.split():
            a = getattr(self, attr, None)
            if a is not None:
                meta[attr] = a
        if self.plte:
            meta['palette'] = self.palette()
        return self.width, self.height, pixels, meta
    def read_flat(self):
        """Read a PNG file and decode it into flat row flat pixel
        format: all rows joined into one sequence.  Returns
        ``(width, height, pixels, metadata)``; may use excessive
        memory.
        """
        x, y, pixel, meta = self.read()
        arraycode = 'BH'[meta['bitdepth']>8]
        pixel = array(arraycode, itertools.chain(*pixel))
        return x, y, pixel, meta
    def palette(self, alpha='natural'):
        """Return the palette as a sequence of 3-tuples (RGB) or
        4-tuples (RGBA), synthesized from the ``PLTE`` and ``tRNS``
        chunks, which must already have been processed (for example
        via :meth:`preamble`).  All tuples have the same size:
        4-tuples when there is a ``tRNS`` chunk, or when `alpha` is
        ``'force'`` (missing alpha entries default to opaque, 255).
        """
        if not self.plte:
            raise FormatError(
                "Required PLTE chunk is missing in colour type 3 image.")
        plte = group(array('B', self.plte), 3)
        if self.trns or alpha == 'force':
            trns = array('B', self.trns or [])
            # Palette entries without a tRNS entry are opaque.
            trns.extend([255]*(len(plte)-len(trns)))
            plte = list(map(operator.add, plte, group(trns, 1)))
        return plte
    def asDirect(self):
        """Return the image as a direct representation: palettes
        (colour type 3) are expanded to RGB/RGBA, a ``tRNS`` chunk is
        converted into a real alpha channel, and an ``sBIT`` chunk
        causes values to be rescaled down to the significant bits.
        Returns ``(width, height, pixels, meta)`` with *meta*
        describing the returned (converted) pixels.
        """
        self.preamble()

        # Simple case, no conversion necessary.
        if not self.colormap and not self.trns and not self.sbit:
            return self.read()

        x,y,pixels,meta = self.read()

        if self.colormap:
            meta['colormap'] = False
            meta['alpha'] = bool(self.trns)
            meta['bitdepth'] = 8
            meta['planes'] = 3 + bool(self.trns)
            plte = self.palette()
            def iterpal(pixels):
                # Replace each palette index with its palette entry.
                for row in pixels:
                    row = [plte[x] for x in row]
                    yield array('B', itertools.chain(*row))
            pixels = iterpal(pixels)
        elif self.trns:
            # It would be nice if there was some reasonable way
            # of doing this without generating a whole load of
            # intermediate tuples.  But tuples does seem like the
            # easiest way, with no other way clearly much simpler or
            # much faster.  (Actually, the L to LA conversion could
            # perhaps go faster (all those 1-tuples!), but I still
            # wonder whether the code proliferation is worth it)
            it = self.transparent
            maxval = 2**meta['bitdepth']-1
            planes = meta['planes']
            meta['alpha'] = True
            meta['planes'] += 1
            typecode = 'BH'[meta['bitdepth']>8]
            def itertrns(pixels):
                for row in pixels:
                    # For each row we group it into pixels, then form a
                    # characterisation vector that says whether each
                    # pixel is opaque or not.  Then we convert
                    # True/False to 0/maxval (by multiplication),
                    # and add it as the extra channel.
                    row = group(row, planes)
                    opa = map(it.__ne__, row)
                    opa = map(maxval.__mul__, opa)
                    opa = list(zip(opa)) # convert to 1-tuples
                    yield array(typecode,
                                itertools.chain(*map(operator.add, row, opa)))
            pixels = itertrns(pixels)
        targetbitdepth = None
        if self.sbit:
            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
            targetbitdepth = max(sbit)
            if targetbitdepth > meta['bitdepth']:
                raise Error('sBIT chunk %r exceeds bitdepth %d' %
                            (sbit,self.bitdepth))
            if min(sbit) <= 0:
                raise Error('sBIT chunk %r has a 0-entry' % sbit)
            # No rescale needed when sBIT matches the stored depth.
            if targetbitdepth == meta['bitdepth']:
                targetbitdepth = None
        if targetbitdepth:
            shift = meta['bitdepth'] - targetbitdepth
            meta['bitdepth'] = targetbitdepth
            def itershift(pixels):
                for row in pixels:
                    yield [p >> shift for p in row]
            pixels = itershift(pixels)
        return x,y,pixels,meta
    def asFloat(self, maxval=1.0):
        """Return image pixels as per :meth:`asDirect`, but with all
        pixel values scaled to floating point values between 0.0 and
        *maxval*.  The ``bitdepth`` entry of the metadata is replaced
        by ``maxval``.
        """
        x,y,pixels,info = self.asDirect()
        sourcemaxval = 2**info['bitdepth']-1
        del info['bitdepth']
        info['maxval'] = float(maxval)
        factor = float(maxval)/float(sourcemaxval)
        def iterfloat():
            for row in pixels:
                yield [factor * p for p in row]
        return x,y,iterfloat(),info
    def _as_rescale(self, get, targetbitdepth):
        """Helper for :meth:`asRGB8` and :meth:`asRGBA8`: fetch pixels
        via the bound method *get* and rescale the sample values to
        *targetbitdepth* bits (rounding to nearest)."""
        width,height,pixels,meta = get()
        maxval = 2**meta['bitdepth'] - 1
        targetmaxval = 2**targetbitdepth - 1
        factor = float(targetmaxval) / float(maxval)
        meta['bitdepth'] = targetbitdepth
        def iterscale():
            for row in pixels:
                yield [int(round(x*factor)) for x in row]
        # When the depths already match, pass the rows through
        # unchanged.
        if maxval == targetmaxval:
            return width, height, pixels, meta
        else:
            return width, height, iterscale(), meta
    def asRGB8(self):
        """Return the image data as RGB pixels with 8 bits per sample,
        converting via :meth:`asRGB` and rescaling as needed."""
        return self._as_rescale(self.asRGB, 8)
    def asRGBA8(self):
        """Return the image data as RGBA pixels with 8 bits per
        sample, converting via :meth:`asRGBA` and rescaling as
        needed."""
        return self._as_rescale(self.asRGBA, 8)
    def asRGB(self):
        """Return the image as RGB pixels.  RGB colour images are
        passed through unchanged; greyscales are expanded into RGB
        triplets.  An image with an alpha channel raises an
        exception.  Return values are as for :meth:`read`, except that
        the metadata reflect the returned pixels (in particular
        ``metadata['greyscale']`` will be ``False``).
        """
        width,height,pixels,meta = self.asDirect()
        if meta['alpha']:
            raise Error("will not convert image with alpha channel to RGB")
        if not meta['greyscale']:
            return width,height,pixels,meta
        meta['greyscale'] = False
        typecode = 'BH'[meta['bitdepth'] > 8]
        def iterrgb():
            for row in pixels:
                # Replicate the grey channel into all three colour
                # channels via extended slices.
                a = array(typecode, [0]) * 3 * width
                for i in range(3):
                    a[i::3] = row
                yield a
        return width,height,iterrgb(),meta
    def asRGBA(self):
        """Return the image as RGBA pixels.  Greyscales are expanded
        into RGB triplets; an opaque alpha channel is synthesized when
        the source has none.  Return values are as for :meth:`read`,
        except that the metadata reflect the returned pixels
        (``metadata['greyscale']`` will be ``False`` and
        ``metadata['alpha']`` will be ``True``).
        """
        width,height,pixels,meta = self.asDirect()
        if meta['alpha'] and not meta['greyscale']:
            # Already RGBA; nothing to convert.
            return width,height,pixels,meta
        typecode = 'BH'[meta['bitdepth'] > 8]
        maxval = 2**meta['bitdepth'] - 1
        # A template row with every channel at maxval; fresh copies of
        # it give an alpha channel that defaults to fully opaque.
        maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
        def newarray():
            return array(typecode, maxbuffer)
        if meta['alpha'] and meta['greyscale']:
            # LA to RGBA
            def convert():
                for row in pixels:
                    # Create a fresh target row, then copy L channel
                    # into first three target channels, and A channel
                    # into fourth channel.
                    a = newarray()
                    pngfilters.convert_la_to_rgba(row, a)
                    yield a
        elif meta['greyscale']:
            # L to RGBA
            def convert():
                for row in pixels:
                    a = newarray()
                    pngfilters.convert_l_to_rgba(row, a)
                    yield a
        else:
            assert not meta['alpha'] and not meta['greyscale']
            # RGB to RGBA
            def convert():
                for row in pixels:
                    a = newarray()
                    pngfilters.convert_rgb_to_rgba(row, a)
                    yield a
        meta['alpha'] = True
        meta['greyscale'] = False
        return width,height,convert(),meta
def check_bitdepth_colortype(bitdepth, colortype):
    """Check that `bitdepth` and `colortype` are both valid, and
    specified in a valid combination.  Returns (silently) if valid;
    raises ``FormatError`` if not.
    """
    if bitdepth not in (1,2,4,8,16):
        raise FormatError("invalid bit depth %d" % bitdepth)
    if colortype not in (0,2,3,4,6):
        raise FormatError("invalid colour type %d" % colortype)
    # Check indexed (palettized) images have 8 or fewer bits
    # per pixel; check only indexed or greyscale images have
    # fewer than 8 bits per pixel.
    if colortype & 1 and bitdepth > 8:
        # BUG FIX: the format arguments were previously supplied as
        # (bitdepth, colortype), filling the "%d" slots in the wrong
        # order; the message names the colour type first.
        raise FormatError(
            "Indexed images (colour type %d) cannot"
            " have bitdepth > 8 (bit depth %d)."
            " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (colortype, bitdepth))
    if bitdepth < 8 and colortype not in (0,3):
        raise FormatError("Illegal combination of bit depth (%d)"
            " and colour type (%d)."
            " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (bitdepth, colortype))
def isinteger(x):
    """Return True when *x* compares equal to its integer conversion
    (False for values that cannot be converted at all)."""
    try:
        as_int = int(x)
    except (TypeError, ValueError):
        return False
    return as_int == x
# === Support for users without Cython ===
try:
    pngfilters
except NameError:
    class pngfilters:
        """Pure-Python fallback implementations of the filter-undo
        and channel-conversion primitives, used when an optimised
        (e.g. Cython) ``pngfilters`` has not been defined earlier."""
        def undo_filter_sub(filter_unit, scanline, previous, result):
            """Undo sub filter."""

            ai = 0
            # Loops starts at index fu.  Observe that the initial part
            # of the result is already filled in correctly with
            # scanline.
            for i in range(filter_unit, len(result)):
                x = scanline[i]
                a = result[ai]
                result[i] = (x + a) & 0xff
                ai += 1
        undo_filter_sub = staticmethod(undo_filter_sub)

        def undo_filter_up(filter_unit, scanline, previous, result):
            """Undo up filter."""

            for i in range(len(result)):
                x = scanline[i]
                b = previous[i]
                result[i] = (x + b) & 0xff
        undo_filter_up = staticmethod(undo_filter_up)

        def undo_filter_average(filter_unit, scanline, previous, result):
            """Undo average filter."""

            ai = -filter_unit
            for i in range(len(result)):
                x = scanline[i]
                if ai < 0:
                    a = 0
                else:
                    a = result[ai]
                b = previous[i]
                result[i] = (x + ((a + b) >> 1)) & 0xff
                ai += 1
        undo_filter_average = staticmethod(undo_filter_average)

        def undo_filter_paeth(filter_unit, scanline, previous, result):
            """Undo Paeth filter."""

            # Also used for ci.
            ai = -filter_unit
            for i in range(len(result)):
                x = scanline[i]
                if ai < 0:
                    a = c = 0
                else:
                    a = result[ai]
                    c = previous[ai]
                b = previous[i]
                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)
                if pa <= pb and pa <= pc:
                    pr = a
                elif pb <= pc:
                    pr = b
                else:
                    pr = c
                result[i] = (x + pr) & 0xff
                ai += 1
        undo_filter_paeth = staticmethod(undo_filter_paeth)

        def convert_la_to_rgba(row, result):
            """Convert a greyscale-plus-alpha row to RGBA: the L
            channel is copied into the first three target channels,
            the A channel into the fourth."""
            for i in range(3):
                result[i::4] = row[0::2]
            result[3::4] = row[1::2]
        convert_la_to_rgba = staticmethod(convert_la_to_rgba)

        def convert_l_to_rgba(row, result):
            """Convert a grayscale image to RGBA. This method assumes
            the alpha channel in result is already correctly
            initialized.
            """
            for i in range(3):
                result[i::4] = row
        convert_l_to_rgba = staticmethod(convert_l_to_rgba)

        def convert_rgb_to_rgba(row, result):
            """Convert an RGB image to RGBA. This method assumes the
            alpha channel in result is already correctly initialized.
            """
            for i in range(3):
                result[i::4] = row[i::3]
        convert_rgb_to_rgba = staticmethod(convert_rgb_to_rgba)
# === Command Line Support ===
def read_pam_header(infile):
    """Read (the rest of a) PAM header.  `infile` should be positioned
    immediately after the initial 'P7' line (at the beginning of the
    second line).  Returns ``('P7', width, height, depth, maxval)``,
    as for `read_pnm_header`.
    """
    # Unlike PBM, PGM, and PPM, we can read the header a line at a time.
    header = dict()
    while True:
        l = infile.readline().strip()
        if l == b'ENDHDR':
            break
        if not l:
            raise EOFError('PAM ended prematurely')
        # BUG FIX: skip comment lines.  This test was previously a
        # truncated (and syntactically broken) comparison
        # ``l[0] == b'``.
        if l.startswith(b'#'):
            continue
        l = l.split(None, 1)
        if l[0] not in header:
            header[l[0]] = l[1]
        else:
            # Repeated keys accumulate their values.
            header[l[0]] += b' ' + l[1]

    required = [b'WIDTH', b'HEIGHT', b'DEPTH', b'MAXVAL']
    WIDTH,HEIGHT,DEPTH,MAXVAL = required
    present = [x for x in required if x in header]
    if len(present) != len(required):
        raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
    width = int(header[WIDTH])
    height = int(header[HEIGHT])
    depth = int(header[DEPTH])
    maxval = int(header[MAXVAL])
    if (width <= 0 or
        height <= 0 or
        depth <= 0 or
        maxval <= 0):
        raise Error(
            'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
    return 'P7', width, height, depth, maxval
def read_pnm_header(infile, supported=(b'P5', b'P6')):
    """Read a PNM header and return ``(type, width, height, depth,
    maxval)``.  `depth` is synthesized as 1 for PBM/PGM and 3 for
    PPM; `maxval` is synthesized as 1 for PBM.  PAM ('P7') headers
    are delegated to `read_pam_header`.
    """
    # Generally, see http://netpbm.sourceforge.net/doc/ppm.html
    # and http://netpbm.sourceforge.net/doc/pam.html
    # Technically 'P7' must be followed by a newline, so by using
    # rstrip() we are being liberal in what we accept.
    type = infile.read(3).rstrip()
    if type not in supported:
        raise NotImplementedError('file format %s not supported' % type)
    if type == b'P7':
        # PAM header parsing is completely different.
        return read_pam_header(infile)
    # Expected number of tokens in header (3 for P4, 4 for P6)
    expected = 4
    pbm = (b'P1', b'P4')
    if type in pbm:
        expected = 3
    header = [type]

    # We have to read the rest of the header byte by byte because the
    # final whitespace character (immediately following the MAXVAL in
    # the case of P6) may not be a newline.
    def getc():
        c = infile.read(1)
        if not c:
            raise Error('premature EOF reading PNM header')
        return c

    c = getc()
    while True:
        # Skip whitespace that precedes a token.
        while c.isspace():
            c = getc()
        # Skip comments: consume to end of line, then resume scanning
        # from the top (more whitespace or comments may follow).
        # BUG FIX: these comparisons previously mixed the bytes value
        # `c` with str literals ('#', '\n\r'), so under Python 3 a
        # comment was never recognised; and the original control flow
        # fell straight into the digit check after a comment.
        if c == b'#':
            while c not in b'\n\r':
                c = getc()
            continue
        if not c.isdigit():
            raise Error('unexpected character %s found in header' % c)
        # All "tokens" are decimal integers: accumulate the digits.
        token = b''
        while c.isdigit():
            token += c
            c = getc()
        header.append(int(token))
        if len(header) == expected:
            break
    # Skip a trailing comment that immediately follows the final
    # token, if any.
    while c == b'#':
        while c not in b'\n\r':
            c = getc()
    if not c.isspace():
        raise Error('expected header to end with whitespace, not %s' % c)
    if type in pbm:
        # synthesize a MAXVAL
        header.append(1)
    depth = (1,3)[type == b'P6']
    return header[0], header[1], header[2], depth, header[3]
def write_pnm(file, width, height, pixels, meta):
    """Write a PNM/PAM image to the open binary file object *file*.

    *pixels* is an iterable of rows, each row a flat sequence of
    ``planes * width`` integer values; *meta* supplies 'bitdepth' and
    'planes' (1=grey, 2=grey+alpha, 3=RGB, 4=RGB+alpha).  Images with
    an alpha channel are written as PAM, the others as raw PGM/PPM.
    """
    bitdepth = meta['bitdepth']
    maxval = 2**bitdepth - 1
    planes = meta['planes']
    assert planes in (1, 2, 3, 4)
    if planes in (1, 3):
        # No alpha channel: a plain PGM (grey) or PPM (RGB) header.
        header = '%s %d %d %d\n' % (
            'P5' if planes == 1 else 'P6', width, height, maxval)
    else:
        # Alpha present: only the PAM (P7) format can carry it.
        tupltype = 'GRAYSCALE_ALPHA' if planes == 2 else 'RGB_ALPHA'
        header = ('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
                  'TUPLTYPE %s\nENDHDR\n' %
                  (width, height, planes, maxval, tupltype))
    file.write(header.encode('ascii'))
    # Sample values are big-endian, one byte each, or two for 16 bit.
    row_fmt = '>%d%s' % (planes * width, 'H' if maxval > 0xff else 'B')
    for row in pixels:
        file.write(struct.pack(row_fmt, *row))
    file.flush()
def color_triple(color):
    """Convert a '#RGB', '#RRGGBB' or '#RRRRGGGGBBBB' command line
    colour value to an RGB triple of integers.  Any other input yields
    None.
    """
    # Hex digits per channel, keyed by total string length.
    widths = {4: 1, 7: 2, 13: 4}
    if not color.startswith('#') or len(color) not in widths:
        return None
    w = widths[len(color)]
    return (int(color[1:1 + w], 16),
            int(color[1 + w:1 + 2 * w], 16),
            int(color[1 + 2 * w:1 + 3 * w], 16))
def _add_common_options(parser):
parser.add_option("-i", "--interlace",
default=False, action="store_true",
help="create an interlaced PNG file (Adam7)")
parser.add_option("-t", "--transparent",
action="store", type="string", metavar="#RRGGBB",
help="mark the specified colour as transparent")
parser.add_option("-b", "--background",
action="store", type="string", metavar="#RRGGBB",
help="save the specified background colour")
parser.add_option("-g", "--gamma",
action="store", type="float", metavar="value",
help="save the specified gamma value")
parser.add_option("-c", "--compression",
action="store", type="int", metavar="level",
help="zlib compression level (0-9)")
return parser
def _main(argv):
    """Command line entry point: with --read-png convert PNG to PNM,
    otherwise convert PNM/PAM (read from a file argument or stdin) to
    PNG on stdout.  *argv* is the full argument vector including the
    program name.
    """
    from optparse import OptionParser
    version = '%prog ' + __version__
    parser = OptionParser(version=version)
    parser.set_usage("%prog [options] [imagefile]")
    parser.add_option('-r', '--read-png', default=False,
                      action='store_true',
                      help='Read PNG, write PNM')
    parser.add_option("-a", "--alpha",
                      action="store", type="string", metavar="pgmfile",
                      help="alpha channel transparency (RGBA)")
    _add_common_options(parser)
    (options, args) = parser.parse_args(args=argv[1:])
    # Convert the '#RRGGBB'-style option strings to RGB triples.
    if options.transparent is not None:
        options.transparent = color_triple(options.transparent)
    if options.background is not None:
        options.background = color_triple(options.background)
    if len(args) == 0:
        infilename = '-'
        infile = sys.stdin
    elif len(args) == 1:
        infilename = args[0]
        infile = open(infilename, 'rb')
    else:
        parser.error("more than one input file")
    outfile = sys.stdout
    if sys.platform == "win32":
        # Put stdout into binary mode so the image bytes survive intact.
        import msvcrt, os
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    if options.read_png:
        # PNG in, PNM out.
        png = Reader(file=infile)
        width, height, pixels, meta = png.asDirect()
        write_pnm(outfile, width, height, pixels, meta)
    else:
        # PNM in, PNG out.  The channel count (depth) selects the PNG
        # colour model: 1=L, 2=LA, 3=RGB, 4=RGBA.
        format, width, height, depth, maxval = \
          read_pnm_header(infile, (b'P5', b'P6', b'P7'))
        greyscale = depth <= 2
        pamalpha = depth in (2, 4)
        supported = [2**x - 1 for x in range(1, 17)]
        try:
            mi = supported.index(maxval)
        except ValueError:
            raise NotImplementedError(
              'your maxval (%s) not in supported list %s' %
              (maxval, str(supported)))
        bitdepth = mi + 1
        writer = Writer(width, height,
                        greyscale=greyscale,
                        bitdepth=bitdepth,
                        interlace=options.interlace,
                        transparent=options.transparent,
                        background=options.background,
                        alpha=bool(pamalpha or options.alpha),
                        gamma=options.gamma,
                        compression=options.compression)
        if options.alpha:
            pgmfile = open(options.alpha, 'rb')
            # `supported` must be a sequence of byte strings; passing
            # the str 'P5' made the membership test misbehave on
            # Python 3 (bytes-in-str raises TypeError).
            format, awidth, aheight, adepth, amaxval = \
              read_pnm_header(pgmfile, (b'P5',))
            # read_pnm_header returns maxval as an int; the old
            # comparison with the string '255' was always unequal and
            # rejected every valid alpha file.
            if amaxval != 255:
                raise NotImplementedError(
                  'maxval %s not supported for alpha channel' % amaxval)
            if (awidth, aheight) != (width, height):
                raise ValueError("alpha channel image size mismatch"
                                 " (%s has %sx%s but %s has %sx%s)"
                                 % (infilename, width, height,
                                    options.alpha, awidth, aheight))
            writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
        else:
            writer.convert_pnm(infile, outfile)
# Script entry point: run the converter and report a png Error as a
# plain message on stderr instead of a traceback.
if __name__ == '__main__':
    try:
        _main(sys.argv)
    except Error as e:
        print(e, file=sys.stderr)
| true | true |
f7fcc4e17e205afef2697f69efdf88043812acb9 | 2,629 | py | Python | Sawtooth/families/suse/processor/main.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 2 | 2018-11-07T20:52:53.000Z | 2019-10-20T15:57:01.000Z | Sawtooth/families/suse/processor/main.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 3 | 2021-12-14T20:57:54.000Z | 2022-01-21T23:50:36.000Z | Sawtooth/families/suse/processor/main.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 2 | 2018-11-16T04:20:06.000Z | 2019-03-28T23:49:13.000Z | # Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""
main program, process suse family logic.
"""
import sys
import argparse
from sawtooth_sdk.processor.config import get_log_dir #pylint: disable=import-error
from sawtooth_sdk.processor.log import log_configuration #pylint: disable=import-error
from sawtooth_sdk.processor.log import init_console_logging #pylint: disable=import-error
from sawtooth_sdk.processor.core import TransactionProcessor #pylint: disable=import-error
from processor.handler import SuseTransactionHandler
DISTRIBUTION_NAME = 'suserum-suse'
def parse_args(args):
    """Parse the command line arguments for the suse processor.

    Args:
        args: argument list, without the program name.
    Returns:
        argparse.Namespace with ``connect`` and ``verbose`` attributes.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-C', '--connect',
                        default='tcp://localhost:4004',
                        help='Endpoint for the validator connection')
    parser.add_argument('-v', '--verbose',
                        action='count', default=0,
                        help='Increase output sent to stderr')
    return parser.parse_args(args)
def main(args=None):
    """Create and run the suse transaction processor.

    Connects to the validator endpoint from the command line options,
    configures file and console logging, registers the
    SuseTransactionHandler, then blocks in processor.start().

    Raises:
        Exception: wraps any RuntimeError raised by the SDK
        (connection errors and the like).
    """
    if args is None:
        args = sys.argv[1:]
    opts = parse_args(args)
    processor = None
    try:
        processor = TransactionProcessor(url=opts.connect)
        log_dir = get_log_dir()
        # Use the transaction processor's zmq identity for the log
        # filename; str(...)[2:-1] strips the b'...' wrapper from the
        # bytes repr -- NOTE(review): confirm zmq_id is bytes.
        log_configuration(
            log_dir=log_dir,
            name="suse-" + str(processor.zmq_id)[2:-1])
        init_console_logging(verbose_level=opts.verbose)
        handler = SuseTransactionHandler()
        processor.add_handler(handler)
        processor.start()
    except KeyboardInterrupt:
        # Ctrl-C is a normal shutdown, not an error.
        pass
    except RuntimeError as err:
        raise Exception("Error: {}".format(err))
    finally:
        # Always release the validator connection on the way out.
        if processor is not None:
            processor.stop()
| 27.968085 | 90 | 0.664511 |
import sys
import argparse
from sawtooth_sdk.processor.config import get_log_dir
from sawtooth_sdk.processor.log import log_configuration
from sawtooth_sdk.processor.log import init_console_logging
from sawtooth_sdk.processor.core import TransactionProcessor
from processor.handler import SuseTransactionHandler
DISTRIBUTION_NAME = 'suserum-suse'
def parse_args(args):
    """Parse the command line arguments for the suse processor.

    Args:
        args: argument list, without the program name.
    Returns:
        argparse.Namespace with ``connect`` and ``verbose`` attributes.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-C', '--connect',
                        default='tcp://localhost:4004',
                        help='Endpoint for the validator connection')
    parser.add_argument('-v', '--verbose',
                        action='count', default=0,
                        help='Increase output sent to stderr')
    return parser.parse_args(args)
def main(args=None):
    """Create and run the suse transaction processor.

    Connects to the validator endpoint from the command line options,
    configures logging, registers the SuseTransactionHandler, then
    blocks in processor.start().
    """
    if args is None:
        args = sys.argv[1:]
    opts = parse_args(args)
    processor = None
    try:
        processor = TransactionProcessor(url=opts.connect)
        log_dir = get_log_dir()
        # Log filename carries the processor's zmq identity;
        # str(...)[2:-1] strips the b'...' wrapper from the bytes repr.
        log_configuration(
            log_dir=log_dir,
            name="suse-" + str(processor.zmq_id)[2:-1])
        init_console_logging(verbose_level=opts.verbose)
        handler = SuseTransactionHandler()
        processor.add_handler(handler)
        processor.start()
    except KeyboardInterrupt:
        # Ctrl-C is a normal shutdown, not an error.
        pass
    except RuntimeError as err:
        raise Exception("Error: {}".format(err))
    finally:
        # Always release the validator connection on the way out.
        if processor is not None:
            processor.stop()
| true | true |
f7fcc69b926d09654446678e8d99284285e79b1e | 6,329 | py | Python | salt/modules/zenoss.py | mseidl/salt-1 | fbf31295d1544565b7b4199b55fa8c550515fd68 | [
"Apache-2.0"
] | 2 | 2018-11-08T02:59:24.000Z | 2021-01-04T00:30:50.000Z | salt/modules/zenoss.py | mseidl/salt-1 | fbf31295d1544565b7b4199b55fa8c550515fd68 | [
"Apache-2.0"
] | 4 | 2020-09-04T10:19:34.000Z | 2020-11-09T12:55:59.000Z | salt/modules/zenoss.py | mseidl/salt-1 | fbf31295d1544565b7b4199b55fa8c550515fd68 | [
"Apache-2.0"
] | 5 | 2017-06-16T23:48:13.000Z | 2021-04-08T17:43:48.000Z | # -*- coding: utf-8 -*-
'''
Module for working with the Zenoss API
.. versionadded:: 2016.3.0
:depends: requests
:configuration: This module requires a 'zenoss' entry in the master/minion config.
For example:
.. code-block:: yaml
zenoss:
hostname: https://zenoss.example.com
username: admin
password: admin123
verify_ssl: True
ca_bundle: /etc/ssl/certs/ca-certificates.crt
'''
from __future__ import absolute_import, print_function, unicode_literals
import re
import logging
try:
import requests # pylint: disable=unused-import
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
import salt.utils.json
import salt.utils.http
# Disable INFO level logs from requests/urllib3
urllib3_logger = logging.getLogger('urllib3')
urllib3_logger.setLevel(logging.WARNING)
log = logging.getLogger(__name__)
__virtualname__ = 'zenoss'
def __virtual__():
    '''
    Load this module only when the requests library is importable.
    '''
    if not HAS_LIBS:
        return False, ('The \'{0}\' module could not be loaded: '
                       '\'requests\' is not installed.'.format(__virtualname__))
    return __virtualname__
# Maps each Zenoss ExtJS router class name to the path fragment used
# by _router_request to build the
# '<hostname>/zport/dmd/<fragment>_router' endpoint URL.
ROUTERS = {'MessagingRouter': 'messaging',
           'EventsRouter': 'evconsole',
           'ProcessRouter': 'process',
           'ServiceRouter': 'service',
           'DeviceRouter': 'device',
           'NetworkRouter': 'network',
           'TemplateRouter': 'template',
           'DetailNavRouter': 'detailnav',
           'ReportRouter': 'report',
           'MibRouter': 'mib',
           'ZenPackRouter': 'zenpack'}
def _session():
    '''
    Create a session to be used when connecting to Zenoss.
    '''
    # Credentials and TLS settings come from the 'zenoss' section of
    # the master/minion configuration (see the module docstring).
    config = __salt__['config.option']('zenoss')
    return salt.utils.http.session(user=config.get("username"),
                                   password=config.get("password"),
                                   verify_ssl=config.get("verify_ssl", True),
                                   ca_bundle=config.get("ca_bundle"),
                                   headers={"Content-type": "application/json; charset=utf-8"})
def _router_request(router, method, data=None):
    '''
    Make a request to the Zenoss API router.

    Returns False for an unknown router name, otherwise the 'result'
    entry of the JSON-RPC response (or None if absent).  Raises
    Exception when the response looks like the login page, i.e. the
    credentials were rejected.
    '''
    if router not in ROUTERS:
        return False
    # Zenoss routers speak ExtDirect: a JSON list of rpc call dicts.
    req_data = salt.utils.json.dumps([dict(
        action=router,
        method=method,
        data=data,
        type='rpc',
        tid=1)])
    config = __salt__['config.option']('zenoss')
    log.debug('Making request to router %s with method %s', router, method)
    url = '{0}/zport/dmd/{1}_router'.format(config.get('hostname'), ROUTERS[router])
    response = _session().post(url, data=req_data)
    # The API returns a 200 response code even when auth is bad.
    # With bad auth, the login page is displayed.  Here I search for
    # an element on the login form to determine if auth failed.
    # response.content is bytes on Python 3 and re.search with a str
    # pattern against bytes raises TypeError, so match on the decoded
    # body (response.text) instead.
    if re.search('name="__ac_name"', response.text):
        log.error('Request failed. Bad username/password.')
        raise Exception('Request failed. Bad username/password.')
    return salt.utils.json.loads(response.content).get('result', None)
def _determine_device_class():
    '''
    If no device class is given when adding a device, this helps determine
    '''
    # Only Linux minions get a default; any other kernel falls through
    # and returns None, which addDevice may reject -- NOTE(review):
    # confirm whether other kernels should map to a class here.
    if __salt__['grains.get']('kernel') == 'Linux':
        return '/Server/Linux'
def find_device(device=None):
    '''
    Find a device in Zenoss. If device not found, returns None.
    Parameters:
        device: (Optional) Will use the grain 'fqdn' by default
    CLI Example:
        salt '*' zenoss.find_device
    '''
    # NOTE(review): unlike the docstring says, this function does not
    # itself fall back to the 'fqdn' grain; callers (device_exists,
    # set_prod_state) apply that default before calling in.
    # Fetch every device under /zport/dmd/Devices and scan for an
    # exact name match (no server-side filtering).
    data = [{'uid': '/zport/dmd/Devices', 'params': {}, 'limit': None}]
    all_devices = _router_request('DeviceRouter', 'getDevices', data=data)
    for dev in all_devices['devices']:
        if dev['name'] == device:
            # We need to save the hash for later operations
            # (setProductionState requires it as 'hashcheck').
            dev['hash'] = all_devices['hash']
            log.info('Found device %s in Zenoss', device)
            return dev
    log.info('Unable to find device %s in Zenoss', device)
    return None
def device_exists(device=None):
    '''
    Check to see if a device already exists in Zenoss.
    Parameters:
        device: (Optional) Will use the grain 'fqdn' by default
    CLI Example:
        salt '*' zenoss.device_exists
    '''
    if not device:
        device = __salt__['grains.get']('fqdn')
    # find_device returns the device dict, or None when absent.
    return bool(find_device(device))
def add_device(device=None, device_class=None, collector='localhost', prod_state=1000):
    '''
    A function to connect to a zenoss server and add a new device entry.
    Parameters:
        device: (Optional) Will use the grain 'fqdn' by default.
        device_class: (Optional) The device class to use. If none, will determine based on kernel grain.
        collector: (Optional) The collector to use for this device. Defaults to 'localhost'.
        prod_state: (Optional) The prodState to set on the device. If none, defaults to 1000 ( production )
    CLI Example:
        salt '*' zenoss.add_device
    '''
    if not device:
        device = __salt__['grains.get']('fqdn')
    if not device_class:
        device_class = _determine_device_class()
    log.info('Adding device %s to zenoss', device)
    # model=True asks Zenoss to model (discover) the device right away.
    data = dict(deviceName=device, deviceClass=device_class, model=True, collector=collector, productionState=prod_state)
    response = _router_request('DeviceRouter', 'addDevice', data=[data])
    return response
def set_prod_state(prod_state, device=None):
    '''
    A function to set the prod_state in zenoss.
    Parameters:
        prod_state: (Required) Integer value of the state
        device: (Optional) Will use the grain 'fqdn' by default.
    CLI Example:
        salt zenoss.set_prod_state 1000 hostname
    '''
    if not device:
        device = __salt__['grains.get']('fqdn')
    device_object = find_device(device)
    if not device_object:
        return "Unable to find a device in Zenoss for {0}".format(device)
    # %d requires an int -- NOTE(review): CLI callers may pass
    # prod_state as a str; confirm it is coerced upstream.
    log.info('Setting prodState to %d on %s device', prod_state, device)
    # hashcheck must be the listing hash captured by find_device.
    data = dict(uids=[device_object['uid']], prodState=prod_state, hashcheck=device_object['hash'])
    return _router_request('DeviceRouter', 'setProductionState', [data])
| 29.995261 | 121 | 0.630905 |
from __future__ import absolute_import, print_function, unicode_literals
import re
import logging
try:
import requests
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
import salt.utils.json
import salt.utils.http
urllib3_logger = logging.getLogger('urllib3')
urllib3_logger.setLevel(logging.WARNING)
log = logging.getLogger(__name__)
__virtualname__ = 'zenoss'
def __virtual__():
    '''Load this module only when the requests library is importable.'''
    if not HAS_LIBS:
        return False, ('The \'{0}\' module could not be loaded: '
                       '\'requests\' is not installed.'.format(__virtualname__))
    return __virtualname__
# Maps each Zenoss ExtJS router class name to the path fragment used
# by _router_request to build the
# '<hostname>/zport/dmd/<fragment>_router' endpoint URL.
ROUTERS = {'MessagingRouter': 'messaging',
           'EventsRouter': 'evconsole',
           'ProcessRouter': 'process',
           'ServiceRouter': 'service',
           'DeviceRouter': 'device',
           'NetworkRouter': 'network',
           'TemplateRouter': 'template',
           'DetailNavRouter': 'detailnav',
           'ReportRouter': 'report',
           'MibRouter': 'mib',
           'ZenPackRouter': 'zenpack'}
def _session():
    '''Build an HTTP session preconfigured from the 'zenoss' config
    section (credentials and TLS settings).
    '''
    config = __salt__['config.option']('zenoss')
    return salt.utils.http.session(
        user=config.get("username"),
        password=config.get("password"),
        verify_ssl=config.get("verify_ssl", True),
        ca_bundle=config.get("ca_bundle"),
        headers={"Content-type": "application/json; charset=utf-8"})
def _router_request(router, method, data=None):
    '''Make a request to the Zenoss API router.

    Returns False for an unknown router name, otherwise the 'result'
    entry of the JSON-RPC response (or None if absent).  Raises
    Exception when the response looks like the login page, i.e. the
    credentials were rejected.
    '''
    if router not in ROUTERS:
        return False
    # Zenoss routers speak ExtDirect: a JSON list of rpc call dicts.
    req_data = salt.utils.json.dumps([dict(
        action=router,
        method=method,
        data=data,
        type='rpc',
        tid=1)])
    config = __salt__['config.option']('zenoss')
    log.debug('Making request to router %s with method %s', router, method)
    url = '{0}/zport/dmd/{1}_router'.format(config.get('hostname'), ROUTERS[router])
    response = _session().post(url, data=req_data)
    # Bad credentials still yield HTTP 200 but serve the login page;
    # detect that by looking for the login form's username field.
    # response.content is bytes on Python 3 and re.search with a str
    # pattern against bytes raises TypeError, so match on the decoded
    # body (response.text) instead.
    if re.search('name="__ac_name"', response.text):
        log.error('Request failed. Bad username/password.')
        raise Exception('Request failed. Bad username/password.')
    return salt.utils.json.loads(response.content).get('result', None)
def _determine_device_class():
    # Default the device class from the minion's kernel grain: Linux
    # maps to /Server/Linux; any other kernel returns None implicitly.
    if __salt__['grains.get']('kernel') == 'Linux':
        return '/Server/Linux'
def find_device(device=None):
    '''Return the Zenoss device entry named *device*, or None when it
    is not present.  The listing hash is stashed on the returned entry
    because later state-changing calls need it.
    '''
    query = [{'uid': '/zport/dmd/Devices', 'params': {}, 'limit': None}]
    listing = _router_request('DeviceRouter', 'getDevices', data=query)
    for entry in listing['devices']:
        if entry['name'] != device:
            continue
        entry['hash'] = listing['hash']
        log.info('Found device %s in Zenoss', device)
        return entry
    log.info('Unable to find device %s in Zenoss', device)
    return None
def device_exists(device=None):
    '''Return True when *device* (default: this minion's fqdn grain)
    already exists in Zenoss.
    '''
    if not device:
        device = __salt__['grains.get']('fqdn')
    return bool(find_device(device))
def add_device(device=None, device_class=None, collector='localhost', prod_state=1000):
    '''Register a new device entry with Zenoss and ask for it to be
    modelled immediately.  Defaults: this minion's fqdn grain, a
    kernel-derived device class, the 'localhost' collector and
    production state 1000.
    '''
    if not device:
        device = __salt__['grains.get']('fqdn')
    if not device_class:
        device_class = _determine_device_class()
    log.info('Adding device %s to zenoss', device)
    payload = dict(deviceName=device, deviceClass=device_class, model=True,
                   collector=collector, productionState=prod_state)
    return _router_request('DeviceRouter', 'addDevice', data=[payload])
def set_prod_state(prod_state, device=None):
    '''Set the production state of *device* (default: this minion's
    fqdn grain).  Returns an explanatory string when the device cannot
    be found, otherwise the router response.
    '''
    if not device:
        device = __salt__['grains.get']('fqdn')
    target = find_device(device)
    if not target:
        return "Unable to find a device in Zenoss for {0}".format(device)
    log.info('Setting prodState to %d on %s device', prod_state, device)
    payload = dict(uids=[target['uid']], prodState=prod_state,
                   hashcheck=target['hash'])
    return _router_request('DeviceRouter', 'setProductionState', [payload])
| true | true |
f7fcc69d89c322c1fef976c8e3607520cb7d8d3f | 445 | py | Python | listening-to-the-lawn/replacelinks.py | victor-shepardson/victor-shepardson.github.io | e8eb21fd6b48efbf101db4cfd008a5832ceae743 | [
"MIT"
] | null | null | null | listening-to-the-lawn/replacelinks.py | victor-shepardson/victor-shepardson.github.io | e8eb21fd6b48efbf101db4cfd008a5832ceae743 | [
"MIT"
] | null | null | null | listening-to-the-lawn/replacelinks.py | victor-shepardson/victor-shepardson.github.io | e8eb21fd6b48efbf101db4cfd008a5832ceae743 | [
"MIT"
] | null | null | null | import sys, string
# remove links from svg hackily
def gen(fname):
    """Rewrite the SVG in *fname*, turning every <a>/</a> anchor tag
    into a <g>/</g> group, and write the stripped result to
    *fname* + '.replaced'.
    """
    with open(fname, 'r') as infile:
        lines = [line.strip() for line in infile]
    outname = fname + '.replaced'
    # with-blocks close both files; the old code leaked the output
    # handle, so its contents were only flushed at interpreter exit.
    with open(outname, 'w') as outfile:
        for line in lines:
            # str.replace works on Python 2 and 3; the string.replace
            # helper used before was removed in Python 3.
            out_line = line.replace('<a', '<g').replace('</a', '</g')
            outfile.write(out_line + '\n')
if __name__ == "__main__":
fnames = sys.argv[1:]
print fnames
for fname in fnames:
gen(fname) | 23.421053 | 65 | 0.649438 | import sys, string
def gen(fname):
    """Rewrite the SVG in *fname*, turning every <a>/</a> anchor tag
    into a <g>/</g> group, and write the stripped result to
    *fname* + '.replaced'.
    """
    with open(fname, 'r') as infile:
        lines = [line.strip() for line in infile]
    outname = fname + '.replaced'
    # with-blocks close both files; the old code leaked the output
    # handle, so its contents were only flushed at interpreter exit.
    with open(outname, 'w') as outfile:
        for line in lines:
            # str.replace works on Python 2 and 3; the string.replace
            # helper used before was removed in Python 3.
            out_line = line.replace('<a', '<g').replace('</a', '</g')
            outfile.write(out_line + '\n')
if __name__ == "__main__":
fnames = sys.argv[1:]
print fnames
for fname in fnames:
gen(fname) | false | true |
f7fcc7135bb2a90da76c1ee56a062852cd818cb4 | 2,188 | py | Python | task4.py | vknayak/Imdb-Scraping | 59f363decb7e586de75d4d1934ecd250098afd6c | [
"MIT"
] | null | null | null | task4.py | vknayak/Imdb-Scraping | 59f363decb7e586de75d4d1934ecd250098afd6c | [
"MIT"
] | null | null | null | task4.py | vknayak/Imdb-Scraping | 59f363decb7e586de75d4d1934ecd250098afd6c | [
"MIT"
] | null | null | null | import pprint,os,json,requests,string
from bs4 import BeautifulSoup
# Load the movie listing scraped earlier; the file must exist in the
# working directory at import time or this raises at module load.
with open("position_wise_movies.json","r+") as naik:
    python_data=json.load(naik)
def scrape_movie_details(movie_name):
    """Scrape the IMDb page of *movie_name* and return a detail dict.

    The movie's URL is looked up in the module-level ``python_data``
    listing (loaded from position_wise_movies.json).  Keys returned:
    name, director, country, language, poster_image_url, bio,
    runtime (minutes), genre.

    NOTE(review): if *movie_name* is absent from python_data, ``url``
    is never bound and requests.get raises NameError; likewise
    ``country`` stays unbound when no "Country" block is found.
    """
    movie_details={"name":"","director":[],"country":"","language":[],"poster_image_url":"","bio":"","runtime":"","genre":[]}
    # Resolve the movie's IMDb URL from the previously scraped listing.
    for movie_url in python_data:
        if movie_url['name']==movie_name:
            url=movie_url['url']
            break
    response=requests.get(url)
    soup=BeautifulSoup(response.text,"html.parser")
    # The h1 text is "Title (Year)"; keep everything before the "(".
    movie_name=soup.find('div',class_="title_wrapper").h1.text
    name=""
    for char in movie_name:
        if char!="(":
            name+=char
        else:
            movie_details['name']=name.strip()
            break
    # First credit_summary_item block holds the director link(s).
    directors=soup.find('div',class_="credit_summary_item")
    directors_list=[]
    direct=directors.findAll('a')
    languages_list=[]
    for j in direct:
        directors_list.append(j.text)
    movie_details['director']=directors_list
    # Country and language live in generic "txt-block" divs.
    for_country=soup.findAll("div",class_="txt-block")
    for i in for_country:
        if "Country" in i .text:
            country=i.find('a').text
        elif "Language" in i.text:
            languages=i.findAll('a')
            for lang in languages:
                languages_list.append(lang.text)
    movie_details['country']=country
    movie_details['language']=languages_list
    poster=soup.find("div",class_="poster").a["href"]
    link="https://www.imdb.com"+poster
    movie_details['poster_image_url']=link
    bio=soup.find('div',class_="summary_text").text
    movie_details['bio']=bio.strip()
    # Convert the "Xh Ymin" runtime text to minutes.
    # NOTE(review): only runtime_list[0] feeds the hour part, so this
    # assumes a single-digit hour count like "2h 15min"; a runtime such
    # as "45min" would be misread -- confirm the input format.
    runtime=soup.find("time").text
    runtime_list=(runtime.strip())
    exact_runtime=int(runtime_list[0])*60
    run=runtime_list.split()
    if len(run)>=2:
        if (runtime_list[3] in string.digits) and (runtime_list[4] in string.digits):
            exact_runtime+=int(runtime_list[3:5])
        elif (runtime_list[3] in string.digits):
            exact_runtime+=int(runtime_list[3])
    movie_details['runtime']=exact_runtime
    # Genres come from the "see-more" block labelled "Genres".
    genre_list=soup.findAll("div",class_="see-more inline canwrap")
    exact_genre_list=[]
    for i in genre_list:
        if "Genres" in i.text:
            genre=i.findAll('a')
            for each_genre in genre:
                exact_genre_list.append(each_genre.text)
    movie_details['genre']=exact_genre_list
    return movie_details
# movie_name=input("enter movie name")
# print(scrape_movie_details(movie_name)) | 32.176471 | 122 | 0.731261 | import pprint,os,json,requests,string
from bs4 import BeautifulSoup
# Load the movie listing scraped earlier; the file must exist in the
# working directory at import time or this raises at module load.
with open("position_wise_movies.json","r+") as naik:
    python_data=json.load(naik)
def scrape_movie_details(movie_name):
    """Scrape the IMDb page of *movie_name* and return a detail dict.

    The movie's URL is looked up in the module-level ``python_data``
    listing (loaded from position_wise_movies.json).  Keys returned:
    name, director, country, language, poster_image_url, bio,
    runtime (minutes), genre.

    NOTE(review): if *movie_name* is absent from python_data, ``url``
    is never bound and requests.get raises NameError; likewise
    ``country`` stays unbound when no "Country" block is found.
    """
    movie_details={"name":"","director":[],"country":"","language":[],"poster_image_url":"","bio":"","runtime":"","genre":[]}
    # Resolve the movie's IMDb URL from the previously scraped listing.
    for movie_url in python_data:
        if movie_url['name']==movie_name:
            url=movie_url['url']
            break
    response=requests.get(url)
    soup=BeautifulSoup(response.text,"html.parser")
    # The h1 text is "Title (Year)"; keep everything before the "(".
    movie_name=soup.find('div',class_="title_wrapper").h1.text
    name=""
    for char in movie_name:
        if char!="(":
            name+=char
        else:
            movie_details['name']=name.strip()
            break
    # First credit_summary_item block holds the director link(s).
    directors=soup.find('div',class_="credit_summary_item")
    directors_list=[]
    direct=directors.findAll('a')
    languages_list=[]
    for j in direct:
        directors_list.append(j.text)
    movie_details['director']=directors_list
    # Country and language live in generic "txt-block" divs.
    for_country=soup.findAll("div",class_="txt-block")
    for i in for_country:
        if "Country" in i .text:
            country=i.find('a').text
        elif "Language" in i.text:
            languages=i.findAll('a')
            for lang in languages:
                languages_list.append(lang.text)
    movie_details['country']=country
    movie_details['language']=languages_list
    poster=soup.find("div",class_="poster").a["href"]
    link="https://www.imdb.com"+poster
    movie_details['poster_image_url']=link
    bio=soup.find('div',class_="summary_text").text
    movie_details['bio']=bio.strip()
    # Convert the "Xh Ymin" runtime text to minutes.
    # NOTE(review): only runtime_list[0] feeds the hour part, so this
    # assumes a single-digit hour count like "2h 15min"; a runtime such
    # as "45min" would be misread -- confirm the input format.
    runtime=soup.find("time").text
    runtime_list=(runtime.strip())
    exact_runtime=int(runtime_list[0])*60
    run=runtime_list.split()
    if len(run)>=2:
        if (runtime_list[3] in string.digits) and (runtime_list[4] in string.digits):
            exact_runtime+=int(runtime_list[3:5])
        elif (runtime_list[3] in string.digits):
            exact_runtime+=int(runtime_list[3])
    movie_details['runtime']=exact_runtime
    # Genres come from the "see-more" block labelled "Genres".
    genre_list=soup.findAll("div",class_="see-more inline canwrap")
    exact_genre_list=[]
    for i in genre_list:
        if "Genres" in i.text:
            genre=i.findAll('a')
            for each_genre in genre:
                exact_genre_list.append(each_genre.text)
    movie_details['genre']=exact_genre_list
    return movie_details
| true | true |
f7fcc8ec873d0e355d4f5eda801ae6697426ef74 | 573 | py | Python | Where my anagrams at.py | FairfieldBW/Code-Wars | 62eb36ade3fdce7b95943bbbeb9ce07e78b561ed | [
"MIT"
] | null | null | null | Where my anagrams at.py | FairfieldBW/Code-Wars | 62eb36ade3fdce7b95943bbbeb9ce07e78b561ed | [
"MIT"
] | null | null | null | Where my anagrams at.py | FairfieldBW/Code-Wars | 62eb36ade3fdce7b95943bbbeb9ce07e78b561ed | [
"MIT"
] | null | null | null | '''
Write a function that will find all the anagrams of a word from a list. You will be given two inputs a word and an array with words. You should return an array of all the anagrams or an empty array if there are none.
'''
def anagrams(word, words):
    """Return the members of *words* (with spaces removed) that are
    anagrams of *word*.

    Two strings are anagrams when their sorted characters match;
    spaces in the candidate words are ignored.  Returns an empty list
    when there are no anagrams.
    """
    target = sorted(word)
    # The old per-candidate debug print was removed: it polluted
    # stdout on every call.
    return [candidate.replace(" ", "")
            for candidate in words
            if sorted(candidate.replace(" ", "")) == target]
print(anagrams('abba', ['a abb', 'abcd', 'bbaa', 'dada'])) | 28.65 | 217 | 0.685864 |
def anagrams(word, words):
    """Return the members of *words* (with spaces removed) that are
    anagrams of *word*.

    Two strings are anagrams when their sorted characters match;
    spaces in the candidate words are ignored.  Returns an empty list
    when there are no anagrams.
    """
    target = sorted(word)
    # The old per-candidate debug print was removed: it polluted
    # stdout on every call.
    return [candidate.replace(" ", "")
            for candidate in words
            if sorted(candidate.replace(" ", "")) == target]
print(anagrams('abba', ['a abb', 'abcd', 'bbaa', 'dada'])) | true | true |
f7fcc909d1166175f9a7c254b39953d051f457b6 | 1,652 | py | Python | gateController/consumers.py | Bicskow/rpiGateOpener | 0a3c9a98e18911d4ba323fdb9ede07d2621a7100 | [
"MIT"
] | null | null | null | gateController/consumers.py | Bicskow/rpiGateOpener | 0a3c9a98e18911d4ba323fdb9ede07d2621a7100 | [
"MIT"
] | null | null | null | gateController/consumers.py | Bicskow/rpiGateOpener | 0a3c9a98e18911d4ba323fdb9ede07d2621a7100 | [
"MIT"
] | null | null | null | import json
from gpiozero import LED
from time import sleep
from channels.generic.websocket import WebsocketConsumer
from .mailSender import MailSender
class GateTriggerConsumer(WebsocketConsumer):
    """WebSocket consumer that pulses a GPIO line to trigger the gate
    opener and reports the outcome both to the client and by mail.
    """

    # BCM pin and mail notification text for each supported command.
    # (The old code duplicated the whole pulse sequence per command.)
    _COMMANDS = {
        "pedestrian_access": (2, "Pedestrian access triggered"),
        "vehicle_access": (3, "Vehicle access triggered"),
    }

    def connect(self):
        # Short delay before accepting -- presumably lets the client /
        # hardware settle; NOTE(review): confirm it is still needed.
        sleep(0.5)
        self.accept()

    def disconnect(self, close_code):
        pass

    def receive(self, text_data):
        """Decode the JSON message and dispatch its 'command' field."""
        payload = json.loads(text_data)
        self.triggerGate(payload['command'])

    def triggerGate(self, command):
        """Pulse the relay for *command* and report the result.

        Sends 'gate_triggered', 'bad_command' or
        'internal_server_error' back over the socket and mails a
        matching notification.
        """
        try:
            mapping = self._COMMANDS.get(command)
            if mapping is None:
                self.send(text_data=json.dumps({'result': 'bad_command'}))
                MailSender.sendMail("Gate trigger failed with bad command")
                return
            pin, notice = mapping
            # active_high=False: .on() drives the pin to its active
            # (low) state for the 200 ms pulse.
            relay = LED(pin, active_high=False)
            relay.on()
            sleep(0.2)
            relay.off()
            relay.close()
            self.send(text_data=json.dumps({'result': 'gate_triggered'}))
            MailSender.sendMail(notice)
        except Exception as e:
            self.send(text_data=json.dumps({'result': 'internal_server_error'}))
            MailSender.sendMail(f"Gate trigger failed with exception: {e}")
| 35.913043 | 81 | 0.554479 | import json
from gpiozero import LED
from time import sleep
from channels.generic.websocket import WebsocketConsumer
from .mailSender import MailSender
class GateTriggerConsumer(WebsocketConsumer):
    """WebSocket consumer that pulses a GPIO line to trigger the gate
    opener and reports the outcome both to the client and by mail.
    """

    # BCM pin and mail notification text for each supported command.
    # (The old code duplicated the whole pulse sequence per command.)
    _COMMANDS = {
        "pedestrian_access": (2, "Pedestrian access triggered"),
        "vehicle_access": (3, "Vehicle access triggered"),
    }

    def connect(self):
        # Short delay before accepting -- presumably lets the client /
        # hardware settle; NOTE(review): confirm it is still needed.
        sleep(0.5)
        self.accept()

    def disconnect(self, close_code):
        pass

    def receive(self, text_data):
        """Decode the JSON message and dispatch its 'command' field."""
        payload = json.loads(text_data)
        self.triggerGate(payload['command'])

    def triggerGate(self, command):
        """Pulse the relay for *command* and report the result.

        Sends 'gate_triggered', 'bad_command' or
        'internal_server_error' back over the socket and mails a
        matching notification.
        """
        try:
            mapping = self._COMMANDS.get(command)
            if mapping is None:
                self.send(text_data=json.dumps({'result': 'bad_command'}))
                MailSender.sendMail("Gate trigger failed with bad command")
                return
            pin, notice = mapping
            # active_high=False: .on() drives the pin to its active
            # (low) state for the 200 ms pulse.
            relay = LED(pin, active_high=False)
            relay.on()
            sleep(0.2)
            relay.off()
            relay.close()
            self.send(text_data=json.dumps({'result': 'gate_triggered'}))
            MailSender.sendMail(notice)
        except Exception as e:
            self.send(text_data=json.dumps({'result': 'internal_server_error'}))
            MailSender.sendMail(f"Gate trigger failed with exception: {e}")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.