hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6982eb014cddfb256c18bfdc6bcd8c2bd2f4ff0d | 284 | py | Python | tests/test_init.py | toaomatis/greenchoice-variable-tariffs | 4dad9374a84f13a68d867e84944c577e7104ca8b | [
"MIT"
] | 2 | 2021-12-30T16:41:29.000Z | 2022-01-03T13:04:44.000Z | tests/test_init.py | toaomatis/greenchoice-variable-tariffs | 4dad9374a84f13a68d867e84944c577e7104ca8b | [
"MIT"
] | 5 | 2021-12-29T14:07:28.000Z | 2022-01-02T13:34:39.000Z | tests/test_init.py | toaomatis/greenchoice-variable-tariffs | 4dad9374a84f13a68d867e84944c577e7104ca8b | [
"MIT"
] | null | null | null | """Test component setup."""
from homeassistant.core import HomeAssistant
from custom_components.greenchoice_variable_tariffs import async_setup
async def test_async_setup(hass: HomeAssistant):
"""Test the component gets setup."""
assert await async_setup(hass, {}) is True
| 28.4 | 70 | 0.78169 |
f4e36c2dabcbb012720ba1acaccc21ad439344aa | 71 | py | Python | src/1sem/pow.py | freepvps/hsesamples | adbf35c1c94521d78fb75f72287512a37e49bdc8 | [
"MIT"
] | 2 | 2019-10-19T22:29:50.000Z | 2019-10-19T22:29:52.000Z | src/1sem/pow.py | freepvps/hsesamples | adbf35c1c94521d78fb75f72287512a37e49bdc8 | [
"MIT"
] | null | null | null | src/1sem/pow.py | freepvps/hsesamples | adbf35c1c94521d78fb75f72287512a37e49bdc8 | [
"MIT"
] | null | null | null | a = float(input('value: '))
b = float(input('power: '))
print(a ** b)
| 14.2 | 27 | 0.549296 |
c993f152165c570f8cb0a4e6d87e27fbb10f2748 | 1,608 | py | Python | uvicorn/protocols/utils.py | victoraugustolls/uvicorn | 9cd575daca0d0d3d4e2208b2934eee809fc3299c | [
"BSD-3-Clause"
] | 2 | 2020-02-20T15:15:59.000Z | 2021-03-28T02:47:08.000Z | uvicorn/protocols/utils.py | victoraugustolls/uvicorn | 9cd575daca0d0d3d4e2208b2934eee809fc3299c | [
"BSD-3-Clause"
] | 9 | 2021-04-12T13:44:34.000Z | 2021-04-13T16:50:08.000Z | env/lib/python3.9/site-packages/uvicorn/protocols/utils.py | simotwo/AbileneParadox-ddd | c85961efb37aba43c0d99ed1c36d083507e2b2d3 | [
"MIT"
] | 1 | 2020-12-29T04:28:49.000Z | 2020-12-29T04:28:49.000Z | import urllib
def get_remote_addr(transport):
socket_info = transport.get_extra_info("socket")
if socket_info is not None:
try:
info = socket_info.getpeername()
return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None
except OSError:
# This case appears to inconsistently occur with uvloop
# bound to a unix domain socket.
return None
info = transport.get_extra_info("peername")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
return (str(info[0]), int(info[1]))
return None
def get_local_addr(transport):
socket_info = transport.get_extra_info("socket")
if socket_info is not None:
info = socket_info.getsockname()
return (str(info[0]), int(info[1])) if isinstance(info, tuple) else None
info = transport.get_extra_info("sockname")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
return (str(info[0]), int(info[1]))
return None
def is_ssl(transport):
return bool(transport.get_extra_info("sslcontext"))
def get_client_addr(scope):
client = scope.get("client")
if not client:
return ""
return "%s:%d" % client
def get_path_with_query_string(scope):
path_with_query_string = urllib.parse.quote(
scope.get("root_path", "") + scope["path"]
)
if scope["query_string"]:
path_with_query_string = "{}?{}".format(
path_with_query_string, scope["query_string"].decode("ascii")
)
return path_with_query_string
| 30.339623 | 84 | 0.643657 |
83e966da7265badb242e64dcbf1e374e961ebd20 | 5,369 | py | Python | napari/utils/theme.py | 10XGenomics/napari | 2c7ce0ecbf66185b6984b1b36ebaa38672ab450b | [
"BSD-3-Clause"
] | null | null | null | napari/utils/theme.py | 10XGenomics/napari | 2c7ce0ecbf66185b6984b1b36ebaa38672ab450b | [
"BSD-3-Clause"
] | null | null | null | napari/utils/theme.py | 10XGenomics/napari | 2c7ce0ecbf66185b6984b1b36ebaa38672ab450b | [
"BSD-3-Clause"
] | null | null | null | # syntax_style for the console must be one of the supported styles from
# pygments - see here for examples https://help.farbox.com/pygments.html
import re
import warnings
from ast import literal_eval
try:
from qtpy import QT_VERSION
major, minor, *rest = QT_VERSION.split('.')
use_gradients = (int(major) >= 5) and (int(minor) >= 12)
except Exception:
use_gradients = False
def __getattr__(attr):
if attr == "palettes":
warnings.warn(
"palette is deprecated and will be removed after version 0.4.6."
" Please use get_theme and register_theme instead",
category=DeprecationWarning,
stacklevel=2,
)
return _themes
raise AttributeError
_themes = {
'dark': {
'folder': 'dark',
'background': 'rgb(38, 41, 48)',
'foreground': 'rgb(65, 72, 81)',
'primary': 'rgb(90, 98, 108)',
'secondary': 'rgb(134, 142, 147)',
'highlight': 'rgb(106, 115, 128)',
'text': 'rgb(240, 241, 242)',
'icon': 'rgb(209, 210, 212)',
'warning': 'rgb(153, 18, 31)',
'current': 'rgb(0, 122, 204)',
'syntax_style': 'native',
'console': 'rgb(0, 0, 0)',
'canvas': 'black',
},
'light': {
'folder': 'light',
'background': 'rgb(239, 235, 233)',
'foreground': 'rgb(214, 208, 206)',
'primary': 'rgb(188, 184, 181)',
'secondary': 'rgb(150, 146, 144)',
'highlight': 'rgb(163, 158, 156)',
'text': 'rgb(59, 58, 57)',
'icon': 'rgb(107, 105, 103)',
'warning': 'rgb(255, 18, 31)',
'current': 'rgb(253, 240, 148)',
'syntax_style': 'default',
'console': 'rgb(255, 255, 255)',
'canvas': 'white',
},
}
gradient_pattern = re.compile(r'([vh])gradient\((.+)\)')
darken_pattern = re.compile(r'{{\s?darken\((\w+),?\s?([-\d]+)?\)\s?}}')
lighten_pattern = re.compile(r'{{\s?lighten\((\w+),?\s?([-\d]+)?\)\s?}}')
opacity_pattern = re.compile(r'{{\s?opacity\((\w+),?\s?([-\d]+)?\)\s?}}')
def darken(color: str, percentage=10):
if color.startswith('rgb('):
color = literal_eval(color.lstrip('rgb(').rstrip(')'))
ratio = 1 - float(percentage) / 100
red, green, blue = color
red = min(max(int(red * ratio), 0), 255)
green = min(max(int(green * ratio), 0), 255)
blue = min(max(int(blue * ratio), 0), 255)
return f'rgb({red}, {green}, {blue})'
def lighten(color: str, percentage=10):
if color.startswith('rgb('):
color = literal_eval(color.lstrip('rgb(').rstrip(')'))
ratio = float(percentage) / 100
red, green, blue = color
red = min(max(int(red + (255 - red) * ratio), 0), 255)
green = min(max(int(green + (255 - green) * ratio), 0), 255)
blue = min(max(int(blue + (255 - blue) * ratio), 0), 255)
return f'rgb({red}, {green}, {blue})'
def opacity(color: str, value=255):
if color.startswith('rgb('):
color = literal_eval(color.lstrip('rgb(').rstrip(')'))
red, green, blue = color
return f'rgba({red}, {green}, {blue}, {max(min(int(value), 255), 0)})'
def gradient(stops, horizontal=True):
if not use_gradients:
return stops[-1]
if horizontal:
grad = 'qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, '
else:
grad = 'qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, '
_stops = [f'stop: {n} {stop}' for n, stop in enumerate(stops)]
grad += ", ".join(_stops) + ")"
return grad
def template(css, **theme):
def darken_match(matchobj):
color, percentage = matchobj.groups()
return darken(theme[color], percentage)
def lighten_match(matchobj):
color, percentage = matchobj.groups()
return lighten(theme[color], percentage)
def opacity_match(matchobj):
color, percentage = matchobj.groups()
return opacity(theme[color], percentage)
def gradient_match(matchobj):
horizontal = matchobj.groups()[1] == 'h'
stops = [i.strip() for i in matchobj.groups()[1].split('-')]
return gradient(stops, horizontal)
for k, v in theme.items():
css = gradient_pattern.sub(gradient_match, css)
css = darken_pattern.sub(darken_match, css)
css = lighten_pattern.sub(lighten_match, css)
css = opacity_pattern.sub(opacity_match, css)
css = css.replace('{{ %s }}' % k, v)
return css
def get_theme(name):
"""Get a theme based on its name
Parameters
----------
name : str
Name of requested theme.
Returns:
--------
theme: dict of str: str
Theme mapping elements to colors. A copy is created
so that manipulating this theme can be done without
side effects.
"""
if name in _themes:
theme = _themes[name]
return theme.copy()
else:
raise ValueError(
f"Unrecognized theme {name}. Availabe themes are {available_themes()}"
)
def register_theme(name, theme):
"""Get a theme based on its name
Parameters
----------
name : str
Name of requested theme.
theme: dict of str: str
Theme mapping elements to colors.
"""
_themes[name] = theme
def available_themes():
"""List available themes
Returns:
--------
list of str
Names of available themes.
"""
return list(_themes)
| 29.179348 | 82 | 0.568449 |
8e97a27ed5493db015de8ae532e72ba660d6140a | 1,589 | py | Python | src3/binancedex/cli.py | unification-com/binancedex-leaderboard | 1e06857093cef8fa27784bca8e28a356dda27db7 | [
"MIT"
] | null | null | null | src3/binancedex/cli.py | unification-com/binancedex-leaderboard | 1e06857093cef8fa27784bca8e28a356dda27db7 | [
"MIT"
] | null | null | null | src3/binancedex/cli.py | unification-com/binancedex-leaderboard | 1e06857093cef8fa27784bca8e28a356dda27db7 | [
"MIT"
] | 1 | 2019-07-02T09:47:00.000Z | 2019-07-02T09:47:00.000Z | import logging
import os
from time import sleep, time
import click
from binancedex.api import get_trades
from binancedex.models import Session, Trade
from binancedex.stats import BASE_ASSET, render
log = logging.getLogger(__name__)
@click.group()
def main():
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
@main.command()
def report():
"""
Generate a static report
"""
render()
@main.command()
def fetch_all_trades():
"""
Exhaustively fetches all the trades
"""
symbol = f'{BASE_ASSET}_BNB'
now = int(time() * 1000)
items = 1000
offset = 0
while items == 1000:
fetch_trades = get_trades(symbol, now, offset=offset)
for trade in fetch_trades:
exists = Session.query(Trade).filter_by(
trade_id=trade['tradeId']).first()
if not exists:
trade_obj = Trade.from_trade(trade)
Session.add(trade_obj)
Session.commit()
items = len(fetch_trades)
offset = offset + items
sleep(1)
@main.command()
def fetch_latest_trades():
"""
This one is to be wired into a cron job
:return:
"""
symbol = f'{BASE_ASSET}_BNB'
now = int(time() * 1000)
fetch_trades = get_trades(symbol, now)
for trade in fetch_trades:
exists = Session.query(Trade).filter_by(
trade_id=trade['tradeId']).first()
if not exists:
trade_obj = Trade.from_trade(trade)
Session.add(trade_obj)
Session.commit()
if __name__ == "__main__":
main()
| 20.636364 | 66 | 0.614852 |
d44d4328cabfed2868ee2cd07502d3bbcd0c1b5e | 7,354 | py | Python | bikesanity/output_transformers/pdf_templates/base_pdf_template.py | JohnHenrySplitMyHeart/bikesanity | bf27f162017fdc919534e16ac12b940e1b873e93 | [
"Apache-2.0"
] | 4 | 2021-01-22T14:13:25.000Z | 2021-05-04T16:59:35.000Z | bikesanity/output_transformers/pdf_templates/base_pdf_template.py | JohnHenrySplitMyHeart/bikesanity | bf27f162017fdc919534e16ac12b940e1b873e93 | [
"Apache-2.0"
] | null | null | null | bikesanity/output_transformers/pdf_templates/base_pdf_template.py | JohnHenrySplitMyHeart/bikesanity | bf27f162017fdc919534e16ac12b940e1b873e93 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
from fpdf import FPDF
from bikesanity.io_utils.resources import create_temp_from_resource
class BasePdfTemplate(FPDF):
DEJAVU_FONT = 'DejaVu'
A4_WIDTH = 210
A4_HEIGHT = 297
MARGIN = 20
TOP_MARGIN = 10
PAGE_WIDTH = A4_WIDTH - (MARGIN*2)
IMAGE_DPI = 200
MM_PER_INCH = 25.4
CAPTION_WIDTH = 150
def __init__(self, title, author, part=None):
self.draw_header = False
self.journal_title = title
self.author = author
self.part = part
super().__init__(orientation='P', unit='mm', format='A4')
self.tmp_files = []
self.load_font_resource('DejaVuSans.ttf', '')
self.load_font_resource('DejaVuSans-Bold.ttf', 'B')
self.load_font_resource('DejaVuSans-Oblique.ttf', 'I')
self.page_title = title
self.setup_pages()
self.image_pair = False
self.page_added = True
self.image_path = None
def load_font_resource(self, font_name, weight):
# Get a temporary file from the named resource
temp_font_file = create_temp_from_resource(['fonts', font_name])
# Add the font from this temporary file (only method FPDF supports)
self.add_font(self.DEJAVU_FONT, weight, temp_font_file, uni=True)
# Remove the temp file once its loaded
self.tmp_files.append(temp_font_file)
def setup_pages(self):
self.set_font(self.DEJAVU_FONT, '', 14)
self.set_margins(self.MARGIN, self.TOP_MARGIN, self.MARGIN)
self.add_page()
def update_page_title(self, name):
self.draw_header = False
self.page_title = name
def limit_title(self, title, max_width=PAGE_WIDTH):
terms = title.split(' ')
terms_to_use = []
for i in range(0, len(terms)):
terms_to_use.append(terms[i])
title = ' '.join(terms_to_use)
if self.get_string_width(title) > max_width: break
return title
def header(self):
if not self.draw_header:
self.draw_header = True
return
self.set_font(self.DEJAVU_FONT, 'B', 12)
# Limit title if too long
title = self.limit_title(self.page_title)
# Calculate width of title and position
w = self.get_string_width(title) + 6
self.set_x((210 - w) / 2)
# Title
self.cell(w, 9, title, 0, 1, 'C', 0)
# Line break
self.ln(10)
self.page_top = self.get_y()
def footer(self):
# No footer on first few pages
if self.page_no() < 3: return
# Don't draw footer if content overlaps
if self.get_y() > self.A4_HEIGHT - self.TOP_MARGIN: return
# Position at 1.5 cm from bottom
self.set_y(-15)
self.set_font(self.DEJAVU_FONT, 'I', 8)
# Text color in gray
self.set_text_color(128)
footer_text = '{0} by {1}{2} - Page {3}'.format(self.journal_title, self.author, ' (part {0})'.format(self.part) if self.part else '', self.page_no()-2)
# Page number
self.cell(0, 10, footer_text, 0, 0, 'C')
def cover_title(self, title, subtitle, author, distance_statement, part=None):
self.set_font(self.DEJAVU_FONT, 'B', 20)
self.ln(15)
# Title
self.multi_cell(0, 20, title, 0, 'C', 0)
self.ln(1)
if part:
self.set_font(self.DEJAVU_FONT, '', 20)
self.cell(0, 5, 'Part {0}'.format(part), 0, 0, 'C')
self.ln(6)
# Line break
self.ln(6)
self.set_font(self.DEJAVU_FONT, '', 16)
self.multi_cell(0, 10, subtitle, 0, 'C', 0)
self.ln(4)
self.set_font(self.DEJAVU_FONT, 'I', 10)
self.multi_cell(0, 5, distance_statement, 0, 'C', 0)
self.ln(5)
self.set_font(self.DEJAVU_FONT, '', 16)
self.multi_cell(0, 20, author, 0, 'C', 0)
self.ln(8)
def add_toc(self, toc_items):
self.set_font(self.DEJAVU_FONT, 'B', 18)
self.cell(0, 5, 'Table of Contents', 0, 0, 'C', 0)
self.ln(15)
self.set_font(self.DEJAVU_FONT, 'I', 9)
for toc_item in toc_items:
if toc_item.is_header:
self.set_font(self.DEJAVU_FONT, 'B', 9)
else:
self.set_font(self.DEJAVU_FONT, 'I', 9)
# Limit the title if it's too long
title = self.limit_title(toc_item.title, 125)
str_size = self.get_string_width(title)
self.cell(str_size+2, 9, title)
# Filling dots
page_cell_size=self.get_string_width(toc_item.page_no) + 2
dot_length = self.PAGE_WIDTH - str_size - page_cell_size - 10
nb = int(dot_length // self.get_string_width('.'))
dots = '.' * nb
self.cell(dot_length, 9, dots, 0, 0, 'R')
# Page number
self.cell(page_cell_size, 9, toc_item.page_no, 0, 1,'R')
def section_title(self, title):
self.set_font(self.DEJAVU_FONT, 'B', 18)
self.multi_cell(0, 10, title, 0, 'C', 0)
self.ln(20)
def chapter_title(self, label, date, distance, total_distance):
self.set_font(self.DEJAVU_FONT, 'B', 14)
# Colors of frame, background and text
self.set_draw_color(0, 0, 0)
self.set_fill_color(230, 230, 0)
# Thickness of frame (1 mm)
self.set_line_width(0.5)
# Background color
self.set_fill_color(200, 220, 255)
# Title
self.multi_cell(0, 10, label, 1, 'C', 1)
# Line break
self.ln(4)
if not distance: return
if total_distance and date:
distance_statement = '{0} - of total {1} - on {2}'.format(distance, total_distance, date)
elif total_distance:
distance_statement = '{0} - of total {1}'.format(distance, total_distance)
else:
distance_statement = distance
self.set_font(self.DEJAVU_FONT, 'I', 10)
self.cell(0, 5, distance_statement, 0, 0, 'L', 0)
self.ln(20)
def add_image_format_tolerant(self, image_path, x=None, y=None, width=None, height=None):
for ext in [ None, '.jpeg', '.png']:
if self.try_add_image(image_path, x, y, width, height, ext):
break
def try_add_image(self, image_path, x, y, width, height, ext=None):
updated_ext = image_path[:image_path.rfind('.')] + ext if ext else image_path
if ext and image_path != updated_ext:
shutil.copyfile(image_path, updated_ext)
try:
self.image(updated_ext, x=x, y=y, w=width if width else 0, h=height if height else 0)
return True
except Exception as exc:
return False
def clipping_rect(self, x, y, w, h, outline=False):
op= 'S' if outline else 'n'
self._out('q {0} {1} {2} {3} re W {4}'.format(
x * self.k,
(self.h - y) * self.k,
w * self.k,
-h * self.k,
op
))
def unset_clipping(self):
self._out('Q')
def cleanup_tmp_files(self):
for tmp_file in self.tmp_files:
try:
os.remove(tmp_file)
os.remove(tmp_file + '.pkl')
os.remove(tmp_file + '.cw127.pkl')
except:
pass
| 31.698276 | 160 | 0.575333 |
9152aa1296200d3213ad597ceca10904a367689b | 3,087 | py | Python | tests/integration/cloudformation/test_connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 1 | 2015-10-15T12:55:01.000Z | 2015-10-15T12:55:01.000Z | tests/integration/cloudformation/test_connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 1 | 2021-04-30T21:19:53.000Z | 2021-04-30T21:19:53.000Z | tests/integration/cloudformation/test_connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 1 | 2020-07-25T22:31:28.000Z | 2020-07-25T22:31:28.000Z | #!/usr/bin/env python
import time
import json
from tests.unit import unittest
from boto.cloudformation.connection import CloudFormationConnection
BASIC_EC2_TEMPLATE = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template EC2InstanceSample",
"Parameters": {
},
"Mappings": {
"RegionMap": {
"us-east-1": {
"AMI": "ami-7f418316"
}
}
},
"Resources": {
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionMap",
{
"Ref": "AWS::Region"
},
"AMI"
]
},
"UserData": {
"Fn::Base64": "a" * 15000
}
}
}
},
"Outputs": {
"InstanceId": {
"Description": "InstanceId of the newly created EC2 instance",
"Value": {
"Ref": "Ec2Instance"
}
},
"AZ": {
"Description": "Availability Zone of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"AvailabilityZone"
]
}
},
"PublicIP": {
"Description": "Public IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicIp"
]
}
},
"PrivateIP": {
"Description": "Private IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateIp"
]
}
},
"PublicDNS": {
"Description": "Public DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicDnsName"
]
}
},
"PrivateDNS": {
"Description": "Private DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateDnsName"
]
}
}
}
}
class TestCloudformationConnection(unittest.TestCase):
def setUp(self):
self.connection = CloudFormationConnection()
self.stack_name = 'testcfnstack' + str(int(time.time()))
def test_large_template_stack_size(self):
# See https://github.com/boto/boto/issues/1037
body = self.connection.create_stack(
self.stack_name,
template_body=json.dumps(BASIC_EC2_TEMPLATE))
self.addCleanup(self.connection.delete_stack, self.stack_name)
if __name__ == '__main__':
unittest.main()
| 27.810811 | 82 | 0.435374 |
516e4fe1ab14e43309bc43c2592ec12e9937772c | 2,347 | py | Python | src/simulator/services/persistent_state/persistent_state_view.py | ed741/PathBench | 50fe138eb1f824f49fe1a862705e435a1c3ec3ae | [
"BSD-3-Clause"
] | 46 | 2020-12-25T04:09:15.000Z | 2022-03-25T12:32:42.000Z | src/simulator/services/persistent_state/persistent_state_view.py | ed741/PathBench | 50fe138eb1f824f49fe1a862705e435a1c3ec3ae | [
"BSD-3-Clause"
] | 36 | 2020-12-21T16:10:02.000Z | 2022-01-03T01:42:01.000Z | src/simulator/services/persistent_state/persistent_state_view.py | judicaelclair/PathBenchURO | 101e67674efdfa8e27e1cf7787dac9fdf99552fe | [
"BSD-3-Clause"
] | 11 | 2021-01-06T23:34:12.000Z | 2022-03-21T17:21:47.000Z | from structures import DynamicColour, Colour
from simulator.services.event_manager.events.colour_update_event import ColourUpdateEvent
from simulator.services.persistent_state.persistent_state_object import PersistentStateObject
from simulator.services.persistent_state.persistent_state import PersistentState
from utility.compatibility import Final
from typing import Dict, Any
class PersistentStateView(PersistentStateObject):
_state: PersistentState
index: int
colours: Dict[str, DynamicColour]
EFFECTIVE_VIEW: Final[int] = -1
def __init__(self, state: PersistentState, index: int) -> None:
super().__init__(state)
self.index = index
self.colours = {}
def __colour_callback(self, colour: DynamicColour) -> None:
if self.index == self._state.views.view_idx:
self._state.views.effective_view.colours[colour.name].set_all(colour.colour, colour.visible)
for s in self._state.all_services:
s.ev_manager.post(ColourUpdateEvent(colour, self))
def _add_colour(self, name: str, colour: Colour, visible: bool) -> DynamicColour:
if name in self.colours:
return self.colours[name]
dc = self.colours[name] = DynamicColour(colour, name, self.__colour_callback, visible)
self.__colour_callback(dc)
return dc
def _from_view(self, other: 'PersistentStateView') -> None:
self.colours = {k: v for k, v in self.colours.items() if k in other.colours}
for n, c in other.colours.items():
if n not in self.colours:
self._add_colour(n, c.colour, c.visible)
else:
self.colours[n].set_all(c.colour, c.visible)
def _from_json(self, data: Dict[str, Any]) -> None:
for n, c in data["colours"].items():
colour = Colour(*c["colour"])
visible = bool(c["visible"])
self.colours[n] = DynamicColour(colour, n, self.__colour_callback, visible)
def _to_json(self) -> Dict[str, Any]:
data = {}
dc = data["colours"] = {}
for n, c in self.colours.items():
dc[n] = {}
dc[n]["colour"] = tuple(c.colour)
dc[n]["visible"] = c.visible
return data
def is_effective(self) -> bool:
return self.index == PersistentStateView.EFFECTIVE_VIEW
| 39.116667 | 104 | 0.654879 |
907ae397f1a294417ce823957536e9d4a7f785cd | 15,025 | py | Python | aries_cloudagent/storage/tests/test_indy_storage.py | msembinelli/aries-cloudagent-python | a5a29dab30238f52dcfb6645aab115d01720a5c7 | [
"Apache-2.0"
] | 1 | 2020-07-02T12:36:32.000Z | 2020-07-02T12:36:32.000Z | aries_cloudagent/storage/tests/test_indy_storage.py | msembinelli/aries-cloudagent-python | a5a29dab30238f52dcfb6645aab115d01720a5c7 | [
"Apache-2.0"
] | 1 | 2020-03-06T12:11:29.000Z | 2020-03-06T12:11:29.000Z | aries_cloudagent/storage/tests/test_indy_storage.py | msembinelli/aries-cloudagent-python | a5a29dab30238f52dcfb6645aab115d01720a5c7 | [
"Apache-2.0"
] | 1 | 2020-08-07T08:03:17.000Z | 2020-08-07T08:03:17.000Z | import json
import pytest
import os
import indy.anoncreds
import indy.crypto
import indy.did
import indy.wallet
from asynctest import mock as async_mock
from aries_cloudagent.wallet import indy as test_wallet
from aries_cloudagent.wallet.indy import IndyWallet
from aries_cloudagent.storage.error import StorageError, StorageSearchError
from aries_cloudagent.storage.indy import IndyStorage
from aries_cloudagent.storage.record import StorageRecord
from .. import indy as test_module
from . import test_basic_storage
@pytest.fixture()
async def store():
key = await IndyWallet.generate_wallet_key()
wallet = IndyWallet(
{
"auto_create": True,
"auto_remove": True,
"name": "test-wallet",
"key": key,
"key_derivation_method": "RAW", # much slower tests with argon-hashed keys
}
)
await wallet.open()
yield IndyStorage(wallet)
await wallet.close()
@pytest.mark.indy
class TestIndyStorage(test_basic_storage.TestBasicStorage):
"""Tests for indy storage."""
@pytest.mark.asyncio
async def test_record(self):
with async_mock.patch.object(
test_wallet, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
fake_wallet = IndyWallet(
{
"auto_create": True,
"auto_remove": True,
"name": "test_pg_wallet",
"key": await IndyWallet.generate_wallet_key(),
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": json.dumps({"url": "dummy"}),
"storage_creds": json.dumps(
{
"account": "postgres",
"password": "mysecretpassword",
"admin_account": "postgres",
"admin_password": "mysecretpassword",
}
),
}
)
await fake_wallet.open()
storage = IndyStorage(fake_wallet)
for record_x in [
None,
StorageRecord(
type="connection",
value=json.dumps(
{
"initiator": "self",
"invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
"state": "invitation",
"routing_state": "none",
"error_msg": None,
"their_label": None,
"created_at": "2019-05-14 21:58:24.143260+00:00",
"updated_at": "2019-05-14 21:58:24.143260+00:00",
}
),
tags={
"initiator": "self",
"invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
"state": "invitation",
"routing_state": "none",
},
id=None,
),
StorageRecord(
type=None,
value=json.dumps(
{
"initiator": "self",
"invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
"state": "invitation",
"routing_state": "none",
"error_msg": None,
"their_label": None,
"created_at": "2019-05-14 21:58:24.143260+00:00",
"updated_at": "2019-05-14 21:58:24.143260+00:00",
}
),
tags={
"initiator": "self",
"invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
"state": "invitation",
"routing_state": "none",
},
id="f96f76ec-0e9b-4f32-8237-f4219e6cf0c7",
),
StorageRecord(
type="connection",
value=None,
tags={
"initiator": "self",
"invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
"state": "invitation",
"routing_state": "none",
},
id="f96f76ec-0e9b-4f32-8237-f4219e6cf0c7",
),
]:
with pytest.raises(StorageError):
await storage.add_record(record_x)
with pytest.raises(StorageError):
await storage.get_record(None, "dummy-id")
with pytest.raises(StorageError):
await storage.get_record("connection", None)
with async_mock.patch.object(
test_module.non_secrets, "get_wallet_record", async_mock.CoroutineMock()
) as mock_get_record:
mock_get_record.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonInvalidStructure
)
with pytest.raises(test_module.StorageError):
await storage.get_record("connection", "dummy-id")
with async_mock.patch.object(
test_module.non_secrets,
"update_wallet_record_value",
async_mock.CoroutineMock(),
) as mock_update_value, async_mock.patch.object(
test_module.non_secrets,
"update_wallet_record_tags",
async_mock.CoroutineMock(),
) as mock_update_tags, async_mock.patch.object(
test_module.non_secrets,
"delete_wallet_record",
async_mock.CoroutineMock(),
) as mock_delete:
mock_update_value.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonInvalidStructure
)
mock_update_tags.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonInvalidStructure
)
mock_delete.side_effect = test_module.IndyError(
test_module.ErrorCode.CommonInvalidStructure
)
rec = StorageRecord(
type="connection",
value=json.dumps(
{
"initiator": "self",
"invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
"state": "invitation",
"routing_state": "none",
"error_msg": None,
"their_label": None,
"created_at": "2019-05-14 21:58:24.143260+00:00",
"updated_at": "2019-05-14 21:58:24.143260+00:00",
}
),
tags={
"initiator": "self",
"invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
"state": "invitation",
"routing_state": "none",
},
id="f96f76ec-0e9b-4f32-8237-f4219e6cf0c7",
)
with pytest.raises(test_module.StorageError):
await storage.update_record_value(rec, "dummy-value")
with pytest.raises(test_module.StorageError):
await storage.update_record_tags(rec, {"tag": "tag"})
with pytest.raises(test_module.StorageError):
await storage.delete_record(rec)
@pytest.mark.asyncio
async def test_storage_search_x(self):
with async_mock.patch.object(
test_wallet, "load_postgres_plugin", async_mock.MagicMock()
) as mock_load, async_mock.patch.object(
indy.wallet, "create_wallet", async_mock.CoroutineMock()
) as mock_create, async_mock.patch.object(
indy.wallet, "open_wallet", async_mock.CoroutineMock()
) as mock_open, async_mock.patch.object(
indy.anoncreds, "prover_create_master_secret", async_mock.CoroutineMock()
) as mock_master, async_mock.patch.object(
indy.wallet, "close_wallet", async_mock.CoroutineMock()
) as mock_close, async_mock.patch.object(
indy.wallet, "delete_wallet", async_mock.CoroutineMock()
) as mock_delete:
fake_wallet = IndyWallet(
{
"auto_create": True,
"auto_remove": True,
"name": "test_pg_wallet",
"key": await IndyWallet.generate_wallet_key(),
"key_derivation_method": "RAW",
"storage_type": "postgres_storage",
"storage_config": json.dumps({"url": "dummy"}),
"storage_creds": json.dumps(
{
"account": "postgres",
"password": "mysecretpassword",
"admin_account": "postgres",
"admin_password": "mysecretpassword",
}
),
}
)
await fake_wallet.open()
storage = IndyStorage(fake_wallet)
search = storage.search_records("connection")
with pytest.raises(StorageSearchError):
await search.fetch(10)
with async_mock.patch.object(
indy.non_secrets, "open_wallet_search", async_mock.CoroutineMock()
) as mock_indy_open_search, async_mock.patch.object(
indy.non_secrets, "close_wallet_search", async_mock.CoroutineMock()
) as mock_indy_close_search:
mock_indy_open_search.side_effect = test_module.IndyError("no open")
search = storage.search_records("connection")
with pytest.raises(StorageSearchError):
await search.open()
await search.close()
with async_mock.patch.object(
indy.non_secrets, "open_wallet_search", async_mock.CoroutineMock()
) as mock_indy_open_search, async_mock.patch.object(
indy.non_secrets,
"fetch_wallet_search_next_records",
async_mock.CoroutineMock(),
) as mock_indy_fetch, async_mock.patch.object(
indy.non_secrets, "close_wallet_search", async_mock.CoroutineMock()
) as mock_indy_close_search:
mock_indy_fetch.side_effect = test_module.IndyError("no fetch")
search = storage.search_records("connection")
await search.open()
with pytest.raises(StorageSearchError):
await search.fetch(10)
await search.close()
with async_mock.patch.object(
indy.non_secrets, "open_wallet_search", async_mock.CoroutineMock()
) as mock_indy_open_search, async_mock.patch.object(
indy.non_secrets, "close_wallet_search", async_mock.CoroutineMock()
) as mock_indy_close_search:
mock_indy_close_search.side_effect = test_module.IndyError("no close")
search = storage.search_records("connection")
await search.open()
with pytest.raises(StorageSearchError):
await search.close()
    # TODO get these to run in docker ci/cd
    @pytest.mark.asyncio
    @pytest.mark.postgres
    async def test_postgres_wallet_storage_works(self):
        """
        Ensure that postgres wallet operations work (create and open wallet, store and search, drop wallet)
        """
        # Integration test: needs a reachable postgres instance whose DSN is
        # supplied via the POSTGRES_URL environment variable; fail fast otherwise.
        postgres_url = os.environ.get("POSTGRES_URL")
        if not postgres_url:
            pytest.fail("POSTGRES_URL not configured")

        # "RAW" key derivation expects a pre-generated raw wallet key.
        wallet_key = await IndyWallet.generate_wallet_key()
        postgres_wallet = IndyWallet(
            {
                "auto_create": False,
                "auto_remove": False,
                "name": "test_pg_wallet",
                "key": wallet_key,
                "key_derivation_method": "RAW",
                "storage_type": "postgres_storage",
                # NOTE(review): hard-coded throwaway test credentials, not secrets.
                "storage_config": '{"url":"' + postgres_url + '", "max_connections":5}',
                "storage_creds": '{"account":"postgres","password":"mysecretpassword","admin_account":"postgres","admin_password":"mysecretpassword"}',
            }
        )
        await postgres_wallet.create()
        await postgres_wallet.open()

        storage = IndyStorage(postgres_wallet)

        # add and then fetch a record
        record = StorageRecord(
            type="connection",
            value=json.dumps(
                {
                    "initiator": "self",
                    "invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
                    "state": "invitation",
                    "routing_state": "none",
                    "error_msg": None,
                    "their_label": None,
                    "created_at": "2019-05-14 21:58:24.143260+00:00",
                    "updated_at": "2019-05-14 21:58:24.143260+00:00",
                }
            ),
            tags={
                "initiator": "self",
                "invitation_key": "9XgL7Y4TBTJyVJdomT6axZGUFg9npxcrXnRT4CG8fWYg",
                "state": "invitation",
                "routing_state": "none",
            },
            id="f96f76ec-0e9b-4f32-8237-f4219e6cf0c7",
        )
        await storage.add_record(record)
        g_rec = await storage.get_record(record.type, record.id)

        # now try search; close the handle even if fetch fails.
        search = None
        try:
            search = storage.search_records("connection")
            await search.open()
            records = await search.fetch(10)
        finally:
            if search:
                await search.close()

        # Drop the test wallet so repeated runs start clean.
        await postgres_wallet.close()
        await postgres_wallet.remove()
| 42.205056 | 151 | 0.519201 |
a4f58c6c79925b02838db6e27714fc9c2504b1ee | 2,572 | py | Python | old/examples/exchangeMethods/TempEx.py | radical-cybertools/radical.repex | 6fea43716c1f3ee9c83211d31e7f85cbc677c0c6 | [
"MIT"
] | 3 | 2015-10-21T14:19:01.000Z | 2017-07-11T22:51:58.000Z | old/examples/exchangeMethods/TempEx.py | radical-cybertools/radical.repex | 6fea43716c1f3ee9c83211d31e7f85cbc677c0c6 | [
"MIT"
] | 77 | 2015-10-14T21:47:04.000Z | 2021-11-21T15:16:57.000Z | old/examples/exchangeMethods/TempEx.py | radical-cybertools/radical.repex | 6fea43716c1f3ee9c83211d31e7f85cbc677c0c6 | [
"MIT"
] | 1 | 2016-06-22T14:31:40.000Z | 2016-06-22T14:31:40.000Z | #!/usr/bin/env python
import os
import sys
import math
import numpy as np
import random
####------------
Replicas = int(sys.argv[1])  # number of replicas in the ensemble
Cycle = int(sys.argv[2])     # current simulation cycle (names the output file)


def TemperatureExchange(Replicas):
    """Read per-replica AMBER mdinfo files and write accepted exchange pairs.

    For each replica ``n`` this parses ``mdinfo_<n>`` for the current
    temperature (``TEMP(K)``) and potential energy (``EPtot``), applies the
    Metropolis criterion to replica pairs, and writes the chosen pairs to
    ``exchangePairs_<Cycle>.dat`` (one "i j" pair per line).
    """
    # Collect temperature / potential energy for every replica.
    Temp = 0.0
    PotEng = 0.0
    Replica_Temps = []
    Replica_Energies = []
    for n in range(Replicas):
        # Perhaps it's possible to read the outfile instead of mdinfo?
        # BUG FIX: the original never closed these files (`f.close` without
        # parentheses is a no-op); `with` guarantees closure.
        with open('mdinfo_{0}'.format(n)) as f:
            for line in f:
                if "TEMP(K)" in line:
                    Temp = float(line.split()[8])
                    Replica_Temps.append(Temp)
                elif "EPtot" in line:
                    PotEng = float(line.split()[8])
                    Replica_Energies.append(PotEng)

    # Metropolis exchange computation: beta_i = 1 / (Kb * T_i).
    Kb = 0.0019872041  # Boltzmann constant in kcal/mol
    Replica_Temps = np.array(Replica_Temps)
    Replica_Energies = np.array(Replica_Energies)
    # Dimensionless inverse temperatures (beta).
    Replica_Temps = np.reciprocal(np.multiply(Kb, Replica_Temps))

    exchangeList = []
    for i in range(Replicas):
        for j in range(Replicas):
            p = math.exp(np.multiply(
                (Replica_Energies[i] - Replica_Energies[j]),
                (Replica_Temps[i] - Replica_Temps[j])))
            # Once an exchange partner is found, move to the next i.
            # TODO: skip values of i that were already chosen as a j partner.
            # NOTE(review): every branch below breaks, so this inner loop
            # always terminates on its first iteration (j == 0) — only the
            # pair (i, 0) or (i, i) is ever considered. Confirm against the
            # intended algorithm before relying on the output.
            if p > 1:
                exchangeList.append('%d %d' % (i, j))
                break
            else:
                q = random.random()
                if q < p:
                    exchangeList.append('%d %d' % (i, j))
                    break
                else:
                    exchangeList.append('%d %d' % (i, i))
                    break

    # BUG FIX: the original iterated the *characters* of each pair string and
    # joined them with spaces ("0 1" became "0   1"), and left the file
    # unclosed. Write each already-formatted pair directly.
    with open('exchangePairs_{0}.dat'.format(Cycle), 'w') as out:
        for pair in exchangeList:
            out.write(pair + '\n')


TemperatureExchange(Replicas)
| 29.563218 | 116 | 0.541602 |
7277a16dbd17781ff085c9861b0c0bce95d85e4b | 18,717 | py | Python | bootstrapped/bootstrap.py | shyla-kupis/bootstrapped | 59d0cea9d701e41b1b58ed68ab125243d5d791f0 | [
"BSD-3-Clause"
] | 578 | 2017-02-22T18:18:20.000Z | 2020-12-27T18:49:25.000Z | bootstrapped/bootstrap.py | shyla-kupis/bootstrapped | 59d0cea9d701e41b1b58ed68ab125243d5d791f0 | [
"BSD-3-Clause"
] | 29 | 2017-02-23T02:22:24.000Z | 2020-11-17T04:59:27.000Z | bootstrapped/bootstrap.py | shyla-kupis/bootstrapped | 59d0cea9d701e41b1b58ed68ab125243d5d791f0 | [
"BSD-3-Clause"
] | 86 | 2017-02-22T20:32:37.000Z | 2020-12-29T04:32:45.000Z | # Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
'''Functions that allow one to create bootstrapped confidence intervals'''
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import numpy as _np
import multiprocessing as _multiprocessing
import scipy.sparse as _sparse
class BootstrapResults(object):
    """A bootstrap point estimate together with its confidence bounds.

    Scalar arithmetic (+, -, *) applies to all three components at once,
    so an entire confidence interval can be shifted or rescaled in one
    expression. Bounds are normalized so lower_bound <= upper_bound.
    """

    def __init__(self, lower_bound, value, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.value = value
        # Re-order the bounds if they arrived (or became) inverted.
        if self.lower_bound > self.upper_bound:
            self.lower_bound, self.upper_bound = self.upper_bound, self.lower_bound

    def __str__(self):
        return '{1} ({0}, {2})'.format(
            self.lower_bound, self.value, self.upper_bound)

    def __repr__(self):
        return self.__str__()

    def _apply(self, other, func):
        # Apply a scalar binary op component-wise; the constructor fixes the
        # bound ordering if the op flipped it (e.g. subtraction from a scalar).
        components = (self.lower_bound, self.value, self.upper_bound)
        lo, mid, hi = (func(c, other) for c in components)
        return BootstrapResults(lo, mid, hi)

    def __add__(self, other):
        return self._apply(float(other), lambda a, b: a + b)

    def __radd__(self, other):
        return self._apply(float(other), lambda a, b: a + b)

    def __sub__(self, other):
        return self._apply(float(other), lambda a, b: a - b)

    def __rsub__(self, other):
        return self._apply(float(other), lambda a, b: b - a)

    def __mul__(self, other):
        return self._apply(float(other), lambda a, b: a * b)

    def __rmul__(self, other):
        return self._apply(float(other), lambda a, b: a * b)

    def error_width(self):
        '''Returns: upper_bound - lower_bound'''
        return self.upper_bound - self.lower_bound

    def error_fraction(self):
        '''Returns the error_width / value (inf when value is exactly 0).'''
        if self.value == 0:
            return _np.inf
        return self.error_width() / self.value

    def is_significant(self):
        # Significant when the interval does not straddle zero.
        return _np.sign(self.upper_bound) == _np.sign(self.lower_bound)

    def get_result(self):
        '''Returns:
            -1 if statistically significantly negative
            +1 if statistically significantly positive
            0 otherwise
        '''
        return int(self.is_significant()) * _np.sign(self.value)
def _get_confidence_interval(bootstrap_dist, stat_val, alpha, is_pivotal):
    '''Get the bootstrap confidence interval for a given distribution.
    Args:
        bootstrap_dist: numpy array of bootstrap results from
            bootstrap_distribution() or bootstrap_ab_distribution()
        stat_val: the overall statistic that error bars are being
            calculated for.
        alpha: the alpha value for the confidence intervals.
        is_pivotal: if true, use the pivotal method; if false, use the
            percentile method.
    '''
    lo_pct = 100 * (alpha / 2.)
    hi_pct = 100 * (1 - alpha / 2.)
    if is_pivotal:
        # Pivotal (basic) interval: reflect the bootstrap percentiles
        # around the observed statistic.
        low = 2 * stat_val - _np.percentile(bootstrap_dist, hi_pct)
        val = stat_val
        high = 2 * stat_val - _np.percentile(bootstrap_dist, lo_pct)
    else:
        # Percentile interval: read bounds and median straight from
        # the bootstrap distribution.
        low = _np.percentile(bootstrap_dist, lo_pct)
        val = _np.percentile(bootstrap_dist, 50)
        high = _np.percentile(bootstrap_dist, hi_pct)
    return BootstrapResults(low, val, high)
def _needs_sparse_unification(values_lists):
non_zeros = values_lists[0] != 0
for v in values_lists:
v_nz = v != 0
non_zeros = (non_zeros + v_nz) > 0
non_zero_size = non_zeros.sum()
for v in values_lists:
if non_zero_size != v.data.shape[0]:
return True
return False
def _validate_arrays(values_lists):
t = values_lists[0]
t_type = type(t)
if not isinstance(t, _sparse.csr_matrix) and not isinstance(t, _np.ndarray):
raise ValueError(('The arrays must either be of type '
'scipy.sparse.csr_matrix or numpy.array'))
for _, values in enumerate(values_lists[1:]):
if not isinstance(values, t_type):
raise ValueError('The arrays must all be of the same type')
if t.shape != values.shape:
raise ValueError('The arrays must all be of the same shape')
if isinstance(t, _sparse.csr_matrix):
if values.shape[0] > 1:
raise ValueError(('The sparse matrix must have shape 1 row X N'
' columns'))
if isinstance(t, _sparse.csr_matrix):
if _needs_sparse_unification(values_lists):
raise ValueError(('The non-zero entries in the sparse arrays'
' must be aligned: see '
'bootstrapped.unify_sparse_vectors function'))
def _generate_distributions(values_lists, num_iterations):
    # Draw `num_iterations` bootstrap resamples for every array, using the
    # *same* resampled indexes across arrays so paired records stay paired
    # (required for ratio statistics like SUM(num)/SUM(denom)).
    if isinstance(values_lists[0], _sparse.csr_matrix):
        # in the sparse case we dont actually need to bootstrap
        # the full sparse array since most values are 0
        # instead for each bootstrap iteration we:
        #  1. generate B number of non-zero entries to sample from the
        #     binomial distribution
        #  2. resample with replacement the non-zero entries from values
        #     B times
        #  3. create a new sparse array with the B resamples, zero otherwise
        results = [[] for _ in range(len(values_lists))]

        pop_size = values_lists[0].shape[1]
        non_sparse_size = values_lists[0].data.shape[0]
        # Probability that a randomly drawn record is a stored (non-zero) entry.
        p = non_sparse_size * 1.0 / pop_size

        for _ in range(num_iterations):
            # One shared index draw per iteration: indexing into .data works
            # across arrays because _validate_arrays enforced aligned patterns.
            ids = _np.random.choice(
                non_sparse_size,
                _np.random.binomial(pop_size, p),
                replace=True,
            )
            for arr, values in zip(results, values_lists):
                data = values.data
                # Pack the resampled entries into columns 0..len(ids)-1 of a
                # new single-row matrix of the original width.
                d = _sparse.csr_matrix(
                    (
                        data[ids],
                        (_np.zeros_like(ids), _np.arange(len(ids)))
                    ),
                    shape=(1, pop_size),
                )
                arr.append(d)
        # Stack each array's per-iteration rows into one matrix per input.
        return [_sparse.vstack(r) for r in results]
    else:
        # Dense case: one (num_iterations x n) matrix of resampled indexes,
        # reused for every array in values_lists.
        values_shape = values_lists[0].shape[0]
        ids = _np.random.choice(
            values_shape,
            (num_iterations, values_shape),
            replace=True,
        )
        results = [values[ids] for values in values_lists]
        return results
def _bootstrap_sim(values_lists, stat_func_lists, num_iterations,
                   iteration_batch_size, seed):
    '''Returns simulated bootstrap distribution.

    See the bootstrap() function for arg descriptions.
    '''
    if seed is not None:
        # Distinct per-worker seeds keep parallel jobs from producing
        # identical resamples.
        _np.random.seed(seed)

    num_iterations = int(num_iterations)
    iteration_batch_size = int(iteration_batch_size)

    results = [[] for _ in values_lists]

    # Simulate in batches of at most iteration_batch_size rounds to bound
    # the size of the temporary resample matrices.
    for rng in range(0, num_iterations, iteration_batch_size):
        max_rng = min(iteration_batch_size, num_iterations - rng)

        values_sims = _generate_distributions(values_lists, max_rng)

        # Apply each array's statistic to its batch of resamples.
        for i, values_sim, stat_func in zip(range(len(values_sims)), values_sims, stat_func_lists):
            results[i].extend(stat_func(values_sim))

    return _np.array(results)
def _bootstrap_distribution(values_lists, stat_func_lists,
                            num_iterations, iteration_batch_size, num_threads):
    '''Returns the simulated bootstrap distribution. The idea is to sample the same
        indexes in a bootstrap re-sample across all arrays passed into values_lists.

        This is especially useful when you want to co-sample records in a ratio metric.
            numerator[k].sum() / denominator[k].sum()
        and not
            numerator[ j ].sum() / denominator[k].sum()
    Args:
        values_lists: list of numpy arrays (or scipy.sparse.csr_matrix)
            each represents a set of values to bootstrap. All arrays in values_lists
            must be of the same length.
        stat_func_lists: statistic to bootstrap for each element in values_lists.
        num_iterations: number of bootstrap iterations / resamples / simulations
            to perform.
        iteration_batch_size: The bootstrap sample can generate very large
            matrices. This argument limits the memory footprint by
            batching bootstrap rounds. If unspecified the underlying code
            will produce a matrix of len(values) x num_iterations. If specified
            the code will produce sets of len(values) x iteration_batch_size
            (one at a time) until num_iterations have been simulated.
            Defaults to no batching.
        num_threads: The number of threads to use. This speeds up calculation of
            the bootstrap. Defaults to 1. If -1 is specified then
            multiprocessing.cpu_count() is used instead.
    Returns:
        The set of bootstrap resamples where each stat_function is applied on
        the bootstrapped values.
    '''
    _validate_arrays(values_lists)

    if iteration_batch_size is None:
        # No batching: one batch covering the whole simulation.
        iteration_batch_size = num_iterations

    num_iterations = int(num_iterations)
    iteration_batch_size = int(iteration_batch_size)

    num_threads = int(num_threads)
    if num_threads == -1:
        num_threads = _multiprocessing.cpu_count()

    if num_threads <= 1:
        # Single-threaded path runs in-process with the ambient RNG state.
        results = _bootstrap_sim(values_lists, stat_func_lists,
                                 num_iterations, iteration_batch_size, None)
    else:
        pool = _multiprocessing.Pool(num_threads)

        # NOTE(review): ceil() rounds up per job, so the pooled run can
        # produce up to (num_threads - 1) extra iterations in total.
        iter_per_job = _np.ceil(num_iterations * 1.0 / num_threads)

        results = []
        # Each worker gets its own seed so resamples differ across jobs.
        for seed in _np.random.randint(0, 2**32 - 1, num_threads):
            r = pool.apply_async(_bootstrap_sim, (values_lists, stat_func_lists,
                                                  iter_per_job,
                                                  iteration_batch_size, seed))
            results.append(r)

        # Concatenate the per-worker result rows column-wise.
        results = _np.hstack([res.get() for res in results])

        pool.close()

    return results
def bootstrap(values, stat_func, denominator_values=None, alpha=0.05,
              num_iterations=10000, iteration_batch_size=10, is_pivotal=True,
              num_threads=1, return_distribution=False):
    '''Returns bootstrap estimate.
    Args:
        values: numpy array (or scipy.sparse.csr_matrix) of values to bootstrap
        stat_func: statistic to bootstrap. We provide several default functions:
                * stat_functions.mean
                * stat_functions.sum
                * stat_functions.std
        denominator_values: optional array that does division after the
            statistic is aggregated. This lets you compute group level division
            statistics. One corresponding entry per record in @values.
            Example:
                SUM(value) / SUM(denom) instead of MEAN(value / denom)
                Ex. Cost Per Click
                cost per click across a group
                    SUM(revenue) / SUM(clicks)
                mean cost per click for each
                    MEAN(revenue / clicks)
        alpha: alpha value representing the confidence interval.
            Defaults to 0.05, i.e., 95th-CI.
        num_iterations: number of bootstrap iterations to run. The higher this
            number the more sure you can be about the stability your bootstrap.
            By this - we mean the returned interval should be consistent across
            runs for the same input. This also consumes more memory and makes
            analysis slower. Defaults to 10000.
        iteration_batch_size: The bootstrap sample can generate very large
            matrices. This argument limits the memory footprint by
            batching bootstrap rounds. If unspecified the underlying code
            will produce a matrix of len(values) x num_iterations. If specified
            the code will produce sets of len(values) x iteration_batch_size
            (one at a time) until num_iterations have been simulated.
            Defaults to 10. Passing None will calculate the full simulation in one step.
        is_pivotal: if true, use the pivotal method for bootstrapping confidence
            intervals. If false, use the percentile method.
        num_threads: The number of threads to use. This speeds up calculation of
            the bootstrap. Defaults to 1. If -1 is specified then
            multiprocessing.cpu_count() is used instead.
        return_distribution: if True, return the raw bootstrap distribution
            instead of a BootstrapResults summary. Defaults to False.
    Returns:
        BootstrapResults representing CI and estimated value
        (or the raw bootstrap distribution when return_distribution is True).
    '''
    if denominator_values is None:
        values_lists = [values]
        stat_func_lists = [stat_func]

        # No denominator: the aggregated statistic is used directly.
        def do_division(x):
            return x

        stat_val = stat_func(values)[0]
    else:
        values_lists = [values, denominator_values]
        stat_func_lists = [stat_func] * 2

        # Ratio statistic: aggregate numerator and denominator separately
        # (co-sampled), then divide.
        def do_division(num, denom):
            return num / denom

        stat_val = stat_func(values)[0] / stat_func(denominator_values)[0]

    distribution_results = _bootstrap_distribution(values_lists,
                                                   stat_func_lists,
                                                   num_iterations,
                                                   iteration_batch_size,
                                                   num_threads)

    bootstrap_dist = do_division(*distribution_results)

    if return_distribution:
        return bootstrap_dist
    else:
        return _get_confidence_interval(bootstrap_dist, stat_val, alpha,
                                        is_pivotal)
def bootstrap_ab(test, ctrl, stat_func, compare_func, test_denominator=None,
                 ctrl_denominator=None, alpha=0.05, num_iterations=10000,
                 iteration_batch_size=10, scale_test_by=1.0,
                 is_pivotal=True, num_threads=1, return_distribution=False):
    '''Returns bootstrap confidence intervals for an A/B test.
    Args:
        test: numpy array (or scipy.sparse.csr_matrix) of test results
        ctrl: numpy array (or scipy.sparse.csr_matrix) of ctrl results
        stat_func: statistic to bootstrap. We provide several default functions:
                * stat_functions.mean
                * stat_functions.sum
                * stat_functions.std
        compare_func: Function to compare test and control against.
                * compare_functions.difference
                * compare_functions.percent_change
                * compare_functions.ratio
                * compare_functions.percent_difference
        test_denominator: optional array that does division after the statistic
            is aggregated. This lets you compute group level division
            statistics. One corresponding entry per record in test.
            Example:
                SUM(value) / SUM(denom) instead of MEAN(value / denom)
                Ex. Cost Per Click
                cost per click across a group (clicks is denominator)
                    SUM(revenue) / SUM(clicks)
                mean cost per click for each record
                    MEAN(revenue / clicks)
        ctrl_denominator: see test_denominator.
        alpha: alpha value representing the confidence interval.
            Defaults to 0.05, i.e., 95th-CI.
        num_iterations: number of bootstrap iterations to run. The higher this
            number the more sure you can be about the stability your bootstrap.
            By this - we mean the returned interval should be consistent across
            runs for the same input. This also consumes more memory and makes
            analysis slower.
        iteration_batch_size: The bootstrap sample can generate very large
            arrays. This function iteration_batch_size limits the memory
            footprint by batching bootstrap rounds. Defaults to 10. Passing None
            will attempt to calculate the full simulation in one step.
        scale_test_by: The ratio between test and control population
            sizes. Use this if your test and control split is different from a
            50/50 split. Defaults to 1.0.
        is_pivotal: if true, use the pivotal method for bootstrapping confidence
            intervals. If false, use the percentile method.
        num_threads: The number of threads to use. This speeds up calculation of
            the bootstrap. Defaults to 1. If -1 is specified then
            multiprocessing.cpu_count() is used instead.
        return_distribution: if True, return the raw compared bootstrap
            distribution instead of a BootstrapResults summary.
    Returns:
        BootstrapResults representing CI and estimated value
        (or the raw compared distribution when return_distribution is True).
    '''
    # Numerators and denominators must be provided in matching pairs.
    both_denominators = test_denominator is not None and \
        ctrl_denominator is not None

    both_numerators = test is not None and ctrl is not None

    if both_numerators and not both_denominators:
        test_lists = [test]
        ctrl_lists = [ctrl]
        stat_func_lists = [stat_func]

        # Plain statistic: no post-aggregation division.
        def do_division(x):
            return x

        test_val = stat_func(test)[0]
        ctrl_val = stat_func(ctrl)[0]
    elif both_numerators and both_denominators:
        test_lists = [test, test_denominator]
        ctrl_lists = [ctrl, ctrl_denominator]
        stat_func_lists = [stat_func] * 2

        # Ratio statistic: SUM(num)/SUM(denom)-style semantics per group.
        def do_division(num, denom):
            return num / denom

        test_val = stat_func(test)[0] / stat_func(test_denominator)[0]
        ctrl_val = stat_func(ctrl)[0] / stat_func(ctrl_denominator)[0]
    elif not both_numerators:
        raise ValueError('Both test and ctrl numerators must be specified.')
    else:
        raise ValueError('Both test and ctrl denominators must be specified.')

    # Bootstrap test and control independently, then compare distributions.
    test_results = _bootstrap_distribution(test_lists, stat_func_lists,
                                           num_iterations, iteration_batch_size,
                                           num_threads)

    ctrl_results = _bootstrap_distribution(ctrl_lists, stat_func_lists,
                                           num_iterations, iteration_batch_size,
                                           num_threads)

    test_dist = do_division(*test_results)
    ctrl_dist = do_division(*ctrl_results)

    # scale_test_by corrects for unequal test/control population sizes.
    test_ctrl_dist = compare_func(test_dist * scale_test_by, ctrl_dist)

    if return_distribution:
        return test_ctrl_dist
    else:
        test_ctrl_val = compare_func(test_val * scale_test_by, ctrl_val)
        return _get_confidence_interval(test_ctrl_dist, test_ctrl_val, alpha,
                                        is_pivotal)
| 40.512987 | 99 | 0.630763 |
8cb553ea9bcf31cdcc9905b1c19bf4a0cbfdc70b | 6,023 | py | Python | Crawlers/scripts/parse_movie.py | Co1lin/iMovies | 76cdab1f42f08fc855bf19f8a8f4650e163bcc3e | [
"MIT"
] | 3 | 2020-09-12T15:37:03.000Z | 2021-08-30T11:11:09.000Z | Crawlers/scripts/parse_movie.py | Co1lin/iMovies | 76cdab1f42f08fc855bf19f8a8f4650e163bcc3e | [
"MIT"
] | null | null | null | Crawlers/scripts/parse_movie.py | Co1lin/iMovies | 76cdab1f42f08fc855bf19f8a8f4650e163bcc3e | [
"MIT"
] | 1 | 2020-11-13T06:56:27.000Z | 2020-11-13T06:56:27.000Z | import os
import re
import json
import traceback
from lxml import etree
'''
from HTML:
导演
编剧
剧情简介
短评
[{
'writer': ,
'date': ,
'content': ,
},]
'''
def list_to_string_sep(list):
    """Strip each item and join them into one ' / '-separated string."""
    cleaned = [item.strip() for item in list]
    return ' / '.join(cleaned)
def list_to_string_para(list):
    """Strip each item and join them with newlines into one paragraph block."""
    cleaned = [item.strip() for item in list]
    return '\n'.join(cleaned)
DIR = '../DoubanCrawler/'      # root of the crawler output
SUBJ_DIR = DIR + 'subjects/'   # per-movie HTML pages live here

# Accumulates the merged movie records, keyed by movie name. The commented
# template below documents the expected shape of each entry after parsing.
new_data = {
    # 'name': {
    #     'title': '',
    #     'score': '',
    #     'vote_count': 0,
    #     'director': ,
    #     'scriptwriter': ,
    #     'introduction': ,
    #     'types': '',   # use space between different types
    #     'regions': '', # use space between different types
    #     'release_date': '',
    #     'cover_url': '',
    #     'url': '',
    #     'comments': [
    #     ],
    #     'actors': [
    #
    #     ],
    #     'actors_pages': {
    #         'name': ,
    #         'url': ,
    #     }
    # }
}
# Main pass: for every movie in the crawler index, parse its info page and
# celebrities page, then merge the parsed fields into new_data. Parse errors
# are logged to error_parse_movie.log and the movie is skipped.
with open('./error_parse_movie.log', 'w') as error_log:
    with open(DIR + 'new_new_index.json', 'r') as findex:
        old_data = json.load(findex)

    counter = 1
    for movie in old_data:
        report = str(counter) + ' ' + movie
        print(report)

        # --- parse the movie info page -----------------------------------
        with open(SUBJ_DIR + movie + '.html', 'r') as webpage:
            try:
                file_str = webpage.read()
                file_str = bytes(bytearray(file_str, encoding='utf-8'))
                html = etree.HTML(file_str)

                director = list_to_string_sep(html.xpath('//a[@rel="v:directedBy"]//text()'))
                scriptwriter = list_to_string_sep(html.xpath('//*[@id="info"]/span[2]/span[2]/a//text()'))

                # Prefer the full ("all hidden") synopsis; fall back to the
                # short summary when the page has no expandable text.
                introduction = html.xpath('//span[@class="all hidden"]//text()')
                if not introduction:
                    introduction = html.xpath('//span[@property="v:summary"]//text()')
                introduction = list_to_string_para(introduction)

                # Collect up to 5 hot comments; some pages have fewer.
                comments = []
                try:
                    for i in range(1, 6):
                        comment = {}
                        comment['writer'] = html.xpath('//*[@id="hot-comments"]/div[' + str(i) + ']/div/h3/span[2]/a/text()')[0].strip()
                        comment['date'] = html.xpath('//*[@id="hot-comments"]/div[' + str(i) + ']/div/h3/span[2]/span[3]/text()')[0].strip()
                        comment['content'] = html.xpath('//*[@id="hot-comments"]/div[' + str(i) + ']/div/p/span/text()')[0].strip()
                        comments.append(comment)
                except IndexError:
                    # BUG FIX: the original called the file object itself
                    # (error_log(...)), which raised TypeError and made the
                    # outer handler skip every movie with < 5 comments.
                    error_log.write('Comments are not enough! in: ' + report + '\n')
            except Exception:
                print('-------- Error when processing the movie info webpage:')
                err = traceback.format_exc()
                print(err)
                error_log.write('-------- Error when processing the movie info webpage:' + report + '\n')
                error_log.write(err + '\n')
                continue

        # --- collect links to up to 10 actor pages -----------------------
        actors_pages = {}
        with open(SUBJ_DIR + movie + '-celebrities.html', 'r') as webpage:
            try:
                file_str = webpage.read()
                file_str = bytes(bytearray(file_str, encoding='utf-8'))
                html = etree.HTML(file_str)
                actor_link_list = html.xpath('//*[@id="celebrities"]/div[h2/text()="演员 Cast"]//*[@class="celebrity"]/a/attribute::href')
                actor_name_list = html.xpath('//*[@id="celebrities"]/div[h2/text()="演员 Cast"]//*[@class="celebrity"]/a/attribute::title')
                for i in range(0, min(10, len(actor_name_list))):
                    actors_pages[actor_name_list[i]] = actor_link_list[i]
            except Exception:
                print('-------- Error when processing the actor info webpage:')
                err = traceback.format_exc()
                print(err)
                error_log.write('-------- Error when processing the actor info webpage:' + report + '\n')
                error_log.write(err + '\n')
                continue

        # --- merge old index fields with the newly parsed data -----------
        try:
            new_data[movie] = {}  # create the dict first!
            for key in old_data[movie]:
                new_data[movie][key] = old_data[movie][key]
            new_data[movie]['director'] = director
            new_data[movie]['scriptwriter'] = scriptwriter
            new_data[movie]['introduction'] = introduction
            new_data[movie]['comments'] = comments
            new_data[movie]['actors_pages'] = actors_pages
        except Exception:
            print('-------- Error when adding new data:')
            err = traceback.format_exc()
            print(err)
            error_log.write('-------- Error when adding new data:' + report + '\n')
            error_log.write(err + '\n')
            continue

        # NOTE(review): counter only advances on success, so the printed index
        # does not account for skipped movies — confirm this is intended.
        counter += 1

with open('./complete_info.json', 'w') as output:
    json.dump(new_data, output, ensure_ascii=False)
1b4610a1a14fd1b2dbafb304bd5fe42316019af7 | 44 | py | Python | gocats/_version.py | rmflight/GOcats | fc7b367583a5a579a76c58a83a37fe13c69ebccc | [
"Unlicense"
] | 10 | 2017-03-31T19:12:22.000Z | 2021-09-28T01:29:38.000Z | gocats/_version.py | rmflight/GOcats | fc7b367583a5a579a76c58a83a37fe13c69ebccc | [
"Unlicense"
] | 8 | 2018-04-23T15:40:56.000Z | 2021-03-31T14:22:06.000Z | gocats/_version.py | rmflight/GOcats | fc7b367583a5a579a76c58a83a37fe13c69ebccc | [
"Unlicense"
] | 3 | 2017-04-23T14:15:41.000Z | 2021-06-20T18:38:01.000Z | __version__ = "1.1.7"
short_version = "1.1"
| 14.666667 | 21 | 0.659091 |
bf029d98fa968b2d91edc255ea433f20574322b2 | 787 | py | Python | tests/filtering/test_filter_against_latest_blocks.py | bellaj/web3py | 882335941a0781a3a3a14b7616f727005e88d88d | [
"MIT"
] | null | null | null | tests/filtering/test_filter_against_latest_blocks.py | bellaj/web3py | 882335941a0781a3a3a14b7616f727005e88d88d | [
"MIT"
] | null | null | null | tests/filtering/test_filter_against_latest_blocks.py | bellaj/web3py | 882335941a0781a3a3a14b7616f727005e88d88d | [
"MIT"
] | null | null | null | import random
from flaky import flaky
from web3.utils.compat import (
Timeout,
)
# Integration test: requires a live node that mines blocks; retried via
# @flaky because block timing is nondeterministic.
@flaky(max_runs=3)
def test_filter_against_latest_blocks(web3, sleep_interval, wait_for_block, skip_if_testrpc):
    # testrpc is excluded — presumably its "latest" filter behaviour differs
    # from a real node; confirm against the fixture's docs.
    skip_if_testrpc(web3)

    seen_blocks = []

    # Watch for new block hashes; each one is appended to seen_blocks.
    txn_filter = web3.eth.filter("latest")
    txn_filter.watch(seen_blocks.append)

    current_block = web3.eth.blockNumber

    # Block until the chain has advanced by 3 blocks.
    wait_for_block(web3, current_block + 3)

    # Give the watcher up to 5 seconds to observe at least two of them.
    with Timeout(5) as timeout:
        while len(seen_blocks) < 2:
            timeout.sleep(sleep_interval())

    txn_filter.stop_watching(3)

    expected_block_hashes = [
        web3.eth.getBlock(n)['hash'] for n in range(current_block + 1, current_block + 3)
    ]
    assert len(seen_blocks) >= 2
    assert set(expected_block_hashes).issubset(seen_blocks)
981e8059150a4872d9bd36c1947ed7e5189880c8 | 945 | py | Python | showyourwork/workflow/scripts/tectonic.py | rodluger/showyourwork-sandbox | 4ee993b433ab6d3e35e2bd652c6b3c21601fe468 | [
"MIT"
] | 1 | 2021-08-03T17:10:39.000Z | 2021-08-03T17:10:39.000Z | showyourwork/workflow/scripts/tectonic.py | rodluger/showyourwork-sandbox | 4ee993b433ab6d3e35e2bd652c6b3c21601fe468 | [
"MIT"
] | null | null | null | showyourwork/workflow/scripts/tectonic.py | rodluger/showyourwork-sandbox | 4ee993b433ab6d3e35e2bd652c6b3c21601fe468 | [
"MIT"
] | null | null | null | import json
import urllib
import urllib.request
import tarfile
TEMP = snakemake.params["TEMP"]
OS = snakemake.params["OS"]
def get_tectonic_link():
    """
    Return the download URL of the latest "continuous" tectonic release
    asset whose name matches the current platform (``OS``), or None.
    """
    releases_api = "https://api.github.com/repos/tectonic-typesetting/tectonic/releases"
    download_url = None
    with urllib.request.urlopen(releases_api) as response:
        releases = json.loads(response.read().decode())
    for release in releases:
        if release.get("tag_name", "") != "continuous":
            continue
        for asset in release.get("assets"):
            if OS in asset.get("name", ""):
                download_url = asset.get("browser_download_url")
    return download_url
# Download the release tarball picked for this platform.
link = get_tectonic_link()
urllib.request.urlretrieve(link, "tectonic.tar.gz")

# Extract it into the workflow's temporary directory (TEMP from snakemake).
with tarfile.open("tectonic.tar.gz") as file:
    file.extractall(TEMP)
be3391dab5a0852d5f54d0c699ca9b308167e8e1 | 2,799 | py | Python | pithy/py/lex.py | gwk/pithy | a48fabe9b4724c5005a034bd18b2e70d10617a78 | [
"CC0-1.0"
] | 7 | 2019-05-04T00:51:38.000Z | 2021-12-10T15:36:31.000Z | pithy/py/lex.py | gwk/pithy | a48fabe9b4724c5005a034bd18b2e70d10617a78 | [
"CC0-1.0"
] | null | null | null | pithy/py/lex.py | gwk/pithy | a48fabe9b4724c5005a034bd18b2e70d10617a78 | [
"CC0-1.0"
] | 1 | 2016-07-30T22:38:08.000Z | 2016-07-30T22:38:08.000Z | # Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
from typing import Dict, List
from ..lex import Lexer, LexMode, c_like_punctuation_patterns, whitespace_patterns
patterns=dict(
  # These are ordered roughly to improve matching speed on a sample (all .py files in the repository).
  # Python keywords must come before `name`.
  const = r'None|True|False|Ellipsis',
  kw_as = 'as',
  kw_async = 'async',
  kw_await = 'await',
  kw_break = 'break',
  kw_class = 'class',
  kw_continue = 'continue',
  kw_def = 'def',
  kw_elif = 'elif',
  kw_else = 'else',
  kw_for = 'for',
  kw_from = 'from',
  kw_if = 'if',
  kw_import = 'import',
  kw_while = 'while',
  kw_yield = 'yield',
  name = r'[_A-Za-z][_A-Za-z0-9]*', # Most common.
  **whitespace_patterns,
  # Numeric literals: prefixed forms (hex/binary/octal) before float/decimal
  # so the 0x/0b/0o prefixes win over a bare leading digit.
  int_h = r'0x[_0-9A-Fa-f]+',
  int_b = r'0b[_01]+',
  int_o = r'0o[_07]+',
  flt = r'([0-9]+\.|\.[0-9])[_0-9]*',
  int_d = r'[0-9][_0-9]*',
  **c_like_punctuation_patterns,
  # The specific `# type:` comment forms must precede the generic `comment`.
  comment_type_ignore = r'\# type: ignore',
  # NOTE(review): `[\n]*` matches *newlines* after "# type:"; this looks like
  # it may have been intended as `[^\n]*` (rest of line) — confirm.
  comment_type = r'\# type:[\n]*',
  comment = r'\#[^\n]*',
)

# Base (non-string) pattern names for the lexer's main mode; string-literal
# pattern names are appended below by add_str_patterns.
main_pattern_names = list(patterns.keys())
str_pattern_names:Dict[str,List[str]] = {}
def add_str_patterns(quote:str, label:str, multiline:bool):
  '''
  Register lexer patterns for one string-literal flavor (quote style x multiline),
  by mutating the module-level `patterns` dict.

  Note about lexing string literals:
  general pattern for quoting with escapes is Q([^EQ]|EQ|EE)*Q.
  It is crucial that the escape character E is excluded in the '[^EQ]' clause,
  or else when matching against 'QEQQ', the pattern greedily matches 'QEQ'.
  To allow a trailing escape character, the 'EE' clause is also required.
  '''
  q = quote
  q0 = q[0] # A single quote character (quote itself may be a tripled run).
  n = r'\n' # Normally, newline is part of the exclusion set.
  or_ml = '' # Multiline has an additional choice clause.
  if multiline:
    n = '' # For multiline, newline is not excluded.
    or_ml = r'|' + q0 + r'{1,2}(?!' + q0 + ')' # Accept one or two quotes, so long as they aren't followed by a third.
  # The leading "(?:r[bf]|[bf]?r?)" pattern is for all the valid combinations of [rbf] flags.
  # `str_X` matches a complete literal; `_o`/`_c` are the open/close
  # fragments, and `_re`/`_txt`/`_esc` tokenize the literal's interior.
  patterns.update({
    f'str_{label}' : fr'(?:r[bf]|[bf]?r?){q}(?s:[^{n}\\{q0}]|\\.{or_ml})*{q}',
    f'str_{label}_o' : fr'(?:r[bf]|[bf]?r?){q}',
    f'str_{label}_c' : q,
    f'str_{label}_re' : r'[][(){}|^?*+]+',
    f'str_{label}_txt' : fr'[^\\{q[0]}]',
    f'str_{label}_esc' : r'\\.',
  })
# Register string-literal patterns for single ('s') and double ('d') quotes,
# each in triple-quoted (multiline) and one-line forms: str_s, str_s3, str_d, str_d3.
for l, q in [('s', "'"), ('d', '"')]:
  for multiline in [True, False]:
    label = l + '3' if multiline else l
    quote = q * 3 if multiline else q
    add_str_patterns(label=label, quote=quote, multiline=multiline)
    main_pattern_names.append('str_' + label) # TODO: generalize to allow choosing multimode lexer.

# flags='x' compiles the patterns in verbose mode.
lexer = Lexer(flags='x', patterns=patterns, modes=[LexMode('main', main_pattern_names)])
| 33.722892 | 118 | 0.608432 |
64950c22a032f92fdc7ad605c93b1b857290eca5 | 18,270 | py | Python | extensions/groupchats.py | yanislavb/vk4xmpp | 88989d2a63b597cb78f030e8aab7c9bf39096628 | [
"MIT"
] | null | null | null | extensions/groupchats.py | yanislavb/vk4xmpp | 88989d2a63b597cb78f030e8aab7c9bf39096628 | [
"MIT"
] | null | null | null | extensions/groupchats.py | yanislavb/vk4xmpp | 88989d2a63b597cb78f030e8aab7c9bf39096628 | [
"MIT"
] | null | null | null | # coding: utf-8
# This file is a part of VK4XMPP transport
# © simpleApps, 2013 — 2015.
# File contains parts of code from
# BlackSmith mark.1 XMPP Bot, © simpleApps 2011 — 2014.
# Installation:
# The extension requires up to 2 fields in the main config:
# 1. ConferenceServer - the address of your (or not yours?) conference server
# Bear in mind that there can be limits on the jabber server for conference per jid. Read the wiki for more details.
# 2. CHAT_LIFETIME_LIMIT - the limit of the time after that user considered inactive and will be removed.
# Time must be formatted as text and contain the time variable measurement.
# For example: CHAT_LIFETIME_LIMIT = "28y09M21d" means chat will be removed after 28 years 9 Months 21 days from now
# You can either ignore or use any of these chars: smdMy.
# Used chars: s for seconds, m for minutes, d for days, M for months, y for years. The number MUST contain 2 digits as well.
# Note: if you won't set the field, plugin won't remove any chat, but still will be gathering statistics.
"""
Handles VK Multi-Dialogs
Implements XEP-0045: Multi-User Chat (over an existing chat)
Note: This file contains only outgoing-specific stuff (vk->xmpp)
along with the Chat class and other useful functions
The code which handles incoming stuff (xmpp->vk) is placed in the following modules:
mod_groupchat_prs for presence handling
mod_groupchat_msg for message handling
"""
MAX_UPDATE_DELAY = 3600 # 1 hour
CHAT_CLEANUP_DELAY = 86400 # 24 hours
if not require("attachments") or not require("forwarded_messages"):
raise AssertionError("extension 'groupchats' requires 'forwarded_messages' and 'attachments'")
def setAffiliation(chat, afl, jid, jidFrom=TransportID, reason=None):
"""
Set user affiliation in a chat.
Parameters:
* chat - the chat to set affiliation in
* afl - the affiliation to set to
* jid - the user's jid whose affiliation needs to be changed
* jidFrom - the chat's owner jid (or anyone who can set users roles)
* reason - special reason
"""
stanza = xmpp.Iq("set", to=chat, frm=jidFrom)
query = xmpp.Node("query", {"xmlns": xmpp.NS_MUC_ADMIN})
arole = query.addChild("item", {"jid": jid, "affiliation": afl})
if reason:
arole.setTagData("reason", reason)
stanza.addChild(node=query)
sender(Component, stanza)
def inviteUser(chat, jidTo, jidFrom, name):
"""
Invite user to a chat.
Parameters:
* chat - the chat to invite to
* jidTo - the user's jid who needs to be invited
* jidFrom - the inviter's jid
* name - the inviter's name
"""
invite = xmpp.Message(to=chat, frm=jidFrom)
x = xmpp.Node("x", {"xmlns": xmpp.NS_MUC_USER})
inv = x.addChild("invite", {"to": jidTo})
inv.setTagData("reason", _("You're invited by user «%s»") % name)
invite.addChild(node=x)
sender(Component, invite)
def joinChat(chat, name, jidFrom, status=None):
"""
Join a chat.
Parameters:
* chat - the chat to join in
* name - nickname
* jidFrom - jid which will be displayed when joined
* status - special status
"""
prs = xmpp.Presence("%s/%s" % (chat, name), frm=jidFrom, status=status)
prs.setTag("c", {"node": TRANSPORT_CAPS_HASH, "ver": hash, "hash": "sha-1"},
xmpp.NS_CAPS)
sender(Component, prs)
def leaveChat(chat, jidFrom, reason=None):
"""
Leave chat.
Parameters:
* chat - chat to leave from
* jidFrom - jid to leave with
* reason - special reason
"""
prs = xmpp.Presence(chat, "unavailable", frm=jidFrom, status=reason)
sender(Component, prs)
def chatMessage(chat, text, jidFrom, subj=None, timestamp=0):
"""
Sends a message to the chat
"""
message = xmpp.Message(chat, typ="groupchat")
if timestamp:
timestamp = time.gmtime(timestamp)
message.setTimestamp(time.strftime("%Y%m%dT%H:%M:%S", timestamp))
if not subj:
message.setBody(text)
else:
message.setSubject(text)
message.setFrom(jidFrom)
executeHandlers("msg03g", (message, chat, jidFrom))
sender(Component, message)
def setChatConfig(chat, jidFrom, exterminate=False, cb=None, args={}):
"""
Sets the chat config
"""
iq = xmpp.Iq("set", to=chat, frm=jidFrom)
query = iq.addChild("query", namespace=xmpp.NS_MUC_OWNER)
if exterminate:
query.addChild("destroy")
else:
form = utils.buildDataForm(fields=[
{"var": "FORM_TYPE", "type": "hidden", "value": xmpp.NS_MUC_ROOMCONFIG},
{"var": "muc#roomconfig_membersonly", "type": "boolean", "value": "1"},
{"var": "muc#roomconfig_publicroom", "type": "boolean", "value": "0"},
{"var": "muc#roomconfig_persistentroom", "type": "boolean", "value": "1"},
{"var": "muc#roomconfig_whois", "value": "anyone"}],
type="submit")
query.addChild(node=form)
sender(Component, iq, cb, args)
def handleOutgoingChatMessage(user, vkChat):
"""
Handles outging VK messages and sends them to XMPP
"""
if "chat_id" in vkChat:
# check if the groupchats support enabled in user's settings
if not user.settings.groupchats:
return None
if not hasattr(user, "chats"):
user.chats = {}
# TODO: make this happen in the kernel, so we don't need to check it here
if not user.vk.userID:
logger.warning("groupchats: we didn't receive user id, trying again after 10 seconds (jid: %s)", user.source)
user.vk.getUserID()
utils.runThread(handleOutgoingChatMessage, (user, vkChat), delay=10)
return None
owner = vkChat.get("admin_id", "1")
chatID = vkChat["chat_id"]
chatJID = "%s_chat#%s@%s" % (user.vk.userID, chatID, ConferenceServer)
chat = createChat(user, chatJID)
if not chat.initialized:
chat.init(owner, chatID, chatJID, vkChat["title"], vkChat["date"], vkChat["chat_active"].split(","))
if not chat.created:
if chat.creation_failed:
return None
# we can add user, vkChat to the create() method to prevent losing or messing up the messages
chat.create(user)
# read the comments above the handleMessage function
if not chat.created:
time.sleep(1.5)
chat.handleMessage(user, vkChat)
return None
return ""
def createChat(user, source):
"""
Creates a chat
Args:
user: the User object
source: the chat's jid
"""
if not hasattr(user, "chats"):
user.chats = {}
if source in user.chats:
chat = user.chats[source]
else:
user.chats[source] = chat = Chat()
return chat
class Chat(object):
"""
Class used to handle multi-user dialogs
"""
def __init__(self):
self.created = False
self.invited = False
self.initialized = False
self.exists = False
self.creation_failed = False
self.owner_nickname = None
self.source = None
self.jid = None
self.owner = None
self.subject = None
self.creation_date = None
self.id = 0
self.last_update = 0
self.raw_users = {}
self.users = {}
def init(self, owner, id, jid, subject, date, users=[]):
"""
Assigns an id and other needed attributes to the class object
Args:
owner: owner's id (str)
id: chat's id (int)
jid: chat's jid (str)
subject: chat's subject
date: the chat creation date
users: dictionary of ids, id: {"name": nickname, "jid": jid}
"""
self.id = id
self.jid = jid
self.owner = owner
self.raw_users = users
self.subject = subject
self.creation_date = date
self.initialized = True
def create(self, user):
"""
Creates a chat, joins it and sets the config
"""
logger.debug("groupchats: creating %s. Users: %s; owner: %s (jid: %s)",
self.jid, self.raw_users, self.owner, user.source)
exists = runDatabaseQuery("select user from groupchats where jid=?", (self.jid,), many=True)
if exists:
self.exists = True
logger.debug("groupchats: groupchat %s exists in the database (jid: %s)",
self.jid, user.source)
else:
logger.debug("groupchats: groupchat %s will be added to the database (jid: %s)",
self.jid, user.source)
runDatabaseQuery("insert into groupchats (jid, owner, user, last_used) values (?,?,?,?)",
(self.jid, TransportID, user.source, time.time()), True)
name = user.vk.getUserData(self.owner)["name"]
self.users[TransportID] = {"name": name, "jid": TransportID}
# We join to the chat with the room owner's name to set the room subject from their name.
joinChat(self.jid, name, TransportID, "Lost in time.")
setChatConfig(self.jid, TransportID, False, self.onConfigSet, {"user": user})
def initialize(self, user, chat):
"""
Initializes chat object:
1) requests users list if required
2) makes them members
3) invites the user
4) sets the chat subject
Parameters:
chat: chat's jid
"""
if not self.raw_users:
vkChat = self.getVKChat(user, self.id) # getting the chat users
if not vkChat and not self.invited:
logger.error("groupchats: damn vk didn't answer to the chat list "
"request, starting timer to try again (jid: %s)", user.source)
utils.runThread(self.initialize, (user, chat), delay=10)
return False
self.raw_users = vkChat.get("users")
name = "@%s" % TransportID
setAffiliation(chat, "member", user.source)
if not self.invited:
inviteUser(chat, user.source, TransportID, user.vk.getUserData(self.owner)["name"])
logger.debug("groupchats: user has been invited to chat %s (jid: %s)", chat, user.source)
self.invited = True
self.setSubject(self.subject, self.creation_date)
joinChat(chat, name, TransportID, "Lost in time.") # let's rename ourselves
self.users[TransportID] = {"name": name, "jid": TransportID}
def update(self, userObject, vkChat):
"""
Updates chat users and sends messages
Uses two user lists to prevent losing of any of them
"""
all_users = vkChat["chat_active"].split(",")
all_users = [int(user) for user in all_users if user]
if userObject.settings.show_all_chat_users:
users = self.getVKChat(userObject, self.id)
if users:
all_users = users.get("users", [])
old_users = self.users.keys()
buddies = all_users + old_users
if TransportID in buddies:
buddies.remove(TransportID)
if userObject.vk.getUserID() in buddies:
buddies.remove(userObject.vk.getUserID())
for user in buddies:
jid = vk2xmpp(user)
if user not in old_users:
logger.debug("groupchats: user %s has joined the chat %s (jid: %s)",
user, self.jid, userObject.source)
# TODO: Transport MUST NOT request a name for each user it sees.
# It should be done with a list of users
# E.g. requesting a list of users and get a list of names
name = userObject.vk.getUserData(user)["name"]
self.users[int(user)] = {"name": name, "jid": jid}
setAffiliation(self.jid, "member", jid)
joinChat(self.jid, name, jid)
elif user not in all_users:
logger.debug("groupchats: user %s has left the chat %s (jid: %s)",
user, self.jid, userObject.source)
leaveChat(self.jid, jid)
del self.users[user]
subject = vkChat["title"]
if subject and subject != self.subject:
self.setSubject(subject)
self.raw_users = all_users
def setSubject(self, subject, date=None):
"""
Changes the chat subject
"""
chatMessage(self.jid, subject, TransportID, True, date)
self.subject = subject
def onConfigSet(self, cl, stanza, user):
"""
A callback which called after attempt to create the chat
"""
chat = stanza.getFrom().getStripped()
if xmpp.isResultNode(stanza):
self.created = True
logger.debug("groupchats: stanza \"result\" received from %s, "
"continuing initialization (jid: %s)", chat, user.source)
utils.execute(self.initialize, (user, chat))
else:
logger.error("groupchats: couldn't set room %s config, the answer is: %s (jid: %s)",
chat, str(stanza), user.source)
self.creation_failed = True
# there's a possibility to mess up here if many messages were sent before we created the chat
# we have to send the messages immendiately as soon as possible, so delay can mess the messages up
def handleMessage(self, user, vkChat, retry=True):
"""
Handle incoming (VK -> XMPP) messages
"""
if self.created:
self.update(user, vkChat)
body = escape("", uhtml(vkChat["body"]))
body += parseAttachments(user, vkChat)
body += parseForwardedMessages(user, vkChat)
if body:
chatMessage(self.jid, body, vk2xmpp(vkChat["uid"]), None)
else:
source = "unknown"
userObject = self.getUserObject(self.jid)
if userObject:
source = userObject.source
# todo: FULL leave on error and try to create the chat again
logger.warning("groupchats: chat %s wasn't created well, so trying to create it again (jid: %s)", self.jid, source)
logger.warning("groupchats: is there any groupchat limit on the server?")
if retry:
# TODO: We repeat it twice on each message. We shouldn't.
self.handleMessage(user, vkChat, False)
def isUpdateRequired(self):
"""
Tells whether it's required to update the chat's last_used time
Returns:
True if required
"""
if not self.source:
return False
if not self.last_update:
return True
if (time.time() - self.last_update) > MAX_UPDATE_DELAY:
return True
return False
@api.attemptTo(3, dict, RuntimeError)
def getVKChat(cls, user, id):
"""
Get vk chat by id
"""
chat = user.vk.method("messages.getChat", {"chat_id": id})
if not chat:
raise RuntimeError("Unable to get a chat!")
return chat
@classmethod
def getParts(cls, source):
"""
Split the source and return required parts
"""
node, domain = source.split("@", 1)
if "_chat#" not in node:
return (None, None, None)
if "/" in domain:
domain = domain.split("/")[0]
creator, id = node.split("_chat#", 1)
creator = int(creator)
id = int(id)
return (creator, id, domain)
@classmethod
def getUserObject(cls, source):
"""
Gets user object by chat jid
"""
user = None
jid = None
creator, id, domain = cls.getParts(source)
if domain == ConferenceServer and creator:
jid = cls.getJIDByID(id)
if not jid:
jid = runDatabaseQuery("select user from groupchats where jid=?", (source,), many=False)
if jid:
jid = jid[0]
if jid and jid in Users:
user = Users[jid]
return user
@staticmethod
def getJIDByID(id):
for key, value in Users.iteritems():
if key == id:
return value
return None
def updateLastUsed(chat):
"""
Updates the last_used field in the database
Args:
chat: the Chat object
"""
runDatabaseQuery("update groupchats set last_used=? where jid=?", (time.time(), chat.source), set=True)
def exterminateChats(user=None, chats=[]):
"""
Calls a Dalek for exterminate the chat
The chats argument must be a list of tuples
"""
def exterminated(cl, stanza, jid):
"""
The callback that's being called when the stanza we sent's got an answer
Args:
cl: the xmpp.Client object
stanza: the result stanza
jid: the jid stanza's sent from (?)
"""
chat = stanza.getFrom().getStripped()
if xmpp.isResultNode(stanza):
logger.debug("groupchats: target exterminated! Yay! target:%s (jid: %s)", chat, jid)
else:
logger.debug("groupchats: explain! Explain! "
"The chat wasn't exterminated! Target: %s (jid: %s)", chat, jid)
logger.error("groupchats: got stanza: %s (jid: %s)", str(stanza), jid)
if user and not chats:
chats = runDatabaseQuery("select jid, owner, user from groupchats where user=?", (user.source,))
# current chats
userChats = getattr(user, "chats", [])
for (jid, owner, source) in chats:
server = owner
if "@" in owner:
server = owner.split("@")[1]
if server == TransportID:
joinChat(jid, "Dalek", owner, "Exterminate!")
logger.debug("groupchats: going to exterminate %s, owner:%s (jid: %s)", jid, owner, source)
setChatConfig(jid, owner, True, exterminated, {"jid": jid})
# remove the chat from current
if jid in userChats:
del userChats[jid]
else:
# if we try to send from another jid with prosody, we'll be killed
logger.warning("Warning: Was the transport moved from other domain? Groupchat %s deletion skipped.", jid)
runDatabaseQuery("delete from groupchats where jid=?", (jid,), set=True)
def initChatsTable():
"""
Initializes database if it doesn't exist
"""
def checkColumns():
"""
Checks and adds additional column(s) into the groupchats table
"""
info = runDatabaseQuery("pragma table_info(groupchats)")
names = [col[1] for col in info]
if "nick" not in names:
logger.warning("groupchats: adding \"nick\" column to groupchats table")
runDatabaseQuery("alter table groupchats add column nick text", set=True)
runDatabaseQuery("create table if not exists groupchats "
"(jid text, owner text,"
"user text, last_used integer, nick text)", set=True)
checkColumns()
return True
def cleanTheChatsUp():
"""
Calls Dalek(s) to exterminate inactive users or their chats, whatever they catch
"""
chats = runDatabaseQuery("select jid, owner, last_used, user from groupchats")
result = []
for (jid, owner, last_used, user) in chats:
if (time.time() - last_used) >= utils.TimeMachine(CHAT_LIFETIME_LIMIT):
result.append((jid, owner, user))
logger.debug("groupchats: time for %s expired (jid: %s)", jid, user)
if result:
exterminateChats(chats=result)
utils.runThread(cleanTheChatsUp, delay=CHAT_CLEANUP_DELAY)
def initChatExtension():
"""
Initializes the extension"
"""
global mod_xhtml
try:
import mod_xhtml
except ImportError:
mod_xhtml = None
if initChatsTable():
if isdef("CHAT_LIFETIME_LIMIT"):
cleanTheChatsUp()
else:
logger.warning("not starting chats cleaner because CHAT_LIFETIME_LIMIT is not set")
if isdef("ConferenceServer") and ConferenceServer:
# G is for Groupchats. That's it.
Handlers["msg03g"] = []
GLOBAL_USER_SETTINGS["groupchats"] = {"label": "Handle groupchats",
"desc": "If set, transport would create xmpp-chatrooms for VK Multi-Dialogs", "value": 1}
GLOBAL_USER_SETTINGS["show_all_chat_users"] = {"label": "Show all chat users",
"desc": "If set, transport will show ALL users in a conference", "value": 0}
TRANSPORT_SETTINGS["destroy_on_leave"] = {"label": "Destroy groupchat if user leaves it", "value": 0}
TransportFeatures.add(xmpp.NS_GROUPCHAT)
registerHandler("msg01", handleOutgoingChatMessage)
registerHandler("evt01", initChatExtension)
registerHandler("evt03", exterminateChats)
logger.info("extension groupchats is loaded")
else:
del setAffiliation, inviteUser, joinChat, leaveChat, \
handleOutgoingChatMessage, chatMessage, Chat, \
exterminateChats, cleanTheChatsUp, initChatExtension
| 32.451155 | 124 | 0.696333 |
6bcc5c65d2f7bcb1904ea5d40e1809602569a763 | 392 | py | Python | python/cursoemvideo/076.py | Gustavo-Martins/learning | a2167b894ab3a4bac5e3d7d4ac6671e1ee89e155 | [
"Unlicense"
] | null | null | null | python/cursoemvideo/076.py | Gustavo-Martins/learning | a2167b894ab3a4bac5e3d7d4ac6671e1ee89e155 | [
"Unlicense"
] | null | null | null | python/cursoemvideo/076.py | Gustavo-Martins/learning | a2167b894ab3a4bac5e3d7d4ac6671e1ee89e155 | [
"Unlicense"
] | null | null | null | # Items list
flourish = ('-' * 39)
items = ('Papel A4', 0.25,
'Papel A3', 0.40,
'Caderno', 14.00,
'Livro de Português', 300.00,
'Caneta', 3.50,
'Lapiseira', 23.75,
'Grafite', 2.00)
print(flourish)
print('LISTAGEM DE PREÇOS')
print(flourish)
for pos in range(0, len(items)):
if pos % 2 == 0:
print(f'{items[pos]:.<30}',end='')
else:
print(f'R$ {items[pos]:>5.2f}')
print(flourish)
| 20.631579 | 36 | 0.607143 |
00286c09a762ac964e03feee6170042aa1d043b5 | 11,548 | py | Python | neutron/agent/l2/extensions/qos.py | 2020human/neutron | 1e2cfe8c06fcc2df52daa77b2b767ed6ffc2b19f | [
"Apache-2.0"
] | null | null | null | neutron/agent/l2/extensions/qos.py | 2020human/neutron | 1e2cfe8c06fcc2df52daa77b2b767ed6ffc2b19f | [
"Apache-2.0"
] | null | null | null | neutron/agent/l2/extensions/qos.py | 2020human/neutron | 1e2cfe8c06fcc2df52daa77b2b767ed6ffc2b19f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from neutron_lib import exceptions
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from neutron._i18n import _LW, _LI
from neutron.agent.l2 import l2_agent_extension
from neutron.agent.linux import tc_lib
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron import manager
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
"""Defines stable abstract interface for QoS Agent Driver.
QoS Agent driver defines the interface to be implemented by Agent
for applying QoS Rules on a port.
"""
# Each QoS driver should define the set of rule types that it supports, and
# corresponding handlers that has the following names:
#
# create_<type>
# update_<type>
# delete_<type>
#
# where <type> is one of VALID_RULE_TYPES
SUPPORTED_RULES = set()
@abc.abstractmethod
def initialize(self):
"""Perform QoS agent driver initialization.
"""
def create(self, port, qos_policy):
"""Apply QoS rules on port for the first time.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
self._handle_update_create_rules('create', port, qos_policy)
def consume_api(self, agent_api):
"""Consume the AgentAPI instance from the QoSAgentExtension class
This allows QosAgentDrivers to gain access to resources limited to the
NeutronAgent when this method is overridden.
:param agent_api: An instance of an agent specific API
"""
def update(self, port, qos_policy):
"""Apply QoS rules on port.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
self._handle_update_create_rules('update', port, qos_policy)
def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
if qos_policy is None:
rule_types = self.SUPPORTED_RULES
else:
rule_types = set(
[rule.rule_type
for rule in self._iterate_rules(qos_policy.rules)])
for rule_type in rule_types:
self._handle_rule_delete(port, rule_type)
def _iterate_rules(self, rules):
for rule in rules:
rule_type = rule.rule_type
if rule_type in self.SUPPORTED_RULES:
yield rule
else:
LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
'%(rule_type)s; skipping'),
{'rule_id': rule.id, 'rule_type': rule_type})
def _handle_rule_delete(self, port, rule_type):
handler_name = "".join(("delete_", rule_type))
handler = getattr(self, handler_name)
handler(port)
def _handle_update_create_rules(self, action, port, qos_policy):
for rule in self._iterate_rules(qos_policy.rules):
if rule.should_apply_to_port(port):
handler_name = "".join((action, "_", rule.rule_type))
handler = getattr(self, handler_name)
handler(port, rule)
else:
LOG.debug("Port %(port)s excluded from QoS rule %(rule)s",
{'port': port, 'rule': rule.id})
def _get_egress_burst_value(self, rule):
"""Return burst value used for egress bandwidth limitation.
Because Egress bw_limit is done on ingress qdisc by LB and ovs drivers
so it will return burst_value used by tc on as ingress_qdisc.
"""
return tc_lib.TcCommand.get_ingress_qdisc_burst_value(
rule.max_kbps, rule.max_burst_kbps)
class PortPolicyMap(object):
def __init__(self):
# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)
self.known_policies = {}
self.port_policies = {}
def get_ports(self, policy):
return self.qos_policy_ports[policy.id].values()
def get_policy(self, policy_id):
return self.known_policies.get(policy_id)
def update_policy(self, policy):
self.known_policies[policy.id] = policy
def has_policy_changed(self, port, policy_id):
return self.port_policies.get(port['port_id']) != policy_id
def get_port_policy(self, port):
policy_id = self.port_policies.get(port['port_id'])
if policy_id:
return self.get_policy(policy_id)
def set_port_policy(self, port, policy):
"""Attach a port to policy and return any previous policy on port."""
port_id = port['port_id']
old_policy = self.get_port_policy(port)
self.known_policies[policy.id] = policy
self.port_policies[port_id] = policy.id
self.qos_policy_ports[policy.id][port_id] = port
if old_policy and old_policy.id != policy.id:
del self.qos_policy_ports[old_policy.id][port_id]
return old_policy
def clean_by_port(self, port):
"""Detach port from policy and cleanup data we don't need anymore."""
port_id = port['port_id']
if port_id in self.port_policies:
del self.port_policies[port_id]
for qos_policy_id, port_dict in self.qos_policy_ports.items():
if port_id in port_dict:
del port_dict[port_id]
if not port_dict:
self._clean_policy_info(qos_policy_id)
return
raise exceptions.PortNotFound(port_id=port['port_id'])
def _clean_policy_info(self, qos_policy_id):
del self.qos_policy_ports[qos_policy_id]
del self.known_policies[qos_policy_id]
class QosAgentExtension(l2_agent_extension.L2AgentExtension):
SUPPORTED_RESOURCE_TYPES = [resources.QOS_POLICY]
def initialize(self, connection, driver_type):
"""Initialize agent extension."""
self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
self.qos_driver = manager.NeutronManager.load_class_for_provider(
'neutron.qos.agent_drivers', driver_type)()
self.qos_driver.consume_api(self.agent_api)
self.qos_driver.initialize()
self.policy_map = PortPolicyMap()
self._register_rpc_consumers(connection)
def consume_api(self, agent_api):
"""Allows an extension to gain access to resources internal to the
neutron agent and otherwise unavailable to the extension.
"""
self.agent_api = agent_api
def _register_rpc_consumers(self, connection):
"""Allows an extension to receive notifications of updates made to
items of interest.
"""
endpoints = [resources_rpc.ResourcesPushRpcCallback()]
for resource_type in self.SUPPORTED_RESOURCE_TYPES:
# We assume that the neutron server always broadcasts the latest
# version known to the agent
registry.register(self._handle_notification, resource_type)
topic = resources_rpc.resource_type_versioned_topic(resource_type)
connection.create_consumer(topic, endpoints, fanout=True)
@lockutils.synchronized('qos-port')
def _handle_notification(self, context, resource_type,
qos_policies, event_type):
# server does not allow to remove a policy that is attached to any
# port, so we ignore DELETED events. Also, if we receive a CREATED
# event for a policy, it means that there are no ports so far that are
# attached to it. That's why we are interested in UPDATED events only
if event_type == events.UPDATED:
for qos_policy in qos_policies:
self._process_update_policy(qos_policy)
@lockutils.synchronized('qos-port')
def handle_port(self, context, port):
"""Handle agent QoS extension for port.
This method applies a new policy to a port using the QoS driver.
Update events are handled in _handle_notification.
"""
port_id = port['port_id']
port_qos_policy_id = port.get('qos_policy_id')
network_qos_policy_id = port.get('network_qos_policy_id')
qos_policy_id = port_qos_policy_id or network_qos_policy_id
if qos_policy_id is None:
self._process_reset_port(port)
return
if not self.policy_map.has_policy_changed(port, qos_policy_id):
return
qos_policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, qos_policy_id)
if qos_policy is None:
LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
"%(port_id)s is not available on server, "
"it has been deleted. Skipping."),
{'qos_policy_id': qos_policy_id, 'port_id': port_id})
self._process_reset_port(port)
else:
old_qos_policy = self.policy_map.set_port_policy(port, qos_policy)
if old_qos_policy:
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
else:
self.qos_driver.create(port, qos_policy)
def delete_port(self, context, port):
self._process_reset_port(port)
def _policy_rules_modified(self, old_policy, policy):
return not (len(old_policy.rules) == len(policy.rules) and
all(i in old_policy.rules for i in policy.rules))
def _process_update_policy(self, qos_policy):
old_qos_policy = self.policy_map.get_policy(qos_policy.id)
if old_qos_policy:
if self._policy_rules_modified(old_qos_policy, qos_policy):
for port in self.policy_map.get_ports(qos_policy):
#NOTE(QoS): for now, just reflush the rules on the port.
# Later, we may want to apply the difference
# between the old and new rule lists.
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
self.policy_map.update_policy(qos_policy)
def _process_reset_port(self, port):
try:
self.policy_map.clean_by_port(port)
self.qos_driver.delete(port)
except exceptions.PortNotFound:
LOG.info(_LI("QoS extension did have no information about the "
"port %s that we were trying to reset"),
port['port_id'])
| 39.278912 | 79 | 0.64955 |
c22a0079b4310d03d28d44b6a3b1150969262225 | 159 | py | Python | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_DayOfWeek_LSTM.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_DayOfWeek_LSTM.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_DayOfWeek_LSTM.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['PolyTrend'] , ['Seasonal_DayOfWeek'] , ['LSTM'] ); | 39.75 | 86 | 0.754717 |
19bc9e8e0d2fbf7aca26cb829017c7ae3a9ff7c8 | 77 | py | Python | test/tk/listTK/wrap/test_2.py | yuhangwang/plot | 4f81dab2e6e3dc384c370f8dc0a00e8df29770fb | [
"MIT"
] | 1 | 2020-04-10T08:14:43.000Z | 2020-04-10T08:14:43.000Z | test/tk/listTK/wrap/test_2.py | yuhangwang/plot | 4f81dab2e6e3dc384c370f8dc0a00e8df29770fb | [
"MIT"
] | 1 | 2016-11-30T20:37:27.000Z | 2016-12-12T11:55:50.000Z | test/tk/listTK/wrap/test_2.py | yuhangwang/plot | 4f81dab2e6e3dc384c370f8dc0a00e8df29770fb | [
"MIT"
] | 1 | 2019-12-18T07:56:00.000Z | 2019-12-18T07:56:00.000Z | from plot.tk.listTK import wrap
def test():
assert wrap(1, 2) == [[1]]
| 12.833333 | 31 | 0.597403 |
69c69a156fcdbaa5b1ee9cc3000008f2478e363e | 2,746 | py | Python | spectral_parser/preprocessing/mappings.py | GavinPHR/Spectral-Parser | 4d1f071f29ac158e67f32a105aa6483c6479fae8 | [
"MIT"
] | 4 | 2021-03-18T10:03:14.000Z | 2021-05-05T21:37:59.000Z | spectral_parser/preprocessing/mappings.py | GavinPHR/Spectral-Parser | 4d1f071f29ac158e67f32a105aa6483c6479fae8 | [
"MIT"
] | null | null | null | spectral_parser/preprocessing/mappings.py | GavinPHR/Spectral-Parser | 4d1f071f29ac158e67f32a105aa6483c6479fae8 | [
"MIT"
] | 1 | 2021-03-18T10:03:18.000Z | 2021-03-18T10:03:18.000Z | """
Nonterminal and terminal mappings as described in section
3.2.1 in my dissertation.
These data structures are two-way mappings:
you can index from integers to strings and vice versa.
"""
import collections
from tqdm import tqdm
import config
__author__ = 'Haoran Peng'
__email__ = 'gavinsweden@gmail.com'
__license__ = 'MIT'
class NonterminalMap:
def __init__(self, trees):
self.nonterm2int = dict()
self.int2nonterm = dict()
self.populate(trees)
def populate(self, trees):
i = 0
for tree in tqdm(trees, desc='Nonterminal mappings'):
for node in tree.postorder():
if node.label() not in self.nonterm2int:
self.nonterm2int[node.label()] = i
self.int2nonterm[i] = node.label()
i += 1
def __getitem__(self, item):
if type(item) == str:
return self.nonterm2int[item]
elif type(item) == int:
return self.int2nonterm[item]
else:
raise RuntimeError('Item has incorrect type.')
def __len__(self):
return len(self.nonterm2int)
def __contains__(self, item):
if type(item) == str:
return item in self.nonterm2int
elif type(item) == int:
return item in self.int2nonterm
else:
raise RuntimeError('Item has incorrect type.')
class TerminalMap:
def __init__(self, trees, start_index):
self.term2int = dict()
self.int2term = dict()
self.acc = start_index
self.populate(trees)
def populate(self, trees):
term_count = collections.Counter()
for tree in trees:
term_count.update(tree.leaves())
for term, count in term_count.items():
if count <= config.terminal_cutoff:
continue
self.term2int[term] = self.acc
self.int2term[self.acc] = term
self.acc += 1
def update_UNK(self, UNK):
assert (type(UNK) == str)
if UNK not in self.term2int:
self.term2int[UNK] = self.acc
self.int2term[self.acc] = UNK
self.acc += 1
def __getitem__(self, item):
if type(item) == str:
return self.term2int[item]
elif type(item) == int:
return self.int2term[item]
else:
raise RuntimeError('Item has incorrect type.')
def __len__(self):
return len(self.term2int)
def __contains__(self, item):
if type(item) == str:
return item in self.term2int
elif type(item) == int:
return item in self.int2term
else:
raise RuntimeError('Item has incorrect type.')
| 28.020408 | 61 | 0.577567 |
a0399e7f59548c668df697941efb3ec61dba6bfa | 1,883 | py | Python | backend/producer.py | maneeshd/stock-market-sim | 0ce6311e4a9e71737b48bc5c1f2d503d0a2f737f | [
"MIT"
] | 1 | 2020-10-22T22:47:44.000Z | 2020-10-22T22:47:44.000Z | backend/producer.py | maneeshd/kafka-stock | 0ce6311e4a9e71737b48bc5c1f2d503d0a2f737f | [
"MIT"
] | null | null | null | backend/producer.py | maneeshd/kafka-stock | 0ce6311e4a9e71737b48bc5c1f2d503d0a2f737f | [
"MIT"
] | 1 | 2021-02-17T05:40:28.000Z | 2021-02-17T05:40:28.000Z | """
Author: Maneesh Divana <maneeshd77@gmail.com>
Date: 2020-12-03
Python: 3.7.9
Reads stock data from CSV files and simulates stock streaming into AWS Kinesis Data Stream
"""
from os import path
from datetime import datetime
from time import sleep
import pandas as pd
from kinesis_api import KinesisAPI
CUR_DIR = path.realpath(path.dirname(__file__))
BASE_DIR = path.dirname(CUR_DIR)
DATA_DIR_ROOT = path.join(BASE_DIR, "data")
CSV_FILENAME = "intraday-22-oct-merged.csv"
KINESIS_STREAM_NAME = "stock-stream"
KINESIS_SHARD_PARTITION_KEY = "stock"
def simulate():
    """
    Simulate a real-time stream by posting CSV stock data to the Kinesis
    Data Stream shard: one batch of 10 rows per minute.
    """
    # Load CSV into DataFrame
    df = pd.read_csv(path.join(DATA_DIR_ROOT, CSV_FILENAME))
    print(f"Shape of DataFrame: {df.shape}")
    print("DataFrame:")
    print(df, "\n")

    # Connect to Kinesis using API
    api = KinesisAPI(stream_name=KINESIS_STREAM_NAME)
    print("-" * 64, "\n")
    start = datetime.now()
    print(f"[{start.strftime('%Y-%m-%d %H:%M:%S')}] Starting Kinesis producer...\n")
    try:
        # Send records for a group of 10 companies every 1 minute.
        # (The previous version pre-computed this groupby into a variable that
        # was immediately shadowed by the loop — dead work, removed.)
        for _, group in df.groupby(df.index // 10):
            now = datetime.now()
            print(f"[{now.strftime('%Y-%m-%d %H:%M:%S')}] Sending data:")
            print(group)
            api.write_record(
                data=group.to_dict(),
                partition_key=KINESIS_SHARD_PARTITION_KEY
            )
            print("")
            sleep(60.0)
        end = datetime.now()
        print(f"[{end.strftime('%Y-%m-%d %H:%M:%S')}] Finished producing. Exiting...\n")
    finally:
        # Always release the Kinesis connection, even if a write fails.
        api.close()
if __name__ == "__main__":
    # Entry point: print a banner, then stream the CSV data to Kinesis.
    print("====================================")
    print("Stock Data Producer for AWS Kinesis")
    print("====================================")
    simulate()
| 26.9 | 90 | 0.62666 |
b3d1e0e0e59dca5f038c79138d472a0a98611fcc | 137 | py | Python | chapter2-variable/exercise3.py | MyLanPangzi/py4e | af5cd5fa63956ff237f880a1f9dd0bfdd6b28930 | [
"Apache-2.0"
] | null | null | null | chapter2-variable/exercise3.py | MyLanPangzi/py4e | af5cd5fa63956ff237f880a1f9dd0bfdd6b28930 | [
"Apache-2.0"
] | null | null | null | chapter2-variable/exercise3.py | MyLanPangzi/py4e | af5cd5fa63956ff237f880a1f9dd0bfdd6b28930 | [
"Apache-2.0"
] | null | null | null | # Enter Hours: 35
# Enter Rate: 2.75
# Pay: 96.25
# Read hours and rate as text, then print gross pay (hours * rate).
h = input('Enter Hours: ')
r = input('Enter Rate: ')
print('Pay', float(h) * float(r))
| 19.571429 | 33 | 0.59854 |
8b3e382fbbb650eb2bb68ac7a82844f965e4ff97 | 3,393 | py | Python | rowad/hooks.py | ashish-greycube/rowad | f69ebcbb2b50aa96fc78f6b9e7d2272215c00bb3 | [
"MIT"
] | null | null | null | rowad/hooks.py | ashish-greycube/rowad | f69ebcbb2b50aa96fc78f6b9e7d2272215c00bb3 | [
"MIT"
] | null | null | null | rowad/hooks.py | ashish-greycube/rowad | f69ebcbb2b50aa96fc78f6b9e7d2272215c00bb3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
# App metadata consumed by the Frappe framework (bench / Desk app listing).
app_name = "rowad"
app_title = "Rowad"
app_publisher = "GreyCube Technologies"
app_description = "customization for Rowad"
app_icon = "octicon octicon-home-fill"
app_color = "green"
app_email = "admin@greycube.in"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/rowad/css/rowad.css"
# app_include_js = "/assets/rowad/js/rowad.js"
# include js, css files in header of web template
# web_include_css = "/assets/rowad/css/rowad.css"
# web_include_js = "/assets/rowad/js/rowad.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# Client-side scripts injected into the corresponding DocType forms.
doctype_js = {
    "Sales Order" : "public/js/sales_order.js",
    "Delivery Note" : "public/js/delivery_note.js",
    "Item" : "public/js/item.js",
    "Task" : "public/js/task.js",
    "Maintenance Schedule" : "public/js/maintenance_schedule.js",
    "Maintenance Visit" : "public/js/maintenance_visit.js"
}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "rowad.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "rowad.install.before_install"
# after_install = "rowad.install.after_install"
# Hook run after `bench migrate`; implementation lives in rowad/hook_methods.py.
after_migrate = "rowad.hook_methods.after_migrate"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "rowad.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# Handlers attached to document lifecycle events (dotted paths into rowad.api).
doc_events = {
    "Sales Order": {
        "before_submit": "rowad.api.validate_sales_order_item_user_allocation",
    },
    "Task": {
        "validate": "rowad.api.validate_task_and_create_delivery_note_maintenance_schedule",
    }
}
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "rowad.tasks.all"
# ],
# "daily": [
# "rowad.tasks.daily"
# ],
# "hourly": [
# "rowad.tasks.hourly"
# ],
# "weekly": [
# "rowad.tasks.weekly"
# ]
# "monthly": [
# "rowad.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "rowad.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "rowad.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "rowad.task.get_dashboard_data"
# }
| 24.410072 | 86 | 0.682582 |
43b21a299222f66dad9afd63b1244cf0b26bdfda | 71 | py | Python | Feature Dev Scripts/listMacros.py | RealIndrit/JsMacros-Examples | b04de1231bacdb72d81c7301ab6a47474bc4d5be | [
"MIT"
] | 2 | 2021-07-03T21:50:05.000Z | 2021-10-31T13:14:57.000Z | Feature Dev Scripts/listMacros.py | RealIndrit/JsMacros-Examples | b04de1231bacdb72d81c7301ab6a47474bc4d5be | [
"MIT"
] | 1 | 2021-03-17T11:20:16.000Z | 2021-03-17T11:21:48.000Z | Feature Dev Scripts/listMacros.py | RealIndrit/JsMacros-Examples | b04de1231bacdb72d81c7301ab6a47474bc4d5be | [
"MIT"
] | 4 | 2020-08-04T14:15:54.000Z | 2021-12-10T09:45:12.000Z | m = jsmacros.getProfile().getRegistry().getListeners("KEY")
chat.log(m) | 35.5 | 59 | 0.746479 |
52a8944f877cc393109e1387545c12e3288a2dc7 | 2,734 | py | Python | tests/test_initialization.py | priyashengole/Chatterbot | 49708c479226d6650a4cadf33a1bacd233c2ca0c | [
"BSD-3-Clause"
] | 5 | 2021-03-21T06:26:02.000Z | 2021-08-11T09:58:44.000Z | tests/test_initialization.py | priyashengole/Chatterbot | 49708c479226d6650a4cadf33a1bacd233c2ca0c | [
"BSD-3-Clause"
] | null | null | null | tests/test_initialization.py | priyashengole/Chatterbot | 49708c479226d6650a4cadf33a1bacd233c2ca0c | [
"BSD-3-Clause"
] | 1 | 2020-05-23T09:51:30.000Z | 2020-05-23T09:51:30.000Z | from .base_case import ChatBotTestCase
class StringInitializationTestCase(ChatBotTestCase):
    """Adapters given as import-path strings are resolved to instances."""

    def get_kwargs(self):
        return {
            'input_adapter': 'chatterbot.input.VariableInputTypeAdapter',
            'output_adapter': 'chatterbot.output.OutputAdapter',
            'database_uri': None
        }

    def test_storage_initialized(self):
        from chatterbot.storage import SQLStorageAdapter
        # assertIsInstance reports the actual type on failure,
        # unlike assertTrue(isinstance(...)) which only says "False is not true".
        self.assertIsInstance(self.chatbot.storage, SQLStorageAdapter)

    def test_logic_initialized(self):
        from chatterbot.logic import BestMatch
        self.assertEqual(len(self.chatbot.logic.adapters), 1)
        self.assertIsInstance(self.chatbot.logic.adapters[0], BestMatch)

    def test_input_initialized(self):
        from chatterbot.input import VariableInputTypeAdapter
        self.assertIsInstance(self.chatbot.input, VariableInputTypeAdapter)

    def test_output_initialized(self):
        from chatterbot.output import OutputAdapter
        self.assertIsInstance(self.chatbot.output, OutputAdapter)
class DictionaryInitializationTestCase(ChatBotTestCase):
    """Adapters given as config dictionaries are resolved to instances."""

    def get_kwargs(self):
        return {
            'storage_adapter': {
                'import_path': 'chatterbot.storage.SQLStorageAdapter',
                'database_uri': None
            },
            'input_adapter': {
                'import_path': 'chatterbot.input.VariableInputTypeAdapter'
            },
            'output_adapter': {
                'import_path': 'chatterbot.output.OutputAdapter'
            },
            'logic_adapters': [
                {
                    'import_path': 'chatterbot.logic.BestMatch',
                },
                {
                    'import_path': 'chatterbot.logic.MathematicalEvaluation',
                }
            ]
        }

    def test_storage_initialized(self):
        from chatterbot.storage import SQLStorageAdapter
        # assertIsInstance reports the actual type on failure,
        # unlike assertTrue(isinstance(...)) which only says "False is not true".
        self.assertIsInstance(self.chatbot.storage, SQLStorageAdapter)

    def test_logic_initialized(self):
        from chatterbot.logic import BestMatch
        from chatterbot.logic import MathematicalEvaluation
        self.assertEqual(len(self.chatbot.logic.adapters), 2)
        self.assertIsInstance(self.chatbot.logic.adapters[0], BestMatch)
        self.assertIsInstance(self.chatbot.logic.adapters[1], MathematicalEvaluation)

    def test_input_initialized(self):
        from chatterbot.input import VariableInputTypeAdapter
        self.assertIsInstance(self.chatbot.input, VariableInputTypeAdapter)

    def test_output_initialized(self):
        from chatterbot.output import OutputAdapter
        self.assertIsInstance(self.chatbot.output, OutputAdapter)
| 37.452055 | 91 | 0.66752 |
f6ff5dd94bb11f5f8940fb7a0d992b35d8e31ba4 | 713 | py | Python | Hackerrank/Python/default-arguments.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | 1 | 2021-01-10T13:29:21.000Z | 2021-01-10T13:29:21.000Z | Hackerrank/Python/default-arguments.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | null | null | null | Hackerrank/Python/default-arguments.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | null | null | null | class EvenStream(object):
def __init__(self):
    # Even stream starts at 0 and advances by 2 per get_next() call.
    self.current = 0
def get_next(self):
    """Return the current value and advance the stream by two."""
    value = self.current
    self.current += 2
    return value
class OddStream(object):
    """Stream of odd numbers: 1, 3, 5, ..."""

    def __init__(self):
        self.current = 1

    def get_next(self):
        """Return the current value and advance the stream by two."""
        value = self.current
        self.current += 2
        return value
def print_from_stream(n, stream=None):
    """Print the next ``n`` values from ``stream`` (default: a fresh EvenStream).

    Uses an explicit ``is None`` check rather than ``stream or EvenStream()``:
    the ``or`` form would silently replace any falsy stream object.
    """
    if stream is None:
        stream = EvenStream()
    for _ in range(n):
        print(stream.get_next())
# Read the number of queries, then for each line "<even|odd> <n>"
# print the first n values of the corresponding stream.
queries = int(input())
for _ in range(queries):
    stream_name, n = input().split()
    n = int(n)
    if stream_name == "even":
        print_from_stream(n)
    else:
        print_from_stream(n, OddStream())
| 21.606061 | 41 | 0.604488 |
d7af9081da8f9fa063ea1c2e223e27e9b771f764 | 2,209 | py | Python | LPC.py | shun60s/Vocal-Tube-Estimation | fa5e11db95905c008acbf72ee46afa16c7f4502a | [
"MIT"
] | 3 | 2021-12-20T23:34:16.000Z | 2022-03-05T22:53:30.000Z | LPC.py | shun60s/Vocal-Tube-Estimation | fa5e11db95905c008acbf72ee46afa16c7f4502a | [
"MIT"
] | null | null | null | LPC.py | shun60s/Vocal-Tube-Estimation | fa5e11db95905c008acbf72ee46afa16c7f4502a | [
"MIT"
] | null | null | null | #coding:utf-8
###########################################################
# Levinson-DurbinのアルゴリズムにてLPC係数を求める
#
# autocorr and LevinsonDurbin is baed on
# <http://aidiary.hatenablog.com/entry/20120415/1334458954>
#
###########################################################
import numpy as np
#Check version
# Python 3.6.4, 64bit on Win32 (Windows 10)
# numpy (1.14.0)
def autocorr(x, nlags=None):
    """
    Autocorrelation of signal ``x``.

    r[lag] = sum_n x[n] * x[n + lag] for lag in [0, nlags).
    If ``nlags`` is omitted, all lags 0..len(x)-1 are computed.
    Requested lags >= len(x) are zero, matching the original
    double-loop definition.
    """
    x = np.asarray(x, dtype=float)
    N = len(x)
    if nlags is None:  # PEP 8: compare to None with `is`, not `==`
        nlags = N
    r = np.zeros(nlags)
    if N == 0:
        return r
    # np.correlate in 'full' mode yields lags -(N-1)..(N-1); non-negative
    # lags start at index N-1. Replaces the O(N^2) Python loop with a
    # single vectorized call.
    full = np.correlate(x, x, mode='full')
    m = min(nlags, N)
    r[:m] = full[N - 1:N - 1 + m]
    return r
def LevinsonDurbin(r, lpcOrder):
    """
    Levinson-Durbin recursion.

    Derives the order-(k+1) LPC coefficients from the order-k coefficients
    recursively.

    r : autocorrelation sequence (needs at least lpcOrder + 1 entries)
    lpcOrder : order of the LPC model
    Returns (a, e): coefficient array ``a`` (with a[0] fixed to 1) and the
    final prediction error ``e``.
    """
    # LPC coefficients (updated recursively).
    # a[0] is fixed to 1, so lpcOrder + 1 slots are needed to hold
    # lpcOrder free coefficients.
    a = np.zeros(lpcOrder + 1)
    e = np.zeros(lpcOrder + 1)

    # Base case k = 1.
    a[0] = 1.0
    a[1] = - r[1] / r[0]
    e[1] = r[0] + r[1] * a[1]
    lam = - r[1] / r[0]

    # Recursively derive the order-(k+1) solution from the order-k solution.
    for k in range(1, lpcOrder):
        # Update lambda (the reflection coefficient).
        lam = 0.0
        for j in range(k + 1):
            lam -= a[j] * r[k + 1 - j]
        lam /= e[k]

        # Update a from the padded vectors U and V (V is U reversed).
        U = [1]
        U.extend([a[i] for i in range(1, k + 1)])
        U.append(0)
        V = [0]
        V.extend([a[i] for i in range(k, 0, -1)])
        V.append(1)
        a = np.array(U) + lam * np.array(V)

        # Update the prediction error e.
        e[k + 1] = e[k] * (1.0 - lam * lam)

    return a, e[-1]
# LPC係数を求める
#
# 入力:信号
# LPCの次数
#
# 出力:LPC係数
# 差
def lpc(s, lpcOrder=32):
    """Estimate LPC coefficients of signal ``s``; returns (a, e) from Levinson-Durbin."""
    autocorrelation = autocorr(s, lpcOrder + 1)
    return LevinsonDurbin(autocorrelation, lpcOrder)
# LPC予測残差を計算する
#
# 入力:LPC係数
# 信号
#
# 出力:LPC予測残差
def residual_error(a, s):
    """Compute the LPC prediction residual of signal ``s`` given coefficients ``a``.

    NOTE(review): the inner term uses a[j] * s[i-j-1] with j starting at 0,
    so a[0] (fixed to 1 by LevinsonDurbin) multiplies s[i-1]. The textbook
    inverse filter is s[i] + sum_{k=1..p} a[k] * s[i-k], which would be
    a[j+1] here — confirm whether this one-tap shift is intentional.
    """
    lpcOrder=len(a)
    r_error=s.copy()
    for i in range(lpcOrder, len(s)):
        for j in range (0,lpcOrder):
            r_error[i] += (a[j] * s[i-j-1])
    # NOTE(review): this zeroes indices 0..lpcOrder-2 only, leaving index
    # lpcOrder-1 as the raw sample; 0:lpcOrder would zero the full warm-up.
    r_error[0:lpcOrder-1]=0.0
    return r_error
| 20.081818 | 60 | 0.460842 |
5616a09b5ffe75f040149140d2efe284d200ca79 | 1,603 | py | Python | modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/verbalizers/abbreviation.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/verbalizers/abbreviation.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | modules/preprocessing/text/NeMo/nemo_text_processing/text_normalization/ar/verbalizers/abbreviation.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.ar.graph_utils import NEMO_NOT_QUOTE, GraphFst
try:
    import pynini
    from pynini.lib import pynutil

    # pynini is an optional dependency; record availability so callers can
    # degrade gracefully when it is not installed.
    PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
    PYNINI_AVAILABLE = False
class AbbreviationFst(GraphFst):
    """
    Finite state transducer for verbalizing abbreviations
    e.g. tokens { abbreviation { value: "A B C" } } -> "ABC"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="abbreviation", kind="verbalize", deterministic=deterministic)
        # Strip the serialized token wrapper, keeping only the quoted value.
        value = pynini.closure(NEMO_NOT_QUOTE, 1)
        graph = pynutil.delete("value: \"") + value + pynutil.delete("\"")
        self.fst = self.delete_tokens(graph).optimize()
| 37.27907 | 102 | 0.726138 |
e705f3d027b0ee0a0b908c368decb9ed49ac8464 | 368 | py | Python | table2ascii/alignment.py | sairamkiran9/table2ascii | 9829e77c2e7ce7ff764cb80dd1d7775a28fc2f16 | [
"MIT"
] | 24 | 2021-04-27T07:10:32.000Z | 2022-03-13T04:32:22.000Z | table2ascii/alignment.py | sairamkiran9/table2ascii | 9829e77c2e7ce7ff764cb80dd1d7775a28fc2f16 | [
"MIT"
] | 11 | 2021-04-27T07:49:28.000Z | 2022-02-27T12:46:56.000Z | table2ascii/alignment.py | sairamkiran9/table2ascii | 9829e77c2e7ce7ff764cb80dd1d7775a28fc2f16 | [
"MIT"
] | 5 | 2021-07-30T00:19:29.000Z | 2022-02-01T07:39:50.000Z | from enum import Enum
class Alignment(Enum):
    """
    Enum for text alignment types within a table cell

    Example::

        from table2ascii import Alignment

        output = table2ascii(
            ...
            alignments=[Alignment.LEFT, Alignment.RIGHT, Alignment.CENTER, Alignment.CENTER]
        )
    """

    LEFT = 0    # left-justify cell text
    CENTER = 1  # center cell text
    RIGHT = 2   # right-justify cell text
| 17.52381 | 92 | 0.586957 |
54bdee0bb0208229a0627d700b5665e3513b2b78 | 13,355 | py | Python | pandas/core/indexers/objects.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexers/objects.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexers/objects.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | null | null | null | """Indexer objects for computing start/end window bounds for rolling operations"""
from __future__ import annotations
from datetime import timedelta
import numpy as np
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import ensure_platform_int
from pandas.tseries.offsets import Nano
get_window_bounds_doc = """
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
step : int, default None
step passed from the top level rolling API
.. versionadded:: 1.5
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
class BaseIndexer:
    """Base class for window bounds calculations."""

    def __init__(
        self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs
    ):
        """
        Parameters
        ----------
        index_array : np.ndarray, optional
            Values used by variable-length subclasses to size each window.
        window_size : int, default 0
            Number of rows in each window.
        **kwargs :
            keyword arguments that will be available when get_window_bounds is called
        """
        self.index_array = index_array
        self.window_size = window_size
        # Set user defined kwargs as attributes that can be used in get_window_bounds
        for key, value in kwargs.items():
            setattr(self, key, value)

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Subclasses must implement the actual bounds computation.
        raise NotImplementedError
class FixedWindowIndexer(BaseIndexer):
    """Creates window boundaries that are of fixed length."""

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # For a centered window the label sits (window_size - 1) // 2 ahead.
        offset = (self.window_size - 1) // 2 if center else 0

        end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64")
        start = end - self.window_size
        if closed in ("left", "both"):
            start -= 1
        if closed in ("left", "neither"):
            end -= 1

        # Keep every bound inside [0, num_values].
        start = np.clip(start, 0, num_values)
        end = np.clip(end, 0, num_values)

        return start, end
class VariableWindowIndexer(BaseIndexer):
    """Creates window boundaries that are of variable length, namely for time series."""

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        if step is not None:
            raise NotImplementedError("step not implemented for variable window")

        # Delegate to the cython implementation; index_array supplies the
        # coordinates that determine each window's span.
        # error: Argument 4 to "calculate_variable_window_bounds" has incompatible
        # type "Optional[bool]"; expected "bool"
        # error: Argument 6 to "calculate_variable_window_bounds" has incompatible
        # type "Optional[ndarray]"; expected "ndarray"
        return calculate_variable_window_bounds(
            num_values,
            self.window_size,
            min_periods,
            center,  # type: ignore[arg-type]
            closed,
            1,
            self.index_array,  # type: ignore[arg-type]
        )
class VariableOffsetWindowIndexer(BaseIndexer):
    """Calculate window boundaries based on a non-fixed offset such as a BusinessDay."""

    def __init__(
        self,
        index_array: np.ndarray | None = None,
        window_size: int = 0,
        index=None,
        offset=None,
        **kwargs,
    ):
        super().__init__(index_array, window_size, **kwargs)
        # `index` supplies per-row coordinates; `offset` is subtracted from each
        # coordinate to obtain the window's left bound.
        self.index = index
        self.offset = offset

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        if step is not None:
            raise NotImplementedError("step not implemented for variable offset window")
        if num_values <= 0:
            return np.empty(0, dtype="int64"), np.empty(0, dtype="int64")

        # if windows is variable, default is 'right', otherwise default is 'both'
        if closed is None:
            closed = "right" if self.index is not None else "both"

        right_closed = closed in ["right", "both"]
        left_closed = closed in ["left", "both"]

        # Detect whether the index is decreasing so comparisons can be flipped.
        if self.index[num_values - 1] < self.index[0]:
            index_growth_sign = -1
        else:
            index_growth_sign = 1

        start = np.empty(num_values, dtype="int64")
        start.fill(-1)
        end = np.empty(num_values, dtype="int64")
        end.fill(-1)

        start[0] = 0

        # right endpoint is closed
        if right_closed:
            end[0] = 1
        # right endpoint is open
        else:
            end[0] = 0

        # start is start of slice interval (including)
        # end is end of slice interval (not including)
        for i in range(1, num_values):
            end_bound = self.index[i]
            start_bound = self.index[i] - index_growth_sign * self.offset

            # left endpoint is closed
            if left_closed:
                start_bound -= Nano(1)

            # advance the start bound until we are
            # within the constraint
            start[i] = i
            for j in range(start[i - 1], i):
                if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):
                    start[i] = j
                    break

            # end bound is previous end
            # or current index
            if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):
                end[i] = i + 1
            else:
                end[i] = end[i - 1]

            # right endpoint is open
            if not right_closed:
                end[i] -= 1

        return start, end
class ExpandingIndexer(BaseIndexer):
    """Calculate expanding window bounds, mimicking df.expanding()"""

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        if step is not None:
            raise NotImplementedError("step not implemented for expanding window")

        # Every window starts at row 0 and ends just past the current row.
        window_ends = np.arange(1, num_values + 1, dtype=np.int64)
        window_starts = np.zeros(num_values, dtype=np.int64)
        return window_starts, window_ends
class FixedForwardWindowIndexer(BaseIndexer):
    """
    Creates window boundaries for fixed-length windows that include the
    current row.

    Examples
    --------
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
    >>> df
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0

    >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
    >>> df.rolling(window=indexer, min_periods=1).sum()
         B
    0  1.0
    1  3.0
    2  2.0
    3  4.0
    4  4.0
    """

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Forward-looking windows cannot be centered or use `closed`.
        if center:
            raise ValueError("Forward-looking windows can't have center=True")
        if closed is not None:
            raise ValueError(
                "Forward-looking windows don't support setting the closed argument"
            )
        if step is None:
            step = 1

        # Window [i, i + window_size) for each sampled row i.
        start = np.arange(0, num_values, step, dtype="int64")
        end = start + self.window_size
        if self.window_size:
            end = np.clip(end, 0, num_values)

        return start, end
class GroupbyIndexer(BaseIndexer):
    """Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""

    def __init__(
        self,
        index_array: np.ndarray | None = None,
        window_size: int | BaseIndexer = 0,
        groupby_indices: dict | None = None,
        window_indexer: type[BaseIndexer] = BaseIndexer,
        indexer_kwargs: dict | None = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        index_array : np.ndarray or None
            np.ndarray of the index of the original object that we are performing
            a chained groupby operation over. This index has been pre-sorted relative to
            the groups
        window_size : int or BaseIndexer
            window size during the windowing operation
        groupby_indices : dict or None
            dict of {group label: [positional index of rows belonging to the group]}
        window_indexer : BaseIndexer
            BaseIndexer class determining the start and end bounds of each group
        indexer_kwargs : dict or None
            Custom kwargs to be passed to window_indexer
        **kwargs :
            keyword arguments that will be available when get_window_bounds is called
        """
        self.groupby_indices = groupby_indices or {}
        self.window_indexer = window_indexer
        # Copy so the pop() below never mutates the caller's dict.
        self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {}
        super().__init__(
            index_array=index_array,
            window_size=self.indexer_kwargs.pop("window_size", window_size),
            **kwargs,
        )

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        if step is not None:
            raise NotImplementedError("step not implemented for groupby window")

        # 1) For each group, get the indices that belong to the group
        # 2) Use the indices to calculate the start & end bounds of the window
        # 3) Append the window bounds in group order
        start_arrays = []
        end_arrays = []
        window_indices_start = 0
        for key, indices in self.groupby_indices.items():
            index_array: np.ndarray | None

            if self.index_array is not None:
                index_array = self.index_array.take(ensure_platform_int(indices))
            else:
                index_array = self.index_array
            # Compute per-group bounds with the wrapped indexer.
            indexer = self.window_indexer(
                index_array=index_array,
                window_size=self.window_size,
                **self.indexer_kwargs,
            )
            start, end = indexer.get_window_bounds(
                len(indices), min_periods, center, closed, step
            )
            start = start.astype(np.int64)
            end = end.astype(np.int64)
            assert len(start) == len(
                end
            ), "these should be equal in length from get_window_bounds"
            # Cannot use groupby_indices as they might not be monotonic with the object
            # we're rolling over
            window_indices = np.arange(
                window_indices_start, window_indices_start + len(indices)
            )
            window_indices_start += len(indices)
            # Extend as we'll be slicing window like [start, end)
            window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(
                np.int64, copy=False
            )
            start_arrays.append(window_indices.take(ensure_platform_int(start)))
            end_arrays.append(window_indices.take(ensure_platform_int(end)))
        if len(start_arrays) == 0:
            return np.array([], dtype=np.int64), np.array([], dtype=np.int64)
        start = np.concatenate(start_arrays)
        end = np.concatenate(end_arrays)
        return start, end
class ExponentialMovingWindowIndexer(BaseIndexer):
    """Calculate ewm window bounds (the entire window)"""

    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        if step is not None:
            # fix: error message previously misspelled "exponentail"
            raise NotImplementedError(
                "step not implemented for exponential moving window"
            )
        # ewm always aggregates over the full history: one window [0, num_values).
        return (
            np.array([0], dtype=np.int64),
            np.array([num_values], dtype=np.int64),
        )
| 32.180723 | 88 | 0.590565 |
72c8f20ed1050e942337f10d9f953dc81a99f1b7 | 575 | py | Python | mpids/MPInumpy/errors.py | edgargabriel/mpids | 170f402ecea5af0db4eee39e8d426884dce12ad6 | [
"BSD-2-Clause"
] | 1 | 2020-01-22T03:27:31.000Z | 2020-01-22T03:27:31.000Z | mpids/MPInumpy/errors.py | jrodgers01d/mpids | f771b1d25eba5f5dc8e30e5d86ee0251775b9da1 | [
"BSD-2-Clause"
] | 1 | 2020-05-04T20:25:55.000Z | 2020-05-04T20:25:55.000Z | mpids/MPInumpy/errors.py | jrodgers01d/mpids | f771b1d25eba5f5dc8e30e5d86ee0251775b9da1 | [
"BSD-2-Clause"
] | 2 | 2019-04-08T03:01:31.000Z | 2020-04-27T15:56:28.000Z | class MPInumpyError(Exception):
""" Base exception class for MPInumpy errors. """
pass
class InvalidDistributionError(MPInumpyError):
    """ Exception class for when an unsupported distribution is encountered. """
    pass
# NOTE(review): this class shadows the builtin `ValueError` inside this module;
# callers must qualify which one they catch (errors.ValueError vs builtin).
class ValueError(MPInumpyError):
    """ Exception class for when an invalid value is encountered. """
    pass
class NotSupportedError(MPInumpyError):
    """ Exception class for when a numpy feature is not supported. """
    pass
# NOTE(review): this class shadows the builtin `TypeError` inside this module.
class TypeError(MPInumpyError):
    """ Exception class for when invalid data type is supplied. """
    pass
| 28.75 | 79 | 0.718261 |
cb7003b489f2ecbff4b6b229d094593b21eff17a | 2,751 | py | Python | examples/01-filter/glyphs.py | ssg-aero/pyvista | 5150b062cf835c6c6a44f6aefa4d53a1ad832ba3 | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | examples/01-filter/glyphs.py | ssg-aero/pyvista | 5150b062cf835c6c6a44f6aefa4d53a1ad832ba3 | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | examples/01-filter/glyphs.py | ssg-aero/pyvista | 5150b062cf835c6c6a44f6aefa4d53a1ad832ba3 | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | """
.. _glyph_example:
Plotting Glyphs (Vectors or PolyData)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use vectors in a dataset to plot and orient glyphs/geometric objects.
"""
# sphinx_gallery_thumbnail_number = 4
import pyvista as pv
from pyvista import examples
import numpy as np
###############################################################################
# Glyphing can be done via the :func:`pyvista.DataSetFilters.glyph` filter
mesh = examples.download_carotid().threshold(145, scalars="scalars")
mask = mesh['scalars'] < 210
mesh['scalars'][mask] = 0  # null out smaller vectors

# Make a geometric object to use as the glyph
geom = pv.Arrow()  # This could be any dataset

# Perform the glyph (orient/scale each arrow by the mesh's vector/scalar arrays)
glyphs = mesh.glyph(orient="vectors", scale="scalars", factor=0.003, geom=geom)

# plot using the plotting class
pl = pv.Plotter()
pl.add_mesh(glyphs, show_scalar_bar=False, lighting=False, cmap='coolwarm')
pl.camera_position = [(146.53, 91.28, 21.70),
                      (125.00, 94.45, 19.81),
                      (-0.086, 0.007, 0.996)]  # view only part of the vector field
cpos = pl.show(return_cpos=True)
###############################################################################
# Another approach is to load the vectors directly to the mesh object and then
# access the :attr:`pyvista.DataSet.arrows` property.
sphere = pv.Sphere(radius=3.14)

# make cool swirly pattern (one vector per mesh point)
vectors = np.vstack(
    (
        np.sin(sphere.points[:, 0]),
        np.cos(sphere.points[:, 1]),
        np.cos(sphere.points[:, 2]),
    )
).T

# add and scale
sphere["vectors"] = vectors * 0.3
sphere.set_active_vectors("vectors")

# plot just the arrows
sphere.arrows.plot()

###############################################################################
# Plot the arrows and the sphere.
p = pv.Plotter()
p.add_mesh(sphere.arrows, lighting=False,
           scalar_bar_args={'title': "Vector Magnitude"})
p.add_mesh(sphere, color="grey", ambient=0.6, opacity=0.5, show_edges=False)
p.show()
###############################################################################
# Subset of Glyphs
# ++++++++++++++++
#
# Sometimes you might not want glyphs for every node in the input dataset. In
# this case, you can choose to build glyphs for a subset of the input dataset
# by using a merging tolerance. Here we specify a merging tolerance of five
# percent which equates to five percent of the bounding box's length.

# Example dataset with normals
mesh = examples.load_random_hills()

# create a subset of arrows using the glyph filter
arrows = mesh.glyph(scale="Normals", orient="Normals", tolerance=0.05)

p = pv.Plotter()
p.add_mesh(arrows, color="black")
p.add_mesh(mesh, scalars="Elevation", cmap="terrain", smooth_shading=True)
p.show()
| 31.261364 | 83 | 0.610687 |
5870b59aa8db0ed10de96551d7529cd93ae39241 | 6,452 | py | Python | src/deepqmc/extra/cli.py | rickyHong/deepqmc | 529018964898e391f989acb03cc50da74538ac95 | [
"MIT"
] | null | null | null | src/deepqmc/extra/cli.py | rickyHong/deepqmc | 529018964898e391f989acb03cc50da74538ac95 | [
"MIT"
] | null | null | null | src/deepqmc/extra/cli.py | rickyHong/deepqmc | 529018964898e391f989acb03cc50da74538ac95 | [
"MIT"
] | null | null | null | import logging
import sys
import time
import traceback
from itertools import count
from math import inf
from pathlib import Path
import click
import torch
from torch.utils.tensorboard import SummaryWriter
from ..errors import TrainingCrash
from ..io import wf_from_file
from ..train import train
__all__ = ()
log = logging.getLogger(__name__)
# Orchestrates several parallel training tasks that share a filesystem.
# Each task trains in cycles of ``respawn`` steps; after every cycle it
# writes a checkpoint, waits for its sibling tasks (discovered by globbing
# the shared work directory), keeps the better-scoring half of all
# checkpoints, and respawns its own state from one of them.
@click.command('train-multi') # noqa: C901
@click.argument('workdir', type=click.Path(exists=True))
@click.argument('respawn', type=int)
@click.option('--multi-part', default=0)
@click.option('--timeout-short', default=30 * 60)
@click.option('--timeout-long', default=2 * 60 * 60)
@click.option('--check-interval', default=30)
@click.option('--cuda/--no-cuda', default=True)
@click.option('--max-restarts', default=3, show_default=True)
@click.option('--hook', is_flag=True)
def train_multi_at(
    workdir,
    respawn,
    multi_part,
    timeout_short,
    timeout_long,
    check_interval,
    cuda,
    max_restarts,
    hook,
):
    workdir = Path(workdir).resolve()
    # The task's rank is encoded as a numeric path component, ``multi_part``
    # levels up from the end of the work directory path.
    rank = int(workdir.parts[::-1][multi_part])
    if hook:
        log.info('Importing a dlqmc hook')
        sys.path.append(str(workdir))
        import dlqmc_hook # noqa: F401
    state = None
    chkpts = None
    for cycle in count():
        # Global step number at which this cycle is supposed to end.
        end_step = (cycle + 1) * respawn
        for attempt in range(max_restarts + 1):
            log.info('Initializing a new wave function')
            wf, params, state_from_file = wf_from_file(workdir)
            # Prefer in-memory state carried over from a crash/respawn;
            # otherwise fall back to state restored from disk.
            state = state or state_from_file
            if cuda:
                log.info('Moving to GPU...')
                wf.cuda()
                log.info('Moved to GPU')
            try:
                interrupted = train(
                    wf,
                    workdir=workdir,
                    state=state,
                    chkpts=chkpts,
                    raise_blowup=False,
                    save_every=respawn,
                    return_every=respawn,
                    blowup_threshold=inf,
                    **params.get('train_kwargs', {}),
                )
            except TrainingCrash as e:
                log.warning(f'Training crash in cycle {cycle}, attempt {attempt}')
                log.warning('\n' + traceback.format_exc().strip())
                state, chkpts = e.state, e.chkpts
                # Give up on this cycle if out of restarts, or if no usable
                # state exists, or if training crashed before making any
                # progress within this cycle.
                if (
                    attempt == max_restarts
                    or not state
                    or state['step'] <= cycle * respawn
                ):
                    log.warning('Aborting cycle')
                    # A .STOP marker tells sibling tasks not to wait for a
                    # checkpoint from this task for this cycle.
                    (workdir / 'chkpts' / f'state-{end_step:05d}.STOP').touch()
                    interrupted = True
                    break
                if state:
                    log.warning(f'Restarting from step {state["step"]}')
                else:
                    log.warning('Restarting from beginning')
            else:
                break
        if not interrupted:
            # train() ran to completion without requesting a respawn.
            return
        start = time.time()
        while True:
            now = time.time()
            # Root shared by all sibling tasks; the glob pattern replaces this
            # task's rank component with a wildcard.
            root = workdir.parents[multi_part]
            stem = ('*',) + workdir.parts[::-1][:multi_part]
            # NOTE(review): the result of this glob call is discarded — the
            # identical expression is re-evaluated on the next line, so this
            # statement appears to be dead code.
            root.glob('/'.join(stem + ('param.toml',)))
            n_tasks = len(list(root.glob('/'.join(stem + ('param.toml',)))))
            # Checkpoints produced by siblings for this cycle, keyed by rank.
            all_states = {
                int(p.parts[-3 - multi_part]): p
                for p in root.glob(
                    '/'.join(stem + (f'chkpts/state-{end_step:05d}.pt',))
                )
            }
            # Siblings that aborted this cycle (.STOP markers) map to None.
            all_stops = {
                int(p.parts[-3 - multi_part]): None
                for p in root.glob(
                    '/'.join(stem + (f'chkpts/state-{end_step:05d}.STOP',))
                )
            }
            all_states = {**all_states, **all_stops}
            n_all_states = len(all_states)
            log.info(f'{n_all_states}/{n_tasks} states ready')
            if n_all_states < n_tasks / 2 and now - start < timeout_long:
                log.info('Missing >1/2 states and long timeout not up, waiting...')
                time.sleep(check_interval)
                continue
            if n_all_states < n_tasks and now - start < timeout_short:
                log.info('Missing some states and short timeout not up, waiting...')
                time.sleep(check_interval)
                continue
            # Load real checkpoints only (stopped tasks contributed None).
            all_states = [(p, torch.load(p)) for p in all_states.values() if p]
            log.info(f'Have {len(all_states)} states for respawning')
            if not all_states:
                log.error('No states for respawning, abort')
                return
            # Keep the better half (by the monitored 'mean_slow' metric) and
            # deterministically pick one based on this task's rank.
            all_states.sort(key=lambda x: x[1]['monitor'].mean_of('mean_slow'))
            all_states = all_states[: n_tasks // 2]
            path, state = all_states[rank % len(all_states)]
            log.info(f'Respawning from {path}')
            break
def get_status(path):
    """Summarize a single training log file.

    Parameters
    ----------
    path : str or Path
        Path to the log file.

    Returns
    -------
    dict
        ``modtime`` — the file's last-modification time as a
        ``'%Y-%m-%d %H:%M:%S'`` string; ``restarts`` — number of lines
        containing ``'Restarting'``; ``line`` — the last line containing
        ``'E='`` or ``'energy ='``, stripped (empty string if none).
    """
    path = Path(path).resolve()
    line = ''
    restarts = 0
    # Stream the file line by line instead of materializing it with
    # readlines(); also avoids the ambiguous single-letter name ``l``.
    with path.open() as f:
        for entry in f:
            if 'E=' in entry or 'energy =' in entry:
                # Keep only the most recent energy line.
                line = entry
            elif 'Restarting' in entry:
                restarts += 1
    modtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(path.stat().st_mtime))
    return {'modtime': modtime, 'restarts': restarts, 'line': line.strip()}
def get_status_multi(paths):
    """Yield a status dict for every log file in *paths*, in sorted order.

    Each yielded dict is the result of :func:`get_status` for that file,
    augmented with the log's parent directory under the ``'path'`` key.
    """
    for entry in sorted(paths):
        log_file = Path(entry)
        status_info = get_status(log_file)
        yield {'path': log_file.parent, **status_info}
@click.command()
@click.argument('paths', nargs=-1, type=click.Path(exists=True, dir_okay=False))
def status(paths):
    # Print a one-line summary (energy line, mtime, restart count, path)
    # for each given log file.
    for info in get_status_multi(paths):
        click.echo(
            '{line} -- {modtime}, restarts: {restarts} | {path}'.format_map(info)
        )
# Writes constant-valued scalar series into TensorBoard logs so they render
# as horizontal reference lines next to real training curves.  Each line sits
# at HF + fraction * (exact - HF), i.e. a given fraction of the correlation
# energy between the Hartree-Fock and exact values.
@click.command()
@click.argument('basedir', type=click.Path(exists=False))
@click.argument('HF', type=float)
@click.argument('exact', type=float)
@click.option('--fractions', default='0,90,99,100', type=str)
@click.option('--steps', '-n', default=2_000, type=int)
def draw_hlines(basedir, hf, exact, fractions, steps):
    basedir = Path(basedir)
    # Fractions are given in percent on the command line.
    fractions = [float(x) / 100 for x in fractions.split(',')]
    for fraction in fractions:
        value = hf + fraction * (exact - hf)
        workdir = basedir / f'line-{value:.3f}'
        with SummaryWriter(log_dir=workdir, flush_secs=15, purge_step=0) as writer:
            # Emit the same value at every step so TensorBoard draws a flat line.
            for step in range(steps):
                writer.add_scalar('E_loc_loss/mean', value, step)
                writer.add_scalar('E_loc/mean', value, step)
| 35.844444 | 86 | 0.550372 |
8e08fef2dc462def0cec124000388df8542902d7 | 3,757 | py | Python | tests/df_test.py | UKHomeOffice/dq-tf-datafeeds | dac0b6081d32f2c9f8c6f5d47d8ed2b85e78b526 | [
"MIT"
] | null | null | null | tests/df_test.py | UKHomeOffice/dq-tf-datafeeds | dac0b6081d32f2c9f8c6f5d47d8ed2b85e78b526 | [
"MIT"
] | 2 | 2019-02-18T12:45:15.000Z | 2019-10-30T14:31:24.000Z | tests/df_test.py | UKHomeOffice/dq-tf-datafeeds | dac0b6081d32f2c9f8c6f5d47d8ed2b85e78b526 | [
"MIT"
] | 1 | 2021-04-11T09:09:09.000Z | 2021-04-11T09:09:09.000Z | # pylint: disable=missing-docstring, line-too-long, protected-access, E1101, C0202, E0602, W0109
import unittest
from runner import Runner
class TestE2E(unittest.TestCase):
@classmethod
def setUpClass(self):
self.snippet = """
provider "aws" {
region = "eu-west-2"
skip_credentials_validation = true
skip_get_ec2_platforms = true
}
module "data_feeds" {
source = "./mymodule"
providers = {
aws = aws
}
appsvpc_id = "1234"
opssubnet_cidr_block = "1.2.3.0/24"
data_feeds_cidr_block = "10.1.4.0/24"
data_feeds_cidr_block_az2 = "10.1.5.0/24"
peering_cidr_block = "1.1.1.0/24"
az = "eu-west-2a"
az2 = "eu-west-2b"
naming_suffix = "apps-preprod-dq"
environment = "prod"
rds_enhanced_monitoring_role = "arn:aws:iam::123456789:role/rds-enhanced-monitoring-role"
}
"""
self.runner = Runner(self.snippet)
self.result = self.runner.result
def test_data_feeds(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_subnet.data_feeds", "cidr_block"), "10.1.4.0/24")
def test_name_suffix_data_feeds(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_subnet.data_feeds", "tags"), {"Name": "subnet-datafeeds-apps-preprod-dq"})
def test_name_suffix_df_db(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_security_group.df_db", "tags"), {"Name": "sg-db-datafeeds-apps-preprod-dq"})
def test_subnet_group(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_subnet_group.rds", "tags"), {"Name": "rds-subnet-group-datafeeds-apps-preprod-dq"})
def test_az2_subnet(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_subnet.data_feeds_az2", "tags"), {"Name": "az2-subnet-datafeeds-apps-preprod-dq"})
def test_datafeed_rds_name(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_instance.datafeed_rds", "tags"), {"Name": "postgres-datafeeds-apps-preprod-dq"})
def test_datafeed_rds_id(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_instance.datafeed_rds", "identifier"), "postgres-datafeeds-apps-preprod-dq")
def test_datafeed_rds_deletion_protection(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_instance.datafeed_rds", "deletion_protection"), True)
def test_datafeed_rds_ca_cert_identifier(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_instance.datafeed_rds", "ca_cert_identifier"), "rds-ca-2019")
def test_datafeed_rds_backup_window(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_instance.datafeed_rds", "backup_window"), "00:00-01:00")
def test_datafeed_rds_maintenance_window(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_instance.datafeed_rds", "maintenance_window"), "mon:01:00-mon:02:00")
def test_datafeed_rds_engine_version(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_instance.datafeed_rds", "engine_version"), "10.13")
def test_datafeed_rds_apply_immediately(self):
self.assertEqual(self.runner.get_value("module.data_feeds.aws_db_instance.datafeed_rds", "apply_immediately"), False)
if __name__ == '__main__':
    # Allow running this test module directly with ``python df_test.py``.
    unittest.main()
| 46.9625 | 156 | 0.652116 |
c0bc7be0dc00e8821faf91f1d05dfc8b7e2192b5 | 1,169 | py | Python | __openerp__.py | lbk0116/Inventory | ad9ff0b5ddf8550a0375971a34d6c820252121fd | [
"Apache-2.0"
] | 3 | 2018-11-22T11:38:56.000Z | 2022-03-22T03:55:57.000Z | __openerp__.py | lbk0116/Inventory | ad9ff0b5ddf8550a0375971a34d6c820252121fd | [
"Apache-2.0"
] | null | null | null | __openerp__.py | lbk0116/Inventory | ad9ff0b5ddf8550a0375971a34d6c820252121fd | [
"Apache-2.0"
] | 3 | 2016-11-14T06:58:15.000Z | 2020-03-12T12:49:06.000Z | {
# The human-readable name of your module, displayed in the interface
'name' : "Asset_management" ,
# A more extensive description
'description' : """
""",
# Which modules must be installed for this one to work
'depends' : ['base'],
'category': 'assetmanagement',
# data files which are always installed
'data': [
'security/ir.model.access.csv',
'views/asset_management_view.xml',
'views/asset_management_lend_view.xml',
# 'views/asset_storage_workflow.xml',
# 'views/asset_get_workflow.xml',
#'views/asset_lend_workflow.xml',
# 'views/asset_apply_workflow.xml',
'templates.xml',
'data.xml',
'security/resource_security.xml',
'views/asset_management_link.xml',
'views/asset_management_menu.xml',
],
# data files which are only installed in "demonstration mode"
'demo': ['demo.xml' ,
],
'application': True,
'qweb':[
'static/src/xml/asset_management.xml',
],
}
| 34.382353 | 68 | 0.54491 |
2be4fd3e7d4a1269774d7424225af2166308ac99 | 66,222 | py | Python | empyrical/stats.py | RichardDale/empyrical-reloaded | ffb97de63acef5030bcf8f26f70be08208094ba5 | [
"Apache-2.0"
] | 9 | 2021-04-23T04:07:10.000Z | 2022-02-16T07:49:15.000Z | empyrical/stats.py | RichardDale/empyrical-reloaded | ffb97de63acef5030bcf8f26f70be08208094ba5 | [
"Apache-2.0"
] | 3 | 2021-08-23T07:09:59.000Z | 2022-01-31T07:24:17.000Z | empyrical/stats.py | RichardDale/empyrical-reloaded | ffb97de63acef5030bcf8f26f70be08208094ba5 | [
"Apache-2.0"
] | 4 | 2021-04-27T07:18:54.000Z | 2022-02-12T15:55:18.000Z | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pandas as pd
import numpy as np
from math import pow
from scipy import stats, optimize
from sys import float_info
from .utils import nanmean, nanstd, nanmin, up, down, roll, rolling_window
from .periods import ANNUALIZATION_FACTORS, APPROX_BDAYS_PER_YEAR
from .periods import DAILY, WEEKLY, MONTHLY, QUARTERLY, YEARLY
def _create_unary_vectorized_roll_function(function):
    # Factory: wrap a one-array metric (e.g. ``max_drawdown``) into a
    # rolling-window variant.  The inner docstring is a template; the
    # ``.format`` call at the bottom fills in the wrapped function's name,
    # so its ``{...}`` placeholders must not be altered.
    def unary_vectorized_roll(arr, window, out=None, **kwargs):
        """
        Computes the {human_readable} measure over a rolling window.
        Parameters
        ----------
        arr : array-like
            The array to compute the rolling {human_readable} over.
        window : int
            Size of the rolling window in terms of the periodicity of the data.
        out : array-like, optional
            Array to use as output buffer.
            If not passed, a new array will be created.
        **kwargs
            Forwarded to :func:`~empyrical.{name}`.
        Returns
        -------
        rolling_{name} : array-like
            The rolling {human_readable}.
        """
        allocated_output = out is None
        if len(arr):
            # Each row of the (transposed) rolling-window view is one window;
            # the wrapped metric reduces along axis 0.
            out = function(
                rolling_window(_flatten(arr), min(len(arr), window)).T,
                out=out,
                **kwargs,
            )
        else:
            out = np.empty(0, dtype="float64")
        # Only wrap into a Series when we allocated the buffer ourselves.
        if allocated_output and isinstance(arr, pd.Series):
            out = pd.Series(out, index=arr.index[-len(out) :])
        return out
    unary_vectorized_roll.__doc__ = unary_vectorized_roll.__doc__.format(
        name=function.__name__,
        human_readable=function.__name__.replace("_", " "),
    )
    return unary_vectorized_roll
def _create_binary_vectorized_roll_function(function):
    # Factory: wrap a two-array metric (e.g. ``excess_sharpe``) into a
    # rolling-window variant.  As in the unary factory, the inner docstring
    # is a ``.format`` template and its ``{...}`` placeholders are filled in
    # below — do not edit them.
    def binary_vectorized_roll(lhs, rhs, window, out=None, **kwargs):
        """
        Computes the {human_readable} measure over a rolling window.
        Parameters
        ----------
        lhs : array-like
            The first array to pass to the rolling {human_readable}.
        rhs : array-like
            The second array to pass to the rolling {human_readable}.
        window : int
            Size of the rolling window in terms of the periodicity of the data.
        out : array-like, optional
            Array to use as output buffer.
            If not passed, a new array will be created.
        **kwargs
            Forwarded to :func:`~empyrical.{name}`.
        Returns
        -------
        rolling_{name} : array-like
            The rolling {human_readable}.
        """
        allocated_output = out is None
        if window >= 1 and len(lhs) and len(rhs):
            out = function(
                rolling_window(_flatten(lhs), min(len(lhs), window)).T,
                rolling_window(_flatten(rhs), min(len(rhs), window)).T,
                out=out,
                **kwargs,
            )
        elif allocated_output:
            # Degenerate window or empty inputs: empty result.
            out = np.empty(0, dtype="float64")
        else:
            # Caller-supplied buffer: fill with NaN instead of reallocating.
            out[()] = np.nan
        if allocated_output:
            # 1-D results become a Series, 2-D results a DataFrame, aligned
            # to the tail of the left-hand input's index.
            if out.ndim == 1 and isinstance(lhs, pd.Series):
                out = pd.Series(out, index=lhs.index[-len(out) :])
            elif out.ndim == 2 and isinstance(lhs, pd.Series):
                out = pd.DataFrame(out, index=lhs.index[-len(out) :])
        return out
    binary_vectorized_roll.__doc__ = binary_vectorized_roll.__doc__.format(
        name=function.__name__,
        human_readable=function.__name__.replace("_", " "),
    )
    return binary_vectorized_roll
def _flatten(arr):
return arr if not isinstance(arr, pd.Series) else arr.values
def _adjust_returns(returns, adjustment_factor):
"""
Returns the returns series adjusted by adjustment_factor. Optimizes for the
case of adjustment_factor being 0 by returning returns itself, not a copy!
Parameters
----------
returns : pd.Series or np.ndarray
adjustment_factor : pd.Series or np.ndarray or float or int
Returns
-------
adjusted_returns : array-like
"""
if isinstance(adjustment_factor, (float, int)) and adjustment_factor == 0:
return returns
return returns - adjustment_factor
def annualization_factor(period, annualization):
    """Resolve the number of periods per year.

    Parameters
    ----------
    period : str, optional
        Periodicity of the returns data ('daily', 'weekly', 'monthly', ...).
        Ignored when *annualization* is given.
    annualization : int, optional
        Explicit annual frequency; overrides the *period* lookup.

    Returns
    -------
    annualization_factor : float

    Raises
    ------
    ValueError
        If *period* is not a known periodicity and no explicit
        *annualization* was supplied.
    """
    # An explicit value always wins over the period lookup table.
    if annualization is not None:
        return annualization
    try:
        return ANNUALIZATION_FACTORS[period]
    except KeyError:
        raise ValueError(
            "Period cannot be '{}'. "
            "Can be '{}'.".format(
                period, "', '".join(ANNUALIZATION_FACTORS.keys())
            )
        )
def simple_returns(prices):
    """
    Compute simple returns from a timeseries of prices.

    Parameters
    ----------
    prices : pd.Series, pd.DataFrame or np.ndarray
        Prices of assets in wide-format, with assets as columns,
        and indexed by datetimes.

    Returns
    -------
    returns : array-like
        Returns of assets in wide-format, with assets as columns,
        and index coerced to be tz-aware.
    """
    if isinstance(prices, (pd.DataFrame, pd.Series)):
        out = prices.pct_change().iloc[1:]
    else:
        # Assume np.ndarray.  Use true division into a fresh float array:
        # the previous in-place ``np.divide(diff, prices[:-1], out=diff)``
        # raised a casting error whenever ``prices`` had an integer dtype,
        # because the float quotient could not be written back into the
        # integer diff buffer.
        prices = np.asanyarray(prices)
        out = np.true_divide(np.diff(prices, axis=0), prices[:-1])
    return out
def cum_returns(returns, starting_value=0, out=None):
    """Compute cumulative returns from simple returns.

    Parameters
    ----------
    returns : pd.Series, np.ndarray, or pd.DataFrame
        Noncumulative simple (decimal) returns; 2-D input is cumulated per
        column.  NaNs are treated as zero returns.
    starting_value : float, optional
        Initial value to compound from.  With the default of 0 the result is
        expressed as a cumulative percentage return instead.
    out : array-like, optional
        Buffer to write the result into; a new array is created when omitted.

    Returns
    -------
    cumulative_returns : array-like
        Series of cumulative returns, wrapped back into a Series/DataFrame
        when the input was pandas and no ``out`` buffer was supplied.
    """
    if len(returns) < 1:
        return returns.copy()

    # Replace NaNs by zero on a copy so the caller's data is untouched.
    nan_locations = np.isnan(returns)
    if np.any(nan_locations):
        returns = returns.copy()
        returns[nan_locations] = 0

    wrap_result = out is None
    if wrap_result:
        out = np.empty_like(returns)

    # Compound growth factors in place: cumprod of (1 + r).
    np.add(returns, 1, out=out)
    out.cumprod(axis=0, out=out)

    if starting_value == 0:
        np.subtract(out, 1, out=out)
    else:
        np.multiply(out, starting_value, out=out)

    if wrap_result:
        if returns.ndim == 1 and isinstance(returns, pd.Series):
            out = pd.Series(out, index=returns.index)
        elif isinstance(returns, pd.DataFrame):
            out = pd.DataFrame(
                out,
                index=returns.index,
                columns=returns.columns,
            )
    return out
def cum_returns_final(returns, starting_value=0):
    """Compute the total compounded return of one or more return series.

    Parameters
    ----------
    returns : pd.DataFrame, pd.Series, or np.ndarray
        Noncumulative simple returns; NaNs are ignored for array input.
    starting_value : float, optional
        Initial value to compound from; with the default of 0 the result is
        a total percentage return.

    Returns
    -------
    total_returns : pd.Series, np.ndarray, or float
        A scalar for 1-D input; a 1-D result (one total per column) for
        2-D input.  NaN for empty input.
    """
    if len(returns) == 0:
        return np.nan

    # Product of growth factors (1 + r); DataFrames reduce column-wise.
    if isinstance(returns, pd.DataFrame):
        total_growth = (returns + 1).prod()
    else:
        total_growth = np.nanprod(returns + 1, axis=0)

    if starting_value == 0:
        return total_growth - 1
    return total_growth * starting_value
def aggregate_returns(returns, convert_to):
    """
    Aggregates returns by week, month, quarter, or year.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    convert_to : str
        Can be 'weekly', 'monthly', 'quarterly' or 'yearly'.

    Returns
    -------
    aggregated_returns : pd.Series
    """

    def cumulate_returns(x):
        # Compound each group's returns and keep the final cumulative value.
        return cum_returns(x).iloc[-1]

    # NOTE(review): the weekly grouping pairs the *calendar* year with the
    # ISO week number; at year boundaries an ISO week can straddle two
    # calendar years and is then split into two groups — confirm intended.
    if convert_to == WEEKLY:
        grouping = [lambda x: x.year, lambda x: x.isocalendar()[1]]
    elif convert_to == MONTHLY:
        grouping = [lambda x: x.year, lambda x: x.month]
    elif convert_to == QUARTERLY:
        grouping = [lambda x: x.year, lambda x: int(math.ceil(x.month / 3.0))]
    elif convert_to == YEARLY:
        grouping = [lambda x: x.year]
    else:
        # Bug fix: the error message previously omitted the supported
        # 'quarterly' option even though the branch above handles it.
        raise ValueError(
            "convert_to must be {}, {}, {} or {}".format(
                WEEKLY, MONTHLY, QUARTERLY, YEARLY
            )
        )
    return returns.groupby(grouping).apply(cumulate_returns)
def max_drawdown(returns, out=None):
    """
    Determines the maximum drawdown of a strategy.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    max_drawdown : float
    Note
    -----
    See https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.
    """
    allocated_output = out is None
    if allocated_output:
        out = np.empty(returns.shape[1:])
    returns_1d = returns.ndim == 1
    if len(returns) < 1:
        out[()] = np.nan
        if returns_1d:
            out = out.item()
        return out
    returns_array = np.asanyarray(returns)
    # Equity curve with one extra leading slot for the starting value, so a
    # drawdown that begins on the very first period is measured correctly.
    cumulative = np.empty(
        (returns.shape[0] + 1,) + returns.shape[1:],
        dtype="float64",
    )
    # The base of 100 is arbitrary; drawdowns are relative ratios.
    cumulative[0] = start = 100
    cum_returns(returns_array, starting_value=start, out=cumulative[1:])
    # Running peak of the equity curve up to each point in time.
    max_return = np.fmax.accumulate(cumulative, axis=0)
    # Worst relative drop from the running peak.  ``nanmin`` comes from
    # .utils — presumably NaN-ignoring like np.nanmin; confirm.
    nanmin((cumulative - max_return) / max_return, axis=0, out=out)
    if returns_1d:
        out = out.item()
    elif allocated_output and isinstance(returns, pd.DataFrame):
        out = pd.Series(out)
    return out
roll_max_drawdown = _create_unary_vectorized_roll_function(max_drawdown)
def annual_return(returns, period=DAILY, annualization=None):
    """Mean annual growth rate of returns (the compound annual growth rate).

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Periodic noncumulative returns
        (see :func:`~empyrical.stats.cum_returns`).
    period : str, optional
        Periodicity of *returns* ('daily', 'weekly', 'monthly'); ignored
        when *annualization* is given.
    annualization : int, optional
        Explicit annual frequency of *returns*, overriding *period*.

    Returns
    -------
    annual_return : float
        Annual return as CAGR; NaN for empty input.
    """
    if len(returns) < 1:
        return np.nan

    ann_factor = annualization_factor(period, annualization)
    years = len(returns) / ann_factor
    # Total growth of one unit of capital, then geometric annualization.
    total_growth = cum_returns_final(returns, starting_value=1)
    return total_growth ** (1 / years) - 1
def cagr(returns, period=DAILY, annualization=None):
    """Compound annual growth rate.

    Alias for :func:`~empyrical.stats.annual_return`; see that function for
    the full parameter description.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily noncumulative returns.
    period : str, optional
        Periodicity of *returns*; ignored when *annualization* is given.
    annualization : int, optional
        Explicit annual frequency of *returns*.

    Returns
    -------
    cagr : float
        The CAGR value.
    """
    return annual_return(returns, period=period, annualization=annualization)
roll_cagr = _create_unary_vectorized_roll_function(cagr)
def annual_volatility(
    returns, period=DAILY, alpha=2.0, annualization=None, out=None
):
    """Annual volatility of a strategy.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Periodic noncumulative returns
        (see :func:`~empyrical.stats.cum_returns`).
    period : str, optional
        Periodicity of *returns*; ignored when *annualization* is given.
    alpha : float, optional
        Scaling relation (Levy stability exponent); 2.0 gives the usual
        square-root-of-time scaling.
    annualization : int, optional
        Explicit annual frequency of *returns*.
    out : array-like, optional
        Buffer to write the result into.

    Returns
    -------
    annual_volatility : float
        NaN when fewer than two observations are available.
    """
    if out is None:
        out = np.empty(returns.shape[1:])
    scalar_result = returns.ndim == 1

    # A standard deviation needs at least two observations.
    if len(returns) < 2:
        out[()] = np.nan
        return out.item() if scalar_result else out

    ann_factor = annualization_factor(period, annualization)
    # Sample standard deviation, scaled by the annualization factor raised
    # to 1/alpha (sqrt for the default alpha of 2).
    nanstd(returns, ddof=1, axis=0, out=out)
    out = np.multiply(out, ann_factor ** (1.0 / alpha), out=out)
    return out.item() if scalar_result else out
roll_annual_volatility = _create_unary_vectorized_roll_function(
annual_volatility,
)
def calmar_ratio(returns, period=DAILY, annualization=None):
    """Calmar ratio (drawdown ratio) of a strategy.

    Annual return divided by the magnitude of the maximum drawdown.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily noncumulative returns
        (see :func:`~empyrical.stats.cum_returns`).
    period : str, optional
        Periodicity of *returns*; ignored when *annualization* is given.
    annualization : int, optional
        Explicit annual frequency of *returns*.

    Returns
    -------
    calmar_ratio : float
        NaN when there is no drawdown or when the ratio is infinite.

    Note
    -----
    See https://en.wikipedia.org/wiki/Calmar_ratio for more details.
    """
    max_dd = max_drawdown(returns=returns)
    # Guard: only a strictly negative drawdown yields a meaningful ratio
    # (also covers NaN, which fails the comparison).
    if not max_dd < 0:
        return np.nan

    ratio = annual_return(
        returns=returns, period=period, annualization=annualization
    ) / abs(max_dd)
    return np.nan if np.isinf(ratio) else ratio
def omega_ratio(
    returns,
    risk_free=0.0,
    required_return=0.0,
    annualization=APPROX_BDAYS_PER_YEAR,
):
    """Omega ratio of a strategy: sum of gains over a threshold divided by
    the sum of losses below it.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily noncumulative returns
        (see :func:`~empyrical.stats.cum_returns`).
    risk_free : int, float
        Constant risk-free return throughout the period.
    required_return : float, optional
        Annual minimum acceptable return; de-annualized to the returns'
        period before use.
    annualization : int, optional
        Factor used to de-annualize *required_return*; pass 1 to skip the
        conversion.

    Returns
    -------
    omega_ratio : float
        NaN for short input, an impossible threshold, or no losses.

    Note
    -----
    See https://en.wikipedia.org/wiki/Omega_ratio for more details.
    """
    if len(returns) < 2:
        return np.nan

    if annualization == 1:
        return_threshold = required_return
    elif required_return <= -1:
        # A total loss (or worse) cannot be de-annualized.
        return np.nan
    else:
        # Convert the annual threshold to a per-period threshold.
        return_threshold = (1 + required_return) ** (1.0 / annualization) - 1

    excess = returns - risk_free - return_threshold
    gains = sum(excess[excess > 0.0])
    losses = -1.0 * sum(excess[excess < 0.0])
    if losses > 0.0:
        return gains / losses
    return np.nan
def sharpe_ratio(
    returns, risk_free=0, period=DAILY, annualization=None, out=None
):
    """
    Determines the Sharpe ratio of a strategy.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    risk_free : int, float
        Constant daily risk-free return throughout the period.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    sharpe_ratio : float
        nan if insufficient length of returns or if adjusted returns are 0.
    Note
    -----
    See https://en.wikipedia.org/wiki/Sharpe_ratio for more details.
    """
    allocated_output = out is None
    if allocated_output:
        out = np.empty(returns.shape[1:])
    return_1d = returns.ndim == 1
    # A standard deviation needs at least two observations.
    if len(returns) < 2:
        out[()] = np.nan
        if return_1d:
            out = out.item()
        return out
    returns_risk_adj = np.asanyarray(_adjust_returns(returns, risk_free))
    ann_factor = annualization_factor(period, annualization)
    # mean / std of the risk-adjusted returns, scaled by sqrt(periods/year).
    np.multiply(
        np.divide(
            nanmean(returns_risk_adj, axis=0),
            nanstd(returns_risk_adj, ddof=1, axis=0),
            out=out,
        ),
        np.sqrt(ann_factor),
        out=out,
    )
    if return_1d:
        out = out.item()
    return out
roll_sharpe_ratio = _create_unary_vectorized_roll_function(sharpe_ratio)
def sortino_ratio(
    returns,
    required_return=0,
    period=DAILY,
    annualization=None,
    out=None,
    _downside_risk=None,
):
    """
    Determines the Sortino ratio of a strategy.
    Parameters
    ----------
    returns : pd.Series or np.ndarray or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
    _downside_risk : float, optional
        The downside risk of the given inputs, if known. Will be calculated if
        not provided.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    sortino_ratio : float or pd.Series
        depends on input type
        series ==> float
        DataFrame ==> pd.Series
    Note
    -----
    See `<https://www.sunrisecapital.com/wp-content/uploads/2014/06/Futures_
    Mag_Sortino_0213.pdf>`__ for more details.
    """
    allocated_output = out is None
    if allocated_output:
        out = np.empty(returns.shape[1:])
    return_1d = returns.ndim == 1
    if len(returns) < 2:
        out[()] = np.nan
        if return_1d:
            out = out.item()
        return out
    adj_returns = np.asanyarray(_adjust_returns(returns, required_return))
    ann_factor = annualization_factor(period, annualization)
    # Annualized mean excess return over the annualized downside deviation.
    average_annual_return = nanmean(adj_returns, axis=0) * ann_factor
    annualized_downside_risk = (
        _downside_risk
        if _downside_risk is not None
        else downside_risk(returns, required_return, period, annualization)
    )
    np.divide(average_annual_return, annualized_downside_risk, out=out)
    if return_1d:
        out = out.item()
    elif isinstance(returns, pd.DataFrame):
        out = pd.Series(out)
    return out
roll_sortino_ratio = _create_unary_vectorized_roll_function(sortino_ratio)
def downside_risk(
    returns, required_return=0, period=DAILY, annualization=None, out=None
):
    """
    Determines the downside deviation below a threshold
    Parameters
    ----------
    returns : pd.Series or np.ndarray or pd.DataFrame
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    required_return: float / series
        minimum acceptable return
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    downside_deviation : float or pd.Series
        depends on input type
        series ==> float
        DataFrame ==> pd.Series
    Note
    -----
    See `<https://www.sunrisecapital.com/wp-content/uploads/2014/06/Futures_
    Mag_Sortino_0213.pdf>`__ for more details, specifically why using the
    standard deviation of the negative returns is not correct.
    """
    allocated_output = out is None
    if allocated_output:
        out = np.empty(returns.shape[1:])
    returns_1d = returns.ndim == 1
    if len(returns) < 1:
        out[()] = np.nan
        if returns_1d:
            out = out.item()
        return out
    ann_factor = annualization_factor(period, annualization)
    # Keep only the below-target part of each excess return: positive
    # deviations are clipped to zero before squaring.
    downside_diff = np.clip(
        _adjust_returns(
            np.asanyarray(returns),
            np.asanyarray(required_return),
        ),
        np.NINF,
        0,
    )
    # Root-mean-square of the downside deviations, then annualized by
    # sqrt(periods per year).
    np.square(downside_diff, out=downside_diff)
    nanmean(downside_diff, axis=0, out=out)
    np.sqrt(out, out=out)
    np.multiply(out, np.sqrt(ann_factor), out=out)
    if returns_1d:
        out = out.item()
    elif isinstance(returns, pd.DataFrame):
        out = pd.Series(out, index=returns.columns)
    return out
roll_downsize_risk = _create_unary_vectorized_roll_function(downside_risk)
def excess_sharpe(returns, factor_returns, out=None):
    """
    Determines the Excess Sharpe of a strategy.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns: float / series
        Benchmark return to compare returns against.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    excess_sharpe : float
    Note
    -----
    The excess Sharpe is a simplified Information Ratio that uses
    tracking error rather than "active risk" as the denominator.
    """
    allocated_output = out is None
    if allocated_output:
        out = np.empty(returns.shape[1:])
    returns_1d = returns.ndim == 1
    if len(returns) < 2:
        out[()] = np.nan
        if returns_1d:
            out = out.item()
        return out
    # Active (excess) return over the benchmark.
    active_return = _adjust_returns(returns, factor_returns)
    # nan_to_num maps a NaN tracking error to 0.0; a subsequent division by
    # zero then follows numpy semantics (inf/nan with a runtime warning).
    tracking_error = np.nan_to_num(nanstd(active_return, ddof=1, axis=0))
    out = np.divide(
        nanmean(active_return, axis=0, out=out),
        tracking_error,
        out=out,
    )
    if returns_1d:
        out = out.item()
    return out
roll_excess_sharpe = _create_binary_vectorized_roll_function(excess_sharpe)
def _to_pandas(ob):
    """Convert an array-like to a pandas object.
    Parameters
    ----------
    ob : array-like
        The object to convert.
    Returns
    -------
    pandas_structure : pd.Series or pd.DataFrame
        The correct structure based on the dimensionality of the data.
    """
    # Already a pandas structure: pass it straight through.
    if isinstance(ob, (pd.Series, pd.DataFrame)):
        return ob
    ndim = ob.ndim
    if ndim == 1:
        return pd.Series(ob)
    if ndim == 2:
        return pd.DataFrame(ob)
    raise ValueError(
        "cannot convert array of dim > 2 to a pandas structure",
    )
def _aligned_series(*many_series):
    """
    Return a new list of series containing the data in the input series, but
    with their indices aligned. NaNs will be filled in for missing values.
    Parameters
    ----------
    *many_series
        The series to align.
    Returns
    -------
    aligned_series : iterable[array-like]
        A new list of series containing the data in the input series, but
        with their indices aligned. NaNs will be filled in for missing values.
    """
    first = many_series[0]
    rest = many_series[1:]
    expected_len = len(first)
    # Fast path: equal-length ndarrays carry no labels, so positional
    # alignment already holds and the inputs can be returned untouched.
    if isinstance(first, np.ndarray) and all(
        len(s) == expected_len and isinstance(s, np.ndarray) for s in rest
    ):
        return many_series
    # pd.concat aligns on the union of all indices, inserting NaN wherever a
    # label is absent from an input.  (DataFrame has no ``itervalues``.)
    joined = pd.concat(map(_to_pandas, many_series), axis=1)
    return (column for _, column in joined.items())
def alpha_beta(
    returns,
    factor_returns,
    risk_free=0.0,
    period=DAILY,
    annualization=None,
    out=None,
):
    """Calculates annualized alpha and beta.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    risk_free : int, float, optional
        Constant risk-free return throughout the period. For example, the
        interest rate on a three month us treasury bill.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    alpha : float
    beta : float
    """
    # Align on labels first, then delegate to the positional implementation.
    aligned_returns, aligned_factor = _aligned_series(returns, factor_returns)
    return alpha_beta_aligned(
        aligned_returns,
        aligned_factor,
        risk_free=risk_free,
        period=period,
        annualization=annualization,
        out=out,
    )
def roll_alpha_beta(returns, factor_returns, window=10, **kwargs):
    """
    Computes alpha and beta over a rolling window.
    Parameters
    ----------
    returns : array-like
        Daily noncumulative returns of the strategy.
    factor_returns : array-like
        Daily noncumulative returns of the benchmark factor.
    window : int
        Size of the rolling window in terms of the periodicity of the data.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    **kwargs
        Forwarded to :func:`~empyrical.alpha_beta`.
    """
    returns, factor_returns = _aligned_series(returns, factor_returns)
    return roll_alpha_beta_aligned(
        returns, factor_returns, window=window, **kwargs
    )
def alpha_beta_aligned(
    returns,
    factor_returns,
    risk_free=0.0,
    period=DAILY,
    annualization=None,
    out=None,
):
    """Calculates annualized alpha and beta.
    If they are pd.Series, expects returns and factor_returns have already
    been aligned on their labels. If np.ndarray, these arguments should have
    the same shape.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    risk_free : int, float, optional
        Constant risk-free return throughout the period. For example, the
        interest rate on a three month us treasury bill.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    alpha : float
    beta : float
    """
    if out is None:
        # Trailing axis of length 2 holds the (alpha, beta) pair.
        out = np.empty(returns.shape[1:] + (2,), dtype="float64")
    alpha_view = out[..., 0]
    beta_view = out[..., 1]
    # Compute beta first so alpha can reuse it instead of recomputing.
    computed_beta = beta_aligned(
        returns, factor_returns, risk_free, out=beta_view
    )
    alpha_aligned(
        returns,
        factor_returns,
        risk_free,
        period,
        annualization,
        out=alpha_view,
        _beta=computed_beta,
    )
    return out
# Rolling-window version of ``alpha_beta_aligned``.
roll_alpha_beta_aligned = _create_binary_vectorized_roll_function(
    alpha_beta_aligned,
)
def alpha(
    returns,
    factor_returns,
    risk_free=0.0,
    period=DAILY,
    annualization=None,
    out=None,
    _beta=None,
):
    """Calculates annualized alpha.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    risk_free : int, float, optional
        Constant risk-free return throughout the period. For example, the
        interest rate on a three month us treasury bill.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
        - See full explanation in :func:`~empyrical.stats.annual_return`.
    _beta : float, optional
        The beta for the given inputs, if already known. Will be calculated
        internally if not provided.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    float
        Alpha.
    """
    # Equal-length ndarrays skip label alignment; anything else is aligned.
    both_ndarrays = isinstance(returns, np.ndarray) and isinstance(
        factor_returns, np.ndarray
    )
    if not both_ndarrays:
        returns, factor_returns = _aligned_series(returns, factor_returns)
    return alpha_aligned(
        returns,
        factor_returns,
        risk_free=risk_free,
        period=period,
        annualization=annualization,
        out=out,
        _beta=_beta,
    )
# Rolling-window version of ``alpha``.
roll_alpha = _create_binary_vectorized_roll_function(alpha)
def alpha_aligned(
    returns,
    factor_returns,
    risk_free=0.0,
    period=DAILY,
    annualization=None,
    out=None,
    _beta=None,
):
    """Calculates annualized alpha.
    If they are pd.Series, expects returns and factor_returns have already
    been aligned on their labels. If np.ndarray, these arguments should have
    the same shape.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    risk_free : int, float, optional
        Constant risk-free return throughout the period. For example, the
        interest rate on a three month us treasury bill.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.
        - See full explanation in :func:`~empyrical.stats.annual_return`.
    _beta : float, optional
        The beta for the given inputs, if already known. Will be calculated
        internally if not provided.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    alpha : float
    """
    allocated_output = out is None
    if allocated_output:
        out = np.empty(returns.shape[1:], dtype="float64")
    # Fewer than two observations: a regression is meaningless, emit NaN.
    if len(returns) < 2:
        out[()] = np.nan
        if returns.ndim == 1:
            out = out.item()
        return out
    ann_factor = annualization_factor(period, annualization)
    # Reuse a precomputed beta when the caller supplies one (alpha_beta
    # passes it in to avoid computing beta twice).
    if _beta is None:
        _beta = beta_aligned(returns, factor_returns, risk_free)
    adj_returns = _adjust_returns(returns, risk_free)
    adj_factor_returns = _adjust_returns(factor_returns, risk_free)
    # Residual (beta-hedged) return stream.
    alpha_series = adj_returns - (_beta * adj_factor_returns)
    # Annualize in place: (1 + mean residual) ** ann_factor - 1, with every
    # intermediate written into ``out`` to avoid temporaries.
    out = np.subtract(
        np.power(
            np.add(nanmean(alpha_series, axis=0, out=out), 1, out=out),
            ann_factor,
            out=out,
        ),
        1,
        out=out,
    )
    if allocated_output and isinstance(returns, pd.DataFrame):
        out = pd.Series(out)
    if returns.ndim == 1:
        out = out.item()
    return out
# Rolling-window version of ``alpha_aligned``.
roll_alpha_aligned = _create_binary_vectorized_roll_function(alpha_aligned)
def beta(returns, factor_returns, risk_free=0.0, out=None):
    """Calculates beta.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    risk_free : int, float, optional
        Constant risk-free return throughout the period. For example, the
        interest rate on a three month us treasury bill.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    beta : float
    """
    # Equal-length ndarrays skip label alignment; anything else is aligned.
    both_ndarrays = isinstance(returns, np.ndarray) and isinstance(
        factor_returns, np.ndarray
    )
    if not both_ndarrays:
        returns, factor_returns = _aligned_series(returns, factor_returns)
    return beta_aligned(
        returns,
        factor_returns,
        risk_free=risk_free,
        out=out,
    )
# Rolling-window version of ``beta``.
roll_beta = _create_binary_vectorized_roll_function(beta)
def beta_aligned(returns, factor_returns, risk_free=0.0, out=None):
    """Calculates beta.
    If they are pd.Series, expects returns and factor_returns have already
    been aligned on their labels. If np.ndarray, these arguments should have
    the same shape.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    risk_free : int, float, optional
        Constant risk-free return throughout the period. For example, the
        interest rate on a three month us treasury bill.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.
    Returns
    -------
    beta : float
        Beta.
    """
    # Cache these as locals since we're going to call them multiple times.
    nan = np.nan
    isnan = np.isnan
    returns_1d = returns.ndim == 1
    # Promote 1-D inputs to column vectors so the 2-D code path below
    # handles both cases uniformly.
    if returns_1d:
        returns = np.asanyarray(returns)[:, np.newaxis]
    if factor_returns.ndim == 1:
        factor_returns = np.asanyarray(factor_returns)[:, np.newaxis]
    # N observations (rows) of M dependent return streams (columns).
    N, M = returns.shape
    if out is None:
        out = np.full(M, nan)
    elif out.ndim == 0:
        out = out[np.newaxis]
    # NOTE(review): the thresholds are asymmetric (returns < 1 vs
    # factor_returns < 2); preserved historical behavior -- confirm intent.
    if len(returns) < 1 or len(factor_returns) < 2:
        out[()] = nan
        if returns_1d:
            out = out.item()
        return out
    # Copy N times as a column vector and fill with nans to have the same
    # missing value pattern as the dependent variable.
    #
    # PERF_TODO: We could probably avoid the space blowup by doing this in
    # Cython.
    # shape: (N, M)
    independent = np.where(
        isnan(returns),
        nan,
        factor_returns,
    )
    # Calculate beta as Cov(X, Y) / Cov(X, X).
    # https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line  # noqa
    #
    # NOTE: The usual formula for covariance is::
    #
    #    mean((X - mean(X)) * (Y - mean(Y)))
    #
    # However, we don't actually need to take the mean of both sides of the
    # product, because of the folllowing equivalence::
    #
    # Let X_res = (X - mean(X)).
    # We have:
    #
    #     mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
    #     (1) = mean((X_res * Y) - (X_res * mean(Y)))
    #     (2) = mean(X_res * Y) - mean(X_res * mean(Y))
    #     (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
    #     (4) = mean(X_res * Y) - 0 * mean(Y)
    #     (5) = mean(X_res * Y)
    #
    #
    # The tricky step in the above derivation is step (4). We know that
    # mean(X_res) is zero because, for any X:
    #
    #     mean(X - mean(X)) = mean(X) - mean(X) = 0.
    #
    # The upshot of this is that we only have to center one of `independent`
    # and `dependent` when calculating covariances. Since we need the centered
    # `independent` to calculate its variance in the next step, we choose to
    # center `independent`.
    ind_residual = independent - nanmean(independent, axis=0)
    covariances = nanmean(ind_residual * returns, axis=0)
    # We end up with different variances in each column here because each
    # column may have a different subset of the data dropped due to missing
    # data in the corresponding dependent column.
    # shape: (M,)
    np.square(ind_residual, out=ind_residual)
    independent_variances = nanmean(ind_residual, axis=0)
    # Near-zero variance in the independent variable makes beta undefined;
    # replace with NaN instead of producing a huge/inf ratio.
    independent_variances[independent_variances < 1.0e-30] = np.nan
    np.divide(covariances, independent_variances, out=out)
    if returns_1d:
        out = out.item()
    return out
# Rolling-window version of ``beta_aligned``.
roll_beta_aligned = _create_binary_vectorized_roll_function(beta_aligned)
def stability_of_timeseries(returns):
    """Determines R-squared of a linear fit to the cumulative
    log returns. Computes an ordinary least squares linear fit,
    and returns R-squared.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    Returns
    -------
    float
        R-squared.
    """
    if len(returns) < 2:
        return np.nan
    # Be tolerant of NaNs: fit only on observed returns.
    clean = np.asanyarray(returns)
    clean = clean[~np.isnan(clean)]
    cum_log_returns = np.log1p(clean).cumsum()
    time_index = np.arange(len(cum_log_returns))
    # R-squared of cumulative log returns regressed on time.
    r_value = stats.linregress(time_index, cum_log_returns).rvalue
    return r_value ** 2
def tail_ratio(returns):
    """Determines the ratio between the right (95%) and left tail (5%).
    For example, a ratio of 0.25 means that losses are four times
    as bad as profits.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    Returns
    -------
    tail_ratio : float
    """
    if len(returns) < 1:
        return np.nan
    clean = np.asanyarray(returns)
    # Be tolerant of nan's
    clean = clean[~np.isnan(clean)]
    if len(clean) < 1:
        return np.nan
    right_tail = np.abs(np.percentile(clean, 95))
    left_tail = np.abs(np.percentile(clean, 5))
    return right_tail / left_tail
def capture(returns, factor_returns, period=DAILY):
    """Compute capture ratio.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly':12
            'weekly': 52
            'daily': 252
    Returns
    -------
    capture_ratio : float
    Note
    ----
    See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for
    details.
    """
    # Ratio of the strategy's annualized return to the benchmark's.
    strategy_annual = annual_return(returns, period=period)
    benchmark_annual = annual_return(factor_returns, period=period)
    return strategy_annual / benchmark_annual
def beta_fragility_heuristic(returns, factor_returns):
    """Estimate fragility to drops in beta.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    Returns
    -------
    float, np.nan
        The beta fragility of the strategy.
    Note
    ----
    A negative return value indicates potential losses
    could follow volatility in beta.
    The magnitude of the negative value indicates the size of
    the potential loss.
    seealso::
        `A New Heuristic Measure of Fragility and
    Tail Risks: Application to Stress Testing`
        https://www.imf.org/external/pubs/ft/wp/2012/wp12216.pdf
        An IMF Working Paper describing the heuristic
    """
    # The heuristic samples three points, so require at least three rows.
    if len(returns) < 3 or len(factor_returns) < 3:
        return np.nan
    aligned = _aligned_series(returns, factor_returns)
    return beta_fragility_heuristic_aligned(*aligned)
def beta_fragility_heuristic_aligned(returns, factor_returns):
    """Estimate fragility to drops in beta
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    Returns
    -------
    float, np.nan
        The beta fragility of the strategy.
    Note
    ----
    If they are pd.Series, expects returns and factor_returns have already
    been aligned on their labels. If np.ndarray, these arguments should
    have the same shape.
    seealso::
        `A New Heuristic Measure of Fragility and
    Tail Risks: Application to Stress Testing`
        https://www.imf.org/external/pubs/ft/wp/2012/wp12216.pdf
        An IMF Working Paper describing the heuristic
    """
    if len(returns) < 3 or len(factor_returns) < 3:
        return np.nan
    # Pair strategy and factor returns, drop rows with missing data, and
    # order the pairs by the factor (benchmark) return.
    pairs = pd.concat(
        [pd.Series(returns), pd.Series(factor_returns)], axis=1
    )
    pairs.columns = ["returns", "factor_returns"]
    pairs = pairs.dropna()
    pairs = pairs.sort_values(by="factor_returns")
    # Median-of-three sample: first, middle and last pair by factor return.
    lo_idx = 0
    mid_idx = int(np.around(len(pairs) / 2, 0))
    hi_idx = len(pairs) - 1
    (lo_ret, lo_factor) = pairs.iloc[lo_idx]
    (mid_ret, mid_factor) = pairs.iloc[mid_idx]
    (hi_ret, hi_factor) = pairs.iloc[hi_idx]
    factor_span = hi_factor - lo_factor
    lo_weight = 0.5
    hi_weight = 0.5
    # Convex-combination weights placing the middle factor return between
    # the two end points.
    if not factor_span == 0:
        lo_weight = (mid_factor - lo_factor) / factor_span
        hi_weight = (hi_factor - mid_factor) / factor_span
    # Fragility: interpolated end returns minus the observed middle return.
    return (lo_weight * lo_ret) + (hi_weight * hi_ret) - mid_ret
def gpd_risk_estimates(returns, var_p=0.01):
    """Estimate VaR and ES using the Generalized Pareto Distribution (GPD)
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    var_p : float
        The percentile to use for estimating the VaR and ES
    Returns
    -------
    [threshold, scale_param, shape_param, var_estimate, es_estimate]
        : list[float]
        threshold - the threshold use to cut off exception tail losses
        scale_param - a parameter (often denoted by sigma, capturing the
            scale, related to variance)
        shape_param - a parameter (often denoted by xi, capturing the shape
            or type of the distribution)
        var_estimate - an estimate for the VaR for the given percentile
        es_estimate - an estimate for the ES for the given percentile
    Note
    ----
    see also::
        `An Application of Extreme Value Theory for Measuring Risk
        <https://link.springer.com/article/10.1007/s10614-006-9025-7>`
        A paper describing how to use the Generalized Pareto
        Distribution to estimate VaR and ES.
    """
    # Too few observations to fit a tail distribution: return all zeros,
    # preserving the input's pandas-ness.
    if len(returns) < 3:
        result = np.zeros(5)
        if isinstance(returns, pd.Series):
            result = pd.Series(result)
        return result
    # BUG FIX: previously this called
    # ``gpd_risk_estimates_aligned(*_aligned_series(returns, var_p))``, but
    # ``var_p`` is a scalar percentile, not a return series -- feeding it to
    # ``_aligned_series`` raised TypeError/AttributeError (``len``/``ndim``
    # on a float) for every input with >= 3 returns.  There is only one
    # series here, so no alignment is needed at all.
    return gpd_risk_estimates_aligned(returns, var_p)
def gpd_risk_estimates_aligned(returns, var_p=0.01):
    """Estimate VaR and ES using the Generalized Pareto Distribution (GPD)
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    var_p : float
        The percentile to use for estimating the VaR and ES
    Returns
    -------
    [threshold, scale_param, shape_param, var_estimate, es_estimate]
        : list[float]
        threshold - the threshold use to cut off exception tail losses
        scale_param - a parameter (often denoted by sigma, capturing the
            scale, related to variance)
        shape_param - a parameter (often denoted by xi, capturing the shape
            or type of the distribution)
        var_estimate - an estimate for the VaR for the given percentile
        es_estimate - an estimate for the ES for the given percentile
    Note
    ----
    seealso::
        `An Application of Extreme Value Theory for Measuring Risk
        <https://link.springer.com/article/10.1007/s10614-006-9025-7>`
        A paper describing how to use the Generalized Pareto
        Distribution to estimate VaR and ES.
    """
    # Default result: all-zero estimates (also returned when the fit fails).
    result = np.zeros(5)
    if not len(returns) < 3:
        DEFAULT_THRESHOLD = 0.2
        MINIMUM_THRESHOLD = 0.000000001
        try:
            returns_array = pd.Series(returns).to_numpy()
        except AttributeError:
            # while zipline requires support for pandas < 0.25
            returns_array = pd.Series(returns).as_matrix()
        # Work on losses as positive magnitudes: negate and keep positives.
        flipped_returns = -1 * returns_array
        losses = flipped_returns[flipped_returns > 0]
        threshold = DEFAULT_THRESHOLD
        finished = False
        scale_param = 0
        shape_param = 0
        # Halve the exceedance threshold until the GPD fit produces a
        # positive shape parameter (fat tail) and a positive VaR estimate,
        # or the threshold becomes negligibly small.
        while not finished and threshold > MINIMUM_THRESHOLD:
            losses_beyond_threshold = losses[losses >= threshold]
            param_result = gpd_loglikelihood_minimizer_aligned(
                losses_beyond_threshold
            )
            if param_result[0] is not False and param_result[1] is not False:
                scale_param = param_result[0]
                shape_param = param_result[1]
                var_estimate = gpd_var_calculator(
                    threshold,
                    scale_param,
                    shape_param,
                    var_p,
                    len(losses),
                    len(losses_beyond_threshold),
                )
                # non-negative shape parameter is required for fat tails
                # non-negative VaR estimate is required for loss of some kind
                if shape_param > 0 and var_estimate > 0:
                    finished = True
            if not finished:
                threshold = threshold / 2
        if finished:
            # Expected shortfall follows directly from the fitted parameters.
            es_estimate = gpd_es_calculator(
                var_estimate, threshold, scale_param, shape_param
            )
            result = np.array(
                [
                    threshold,
                    scale_param,
                    shape_param,
                    var_estimate,
                    es_estimate,
                ]
            )
    # Preserve pandas-ness of the input in the output.
    if isinstance(returns, pd.Series):
        result = pd.Series(result)
    return result
def gpd_es_calculator(var_estimate, threshold, scale_param, shape_param):
    """Expected-shortfall estimate from fitted GPD parameters.
    Implements the ES formula from Gilli and Kellezi (p. 8).  Returns 0
    when ``shape_param == 1``, where the formula is undefined.
    """
    if (1 - shape_param) == 0:
        return 0
    # this formula is from Gilli and Kellezi pg. 8
    var_ratio = var_estimate / (1 - shape_param)
    param_ratio = (scale_param - (shape_param * threshold)) / (
        1 - shape_param
    )
    return var_ratio + param_ratio
def gpd_var_calculator(
    threshold, scale_param, shape_param, probability, total_n, exceedance_n
):
    """VaR estimate from fitted GPD parameters (Gilli and Kellezi, p. 12).
    Returns 0 unless there is at least one exceedance and a strictly
    positive shape parameter.
    """
    if exceedance_n <= 0 or shape_param <= 0:
        return 0
    # this formula is from Gilli and Kellezi pg. 12
    param_ratio = scale_param / shape_param
    prob_ratio = (total_n / exceedance_n) * probability
    return threshold + param_ratio * (pow(prob_ratio, -shape_param) - 1)
def gpd_loglikelihood_minimizer_aligned(price_data):
    """Fit GPD scale and shape to *price_data* by Nelder-Mead MLE.
    Returns ``[scale, shape]`` on a successful fit, ``[False, False]``
    otherwise (including for empty input).
    """
    INITIAL_SCALE = 1
    INITIAL_SHAPE = 1
    fitted = [False, False]
    if len(price_data) == 0:
        return fitted
    objective = gpd_loglikelihood_factory(price_data)
    solution = optimize.minimize(
        objective,
        [INITIAL_SCALE, INITIAL_SHAPE],
        method="Nelder-Mead",
    )
    if solution.success and len(solution.x) == 2:
        fitted[0] = solution.x[0]
        fitted[1] = solution.x[1]
    return fitted
def gpd_loglikelihood_factory(price_data):
    """Bind *price_data* into a one-argument objective for the optimizer."""

    def objective(params):
        return gpd_loglikelihood(params, price_data)

    return objective
def gpd_loglikelihood(params, price_data):
    """Negated GPD log-likelihood for ``params = [scale, shape]``.
    Dispatches to the scale-only form when the shape parameter is zero;
    the negation is required because scipy only minimizes.
    """
    scale, shape = params[0], params[1]
    if shape == 0:
        return -gpd_loglikelihood_scale_only(scale, price_data)
    return -gpd_loglikelihood_scale_and_shape(scale, shape, price_data)
def gpd_loglikelihood_scale_and_shape_factory(price_data):
    """Bind *price_data* into a two-parameter objective for the optimizer.
    The optimizer supplies ``params = [scale, shape]``; the outer negation
    exists because scipy only minimizes.
    """

    def objective(params):
        return -gpd_loglikelihood_scale_and_shape(
            params[0], params[1], price_data
        )

    return objective
def gpd_loglikelihood_scale_and_shape(scale, shape, price_data):
    """GPD log-likelihood for nonzero shape.
    Returns ``-float_info.max`` for parameter values outside the valid
    region (zero/negative scale, zero shape, or a negative shape/scale
    ratio) so the optimizer is pushed away from them.
    """
    n = len(price_data)
    if scale == 0:
        return -1 * float_info.max
    param_factor = shape / scale
    if shape == 0 or param_factor < 0 or scale < 0:
        return -1 * float_info.max
    tail_terms = np.log((shape / scale * price_data) + 1)
    return (-n * np.log(scale)) - (((1 / shape) + 1) * tail_terms.sum())
def gpd_loglikelihood_scale_only_factory(price_data):
    """Bind *price_data* into a scale-only objective for the optimizer.
    Negated because scipy only minimizes.
    """

    def objective(scale):
        return -gpd_loglikelihood_scale_only(scale, price_data)

    return objective
def gpd_loglikelihood_scale_only(scale, price_data):
    """GPD log-likelihood with shape fixed at zero (exponential tail).
    Returns ``-float_info.max`` for a negative scale so the optimizer is
    pushed away from invalid values.
    """
    n = len(price_data)
    data_sum = price_data.sum()
    if scale < 0:
        return -1 * float_info.max
    return (-n * np.log(scale)) - (data_sum / scale)
def up_capture(returns, factor_returns, **kwargs):
    """
    Compute the capture ratio restricted to periods in which the benchmark
    return is positive.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the benchmark factor, same style as
        ``returns``.
    period : str, optional
        Periodicity of the 'returns' data for annualization purposes
        ('monthly': 12, 'weekly': 52, 'daily': 252).  Ignored if an
        `annualization` kwarg is supplied.
    Returns
    -------
    up_capture : float
    Note
    ----
    See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for
    more information.
    """
    return up(
        returns,
        factor_returns,
        function=capture,
        **kwargs,
    )
def down_capture(returns, factor_returns, **kwargs):
    """
    Compute the capture ratio restricted to periods in which the benchmark
    return is negative.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the benchmark factor, same style as
        ``returns``.
    period : str, optional
        Periodicity of the 'returns' data for annualization purposes
        ('monthly': 12, 'weekly': 52, 'daily': 252).  Ignored if an
        `annualization` kwarg is supplied.
    Returns
    -------
    down_capture : float
    Note
    ----
    See http://www.investopedia.com/terms/d/down-market-capture-ratio.asp for
    more information.
    """
    return down(
        returns,
        factor_returns,
        function=capture,
        **kwargs,
    )
def up_down_capture(returns, factor_returns, **kwargs):
    """
    Computes the ratio of up_capture to down_capture.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the benchmark factor, same style as
        ``returns``.
    period : str, optional
        Periodicity of the 'returns' data for annualization purposes
        ('monthly': 12, 'weekly': 52, 'daily': 252).  Ignored if an
        `annualization` kwarg is supplied.
    Returns
    -------
    up_down_capture : float
        the updown capture ratio
    """
    upside = up_capture(returns, factor_returns, **kwargs)
    downside = down_capture(returns, factor_returns, **kwargs)
    return upside / downside
def up_alpha_beta(returns, factor_returns, **kwargs):
    """
    Computes alpha and beta for periods when the benchmark return is positive.
    Parameters
    ----------
    see documentation for `alpha_beta`.
    Returns
    -------
    float
        Alpha.
    float
        Beta.
    """
    return up(
        returns,
        factor_returns,
        function=alpha_beta_aligned,
        **kwargs,
    )
def down_alpha_beta(returns, factor_returns, **kwargs):
    """
    Computes alpha and beta for periods when the benchmark return is negative.
    Parameters
    ----------
    see documentation for `alpha_beta`.
    Returns
    -------
    alpha : float
    beta : float
    """
    return down(
        returns,
        factor_returns,
        function=alpha_beta_aligned,
        **kwargs,
    )
def roll_up_capture(returns, factor_returns, window=10, **kwargs):
    """
    Computes the up capture measure over a rolling window.
    see documentation for :func:`~empyrical.stats.up_capture`.
    (pass all args, kwargs required)
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the benchmark factor, same style as
        ``returns``.
    window : int, required
        Rolling-window length in periods of the data (e.g. ``window=60``
        with daily data is a rolling 60-day window).
    """
    return roll(
        returns,
        factor_returns,
        window=window,
        function=up_capture,
        **kwargs,
    )
def roll_down_capture(returns, factor_returns, window=10, **kwargs):
    """
    Computes the down capture measure over a rolling window.
    see documentation for :func:`~empyrical.stats.down_capture`.
    (pass all args, kwargs required)
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the benchmark factor, same style as
        ``returns``.
    window : int, required
        Rolling-window length in periods of the data (e.g. ``window=60``
        with daily data is a rolling 60-day window).
    """
    return roll(
        returns,
        factor_returns,
        window=window,
        function=down_capture,
        **kwargs,
    )
def roll_up_down_capture(returns, factor_returns, window=10, **kwargs):
    """
    Computes the up/down capture measure over a rolling window.
    see documentation for :func:`~empyrical.stats.up_down_capture`.
    (pass all args, kwargs required)
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the benchmark factor, same style as
        ``returns``.
    window : int, required
        Rolling-window length in periods of the data (e.g. ``window=60``
        with daily data is a rolling 60-day window).
    """
    return roll(
        returns, factor_returns, window=window, function=up_down_capture, **kwargs
    )
def value_at_risk(returns, cutoff=0.05):
    """
    Value at risk (VaR) of a returns stream.
    Parameters
    ----------
    returns : pandas.Series or 1-D numpy.array
        Non-cumulative daily returns.
    cutoff : float, optional
        Decimal representing the percentage cutoff for the bottom percentile of
        returns. Defaults to 0.05.
    Returns
    -------
    VaR : float
        The VaR value.
    """
    # np.percentile expects a rank in [0, 100], not a fraction.
    percentile_rank = 100 * cutoff
    return np.percentile(returns, percentile_rank)
def conditional_value_at_risk(returns, cutoff=0.05):
    """Conditional value at risk (CVaR) of a returns stream.

    CVaR measures the expected single-day returns of an asset on that asset's
    worst performing days, where "worst-performing" is defined as falling below
    ``cutoff`` as a percentile of all daily returns.

    Parameters
    ----------
    returns : pandas.Series or 1-D numpy.array
        Non-cumulative daily returns.
    cutoff : float, optional
        Decimal representing the percentage cutoff for the bottom percentile of
        returns. Defaults to 0.05.

    Returns
    -------
    CVaR : float
        The CVaR value.
    """
    # Derive the cutoff index directly instead of calling
    # numpy.percentile first; np.partition places the smallest
    # ``cutoff_index + 1`` observations (the tail we average over,
    # inclusive of the cutoff observation) at the front in O(n).
    cutoff_index = int((len(returns) - 1) * cutoff)
    tail = np.partition(returns, cutoff_index)[:cutoff_index + 1]
    return np.mean(tail)
# Statistics computable from a single returns series (no benchmark needed).
SIMPLE_STAT_FUNCS = [
    cum_returns_final,
    annual_return,
    annual_volatility,
    sharpe_ratio,
    calmar_ratio,
    stability_of_timeseries,
    max_drawdown,
    omega_ratio,
    sortino_ratio,
    stats.skew,
    stats.kurtosis,
    tail_ratio,
    cagr,
    value_at_risk,
    conditional_value_at_risk,
]
# Statistics that additionally require factor (benchmark) returns.
FACTOR_STAT_FUNCS = [
    excess_sharpe,
    alpha,
    beta,
    beta_fragility_heuristic,
    gpd_risk_estimates,
    capture,
    up_capture,
    down_capture,
]
| 30.114598 | 96 | 0.632494 |
bf4ea5ed2a4b09a9b25ac8969cbbc5233900e25f | 1,429 | py | Python | src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/effective_network_security_group_list_result_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/effective_network_security_group_list_result_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/effective_network_security_group_list_result_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroupListResult(Model):
    """Response for list effective network security groups API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param value: A list of effective network security groups.
    :type value:
     list[~azure.mgmt.network.v2018_04_01.models.EffectiveNetworkSecurityGroup]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    # 'next_link' is server-populated only, hence marked read-only so the
    # serializer never sends it on requests.
    _validation = {
        'next_link': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value=None, **kwargs) -> None:
        super(EffectiveNetworkSecurityGroupListResult, self).__init__(**kwargs)
        self.value = value
        # Read-only; the server fills this in on responses.
        self.next_link = None
| 34.853659 | 79 | 0.622113 |
7832a261a1a2a87a65f61f49b1c810cbf356ca6a | 584 | py | Python | tools/__init__.py | openeuler-mirror/A-Tune | 7479b30e4ab688cf2ca53235d9fd72fc22f332bd | [
"MulanPSL-1.0"
] | 5 | 2020-03-09T12:03:57.000Z | 2022-03-05T06:38:40.000Z | tools/__init__.py | openeuler-mirror/A-Tune | 7479b30e4ab688cf2ca53235d9fd72fc22f332bd | [
"MulanPSL-1.0"
] | null | null | null | tools/__init__.py | openeuler-mirror/A-Tune | 7479b30e4ab688cf2ca53235d9fd72fc22f332bd | [
"MulanPSL-1.0"
] | 1 | 2020-10-14T02:35:56.000Z | 2020-10-14T02:35:56.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Huawei Technologies Co., Ltd.
# A-Tune is licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# Create: 2019-12-04
| 44.923077 | 98 | 0.734589 |
61ed71f23b5e4153f770933f63e0a29aefe4dbb4 | 154 | py | Python | pymapf/decentralized/position.py | APLA-Toolbox/pymapf | 255df006925401e5ccdf82afc7dac339221574ba | [
"MIT"
] | 25 | 2021-01-17T01:02:25.000Z | 2022-02-13T09:20:59.000Z | pymapf/decentralized/position.py | APLA-Toolbox/pymapf | 255df006925401e5ccdf82afc7dac339221574ba | [
"MIT"
] | 37 | 2021-01-16T22:36:32.000Z | 2021-11-15T11:51:59.000Z | pymapf/decentralized/position.py | APLA-Toolbox/pymapf | 255df006925401e5ccdf82afc7dac339221574ba | [
"MIT"
class Position:
    """Lightweight 2-D coordinate container."""

    def __init__(self, x, y):
        """Store the horizontal (``x``) and vertical (``y``) coordinates."""
        self.x = x
        self.y = y

    def __str__(self):
        # Integer formatting is intentional and preserved from the
        # original implementation.
        coords = (self.x, self.y)
        return "[%d ; %d]" % coords
| 19.25 | 45 | 0.480519 |
d5f76489f430b8620c46c46d94531997c8859130 | 11,732 | py | Python | category_encoders/m_estimate.py | mcassuranceiq/categorical-encoding-public-old | 0d62c77a645bba308cb1bfda3e3bd41665a37a5e | [
"BSD-3-Clause"
] | null | null | null | category_encoders/m_estimate.py | mcassuranceiq/categorical-encoding-public-old | 0d62c77a645bba308cb1bfda3e3bd41665a37a5e | [
"BSD-3-Clause"
] | null | null | null | category_encoders/m_estimate.py | mcassuranceiq/categorical-encoding-public-old | 0d62c77a645bba308cb1bfda3e3bd41665a37a5e | [
"BSD-3-Clause"
] | null | null | null | """M-probability estimate"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from category_encoders.ordinal import OrdinalEncoder
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'Jan Motl'
class MEstimateEncoder(BaseEstimator, TransformerMixin):
    """M-probability estimate of likelihood.

    This is a simplified version of target encoder, which goes under names like m-probability estimate or
    additive smoothing with known incidence rates. In comparison to target encoder, m-probability estimate
    has only one tunable parameter (`m`), while target encoder has two tunable parameters (`min_samples_leaf`
    and `smoothing`).

    Parameters
    ----------
    verbose: int
        integer indicating verbosity of the output. 0 for none.
    cols: list
        a list of columns to encode, if None, all string columns will be encoded.
    drop_invariant: bool
        boolean for whether or not to drop encoded columns with 0 variance.
    return_df: bool
        boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
    handle_missing: str
        options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
    handle_unknown: str
        options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
    randomized: bool,
        adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
    sigma: float
        standard deviation (spread or "width") of the normal distribution.
    m: float
        this is the "m" in the m-probability estimate. Higher value of m results into stronger shrinking.
        M is non-negative.

    Example
    -------
    >>> from category_encoders import *
    >>> import pandas as pd
    >>> from sklearn.datasets import load_boston
    >>> bunch = load_boston()
    >>> y = bunch.target > 22.5
    >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
    >>> enc = MEstimateEncoder(cols=['CHAS', 'RAD']).fit(X, y)
    >>> numeric_dataset = enc.transform(X)
    >>> print(numeric_dataset.info())
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 506 entries, 0 to 505
    Data columns (total 13 columns):
    CRIM       506 non-null float64
    ZN         506 non-null float64
    INDUS      506 non-null float64
    CHAS       506 non-null float64
    NOX        506 non-null float64
    RM         506 non-null float64
    AGE        506 non-null float64
    DIS        506 non-null float64
    RAD        506 non-null float64
    TAX        506 non-null float64
    PTRATIO    506 non-null float64
    B          506 non-null float64
    LSTAT      506 non-null float64
    dtypes: float64(13)
    memory usage: 51.5 KB
    None

    References
    ----------
    .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, equation 7, from
    https://dl.acm.org/citation.cfm?id=507538

    .. [2] On estimating probabilities in tree pruning, equation 1, from
    https://link.springer.com/chapter/10.1007/BFb0017010

    .. [3] Additive smoothing, from
    https://en.wikipedia.org/wiki/Additive_smoothing#Generalized_to_the_case_of_known_incidence_rates
    """

    def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
                 handle_unknown='value', handle_missing='value', random_state=None, randomized=False, sigma=0.05, m=1.0):
        self.verbose = verbose
        self.return_df = return_df
        self.drop_invariant = drop_invariant
        self.drop_cols = []
        self.cols = cols
        self.ordinal_encoder = None
        self._dim = None
        self.mapping = None
        self.handle_unknown = handle_unknown
        self.handle_missing = handle_missing
        self._sum = None
        self._count = None
        self.random_state = random_state
        self.randomized = randomized
        self.sigma = sigma
        self.m = m
        self.feature_names = None

    # noinspection PyUnusedLocal
    def fit(self, X, y, **kwargs):
        """Fit encoder according to X and binary y.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Binary target values.

        Returns
        -------
        self : encoder
            Returns self.
        """
        # Unite parameters into pandas types
        X = util.convert_input(X)
        y = util.convert_input_vector(y, X.index).astype(float)

        # The lengths must be equal
        if X.shape[0] != y.shape[0]:
            raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")

        self._dim = X.shape[1]

        # If columns aren't passed, just use every string column
        if self.cols is None:
            self.cols = util.get_obj_cols(X)
        else:
            self.cols = util.convert_cols_to_list(self.cols)

        if self.handle_missing == 'error':
            if X[self.cols].isnull().any().any():
                raise ValueError('Columns to be encoded can not contain null')

        # Ordinal-encode first so the target statistics can be computed on
        # integer category codes (with NaN/unknown mapped to sentinels).
        self.ordinal_encoder = OrdinalEncoder(
            verbose=self.verbose,
            cols=self.cols,
            handle_unknown='value',
            handle_missing='value'
        )
        self.ordinal_encoder = self.ordinal_encoder.fit(X)
        X_ordinal = self.ordinal_encoder.transform(X)

        # Training
        self.mapping = self._train(X_ordinal, y)

        X_temp = self.transform(X, override_return_df=True)
        self.feature_names = X_temp.columns.tolist()

        # Store column names with approximately constant variance on the training data
        if self.drop_invariant:
            self.drop_cols = []
            generated_cols = util.get_generated_cols(X, X_temp, self.cols)
            self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
            try:
                # BUG FIX: list.remove raises ValueError, not KeyError; the
                # original `except KeyError` could never catch a failure here.
                for x in self.drop_cols:
                    self.feature_names.remove(x)
            except (KeyError, ValueError) as e:
                if self.verbose > 0:
                    print("Could not remove column from feature names."
                          "Not found in generated cols.\n{}".format(e))
        return self

    def transform(self, X, y=None, override_return_df=False):
        """Perform the transformation to new categorical data.

        When the data are used for model training, it is important to also pass the target in order to apply leave one out.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples] when transform by leave one out
            None, when transform without target information (such as transform test set)

        Returns
        -------
        p : array, shape = [n_samples, n_numeric + N]
            Transformed values with encoding applied.
        """
        if self.handle_missing == 'error':
            if X[self.cols].isnull().any().any():
                raise ValueError('Columns to be encoded can not contain null')

        if self._dim is None:
            raise ValueError('Must train encoder before it can be used to transform data.')

        # Unite the input into pandas types
        X = util.convert_input(X)

        # Then make sure that it is the right size
        if X.shape[1] != self._dim:
            raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))

        # If we are encoding the training data, we have to check the target
        if y is not None:
            y = util.convert_input_vector(y, X.index).astype(float)
            if X.shape[0] != y.shape[0]:
                raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")

        if not self.cols:
            return X

        # Do not modify the input argument
        X = X.copy(deep=True)

        X = self.ordinal_encoder.transform(X)

        if self.handle_unknown == 'error':
            if X[self.cols].isin([-1]).any().any():
                raise ValueError('Unexpected categories found in dataframe')

        # Loop over the columns and replace the nominal values with the numbers
        X = self._score(X, y)

        # Postprocessing
        # Note: We should not even convert these columns.
        if self.drop_invariant:
            for col in self.drop_cols:
                # BUG FIX: pass `axis` by keyword; the positional form was
                # deprecated and removed in pandas 2.0.
                X.drop(col, axis=1, inplace=True)

        if self.return_df or override_return_df:
            return X
        else:
            return X.values

    def fit_transform(self, X, y=None, **fit_params):
        """
        Encoders that utilize the target must make sure that the training data are transformed with:
            transform(X, y)
        and not with:
            transform(X)
        """
        # the interface requires 'y=None' in the signature but we need 'y'
        if y is None:
            # BUG FIX: the original `raise(TypeError, '...')` raised a tuple,
            # which itself fails with "exceptions must derive from
            # BaseException" and hides the intended message.
            raise TypeError("fit_transform() missing argument: 'y'")
        return self.fit(X, y, **fit_params).transform(X, y)

    def _train(self, X, y):
        """Compute the per-category m-probability estimates.

        Returns a dict mapping each encoded column name to a Series indexed
        by ordinal category code, including sentinel codes for unknown (-1)
        and missing (-2) values.
        """
        # Initialize the output
        mapping = {}

        # Calculate global statistics
        self._sum = y.sum()
        self._count = y.count()
        prior = self._sum/self._count

        for switch in self.ordinal_encoder.category_mapping:
            col = switch.get('col')
            values = switch.get('mapping')

            # Calculate sum and count of the target for each unique value in the feature col
            stats = y.groupby(X[col]).agg(['sum', 'count'])  # Count of x_{i,+} and x_i

            # Calculate the m-probability estimate
            estimate = (stats['sum'] + prior * self.m) / (stats['count'] + self.m)

            # Ignore unique columns. This helps to prevent overfitting on id-like columns
            if len(stats['count']) == self._count:
                estimate[:] = prior

            if self.handle_unknown == 'return_nan':
                estimate.loc[-1] = np.nan
            elif self.handle_unknown == 'value':
                estimate.loc[-1] = prior

            if self.handle_missing == 'return_nan':
                estimate.loc[values.loc[np.nan]] = np.nan
            elif self.handle_missing == 'value':
                estimate.loc[-2] = prior

            # Store the m-probability estimate for transform() function
            mapping[col] = estimate

        return mapping

    def _score(self, X, y):
        """Replace ordinal codes with their m-probability estimates.

        Optionally multiplies the scores by Gaussian noise during training
        (only when ``y`` is provided) to reduce overfitting.
        """
        for col in self.cols:
            # Score the column
            X[col] = X[col].map(self.mapping[col])

            # Randomization is meaningful only for training data -> we do it only if y is present
            if self.randomized and y is not None:
                random_state_generator = check_random_state(self.random_state)
                X[col] = (X[col] * random_state_generator.normal(1., self.sigma, X[col].shape[0]))

        return X

    def get_feature_names(self):
        """
        Returns the names of all transformed / added columns.

        Returns
        -------
        feature_names: list
            A list with all feature names transformed or added.
            Note: potentially dropped features are not included!
        """
        if not isinstance(self.feature_names, list):
            raise ValueError("Estimator has to be fitted to return feature names.")
        else:
            return self.feature_names
| 36.434783 | 137 | 0.611149 |
8bc1a27251f561ea437d4bec772a488d498b6b14 | 2,769 | py | Python | datasets/init_dataset.py | hzh8311/2nd-Solution-for-CVPR2020-face-anti-spoofing-challenge | 5c21d934904bbcfc9b373da3f578d03ede842b06 | [
"MIT"
] | 3 | 2021-02-11T07:59:34.000Z | 2021-05-19T02:28:27.000Z | datasets/init_dataset.py | hzh8311/2nd-Solution-for-CVPR2020-face-anti-spoofing-challenge | 5c21d934904bbcfc9b373da3f578d03ede842b06 | [
"MIT"
] | null | null | null | datasets/init_dataset.py | hzh8311/2nd-Solution-for-CVPR2020-face-anti-spoofing-challenge | 5c21d934904bbcfc9b373da3f578d03ede842b06 | [
"MIT"
] | null | null | null | import torch.utils.data as data
import torch
from PIL import Image
import pandas as pd
import numpy as np
import os
import torch.utils.data as data
def rgb_loader(path):
    """Open the image at ``path`` with PIL and return it unmodified."""
    return Image.open(path)
def rgb_prune_loader(path, pad=5):
    """Load an image and crop away its all-zero (black) border.

    The crop box is the bounding box of all pixels whose channel sum is
    non-zero, expanded by ``pad`` pixels on each side and clamped to the
    image bounds.

    Parameters
    ----------
    path : str
        Path of the image to load.
    pad : int, optional
        Margin (in pixels) added around the detected content box.

    Returns
    -------
    PIL.Image.Image
        The cropped image, or the original image unchanged if it contains
        no non-zero pixel.
    """
    im = Image.open(path)
    arr = np.asarray(im)
    # Collapse the channel axis so a pixel is "content" iff any channel is non-zero.
    intensity = np.sum(arr, -1)
    row_sum = np.sum(intensity, 0)  # per-column totals
    col_sum = np.sum(intensity, 1)  # per-row totals
    col_idxs = np.where(row_sum != 0)[0]
    row_idxs = np.where(col_sum != 0)[0]
    # BUG FIX: a fully black image produced empty index arrays and the
    # original `idxs[0]` raised IndexError; return the image untouched instead.
    if len(col_idxs) == 0 or len(row_idxs) == 0:
        return im
    left, right = col_idxs[0], col_idxs[-1]
    top, bottom = row_idxs[0], row_idxs[-1]
    left = max(0, left - pad)
    right = min(arr.shape[1], right + pad)
    top = max(0, top - pad)
    bottom = min(arr.shape[0], bottom + pad)
    return Image.fromarray(arr[top:bottom, left:right, :])
class ImageListDataset(data.Dataset):
    """
    Builds a dataset based on a list of images.
    data_root - image path prefix
    data_list - annotation list location
    """
    def __init__(self, opt,data_root, data_list, transform=None):
        self.data_root = data_root
        #self.df = pd.read_csv(data_list)
        # Annotation file is space-separated with columns rgb/ir/depth[/label].
        self.df = pd.read_csv(data_list,sep=' ')
        # print(self.df)
        # Unlabeled lists (e.g. test sets) get a -1 placeholder label.
        if 'label' not in self.df.columns:
            self.df['label'] = -1
        self.transform = transform
        # The DLAS_B variant uses the border-pruned loader — presumably the
        # network expects tightly cropped inputs; TODO confirm.
        if opt.net_type=='ResNet34DLAS_B':
            self.loader = rgb_prune_loader
        else:
            self.loader = rgb_loader

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (rgb_img, ir_img, depth_img, target)
        """
        dict_elem = self.__get_simple_item__(index)
        # 'meta' gives transforms access to sibling samples (e.g. for mixing).
        dict_elem['meta'] = {
            'idx': index,
            'max_idx': len(self.df),
            'get_item_func': self.__get_simple_item__
        }
        if self.transform is not None:
            dict_elem = self.transform(dict_elem)
        return dict_elem['rgb'], dict_elem['depth'], dict_elem['ir'], dict_elem['label'], dict_elem['dir']

    def __get_simple_item__(self, index):
        # Load the three aligned modalities for one sample, untransformed.
        rgb_path = self.data_root + self.df.rgb.iloc[index]
        # import pdb
        # pdb.set_trace()
        # print(self.df.rgb.iloc[index])
        # print(self.df.ir.iloc[index])
        ir_path = self.data_root + self.df.ir.iloc[index]
        depth_path = self.data_root + self.df.depth.iloc[index]
        target = self.df.label.iloc[index]
        rgb_img = self.loader(rgb_path)
        ir_img = self.loader(ir_path)
        depth_img = self.loader(depth_path)
        dict_elem = {
            'rgb': rgb_img,
            'ir': ir_img,
            'depth': depth_img,
            'label': target,
            'dir': rgb_path
        }
        return dict_elem

    def __len__(self):
        # Number of samples equals the number of annotation rows.
        return len(self.df)
0df62aaf2bc00e46d7f927bc25d7ab62841e0447 | 976 | py | Python | api/tests/test_investments.py | Siecje/debt | 86786479a1bc3384c115f145b60ebebce0cba468 | [
"MIT"
] | 5 | 2017-04-06T13:20:20.000Z | 2022-02-12T18:12:27.000Z | api/tests/test_investments.py | Siecje/debt | 86786479a1bc3384c115f145b60ebebce0cba468 | [
"MIT"
] | 6 | 2015-04-25T16:43:36.000Z | 2022-02-12T16:35:45.000Z | api/tests/test_investments.py | Siecje/debt | 86786479a1bc3384c115f145b60ebebce0cba468 | [
"MIT"
] | 1 | 2018-04-12T09:35:03.000Z | 2018-04-12T09:35:03.000Z | from django.core.urlresolvers import reverse
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from api.models import Investment, User
class InvestmentTests(APITestCase):
    """API tests for the investment list endpoint."""

    def setUp(self):
        # Authenticate the test client with the user's token; the token is
        # looked up rather than created, so presumably it is auto-created on
        # user creation via a signal — TODO confirm.
        self.user = User.objects.create_user(
            username='one', email='one@exmaple.com', password='one')
        token = Token.objects.get(user__username=self.user.username)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        self.url = reverse('investment-list')

    def test_get_investments(self):
        # A single investment owned by the authenticated user should make the
        # list endpoint respond with HTTP 200.
        investment = Investment.objects.create(
            name='First', interest_rate=8.0,
            min_duration=0, balance=1000, user=self.user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        #self.assertJSONEqual(json.dumps(json.loads(response.content)),
        #    JSONRenderer().render(InvestmentSerializer(data=investment)))
| 37.538462 | 74 | 0.703893 |
d57c4fe36207c587e36e86ff283f3785aa054372 | 348 | py | Python | setup.py | mahyar-osn/seir | fef0d07b618535f4fcb69359d13b98fc8b8c6eaa | [
"Apache-2.0"
] | null | null | null | setup.py | mahyar-osn/seir | fef0d07b618535f4fcb69359d13b98fc8b8c6eaa | [
"Apache-2.0"
] | null | null | null | setup.py | mahyar-osn/seir | fef0d07b618535f4fcb69359d13b98fc8b8c6eaa | [
"Apache-2.0"
] | null | null | null | import setuptools
# Packaging metadata for the OpenCOR-based SEIR Covid-19 model script.
setuptools.setup(
    author='Alan Garny',
    author_email='a.garny@auckland.ac.nz',
    description='OpenCOR-based Python script to model Covid-19 using the SEIR model',
    scripts=[
        'src/seir.py'
    ],
    license='Apache 2.0',
    name='seir',
    url='https://github.com/ABI-Covid-19/seir',
    version='0.1.0',
)
| 23.2 | 85 | 0.637931 |
7b5c1052e9a1b46864098e18665da77cee9410ed | 558 | py | Python | channelchat/manage.py | olufekosamuel/django-chat | a0172726d1d6dba9ae47d8f49de21b2fbb102d6d | [
"Apache-2.0"
] | null | null | null | channelchat/manage.py | olufekosamuel/django-chat | a0172726d1d6dba9ae47d8f49de21b2fbb102d6d | [
"Apache-2.0"
] | null | null | null | channelchat/manage.py | olufekosamuel/django-chat | a0172726d1d6dba9ae47d8f49de21b2fbb102d6d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "channelchat.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
| 34.875 | 76 | 0.670251 |
5bb08ec23f3e4e50705be9a191cde126d421fbfd | 6,060 | py | Python | pymongo/periodic_executor.py | Namyalg/mongo-python-driver | fc85a24888e3c1fe556eb8f755aeecb053b5815e | [
"Apache-2.0"
] | null | null | null | pymongo/periodic_executor.py | Namyalg/mongo-python-driver | fc85a24888e3c1fe556eb8f755aeecb053b5815e | [
"Apache-2.0"
] | null | null | null | pymongo/periodic_executor.py | Namyalg/mongo-python-driver | fc85a24888e3c1fe556eb8f755aeecb053b5815e | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Run a target function on a background thread."""
import threading
import time
import weakref
from typing import Any, Optional
class PeriodicExecutor(object):
    def __init__(self, interval, min_interval, target, name=None):
        """Run a target function periodically on a background thread.

        If the target's return value is false, the executor stops.

        :Parameters:
          - `interval`: Seconds between calls to `target`.
          - `min_interval`: Minimum seconds between calls if `wake` is
            called very often.
          - `target`: A function.
          - `name`: A name to give the underlying thread.
        """
        # threading.Event and its internal condition variable are expensive
        # in Python 2, see PYTHON-983. Use a boolean to know when to wake.
        # The executor's design is constrained by several Python issues, see
        # "periodic_executor.rst" in this repository.
        self._event = False
        self._interval = interval
        self._min_interval = min_interval
        self._target = target
        self._stopped = False
        self._thread: Optional[threading.Thread] = None
        self._name = name
        self._skip_sleep = False
        # Set while holding _lock when the background thread has observed
        # _stopped and is about to exit; open() must then join it first.
        self._thread_will_exit = False
        self._lock = threading.Lock()

    def __repr__(self):
        return "<%s(name=%s) object at 0x%x>" % (self.__class__.__name__, self._name, id(self))

    def open(self) -> None:
        """Start. Multiple calls have no effect.

        Not safe to call from multiple threads at once.
        """
        with self._lock:
            if self._thread_will_exit:
                # If the background thread has read self._stopped as True
                # there is a chance that it has not yet exited. The call to
                # join should not block indefinitely because there is no
                # other work done outside the while loop in self._run.
                try:
                    assert self._thread is not None
                    self._thread.join()
                except ReferenceError:
                    # Thread terminated.
                    pass
            self._thread_will_exit = False
            self._stopped = False
        started: Any = False
        try:
            started = self._thread and self._thread.is_alive()
        except ReferenceError:
            # Thread terminated.
            pass

        if not started:
            thread = threading.Thread(target=self._run, name=self._name)
            thread.daemon = True
            # Keep only a weak proxy so the thread object does not keep the
            # executor (and vice versa) alive past its useful lifetime.
            self._thread = weakref.proxy(thread)
            _register_executor(self)
            thread.start()

    def close(self, dummy: Any = None) -> None:
        """Stop. To restart, call open().

        The dummy parameter allows an executor's close method to be a weakref
        callback; see monitor.py.
        """
        self._stopped = True

    def join(self, timeout: Optional[int] = None) -> None:
        if self._thread is not None:
            try:
                self._thread.join(timeout)
            except (ReferenceError, RuntimeError):
                # Thread already terminated, or not yet started.
                pass

    def wake(self) -> None:
        """Execute the target function soon."""
        self._event = True

    def update_interval(self, new_interval: int) -> None:
        self._interval = new_interval

    def skip_sleep(self) -> None:
        self._skip_sleep = True

    def __should_stop(self):
        # Checked by the background thread each iteration; marks the thread
        # as exiting under the lock so open() can safely restart later.
        with self._lock:
            if self._stopped:
                self._thread_will_exit = True
                return True
            return False

    def _run(self):
        while not self.__should_stop():
            try:
                if not self._target():
                    # A falsy return from the target stops the executor.
                    self._stopped = True
                    break
            except:
                with self._lock:
                    self._stopped = True
                    self._thread_will_exit = True
                raise

            if self._skip_sleep:
                self._skip_sleep = False
            else:
                # Sleep in short min_interval slices so wake() is honored
                # promptly without a condition variable.
                deadline = time.monotonic() + self._interval
                while not self._stopped and time.monotonic() < deadline:
                    time.sleep(self._min_interval)
                    if self._event:
                        break  # Early wake.

            self._event = False
# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started,
# an executor is kept alive by a strong reference from its thread and perhaps
# from other objects. When the thread dies and all other referrers are freed,
# the executor is freed and removed from _EXECUTORS. If any threads are
# running when the interpreter begins to shut down, we try to halt and join
# them to avoid spurious errors.
_EXECUTORS = set()


def _register_executor(executor):
    """Track *executor* weakly so it can be halted at interpreter shutdown."""
    ref = weakref.ref(executor, _on_executor_deleted)
    _EXECUTORS.add(ref)
def _on_executor_deleted(ref):
    """Weakref callback: drop a garbage-collected executor from the registry."""
    _EXECUTORS.remove(ref)
def _shutdown_executors():
    """Signal all live executors to close, then briefly join their threads."""
    if _EXECUTORS is None:
        # Module globals can already be cleared during interpreter teardown.
        return

    # Copy the set. Stopping threads has the side effect of removing executors.
    executors = list(_EXECUTORS)

    # First signal all executors to close...
    for ref in executors:
        executor = ref()
        if executor:
            executor.close()

    # ...then try to join them.
    for ref in executors:
        executor = ref()
        if executor:
            # Bounded join: don't hang shutdown on a stuck thread.
            executor.join(1)

    executor = None
7418abe9a8ec90cddbdfef0322c2e9512c5e825b | 20,198 | py | Python | src/aihwkit/nn/modules/base.py | JongchanRyu/aihwkit | fb95cd417c5ab70d78db0b8a3912932aebb53f3b | [
"Apache-2.0"
] | 1 | 2021-12-01T21:46:49.000Z | 2021-12-01T21:46:49.000Z | src/aihwkit/nn/modules/base.py | JongchanRyu/aihwkit | fb95cd417c5ab70d78db0b8a3912932aebb53f3b | [
"Apache-2.0"
] | null | null | null | src/aihwkit/nn/modules/base.py | JongchanRyu/aihwkit | fb95cd417c5ab70d78db0b8a3912932aebb53f3b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# (C) Copyright 2020, 2021 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Base class for analog Modules."""
import warnings
from typing import (
Any, Dict, List, Optional, Tuple, NamedTuple, Union,
Generator, TYPE_CHECKING
)
from torch import Tensor, no_grad
from torch.nn import Module, Parameter
from aihwkit.exceptions import ModuleError
from aihwkit.simulator.configs.configs import (
FloatingPointRPUConfig, InferenceRPUConfig, SingleRPUConfig,
UnitCellRPUConfig, DigitalRankUpdateRPUConfig
)
from aihwkit.simulator.configs.utils import MappingParameter
from aihwkit.simulator.tiles import InferenceTile
from aihwkit.optim.context import AnalogContext
if TYPE_CHECKING:
from aihwkit.simulator.tiles import BaseTile
from collections import OrderedDict
RPUConfigAlias = Union[FloatingPointRPUConfig, SingleRPUConfig,
UnitCellRPUConfig, InferenceRPUConfig,
DigitalRankUpdateRPUConfig]
class AnalogModuleBase(Module):
"""Base class for analog Modules.
Base ``Module`` for analog layers that use analog tiles. When subclassing,
please note:
* the :meth:`_setup_tile()` method is expected to be called by the subclass
constructor, and it does not only create a tile.
* :meth:`register_analog_tile` needs to be called for each created analog tile
* this module does *not* call torch's ``Module`` init as the child is
likely again derived from Module
* the ``weight`` and ``bias`` Parameters are not guaranteed to be in
sync with the tile weights and biases during the lifetime of the instance,
for performance reasons. The canonical way of reading and writing
weights is via the :meth:`set_weights()` and :meth:`get_weights()` as opposed
to using the attributes directly.
* the ``BaseTile`` subclass that is created is retrieved from the
``rpu_config.tile_class`` attribute.
Args:
in_features: input vector size (number of columns).
out_features: output vector size (number of rows).
bias: whether to use a bias row on the analog tile or not.
realistic_read_write: whether to enable realistic read/write
for setting initial weights and during reading of the weights.
weight_scaling_omega: the weight value that the current max
weight value will be scaled to. If zero, no weight scaling will
be performed.
mapping: Configuration of the hardware architecture (e.g. tile size).
"""
# pylint: disable=abstract-method, too-many-instance-attributes
ANALOG_CTX_PREFIX: str = 'analog_ctx_'
ANALOG_SHARED_WEIGHT_PREFIX: str = 'analog_shared_weights_'
ANALOG_STATE_PREFIX: str = 'analog_tile_state_'
ANALOG_OUT_SCALING_ALPHA_PREFIX: str = 'analog_out_scaling_alpha_'
    def __init__(
            self,
            in_features: int,
            out_features: int,
            bias: bool,
            realistic_read_write: bool = False,
            weight_scaling_omega: Optional[float] = None,
            mapping: Optional[MappingParameter] = None,
    ) -> None:
        """Initialize the common analog-layer attributes (see class docstring)."""
        # pylint: disable=super-init-not-called
        self._analog_tile_counter = 0
        self._registered_helper_parameter = []  # type: list
        self._load_rpu_config = True

        if mapping is None:
            mapping = MappingParameter()

        self.use_bias = bias
        # A bias row lives either in digital periphery or on the analog tile.
        self.digital_bias = bias and mapping.digital_bias
        self.analog_bias = bias and not mapping.digital_bias
        # An explicitly passed weight_scaling_omega overrides the mapping's
        # value, but that code path is deprecated (warned below).
        self.weight_scaling_omega = mapping.weight_scaling_omega if weight_scaling_omega is None \
            else weight_scaling_omega
        if weight_scaling_omega is not None:
            warnings.warn(DeprecationWarning('\nSetting the weight_scaling_omega through the '
                                             'layers input parameters will be deprecated in the '
                                             'future. Please set it through the MappingParameter '
                                             'of the rpu_config.\n'))
        self.weight_scaling_omega_columnwise = mapping.weight_scaling_omega_columnwise
        self.learn_out_scaling_alpha = mapping.learn_out_scaling_alpha
        if self.learn_out_scaling_alpha and self.weight_scaling_omega == 0:
            raise ValueError('out_scaling_alpha can only be learned if weight_scaling_omega > 0')
        self.realistic_read_write = realistic_read_write
        self.in_features = in_features
        self.out_features = out_features
def register_analog_tile(self, tile: 'BaseTile', name: Optional[str] = None) -> None:
    """Register the analog context of the tile.

    Note:
        Needs to be called at the end init to register the tile
        for the analog optimizers.

    Args:
        tile: tile to register
        name: Optional tile name used as the parameter name
    """
    if name is None:
        # Fall back to the running counter so each tile gets a unique key.
        name = str(self._analog_tile_counter)
    ctx_name = self.ANALOG_CTX_PREFIX + name
    if ctx_name not in self._registered_helper_parameter:
        self._registered_helper_parameter.append(ctx_name)
    self.register_parameter(ctx_name, tile.get_analog_ctx())
    if tile.shared_weights is not None:
        # Promote a plain tensor to Parameter so the optimizer can see it.
        if not isinstance(tile.shared_weights, Parameter):
            tile.shared_weights = Parameter(tile.shared_weights)
        par_name = self.ANALOG_SHARED_WEIGHT_PREFIX + str(self._analog_tile_counter)
        self.register_parameter(par_name, tile.shared_weights)
        if par_name not in self._registered_helper_parameter:
            self._registered_helper_parameter.append(par_name)
    if self.learn_out_scaling_alpha:
        # Same promotion for the learnable output scaling factors.
        if not isinstance(tile.out_scaling_alpha, Parameter):
            tile.out_scaling_alpha = Parameter(tile.out_scaling_alpha)
        par_name = self.ANALOG_OUT_SCALING_ALPHA_PREFIX + str(self._analog_tile_counter)
        self.register_parameter(par_name, tile.out_scaling_alpha)
        if par_name not in self._registered_helper_parameter:
            self._registered_helper_parameter.append(par_name)
    self._analog_tile_counter += 1
def unregister_parameter(self, param_name: str) -> None:
    """Unregister module parameter from parameters.

    The attribute is replaced by a detached plain-tensor copy of its data,
    so it remains readable but is no longer a ``Parameter``.

    Raises:
        ModuleError: In case parameter is not found
    """
    current = getattr(self, param_name, None)
    if not isinstance(current, Parameter):
        raise ModuleError(f"Cannot find parameter {param_name} to unregister")
    plain_copy = current.detach().clone()
    delattr(self, param_name)
    setattr(self, param_name, plain_copy)
def analog_tiles(self) -> Generator['BaseTile', None, None]:
    """Generator to loop over all registered analog tiles of the module."""
    yield from (param.analog_tile for param in self.parameters()
                if isinstance(param, AnalogContext))
def named_analog_tiles(self) -> Generator[Tuple[str, 'BaseTile'], None, None]:
    """Generator to loop over all registered analog tiles with their names."""
    for param_name, parameter in self.named_parameters():
        if not isinstance(parameter, AnalogContext):
            continue
        # Strip the context prefix so only the tile name remains.
        tile_name = param_name.split(self.ANALOG_CTX_PREFIX)[-1]
        yield tile_name, parameter.analog_tile
def analog_tile_count(self) -> int:
    """Return how many analog tiles have been registered on this module.

    Returns:
        Number of registered tiles (0 if none were registered yet).
    """
    return getattr(self, '_analog_tile_counter', 0)
def _setup_tile(
self,
rpu_config: RPUConfigAlias,
) -> 'BaseTile':
"""Create a single analog tile with the given RPU configuration.
Create an analog tile to be used for the basis of this layer
operations, while using the attributes ``(in_features,
out_features, bias)`` given to this instance during init.
After tile creation, the tile needs to be registered using
:meth:`register_analog_tile`.
Args:
rpu_config: resistive processing unit configuration.
Returns:
An analog tile with the requested parameters.
"""
# pylint: disable=protected-access
# Create the tile.
return rpu_config.tile_class(self.out_features, self.in_features, rpu_config,
bias=self.analog_bias)
def set_weights(
        self,
        weight: Tensor,
        bias: Optional[Tensor] = None,
        force_exact: bool = False
) -> None:
    """Set the weight (and bias) values with given tensors.

    This uses a realistic write if the property ``realistic_read_write``
    of the layer is set, unless it is overwritten by ``force_exact``.

    If ``weight_scaling_omega`` is larger than 0, the weights are set in a
    scaled manner (assuming a digital output scale). See
    :meth:`~aihwkit.simulator.tiles.base.BaseTile.set_weights_scaled`
    for details.

    Note:
        This is the recommended way for setting the weight/bias matrix of
        the analog tile, as it will correctly store the weights into the
        internal memory. Directly writing to ``self.weight`` and
        ``self.bias`` might yield wrong results as they are not always in
        sync with the analog tile Parameters, for performance reasons.

    Args:
        weight: weight matrix
        bias: bias vector
        force_exact: forces an exact write to the analog tiles

    Raises:
        ModuleError: in case of multiple defined analog tiles in the module
    """
    shape = [self.out_features, self.in_features]
    # Work on a copy so the caller's tensor is never reshaped in place.
    weight = weight.clone().reshape(shape)
    realistic = self.realistic_read_write and not force_exact
    analog_tiles = list(self.analog_tiles())
    if len(analog_tiles) != 1:
        raise ModuleError("AnalogModuleBase.set_weights only supports a single tile.")
    analog_tile = analog_tiles[0]
    if self.weight_scaling_omega > 0.0:
        # Scaled write: part of the weight magnitude goes into a digital
        # output scale (see set_weights_scaled).
        analog_tile.set_weights_scaled(
            weight, bias if self.analog_bias else None,
            realistic=realistic,
            omega=self.weight_scaling_omega,
            weight_scaling_omega_columnwise=self.weight_scaling_omega_columnwise,
            learn_out_scaling_alpha=self.learn_out_scaling_alpha)
    else:
        analog_tile.set_weights(weight, bias if self.analog_bias else None,
                                realistic=realistic)
    if bias is not None and self.digital_bias:
        # The digital bias is kept in self.bias, not on the tile.
        with no_grad():
            self.bias.data[:] = bias[:]
    self._sync_weights_from_tile()
def get_weights(
        self,
        force_exact: bool = False
) -> Tuple[Tensor, Optional[Tensor]]:
    """Get the weight (and bias) tensors.

    This uses a realistic read if the property ``realistic_read_write`` of
    the layer is set, unless it is overwritten by ``force_exact``. It
    scales the analog weights by the digital alpha scale if
    ``weight_scaling_omega`` is positive (see
    :meth:`~aihwkit.simulator.tiles.base.BaseTile.get_weights_scaled`).

    Note:
        This is the recommended way for getting the weight/bias matrix from
        the analog tile, as it will correctly fetch the weights from the
        internal memory. Accessing ``self.weight`` and ``self.bias`` might
        yield wrong results as they are not always in sync with the
        analog tile library, for performance reasons.

    Args:
        force_exact: forces an exact read to the analog tiles

    Returns:
        tuple: weight matrix, bias vector

    Raises:
        ModuleError: in case of multiple defined analog tiles in the module
    """
    analog_tiles = list(self.analog_tiles())
    if len(analog_tiles) != 1:
        raise ModuleError("AnalogModuleBase.get_weights only supports a single tile.")
    analog_tile = analog_tiles[0]
    realistic = self.realistic_read_write and not force_exact
    if self.weight_scaling_omega > 0.0:
        weight, bias = analog_tile.get_weights_scaled(
            realistic=realistic,
            weight_scaling_omega_columnwise=self.weight_scaling_omega_columnwise)
    else:
        weight, bias = analog_tile.get_weights(realistic=realistic)
    if self.digital_bias:
        # The digital bias is stored in self.bias, so it overrides any bias
        # that came back from the tile.
        with no_grad():
            bias = self.bias.data.detach().cpu()
    return weight, bias
def _sync_weights_from_tile(self) -> None:
    """Update the layer weight and bias from the values on the analog tile.

    Copies an exact read of the internal analog tile weights into the
    ``self.weight`` (and, for analog bias, ``self.bias``) Parameters.
    """
    tile_weight, tile_bias = self.get_weights(force_exact=True)  # type: Tuple[Tensor, Tensor]
    self.weight.data[:] = tile_weight.reshape(self.weight.shape)
    if not self.analog_bias:
        return
    with no_grad():
        self.bias.data[:] = tile_bias.reshape(self.bias.shape)
def _sync_weights_to_tile(self) -> None:
    """Update the tile values from the layer weights and bias.

    Writes an exact copy of ``self.weight`` (and ``self.bias`` when the
    bias is analog) into the internal tile weights.
    """
    bias_to_write = self.bias if self.analog_bias else None
    self.set_weights(self.weight, bias_to_write, force_exact=True)
def _set_load_rpu_config_state(self, load_rpu_config: bool = True) -> None:
    """Record whether the next state-dict load should use the saved RPU config."""
    self._load_rpu_config = load_rpu_config
def load_state_dict(self,  # pylint: disable=arguments-differ
                    state_dict: 'OrderedDict[str, Tensor]',
                    strict: bool = True,
                    load_rpu_config: bool = True) -> NamedTuple:
    """Specialization of torch's ``load_state_dict`` with an RPU-config flag.

    Args:
        state_dict: see torch's ``load_state_dict``
        strict: see torch's ``load_state_dict``
        load_rpu_config: Whether to load the saved RPU
            config or use the current RPU config of the model.

    Caution:
        With ``load_rpu_config=False`` the RPU config can differ from the
        stored model; the user has to make sure the changed RPU config
        makes sense. For instance, changing the device type might change
        the expected fields in the hidden parameters and result in an
        error.

    Returns:
        see torch's ``load_state_dict``

    Raises:
        ModuleError: in case the rpu_config class mismatches
            for ``load_rpu_config=False``.
    """
    # Stash the flag; _load_from_state_dict picks it up during the load.
    self._set_load_rpu_config_state(load_rpu_config)
    return super().load_state_dict(state_dict, strict)
def _load_from_state_dict(
        self,
        state_dict: Dict,
        prefix: str,
        local_metadata: Dict,
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str]) -> None:
    """Copy parameters and buffers from `state_dict` into only this
    module, but not its descendants.

    This method is a specialization of ``Module._load_from_state_dict``
    that takes into account the extra ``analog_tile_state`` key used by
    analog layers.

    Raises:
        ModuleError: in case the rpu_config class mismatches.
    """
    for name, analog_tile in list(self.named_analog_tiles()):
        key = prefix + self.ANALOG_STATE_PREFIX + name
        if key not in state_dict:  # legacy
            # Older checkpoints used a single un-prefixed state key.
            key = prefix + 'analog_tile_state'
        if key in state_dict:
            # Copy so the popped dict entry can be modified safely below.
            analog_state = state_dict.pop(key).copy()
            if not self._load_rpu_config:
                # Keep the module's current rpu_config, but only if it is of
                # the same class as the stored one.
                if analog_tile.rpu_config.__class__ != analog_state['rpu_config'].__class__:
                    raise ModuleError("RPU config mismatch during loading: "
                                      "Tried to replace "
                                      f"{analog_state['rpu_config'].__class__.__name__} "
                                      f"with {analog_tile.rpu_config.__class__.__name__}")
                analog_state['rpu_config'] = analog_tile.rpu_config
            analog_tile.__setstate__(analog_state)
        elif strict:
            missing_keys.append(key)
    # update the weight / analog bias (not saved explicitly)
    self._sync_weights_from_tile()
    # remove helper parameters so super() does not try to load them.
    rm_keys = []
    for par_name in self._registered_helper_parameter:
        key = prefix + par_name
        if key in state_dict:
            state_dict.pop(key)
            rm_keys.append(key)
    super()._load_from_state_dict(
        state_dict, prefix, local_metadata, strict, missing_keys,
        unexpected_keys, error_msgs)
    # remove the missing keys of the helper parameters (super() reported
    # them as missing because they were popped above).
    for key in rm_keys:
        missing_keys.remove(key)
def state_dict(
        self,
        destination: Any = None,
        prefix: str = '',
        keep_vars: bool = False
) -> Dict:
    """Return a dictionary containing a whole state of the module."""
    # Mirror the tile weights into `weight`/`bias` before serializing.
    self._sync_weights_from_tile()
    state = super().state_dict(destination, prefix, keep_vars)
    for tile_name, tile in self.named_analog_tiles():
        # Store the full pickled tile state under a prefixed key per tile.
        state[prefix + self.ANALOG_STATE_PREFIX + tile_name] = tile.__getstate__()
    return state
def drift_analog_weights(self, t_inference: float = 0.0) -> None:
    """(Program) and drift the analog weights.

    Args:
        t_inference: assumed time of inference (in sec)

    Raises:
        ModuleError: if the layer is not in evaluation mode.
    """
    if self.training:
        raise ModuleError('drift_analog_weights can only be applied in '
                          'evaluation mode')
    for tile in self.analog_tiles():
        # Drift is only defined for inference tiles; others are left alone.
        if isinstance(tile, InferenceTile):
            tile.drift_weights(t_inference)
def program_analog_weights(self) -> None:
    """Program the analog weights.

    Raises:
        ModuleError: if the layer is not in evaluation mode.
    """
    if self.training:
        raise ModuleError('program_analog_weights can only be applied in '
                          'evaluation mode')
    for tile in self.analog_tiles():
        # Programming is only defined for inference tiles.
        if isinstance(tile, InferenceTile):
            tile.program_weights()
def extra_repr(self) -> str:
    """Compose the extra representation of the module.

    Returns:
        A string with the extra representation.
    """
    parts = super().extra_repr()
    if self.realistic_read_write:
        parts += f', realistic_read_write={self.realistic_read_write}'
    if self.weight_scaling_omega > 0:
        parts += f', weight_scaling_omega={self.weight_scaling_omega:.3f}'
    if self.analog_bias:
        parts += ', analog bias'
    if self.digital_bias:
        parts += ', digital bias'
    return parts
| 40.23506 | 98 | 0.634073 |
3357dbe6d5f9dadcf2709381305214a63c482dd9 | 7,019 | py | Python | authtools/forms.py | kanymanyman/django-authtools | 3529efbff6f2255dfe3cc945a1981c6fc939d0ce | [
"BSD-2-Clause"
] | 288 | 2015-01-11T14:24:03.000Z | 2022-03-25T16:11:32.000Z | authtools/forms.py | kanymanyman/django-authtools | 3529efbff6f2255dfe3cc945a1981c6fc939d0ce | [
"BSD-2-Clause"
] | 86 | 2015-02-23T21:08:45.000Z | 2021-10-17T18:51:02.000Z | authtools/forms.py | kanymanyman/django-authtools | 3529efbff6f2255dfe3cc945a1981c6fc939d0ce | [
"BSD-2-Clause"
] | 113 | 2015-01-08T03:54:41.000Z | 2022-03-08T18:37:12.000Z | from __future__ import unicode_literals
from django import forms, VERSION as DJANGO_VERSION
from django.forms.utils import flatatt
from django.contrib.auth.forms import (
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
PasswordResetForm as OldPasswordResetForm,
UserChangeForm as DjangoUserChangeForm,
AuthenticationForm as DjangoAuthenticationForm,
)
from django.contrib.auth import get_user_model, password_validation
from django.contrib.auth.hashers import identify_hasher, UNUSABLE_PASSWORD_PREFIX
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.html import format_html
User = get_user_model()
def is_password_usable(pw):
    """Decide whether a password is usable only by the unusable password prefix.

    We can't use django.contrib.auth.hashers.is_password_usable either, because
    it not only checks against the unusable password, but checks for a valid
    hasher too. We need different error messages in those cases.
    """
    if pw.startswith(UNUSABLE_PASSWORD_PREFIX):
        return False
    return True
class BetterReadOnlyPasswordHashWidget(ReadOnlyPasswordHashWidget):
    """
    A ReadOnlyPasswordHashWidget that has a less intimidating output.
    """
    def render(self, name, value, attrs=None, renderer=None):
        attr_string = flatatt(self.build_attrs(attrs or {}))
        if not value or not is_password_usable(value):
            summary = ugettext("No password set.")
        else:
            try:
                identify_hasher(value)
            except ValueError:
                summary = ugettext("Invalid password format or unknown"
                                   " hashing algorithm.")
            else:
                # Valid hash: never show it, just a masked placeholder.
                summary = ugettext('*************')
        return format_html('<div{attrs}><strong>{summary}</strong></div>',
                           attrs=attr_string, summary=summary)
class UserChangeForm(DjangoUserChangeForm):
    """Admin change form that swaps in the friendlier password-hash widget."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        field = self.fields.get('password')
        if field:
            field.widget = BetterReadOnlyPasswordHashWidget()
class UserCreationForm(forms.ModelForm):
    """
    A form for creating new users. Includes all the required
    fields, plus a repeated password.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
        'duplicate_username': _("A user with that %(username)s already exists."),
    }
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"),
                                widget=forms.PasswordInput,
                                help_text=_("Enter the same password as above,"
                                            " for verification."))

    class Meta:
        # Bind to the swappable user model; USERNAME_FIELD plus whatever the
        # model declares as additionally required.
        model = User
        fields = (User.USERNAME_FIELD,) + tuple(User.REQUIRED_FIELDS)

    def __init__(self, *args, **kwargs):
        """Attach a uniqueness validator to the username field."""
        super(UserCreationForm, self).__init__(*args, **kwargs)

        def validate_uniqueness_of_username_field(value):
            # Since User.username is unique, this check is redundant,
            # but it sets a nicer error message than the ORM. See #13147.
            try:
                User._default_manager.get_by_natural_key(value)
            except User.DoesNotExist:
                return value
            raise forms.ValidationError(self.error_messages['duplicate_username'] % {
                'username': User.USERNAME_FIELD,
            })

        self.fields[User.USERNAME_FIELD].validators.append(validate_uniqueness_of_username_field)

    def clean_password2(self):
        """Raise unless the two password entries match; return password2."""
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(self.error_messages['password_mismatch'])
        return password2

    def _post_clean(self):
        """Run Django's password validators against the (unsaved) instance."""
        super(UserCreationForm, self)._post_clean()
        # Validate the password after self.instance is updated with form data
        # by super().
        password = self.cleaned_data.get('password2')
        if password:
            try:
                password_validation.validate_password(password, self.instance)
            except forms.ValidationError as error:
                self.add_error('password2', error)

    def save(self, commit=True):
        """Save the new user with the password stored in hashed format."""
        # Save the provided password in hashed format
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class CaseInsensitiveUsernameFieldCreationForm(UserCreationForm):
    """
    This form is the same as UserCreationForm, except that usernames are lowercased before they
    are saved. This is to disallow the existence of email address usernames which differ only in
    case.
    """
    def clean_USERNAME_FIELD(self):
        """Lowercase the username so values differing only in case collide."""
        username = self.cleaned_data.get(User.USERNAME_FIELD)
        if username:
            username = username.lower()
        return username

# set the correct clean method on the class so that child classes can override and call super()
# (Django looks up `clean_<fieldname>` by name, and the field name is only
# known at runtime via User.USERNAME_FIELD.)
setattr(
    CaseInsensitiveUsernameFieldCreationForm,
    'clean_' + User.USERNAME_FIELD,
    CaseInsensitiveUsernameFieldCreationForm.clean_USERNAME_FIELD
)

# alias for the old name, kept for backwards-compatibility
CaseInsensitiveEmailUserCreationForm = CaseInsensitiveUsernameFieldCreationForm
class FriendlyPasswordResetForm(OldPasswordResetForm):
    # Start from the parent's messages (if any) and override 'unknown'.
    error_messages = dict(getattr(OldPasswordResetForm, 'error_messages', {}))
    error_messages['unknown'] = _("This email address doesn't have an "
                                  "associated user account. Are you "
                                  "sure you've registered?")

    def clean_email(self):
        """Return an error message if the email address being reset is unknown.

        This is to revert https://code.djangoproject.com/ticket/19758

        The bug #19758 tries not to leak emails through password reset because
        only usernames are unique in Django's default user model.

        django-authtools leaks email addresses through the registration form.
        In the case of django-authtools not warning the user doesn't add any
        security, and worsen user experience.
        """
        email = self.cleaned_data['email']
        results = list(self.get_users(email))
        if not results:
            raise forms.ValidationError(self.error_messages['unknown'])
        return email
class AuthenticationForm(DjangoAuthenticationForm):
    """Login form whose username input mirrors the USERNAME_FIELD's own widget."""

    def __init__(self, request=None, *args, **kwargs):
        super(AuthenticationForm, self).__init__(request, *args, **kwargs)
        model_field = User._meta.get_field(User.USERNAME_FIELD)
        self.fields['username'].widget = model_field.formfield().widget
| 38.994444 | 97 | 0.674455 |
cbb6abc569d39daec6ca2b337778345decc7a455 | 8,270 | py | Python | onlinecourse/views.py | sabinamp/django-cloud-app | a837d15205a01937724a266e930c77536b8bf512 | [
"Apache-2.0"
] | null | null | null | onlinecourse/views.py | sabinamp/django-cloud-app | a837d15205a01937724a266e930c77536b8bf512 | [
"Apache-2.0"
] | null | null | null | onlinecourse/views.py | sabinamp/django-cloud-app | a837d15205a01937724a266e930c77536b8bf512 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponseRedirect
# <HINT> Import any new Models here
from .models import Course, Enrollment, Question, Choice, Submission, Lesson
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.views import generic
from django.contrib.auth import login, logout, authenticate
from collections import OrderedDict
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
def registration_request(request):
    """Render the sign-up page (GET) or create and log in a new user (POST).

    POST expects the form fields ``username``, ``psw``, ``firstname`` and
    ``lastname``.
    """
    context = {}
    if request.method == 'GET':
        return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
    elif request.method == 'POST':
        # Check if user exists
        username = request.POST['username']
        password = request.POST['psw']
        first_name = request.POST['firstname']
        last_name = request.POST['lastname']
        user_exist = False
        try:
            User.objects.get(username=username)
            user_exist = True
        except User.DoesNotExist:
            # A missing user is the normal sign-up case, not an error: catch
            # only DoesNotExist (the previous bare `except:` silently hid any
            # other failure) and log at debug level instead of error level.
            logger.debug("New user")
        if not user_exist:
            user = User.objects.create_user(username=username, first_name=first_name, last_name=last_name,
                                            password=password)
            login(request, user)
            return redirect("onlinecourse:index")
        else:
            context['message'] = "User already exists."
            return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
def login_request(request):
    """Render the login page, or authenticate and start a session on POST."""
    context = {}
    if request.method != "POST":
        return render(request, 'onlinecourse/user_login_bootstrap.html', context)
    username = request.POST['username']
    password = request.POST['psw']
    user = authenticate(username=username, password=password)
    if user is None:
        context['message'] = "Invalid username or password."
        return render(request, 'onlinecourse/user_login_bootstrap.html', context)
    login(request, user)
    return redirect('onlinecourse:index')
def logout_request(request):
    """Terminate the current session and return to the course index."""
    logout(request)
    return redirect('onlinecourse:index')
def check_if_enrolled(user, course):
    """Return True when `user` has an Enrollment record for `course`."""
    if user.id is None:
        # Anonymous users can never be enrolled.
        return False
    return Enrollment.objects.filter(user=user, course=course).count() > 0
# CourseListView
class CourseListView(generic.ListView):
    """Landing page showing the ten most-enrolled courses."""

    template_name = 'onlinecourse/course_list_bootstrap.html'
    context_object_name = 'course_list'

    def get_queryset(self):
        current_user = self.request.user
        top_courses = Course.objects.order_by('-total_enrollment')[:10]
        if current_user.is_authenticated:
            # Annotate each course so the template can show enrollment state
            # -- presumably toggles Enroll vs View; confirm against template.
            for course in top_courses:
                course.is_enrolled = check_if_enrolled(current_user, course)
        return top_courses
class CourseDetailView(generic.DetailView):
    # Read-only detail page for a single Course.
    model = Course
    template_name = 'onlinecourse/course_detail_bootstrap.html'
def enroll(request, course_id):
    """Enroll the current user in a course (honor mode) and show the course page."""
    course = get_object_or_404(Course, pk=course_id)
    user = request.user
    if user.is_authenticated and not check_if_enrolled(user, course):
        # Create an enrollment and bump the cached counter on the course.
        Enrollment.objects.create(user=user, course=course, mode='honor')
        course.total_enrollment += 1
        course.save()
    return HttpResponseRedirect(reverse(viewname='onlinecourse:course_details', args=(course.id,)))
# <HINT> A example method to collect the selected choices from the exam form from the request object
def extract_answers(request):
    """Collect the selected choice ids from the submitted exam form.

    Every POST key starting with 'choice' carries a choice id as its value.
    """
    return [int(request.POST[key])
            for key in request.POST
            if key.startswith('choice')]
# <HINT> Create a submit view to create an exam submission record for a course enrollment,
# you may implement it based on following logic:
# Get user and course object, then get the associated enrollment object created when the user enrolled the course
# Create a submission object referring to the enrollment
# Collect the selected choices from exam form
# Add each selected choice object to the submission object
# Redirect to show_exam_result with the submission id
# def submit(request, course_id):
def submit(request, course_id):
    # Create an exam Submission for the current user's enrollment (POST only),
    # record the selected choices, then redirect to the result page.
    current_user = request.user
    course = get_object_or_404(Course, pk=course_id)
    if request.method == 'POST':
        enrollment = Enrollment.objects.get(user=current_user, course=course)
        submission = Submission.objects.create(enrollment=enrollment)
        submission.choices.set(extract_answers(request=request))
        submission.save()
        # NOTE(review): placed inside the POST branch so `submission` is always
        # bound; a non-POST request falls through returning None -- confirm intended.
        return HttpResponseRedirect(reverse(viewname='onlinecourse:show_exam_result', args=(course.id, submission.id)))
# Python code to get the symmetric difference of two lists
# Using set()
def diff(li1, li2):
    """Return the symmetric difference of two iterables as a list.

    Elements appearing in exactly one of *li1* / *li2* are returned;
    duplicates are collapsed and the order is unspecified (set-based),
    matching the original two-sided set-subtraction implementation.
    """
    return list(set(li1) ^ set(li2))
# <HINT> Create an exam result view to check if learner passed exam and show their question results and result for each question,
# you may implement it based on the following logic:
# Get course and submission based on their ids
# Get the selected choice ids from the submission record
# For each selected choice, check if it is a correct answer or not
# Calculate the total score
def show_exam_result(request, course_id, submission_id):
    """Grade a submission and render the exam-result page.

    Collects all questions of the course, grades each answered question via
    ``compute_question_grade`` (a percentage), and reports the overall grade
    as the sum of per-question percentages divided by the total number of
    course questions, so unanswered questions count as zero.
    """
    current_user = request.user
    score = 0
    course = get_object_or_404(Course, pk=course_id)
    question_list = []
    question_list_ids = []
    # Gather every question of the course, across all lessons.
    for each_lesson in course.lesson_set.all():
        questions = each_lesson.question_set.all()
        question_list.extend(questions)
        for q in questions:
            question_list_ids.append(q.id)
    context = {'user': current_user, 'course': course, 'question_list': question_list, }
    submission = get_object_or_404(Submission, pk=submission_id)
    selected_choices = submission.choices.all()
    answered_questions = []
    # Map selected choice id -> id of the question it belongs to.
    selected_ids_q = dict()
    for selected in selected_choices:
        selected_ids_q[selected.id] = selected.question.id
        answered_questions.append(selected.question)
    # Grade each distinct answered question with the choice ids picked for it.
    for each_question in set(answered_questions):
        choices_question = []
        for key in selected_ids_q.keys():
            if selected_ids_q[key] == each_question.id:
                choices_question.append(key)
        each_question.q_grade = compute_question_grade(each_question, set(choices_question))
        print("each question grade"+str(each_question.q_grade))
        score = score + each_question.q_grade
        print("current score "+str(score))
    if len(selected_choices) == 0:
        # Nothing was answered: short-circuit with an error message.
        context['error_message'] = "No answer selected."
        context['grade'] = 0.0
        return render(request, 'onlinecourse/exam_result_bootstrap.html', context)
    else:
        # Average over ALL course questions (unanswered ones count as 0).
        grade = score / len(question_list)
        not_answered_questions = diff(set(question_list), set(answered_questions))
        context = {'user': current_user, 'course': course, 'not_answered_questions': not_answered_questions,
                   'grade': grade, 'selected_choices': selected_choices}
        return render(request, 'onlinecourse/exam_result_bootstrap.html', context)
def compute_question_grade(question, selected_ids):
    """Grade one question as a percentage of its correct choices.

    Correct selections count positively, wrong selections cancel them out;
    when wrong picks equal or exceed correct picks the grade is 0.
    """
    total_correct = question.choice_set.filter(is_correct=True).count()
    print("all_answers_count is" + str(total_correct))
    picked_correct = question.choice_set.filter(is_correct=True, id__in=selected_ids).count()
    print("selected_correct_count is " + str(picked_correct))
    picked_wrong = question.choice_set.filter(is_correct=False, id__in=selected_ids).count()
    if picked_correct <= picked_wrong:
        return 0
    return ((picked_correct - picked_wrong) / total_correct) * 100
| 39.009434 | 129 | 0.703023 |
f4c4cad53fb5c0dd517616007a0f660128f91004 | 1,076 | py | Python | zadanka/l5zad1.py | wrutkowski1000/wizualizacja-danych | aacbe2e25e8a5624a3585958aa6a3a6512f1fac7 | [
"MIT"
] | null | null | null | zadanka/l5zad1.py | wrutkowski1000/wizualizacja-danych | aacbe2e25e8a5624a3585958aa6a3a6512f1fac7 | [
"MIT"
] | null | null | null | zadanka/l5zad1.py | wrutkowski1000/wizualizacja-danych | aacbe2e25e8a5624a3585958aa6a3a6512f1fac7 | [
"MIT"
] | null | null | null | class Material:
def __init__(self, r, d, s):
self.rodzaj = r
self.dlugosc = d
self.szerokosc = s
def wyswietl_nazwe(self):
print(self.rodzaj)
class Ubrania(Material):
    """Clothing item: a material with size, colour and intended wearer."""

    def __init__(self, r, d, s, roz, kolo, dla):
        # Reuse Material's initializer instead of duplicating its assignments.
        super().__init__(r, d, s)
        self.rozmiar = roz
        self.kolor = kolo
        self.dla_kogo = dla

    def wyswietl_dane(self):
        """Print size, colour and wearer."""
        print("rozmiar: ", self.rozmiar, " , kolor: ", self.kolor, " , dla: ", self.dla_kogo)
class Sweter(Ubrania):
    """A sweater: clothing whose type is always "sweter", plus a sweater kind."""

    def __init__(self, d, s, roz, kolo, dla, rswetra):
        # Delegate shared attributes to Ubrania instead of re-assigning them;
        # the material type is fixed to "sweter" for this class.
        super().__init__("sweter", d, s, roz, kolo, dla)
        self.rodzaj_swetra = rswetra

    def wyswietl_dane(self):
        """Print all sweater details."""
        print("rozmiar: ", self.rozmiar, " , kolor: ", self.kolor, " , dla: ", self.dla_kogo, " rodzaj swetra: ", self.rodzaj_swetra)
# Demo: create a cotton sweater and print its details.
ubranko = Sweter(10, 50, 15, "czarny", "dla mnie","bawelniany")
ubranko.wyswietl_dane()
a0df8c179bd7f25690b81ffd50b53aa36c0038b8 | 958 | py | Python | cacao_app/contrib/sites/migrations/0002_set_site_domain_and_name.py | CacaoMovil/guia-de-cacao-django | 14d18edb76502736f6f31955509c3b413f1f91fc | [
"BSD-3-Clause"
] | 1 | 2016-03-07T17:03:45.000Z | 2016-03-07T17:03:45.000Z | cacao_app/contrib/sites/migrations/0002_set_site_domain_and_name.py | CacaoMovil/guia-de-cacao-django | 14d18edb76502736f6f31955509c3b413f1f91fc | [
"BSD-3-Clause"
] | 4 | 2016-04-29T20:48:31.000Z | 2021-06-10T20:39:26.000Z | cacao_app/contrib/sites/migrations/0002_set_site_domain_and_name.py | CacaoMovil/guia-de-cacao-django | 14d18edb76502736f6f31955509c3b413f1f91fc | [
"BSD-3-Clause"
] | 3 | 2016-03-04T19:46:45.000Z | 2016-05-11T19:46:00.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
def update_site_forward(apps, schema_editor):
    """Set site domain and name."""
    site_model = apps.get_model("sites", "Site")
    site_model.objects.update_or_create(
        id=settings.SITE_ID,
        defaults={"domain": "cacao.kronoscode.com", "name": "cacao_app"},
    )
def update_site_backward(apps, schema_editor):
    """Revert site domain and name to default."""
    site_model = apps.get_model("sites", "Site")
    site_model.objects.update_or_create(
        id=settings.SITE_ID,
        defaults={"domain": "example.com", "name": "example.com"},
    )
class Migration(migrations.Migration):

    # Must run after the initial sites migration that creates the Site table.
    dependencies = [
        ('sites', '0001_initial'),
    ]

    operations = [
        # Reversible data migration: forward sets the real domain/name,
        # backward restores Django's example.com defaults.
        migrations.RunPython(update_site_forward, update_site_backward),
    ]
| 23.365854 | 72 | 0.617954 |
8b770eaa17430f1e47fc01e5f334632a176f1989 | 23,541 | py | Python | nlpml/dataset.py | johann-petrak/nlp-ml | 86e1e86b8b2fa915cacd2ca8a39635ce982d9f4e | [
"Apache-2.0"
] | null | null | null | nlpml/dataset.py | johann-petrak/nlp-ml | 86e1e86b8b2fa915cacd2ca8a39635ce982d9f4e | [
"Apache-2.0"
] | null | null | null | nlpml/dataset.py | johann-petrak/nlp-ml | 86e1e86b8b2fa915cacd2ca8a39635ce982d9f4e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''
Temporary implementation of the Dataset interface without dependency on
pytorch to make accessing the data file in a sorted way possible.
A Dataset is something that allows direct acces to every item using bracket-notation, e.g
myds[22]. There is no initial assumption about how a dataset is represented, e.g. one file on
harddisk, a databse, something in memory etc. nor about the possibility of parallel access to
the same dataset from separate threads or processes. Specific subclasses may implement
these or other aspects in a specific way.
IMPORTANT! This has been copied over from a different library and currently
contains some features which are not used or relevant in here.
'''
# TODO: all wrappers should also hand through calls to setitem and other base methods!!
import os
import random
import sys
import pickle
import json
import numbers
import pathlib
class ExtendedDataset(object):
    """
    Our own base class for datasets which adds a few conventions:

    is_writable is False by default but some Datasets can be defined to be writable.
    Writable datasets implement __setitem__(self, key, item) for a key that is an integer.

    is_multi_read is True by default: if a dataset can be reasonably wrapped and/or read by several
    clients in multiple threads at the same time, this should be True, otherwise false. If
    doing this would corrupt the dataset or impose a significant performance penalty it should be False.

    Note: all classes that derive from this class should invoke the parent init method!
    """

    def __init__(self):
        # Conservative defaults: read-only, but safe for concurrent readers.
        self.is_writable = False
        self.is_multi_read = True

    def __setitem__(self, key, value):
        """
        Write/save a specific instance (identified by instance number).

        :param key: the instance number, must be an integer
        :param value: the instance
        :raises NotImplementedError: always, unless a writable subclass overrides this
        """
        # NotImplementedError is more precise than a bare Exception for an
        # unsupported operation; it is still an Exception subclass, so any
        # existing `except Exception` handlers keep working.
        raise NotImplementedError("Dataset is not writable!")
class ListDataset(ExtendedDataset):
    """
    A list wrapped into a dataset.
    """

    def __init__(self, thelist):
        """
        :param thelist: the list to wrap; held by reference, not copied
        :raises TypeError: if thelist is not a list
        """
        super().__init__()
        if not isinstance(thelist, list):
            # TypeError (still an Exception subclass, so existing handlers
            # keep matching) is the precise error for a wrong argument type.
            raise TypeError("Need a list!")
        self.data = thelist

    def __getitem__(self, key):
        return self.data[key]

    def __len__(self):
        return len(self.data)
class ShuffledDataset(ExtendedDataset):
    """
    Represents a shuffled version of another dataset. A shuffled dataset wraps an existing
    dataset by mapping the indices of the original dataset to a permutation of those indices.
    The shuffle method can be used to re-shuffle the dataset.
    """

    def __init__(self, dataset, seed=None):
        """
        :param dataset: the dataset to wrap (must support len() and indexing)
        :param seed: passed straight to :meth:`shuffle`: an integer seed other than
            0 and -1 shuffles reproducibly with that seed, 0 shuffles with a random
            seed, -1 shuffles without re-seeding the RNG, and any non-integer value
            (including the default None) behaves like 0 -- i.e. the instances ARE
            shuffled with a random seed in every case.
        """
        super().__init__()
        self.dataset = dataset
        self.seed = seed
        self.idxs = list(range(len(dataset)))
        self.shuffle(seed)

    def shuffle(self, seed=0):
        """
        Shuffle instance list order.

        :param seed: 0 seeds the RNG randomly, -1 leaves the RNG state untouched,
            any other integer is used as the seed; a non-integer value (including
            None) behaves like 0. The index permutation is re-shuffled in every case.
        :return:
        """
        if isinstance(seed, numbers.Integral):  # also allow for np.int8(n) and the like
            if seed != -1:
                if seed == 0:
                    random.seed()
                else:
                    random.seed(seed)
            random.shuffle(self.idxs)
        else:  # not an integer seed: None or some other type
            # same as seed 0
            random.seed()
            random.shuffle(self.idxs)

    def __getitem__(self, key):
        # Indirect through the permutation table.
        return self.dataset[self.idxs[key]]

    def __len__(self):
        return len(self.idxs)
class EveryNthDataset(ExtendedDataset):
    """
    Wraps a dataset to only provide every nth row, starting with the kth row.
    For example with n=3 and k=0, the rows 0,1,2,3,4 correspond to the
    rows 0,3,6,9,12 of the wrapped dataset, with n=3 and k=2, we get
    rows 2,5,8,11,14 etc. The wrapped dataset must allow to get used by more than
    one client at the same time!
    """

    def __init__(self, dataset, n, k):
        """
        Wrap dataset to access every nth row, starting with the kth row (k: zero-based).

        Important: if the wrapped dataset does not allow (reasonable) concurrent access, it may
        still be possible to wrap several separate dataset instances which all point to the
        same underlying resource (e.g. LineTsvDataset)

        :param dataset: the dataset to wrap, must allow multiple concurrent access
        :param n: the increment, must be >= 2
        :param k: the offset, must be >= 0 and < n
        """
        super().__init__()
        if (not isinstance(n, numbers.Integral)) or (not isinstance(k, numbers.Integral)):
            raise Exception("n and k must be integers.")
        if n < 2 or k < 0 or k >= n:
            raise Exception("n must be >= 2 and k must be >= 0 and < n")
        self.n = n
        self.k = k
        self.dataset = dataset
        # Precalculate the length: every full group of n rows contributes one
        # row, plus one more row if the remainder still covers offset k.
        otherlen = len(dataset)
        # NOTE: fixed to use integer division; int(otherlen / n) went through a
        # float and could round incorrectly for very large datasets.
        self.len = otherlen // n
        if k < otherlen % n:
            self.len += 1

    def __getitem__(self, item):
        if not isinstance(item, numbers.Integral):
            raise Exception("Item must be an integer")
        if item >= self.len or item < 0:
            raise IndexError("Item must be >= 0 and < {}, not {}".format(self.len, item))
        # row `item` of this view maps to row n*item + k of the wrapped dataset
        # (leftover debug print removed here; it fired on every single access)
        return self.dataset[item * self.n + self.k]

    def __len__(self):
        return self.len
# TODO: this should get replaced by ProcessingDataset where the transformations
# are really restricted to processing resource instances which can be pickled,
# so that this works properly with multiprocessing.
class TransformDataset(ExtendedDataset):
    """Apply one or more transformation callables, in order, to each item of a
    wrapped dataset."""

    def __init__(self, dataset, transforms):
        """
        :param dataset: the dataset to wrap
        :param transforms: a single callable or a list of callables applied in order
        """
        super().__init__()
        self.dataset = dataset
        self.transforms = transforms if isinstance(transforms, list) else [transforms]

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, key):
        item = self.dataset[key]
        for transform in self.transforms:
            item = transform(item)
        return item
class LineTsvDataset(ExtendedDataset):
    """
    Represent a large TSV file or simple one document/text per line file as a dataset.

    When creating the instance, an index file (a pickled list of per-line
    (byte_offset, byte_length) tuples) is created and stored along with the
    original file, unless a current one already exists.
    NOTE: this works only if lines are separated with "\n"!!!
    """

    def have_current_index(self):
        """Return True if the index file exists and is newer than the data file."""
        if not os.path.exists(self.indexfile):
            return False
        # if we have an index file, check if its modification date is more recent than
        # that of the data file: if not, return false
        return os.path.getmtime(self.indexfile) > os.path.getmtime(self.file)

    def __init__(self, file, indexfile=None, reinit=False,
                 encoding="utf8", cols=None, logevery=1000):
        """
        Create the dataset instance from the given file.

        :param file: the tsv file
        :param indexfile: index file to use, by default the original file path with ".dsindex" appended
        :param reinit: if True, forces re-creation of the index file even if it already exists
        :param encoding: encoding used to decode each line (default: "utf8")
        :param cols: if None, the whole line is returned; if a number, that
            column is returned; if a list of numbers, those fields are returned
        :param logevery: while indexing, log progress to stderr every this many
            lines (None disables progress logging)
        """
        # set this first so even if the super init throws an exception, __del__ still finds it
        self.reader = None
        super().__init__()
        self.file = file
        if not os.path.exists(file):
            raise Exception("File does not exist: {}".format(file))
        if indexfile is None:
            indexfile = file + ".dsindex"
        self.indexfile = indexfile
        self.encoding = encoding
        self.cols = cols
        self.logevery = logevery
        # if we need to create the cache file, do this now.
        if reinit or not self.have_current_index():
            self.idx2offlen = self._index4file(file)
            with open(indexfile, "wb") as indexwriter:
                pickle.dump(self.idx2offlen, indexwriter)
        else:
            with open(indexfile, "rb") as indexloader:
                self.idx2offlen = pickle.load(indexloader)
        self.len = len(self.idx2offlen)

    def __del__(self):
        # close the lazily opened data file handle, if any
        if self.reader is not None:
            self.reader.close()

    def _index4file(self, file):
        """Scan the file once and return a list of (byte_offset, byte_length), one per line."""
        idx2offlen = []
        with open(file, "rb") as reader:
            startoffset = 0
            linenr = 0
            # since this is reading in binary mode, the terminator is always "\n"
            # NOTE: we could also read in text mode, specify the newline or automatically
            # recognize both both Windows and Linux newlines and then count by encoding the
            # utf8 string we get into bytes and hope for the best. However, we expect all
            # line corpora to be in Linux format for now!
            for linebytes in reader:
                linelen = len(linebytes)
                idx2offlen.append((startoffset, linelen))
                startoffset += linelen
                linenr += 1
                if self.logevery is not None and linenr % self.logevery == 0:
                    print("Lines indexed: {}".format(linenr), file=sys.stderr)
        return idx2offlen

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        # open the data file on first access only
        if self.reader is None:
            self.reader = open(self.file, "rb")
        # negative indices down to -len are valid, like for lists
        if index >= self.len or index < -self.len:
            raise IndexError()
        off, linelen = self.idx2offlen[index]
        self.reader.seek(off, os.SEEK_SET)
        bytes = self.reader.read(linelen)
        line = bytes.decode(self.encoding)
        if self.cols is None:
            return line
        else:
            fields = line.split("\t")
            if isinstance(self.cols, list):
                return [fields[i] for i in self.cols]
            else:
                return fields[self.cols]
class DirFilesDataset(ExtendedDataset):
    """A dataset where each row/instance is stored as one file inside a directory
    (or directory tree). See ``__init__`` for the details of path mapping,
    formats and dynamic datasets."""

    def path4(self, index):
        """Return the full file path for the row with the given index (or string key, if dynamic)."""
        if not self.is_dynamic:
            # static datasets only accept integer row numbers within range
            if not isinstance(index, numbers.Integral) or index < 0:
                raise Exception("Dataset is not dynamic and index is not a non-negative integer: {}".format(index))
            if index >= self.len:
                raise IndexError("Index {} larger than maximum item number in dataset {}".format(index, self.len))
        if self.path4id is not None:
            name = self.path4id(index)
        elif self.is_dynamic and isinstance(index, str):
            # dynamic datasets may use a string key directly as the relative path
            name = index
        elif self.paths is not None:
            name = self.paths[index]
        else:
            raise Exception("ODDD!!")
        if self.ext is not None:
            fname = name + "." + self.ext
        else:
            fname = name + "." + self.as_format
        fpath = os.path.join(self.directory, fname)
        return fpath

    def get_dir_paths(self):
        """
        Return a list of matching files in the directory (or directory tree).

        :return: list of file names without the extension, in some order.
        """
        files = []
        if self.ext is not None:
            toendwith = "." + self.ext
        else:
            toendwith = "." + self.as_format
        endlen = len(toendwith)
        # NOTE(review): in tree mode only the bare file names are collected, the
        # subdirectory part of the path is not kept -- confirm this is intended.
        for r, d, fs in os.walk(self.directory, topdown=True, followlinks=True):
            files.extend((f[0:-endlen] for f in fs if f.endswith(toendwith)))
            if not self.tree:
                break
        return files

    def load_paths(self):
        """
        Create the list of file paths and store it in self.paths.

        If self.paths is a string, it names a listing file inside the directory: if that
        file exists, the paths list is loaded from it (pickle/json/plain text depending
        on its extension), otherwise the paths are found by scanning the directory and
        the listing file is created on the fly.
        """
        if isinstance(self.paths, str):
            fname = os.path.join(self.directory, self.paths)
            if os.path.exists(fname):
                if fname.endswith(".pickle"):
                    with open(fname, "rb") as fp:
                        self.paths = pickle.load(fp)
                elif fname.endswith(".json"):
                    with open(fname, "rt") as fp:
                        self.paths = json.load(fp)
                else:
                    # plain text listing: one relative path per line
                    self.paths = []
                    with open(fname, "rt", encoding="utf-8") as fp:
                        for line in fp:
                            line = line.rstrip()
                            self.paths.append(line)
                # NOTE: fixed, this assertion used to check the non-existent
                # attribute self.path instead of self.paths
                assert isinstance(self.paths, list)
            else:
                self.paths = self.get_dir_paths()
                # NOTE: fixed several bugs here: dump() returns None and its return
                # value used to overwrite self.paths; "json.dumo" was a typo; and the
                # plain-text branch used to clear self.paths before writing it out,
                # producing an empty listing file and losing the paths.
                if fname.endswith(".pickle"):
                    with open(fname, "wb") as fp:
                        pickle.dump(self.paths, fp)
                elif fname.endswith(".json"):
                    with open(fname, "wt") as fp:
                        json.dump(self.paths, fp)
                else:
                    with open(fname, "wt", encoding="utf-8") as fp:
                        for f in self.paths:
                            print(f, file=fp)
        elif self.paths is None:
            self.paths = self.get_dir_paths()
        else:
            raise Exception("Odd: not loading path if it is not None or a file path, but {}".format(type(self.paths)))
        self.len = len(self.paths)

    def __init__(self, directory, format='pickle', paths=None, tree=False, path4id=None, is_writable=True,
                 ext=None, size=0, is_dynamic=False):
        """
        Create a dataset where instances are files in a directory. This can be either a normal dataset where the size
        needs to be known in advance or, if "is_dynamic" is True, something that allows to grow the dataset
        and set non-integer keys (see below).

        By default, the content of this dataset is the content of the directory (recursively if tree is not False),
        restricted to all the files that match the extension implied by the format. The mapping between
        row number and file path is determined at creation time and immutable afterwards.
        If paths is given, then either a list is expected that maps relative file paths to each id, or
        a string that identifies a file in the directory that contains all the file paths and which will
        get loaded at init time. If that file does not exist, it is created on the fly.
        The size of the dataset is the size of the list (unless is_dynamic).
        Accessing an item where the file does not exist returns None.
        That item can be changed into a different value by storing it in which case the file from the
        paths list is created.
        Finally, if path4id is given, it must be a callable that returns a filename for a row number.

        If is_dynamic is True: the path4id function, if given, is not used to get the size, only numbers >= 0 are used.
        The initial size of the dataset is set to the size we got from the initial paths and may get increased if
        a row with a larger index is used, but there is no guarantee at all about the size of a dynamic dataset,
        len should never be used for such a dataset! Note that accessing rows by numeric id will not work for any
        row that is outside the known range.

        :param directory: the directory to use for the files representing each row in the dataset
        :param format: the format to use, currently this also has to be the file extension to use, one of
            pickle, json, torch. Alternately this can be a tuple of the format (extension, reader, writer) where
            reader(path) and writer(obj, path) are callables. The format or extensions must be given without a leading
            dot!
        :param paths: a list of relative file paths (without the extension!!!)
        :param tree: if True, recurse the directory to find the initial file names, only relevant if paths and
            path4id are not specified.
        :param path4id: if specified, must be a callable that returns a relative path (without extension)
            for a row number (>=0)
        :param is_writable: whether items may be stored via __setitem__ (default: True)
        :param ext: if not None, the file extension to use for all the files, default is the format name
        :param size: specify the size if path4id is used. If is_dynamic is true and path4id is used, specify the
            initial size.
        :param is_dynamic: if True, the size of the dataset is not fixed, writing to a new id/key will increase the
            size. The id when reading can be a string when reading or writing. if path4id is defined, the string
            is passed to that function and a path is expected. If path4id is not specified and the id is a string,
            then that string is directly interpreted as a path.
        """
        super().__init__()
        self.directory = directory
        self.is_writable = is_writable
        self.paths = paths
        self.tree = tree
        self.ext = ext
        self.reader = None
        self.writer = None
        self.is_dynamic = is_dynamic
        if format not in ['pickle', 'json', 'torch']:
            # check if it is a user defined format: a triple (ext, reader, writer)
            if (isinstance(format, tuple) or isinstance(format, list)) and len(format) == 3:
                self.ext, self.reader, self.writer = format
            else:
                raise Exception("Format must be one of pickle, json, torch or a triple (ext,reader,writer)")
        self.as_format = format
        self.path4id = path4id
        if paths is None and path4id is None:
            # get all the matching path names, either just in the directory or recursively and store
            self.load_paths()
        elif isinstance(paths, str) and path4id is None:
            self.load_paths()
        elif isinstance(paths, list) and path4id is None:
            self.len = len(self.paths)
        elif path4id and paths is None:
            self.len = size
        else:
            raise Exception("Cannot use both path4id and paths at the same time, one or none of them needed")

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        """
        Get the entry with the given index. If the dataset is dynamic, this should be the id/key
        of the entry instead. Returns None if the corresponding file does not exist.
        """
        fpath = self.path4(index)
        if not os.path.exists(fpath):
            return None
        # if we have a custom format reader, use it
        if self.reader:
            return self.reader(fpath)
        if self.as_format == "json":
            with open(fpath, "rt", encoding="utf8") as reader:
                return json.load(reader)
        elif self.as_format == "pickle":
            with open(fpath, "rb") as reader:
                return pickle.load(reader)
        elif self.as_format == "torch":
            import torch
            with open(fpath, "rb") as reader:
                return torch.load(reader, map_location="cpu")

    def __setitem__(self, index, value):
        """Store the value as the file for the given index/key, creating parent directories as needed."""
        fpath = self.path4(index)
        parent = pathlib.Path(fpath).parent
        if not parent.exists():
            parent.mkdir(parents=True, exist_ok=True)
        # if we have a custom format writer, use it
        if self.writer:
            self.writer(value, fpath)
        elif self.as_format == "json":
            with open(fpath, "wt", encoding="utf8") as writer:
                json.dump(value, writer)
        elif self.as_format == "pickle":
            with open(fpath, "wb") as writer:
                pickle.dump(value, writer)
        elif self.as_format == "torch":
            import torch
            with open(fpath, "wb") as writer:
                torch.save(value, writer)
class CachedDataset(ExtendedDataset):
    def __init__(self, basedataset, cachedataset, cacheonread=False):
        """
        Create a caching dataset. This will access data from the cachedataset; if an item does not exist
        in there (entry is None) it will instead fall back to the base dataset. In other words, a cache
        dataset must be set up as a direct access dataset that is capable of returning None for
        non-existing items.

        The cache can be set up to cache on read or cache on write.
        NOTE: both datasets should maybe have the same size already but this is not checked so that a dynamic
        DirFilesDataset instance can be used as well!

        :param basedataset: any dataset
        :param cachedataset: any ExtendedDataset which allows for empty slots to be represented as None
        :param cacheonread: if True, writes to the cache as soon as an item has been read from the base dataset.
            Otherwise will only write to the cache dataset when an item is set. This allows to cache the result
            of processing efficiently.
        """
        super().__init__()
        self.is_writable = True
        self.basedataset = basedataset
        self.cachedataset = cachedataset
        self.cacheonread = cacheonread

    def __len__(self):
        return len(self.basedataset)

    def __getitem__(self, index):
        tmp = self.cachedataset[index]
        if tmp is None:
            # cache miss: fetch the actual item from the base dataset.
            # NOTE: fixed, this used to be "tmp = self.basedataset" (without the
            # index), returning the whole dataset object instead of the item.
            tmp = self.basedataset[index]
            if self.cacheonread:
                self[index] = tmp
        return tmp

    def __setitem__(self, index, value):
        self.cachedataset[index] = value
if __name__ == "__main__":
    # just run a quick sanity test
    tmpfile = "tmp_linetsvdataset.tsv"
    with open(tmpfile, "wt", encoding="utf8") as out:
        print("this is the first line!", file=out)
        print("Some umlauts like ä or Ü or ś and Ñ and ì...", file=out)
        print("this is another line", file=out)
        print("and another", file=out)
        print("Last one!!!", file=out)
    ds = LineTsvDataset(file=tmpfile, reinit=True)
    for rownr, text in enumerate(ds):
        print("LineTsvDataset line {}:".format(rownr), text)
    # negative indexing should work like for lists
    print("Last line: ", ds[-1])
    print("First line: ", ds[-5])
    from torch.utils.data import DataLoader

    def cfn1(l):
        # collate function that just shows and passes through the batch
        print("We got:", l)
        return l

    dl = DataLoader(ds, batch_size=2, shuffle=True, collate_fn=cfn1)
    for batch in dl:
        print("Batch: ", batch)
    # same file again, this time through a length transform
    ds2tmp = LineTsvDataset(file=tmpfile, reinit=False)
    ds2 = TransformDataset(ds2tmp, len)
    dl2 = DataLoader(ds2, batch_size=2, shuffle=True)
    for batch2 in dl2:
        print("Batch2: ", batch2)
| 41.739362 | 119 | 0.616244 |
f0cdb65b37cae1c5c7dd55bf8f2dcde6df5a6929 | 4,764 | py | Python | exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/encoder/v2/protobuf/__init__.py | dmolenda-sumo/opentelemetry-python | f92431e90d258ad6f0f4f496b2e9b778bcb1f627 | [
"Apache-2.0"
] | 1 | 2021-02-26T02:37:54.000Z | 2021-02-26T02:37:54.000Z | exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/encoder/v2/protobuf/__init__.py | dmolenda-sumo/opentelemetry-python | f92431e90d258ad6f0f4f496b2e9b778bcb1f627 | [
"Apache-2.0"
] | null | null | null | exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/encoder/v2/protobuf/__init__.py | dmolenda-sumo/opentelemetry-python | f92431e90d258ad6f0f4f496b2e9b778bcb1f627 | [
"Apache-2.0"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zipkin Export Encoder for Protobuf
API spec: https://github.com/openzipkin/zipkin-api/blob/master/zipkin.proto
"""
from typing import List, Optional, Sequence
from opentelemetry.exporter.zipkin.encoder import Encoder
from opentelemetry.exporter.zipkin.encoder.v2.protobuf.gen import zipkin_pb2
from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint
from opentelemetry.sdk.trace import Event
from opentelemetry.trace import Span, SpanContext, SpanKind
class ProtobufEncoder(Encoder):
    """Zipkin Export Encoder for Protobuf

    API spec: https://github.com/openzipkin/zipkin-api/blob/master/zipkin.proto
    """

    # Map OpenTelemetry span kinds onto the Zipkin protobuf span kinds;
    # INTERNAL is exported as SPAN_KIND_UNSPECIFIED.
    SPAN_KIND_MAP = {
        SpanKind.INTERNAL: zipkin_pb2.Span.Kind.SPAN_KIND_UNSPECIFIED,
        SpanKind.SERVER: zipkin_pb2.Span.Kind.SERVER,
        SpanKind.CLIENT: zipkin_pb2.Span.Kind.CLIENT,
        SpanKind.PRODUCER: zipkin_pb2.Span.Kind.PRODUCER,
        SpanKind.CONSUMER: zipkin_pb2.Span.Kind.CONSUMER,
    }

    @staticmethod
    def content_type():
        # HTTP Content-Type to use when sending the serialized payload.
        return "application/x-protobuf"

    def serialize(
        self, spans: Sequence[Span], local_endpoint: NodeEndpoint
    ) -> bytes:
        # NOTE: return annotation fixed from str to bytes;
        # protobuf's SerializeToString() returns bytes.
        """Encode all spans into one serialized zipkin ListOfSpans message."""
        encoded_local_endpoint = self._encode_local_endpoint(local_endpoint)
        # pylint: disable=no-member
        encoded_spans = zipkin_pb2.ListOfSpans()
        for span in spans:
            encoded_spans.spans.append(
                self._encode_span(span, encoded_local_endpoint)
            )
        return encoded_spans.SerializeToString()

    def _encode_span(
        self, span: Span, encoded_local_endpoint: zipkin_pb2.Endpoint
    ) -> zipkin_pb2.Span:
        """Encode a single OpenTelemetry span as a zipkin_pb2.Span."""
        context = span.get_span_context()
        # pylint: disable=no-member
        encoded_span = zipkin_pb2.Span(
            trace_id=self._encode_trace_id(context.trace_id),
            id=self._encode_span_id(context.span_id),
            name=span.name,
            # span times are nanoseconds; converted to (rounded) microseconds
            timestamp=self._nsec_to_usec_round(span.start_time),
            duration=self._nsec_to_usec_round(span.end_time - span.start_time),
            local_endpoint=encoded_local_endpoint,
            kind=self.SPAN_KIND_MAP[span.kind],
        )
        # protobuf map/repeated fields cannot be assigned directly,
        # only update()d / extend()ed in place
        tags = self._extract_tags_from_span(span)
        if tags:
            encoded_span.tags.update(tags)
        annotations = self._encode_annotations(span.events)
        if annotations:
            encoded_span.annotations.extend(annotations)
        debug = self._encode_debug(context)
        if debug:
            encoded_span.debug = debug
        # parent_id is only set when the span actually has a parent
        parent_id = self._get_parent_id(span.parent)
        if parent_id is not None:
            encoded_span.parent_id = self._encode_span_id(parent_id)
        return encoded_span

    def _encode_annotations(
        self, span_events: Optional[List[Event]]
    ) -> Optional[List]:
        """Convert span events to zipkin_pb2.Annotation objects (or None if there are none)."""
        annotations = self._extract_annotations_from_events(span_events)
        if annotations is None:
            encoded_annotations = None
        else:
            encoded_annotations = []
            for annotation in annotations:
                encoded_annotations.append(
                    zipkin_pb2.Annotation(
                        timestamp=annotation["timestamp"],
                        value=annotation["value"],
                    )
                )
        return encoded_annotations

    @staticmethod
    def _encode_local_endpoint(
        local_endpoint: NodeEndpoint,
    ) -> zipkin_pb2.Endpoint:
        """Encode the local node endpoint; optional ip/port fields are only set when present."""
        encoded_local_endpoint = zipkin_pb2.Endpoint(
            service_name=local_endpoint.service_name,
        )
        if local_endpoint.ipv4 is not None:
            # .packed gives the raw network-order bytes of the address
            encoded_local_endpoint.ipv4 = local_endpoint.ipv4.packed
        if local_endpoint.ipv6 is not None:
            encoded_local_endpoint.ipv6 = local_endpoint.ipv6.packed
        if local_endpoint.port is not None:
            encoded_local_endpoint.port = local_endpoint.port
        return encoded_local_endpoint

    @staticmethod
    def _encode_span_id(span_id: int) -> bytes:
        # span ids are 8 bytes, big-endian, unsigned
        return span_id.to_bytes(length=8, byteorder="big", signed=False)

    @staticmethod
    def _encode_trace_id(trace_id: int) -> bytes:
        # trace ids are 16 bytes, big-endian, unsigned
        return trace_id.to_bytes(length=16, byteorder="big", signed=False)
| 36.646154 | 79 | 0.680521 |
8586d9288fff4ba1b595adf12403a797941e90a2 | 220 | py | Python | lab_4.py | eyal21-meet/meet2019y1lab4 | 68d609325ab138d79034aa5ced0275b3fda13b70 | [
"MIT"
] | null | null | null | lab_4.py | eyal21-meet/meet2019y1lab4 | 68d609325ab138d79034aa5ced0275b3fda13b70 | [
"MIT"
] | null | null | null | lab_4.py | eyal21-meet/meet2019y1lab4 | 68d609325ab138d79034aa5ced0275b3fda13b70 | [
"MIT"
] | null | null | null | fruit = input("choose a fruit: ")
# Sort the fruit (read from user input above) into its storage bin;
# anything other than the three known fruits is rejected with an error.
if fruit == "apples":
    print("bin 1")
elif fruit == "oranges":
    print("bin 2")
elif fruit == "bananas":
    print("bin 3")
else:
    print("Error! I do not recognize this fruit!")
| 22 | 50 | 0.6 |
be4dde1e15942f9051340e750bcea4f4c9d42132 | 2,260 | py | Python | barcode/itf.py | Azd325/python-barcode | b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9 | [
"MIT"
] | null | null | null | barcode/itf.py | Azd325/python-barcode | b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9 | [
"MIT"
] | null | null | null | barcode/itf.py | Azd325/python-barcode | b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9 | [
"MIT"
] | null | null | null | """Module: barcode.itf
:Provided barcodes: Interleaved 2 of 5
"""
__docformat__ = 'restructuredtext en'
from barcode.base import Barcode
from barcode.charsets import itf
from barcode.errors import (
IllegalCharacterError,
)
# Writer defaults used by ITF.render(): the minimum size of a narrow element
# and the minimum quiet zone around the code (units as interpreted by the
# writer -- presumably mm, confirm against barcode.writer).
MIN_SIZE = 0.2
MIN_QUIET_ZONE = 6.4
class ITF(Barcode):
    """Interleaved 2 of 5 barcode.

    :parameters:
        code : String
            ITF (Interleaved 2 of 5) numeric string
        writer : barcode.writer Instance
            The writer to render the barcode (default: SVGWriter).
        narrow: Integer
            Width of the narrow elements (default: 2)
        wide: Integer
            Width of the wide elements (default: 5)
            wide/narrow must be in the range 2..3
    """

    name = 'ITF'

    def __init__(self, code, writer=None, narrow=2, wide=5):
        if not code.isdigit():
            raise IllegalCharacterError('ITF code can only contain numbers.')
        # ITF encodes digit pairs, so pad odd-length codes with a leading zero.
        if len(code) % 2:
            code = '0' + code
        self.code = code
        self.writer = writer or Barcode.default_writer()
        self.narrow = narrow
        self.wide = wide

    def __unicode__(self):
        return self.code

    __str__ = __unicode__

    def get_fullcode(self):
        return self.code

    def build(self):
        # For each digit pair, the first digit is encoded into the bars
        # (upper-case W/N) and the second into the spaces (lower-case w/n).
        data = itf.START
        for pos in range(0, len(self.code), 2):
            bar_digit = int(self.code[pos])
            space_digit = int(self.code[pos + 1])
            for j in range(5):
                data += itf.CODES[bar_digit][j].upper()
                data += itf.CODES[space_digit][j].lower()
        data += itf.STOP
        # Translate the wide/narrow pattern into a run-length bit string.
        runs = {
            'W': '1' * self.wide,
            'w': '0' * self.wide,
            'N': '1' * self.narrow,
            'n': '0' * self.narrow,
        }
        raw = ''.join(runs.get(char, '') for char in data)
        return [raw]

    def render(self, writer_options, text=None):
        options = {
            'module_width': MIN_SIZE / self.narrow,
            'quiet_zone': MIN_QUIET_ZONE,
        }
        options.update(writer_options or {})
        return Barcode.render(self, options, text)
| 27.901235 | 77 | 0.548673 |
766baed969cb779a65a871f013ec1be92ec34f70 | 942 | py | Python | example/home/test/test_subscriptions.py | kbayliss/wagtail-grapple | 1e322780cdaaa658bd30b1338599fb44c8806e74 | [
"BSD-3-Clause"
] | 59 | 2019-07-11T15:24:17.000Z | 2020-07-13T01:17:28.000Z | example/home/test/test_subscriptions.py | kbayliss/wagtail-grapple | 1e322780cdaaa658bd30b1338599fb44c8806e74 | [
"BSD-3-Clause"
] | 54 | 2019-07-09T16:29:55.000Z | 2020-07-05T23:13:36.000Z | example/home/test/test_subscriptions.py | kbayliss/wagtail-grapple | 1e322780cdaaa658bd30b1338599fb44c8806e74 | [
"BSD-3-Clause"
] | 23 | 2019-07-23T19:07:49.000Z | 2020-06-26T22:56:14.000Z | from example.tests.test_grapple import BaseGrappleTest
from grapple.settings import has_channels
if has_channels:
class TestRegisterSubscription(BaseGrappleTest):
def test_subscription(self):
query = """
{
__schema {
subscriptionType {
fields {
name
}
}
}
}
"""
results = self.client.execute(query)
subscriptions = results["data"]["__schema"]["subscriptionType"]["fields"]
# We check here that the subscription defined in example/home/subscriptions.py and
# added in example/home/wagtail_hooks.py is indeed added to the graphene schema.
# Note: it is hard to test subscriptions, but there is some place for improvement here.
self.assertIn("hello", [item["name"] for item in subscriptions])
| 34.888889 | 99 | 0.571125 |
56849d5529a94e8c2a21dc2d52bdc8e0c883f7d9 | 853 | py | Python | passport/rwts/d5timing.py | jepler/passport.py | 88c3224f7299320c129d9a63253aaf195c4c48db | [
"MIT"
] | 8 | 2018-07-18T20:06:03.000Z | 2020-11-13T04:53:34.000Z | passport/rwts/d5timing.py | jepler/passport.py | 88c3224f7299320c129d9a63253aaf195c4c48db | [
"MIT"
] | 4 | 2021-04-07T09:26:51.000Z | 2022-03-23T19:00:12.000Z | passport/rwts/d5timing.py | jepler/passport.py | 88c3224f7299320c129d9a63253aaf195c4c48db | [
"MIT"
] | 1 | 2022-03-23T13:59:50.000Z | 2022-03-23T13:59:50.000Z | from passport.rwts.dos33 import DOS33RWTS
class D5TimingBitRWTS(DOS33RWTS):
    """DOS 3.3-style RWTS variant: data field markers are taken from the
    logical sectors, and the address prologue is a 0xD5 nibble immediately
    followed by a zero (timing) bit."""

    def reset(self, logical_sectors):
        DOS33RWTS.reset(self, logical_sectors)
        # Data prologue/epilogue bytes are stored inside the RWTS sectors.
        self.data_prologue = (
            logical_sectors[2][0xE7],
            0xAA,
            logical_sectors[2][0xFC],
        )
        self.data_epilogue = (logical_sectors[3][0x35], 0xAA)

    def find_address_prologue(self, track):
        # Scan for at most two full revolutions looking for a 0xD5 nibble
        # whose next bit is 0.
        revs_at_start = track.revolutions
        while track.revolutions < revs_at_start + 2:
            if next(track.nibble()) != 0xD5:
                continue
            if next(track.bit()) == 0:
                return True
            # not the timing bit: push the bit back and keep scanning
            track.rewind(1)
        return False

    def verify_address_epilogue_at_point(self, track, logical_track_num, physical_sector_num):
        # The address epilogue is not checked in this scheme.
        return True
| 37.086957 | 94 | 0.589683 |
c62960791ab83d9289c6625322e918002a0c619c | 5,326 | py | Python | openaerostruct/structures/vonmises_wingbox.py | fkopsaf/OpenAeroStruct | 414bd76a7f14f1bd52d6dacc6694382d52e5fabc | [
"Apache-2.0"
] | null | null | null | openaerostruct/structures/vonmises_wingbox.py | fkopsaf/OpenAeroStruct | 414bd76a7f14f1bd52d6dacc6694382d52e5fabc | [
"Apache-2.0"
] | null | null | null | openaerostruct/structures/vonmises_wingbox.py | fkopsaf/OpenAeroStruct | 414bd76a7f14f1bd52d6dacc6694382d52e5fabc | [
"Apache-2.0"
] | 1 | 2018-09-24T04:58:37.000Z | 2018-09-24T04:58:37.000Z | from __future__ import print_function, division
import numpy as np
from openmdao.api import ExplicitComponent
from openaerostruct.structures.utils import norm, unit
class VonMisesWingbox(ExplicitComponent):
    """ Compute the von Mises stress in each element of a wingbox.

    Parameters
    ----------
    nodes[ny, 3] : numpy array
        Coordinates for each FEM node.
    disp[ny, 6] : numpy array
        Displacements and rotations of each FEM node.
    Qz[ny-1], Iz[ny-1], J[ny-1], A_enc[ny-1] : numpy arrays
        Section properties for each FEM element (first moment of area, second
        moment of area, torsion constant, enclosed area).
    spar_thickness[ny-1], skin_thickness[ny-1] : numpy arrays
        Spar and skin thicknesses for each FEM element.
    htop[ny-1], hbottom[ny-1], hfront[ny-1], hrear[ny-1] : numpy arrays
        Distances from the section reference axis to the top/bottom skins and
        front/rear spars for each FEM element.

    Returns
    -------
    vonmises[ny-1, 4] : numpy array
        von Mises stress magnitudes for each FEM element, evaluated at four
        critical points (top skin, bottom skin, front spar, rear spar).
    """

    def initialize(self):
        self.options.declare('surface', types=dict)

    def setup(self):
        self.surface = surface = self.options['surface']

        # number of spanwise FEM nodes
        self.ny = surface['mesh'].shape[1]

        self.add_input('nodes', val=np.zeros((self.ny, 3)), units='m')
        self.add_input('disp', val=np.zeros((self.ny, 6)), units='m')
        self.add_input('Qz', val=np.zeros((self.ny - 1)), units='m**3')
        self.add_input('Iz', val=np.zeros((self.ny - 1)), units='m**4')
        self.add_input('J', val=np.zeros((self.ny - 1)), units='m**4')
        self.add_input('A_enc', val=np.zeros((self.ny - 1)), units='m**2')
        self.add_input('spar_thickness', val=np.zeros((self.ny - 1)), units='m')
        self.add_input('skin_thickness', val=np.zeros((self.ny - 1)), units='m')
        self.add_input('htop', val=np.zeros((self.ny - 1)), units='m')
        self.add_input('hbottom', val=np.zeros((self.ny - 1)), units='m')
        self.add_input('hfront', val=np.zeros((self.ny - 1)), units='m')
        self.add_input('hrear', val=np.zeros((self.ny - 1)), units='m')

        self.add_output('vonmises', val=np.zeros((self.ny-1, 4)), units='N/m**2')

        # material properties and the strength factor applied to the upper skin
        self.E = surface['E']
        self.G = surface['G']
        self.tssf = top_skin_strength_factor = surface['strength_factor_for_upper_skin']

        self.declare_partials('*', '*', method='cs')

    def compute(self, inputs, outputs):
        disp = inputs['disp']
        nodes = inputs['nodes']
        A_enc = inputs['A_enc']
        Qy = inputs['Qz']
        Iz = inputs['Iz']
        J = inputs['J']
        htop = inputs['htop']
        hbottom = inputs['hbottom']
        hfront = inputs['hfront']
        hrear = inputs['hrear']
        spar_thickness = inputs['spar_thickness']
        skin_thickness = inputs['skin_thickness']
        vonmises = outputs['vonmises']

        # Only use complex type for these arrays if we're using cs to check derivs
        dtype = type(disp[0, 0])
        T = np.zeros((3, 3), dtype=dtype)
        x_gl = np.array([1, 0, 0], dtype=dtype)

        E = self.E
        G = self.G

        num_elems = self.ny - 1
        for ielem in range(num_elems):

            # Build the local element frame: x along the element axis, y/z
            # from cross products with the global x axis.
            P0 = nodes[ielem, :]
            P1 = nodes[ielem+1, :]
            L = norm(P1 - P0)

            x_loc = unit(P1 - P0)
            y_loc = unit(np.cross(x_loc, x_gl))
            z_loc = unit(np.cross(x_loc, y_loc))

            T[0, :] = x_loc
            T[1, :] = y_loc
            T[2, :] = z_loc

            # Transform the nodal displacements/rotations into the local frame.
            u0x, u0y, u0z = T.dot(disp[ielem, :3])
            r0x, r0y, r0z = T.dot(disp[ielem, 3:])
            u1x, u1y, u1z = T.dot(disp[ielem+1, :3])
            r1x, r1y, r1z = T.dot(disp[ielem+1, 3:])

            axial_stress = E * (u1x - u0x) / L # this is stress = modulus * strain; positive is tensile
            torsion_stress = G * J[ielem] / L * (r1x - r0x) / 2 / spar_thickness[ielem] / A_enc[ielem] # this is Torque / (2 * thickness_min * Area_enclosed)
            top_bending_stress = E / (L**2) * (6 * u0y + 2 * r0z * L - 6 * u1y + 4 * r1z * L ) * htop[ielem] # this is moment * htop / I
            bottom_bending_stress = - E / (L**2) * (6 * u0y + 2 * r0z * L - 6 * u1y + 4 * r1z * L ) * hbottom[ielem] # this is moment * hbottom / I
            front_bending_stress = - E / (L**2) * (-6 * u0z + 2 * r0y * L + 6 * u1z + 4 * r1y * L ) * hfront[ielem] # this is moment * hfront / I
            rear_bending_stress = E / (L**2) * (-6 * u0z + 2 * r0y * L + 6 * u1z + 4 * r1y * L ) * hrear[ielem] # this is moment * hrear / I
            vertical_shear = E / (L**3) *(-12 * u0y - 6 * r0z * L + 12 * u1y - 6 * r1z * L ) * Qy[ielem] / (2 * spar_thickness[ielem]) # shear due to bending (VQ/It) note: the I used to get V cancels the other I

            # Combine the normal and shear stresses into von Mises stresses at
            # the four critical points; the top skin and rear spar values are
            # scaled down by the upper-skin strength factor.
            vonmises[ielem, 0] = np.sqrt((top_bending_stress + rear_bending_stress + axial_stress)**2 + 3*torsion_stress**2) / self.tssf
            vonmises[ielem, 1] = np.sqrt((bottom_bending_stress + front_bending_stress + axial_stress)**2 + 3*torsion_stress**2)
            vonmises[ielem, 2] = np.sqrt((front_bending_stress + axial_stress)**2 + 3*(torsion_stress-vertical_shear)**2)
            vonmises[ielem, 3] = np.sqrt((rear_bending_stress + axial_stress)**2 + 3*(torsion_stress+vertical_shear)**2) / self.tssf
| 41.937008 | 212 | 0.562336 |
89e00288263365f51922344ba26f7a4510e3e429 | 1,000 | py | Python | mysettings.py | gentrio/kotlin-web-site-cn | 61cdd680160948207dc1f54eda966f37d8f54481 | [
"Apache-2.0"
] | 8 | 2020-10-27T23:14:10.000Z | 2021-05-21T10:01:49.000Z | mysettings.py | angeloko23/kotlin-web-site | 73d8e2f3c78eb0f3962d76306ea49dd14d78454f | [
"Apache-2.0"
] | 13 | 2020-09-08T14:04:14.000Z | 2022-03-08T23:29:41.000Z | mysettings.py | angeloko23/kotlin-web-site | 73d8e2f3c78eb0f3962d76306ea49dd14d78454f | [
"Apache-2.0"
] | 1 | 2020-09-20T16:59:51.000Z | 2020-09-20T16:59:51.000Z | from src.markdown.makrdown import jinja_aware_markdown
# Flask / Flask-FlatPages / Frozen-Flask settings for local development.
CACHE_TYPE = "null"
PREFERRED_URL_SCHEME = 'http'
SERVER_NAME = 'localhost:5000'
FLATPAGES_EXTENSION = '.md'
FLATPAGES_HTML_RENDERER = jinja_aware_markdown
FREEZER_IGNORE_404_NOT_FOUND = True
FLATPAGES_AUTO_RELOAD = True
FREEZER_STATIC_IGNORE = ["*"]
ERROR_404_HELP = False

# External links used throughout the site templates.
GITHUB_URL = 'https://github.com/JetBrains/kotlin'
TWITTER_URL = 'https://twitter.com/kotlin'
EDIT_ON_GITHUB_URL = 'https://github.com/JetBrains/kotlin-web-site/edit/master/'
PDF_URL = '/docs/kotlin-docs.pdf'
FORUM_URL = 'http://devnet.jetbrains.com/community/kotlin'
SITE_GITHUB_URL = 'http://github.com/JetBrains/kotlin-web-site'
CODE_URL = 'https://github.com/JetBrains/kotlin-examples/tree/master'
TEXT_USING_GRADLE = "In this tutorial, we're going to be using Gradle but the same can be accomplished using either IntelliJ IDEA project structure or Maven. For details on setting up Gradle to work with Kotlin, see [Using Gradle](/docs/reference/using-gradle.html)." | 50 | 267 | 0.795 |
4b260d90887db9f32400e48b7cb524813e0f2cfc | 4,665 | py | Python | tests/utils/test_qos.py | ovh/depc | ce428b6ba790ee4a2e7150b4cb68fdcbfdfae2f4 | [
"BSD-3-Clause"
] | 77 | 2019-01-30T10:12:36.000Z | 2021-10-19T16:25:53.000Z | tests/utils/test_qos.py | ovh/depc | ce428b6ba790ee4a2e7150b4cb68fdcbfdfae2f4 | [
"BSD-3-Clause"
] | 13 | 2019-02-20T16:57:57.000Z | 2022-03-01T23:10:26.000Z | tests/utils/test_qos.py | ovh/depc | ce428b6ba790ee4a2e7150b4cb68fdcbfdfae2f4 | [
"BSD-3-Clause"
] | 10 | 2019-01-30T13:30:39.000Z | 2021-08-02T05:55:18.000Z | import pandas as pd
from deepdiff import DeepDiff
from depc.utils.qos import OperationTypes
from depc.utils.qos import _compute_qos
DEFAULT_ARGS = {"agg_op": OperationTypes.AND, "auto_fill": True, "float_decimal": 3}
def test_compute_qos_empty():
    """Without any input series the QOS is undefined and nothing is counted."""
    result = _compute_qos([], start=0, end=1, **DEFAULT_ARGS)

    reference = {"qos": None, "bools_dps": {}, "periods": {"ok": 0, "ko": 0}}
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_one_good_datapoint():
    """A single OK datapoint is auto-filled across the whole window."""
    series = pd.Series({1595980800: True})

    result = _compute_qos([series], start=1595980800, end=1596067199, **DEFAULT_ARGS)

    reference = {
        "qos": 100.0,
        "bools_dps": {1595980800: True, 1596067199: True},
        "periods": {"ok": 86399, "ko": 0},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_one_bad_datapoint():
    """A single KO datapoint is auto-filled, giving a QOS of zero."""
    series = pd.Series({1595980800: False})

    result = _compute_qos([series], start=1595980800, end=1596067199, **DEFAULT_ARGS)

    reference = {
        "qos": 0.0,
        "bools_dps": {1595980800: False, 1596067199: False},
        "periods": {"ok": 0, "ko": 86399},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_one_good_datapoint_no_autofill():
    """Without auto-fill only the datapoint itself counts (one OK period)."""
    series = pd.Series({1595980800: True})

    result = _compute_qos(
        [series],
        start=1595980800,
        end=1596067199,
        agg_op=OperationTypes.AND,
        auto_fill=False,
        float_decimal=3,
    )

    reference = {
        "qos": 100.0,
        "bools_dps": {1595980800: True},
        "periods": {"ok": 1, "ko": 0},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_one_bad_datapoint_no_autofill():
    """Without auto-fill a lone KO datapoint counts as a single KO period."""
    series = pd.Series({1595980800: False})

    result = _compute_qos(
        [series],
        start=1595980800,
        end=1596067199,
        agg_op=OperationTypes.AND,
        auto_fill=False,
        float_decimal=3,
    )

    reference = {
        "qos": 0.0,
        "bools_dps": {1595980800: False},
        "periods": {"ok": 0, "ko": 1},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_good_values():
    """Two OK datapoints collapse into a fully OK window."""
    series = pd.Series({1595980800: True, 1595994060: True})

    result = _compute_qos([series], start=1595980800, end=1596067199, **DEFAULT_ARGS)

    reference = {
        "qos": 100.0,
        "bools_dps": {1595980800: True, 1596067199: True},
        "periods": {"ok": 86399, "ko": 0},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_one_minute_downtime():
    """A 60-second KO window reduces the QOS accordingly."""
    series = pd.Series({1595980800: True, 1595994000: False, 1595994060: True})

    result = _compute_qos([series], start=1595980800, end=1596067199, **DEFAULT_ARGS)

    reference = {
        "qos": 99.931,
        "bools_dps": {
            1595980800: True,
            1595994000: False,
            1595994060: True,
            1596067199: True,
        },
        "periods": {"ok": 86339, "ko": 60},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_two_series():
    """AND-aggregation: a KO in one series dominates an otherwise OK one."""
    partly_down = pd.Series({1595980800: True, 1595994000: False, 1595994060: True})
    always_up = pd.Series({1595980800: True, 1595994001: True, 1595994060: True})

    result = _compute_qos(
        [partly_down, always_up], start=1595980800, end=1596067199, **DEFAULT_ARGS)

    reference = {
        "qos": 99.931,
        "bools_dps": {
            1595980800: True,
            1595994000: False,
            1595994060: True,
            1596067199: True,
        },
        "periods": {"ok": 86339, "ko": 60},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_two_series_bad_values():
    """AND-aggregation with one always-KO series yields a QOS of zero."""
    partly_down = pd.Series({1595980800: True, 1595994000: False, 1595994060: True})
    always_down = pd.Series({1595980800: False, 1595994000: False, 1595994060: False})

    result = _compute_qos(
        [partly_down, always_down], start=1595980800, end=1596067199, **DEFAULT_ARGS)

    reference = {
        "qos": 0.0,
        "bools_dps": {1595980800: False, 1596067199: False},
        "periods": {"ok": 0, "ko": 86399},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
def test_compute_qos_two_series_good_values():
    """Two always-OK series give a fully OK window."""
    first = pd.Series({1595980800: True, 1595994000: True, 1595994060: True})
    second = pd.Series({1595980800: True, 1595994000: True, 1595994060: True})

    result = _compute_qos(
        [first, second], start=1595980800, end=1596067199, **DEFAULT_ARGS)

    reference = {
        "qos": 100.0,
        "bools_dps": {1595980800: True, 1596067199: True},
        "periods": {"ok": 86399, "ko": 0},
    }
    assert DeepDiff(reference, result, ignore_order=True) == {}
| 32.395833 | 85 | 0.615863 |
b5b8e0602ea278d799853c3864dda1b72766c073 | 1,274 | py | Python | laws/tests/test_bill_stream.py | MeirKriheli/Open-Knesset | 0e1de0b0c47ae3fb341bd3afc6030d532fff6945 | [
"BSD-3-Clause"
] | 69 | 2015-02-03T12:02:56.000Z | 2022-02-16T13:08:01.000Z | laws/tests/test_bill_stream.py | OriHoch/Open-Knesset | 538bcdc2632d8d17a8ddddbc4567106684b9996b | [
"BSD-3-Clause"
] | 446 | 2015-01-01T11:10:33.000Z | 2021-11-01T08:15:39.000Z | laws/tests/test_bill_stream.py | OriHoch/Open-Knesset | 538bcdc2632d8d17a8ddddbc4567106684b9996b | [
"BSD-3-Clause"
] | 67 | 2015-01-01T09:13:58.000Z | 2021-11-01T07:51:08.000Z | # encoding: utf-8
#
from datetime import datetime
from actstream import Action
from django.test import TestCase
from laws.models import Vote, Bill, KnessetProposal
def just_id(x):
    """Return the primary key of *x* (used as a map/sort key).

    Was previously a lambda assignment; a named ``def`` is the
    idiomatic form (PEP 8, E731) and gives a useful traceback name.
    """
    return x.id

APP = 'laws'
class BillStreamTest(TestCase):
    """Tests for ``Bill.generate_activity_stream``."""

    def setUp(self):
        super(BillStreamTest, self).setUp()
        self.vote_1 = Vote.objects.create(time=datetime(2010, 12, 18),
                                          title='vote 1')
        self.vote_2 = Vote.objects.create(time=datetime(2011, 4, 4),
                                          title='vote 2')
        self.bill = Bill.objects.create(stage='1', title='bill 1', popular_name="The Bill")
        self.bill.pre_votes.add(self.vote_1)
        self.bill.first_vote = self.vote_2
        self.kp_1 = KnessetProposal.objects.create(booklet_number=2, bill=self.bill, date=datetime(2005, 1, 22))

    def testGenerate(self):
        """One action is expected per related event: pre-vote, first vote
        and knesset proposal (3 in total)."""
        self.bill.generate_activity_stream()
        s = Action.objects.stream_for_actor(self.bill)
        self.assertEqual(s.count(), 3)

    def tearDown(self):
        # Fix: the class used to define BOTH a dead ``teardown`` (wrong
        # casing, never invoked by unittest, only calling super) and this
        # ``tearDown``. The super call from the dead method is folded in
        # here and the dead method removed.
        self.bill.pre_votes.all().delete()
        self.vote_1.delete()
        self.vote_2.delete()
        self.kp_1.delete()
        self.bill.delete()
        super(BillStreamTest, self).tearDown()
| 31.073171 | 112 | 0.624019 |
fd20c12a6ad1a213c90cfae6c67902fb4d3a50f6 | 20,965 | py | Python | functionApproximation/cpd/myALSOptimiser.py | FlorianThaler/masterThesis_codes | 1f1dd1d929bd9b752105f6cfa2a3b6d9c575a8d3 | [
"MIT"
] | null | null | null | functionApproximation/cpd/myALSOptimiser.py | FlorianThaler/masterThesis_codes | 1f1dd1d929bd9b752105f6cfa2a3b6d9c575a8d3 | [
"MIT"
] | null | null | null | functionApproximation/cpd/myALSOptimiser.py | FlorianThaler/masterThesis_codes | 1f1dd1d929bd9b752105f6cfa2a3b6d9c575a8d3 | [
"MIT"
] | null | null | null | """
Author: Florian Thaler
Email: florian.thaler@edu.uni-graz.at
Description: Part of the code package corresponding to my master thesis. This file implements the optimiser
used for the update process in the context of function approximation.
Year: 2019
"""
###########################################################
# importing stuff
###########################################################
import numpy as np
import logging
import time
###########################################################
# definition of functions used in the update process
# (as non-class / non-member functions)
###########################################################
def myMatrixFunc(A, rank, degr, L, xiVecs, psiVecs, eta):
    """
    this function implements the evaluation of the linear operator showing up in the optimality condition of first
    order, i.e.

        A -> (1 / L) * sum_l <A, psi_l xi_l^T> * psi_l xi_l^T + eta * A

    @param[in] ### A ### a matrix of # rank # rows and # degr # columns, at which the operator will be evaluated
    @param[in] ### rank ### number of rows of A (kept for interface compatibility; implied by the array shapes)
    @param[in] ### degr ### number of columns of A (kept for interface compatibility; implied by the array shapes)
    @param[in] ### L ### number of data points used in the optimisation process - RECALL: the linear operator which
        has to be evaluated here is a sum of L terms.
    @param[in] ### xiVecs ### auxiliary quantity of L rows and # degr # columns
    @param[in] ### psiVecs ### auxiliary quantity of L rows and # rank # columns
    @param[in] ### eta ### penalisation parameter
    @return ### retVal ### matrix of the same shape as A, corresponding to the evaluation of the linear operator
        appearing in the first order necessity condition
    """
    # only the first L rows take part in the sum (matches the former loop bounds)
    psi = psiVecs[:L]
    xi = xiVecs[:L]
    # c[l] = <A, psi_l xi_l^T> = psi_l^T A xi_l, computed for all data points at once;
    # vectorised replacement of the former Python loop over l (O(L) numpy calls -> two BLAS-level calls)
    c = np.einsum('lr,rd,ld->l', psi, A, xi)
    # sum_l c[l] * psi_l xi_l^T expressed as a single matrix product
    retVal = (psi * c[:, np.newaxis]).transpose().dot(xi)
    retVal /= L
    # adding penalty term
    retVal += eta * A
    return retVal
def myRhsFunc(rank, degr, L, xiVecs, psiVecs, yData):
    """
    this function implements the evaluation of the rhs of the linear system corresponding to the necessary
    optimality condition of first order, i.e.

        b = (1 / L) * sum_l y_l * psi_l xi_l^T

    @param[in] ### rank ### number of rows of the return value (kept for interface compatibility)
    @param[in] ### degr ### number of columns of the return value (kept for interface compatibility)
    @param[in] ### L ### number of data points used in the optimisation process - RECALL: the term which
        has to be computed here is a sum of L terms.
    @param[in] ### xiVecs ### auxiliary quantity of L rows and # degr # columns
    @param[in] ### psiVecs ### auxiliary quantity of L rows and # rank # columns
    @param[in] ### yData ### function values at the L data points
    @return ### retVal ### matrix of # rank # rows, # degr # columns corresponding to the mentioned rhs
    """
    # only the first L entries take part in the sum (matches the former loop bounds)
    psi = psiVecs[:L]
    xi = xiVecs[:L]
    y = np.asarray(yData)[:L]
    # weight each psi_l by y_l, then contract over the data points in one matrix
    # product - vectorised replacement of the former Python loop over l
    retVal = (psi * y[:, np.newaxis]).transpose().dot(xi)
    retVal /= L
    return retVal
def costFunctional(xList, yData, L, modelFunc, eta):
    """
    this function evaluates the (penalised least squares) cost functional

        J = 0.5 * ( (1 / L) * || f(x) - y ||_2^2 + eta * sum_d || C_d ||_F^2 )

    @param[in] ### xList ### list of numpy arrays holding, per dimension, the coordinates of the data points
    @param[in] ### yData ### numpy array of exact function values
    @param[in] ### L ### number of data points
    @param[in] ### modelFunc ### instance of the model function used to approximate the data
    @param[in] ### eta ### penalisation parameter
    @return scalar value of the cost functional
    """
    residual = modelFunc.evaluate(xList, L) - yData
    dataTerm = np.linalg.norm(residual, 2) ** 2 / L
    # Frobenius-norm penalty over the coefficient matrices of every dimension
    penaltyTerm = sum(
        eta * np.linalg.norm(modelFunc.getCoeffs(d), 'fro') ** 2
        for d in range(modelFunc.getDim()))
    return 0.5 * (dataTerm + penaltyTerm)
def approximationError(xList, yData, L, modelFunc, norm='l2'):
    """
    this function computes the error between approximated and real function values.

    @param[in] ### xList ### list of numpy arrays holding, per dimension, the coordinates of the data points
    @param[in] ### yData ### numpy array representing the real function values
    @param[in] ### L ### number of data points
    @param[in] ### modelFunc ### instance of class MyCPDRadialAnsatzFunc
    @param[in] ### norm ### one of 'l2' (default), 'lInf', 'mse'
    @return the requested error, or -1 for an unknown norm identifier
    """
    if norm == 'l2':
        return np.linalg.norm(modelFunc.evaluate(xList, L) - yData, 2)
    if norm == 'lInf':
        return max(np.abs(modelFunc.evaluate(xList, L) - yData))
    if norm == 'mse':
        return (1 / (2 * L)) * np.linalg.norm(modelFunc.evaluate(xList, L) - yData, 2) ** 2
    # unknown norm identifier: the model is not evaluated and -1 is returned
    return -1
###########################################################
# definition of several classes
###########################################################
class MyALSOptimiser:
    """
    Common base class of all ALS optimisers; it merely stores the
    hyper-parameters shared by every concrete optimiser variant.
    """

    def __init__(self, eta, maxNumALSIter, epsALS):
        """
        @param[in] ### eta ### penalisation parameter
        @param[in] ### maxNumALSIter ### maximal number of ALS iterations
        @param[in] ### epsALS ### stopping tolerance: the ALS loop halts once two
            consecutive cost-functional values differ by less than this amount
        """
        self.eta, self.maxNumALSIter, self.epsALS = eta, maxNumALSIter, epsALS
class MyALSRbfOptimiser(MyALSOptimiser):
    """
    this class - a derivation of MyALSOptimiser - implements the ALS optimisation procedure in the context
    of function approximation by means of functions in CPD format on the basis of Gaussian ansatz functions.

    NOTE:
        > the class is not applicable for non Gaussian ansatz functions ...
    """

    def __init__(self, eta = 1e-4, maxNumALSIter = 200, epsALS = 1e-1):
        """
        call initialiser method of super class and set default values

        @param[in] ### eta ### penalisation parameter
        @param[in] ### maxNumALSIter ### maximal number of ALS iterations
        @param[in] ### epsALS ### (additional) stopping criterion for the ALS iteration
        """
        super().__init__(eta, maxNumALSIter, epsALS)

    def myRbfCgOptimiser(self, L, xData, yData, modelFunc, path2ModDir, modFileName, \
        maxNumCGIter = 8, epsCG = 1e-2, resNormFrac = 1e-1, warmUp = True, verbose = True, write2File = True):
        """
        this function implements the ALS update procedure by means of a CG approach.

        @param[in] ### L ### number of data points
        @param[in] ### xData ### data set in matrix format, where each row contains the coordinates of the
            corresponding data points.
        @param[in] ### yData ### real function values
        @param[in] ### modelFunc ### instance of class MyCPDRadialAnsatzFunc
        @param[in] ### path2ModDir ### string corresponding to the path to the directory where model data should
            be stored.
        @param[in] ### modFileName ### string corresponding to the basic model name - without extension
        @param[in] ### maxNumCGIter ### maximal number of CG iterations which will performed per axis and per
            ALS iteration to solve the linear system corresponding to the necessary optimality condition of first
            order
        @param[in] ### epsCG ### further stopping criterion for cg method - iteration will be stopped, when
            residual is smaller than epsCG
        @param[in] ### resNormFrac ### further stopping criterion for the cg method - iteration will be stopped,
            when norm of the residual is smaller than resNormFrac times the initial residual
        @param[in] ### warmUp ### boolean variable used to decide if some kind of warm up training will be
            performed
        @param[in] ### verbose ### boolean variable indicating if during training messages should be printed to
            console
        @param[in] ### write2File ### boolean variable indicating if logging data should be written to file.
        @return a tuple (costFuncValList, mseList, lInfApprErrList, cgPerformanceList) holding, per ALS
            iteration, the cost functional value, the mean squared error, the lInf approximation error and
            (per axis) the CG iteration statistics

        NOTE:
            > this function modifies member variables of the instance modelFunc !!!
        """
        # introduce some variable representing how often data was already written to file - needed to determine
        # the proper file name in the context of storing parameter to file.
        writeCounter = 0

        if write2File:
            # write initial parameters to file
            logging.info('> write model parameters to file')
            modelFunc.writeParams2File(path2ModDir, modFileName + str(writeCounter))
            writeCounter += 1

        # gather some data which will be needed quite often ...
        currDim = modelFunc.getDim() # current dimension
        currDegrs = modelFunc.getDegrs() # current degree of cpd function
        currRank = modelFunc.getRank() # current rank of cpd function

        # introduce lists to store the value of the cost functional after each optimisation step and the norms of the
        # gradients of the cost functional w.r.t. to the variable which is the current optimisation variable also after
        # a full cg iteration step
        costFuncValList = []
        cgPerformanceList = []
        lInfApprErrList = []
        # l2ApprErrList = []
        mseList = []

        # put input data into list
        #   > ### IS THIS REALLY NECESSARY? ###
        xList = []
        for d in range(0, currDim):
            xList.append(xData[d, :])
            # initialise cg performance list as list of lists (one sublist per dimension) ...
            cgPerformanceList.append([])

        ####################################################################################################################
        # --- do some warm up optimisation ---
        ####################################################################################################################
        if warmUp == True:
            # make only a few ALS iterations, but using a very high number of (possible) CG iterations
            numWarmUpALSIter = 1
            maxNumCGWarmUpIter = 2 * max([currDegrs[d] * currRank for d in range(0, currDim)])

            if verbose == True:
                print(' + start warm up procedure')
                print(' * perform ' + str(numWarmUpALSIter) + ' ALS iterations')
                print(' * CG parameters:')
                print(' - maximal number of iterations = ' + str(maxNumCGWarmUpIter))
                print(' - residual accuracy = ' + str(epsCG))
                logging.info(' + start warm up procedure')
                logging.info(' * perform ' + str(numWarmUpALSIter) + ' ALS iterations')
                logging.info(' * CG parameters:')
                logging.info(' - maximal number of iterations = ' + str(maxNumCGWarmUpIter))
                logging.info(' - residual accuracy = ' + str(epsCG))

            for i in range(0, numWarmUpALSIter):
                for d in range(0, currDim):
                    # now start cg method ...
                    psiVecs = np.zeros((L, currRank))
                    xiVecs = np.zeros((L, currDegrs[d]))

                    #######################################
                    # compute auxiliary quantities ...
                    #######################################
                    # psiVecTr aggregates (per rank) the factors of every dimension EXCEPT
                    # the one currently optimised; phiVecTr holds the 1d basis functions of
                    # dimension d evaluated at the data points.
                    phiVecTr = np.zeros((currDegrs[d], L))
                    psiVecTr = np.ones((currRank, L))

                    t0 = time.time()

                    for m in range(0, currDim):
                        if m != d:
                            for k in range(0, currRank):
                                psiVecTr[k, :] *= modelFunc.evalLinCombRadBasFunc1d(xList[m], m, k)
                    for nu in range(0, currDegrs[d]):
                        phiVecTr[nu, :] = modelFunc.evalRadBasFunc1d(xList[d], d, nu)

                    xiVecs = phiVecTr.transpose()
                    psiVecs = psiVecTr.transpose()

                    x = modelFunc.getCoeffs(d)
                    b = myRhsFunc(currRank, currDegrs[d], L, xiVecs, psiVecs, yData)
                    r = b - myMatrixFunc(x, currRank, currDegrs[d], L, xiVecs, psiVecs, self.eta)

                    # NOTE: resNorm holds the SQUARED euclidean norm of the residual
                    resNorm = np.sum(r * r)
                    resNorm0 = np.sqrt(resNorm)
                    searchDir = r.copy()

                    k = 0
                    # standard CG iteration on the matrix-free linear operator given by myMatrixFunc
                    while k < maxNumCGWarmUpIter and np.sqrt(resNorm) > epsCG:
                        z = myMatrixFunc(searchDir, currRank, currDegrs[d], L, xiVecs, psiVecs, self.eta)

                        tmp = np.sum(r * r)
                        alpha = (tmp) / (np.sum(searchDir * z))

                        # determine new iterate
                        x += alpha * searchDir

                        # adjust residuals
                        rNew = (r - alpha * z).copy()

                        resNorm = np.sum(rNew * rNew)
                        beta = (resNorm) / (tmp)

                        r = rNew.copy()
                        searchDir = (r + beta * searchDir).copy()

                        k += 1

                    # NOTE: modelFunc is updated in place
                    modelFunc.setCoeffs(d, x)

            if write2File:
                # write parameters to file
                logging.info('> write model parameters to file')
                modelFunc.writeParams2File(path2ModDir, modFileName + str(writeCounter))
                writeCounter += 1

        ####################################################################################################################
        # --- start serious optimsation here ---
        ####################################################################################################################

        # determine the number of parameters which have to approximated
        totNumParams = 0
        for d in range(0, currDim):
            totNumParams += currDegrs[d] * modelFunc.getRank()

        if verbose == True:
            print(' ----------------------------------------------------------------------')
            print(' + start ALS optimisation procedure')
            print(' * number of parameters to estimate in total = ' + str(totNumParams))
            print(' * number of data points = ' + str(L))
            print(' * maximal number of ALS iterations = ' + str(self.maxNumALSIter))
            print(' * descent bound = ' + str(self.epsALS))
            print(' * CG parameters:')
            print(' - maximal number of iterations = ' + str(maxNumCGIter))
            print(' - residual accuracy = ' + str(epsCG))
            print(' - residual fraction = ' + str(resNormFrac))
            print(' --------------------------------------------------------------------')
            logging.info(' ----------------------------------------------------------------------')
            logging.info(' + start ALS optimisation procedure')
            logging.info(' * number of parameters to estimate in total = ' + str(totNumParams))
            logging.info(' * number of data points = ' + str(L))
            logging.info(' * maximal number of ALS iterations = ' + str(self.maxNumALSIter))
            logging.info(' * descent bound = ' + str(self.epsALS))
            logging.info(' * CG parameters:')
            logging.info(' - maximal number of iterations = ' + str(maxNumCGIter))
            logging.info(' - residual accuracy = ' + str(epsCG))
            logging.info(' - residual fraction = ' + str(resNormFrac))
            logging.info(' --------------------------------------------------------------------')

        # introduce iteration counter
        i = 0

        costFuncValNew = costFunctional(xList, yData, L, modelFunc, self.eta)
        # initialised such that the descent criterion of the while loop holds initially
        costFuncValOld = costFuncValNew - 1

        # ### START ALS ITERATIONS HERE ###
        while (i < self.maxNumALSIter) and (np.abs(costFuncValOld - costFuncValNew) > self.epsALS):

            print(' start ALS iteration number # ' + str(i + 1) + ' # ')
            logging.info(' start ALS iteration number # ' + str(i + 1) + ' # ')

            for d in range(0, currDim):
                # now start cg method ...

                #######################################
                # compute auxiliary quantities ...
                #######################################
                psiVecs = np.zeros((L, currRank))
                xiVecs = np.zeros((L, currDegrs[d]))

                xiVecTr = np.zeros((currDegrs[d], L))
                psiVecTr = np.ones((currRank, L))

                t0 = time.time()

                for m in range(0, currDim):
                    if m != d:
                        for k in range(0, currRank):
                            psiVecTr[k, :] *= modelFunc.evalLinCombRadBasFunc1d(xList[m], m, k)
                for nu in range(0, currDegrs[d]):
                    xiVecTr[nu, :] = modelFunc.evalRadBasFunc1d(xList[d], d, nu)

                t1 = time.time()

                xiVecs = xiVecTr.transpose()
                psiVecs = psiVecTr.transpose()

                if verbose == True:
                    print(' -----------------------------------------------------------')
                    print(' * start cg iteration to optimise w.r.t. x' + str(d + 1) + '- coordinates')
                    logging.info(' -----------------------------------------------------------')
                    logging.info(' * start cg iteration to optimise w.r.t. x' + str(d + 1) + '- coordinates')

                #######################################
                # start with CG ...
                #######################################
                x = modelFunc.getCoeffs(d)
                b = myRhsFunc(currRank, currDegrs[d], L, xiVecs, psiVecs, yData)
                r = b - myMatrixFunc(x, currRank, currDegrs[d], L, xiVecs, psiVecs, self.eta)

                # NOTE: resNorm holds the SQUARED euclidean norm of the residual
                resNorm = np.sum(r * r)
                resNorm0 = np.sqrt(resNorm)
                searchDir = r.copy()

                k = 0
                stop = False
                reason = ''
                while stop == False:
                    z = myMatrixFunc(searchDir, currRank, currDegrs[d], L, xiVecs, psiVecs, self.eta)

                    tmp = np.sum(r * r)
                    alpha = (tmp) / (np.sum(searchDir * z))

                    # determine new iterate
                    x += alpha * searchDir

                    # adjust residuals
                    rNew = (r - alpha * z).copy()

                    resNorm = np.sum(rNew * rNew)
                    beta = (resNorm) / (tmp)

                    r = rNew.copy()
                    searchDir = (r + beta * searchDir).copy()

                    k += 1

                    stop = (k >= maxNumCGIter) or np.sqrt(resNorm) <= np.max(np.asarray([epsCG, resNormFrac * resNorm0]))
                    # bool * str evaluates to the string if the bool is True and to '' otherwise
                    reason = (k >= maxNumCGIter) * ' ### maximal number of CG iterations reached ### ' \
                        + (np.sqrt(resNorm) <= np.max(np.asarray([epsCG, resNormFrac * resNorm0]))) * ' ### residual is sufficiently small ### '

                t2 = time.time()

                # log results
                cgPerformanceList[d].append((k, np.sqrt(resNorm), reason))

                if verbose == True:
                    print(' ---------------------------------------------------------')
                    print(' * cg iteration stopped since: ### ' + reason + ' ###')
                    print(' - iteration stopped after # ' + str(k) + ' # iterations')
                    print(' - residual accuracy = # ' + str(resNorm) + ' #')
                    print(' ---------------------------------------------------------')
                    print(' * it took ' + str(t2 - t0) + ' s to perform the whole cg step')
                    print(' * it took ' + str(t1 - t0) + ' s to compute the vectors psiVec, phiVec')
                    print(' * norm of the gradient: ' + str(np.sqrt(resNorm)))
                    logging.info(' ---------------------------------------------------------')
                    logging.info(' * cg iteration stopped since: ### ' + reason + ' ###')
                    logging.info(' - iteration stopped after # ' + str(k) + ' # iterations')
                    logging.info(' - residual accuracy = # ' + str(resNorm) + ' #')
                    logging.info(' ---------------------------------------------------------')
                    logging.info(' * it took ' + str(t2 - t0) + ' s to perform the whole cg step')
                    logging.info(' * it took ' + str(t1 - t0) + ' s to compute the vectors psiVec, phiVec')
                    logging.info(' * norm of the gradient: ' + str(np.sqrt(resNorm)))

                # NOTE: modelFunc is updated in place
                modelFunc.setCoeffs(d, x)

            currCostFuncVal = costFunctional(xList, yData, L, modelFunc, self.eta)
            # l2ApprErr = approximationError(xList, yData, L, modelFunc, norm = 'l2')
            mse = approximationError(xList, yData, L, modelFunc, norm = 'mse')
            lInfApprErr = approximationError(xList, yData, L, modelFunc, norm = 'lInf')

            # log results
            costFuncValList.append(currCostFuncVal)
            # l2ApprErrList.append(l2ApprErr)
            mseList.append(mse)
            lInfApprErrList.append(lInfApprErr)

            if verbose == True:
                print(' -----------------------------------------------------------')
                print(' ALS iteration finished')
                print(' * value of the cost functional = ' + str(currCostFuncVal))
                # print(' * l2 approximation error = ' + str(l2ApprErr))
                print(' * mse = ' + str(mse))
                print(' * lInf approximation error = ' + str(lInfApprErr))
                print(' --------------------------------------------------------------------')
                logging.info(' -----------------------------------------------------------')
                logging.info(' ALS iteration finished')
                logging.info(' * value of the cost functional = ' + str(currCostFuncVal))
                # logging.info(' * l2 approximation error = ' + str(l2ApprErr))
                logging.info(' * mse = ' + str(mse))
                logging.info(' * lInf approximation error = ' + str(lInfApprErr))
                logging.info(' --------------------------------------------------------------------')

            i += 1
            costFuncValOld = costFuncValNew
            costFuncValNew = costFunctional(xList, yData, L, modelFunc, self.eta)

            if write2File:
                # write parameters to file
                logging.info('> write model parameters to file')
                modelFunc.writeParams2File(path2ModDir, modFileName + str(writeCounter))
                writeCounter += 1

        # return costFuncValList, l2ApprErrList, lInfApprErrList, cgPerformanceList
        return costFuncValList, mseList, lInfApprErrList, cgPerformanceList
####################################################################################################################
####################################################################################################################
####################################################################################################################
########################################################################################################################
| 38.752311 | 126 | 0.567851 |
f7ce7827627ec1926ce18a91606df13796bfbc43 | 42,760 | py | Python | sim/lib/plot.py | j-groeneveld/covid | a8d993c866dcd56bf1c5f6f0a2120eae883aa029 | [
"MIT"
] | null | null | null | sim/lib/plot.py | j-groeneveld/covid | a8d993c866dcd56bf1c5f6f0a2120eae883aa029 | [
"MIT"
] | null | null | null | sim/lib/plot.py | j-groeneveld/covid | a8d993c866dcd56bf1c5f6f0a2120eae883aa029 | [
"MIT"
] | 1 | 2021-07-28T13:52:58.000Z | 2021-07-28T13:52:58.000Z | import time
import bisect
import numpy as np
import pandas as pd
import networkx as nx
import scipy
import scipy.optimize
from scipy.interpolate import interp1d
import scipy as sp
import random as rd
import os, math
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import date2num, num2date
from lib.measures import (MeasureList, BetaMultiplierMeasureBySite,
SocialDistancingForAllMeasure, BetaMultiplierMeasureByType,
SocialDistancingForPositiveMeasure, SocialDistancingByAgeMeasure, SocialDistancingForSmartTracing, ComplianceForAllMeasure)
from lib.rt import compute_daily_rts, R_T_RANGE
import numpy as np
import seaborn as sns
from matplotlib.colors import ListedColormap
TO_HOURS = 24.0  # simulation time is measured in hours; divide by this for days
DPI = 200  # figure resolution when saving
NO_PLOT = False  # global switch to suppress plotting
TEST_LAG = 48.0 # hours; lag applied to the 'posi'/'nega' test states below

# Global matplotlib style shared by all plots (LaTeX text rendering, serif fonts).
matplotlib.rcParams.update({
    "figure.autolayout": False,
    "figure.figsize": (6, 4),
    "figure.dpi": 150,
    "axes.linewidth": 0.8,
    "xtick.major.width": 0.8,
    "xtick.minor.width": 0.8,
    "ytick.major.width": 0.8,
    "ytick.minor.width": 0.8,
    "text.usetex": True,
    "font.family": "serif",  # use serif rather than sans-serif
    "font.serif": "Times New Roman",  # use "Times New Roman" as the standard font
    "font.size": 16,
    "axes.titlesize": 16,
    "axes.labelsize": 16,
    "legend.fontsize": 14,
    "legend.frameon": True,
    "xtick.labelsize": 14,
    "ytick.labelsize": 14,
    "lines.linewidth": 2.0,
    "lines.markersize": 4,
    "grid.linewidth": 0.4,
})
def days_to_datetime(arr, start_date):
    """Convert day offsets relative to `start_date` into pandas datetimes."""
    # posix seconds of the start date plus the offsets expressed in seconds
    seconds = pd.Timestamp(start_date).timestamp() + arr * 24 * 60 * 60
    return pd.to_datetime(seconds, unit='s')
def lockdown_widget(lockdown_at, start_date, lockdown_label_y, ymax,
                    lockdown_label, ax, ls='--', xshift=0.0, zorder=None):
    """Draw a vertical lockdown marker with a rotated text label on `ax`."""
    # Position of the lockdown event on the date axis.
    when = days_to_datetime(lockdown_at, start_date=start_date)
    ax.plot([when, when], [0, ymax], linewidth=2.5, linestyle=ls,
            color='black', label='_nolegend_', zorder=zorder)
    # Fall back to 40% of the axis height if no label height was given.
    label_y = lockdown_label_y or ymax * 0.4
    label_x = when - pd.Timedelta(2.1 + xshift, unit='d')
    ax.text(x=label_x, y=label_y, s=lockdown_label, rotation=90)
def target_widget(show_target, start_date, ax, zorder=None):
    """Scatter the observed COVID-19 case counts as red crosses on `ax`."""
    n = show_target.shape[0]
    # One marker per day, converted to the date axis.
    xs = days_to_datetime(np.linspace(0, n - 1, num=n), start_date=start_date)
    ax.plot(xs, show_target, linewidth=4, linestyle='', marker='X', ms=6,
            color='red', label='COVID-19 case data', zorder=zorder)
class Plotter(object):
"""
Plotting class
"""
def __init__(self):
# plot constants
# check out https://colorhunt.co/
self.color_expo = '#ffcc00'
self.color_iasy = '#00a8cc'
self.color_ipre = '#005082'
self.color_isym = '#000839'
self.color_testing = '#ffa41b'
self.color_posi = '#21bf73'
self.color_nega = '#fd5e53'
self.color_all = '#ffa41b'
self.color_positive = '#00a8cc'
self.color_age = '#005082'
self.color_tracing = '#000839'
self.color_infected = '#000839'
self.filling_alpha = 0.5
self.color_different_scenarios = [
'#dc2ade',
'#21ff53',
'#323edd',
'#ff9021',
'#4d089a',
'#cc0066',
'#ff6666',
'#216353',
'#66cccc',
'#ff2222'
]
self.color_different_scenarios_alt = [
'#a1dab4',
'#41b6c4',
'#2c7fb8',
'#253494',
]
# sequential
# self.color_different_scenarios = [
# # '#ffffcc',
# '#c7e9b4',
# '#7fcdbb',
# '#41b6c4',
# '#2c7fb8',
# '#253494',
# '#000000'
# ]
# 2D visualization
self.density_alpha = 0.7
self.marker_home = "^"
self.marker_site = "o"
self.color_home = '#000839'
self.color_site = '#000000'
self.size_home = 80
self.size_site = 300
def __is_state_at(self, sim, r, state, t):
if state == 'posi' or state == 'nega':
return (sim.state_started_at[state][r] - TEST_LAG <= t) & (sim.state_ended_at[state][r] - TEST_LAG > t)
else:
return (sim.state_started_at[state][r] <= t) & (sim.state_ended_at[state][r] > t)
def __state_started_before(self, sim, r, state, t):
if state == 'posi' or state == 'nega':
return (sim.state_started_at[state][r] - TEST_LAG <= t)
else:
return (sim.state_started_at[state][r] <= t)
def __is_contained_at(self, sim, r, measure, t):
contained = np.zeros(sim.n_people, dtype='bool')
for i in range(sim.n_people):
if measure == 'SocialDistancingForAllMeasure':
contained[i] = sim.measure_list[r].is_contained_prob(SocialDistancingForAllMeasure, t=t, j=i)
elif measure == 'SocialDistancingForSmartTracing':
contained[i] = sim.measure_list[r].is_contained_prob(SocialDistancingForSmartTracing, t=t, j=i)
elif measure == 'SocialDistancingByAgeMeasure':
contained[i] = sim.measure_list[r].is_contained_prob(SocialDistancingByAgeMeasure, t=t, age=sim.people_age[r, i])
elif measure == 'SocialDistancingForPositiveMeasure':
contained[i] = sim.measure_list[r].is_contained_prob(SocialDistancingForPositiveMeasure,
t=t, j=i,
state_posi_started_at=sim.state_started_at['posi'][r, :],
state_posi_ended_at=sim.state_ended_at['posi'][r, :],
state_resi_started_at=sim.state_started_at['resi'][r, :],
state_dead_started_at=sim.state_started_at['dead'][r, :])
else:
raise ValueError('Social distancing measure unknown.')
return contained
def __comp_state_cumulative(self, sim, state, acc):
'''
Computes `state` variable over time [0, self.max_time] with given accuracy `acc
'''
ts, means, stds = [], [], []
for t in np.linspace(0.0, sim.max_time, num=acc, endpoint=True):
restarts = [np.sum(self.__state_started_before(sim, r, state, t))
for r in range(sim.random_repeats)]
ts.append(t/TO_HOURS)
means.append(np.mean(restarts))
stds.append(np.std(restarts))
return np.array(ts), np.array(means), np.array(stds)
def __comp_state_over_time(self, sim, state, acc):
'''
Computes `state` variable over time [0, self.max_time] with given accuracy `acc
'''
ts, means, stds = [], [], []
for t in np.linspace(0.0, sim.max_time, num=acc, endpoint=True):
restarts = [np.sum(self.__is_state_at(sim, r, state, t))
for r in range(sim.random_repeats)]
ts.append(t/TO_HOURS)
means.append(np.mean(restarts))
stds.append(np.std(restarts))
return np.array(ts), np.array(means), np.array(stds)
def __comp_contained_over_time(self, sim, measure, acc):
'''
Computes `state` variable over time [0, self.max_time] with given accuracy `acc
'''
ts, means, stds = [], [], []
for t in np.linspace(0.0, sim.max_time, num=acc, endpoint=True):
restarts = [np.sum(self.__is_contained_at(sim, r, measure, t))
for r in range(sim.random_repeats)]
ts.append(t/TO_HOURS)
means.append(np.mean(restarts))
stds.append(np.std(restarts))
return np.array(ts), np.array(means), np.array(stds)
def plot_cumulative_infected(self, sim, title='Example', filename='daily_inf_0',
    figsize=(6, 5), errorevery=20, acc=1000, ymax=None,
    lockdown_label='Lockdown', lockdown_at=None,
    lockdown_label_y=None, show_target=None,
    start_date='1970-01-01',
    subplot_adjust=None, legend_loc='upper right'):
    '''
    Plots the cumulative number of infected people, stacked by group
    (asymptomatic below symptomatic), averaged over random restarts with
    error bars (std-dev) on the symptomatic total.
    Saves the figure to plots/<filename>.png; `title` is currently unused.
    '''
    # never sample more finely than one point per simulation time unit
    if acc > sim.max_time:
        acc = int(sim.max_time)
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # cumulative counts per state, averaged over restarts
    ts, iasy_mu, iasy_sig = self.__comp_state_cumulative(sim, 'iasy', acc)
    # _, ipre_mu, ipre_sig = self.__comp_state_cumulative(sim, 'ipre', acc)
    _, isym_mu, isym_sig = self.__comp_state_cumulative(sim, 'isym', acc)
    # _, expo_mu, iexpo_sig = self.__comp_state_cumulative(sim, 'expo', acc)
    # _, posi_mu, posi_sig = self.__comp_state_cumulative(sim, 'posi', acc)
    line_xaxis = np.zeros(ts.shape)
    # stack the series: asymptomatic, then asymptomatic + symptomatic
    line_iasy = iasy_mu
    line_isym = iasy_mu + isym_mu
    # std-dev of the stacked total (assumes independence of the two series)
    error_isym = np.sqrt(iasy_sig**2 + isym_sig**2)
    # Convert x-axis into posix timestamps and use pandas to plot as dates
    ts = days_to_datetime(ts, start_date=start_date)
    # lines: black outlines on top of the stacked fills below
    ax.plot(ts, line_iasy, c='black', linestyle='-')
    ax.errorbar(ts, line_isym, yerr=error_isym, c='black', linestyle='-',
                elinewidth=0.8, errorevery=errorevery, capsize=3.0)
    # filling between the stacked lines gives the per-group bands
    ax.fill_between(ts, line_xaxis, line_iasy, alpha=self.filling_alpha, label='Asymptomatic',
                    edgecolor=self.color_iasy, facecolor=self.color_iasy, linewidth=0, zorder=0)
    ax.fill_between(ts, line_iasy, line_isym, alpha=self.filling_alpha, label='Symptomatic',
                    edgecolor=self.color_isym, facecolor=self.color_isym, linewidth=0, zorder=0)
    # limits: default headroom of 50% above the peak stacked mean
    if ymax is None:
        ymax = 1.5 * np.max(iasy_mu + isym_mu)
    ax.set_ylim((0, ymax))
    # ax.set_xlabel('Days')
    ax.set_ylabel('People')
    # extra annotations: lockdown marker and inference target, if requested
    if lockdown_at is not None:
        lockdown_widget(lockdown_at, start_date,
                        lockdown_label_y, ymax,
                        lockdown_label, ax)
    if show_target is not None:
        target_widget(show_target, start_date, ax)
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    #set ticks every week
    ax.xaxis.set_major_locator(mdates.WeekdayLocator())
    #set major ticks format
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
    fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
    # legend
    ax.legend(loc=legend_loc, borderaxespad=0.5)
    subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
    plt.subplots_adjust(**subplot_adjust)
    plt.draw()
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def plot_daily_infected(self, sim, title='Example', filename='daily_inf_0',
    figsize=(6, 5), errorevery=20, acc=1000, ymax=None,
    lockdown_label='Lockdown', lockdown_at=None,
    lockdown_label_y=None, show_target=None,
    lockdown_end=None,
    start_date='1970-01-01',
    subplot_adjust=None, legend_loc='upper right'):
    '''
    Plots the number of currently infected people over time, stacked by
    group (asymptomatic, pre-symptomatic, symptomatic), averaged over
    random restarts with error bars (std-dev) on the stacked total.
    Saves the figure to plots/<filename>.png; `title` is currently unused.
    '''
    # never sample more finely than one point per simulation time unit
    if acc > sim.max_time:
        acc = int(sim.max_time)
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # instantaneous counts per state, averaged over restarts
    ts, iasy_mu, iasy_sig = self.__comp_state_over_time(sim, 'iasy', acc)
    _, ipre_mu, ipre_sig = self.__comp_state_over_time(sim, 'ipre', acc)
    _, isym_mu, isym_sig = self.__comp_state_over_time(sim, 'isym', acc)
    # _, expo_mu, iexpo_sig = self.__comp_state_over_time(sim, 'expo', acc)
    # _, posi_mu, posi_sig = self.__comp_state_over_time(sim, 'posi', acc)
    line_xaxis = np.zeros(ts.shape)
    # stack the three series on top of each other
    line_iasy = iasy_mu
    line_ipre = iasy_mu + ipre_mu
    line_isym = iasy_mu + ipre_mu + isym_mu
    # std-dev of the stacked total (assumes independence of the series)
    error_isym = np.sqrt(iasy_sig**2 + ipre_sig**2 + isym_sig**2)
    # Convert x-axis into posix timestamps and use pandas to plot as dates
    ts = days_to_datetime(ts, start_date=start_date)
    # lines: black outlines on top of the stacked fills below
    ax.plot(ts, line_iasy,
            c='black', linestyle='-')
    ax.plot(ts, line_ipre,
            c='black', linestyle='-')
    ax.errorbar(ts, line_isym, yerr=error_isym, c='black', linestyle='-',
                elinewidth=0.8, errorevery=errorevery, capsize=3.0)
    # filling between the stacked lines gives the per-group bands
    ax.fill_between(ts, line_xaxis, line_iasy, alpha=self.filling_alpha, label='Asymptomatic',
                    edgecolor=self.color_iasy, facecolor=self.color_iasy, linewidth=0, zorder=0)
    ax.fill_between(ts, line_iasy, line_ipre, alpha=self.filling_alpha, label='Pre-symptomatic',
                    edgecolor=self.color_ipre, facecolor=self.color_ipre, linewidth=0, zorder=0)
    ax.fill_between(ts, line_ipre, line_isym, alpha=self.filling_alpha, label='Symptomatic',
                    edgecolor=self.color_isym, facecolor=self.color_isym, linewidth=0, zorder=0)
    # limits: default headroom of 50% above the peak stacked mean
    if ymax is None:
        ymax = 1.5 * np.max(iasy_mu + ipre_mu + isym_mu)
    ax.set_ylim((0, ymax))
    # ax.set_xlabel('Days')
    ax.set_ylabel('People')
    # extra annotations: lockdown start/end markers and inference target
    if lockdown_at is not None:
        lockdown_widget(lockdown_at, start_date,
                        lockdown_label_y, ymax,
                        lockdown_label, ax)
    if lockdown_end is not None:
        lockdown_widget(lockdown_at=lockdown_end, start_date=start_date,
                        lockdown_label_y=lockdown_label_y, ymax=ymax,
                        lockdown_label='End of lockdown', ax=ax, ls='dotted')
    if show_target is not None:
        target_widget(show_target, start_date, ax)
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    #set ticks every week
    ax.xaxis.set_major_locator(mdates.WeekdayLocator())
    #set major ticks format
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
    fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
    # legend
    ax.legend(loc=legend_loc, borderaxespad=0.5)
    subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
    plt.subplots_adjust(**subplot_adjust)
    plt.draw()
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def plot_daily_tested(self, sim, title='Example', filename='daily_tested_0', figsize=(10, 10), errorevery=20,
                      acc=1000, ymax=None):
    '''
    Plots daily positive and negative test counts, averaged over random
    restarts, using error bars for std-dev.
    Saves the figure to plots/<filename>.png.

    Cleanup: removed locals that were computed but never used
    (`line_posi`, `line_nega`, `error_posi`, `error_nega`, `T`); the
    plotted output is unchanged.
    '''
    # never sample more finely than one point per simulation time unit
    if acc > sim.max_time:
        acc = int(sim.max_time)
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # automatically shifted by `test_lag` in the function
    ts, posi_mu, posi_sig = self.__comp_state_over_time(sim, 'posi', acc)
    _, nega_mu, nega_sig = self.__comp_state_over_time(sim, 'nega', acc)
    line_xaxis = np.zeros(ts.shape)
    # lines with per-series std-dev error bars
    ax.errorbar(ts, posi_mu, yerr=posi_sig, elinewidth=0.8, errorevery=errorevery,
                c='black', linestyle='-')
    ax.errorbar(ts, nega_mu, yerr=nega_sig, elinewidth=0.8, errorevery=errorevery,
                c='black', linestyle='-')
    # filling: positives band from the axis, negatives band above positives
    ax.fill_between(ts, line_xaxis, posi_mu, alpha=self.filling_alpha, label=r'Positive tests',
                    edgecolor=self.color_posi, facecolor=self.color_posi, linewidth=0, zorder=0)
    ax.fill_between(ts, posi_mu, nega_mu, alpha=self.filling_alpha, label=r'Negative tests',
                    edgecolor=self.color_nega, facecolor=self.color_nega, linewidth=0, zorder=0)
    # axis: default headroom of 50% above the peak combined mean
    ax.set_xlim((0, np.max(ts)))
    if ymax is None:
        ymax = 1.5 * np.max(posi_mu + nega_mu)
    ax.set_ylim((0, ymax))
    ax.set_xlabel(r'$t$ [days]')
    ax.set_ylabel(r'[people]')
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    # legend
    fig.legend(loc='center right', borderaxespad=0.1)
    # Adjust the scaling factor to fit your legend text completely outside the plot
    plt.subplots_adjust(right=0.70)
    ax.set_title(title, pad=20)
    plt.draw()
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def plot_daily_at_home(self, sim, title='Example', filename='daily_at_home_0', figsize=(10, 10), errorevery=20, acc=1000, ymax=None):
    '''
    Plots, over time, the number of people contained at home by each
    social-distancing measure (all / positively tested / by age /
    smart-traced contacts), with the total infected count overlaid for
    reference. Averaged over random restarts with std-dev error bars.
    Saves the figure to plots/<filename>.png.
    (Docstring fixed: it previously described daily test counts,
    copy-pasted from plot_daily_tested.)
    '''
    # never sample more finely than one point per simulation time unit
    if acc > sim.max_time:
        acc = int(sim.max_time)
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # people contained by each measure, averaged over restarts
    ts, all_mu, all_sig = self.__comp_contained_over_time(sim, 'SocialDistancingForAllMeasure', acc)
    _, positive_mu, positive_sig = self.__comp_contained_over_time(sim, 'SocialDistancingForPositiveMeasure', acc)
    _, age_mu, age_sig = self.__comp_contained_over_time(sim, 'SocialDistancingByAgeMeasure', acc)
    _, tracing_mu, tracing_sig = self.__comp_contained_over_time(sim, 'SocialDistancingForSmartTracing', acc)
    # infected counts, used only for the reference overlay line
    _, iasy_mu, iasy_sig = self.__comp_state_over_time(sim, 'iasy', acc)
    _, ipre_mu, ipre_sig = self.__comp_state_over_time(sim, 'ipre', acc)
    _, isym_mu, isym_sig = self.__comp_state_over_time(sim, 'isym', acc)
    line_xaxis = np.zeros(ts.shape)
    line_all = all_mu
    line_positive = positive_mu
    line_age = age_mu
    line_tracing = tracing_mu
    # total infected = sum of the three infectious states
    line_infected = iasy_mu + ipre_mu + isym_mu
    error_all = all_sig
    error_positive = positive_sig
    error_age = age_sig
    error_tracing = tracing_sig
    # std-dev of the stacked infected total (assumes independence)
    error_infected = np.sqrt(np.square(iasy_sig) + np.square(ipre_sig) + np.square(isym_sig))
    # lines: dashed reference line for infected, black outlines per measure
    ax.errorbar(ts, line_infected, label=r'Total infected', errorevery=errorevery, c=self.color_infected, linestyle='--', yerr=error_infected)
    ax.errorbar(ts, line_all, yerr=error_all, elinewidth=0.8, errorevery=errorevery,
                c='black', linestyle='-')
    ax.errorbar(ts, line_positive, yerr=error_positive, elinewidth=0.8, errorevery=errorevery,
                c='black', linestyle='-')
    ax.errorbar(ts, line_age, yerr=error_age, elinewidth=0.8, errorevery=errorevery,
                c='black', linestyle='-')
    ax.errorbar(ts, line_tracing, yerr=error_tracing, elinewidth=0.8, errorevery=errorevery,
                c='black', linestyle='-')
    # filling: each measure's band drawn from the x-axis (overlapping, not stacked)
    ax.fill_between(ts, line_xaxis, line_all, alpha=self.filling_alpha, label=r'SD for all',
                    edgecolor=self.color_all, facecolor=self.color_all, linewidth=0, zorder=0)
    ax.fill_between(ts, line_xaxis, line_positive, alpha=self.filling_alpha, label=r'SD for positively tested',
                    edgecolor=self.color_positive, facecolor=self.color_positive, linewidth=0, zorder=0)
    ax.fill_between(ts, line_xaxis, line_age, alpha=self.filling_alpha, label=r'SD for age group',
                    edgecolor=self.color_age, facecolor=self.color_age, linewidth=0, zorder=0)
    ax.fill_between(ts, line_xaxis, line_tracing, alpha=self.filling_alpha, label=r'SD for traced contacts',
                    edgecolor=self.color_tracing, facecolor=self.color_tracing, linewidth=0, zorder=0)
    # axis: default headroom of 50% above the largest contained mean
    ax.set_xlim((0, np.max(ts)))
    if ymax is None:
        ymax = 1.5 * np.max([all_mu, positive_mu, age_mu, tracing_mu])
    ax.set_ylim((0, ymax))
    ax.set_xlabel(r'$t$ [days]')
    ax.set_ylabel(r'[people]')
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    # legend
    fig.legend(loc='center right', borderaxespad=0.1)
    # Adjust the scaling factor to fit your legend text completely outside the plot
    plt.subplots_adjust(right=0.70)
    ax.set_title(title, pad=20)
    plt.draw()
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def compare_total_infections(self, sims, titles, figtitle='Title',
    filename='compare_inf_0', figsize=(10, 10), errorevery=20, acc=1000, ymax=None,
    lockdown_label='Lockdown', lockdown_at=None, lockdown_label_y=None,
    show_positives=False, show_legend=True, legendYoffset=0.0, legend_is_left=False,
    subplot_adjust=None, start_date='1970-01-01', first_one_dashed=False):
    ''''
    Plots total infections for each simulation, named as provided by `titles`
    to compare different measures/interventions taken. Colors taken as defined in __init__, and
    averaged over random restarts, using error bars for std-dev.
    `figtitle` is currently unused. Saves to plots/<filename>.png.
    '''
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # one curve per simulation scenario
    for i in range(len(sims)):
        # never sample more finely than one point per simulation time unit
        if acc > sims[i].max_time:
            acc = sims[i].max_time
        ts, iasy_mu, iasy_sig = self.__comp_state_over_time(sims[i], 'iasy', acc)
        _, ipre_mu, ipre_sig = self.__comp_state_over_time(sims[i], 'ipre', acc)
        _, isym_mu, isym_sig = self.__comp_state_over_time(sims[i], 'isym', acc)
        _, posi_mu, posi_sig = self.__comp_state_over_time(sims[i], 'posi', acc)
        # Convert x-axis into posix timestamps and use pandas to plot as dates
        ts = days_to_datetime(ts, start_date=start_date)
        line_xaxis = np.zeros(ts.shape)
        # total infected = sum of the three infectious states
        line_infected = iasy_mu + ipre_mu + isym_mu
        # std-dev of the stacked total (assumes independence of the series)
        error_infected = np.sqrt(np.square(iasy_sig) + np.square(ipre_sig) + np.square(isym_sig))
        # lines: optionally overlay the positive-test curve (dashed)
        if show_positives:
            ax.errorbar(ts, line_infected, yerr=error_infected, label='[Infected] ' + titles[i], errorevery=errorevery,
                        c=self.color_different_scenarios[i], linestyle='-')
            T = posi_mu.shape[0]
            ax.errorbar(ts, posi_mu, yerr=posi_sig, label='[Tested positive]', errorevery=errorevery,
                        c=self.color_different_scenarios[i], linestyle='--', elinewidth=0.8)
        else:
            ax.errorbar(ts, line_infected, yerr=error_infected, label=titles[i], errorevery=errorevery, elinewidth=0.8,
                        capsize=3.0, c=self.color_different_scenarios[i], linestyle='--' if i == 0 and first_one_dashed else '-')
    # axis
    # ax.set_xlim((0, np.max(ts)))
    # NOTE: when ymax is None it is set from the LAST scenario's curves only
    if ymax is None:
        ymax = 1.5 * np.max(iasy_mu + ipre_mu + isym_mu)
    ax.set_ylim((0, ymax))
    # ax.set_xlabel('Days')
    ax.set_ylabel('People')
    if lockdown_at is not None:
        lockdown_widget(lockdown_at, start_date,
                        lockdown_label_y, ymax,
                        lockdown_label, ax, xshift=0.5)
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    #set ticks every week
    # ax.xaxis.set_major_locator(mdates.WeekdayLocator())
    ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2))
    #set major ticks format
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
    fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
    if show_legend:
        # legend
        if legend_is_left:
            leg = ax.legend(loc='upper left', borderaxespad=0.5)
        else:
            leg = ax.legend(loc='upper right', borderaxespad=0.5)
        # Optionally nudge the legend vertically by legendYoffset (axes coords)
        if legendYoffset != 0.0:
            # Get the bounding box of the original legend
            # NOTE(review): Bbox.inverse_transformed was removed in newer
            # Matplotlib (use Bbox.transformed(t.inverted())) — confirm the
            # pinned Matplotlib version supports this call.
            bb = leg.get_bbox_to_anchor().inverse_transformed(ax.transAxes)
            # Change to location of the legend.
            bb.y0 += legendYoffset
            bb.y1 += legendYoffset
            leg.set_bbox_to_anchor(bb, transform = ax.transAxes)
    subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
    plt.subplots_adjust(**subplot_adjust)
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def compare_total_fatalities_and_hospitalizations(self, sims, titles, figtitle=r'Hospitalizations and Fatalities',
    filename='compare_inf_0', figsize=(10, 10), errorevery=20, acc=1000, ymax=None, lockdown_at=None,
    subplot_adjust=None, start_date='1970-01-01', first_one_dashed=False):
    '''
    Plots total fatalities and hospitalizations for each simulation, named as
    provided by `titles`, to compare different measures/interventions taken.
    Hospitalizations are drawn solid, fatalities dashed, colored per scenario.
    Averaged over random restarts, using error bars for std-dev.
    `figtitle` and `first_one_dashed` are currently unused.
    Saves to plots/<filename>.png.
    '''
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # hospitalizations and fatalities, one pair of curves per scenario
    for i in range(len(sims)):
        # never sample more finely than one point per simulation time unit
        if acc > sims[i].max_time:
            acc = sims[i].max_time
        ts, hosp_mu, hosp_sig = self.__comp_state_over_time(
            sims[i], 'hosp', acc)
        ts, dead_mu, dead_sig = self.__comp_state_over_time(
            sims[i], 'dead', acc)
        # Convert x-axis into posix timestamps and use pandas to plot as dates
        ts = days_to_datetime(ts, start_date=start_date)
        # lines: solid = hospitalized, dashed = dead (same scenario color)
        ax.errorbar(ts, hosp_mu, yerr=hosp_sig, label=titles[i], errorevery=errorevery,
                    c=self.color_different_scenarios[i], linestyle='-', elinewidth=0.8, capsize=3.0)
        ax.errorbar(ts, dead_mu, yerr=dead_sig, errorevery=errorevery,
                    c=self.color_different_scenarios[i], linestyle='--', elinewidth=0.8, capsize=3.0)
    # axis
    if ymax is None:
        # BUGFIX: this previously read `1.5 * np.max(iasy_mu + ipre_mu + isym_mu)`,
        # but those variables are never defined in this method (copy-paste from
        # compare_total_infections) and raised a NameError whenever ymax was None.
        # Scale to the larger of the (last scenario's) peak means instead.
        ymax = 1.5 * max(np.max(hosp_mu), np.max(dead_mu))
    ax.set_ylim((0, ymax))
    # ax.set_xlabel('Days')
    ax.set_ylabel('People')
    # vertical dashed marker at the lockdown time, if given
    if lockdown_at is not None:
        ax.plot(lockdown_at * np.ones(acc), np.linspace(0, ymax, num=acc),
                linewidth=1, linestyle='--', color='black', zorder=10)
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    #set ticks every week
    # ax.xaxis.set_major_locator(mdates.WeekdayLocator())
    ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2))
    #set major ticks format
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
    fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
    # legend
    # ax.legend(loc='upper right', borderaxespad=0.5)
    ax.legend(loc='upper left', borderaxespad=0.5)
    subplot_adjust = subplot_adjust or {
        'bottom': 0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
    plt.subplots_adjust(**subplot_adjust)
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def plot_2d_infections_at_time(self, sim, at_time, density_bandwidth=1.0, restart=0,
    title='Example', filename='2d_inf_0', figsize=(10, 10), acc=1000, ymax=None):
    '''
    Plots 2d visualization using mobility object. The bandwidth set by `density_bandwidth`
    determines the bandwidth of the RBF kernel in KDE used to generate the plot.
    Smaller means more affected by local changes. Set the colors and markers in the __init__ function.
    `ymax` is currently unused; `acc` only clamps to sim.max_time.
    Saves to plots/<filename>.png.
    '''
    if acc > sim.max_time:
        acc = int(sim.max_time)
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # infections: per-person state flags of restart `restart` at `at_time`
    r = restart
    is_expo = self.__is_state_at(sim, r, 'expo', at_time)
    is_iasy = self.__is_state_at(sim, r, 'iasy', at_time)
    is_ipre = self.__is_state_at(sim, r, 'ipre', at_time)
    is_isym = self.__is_state_at(sim, r, 'isym', at_time)
    is_infected = is_iasy | is_ipre | is_isym
    # neither infected nor exposed
    no_state = (1 - is_infected) & (1 - is_expo)
    idx_expo = np.where(is_expo)[0]
    idx_infected = np.where(is_infected)[0]
    idx_none = np.where(no_state)[0]
    # self.color_isym = 'red'
    # self.color_expo= 'yellow'
    ### sites
    site_loc = sim.site_loc
    ax.scatter(site_loc[:, 0], site_loc[:, 1], alpha=self.filling_alpha, label='public sites',
               marker=self.marker_site, color=self.color_site, facecolors=self.color_site, s=self.size_site)
    ### home locations and their states
    home_loc = sim.home_loc
    # no state
    ax.scatter(home_loc[idx_none, 0], home_loc[idx_none, 1],
               marker=self.marker_home, color=self.color_home,
               facecolors='none', s=self.size_home)
    try:
        # expo
        ax.scatter(home_loc[idx_expo, 0], home_loc[idx_expo, 1],
                   marker=self.marker_home, color=self.color_home,
                   facecolors=self.color_expo, s=self.size_home, label='exposed households')
        sns.kdeplot(home_loc[idx_expo, 0], home_loc[idx_expo, 1], shade=True, alpha=self.density_alpha,
                    shade_lowest=False, cbar=False, ax=ax, color=self.color_expo, bw=density_bandwidth, zorder=0)
        # infected
        ax.scatter(home_loc[idx_infected, 0], home_loc[idx_infected, 1],
                   marker=self.marker_home, color=self.color_home,
                   facecolors=self.color_isym, s=self.size_home, label='infected households')
        sns.kdeplot(home_loc[idx_infected, 0], home_loc[idx_infected, 1], shade=True, alpha=self.density_alpha,
                    shade_lowest=False, cbar=False, ax=ax, color=self.color_isym, bw=density_bandwidth, zorder=0)
    # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; narrowed to Exception, keeping the best-effort behavior
    # of bailing out when the KDE has no data at this time.
    except Exception:
        print('KDE failed, likely no exposed and infected at this time. Try different timing.')
        plt.close()
        return
    # axis: unit-square coordinates with a small margin, no frame
    ax.set_xlim((-0.1, 1.1))
    ax.set_ylim((-0.1, 1.1))
    plt.axis('off')
    # legend
    fig.legend(loc='center right', borderaxespad=0.1)
    # Adjust the scaling factor to fit your legend text completely outside the plot
    plt.subplots_adjust(right=0.85)
    ax.set_title(title, pad=20)
    plt.draw()
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def compare_hospitalizations_over_time(self, sims, titles, figtitle='Hospitalizations', filename='compare_hosp_0',
    capacity_line_at=20, figsize=(10, 10), errorevery=20, acc=1000, ymax=None):
    ''''
    Plots total hospitalizations for each simulation, named as provided by `titles`
    to compare different measures/interventions taken. Colors taken as defined in __init__, and
    averaged over random restarts, using error bars for std-dev.
    The value of `capacity_line_at` defines the y-intercept of the hospitalization capacity line.
    Saves to plots/<filename>.png.
    '''
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # one filled hospitalization band per scenario
    for i in range(len(sims)):
        # never sample more finely than one point per simulation time unit
        if acc > sims[i].max_time:
            acc = sims[i].max_time
        ts, line_hosp, error_sig = self.__comp_state_over_time(
            sims[i], 'hosp', acc)
        line_xaxis = np.zeros(ts.shape)
        # lines
        ax.errorbar(ts, line_hosp, yerr=error_sig, errorevery=errorevery,
                    c='black', linestyle='-', elinewidth=0.8)
        # filling
        ax.fill_between(ts, line_xaxis, line_hosp, alpha=self.filling_alpha, zorder=0,
                        label=r'Hospitalized under: ' + titles[i], edgecolor=self.color_different_scenarios[i],
                        facecolor=self.color_different_scenarios[i], linewidth=0)
    # capacity line: horizontal red dashed line at `capacity_line_at`
    ax.plot(ts, capacity_line_at * np.ones(ts.shape[0]), label=r'Max. hospitalization capacity',
            c='red', linestyle='--', linewidth=4.0)
    # axis
    # NOTE: when ymax is None it is computed from the LAST scenario's curve only
    ax.set_xlim((0, np.max(ts)))
    if ymax is None:
        ymax = 1.5 * np.max(line_hosp + error_sig)
    ax.set_ylim((0, ymax))
    ax.set_xlabel(r'$t$ [days]')
    ax.set_ylabel(r'[people]')
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    # legend
    fig.legend(loc='center right', borderaxespad=0.1)
    # Adjust the scaling factor to fit your legend text completely outside the plot
    plt.subplots_adjust(right=0.70)
    ax.set_title(figtitle, pad=20)
    plt.draw()
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def plot_positives_vs_target(self, sim, targets, title='Example',
    filename='inference_0', figsize=(6, 5), errorevery=1, acc=17, ymax=None,
    start_date='1970-01-01', lockdown_label='Lockdown', lockdown_at=None,
    lockdown_label_y=None, subplot_adjust=None):
    ''''
    Plots simulated positive test counts averaged over random restarts
    (shaded band = +/- one std-dev) together with the real case-count
    targets used for inference. `title` and `errorevery` are currently
    unused. Saves to plots/<filename>.png.
    '''
    # never sample more finely than one point per simulation time unit
    if acc > sim.max_time:
        acc = int(sim.max_time)
    fig, ax = plt.subplots(figsize=figsize)
    # inference
    # automatically shifted by `test_lag` in the function
    ts, posi_mu, posi_sig = self.__comp_state_over_time(sim, 'posi', acc)
    T = posi_mu.shape[0]
    # Convert day offsets into dates for plotting
    xx = days_to_datetime(ts, start_date=start_date)
    ax.plot(xx, posi_mu, c='k', linestyle='-',
            label='COVID-19 simulated case data')
    # +/- one std-dev band around the simulated mean
    ax.fill_between(xx, posi_mu - posi_sig, posi_mu + posi_sig,
                    color='grey', alpha=0.1, linewidth=0.0)
    # target: overlay the real observed case counts
    target_widget(targets, start_date, ax)
    # axis
    #ax.set_xlim((0, np.max(ts)))
    if ymax is None:
        ymax = 1.5 * np.max(posi_mu)
    ax.set_ylim((0, ymax))
    # ax.set_xlabel('Days')
    ax.set_ylabel('Positive cases')
    if lockdown_at is not None:
        lockdown_widget(lockdown_at, start_date,
                        lockdown_label_y, ymax,
                        lockdown_label, ax)
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    #set ticks every week
    ax.xaxis.set_major_locator(mdates.WeekdayLocator())
    #set major ticks format
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
    fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
    # legend
    ax.legend(loc='upper left', borderaxespad=0.5)
    subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
    plt.subplots_adjust(**subplot_adjust)
    plt.draw()
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI)#, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
    return
def plot_daily_rts(self, sims, filename, start_date, titles=None, sigma=None,
    r_t_range=R_T_RANGE, window=3, figsize=(6, 5),
    subplot_adjust=None, lockdown_label='Lockdown',
    lockdown_at=None, lockdown_label_y=None, ymax=None,
    colors=['grey'], fill_between=True, draw_dots=True,
    errorevery=1, show_legend=False, xtick_interval=1, ci=0.9):
    '''
    Plots the estimated daily effective reproduction number R_t (maximum-
    likelihood point estimate plus a `ci` credible interval) for one or
    several simulation summaries, and saves to plots/<filename>.png.
    NOTE(review): `colors=['grey']` is a mutable default argument — shared
    across calls; safe only as long as callers never mutate it.
    '''
    # If a single summary is provided, wrap it (and its sigma) into lists
    if not isinstance(sims, list):
        sims = [sims]
        sigma = [sigma]
    # per-simulation R_t posteriors (DataFrames with ML / Low_x / High_x columns)
    results = list()
    for i, sim in enumerate(sims):
        res = compute_daily_rts(sim, start_date, sigma[i], r_t_range, window, ci)
        results.append(res)
    # Colors: colormap from black (R_t < 1) through white to red (R_t > 1)
    ABOVE = [1,0,0]
    MIDDLE = [1,1,1]
    BELOW = [0,0,0]
    cmap = ListedColormap(np.r_[
        np.linspace(BELOW,MIDDLE,25),
        np.linspace(MIDDLE,ABOVE,25)
    ])
    # map R_t values into [0, 1] for the colormap (clip to [0.5, 1.5])
    color_mapped = lambda y: np.clip(y, .5, 1.5)-.5
    ymax_computed = 0.0  # Keep track of max y to set limit
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    for i, result in enumerate(results):
        index = result['ML'].index
        values = result['ML'].values
        # Plot dots and line
        ax.plot(index, values, c=colors[i], zorder=1, alpha=1.0)
        if draw_dots:
            ax.scatter(index, values, s=40, lw=0.0,
                       c=cmap(color_mapped(values)),
                       edgecolors='k', zorder=2)
        # Aesthetically, extrapolate credible interval by 1 day either side
        lowfn = interp1d(date2num(index), result[f'Low_{ci*100:.0f}'].values,
                         bounds_error=False, fill_value='extrapolate')
        highfn = interp1d(date2num(index), result[f'High_{ci*100:.0f}'].values,
                          bounds_error=False, fill_value='extrapolate')
        extended = pd.date_range(start=index[0], end=index[-1])
        error_low = lowfn(date2num(extended))
        error_high = highfn(date2num(extended))
        if fill_between:
            # shaded credible-interval band
            ax.fill_between(extended, error_low, error_high,
                            color=colors[i], alpha=0.1, linewidth=0.0)
        else:
            # Ignore first value which is just prior, not informed by data
            ax.errorbar(x=index[1:], y=values[1:], label=titles[i],
                        yerr=np.vstack((result[f'Low_{ci*100:.0f}'], result[f'High_{ci*100:.0f}']))[:,1:],
                        color=colors[i], linewidth=1.0,
                        elinewidth=0.8, capsize=3.0,
                        errorevery=errorevery)
        ymax_computed = max(ymax_computed, np.max(error_high))
    # Plot horizontal line at R_t = 1 (epidemic growth threshold)
    ax.axhline(1.0, c='k', lw=1, alpha=.25);
    # limits
    # NOTE(review): `ymax` is computed here but set_ylim uses `ymax_computed`,
    # so an explicit ymax argument only affects the lockdown widget below —
    # looks like a bug; confirm intent.
    ymax = ymax or 1.2 * ymax_computed
    ax.set_ylim((0, ymax_computed))
    if show_legend:
        ax.legend(loc='upper left', borderaxespad=0.5)
    # extra
    if lockdown_at is not None:
        lockdown_widget(lockdown_at, start_date,
                        lockdown_label_y, ymax,
                        lockdown_label, ax, zorder=-200)
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    # Set label
    ax.set_ylabel(r'$R_t$')
    #set ticks every week
    ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=xtick_interval))
    #set major ticks format
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
    fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
    subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
    plt.subplots_adjust(**subplot_adjust)
    plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
                dpi=DPI)#, bbox_inches='tight')
    # NO_PLOT is a module-level flag for headless runs: save but don't display
    if NO_PLOT:
        plt.close()
| 40 | 146 | 0.592961 |
399442d93edce3347d3065d8ba6f924ec8c3e447 | 1,960 | py | Python | developer_tools/dymola_python_testing/ModelicaPy/buildingspy/development/__init__.py | klfrick2/HYBRID | d8a82bcdb9d0516a22205eed0de75f63764fa004 | [
"ECL-2.0",
"Apache-2.0"
] | 16 | 2021-02-10T21:37:01.000Z | 2022-02-20T13:25:11.000Z | developer_tools/dymola_python_testing/ModelicaPy/buildingspy/development/__init__.py | klfrick2/HYBRID | d8a82bcdb9d0516a22205eed0de75f63764fa004 | [
"ECL-2.0",
"Apache-2.0"
] | 11 | 2021-03-16T14:33:34.000Z | 2022-02-15T19:05:55.000Z | developer_tools/dymola_python_testing/ModelicaPy/buildingspy/development/__init__.py | klfrick2/HYBRID | d8a82bcdb9d0516a22205eed0de75f63764fa004 | [
"ECL-2.0",
"Apache-2.0"
] | 8 | 2021-06-03T00:22:26.000Z | 2022-03-14T21:47:39.000Z | #Licensed under Apache 2.0 License.
#© 2020 Battelle Energy Alliance, LLC
#ALL RIGHTS RESERVED
#.
#Prepared by Battelle Energy Alliance, LLC
#Under Contract No. DE-AC07-05ID14517
#With the U. S. Department of Energy
#.
#NOTICE: This computer software was prepared by Battelle Energy
#Alliance, LLC, hereinafter the Contractor, under Contract
#No. AC07-05ID14517 with the United States (U. S.) Department of
#Energy (DOE). The Government is granted for itself and others acting on
#its behalf a nonexclusive, paid-up, irrevocable worldwide license in this
#data to reproduce, prepare derivative works, and perform publicly and
#display publicly, by or on behalf of the Government. There is provision for
#the possible extension of the term of this license. Subsequent to that
#period or any extension granted, the Government is granted for itself and
#others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
#license in this data to reproduce, prepare derivative works, distribute
#copies to the public, perform publicly and display publicly, and to permit
#others to do so. The specific term of the license can be identified by
#inquiry made to Contractor or DOE. NEITHER THE UNITED STATES NOR THE UNITED
#STATES DEPARTMENT OF ENERGY, NOR CONTRACTOR MAKES ANY WARRANTY, EXPRESS OR
#IMPLIED, OR ASSUMES ANY LIABILITY OR RESPONSIBILITY FOR THE USE, ACCURACY,
#COMPLETENESS, OR USEFULNESS OR ANY INFORMATION, APPARATUS, PRODUCT, OR
#PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE PRIVATELY
#OWNED RIGHTS.
'''
This module contains the following classes and modules:
- *refactor*, a module that assists in refactoring Modelica classes,
- *Tester* that runs the unit tests of the `Buildings` library,
- *Validator* that validates the html code of the info section of the `.mo` files, and
- *Annex60* that synchronizes Modelica libraries with the `Annex60` library.
- *ErrorDictionary* that contains information about possible error strings.
'''
| 51.578947 | 86 | 0.787755 |
f1424bf14c2d8e8c60b521fda0f3280404ddf9b1 | 78 | py | Python | cursoemvideo/python/aula/aula8.1.py | mateusjustino/cursos | 10927bf62f89b5847bb0acd998e9e9191472d0f4 | [
"MIT"
] | null | null | null | cursoemvideo/python/aula/aula8.1.py | mateusjustino/cursos | 10927bf62f89b5847bb0acd998e9e9191472d0f4 | [
"MIT"
] | null | null | null | cursoemvideo/python/aula/aula8.1.py | mateusjustino/cursos | 10927bf62f89b5847bb0acd998e9e9191472d0f4 | [
"MIT"
] | null | null | null | import emoji
print(emoji.emojize('Olá mundo :sunglasses:', use_aliases=True))
| 26 | 64 | 0.782051 |
76df6755811c487e4318fee01ddb65c72ba8c0bf | 467 | py | Python | equipment/serializers.py | WesGtoX/fpso-management | a6e07fdd921f58f82db5f488f33b120306ab2ce9 | [
"MIT"
] | null | null | null | equipment/serializers.py | WesGtoX/fpso-management | a6e07fdd921f58f82db5f488f33b120306ab2ce9 | [
"MIT"
] | null | null | null | equipment/serializers.py | WesGtoX/fpso-management | a6e07fdd921f58f82db5f488f33b120306ab2ce9 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from equipment.models import Equipment
class EquipmentCreateSerializer(serializers.ModelSerializer):
    """Serializer for creating/updating Equipment.

    Exposes the vessel relation; `status` is server-controlled and
    therefore read-only.
    """
    class Meta:
        model = Equipment
        fields = ('id', 'name', 'code', 'location', 'status', 'vessel')
        read_only_fields = ('status',)
class EquipmentRetrieveSerializer(serializers.ModelSerializer):
    """Serializer for reading Equipment; omits the vessel relation."""
    class Meta:
        model = Equipment
        fields = ('id', 'name', 'code', 'location', 'status')
| 25.944444 | 71 | 0.678801 |
6fd2c4b42a54f1727ef3049e4f7689e0a4251bf1 | 3,873 | py | Python | queue-health/poll/poller.py | nikhiljindal/test-infra | 3b7081b1d4eceffb74a31f50b6b84cfc19c45f98 | [
"Apache-2.0"
] | null | null | null | queue-health/poll/poller.py | nikhiljindal/test-infra | 3b7081b1d4eceffb74a31f50b6b84cfc19c45f98 | [
"Apache-2.0"
] | null | null | null | queue-health/poll/poller.py | nikhiljindal/test-infra | 3b7081b1d4eceffb74a31f50b6b84cfc19c45f98 | [
"Apache-2.0"
] | 2 | 2018-06-05T08:43:58.000Z | 2019-08-24T22:11:19.000Z | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cStringIO
import datetime
import subprocess
import sys
import time
import traceback
import requests
def get_submit_queue_json(path):
    """GET http://submit-queue.k8s.io/<path> and return the parsed JSON.

    Retries up to 3 times with exponential backoff (1s, 2s, 4s) on non-OK
    HTTP responses and raises requests.HTTPError if the last attempt failed.
    NOTE(review): connection-level exceptions from requests.get are not
    retried and propagate immediately; a backoff sleep also happens once
    after the final failed attempt before raising — confirm both are
    acceptable for this best-effort poller.
    """
    for n in range(3):
        uri = 'http://submit-queue.k8s.io/%s' % path
        print >>sys.stderr, 'GET %s' % uri
        resp = requests.get(uri)
        if resp.ok:
            break
        time.sleep(2**n)
    resp.raise_for_status()
    return resp.json()
def is_blocked():
    """Return True when the submit queue reports merges are not possible."""
    ci = get_submit_queue_json('health')
    return ci['MergePossibleNow'] != True
def get_stats():
    """Return (initialized, merges_since_restart) from the sq-stats endpoint."""
    stats = get_submit_queue_json('sq-stats')
    return stats['Initialized'] == True, stats['MergesSinceRestart']
def poll():
    """Fetch the current submit-queue state from three endpoints.

    Returns a 6-tuple:
        (online, open_prs, e2e_queue_len, e2e_running_len, blocked,
         merge_count)
    """
    prs = get_submit_queue_json('prs')
    e2e = get_submit_queue_json('github-e2e-queue')
    online, merge_count = get_stats()
    return (
        online, # Is mergebot initialized?
        len(prs['PRStatus']), # number of open PRs
        len(e2e['E2EQueue']), # number of items in the e2e queue
        len(e2e['E2ERunning']), # Worthless: number of keys in this dict.
        is_blocked(), # Whether we can merge
        merge_count, # Number of merges the bot has done
    )
def load_stats(uri):
    """Read the historical stats blob from GCS via gsutil, retrying forever.

    Retries every 5s on gsutil failure (e.g. object missing or auth issue),
    so this blocks until the read succeeds.
    """
    while True:
        try:
            return subprocess.check_output(['gsutil', '-q', 'cat', uri])
        except subprocess.CalledProcessError:
            traceback.print_exc()
            time.sleep(5)
def save_stats(uri, buf):
    """Upload the whole stats buffer to `uri` as public-read text via gsutil.

    The buffer contents are streamed over gsutil's stdin; failures are only
    logged (best effort), not raised, so one bad upload does not kill the
    polling loop.
    """
    proc = subprocess.Popen(
        # TODO(fejta): add -Z if this gets resolved:
        # https://github.com/GoogleCloudPlatform/gsutil/issues/364
        ['gsutil', '-q', '-h', 'Content-Type:text/plain',
         'cp', '-a', 'public-read', '-', uri],
        stdin=subprocess.PIPE)
    proc.communicate(buf.getvalue())
    code = proc.wait()
    if code:
        print >>sys.stderr, 'Failed to copy stats to %s: %d' % (uri, code)
def poll_forever(uri, service_account=None):
    """Main loop: poll the submit queue every 60s, append a line, re-upload.

    Args:
        uri: gs:// URI of the history file; its current contents are loaded
            once at startup and every new sample is appended in memory.
        service_account: optional path to a service-account key file used to
            activate gcloud credentials for gsutil.
    """
    if service_account:
        print >>sys.stderr, 'Activating service account using: %s' % service_account
        subprocess.check_call(
            ['gcloud', 'auth', 'activate-service-account', '--key-file=%s' % service_account])
    print >>sys.stderr, 'Loading historical stats from %s...' % uri
    buf = cStringIO.StringIO()
    buf.write(load_stats(uri))
    secs = 60
    while True:
        try:
            print >>sys.stderr, 'Waiting %ds...' % secs
            time.sleep(secs)
            now = datetime.datetime.now()
            print >>sys.stderr, 'Polling current status...'
            # Defaults in case poll() raises before assigning.
            online, prs, queue, running, blocked, merge_count = False, 0, 0, 0, False, 0
            try:
                online, prs, queue, running, blocked, merge_count = poll()
            except KeyboardInterrupt:
                raise
            except (KeyError, IOError):
                # Transient API/schema problems: log and skip this sample.
                traceback.print_exc()
                continue
            data = '{} {} {} {} {} {} {}\n'.format(now, online, prs, queue, running, blocked, merge_count)
            print >>sys.stderr, 'Appending to history: %s' % data
            buf.write(data)
            print >>sys.stderr, 'Saving historical stats to %s...' % uri
            save_stats(uri, buf)
        except KeyboardInterrupt:
            break
if __name__ == '__main__':
    # Usage: poller.py <gcs_uri> [service_account_key_file]
    poll_forever(*sys.argv[1:])
| 31.745902 | 106 | 0.619158 |
3da27b512d225a819bc690c4bae0e888ceb16a2f | 726 | py | Python | labs/models.py | babraham123/mysite | 76a792b18bc6404bec1281bc7c2d69b738aa03d1 | [
"MIT"
] | null | null | null | labs/models.py | babraham123/mysite | 76a792b18bc6404bec1281bc7c2d69b738aa03d1 | [
"MIT"
] | null | null | null | labs/models.py | babraham123/mysite | 76a792b18bc6404bec1281bc7c2d69b738aa03d1 | [
"MIT"
] | null | null | null | from django.db import models
from django import forms
class Table(models.Model):
    """A key-protected network endpoint (ip:port) shown by title.

    NOTE(review): `key` looks like a shared access secret stored in plain
    text — confirm whether it should be hashed.
    """
    title = models.CharField(max_length=200)
    # Access key checked against KeyForm input.
    key = models.CharField(max_length=100)
    ip = models.GenericIPAddressField()
    port = models.PositiveIntegerField()
    def __unicode__(self):
        # Python 2 style string representation (this app predates __str__).
        return self.title
class KeyForm(forms.Form):
    """Single-field form collecting an access key, rendered as a password input."""
    key = forms.CharField(widget=forms.PasswordInput(), max_length=100)
class Lab(models.Model):
    """Metadata describing a lab exercise listed on the site."""
    title = models.CharField(max_length=100)
    # Name of the lab's file on disk; served/linked elsewhere.
    filename = models.CharField(max_length=100)
    description = models.CharField(max_length=1000)
    # Path/URL fragment of the lab's icon image.
    icon_address = models.CharField(max_length=100)
    created = models.DateField('date created')
    def __unicode__(self):
        return self.title
5e180aabe0f96b08c70d947e664dd2ab473bf1da | 6,582 | py | Python | models/GRCN.py | STK101/GRCN | 7389000a13d5969bcc77dc4cf73a4107acc68403 | [
"MIT"
] | null | null | null | models/GRCN.py | STK101/GRCN | 7389000a13d5969bcc77dc4cf73a4107acc68403 | [
"MIT"
] | null | null | null | models/GRCN.py | STK101/GRCN | 7389000a13d5969bcc77dc4cf73a4107acc68403 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, SGConv, GATConv, knn_graph
from sklearn.neighbors import kneighbors_graph
from sklearn.metrics.pairwise import rbf_kernel
import numpy as np
from .model_utils import GCNConv_diag, GCNConv_dense, EOS
import torch_sparse as ts
class Model(torch.nn.Module):
    """Graph-Revised Convolutional Network (GRCN).

    A graph-learner GCN embeds nodes, a similarity graph is computed from
    those embeddings, sparsified to the top-K neighbors per node, added to
    the observed adjacency, and a 2-layer GCN classifier runs on the
    revised, degree-normalized adjacency. Supports both dense and
    torch.sparse adjacency representations.
    """
    def __init__(self, num_nodes, num_features, num_classes, device, args):
        super(Model, self).__init__()
        self.num_nodes = num_nodes
        self.num_features = num_features
        # args.hid_graph is "h1:h2": the two hidden sizes of the graph learner.
        self.graph_nhid = int(args.hid_graph.split(":")[0])
        self.graph_nhid2 = int(args.hid_graph.split(":")[1])
        self.nhid = args.nhid
        # Classifier GCN (two dense-weight layers).
        self.conv1 = GCNConv_dense(num_features, self.nhid)
        self.conv2 = GCNConv_dense(self.nhid, num_classes)
        # Graph-learner GCN: dense weights, or diagonal weights which keep
        # the feature dimensionality unchanged.
        if args.layertype == "dense":
            self.conv_graph = GCNConv_dense(num_features, self.graph_nhid)
            self.conv_graph2 = GCNConv_dense(self.graph_nhid, self.graph_nhid2)
        elif args.layertype == "diag":
            self.conv_graph = GCNConv_diag(num_features, device)
            self.conv_graph2 = GCNConv_diag(num_features, device)
        else:
            exit("wrong layer type")
        self.F = args.F  # activation used by the classifier branch
        self.F_graph = args.F_graph  # activation for the graph learner; "identity" skips it
        self.dropout = args.dropout
        # First field of args.compl_param: K neighbors kept per node.
        self.K = args.compl_param.split(":")[0]
        self.mask = None  # dense top-K mask cached by _sparse_graph
        self.Adj_new = None  # last raw similarity graph (for inspection)
        self._normalize = args.normalize  # L2-normalize learner embeddings?
        self.device = device
        self.reduce = args.reduce  # sparsification method; only "knn" supported
        self.sparse = args.sparse  # use torch.sparse adjacency representation
        self.norm_mode = "sym"  # symmetric degree normalization
        self.embedding = None  # cached pre-softmax logits from forward()
    def init_para(self):
        """Re-initialize parameters of all four GCN layers."""
        self.conv1.init_para()
        self.conv2.init_para()
        self.conv_graph.init_para()
        self.conv_graph2.init_para()
    def graph_parameters(self):
        # Parameters of the graph-learner branch (typically given their own
        # optimizer/learning rate).
        return list(self.conv_graph.parameters()) + list(self.conv_graph2.parameters())
    def base_parameters(self):
        # Parameters of the classifier branch.
        return list(self.conv1.parameters()) + list(self.conv2.parameters())
    def cal_similarity_graph(self, node_embeddings):
        """Dense similarity graph: sum of inner products over the two
        feature halves.

        NOTE(review): the split at num_features/2 is only consistent with
        the "diag" layer type, which preserves the input dimensionality;
        with "dense" layers the embedding width is graph_nhid2, not
        num_features — confirm intended usage.
        """
        # similarity_graph = torch.mm(node_embeddings, node_embeddings.t())
        similarity_graph = torch.mm(node_embeddings[:, :int(self.num_features/2)], node_embeddings[:, :int(self.num_features/2)].t())
        similarity_graph += torch.mm(node_embeddings[:, int(self.num_features/2):], node_embeddings[:, int(self.num_features/2):].t())
        return similarity_graph
    def normalize(self, adj, mode="sym" ,sparse=False):
        """Degree-normalize a dense or torch.sparse adjacency.

        mode "sym": D^-1/2 A D^-1/2; mode "row": D^-1 A. EOS guards
        against division by zero for isolated nodes.
        """
        if not sparse:
            if mode == "sym":
                inv_sqrt_degree = 1. / (torch.sqrt(adj.sum(dim=1, keepdim=False)) + EOS)
                return inv_sqrt_degree[:, None] * adj * inv_sqrt_degree[None, :]
            elif mode == "row":
                inv_degree = 1. / (adj.sum(dim=1, keepdim=False) + EOS)
                return inv_degree[:, None] * adj
            else:
                exit("wrong norm mode")
        else:
            adj = adj.coalesce()
            if mode == "sym":
                inv_sqrt_degree = 1. / (torch.sqrt(torch.sparse.sum(adj, dim=1).values()) + EOS)
                D_value = inv_sqrt_degree[adj.indices()[0]] * inv_sqrt_degree[adj.indices()[1]]
            elif mode == "row":
                inv_degree = 1. / (torch.sparse.sum(adj, dim=1).values() + EOS)
                D_value = inv_degree[adj.indices()[0]]
            else:
                exit("wrong norm mode")
            # Scale each stored edge weight by its normalization factor.
            new_values = adj.values() * D_value
            return torch.sparse.FloatTensor(adj.indices(), new_values, adj.size()).to(self.device)
    def _sparse_graph(self, raw_graph, K, sparse):
        """Keep the top-K similarity entries per node (symmetrized).

        Dense path: returns raw_graph * mask and caches the mask.
        Sparse path: returns (indices, values) of the kept edges plus their
        transposes.
        """
        if self.reduce == "knn":
            values, indices = raw_graph.topk(k=int(K), dim=-1)
            # print(values, indices)
            assert torch.sum(torch.isnan(values)) == 0
            assert torch.max(indices) < raw_graph.shape[1]
            if not sparse:
                # Symmetric mask: keep (i, topk(i)) and (topk(i), i).
                self.mask = torch.zeros(raw_graph.shape).to(self.device)
                self.mask[torch.arange(raw_graph.shape[0]).view(-1,1), indices] = 1.
                self.mask[indices, torch.arange(raw_graph.shape[1]).view(-1,1)] = 1.
            else:
                inds = torch.stack([torch.arange(raw_graph.shape[0]).view(-1,1).expand(-1,int(K)).contiguous().view(1,-1)[0].to(self.device),
                                    indices.view(1,-1)[0]])
                # Append the transposed edges to symmetrize.
                inds = torch.cat([inds, torch.stack([inds[1], inds[0]])], dim=1)
                values = torch.cat([values.view(1,-1)[0], values.view(1,-1)[0]])
                return inds, values
        else:
            exit("wrong sparsification method")
        self.mask.requires_grad = False
        sparse_graph = raw_graph * self.mask
        return sparse_graph
    def _node_embeddings(self, input, Adj, sparse=False):
        """Run the 2-layer graph learner on the normalized observed graph."""
        norm_Adj = self.normalize(Adj, self.norm_mode, sparse)
        if self.F_graph != "identity":
            node_embeddings = self.F_graph(self.conv_graph(input, norm_Adj, sparse))
            node_embeddings = self.conv_graph2(node_embeddings, norm_Adj, sparse)
        else:
            node_embeddings = self.conv_graph(input, norm_Adj, sparse)
            node_embeddings = self.conv_graph2(node_embeddings, norm_Adj, sparse)
        if self._normalize:
            node_embeddings = F.normalize(node_embeddings, dim=1, p=2)
        return node_embeddings
    def forward(self, input, Adj):
        """Classify nodes using the observed adjacency revised by the
        learned similarity graph; returns log-probabilities."""
        Adj.requires_grad = False
        node_embeddings = self._node_embeddings(input, Adj, self.sparse)
        Adj_new = self.cal_similarity_graph(node_embeddings)
        self.Adj_new = Adj_new
        if not self.sparse:
            Adj_new = self._sparse_graph(Adj_new, self.K, self.sparse)
            Adj_new = self.normalize(Adj + Adj_new, self.norm_mode)
        else:
            Adj_new_indices, Adj_new_values = self._sparse_graph(Adj_new, self.K, self.sparse)
            # Concatenate observed and learned edges, then normalize.
            new_inds = torch.cat([Adj.indices(), Adj_new_indices], dim=1)
            new_values = torch.cat([Adj.values(), Adj_new_values])
            Adj_new = torch.sparse.FloatTensor(new_inds, new_values, Adj.size()).to(self.device)
            Adj_new = self.normalize(Adj_new, self.norm_mode, self.sparse)
        x = self.conv1(input, Adj_new, self.sparse)
        x = F.dropout(self.F(x), training=self.training, p=self.dropout)
        x = self.conv2(x, Adj_new, self.sparse)
        self.embedding = x
        return F.log_softmax(x, dim=1)
| 47.014286 | 142 | 0.601945 |
65495a4374c18884d615339f62d7198fbf2ebd26 | 3,480 | py | Python | nwd/trie.py | taiyingchen/new-word-detection | 9ab95cffccdf79c93d134b27340559773093df3b | [
"BSD-3-Clause"
] | 3 | 2020-05-02T09:20:46.000Z | 2021-04-14T15:21:38.000Z | nwd/trie.py | dying1020/new-word-detection | 9ab95cffccdf79c93d134b27340559773093df3b | [
"BSD-3-Clause"
] | null | null | null | nwd/trie.py | dying1020/new-word-detection | 9ab95cffccdf79c93d134b27340559773093df3b | [
"BSD-3-Clause"
] | null | null | null | import os
import pickle
from queue import Queue
from collections import defaultdict
from marisa_trie import BytesTrie
INT_BYTES = 4
BYTEORDER = 'big'
class Trie(object):
    """Token-level trie that counts how many inserted sequences end at a node.

    Each node is a dict with keys: 'children' (token -> node) and 'depth';
    non-root nodes additionally carry 'freq' (number of sequences ending
    there), 'visit' (BFS bookkeeping flag) and 'value' (the token prefix
    leading to the node).
    """
    def __init__(self):
        self.root = {'children': {}, 'depth': 0}
        self.total = 0  # number of sequences inserted via build()
        self.trie_file_path = os.path.join(
            os.path.dirname(__file__), "./Trie.pkl")
    def build(self, tokens):
        """Insert a list of tokens, creating nodes as needed and bumping
        the terminal node's frequency."""
        node = self.root
        depth = 0
        for token in tokens:
            depth += 1
            if token not in node['children']:
                node['children'][token] = {'freq': 0, 'depth': depth, 'visit': False, 'value': tokens[:depth], 'children': {}}
            node = node['children'][token]
        node['freq'] += 1
        self.total += 1
    def get(self, tokens):
        """Look up a token path.

        Returns {'found': False, 'value': ''} on a miss, otherwise the
        terminal node's fields merged with {'found': True}.
        """
        result = {'found': True, 'value': ''}
        node = self.root
        for token in tokens:
            if token not in node['children']:
                result['found'] = False
                return result
            node = node['children'][token]
        result = {**result, **node}
        return result
    def bfs(self, min_depth, max_depth=-1):
        """Generator for breadth-first search over nodes with
        min_depth <= depth (and depth <= max_depth unless max_depth == -1).

        NOTE: nodes are permanently marked 'visit'=True as they are
        enqueued, so a second bfs() over the same trie yields nothing
        below the root.

        Returns
        -------
        node : Trie tree node
        """
        queue = Queue()
        queue.put(self.root)
        while not queue.empty():
            node = queue.get()
            if max_depth != -1 and node['depth'] > max_depth:
                return
            elif node['depth'] >= min_depth:
                yield node
            for child in node['children'].values():
                if not child['visit']:
                    child['visit'] = True
                    queue.put(child)
        return
    def visualize(self):
        """Print the trie as an ASCII tree to stdout."""
        self.visualize_util(self.root, '')
    def visualize_util(self, node, pre, console=True, queue=None):
        """Render the subtree under `node`, printing each line (console=True)
        or appending it to `queue`.

        Bug fix: `queue` used to default to a shared mutable list
        (`queue=[]`), so successive default-argument calls accumulated
        lines across invocations; it now defaults to None and a fresh
        list is created per call.
        """
        if queue is None:
            queue = []
        for i, child in enumerate(node['children']):
            if i != len(node['children'])-1:
                print(f'{pre}├──{child}') if console else queue.append(f'{pre}├──{child}')
                self.visualize_util(node['children'][child], pre+'│  ', console, queue)
            else:
                print(f'{pre}└──{child}') if console else queue.append(f'{pre}└──{child}')
                self.visualize_util(node['children'][child], pre+'   ', console, queue)
    def __len__(self):
        # Number of inserted sequences, not number of nodes.
        return self.total
    def load(self):
        """Load trie root from the pickle file next to this module.

        NOTE: pickle.load on an untrusted file can execute arbitrary code;
        only load trusted Trie.pkl files.
        """
        with open(self.trie_file_path, "rb") as f:
            data = pickle.load(f)
        self.root = data
    def test(self):
        """Testing interface: return the ASCII rendering as one string."""
        queue = []
        self.visualize_util(self.root, '', console=False, queue=queue)
        return '\n'.join(queue)
class BTrie(BytesTrie):
    """marisa BytesTrie mapping string keys to counts stored as 4-byte
    big-endian integers."""
    def build(self, trie):
        # Rebuild this instance in place from a {key: int} mapping by
        # re-running BytesTrie.__init__ on the byte-encoded values.
        trie = {k: v.to_bytes(INT_BYTES, byteorder=BYTEORDER) for k, v in trie.items()}
        super().__init__(trie.items())
        return self
    def items(self, key=''):
        # Decode stored byte values back to ints; `key` is a prefix filter
        # passed through to BytesTrie.items.
        return {k: int.from_bytes(v, byteorder=BYTEORDER) for k, v in super().items(key)}
    def merge(self, trie):
        """Rebuild self with per-key counts summed from self and `trie`."""
        # Get all elements in both trie
        self_trie = self.items()
        other_trie = trie.items()
        merge_trie = defaultdict(int)
        merge_trie.update(self_trie)
        for k, v in other_trie.items():
            merge_trie[k] += v
        return self.build(merge_trie)
| 30 | 126 | 0.528448 |
583762d4041d52849230cbf4c0fa934ee926a9e7 | 776 | py | Python | qiniu4tornado/main.py | EnjoyOnlineLtd/qiniu4tornado | b02a3f964c8857050ab1ea7f9705b61631e28d76 | [
"MIT"
] | null | null | null | qiniu4tornado/main.py | EnjoyOnlineLtd/qiniu4tornado | b02a3f964c8857050ab1ea7f9705b61631e28d76 | [
"MIT"
] | null | null | null | qiniu4tornado/main.py | EnjoyOnlineLtd/qiniu4tornado | b02a3f964c8857050ab1ea7f9705b61631e28d76 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from qiniu4tornado import etag
def main():
    """CLI entry point: `qiniu etag FILE...` prints the etag of each file."""
    arg_parser = argparse.ArgumentParser(prog='qiniu')
    subcommands = arg_parser.add_subparsers()
    etag_cmd = subcommands.add_parser(
        'etag', description='calculate the etag of the file', help='etag [file...]')
    etag_cmd.add_argument(
        'etag_files', metavar='N', nargs='+', help='the file list for calculate')
    parsed = arg_parser.parse_args()
    # When no subcommand is given the namespace has no `etag_files`
    # attribute; fall back to None and do nothing in that case.
    paths = getattr(parsed, 'etag_files', None)
    if not paths:
        return
    digests = [etag(path) for path in paths]
    print(digests[0] if len(digests) == 1 else ' '.join(digests))
if __name__ == '__main__':
    # Allow running this module directly as the `qiniu` CLI.
    main()
| 22.171429 | 84 | 0.604381 |
f23ba00a7274a2ae568b51f01cd834aa488cbead | 30,115 | py | Python | tensor2tensor/layers/latent_layers.py | vishwas1234567/tensor2tensor | d62e2ee1b069d3d9b327d4d2dd6f9e50b7e62bb3 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/layers/latent_layers.py | vishwas1234567/tensor2tensor | d62e2ee1b069d3d9b327d4d2dd6f9e50b7e62bb3 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/layers/latent_layers.py | vishwas1234567/tensor2tensor | d62e2ee1b069d3d9b327d4d2dd6f9e50b7e62bb3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for latent variable models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import transformer_layers
from tensor2tensor.utils import beam_search
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
DO_SUMMARIES = True
def compress_self_attention_layer(x, hparams, name=None):
  """Residual multihead self-attention used while (de)compressing.

  Args:
    x: Tensor of shape [batch, height, width, hparams.hidden_size]; a 4-D
      input is flattened to 3-D for attention and reshaped back afterwards.
    hparams: HParams with attention and layer-postprocess settings.
    name: string, variable scope.

  Returns:
    Tensor with the same shape as `x`.
  """
  with tf.variable_scope(name, default_name="compress_self_attention"):
    x, xshape, _ = cia.maybe_reshape_4d_to_3d(x)
    y = common_attention.multihead_attention(
        common_layers.layer_preprocess(x, hparams),
        None,
        None,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout)
    # Residual add / dropout / norm per hparams.layer_postprocess_sequence.
    res = common_layers.layer_postprocess(x, y, hparams)
    return tf.reshape(res, xshape)
def compute_nats_and_bits_per_dim(data_dim,
                                  latent_dim,
                                  average_reconstruction,
                                  average_prior):
  """Computes negative ELBO, which is an upper bound on the negative likelihood.

  Args:
    data_dim: int-like indicating data dimensionality.
    latent_dim: int-like indicating latent dimensionality.
    average_reconstruction: Scalar Tensor indicating the reconstruction cost
      averaged over all data dimensions and any data batches.
    average_prior: Scalar Tensor indicating the negative log-prior probability
      averaged over all latent dimensions and any data batches.

  Returns:
    Tuple of scalar Tensors, representing the nats and bits per data dimension
    (e.g., subpixels) respectively.
  """
  with tf.name_scope(None, default_name="compute_nats_per_dim"):
    # Undo the per-dimension averaging to obtain totals in nats.
    data_dim_f = tf.cast(data_dim, average_reconstruction.dtype)
    latent_dim_f = tf.cast(latent_dim, average_prior.dtype)
    total_reconstruction = data_dim_f * average_reconstruction
    total_prior = latent_dim_f * average_prior
    negative_elbo = total_reconstruction + total_prior
    # Normalize by data dimensionality; bits = nats / ln(2).
    nats_per_dim = tf.divide(negative_elbo, data_dim_f, name="nats_per_dim")
    bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
    return nats_per_dim, bits_per_dim
def multinomial_sample(x, vocab_size=None, sampling_method="random",
                       temperature=1.0):
  """Multinomial sampling from a n-dimensional tensor.

  Args:
    x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
    vocab_size: Number of classes in multinomial distribution.
    sampling_method: String, "random" or otherwise deterministic (argmax).
    temperature: Positive float; a value of 0 also falls back to argmax.

  Returns:
    Tensor of shape [...].
  """
  num_classes = vocab_size or common_layers.shape_list(x)[-1]
  use_random = sampling_method == "random" and temperature > 0.0
  if use_random:
    # Flatten to 2-D, temperature-scale the logits, draw one sample per row.
    flat_logits = tf.reshape(x, [-1, num_classes]) / temperature
    samples = tf.multinomial(flat_logits, 1)
  else:
    samples = tf.argmax(x, axis=-1)
  # Restore the leading dimensions of x (dropping the vocab axis).
  return tf.reshape(samples, common_layers.shape_list(x)[:-1])
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):
  """Latent prediction and loss.

  Args:
    latents_pred: Tensor of shape [..., depth].
    latents_discrete_hot: Tensor of shape [..., vocab_size].
    vocab_size: an int representing the vocab size.
    hparams: HParams.

  Returns:
    sample: Tensor of shape [...], a sample from a multinomial distribution.
    loss: Tensor of shape [...], the softmax cross-entropy.
  """
  with tf.variable_scope("latent_logits"):
    latents_logits = tf.layers.dense(latents_pred, vocab_size,
                                     name="logits_dense")
    if hparams.logit_normalization:
      # Rescale logits to (approximately) unit RMS before the softmax.
      latents_logits *= tf.rsqrt(1e-8 +
                                 tf.reduce_mean(tf.square(latents_logits)))
    loss = tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=latents_discrete_hot, logits=latents_logits)
    # TODO(trandustin): tease this out from ae_latent_softmax.
    # we use just the loss portion to anchor prior / encoder on text.
    sample = multinomial_sample(latents_logits,
                                vocab_size,
                                hparams.sampling_method,
                                hparams.sampling_temp)
    return sample, loss
def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams):
  """Samples from the latent space in the autoencoder.

  Args:
    latents_dense_in: Tensor of shape [batch, length_q, ...]. Only the shape of
      its first two dimensions are used. length_q is the latent length, which is
      height * width * hparams.num_latents / (2**hparams.num_compress_steps).
    inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Encodings
      to attend to in decoder.
    ed: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q,
      length_kv]. Encoder-decoder attention bias.
    embed: Callable which embeds discrete latent hot-vectors and a hidden size
      and returns dense vectors.
    hparams: HParams.

  Returns:
    Tensor of shape [batch, length].
  """
  def symbols_to_logits_fn(ids):
    """Go from ids to logits."""
    ids = tf.expand_dims(ids, axis=2)  # Ids start with added all-zeros.
    # Shift left by one so position t predicts latent t.
    latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]])
    with tf.variable_scope(tf.get_variable_scope(), reuse=False):
      latents_dense = embed(
          tf.one_hot(latents_discrete, depth=2**hparams.bottleneck_bits),
          hparams.hidden_size)
      latents_pred = transformer_latent_decoder(
          latents_dense, inputs, ed, hparams, name="latent_prediction")
      logits = tf.layers.dense(
          latents_pred, 2**hparams.bottleneck_bits, name="logits_dense")
      # Only the logits at the position currently being decoded are needed.
      current_output_position = common_layers.shape_list(ids)[1] - 1
      logits = logits[:, current_output_position, :]
    return logits

  initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32)
  length = tf.shape(latents_dense_in)[1]
  # Beam size 1 with alpha 0: greedy autoregressive decoding of the latents.
  ids, _, _ = beam_search.beam_search(
      symbols_to_logits_fn,
      initial_ids,
      1,
      length,
      2**hparams.bottleneck_bits,
      alpha=0.0,
      eos_id=-1,
      stop_early=False)

  res = tf.expand_dims(ids[:, 0, :], axis=2)  # Pick first beam.
  return res[:, 1:]  # Remove the added all-zeros from ids.
def residual_block_layer(inputs, hparams):
  """Residual block over inputs.

  Runs a residual block consisting of
    conv: kernel_size x kernel_size
    conv: 1x1
    dropout, add and normalize according to hparams.layer_postprocess_sequence.

  Args:
    inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
    hparams: HParams.

  Returns:
    Tensor of shape [batch, height, width, hparams.hidden_size].
  """
  kernel = (hparams.res_kernel_size, hparams.res_kernel_size)
  x = inputs
  for i in range(hparams.num_res_layers):
    with tf.variable_scope("res_conv_%d" % i):
      # kernel_size x kernel_size conv block
      y = common_layers.conv_block(
          common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
          hparams.hidden_size, [((1, 1), kernel)],
          strides=(1, 1),
          padding="SAME",
          name="residual_conv")
      # 1x1 conv block
      y = common_layers.conv_block(
          y,
          hparams.hidden_size, [((1, 1), (1, 1))],
          strides=(1, 1),
          padding="SAME",
          name="residual_dense")
      # Residual add + dropout/norm per hparams.layer_postprocess_sequence.
      x = common_layers.layer_postprocess(x, y, hparams)
  return x
def compress_encoder(inputs,
                     hparams,
                     strides=(2, 2),
                     kernel_size=(3, 3),
                     name=None):
  """Encoder that compresses 2-D inputs by 2**num_compress_steps.

  Args:
    inputs: Tensor of shape [batch, height, width, channels].
    hparams: HParams.
    strides: Tuple, strides for conv block.
    kernel_size: Tuple, kernel window size for conv block.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, latent_length, hparams.hidden_size], where
    latent_length is
    hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).
  """
  with tf.variable_scope(name, default_name="compress"):
    x = inputs
    # Each strided conv halves both spatial dims (with strides=(2, 2)), so
    # num_compress_steps // 2 steps shrink the area by 2**num_compress_steps.
    for i in range(hparams.num_compress_steps // 2):
      with tf.variable_scope("compress_conv_%d" % i):
        y = common_layers.conv_block(
            common_layers.layer_norm(
                x, hparams.hidden_size, name="lnorm"),
            hparams.hidden_size,
            dilation_rates_and_kernel_sizes=[((1, 1), kernel_size)],
            strides=strides,
            padding="SAME",
            name="compress_conv_%d" % i)
        y = tf.nn.dropout(y, 1.0 - hparams.dropout)
        if hparams.do_compress_attend:
          # NOTE(review): this overwrites the conv output `y` computed above
          # (the attention runs on `x`, not `y`) — confirm this is intended.
          y = compress_self_attention_layer(
              x, hparams, name="compress_selfatt_%d" % i)
          y += x
        x = y

    x = residual_block_layer(x, hparams)

    # If using multiple copies of latents, blow up the hidden size and then
    # reshape to increase by num_latents.
    shape_x = common_layers.shape_list(x)
    # Bug fix: `name` defaults to None and `None + "_dense"` raised a
    # TypeError; fall back to the scope's default name in that case.
    x = tf.layers.dense(x,
                        hparams.num_latents * hparams.hidden_size,
                        name=(name or "compress") + "_dense")
    return tf.reshape(x, [shape_x[0],
                          shape_x[1] * shape_x[2] * hparams.num_latents,
                          hparams.hidden_size])
def compress_encoder_2d(x, hparams, name=None):
  """Encoder that compresses 2-D inputs by 2**num_compress_steps.

  Args:
    x: Tensor of shape [batch, height, width, channels].
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, latent_length, hparams.hidden_size], where
    latent_length is
    hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).
  """
  # Square kernel; stride 2 along both spatial dimensions.
  square_kernel = (hparams.kernel_size, hparams.kernel_size)
  return compress_encoder(x, hparams, strides=(2, 2),
                          kernel_size=square_kernel, name=name)
def compress_encoder_1d(x, hparams, name=None):
  """Encoder that compresses 1-D inputs by 2**num_compress_steps.

  Args:
    x: Tensor of shape [batch, length, channels].
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, latent_length, hparams.hidden_size], where
    latent_length is
    hparams.num_latents * length / 2**hparams.num_compress_steps.
  """
  # Treat the sequence as a width-1 image and compress along length only.
  as_image = tf.expand_dims(x, axis=2)
  return compress_encoder(as_image, hparams, strides=(2, 1),
                          kernel_size=(hparams.kernel_size, 1), name=name)
def decompress_decoder(inputs,
                       hparams,
                       strides=(2, 2),
                       kernel=(3, 3),
                       name=None):
  """Decoder that decompresses 2-D inputs by 2**num_compress_steps.

  Args:
    inputs: Tensor of shape [batch, compress_height, compress_width, channels].
    hparams: HParams.
    strides: Tuple, strides for conv block.
    kernel: Tuple, kernel window size for conv block.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, height, width, hparams.hidden_size].
  """
  with tf.variable_scope(name, default_name="decompress"):
    # Bug fix: `name` defaults to None, and `None + "_dense"` /
    # `None + "_%d"` below raised a TypeError; fall back to the scope's
    # default name in that case (explicit names are unaffected).
    name = name or "decompress"
    x = inputs
    x = tf.layers.dense(x, hparams.hidden_size, name=name + "_dense")
    x = residual_block_layer(x, hparams)
    for i in range(hparams.num_compress_steps // 2):
      # Scopes are numbered in reverse to mirror the compression loop.
      j = hparams.num_compress_steps // 2 - i - 1
      with tf.variable_scope(name + "_%d" % j):
        if hparams.do_decompress_attend:
          y = compress_self_attention_layer(
              x, hparams, name="decompress_selfatt")
          x += y
        y = tf.layers.conv2d_transpose(
            x,
            hparams.hidden_size,
            kernel,
            strides=strides,
            padding="SAME",
            activation=tf.nn.relu if i > 0 else None,
            name="decompress_conv")
        x = y
    return x
def decompress_decoder_2d(x, hparams, name=None):
  """Decoder that decompresses 2-D inputs by 2**num_compress_steps.

  Args:
    x: Tensor of shape [batch, compress_height, compress_width, channels].
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, height, width, hparams.hidden_size].
  """
  # Square kernel; stride 2 along both spatial dimensions.
  square_kernel = (hparams.kernel_size, hparams.kernel_size)
  return decompress_decoder(x, hparams, strides=(2, 2),
                            kernel=square_kernel, name=name)
def decompress_decoder_1d(x, hparams, name=None):
  """Decoder that decompresses 1-D inputs by 2**num_compress_steps.

  Args:
    x: Tensor of shape [batch, compress_length, channels].
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, length, hparams.hidden_size].
  """
  # Add a dummy width axis, decompress along length only, then drop it.
  as_image = tf.expand_dims(x, axis=2)
  decompressed = decompress_decoder(as_image, hparams, strides=(2, 1),
                                    kernel=(hparams.kernel_size, 1), name=name)
  return tf.squeeze(decompressed, axis=2)
def transformer_text_encoder(inputs,
                             target_space,
                             hparams,
                             name=None):
  """Transformer text encoder over inputs with unmasked full attention.

  Args:
    inputs: Tensor of shape [batch, length, 1, hparams.hidden_size].
    target_space: int. Used for encoding inputs under a target space id.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    encoder_output: Tensor of shape [batch, length, hparams.hidden_size].
    ed: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias
      for any padded tokens.
  """
  with tf.variable_scope(name, default_name="transformer_text_encoder"):
    # Drop the dummy axis: [batch, length, 1, h] -> [batch, length, h].
    inputs = common_layers.flatten4d3d(inputs)
    [
        encoder_input,
        encoder_self_attention_bias,
        ed,
    ] = transformer_layers.transformer_prepare_encoder(
        inputs, target_space=target_space, hparams=hparams)
    encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)
    encoder_output = transformer_layers.transformer_encoder(
        encoder_input, encoder_self_attention_bias, hparams)
    return encoder_output, ed
def transformer_image_decoder(targets,
                              encoder_output,
                              ed_attention_bias,
                              hparams,
                              name=None):
  """Transformer image decoder over targets with local attention.

  Args:
    targets: Tensor of shape [batch, ...], and whose size is batch * height *
      width * hparams.num_channels * hparams.hidden_size.
    encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, height, width * hparams.num_channels,
    hparams.hidden_size].
  """
  with tf.variable_scope(name, default_name="transformer_dec"):
    batch_size = common_layers.shape_list(targets)[0]
    # Fold channels into the feature axis: one position per (h, w) pixel.
    targets = tf.reshape(targets, [batch_size,
                                   hparams.img_len,
                                   hparams.img_len,
                                   hparams.num_channels * hparams.hidden_size])
    decoder_input, _, _ = cia.prepare_decoder(targets, hparams)
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        encoder_output,
        hparams.num_decoder_layers or hparams.num_hidden_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        encoder_decoder_attention_bias=ed_attention_bias,
        name="decoder")
    # Unfold channels back along the width axis for the output head.
    decoder_output = tf.reshape(decoder_output,
                                [batch_size,
                                 hparams.img_len,
                                 hparams.img_len * hparams.num_channels,
                                 hparams.hidden_size])
    return decoder_output
def transformer_latent_decoder(x,
                               encoder_output,
                               ed_attention_bias,
                               hparams,
                               name=None):
  """Transformer decoder over latents using latent_attention_type.

  Args:
    x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the
      latent length, which is
      height * width * hparams.num_latents / (2**hparams.num_compress_steps).
    encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    hparams: HParams.
    name: string, variable scope.

  Returns:
    Tensor of shape [batch, length_q, hparams.hidden_size].
  """
  with tf.variable_scope(name, default_name="transformer_latent_dec"):
    batch_size = common_layers.shape_list(x)[0]
    # Lay the flat latent sequence out as a 2-D grid for image-style
    # (e.g. local 2-D) attention; num_latents copies extend the width.
    compressed_img_len = (hparams.img_len //
                          2**(hparams.num_compress_steps // 2))
    x = tf.reshape(x, [batch_size,
                       compressed_img_len,
                       compressed_img_len * hparams.num_latents,
                       hparams.hidden_size])
    decoder_input, _, _ = cia.prepare_decoder(x, hparams)
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        encoder_output,
        hparams.num_latent_layers or hparams.num_hidden_layers,
        hparams,
        attention_type=hparams.latent_attention_type,
        encoder_decoder_attention_bias=ed_attention_bias,
        name="decoder")
    # Flatten back to the 1-D latent sequence.
    decoder_output = tf.reshape(decoder_output,
                                [batch_size,
                                 compressed_img_len**2 * hparams.num_latents,
                                 hparams.hidden_size])
    return decoder_output
def bottleneck_layer(inputs,
                     hparams,
                     name="discrete_bottleneck"):
  """Computes latents given inputs (typically, compressed targets).

  Args:
    inputs: Tensor of shape [batch, latent_length, hparams.hidden_size]
      (the output of compress_encoder_*).
    hparams: HParams whose `bottleneck` callable performs the discretization.
    name: string, variable scope name passed to the bottleneck.

  Returns:
    Tuple of (latents_dense, latents_discrete, extra_loss, embed_fn):
    dense latent codes, their discrete assignments, the bottleneck's
    auxiliary loss, and a callable embedding discrete codes back to dense.
  """
  [
      latents_dense,
      latents_discrete,
      extra_loss,
      embed_fn,
      _,
  ] = hparams.bottleneck(inputs=inputs,
                         filter_size=hparams.compress_filter_size,
                         name=name,
                         mode=hparams.mode)
  if DO_SUMMARIES:
    tf.summary.histogram("discrete_latents",
                         tf.reshape(latents_discrete, [-1]))
  return latents_dense, latents_discrete, extra_loss, embed_fn
def latent_prediction_model(inputs,
                            ed_attention_bias,
                            latents_discrete,
                            latents_dense,
                            hparams,
                            vocab_size=None,
                            name=None):
  """Transformer-based latent prediction model.

  It is an autoregressive decoder over latents_discrete given inputs.

  Args:
    inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Inputs to
      attend to for the decoder on latents.
    ed_attention_bias: Tensor which broadcasts with shape [batch,
      hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    latents_discrete: Tensor of shape [batch, length_q, vocab_size].
      One-hot latents to compute log-probability of given inputs.
    latents_dense: Tensor of shape [batch, length_q, hparams.hidden_size].
      length_q is the latent length, which is
      height * width * hparams.num_latents / (2**hparams.num_compress_steps).
    hparams: HParams.
    vocab_size: int or None. If None, it is 2**hparams.bottleneck_bits.
    name: string, variable scope.

  Returns:
    latents_pred: Tensor of shape [batch, length_q, hparams.hidden_size].
    latents_pred_loss: Tensor of shape [batch, length_q].
  """
  with tf.variable_scope(name, default_name="latent_prediction"):
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      # Stop-gradient: the prior is trained to predict the latents, but the
      # latents are not trained to be predictable.
      latents_pred = transformer_latent_decoder(tf.stop_gradient(latents_dense),
                                                inputs,
                                                ed_attention_bias,
                                                hparams,
                                                name)
    # NOTE(review): in PREDICT mode `latents_pred` is never assigned, so the
    # call below would raise NameError — presumably this function is only
    # invoked in TRAIN/EVAL; confirm with callers.
    if vocab_size is None:
      vocab_size = 2**hparams.bottleneck_bits
    if not hparams.soft_em:
      # TODO(trandustin): latents_discrete is not one-hot from
      # discrete_bottleneck unless hparams.soft_em is True. Refactor.
      latents_discrete = tf.one_hot(latents_discrete, depth=vocab_size)
    _, latent_pred_loss = ae_latent_softmax(
        latents_pred, tf.stop_gradient(latents_discrete), vocab_size, hparams)
  return latents_pred, latent_pred_loss
def transformer_autoencoder(inputs,
                            targets,
                            target_space,
                            hparams,
                            cache=None,
                            predict_mask=1.0):
  """Auto-encoder using a Transformer decoder and a prior over latent sequences.
  Args:
    inputs: Tensor of shape [batch, length, 1, hparams.hidden_size] or None.
    targets: Tensor of shape [batch, ..., channels]. Ellipses may be 1 or 2
      dimensions denoting sequence length.
    target_space: int. Used for encoding inputs under a target space id.
    hparams: HParams.
    cache: Tensor of shape [batch, length] or None.
    predict_mask: Tensor masking whether to use gold targets or predictions.
  Returns:
    decoder_output: Tensor of shape [batch, ..., hparams.hidden_size] presenting
      pre-logit activations. After a transformation (`top` in `T2TModel`), it is
      used with targets to compute the "training" (reconstruction) loss.
    losses: dict of str to Tensors. There are three loss terms: "extra",
      "extra_loss", and "latent_pred". The first is hard-coded to 0. The latter
      two are Tensors of shape [batch].
    cache: Tensor of shape [batch, length], either the same as cache, or newly
      computed if the cache input is None.
  """
  original_targets_shape = common_layers.shape_list(targets)
  batch_size = original_targets_shape[0]
  # Rank 4 targets (images) use the 2-D compress/decompress pair; everything
  # else is treated as a 1-D sequence.
  if len(original_targets_shape) == 4:
    compress_fn = compress_encoder_2d
    decompress_fn = decompress_decoder_2d
  else:
    compress_fn = compress_encoder_1d
    decompress_fn = decompress_decoder_1d
  ed_attention_bias = None
  if inputs is not None:
    inputs, ed_attention_bias = transformer_text_encoder(
        inputs, target_space, hparams, name="input_encoder")
  losses = {"extra": 0.,
            "extra_loss": 0.,
            "latent_pred": 0.}
  if hparams.mode != tf.estimator.ModeKeys.PREDICT:
    targets_compressed = compress_fn(targets, hparams, name="compress")
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      scale = common_layers.inverse_exp_decay(hparams.startup_steps)
    else:
      scale = 1.0
    # Per-example Bernoulli mask (probability `scale`) used to ramp the latent
    # losses in during training warm-up.
    scale = tf.to_float(tf.less(tf.random_uniform([batch_size]), scale))
    latents_dense, latents_discrete, extra_loss, _ = bottleneck_layer(
        targets_compressed, hparams)
    extra_loss = scale * tf.reduce_mean(extra_loss)
    _, latents_pred_loss = latent_prediction_model(
        inputs, ed_attention_bias, latents_discrete, latents_dense, hparams,
        name="latent_pred")
    # Only enable the latent-prediction loss after mask_startup_steps.
    latent_time = tf.less(hparams.mask_startup_steps,
                          tf.to_int32(tf.train.get_global_step()))
    latents_pred_loss = scale * tf.reduce_mean(latents_pred_loss)
    latents_pred_loss *= tf.to_float(latent_time)
    # Apply dropout noise for each data point and time step.
    latents_dense_shape = common_layers.shape_list(latents_dense)
    latents_dense = tf.nn.dropout(
        latents_dense,
        keep_prob=1 - hparams.latent_dropout,
        noise_shape=[latents_dense_shape[0], latents_dense_shape[1], 1])
    # TODO(trandustin): Can we combine extra and extra_loss?
    losses = {"extra": 0.,
              "extra_loss": extra_loss,
              "latent_pred": latents_pred_loss}
  else:
    # Set the latent length, which is num_latents times the number of latent
    # pixels. The number of latent pixels is determined by a compression factor
    # on the number of image pixels.
    latent_len = ((hparams.img_len * hparams.img_len * hparams.num_latents) /
                  (2**hparams.num_compress_steps))
    # NOTE(review): targets_compressed is only defined in the training branch
    # above; this line would raise NameError in PREDICT mode -- confirm.
    _, _, _, embed_fn = bottleneck_layer(targets_compressed, hparams)
    latents_dense = tf.zeros([batch_size, latent_len, 1, hparams.hidden_size])
    if cache is None:
      # Autoregressively sample discrete latents with beam search.
      cache = ae_latent_sample_beam(latents_dense,
                                    inputs,
                                    ed_attention_bias,
                                    embed_fn,
                                    hparams)
    cache_one_hot = tf.one_hot(cache, depth=2**hparams.bottleneck_bits)
    latents_dense = embed_fn(cache_one_hot, hparams.hidden_size)
  if len(original_targets_shape) == 4:
    compressed_img_len = (hparams.img_len //
                          2**(hparams.num_compress_steps // 2))
    latents_dense = tf.reshape(latents_dense,
                               [batch_size,
                                compressed_img_len,
                                compressed_img_len,
                                hparams.num_latents * hparams.hidden_size])
  latents_dense = decompress_fn(latents_dense, hparams, name="decompress")
  latents_dense = tf.reshape(
      latents_dense,
      [-1, hparams.img_len, hparams.img_len, hparams.hidden_size])
  if hparams.use_gold_targets:
    # Randomly mix gold targets into the decompressed latents; the mixing
    # probability anneals via inverse_exp_decay during training.
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
      masking = predict_mask
    else:
      masking = common_layers.inverse_exp_decay(hparams.mask_startup_steps)
    targets, _, _ = cia.maybe_reshape_4d_to_3d(targets)
    mask = tf.less(masking,
                   tf.random_uniform(common_layers.shape_list(targets)[:-1]))
    mask = tf.expand_dims(tf.to_float(mask), 2)
    latents_dense = mask * targets + (1.0 - mask) * latents_dense
  latents_dense = tf.reshape(latents_dense, original_targets_shape)
  if hparams.decode_autoregressive:
    decoder_output = transformer_image_decoder(
        latents_dense, inputs, ed_attention_bias, hparams, name="decoder")
  else:
    decoder_output = latents_dense
  return decoder_output, losses, cache
def iaf_flow(one_hot_assignments,
             scale_weights,
             scale_bias,
             num_codes,
             summary=True,
             name=None):
  """Performs a single IAF flow using scale and normalization transformations.
  Args:
    one_hot_assignments: Assignments Tensor with shape [num_samples, batch_size,
      latent_size, num_codes].
    scale_weights: Tensor corresponding to lower triangular matrix used to
      autoregressively generate scale matrix from assignments. To ensure the
      lower-triangular matrix has length of latent_size, scale_weights should
      be a rank-one tensor with size latent_size * (latent_size + 1) / 2.
    scale_bias: Bias tensor to be added to scale tensor, with shape
      [latent_size, num_codes]. If scale weights are zero, initialize scale_bias
      to be log(exp(1.) / 2. - 1) so initial transformation is identity.
    num_codes: Number of codes in codebook.
    summary: Whether to save summaries.
    name: String used for name scope.
  Returns:
    flow_output: Transformed one-hot assignments.
    inverse_log_det_jacobian: Inverse log deteriminant of Jacobian corresponding
      to transformation.
  """
  with tf.name_scope(name, default_name="iaf"):
    # Pad the one_hot_assignments by zeroing out the first latent dimension and
    # shifting the rest down by one (and removing the last dimension).
    padded_assignments = tf.pad(
        one_hot_assignments, [[0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :-1, :]
    # Lower-triangular Affine bijector: with the shift above, each latent
    # position's scale depends only on earlier positions (autoregressive).
    scale_bijector = tfp.distributions.bijectors.Affine(
        scale_tril=tfp.math.fill_triangular(scale_weights))
    scale = scale_bijector.forward(
        tf.transpose(padded_assignments, [0, 1, 3, 2]))
    # Transpose the bijector output since it performs a batch matmul.
    scale = tf.transpose(scale, [0, 1, 3, 2])
    # softplus twice keeps the scale strictly positive.
    scale = tf.nn.softplus(scale)
    scale = scale + tf.nn.softplus(scale_bias[tf.newaxis, tf.newaxis, ...])
    # Don't need last dimension since the transformation keeps it constant.
    scale = scale[..., :-1]

    z = one_hot_assignments[..., :-1]
    unnormalized_probs = tf.concat([z * scale,
                                    one_hot_assignments[..., -1, tf.newaxis]],
                                   axis=-1)
    normalizer = tf.reduce_sum(unnormalized_probs, axis=-1)
    # Renormalize so the transformed assignments still sum to one.
    flow_output = unnormalized_probs / (normalizer[..., tf.newaxis])
    inverse_log_det_jacobian = (-tf.reduce_sum(tf.log(scale), axis=-1)
                                + num_codes * tf.log(normalizer))
    if summary:
      tf.summary.histogram("iaf/scale", tf.reshape(scale, [-1]))
      tf.summary.histogram("iaf/inverse_log_det_jacobian",
                           tf.reshape(inverse_log_det_jacobian, [-1]))
    return flow_output, inverse_log_det_jacobian
| 39.677207 | 80 | 0.649743 |
cd36d5703e99df029a12169db4668bf17d868c92 | 2,088 | py | Python | benchmark/openweathermap/thrift/run.py | jviotti/binary-json-size-benchmark | a515dfd05736204fb36d3571a6a6b17e5f6e4916 | [
"Apache-2.0"
] | 2 | 2022-01-14T06:09:26.000Z | 2022-02-04T02:13:03.000Z | benchmark/openweathermap/thrift/run.py | jviotti/binary-json-size-benchmark | a515dfd05736204fb36d3571a6a6b17e5f6e4916 | [
"Apache-2.0"
] | null | null | null | benchmark/openweathermap/thrift/run.py | jviotti/binary-json-size-benchmark | a515dfd05736204fb36d3571a6a6b17e5f6e4916 | [
"Apache-2.0"
def encode(json, schema):
    """Build a Thrift ``Main`` payload from an OpenWeatherMap JSON document.

    ``json`` is the decoded API response dict; ``schema`` is the generated
    Thrift module providing the struct constructors.
    """
    payload = schema.Main()

    coord = schema.Coord()
    coord.lon = json['coord']['lon']
    coord.lat = json['coord']['lat']
    payload.coord = coord

    src_weather = json['weather'][0]
    weather = schema.Weather()
    weather.id = src_weather['id']
    weather.main = src_weather['main']
    weather.description = src_weather['description']
    weather.icon = src_weather['icon']
    payload.weather = [weather]

    payload.base = json['base']

    src_main = json['main']
    main_obj = schema.MainObject()
    main_obj.temp = src_main['temp']
    main_obj.feels_like = src_main['feels_like']
    main_obj.temp_min = src_main['temp_min']
    main_obj.temp_max = src_main['temp_max']
    main_obj.pressure = src_main['pressure']
    main_obj.humidity = src_main['humidity']
    payload.main = main_obj

    payload.visibility = json['visibility']

    wind = schema.Wind()
    wind.speed = json['wind']['speed']
    wind.deg = json['wind']['deg']
    payload.wind = wind

    clouds = schema.Clouds()
    clouds.all = json['clouds']['all']
    payload.clouds = clouds

    payload.dt = json['dt']

    src_sys = json['sys']
    sys_obj = schema.Sys()
    sys_obj.type = src_sys['type']
    sys_obj.id = src_sys['id']
    sys_obj.message = src_sys['message']
    sys_obj.country = src_sys['country']
    sys_obj.sunrise = src_sys['sunrise']
    sys_obj.sunset = src_sys['sunset']
    payload.sys = sys_obj

    payload.timezone = json['timezone']
    payload.id = json['id']
    payload.name = json['name']
    payload.cod = json['cod']
    return payload
def decode(payload):
    """Convert an encoded Thrift payload back into a plain JSON-style dict."""
    def as_dict(struct):
        # Thrift structs expose their fields through __dict__.
        return struct.__dict__

    return {
        'coord': as_dict(payload.coord),
        'weather': [as_dict(payload.weather[0])],
        'base': payload.base,
        'main': as_dict(payload.main),
        'visibility': payload.visibility,
        'wind': as_dict(payload.wind),
        'clouds': as_dict(payload.clouds),
        'dt': payload.dt,
        'sys': as_dict(payload.sys),
        'timezone': payload.timezone,
        'id': payload.id,
        'name': payload.name,
        'cod': payload.cod
    }
| 31.636364 | 70 | 0.613506 |
69e008dddf3b9176d3b4d9ed04bc12d731d81137 | 1,514 | py | Python | application.py | sumandari/gis0 | 2a61d1e7cf506c3f31c49b2d5b075669a88e2248 | [
"MIT"
] | null | null | null | application.py | sumandari/gis0 | 2a61d1e7cf506c3f31c49b2d5b075669a88e2248 | [
"MIT"
] | null | null | null | application.py | sumandari/gis0 | 2a61d1e7cf506c3f31c49b2d5b075669a88e2248 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, url_for, jsonify, request
from pymongo import MongoClient
import datetime
from pprint import pprint
import re
# Flask application serving the Leaflet map page and the customer search API.
app = Flask(__name__)
# MongoDB client on the default localhost connection; all collections used by
# this app live in the ``gis0`` database.
client = MongoClient()
db = client.gis0
@app.route('/')
def index():
    """Serve the main map page (Leaflet front end)."""
    return render_template("leaflet2.html")
@app.route('/search', methods=['POST'])
def search():
    """Search customers by number, name and street.

    Reads ``nomer_pel``, ``nama_pel`` and ``jalan_pel`` from the POSTed form
    and returns a JSON list of matching customers (number, name, address and
    coordinates), sorted by name. An empty list is returned when nothing
    matches.
    """
    # get form data (any field may be missing or empty)
    nomer_pelanggan = request.form.get('nomer_pel')
    nama_pelanggan = request.form.get('nama_pel')
    jalan_pelanggan = request.form.get('jalan_pel')
    # Build a single query instead of duplicating it for the with/without
    # customer-number cases; the number is an exact match, the rest are
    # case-insensitive substring matches.
    query = {
        "properties.NAMA": re.compile(nama_pelanggan, re.IGNORECASE),
        "properties.ALAMAT_1": re.compile(jalan_pelanggan, re.IGNORECASE),
    }
    if nomer_pelanggan:
        query["properties.NO__SAMBUN"] = nomer_pelanggan
    cari = list(db.pelangganlok.find(query).sort("properties.NAMA", 1))
    if not cari:
        return jsonify([])
    hasil = [{"nomer": c['properties']['NO__SAMBUN'],
              "nama": c['properties']['NAMA'],
              "alamat": c['properties']['ALAMAT_1'],
              "coordinates": c['geometry']['coordinates']}
             for c in cari]
    return jsonify(hasil)
39d2e46e36d7d83d004db5a80a84576875227f85 | 2,248 | py | Python | Toolkits/VCS/repology__repology-api/repology/parser/freebsd.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | Toolkits/VCS/repology__repology-api/repology/parser/freebsd.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | Toolkits/VCS/repology__repology-api/repology/parser/freebsd.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | # Copyright (C) 2016-2017 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import sys
from repology.package import Package
from repology.util import GetMaintainers, SplitPackageNameVersion
def SanitizeVersion(version):
    """Strip FreeBSD PORTEPOCH (`,N`) and PORTREVISION (`_N`) suffixes.

    Returns (cleaned_version, original_version) when anything was stripped,
    otherwise (version, None).
    """
    origversion = version

    head, sep, _ = version.rpartition(',')
    if sep:
        version = head

    head, sep, _ = version.rpartition('_')
    if sep:
        version = head

    return (version, None) if version == origversion else (version, origversion)
class FreeBSDIndexParser():
    """Parses the pipe-delimited FreeBSD ports INDEX file into Package objects."""

    def __init__(self):
        pass

    def _MakePackage(self, fields):
        """Build a single Package from one already-split INDEX line."""
        pkg = Package()
        pkg.name, rawversion = SplitPackageNameVersion(fields[0])
        pkg.version, pkg.origversion = SanitizeVersion(rawversion)
        pkg.comment = fields[3]
        pkg.maintainers = GetMaintainers(fields[5])
        # Only the first of the space-separated categories is kept.
        pkg.category = fields[6].split(' ')[0]
        if fields[9]:
            pkg.homepage = fields[9]
        origin_parts = fields[1].split('/')
        pkg.extrafields['portname'] = origin_parts[-1]
        pkg.extrafields['origin'] = '/'.join(origin_parts[-2:])
        return pkg

    def Parse(self, path):
        """Read the INDEX file at ``path`` and return a list of Package objects."""
        result = []
        with open(path, encoding='utf-8') as indexfile:
            for line in indexfile:
                fields = line.strip().split('|')
                # A valid INDEX row carries exactly 13 pipe-separated fields.
                if len(fields) != 13:
                    print('WARNING: package {} skipped, incorrect number of fields in INDEX'.format(fields[0]), file=sys.stderr)
                    continue
                result.append(self._MakePackage(fields))
        return result
| 29.973333 | 128 | 0.612544 |
137973efc174b12637828d333ad0a95ee7a97135 | 30,823 | py | Python | implementation/baseline.py | JoshuaW1990/bus_arrival_prediction | ee7aa06d849d4ea5307b5c650fed6d468d95f02e | [
"MIT"
] | 12 | 2017-02-01T15:09:23.000Z | 2022-01-03T03:27:26.000Z | implementation/baseline.py | JoshuaW1990/bus_arrival_prediction | ee7aa06d849d4ea5307b5c650fed6d468d95f02e | [
"MIT"
] | 1 | 2019-11-28T14:09:09.000Z | 2019-11-28T14:09:09.000Z | implementation/baseline.py | JoshuaW1990/bus_arrival_prediction | ee7aa06d849d4ea5307b5c650fed6d468d95f02e | [
"MIT"
] | 3 | 2018-07-25T15:52:25.000Z | 2021-05-10T12:07:27.000Z | """
Calculate and assess the estimated arrival time with different baseline algorithm
"""
# import modules
import pandas as pd
import os
from datetime import timedelta, datetime
#################################################################################################################
# helper function for api data, segment data, and other calcualtion #
#################################################################################################################
def filter_single_history(single_history, stop_sequence):
    """
    Filter one day's history records for one trip, dropping abnormal rows.

    Keeps only records heading towards a stop on the given route with a
    positive distance, then removes records whose total_distance does not
    strictly increase (GPS noise / backtracking).

    :param single_history: dataframe for history table with only one day
    :param stop_sequence: list of stop id
    :return: filtered dataframe, or None when fewer than 3 usable records
    """
    candidates = single_history[
        (single_history.next_stop_id.isin(stop_sequence)) & (single_history.dist_along_route > 0)]
    if len(candidates) < 3:
        return None
    filtered = pd.DataFrame(columns=candidates.columns)
    anchor = candidates.iloc[0]
    pos = 1
    while pos < len(candidates):
        # skip every record that has not moved past the current anchor
        while pos < len(candidates) and \
                float(candidates.iloc[pos].total_distance) <= float(anchor.total_distance):
            pos += 1
        filtered.loc[len(filtered)] = anchor
        if pos < len(candidates):
            anchor = candidates.iloc[pos]
        pos += 1
    # the final anchor may not have been emitted yet
    if float(anchor.total_distance) > float(filtered.iloc[-1].total_distance):
        filtered.loc[len(filtered)] = anchor
    return filtered
def calculate_arrival_time(stop_dist, prev_dist, next_dist, prev_timestamp, next_timestamp):
    """
    Interpolate the timestamp at which the bus reaches the target stop.

    The stop lies between two consecutive GPS records; its arrival time is
    obtained by linear interpolation on distance:
        fraction = (stop_dist - prev_dist) / (next_dist - prev_dist)
        arrival = prev_timestamp + fraction * (next_timestamp - prev_timestamp)

    :param stop_dist: distance of the target stop along the route
    :param prev_dist: distance of the bus at the earlier record
    :param next_dist: distance of the bus at the later record
    :param prev_timestamp: datetime of the earlier record
    :param next_timestamp: datetime of the later record
    :return: datetime at which the bus is estimated to reach the stop
    """
    fraction = float(stop_dist - prev_dist) / float(next_dist - prev_dist)
    elapsed_seconds = (next_timestamp - prev_timestamp).total_seconds() * fraction
    return prev_timestamp + timedelta(seconds=elapsed_seconds)
def calculate_arrival_distance(time_of_day, prev_dist, next_dist, prev_timestamp, next_timestamp):
    """
    Interpolate the bus position (distance along route) at a given time.

    Linear interpolation on time between the two surrounding records:
        fraction = (time_of_day - prev_timestamp) / (next_timestamp - prev_timestamp)
        position = prev_dist + fraction * (next_dist - prev_dist)

    :param time_of_day: datetime for which the position is requested
    :param prev_dist: distance of the bus at the earlier record
    :param next_dist: distance of the bus at the later record
    :param prev_timestamp: datetime of the earlier record
    :param next_timestamp: datetime of the later record
    :return: interpolated dist_along_route at time_of_day
    """
    fraction = ((time_of_day - prev_timestamp).total_seconds() /
                (next_timestamp - prev_timestamp).total_seconds())
    return prev_dist + (next_dist - prev_dist) * fraction
def calculate_time_from_stop(segment_df, dist_along_route, prev_record, next_record):
    """
    Calculate the time from stop within the tuple (prev_record, next_record)
    Algorithm:
    if prev_record = next_record:
        the bus is parking at the stop, return 0
    Calculate the distance within the tuple
    Calculate the distance between the current location and the prev record
    Calculate the ratio of these two distances
    Use the ratio to calculate the time_from_stop

    :param segment_df: dataframe for the preprocessed segment data
    :param dist_along_route: distance between the initial stop and the current location of the bus
    :param prev_record: single record of the route_stop_dist.csv file
    :param next_record: single record of the route_stop_dist.csv file
    :return: total seconds of the time_from_stop
    """
    if prev_record.get('stop_id') == next_record.get('stop_id'):
        return 0.0
    distance_stop_stop = next_record.get('dist_along_route') - prev_record.get('dist_along_route')
    distance_bus_stop = next_record.get('dist_along_route') - dist_along_route
    ratio = float(distance_bus_stop) / float(distance_stop_stop)
    assert ratio < 1
    # Look up the observed travel time for this segment; was previously a bare
    # try/except which silently swallowed every error -- check for an empty
    # match explicitly and fall back to the global mean travel duration.
    matched = segment_df[(segment_df.segment_start == prev_record.get('stop_id')) &
                         (segment_df.segment_end == next_record.get('stop_id'))]
    if len(matched) == 0:
        travel_duration = segment_df['travel_duration'].mean()
    else:
        travel_duration = matched.iloc[0]['travel_duration']
    return travel_duration * ratio
#################################################################################################################
# estimator functions #
#################################################################################################################
def generate_estimated_arrival_time(api_data, preprocessed_segment_data, route_stop_dist, trips):
    """
    Predict the estimated arrival time according to the api data
    Algorithm:
    Build the empty dataframe
    for row in api_data:
        get the route_id according to the trip id and the trips.txt file
        get the stop sequence and the dist_along_route according to the route id
        get the end_index according to the stop id
        get the (prev, next) stop tuple according to the dist_along_route in the record
        get the count = end_index - next_index
        if count < 0:
            the bus has passed
            continue to next row
        if count = 0, the next stop is the target stop:
            calcualte the time_from_stop for (prev, next) tuple
            save the result as the estimated time
        if count > 0:
            get the stop list from next_stop to target_stop
            sum the travel duration for all of them
            calculate the time_from_stop for (prev, next) tuple
            add the total travel duration with the time_from_stop(prev, next)
            save the result as the estimated time
        save the result into the dataframe
    :param api_data: dataframe for the api_data.csv
    :param preprocessed_segment_data: dataframe for the preprocessed final_segment.csv file according to different baseline algorithm
    :param route_stop_dist: dataframe of the route_stop_dist.csv file
    :param trips: dataframe for the trips.txt file
    :return: dataframe to store the result including the esitmated arrival time
    """
    result = pd.DataFrame(
        columns=['trip_id', 'route_id', 'stop_id', 'vehicle_id', 'time_of_day', 'service_date', 'dist_along_route',
                 'stop_num_from_call', 'estimated_arrival_time', 'shape_id'])
    print "length of the api data is: ", len(api_data)
    # global fallback for segments with no learned travel duration
    average_travel_duration = preprocessed_segment_data['travel_duration'].mean()
    for i in xrange(len(api_data)):
        if i % 1000 == 0:
            print i
        item = api_data.iloc[i]
        trip_id = item.get('trip_id')
        route_id = trips[trips.trip_id == trip_id].iloc[0].route_id
        single_route_stop_dist = route_stop_dist[route_stop_dist.route_id == route_id]
        stop_sequence = list(single_route_stop_dist.stop_id)
        target_stop = item.get('stop_id')
        target_index = stop_sequence.index(target_stop)
        dist_along_route = item.get('dist_along_route')
        vehicle_id = item.get('vehicle_id')
        time_of_day = item.get('time_of_day')
        service_date = item.get('date')
        shape_id = item.get('shape_id')
        # bus already past the terminal stop: nothing to predict
        if dist_along_route >= single_route_stop_dist.iloc[-1].dist_along_route:
            continue
        # locate the (prev, next) stop pair bracketing the current bus position
        for j in range(1, len(stop_sequence)):
            if single_route_stop_dist.iloc[j - 1].dist_along_route < dist_along_route < single_route_stop_dist.iloc[j].dist_along_route:
                prev_record = single_route_stop_dist.iloc[j - 1]
                next_record = single_route_stop_dist.iloc[j]
                break
            elif single_route_stop_dist.iloc[j - 1].dist_along_route == dist_along_route:
                # bus sits exactly at a stop
                prev_record = single_route_stop_dist.iloc[j - 1]
                next_record = prev_record
                break
            else:
                continue
        # NOTE(review): if the loop above never breaks (bus before the first
        # stop), prev_record/next_record are unbound or stale from a previous
        # iteration -- confirm upstream filtering prevents this case.
        next_index = stop_sequence.index(next_record.get('stop_id'))
        # number of full segments between the next stop and the target stop
        count = target_index - next_index
        if count < 0:
            continue
        elif count == 0:
            total_travel_duration = calculate_time_from_stop(preprocessed_segment_data, dist_along_route, prev_record,
                                                             next_record)
        else:
            # sum the learned travel durations for every full segment ahead
            total_travel_duration = 0.0
            for j in xrange(next_index, target_index):
                segment_start = stop_sequence[j]
                segment_end = stop_sequence[j + 1]
                segment_record = preprocessed_segment_data[
                    (preprocessed_segment_data.segment_start == segment_start) & (
                        preprocessed_segment_data.segment_end == segment_end)]
                if len(segment_record) == 0:
                    single_travel_duration = average_travel_duration
                else:
                    single_travel_duration = segment_record.iloc[0]['travel_duration']
                total_travel_duration += single_travel_duration
            # add the partial duration from the bus position to the next stop
            time_from_stop = calculate_time_from_stop(preprocessed_segment_data, dist_along_route, prev_record,
                                                      next_record)
            total_travel_duration += time_from_stop
        result.loc[len(result)] = [trip_id, route_id, target_stop, vehicle_id, time_of_day, service_date,
                                   dist_along_route, count + 1, total_travel_duration, shape_id]
    return result
def generate_estimated_arrival_time_baseline3(api_data, full_segment_data, route_stop_dist, trips):
    """
    Calculate the estimated arrival time based on the baseline 3. Use the segment data according to the specific trip and the date to predict the result
    Algorithm
    For row in api data:
        extract the trip id and the service date of that row
        extract the single segment data according to the trip id and the service date
        divide the single segment data through groupby(segment start, segment end)
        calculate the average value for each one in the groupby function
        calculate the estimated arrival time for this row according to the result
        save the result
    concatenate the result
    :param api_data: dataframe for the api_data.csv
    :param segment_data: dataframe for the preprocessed final_segment.csv file according to different baseline algorithm
    :param route_stop_dist: dataframe of the route_stop_dist.csv file
    :param trips: dataframe for the trips.txt file
    :return: dataframe to store the result including the esitmated arrival time
    """
    def helper(preprocessed_segment_data, average_travel_duration, dist_along_route, prev_record, next_record):
        # Partial travel time from the current bus position to the next stop,
        # interpolated linearly on distance within the (prev, next) segment.
        segment_start = prev_record.get('stop_id')
        segment_end = next_record.get('stop_id')
        if segment_start == segment_end:
            # bus is parked exactly at the stop
            return 0.0
        distance_stop_stop = next_record.get('dist_along_route') - prev_record.get('dist_along_route')
        distance_bus_stop = next_record.get('dist_along_route') - dist_along_route
        ratio = float(distance_bus_stop) / float(distance_stop_stop)
        assert ratio < 1
        # preprocessed_segment_data is a Series indexed by (start, end);
        # fall back to the trip's mean when the segment was never observed
        travel_duration = preprocessed_segment_data.get((segment_start, segment_end), average_travel_duration)
        time_from_stop = travel_duration * ratio
        return time_from_stop

    result = pd.DataFrame(
        columns=['trip_id', 'route_id', 'stop_id', 'vehicle_id', 'time_of_day', 'service_date', 'dist_along_route',
                 'stop_num_from_call', 'estimated_arrival_time', 'shape_id'])
    for i in xrange(len(api_data)):
        item = api_data.iloc[i]
        trip_id = item.get('trip_id')
        shape_id = item.get('shape_id')
        route_id = trips[trips.trip_id == trip_id].iloc[0].route_id
        single_route_stop_dist = route_stop_dist[route_stop_dist.shape_id == shape_id]
        stop_sequence = list(single_route_stop_dist.stop_id)
        target_stop = item.get('stop_id')
        target_index = stop_sequence.index(target_stop)
        dist_along_route = item.get('dist_along_route')
        vehicle_id = item.get('vehicle_id')
        time_of_day = item.get('time_of_day')
        service_date = item.get('date')
        # preprocess the segment data according to the trip id and the service date
        # (leave-one-out: exclude the very trip/date being predicted)
        segment_data = full_segment_data[(full_segment_data.service_date != service_date) | (full_segment_data.trip_id != trip_id)]
        trip_list = set(trips[trips.shape_id == shape_id].trip_id)
        single_segment_data = segment_data[(segment_data.trip_id.isin(trip_list))]
        grouped = single_segment_data.groupby(['segment_start', 'segment_end'])
        preprocessed_segment_data = grouped['travel_duration'].mean()
        average_travel_duration = single_segment_data['travel_duration'].mean()
        # find the segment containing the current location of the api data
        prev_route_stop_dist = single_route_stop_dist[single_route_stop_dist.dist_along_route < dist_along_route]
        next_route_stop_dist = single_route_stop_dist[single_route_stop_dist.dist_along_route >= dist_along_route]
        if len(prev_route_stop_dist) == 0 or len(next_route_stop_dist) == 0:
            continue
        next_index = len(prev_route_stop_dist)
        count = target_index - next_index
        # check how many stops between the current location and the target stop
        prev_record = prev_route_stop_dist.iloc[-1]
        next_record = next_route_stop_dist.iloc[0]
        if count < 0:
            continue
        elif count == 0:
            total_travel_duration = helper(preprocessed_segment_data, average_travel_duration, dist_along_route, prev_record, next_record)
        else:
            # sum per-trip mean durations for every full segment ahead, then
            # add the partial duration to the next stop
            total_travel_duration = 0.0
            for j in xrange(next_index, target_index):
                segment_start = stop_sequence[j]
                segment_end = stop_sequence[j + 1]
                single_travel_duration = preprocessed_segment_data.get((segment_start, segment_end), average_travel_duration)
                total_travel_duration += single_travel_duration
            time_from_stop = helper(preprocessed_segment_data, average_travel_duration, dist_along_route, prev_record, next_record)
            total_travel_duration += time_from_stop
        result.loc[len(result)] = [trip_id, route_id, target_stop, vehicle_id, time_of_day, service_date,
                                   dist_along_route, count + 1, total_travel_duration, shape_id]
    return result
def generate_actual_arrival_time(full_history, segment_df, route_stop_dist):
    """
    Calculate the actual arrival time from the dataset
    Algorithm:
    Build the empty dataframe
    for row in segment_df:
        get trip_id, route_id, target_stop, service_date, etc
        get single_history data according to the trip id and the service date
        get single_route_stop_dist according to the route_id
        get stop_sequence from single_route_stop_dist
        get the dist_along_route for the target_stop from the single_route_stop_dist
        set prev_index, next_index = stop_sequence.index(target_stop)
        while stop_sequence(prev_index) not in set(single_history.next_stop_id):
            prev_index -= 1
            if prev_index == -1:
                break
        if prev_index == -1:
            continue
        prev_stop = stop_sequence(prev_index)
        next_index += 1
        while stop_sequence(next_index) not in set(single_history.next_stop_id):
            next_index += 1
            if next_index == len(stop_sequence):
                break
        if next_index == len(stop_sequence):
            continue
        next_stop = stop_sequence(next_itndex)
        prev_record = single_history[single_history.next_stop_id == prev_stop].iloc[-1]
        prev_time = prev_record.get('timestamp')
        if prev_record.dist_from_stop == 0:
            actual_arrival_time = prev_time - time_of_day
            save the record
            continue to next row
        next_record = single_history[single_history.next_stop_id == next_stop].iloc[-1]
        next_time = next_record.get('timestamp')
        travel_duration = next_time - prev_time
        prev_distance = prev_record.get('dist_along_route') - prev_record.get('dist_from_stop')
        next_distance = prev_record.get('dist_along_route') - prev_record.get('dist_from_stop')
        distance_prev_next = next_distance - prev_distance
        distance_prev_stop = single_route_stop_dist[single_route_stop_dist.stop_id == target_stop].iloc[0]['dist_along_route'] - prev_distance
        ratio = distance_prev_stop / distance_prev_next
        time_from_stop = ratio * travel_duration
        arrival_time = time_from_stop + prev_time
        actual_arrival_time = arrival_time - time_of_day
        save the record
    :param full_history: dataframe for the historical data
    :param segment_df: dataframe for the preprocessed average travel duration for the segmet data
    :param route_stop_dist: dataframe for the route_stop_dist.csv file
    :return: dataframe including both of the estimated arrival time and actual arrival time
    """
    result = pd.DataFrame(
        columns=['trip_id', 'route_id', 'stop_id', 'vehicle_id', 'time_of_day', 'service_date', 'dist_along_route', 'stop_num_from_call', 'estimated_arrival_time', 'actual_arrival_time', 'shape_id'])
    grouped_list = list(segment_df.groupby(['service_date', 'trip_id', 'stop_id']))
    print 'length of the segment_df is: ', len(grouped_list)
    for i in xrange(len(grouped_list)):
        if i % 1000 == 0:
            print i
        name, item = grouped_list[i]
        service_date, trip_id, target_stop = name
        route_id = item.iloc[0]['route_id']
        shape_id = item.iloc[0]['shape_id']
        single_route_stop_dist = route_stop_dist[route_stop_dist.shape_id == shape_id]
        stop_sequence = list(single_route_stop_dist.stop_id)
        # stop_sequence_str = [str(int(stop_id)) for stop_id in stop_sequence]
        target_index = stop_sequence.index(target_stop)
        dist_along_route = single_route_stop_dist[single_route_stop_dist.stop_id == target_stop].iloc[0][
            'dist_along_route']
        vehicle_id = item.iloc[0]['vehicle_id']
        single_history = full_history[(full_history.service_date == service_date) & (full_history.trip_id == trip_id)]
        single_history = filter_single_history(single_history, stop_sequence)
        if single_history is None:
            continue
        # scan outward from the target stop for the nearest stops that actually
        # appear in the filtered history
        prev_index, next_index = target_index, target_index + 1
        while stop_sequence[prev_index] not in set(single_history.next_stop_id):
            prev_index -= 1
            if prev_index == -1:
                break
        if prev_index == -1:
            continue
        prev_stop = stop_sequence[prev_index]
        # NOTE(review): when the target is the final stop, next_index starts at
        # len(stop_sequence) and stop_sequence[next_index] below raises
        # IndexError before the guard runs -- confirm targets exclude the
        # terminal stop.
        while stop_sequence[next_index] not in set(single_history.next_stop_id):
            next_index += 1
            if next_index == len(stop_sequence):
                break
        if next_index == len(stop_sequence):
            continue
        next_stop = stop_sequence[next_index]
        if prev_stop == next_stop:
            print "error"
            continue
        prev_record = single_history[single_history.next_stop_id == prev_stop].iloc[-1]
        prev_time = prev_record.get('timestamp')
        prev_time = datetime.strptime(prev_time, '%Y-%m-%dT%H:%M:%SZ')
        if prev_record.dist_from_stop == 0 and prev_record.next_stop_id == target_stop:
            # bus was observed exactly at the target stop
            timestamp = prev_time
        else:
            # interpolate the arrival timestamp between the two observations
            next_record = single_history[single_history.next_stop_id == next_stop].iloc[-1]
            next_time = next_record.get('timestamp')
            next_time = datetime.strptime(next_time, '%Y-%m-%dT%H:%M:%SZ')
            prev_distance = float(prev_record.get('total_distance'))
            next_distance = float(next_record.get('total_distance'))
            timestamp = calculate_arrival_time(dist_along_route, prev_distance, next_distance, prev_time, next_time)
        # emit one output row per api query for this (date, trip, stop) group
        for j in xrange(len(item)):
            single_record = item.iloc[j]
            time_of_day = single_record.get('time_of_day')
            stop_num_from_call = single_record.get('stop_num_from_call')
            estimated_arrival_time = single_record.get('estimated_arrival_time')
            time_of_day = datetime.strptime(time_of_day, '%Y-%m-%d %H:%M:%S')
            actual_arrival_time = timestamp - time_of_day
            actual_arrival_time = actual_arrival_time.total_seconds()
            dist_along_route = single_record.get('dist_along_route')
            result.loc[len(result)] = [trip_id, route_id, target_stop, vehicle_id, str(time_of_day), service_date, dist_along_route, stop_num_from_call, estimated_arrival_time, actual_arrival_time, shape_id]
    return result
#################################################################################################################
# preprocess for baseline algorithm #
#################################################################################################################
def preprocess_baseline1(segment_df):
    """
    Preprocessing for the simplest baseline: weather and time of day are ignored.

    The segment records are grouped by their start and end stops, and the mean
    travel duration of each group is computed.

    :param segment_df: dataframe of segment records containing at least the
        columns 'segment_start', 'segment_end' and 'travel_duration'
    :return: dataframe with one row per (segment_start, segment_end) pair and
        the averaged 'travel_duration'
    """
    average_duration = segment_df.groupby(['segment_start', 'segment_end'])['travel_duration'].mean()
    return average_duration.reset_index()
def preprocess_baseline2(segment_df, rush_hour):
    """
    Preprocess the segment data considering the weather and the rush hour.

    Algorithm:
        Add a new boolean 'rush_hour' column derived from the timestamp
        Group by (segment_start, segment_end, weather, rush_hour)
        Average the travel duration of each group

    :param segment_df: dataframe from the final_segment.csv file; must contain
        'segment_start', 'segment_end', 'weather', 'timestamp' and
        'travel_duration' columns.  Timestamps look like
        '%Y-%m-%dT%H:%M:%SZ', so characters [11:19] are the HH:MM:SS part.
    :param rush_hour: tuple expressing the rush hour window,
        example: ('17:00:00', '20:00:00')
    :return: dataframe for the baseline2
    """
    # Bug fix: the original assigned the new column straight onto the caller's
    # dataframe (the "new" dataframe was just another reference), mutating the
    # input as a side effect.  Work on a copy instead.
    new_segment_df = segment_df.copy()
    new_segment_df['rush_hour'] = new_segment_df['timestamp'].apply(
        lambda x: rush_hour[0] < x[11:19] < rush_hour[1])
    grouped = new_segment_df.groupby(['segment_start', 'segment_end', 'weather', 'rush_hour'])
    result = grouped['travel_duration'].mean()
    return result.reset_index()
#################################################################################################################
# main functions #
#################################################################################################################
"""
Functions for users
"""
# baseline1
def obtain_baseline1(segment_df, api_data, route_stop_dist, trips, full_history, tablename=None, save_path=None, engine=None):
    """
    Generate the predicted arrival time and actual arrival time with baseline1.

    :param segment_df: the dataframe for the segment table
    :param api_data: the dataframe for the api_data table
    :param route_stop_dist: the dataframe for the route_stop_dist table
    :param trips: the dataframe for the trips table
    :param full_history: the dataframe for the history table
    :param tablename: base name for the exported csv file / database table
    :param save_path: path of a csv file to store the baseline1 result
    :param engine: database connect engine
    :return: the dataframe for baseline1 result
    """
    averaged_segments = preprocess_baseline1(segment_df)
    estimated = generate_estimated_arrival_time(api_data, averaged_segments, route_stop_dist, trips)
    baseline_result = generate_actual_arrival_time(full_history, estimated, route_stop_dist)
    # Optionally persist the result as a csv file and/or a database table.
    if save_path is not None:
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        baseline_result.to_csv(save_path + tablename + '.csv')
    if engine is not None:
        baseline_result.to_sql(name=tablename, con=engine, if_exists='replace', index_label='id')
    return baseline_result
# baseline2
def obtain_baseline2(segment_df, api_data, route_stop_dist, trips, full_history, rush_hour, weather_df, tablename=None, save_path=None, engine=None):
    """
    Generate the predicted arrival time and actual arrival time with baseline2
    :param segment_df: the dataframe for the segment table
    :param api_data: the dataframe for the api_data table
    :param route_stop_dist: the dataframe for the route_stop_dist table
    :param trips: the dataframe for the trips table
    :param full_history: the dataframe for the history table
    :param rush_hour: tuple of string to represent the rush hour, example: ('17:00:00', '20:00:00')
    :param weather_df: dataframe mapping each 'date' to a 'weather' value
    :param tablename: base name for the exported csv file / database table
    :param save_path: path of a csv file to store the baseline2 result
    :param engine: database connect engine
    :return: the dataframe for baseline2 result
    """
    preprocessed_segment_data = preprocess_baseline2(segment_df, rush_hour)
    # Tag each api record as rush hour or not, using the HH:MM:SS part of
    # time_of_day.  NOTE(review): this mutates the caller's api_data dataframe.
    api_data['rush_hour'] = api_data['time_of_day'].apply(lambda x: rush_hour[1] > x[11:19] > rush_hour[0])
    # Segments are keyed by (weather, rush_hour); api records by (date, rush_hour).
    grouped_segment_df = preprocessed_segment_data.groupby(['weather', 'rush_hour'])
    keys = grouped_segment_df.groups.keys()
    grouped_api_data = api_data.groupby(['date', 'rush_hour'])
    test_date_list = sorted(list(set(api_data['date'])))
    estimated_result_list = []
    for current_date in test_date_list:
        # Look up the weather observed on this date, then pair each api group
        # with the segment averages computed under the same conditions.
        weather = weather_df[weather_df.date == current_date].iloc[0]['weather']
        # rush hour
        if (weather, True) in keys:
            current_result = generate_estimated_arrival_time(grouped_api_data.get_group((current_date, True)), grouped_segment_df.get_group((weather, True)), route_stop_dist, trips)
            estimated_result_list.append(current_result)
        # non rush hour
        if (weather, False) in keys:
            current_result = generate_estimated_arrival_time(grouped_api_data.get_group((current_date, False)), grouped_segment_df.get_group((weather, False)), route_stop_dist, trips)
            estimated_result_list.append(current_result)
    segment_df = pd.concat(estimated_result_list, ignore_index=True)
    baseline_result = generate_actual_arrival_time(full_history, segment_df, route_stop_dist)
    # Optionally persist the result as a csv file and/or a database table.
    if save_path is not None:
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        baseline_result.to_csv(save_path + tablename + '.csv')
    if engine is not None:
        baseline_result.to_sql(name=tablename, con=engine, if_exists='replace', index_label='id')
    return baseline_result
# baseline3
def obtain_baseline3(segment_df, api_data, route_stop_dist, trips, full_history, tablename=None, save_path=None, engine=None):
    """
    Generate the predicted arrival time and actual arrival time with baseline3.

    :param segment_df: the dataframe for the segment table
    :param api_data: the dataframe for the api_data table
    :param route_stop_dist: the dataframe for the route_stop_dist table
    :param trips: the dataframe for the trips table
    :param full_history: the dataframe for the history table
    :param tablename: base name for the exported csv file / database table
    :param save_path: path of a csv file to store the baseline3 result
    :param engine: database connect engine
    :return: the dataframe for baseline3 result
    """
    estimated = generate_estimated_arrival_time_baseline3(api_data, segment_df, route_stop_dist, trips)
    baseline_result = generate_actual_arrival_time(full_history, estimated, route_stop_dist)
    # Optionally persist the result as a csv file and/or a database table.
    if save_path is not None:
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        baseline_result.to_csv(save_path + tablename + '.csv')
    if engine is not None:
        baseline_result.to_sql(name=tablename, con=engine, if_exists='replace', index_label='id')
    return baseline_result
| 51.200997 | 207 | 0.677838 |
011e9c301cc6770e6993ce5dbd8345fccf99482a | 213 | py | Python | Python/100Excersises/76 to 100/85/.history/85_20201119144236.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | null | null | null | Python/100Excersises/76 to 100/85/.history/85_20201119144236.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | 1 | 2021-07-24T03:18:30.000Z | 2021-07-24T12:45:07.000Z | Python/100Excersises/76 to 100/85/.history/85_20201119144236.py | sugamkarki/NAMI-Year-II-TERM-I-Group_Project | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | null | null | null | myFile=open("c.txt","r")
countriesRaw=myFile.read()
countriesNeat=''
countriesRaw.split()
list1=list(map(list,countriesRaw))
# print(countriesRaw)
print(type(list1))
for country in countriesRaw:
print(country) | 23.666667 | 34 | 0.760563 |
ec2173f389509fb8f8ae98e5bcd56ad6546a29a1 | 10,844 | py | Python | models/dataloader/transform/wav.py | ZhangYikaii/Proto-CAT | 57bb2c7fd88a9489faa88e3b904218bf5fb01b4e | [
"MIT"
] | 2 | 2021-10-03T02:31:22.000Z | 2021-10-09T01:31:50.000Z | models/dataloader/transform/wav.py | ZhangYikaii/Proto-CAT | 57bb2c7fd88a9489faa88e3b904218bf5fb01b4e | [
"MIT"
] | null | null | null | models/dataloader/transform/wav.py | ZhangYikaii/Proto-CAT | 57bb2c7fd88a9489faa88e3b904218bf5fb01b4e | [
"MIT"
] | null | null | null | # import numpy as np
# import torch
# import torchvision as tv
# import librosa
# import random
# def scale(old_value, old_min, old_max, new_min, new_max):
# old_range = (old_max - old_min)
# new_range = (new_max - new_min)
# new_value = (((old_value - old_min) * new_range) / old_range) + new_min
# return new_value
# class ToTensor1D(tv.transforms.ToTensor):
# def __call__(self, tensor: np.ndarray):
# tensor_2d = super(ToTensor1D, self).__call__(tensor[..., np.newaxis])
# return tensor_2d.squeeze_(0)
# class RandomNoise():
# def __init__(self, min_noise=0.0, max_noise=0.05): #0.002, 0.01
# super(RandomNoise, self).__init__()
# self.min_noise = min_noise
# self.max_noise = max_noise
# def addNoise(self, wave):
# noise_val = random.uniform(self.min_noise, self.max_noise)
# noise = torch.from_numpy(np.random.normal(0, noise_val, wave.shape[0]))
# noisy_wave = wave + noise
# return noisy_wave
# def __call__(self, x):
# return self.addNoise(x)
# class RandomScale():
# def __init__(self, max_scale: float = 1.25):
# super(RandomScale, self).__init__()
# self.max_scale = max_scale
# @staticmethod
# def random_scale(max_scale: float, signal: torch.Tensor) -> torch.Tensor:
# scaling = np.power(max_scale, np.random.uniform(-1, 1)) #between 1.25**(-1) and 1.25**(1)
# output_size = int(signal.shape[-1] * scaling)
# ref = torch.arange(output_size, device=signal.device, dtype=signal.dtype).div_(scaling)
# # ref1 is of size output_size
# ref1 = ref.clone().type(torch.int64)
# ref2 = torch.min(ref1 + 1, torch.full_like(ref1, signal.shape[-1] - 1, dtype=torch.int64))
# r = ref - ref1.type(ref.type())
# scaled_signal = signal[..., ref1] * (1 - r) + signal[..., ref2] * r
# return scaled_signal
# def __call__(self, x: torch.Tensor) -> torch.Tensor:
# return self.random_scale(self.max_scale, x)
# class RandomCrop():
# def __init__(self, out_len: int = 44100, train: bool = True):
# super(RandomCrop, self).__init__()
# self.out_len = out_len
# self.train = train
# def random_crop(self, signal: torch.Tensor) -> torch.Tensor:
# if self.train:
# left = np.random.randint(0, signal.shape[-1] - self.out_len)
# else:
# left = int(round(0.5 * (signal.shape[-1] - self.out_len)))
# orig_std = signal.float().std() * 0.5
# output = signal[..., left:left + self.out_len]
# out_std = output.float().std()
# if out_std < orig_std:
# output = signal[..., :self.out_len]
# new_out_std = output.float().std()
# if orig_std > new_out_std > out_std:
# output = signal[..., -self.out_len:]
# return output
# def __call__(self, x: torch.Tensor) -> torch.Tensor:
# return self.random_crop(x) if x.shape[-1] > self.out_len else x
# class RandomPadding():
# def __init__(self, out_len: int = 88200, train: bool = True):
# super(RandomPadding, self).__init__()
# self.out_len = out_len
# self.train = train
# def random_pad(self, signal: torch.Tensor) -> torch.Tensor:
# if self.train:
# left = np.random.randint(0, self.out_len - signal.shape[-1])
# else:
# left = int(round(0.5 * (self.out_len - signal.shape[-1])))
# right = self.out_len - (left + signal.shape[-1])
# pad_value_left = signal[..., 0].float().mean().to(signal.dtype)
# pad_value_right = signal[..., -1].float().mean().to(signal.dtype)
# output = torch.cat((
# torch.zeros(signal.shape[:-1] + (left,), dtype=signal.dtype, device=signal.device).fill_(pad_value_left),
# signal,
# torch.zeros(signal.shape[:-1] + (right,), dtype=signal.dtype, device=signal.device).fill_(pad_value_right)
# ), dim=-1)
# return output
# def __call__(self, x: torch.Tensor) -> torch.Tensor:
# return self.random_pad(x) if x.shape[-1] < self.out_len else x
# class FrequencyMask():
# def __init__(self, max_width, numbers):
# super(FrequencyMask, self).__init__()
# self.max_width = max_width
# self.numbers = numbers
# def addFreqMask(self, wave):
# #print(wave.shape)
# for _ in range(self.numbers):
# #choose the length of mask
# mask_len = random.randint(0, self.max_width)
# start = random.randint(0, wave.shape[1] - mask_len) #start of the mask
# end = start + mask_len
# wave[:, start:end, : ] = 0
# return wave
# def __call__(self, wave):
# return self.addFreqMask(wave)
# class TimeMask():
# def __init__(self, max_width, numbers):
# super(TimeMask, self).__init__()
# self.max_width = max_width
# self.numbers = numbers
# def addTimeMask(self, wave):
# for _ in range(self.numbers):
# #choose the length of mask
# mask_len = random.randint(0, self.max_width)
# start = random.randint(0, wave.shape[2] - mask_len) #start of the mask
# end = start + mask_len
# wave[ : , : , start:end] = 0
# return wave
# def __call__(self, wave):
# return self.addTimeMask(wave)
# for LRW:
import cv2
import random
import numpy as np
__all__ = ['Compose', 'Normalize', 'CenterCrop', 'RgbToGray', 'RandomCrop',
'HorizontalFlip', 'AddNoise', 'NormalizeUtterance']
class Compose(object):
    """Apply a sequence of preprocessing transforms one after another.

    Args:
        preprocess (list of callables): transforms applied in order.
    """
    def __init__(self, preprocess):
        self.preprocess = preprocess

    def __call__(self, sample):
        result = sample
        for transform in self.preprocess:
            result = transform(result)
        return result

    def __repr__(self):
        body = ''.join('\n    {0}'.format(t) for t in self.preprocess)
        return self.__class__.__name__ + '(' + body + '\n)'
class RgbToGray(object):
    """Convert a stack of RGB frames to grayscale.

    Each frame (H x W x C, values in [0, 255]) is converted with OpenCV and
    the results are stacked along a new leading axis.
    """
    def __call__(self, frames):
        """
        Args:
            frames: iterable of RGB frames (numpy arrays).
        Returns:
            numpy.ndarray: grayscale frames stacked along axis 0.
        """
        gray_frames = [cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) for frame in frames]
        return np.stack(gray_frames, axis=0)

    def __repr__(self):
        return self.__class__.__name__ + '()'
class Normalize(object):
    """Normalize an array with a fixed mean and standard deviation."""
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, frames):
        """Return (frames - mean) / std."""
        return (frames - self.mean) / self.std

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class CenterCrop(object):
    """Crop each frame of a (T, H, W) array to the target size, centered."""
    def __init__(self, size):
        self.size = size

    def __call__(self, frames):
        """
        Args:
            frames (numpy.ndarray): array of shape (T, H, W).
        Returns:
            numpy.ndarray: frames cropped to (T, th, tw).
        """
        _, height, width = frames.shape
        crop_h, crop_w = self.size
        # Offsets place the crop window in the middle of each frame.
        off_w = int(round((width - crop_w)) / 2.)
        off_h = int(round((height - crop_h)) / 2.)
        return frames[:, off_h:off_h + crop_h, off_w:off_w + crop_w]
class RandomCrop(object):
    """Crop each frame of a (T, H, W) array to the target size at a random position."""
    def __init__(self, size):
        self.size = size

    def __call__(self, frames):
        """
        Args:
            frames (numpy.ndarray): array of shape (T, H, W).
        Returns:
            numpy.ndarray: frames cropped to (T, th, tw).
        """
        _, height, width = frames.shape
        crop_h, crop_w = self.size
        # One random offset per clip so every frame stays spatially aligned.
        # (Width is drawn before height, matching the original RNG order.)
        off_w = random.randint(0, width - crop_w)
        off_h = random.randint(0, height - crop_h)
        return frames[:, off_h:off_h + crop_h, off_w:off_w + crop_w]

    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)
class HorizontalFlip(object):
    """Mirror a whole clip of frames left-to-right with a given probability."""
    def __init__(self, flip_ratio):
        self.flip_ratio = flip_ratio

    def __call__(self, frames):
        """
        Args:
            frames (numpy.ndarray): array of shape (T, H, W), flipped in place
                with probability flip_ratio.
        Returns:
            numpy.ndarray: the (possibly flipped) frames.
        """
        clip_len, _, _ = frames.shape
        # One coin toss per clip: either every frame is flipped or none.
        if random.random() < self.flip_ratio:
            for idx in range(clip_len):
                frames[idx] = cv2.flip(frames[idx], 1)
        return frames
class NormalizeUtterance():
    """Normalize a raw audio signal to zero mean and unit variance.

    Bug fix: the original guard set the divisor to 0. when the standard
    deviation was 0., which still divided by zero and produced nan/inf.
    A constant (silent) signal now normalizes to all zeros instead.
    """
    def __call__(self, signal):
        signal_std = np.std(signal)
        if signal_std == 0.:
            # Avoid division by zero for constant/silent signals.
            signal_std = 1.
        signal_mean = np.mean(signal)
        return (signal - signal_mean) / signal_std
class AddNoise(object):
    """Mix a random slice of a noise track into a signal at a random SNR.

    The SNR level 9999 is a sentinel meaning "add no noise".
    """
    def __init__(self, noise, snr_levels=[-5, 0, 5, 10, 15, 20, 9999]):
        assert noise.dtype in [np.float32, np.float64], "noise only supports float data type"
        self.noise = noise
        self.snr_levels = snr_levels

    def get_power(self, clip):
        """Mean squared amplitude of the clip."""
        squared = clip.copy() ** 2
        return np.sum(squared) / (len(squared) * 1.0)

    def __call__(self, signal):
        assert signal.dtype in [np.float32, np.float64], "signal only supports float32 data type"
        snr_target = random.choice(self.snr_levels)
        if snr_target == 9999:
            return signal
        # -- take a random window of the noise track matching the signal length
        start_idx = random.randint(0, len(self.noise) - len(signal))
        noise_clip = self.noise[start_idx:start_idx + len(signal)]
        sig_power = self.get_power(signal)
        noise_clip_power = self.get_power(noise_clip)
        # Scale the noise so the power ratio hits the target SNR (in dB).
        factor = (sig_power / noise_clip_power) / (10 ** (snr_target / 10.0))
        return (signal + noise_clip * np.sqrt(factor)).astype(np.float32)
002328c71992cb6f88a78aacd6d4ce3836f825ab | 912 | py | Python | BayesMadeSimple/dice_soln.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | 2 | 2021-01-09T15:57:26.000Z | 2021-11-29T01:44:21.000Z | BayesMadeSimple/dice_soln.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | 5 | 2019-11-15T02:00:26.000Z | 2021-01-06T04:26:40.000Z | BayesMadeSimple/dice_soln.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | null | null | null | """This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
from thinkbayes import Suite
class Dice(Suite):
    """Represents hypotheses about which die was rolled."""

    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        hypo: integer number of sides on the die
        data: integer die roll
        """
        # A die cannot show a value larger than its number of sides;
        # otherwise every side is equally likely.
        return 0 if hypo < data else 1.0 / hypo
def main():
    """Update a Dice suite with observed rolls and print the posteriors."""
    suite = Dice([4, 6, 8, 12, 20])

    suite.Update(6)
    print('After one 6')
    suite.Print()

    for roll in [8, 7, 7, 5, 4]:
        suite.Update(roll)

    print('After more rolls')
    suite.Print()
if __name__ == '__main__':
main()
| 20.727273 | 68 | 0.623904 |
2c1727b6b32a8cd31b8a4f335cf3ac62c55c77fe | 71,850 | py | Python | mesonbuild/mesonlib.py | MisterDA/meson | e6a167ce092a36017a4ff56b3fb045be62377a25 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mesonlib.py | MisterDA/meson | e6a167ce092a36017a4ff56b3fb045be62377a25 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mesonlib.py | MisterDA/meson | e6a167ce092a36017a4ff56b3fb045be62377a25 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012-2020 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
from pathlib import Path
import enum
import sys
import stat
import time
import platform, subprocess, operator, os, shlex, shutil, re
import collections
from functools import lru_cache, wraps, total_ordering
from itertools import tee, filterfalse
from tempfile import TemporaryDirectory
import typing as T
import uuid
import textwrap
from mesonbuild import mlog
if T.TYPE_CHECKING:
from .build import ConfigurationData
from .coredata import KeyedOptionDictType, UserOption
from .compilers.compilers import CompilerType
from .interpreterbase import ObjectHolder
FileOrString = T.Union['File', str]
_T = T.TypeVar('_T')
_U = T.TypeVar('_U')
# Feature detection: fcntl provides POSIX file locking, msvcrt the Windows
# equivalent.  The flags record which module is importable on this platform.
have_fcntl = False
have_msvcrt = False
# TODO: this is such a hack, this really should be either in coredata or in the
# interpreter
# {subproject: project_meson_version}
project_meson_versions = collections.defaultdict(str) # type: T.DefaultDict[str, str]
try:
    import fcntl
    have_fcntl = True
except Exception:
    pass
try:
    import msvcrt
    have_msvcrt = True
except Exception:
    pass
from glob import glob
# Work out how to re-invoke the running Python/meson.
if os.path.basename(sys.executable) == 'meson.exe':
    # In Windows and using the MSI installed executable.
    python_command = [sys.executable, 'runpython']
else:
    python_command = [sys.executable]
# Filled in later by set_meson_command().
meson_command = None
class MesonException(Exception):
    '''Base class for all exceptions raised by Meson.'''

    def __init__(self, *args: object, file: T.Optional[str] = None,
                 lineno: T.Optional[int] = None, colno: T.Optional[int] = None):
        super().__init__(*args)
        # Optional source location, used when reporting build-file errors.
        self.file, self.lineno, self.colno = file, lineno, colno
class EnvironmentException(MesonException):
    '''Raised for errors while processing and creating the build environment.'''
class GitException(MesonException):
    '''Raised when an invoked git command fails.'''

    def __init__(self, msg: str, output: T.Optional[str] = None):
        super().__init__(msg)
        # Keep the (stripped) command output for callers that want details.
        self.output = output.strip() if output else ''
GIT = shutil.which('git')
def git(cmd: T.List[str], workingdir: str, check: bool = False, **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
    """Run a git command in *workingdir* and return (process, stdout, stderr).

    With check=True a non-zero exit status raises GitException carrying stderr.
    """
    full_cmd = [GIT] + cmd
    proc, out, err = Popen_safe(full_cmd, cwd=workingdir, **kwargs)
    if check and proc.returncode != 0:
        raise GitException('Git command failed: ' + str(full_cmd), err)
    return proc, out, err
def quiet_git(cmd: T.List[str], workingdir: str, check: bool = False) -> T.Tuple[bool, str]:
    """Run git capturing output; return (success, stdout-or-error-text)."""
    if not GIT:
        message = 'Git program not found.'
        if check:
            raise GitException(message)
        return False, message
    proc, out, err = git(cmd, workingdir, check)
    if proc.returncode != 0:
        return False, err
    return True, out
def verbose_git(cmd: T.List[str], workingdir: str, check: bool = False) -> bool:
    """Run git with output going straight to the console; return success."""
    if not GIT:
        if check:
            raise GitException('Git program not found.')
        return False
    # stdout/stderr=None lets the child inherit our streams.
    proc, _, _ = git(cmd, workingdir, check, stdout=None, stderr=None)
    return proc.returncode == 0
def set_meson_command(mainfile: str) -> None:
    """Set the module-global ``meson_command`` used to re-invoke this Meson.

    :param mainfile: path of the entry point Meson was started from
    """
    global python_command
    global meson_command
    # On UNIX-like systems `meson` is a Python script
    # On Windows `meson` and `meson.exe` are wrapper exes
    if not mainfile.endswith('.py'):
        meson_command = [mainfile]
    elif os.path.isabs(mainfile) and mainfile.endswith('mesonmain.py'):
        # Can't actually run meson with an absolute path to mesonmain.py, it must be run as -m mesonbuild.mesonmain
        meson_command = python_command + ['-m', 'mesonbuild.mesonmain']
    else:
        # Either run uninstalled, or full path to meson-script.py
        meson_command = python_command + [mainfile]
    # We print this value for unit tests.
    if 'MESON_COMMAND_TESTS' in os.environ:
        mlog.log('meson_command is {!r}'.format(meson_command))
def is_ascii_string(astring: T.Union[str, bytes]) -> bool:
    """Return True if *astring* (str or bytes) contains only ASCII.

    Any other type is not inspected and trivially returns True.
    """
    try:
        if isinstance(astring, str):
            # Bug fix: str.encode raises UnicodeEncodeError, which the old
            # `except UnicodeDecodeError` never caught, so non-ASCII str
            # input escaped as an exception instead of returning False.
            # UnicodeError is the common base of both encode/decode errors.
            astring.encode('ascii')
        elif isinstance(astring, bytes):
            astring.decode('ascii')
    except UnicodeError:
        return False
    return True
def check_direntry_issues(direntry_array: T.Union[T.List[T.Union[str, bytes]], str, bytes]) -> None:
    """Warn about non-ASCII directory entries under a non-UTF-8 locale.

    Under such locales os.stat may be unable to decode unicode filenames.
    There is no way to reset both the preferred encoding and the filesystem
    encoding, so the best we can do is warn about it.
    """
    import locale
    encoding = locale.getpreferredencoding()
    # A UTF-8 locale is fine, and Windows is exempt from this check.
    if encoding.upper() == 'UTF-8' or is_windows():
        return
    entries = direntry_array if isinstance(direntry_array, list) else [direntry_array]
    for entry in entries:
        if not is_ascii_string(entry):
            mlog.warning(textwrap.dedent('''
                You are using {!r} which is not a Unicode-compatible
                locale but you are trying to access a file system entry called {!r} which is
                not pure ASCII. This may cause problems.
                '''.format(encoding, entry)), file=sys.stderr)
# Put this in objects that should not get dumped to pickle files
# by accident.
import threading
an_unpicklable_object = threading.Lock()
class FileMode:
    """Install-time file permissions: symbolic perms plus owner/group names.

    The symbolic string (e.g. 'rwxr-xr-x') is parsed once into numeric
    st_mode bits usable with os.chmod(); -1 means "leave permissions alone".
    """
    # The first triad is for owner permissions, the second for group permissions,
    # and the third for others (everyone else).
    # For the 1st character:
    #  'r' means can read
    #  '-' means not allowed
    # For the 2nd character:
    #  'w' means can write
    #  '-' means not allowed
    # For the 3rd character:
    #  'x' means can execute
    #  's' means can execute and setuid/setgid is set (owner/group triads only)
    #  'S' means cannot execute and setuid/setgid is set (owner/group triads only)
    #  't' means can execute and sticky bit is set ("others" triads only)
    #  'T' means cannot execute and sticky bit is set ("others" triads only)
    #  '-' means none of these are allowed
    #
    # The meanings of 'rwx' perms is not obvious for directories; see:
    # https://www.hackinglinuxexposed.com/articles/20030424.html
    #
    # For information on this notation such as setuid/setgid/sticky bits, see:
    # https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
    symbolic_perms_regex = re.compile('[r-][w-][xsS-]' # Owner perms
                                      '[r-][w-][xsS-]' # Group perms
                                      '[r-][w-][xtT-]') # Others perms

    def __init__(self, perms: T.Optional[str] = None, owner: T.Optional[str] = None,
                 group: T.Optional[str] = None):
        self.perms_s = perms
        self.perms = self.perms_s_to_bits(perms)
        self.owner = owner
        self.group = group

    def __repr__(self) -> str:
        # Bug fix: the format string previously lacked the closing '>'.
        ret = '<FileMode: {!r} owner={} group={}>'
        return ret.format(self.perms_s, self.owner, self.group)

    @classmethod
    def perms_s_to_bits(cls, perms_s: T.Optional[str]) -> int:
        '''
        Does the opposite of stat.filemode(): converts strings of the form
        'rwxr-xr-x' to st_mode enums which can be passed to os.chmod().

        Returns -1 when perms_s is None ("do not touch the permissions").
        Raises MesonException for anything that is not a valid 9-character
        symbolic permission string.
        '''
        if perms_s is None:
            # No perms specified, we will not touch the permissions
            return -1
        eg = 'rwxr-xr-x'
        if not isinstance(perms_s, str):
            msg = 'Install perms must be a string. For example, {!r}'
            raise MesonException(msg.format(eg))
        if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
            msg = 'File perms {!r} must be exactly 9 chars. For example, {!r}'
            raise MesonException(msg.format(perms_s, eg))
        perms = 0
        # Owner perms
        if perms_s[0] == 'r':
            perms |= stat.S_IRUSR
        if perms_s[1] == 'w':
            perms |= stat.S_IWUSR
        if perms_s[2] == 'x':
            perms |= stat.S_IXUSR
        elif perms_s[2] == 'S':
            # setuid without execute
            perms |= stat.S_ISUID
        elif perms_s[2] == 's':
            perms |= stat.S_IXUSR
            perms |= stat.S_ISUID
        # Group perms
        if perms_s[3] == 'r':
            perms |= stat.S_IRGRP
        if perms_s[4] == 'w':
            perms |= stat.S_IWGRP
        if perms_s[5] == 'x':
            perms |= stat.S_IXGRP
        elif perms_s[5] == 'S':
            # setgid without execute
            perms |= stat.S_ISGID
        elif perms_s[5] == 's':
            perms |= stat.S_IXGRP
            perms |= stat.S_ISGID
        # Others perms
        if perms_s[6] == 'r':
            perms |= stat.S_IROTH
        if perms_s[7] == 'w':
            perms |= stat.S_IWOTH
        if perms_s[8] == 'x':
            perms |= stat.S_IXOTH
        elif perms_s[8] == 'T':
            # sticky bit without execute
            perms |= stat.S_ISVTX
        elif perms_s[8] == 't':
            perms |= stat.S_IXOTH
            perms |= stat.S_ISVTX
        return perms
class File:
    """A source- or build-tree file, identified by (is_built, subdir, fname).

    NOTE(review): several accessors are memoized with lru_cache on instance
    methods, which keeps every File instance alive for the process lifetime
    - this looks deliberate (File creation itself is cached), but confirm.
    """
    def __init__(self, is_built: bool, subdir: str, fname: str):
        self.is_built = is_built
        self.subdir = subdir
        self.fname = fname
        # Precomputed so __hash__ and the early-out in __eq__ are cheap.
        self.hash = hash((is_built, subdir, fname))
    def __str__(self) -> str:
        return self.relative_name()
    def __repr__(self) -> str:
        ret = '<File: {0}'
        if not self.is_built:
            ret += ' (not built)'
        ret += '>'
        return ret.format(self.relative_name())
    @staticmethod
    @lru_cache(maxsize=None)
    def from_source_file(source_root: str, subdir: str, fname: str) -> 'File':
        """Create a File for an existing source file; raises if it is missing."""
        if not os.path.isfile(os.path.join(source_root, subdir, fname)):
            raise MesonException('File %s does not exist.' % fname)
        return File(False, subdir, fname)
    @staticmethod
    def from_built_file(subdir: str, fname: str) -> 'File':
        """Create a File that lives in the build directory."""
        return File(True, subdir, fname)
    @staticmethod
    def from_absolute_file(fname: str) -> 'File':
        """Create a File from an absolute path (stored with an empty subdir)."""
        return File(False, '', fname)
    @lru_cache(maxsize=None)
    def rel_to_builddir(self, build_to_src: str) -> str:
        """Path of this file relative to the build directory."""
        if self.is_built:
            return self.relative_name()
        else:
            # Source files are reached through the build->source prefix.
            return os.path.join(build_to_src, self.subdir, self.fname)
    @lru_cache(maxsize=None)
    def absolute_path(self, srcdir: str, builddir: str) -> str:
        """Absolute path, rooted at srcdir or builddir as appropriate."""
        absdir = srcdir
        if self.is_built:
            absdir = builddir
        return os.path.join(absdir, self.relative_name())
    def endswith(self, ending: str) -> bool:
        return self.fname.endswith(ending)
    def split(self, s: str) -> T.List[str]:
        return self.fname.split(s)
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, File):
            return NotImplemented
        if self.hash != other.hash:
            # Unequal hashes can never compare equal; skip the tuple compare.
            return False
        return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)
    def __hash__(self) -> int:
        return self.hash
    @lru_cache(maxsize=None)
    def relative_name(self) -> str:
        """Path relative to the tree root: subdir joined with fname."""
        return os.path.join(self.subdir, self.fname)
def get_compiler_for_source(compilers: T.Iterable['CompilerType'], src: str) -> 'CompilerType':
    """Return the first compiler in *compilers* that can compile *src*.

    Raises MesonException when no compiler accepts the file.
    """
    for candidate in compilers:
        if candidate.can_compile(src):
            return candidate
    raise MesonException('No specified compiler can handle file {!s}'.format(src))
def classify_unity_sources(compilers: T.Iterable['CompilerType'], sources: T.Iterable[str]) -> T.Dict['CompilerType', T.List[str]]:
    """Group *sources* by the compiler (from *compilers*) that handles each one."""
    by_compiler = {}  # type: T.Dict[CompilerType, T.List[str]]
    for source in sources:
        handler = get_compiler_for_source(compilers, source)
        by_compiler.setdefault(handler, []).append(source)
    return by_compiler
class MachineChoice(enum.IntEnum):
    """Enum class representing one of the two abstract machine names used in
    most places: the build, and host, machines.
    """
    BUILD = 0
    HOST = 1

    def get_lower_case_name(self) -> str:
        """Return 'build' or 'host'."""
        names = PerMachine('build', 'host')
        return names[self]

    def get_prefix(self) -> str:
        """Option-name prefix: 'build.' for the build machine, '' for host."""
        prefixes = PerMachine('build.', '')
        return prefixes[self]
class PerMachine(T.Generic[_T]):
    """A pair of values, one for the build machine and one for the host."""

    def __init__(self, build: _T, host: _T) -> None:
        self.build = build
        self.host = host

    def __getitem__(self, machine: MachineChoice) -> _T:
        mapping = {
            MachineChoice.BUILD: self.build,
            MachineChoice.HOST: self.host,
        }
        return mapping[machine]

    def __setitem__(self, machine: MachineChoice, val: _T) -> None:
        attr = machine.get_lower_case_name()
        setattr(self, attr, val)

    def miss_defaulting(self) -> "PerMachineDefaultable[T.Optional[_T]]":
        """Replace values that merely duplicate their fallback with None.

        This is the inverse of ``default_missing``. By removing defaulted
        machines, we can elaborate the original and then redefault them and
        thus avoid repeating the elaboration explicitly.
        """
        thawed = PerMachineDefaultable()  # type: PerMachineDefaultable[T.Optional[_T]]
        thawed.build = self.build
        thawed.host = None if self.host == self.build else self.host
        return thawed

    def __repr__(self) -> str:
        return 'PerMachine({!r}, {!r})'.format(self.build, self.host)
class PerThreeMachine(PerMachine[_T]):
    """Like `PerMachine` but includes `target` too.

    Only the stored value needs tracking for the target machine, so the
    inherited `__getitem__`/`__setitem__` are not extended.
    """

    def __init__(self, build: _T, host: _T, target: _T) -> None:
        super().__init__(build, host)
        self.target = target

    def miss_defaulting(self) -> "PerThreeMachineDefaultable[T.Optional[_T]]":
        """Replace values that merely duplicate their fallback with None.

        This is the inverse of ``default_missing``; see
        PerMachine.miss_defaulting for the rationale.
        """
        thawed = PerThreeMachineDefaultable()  # type: PerThreeMachineDefaultable[T.Optional[_T]]
        thawed.build = self.build
        thawed.host = self.host
        # Collapse target onto host first, then host onto build.
        thawed.target = None if self.target == self.host else self.target
        if thawed.host == thawed.build:
            thawed.host = None
        return thawed

    def matches_build_machine(self, machine: MachineChoice) -> bool:
        """True when the given machine's value equals the build machine's."""
        return self.build == self[machine]

    def __repr__(self) -> str:
        return 'PerThreeMachine({!r}, {!r}, {!r})'.format(self.build, self.host, self.target)
class PerMachineDefaultable(PerMachine[T.Optional[_T]]):
    """A `PerMachine` whose entries may stay None until defaulted."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def default_missing(self) -> "PerMachine[T.Optional[_T]]":
        """Default host to build.

        This allows just specifying nothing in the native case, and just
        host in the cross non-compiler case.
        """
        frozen = PerMachine(self.build, self.host)
        if frozen.host is None:
            frozen.host = frozen.build
        return frozen

    def __repr__(self) -> str:
        return 'PerMachineDefaultable({!r}, {!r})'.format(self.build, self.host)
class PerThreeMachineDefaultable(PerMachineDefaultable, PerThreeMachine[T.Optional[_T]]):
    """Extends `PerThreeMachine` with the ability to default from `None`s.
    """
    def __init__(self) -> None:
        # Call PerThreeMachine.__init__ directly (not super()) so all three
        # machines start unset; PerMachineDefaultable.__init__ only knows two.
        PerThreeMachine.__init__(self, None, None, None)

    def default_missing(self) -> "PerThreeMachine[T.Optional[_T]]":
        """Default host to build and target to host.

        This allows just specifying nothing in the native case, just host in the
        cross non-compiler case, and just target in the native-built
        cross-compiler case.
        """
        freeze = PerThreeMachine(self.build, self.host, self.target)
        # Order matters: host must be resolved before target can default to it.
        if freeze.host is None:
            freeze.host = freeze.build
        if freeze.target is None:
            freeze.target = freeze.host
        return freeze

    def __repr__(self) -> str:
        return 'PerThreeMachineDefaultable({!r}, {!r}, {!r})'.format(self.build, self.host, self.target)
# --- Host platform predicates -------------------------------------------
# Each of these reports whether the *build* machine is the given OS, based
# on platform.system() (except is_cygwin, which uses sys.platform, and
# is_debianlike, which checks for /etc/debian_version).

def is_sunos() -> bool:
    return platform.system().lower() == 'sunos'

def is_osx() -> bool:
    return platform.system().lower() == 'darwin'

def is_linux() -> bool:
    return platform.system().lower() == 'linux'

def is_android() -> bool:
    return platform.system().lower() == 'android'

def is_haiku() -> bool:
    return platform.system().lower() == 'haiku'

def is_openbsd() -> bool:
    return platform.system().lower() == 'openbsd'

def is_windows() -> bool:
    platname = platform.system().lower()
    return platname == 'windows'

def is_cygwin() -> bool:
    # platform.system() returns e.g. 'CYGWIN_NT-...' here; sys.platform is
    # the stable way to detect Cygwin.
    return sys.platform == 'cygwin'

def is_debianlike() -> bool:
    return os.path.isfile('/etc/debian_version')

def is_dragonflybsd() -> bool:
    return platform.system().lower() == 'dragonfly'

def is_netbsd() -> bool:
    return platform.system().lower() == 'netbsd'

def is_freebsd() -> bool:
    return platform.system().lower() == 'freebsd'

def is_irix() -> bool:
    # IRIX reports e.g. 'IRIX' or 'IRIX64', hence startswith.
    return platform.system().startswith('irix')

def is_hurd() -> bool:
    return platform.system().lower() == 'gnu'

def is_qnx() -> bool:
    return platform.system().lower() == 'qnx'

def is_aix() -> bool:
    return platform.system().lower() == 'aix'
def exe_exists(arglist: T.List[str]) -> bool:
    """Return True if running *arglist* succeeds (exit code 0) within 10 seconds.

    A missing executable or a timeout counts as "does not exist".
    """
    try:
        return subprocess.run(arglist, timeout=10).returncode == 0
    except (FileNotFoundError, subprocess.TimeoutExpired):
        return False
@lru_cache(maxsize=None)
def darwin_get_object_archs(objpath: str) -> T.Optional[T.List[str]]:
    '''
    For a specific object (executable, static library, dylib, etc), run `lipo`
    to fetch the list of archs supported by it. Supports both thin objects and
    'fat' objects.

    Returns None when `lipo` produces no output (e.g. the file could not be
    inspected); the annotation was previously `T.List[str]`, which did not
    match that code path.
    '''
    _, stdo, stderr = Popen_safe(['lipo', '-info', objpath])
    if not stdo:
        mlog.debug('lipo {}: {}'.format(objpath, stderr))
        return None
    stdo = stdo.rsplit(': ', 1)[1]
    # Convert from lipo-style archs to meson-style CPUs
    stdo = stdo.replace('i386', 'x86')
    stdo = stdo.replace('arm64', 'aarch64')
    # Add generic name for armv7 and armv7s
    if 'armv7' in stdo:
        stdo += ' arm'
    return stdo.split()
def detect_vcs(source_dir: T.Union[str, Path]) -> T.Optional[T.Dict[str, str]]:
    """Detect which version-control system manages *source_dir*.

    Walks from source_dir up through its parents looking for a VCS metadata
    directory (.git, .hg, .svn, .bzr). Returns a dict describing the VCS
    (name, command, how to get the revision, ...) with 'wc_dir' set to the
    working-copy root, or None if no VCS is found.
    """
    vcs_systems = [
        dict(name='git', cmd='git', repo_dir='.git', get_rev='git describe --dirty=+', rev_regex='(.*)', dep='.git/logs/HEAD'),
        dict(name='mercurial', cmd='hg', repo_dir='.hg', get_rev='hg id -i', rev_regex='(.*)', dep='.hg/dirstate'),
        dict(name='subversion', cmd='svn', repo_dir='.svn', get_rev='svn info', rev_regex='Revision: (.*)', dep='.svn/wc.db'),
        dict(name='bazaar', cmd='bzr', repo_dir='.bzr', get_rev='bzr revno', rev_regex='(.*)', dep='.bzr'),
    ]
    if isinstance(source_dir, str):
        source_dir = Path(source_dir)

    parent_paths_and_self = collections.deque(source_dir.parents)
    # Prepend the source directory to the front so we can check it;
    # source_dir.parents doesn't include source_dir
    parent_paths_and_self.appendleft(source_dir)
    for curdir in parent_paths_and_self:
        for vcs in vcs_systems:
            # Only report a VCS whose command-line tool is actually installed.
            if Path.is_dir(curdir.joinpath(vcs['repo_dir'])) and shutil.which(vcs['cmd']):
                vcs['wc_dir'] = str(curdir)
                return vcs
    return None
def current_vs_supports_modules() -> bool:
    """Whether the active Visual Studio (from the VSCMD_VER env var) is a
    16.9.0 prerelease, i.e. a VS build that supports C++ modules."""
    vsver = os.environ.get('VSCMD_VER', '')
    if not vsver.startswith('16.9.0'):
        return False
    return '-pre.' in vsver
# a helper class which implements the same version ordering as RPM
class Version:
    """A version string split into comparable numeric/alphabetic components.

    Ordering follows RPM semantics: the string is split into digit runs and
    letter runs (separators are discarded); components are compared pairwise,
    with a digit run sorting after a letter run.
    """
    def __init__(self, s: str) -> None:
        self._s = s

        # split into numeric, alphabetic and non-alphanumeric sequences
        sequences1 = re.finditer(r'(\d+|[a-zA-Z]+|[^a-zA-Z\d]+)', s)

        # non-alphanumeric separators are discarded
        sequences2 = [m for m in sequences1 if not re.match(r'[^a-zA-Z\d]+', m.group(1))]

        # numeric sequences are converted from strings to ints
        sequences3 = [int(m.group(1)) if m.group(1).isdigit() else m.group(1) for m in sequences2]

        self._v = sequences3

    def __str__(self) -> str:
        return '%s (V=%s)' % (self._s, str(self._v))

    def __repr__(self) -> str:
        return '<Version: {}>'.format(self._s)

    def __lt__(self, other: object) -> bool:
        if isinstance(other, Version):
            return self.__cmp(other, operator.lt)
        return NotImplemented

    def __gt__(self, other: object) -> bool:
        if isinstance(other, Version):
            return self.__cmp(other, operator.gt)
        return NotImplemented

    def __le__(self, other: object) -> bool:
        if isinstance(other, Version):
            return self.__cmp(other, operator.le)
        return NotImplemented

    def __ge__(self, other: object) -> bool:
        if isinstance(other, Version):
            return self.__cmp(other, operator.ge)
        return NotImplemented

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Version):
            return self._v == other._v
        return NotImplemented

    def __ne__(self, other: object) -> bool:
        if isinstance(other, Version):
            return self._v != other._v
        return NotImplemented

    def __cmp(self, other: 'Version', comparator: T.Callable[[T.Any, T.Any], bool]) -> bool:
        # compare each sequence in order
        for ours, theirs in zip(self._v, other._v):
            # sort a non-digit sequence before a digit sequence
            ours_is_int = isinstance(ours, int)
            theirs_is_int = isinstance(theirs, int)

            if ours_is_int != theirs_is_int:
                return comparator(ours_is_int, theirs_is_int)

            if ours != theirs:
                return comparator(ours, theirs)

        # if equal length, all components have matched, so equal
        # otherwise, the version with a suffix remaining is greater
        return comparator(len(self._v), len(other._v))
def _version_extract_cmpop(vstr2: str) -> T.Tuple[T.Callable[[T.Any, T.Any], bool], str]:
    """Split a leading comparison operator off a version-constraint string.

    Returns (comparator, remainder). Without a recognized prefix the
    comparator defaults to equality and the string is returned unchanged.
    """
    # Two-character prefixes must be tried before their one-character
    # counterparts ('>=' before '>', '==' before '=', etc.).
    prefixes = (
        ('>=', operator.ge),
        ('<=', operator.le),
        ('!=', operator.ne),
        ('==', operator.eq),
        ('=', operator.eq),
        ('>', operator.gt),
        ('<', operator.lt),
    )
    for token, comparator in prefixes:
        if vstr2.startswith(token):
            return (comparator, vstr2[len(token):])
    return (operator.eq, vstr2)
def version_compare(vstr1: str, vstr2: str) -> bool:
    """Check version *vstr1* against constraint *vstr2* (e.g. '>=1.2.0').

    The constraint may carry a leading operator (>=, <=, !=, ==, =, >, <);
    a bare version means equality.
    """
    (cmpop, vstr2) = _version_extract_cmpop(vstr2)
    return cmpop(Version(vstr1), Version(vstr2))
def version_compare_many(vstr1: str, conditions: T.Union[str, T.Iterable[str]]) -> T.Tuple[bool, T.List[str], T.List[str]]:
    """Check *vstr1* against one or many constraints.

    Returns (all_satisfied, failed_constraints, satisfied_constraints).
    """
    if isinstance(conditions, str):
        conditions = [conditions]
    found = []
    not_found = []
    for req in conditions:
        bucket = found if version_compare(vstr1, req) else not_found
        bucket.append(req)
    return not not_found, not_found, found
# determine if the minimum version satisfying the condition |condition| exceeds
# the minimum version for a feature |minimum|
def version_compare_condition_with_min(condition: str, minimum: str) -> bool:
    """Return True if the lowest version allowed by *condition* is at least *minimum*.

    Constraints with no well-defined lower bound ('<=', '!=', '<') always
    return False.
    """
    if condition.startswith('>='):
        cmpop = operator.le
        condition = condition[2:]
    elif condition.startswith('<='):
        return False
    elif condition.startswith('!='):
        return False
    elif condition.startswith('=='):
        cmpop = operator.le
        condition = condition[2:]
    elif condition.startswith('='):
        cmpop = operator.le
        condition = condition[1:]
    elif condition.startswith('>'):
        cmpop = operator.lt
        condition = condition[1:]
    elif condition.startswith('<'):
        return False
    else:
        cmpop = operator.le

    # Declaring a project(meson_version: '>=0.46') and then using features in
    # 0.46.0 is valid, because (knowing the meson versioning scheme) '0.46.0' is
    # the lowest version which satisfies the constraint '>=0.46'.
    #
    # But this will fail here, because the minimum version required by the
    # version constraint ('0.46') is strictly less (in our version comparison)
    # than the minimum version needed for the feature ('0.46.0').
    #
    # Map versions in the constraint of the form '0.46' to '0.46.0', to embed
    # this knowledge of the meson versioning scheme.
    condition = condition.strip()
    # Fix: the dot must be escaped. The previous pattern r'^\d+.\d+$' used '.'
    # as a wildcard and so also matched strings like '1a2', wrongly appending
    # '.0' to them.
    if re.match(r'^\d+\.\d+$', condition):
        condition += '.0'

    return T.cast(bool, cmpop(Version(minimum), Version(condition)))
def default_libdir() -> str:
    """Guess the default library directory for the current system.

    Returns a Debian multiarch path ('lib/<triplet>') on Debian-likes,
    'lib64' on lib64-style distros, and 'lib' otherwise.
    """
    if is_debianlike():
        try:
            pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.DEVNULL)
            (stdo, _) = pc.communicate()
            if pc.returncode == 0:
                archpath = stdo.decode().strip()
                return 'lib/' + archpath
        except Exception:
            # Best effort: dpkg-architecture missing or failing just means we
            # fall through to the generic defaults below.
            pass
    if is_freebsd() or is_irix():
        return 'lib'
    # A real (non-symlink) /usr/lib64 indicates a lib64-style distribution.
    if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
        return 'lib64'
    return 'lib'
def default_libexecdir() -> str:
    """Default libexec directory.

    There is no way to auto-detect this, so it must be set at build time.
    """
    return 'libexec'
def default_prefix() -> str:
    """Default installation prefix for the current OS."""
    if is_windows():
        return 'c:/'
    return '/usr/local'
def get_library_dirs() -> T.List[str]:
    """Return the list of system library search directories for this platform."""
    if is_windows():
        return ['C:/mingw/lib'] # TODO: get programmatically
    if is_osx():
        return ['/usr/lib'] # TODO: get programmatically
    # The following is probably Debian/Ubuntu specific.
    # /usr/local/lib is first because it contains stuff
    # installed by the sysadmin and is probably more up-to-date
    # than /usr/lib. If you feel that this search order is
    # problematic, please raise the issue on the mailing list.
    unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']

    if is_freebsd():
        return unixdirs
    # FIXME: this needs to be further genericized for aarch64 etc.
    machine = platform.machine()
    if machine in ('i386', 'i486', 'i586', 'i686'):
        plat = 'i386'
    elif machine.startswith('arm'):
        plat = 'arm'
    else:
        plat = ''

    # Solaris puts 32-bit libraries in the main /lib & /usr/lib directories
    # and 64-bit libraries in platform specific subdirectories.
    if is_sunos():
        if machine == 'i86pc':
            plat = 'amd64'
        elif machine.startswith('sun4'):
            plat = 'sparcv9'

    # Include per-platform subdirectories of /usr/lib and /lib, plus the
    # lib64 variants where they exist.
    usr_platdir = Path('/usr/lib/') / plat
    if usr_platdir.is_dir():
        unixdirs += [str(x) for x in (usr_platdir).iterdir() if x.is_dir()]
    if os.path.exists('/usr/lib64'):
        unixdirs.append('/usr/lib64')

    lib_platdir = Path('/lib/') / plat
    if lib_platdir.is_dir():
        unixdirs += [str(x) for x in (lib_platdir).iterdir() if x.is_dir()]
    if os.path.exists('/lib64'):
        unixdirs.append('/lib64')

    return unixdirs
def has_path_sep(name: str, sep: str = '/\\') -> bool:
    'Checks if any of the specified @sep path separators are in @name'
    return any(ch in name for ch in sep)
if is_windows():
    # shlex.split is not suitable for splitting command line on Window (https://bugs.python.org/issue1724822);
    # shlex.quote is similarly problematic. Below are "proper" implementations of these functions according to
    # https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments and
    # https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/

    _whitespace = ' \t\n\r'
    # Matches any character that forces quoting: whitespace or a double quote.
    _find_unsafe_char = re.compile(r'[{}"]'.format(_whitespace)).search

    def quote_arg(arg: str) -> str:
        """Quote *arg* so the MSVC C runtime parses it back as one argument."""
        if arg and not _find_unsafe_char(arg):
            # Nothing needs escaping; return as-is.
            return arg

        result = '"'
        num_backslashes = 0
        for c in arg:
            if c == '\\':
                # Backslashes are only special before a quote; count them and
                # decide how to emit them when we see the next non-backslash.
                num_backslashes += 1
            else:
                if c == '"':
                    # Escape all backslashes and the following double quotation mark
                    num_backslashes = num_backslashes * 2 + 1

                result += num_backslashes * '\\' + c
                num_backslashes = 0

        # Escape all backslashes, but let the terminating double quotation
        # mark we add below be interpreted as a metacharacter
        result += (num_backslashes * 2) * '\\' + '"'
        return result

    def split_args(cmd: str) -> T.List[str]:
        """Split a Windows command line into arguments (inverse of quote_arg)."""
        result = []
        arg = ''
        num_backslashes = 0
        num_quotes = 0
        in_quotes = False
        for c in cmd:
            if c == '\\':
                num_backslashes += 1
            else:
                if c == '"' and not (num_backslashes % 2):
                    # unescaped quote, eat it
                    arg += (num_backslashes // 2) * '\\'
                    num_quotes += 1
                    in_quotes = not in_quotes
                elif c in _whitespace and not in_quotes:
                    if arg or num_quotes:
                        # reached the end of the argument
                        result.append(arg)
                        arg = ''
                        num_quotes = 0
                else:
                    if c == '"':
                        # escaped quote
                        num_backslashes = (num_backslashes - 1) // 2

                    arg += num_backslashes * '\\' + c

                num_backslashes = 0

        if arg or num_quotes:
            result.append(arg)

        return result
else:
    # On POSIX the standard library implementations are correct.
    def quote_arg(arg: str) -> str:
        """Shell-quote a single argument."""
        return shlex.quote(arg)

    def split_args(cmd: str) -> T.List[str]:
        """Split a shell command line into arguments."""
        return shlex.split(cmd)
def join_args(args: T.Iterable[str]) -> str:
    """Quote each argument and join them into a single command-line string."""
    return ' '.join(quote_arg(arg) for arg in args)
def do_replacement(regex: T.Pattern[str], line: str, variable_format: str,
                   confdata: 'ConfigurationData') -> T.Tuple[str, T.Set[str]]:
    """Substitute configuration variables (@var@ or ${var}) in one line.

    Returns the substituted line and the set of variable names referenced
    but absent from *confdata* (these are replaced with the empty string).
    Raises MesonException for values that are neither str nor int.
    """
    missing_variables = set()  # type: T.Set[str]
    # 'cmake' uses ${var}; 'meson' and 'cmake@' use @var@.
    if variable_format == 'cmake':
        start_tag = '${'
        backslash_tag = '\\${'
    else:
        assert variable_format in ['meson', 'cmake@']
        start_tag = '@'
        backslash_tag = '\\@'

    def variable_replace(match: T.Match[str]) -> str:
        # Pairs of escape characters before '@' or '\@'
        if match.group(0).endswith('\\'):
            num_escapes = match.end(0) - match.start(0)
            return '\\' * (num_escapes // 2)
        # Single escape character and '@'
        elif match.group(0) == backslash_tag:
            return start_tag
        # Template variable to be replaced
        else:
            varname = match.group(1)
            var_str = ''
            if varname in confdata:
                (var, desc) = confdata.get(varname)
                if isinstance(var, str):
                    var_str = var
                elif isinstance(var, int):
                    var_str = str(var)
                else:
                    msg = 'Tried to replace variable {!r} value with ' \
                          'something other than a string or int: {!r}'
                    raise MesonException(msg.format(varname, var))
            else:
                missing_variables.add(varname)
            return var_str
    return re.sub(regex, variable_replace, line), missing_variables
def do_define(regex: T.Pattern[str], line: str, confdata: 'ConfigurationData', variable_format: str) -> str:
    """Expand a single '#mesondefine'/'#cmakedefine' line into C-preprocessor text.

    Booleans become '#define'/'#undef', ints and strings become
    '#define NAME VALUE', and an unknown variable becomes a commented
    '#undef'. Raises MesonException on malformed lines or unsupported types.
    """
    def get_cmake_define(line: str, confdata: 'ConfigurationData') -> str:
        # cmakedefine may carry extra tokens after the name; each token is
        # itself looked up in confdata, falling back to its literal text.
        arr = line.split()
        define_value = []
        for token in arr[2:]:
            try:
                (v, desc) = confdata.get(token)
                define_value += [str(v)]
            except KeyError:
                define_value += [token]
        return ' '.join(define_value)

    arr = line.split()
    if variable_format == 'meson' and len(arr) != 2:
        raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())

    varname = arr[1]
    try:
        (v, desc) = confdata.get(varname)
    except KeyError:
        # Unknown variable: emit a commented-out #undef as documentation.
        return '/* #undef %s */\n' % varname
    if isinstance(v, bool):
        if v:
            return '#define %s\n' % varname
        else:
            return '#undef %s\n' % varname
    elif isinstance(v, int):
        return '#define %s %d\n' % (varname, v)
    elif isinstance(v, str):
        if variable_format == 'meson':
            result = v
        else:
            result = get_cmake_define(line, confdata)
        result = '#define %s %s\n' % (varname, result)
        # The value itself may reference further @var@/${var} substitutions.
        (result, missing_variable) = do_replacement(regex, result, variable_format, confdata)
        return result
    else:
        raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
def get_variable_regex(variable_format: str = 'meson') -> T.Pattern[str]:
    """Return the compiled substitution-variable regex for *variable_format*.

    Valid characters for a variable name are (a-z, A-Z, 0-9, _, -); '@' can
    be escaped as '\\@' (and '${' as '\\${' in cmake format). Raises
    MesonException for an unknown format.
    """
    if variable_format == 'cmake':
        return re.compile(r'(?:\\\\)+(?=\\?\$)|\\\${|\${([-a-zA-Z0-9_]+)}')
    if variable_format in ('meson', 'cmake@'):
        return re.compile(r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@')
    raise MesonException('Format "{}" not handled'.format(variable_format))
def do_conf_str(data: list, confdata: 'ConfigurationData', variable_format: str,
                encoding: str = 'utf-8') -> T.Tuple[T.List[str], T.Set[str], bool]:
    """Run config-file substitution over a list of input lines.

    Returns (output_lines, missing_variable_names, confdata_useless) where
    confdata_useless is True when confdata was empty AND no substitution
    token was found (so the caller can suggest the `copy:` kwarg instead).
    """
    def line_is_valid(line: str, variable_format: str) -> bool:
        # A meson-format file must not contain cmake tokens and vice versa.
        if variable_format == 'meson':
            if '#cmakedefine' in line:
                return False
        else:  # cmake format
            if '#mesondefine' in line:
                return False
        return True

    regex = get_variable_regex(variable_format)

    search_token = '#mesondefine'
    if variable_format != 'meson':
        search_token = '#cmakedefine'

    result = []
    missing_variables = set()
    # Detect when the configuration data is empty and no tokens were found
    # during substitution so we can warn the user to use the `copy:` kwarg.
    confdata_useless = not confdata.keys()
    for line in data:
        if line.startswith(search_token):
            confdata_useless = False
            line = do_define(regex, line, confdata, variable_format)
        else:
            if not line_is_valid(line, variable_format):
                raise MesonException('Format "{}" mismatched'.format(variable_format))
            line, missing = do_replacement(regex, line, variable_format, confdata)
            missing_variables.update(missing)
            if missing:
                confdata_useless = False
        result.append(line)

    return result, missing_variables, confdata_useless
def do_conf_file(src: str, dst: str, confdata: 'ConfigurationData', variable_format: str,
                 encoding: str = 'utf-8') -> T.Tuple[T.Set[str], bool]:
    """Configure file *src* into *dst* using *confdata* (see do_conf_str).

    Writes atomically via a '~' temp file and only replaces *dst* if the
    content changed, preserving timestamps to avoid spurious rebuilds.
    Returns (missing_variable_names, confdata_useless).
    """
    try:
        # newline='' keeps the source file's line endings intact.
        with open(src, encoding=encoding, newline='') as f:
            data = f.readlines()
    except Exception as e:
        raise MesonException('Could not read input file %s: %s' % (src, str(e)))

    (result, missing_variables, confdata_useless) = do_conf_str(data, confdata, variable_format, encoding)
    dst_tmp = dst + '~'
    try:
        with open(dst_tmp, 'w', encoding=encoding, newline='') as f:
            f.writelines(result)
    except Exception as e:
        raise MesonException('Could not write output file %s: %s' % (dst, str(e)))
    # Carry over the source file's permission bits.
    shutil.copymode(src, dst_tmp)
    replace_if_different(dst, dst_tmp)
    return missing_variables, confdata_useless
CONF_C_PRELUDE = '''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
'''
CONF_NASM_PRELUDE = '''; Autogenerated by the Meson build system.
; Do not edit, your changes will be lost.
'''
def dump_conf_header(ofilename: str, cdata: 'ConfigurationData', output_format: str) -> None:
    """Write configuration data *cdata* to *ofilename* as a header file.

    output_format must be 'c' or 'nasm'. The file is written via a '~' temp
    file and only replaced when the content actually changed.
    Raises MesonException for unsupported value types or an unknown format.
    """
    if output_format == 'c':
        prelude = CONF_C_PRELUDE
        prefix = '#'
    elif output_format == 'nasm':
        prelude = CONF_NASM_PRELUDE
        prefix = '%'
    else:
        # Previously an unknown format fell through and crashed below with a
        # NameError on 'prelude'; fail with a clear error instead.
        raise MesonException('Unknown configuration output format {!r}.'.format(output_format))

    ofilename_tmp = ofilename + '~'
    with open(ofilename_tmp, 'w', encoding='utf-8') as ofile:
        ofile.write(prelude)
        for k in sorted(cdata.keys()):
            (v, desc) = cdata.get(k)
            if desc:
                # Emit the description as a comment in the target syntax.
                if output_format == 'c':
                    ofile.write('/* %s */\n' % desc)
                elif output_format == 'nasm':
                    for line in desc.split('\n'):
                        ofile.write('; %s\n' % line)
            if isinstance(v, bool):
                if v:
                    ofile.write('%sdefine %s\n\n' % (prefix, k))
                else:
                    ofile.write('%sundef %s\n\n' % (prefix, k))
            elif isinstance(v, (int, str)):
                ofile.write('%sdefine %s %s\n\n' % (prefix, k, v))
            else:
                raise MesonException('Unknown data type in configuration file entry: ' + k)
    replace_if_different(ofilename, ofilename_tmp)
def replace_if_different(dst: str, dst_tmp: str) -> None:
    """Move *dst_tmp* over *dst* only when the contents differ.

    Leaving an unchanged *dst* untouched preserves its timestamp and so
    prevents unnecessary rebuilds. *dst_tmp* is always consumed.
    """
    try:
        with open(dst, 'rb') as f1, open(dst_tmp, 'rb') as f2:
            identical = f1.read() == f2.read()
    except FileNotFoundError:
        # No existing dst: definitely different.
        identical = False
    if identical:
        os.unlink(dst_tmp)
    else:
        os.replace(dst_tmp, dst)
@T.overload
def unholder(item: 'ObjectHolder[_T]') -> _T: ...

@T.overload
def unholder(item: T.List['ObjectHolder[_T]']) -> T.List[_T]: ...

@T.overload
def unholder(item: T.List[_T]) -> T.List[_T]: ...

@T.overload
def unholder(item: T.List[T.Union[_T, 'ObjectHolder[_T]']]) -> T.List[_T]: ...

def unholder(item):  # type: ignore # TODO fix overload (somehow)
    """Get the held item of an object holder or list of object holders.

    Duck-typed on a 'held_object' attribute, so plain (non-holder) values
    and list elements pass through unchanged.
    """
    if isinstance(item, list):
        return [i.held_object if hasattr(i, 'held_object') else i for i in item]
    if hasattr(item, 'held_object'):
        return item.held_object
    return item
def listify(item: T.Any, flatten: bool = True) -> T.List[T.Any]:
    '''
    Returns a list with all args embedded in a list if they are not a list.
    This function preserves order.
    @flatten: Convert lists of lists to a flat list
    '''
    if not isinstance(item, list):
        return [item]
    out = []  # type: T.List[T.Any]
    for element in item:
        if flatten and isinstance(element, list):
            out.extend(listify(element, flatten=True))
        else:
            out.append(element)
    return out
def extract_as_list(dict_object: T.Dict[_T, _U], key: _T, pop: bool = False) -> T.List[_U]:
    '''
    Extracts all values from given dict_object and listifies them.

    With pop=True the key is removed from the dict; a missing key yields [].
    '''
    if pop:
        raw = dict_object.pop(key, [])
    else:
        raw = dict_object.get(key, [])
    # If there's only one key, we don't return a list with one element
    return listify(raw, flatten=True)
def typeslistify(item: 'T.Union[_T, T.Sequence[_T]]',
                 types: 'T.Union[T.Type[_T], T.Tuple[T.Type[_T]]]') -> T.List[_T]:
    '''
    Ensure that type(@item) is one of @types or a
    list of items all of which are of type @types

    A bare item of an accepted type is wrapped in a one-element list;
    anything else raises MesonException.
    '''
    if isinstance(item, types):
        item = T.cast(T.List[_T], [item])
    if not isinstance(item, list):
        raise MesonException('Item must be a list or one of {!r}'.format(types))
    for member in item:
        # None entries are tolerated.
        if member is not None and not isinstance(member, types):
            raise MesonException('List item must be one of {!r}'.format(types))
    return item
def stringlistify(item: T.Union[T.Any, T.Sequence[T.Any]]) -> T.List[str]:
    """Coerce a string or sequence of strings to a list of strings.

    Raises MesonException (via typeslistify) on non-string members.
    """
    return typeslistify(item, str)
def expand_arguments(args: T.Iterable[str]) -> T.Optional[T.List[str]]:
    """Expand '@file' response-file arguments into their whitespace-separated
    contents; other arguments pass through unchanged.

    Returns None (after logging the error) when a referenced file cannot be
    read.
    """
    expanded = []  # type: T.List[str]
    for arg in args:
        if not arg.startswith('@'):
            expanded.append(arg)
            continue

        args_file = arg[1:]
        try:
            with open(args_file) as f:
                expanded.extend(f.read().split())
        except Exception as e:
            mlog.error('Expanding command line arguments:',  args_file, 'not found')
            mlog.exception(e)
            return None
    return expanded
def partition(pred: T.Callable[[_T], object], iterable: T.Iterator[_T]) -> T.Tuple[T.Iterator[_T], T.Iterator[_T]]:
    """Use a predicate to partition entries into false entries and true
    entries.

    Both returned iterators are lazy; tee() lets them share the input.

    >>> x, y = partition(is_odd, range(10))
    >>> (list(x), list(y))
    ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
    """
    t1, t2 = tee(iterable)
    return filterfalse(pred, t1), filter(pred, t2)
def Popen_safe(args: T.List[str], write: T.Optional[str] = None,
               stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
               stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
               **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
    """Run *args* and return (process, stdout_text, stderr_text).

    *write*, when given, is sent to the child's stdin. Output is decoded to
    str; on consoles whose preferred encoding is not UTF-8 the legacy
    byte-mode path with explicit decoding is used instead.
    """
    import locale
    encoding = locale.getpreferredencoding()
    # Redirect stdin to DEVNULL otherwise the command run by us here might mess
    # up the console and ANSI colors will stop working on Windows.
    if 'stdin' not in kwargs:
        kwargs['stdin'] = subprocess.DEVNULL
    if not sys.stdout.encoding or encoding.upper() != 'UTF-8':
        p, o, e = Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)
    else:
        p = subprocess.Popen(args, universal_newlines=True, close_fds=False,
                             stdout=stdout, stderr=stderr, **kwargs)
        o, e = p.communicate(write)
    # Sometimes the command that we run will call another command which will be
    # without the above stdin workaround, so set the console mode again just in
    # case.
    mlog.setup_console()
    return p, o, e
def Popen_safe_legacy(args: T.List[str], write: T.Optional[str] = None,
                      stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
                      stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
                      **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
    """Byte-mode variant of Popen_safe for non-UTF-8 consoles.

    Communicates in bytes, then decodes stdout/stderr with the console's
    encoding (replacing undecodable bytes) and normalizes CRLF to LF.
    """
    p = subprocess.Popen(args, universal_newlines=False, close_fds=False,
                         stdout=stdout, stderr=stderr, **kwargs)
    input_ = None  # type: T.Optional[bytes]
    if write is not None:
        input_ = write.encode('utf-8')
    o, e = p.communicate(input_)
    if o is not None:
        if sys.stdout.encoding:
            o = o.decode(encoding=sys.stdout.encoding, errors='replace').replace('\r\n', '\n')
        else:
            o = o.decode(errors='replace').replace('\r\n', '\n')
    if e is not None:
        if sys.stderr.encoding:
            e = e.decode(encoding=sys.stderr.encoding, errors='replace').replace('\r\n', '\n')
        else:
            e = e.decode(errors='replace').replace('\r\n', '\n')
    return p, o, e
def iter_regexin_iter(regexiter: T.Iterable[str], initer: T.Iterable[str]) -> T.Optional[str]:
    '''
    Takes each regular expression in @regexiter and tries to search for it in
    every item in @initer. If there is a match, returns the matched text.
    Else returns None. Non-string entries in @initer are skipped.
    '''
    for pattern in regexiter:
        for candidate in initer:
            if not isinstance(candidate, str):
                continue
            found = re.search(pattern, candidate)
            if found:
                return found.group()
    return None
def _substitute_values_check_errors(command: T.List[str], values: T.Dict[str, str]) -> None:
    """Validate that *command* only uses templates that *values* can satisfy.

    Raises MesonException when an input/output-derived template appears but
    no inputs/outputs were provided, when @PLAINNAME@/@BASENAME@ is used
    with multiple inputs, or when an @INPUTnn@/@OUTPUTnn@ index is out of
    range. Pure validation; no substitution happens here.
    """
    # Error checking
    inregex = ['@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@']  # type: T.List[str]
    outregex = ['@OUTPUT([0-9]+)?@', '@OUTDIR@']  # type: T.List[str]
    if '@INPUT@' not in values:
        # Error out if any input-derived templates are present in the command
        match = iter_regexin_iter(inregex, command)
        if match:
            m = 'Command cannot have {!r}, since no input files were specified'
            raise MesonException(m.format(match))
    else:
        if len(values['@INPUT@']) > 1:
            # Error out if @PLAINNAME@ or @BASENAME@ is present in the command
            match = iter_regexin_iter(inregex[1:], command)
            if match:
                raise MesonException('Command cannot have {!r} when there is '
                                     'more than one input file'.format(match))
        # Error out if an invalid @INPUTnn@ template was specified
        for each in command:
            if not isinstance(each, str):
                continue
            match2 = re.search(inregex[0], each)
            # A matched @INPUTnn@ must exist as a key in values.
            if match2 and match2.group() not in values:
                m = 'Command cannot have {!r} since there are only {!r} inputs'
                raise MesonException(m.format(match2.group(), len(values['@INPUT@'])))
    if '@OUTPUT@' not in values:
        # Error out if any output-derived templates are present in the command
        match = iter_regexin_iter(outregex, command)
        if match:
            m = 'Command cannot have {!r} since there are no outputs'
            raise MesonException(m.format(match))
    else:
        # Error out if an invalid @OUTPUTnn@ template was specified
        for each in command:
            if not isinstance(each, str):
                continue
            match2 = re.search(outregex[0], each)
            if match2 and match2.group() not in values:
                m = 'Command cannot have {!r} since there are only {!r} outputs'
                raise MesonException(m.format(match2.group(), len(values['@OUTPUT@'])))
def substitute_values(command: T.List[str], values: T.Dict[str, str]) -> T.List[str]:
    '''
    Substitute the template strings in the @values dict into the list of
    strings @command and return a new list. For a full list of the templates,
    see get_filenames_templates_dict()

    If multiple inputs/outputs are given in the @values dictionary, we
    substitute @INPUT@ and @OUTPUT@ only if they are the entire string, not
    just a part of it, and in that case we substitute *all* of them.
    '''
    # Error checking
    _substitute_values_check_errors(command, values)
    # Substitution
    outcmd = []  # type: T.List[str]
    # Regex over every template key except the multi-valued @INPUT@/@OUTPUT@,
    # which are handled specially below.
    rx_keys = [re.escape(key) for key in values if key not in ('@INPUT@', '@OUTPUT@')]
    value_rx = re.compile('|'.join(rx_keys)) if rx_keys else None
    for vv in command:
        if not isinstance(vv, str):
            # Non-string command elements pass through untouched.
            outcmd.append(vv)
        elif '@INPUT@' in vv:
            inputs = values['@INPUT@']
            if vv == '@INPUT@':
                # Entire element is the template: expand to all inputs.
                outcmd += inputs
            elif len(inputs) == 1:
                outcmd.append(vv.replace('@INPUT@', inputs[0]))
            else:
                raise MesonException("Command has '@INPUT@' as part of a "
                                     "string and more than one input file")
        elif '@OUTPUT@' in vv:
            outputs = values['@OUTPUT@']
            if vv == '@OUTPUT@':
                outcmd += outputs
            elif len(outputs) == 1:
                outcmd.append(vv.replace('@OUTPUT@', outputs[0]))
            else:
                raise MesonException("Command has '@OUTPUT@' as part of a "
                                     "string and more than one output file")
        # Append values that are exactly a template string.
        # This is faster than a string replace.
        elif vv in values:
            outcmd.append(values[vv])
        # Substitute everything else with replacement
        elif value_rx:
            outcmd.append(value_rx.sub(lambda m: values[m.group(0)], vv))
        else:
            outcmd.append(vv)
    return outcmd
def get_filenames_templates_dict(inputs: T.List[str], outputs: T.List[str]) -> T.Dict[str, T.Union[str, T.List[str]]]:
    '''
    Create a dictionary with template strings as keys and values as values for
    the following templates:

    @INPUT@  - the full path to one or more input files, from @inputs
    @OUTPUT@ - the full path to one or more output files, from @outputs
    @OUTDIR@ - the full path to the directory containing the output files

    If there is only one input file, the following keys are also created:

    @PLAINNAME@ - the filename of the input file
    @BASENAME@ - the filename of the input file with the extension removed

    If there is more than one input file, the following keys are also created:

    @INPUT0@, @INPUT1@, ... one for each input file

    If there is more than one output file, the following keys are also created:

    @OUTPUT0@, @OUTPUT1@, ... one for each output file
    '''
    values = {}  # type: T.Dict[str, T.Union[str, T.List[str]]]
    if inputs:
        # The whole input list, plus an indexed key per input file.
        values['@INPUT@'] = inputs
        values.update({'@INPUT{}@'.format(idx): path
                       for idx, path in enumerate(inputs)})
        if len(inputs) == 1:
            plain = os.path.basename(inputs[0])
            values['@PLAINNAME@'] = plain
            values['@BASENAME@'] = os.path.splitext(plain)[0]
    if outputs:
        # Same treatment for outputs.
        values['@OUTPUT@'] = outputs
        values.update({'@OUTPUT{}@'.format(idx): path
                       for idx, path in enumerate(outputs)})
        # Outdir should be the same for all outputs; many external programs
        # fail on empty arguments, so map '' to '.'.
        values['@OUTDIR@'] = os.path.dirname(outputs[0]) or '.'
    return values
def _make_tree_writable(topdir: str) -> None:
    # Ensure all files and directories under topdir are writable
    # (and readable) by owner.
    for d, _, files in os.walk(topdir):
        os.chmod(d, os.stat(d).st_mode | stat.S_IWRITE | stat.S_IREAD)
        for fname in files:
            fpath = os.path.join(d, fname)
            # Only chmod regular files; skips broken symlinks and specials.
            if os.path.isfile(fpath):
                os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
def windows_proof_rmtree(f: str) -> None:
    """Remove directory tree *f*, retrying with backoff on Windows-style failures."""
    # On Windows if anyone is holding a file open you can't
    # delete it. As an example an anti virus scanner might
    # be scanning files you are trying to delete. The only
    # way to fix this is to try again and again.
    delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
    # Start by making the tree writable.
    _make_tree_writable(f)
    for d in delays:
        try:
            shutil.rmtree(f)
            return
        except FileNotFoundError:
            # Already gone (possibly removed by a previous partial attempt).
            return
        except OSError:
            time.sleep(d)
    # Try one last time and throw if it fails.
    shutil.rmtree(f)
def windows_proof_rm(fpath: str) -> None:
    """Like windows_proof_rmtree, but for a single file.

    Makes the file writable first, then retries deletion with increasing
    delays to survive transient locks (e.g. antivirus scanners on Windows).
    """
    if os.path.isfile(fpath):
        os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
    for delay in (0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2):
        try:
            os.unlink(fpath)
            return
        except FileNotFoundError:
            # Already gone; nothing to do.
            return
        except OSError:
            time.sleep(delay)
    # Final attempt; let any failure propagate.
    os.unlink(fpath)
class TemporaryDirectoryWinProof(TemporaryDirectory):

    """
    Like TemporaryDirectory, but cleans things up using
    windows_proof_rmtree()
    """

    def __exit__(self, exc: T.Any, value: T.Any, tb: T.Any) -> None:
        try:
            super().__exit__(exc, value, tb)
        except OSError:
            # Something (e.g. an antivirus scanner on Windows) is holding a
            # file open; fall back to the retrying remover.
            windows_proof_rmtree(self.name)

    def cleanup(self) -> None:
        try:
            super().cleanup()
        except OSError:
            windows_proof_rmtree(self.name)
def detect_subprojects(spdir_name: str, current_dir: str = '',
                       result: T.Optional[T.Dict[str, T.List[str]]] = None) -> T.Optional[T.Dict[str, T.List[str]]]:
    """Recursively collect subprojects (directories and .wrap files) under
    *spdir_name* inside *current_dir*.

    Returns a mapping from subproject name to the list of paths providing it.
    The 'packagecache' directory is skipped.
    """
    if result is None:
        result = {}
    spdir = os.path.join(current_dir, spdir_name)
    if not os.path.exists(spdir):
        return result
    for trial in glob(os.path.join(spdir, '*')):
        basename = os.path.basename(trial)
        # Fix: compare the basename, not the full path. 'trial' always
        # carries the directory prefix, so the old `trial == 'packagecache'`
        # never matched and the package cache leaked into the results.
        if basename == 'packagecache':
            continue
        append_this = True
        if os.path.isdir(trial):
            # A directory is itself a subproject; also recurse into it to
            # find nested subprojects directories.
            detect_subprojects(spdir_name, trial, result)
        elif trial.endswith('.wrap') and os.path.isfile(trial):
            basename = os.path.splitext(basename)[0]
        else:
            append_this = False
        if append_this:
            result.setdefault(basename, []).append(trial)
    return result
def substring_is_in_list(substr: str, strlist: T.List[str]) -> bool:
    """Return True if *substr* occurs inside any element of *strlist*."""
    return any(substr in element for element in strlist)
class OrderedSet(T.MutableSet[_T]):
    """A set that preserves the order in which items are added, by first
    insertion.
    """
    def __init__(self, iterable: T.Optional[T.Iterable[_T]] = None) -> None:
        # Backed by an OrderedDict with None values: dict keys give us both
        # O(1) membership and insertion order.
        # typing.OrderedDict is new in 3.7.2, so we can't use that, but we can
        # use MutableMapping, which is fine in this case.
        self.__container = collections.OrderedDict()  # type: T.MutableMapping[_T, None]
        if iterable:
            self.update(iterable)

    def __contains__(self, value: object) -> bool:
        return value in self.__container

    def __iter__(self) -> T.Iterator[_T]:
        return iter(self.__container.keys())

    def __len__(self) -> int:
        return len(self.__container)

    def __repr__(self) -> str:
        # Don't print 'OrderedSet("")' for an empty set.
        if self.__container:
            return 'OrderedSet("{}")'.format(
                '", "'.join(repr(e) for e in self.__container.keys()))
        return 'OrderedSet()'

    def __reversed__(self) -> T.Iterator[_T]:
        # Mypy is complaining that sets cant be reversed, which is true for
        # unordered sets, but this is an ordered, set so reverse() makes sense.
        return reversed(self.__container.keys())  # type: ignore

    def add(self, value: _T) -> None:
        # Re-adding an existing value keeps its original position.
        self.__container[value] = None

    def discard(self, value: _T) -> None:
        if value in self.__container:
            del self.__container[value]

    def move_to_end(self, value: _T, last: bool = True) -> None:
        # Mypy does not know about move_to_end, because it is not part of MutableMapping
        self.__container.move_to_end(value, last)  # type: ignore

    def pop(self, last: bool = True) -> _T:
        # Mypy does not know about the last argument, because it is not part of MutableMapping
        item, _ = self.__container.popitem(last)  # type: ignore
        return item

    def update(self, iterable: T.Iterable[_T]) -> None:
        for item in iterable:
            self.__container[item] = None

    def difference(self, set_: T.Union[T.Set[_T], 'OrderedSet[_T]']) -> 'OrderedSet[_T]':
        # Returns a new OrderedSet preserving this set's insertion order.
        return type(self)(e for e in self if e not in set_)
class BuildDirLock:
    """Advisory inter-process lock on a Meson build directory.

    Used as a context manager: entering acquires an exclusive, non-blocking
    lock on meson-private/meson.lock (fcntl.flock where have_fcntl is set,
    msvcrt.locking where have_msvcrt is set); exiting releases it and closes
    the file. If the lock is already held elsewhere, MesonException is
    raised.
    """

    def __init__(self, builddir: str) -> None:
        self.lockfilename = os.path.join(builddir, 'meson-private/meson.lock')

    def __enter__(self) -> None:
        self.lockfile = open(self.lockfilename, 'w')
        try:
            if have_fcntl:
                # Exclusive (LOCK_EX) and non-blocking (LOCK_NB): fail fast
                # instead of waiting for the other process.
                fcntl.flock(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            elif have_msvcrt:
                # Non-blocking lock of the first byte of the file.
                msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
        except (BlockingIOError, PermissionError):
            # Either exception signals "already locked", depending on OS.
            self.lockfile.close()
            raise MesonException('Some other Meson process is already using this build directory. Exiting.')

    def __exit__(self, *args: T.Any) -> None:
        if have_fcntl:
            fcntl.flock(self.lockfile, fcntl.LOCK_UN)
        elif have_msvcrt:
            msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
        self.lockfile.close()
def relpath(path: str, start: str) -> str:
    """os.path.relpath() that degrades gracefully.

    On Windows a relative path cannot be computed between two different
    drives (e.g. c:\\foo and f:\\bar); in that case (or on any other
    TypeError/ValueError) the original absolute path is returned unchanged.
    """
    result = path
    try:
        result = os.path.relpath(path, start)
    except (TypeError, ValueError):
        pass
    return result
def path_is_in_root(path: Path, root: Path, resolve: bool = False) -> bool:
    """Return whether *path* lies within the directory *root*.

    With resolve=True both paths are resolved (made absolute, symlinks
    followed) before the containment check. relative_to() raising
    ValueError is the signal that the path is outside the root.
    """
    if resolve:
        path, root = path.resolve(), root.resolve()
    try:
        path.relative_to(root)
    except ValueError:
        return False
    return True
def relative_to_if_possible(path: Path, root: Path, resolve: bool = False) -> Path:
    """Return *path* relative to *root*, or the original *path* if that is
    impossible (e.g. the path lies outside the root)."""
    base = root.resolve() if resolve else root
    target = path.resolve() if resolve else path
    try:
        return target.relative_to(base)
    except ValueError:
        # Deliberately return the caller's path untouched, not the
        # resolved variant.
        return path
class LibType(enum.IntEnum):

    """Enumeration for library types.

    SHARED/STATIC request exactly that library kind, while the PREFER_*
    values express a preference — the actual fallback behaviour lives in
    the code that consumes this enum, not in the values themselves.
    """

    SHARED = 0
    STATIC = 1
    PREFER_SHARED = 2
    PREFER_STATIC = 3
class ProgressBarFallback:  # lgtm [py/iter-returns-non-self]
    '''Minimal stand-in used when tqdm is not installed.

    Two modes: constructed with an iterable it merely wraps that iterable
    (no progress output); constructed without one it prints a dot-based
    indicator to stdout (one dot per 10% when a total is known, one dot per
    update otherwise). Since this class is not an actual iterator the
    'Iterator does not return self from __iter__' warning is suppressed.
    '''

    def __init__(self, iterable: T.Optional[T.Iterable[str]] = None, total: T.Optional[int] = None,
                 bar_type: T.Optional[str] = None, desc: T.Optional[str] = None):
        if iterable is not None:
            # Pass-through mode: just remember the iterator and do nothing else.
            self.iterable = iter(iterable)
            return
        self.total = total
        self.done = 0
        self.printed_dots = 0
        if bar_type == 'download' and self.total:
            print('Download size:', self.total)
        if desc:
            print('{}: '.format(desc), end='')

    def __iter__(self) -> T.Iterator[str]:
        # Pretend to be an iterator when used as one; prints no progress.
        return self.iterable

    def __next__(self) -> str:
        return next(self.iterable)

    def print_dot(self) -> None:
        print('.', end='')
        sys.stdout.flush()
        self.printed_dots += 1

    def update(self, progress: int) -> None:
        self.done += progress
        if not self.total:
            # Without a total, simply emit one dot per call.
            self.print_dot()
            return
        # Catch up to done/total expressed in tenths.
        target = int(self.done / self.total * 10)
        while self.printed_dots < target:
            self.print_dot()

    def close(self) -> None:
        print('')
try:
    from tqdm import tqdm
except ImportError:
    # tqdm is an optional dependency: fall back to the dot-printing
    # ProgressBarFallback defined above.
    # ideally we would use a typing.Protocol here, but it's part of typing_extensions until 3.8
    ProgressBar = ProgressBarFallback  # type: T.Union[T.Type[ProgressBarFallback], T.Type[ProgressBarTqdm]]
else:
    class ProgressBarTqdm(tqdm):
        # Thin tqdm subclass that accepts the same bar_type keyword as
        # ProgressBarFallback and maps it onto tqdm options.
        def __init__(self, *args: T.Any, bar_type: T.Optional[str] = None, **kwargs: T.Any) -> None:
            if bar_type == 'download':
                # Download bars count bytes and stay on screen when done.
                kwargs.update({'unit': 'bytes', 'leave': True})
            else:
                kwargs.update({'leave': False})
            kwargs['ncols'] = 100
            super().__init__(*args, **kwargs)

    ProgressBar = ProgressBarTqdm
def get_wine_shortpath(winecmd: T.List[str], wine_paths: T.Sequence[str]) -> str:
    """Shorten every entry of *wine_paths* to its DOS 8.3 form.

    WINEPATH has a hard length limit, so each path is converted via a tiny
    batch script executed under wine. On subprocess failure the original
    paths are joined as a best effort; raises MesonException if the final
    string still exceeds 2048 characters.
    """
    wine_paths = list(OrderedSet(wine_paths))
    # Short, unique-ish name for the throwaway batch script in the cwd.
    script_name = '%s.bat' % str(uuid.uuid4()).lower()[:5]
    with open(script_name, mode='w') as script:
        script.write("@ECHO OFF\nfor %%x in (%*) do (\n echo|set /p=;%~sx\n)\n")
        script.flush()
    try:
        with open(os.devnull, 'w') as stderr:
            short = subprocess.check_output(
                winecmd + ['cmd', '/C', script_name] + wine_paths,
                stderr=stderr).decode('utf-8')
    except subprocess.CalledProcessError as e:
        print("Could not get short paths: %s" % e)
        short = ';'.join(wine_paths)
    finally:
        os.remove(script_name)
    if len(short) > 2048:
        raise MesonException(
            'WINEPATH size {} > 2048'
            ' this will cause random failure.'.format(len(short)))
    return short.strip(';')
def run_once(func: T.Callable[..., _T]) -> T.Callable[..., _T]:
    """Decorator: execute *func* only once and replay its first result.

    Subsequent calls ignore their arguments entirely and return the value
    cached from the first call.
    """
    cached = []  # type: T.List[_T]

    @wraps(func)
    def wrapper(*args: T.Any, **kwargs: T.Any) -> _T:
        if not cached:
            cached.append(func(*args, **kwargs))
        return cached[0]
    return wrapper
class OptionProxy(T.Generic[_T]):
    """Lightweight (value, choices) holder.

    Used to present an overridden option value through the same attribute
    interface as a real user option.
    """

    def __init__(self, value: _T, choices: T.Optional[T.List[str]] = None):
        self.value = value
        self.choices = choices

    def set_value(self, v: _T) -> None:
        # XXX: should this be an error? Overriding a proxied value silently
        # mutates only this proxy, not the underlying option.
        self.value = v
class OptionOverrideProxy(collections.abc.MutableMapping):
    '''Mimic an option mapping while transparently overriding selected
    option values.

    Lookups that hit an override return an OptionProxy wrapping the
    validated override value; everything else falls through to the merged
    option dictionaries.
    '''
    # TODO: the typing here could be made more explicit using a TypeDict from
    # python 3.8 or typing_extensions

    def __init__(self, overrides: T.Dict['OptionKey', T.Any], *options: 'KeyedOptionDictType'):
        self.overrides = overrides.copy()
        self.options: T.Dict['OptionKey', UserOption] = {}
        for opts in options:
            self.options.update(opts)

    def __getitem__(self, key: 'OptionKey') -> T.Union['UserOption', OptionProxy]:
        if key not in self.options:
            raise KeyError('Option not found', key)
        opt = self.options[key]
        if key in self.overrides:
            # Validate the raw override through the real option, then hand
            # back a proxy carrying the option's choices (if any).
            return OptionProxy(opt.validate_value(self.overrides[key]),
                               getattr(opt, 'choices', None))
        return opt

    def __setitem__(self, key: 'OptionKey', value: T.Union['UserOption', OptionProxy]) -> None:
        # Writing stores only the raw value as an override.
        self.overrides[key] = value.value

    def __delitem__(self, key: 'OptionKey') -> None:
        del self.overrides[key]

    def __iter__(self) -> T.Iterator['OptionKey']:
        return iter(self.options)

    def __len__(self) -> int:
        return len(self.options)

    def copy(self) -> 'OptionOverrideProxy':
        return OptionOverrideProxy(self.overrides.copy(), self.options.copy())
class OptionType(enum.Enum):

    """Enum used to specify what kind of argument a thing is.

    The classification itself is performed by _classify_argument() below.
    """

    BUILTIN = 0   # one of the built-in options listed in _BUILTIN_NAMES
    BASE = 1      # 'b_'-prefixed base options
    COMPILER = 2  # per-language compiler options (key has a lang)
    PROJECT = 3   # project-defined options (the fallback)
    BACKEND = 4   # 'backend_'-prefixed options
# This is copied from coredata. There is no way to share this, because this
# is used in the OptionKey constructor, and the coredata lists are
# OptionKeys...
# Option names that _classify_argument() treats as BUILTIN.
_BUILTIN_NAMES = {
    # Installation directories.
    'prefix',
    'bindir',
    'datadir',
    'includedir',
    'infodir',
    'libdir',
    'libexecdir',
    'localedir',
    'localstatedir',
    'mandir',
    'sbindir',
    'sharedstatedir',
    'sysconfdir',
    # Core build behaviour.
    'auto_features',
    'backend',
    'buildtype',
    'debug',
    'default_library',
    'errorlogs',
    'install_umask',
    'layout',
    'optimization',
    'stdsplit',
    'strip',
    'unity',
    'unity_size',
    'warning_level',
    'werror',
    'wrap_mode',
    'force_fallback_for',
    'pkg_config_path',
    'cmake_prefix_path',
}
def _classify_argument(key: 'OptionKey') -> OptionType:
    """Classify arguments into groups so we know which dict to assign them to."""
    # Guard-clause style: first match wins.
    if key.name.startswith('b_'):
        assert key.machine is MachineChoice.HOST, str(key)
        return OptionType.BASE
    if key.lang is not None:
        return OptionType.COMPILER
    if key.name in _BUILTIN_NAMES:
        return OptionType.BUILTIN
    if key.name.startswith('backend_'):
        assert key.machine is MachineChoice.HOST, str(key)
        return OptionType.BACKEND
    # Anything else is a project-defined option.
    assert key.machine is MachineChoice.HOST, str(key)
    return OptionType.PROJECT
@total_ordering
class OptionKey:

    """Represents an option key in the various option dictionaries.

    This provides a flexible, powerful way to map option names from their
    external form (things like subproject:build.option) to something that is
    internally easier to reason about and produce.

    Instances are immutable (see __setattr__) and hashable, so they can be
    used as dictionary keys.
    """

    __slots__ = ['name', 'subproject', 'machine', 'lang', '_hash', 'type']

    name: str
    subproject: str
    machine: MachineChoice
    lang: T.Optional[str]
    _hash: int
    type: OptionType

    def __init__(self, name: str, subproject: str = '',
                 machine: MachineChoice = MachineChoice.HOST,
                 lang: T.Optional[str] = None, _type: T.Optional[OptionType] = None):
        # The _type argument is kinda private: we want to be able to save
        # the state and avoid the lookup function when pickling/unpickling,
        # but we need to be able to calculate it when constructing a new
        # OptionKey.
        object.__setattr__(self, 'name', name)
        object.__setattr__(self, 'subproject', subproject)
        object.__setattr__(self, 'machine', machine)
        object.__setattr__(self, 'lang', lang)
        object.__setattr__(self, '_hash', hash((name, subproject, machine, lang)))
        if _type is None:
            _type = _classify_argument(self)
        object.__setattr__(self, 'type', _type)

    def __setattr__(self, key: str, value: T.Any) -> None:
        # All legitimate writes happen through object.__setattr__ in
        # __init__; everything else is rejected to guarantee immutability.
        raise AttributeError('OptionKey instances do not support mutation.')

    def __getstate__(self) -> T.Dict[str, T.Any]:
        # 'type' is stored under the '_type' key so this dict can be
        # splatted straight into __init__ by __setstate__.
        return {
            'name': self.name,
            'subproject': self.subproject,
            'machine': self.machine,
            'lang': self.lang,
            '_type': self.type,
        }

    def __setstate__(self, state: T.Dict[str, T.Any]) -> None:
        """De-serialize the state of a pickle.

        This is very clever. __init__ is not a constructor, it's an
        initializer, therefore it's safe to call more than once. We create a
        state in the custom __getstate__ method, which is valid to pass
        splatted to the initializer.
        """
        # Mypy doesn't like this, because it's so clever.
        self.__init__(**state)  # type: ignore

    def __hash__(self) -> int:
        return self._hash

    def __eq__(self, other: object) -> bool:
        if isinstance(other, OptionKey):
            return (
                self.name == other.name and
                self.subproject == other.subproject and
                self.machine is other.machine and
                self.lang == other.lang)
        return NotImplemented

    def __lt__(self, other: object) -> bool:
        if isinstance(other, OptionKey):
            # BUG FIX: the old implementation and-ed together the `<` of
            # every field, which is not a valid strict ordering (two
            # different keys could each be "not less than" the other,
            # breaking sorting and @total_ordering) and raised TypeError
            # when one lang was None. Compare field tuples instead; a None
            # lang maps to '' so it sorts before any real language.
            return (
                (self.subproject, self.machine, self.lang or '', self.name) <
                (other.subproject, other.machine, other.lang or '', other.name))
        return NotImplemented

    def __str__(self) -> str:
        out = self.name
        if self.lang:
            out = f'{self.lang}_{out}'
        if self.machine is MachineChoice.BUILD:
            out = f'build.{out}'
        if self.subproject:
            out = f'{self.subproject}:{out}'
        return out

    def __repr__(self) -> str:
        return f'OptionKey({repr(self.name)}, {repr(self.subproject)}, {repr(self.machine)}, {repr(self.lang)})'

    @classmethod
    def from_string(cls, raw: str) -> 'OptionKey':
        """Parse the raw command line format into a three part tuple.

        This takes strings like `mysubproject:build.myoption` and creates an
        OptionKey out of them.
        """
        try:
            subproject, raw2 = raw.split(':')
        except ValueError:
            subproject, raw2 = '', raw

        if raw2.startswith('build.'):
            # BUG FIX: this used raw2.lstrip('build.'), but lstrip strips a
            # *character set*, not a prefix — 'build.buildtype' became
            # 'type'. Slice the exact prefix off instead.
            raw3 = raw2[len('build.'):]
            for_machine = MachineChoice.BUILD
        else:
            raw3 = raw2
            for_machine = MachineChoice.HOST

        from .compilers import all_languages
        if any(raw3.startswith(f'{l}_') for l in all_languages):
            lang, opt = raw3.split('_', 1)
        else:
            lang, opt = None, raw3
        assert ':' not in opt
        assert 'build.' not in opt
        return cls(opt, subproject, for_machine, lang)

    def evolve(self, name: T.Optional[str] = None, subproject: T.Optional[str] = None,
               machine: T.Optional[MachineChoice] = None, lang: T.Optional[str] = '') -> 'OptionKey':
        """Create a new copy of this key, but with altered members.

        For example:
        >>> a = OptionKey('foo', '', MachineChoice.Host)
        >>> b = OptionKey('foo', 'bar', MachineChoice.Host)
        >>> b == a.evolve(subproject='bar')
        True
        """
        # We have to be a little clever with lang here: None is a valid
        # value for non-compiler options, so '' acts as the "keep the
        # current lang" sentinel instead.
        return OptionKey(
            name if name is not None else self.name,
            subproject if subproject is not None else self.subproject,
            machine if machine is not None else self.machine,
            lang if lang != '' else self.lang,
        )

    def as_root(self) -> 'OptionKey':
        """Convenience method for key.evolve(subproject='')."""
        return self.evolve(subproject='')

    def as_build(self) -> 'OptionKey':
        """Convenience method for key.evolve(machine=MachineChoice.BUILD)."""
        return self.evolve(machine=MachineChoice.BUILD)

    def as_host(self) -> 'OptionKey':
        """Convenience method for key.evolve(machine=MachineChoice.HOST)."""
        return self.evolve(machine=MachineChoice.HOST)

    def is_backend(self) -> bool:
        """Convenience method to check if this is a backend option."""
        return self.type is OptionType.BACKEND

    def is_builtin(self) -> bool:
        """Convenience method to check if this is a builtin option."""
        return self.type is OptionType.BUILTIN

    def is_compiler(self) -> bool:
        """Convenience method to check if this is a compiler option."""
        return self.type is OptionType.COMPILER

    def is_project(self) -> bool:
        """Convenience method to check if this is a project option."""
        return self.type is OptionType.PROJECT

    def is_base(self) -> bool:
        """Convenience method to check if this is a base option."""
        return self.type is OptionType.BASE
430d09e89786800d541f740a7e6fa36a94ed5494 | 2,226 | py | Python | weapon.py | cheramilm/qieziparty | 350825576f40736f13ab61109692185771996071 | [
"CC0-1.0"
] | null | null | null | weapon.py | cheramilm/qieziparty | 350825576f40736f13ab61109692185771996071 | [
"CC0-1.0"
] | null | null | null | weapon.py | cheramilm/qieziparty | 350825576f40736f13ab61109692185771996071 | [
"CC0-1.0"
] | null | null | null | from constants import *
from random import *
class Weapon:
    """A weapon with limited durability.

    Once its remaining uses hit zero it degrades into the bare-hands weapon
    (to_hand), whose stats come from the `constants` star import. All
    user-facing messages are Chinese and kept verbatim.
    """

    # Class-level defaults; instances overwrite them in __init__.
    name = ''
    attackValue = 0
    leftTimes = 0
    totalTimes = 0
    position = 0

    def __init__(self, name, attack_value, left_times):
        self.name = name
        self.attackValue = attack_value
        self.leftTimes = left_times
        self.totalTimes = left_times

    def __str__(self):
        return "『%s』, 攻击力:『%d』,剩余攻击次数:『%d』" % (self.name, self.attackValue, self.leftTimes)

    def better_than_me(self, another):
        """Return 1 when *another* is considered an upgrade, else 0."""
        if another.attackValue >= self.attackValue and another.leftTimes >= self.leftTimes:
            return 1
        if another.attackValue * another.leftTimes > self.attackValue * self.leftTimes * 2:
            return 1
        # Chained comparison: another hits harder AND we hold bare hands.
        if another.attackValue > self.attackValue == handAttack:
            return 1
        return 0

    def single_attack(self):
        """Score of one strike, discounted by remaining durability."""
        return self.attackValue * (self.leftTimes / self.totalTimes) + 0.1

    def to_hand(self):
        """Degrade this weapon into bare hands."""
        self.name = handName
        self.attackValue = handAttack
        self.leftTimes = handTimes
        self.totalTimes = handTimes

    @staticmethod
    def burst(value):
        """Double *value* with probability burstRate (a 'burst')."""
        if random() <= burstRate:
            print("叮!棍子暴走了,小心了!!!")
            return value * 2
        return value

    def attack(self):
        """Perform one attack and return the damage dealt."""
        if self.leftTimes > 0:
            self.leftTimes -= 1
            damage = self.attackValue
            if self.leftTimes == 0:
                # Used up: degrade before the burst roll.
                self.to_hand()
            return self.burst(damage)
        if self.name != handName:
            # A depleted real weapon is discarded automatically.
            print("『%s』剩余攻击力为『0』,自动丢弃!" % self.name)
            self.to_hand()
            return self.burst(self.attackValue)
        if random() <= burstRate:
            print("有一根茄棍获得神农垂青,突然暴起了!!!")
            return handBurstAttack
        print("茄棍过于疲劳,无力攻击,等神农垂青吧,好悲哀啊。。。")
        return 0
# Shared bare-hands fallback weapon; handName/handAttack/handTimes come
# from the `constants` star import above.
hand = Weapon(handName, handAttack, handTimes)
# Prototype weapons, roughly ordered from high attack / few uses down to
# low attack / many uses.
weaponTemplates = [Weapon('檀木棍', 130, 5), Weapon('梨木棍', 25, 40), Weapon('柚木棍', 22, 40), Weapon('桦木棍', 20, 35),
                   Weapon('椴木棍', 15, 8), Weapon('树枝', 2, 50)]
| 30.916667 | 110 | 0.552561 |
46902cc7f49c279a1df1d19fbaafb982eda9d750 | 19,445 | py | Python | other_models/sonet/layers.py | ZJUCAGD/GTS-CNN | a329f314b795f0dea0f46db623ac955a47619e7d | [
"MIT"
] | null | null | null | other_models/sonet/layers.py | ZJUCAGD/GTS-CNN | a329f314b795f0dea0f46db623ac955a47619e7d | [
"MIT"
] | null | null | null | other_models/sonet/layers.py | ZJUCAGD/GTS-CNN | a329f314b795f0dea0f46db623ac955a47619e7d | [
"MIT"
] | null | null | null | import os, sys
DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.abspath(os.path.join(DIR, '../../'))
sys.path.append(ROOT_DIR)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.modules.batchnorm import _BatchNorm
import numpy as np
import math
import torch.utils.model_zoo as model_zoo
import time
from . import operations
from models.sonet.options import Options
from utils.kcnet_utils import Netpara, debugPrint, setup_seed, worker_init_fn
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class MyBatchNorm1d(_BatchNorm):
r"""Applies Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, momentum_decay_step=None, momentum_decay=1):
super(MyBatchNorm1d, self).__init__(num_features, eps, momentum, affine)
self.momentum_decay_step = momentum_decay_step
self.momentum_decay = momentum_decay
self.momentum_original = self.momentum
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
super(MyBatchNorm1d, self)._check_input_dim(input)
def forward(self, input, epoch=None):
if (epoch is not None) and (epoch >= 1) and (self.momentum_decay_step is not None) and (self.momentum_decay_step > 0):
# perform momentum decay
self.momentum = self.momentum_original * (self.momentum_decay**(epoch//self.momentum_decay_step))
if self.momentum < 0.01:
self.momentum = 0.01
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
class MyBatchNorm2d(_BatchNorm):
    r"""BatchNorm over 4D (N, C, H, W) input with epoch-based momentum decay.

    Behaves like standard spatial batch normalization, except that
    ``forward`` accepts an optional ``epoch``: every ``momentum_decay_step``
    epochs the effective momentum is multiplied by ``momentum_decay``,
    floored at 0.01.

    Args:
        num_features: C from an expected input of size N x C x H x W.
        eps: value added to the denominator for numerical stability.
        momentum: initial momentum for the running statistics.
        affine: if True, this module has learnable affine parameters.
        momentum_decay_step: decay interval in epochs (None disables decay).
        momentum_decay: multiplicative decay factor per interval.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 momentum_decay_step=None, momentum_decay=1):
        super(MyBatchNorm2d, self).__init__(num_features, eps, momentum, affine)
        self.momentum_decay_step = momentum_decay_step
        self.momentum_decay = momentum_decay
        # Remember the starting momentum; decay is always computed from it.
        self.momentum_original = self.momentum

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
        super(MyBatchNorm2d, self)._check_input_dim(input)

    def forward(self, input, epoch=None):
        step = self.momentum_decay_step
        if (epoch is not None) and (epoch >= 1) and (step is not None) and (step > 0):
            # Decay from the original momentum, clamped at 0.01.
            decayed = self.momentum_original * (self.momentum_decay ** (epoch // step))
            self.momentum = max(decayed, 0.01)
        return F.batch_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            self.training, self.momentum, self.eps)
class MyLinear(nn.Module):
    """Linear layer with optional normalization and activation bundled in.

    Forward order: linear -> norm ('batch' = MyBatchNorm1d, which is
    epoch-aware, or 'instance') -> activation ('relu'/'elu'/'swish'/
    'leakyrelu'); either stage is skipped when its option is None.
    """

    def __init__(self, in_features, out_features, activation=None, normalization=None,
                 momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization
        self.linear = nn.Linear(in_features, out_features, bias=True)
        if normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True,
                                      momentum_decay_step=bn_momentum_decay_step,
                                      momentum_decay=bn_momentum_decay)
        elif normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if activation == 'relu':
            self.act = nn.ReLU()
        elif activation == 'elu':
            self.act = nn.ELU(alpha=1.0)
        elif activation == 'swish':
            self.act = Swish()
        elif activation == 'leakyrelu':
            self.act = nn.LeakyReLU(0.1)
        self.weight_init()

    def weight_init(self):
        # He-style fan-in init for the linear weight, standard affine init
        # for normalization layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                fan_in = m.in_features
                m.weight.data.normal_(0, math.sqrt(2. / fan_in))
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, MyBatchNorm1d) or isinstance(m, nn.InstanceNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x, epoch=None):
        x = self.linear(x)
        if self.normalization == 'batch':
            # Epoch is forwarded so MyBatchNorm1d can decay its momentum.
            x = self.norm(x, epoch)
        elif self.normalization is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.act(x)
        return x
class MyConv2d(nn.Module):
    """Conv2d with optional normalization and activation bundled in.

    Forward order: conv -> norm ('batch' = MyBatchNorm2d, which is
    epoch-aware, or 'instance') -> activation ('relu'/'elu'/'swish'/
    'leakyrelu'); either stage is skipped when its option is None.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 bias=True, activation=None, momentum=0.1, normalization=None,
                 bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyConv2d, self).__init__()
        self.activation = activation
        self.normalization = normalization
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
        if normalization == 'batch':
            self.norm = MyBatchNorm2d(out_channels, momentum=momentum, affine=True,
                                      momentum_decay_step=bn_momentum_decay_step,
                                      momentum_decay=bn_momentum_decay)
        elif normalization == 'instance':
            self.norm = nn.InstanceNorm2d(out_channels, momentum=momentum, affine=True)
        if activation == 'relu':
            self.act = nn.ReLU()
        elif activation == 'elu':
            self.act = nn.ELU(alpha=1.0)
        elif activation == 'swish':
            self.act = Swish()
        elif activation == 'leakyrelu':
            self.act = nn.LeakyReLU(0.1)
        self.weight_init()

    def weight_init(self):
        # He-style fan-in init for convolutions, standard affine init for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                fan_in = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_in))
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, MyBatchNorm2d) or isinstance(m, nn.InstanceNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x, epoch=None):
        x = self.conv(x)
        if self.normalization == 'batch':
            # Epoch is forwarded so MyBatchNorm2d can decay its momentum.
            x = self.norm(x, epoch)
        elif self.normalization is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.act(x)
        return x
class UpConv(nn.Module):
    """2x nearest-neighbour upsample followed by a 3x3 MyConv2d (stride 1, pad 1).

    NOTE(review): kernel_size/stride/padding/output_padding/bias are accepted
    but ignored — the inner conv hard-codes 3/1/1/True; kept as-is to
    preserve the original interface and behaviour.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
                 output_padding=0, bias=True, activation=None, normalization=None):
        super(UpConv, self).__init__()
        self.activation = activation
        self.normalization = normalization
        self.up_sample = nn.Upsample(scale_factor=2)
        self.conv = MyConv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                             bias=True, activation=activation, normalization=normalization)
        self.weight_init()

    def weight_init(self):
        # NOTE(review): this re-initializes the inner conv with a fan-out
        # count (out_channels) and bias 0.001, overriding MyConv2d's own
        # fan-in init — preserved verbatim to keep behaviour identical.
        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if m.bias is not None:
                    m.bias.data.fill_(0.001)
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        return self.conv(self.up_sample(x))
class EquivariantLayer(nn.Module):
    """Per-point (1x1) Conv1d with optional normalization and activation.

    Operates on BxCxN point features; forward order is
    conv -> norm ('batch' = MyBatchNorm1d, epoch-aware, or 'instance')
    -> activation ('relu'/'elu'/'swish'/'leakyrelu'), skipping stages whose
    option is None.
    """

    def __init__(self, num_in_channels, num_out_channels, activation='relu',
                 normalization=None, momentum=0.1, bn_momentum_decay_step=None,
                 bn_momentum_decay=1):
        super(EquivariantLayer, self).__init__()
        self.num_in_channels = num_in_channels
        self.num_out_channels = num_out_channels
        self.activation = activation
        self.normalization = normalization
        self.conv = nn.Conv1d(self.num_in_channels, self.num_out_channels,
                              kernel_size=1, stride=1, padding=0)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(self.num_out_channels, momentum=momentum, affine=True,
                                      momentum_decay_step=bn_momentum_decay_step,
                                      momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(self.num_out_channels, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif self.activation == 'elu':
            self.act = nn.ELU(alpha=1.0)
        elif self.activation == 'swish':
            self.act = Swish()
        elif self.activation == 'leakyrelu':
            self.act = nn.LeakyReLU(0.1)
        self.weight_init()

    def weight_init(self):
        # He-style fan-in init for the 1D conv, standard affine init for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                fan_in = m.kernel_size[0] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_in))
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, MyBatchNorm1d) or isinstance(m, nn.InstanceNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x, epoch=None):
        y = self.conv(x)
        if self.normalization == 'batch':
            # Epoch is forwarded so MyBatchNorm1d can decay its momentum.
            y = self.norm(y, epoch)
        elif self.normalization is not None:
            y = self.norm(y)
        if self.activation is not None:
            y = self.act(y)
        return y
class KNNModule(nn.Module):
    """Local feature aggregation over k-nearest-neighbour groups.

    Each point's features are concatenated with its K neighbours'
    center-normalized coordinates, passed through a stack of 1x1 MyConv2d
    layers, and max-pooled over the neighbourhood.
    """

    def __init__(self, in_channels, out_channels_list, activation, normalization, momentum=0.1,
                 bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(KNNModule, self).__init__()
        self.layers = nn.ModuleList()
        previous_out_channels = in_channels
        for c_out in out_channels_list:
            self.layers.append(MyConv2d(previous_out_channels, c_out, kernel_size=1, stride=1, padding=0, bias=True,
                                        activation=activation, normalization=normalization,
                                        momentum=momentum, bn_momentum_decay_step=bn_momentum_decay_step,
                                        bn_momentum_decay=bn_momentum_decay))
            previous_out_channels = c_out

    def forward(self, coordinate, x, precomputed_knn_I, K, center_type, epoch=None):
        '''
        :param coordinate: Bx3xM Variable
        :param x: BxCxM Variable
        :param precomputed_knn_I: BxMxK'
        :param K: K neighbors
        :param center_type: 'center' or 'avg'
        :return: (neighbors_center Bx3xM, feature BxC_outxM)
        '''
        # 0. compute knn
        # 1. for each node, calculate the center of its k neighborhood
        # 2. normalize nodes with the corresponding center
        # 3. fc for these normalized points
        # 4. maxpool for each neighborhood
        coordinate_tensor = coordinate.data  # Bx3xM
        if precomputed_knn_I is not None:
            # Reuse the precomputed neighbour indices (must provide >= K).
            assert precomputed_knn_I.size()[2] >= K
            knn_I = precomputed_knn_I[:, :, 0:K]
        else:
            # Brute-force pairwise squared distances, then take the K smallest.
            coordinate_Mx1 = coordinate_tensor.unsqueeze(3)  # Bx3xMx1
            coordinate_1xM = coordinate_tensor.unsqueeze(2)  # Bx3x1xM
            norm = torch.sum((coordinate_Mx1 - coordinate_1xM) ** 2, dim=1)  # BxMxM, each row corresponds to each coordinate - other coordinates
            knn_D, knn_I = torch.topk(norm, k=K, dim=2, largest=False, sorted=True)  # BxMxK
        # debug
        # print(knn_D[0])
        # print(knn_I[0])
        # assert False
        # get gpu_id
        # NOTE(review): device_index is computed but never used below.
        device_index = x.device.index
        neighbors = operations.knn_gather_wrapper(coordinate_tensor, knn_I)  # Bx3xMxK
        if center_type == 'avg':
            # Neighbourhood centroid as the local origin.
            neighbors_center = torch.mean(neighbors, dim=3, keepdim=True)  # Bx3xMx1
        elif center_type == 'center':
            # The point itself as the local origin.
            neighbors_center = coordinate_tensor.unsqueeze(3)  # Bx3xMx1
        neighbors_decentered = (neighbors - neighbors_center).detach()
        neighbors_center = neighbors_center.squeeze(3).detach()
        # debug
        # print(neighbors[0, 0])
        # print(neighbors_avg[0, 0])
        # print(neighbors_decentered[0, 0])
        # assert False
        x_neighbors = operations.knn_gather_by_indexing(x, knn_I)  # BxCxMxK
        x_augmented = torch.cat((neighbors_decentered, x_neighbors), dim=1)  # Bx(3+C)xMxK
        for layer in self.layers:
            x_augmented = layer(x_augmented, epoch)
        # Max-pool over the K neighbours of each point.
        feature, _ = torch.max(x_augmented, dim=3, keepdim=False)
        return neighbors_center, feature
class PointNet(nn.Module):
    """Stack of per-point EquivariantLayers over BxCxN features.

    All layers share the given activation/normalization except the last,
    which is a plain linear (1x1 conv) layer with neither.
    """

    def __init__(self, in_channels, out_channels_list, activation, normalization, momentum=0.1,
                 bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(PointNet, self).__init__()
        self.layers = nn.ModuleList()
        c_in = in_channels
        last = len(out_channels_list) - 1
        for i, c_out in enumerate(out_channels_list):
            if i == last:
                # Final layer: no activation, no normalization.
                self.layers.append(EquivariantLayer(c_in, c_out, None, None))
            else:
                self.layers.append(EquivariantLayer(c_in, c_out, activation, normalization,
                                                    momentum, bn_momentum_decay_step,
                                                    bn_momentum_decay))
            c_in = c_out

    def forward(self, x, epoch=None):
        for layer in self.layers:
            x = layer(x, epoch)
        return x
class PointResNet(nn.Module):
    """PointNet-style stack with a skip connection from the first layer.

    Layer graph (k = len(out_channels_list))::

        in       -> out[0]
        out[0]   -> out[1]
        ...
        concat(out[0], out[k-2]) -> out[k-1]   # final layer, no act/norm

    Requires k >= 2: the final layer always consumes the first layer's
    output concatenated onto the previous layer's output along the channel
    dimension.

    :param in_channels: input channel count (x is BxCxN).
    :param out_channels_list: per-layer output channel counts.
    :param activation: activation name for all but the last layer.
    :param normalization: normalization name for all but the last layer.
    :param momentum: initial BN momentum.
    :param bn_momentum_decay_step: BN momentum decay interval in epochs.
    :param bn_momentum_decay: BN momentum decay factor.
    """

    def __init__(self, in_channels, out_channels_list, activation, normalization, momentum=0.1,
                 bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(PointResNet, self).__init__()
        assert len(out_channels_list) >= 2, 'PointResNet needs at least two layers'
        self.out_channels_list = out_channels_list
        self.layers = nn.ModuleList()
        previous_out_channels = in_channels
        for i, c_out in enumerate(out_channels_list):
            if i != len(out_channels_list) - 1:
                self.layers.append(EquivariantLayer(previous_out_channels, c_out, activation,
                                                    normalization, momentum,
                                                    bn_momentum_decay_step, bn_momentum_decay))
            else:
                # Final layer: input is previous features + the skip from
                # layer 0; no activation/normalization.
                self.layers.append(EquivariantLayer(previous_out_channels + out_channels_list[0],
                                                    c_out, None, None))
            previous_out_channels = c_out

    def forward(self, x, epoch=None):
        """x: BxCxN tensor; epoch optionally drives BN momentum decay."""
        first = self.layers[0](x, epoch)  # B x out[0] x N
        # BUG FIX: the old loop left its accumulator undefined when only two
        # layers were configured (range(1, 1) is empty), crashing with
        # UnboundLocalError at the final concatenation. Seeding the
        # accumulator with the first layer's output is identical for k >= 3
        # and well-defined for k == 2.
        hidden = first
        for i in range(1, len(self.out_channels_list) - 1):
            hidden = self.layers[i](hidden, epoch)
        return self.layers[-1](torch.cat((first, hidden), dim=1), epoch)
if __name__ == '__main__':
    # Smoke test: build the three sub-networks used by the pipeline and pass
    # them to Netpara (defined elsewhere; presumably reports parameter
    # counts — TODO confirm).
    opt = Options().parse()
    # per-point feature extractor: 6 input channels -> 256-dim features
    net1 = PointResNet(6, [64, 128, 256], activation=opt.activation, normalization=opt.normalization,
                       momentum=opt.bn_momentum, bn_momentum_decay_step=opt.bn_momentum_decay_step, bn_momentum_decay=opt.bn_momentum_decay)
    # kNN aggregation over 3D coordinates + the 256-dim features from net1
    net2 = KNNModule(3 + 256, (384, 384), activation=opt.activation, normalization=opt.normalization,
                     momentum=opt.bn_momentum, bn_momentum_decay_step=opt.bn_momentum_decay_step,
                     bn_momentum_decay=opt.bn_momentum_decay)
    # final per-point head: 3D coordinates + 384-dim features -> 1024-dim output
    net3 = PointNet(3+384, (512, 1024), activation=opt.activation, normalization=opt.normalization,
                    momentum=opt.bn_momentum, bn_momentum_decay_step=opt.bn_momentum_decay_step, bn_momentum_decay=opt.bn_momentum_decay)
    net=nn.ModuleList([net1,net2,net3])
    Netpara(net)
| 42.736264 | 196 | 0.627822 |
f2858ccc56fa4e71e5e6887a1d916b7576ea8994 | 4,915 | py | Python | src/oci/cims/models/update_activity_item_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/cims/models/update_activity_item_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/cims/models/update_activity_item_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_item_details import UpdateItemDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateActivityItemDetails(UpdateItemDetails):
    """
    Details for updating the support ticket activity.
    **Caution:** Avoid using any confidential information when you supply string values using the API.
    """
    #: A constant which can be used with the activity_type property of a UpdateActivityItemDetails.
    #: This constant has a value of "NOTES"
    ACTIVITY_TYPE_NOTES = "NOTES"
    #: A constant which can be used with the activity_type property of a UpdateActivityItemDetails.
    #: This constant has a value of "PROBLEM_DESCRIPTION"
    ACTIVITY_TYPE_PROBLEM_DESCRIPTION = "PROBLEM_DESCRIPTION"
    #: A constant which can be used with the activity_type property of a UpdateActivityItemDetails.
    #: This constant has a value of "UPDATE"
    ACTIVITY_TYPE_UPDATE = "UPDATE"
    #: A constant which can be used with the activity_type property of a UpdateActivityItemDetails.
    #: This constant has a value of "CLOSE"
    ACTIVITY_TYPE_CLOSE = "CLOSE"
    def __init__(self, **kwargs):
        """
        Initializes a new UpdateActivityItemDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.cims.models.UpdateActivityItemDetails.type` attribute
        of this class is ``activity`` and it should not be changed.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param type:
            The value to assign to the type property of this UpdateActivityItemDetails.
        :type type: str
        :param comments:
            The value to assign to the comments property of this UpdateActivityItemDetails.
        :type comments: str
        :param activity_type:
            The value to assign to the activity_type property of this UpdateActivityItemDetails.
            Allowed values for this property are: "NOTES", "PROBLEM_DESCRIPTION", "UPDATE", "CLOSE"
        :type activity_type: str
        """
        # swagger_types / attribute_map drive the generic (de)serialization
        # machinery shared by all generated model classes.
        self.swagger_types = {
            'type': 'str',
            'comments': 'str',
            'activity_type': 'str'
        }
        self.attribute_map = {
            'type': 'type',
            'comments': 'comments',
            'activity_type': 'activityType'
        }
        self._type = None
        self._comments = None
        self._activity_type = None
        # discriminator value for this UpdateItemDetails subtype; fixed.
        self._type = 'activity'
    @property
    def comments(self):
        """
        Gets the comments of this UpdateActivityItemDetails.
        Comments updated at the time that the activity occurs.
        :return: The comments of this UpdateActivityItemDetails.
        :rtype: str
        """
        return self._comments
    @comments.setter
    def comments(self, comments):
        """
        Sets the comments of this UpdateActivityItemDetails.
        Comments updated at the time that the activity occurs.
        :param comments: The comments of this UpdateActivityItemDetails.
        :type: str
        """
        self._comments = comments
    @property
    def activity_type(self):
        """
        Gets the activity_type of this UpdateActivityItemDetails.
        The type of activity occurring.
        Allowed values for this property are: "NOTES", "PROBLEM_DESCRIPTION", "UPDATE", "CLOSE"
        :return: The activity_type of this UpdateActivityItemDetails.
        :rtype: str
        """
        return self._activity_type
    @activity_type.setter
    def activity_type(self, activity_type):
        """
        Sets the activity_type of this UpdateActivityItemDetails.
        The type of activity occurring.
        :param activity_type: The activity_type of this UpdateActivityItemDetails.
        :type: str
        """
        # Reject any value outside the documented enum (None is allowed).
        allowed_values = ["NOTES", "PROBLEM_DESCRIPTION", "UPDATE", "CLOSE"]
        if not value_allowed_none_or_none_sentinel(activity_type, allowed_values):
            raise ValueError(
                "Invalid value for `activity_type`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._activity_type = activity_type
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
3781b415c2b371e29e099dc93269c12029822659 | 23,629 | py | Python | pageObjects/common_functions/common_methods.py | harry-100/qa-automation-framework | 5fbe03e930820537e53f2d26b1c2b2bd2b222bf5 | [
"MIT"
] | null | null | null | pageObjects/common_functions/common_methods.py | harry-100/qa-automation-framework | 5fbe03e930820537e53f2d26b1c2b2bd2b222bf5 | [
"MIT"
] | null | null | null | pageObjects/common_functions/common_methods.py | harry-100/qa-automation-framework | 5fbe03e930820537e53f2d26b1c2b2bd2b222bf5 | [
"MIT"
] | null | null | null | from time import sleep
from selenium.webdriver.support.ui import WebDriverWait
from pageObjects.client.contact_clinical.pharma_data_page import PharmaDataPage
from utilities.BasePage import BasePage
from pageObjects.calendar.full_calendar.calender_page import CalendarPage
from pageObjects.calendar.full_calendar.client_page import ClientPage
from pageObjects.calendar.full_calendar.login_page import LoginPage
from pageObjects.manage.forms.forms_page import FormsPage
from pageObjects.manage.forms.forms_add_form_page import AddFormsPage
from pageObjects.workflow.workflow_page import WorkFlowPage
from pageObjects.notes.notes_page import NotesPage
from pageObjects.manage.handwritten_notes_page import HandWrittenNotesPage
from pageObjects.client.finances.finances_page import FinancesPage
from pageObjects.client.waitlist_tags.waitlist_tags_page import WaitlistTagsPage
from pageObjects.client.client_portal.client_portal_page import ClientPortalPage
from pageObjects.settings.settings_page import SettingsPage
from datetime import date
class CommonMethods(BasePage):
    """Shared Selenium helpers used across tests: bulk deletion of existing
    records (medications, receipts, invoices, notes, sessions, forms, tags)
    and creation of common fixtures (clients, sessions, services, portal
    users), for both desktop and mobile layouts.

    Most delete helpers count the existing items first and then repeatedly
    delete the first item; broad ``except`` clauses treat "nothing to
    delete" as success.  The fixed ``sleep`` calls paper over page
    re-rendering between clicks.
    """
    def __init__(self, driver):
        # Keep a direct driver handle plus one page-object per app area.
        self.driver = driver
        self.wait = WebDriverWait(self.driver, 10)
        super().__init__(driver)
        self.finances_page_obj = FinancesPage(self.driver)
        self.notes_page_obj = NotesPage(self.driver)
        self.pharma_data_page_obj = PharmaDataPage(self.driver)
        self.login_page_obj = LoginPage(self.driver)
        self.client_page_obj = ClientPage(self.driver)
        self.manage_forms_page_obj = FormsPage(self.driver)
        self.manage_forms_add_form_page_obj = AddFormsPage(self.driver)
        self.calendar_page_obj = CalendarPage(self.driver)
        self.waitlist_tags_page_obj = WaitlistTagsPage(self.driver)
        self.client_portal_page_obj = ClientPortalPage(self.driver)
        self.settings_page_obj = SettingsPage(self.driver)
    def delete_current_medication(self):
        """Delete every entry in the client's current-medications list."""
        try:
            no_of_medications = self.pharma_data_page_obj.no_of_current_medications()
            sleep(1)
            for i in range(1, no_of_medications + 1):
                self.pharma_data_page_obj.clk_current_medication_tab()
                sleep(1)
                self.pharma_data_page_obj.clk_delete_current_medication_btn()
                sleep(1)
                self.pharma_data_page_obj.clk_delete_entry_medication_btn()
        except Exception:
            self.log.info("No prior medication found")
    def delete_historical_medication(self):
        """Delete every entry in the client's historical-medications list."""
        sleep(2)
        try:
            no_of_historical_medications = self.pharma_data_page_obj.no_of_historical_medications()
            for i in range(1, no_of_historical_medications + 1):
                self.pharma_data_page_obj.clk_historical_medication_tab()
                sleep(1)
                self.pharma_data_page_obj.clk_delete_historical_medication_btn()
                sleep(1)
                self.pharma_data_page_obj.clk_delete_entry_medication_btn()
                sleep(1)
        except Exception:
            self.log.info("No prior medications found")
    def delete_prior_receipts(self):
        """Delete all of the client's existing receipts (desktop layout)."""
        sleep(2)
        no_of_receipts = self.finances_page_obj.number_of_receipts()
        try:
            for i in range(no_of_receipts):
                self.finances_page_obj.select_first_receipt()
                self.finances_page_obj.clk_delete_receipt()
                self.finances_page_obj.clk_delete_receipt_confirm()
        except Exception:
            self.log.info("Prior receipts not found")
    def delete_client_invoices(self):
        """Delete all of the client's existing invoices (desktop layout)."""
        sleep(2)
        try:
            no_of_invoices = self.finances_page_obj.get_no_of_invoices()
            sleep(0.5)
            for i in range(no_of_invoices):
                self.finances_page_obj.select_first_invoice()
                sleep(0.5)
                self.finances_page_obj.clk_delete_client_invoice()
                sleep(0.5)
                self.finances_page_obj.clk_delete_confirm_client_invoice()
                sleep(1)
        except Exception:
            self.log.info("No prior invoices found")
    def delete_client_non_session_charges(self):
        """Delete all of the client's non-session charges (desktop layout)."""
        sleep(2)
        try:
            no_of_non_session_charges = self.finances_page_obj.number_of_non_session_charge()
            print(" no of no session charges ", no_of_non_session_charges)
            sleep(0.5)
            for i in range(no_of_non_session_charges):
                self.finances_page_obj.sel_first_non_session_charge()
                sleep(0.5)
                self.finances_page_obj.clk_delete_non_session_charge()
                sleep(0.5)
                self.finances_page_obj.clk_delete_charge_non_session_charge()
                sleep(1)
        except Exception:
            self.log.info("No prior non_session_charge found")
    def delete_mobile_client_receipts(self):
        """Delete all of the client's receipts (mobile layout)."""
        sleep(2)
        no_of_receipts = self.finances_page_obj.mobile_no_of_receipts()
        for i in range(no_of_receipts):
            sleep(0.5)
            self.finances_page_obj.sel_mobile_first_receipt()
            sleep(0.2)
            self.finances_page_obj.clk_delete_receipt()
            sleep(0.2)
            self.finances_page_obj.clk_delete_receipt_confirm()
    def delete_mobile_client_invoices(self):
        """Delete all of the client's invoices (mobile layout)."""
        sleep(2)
        no_of_invoices = self.finances_page_obj.return_mobile_no_of_invoices()
        try:
            for i in range(no_of_invoices):
                sleep(0.5)
                self.finances_page_obj.clk_mobile_invoice()
                sleep(0.2)
                self.finances_page_obj.clk_mobile_invoice_delete()
                sleep(0.2)
                self.finances_page_obj.clk_mobile_invoice_delete_confirm()
        except Exception:
            self.log.info("no prior invocies found")
    def delete_mobile_session_notes(self):
        """Delete all of the client's session notes (mobile layout)."""
        sleep(2)
        no_of_sessions = self.notes_page_obj.mobile_number_of_session_notes()
        try:
            for i in range(no_of_sessions):
                sleep(0.5)
                self.notes_page_obj.mobile_clk_first_session_notes()
                sleep(0.5)
                self.notes_page_obj.clk_mobile_delete_session_note()
                sleep(0.5)
                self.notes_page_obj.clk_mobile_confirm_delete_session_note()
        except Exception:
            self.log.info("No prior sessions found")
    def delete_mobile_non_session_charge(self):
        """Delete all of the client's non-session charges (mobile layout)."""
        sleep(2)
        no_of_non_session_charges = self.finances_page_obj.mobile_no_of_non_session_charges()
        for i in range(no_of_non_session_charges):
            sleep(1)
            self.finances_page_obj.sel_mobile_first_non_session_charge()
            sleep(1)
            self.finances_page_obj.clk_mobile_delete_non_session_charge()
            sleep(1)
            self.finances_page_obj.clk_mobile_delete_confirm_non_session_charge()
    def delete_session_notes(self):
        """Delete all session notes (desktop); tolerates an optional
        'Delete and Notify' confirmation dialog per note."""
        sleep(2)
        no_of_notes = self.notes_page_obj.number_of_session_notes()
        for i in range(1, no_of_notes + 1):
            try:
                self.notes_page_obj.select_first_session_note()
                sleep(1)
                self.notes_page_obj.clk_delete_session()
                sleep(1)
                self.notes_page_obj.clk_confirm_delete_session()
                sleep(1)
                try:
                    self.notes_page_obj.clk_delete_and_notify()
                except Exception:
                    self.log.info("Delete and Notify button not found")
            except Exception:
                # a failed delete is skipped; the loop continues with the rest
                continue
    def delete_non_session_notes(self):
        """Delete all non-session notes (desktop layout)."""
        sleep(2)
        no_of_non_session_notes = self.notes_page_obj.number_of_non_session_notes()
        try:
            for i in range(1, no_of_non_session_notes + 1):
                self.notes_page_obj.sel_non_session_note()
                sleep(0.5)
                self.notes_page_obj.clk_delete_non_session_note()
                sleep(0.5)
                self.notes_page_obj.clk_confirm_delete_non_session_note()
                sleep(0.5)
        except Exception:
            self.log.info("No prior notes found")
    def delete_mobile_prior_session_note(self):
        """Delete all session notes (mobile layout)."""
        sleep(2)
        no_of_notes = self.notes_page_obj.mobile_number_of_session_notes()
        try:
            for i in range(1, no_of_notes + 1):
                sleep(1)
                self.notes_page_obj.mobile_clk_first_session_notes()
                sleep(1)
                self.notes_page_obj.clk_delete_session()
                sleep(1)
                self.notes_page_obj.clk_mobile_confirm_delete_session_note()
        except Exception:
            self.log.info("No prior sessions found")
    def delete_mobile_non_session_notes(self):
        """Delete all non-session notes (mobile layout)."""
        sleep(2)
        no_of_notes = self.notes_page_obj.mobile_no_of_non_session_notes()
        try:
            for i in range(1, no_of_notes + 1):
                self.notes_page_obj.sel_mobile_first_non_session_note()
                sleep(0.5)
                self.notes_page_obj.clk_delete_non_session_note()
                sleep(0.5)
                self.notes_page_obj.clk_mobile_confirm_delete_non_session_note()
                sleep(0.5)
        except Exception:
            self.log.info("No prior non-session notes found")
    def delete_prior_forms_client_side(self, client_name):
        """Open the named client's documents tab and delete every form there."""
        self.login_page_obj.clk_clients_btn()
        sleep(2)
        self.client_page_obj.sel_client_name(client_name)
        sleep(2)
        self.client_page_obj.clk_client_sessions_documents()
        sleep(1)
        no_of_forms = self.manage_forms_page_obj.return_client_no_of_forms()
        try:
            for i in range(1, no_of_forms + 1):
                sleep(1)
                self.client_page_obj.clk_client_document_list_options()
                sleep(1)
                self.client_page_obj.clk_client_documents_delete()
                sleep(1)
                self.client_page_obj.clk_client_documents_warn_delete()
        except Exception:
            self.log.info("No prior forms found")
    '''
    def delete_prior_manage_forms(self):
        self.login_page_obj.clk_manage_btn()
        self.manage_forms_page_obj.clk_manage_forms()
        sleep(1)
        no_of_manage_forms = self.manage_forms_page_obj.return_manage_no_of_forms()
        try:
            for i in range(1, no_of_manage_forms + 1):
                self.manage_forms_page_obj.select_manage_first_form()
                sleep(1)
                self.manage_forms_page_obj.clk_manage_forms_delete_form()
                sleep(1)
                self.manage_forms_page_obj.clk_manage_forms_confirm_delete_form()
                sleep(0.5)
                self.driver.refresh()
                sleep(1)
                self.manage_forms_page_obj.clk_manage_forms()
                sleep(0.5)
        except Exception:
            self.log.info("No prior forms found")
    '''
    def delete_prior_manage_forms(self):
        """Delete every form listed under Manage > Forms, by name."""
        self.login_page_obj.clk_manage_btn()
        self.manage_forms_page_obj.clk_manage_forms()
        sleep(1)
        list_of_forms = self.manage_forms_page_obj.return_list_of_forms()
        print("list of forms=", list_of_forms)
        for form_name in list_of_forms:
            self.manage_forms_page_obj.sel_manage_forms_form_name(form_name)
            sleep(2)
            self.manage_forms_page_obj.clk_manage_forms_delete_form()
            sleep(2)
            self.manage_forms_page_obj.clk_manage_forms_confirm_delete_form()
            sleep(0.5)
            #self.driver.refresh()
            sleep(1)
            self.manage_forms_page_obj.clk_manage_forms()
            sleep(0.5)
    def delete_form_client_side(self, client_name):
        """Delete the first form in the named client's documents list (desktop)."""
        self.login_page_obj.clk_clients_btn()
        self.client_page_obj.sel_client_name(client_name)
        sleep(2)
        self.client_page_obj.clk_client_sessions_documents()
        sleep(1)
        self.client_page_obj.clk_client_document_list_options()
        sleep(1)
        self.client_page_obj.clk_client_documents_delete()
        sleep(1)
        self.client_page_obj.clk_client_documents_warn_delete()
    def delete_form_manage_side(self, new_form_name):
        """Delete the named form from the Manage > Forms list (desktop)."""
        self.manage_forms_page_obj.sel_manage_forms_form_name(new_form_name)
        sleep(2)
        self.manage_forms_page_obj.clk_manage_forms_delete_form()
        sleep(2)
        self.manage_forms_page_obj.clk_manage_forms_confirm_delete_form()
    def manage_add_form(self, new_form_name, text_question):
        """Create a new form with a single text question under Manage > Forms."""
        self.login_page_obj.clk_manage_btn()
        self.manage_forms_page_obj.clk_manage_forms()
        self.manage_forms_page_obj.clk_manage_forms_add_form()
        sleep(2)
        self.manage_forms_add_form_page_obj.input_new_form_name(new_form_name)
        self.manage_forms_add_form_page_obj.clk_text_question()
        self.manage_forms_add_form_page_obj.input_text_question(text_question)
        sleep(1)
        self.manage_forms_add_form_page_obj.clk_save_new_form()
        sleep(2)
        self.manage_forms_add_form_page_obj.clk_new_form_text_editor()
        sleep(2)
    def mobile_delete_form_client_side(self, client_name):
        """Delete the first form in the named client's documents list (mobile)."""
        self.login_page_obj.clk_navigation_btn()
        self.client_page_obj.clk_all_clients_mobile()
        self.client_page_obj.mobile_sel_client_name(client_name)
        self.client_page_obj.clk_view_client_mobile()
        sleep(1)
        self.client_page_obj.clk_mobile_open_client_menu()
        sleep(2)
        self.client_page_obj.clk_client_sessions_documents()
        sleep(1)
        self.client_page_obj.clk_client_document_list_options()
        sleep(1)
        self.client_page_obj.clk_client_documents_delete()
        sleep(1)
        self.client_page_obj.clk_client_documents_warn_delete()
        sleep(1)
    def mobile_delete_form_manage_side(self, new_form_name):
        """Delete the named form from the Manage > Forms list (mobile)."""
        self.login_page_obj.clk_navigation_btn()
        self.login_page_obj.clk_mobile_forms_btn()
        sleep(1)
        self.manage_forms_page_obj.sel_mobile_manage_forms_form_name(new_form_name)
        sleep(1)
        self.manage_forms_page_obj.clk_manage_forms_delete_form()
        sleep(1)
        self.manage_forms_page_obj.clk_manage_forms_confirm_delete_form()
    def delete_existing_session(self):
        """Delete all calendar sessions (desktop), skipping to next week
        first when today is Thu-Sat (weekday 3-5) — presumably because the
        test sessions were booked into the following week; TODO confirm."""
        today_date = date.today()
        current_weekday = today_date.weekday()
        try:
            if current_weekday in range(3, 6):
                sleep(2)
                self.calendar_page_obj.clk_move_to_next_week()
            sleep(2)
            no_of_sessions = self.calendar_page_obj.get_no_of_sessions_for_delete()
            for i in range(no_of_sessions):
                sleep(1)
                self.calendar_page_obj.clk_session_for_delete()
                sleep(0.5)
                self.calendar_page_obj.clk_more_information()
                sleep(0.5)
                self.calendar_page_obj.clk_delete_session()
                sleep(0.5)
                self.calendar_page_obj.clk_delete_session_warn()
                sleep(0.5)
                try:
                    # extra confirmation dialog only appears for some sessions
                    self.calendar_page_obj.clk_appointment_delete_confirm()
                except:
                    pass
        except Exception:
            self.log.info("No prior sessions found")
    def mobile_delete_existing_session(self):
        """Delete a calendar session in mobile week view; moves to next
        week first when today is Thu-Sat (weekday 3-5)."""
        today_date = date.today()
        current_weekday = today_date.weekday()
        try:
            if current_weekday < 3 or current_weekday == 6:
                self.calendar_page_obj.clk_btn_calendar_view()
                self.calendar_page_obj.clk_btn_calendar_week_view()
                self.calendar_page_obj.clk_mobile_session_info()
                self.calendar_page_obj.clk_mobile_more_information()
                self.calendar_page_obj.clk_delete_session()
                self.calendar_page_obj.clk_delete_session_warn()
                self.calendar_page_obj.clk_appointment_delete_confirm()
            else:
                self.calendar_page_obj.clk_btn_calendar_view()
                self.calendar_page_obj.clk_btn_calendar_week_view()
                self.calendar_page_obj.clk_move_to_next_week()
                self.calendar_page_obj.clk_mobile_session_info()
                self.calendar_page_obj.clk_mobile_more_information()
                self.calendar_page_obj.clk_delete_session()
                self.calendar_page_obj.clk_delete_session_warn()
                self.calendar_page_obj.clk_appointment_delete_confirm()
        except Exception:
            self.log.info("No prior sessions found")
    def delete_all_day_session(self):
        """Delete the all-day (personal) session shown in the calendar."""
        try:
            self.calendar_page_obj.clk_all_day_session_info()
            self.calendar_page_obj.clk_personal_session_more_information()
            self.calendar_page_obj.clk_delete_session()
            self.calendar_page_obj.clk_delete_session_warn()
        except Exception:
            self.log.info("Prior Personal session not found")
    def mobile_delete_all_day_session(self):
        """Delete the all-day (personal) session in mobile week view."""
        today_date = date.today()
        current_weekday = today_date.weekday()
        try:
            if current_weekday < 3 or current_weekday == 6:
                self.calendar_page_obj.clk_btn_calendar_view()
                self.calendar_page_obj.clk_btn_calendar_week_view()
            else:
                self.calendar_page_obj.clk_btn_calendar_view()
                self.calendar_page_obj.clk_btn_calendar_week_view()
                sleep(0.5)
                self.calendar_page_obj.clk_move_to_next_week()
            sleep(0.5)
            self.calendar_page_obj.clk_all_day_session()
            sleep(1)
            self.calendar_page_obj.clk_mobile_personal_session_more_information()
            sleep(2)
            self.calendar_page_obj.clk_delete_session()
            sleep(0.5)
            self.calendar_page_obj.clk_delete_session_warn()
        except Exception:
            self.log.info("No prior session found")
    def delete_sessions_client_side(self):
        """Delete all sessions listed on the client's own sessions tab."""
        try:
            no_of_client_sessions = self.client_page_obj.no_of_client_sessions()
            for i in range(1, no_of_client_sessions + 1):
                sleep(0.5)
                self.client_page_obj.select_first_client_session()
                sleep(0.5)
                self.client_page_obj.clk_delete_client_session()
                sleep(0.5)
                self.notes_page_obj.clk_mobile_confirm_delete_session_note()
                sleep(1)
        except Exception:
            self.log.info("No prior sessions found")
    def delete_mobile_historical_medication(self):
        """Delete all historical medications (mobile layout)."""
        try:
            no_of_historical_medications = self.pharma_data_page_obj.mobile_no_of_historical_medications()
            for i in range(1, no_of_historical_medications + 1):
                self.pharma_data_page_obj.clk_mobile_historical_medication_tab()
                sleep(1)
                self.pharma_data_page_obj.clk_delete_historical_medication_btn()
                sleep(1)
                self.pharma_data_page_obj.clk_delete_entry_medication_btn()
                sleep(1)
        except Exception:
            self.log.info("No prior medications found")
    def get_date_suffix(self, meeting_date):
        """Return the English ordinal suffix ('st'/'nd'/'rd'/'th') for the
        day-of-month of the given date object."""
        day = meeting_date.strftime("%-d")
        day = int(day)
        if 4 <= day <= 20 or 24 <= day <= 30:
            suffix = "th"
        else:
            # covers 1-3, 21-23 and 31 via the last digit
            suffix = ["st", "nd", "rd"][day % 10 - 1]
        return suffix
    def client_form_url_title(self, browser_name):
        """Return the expected page title of the client form URL per browser.
        NOTE(review): `browser_name == "Safari" or "Safari Technology Preview"`
        is always truthy (the second operand is a non-empty string), so any
        browser other than chrome/firefox falls into this branch — likely a
        bug if more browsers are ever added; verify the intended comparison."""
        if browser_name == "chrome":
            exp_title = "Online Portal - Form"
        elif browser_name == "firefox":
            exp_title = "Online Portal - Form"
        elif browser_name == "Safari" or "Safari Technology Preview":
            exp_title = "Online Booking"
        return exp_title
    def create_client_session(self, service, date_time):
        """Create a session for the given service at the given date/time."""
        sleep(1)
        self.calendar_page_obj.txt_date_time(date_time)
        sleep(2)
        self.calendar_page_obj.sel_service(service)
        sleep(2)
        self.calendar_page_obj.clk_create_session()
        sleep(2)
    def delete_settings_tags(self):
        """Delete every tag configured under settings, by name."""
        sleep(1)
        settings_tags_list = self.waitlist_tags_page_obj.get_settings_tags_list()
        for tag_name in settings_tags_list:
            sleep(1)
            self.waitlist_tags_page_obj.delete_settings_tag(tag_name)
            sleep(1)
            self.waitlist_tags_page_obj.clk_confirm_delete_settings_tag()
    def create_client(self, first_name, last_name, therapist_name):
        """Create a new client assigned to the given therapist."""
        sleep(1)
        self.client_page_obj.clk_add_new_client()
        sleep(1)
        self.client_page_obj.input_first_name(first_name)
        self.client_page_obj.input_last_name(last_name)
        sleep(1)
        self.client_page_obj.sel_therapist(therapist_name)
        sleep(1)
        self.client_page_obj.clk_add_client()
    def delete_client_by_name(self, client_name):
        """Delete every client matching the given name.
        NOTE(review): the bare ``except: pass`` hides all failures, including
        a partially-completed delete; the 16s sleep waits out the delayed
        second confirmation dialog — TODO confirm that delay is required."""
        no_of_clients_name = self.client_page_obj.sel_all_clients_name(client_name)
        try:
            for i in range(no_of_clients_name):
                self.client_page_obj.sel_client_name_for_delete(client_name)
                self.client_page_obj.clk_delete_client()
                self.client_page_obj.clk_confirm_delete_client()
                sleep(16)
                self.client_page_obj.clk_confirm_again_delete_client()
        except:
            pass
    def create_client_portal_user(self, first_name, last_name, birth_date, phone_number, email_id, password):
        """Fill out and submit the client-portal account creation form."""
        sleep(1)
        self.client_portal_page_obj.input_client_first_name(first_name)
        self.client_portal_page_obj.input_client_last_name(last_name)
        self.client_portal_page_obj.input_client_birth_date(birth_date)
        self.client_portal_page_obj.input_phone_number(phone_number)
        self.client_portal_page_obj.input_email_id(email_id)
        self.client_portal_page_obj.input_client_password(password)
        self.client_portal_page_obj.input_again_password(password)
        sleep(0.5)
        self.client_portal_page_obj.clk_create_new_account()
        sleep(1)
    def get_date_number_suffix(self, day):
        """Return the English ordinal suffix for a bare day number (1-31)."""
        day = int(day)
        if 4 <= day <= 20 or 24 <= day <= 30:
            suffix = "th"
        else:
            suffix = ["st", "nd", "rd"][day % 10 - 1]
        return suffix
    def create_service(self, service_details):
        """Create a service from a dict of its settings (name, duration,
        type, therapist grade, fee, portal availability, video bookable)."""
        service_name = service_details['service_name']
        duration = service_details['duration']
        service_type = service_details['service_type']
        therapist_grade = service_details['therapist_grade']
        service_fee = service_details['service_fee']
        allowed_portal = service_details['allowed_portal']
        video_bookable = service_details['video_bookable']
        # create service
        self.settings_page_obj.input_service_name(service_name)
        self.settings_page_obj.input_service_duration(duration)
        self.settings_page_obj.sel_service_type(service_type)
        self.settings_page_obj.sel_therapist_grade(therapist_grade)
        self.settings_page_obj.input_service_fee(service_fee)
        self.settings_page_obj.sel_allowed_on_portal(allowed_portal)
        self.settings_page_obj.bookable_as_video(video_bookable)
        self.settings_page_obj.clk_create_service()
    def mobile_delete_prsopects(self, prospect_name):
        """Delete the named prospect (mobile layout).  NOTE(review): the
        method name misspells 'prospects' but cannot be renamed without
        updating all callers."""
        self.login_page_obj.clk_navigation_btn()
        self.login_page_obj.clk_mobile_client_prospects()
        self.client_page_obj.mobile_sel_prospect_name(prospect_name)
        self.client_page_obj.clk_delete_user()
        self.client_portal_page_obj.clk_confirm_delete_prospect()
| 41.969805 | 109 | 0.659359 |
434a3dbb80ac0282e2672f51f9e479af14b08308 | 968 | py | Python | scripts/insert_sims.py | jennhsiao/ideotype | 26406ddc66e091e918ceff212638a02e9cfbdb46 | [
"MIT"
] | 2 | 2021-01-08T03:21:48.000Z | 2021-04-08T00:21:32.000Z | scripts/insert_sims.py | jennhsiao/ideotype | 26406ddc66e091e918ceff212638a02e9cfbdb46 | [
"MIT"
] | null | null | null | scripts/insert_sims.py | jennhsiao/ideotype | 26406ddc66e091e918ceff212638a02e9cfbdb46 | [
"MIT"
] | null | null | null | """
Debug error when inserting simulation outputs into DB.
- Leveraging options in insert_sims to batch save records to DB
at a smaller number, making it easier to debug errors.
- Also starting import from specified year & cultivar
to narrow down search range.
"""
from ideotype import insert_sims
fpath_db = '/home/disk/eos8/ach315/upscale/db/ideotype.db'
dirct_weadata = '/home/disk/eos8/ach315/upscale/weadata/data/control/'
dirct_sims = '/home/disk/eos8/ach315/upscale/sims/opt/'
fpath_params = '/home/disk/eos8/ach315/upscale/params/param_opt.csv'
fpath_siteinfo = '/home/disk/eos8/ach315/upscale/weadata/site_summary.csv'
run_name = 'opt'
insert_sims(dirct_sims, fpath_db, run_name,
n_savefiles=100, session=None,
start_year=1994, start_cvar=94, start_site=722060)
# multiple files in 1994/var_93 have issues
# order messed up or duplicated rows (don't know how this happened)
# skipped all the sites in this directory at the moment
| 37.230769 | 74 | 0.765496 |
fc2d047df1b2882e1247fb23d0489648fff60f4f | 1,116 | py | Python | dependencies/ui/tab2/parameter_setter.py | statisticalbiotechnology/quandenser-pipeline | 4175f7d3de29d08dbb53e0e4f1b0f2fba8147df3 | [
"Apache-2.0"
] | 8 | 2019-05-17T14:45:30.000Z | 2021-06-24T15:53:34.000Z | dependencies/ui/tab2/parameter_setter.py | statisticalbiotechnology/quandenser-pipeline | 4175f7d3de29d08dbb53e0e4f1b0f2fba8147df3 | [
"Apache-2.0"
] | 33 | 2019-03-07T09:00:11.000Z | 2021-09-07T07:47:18.000Z | dependencies/ui/tab2/parameter_setter.py | statisticalbiotechnology/quandenser-pipeline | 4175f7d3de29d08dbb53e0e4f1b0f2fba8147df3 | [
"Apache-2.0"
] | 1 | 2019-11-21T12:32:07.000Z | 2019-11-21T12:32:07.000Z | import sys
from PySide2.QtWidgets import QDoubleSpinBox, QSpinBox
# Custom parser for both sh files and nf configs
from custom_config_parser import custom_config_parser
class parameter_setter_single(QSpinBox):
def __init__(self, parameter, nf_settings_path):
super(parameter_setter_single,self).__init__(parent = None)
self.nf_settings_path = nf_settings_path
self.nf_settings_parser = custom_config_parser()
self.nf_settings_parser.load(self.nf_settings_path)
self.parameter = parameter
self.setMaximum(9999999)
self.default()
self.valueChanged.connect(self.check_value)
def check_value(self):
self.blockSignals(True)
self.nf_settings_parser.write(f"params.{self.parameter}",
self.value(),
isString=False)
self.blockSignals(False)
def default(self):
value = self.nf_settings_parser.get(f"params.{self.parameter}")
try:
value = int(value)
except:
return None
self.setValue(value)
| 34.875 | 71 | 0.65233 |
d5eb53c21890feffde8916104737c3ca7186f63c | 30,377 | py | Python | jams/kriging.py | mcuntz/jams_python | 41b4504d2f55a77a7876fc6d146e4eb91dd8b2b9 | [
"MIT"
] | 9 | 2019-06-03T03:24:16.000Z | 2021-12-03T07:14:00.000Z | jams/kriging.py | mcuntz/jams_python | 41b4504d2f55a77a7876fc6d146e4eb91dd8b2b9 | [
"MIT"
] | 6 | 2020-03-25T21:56:59.000Z | 2021-11-08T14:58:27.000Z | jams/kriging.py | mcuntz/jams_python | 41b4504d2f55a77a7876fc6d146e4eb91dd8b2b9 | [
"MIT"
] | 5 | 2019-10-17T12:04:33.000Z | 2021-09-28T07:45:07.000Z | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy.spatial.distance import pdist, squareform
__all__ = ['kriging']
def kriging(x, y, z, semi_mod, semi_popt, xnew=None, ynew=None, plot=False,
            masked=False, silent=True, eop=None, block=False):
    """
    Kriging a surface from a set of 2D points with a given semivariogram
    model and associated optimized parameters.
    Plot the surface and the corresponding kriging variance, if wanted.
    The coordinates and values of the surface can be masked outside the
    convex hull of the given input points.
    Optional extraction of kriged values at distinct points within the
    surface is possible.
    Block kriging for the average of the convex hull is possible.
    Howvere kringing on a rectangular surface with subsequent averaging
    is almost always faster.


    Definition
    ----------
    def kriging(x, y, z, semi_mod, semi_popt, xnew=None, ynew=None, plot=False,
                masked=False, silent=True, eop=None, block=False):


    Input
    -----
    x          array, x coordinates
    y          array, y coordinates
    z          array, values
    semi_mod   function, semivariogram model (e.g. output from the JAMS
               semivariogram routine)
    semi_popt  array, parameters of the semivariogram model (e.g. output
               from the JAMS semivariogram routine)
    xnew       array (n), x coordinates of the desired surface, they will be
               used to generate a 2D mesh for the surface. If left None,
               values will be kriged only for the points given in eop.
    ynew       array (m), y coordinates of the desired surface, they will be
               used to generate a 2D mesh for the surface. If left None,
               values will be kriged only for the points given in eop.
    eop        array (k,2), x and y coordinates of distinct points where
               a kriged value is desired


    Optional Input
    --------------
    plot       bool, plots will be generated if True, otherwise not.
    masked     bool, if True, the output arrays will be np.ma.masked_arrays
               where coordinates and values outside of the convex hull of
               the input data are masked. In the generated plots these
               values will be hidden. If False, the output arrays will be
               np.arrays and all values within the kriging rectangle are
               visible in the plots.
    silent     bool, if True, no runtime diagnostics are printed to the
               console.
    block      bool, if True, calculate block kriging
               Note that kringing on a rectangular surface xnew,ynew with
               possible masking and calculating the mean afterwards is almost
               always much faster, except for very fine xnew,ynew grids.


    Output
    ------
    if eop is None:
        xnew     2D array (n,m), x coordinates of the surface grid
        ynew     2D array (n,m), y coordinates of the surface grid
        znew     2D array (n,m), values of the surface grid
        varnew   2D array (n,m), kriging variance of the surface grid
    if xnew is None and not block:
        eopz     array (k), kriged values at the desired distinct points of eop
        eopvar   array (k), kriging variance at the desired distinct points of
                 eop
    if block:
        bave     average over convex_hull of x,y or xnew,ynew.
        bvar     kriging variance of average bave
    otherwise:
        xnew     2D array (n,m), x coordinates of the surface grid
        ynew     2D array (n,m), y coordinates of the surface grid
        znew     2D array (n,m), values of the surface grid
        varnew   2D array (n,m), kriging variance of the surface grid
        eopz     array (k), kriged values at the desired distinct points of eop
        eopvar   array (k), kriging variance at the desired distinct points of

    graphs:
        kriging surface, shows the kriged surface
        kriging variance, shows the kriging variance


    References
    ----------
    This routine is recoded and extended from a matlab script by Juliane Mai.


    Examples
    --------
    # provide you some sample data:
    >>> # seed for reproducible results in doctest
    >>> np.random.seed(1)
    >>> x = np.array([652225.,652175.,652205.,652235.,652265.,652165.,
    ...               652195.,652225.,652255.,652285.,652175.,652205.,
    ...               652235.,652265.,652175.,652205.,652235.,652265.,
    ...               652195.,652225.,652255.,652285.,652235.,652265.,
    ...               652225.,652255.,652285.,652195.,652200.,652200.,
    ...               652240.,652230.,652260.,652260.,652265.])
    >>> y = np.array([5772960.,5772970.,5772970.,5772970.,5772970.,
    ...               5772980.,5772980.,5772980.,5772980.,5772980.,
    ...               5772990.,5772990.,5772990.,5772990.,5773000.,
    ...               5773000.,5773000.,5773000.,5773010.,5773010.,
    ...               5773010.,5773010.,5773020.,5773020.,5773030.,
    ...               5773030.,5773030.,5772985.,5772990.,5772995.,
    ...               5773015.,5773025.,5772985.,5772990.,5772995.])
    >>> z = np.array([2.16512767,4.97776467,4.2279204 ,0.        ,
    ...               8.25658422,0.01238773,5.05858306,8.33503939,
    ...               7.53470443,7.15304826,9.45150218,8.79359049,
    ...               0.0536634 ,0.42101194,0.22721601,1.1458486 ,
    ...               6.79183025,2.50622739,3.76725118,3.97934707,
    ...               0.        ,0.24743279,1.4627512 ,0.38430722,
    ...               5.30171261,0.        ,3.17667353,3.80908144,
    ...               7.12445478,4.83891708,6.10898131,2.93801857,
    ...               2.56170107,2.54503559,1.72767934])

    # make semivariogram
    >>> from semivariogram import semivariogram
    >>> nL = 40
    >>> di = [0]
    >>> td = 180
    >>> nugget,sill,orange,vark,h,g,c,semi_mod,semi_popt = semivariogram(
    ...     x,y,z,nL,di,td,stype='omnidirectional',negscat=0.5,
    ...     model='exponential',graph=False,lunit='m',
    ...     p0=(0.,20.,1./8.),runtimediag=False)

    # x and y coordinates for the surface
    >>> xnew = np.arange(np.amin(x),np.amax(x),5.)
    >>> ynew = np.arange(np.amin(y),np.amax(y),5.)

    # krig the surface
    >>> xnew, ynew, znew, varnew = kriging(x,y,z,semi_mod,semi_popt,
    ...                                    xnew=xnew,ynew=ynew,silent=True,
    ...                                    plot=False,masked=False,eop=None)
    >>> from autostring import astr
    >>> print(astr(znew[0][0:8],1,pp=True))
    ['2.8' '3.4' '3.9' '4.2' '4.3' '4.2' '4.0' '3.8']
    >>> print(astr(np.mean(znew),1))
    3.7

    # block krig the surface
    >>> bave, bvar = kriging(x, y, z, semi_mod, semi_popt,xnew=xnew, ynew=ynew,
    ...     silent=True,plot=False,masked=False,eop=None,block=True)
    >>> print(astr(bave,1,pp=True))
    3.5
    >>> print(astr(np.sqrt(bvar),3,pp=True))
    3.096

    # krig only at points of interest
    >>> poi = np.array([[652209.16,5772986.26],
    ...                 [652281.10,5773014.27],
    ...                 [652202.39,5772997.96],
    ...                 [652264.51,5772992.49],
    ...                 [652274.81,5772961.62],
    ...                 [652204.93,5772992.82],
    ...                 [652232.38,5773021.34],
    ...                 [652278.25,5773019.58],
    ...                 [652199.17,5773004.12],
    ...                 [652276.71,5773006.25]])
    >>> eopz, eopvar = kriging(x,y,z,semi_mod,semi_popt,xnew=None,
    ...                        ynew=None,plot=False,masked=False,
    ...                        silent=True,eop=poi)
    >>> print(astr(eopz[0:8],1,pp=True))
    ['7.8' '0.7' '3.1' '1.2' '7.4' '6.7' '2.1' '1.1']

    # krig both, whole surface and on points of interest
    >>> xnew = np.arange(np.min(x),np.max(x),5.)
    >>> ynew = np.arange(np.min(y),np.max(y),5.)
    >>> xnew, ynew, znew, varnew, eopz, eopvar = kriging(x,y,z,semi_mod,
    ...                                                  semi_popt,xnew=xnew,
    ...                                                  ynew=ynew,plot=False,
    ...                                                  masked=False,silent=True,
    ...                                                  eop=poi)
    >>> print(astr(znew[0][0:8],1,pp=True))
    ['2.8' '3.4' '3.9' '4.2' '4.3' '4.2' '4.0' '3.8']
    >>> print(astr(eopz[0:8],1,pp=True))
    ['7.8' '0.7' '3.1' '1.2' '7.4' '6.7' '2.1' '1.1']


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python
    library, Department of Computational Hydrosystems, Helmholtz Centre for
    Environmental Research - UFZ, Leipzig, Germany.

    Copyright (c) 2012-2021 Arndt Piayda, Juliane Mai, Matthias Cuntz - mc (at)
    macu (dot) de

    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.


    History
    -------
    Written,  Arndt Piayda & Juliane Mai, Nov 2012
    Modified, Arndt Piayda, Dec 2012
                  - documentation change
              Matthias Cuntz & Juliane Mai, Feb 2013
                  - block
              Matthias Cuntz, Feb 2013
                  - block uses volume_poly
                  - include Langriangian multiplier in kriging variance
                  - quadruple integral of block variance
                    calculated by Monte-Carlo
              Matthias Cuntz, Feb 2013
                  - ported to Python 3
              Matthias Cuntz, Apr 2014
                  - assert
              Matthias Cuntz, Sep 2021
                  - code refactoring
    """
    if not silent:
        import time
        print('KRIG: prepare data...')
    # ironing :-) - work on flat copies of all coordinate/value inputs
    x, y, z = x.flatten(), y.flatten(), z.flatten()
    semi_popt = semi_popt.flatten()
    if xnew is not None:
        xnew, ynew = xnew.flatten(), ynew.flatten()
    if eop is not None:
        eopx, eopy = eop[:, 0].flatten(), eop[:, 1].flatten()

    ###########################################################################
    # orignal x + y data
    # reshape and calculate lags and gammas
    assert np.size(x) == np.size(y), (
        'kriging: x and y must have same dimensions')
    assert np.size(x) == np.size(z), (
        'kriging: x and z must have same dimensions')

    xy = np.vstack((x, y)).transpose()
    lag = squareform(pdist(xy, 'euclidean'))
    gamma = semi_mod(lag, semi_popt)

    # make A and append row and column of one's
    # (ordinary kriging system with Lagrange multiplier in the last row/col)
    A = np.vstack((gamma, np.ones(np.shape(gamma)[1])))
    A = np.hstack((A, np.ones(np.shape(A)[0]).reshape(-1, 1)))
    A[-1, -1] = 0.
    invA = np.linalg.inv(A)

    #######################################################################
    # calculate convex hull to hide outer areas
    if masked:
        from convex_hull import convex_hull
        from in_poly import in_poly
        if not silent:
            print('KRIG: calculate hull...')
            start = time.time()
        hull_points = convex_hull(np.vstack((x, y)), graphic=False,
                                  smidgen=0.0075)
        if not silent:
            stop = time.time()
            print('KRIG: calculating hull took %0.3f sec' % (stop-start))

    ###########################################################################
    # krig on grid
    if (xnew is not None) and (not block):
        if not silent:
            print('KRIG: prepare mesh...')
        # make 2D mesh grid
        xnew, ynew = np.meshgrid(xnew, ynew)
        xnew_v = xnew.flatten()
        ynew_v = ynew.flatten()
        length = np.size(xnew_v)

        #######################################################################
        # calculate every znew of xnew and ynew
        if not silent:
            print('KRIG: kriging...')
            start = time.time()
        znew = np.empty_like(xnew_v)
        varnew = np.empty_like(xnew_v)
        # lamnew = np.empty((np.size(xnew_v), np.size(x)))
        if masked:
            mask = np.empty_like(xnew_v, dtype=int)
        for igrid in range(length):
            # make B: semivariogram between target point and all data points
            b = np.sqrt((x-xnew_v[igrid])**2 + (y-ynew_v[igrid])**2)
            B = semi_mod(b, semi_popt)
            B = np.append(B, 1.)
            # calculate lambda (kriging weights + Lagrange multiplier mu)
            lmd = np.dot(invA, B)
            # shorten it
            mu = lmd[-1]
            lmd = lmd[:-1]
            B = B[:-1]
            znew[igrid] = np.dot(z, lmd)
            varnew[igrid] = np.dot(lmd.transpose(), B) + mu
            # lamnew[igrid, :] = lmd

            ###################################################################
            # calculate convex hull to hide outer areas
            if masked:
                mask[igrid] = in_poly([xnew_v[igrid], ynew_v[igrid]],
                                      hull_points[:, 0],
                                      hull_points[:, 1])

        znew = znew.reshape(np.shape(xnew))
        varnew = varnew.reshape(np.shape(xnew))
        # lamnew = lamnew.reshape(np.shape(xnew))
        if masked:
            mask = mask.reshape(np.shape(xnew))
            mask = np.where(mask > 0, 0, 1)
            xnew = np.ma.masked_array(xnew, mask)
            ynew = np.ma.masked_array(ynew, mask)
            znew = np.ma.masked_array(znew, mask)
            varnew = np.ma.masked_array(varnew, mask)
        if not silent:
            stop = time.time()
            print('KRIG: kriging took %0.3f sec' % (stop-start))

    #######################################################################
    # krig on extraction points
    if eop is not None:
        length = np.size(eopx)

        #######################################################################
        # calculate every znew of xnew and ynew
        if not silent:
            print('KRIG: kriging...')
            start = time.time()
        eopz = np.empty_like(eopx)
        eopvar = np.empty_like(eopx)
        # eoplam = np.empty((np.size(eopx), np.size(x)))
        if masked:
            mask = np.empty_like(eopx, dtype=int)
        for igrid in range(length):
            # make B
            b = np.sqrt((x-eopx[igrid])**2 + (y-eopy[igrid])**2)
            B = semi_mod(b, semi_popt)
            B = np.append(B, 1.)
            # calculate lambda
            lmd = np.dot(invA, B)
            # shorten it
            mu = lmd[-1]
            lmd = lmd[:-1]
            B = B[:-1]
            eopz[igrid] = np.dot(z, lmd)
            eopvar[igrid] = np.dot(lmd.transpose(), B) + mu
            # eoplam[igrid,:] = lmd

            ###################################################################
            # calculate convex hull to hide outer areas
            if masked:
                mask[igrid] = in_poly([eopx[igrid], eopy[igrid]],
                                      hull_points[:, 0], hull_points[:, 1])

        if masked:
            mask = np.where(mask > 0, 0, 1)
            eopx = np.ma.masked_array(eopx, mask)
            eopy = np.ma.masked_array(eopy, mask)
            eopz = np.ma.masked_array(eopz, mask)
            eopvar = np.ma.masked_array(eopvar, mask)
        if not silent:
            stop = time.time()
            print('KRIG: kriging took %0.3f sec' % (stop-start))

    ###########################################################################
    # block kriging
    if block:
        from scipy.spatial import Delaunay   # for triangulation
        # from scipy.integrate import dblquad # area integral
        from convex_hull import convex_hull  # convex hull of data points
        from volume_poly import volume_poly  # the volume above a polygon
        if not silent:
            print('KRIG: block kriging...')
            start = time.time()

        def semiyx(yy, xx, obsyx, f, p):
            # semivariogram value between (xx, yy) and observation obsyx=[y, x]
            dis = np.sqrt((xx-obsyx[1])**2 + (yy-obsyx[0])**2)
            return f(dis, p)  # semivariogram(distance, parameter)
        # Construct B-vector
        B = np.empty(x.size+1, dtype=float)
        B[-1] = 1.
        if (xnew is not None) and (not masked):
            # Assume rectangle
            xmin = np.amin(xnew)
            ymin = np.amin(ynew)
            xmax = np.amax(xnew)
            ymax = np.amax(ynew)
            # Do not calc double integral because 4 triangles are double
            # as fast.
            # # Calc mean semivariogramm over whole region for each point
            # area = (xmax-xmin)*(ymax-ymin)
            # for i in range(x.size):
            #     tvol, tvol_err = dblquad(semiyx, xmin, xmax, lambda xx: ymin,
            #                              lambda xx: ymax,
            #                              args=([y[i],x[i]], semi_mod,
            #                                    semi_popt))
            #     B[i] = tvol / area
            # Construct 4 triangles
            xs = 0.5*(xmax+xmin)  # centre of gravity
            ys = 0.5*(ymax+ymin)
            ntriangles = 4
            tri = np.empty((ntriangles, 3, 2), dtype=float)
            tri[0, 0, :] = [xmin, ymin]
            tri[0, 1, :] = [xmax, ymin]
            tri[0, 2, :] = [xs, ys]
            tri[1, 0, :] = [xmin, ymin]
            tri[1, 1, :] = [xmin, ymax]
            tri[1, 2, :] = [xs, ys]
            tri[2, 0, :] = [xmin, ymax]
            tri[2, 1, :] = [xmax, ymax]
            tri[2, 2, :] = [xs, ys]
            tri[3, 0, :] = [xmax, ymax]
            tri[3, 1, :] = [xmax, ymin]
            tri[3, 2, :] = [xs, ys]
            # Construct convex hull
            cxy = np.empty((ntriangles, 2), dtype=float)
            cxy[0, :] = [xmin, ymin]
            cxy[1, :] = [xmax, ymin]
            cxy[2, :] = [xmax, ymax]
            cxy[3, :] = [xmin, ymax]
            # Calc mean semivariogramm over whole region for each point
            for i in range(x.size):
                tvol, tvol_err, area = volume_poly(semiyx, tri=tri, area=True,
                                                   obsyx=[y[i], x[i]],
                                                   f=semi_mod, p=semi_popt)
                B[i] = tvol / area
        else:
            # Get convex hull and vertices
            xy = np.array(list(zip(x, y)))
            d = Delaunay(xy[:, :])
            cxy = convex_hull(xy.transpose())
            xs = np.mean(cxy[:, 0])
            ys = np.mean(cxy[:, 1])
            # BUGFIX: the Monte-Carlo error integration below needs the
            # bounding box; it was previously undefined on this branch and
            # raised a NameError for block kriging of a masked/hull region.
            # Sample over the bounding box of the convex hull.
            xmin = np.amin(cxy[:, 0])
            xmax = np.amax(cxy[:, 0])
            ymin = np.amin(cxy[:, 1])
            ymax = np.amax(cxy[:, 1])
            # # All triangles
            # tri = xy[d.vertices,:]
            # ntriangles = tri.shape[0]
            # Construct triangles from convex hull and centre of gravity
            ntriangles = d.convex_hull.shape[0]
            tri = np.empty((ntriangles, 3, 2), dtype=float)
            for i in range(ntriangles):
                tri[i, 0, :] = xy[d.convex_hull[i, 0], :]
                tri[i, 1, :] = xy[d.convex_hull[i, 1], :]
                tri[i, 2, :] = [xs, ys]
            # Calc mean semivariogramm over whole region for each point
            for i in range(x.size):
                tvol, tvol_err, area = volume_poly(semiyx, tri=tri, area=True,
                                                   obsyx=[y[i], x[i]],
                                                   f=semi_mod, p=semi_popt)
                B[i] = tvol / area

        # calculate lambda
        lmd = np.dot(invA, B)
        # shorten it
        mu = lmd[-1]
        lmd = lmd[:-1]
        B = B[:-1]
        # average
        baverage = np.dot(z, lmd)
        # Kriging error
        # Integration of quadruple integral by Monte-Carlo
        n = 0.0
        total = 0.0
        total2 = 0.0
        while True:
            n += 1.0
            xx1 = xmin + (xmax-xmin) * np.random.random()
            xx2 = xmin + (xmax-xmin) * np.random.random()
            yy1 = ymin + (ymax-ymin) * np.random.random()
            yy2 = ymin + (ymax-ymin) * np.random.random()
            f = semiyx(yy1, xx1, [yy2, xx2], semi_mod, semi_popt)
            total += f
            total2 += (f**2)
            if n > 100.:
                mm = total/n  # E(f)
                # 1/n*Var(f) = 1/n * (n/n-1)*(E(f^2)-E(f)^2)
                vv = (total2/n - mm**2)/(n-1.0)
                ee = np.sqrt(vv)
                # stop once the relative standard error is below 0.1 %
                if ee/mm*100. < 0.1:
                    break
        # Integral would be V*mm with err V*err
        # but we need mean, i.e. mm
        # on std example, bvar was negative ?
        bvariance = np.abs(np.dot(lmd, B) + mu - mm)

        if not silent:
            stop = time.time()
            print('KRIG: block kriging took %0.3f sec' % (stop-start))

    ###########################################################################
    # plotting
    if plot:
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        if not silent:
            print('KRIG: plotting...')
        mpl.rc('font', size=20)
        mpl.rc('lines', linewidth=2)
        mpl.rc('axes', linewidth=1.5)
        # mpl.rc('xtick.major', width=1.5)
        # mpl.rc('ytick.major', width=1.5)
        mpl.rcParams['lines.markersize'] = 6
        mpl.rcParams['lines.markeredgewidth'] = 1
        mpl.rcParams['grid.linewidth'] = 1.5
        # mpl.rcParams['legend.frameon']=False
        mpl.rcParams['legend.numpoints'] = 1
        mpl.rcParams['legend.handlelength'] = 1
        mpl.rcParams['mathtext.default'] = 'regular'

        # plotting contours of kriging
        # fig1 = plt.figure('kriging: surface', figsize=(15,10))
        fig1 = plt.figure(1, figsize=(15, 10))
        sub1 = fig1.add_subplot(111, aspect='equal')  # , aspect=1)
        if xnew is not None:
            lines = sub1.contour(xnew, ynew, znew, 10, linewidths=1.5,
                                 colors='k')
            fillings = sub1.contourf(xnew, ynew, znew, 10, cmap=plt.cm.jet)
        if masked:
            hull = sub1.plot(np.hstack((hull_points[:, 0], hull_points[0, 0])),
                             np.hstack((hull_points[:, 1], hull_points[0, 1])),
                             color='k')
        if eop is not None:
            scat = sub1.scatter(eopx, eopy, marker='o', c='k', s=40)
        sub1.xaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
        sub1.yaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
        sub1.xaxis.set_major_formatter(mpl.ticker.
                                       ScalarFormatter(useOffset=False))
        sub1.yaxis.set_major_formatter(mpl.ticker.
                                       ScalarFormatter(useOffset=False))
        sub1.grid('on')
        sub1.set_title('kriging')
        plt.xlabel('easting')
        plt.ylabel('northing')
        fig1.autofmt_xdate(rotation=45)
        # plt.tight_layout(pad=1, h_pad=0, w_pad=0)
        # cbar need to be below autofm_xdate !!!???
        if xnew is not None:
            cbar = fig1.colorbar(fillings, orientation='vertical', pad=0.05,
                                 shrink=0.7)
            cbar.set_label('value')

        # plotting contours of variance
        # fig2 = plt.figure('kriging: variance', figsize=(15,10))
        fig2 = plt.figure(2, figsize=(15, 10))
        sub2 = fig2.add_subplot(111, aspect='equal')  # , aspect=1)
        if xnew is not None:
            lines = sub2.contour(xnew, ynew, varnew, 10, linewidths=1.5,
                                 colors='k')
            fillings = sub2.contourf(xnew, ynew, varnew, 10, cmap=plt.cm.jet)
        if masked:
            hull = sub2.plot(np.hstack((hull_points[:, 0], hull_points[0, 0])),
                             np.hstack((hull_points[:, 1], hull_points[0, 1])),
                             color='k')
        if eop is not None:
            scat = sub2.scatter(eopx, eopy, marker='o', c='k', s=40)
        sub2.xaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
        sub2.yaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
        sub2.xaxis.set_major_formatter(mpl.ticker.
                                       ScalarFormatter(useOffset=False))
        sub2.yaxis.set_major_formatter(mpl.ticker.
                                       ScalarFormatter(useOffset=False))
        sub2.grid('on')
        sub2.set_title('variance')
        plt.xlabel('easting')
        plt.ylabel('northing')
        fig2.autofmt_xdate(rotation=45)
        # plt.tight_layout(pad=1, h_pad=0, w_pad=0)
        # cbar need to be below autofm_xdate !!!???
        if xnew is not None:
            cbar = fig2.colorbar(fillings, orientation='vertical', pad=0.05,
                                 shrink=0.7)
            cbar.set_label('value')

        plt.show()

    if eop is None:
        if block:
            return baverage, bvariance
        else:
            return xnew, ynew, znew, varnew
    elif xnew is None:
        return eopz, eopvar
    else:
        return xnew, ynew, znew, varnew, eopz, eopvar
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    # NORMALIZE_WHITESPACE makes the pretty-printed array output in the
    # examples compare reliably regardless of spacing.
    # (A long block of commented-out example code that duplicated the
    # docstring examples was removed here.)
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
ce52f2ad6413db99635437824abb70faba999862 | 2,756 | py | Python | dreg_client/registry.py | djmattyg007/dreg-client | 4f5c4c427d2ee721e68a63df1b8c5c7ded8361c6 | [
"Apache-2.0"
] | 3 | 2021-09-05T14:35:43.000Z | 2022-03-04T16:16:43.000Z | dreg_client/registry.py | djmattyg007/dreg-client | 4f5c4c427d2ee721e68a63df1b8c5c7ded8361c6 | [
"Apache-2.0"
] | 2 | 2022-01-17T15:05:28.000Z | 2022-03-09T13:14:25.000Z | dreg_client/registry.py | djmattyg007/dreg-client | 4f5c4c427d2ee721e68a63df1b8c5c7ded8361c6 | [
"Apache-2.0"
] | 1 | 2022-03-08T08:58:59.000Z | 2022-03-08T08:58:59.000Z | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Mapping, Optional, Sequence
from .client import Client
from .repository import Repository
if TYPE_CHECKING:
from requests_toolbelt.sessions import BaseUrlSession
from ._types import RequestsAuth
from .auth_service import AuthService
class Registry:
    """High-level facade over a registry :class:`Client`.

    Caches :class:`Repository` objects discovered via the registry catalog,
    keyed both by full name (``namespace/name``) and grouped by namespace.
    """

    def __init__(self, client: Client, /) -> None:
        self._client: Client = client
        # Full catalog name -> Repository (filled lazily by refresh()).
        self._repositories: Dict[str, Repository] = {}
        # namespace -> {full catalog name -> Repository}.
        self._repositories_by_namespace: Dict[str, Dict[str, Repository]] = {}

    @classmethod
    def build_with_client(
        cls, session: BaseUrlSession, /, *, auth_service: Optional[AuthService] = None
    ) -> Registry:
        """Build a Registry around an existing base-url session."""
        return cls(Client(session, auth_service=auth_service))

    @classmethod
    def build_with_manual_client(
        cls,
        base_url: str,
        /,
        *,
        auth: RequestsAuth = None,
        auth_service: Optional[AuthService] = None,
    ) -> Registry:
        """Build a Registry, constructing the underlying session as well."""
        return cls(Client.build_with_session(base_url, auth=auth, auth_service=auth_service))

    def namespaces(self) -> Sequence[str]:
        """Return the known namespaces, fetching the catalog on first use."""
        if not self._repositories:
            self.refresh()
        return tuple(self._repositories_by_namespace.keys())

    def repository(self, repository: str, namespace: Optional[str] = None) -> Repository:
        """Return a (possibly cached) :class:`Repository`.

        ``repository`` may be given as ``"namespace/name"``; in that case the
        separate ``namespace`` argument must be omitted.

        Raises:
            ValueError: if a namespace is supplied both ways.
        """
        if "/" in repository:
            if namespace is not None:
                raise ValueError("Cannot specify namespace twice.")
            namespace, repository = repository.split("/", 1)

        if namespace:
            name = f"{namespace}/{repository}"
        else:
            # Docker-Hub convention: un-namespaced repos live in "library".
            name = f"library/{repository}"

        try:
            return self._repositories[name]
        except KeyError:
            pass

        if not namespace:
            # BUGFIX: refresh() caches un-namespaced catalog entries under
            # their bare name, so also check that key before constructing a
            # fresh (uncached) Repository object.
            try:
                return self._repositories[repository]
            except KeyError:
                pass

        return Repository(self._client, repository, namespace=namespace)

    def repositories(self, namespace: Optional[str] = None) -> Mapping[str, Repository]:
        """Return cached repositories, optionally restricted to a namespace."""
        if not self._repositories:
            self.refresh()
        if namespace:
            return self._repositories_by_namespace[namespace]
        return self._repositories

    def refresh(self) -> None:
        """Re-read the registry catalog and rebuild both caches."""
        repositories = self._client.catalog()["repositories"]
        for name in repositories:
            repo: str
            ns: Optional[str]
            try:
                ns, repo = name.split(sep="/", maxsplit=1)
            except ValueError:
                # No "/" in the catalog entry: un-namespaced repository.
                ns = None
                repo = name
            r = Repository(self._client, repo, namespace=ns)
            if ns is None:
                ns = "library"
            self._repositories_by_namespace.setdefault(ns, {})
            self._repositories_by_namespace[ns][name] = r
            self._repositories[name] = r
__all__ = ("Registry",)
| 29.956522 | 93 | 0.616836 |
9f94a28d8d4b5df6edfc6cf0a105408beb0f57a8 | 1,515 | py | Python | tests/scenes/test_threepointlighting.py | patrickkesper/amira_blender_rendering | acea7b1727e5eb93998cb3154fb2080122962505 | [
"ECL-2.0",
"Apache-2.0"
] | 26 | 2020-11-13T18:57:40.000Z | 2022-03-08T18:54:02.000Z | tests/scenes/test_threepointlighting.py | patrickkesper/amira_blender_rendering | acea7b1727e5eb93998cb3154fb2080122962505 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2021-01-21T11:56:46.000Z | 2021-09-22T08:39:10.000Z | tests/scenes/test_threepointlighting.py | patrickkesper/amira_blender_rendering | acea7b1727e5eb93998cb3154fb2080122962505 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2020-11-19T15:46:33.000Z | 2021-03-26T05:42:44.000Z | #!/usr/bin/env python
# Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# <https://github.com/boschresearch/amira-blender-rendering>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import amira_blender_rendering.scenes.threepointlighting as tpl
import tests
"""Test file for main functionalities in amira_blender_rendering.scene.threepointlighting"""
@tests.register(name='test_scenes')
class TestThreePointLighting(unittest.TestCase):
    """Smoke test: ThreePointLighting can be constructed at all."""

    def setUp(self):
        # Placeholder; the instance is created inside the test itself.
        self._instance = None
    def test_class(self):
        # test class integrity
        self._instance = tpl.ThreePointLighting()
        self.assertIsInstance(self._instance, tpl.ThreePointLighting)
    def tearDown(self):
        # Drop the instance reference created during the test.
        del self._instance
def main():
    """Collect the three-point lighting tests and run them with a text runner."""
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.makeSuite(TestThreePointLighting))
    unittest.TextTestRunner().run(test_suite)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
| 30.3 | 92 | 0.745215 |
61f3e1de78a84cce507bc846c5e873674a6a9c45 | 523 | py | Python | test/rdb_workloads/insert_many.py | zadcha/rethinkdb | bb4f5cc28242dc1e29e9a46a8a931ec54420070c | [
"Apache-2.0"
] | 21,684 | 2015-01-01T03:42:20.000Z | 2022-03-30T13:32:44.000Z | test/rdb_workloads/insert_many.py | RethonkDB/rethonkdb | 8c9c1ddc71b1b891fdb8aad7ca5891fc036b80ee | [
"Apache-2.0"
] | 4,067 | 2015-01-01T00:04:51.000Z | 2022-03-30T13:42:56.000Z | test/rdb_workloads/insert_many.py | RethonkDB/rethonkdb | 8c9c1ddc71b1b891fdb8aad7ca5891fc036b80ee | [
"Apache-2.0"
] | 1,901 | 2015-01-01T21:05:59.000Z | 2022-03-21T08:14:25.000Z | #!/usr/bin/env python
import sys, socket, random, time, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import rdb_workload_common
from vcoptparse import *
# Build the option parser with the standard connection flags and add a
# --count flag (default 10000) for the number of documents to insert.
op = rdb_workload_common.option_parser_for_connect()
op["count"] = IntFlag("--count", 10000)
opts = op.parse(sys.argv)

if __name__ == '__main__':
    # Create (or reuse) the workload table, connect, and bulk-insert
    # `count` documents through the shared workload helper.
    with rdb_workload_common.make_table_and_connection(opts) as (table, conn):
        rdb_workload_common.insert_many(conn=conn, table=table, count=opts['count'])
| 37.357143 | 99 | 0.751434 |
ee314e42bd11e45bf26f53de99e02541b6d61d4d | 989 | py | Python | leetcode/337_house_robber_III/337_house_robber_III.py | ryangillard/misc | d1f9919400636e6b988fa933493b94829a73331e | [
"Apache-2.0"
] | null | null | null | leetcode/337_house_robber_III/337_house_robber_III.py | ryangillard/misc | d1f9919400636e6b988fa933493b94829a73331e | [
"Apache-2.0"
] | null | null | null | leetcode/337_house_robber_III/337_house_robber_III.py | ryangillard/misc | d1f9919400636e6b988fa933493b94829a73331e | [
"Apache-2.0"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """House Robber III (LeetCode 337).

    Given a binary tree of house values, compute the maximum loot when no two
    directly-connected (parent/child) houses may both be robbed.
    """

    def rob(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        without_root, with_root = self.dfs(root)
        return max(without_root, with_root)

    def dfs(self, root):
        """Return (best_without_this_node, best_with_this_node) for the subtree."""
        if root is None:
            return 0, 0
        left_without, left_with = self.dfs(root.left)
        right_without, right_with = self.dfs(root.right)
        # Skipping this node: each child subtree may take either option.
        without_node = max(left_without, left_with) + max(right_without, right_with)
        # Taking this node: both children must be skipped.
        with_node = root.val + left_without + right_without
        return without_node, with_node
f7f418828c250c2b31f208cd61683f6e653e9bf9 | 1,780 | py | Python | h2o-py/tests/testdir_misc/pyunit_xgboost_gbm_monotone.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-py/tests/testdir_misc/pyunit_xgboost_gbm_monotone.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-py/tests/testdir_misc/pyunit_xgboost_gbm_monotone.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | from h2o.estimators.xgboost import *
from h2o.estimators.gbm import *
from tests import pyunit_utils
def xgboost_vs_gbm_monotone_test():
    """Check that H2O XGBoost and GBM broadly agree on the importance of a
    monotone-constrained predictor (AGE) on the prostate dataset.
    """
    # XGBoost must be available in this H2O build for the comparison to run.
    assert H2OXGBoostEstimator.available() is True
    # Force AGE to have a monotonically increasing effect in both models.
    monotone_constraints = {
        "AGE": 1
    }
    xgboost_params = {
        "tree_method": "exact",
        "seed": 123,
        "backend": "cpu", # CPU Backend is forced for the results to be comparable
        "monotone_constraints": monotone_constraints
    }
    gbm_params = {
        "seed": 42,
        "monotone_constraints": monotone_constraints
    }
    prostate_hex = h2o.import_file(pyunit_utils.locate('smalldata/prostate/prostate.csv'))
    prostate_hex["CAPSULE"] = prostate_hex["CAPSULE"].asfactor()
    # Train both models on the same response/ignored columns.
    xgboost_model = H2OXGBoostEstimator(**xgboost_params)
    xgboost_model.train(y="CAPSULE", ignored_columns=["ID"], training_frame=prostate_hex)
    gbm_model = H2OGradientBoostingEstimator(**gbm_params)
    gbm_model.train(y="CAPSULE", ignored_columns=["ID"], training_frame=prostate_hex)
    # Map variable name -> last tuple entry of varimp() output
    # (presumably the importance percentage -- confirm against h2o docs).
    xgb_varimp_percentage = dict(map(lambda x: (x[0], x[3]), xgboost_model.varimp(use_pandas=False)))
    gbm_varimp_percentage = dict(map(lambda x: (x[0], x[3]), gbm_model.varimp(use_pandas=False)))
    # We expect the variable importances of AGE to be similar
    assert xgb_varimp_percentage["VOL"] > xgb_varimp_percentage["AGE"]
    assert xgb_varimp_percentage["AGE"] > xgb_varimp_percentage["RACE"]
    print("XGBoost varimp of AGE = %s" % xgb_varimp_percentage["AGE"])
    print("GBM varimp of AGE = %s" % gbm_varimp_percentage["AGE"])
    assert abs(xgb_varimp_percentage["AGE"] - gbm_varimp_percentage["AGE"]) < 0.02
# When run as a script use the H2O standalone-test harness; when imported
# by the test runner, execute the test function directly.
if __name__ == "__main__":
    pyunit_utils.standalone_test(xgboost_vs_gbm_monotone_test)
else:
    xgboost_vs_gbm_monotone_test()
| 34.901961 | 101 | 0.710674 |
a4ceee23ae123a5c0e16835dba2b4fd9c8f8e29e | 2,108 | py | Python | simple_Keras_Model.py | shlpu/Statlie-Image-Processor | e40355f43f344fd02041bdc8ce57b0ee101c6cdb | [
"Apache-2.0"
] | 1 | 2019-11-23T12:58:09.000Z | 2019-11-23T12:58:09.000Z | simple_Keras_Model.py | shlpu/Statlie-Image-Processor | e40355f43f344fd02041bdc8ce57b0ee101c6cdb | [
"Apache-2.0"
] | null | null | null | simple_Keras_Model.py | shlpu/Statlie-Image-Processor | e40355f43f344fd02041bdc8ce57b0ee101c6cdb | [
"Apache-2.0"
] | 3 | 2019-03-27T00:47:08.000Z | 2022-02-05T04:52:48.000Z | from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import MaxPooling2D, Conv2D
from keras.utils import to_categorical
from keras.optimizers import rmsprop
from keras.preprocessing.image import ImageDataGenerator
import os
# Destination for the trained model checkpoint.
save_dir = os.path.join(os.getcwd(), 'Keras_Trained_model')
model_name = 'keras_cifar10_trained_model.ckpt'
from keras.datasets import cifar10
# CIFAR-10 split into train/test images and integer class labels.
(x_train, y_train_label), (x_test, y_test_label) = cifar10.load_data()
print("X_train image Shape" , str(x_train.shape) )
print("X_test image Shape" , str(x_test.shape) )
#Perform data normalization
# Scale pixel values from [0, 255] into [0.0, 1.0].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# One-hot encode the integer labels into 10-way vectors.
y_train_label = to_categorical(y_train_label, 10)
y_test_label = to_categorical(y_test_label, 10)
#Define neural network model
# Conv/pool/dropout feature extractor over 32x32x3 inputs, followed by a
# dense classifier head ending in a 10-way softmax.
model = Sequential()
model.add(Conv2D(48, kernel_size=(3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(Conv2D(48, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(96, (3, 3), activation='relu'))
model.add(Conv2D(96, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(256, activation='relu' ))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
'''
Compile the model, train it briefly, persist it to disk and report the
loss/accuracy on the held-out test split.
'''
# NOTE(review): lowercase `rmsprop` and the `nb_epoch` keyword below are
# legacy Keras 1.x spellings (modern Keras uses `RMSprop` and `epochs`) --
# confirm the installed Keras version before upgrading these calls.
opt = rmsprop(lr=0.0001, decay=1e-6)
model.compile(loss='categorical_crossentropy', optimizer=opt , metrics=['accuracy'])
model.fit(x_train, y_train_label,
          batch_size=200, nb_epoch=1, verbose=1)
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# Evaluate on the test split without progress output.
score = model.evaluate(x_test, y_test_label, verbose=0)
print('CNN Loss value:', score[0])
print('Accuracy:'+ str(score[1]*100) + '%') | 28.876712 | 85 | 0.739089 |
dffc7d0930e0f07ea5b31049ff101c62dd05c5f4 | 2,111 | py | Python | raiden/tests/integration/cli/conftest.py | konradkonrad/raiden | b2fe21eb20cd34a689b8449488f4dc52dbe93069 | [
"MIT"
] | null | null | null | raiden/tests/integration/cli/conftest.py | konradkonrad/raiden | b2fe21eb20cd34a689b8449488f4dc52dbe93069 | [
"MIT"
] | null | null | null | raiden/tests/integration/cli/conftest.py | konradkonrad/raiden | b2fe21eb20cd34a689b8449488f4dc52dbe93069 | [
"MIT"
] | null | null | null | import pytest
from raiden.settings import RED_EYES_CONTRACT_VERSION
from raiden.tests.utils.smoketest import setup_testchain_and_raiden
def append_arg_if_existing(argname, initial_args, new_args):
    """Append ``--arg-name <value>`` to *new_args* (in place) when
    *argname* is a key of *initial_args*; otherwise do nothing.
    """
    if argname not in initial_args:
        return
    flag = '--{}'.format(argname.replace('_', '-'))
    new_args += [flag, initial_args[argname]]
@pytest.fixture(scope='session')
def blockchain_provider():
    """Run ``setup_testchain_and_raiden`` once per test session and return
    its argument dict, with ``password_file`` normalised to a plain path.
    """
    result = setup_testchain_and_raiden(
        transport='matrix',
        matrix_server='auto',
        print_step=lambda x: None,  # silence setup progress output
        # cli tests should work with production contracts
        contracts_version=RED_EYES_CONTRACT_VERSION,
    )
    args = result['args']
    # The setup of the testchain returns a TextIOWrapper but
    # for the tests we need a filename
    args['password_file'] = args['password_file'].name
    return args
@pytest.fixture()
def removed_args():
    """Argument names for ``cli_args`` to drop from the provider config.

    ``None`` (the default) removes nothing; tests can override this fixture.
    """
    return None
@pytest.fixture()
def changed_args():
    """Mapping of argument overrides for ``cli_args`` to apply.

    ``None`` (the default) changes nothing; tests can override this fixture.
    """
    return None
@pytest.fixture()
def cli_args(blockchain_provider, removed_args, changed_args):
    """Build the Raiden CLI argument list from the provider config, after
    applying the per-test removals and overrides.
    """
    options = blockchain_provider.copy()
    # Drop any keys the test asked to remove (missing keys are ignored).
    for name in (removed_args or []):
        options.pop(name, None)
    # Apply the test's explicit overrides.
    options.update(changed_args or {})
    args = [
        '--no-sync-check',
        '--tokennetwork-registry-contract-address',
        options['tokennetwork_registry_contract_address'],
        '--secret-registry-contract-address',
        options['secret_registry_contract_address'],
        '--endpoint-registry-contract-address',
        options['endpoint_registry_contract_address'],
    ]
    # Optional arguments are only emitted when present in the config.
    for optional in (
        'keystore_path',
        'password_file',
        'datadir',
        'network_id',
        'eth_rpc_endpoint',
        'environment_type',
    ):
        append_arg_if_existing(optional, options, args)
    return args
| 30.157143 | 67 | 0.705827 |
63901bb5ba904e022f6823a15cbbcc0adc356fca | 2,448 | py | Python | Algorithms_and_Data_Structure/Implementing_Queue_in_Python.py | nyangweso-rodgers/Computer_Science_Concepts | 94e4d4fe7c892b6e1f3684250c540463c83a7b10 | [
"Apache-2.0"
] | null | null | null | Algorithms_and_Data_Structure/Implementing_Queue_in_Python.py | nyangweso-rodgers/Computer_Science_Concepts | 94e4d4fe7c892b6e1f3684250c540463c83a7b10 | [
"Apache-2.0"
] | null | null | null | Algorithms_and_Data_Structure/Implementing_Queue_in_Python.py | nyangweso-rodgers/Computer_Science_Concepts | 94e4d4fe7c892b6e1f3684250c540463c83a7b10 | [
"Apache-2.0"
] | null | null | null | # Implementing Queue Data Structure
'''
A plain Python list can act as a queue: inserting every new element at
index 0 pushes the older elements toward the end of the list.
'''
price = []
for amount in (100, 200, 300):
    price.insert(0, amount)  # newest element goes to the front
print(price)  # output will be [300,200,100]
'''
Queues are FIFO - First In, First Out.  The first element inserted (100)
sits at the tail of the list, so list.pop() removes it first.
'''
price = []
for amount in (100, 200, 300):
    price.insert(0, amount)
print(price.pop())  # output will be 100
## Remark
'''
A list-backed queue has a hidden cost: Python lists are dynamic arrays,
so when one fills up, a larger contiguous block is allocated and every
existing element is copied over.  Hence lists are not recommended here.
'''
# Using Deque
'''
collections.deque is a double-ended queue: elements can be added and
removed from either end via methods invoked directly with arguments,
without any class of our own.
'''
from collections import deque
queue = deque()
'''
For a queue we always add elements on the left side, so we use the
appendleft() function.
'''
for item in (100, 200, 300):
    queue.appendleft(item)
#print(queue)
'''
queue.pop() now returns 100, because it was the first value that was
inserted into the queue.
'''
print(queue.pop())  # output: 100
# Let's implement all these functions using a class QUEUE in Python
from collections import deque
class Queue():
    """Simple FIFO queue backed by collections.deque.

    Elements are inserted on the left and removed from the right, so the
    oldest element leaves first.  Each method prints the resulting state,
    matching the original demo behaviour.
    """
    def __init__(self):
        # Underlying storage; left end is the "newest" side.
        self.queue = deque()

    def insert(self, value):
        """Add ``value`` to the queue and print the resulting state."""
        self.value = value  # kept: callers may read the last-inserted value
        self.queue.appendleft(value)
        print(self.queue)

    def delete(self):
        """Remove the oldest element, if any, and print the resulting state.

        Fix: previously this raised IndexError when the queue was empty;
        the empty case is now a harmless no-op.
        """
        if self.queue:
            self.queue.pop()
        print(self.queue)

    def length(self):
        """Print the number of queued elements, or "Empty" when none."""
        if len(self.queue) == 0:
            print("Empty")
        else:
            print(len(self.queue))
if __name__ =='__main__':
    # Demo: enqueue two values, then remove the oldest one.
    q = Queue()
    q.insert(100)
    q.insert(200)
    q.delete()
q.length() | 33.081081 | 275 | 0.707925 |
69e41b4844537d2e20df69dc44bf3d6b310d81b4 | 127 | py | Python | app_settings.py | mysiar/seismic-offset-check | b1a0fab8799dc56b156d759015cebbe8c705f739 | [
"MIT"
] | null | null | null | app_settings.py | mysiar/seismic-offset-check | b1a0fab8799dc56b156d759015cebbe8c705f739 | [
"MIT"
] | null | null | null | app_settings.py | mysiar/seismic-offset-check | b1a0fab8799dc56b156d759015cebbe8c705f739 | [
"MIT"
] | null | null | null | """
App Settings
"""
ORG = 'mysiar'  # organisation name
APP = 'SeismicOffsetCheck'  # application name
# Names of individual settings values (presumably used as lookup keys by
# the settings consumer -- verify against callers).
LIMIT_X = 'limit_x'
LIMIT_Y = 'limit_y'
DB_PATH = 'db_path'
| 14.111111 | 26 | 0.653543 |
e522a840605dddbd552dc6400a994f3a0132896d | 1,037 | py | Python | tests/test_scheduler_cancel.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | 7 | 2016-09-27T00:21:46.000Z | 2017-03-18T20:04:29.000Z | tests/test_scheduler_cancel.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | null | null | null | tests/test_scheduler_cancel.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
] | 2 | 2017-03-16T21:47:43.000Z | 2020-10-20T22:58:03.000Z | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Cancel an active schedule. Since this is the last active schedule, it
should cleanly stop the scheduler.
"""
from antevents.base import *
from utils import make_test_publisher
import asyncio
import unittest
class CallAfter(DefaultSubscriber):
    """Subscriber that fires a callback once a fixed number of events
    have been observed.
    """
    def __init__(self, num_events, fn):
        self.events_left = num_events
        self.fn = fn

    def on_next(self, x):
        # Count down; fire exactly when the counter reaches zero.
        self.events_left -= 1
        if self.events_left != 0:
            return
        print("calling fn %s" % self.fn)
        self.fn()
class TestSchedulerCancel(unittest.TestCase):
    """Cancelling the only active schedule should cleanly stop the
    scheduler, letting run_forever() return.
    """
    def test_case(self):
        publisher = make_test_publisher(1)
        publisher.subscribe(print)
        scheduler = Scheduler(asyncio.get_event_loop())
        cancel_schedule = scheduler.schedule_periodic(publisher, 1)
        # After four events the callback cancels the (last) schedule.
        publisher.subscribe(CallAfter(4, cancel_schedule))
        publisher.print_downstream()
        scheduler.run_forever()
        print("got to end")
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| 28.027027 | 72 | 0.671167 |
841e4642ffa496d6fa3d86792df80918d5884e53 | 1,266 | py | Python | test/test_operations.py | azavea/tr-55 | 654b1ef65b031f87dbe5715934b4a3547c3c0c85 | [
"Apache-2.0"
] | 8 | 2016-10-06T07:03:52.000Z | 2022-01-01T08:01:11.000Z | test/test_operations.py | azavea/tr-55 | 654b1ef65b031f87dbe5715934b4a3547c3c0c85 | [
"Apache-2.0"
] | 54 | 2015-05-01T15:02:20.000Z | 2021-09-23T22:08:57.000Z | test/test_operations.py | azavea/tr-55 | 654b1ef65b031f87dbe5715934b4a3547c3c0c85 | [
"Apache-2.0"
] | 10 | 2015-04-30T20:00:55.000Z | 2021-09-17T15:13:56.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
Operation tests.
"""
import unittest
from tr55.operations import dict_plus
class TestOperations(unittest.TestCase):
    """
    Dictionary operation test set.
    """
    def test_plus_1(self):
        """Summing two fully-populated nested dicts adds shared leaves and
        keeps keys unique to either side.
        """
        left = {'x': {'y': {'z': {'a': 1, 'b': 3, 'c': 13}, 'n': 144}}}
        right = {'x': {'y': {'z': {'a': 1, 'b': 5, 'c': 21}, 'm': 610}}}
        expected = {'x': {'y': {'z': {'a': 2, 'b': 8, 'c': 34}, 'n': 144, 'm': 610}}}
        self.assertEqual(dict_plus(left, right), expected)

    def test_plus_2(self):
        """Leaf keys missing on one side are carried over unchanged."""
        left = {'x': {'y': {'z': {'a': 2, 'c': 13}, 'n': 144}}}
        right = {'x': {'y': {'z': {'b': 8, 'c': 21}, 'm': 610}}}
        expected = {'x': {'y': {'z': {'a': 2, 'b': 8, 'c': 34}, 'n': 144, 'm': 610}}}
        self.assertEqual(dict_plus(left, right), expected)

    def test_plus_3(self):
        """Adding ``None`` in place of a subtree leaves the other side intact."""
        left = {'x': {'y': {'z': {'a': 2, 'c': 13}, 'n': 144}}}
        right = {'x': {'y': None}}
        self.assertEqual(dict_plus(left, right), left)
if __name__ == "__main__":
    # Run this module's tests directly.
    unittest.main()
| 26.93617 | 78 | 0.458926 |
2070ea9c3fe97292077255bf1ea72f799a5b415c | 4,168 | py | Python | discord/types/channel.py | ryry013/pycord | 6a9ea97d12b919cf4cc55dc46edace9629997c14 | [
"MIT"
] | null | null | null | discord/types/channel.py | ryry013/pycord | 6a9ea97d12b919cf4cc55dc46edace9629997c14 | [
"MIT"
] | null | null | null | discord/types/channel.py | ryry013/pycord | 6a9ea97d12b919cf4cc55dc46edace9629997c14 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import List, Literal, Optional, TypedDict, Union
from .snowflake import Snowflake
from .threads import ThreadArchiveDuration, ThreadMember, ThreadMetadata
from .user import PartialUser
# Discriminator for permission overwrite targets (see PermissionOverwrite.type).
OverwriteType = Literal[0, 1]


class PermissionOverwrite(TypedDict):
    """Permission overwrite payload attached to a guild channel."""

    id: Snowflake
    type: OverwriteType
    # Allow/deny permission values, serialised as strings.
    allow: str
    deny: str


# Numeric `type` discriminator shared by the channel payloads below.
ChannelType = Literal[0, 1, 2, 3, 4, 5, 6, 10, 11, 12, 13, 15]
class _BaseChannel(TypedDict):
    """Keys shared by every channel payload."""

    id: Snowflake
    name: str


class _BaseGuildChannel(_BaseChannel):
    """Keys shared by channels that belong to a guild."""

    guild_id: Snowflake
    position: int
    permission_overwrites: List[PermissionOverwrite]
    nsfw: bool
    parent_id: Optional[Snowflake]


class PartialChannel(_BaseChannel):
    """Minimal channel reference: id, name and the type discriminator."""

    type: ChannelType


class _TextChannelOptional(TypedDict, total=False):
    """Optional keys for text-like channels (total=False: all may be absent)."""

    topic: str
    last_message_id: Optional[Snowflake]
    last_pin_timestamp: str
    rate_limit_per_user: int
    default_auto_archive_duration: ThreadArchiveDuration
class TextChannel(_BaseGuildChannel, _TextChannelOptional):
    """Guild text channel (type 0)."""

    type: Literal[0]


class NewsChannel(_BaseGuildChannel, _TextChannelOptional):
    """News channel (type 5); shares the text-channel keys."""

    type: Literal[5]


# Discriminator for the voice `video_quality_mode` key.
VideoQualityMode = Literal[1, 2]


class _VoiceChannelOptional(TypedDict, total=False):
    """Optional keys for voice channels (all may be absent)."""

    rtc_region: Optional[str]
    video_quality_mode: VideoQualityMode


class VoiceChannel(_BaseGuildChannel, _VoiceChannelOptional):
    """Guild voice channel (type 2)."""

    type: Literal[2]
    bitrate: int
    user_limit: int
class CategoryChannel(_BaseGuildChannel):
    """Channel category (type 4)."""

    type: Literal[4]


class StoreChannel(_BaseGuildChannel):
    """Store channel (type 6)."""

    type: Literal[6]


class _StageChannelOptional(TypedDict, total=False):
    """Optional keys for stage channels (all may be absent)."""

    rtc_region: Optional[str]
    topic: str


class StageChannel(_BaseGuildChannel, _StageChannelOptional):
    """Stage voice channel (type 13)."""

    type: Literal[13]
    bitrate: int
    user_limit: int


class ForumChannel(_BaseGuildChannel):
    """Forum channel (type 15)."""

    type: Literal[15]
class _ThreadChannelOptional(TypedDict, total=False):
    """Optional keys for thread channels (all may be absent)."""

    member: ThreadMember
    owner_id: Snowflake
    rate_limit_per_user: int
    last_message_id: Optional[Snowflake]
    last_pin_timestamp: str


class ThreadChannel(_BaseChannel, _ThreadChannelOptional):
    """Thread payload (types 10, 11 and 12)."""

    type: Literal[10, 11, 12]
    guild_id: Snowflake
    parent_id: Snowflake
    owner_id: Snowflake
    nsfw: bool
    last_message_id: Optional[Snowflake]
    rate_limit_per_user: int
    message_count: int
    member_count: int
    thread_metadata: ThreadMetadata


# Union of every channel payload that can appear inside a guild.
GuildChannel = Union[
    TextChannel,
    NewsChannel,
    VoiceChannel,
    CategoryChannel,
    StoreChannel,
    StageChannel,
    ThreadChannel,
]
class DMChannel(_BaseChannel):
    """Direct message channel (type 1)."""

    type: Literal[1]
    last_message_id: Optional[Snowflake]
    recipients: List[PartialUser]


class GroupDMChannel(_BaseChannel):
    """Group direct message channel (type 3)."""

    type: Literal[3]
    icon: Optional[str]
    owner_id: Snowflake


# Any channel payload: the guild channel union plus the DM variants.
Channel = Union[GuildChannel, DMChannel, GroupDMChannel]

# Discriminator for StageInstance.privacy_level.
PrivacyLevel = Literal[1, 2]


class StageInstance(TypedDict):
    """Stage instance payload (tied to a guild, a channel and a topic)."""

    id: Snowflake
    guild_id: Snowflake
    channel_id: Snowflake
    topic: str
    privacy_level: PrivacyLevel
    discoverable_disabled: bool
    guild_scheduled_event_id: Snowflake
| 24.232558 | 75 | 0.759837 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.